bokeh.plotting.show

Here are examples of the Python API bokeh.plotting.show, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

42 Examples
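
For orientation, here is a minimal sketch of typical bokeh.plotting.show usage (the figure title, file name and data are illustrative only, not taken from any of the projects below):

    from bokeh.plotting import figure, output_file, show

    output_file("example.html")  # write the plot to a standalone HTML file
    p = figure(title="Minimal example", x_axis_label="x", y_axis_label="y")
    p.line([1, 2, 3, 4], [4, 7, 2, 5], line_width=2)  # a simple line glyph
    show(p)  # open the rendered plot in a browser tab

In a Jupyter notebook, calling output_notebook() beforehand makes show render the plot inline instead of writing a file.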

3 View Source File : plotting.py
License : MIT License
Project Creator : andyljones

    def initialize(self, **kwargs):
        plotters = {}
        for subplot, prefixes in self.groups.items():
            readers = [self.readers[p] for p in prefixes] 
            plotters[subplot] = readers[0].plotter(readers, **kwargs)
        self.plotters = plotters

        children = [p.figure for p in self.plotters.values()]
        grid = bol.gridplot(children, ncols=5, plot_width=300, plot_height=300, merge_tools=False)

        from IPython.display import clear_output
        clear_output(wait=True)
        self.handle = bop.show(grid, notebook_handle=True)

    def refresh(self):

3 View Source File : callback.py
License : Apache License 2.0
Project Creator : awslabs

    def setup_chart(self):
        self.start_time = datetime.datetime.now()
        self.x_axis_val = []
        self.y_axis_val = []
        self.fig.line(self.x_axis_val, self.y_axis_val)
        return bokeh.plotting.show(self.fig, notebook_handle=True)

    def elapsed(self):
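
The two examples above pass notebook_handle=True so that the handle returned by show can later be used to update the displayed plot in place. A minimal sketch of that pattern, assuming a Jupyter environment (the data source and column names are illustrative):

    from bokeh.io import output_notebook, push_notebook
    from bokeh.models import ColumnDataSource
    from bokeh.plotting import figure, show

    output_notebook()                                   # render inline in the notebook
    source = ColumnDataSource(data=dict(x=[0], y=[0]))  # illustrative data source
    fig = figure(plot_width=300, plot_height=300)
    fig.line('x', 'y', source=source)
    handle = show(fig, notebook_handle=True)            # keep a handle to the output cell

    source.stream(dict(x=[1], y=[2]))                   # append new data
    push_notebook(handle=handle)                        # redraw the already-displayed plot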

3 View Source File : backend.py
License : Apache License 2.0
Project Creator : ecmwf

    def plot_graph_pandas(self, frame, time: str, variable: str):
        from bokeh.plotting import figure, show

        p = figure(title="Simple line example", x_axis_label="x", y_axis_label="y")
        p.line(frame[time], frame["value"])
        show(p)

    def option(self, name, default=None):

3 View Source File : mw_plot_bokeh.py
License : MIT License
Project Creator : henrysky

    def show(self, notebook=True):
        if self._in_jupyter and notebook:
            output_notebook() 
        else: 
            pass
        show(self.bokeh_fig)

    def savefig(self, file='MWPlot.html'):

3 View Source File : mw_plot_bokeh.py
License : MIT License
Project Creator : henrysky

    def show(self, notebook=True):
        if self._in_jupyter and notebook: 
            output_notebook()
        else: 
            pass
        show(self.bokeh_fig)

    def savefig(self, file='MWSkyMap.html'):

3 View Source File : base.py
License : MIT License
Project Creator : PatrikHlobil

def show(
    obj, browser=None, new="tab", notebook_handle=False, notebook_url="localhost:8888"
):

    global SUPPRESS_OUTPUT
    if SUPPRESS_OUTPUT:
        return obj

    if OUTPUT_TYPE != "zeppelin":
        bokeh.plotting.show(obj, browser, new, notebook_handle, notebook_url)
    else:
        html_embedded = embedded_html(obj, resources=None)
        print("%html\n\n" + html_embedded)


show.__doc__ = bokeh.plotting.show.__doc__
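
The wrapper above either delegates to bokeh.plotting.show or, when the output type is "zeppelin", prints the plot as embedded HTML via the project's own embedded_html helper. As a rough sketch of the same idea using Bokeh's standard embedding API (an assumption about approach, not the project's code):

    from bokeh.embed import file_html
    from bokeh.plotting import figure
    from bokeh.resources import CDN

    p = figure(title="Embedded example")          # illustrative figure
    p.line([1, 2, 3], [3, 1, 2])
    html = file_html(p, CDN, "Embedded example")  # standalone HTML document as a string
    print("%html\n\n" + html)                     # Zeppelin renders output prefixed with %html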

0 View Source File : activity_details.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def activity_details(local_time, activity_group):

    f'''
    # Activity Details: {local_time} ({activity_group})
    '''

    '''
    $contents
    '''

    '''
    ## Load Data
    
    Open a connection to the database and load the data we require.
    '''

    s = session('-v2')

    activity = std_activity_statistics(s, activity_journal=local_time, activity_group=activity_group)
    health = std_health_statistics(s)
    hr_zones = hr_zones_from_database(s, local_time, activity_group)
    climbs = Statistics(s, sources=climb_sources(s, local_time, activity_group=activity_group)). \
        by_name(SectorCalculator, N.CLIMB_ANY, N.VERTICAL_POWER, like=True).with_. \
        copy_with_units().df
    active = Statistics(s, activity_journal=local_time, activity_group=activity_group). \
        by_name(ActivityCalculator, N.ACTIVE_TIME, N.ACTIVE_DISTANCE). \
        with_.copy_with_units().df.append(climbs)

    f'''
    ## Activity Plots
    
    To the right of each plot of data against distance is a related plot of cumulative data
    (except the last, cadence, which isn't useful and so replaced by HR zones).
    Green and red areas indicate differences between the two dates. 
    Additional red lines on the altitude plot are auto-detected climbs.
    
    Plot tools support zoom, dragging, etc.
    '''

    output_file(filename='/dev/null')

    sp = comparison_line_plot(700, 200, N.DISTANCE_KM, N.MED_SPEED_KMH, activity, ylo=0)
    add_climb_zones(sp, climbs, activity)
    sp_c = cumulative_plot(200, 200, N.MED_SPEED_KMH, activity, ylo=0)
    xrange = sp.x_range if sp else None

    el = comparison_line_plot(700, 200, N.DISTANCE_KM, N.ELEVATION_M, activity, x_range=xrange)
    add_climbs(el, climbs, activity)
    el_c = cumulative_plot(200, 200, N.CLIMB_MS, activity)
    xrange = xrange or (el.x_range if el else None)

    hri = comparison_line_plot(700, 200, N.DISTANCE_KM, N.HR_IMPULSE_10, activity, ylo=0, x_range=xrange)
    add_climb_zones(hri, climbs, activity)
    hri_c = cumulative_plot(200, 200, N.HR_IMPULSE_10, activity, ylo=0)
    xrange = xrange or (hri.x_range if hri else None)

    hr = comparison_line_plot(700, 200, N.DISTANCE_KM, N.HEART_RATE_BPM, activity, x_range=xrange)
    add_hr_zones(hr, activity, N.DISTANCE_KM, hr_zones)
    add_climb_zones(hr, climbs, activity)
    hr_c = cumulative_plot(200, 200, N.HEART_RATE_BPM, activity)
    xrange = xrange or (hr.x_range if hr else None)

    pw = comparison_line_plot(700, 200, N.DISTANCE_KM, N.MED_POWER_ESTIMATE_W, activity, ylo=0, x_range=xrange)
    pw.varea(source=activity, x=N.DISTANCE_KM, y1=0, y2=N.MED_VERTICAL_POWER_W,
             level='underlay', color='black', fill_alpha=0.25)
    add_climb_zones(pw, climbs, activity)
    pw_c = cumulative_plot(200, 200, N.MED_POWER_ESTIMATE_W, activity, ylo=0)
    xrange = xrange or (pw.x_range if pw else None)

    cd = comparison_line_plot(700, 200, N.DISTANCE_KM, N.MED_CADENCE_RPM, activity, ylo=0, x_range=xrange)
    add_climb_zones(cd, climbs, activity)
    hr_h = histogram_plot(200, 200, N.HR_ZONE, activity, xlo=1, xhi=5)

    show(gridplot([[el, el_c], [sp, sp_c], [hri, hri_c], [hr, hr_c], [pw, pw_c], [cd, hr_h]]))

    '''
    ## Activity Maps
    '''

    map = map_plot(400, 400, activity)
    m_el = map_intensity_signed(200, 200, activity, N.GRADE_PC, ranges=map, power=0.5)
    m_sp = map_intensity(200, 200, activity, N.MED_SPEED_KMH, ranges=map, power=2)
    m_hr = map_intensity(200, 200, activity, N.HR_IMPULSE_10, ranges=map)
    m_pw = map_intensity(200, 200, activity, N.MED_POWER_ESTIMATE_W, ranges=map)
    show(row(map, gridplot([[m_el, m_sp], [m_hr, m_pw]], toolbar_location='right')))

    '''
    ## Activity Statistics
    '''

    '''
    Active time and distance exclude pauses.
    '''

    active[[N.ACTIVE_TIME_S, N.ACTIVE_DISTANCE_KM]].dropna(). \
        transform({N.ACTIVE_TIME_S: format_seconds, N.ACTIVE_DISTANCE_KM: format_km})

    '''
    Climbs are auto-detected and shown only for the main activity. They are included in the elevation plot above.
    '''

    if present(climbs, N.CLIMB_TIME):
        display(transform(climbs,
                          {N.CLIMB_TIME: format_seconds, N.CLIMB_ELEVATION: format_metres,
                           N.CLIMB_DISTANCE: format_km, N.CLIMB_GRADIENT: format_percent,
                           N.VERTICAL_POWER: format_watts, N.CLIMB_CATEGORY: lambda x: x}))

    '''
    ## Health and Fitness
    '''

    fitness, fatigue = like(N.FITNESS_ANY, health.columns), like(N.FATIGUE_ANY, health.columns)
    colours = ['black'] * len(fitness) + ['red'] * len(fatigue)
    alphas = [1.0] * len(fitness) + [0.5] * len(fatigue)
    ff = multi_line_plot(900, 300, N.TIME, fitness + fatigue, health, colours, alphas=alphas)
    xrange = ff.x_range if ff else None
    add_multi_line_at_index(ff, N.TIME, fitness + fatigue, health, colours, alphas=alphas, index=-1)
    atd = std_distance_time_plot(900, 200, health, x_range=xrange)
    show(gridplot([[ff], [atd]]))

0 View Source File : all_activities.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def all_activities(start, finish):

    f'''
    # All Activities: {start.split()[0]} - {finish.split()[0]}
    '''

    '''
    $contents
    '''

    '''
    ## Build Maps
    
    Loop over activities, retrieve data, and construct maps. 
    '''

    s = session('-v2')
    maps = [map_thumbnail(100, 120, data)
            for data in (Statistics(s, activity_journal=aj).
                             by_name(ActivityReader, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y).
                             by_name(ActivityCalculator, N.ACTIVE_DISTANCE, N.ACTIVE_TIME).df
                         for aj in s.query(ActivityJournal).
                             filter(ActivityJournal.start >= local_date_to_time(start),
                                    ActivityJournal.start <= local_date_to_time(finish)).
                             order_by(ActivityJournal.start.desc()).all())
            if len(data[N.SPHERICAL_MERCATOR_X].dropna()) > 10]
    print(f'Found {len(maps)} activities')

    '''
    ## Display Maps
    '''

    output_notebook()
    show(htile(maps, 8))

0 View Source File : all_group_activities.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def all_group_activities(start, finish, activity_group):

    f'''
    # All Activities for {activity_group}: {start.split()[0]} - {finish.split()[0]}
    '''

    '''
    $contents
    '''

    '''
    ## Build Maps
    
    Loop over activities, retrieve data, and construct maps. 
    '''

    s = session('-v2')
    maps = [map_thumbnail(100, 120, data)
            for data in (Statistics(s, activity_journal=aj).
                             by_name(ActivityReader, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y).
                             by_name(ActivityCalculator, N.ACTIVE_DISTANCE, N.ACTIVE_TIME).df
                         for aj in s.query(ActivityJournal).
                             filter(ActivityJournal.start >= local_date_to_time(start),
                                    ActivityJournal.start < local_date_to_time(finish),
                                    ActivityJournal.activity_group_id == ActivityGroup.from_name(s, activity_group)).
                             order_by(ActivityJournal.start.desc()).all())
            if len(data[N.SPHERICAL_MERCATOR_X].dropna()) > 10]
    print(f'Found {len(maps)} activities')

    '''
    ## Display Maps
    '''

    output_notebook()
    show(htile(maps, 8))

0 View Source File : compare_activities.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def compare_activities(local_time, compare_time, activity_group):

    f'''
    # Compare Activities: {local_time} v {compare_time} ({activity_group})
    '''

    '''
    $contents
    '''

    '''
    ## Load Data
    
    Open a connection to the database and load the data we require.
    '''

    s = session('-v2')

    activity = std_activity_statistics(s, activity_journal=local_time, activity_group=activity_group)
    compare = std_activity_statistics(s, activity_journal=compare_time, activity_group=activity_group)
    health = std_health_statistics(s)
    hr_zones = hr_zones_from_database(s, local_time, activity_group)
    climbs = Statistics(s, sources=climb_sources(s, local_time, activity_group=activity_group)). \
        by_name(SectorCalculator, N.CLIMB_ANY, N.VERTICAL_POWER, like=True).with_. \
        copy_with_units().df
    active = Statistics(s, activity_journal=local_time, activity_group=activity_group). \
        by_name(ActivityCalculator, N.ACTIVE_TIME, N.ACTIVE_DISTANCE). \
        with_.copy_with_units().df.append(climbs)

    f'''
    ## Activity Plots
    
    The black line shows data from {local_time}, 
    the grey line from {compare_time}. 
    To the right of each plot of data against distance is a related plot of cumulative data
    (except the last, cadence, which isn't useful and so replaced by HR zones).
    Green and red areas indicate differences between the two dates. 
    Additional red lines on the altitude plot are auto-detected climbs.
    
    Plot tools support zoom, dragging, etc.
    '''

    output_file(filename='/dev/null')

    sp = comparison_line_plot(700, 200, N.DISTANCE_KM, N.MED_SPEED_KMH, activity, other=compare, ylo=0)
    add_climb_zones(sp, climbs, activity)
    sp_c = cumulative_plot(200, 200, N.MED_SPEED_KMH, activity, other=compare, ylo=0)
    xrange = sp.x_range if sp else None

    el = comparison_line_plot(700, 200, N.DISTANCE_KM, N.ELEVATION_M, activity, other=compare, x_range=xrange)
    add_climbs(el, climbs, activity)
    el_c = cumulative_plot(200, 200, N.CLIMB_MS, activity, other=compare)
    xrange = xrange or (el.x_range if el else None)

    hri = comparison_line_plot(700, 200, N.DISTANCE_KM, N.HR_IMPULSE_10, activity, other=compare, ylo=0, x_range=xrange)
    add_climb_zones(hri, climbs, activity)
    hri_c = cumulative_plot(200, 200, N.HR_IMPULSE_10, activity, other=compare, ylo=0)
    xrange = xrange or (hri.x_range if hri else None)

    hr = comparison_line_plot(700, 200, N.DISTANCE_KM, N.HEART_RATE_BPM, activity, other=compare, x_range=xrange)
    add_hr_zones(hr, activity, N.DISTANCE_KM, hr_zones)
    add_climb_zones(hr, climbs, activity)
    hr_c = cumulative_plot(200, 200, N.HEART_RATE_BPM, activity, other=compare)
    xrange = xrange or (hr.x_range if hr else None)

    pw = comparison_line_plot(700, 200, N.DISTANCE_KM, N.MED_POWER_ESTIMATE_W, activity, other=compare, ylo=0, x_range=xrange)
    add_climb_zones(pw, climbs, activity)
    pw_c = cumulative_plot(200, 200, N.MED_POWER_ESTIMATE_W, activity, other=compare, ylo=0)
    xrange = xrange or (pw.x_range if pw else None)

    cd = comparison_line_plot(700, 200, N.DISTANCE_KM, N.MED_CADENCE_RPM, activity, other=compare, ylo=0, x_range=xrange)
    add_climb_zones(cd, climbs, activity)
    hr_h = histogram_plot(200, 200, N.HR_ZONE, activity, xlo=1, xhi=5)

    show(gridplot([[el, el_c], [sp, sp_c], [hri, hri_c], [hr, hr_c], [pw, pw_c], [cd, hr_h]]))

    '''
    ## Activity Maps
    '''

    map = map_plot(400, 400, activity, other=compare)
    m_el = map_intensity_signed(200, 200, activity, N.GRADE_PC, ranges=map, power=0.5)
    m_sp = map_intensity(200, 200, activity, N.MED_SPEED_KMH, ranges=map, power=2)
    m_hr = map_intensity(200, 200, activity, N.HR_IMPULSE_10, ranges=map)
    m_pw = map_intensity(200, 200, activity, N.MED_POWER_ESTIMATE_W, ranges=map)
    show(row(map, gridplot([[m_el, m_sp], [m_hr, m_pw]], toolbar_location='right')))

    '''
    ## Activity Statistics
    '''

    '''
    Active time and distance exclude pauses.
    '''

    active[[N.ACTIVE_TIME_S, N.ACTIVE_DISTANCE_KM]].dropna(). \
        transform({N.ACTIVE_TIME_S: format_seconds, N.ACTIVE_DISTANCE_KM: format_km})

    '''
    Climbs are auto-detected and shown only for the main activity. They are included in the elevation plot above.
    '''

    if present(climbs, N.CLIMB_TIME):
        display(transform(climbs,
                          {N.CLIMB_TIME: format_seconds, N.CLIMB_ELEVATION: format_metres,
                           N.CLIMB_DISTANCE: format_km, N.CLIMB_GRADIENT: format_percent,
                           N.VERTICAL_POWER: format_watts, N.CLIMB_CATEGORY: lambda x: x}))

    '''
    ## Health and Fitness
    '''

    fitness, fatigue = like(N.FITNESS_ANY, health.columns), like(N.FATIGUE_ANY, health.columns)
    colours = ['black'] * len(fitness) + ['red'] * len(fatigue)
    alphas = [1.0] * len(fitness) + [0.5] * len(fatigue)
    ff = multi_line_plot(900, 300, N.TIME, fitness + fatigue, health, colours, alphas=alphas)
    xrange = ff.x_range if ff else None
    add_multi_line_at_index(ff, N.TIME, fitness + fatigue, health, colours, alphas=alphas, index=-1)
    atd = std_distance_time_plot(900, 200, health, x_range=xrange)
    show(gridplot([[ff], [atd]]))

0 View Source File : fit_power_parameters.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def fit_power_parameters(bookmark, large):

    f'''
    # Fit Power Parameters to {bookmark}

    This notebook allows you to calculate power parameters (CdA - air resistance, Crr - rolling resistance)
    from bookmarked activities.

    Beforehand you should have generated the bookmark by running

        > python -m ch2.data.coasting

    or similar to identify sections of activities with little pedalling.
    See that module for more information.

    The `large` parameter means that each bookmark is taken in its entirety.
    The alternative is that they are divided into small sub-samples reflecting the data sample rate.
    '''

    '''
    $contents
    '''

    '''
    ## Load Data
    
    Open a connection to the database and load the data we require.
    '''
    s = session('-v 5')
    large = strtobool(large)
    route = Statistics(s, bookmarks=bookmarks(s, bookmark)). \
        by_name(ActivityReader, N.LATITUDE, N.LONGITUDE, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y,
                N.DISTANCE, N.ELEVATION, N.SPEED, N.CADENCE)
    route.sort_index(inplace=True)  # bookmarks are not sorted by time
    if large:
        route, max_gap = bookend(route), None
    else:
        max_gap = 10
    route = add_differentials(route, max_gap=max_gap)
    if large:
        route = route.iloc[1::2]
    route.describe()

    '''
    ## Add Energy Calculations
    '''
    weight = 64+12  # weight of rider + bike / kg  todo - extract weight from db
    route = add_energy_budget(route, weight)
    route = add_cda_estimate(route)
    route = add_crr_estimate(route, weight)
    route.describe()

    '''
    ## Plot Constraints
    
    The calculations above added an energy budget for each "step" in the data.
    These included values for CdA and Crr that would "explain" the decrease in energy 
    (taking each alone - so the CdA is that required for all energy lost to air resistance, 
    the Crr is that required for all energy lost to rolling resistance).
    
    But we know that both CdA and Crr could be important.
    So what we want is a linear combination of the two.
    For example, maybe the energy loss is 90% due to CdA and 10% due to Crr.
    All these possible linear combinations lie on a line that joins 100% CdA and 0% Crr with 0% CdA and 100% Crr.
    
    So the plot below shows all possible combinations of CdA and Crr.
    And what we are looking for is the most common value.
    So we want to know where the plot is darkest / the lines are most dense. 
    '''
    output_notebook()
    f = figure(plot_width=500, plot_height=400)
    clean = route.loc[route[N.DELTA_ENERGY] < 0].dropna()
    cs = pd.DataFrame({N.CDA: [(0, cda) for cda in clean[N.CDA]],
                       N.CRR: [(crr, 0) for crr in clean[N.CRR]]})
    f.multi_line(xs=N.CDA, ys=N.CRR, source=cs, line_alpha=0.1, line_color='black')
    f.xaxis.axis_label = 'CdA'
    f.yaxis.axis_label = 'Crr'
    show(f)

    '''
    ## CdA Only
    
    If we ignore Crr then we can estimate CdA by looking at the relative number of constraints of CdA
    where Crr is zero.
    
    We do this by fitting to binned data.
    The peak in the fit(s) gives the value of CdA if Crr is unimportant.
    '''
    bins = np.linspace(0, 1.5, 30)
    width = bins[1] - bins[0]
    counts = clean[N.CDA].groupby(pd.cut(clean[N.CDA], bins)).size()
    print(counts.describe())

    cda = pd.DataFrame({N.CDA: 0.5 * (bins[:-1] + bins[1:]), 'n': counts.values})
    f = figure(plot_width=900, plot_height=300)
    f.quad(top=counts, left=bins[:-1]+0.1*width, right=bins[1:]-0.1*width, bottom=0)
    for order in range(2, 20, 2):
        coeff = sp.polyfit(cda[N.CDA], cda['n'], order)
        p = sp.poly1d(coeff)
        print(order, fmin(lambda x: -p(x), 0.6, disp=0)[0])
        f.line(x=cda[N.CDA], y=p(cda[N.CDA]), line_color='orange')
    show(f)

    '''
    ## Sample Constraints
    
    If we want to include Crr then we need to find a way to measure the "peak" in the messy line plot above.
    To do this we convert to a collection of points and then fit a 2D density function.
    
    Conversion to points is done by selecting points at random on each line.
    (You might think that shorter lines should generate less points.
    The way I see it, each line is an observation that constrains CdA and Crr.
    Each observation has equal weight, so each line generates a point.)
    
    Random points avoids any systematic patterns from uniform sampling 
    and allows re-runs to give some idea of noise. 
    '''

    def sample():
        clean.loc[:, 'random'] = np.random.random(size=len(clean))
        clean.loc[:, 'x'] = clean[N.CDA] * clean['random']
        clean.loc[:, 'y'] = clean[N.CRR] * (1 - clean['random'])
        return clean.loc[:, ['x', 'y']]

    s = pd.concat([sample() for _ in range(100 if large else 10)])
    print(s.describe())

    f = figure(plot_width=600, plot_height=600)
    f.scatter(x='x', y='y', source=s)
    show(f)

    '''
    ## Smooth, Find Maximum
    
    We generate and plot a Gaussian kernel density estimate.
    
    See https://towardsdatascience.com/simple-example-of-2d-density-plots-in-python-83b83b934f67
    
    You may want to play around with bandwidth by supplying a second argument to gaussian_kde. 
    See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html
    '''

    kernel = sp.stats.gaussian_kde(s.transpose())

    xmin, xmax = 0, 1
    ymin, ymax = 0, 0.02
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    xy = np.vstack([xx.ravel(), yy.ravel()])
    smooth = np.reshape(kernel(xy), xx.shape)

    fig = plt.figure(figsize=(8,8))
    ax = fig.gca()
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.contourf(xx, yy, smooth, cmap='coolwarm')
    cset = ax.contour(xx, yy, smooth, colors='k')
    ax.clabel(cset, inline=1, fontsize=10)
    ax.set_xlabel('CdA')
    ax.set_ylabel('Crr')
    plt.title('2D Gaussian Kernel density estimation')

    '''
    For my data this shows (roughly):
    
    * large=True: Crr ~ 0.005-0.006 and CdA ~ 0.40-0.45
    
    * large=False: Crr ~ 0.01 and CdA ~ 0.35-0.40
    
    which supports the idea that Crr isn't well-constrained by the data.
    
    I chose large=True values since I suspect the larger intervals make the elevation values more accurate, 
    
    Since I am loading data with a command like
    
        > ch2 activities -D kit=cotic -- ~/archive/fit/**/*.fit
    
    I define this constant:
    
        > ch2 constants add --single Power.cotic \
            --description 'Bike namedtuple values to calculate power for this kit' \
            --validate ch2.stats.calculate.power.Bike
        > ch2 constants set Power.cotic '{"cda": 0.42, "crr": 0.0055, "weight": 12}'
        
    With that, the standard configuration should calculate power estimates.
    '''

0 View Source File : gmap_activities.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def gmap_activities(start, finish, activity_group, google_key):

    f'''
    # Google Maps Activities: {start.split()[0]} - {finish.split()[0]} / {activity_group}
    '''

    '''
    $contents
    '''

    '''
    ## Read Data
    '''

    s = session('-v2')
    data_frames = [activity_statistics(s, LATITUDE, LONGITUDE, activity_journal=aj)
                   for aj in s.query(ActivityJournal).
                       filter(ActivityJournal.start >= local_date_to_time(start),
                              ActivityJournal.start < local_date_to_time(finish),
                              ActivityJournal.activity_group == ActivityGroup.from_name(s, activity_group)).
                       all()]
    data_frames = [data_frame.dropna() for data_frame in data_frames if not data_frame.dropna().empty]
    print(f'Found {len(data_frames)} activities')

    '''
    ## Calculate Centre
    '''

    ll = [(data_frame[LATITUDE].mean(), data_frame[LONGITUDE].mean()) for data_frame in data_frames]
    ll = list(zip(*ll))
    ll = (median(ll[0]), median(ll[1]))

    '''
    ## Display
    '''

    map_options = GMapOptions(lat=ll[0], lng=ll[1], map_type="roadmap", scale_control=True)
    f = gmap(google_key, map_options, title=f'{start.split()[0]} - {finish.split()[0]} / {activity_group}',
             tools='pan,zoom_in,zoom_out,reset,undo,redo,save', output_backend=DEFAULT_BACKEND)
    for data_frame in data_frames:
        f.line(x=LONGITUDE, y=LATITUDE, source=data_frame)

    show(f)

0 View Source File : health.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def health():

    '''
    # Health
    '''

    '''
    $contents
    '''

    '''
    ## Load Data
    
    Open a connection to the database and load the data we require.
    '''

    s = session('-v2')
    health = std_health_statistics(s)

    '''
    ## Health and Fitness
    '''

    output_file(filename='/dev/null')

    fitness, fatigue = like(N.FITNESS_ANY, health.columns), like(N.FATIGUE_ANY, health.columns)
    colours = ['black'] * len(fitness) + ['red'] * len(fatigue)
    alphas = [1.0] * len(fitness) + [0.5] * len(fatigue)
    ff = multi_line_plot(900, 300, N.TIME, fitness + fatigue, health, colours, alphas=alphas)
    xrange = ff.x_range if ff else None
    add_multi_line_at_index(ff, N.TIME, fitness + fatigue, health, colours, alphas=alphas, index=-1)
    atd = std_distance_time_plot(900, 200, health, x_range=xrange)
    show(gridplot([[ff], [atd]]))

0 View Source File : month.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def month(month):

    f'''
    # Month: {month}
    '''

    '''
    $contents
    '''

    '''
    ## Preparation
    '''
    
    s = session('-v2')
    output_file(filename='/dev/null')
    map_size = 100
    month_start = to_date(month).replace(day=1)

    '''
    ## Generate Plot
    '''

    def days():

        for i in Calendar().iterweekdays():
            yield Div(text=f'<h2>{day_name[i]}</h2>')

        day = month_start - dt.timedelta(days=month_start.weekday())
        while day.replace(day=1) <= month_start:
            for weekday in range(7):
                if day.month == month_start.month:
                    contents = [Div(text=f'<h1>{day.strftime("%d")}</h1>')]
                    for a in s.query(ActivityJournal). \
                            filter(ActivityJournal.start >= local_date_to_time(day),
                                   ActivityJournal.start < local_date_to_time(day + dt.timedelta(days=1))).all():
                        df = Statistics(s, activity_journal=a). \
                            by_name(ActivityReader, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y).df
                        contents.append(map_thumbnail(map_size, map_size, df, title=False))
                        df = Statistics(s, activity_journal=a). \
                            by_name(ActivityCalculator, N.ACTIVE_DISTANCE, N.ACTIVE_TIME).df
                        contents.append(Div(
                            text=f'{format_km(df[N.ACTIVE_DISTANCE][0])} {format_seconds(df[N.ACTIVE_TIME][0])}'))
                else:
                    contents = [Spacer()]
                yield column(contents)
                day += dt.timedelta(days=1)

    show(grid(list(days()), ncols=7))

0 View Source File : similar_activities.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def similar_activities(local_time):

    f'''
    # Similar Activities: {local_time.split()[0]}
    '''

    '''
    $contents
    '''

    '''
    ## Build Maps
    
    Loop over activities, retrieve data, and construct maps. 
    '''

    s = session('-v2')

    maps = [map_thumbnail(100, 120, data)
            for data in (Statistics(s, activity_journal=similar[0]).
                             by_name(ActivityReader, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y).
                             by_name(ActivityCalculator, N.ACTIVE_DISTANCE, N.ACTIVE_TIME).df
                         for similar in nearby_activities(s, local_time=local_time))
            if len(data[N.SPHERICAL_MERCATOR_X].dropna()) > 10]

    print(f'Found {len(maps)} activities')

    '''
    ## Display Maps
    '''

    output_file(filename='/dev/null')
    show(htile(maps, 8))

0 View Source File : some_activities.py
License : GNU Affero General Public License v3.0
Project Creator : andrewcooke

def some_activities(constraint):

    f'''
    # Some Activities: {constraint}

    This displays thumbnails of routes that match the query over statistics.  For example,

        Active Distance > 40 & Active Distance < 60

    will show all activities with a distance between 40 and 60 km.
    '''

    '''
    $contents
    '''

    '''
    ## Build Maps
    
    Loop over activities, retrieve data, and construct maps. 
    '''

    s = session('-v2')
    maps = [map_thumbnail(100, 120, data)
            for data in (activity_statistics(s, SPHERICAL_MERCATOR_X, SPHERICAL_MERCATOR_Y,
                                             ACTIVE_DISTANCE, TOTAL_CLIMB,
                                             activity_journal=aj)
                         for aj in constrained_sources(s, constraint))
            if len(data[SPHERICAL_MERCATOR_X].dropna()) > 10]
    print(f'Found {len(maps)} activities')

    '''
    ## Display Maps
    '''

    output_notebook()
    show(htile(maps, 8))

0 View Source File : plots.py
License : Apache License 2.0
Project Creator : awslabs

def mousover_plot(datadict, attr_x, attr_y, attr_color=None, attr_size=None, save_file=None, plot_title="",
                  point_transparency = 0.5, point_size=20, default_color="#2222aa", hidden_keys = [], show_plot=False):
    """ Produces dynamic scatter plot that can be interacted with by mousing over each point to see its label
        Args:
            datadict (dict): keys contain attributes, values of lists of data from each attribute to plot (each list index corresponds to datapoint).
                             The values of all extra keys in this dict are considered (string) labels to assign to datapoints when they are moused over.
                             Apply _formatDict() to any entries in datadict which are themselves dicts.
            attr_x (str): name of column in dataframe whose values are shown on x-axis (eg. 'latency'). Can be categorical or numeric values
            attr_y (str): name of column in dataframe whose values are shown on y-axis (eg. 'validation performance'). Must be numeric values.
            attr_size (str): name of column in dataframe whose values determine size of dots (eg. 'memory consumption'). Must be numeric values.
            attr_color (str): name of column in dataframe whose values determine color of dots  (eg. one of the hyperparameters). Can be categorical or numeric values
            point_labels (list): list of strings describing the label for each dot (must be in same order as rows of dataframe)
            save_file (str): where to save plot to (html) file (if None, plot is not saved)
            plot_title (str): Title of plot and html file
            point_transparency (float): alpha value of points, lower = more transparent
            point_size (int): size of points, higher = larger
            hidden keys (list[str]): which keys of datadict NOT to show labels for.
            show_plot (bool): whether to show plot
    """
    try:
        with warning_filter():
            import bokeh
            from bokeh.plotting import output_file, ColumnDataSource, show, figure, save
            from bokeh.models import HoverTool, CategoricalColorMapper, LinearColorMapper, Legend, LegendItem, ColorBar
            from bokeh.palettes import Category20
    except ImportError:
        warnings.warn('AutoGluon summary plots cannot be created because bokeh is not installed. To see plots, please do: "pip install bokeh==2.0.1"')
        return None

    n = len(datadict[attr_x])
    for key in datadict.keys():  # Check lengths are all the same
        if len(datadict[key]) != n:
            raise ValueError("Key %s in datadict has different length than %s" % (key, attr_x))

    attr_x_is_string = any([type(val)==str for val in datadict[attr_x]])
    if attr_x_is_string:
        attr_x_levels = list(set(datadict[attr_x]))  # use this to translate between int-indices and x-values
        og_x_vals = datadict[attr_x][:]
        attr_x2 = attr_x + "___"  # this key must not already be in datadict.
        hidden_keys.append(attr_x2)
        datadict[attr_x2] = [attr_x_levels.index(category) for category in og_x_vals] # convert to ints

    legend = None
    if attr_color is not None:
        attr_color_is_string = any([type(val) == str for val in datadict[attr_color]])
        color_datavals = datadict[attr_color]
        if attr_color_is_string:
            attr_color_levels = list(set(color_datavals))
            colorpalette = Category20[20]
            color_mapper = CategoricalColorMapper(factors=attr_color_levels, palette=[colorpalette[2*i % len(colorpalette)] for i in range(len(attr_color_levels))])
            legend = attr_color
        else:
            color_mapper = LinearColorMapper(palette='Magma256', low=min(datadict[attr_color]), high=max(datadict[attr_color])*1.25)
        default_color = {'field': attr_color, 'transform': color_mapper}

    if attr_size is not None:  # different size for each point, ensure mean-size == point_size
        attr_size2 = attr_size + "____"
        hidden_keys.append(attr_size2)
        og_sizevals = np.array(datadict[attr_size])
        sizevals = point_size + (og_sizevals - np.mean(og_sizevals))/np.std(og_sizevals) * (point_size/2)
        if np.min(sizevals) < 0:
            sizevals = -np.min(sizevals) + sizevals + 1.0
        datadict[attr_size2] = list(sizevals)
        point_size = attr_size2

    if save_file is not None:
        output_file(save_file, title=plot_title)
        print("Plot summary of models saved to file: %s" % save_file)

    source = ColumnDataSource(datadict)
    TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,save"
    p = figure(title=plot_title, tools=TOOLS)
    if attr_x_is_string:
        circ = p.circle(attr_x2, attr_y, line_color=default_color, line_alpha = point_transparency,
                        fill_color=default_color, fill_alpha=point_transparency, size=point_size, source=source)
    else:
        circ = p.circle(attr_x, attr_y, line_color=default_color, line_alpha = point_transparency,
                        fill_color=default_color, fill_alpha=point_transparency, size=point_size, source=source)
    hover = p.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([(key,'@'+key+'{safe}') for key in datadict.keys() if key not in hidden_keys])
    # Format axes:
    p.xaxis.axis_label = attr_x
    p.yaxis.axis_label = attr_y
    if attr_x_is_string: # add x-ticks:
        p.xaxis.ticker = list(range(len(attr_x_levels)))
        p.xaxis.major_label_overrides = {i: attr_x_levels[i] for i in range(len(attr_x_levels))}

    # Legend additions:
    if attr_color is not None and attr_color_is_string:
        legend_it = []
        for i in range(len(attr_color_levels)):
            legend_it.append(LegendItem(label=attr_color_levels[i], renderers=[circ], index=datadict[attr_color].index(attr_color_levels[i])))
        legend = Legend(items=legend_it, location=(0, 0))
        p.add_layout(legend, 'right')

    if attr_color is not None and not attr_color_is_string:
        color_bar = ColorBar(color_mapper=color_mapper, title = attr_color,
                             label_standoff=12, border_line_color=None, location=(0,0))
        p.add_layout(color_bar, 'right')

    if attr_size is not None:
        p.add_layout(Legend(items=[LegendItem(label='Size of points based on "'+attr_size + '"')]), 'below')

    if show_plot:
        show(p)
    elif save_file is not None:
        save(p)

0 View Source File : plot.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : canzarlab

def show(plot_to_show):
    """Display a plot, either interactive or static.

    Parameters
    ----------
    plot_to_show: Output of a plotting command (matplotlib axis or bokeh figure)
        The plot to show

    Returns
    -------
    None
    """
    if isinstance(plot_to_show, plt.Axes):
        show_static()
    elif isinstance(plot_to_show, bpl.Figure):
        show_interactive(plot_to_show)
    else:
        raise ValueError(
            "The type of ``plot_to_show`` was not valid, or not understood."
        )


def points(

0 View Source File : plot.py
License : MIT License
Project Creator : dmitriy-serdyuk

    def __init__(self, document, channels, open_browser=False,
                 start_server=False, server_url=None, **kwargs):
        if not BOKEH_AVAILABLE:
            raise ImportError

        if server_url is None:
            server_url = config.bokeh_server

        self.plots = {}
        self.start_server = start_server
        self.document = document
        self.server_url = server_url
        self._startserver()

        # Create figures for each group of channels
        self.p = []
        self.p_indices = {}
        self.color_indices = {}
        for i, channel_set in enumerate(channels):
            channel_set_opts = {}
            if isinstance(channel_set, dict):
                channel_set_opts = channel_set
                channel_set = channel_set_opts.pop('channels')
            channel_set_opts.setdefault('title',
                                        '{} #{}'.format(document, i + 1))
            channel_set_opts.setdefault('x_axis_label', 'iterations')
            channel_set_opts.setdefault('y_axis_label', 'value')
            self.p.append(figure(**channel_set_opts))
            for j, channel in enumerate(channel_set):
                self.p_indices[channel] = i
                self.color_indices[channel] = j
        if open_browser:
            show()

        kwargs.setdefault('after_epoch', True)
        kwargs.setdefault("before_first_epoch", True)
        kwargs.setdefault("after_training", True)
        super(Plot, self).__init__(**kwargs)

    @property

0 View Source File : dominance.py
License : MIT License
Project Creator : dominance-analysis

	def plot_waterfall_relative_importance(self,incremental_rsquare_df):
		index = list(incremental_rsquare_df['Features'].values)
		data = {'Percentage Relative Importance': list(incremental_rsquare_df['percentage_incremental_r2'].values)}
		df = pd.DataFrame(data=data,index=index)
		
		net = df['Percentage Relative Importance'].sum()
		# print("Net ",net)

		df['running_total'] = df['Percentage Relative Importance'].cumsum()
		df['y_start'] = df['running_total'] - df['Percentage Relative Importance']

		df['label_pos'] = df['running_total']

		df_net = pd.DataFrame.from_records([(net, net, 0, net)],
			columns=['Percentage Relative Importance', 'running_total', 'y_start', 'label_pos'],index=["net"])
		
		df = df.append(df_net)

		df['color'] = '#1de9b6'
		df.loc[df['Percentage Relative Importance'] == 100, 'color'] = '#29b6f6'
		df.loc[df['Percentage Relative Importance'] < 0, 'label_pos'] = df.label_pos - 10000
		df["bar_label"] = df["Percentage Relative Importance"].map('{:,.1f}'.format)

		TOOLS = "reset,save"
		source = ColumnDataSource(df)
		p = figure(tools=TOOLS, x_range=list(df.index), y_range=(0, net+10),
			plot_width=1000, title = "Percentage Relative Importance Waterfall")

		p.segment(x0='index', y0='y_start', x1="index", y1='running_total',
			source=source, color="color", line_width=35)

		p.grid.grid_line_alpha=0.4
		p.yaxis[0].formatter = NumeralTickFormatter(format="(0 a)")
		p.xaxis.axis_label = "Predictors"
		p.yaxis.axis_label = "Percentage Relative Importance(%)"
		p.xaxis.axis_label_text_font_size='12pt'
		p.yaxis.axis_label_text_font_size='12pt'

		labels = LabelSet(x='index', y='label_pos', text='bar_label',
		text_font_size="11pt", level='glyph',
		x_offset=-14, y_offset=0, source=source)
		p.add_layout(labels)
		p.xaxis.major_label_orientation = -math.pi/4
		show(p)

	def plot_incremental_rsquare(self):

0 View Source File : container.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : griquelme

    def pca_scores(self, x_pc: int = 1, y_pc: int = 2, hue: str = _sample_class,
                   ignore_classes: Optional[List[str]] = None,
                   show_order: bool = False, scaling: Optional[str] = None,
                   normalization: Optional[str] = None, draw: bool = True,
                   fig_params: Optional[dict] = None,
                   scatter_params: Optional[dict] = None
                   ) -> bokeh.plotting.Figure:
        """
        plots PCA scores
        
        Parameters
        ----------
        x_pc: int
            Principal component number to plot along X axis.
        y_pc: int
            Principal component number to plot along Y axis.
        hue: {"class", "type", "batch"}
            How to color samples. "class" color points according to sample
            class, "type" color points according to the sample type
            assigned in the mapping and "batch" uses batch information. Samples
            classes without a mapping are not shown in the plot
        ignore_classes : list[str], optional
            classes in the data to ignore to build the PCA model.
        show_order: bool
            add a label with the run order.
        scaling: {`autoscaling`, `rescaling`, `pareto`}, optional
            scaling method.
        normalization: {`sum`, `max`, `euclidean`}, optional
            normalization method
        draw: bool
            If True calls bokeh.plotting.show on fig.
        fig_params: dict, optional
            Optional parameters to pass to bokeh figure
        scatter_params: dict, optional
            Optional parameters to pass to bokeh scatter plot.
        
        Returns
        -------
        bokeh.plotting.Figure.
        """
        default_fig_params = {"aspect_ratio": 1}
        if fig_params is None:
            fig_params = default_fig_params
        else:
            default_fig_params.update(fig_params)
            fig_params = default_fig_params

        default_scatter_params = {"size": 6}
        if scatter_params is None:
            scatter_params = default_scatter_params
        else:
            default_scatter_params.update(scatter_params)
            scatter_params = default_scatter_params

        tooltips = [(_sample_class, "@{}".format(_sample_class)),
                    (_sample_order, "@{}".format(_sample_order)),
                    (_sample_batch, "@{}".format(_sample_batch)),
                    (_sample_id, "@{}".format(_sample_id))]
        fig = bokeh.plotting.figure(tooltips=tooltips, **fig_params)

        x_name = "PC" + str(x_pc)
        y_name = "PC" + str(y_pc)
        n_comps = max(x_pc, y_pc)
        score, _, variance, total_var = \
            self._data_container.metrics.pca(n_components=n_comps,
                                             ignore_classes=ignore_classes,
                                             normalization=normalization,
                                             scaling=scaling)
        score = score.join(self._data_container.sample_metadata)

        if hue == _sample_type:
            rev_map = _reverse_mapping(self._data_container.mapping)
            score[_sample_type] = (score[_sample_class]
                                   .apply(lambda x: rev_map.get(x)))
            score = score[~pd.isna(score[_sample_type])]
        elif hue == _sample_batch:
            score[_sample_batch] = score[_sample_batch].astype(str)

        # setup the colors
        unique_values = score[hue].unique().astype(str)
        score = ColumnDataSource(score)
        cmap = Category10[10]
        palette = cmap * (int(unique_values.size / len(cmap)) + 1)
        palette = palette[:unique_values.size]
        # TODO: Category10_3 should be in a parameter file

        fig.scatter(source=score, x=x_name, y=y_name,
                    color=factor_cmap(hue, palette, unique_values),
                    legend_group=hue, **scatter_params)

        #  figure appearance
        x_label = x_name + " ({:.1f} %)"
        x_label = x_label.format(variance[x_pc - 1] * 100 / total_var)
        y_label = y_name + " ({:.1f} %)"
        y_label = y_label.format(variance[y_pc - 1] * 100 / total_var)
        fig.xaxis.axis_label = x_label
        fig.yaxis.axis_label = y_label
        fig.yaxis.axis_label_text_font_style = "bold"
        fig.xaxis.axis_label_text_font_style = "bold"

        if show_order:
            labels = LabelSet(x=x_name, y=y_name, text=_sample_order,
                              level="glyph", x_offset=3, y_offset=3,
                              source=score, render_mode="canvas",
                              text_font_size="8pt")
            fig.add_layout(labels)

        if draw:
            bokeh.plotting.show(fig)
        return fig

    def pca_loadings(self, x_pc=1, y_pc=2, scaling: Optional[str] = None,

0 View Source File : container.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : griquelme

    def pca_loadings(self, x_pc=1, y_pc=2, scaling: Optional[str] = None,
                     normalization: Optional[str] = None, draw: bool = True,
                     fig_params: Optional[dict] = None,
                     scatter_params: Optional[dict] = None
                     ) -> bokeh.plotting.Figure:
        """
        plots PCA loadings.

        Parameters
        ----------
        x_pc: int
            Principal component number to plot along X axis.
        y_pc: int
            Principal component number to plot along Y axis.
        scaling: {`autoscaling`, `rescaling`, `pareto`}, optional
            scaling method.
        normalization: {`sum`, `max`, `euclidean`}, optional
            normalizing method
        draw: bool
            If True, calls bokeh.plotting.show on figure
        fig_params: dict, optional
            Optional parameters to pass into bokeh figure
        scatter_params: dict, optional
            Optional parameters to pass into bokeh scatter plot.


        Returns
        -------
        bokeh.plotting.Figure.
        """
        default_fig_params = {"aspect_ratio": 1}
        if fig_params is None:
            fig_params = default_fig_params
        else:
            default_fig_params.update(fig_params)
            fig_params = default_fig_params

        if scatter_params is None:
            scatter_params = dict()

        tooltips = [("feature", "@feature"), ("m/z", "@mz"),
                    ("rt", "@rt"), ("charge", "@charge")]
        fig = bokeh.plotting.figure(tooltips=tooltips, **fig_params)

        x_name = "PC" + str(x_pc)
        y_name = "PC" + str(y_pc)
        n_comps = max(x_pc, y_pc)
        _, loadings, variance, total_var = \
            self._data_container.metrics.pca(n_components=n_comps,
                                             normalization=normalization,
                                             scaling=scaling)
        loadings = loadings.join(self._data_container.feature_metadata)
        loadings = ColumnDataSource(loadings)

        fig.scatter(source=loadings, x=x_name, y=y_name, **scatter_params)

        # set axis label names with % variance
        x_label = x_name + " ({:.1f} %)"
        x_label = x_label.format(variance[x_pc - 1] * 100 / total_var)
        y_label = y_name + " ({:.1f} %)"
        y_label = y_label.format(variance[y_pc - 1] * 100 / total_var)
        fig.xaxis.axis_label = x_label
        fig.yaxis.axis_label = y_label
        fig.yaxis.axis_label_text_font_style = "bold"
        fig.xaxis.axis_label_text_font_style = "bold"

        if draw:
            bokeh.plotting.show(fig)
        return fig

    def feature(self, ft: str, hue: str = _sample_class,

0 View Source File : container.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : griquelme

    def feature(self, ft: str, hue: str = _sample_class,
                ignore_classes: Optional[List[str]] = None,
                draw: bool = True,
                fig_params: Optional[dict] = None,
                scatter_params: Optional[dict] = None) -> bokeh.plotting.Figure:
        """
        plots a feature intensity as a function of the run order.

        Parameters
        ----------
        ft: str
            Feature to plot. Index of feature in `feature_metadata`
        hue: {"class", "type"}
        ignore_classes : list[str], optional
            exclude samples from the listed classes in the plot
        draw: bool
            If True calls bokeh.plotting.show on figure.
        fig_params: dict
            key-value parameters to pass to bokeh figure
        scatter_params: dict
            key-value parameters to pass to bokeh circle

        Returns
        -------
        bokeh.plotting.Figure
        """

        default_fig_params = {"aspect_ratio": 1.5}
        if fig_params is None:
            fig_params = default_fig_params
        else:
            default_fig_params.update(fig_params)
            fig_params = default_fig_params

        if scatter_params is None:
            scatter_params = dict()

        if ignore_classes is None:
            ignore_classes = list()

        source = (self._data_container.sample_metadata
                  .join(self._data_container.data_matrix[ft]))

        ignore_samples = source[_sample_class].isin(ignore_classes)
        source = source[~ignore_samples]

        if hue == _sample_type:
            rev_map = _reverse_mapping(self._data_container.mapping)
            source[_sample_type] = (source[_sample_class]
                                    .apply(lambda x: rev_map.get(x)))
            source = source[~source[_sample_type].isna()]
        elif hue == _sample_batch:
            source[_sample_batch] = source[_sample_batch].astype(str)

        # setup the colors
        unique_values = source[hue].unique().astype(str)
        cmap = Category10[10]
        palette = cmap * (int(unique_values.size / len(cmap)) + 1)
        palette = palette[:unique_values.size]

        source = ColumnDataSource(source)

        tooltips = [(_sample_class, "@{}".format(_sample_class)),
                    (_sample_order, "@{}".format(_sample_order)),
                    (_sample_batch, "@{}".format(_sample_batch)),
                    (_sample_id, "@{}".format(_sample_id))]
        fig = bokeh.plotting.figure(tooltips=tooltips, **fig_params)
        cmap_factor = factor_cmap(hue, palette, unique_values)
        fig.scatter(source=source, x=_sample_order, y=ft, color=cmap_factor,
                    legend_group=hue, **scatter_params)

        fig.xaxis.axis_label = "Run order"
        fig.yaxis.axis_label = "{} intensity [au]".format(ft)
        fig.yaxis.axis_label_text_font_style = "bold"
        fig.yaxis.formatter.precision = 2
        fig.xaxis.formatter.precision = 2
        fig.xaxis.axis_label_text_font_style = "bold"

        if draw:
            bokeh.plotting.show(fig)
        return fig


class SeabornPlotMethods(object):   # pragma: no cover

0 View Source File : _plot_bokeh.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : griquelme

def plot_chromatogram(rt: np.ndarray, spint: np.ndarray,
                      peaks: Optional[List[Peak]], draw: bool = True,
                      fig_params: Optional[dict] = None,
                      line_params: Optional[dict] = None
                      ) -> bokeh.plotting.Figure:
    """
    Plots a chromatogram

    Parameters
    ----------
    rt : array
        array of retention time
    spint : array
        array of intensity
    peaks : List[Peaks]
    draw : bool, optional
        if True run bokeh show function.
    fig_params : dict
        key-value parameters to pass into bokeh figure function.
    line_params : dict
        key-value parameters to pass into bokeh line function.

    Returns
    -------
    bokeh Figure

    """
    default_line_params = {"line_width": 1, "line_color": "black", "alpha": 0.8}
    default_fig_params = {"aspect_ratio": 1.5}
    cmap = Set3[12]

    if line_params is None:
        line_params = default_line_params
    else:
        for params in line_params:
            default_line_params[params] = line_params[params]
        line_params = default_line_params

    if fig_params is None:
        fig_params = default_fig_params
    else:
        default_fig_params.update(fig_params)
        fig_params = default_fig_params

    fig = bokeh.plotting.figure(**fig_params)
    fig.line(rt, spint, **line_params)
    if peaks is not None:
        for k, peak in enumerate(peaks):
            fig.varea(rt[peak.start:peak.end], spint[peak.start:peak.end],
                      0, fill_alpha=0.8, fill_color=cmap[k % 12])
            # k % 12 is used to cycle over the colormap

    #  figure appearance
    fig.xaxis.axis_label = "Rt [s]"
    fig.yaxis.axis_label = "intensity [au]"
    fig.yaxis.axis_label_text_font_style = "bold"
    fig.yaxis.formatter.precision = 2
    fig.xaxis.axis_label_text_font_style = "bold"

    if draw:
        bokeh.plotting.show(fig)
    return fig


def plot_ms_spectrum(mz: np.ndarray, spint: np.ndarray,

0 View Source File : _plot_bokeh.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : griquelme

def plot_ms_spectrum(mz: np.ndarray, spint: np.ndarray,
                     draw: bool = True, fig_params: Optional[dict] = None,
                     line_params: Optional[dict] = None
                     ) -> bokeh.plotting.Figure:
    """
    Plot a mass spectrum.

    Parameters
    ----------
    mz : array
        array of m/z
    spint : array
        array of intensities
    draw : bool, optional
        if True run bokeh show function.
    fig_params : dict
        key-value parameters to pass into bokeh figure function.
    line_params : dict
        key-value parameters to pass into bokeh line function.

    Returns
    -------
    bokeh Figure
    """
    default_line_params = {"line_width": 1, "line_color": "black",
                           "alpha": 0.8}

    if line_params is None:
        line_params = default_line_params
    else:
        for params in line_params:
            default_line_params[params] = line_params[params]
        line_params = default_line_params

    default_fig_params = {"aspect_ratio": 1.5}
    if fig_params is None:
        fig_params = default_fig_params
    else:
        default_fig_params.update(fig_params)
        fig_params = default_fig_params

    fig = bokeh.plotting.figure(**fig_params)
    fig.line(mz, spint, **line_params)

    #  figure appearance
    fig.xaxis.axis_label = "m/z"
    fig.yaxis.axis_label = "intensity [au]"
    fig.yaxis.axis_label_text_font_style = "bold"
    fig.yaxis.formatter.precision = 2
    fig.xaxis.axis_label_text_font_style = "bold"

    if draw:
        bokeh.plotting.show(fig)
    return fig
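
The helpers above (pca_scores, pca_loadings, feature, plot_chromatogram, plot_ms_spectrum) all take a draw flag: the figure is built and bokeh.plotting.show is only called when draw is True, so callers can customize the figure before showing it themselves. A sketch of that calling pattern (the import path and data are illustrative assumptions, not from the project):

    import numpy as np
    import bokeh.plotting
    # hypothetical import path; adjust to wherever plot_ms_spectrum lives in the project
    from _plot_bokeh import plot_ms_spectrum

    mz = np.linspace(100.0, 500.0, 200)         # illustrative m/z values
    spint = np.abs(np.random.randn(200)) * 1e4  # illustrative intensities

    fig = plot_ms_spectrum(mz, spint, draw=False)  # build the figure without showing it
    fig.title.text = "Customized spectrum"         # tweak it first
    bokeh.plotting.show(fig)                       # then display it explicitly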

0 View Source File : cli.py
License : MIT License
Project Creator : IQTLabs

def visualize(
    fasta,
    width,
    palette,
    color,
    hide,
    bar,
    title,
    separate,
    cols,
    link_x,
    link_y,
    output,
    offline,
    method,
    dimensions,
    skip,
    mode,
    legend_loc,
    output_backend,
    downsample,
):
    # check filetype
    if fasta is None:
        raise ValueError("Must provide FASTA file.")

    # handle selecting the palette
    palette = small_palettes[palette]

    # handle setting the dimensions automatically if not specified
    if not dimensions:
        dimensions = (750, 500)

    if (
        len([record for _f in fasta for record in Fasta(_f, read_long_names=True)])
        > len(palette)
        and mode != "file"
    ):
        if len(fasta) > 1 and mode == "auto":
            if not skip:
                print(
                    "Visualizing each file in separate color. To override, provide mode selection."
                )
            mode = "file"
        else:
            print("Visualizing each sequence in black.")
            color = False
    elif mode == "auto":
        mode = "seq"

    # get all the sequences
    seqs = []
    color_counter = 0
    warned = False
    for i, _f in enumerate(fasta):
        for j, seq in enumerate(
            Fasta(_f, sequence_always_upper=True, read_long_names=True)
        ):
            seqs.append(
                Box(
                    color=palette[color_counter + 1 if color_counter > 2 else 3][
                        color_counter
                    ]
                    if color
                    else "black",
                    name=_f if mode == "file" else seq.name,
                    raw_seq=str(seq),
                )
            )

            # check the length of the seq
            if len(seq) > 10000 and not skip and not warned and downsample == 1:
                click.confirm(
                    "You are plotting a long sequence ({} bp). This may be very slow, although downsampling might help. "
                    "Do you want to continue?".format(len(seq)),
                    abort=True,
                )
                warned = True

            if mode == "seq":
                color_counter += 1
        if mode == "file":
            color_counter += 1

    # warn if plotting a large number of seqs
    if len(seqs) > 500 and not skip:
        click.confirm(
            "You are plotting a large number of sequences ({}). This may be very slow, although downsampling might help. "
            "Do you want to continue?".format(len(seqs)),
            abort=True,
        )

    # warn if using a bad method
    if (
        max([len(seq.raw_seq) for seq in seqs]) > 25
        and method in ["qi", "randic"]
        and not skip
    ):
        click.confirm(
            "This method is not well suited to a sequence of this length. "
            "Do you want to continue?",
            abort=True,
        )

    axis_labels = {
        "squiggle": {"x": "position (BP)", "y": None},
        "gates": {"x": "C-G axis", "y": "A-T axis"},
        "yau": {"x": None, "y": None},
        "yau-bp": {"x": "position (BP)", "y": None},
        "randic": {"x": "position (BP)", "y": "nucleotide"},
        "qi": {"x": "position (BP)", "y": "dinucleotide"},
    }

    # the number of figures to draw is either the number of sequences or files (or 1)
    if separate:
        if mode == "seq":
            fig_count = len(seqs)
        elif mode == "file":
            fig_count = len(fasta)
    else:
        fig_count = 1

    fig = []
    for i in range(fig_count):

        # link the axes, if requested
        if i > 0 and link_x:
            x_range = fig[i - 1].x_range
        else:
            x_range = None
        if i > 0 and link_y:
            y_range = fig[i - 1].y_range
        else:
            y_range = None

        # the y axes for randic and qi are bases
        if method == "randic":
            y_range = ["A", "T", "G", "C"]
        elif method == "qi":
            y_range = [
                "AA",
                "AC",
                "AG",
                "AT",
                "CA",
                "CC",
                "CG",
                "CT",
                "GA",
                "GC",
                "GG",
                "GT",
                "TA",
                "TC",
                "TG",
                "TT",
            ]

        fig.append(
            figure(
                x_axis_label=axis_labels[method]["x"],
                y_axis_label=axis_labels[method]["y"],
                title=title,
                x_range=x_range,
                y_range=y_range,
                plot_width=dimensions[0],
                plot_height=dimensions[1],
                output_backend=output_backend,
            )
        )

    # show a progress bar if processing multiple files
    if len(seqs) > 1 and bar:
        _seqs = tqdm(seqs, unit=" seqs", leave=False)
    else:
        _seqs = seqs

    for i, seq in enumerate(_seqs):
        # perform the actual transformation
        transformed = transform(seq.raw_seq, method=method)
        if downsample > 1:
            transformed = (transformed[0][::downsample], transformed[1][::downsample])

        # figure (no pun intended) which figure to plot the data on
        if separate:
            if mode == "seq":
                _fig = fig[i]
            elif mode == "file":
                _fig = fig[fasta.index(seq.name)]

            # add a title to the plot
            _fig.title = annotations.Title()
            if mode == "seq":
                _fig.title.text = seq.name
            elif mode == "file":
                _fig.title.text = click.format_filename(seq.name, shorten=True)
        else:
            _fig = fig[0]
            _fig.title = annotations.Title()

            # if only plotting on one figure, set up the title
            if title:
                _fig.title.text = title
            elif len(seqs) > 1 and not title and len(fasta) == 1:
                _fig.title.text = click.format_filename(fasta[0], shorten=True)
            elif len(seqs) == 1:
                # if just plotting one sequence, title it with the name of the sequence
                _fig.title.text = seq.name

        # randic and qi method's have categorical y axes
        if method == "randic":
            y = list(seq.raw_seq)
        elif method == "qi":
            y = [seq.raw_seq[i : i + 2] for i in range(len(seq.raw_seq))]
            y = [str(i) for i in y if len(i) == 2]
        else:
            y = transformed[1]

        # figure out whether to add a legend
        if (separate or not color or mode == "file" or len(seqs) == 1) and not hide:
            legend = None
        else:
            legend = click.format_filename(seq.name, shorten=True)

        # optimization for comparing large FASTA files without hiding
        try:
            if mode == "file" and seqs[i + 1].color != seq.color and not separate:
                legend = click.format_filename(seq.name, shorten=True)
        except IndexError:
            if mode == "file" and not separate:
                legend = click.format_filename(seq.name, shorten=True)

        # do the actual plotting

        # set up the legend
        if legend is not None:
            _fig.line(
                x=transformed[0],
                y=y,
                line_width=width,
                legend_label=legend,
                color=seq.color,
            )
            _fig.legend.location = legend_loc
            if hide:
                _fig.legend.click_policy = "hide"
        else:
            _fig.line(x=transformed[0], y=y, line_width=width, color=seq.color)

    # clean up the tqdm bar
    try:
        _seqs.close()
    except AttributeError:
        pass

    # lay out the figure
    if separate:
        plot = gridplot(
            fig,
            ncols=math.ceil(len(fig) ** 0.5) if cols == 0 else cols,
            toolbar_options=dict(logo=None),
        )  # note that 0 denotes the automatic default
    else:
        plot = fig[0]

    if output is not None and output.endswith(".html"):
        output_file(
            output, title="Squiggle Visualization" if title is not None else title
        )
        save(plot, resources=INLINE if offline else None)
    else:
        show(plot)


if __name__ == "__main__":
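
The tail of visualize switches between saving a standalone HTML file and opening the plot in a browser. A stripped-down sketch of that save-versus-show pattern (the file name is illustrative, not from the project):

    from bokeh.plotting import figure, output_file, save, show
    from bokeh.resources import INLINE

    p = figure(title="demo")
    p.line([0, 1, 2, 3], [0, 1, 4, 9], line_width=2)

    output = "demo.html"  # set to None to open an interactive window instead
    if output is not None and output.endswith(".html"):
        output_file(output, title="demo")
        save(p, resources=INLINE)  # INLINE embeds BokehJS so the file works offline
    else:
        show(p)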

0 View Source File : tooltip_plot.py
License : MIT License
Project Creator : jackgoffinet

def tooltip_plot(embedding, images, output_dir='temp', num_imgs=10000, title="",
	n=30000, grid=False):
	"""
	Create a scatterplot of the embedding with spectrogram tooltips.

	TO DO
	-----
	* Set the aspect ratio to 1

	Parameters
	----------
	embedding : numpy.ndarray
		The scatterplot coordinates. Shape: (num_points, 2)
	images : numpy.ndarray
		A spectrogram image for each scatter point. Shape:
		(num_points, height, width)
	output_dir : str, optional
		Directory where html and jpegs are written. Defaults to "temp".

	num_imgs : int, optional
		Number of points with tooltip images. Defaults to 10000.
	title : str, optional
		Title of plot. Defaults to ''.
	n : int, optional
		Total number of scatterpoints to plot. Defaults to 30000.
	grid : bool, optional
		Show x and y grid? Defaults to `False`.
	"""
	# Shuffle the embedding and images.
	np.random.seed(42)
	perm = np.random.permutation(len(embedding))
	np.random.seed(None)
	embedding = embedding[perm]
	images = images[perm]

	n = min(len(embedding), n)
	num_imgs = min(len(images), num_imgs)
	_write_images(embedding, images, output_dir=output_dir, num_imgs=num_imgs, n=n)
	output_file(os.path.join(output_dir, "main.html"))
	source = ColumnDataSource(
			data=dict(
				x=embedding[:num_imgs,0],
				y=embedding[:num_imgs,1],
				imgs = ['./'+str(i)+'.jpg' for i in range(num_imgs)],
			)
		)
	source2 = ColumnDataSource(
			data=dict(
				x=embedding[num_imgs:,0],
				y=embedding[num_imgs:,1],
			)
		)
	p = figure(plot_width=800, plot_height=600, title=title)
	p.scatter('x', 'y', size=3, fill_color='blue', fill_alpha=0.1, source=source2)
	tooltip_points = p.scatter('x', 'y', size=5, fill_color='red', source=source)
	hover = HoverTool(
			renderers=[tooltip_points],
			tooltips="""
			<div>
				<div>
					<img
						src="@imgs" height="128" alt="@imgs" width="128"
						style="float: left; margin: 0px 0px 0px 0px;"
						border="1"
					></img>
				</div>
			</div>
			"""
		)
	p.add_tools(hover)
	p.title.align = "center"
	p.title.text_font_size = "25px"
	p.axis.visible = grid
	p.xgrid.visible = grid
	p.ygrid.visible = grid
	show(p)


def _save_image(data, filename):
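
A small way to exercise tooltip_plot with synthetic data, assuming the module above is importable; the random arrays stand in for real embeddings and spectrograms:

    import numpy as np

    embedding = np.random.randn(500, 2)     # 2-D scatter coordinates
    images = np.random.rand(500, 64, 64)    # one fake spectrogram per point

    # writes the tooltip jpegs plus main.html into ./temp and calls bokeh show
    tooltip_plot(embedding, images, output_dir='temp', num_imgs=100,
                 title="demo embedding")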

0 View Source File : Grapher.py
License : GNU General Public License v3.0
Project Creator : Jobenland

def Main():
    
    #adds the options panel to the top
    sg.SetOptions(element_padding=(0,0))

    #adds the options to the bar at top
    menu_def = [['File', ['Open', 'Exit']],
                ['Generate', ['Preview','Graph', ['line', 'bar',], 'Reset'],],
                ['Help', ['About...','Submit An Issue']]]

    columm_layout = [[]]

    #creates the preview portion to look at the opened csv file
    MAX_ROWS = 800
    MAX_COL = 10
    data=[]
    header_lisst=[1,2,3,4,5,6,7]
    #amends the columns and sets up the key to utilize features of the preview tab
    for i in range(MAX_ROWS):
        inputs = [sg.T('{}'.format(i), size=(4,1), justification='right')] + [sg.In(size=(10, 1), pad=(1, 1), justification='right', key=(i,j), do_not_clear=True) for j in range(MAX_COL)]
        columm_layout.append(inputs)

    #sets the preview pane to see the preview portion
    Preview = [ [sg.Menu(menu_def)],
                [sg.Text('To preview imported data, select '),sg.Text("Generate -> Preview ", text_color = 'blue'),sg.Text("Check the file path below           ")], 
                [sg.Text('Make sure the text is green below before proceeding')],
                [sg.Text('                                                                                            ')],
                [sg.Text('No CSV has been entered', size = (67,1),text_color = 'red',key = 'fn')],
                [sg.Text('                                                        ')]]
               #[sg.Column(columm_layout, size=(410,100), scrollable=True)],
               #[sg.Table(values=data,max_col_width=1,headings=header_lisst,
               #auto_size_columns=False, justification='right',alternating_row_color = 'lightblue',vertical_scroll_only=False,key='tab')]]
    #sets the layout for the graph settings on the box
    Setting = [[sg.Slider(range=(1,1500), default_value=610, size=(10,10), orientation='horizontal', key = 'height',font=('Helvetica', 12)),
                    sg.Text('    Name: ', size=(10,1)), sg.InputText(key='graphtitle', size=(15,1)), sg.Text('   Title of x-axis ', size = (14,1)),
                    sg.InputText(key='xlabel', size=(15,1))],
               [sg.Text('Enter graph Height')],
               [sg.Slider(range=(1,1500), default_value=650, size=(10,10), orientation='horizontal', key = 'width', font=('Helvetica', 12)),
                    sg.Text('       '),sg.Text(' Legend Location  '), sg.InputCombo(['Top Left','Top Right','Bottom Left', 'Bottom Right'], key = 'legendloc'),
                    sg.Text('  Title of y-axis ', size=(13,1)), sg.InputText(key='ylabel',size=(15,1))],
               [sg.Text('Enter graph Width')],
               [sg.Text(' ')],
               [sg.Checkbox('Graph Multiple Data Sets       Data Mark Type',default = False,key='multiA'),sg.Combo(['Dot','No Marker', 'Square', 'Triangle', 'Inverted Triangle','Diamond'], key= 'dot'),
                    sg.Text('    '),sg.Text('Size of Mark      '),sg.Slider(range=(1,50), default_value=5, size=(10,10), orientation='horizontal', key = 'size', font=('Helvetica', 12))],
                [sg.Text(' ')],
               [sg.Text('Choose Axis Visibility   '),sg.InputCombo(['Both Axis Visible','Only X Axis','Only Y Axis', 'Both Axis Invisible'], key = 'axis')],
               [sg.Text(' ')],

               [sg.Text('Select the X axis'), sg.Text('                              Select the y axis(s)')],
               [sg.Listbox(['Load CSV to See available headers'], key = 'xheaders', size=(40,6)),sg.Listbox(['Load CSV to See available headers'],select_mode='multiple',key = 'yheaders', size=(40,6)), sg.Checkbox('Maintain Aspect',default = False,key='ASR')]]
              
    #general layout bringing all the smaller frames together
    layout = [[sg.Text('First, Use Open to load a CSV into the program and verify the correct path in the preview box.')],
              [sg.Text('Note to keep the box a perfect square, leave the height and width at 610 by 650')], 
              [sg.Frame('Preview', Preview, title_color='green', font = 'Any 12'), sg.Image('Img/UMD.png')],
              [sg.Frame('Graph Settings', Setting, title_color='blue', font = 'Any 12')],
              [sg.Text('Property of Maryland Energy Innovation Institute                                                                        written by Jonathan Obenland', text_color = 'red')],
              [sg.Text('All rights reserved under GNU-GPL version 3                                                                             Python 3.x   Build: ', text_color = 'blue'),sg.Text("PASSING",text_color = 'green')]]
 
    #names the table and creates the layout
    window1 = sg.Window('Table',icon = 'Img/graph.ico', return_keyboard_events=True).Layout(layout).Finalize()

    #starts the event listener for the window
    window2_active = False
    while True:

        #reads the window
        event1, values1 = window1.Read()
        
        #if exit
        if event1 is None or event1 == 'Exit':
            break

        #if they click about
        elif event1 == 'About...':
            sg.Popup('A simple graphing program to plot various headers and points from a CSV')

        #if they click open
        elif event1 == 'Open':
            
            #opens up a window to choose a file with the extension .csv
            filename = sg.PopupGetFile('filename to open', no_window=True, file_types=(("CSV Files","*.csv"),))

            nameUpdate = window1.FindElement('fn')
            nameUpdate.Update(filename, text_color = 'green')

            #populates the first box for choosing the x axis
            if filename is not None:
                with open(filename, "r") as infile:
                    #sets the headers to an array
                    reader = csv.reader(infile)
                    header_list=[]
                    header_list = next(reader)
                    print(header_list)
                    #updates the box with the array of x headers
                    headerupdate = window1.FindElement('xheaders')
                    headerupdate.Update(header_list)

            #populates the second box for choosing the y axis to include
            if filename is not None:
                with open(filename, "r") as infile:
                    #sets the headers to an array
                    reader = csv.reader(infile)
                    header_list2=[]
                    header_list2 = next(reader)
                    print(header_list)
                    #updates the box with the new headers from the array
                    headerupdate2 = window1.FindElement('yheaders')
                    headerupdate2.Update(header_list2)  
            
            #opens in read and populates var with everything in the csv
            if filename is not None:
                with open(filename, "r") as infile:
                    reader = csv.reader(infile)
                    try:
                        #read everything into rows
                        data = list(reader)

                    #error handling to make sure that the file is readable
                    except:

                        #let the user know they tried to load a bad file
                        sg.PopupError('Error reading file')
                        continue
                '''
                if filename is not None:
                    with open(filename,'r') as infile:
                        reader = csv.reader(infile)
                        header_lisst=next(reader)
                        try:
                            data=list(reader)
                        except:
                            sg.PopupError("error reading file")
                        table = window.FindElement('tab')
                        table.Update(values=data)
                '''        
                '''
                
                # clear the table
                [window.FindElement((i,j)).Update('') for j in range(MAX_COL) for i in range(MAX_ROWS)]

                #cycles through the array of rows and enumerates the data
                for i, row in enumerate(data):

                    #for each row in enumerated data then item -- var j
                    for j, item in enumerate(row):

                        #location of each box in the preview
                        location = (i,j)

                        #for each part try and read the table
                        try:            
                            target_element = window.FindElement(location)
                            new_value = item
                            if target_element is not None and new_value != '':
                                target_element.Update(new_value)

                        #no work no do
                        except:
                            pass
            '''
                
        #TODO fix the save function to allow a user to change a field and save it as a csv
        #FIXME saves in an unreadable corrupt CSV
        elif event1 == 'Save':
            filename = sg.PopupGetFile('filename to open', save_as = True,no_window=True, file_types=(("CSV Files","*.csv"),))
            for i, row in enumerate(data):
                for j, item in enumerate(row):
                    location = (i,j)
                    try:
                        target_element = window1.FindElement(location)
                    except:
                        pass
                    deform = values1[target_element]
                    print (deform)
        elif event1 == "Preview":
            if not window2_active:
                window2_active=True
                if filename is not None:
                    with open(filename, 'r') as infile:
                        reader = csv.reader(infile)
                        header_lisst = next(reader)
                        data = list(reader)
                layout2 = [[sg.Table(values = data,
                                        headings = header_lisst,
                                        auto_size_columns = False,
                                        max_col_width = 15,
                                        justification = 'right',
                                        alternating_row_color = 'lightblue',
                                        vertical_scroll_only = False,
                                        num_rows = min(len(data),20))]]

                window2 = sg.Window(filename + '   PREVIEW').Layout(layout2)
            if window2_active:
                event2, values2 = window2.Read(timeout=100)
                if event2 is None or event2 == 'Exit':
                    window2_active = False
                    window2.Close()

        #if the user decides to select line graph
        elif event1 == 'line':

            #names everything for use in the rest of the program            
            name = values1['graphtitle']
            plotwidthfloat= values1['width']
            plotheightfloat= values1['height']
            xaxisname = values1['xlabel']
            yaxisname = values1['ylabel']

            #changes the type floats to type Ints
            plotwidth=int(plotwidthfloat)
            plotheight=int(plotheightfloat)

            #read the csv of the given filename
            weight = pd.read_csv(filename)


            #gets the values of the headers
            xhead = values1['xheaders']
            yhead = values1['yheaders']

            #array of colors to choose from
            #TODO add more colors
            colorar = ['green','blue','red','orange','aqua','black', 'pink', 'cyan', 'purple', 'magenta']

            #headers.Update(header_list)
            if values1['ASR'] == True:
                p = figure(title = name, plot_width=plotwidth, plot_height=plotheight, match_aspect=True)
            if values1['ASR'] == False:
                p = figure(title = name, plot_width=plotwidth, plot_height=plotheight)

            p.xaxis.axis_label = xaxisname
            p.yaxis.axis_label = yaxisname
            i=0
            
            if values1['multiA'] == True:
                
                '''
                for i in range(len(xhead)):
                    xxGet=weight[xhead[i]]
                    print(xxGet)
                    xxIndex = xxGet[i]
                    print(xxIndex)
                    splitTest = xxIndex.split(',')
                    print (splitTest)
                    xx = splitTest[0]
                    yy = splitTest[1]
                    print (xx)
                '''
                #goes through each of the headers selected and uses them in the program
                for i in range(len(yhead)):
                    #headerupdate2 = window1.FindElement('xheaders')
                    #headerupdate2.Update('Multi Axis enabled, Select Y only')
                    #sets the color to the array at the index. leave for now
                    ccolor = colorar[i]
                    print(ccolor)
                    xx=[]
                    yy=[]
                    #increments yy with a number at the end for adding more lines
                    varYY = 'yy'+str(i)
                    varYY = weight[yhead[i]]
                    yList = varYY.tolist()
                    
                    for line in yList:
                        splitter = line.strip('(')
                        splitter = splitter.strip(')')
                        splitter = splitter.split(',')
                        splitterIntX = (float(splitter[0]))
                        splitterIntY = (float(splitter[1]))
                        xx.append(splitterIntX)
                        yy.append(splitterIntY)
                   
                    #creates the lines
                    dotMrk = values1['dot']
                    sizeOfMrk = values1['size']
                    markSize = int(sizeOfMrk)
                     
                    if dotMrk == 'Dot':
                        p.line(xx,yy, legend=yhead[i],line_color=ccolor)
                        p.circle(xx,yy, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'No Marker':
                        p.line(xx,yy, legend=yhead[i],line_color=ccolor)
                    if dotMrk == 'Square':
                        p.line(xx,yy, legend=yhead[i],line_color=ccolor)
                        p.square(xx,yy, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'Triangle':
                        p.line(xx,yy, legend=yhead[i],line_color=ccolor)
                        p.triangle(xx,yy, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'Inverted Triangle':
                        p.line(xx,yy, legend=yhead[i],line_color=ccolor)
                        p.inverted_triangle(xx,yy, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'Diamond':
                        p.line(xx,yy, legend=yhead[i],line_color=ccolor)
                        p.diamond(xx,yy, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)

                    #p.circle(xx,yy, legend=yhead[i],fill_color=ccolor,line_color=ccolor)
                    #p.line(xx,yy, legend=yhead[i],line_color=ccolor)
                    #testing...
                    print(yhead[i])


            if values1['multiA'] == False:
            #unnecessary but still leave here for now
                for i in range(len(xhead)):
                    xx=weight[xhead[i]]
                
                #goes through each of the headers selected and uses them in the program
                for i in range(len(yhead)):
                    
                    #sets the color to the array at the index. leave for now
                    ccolor = colorar[i]
                    print(ccolor)
                    
                    #increments yy with a number at the end for adding more lines
                    var = 'yy'+str(i)
                    var = weight[yhead[i]]
                    print(var)
                    #creates the lines
                    dotMrk = values1['dot']
                    sizeOfMrk = values1['size']
                    markSize = int(sizeOfMrk)
                     
                    if dotMrk == 'Dot':
                        p.line(xx,var, legend=yhead[i],line_color=ccolor)
                        p.circle(xx,var, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'No Marker':
                        p.line(xx,var, legend=yhead[i],line_color=ccolor)
                    if dotMrk == 'Square':
                        p.line(xx,var, legend=yhead[i],line_color=ccolor)
                        p.square(xx,var, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'Triangle':
                        p.line(xx,var, legend=yhead[i],line_color=ccolor)
                        p.triangle(xx,var, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'Inverted Triangle':
                        p.line(xx,var, legend=yhead[i],line_color=ccolor)
                        p.inverted_triangle(xx,var, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)
                    if dotMrk == 'Diamond':
                        p.line(xx,var, legend=yhead[i],line_color=ccolor)
                        p.diamond(xx,var, legend=yhead[i],fill_color=ccolor,line_color=ccolor,size= markSize)

                    #testing...
                    print(yhead[i])

            #output for the html file
            output_file(name+'.html')

            #grabbing the data from the legend area spot
            lloc = values1['legendloc']
            
            #user selected location of legend
            if lloc == 'Top Right':
                p.legend.location = "top_right"

            elif lloc == 'Top Left':
                p.legend.location = "top_left"

            elif lloc == 'Bottom Right':
                p.legend.location = "bottom_right"

            elif lloc == 'Bottom Left':
                p.legend.location ="bottom_left" 

            axvi = values1['axis']       
            if axvi == 'Both Axis Visible':
                p.yaxis.visible = True
                p.xaxis.visible = True
            if axvi == 'Only X Axis':
                p.yaxis.visible = False
            if axvi == 'Only Y Axis':
                p.xaxis.visible = False
            if axvi == 'Both Axis Invisible':
                p.yaxis.visible = False
                p.xaxis.visible = False

            show(p)

        #if the user wants to reset the program
        elif event1 == "Reset":
                window1.Close()
                Main()

        elif event1 == 'Submit An Issue':
            a_website = "https://github.com/Jobenland/CSV-Graph-Generator/issues"
            webbrowser.open_new(a_website)

        # if a valid table location entered, change that location's value
        try:
            location = (int(values1['inputrow']), int(values1['inputcol']))
            target_element = window1.FindElement(location)
            new_value = values1['value']
            if target_element is not None and new_value != '':
                target_element.Update(new_value)
        except:
            pass

Main()
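
The plotting core of Main reduces to a line glyph plus an optional marker glyph sharing one legend entry. A minimal sketch of that pattern in current Bokeh (recent versions spell the keyword legend_label, whereas this project passes the older legend argument); the data and file name are placeholders:

    from bokeh.plotting import figure, output_file, show

    xs = [0, 1, 2, 3, 4]
    ys = [2.0, 3.5, 3.1, 4.2, 5.0]

    p = figure(title="demo", width=650, height=610)
    p.line(xs, ys, legend_label="series 1", line_color="green")
    p.circle(xs, ys, legend_label="series 1", fill_color="green",
             line_color="green", size=5)
    p.legend.location = "top_left"

    output_file("demo.html")
    show(p)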

0 View Source File : simulations.py
License : MIT License
Project Creator : kevin218

def simulate_lightcurve(target, snr=1000., npts=1000, nbins=10, radius=None, ldcs=('quadratic', [0.1, 0.1]), plot=False):
    """Simulate lightcurve data for the given target exoplanet

    Parameters
    ----------
    target: str
        The name of the target to simulate
    snr: float
        The signal to noise to use
    npts: int
        The number of points in each lightcurve
    nbins: int
        The number of lightcurves
    radius: array-like, float (optional)
        The radius or radii value(s) to use
    ldcs: sequence
        The limb darkening profile name and coefficients
    plot: bool
        Plot the figure

    Returns
    -------
    tuple
        The time, flux, uncertainty, and transit parameters
    """
    try:

        # Get the system parameters from ExoMAST
        targ, url = utils.get_target_data(target)
        name = targ.get('canonical_name') or target
        t0 = targ.get('transit_time', 0.)
        dt = targ.get('transit_duration', 1.)

        # Generate transit parameters with batman
        params = batman.TransitParams()
        params.t0 = t0
        params.rp = targ.get('Rp/Rs') or 0.1
        params.per = targ.get('orbital_period') or 0.5
        params.inc = targ.get('inclination') or 90.
        params.a = targ.get('a/Rs') or 15.
        params.ecc = targ.get('eccentricity') or 0.
        params.w = targ.get('omega') or 90.
        params.limb_dark = 'nonlinear' if ldcs[0] == '4-parameter' else ldcs[0]
        params.transittype = 'primary'
        params.u = ldcs[1]

        # Generate a time axis
        time = np.linspace(t0-dt, t0+dt, npts)

        # Make the transit model
        transit = batman.TransitModel(params, time, transittype='primary')

        # Generate the lightcurves
        flux = []
        if radius is None:
            radius = params.rp
        radii = [radius]*nbins if isinstance(radius, (int, float)) else radius
        for r in radii:
            params.rp = r
            flux.append(transit.light_curve(params))

        # Add noise
        ideal_flux = np.asarray(flux)
        flux = np.random.normal(loc=ideal_flux, scale=ideal_flux/snr)
        unc = flux - ideal_flux

        # Plot it
        if plot:
            fig = figure(title=name)
            fig.circle(time, flux[0])
            fig.xaxis.axis_label = targ.get('transit_time_unit') or 'MJD'
            fig.yaxis.axis_label = 'Relative Flux'
            show(fig)

        return time, flux, unc, targ

    except IOError:
        raise ValueError('{}: Could not simulate light curve for this target'.format(target))
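
A one-call usage sketch for simulate_lightcurve; it assumes batman-package is installed and a network connection is available for the exoMAST lookup, and the target name is only an example:

    time, flux, unc, target_params = simulate_lightcurve('WASP-18 b', snr=500.,
                                                         npts=500, nbins=3, plot=True)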

0 View Source File : dash_online.py
License : BSD 2-Clause "Simplified" License
Project Creator : kourgeorge

    def __init__(self, file_path="lines.html"):
        self._source = ColumnDataSource(dict(
            time=[], average=[], low=[], high=[], open=[], close=[],
            ma=[], macd=[], macd9=[], macdh=[], color=[]))

        output_file(file_path)

        p = figure(plot_height=500, tools="xpan,xwheel_zoom,xbox_zoom,reset", x_axis_type=None, y_axis_location="right")
        p.x_range.follow = "end"
        p.x_range.follow_interval = 100
        p.x_range.range_padding = 0

        p.line(x='time', y='average', alpha=0.2, line_width=3, color='navy', source=self._source)
        p.line(x='time', y='ma', alpha=0.8, line_width=2, color='orange', source=self._source)
        p.segment(x0='time', y0='low', x1='time', y1='high', line_width=2, color='black', source=self._source)
        p.segment(x0='time', y0='open', x1='time', y1='close', line_width=8, color='color', source=self._source)

        show(p)

    def update(self, t):

0 View Source File : Show_Graphs.py
License : MIT License
Project Creator : kubapilch

def create_graph2(show_id, save, normalize, load_file, data, average, seasons):
    """
    Creates graph from given arguments
    """
    # If seasons not specify set as all
    if seasons is None:
        seasons = range(200)

    reviews_raw = None
    votes_raw = None

    # Get raw data depending on user choice, from files or imdb
    data_raw = get_data_from_file(show_id, save, data) if load_file else get_data_from_internet(show_id, save, data)

    # Set data
    reviews_raw = data_raw[0].get('data', None)
    votes_raw = data_raw[1].get('data', None)

    # Try to get the title of the show from reviews; if unavailable, try votes, and fall back to the show id
    show_name = data_raw[0].get('show_name', data_raw[1].get('show_name', show_id))

    # Prepare data for presentation
    reviews_prepared = prepare_data_for_presentation(reviews_raw, seasons)
    votes_prepared = prepare_data_for_presentation(votes_raw, seasons)

    # Normalize number of votes if needed
    if normalize:
        votes_prepared = normalize_data(votes_prepared)
    
    # Get labels for the graph
    labels = list(reviews_prepared.keys()) if not reviews_prepared is None else list(votes_prepared)

    # Set data range
    if not reviews_prepared is None or normalize:
        data_range = (0, 11)
    else:
        # Set data range as 0-max value + 10% of max value
        data_range = (0, max(votes_prepared.values()) + (0.1*max(votes_prepared.values())))
    
    # Create graph object
    p = figure(x_range=labels, y_range=data_range, sizing_mode='stretch_both', title=show_name)
    print('Rendering the graph..')

    # Title location
    p.title_location = 'above'

    # Rotate axis, in radian PI/4 == 45*
    p.xaxis.major_label_orientation = 3.14/4

    # Add reviews lines
    if not reviews_raw is None:
        p.line(labels, list(reviews_prepared.values()), line_width=2, legend='Reviews', muted_alpha=0.1)

        if average:
            # Calculate average review value
            average_review = sum(reviews_prepared.values())/len(reviews_prepared.values())
            
            p.line(labels, average_review, line_width=1, legend='Average review', line_color='black', muted_alpha=0.1)

    # Add number of votes lines
    if not votes_raw is None:
        p.line(labels, list(votes_prepared.values()), line_width=2, legend='Number of votes', line_color='red', muted_alpha=0.1)

        if average:
            # Calculate average number of votes
            average_votes = sum(votes_prepared.values())/len(votes_prepared.values())
            
            p.line(labels, average_votes, line_width=1, legend='Average number of votes', line_color='green', muted_alpha=0.1)
    
    # Add legend
    p.legend.location = 'bottom_left'
    p.legend.click_policy = 'mute'

    # Specify output file
    output_file('Show_Graph_{0}.html'.format(show_id))
    
    # Show the graph
    show(p)

def parse_arguments():
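
create_graph2 leans on Bokeh's legend muting to toggle series on and off. A self-contained sketch of that categorical-axis-plus-mute pattern with placeholder episode labels (again, newer Bokeh uses legend_label rather than legend):

    from bokeh.plotting import figure, output_file, show

    labels = ['S1E1', 'S1E2', 'S1E3', 'S1E4']
    ratings = [8.1, 7.9, 8.4, 8.8]

    p = figure(x_range=labels, y_range=(0, 11), sizing_mode='stretch_both',
               title='demo show')
    p.xaxis.major_label_orientation = 3.14 / 4
    p.line(labels, ratings, line_width=2, legend_label='Reviews', muted_alpha=0.1)

    p.legend.location = 'bottom_left'
    p.legend.click_policy = 'mute'

    output_file('demo_graph.html')
    show(p)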

0 View Source File : _comparison.py
License : MIT License
Project Creator : metriculous-ml

    def display(
        self, include_spacer: Optional[bool] = None, width: Optional[str] = None
    ) -> None:
        """Displays a table with quantities and figures in a Jupyter notebook."""

        # Increase usable Jupyter notebook width when comparing many models or if specified by user
        if width is not None or len(self.evaluations) >= 4:
            _display_html_in_notebook(
                f"""
                <style>.container
                    {{
                        width:{width or '90%'} !important;
                        margin: auto !important;
                    }}
                </style>"
                """
            )

        # noinspection PyTypeChecker
        _display_html_in_notebook(_html_quantity_comparison_table(self.evaluations))
        output_notebook()

        for row in _figure_rows(self.evaluations, include_spacer=include_spacer):
            plotting.show(row)

        # noinspection PyBroadException
        try:
            # Play a sound to indicate that results are ready
            os.system("afplay /System/Library/Sounds/Tink.aiff")
        except Exception:
            pass

    def html(self, include_spacer: Optional[bool] = None) -> str:
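
display pairs output_notebook with plotting.show so each row of figures renders inline in the notebook. The bare-bones version of that pattern, meant to be run in a Jupyter cell, looks like this:

    from bokeh.io import output_notebook
    from bokeh.plotting import figure, show

    output_notebook()                # route output to the notebook instead of a file
    p = figure(width=300, height=300)
    p.line([1, 2, 3, 4], [1, 4, 9, 16])
    show(p)                          # renders inline below the cell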

0 View Source File : draw_utils.py
License : Apache License 2.0
Project Creator : modernatx

def view_alignment(
    aligned,
    fontsize="9pt",
    show_N=100,
    colorscheme=aa_chemistry_simple,
    boxwidth=9,
    boxheight=15,
    label_width=None,
    show_descriptions=False,
    show_grouping=False,
):
    """Bokeh sequence alignment view for protein and nucleic acid sequences


    :sa: https://dmnfarrell.github.io/bioinformatics/bokeh-sequence-aligner

    :param aligned: MultipleSeqAlignment object
    :param fontsize: font size for text labels
    :param show_N: size of sequence window (in number of sequence letters)
    :param colorscheme: a weblogo ColorScheme object
    :param boxwidth: column width of alignment
    :param boxheight: row height of alignment
    :param label_width: maximum length of row label; if None, extend to maximum label length
    :param show_descriptions: if True, show SeqRecord description for each row
    :param show_grouping: if True, highlight changes from reference in red against green background,
        instead of using the residue colorscheme
    :returns: A Bokeh plot of the Multiple Sequence Alignment.
    """

    def get_colors(seqs, color_scheme):
        """make colors for letters in sequence

        :param seqs: A string sequence.
        :param color_scheme: A string.
        :returns: a sequence of colors for each letter in seqs.
        """
        # get colors
        color_dict = convert_colorscheme_to_color_map(color_scheme, color_format="hex")
        # assign colors to sequences
        text = [i for s in list(seqs) for i in s]
        return [color_dict[a] for a in text]

    def get_colors_for_matching(seqs):
        """match/mismatch color scheme for show_grouping

        :param seqs: Sequences for which colors need to be matched.
        :returns: a list of colors (strings)
        """
        refseq = seqs[0]
        colors = list()
        for seq in list(seqs):
            for xs, ref_s in zip(seq, refseq):
                colors.append(apply_matching_colorscheme(xs, ref_s, color_format="hex"))
        return colors

    # make sequence and id lists from the aligned object
    seqs = [rec.seq for rec in (aligned)]
    if show_descriptions:
        labels = [f"{row} - {rec.description} ({rec.id})" for (row, rec) in enumerate(aligned)]
    else:
        labels = [f"{row} - {rec.id}" for (row, rec) in enumerate(aligned)]

    if label_width:
        labels = [label[:label_width] for label in labels]
    else:
        label_width = max(len(label) for label in labels)

    text = [i for s in list(seqs) for i in s]
    if show_grouping:
        colors = get_colors_for_matching(seqs)
    else:
        colors = get_colors(seqs, colorscheme)
    N = len(seqs[0])
    S = len(seqs)

    x = np.arange(1, N + 1)
    # need to reverse y so that sequences are plotted top-to-bottom
    y = np.arange(S - 1, -1, -1)
    # creates a 2D grid of coords from the 1D arrays
    xx, yy = np.meshgrid(x, y)
    # flattens the arrays
    gx = xx.ravel()
    gy = yy.flatten()
    # use recty for rect coords with an offset
    recty = gy + 0.5
    # now we can create the ColumnDataSource with all the arrays
    source = bk.models.ColumnDataSource(dict(x=gx, y=gy, recty=recty, text=text, colors=colors))
    plot_height = len(seqs) * boxheight + 50
    x_range = bk.models.Range1d(0, N + 1, bounds="auto")
    viewlen = min(show_N, N)
    # view_range is for the close up view
    view_range = (0, viewlen)
    tools = "xpan,xwheel_zoom,reset,save"

    # plot_width combines length of text labels and number of letters in sequence view window
    # note: this part requires additional tuning; 5 pixel average width of y-axis labels is a guess
    plot_width = int(5 * label_width) + boxwidth * viewlen + 40

    # entire sequence view (no text, with zoom)
    p = figure(
        title=None,
        plot_width=plot_width,
        plot_height=50,
        x_range=x_range,
        y_range=(0, S),
        tools=tools,
        min_border=0,
        toolbar_location="below",
    )
    rects = bk.models.glyphs.Rect(
        x="x",
        y="recty",
        width=1,
        height=1,
        fill_color="colors",
        line_color=None,
        fill_alpha=0.6,
    )
    p.add_glyph(source, rects)
    p.yaxis.visible = False
    p.grid.visible = False

    # sequence text view with ability to scroll along x axis
    p1 = figure(
        title=None,
        plot_width=plot_width,
        plot_height=plot_height,
        x_range=view_range,
        y_range=labels[::-1],
        tools="xpan,reset,save",
        min_border=0,
        toolbar_location="below",
    )  # , lod_factor=1)
    glyph = bk.models.glyphs.Text(
        x="x",
        y="y",
        text="text",
        text_align="center",
        text_color="black",
        text_font=value("monospace"),
        text_font_size=fontsize,
    )
    rects = bk.models.glyphs.Rect(
        x="x",
        y="recty",
        width=1,
        height=1,
        fill_color="colors",
        line_color=None,
        fill_alpha=0.4,
    )
    p1.add_glyph(source, glyph)
    p1.add_glyph(source, rects)

    p1.grid.visible = False
    p1.xaxis.major_label_text_font_style = "bold"
    p1.yaxis.minor_tick_line_width = 0
    p1.yaxis.major_tick_line_width = 0

    p = bk.layouts.gridplot([[p], [p1]], toolbar_location="below")
    show(p)
    return p
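
A toy usage sketch for view_alignment, assuming the module above (with its weblogo-based color helpers) is importable and Biopython is installed; the two short sequences are placeholders:

    from Bio.Align import MultipleSeqAlignment
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord

    aligned = MultipleSeqAlignment([
        SeqRecord(Seq("MKTAYIAKQR"), id="ref", description="reference"),
        SeqRecord(Seq("MKTAYLAKQR"), id="mut1", description="variant"),
    ])

    # highlights differences from the first record and calls bokeh show on the grid
    p = view_alignment(aligned, show_grouping=True, show_descriptions=True)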

0 View Source File : viz.py
License : MIT License
Project Creator : nivlab

def plot_raw_blinks(fname, raw, overwrite=True, show=False):
    """Plot detected (and corrected) blinks in raw pupillometry data."""
    from bokeh.plotting import figure, output_file, show as bokeh_show  # avoid shadowing the boolean 'show' argument
    from bokeh.models import BoxZoomTool, Range1d
    
    ## I/O handling.
    if not os.path.splitext(fname.lower())[1] == '.html': fname = '%s.html' % fname
    if os.path.isfile(fname): os.remove(fname)        
        
    ## Initialize html file.
    output_file(fname)

    ## Initialize figure.
    x_range = (raw.times.min(), raw.times.max())
    y_range = np.array((np.nanmin(raw.data[:,-1]),np.nanmax(raw.data[:,-1])))
    y_range += 0.1 * np.concatenate([-np.diff(y_range), np.diff(y_range)])
    plot = figure(plot_width=1200, plot_height=300, toolbar_location="right", 
                  x_range = x_range, y_range=y_range, tools="wheel_zoom,reset",
                  x_axis_label='Time (s)', y_axis_label='Pupillometry (au)',
                  title="Blink Correction")
    plot.add_tools(BoxZoomTool(dimensions="width"))

    ## Plot raw pupillometry data.
    plot.line(raw.times, raw.data[:,-1], line_width=2)
    
    ## Plot blink periods.
    X, Y = [], []
    for i, j in raw.blinks:
        X.append(raw.times[i:j])
        Y.append(raw.data[i:j,-1])
        plot.patch(np.hstack((X[-1],X[-1][::-1])), 
                   np.hstack((np.ones_like(X[-1])*3000, np.ones_like(X[-1])*5000)),
                  color='black', alpha=0.07)
    plot.multi_line(X, Y, line_width=2, line_color='orange')    
    
    ## Save/display.
    if show: bokeh_show(plot)
    
# plot_raw_blinks("test", raw, show=True)

def plot_heatmaps(info_with_aoi, raw_pos_data, contrast, config):

0 View Source File : visualization.py
License : Apache License 2.0
Project Creator : oeg-upm

def example():
    def mscatter(p, x, y, marker):
        p.scatter(x, y, marker=marker, size=15,
                  line_color="navy", fill_color="orange", alpha=0.5)

    def mtext(p, x, y, text):
        p.text(x, y, text=[text],
               text_color="firebrick", text_align="center", text_font_size="10pt")

    p = figure(title="Bokeh Markers", toolbar_location=None)
    p.grid.grid_line_color = None
    p.background_fill_color = "#eeeeee"

    N = 10

    mscatter(p, random(N) + 2, random(N) + 1, "circle")
    mscatter(p, random(N) + 4, random(N) + 1, "square")
    mscatter(p, random(N) + 6, random(N) + 1, "triangle")
    mscatter(p, random(N) + 8, random(N) + 1, "asterisk")

    mscatter(p, random(N) + 2, random(N) + 4, "circle_x")
    mscatter(p, random(N) + 4, random(N) + 4, "square_x")
    mscatter(p, random(N) + 6, random(N) + 4, "inverted_triangle")
    mscatter(p, random(N) + 8, random(N) + 4, "x")

    mscatter(p, random(N) + 2, random(N) + 7, "circle_cross")
    mscatter(p, random(N) + 4, random(N) + 7, "square_cross")
    mscatter(p, random(N) + 6, random(N) + 7, "diamond")
    mscatter(p, random(N) + 8, random(N) + 7, "cross")

    mtext(p, 2.5, 0.5, "circle / o")
    mtext(p, 4.5, 0.5, "square")
    mtext(p, 6.5, 0.5, "triangle")
    mtext(p, 8.5, 0.5, "asterisk / *")

    mtext(p, 2.5, 3.5, "circle_x / ox")
    mtext(p, 4.5, 3.5, "square_x")
    mtext(p, 6.5, 3.5, "inverted_triangle")
    mtext(p, 8.5, 3.5, "x")

    mtext(p, 2.5, 6.5, "circle_cross / o+")
    mtext(p, 4.5, 6.5, "square_cross")
    mtext(p, 6.5, 6.5, "diamond")
    mtext(p, 8.5, 6.5, "cross / +")

    output_file("markers.html", title="markers.py example")

    show(p)  # open a browser


def draw_model_2d(model, data=None, membership=None, show_figure=True):

0 View Source File : visualization.py
License : Apache License 2.0
Project Creator : oeg-upm

def draw_model_2d(model, data=None, membership=None, show_figure=True):
    title = "draw FCM model"
    fig = figure(title=title, toolbar_location=None)
    fig.grid.grid_line_color = None
    fig.background_fill_color = "#eeeeee"
    output_p = None
    for clus, cc_color in enumerate(zip(model.cluster_centers_, color_gen())):
        cc, color = cc_color
        fig = draw_points_2d(np.array([cc]), fig=fig, title=title, marker="diamond", size=15,
                             line_color="navy", fill_color=color, alpha=1.0)
        if data is not None and membership is not None:
            for idx, data_point in enumerate(data):
                # print idx
                # print clus
                print(membership[idx][clus])
                fig = draw_points_2d(np.array([data_point]), fig=fig, title=title, marker="circle", size=10,
                                     line_color="navy", fill_color=color, alpha=membership[idx][clus])
    if show_figure:
        show(fig)
    return fig


def draw_points_2d(points, fig=None, title="figure 123", **kwargs):

0 View Source File : plot.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : Open-ET

    def _plot(self, FluxObj, ncols=1, output_type='save', out_file=None, 
            suptitle='', plot_width=1000, plot_height=450, 
            sizing_mode='scale_both', merge_tools=False, link_x=True, **kwargs): 
        """ 
        Private routine for aggregated validation plots that are used by
        the :meth:`.QaQc.plot` and :meth:`.Data.plot` methods.
        """
        # get daily and monthly time series with internal names, get units
        monthly = False
        if hasattr(FluxObj, 'monthly_df'):
            # will run correction as of now if it is a QaQc
            monthly = True
            monthly_df = FluxObj.monthly_df.rename(columns=FluxObj.inv_map) 
            # avoid plotting single point- errors out bokeh datetime axis, etc.
            for c in monthly_df.columns:
                if monthly_df[c].notna().sum() <= 1:
                    monthly_df.drop(c, axis=1, inplace=True)
            monthly_source = ColumnDataSource(monthly_df)

        # so that the correction is run, may change this
        FluxObj.df.head(); # if Data, the df must be accessed so vp/vpd get calculated
        df = FluxObj.df.rename(columns=FluxObj.inv_map) 
        variables = FluxObj.variables
        units = FluxObj.units 
        # bokeh column sources for tooltips
        daily_source=ColumnDataSource(df)
        # for aggregating plots
        daily_line = []
        daily_scatter = []
        monthly_line = []
        monthly_scatter = []

        if output_type == 'save':
            output_file(out_file)

        def _get_units(plt_vars, units):
            """
            Helper function to figure out units for multivariate plots.
            If none of plt_vars exist return None, if multiple units are found
            print a warning that vars have different units. Returns string if 
            one or more units are found- first found if multiple. 
            """
            ret = [] 
            for v in plt_vars:
                unit = units.get(v, None)
                if unit is not None:
                    ret.append(unit)
            if len(ret) == 0:
                ret = None
            elif len(set(ret)) > 1:
                print(
                    'WARNING: variables: {} are not of the same units'.format(
                        ','.join(plt_vars)
                    )
                )
                ret = ret[0]
            elif len(set(ret)) == 1:
                ret = ret[0]

            return ret


        # run through each plot, daily then monthly versions
        #### 
        # energy balance time series plots
        #### 
        plt_vars = ['LE', 'H', 'Rn', 'G']
        colors = ['blue', 'red', 'black', 'green']
        title = 'Daily Surface Energy Balance Components'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='energy_balance_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=plt_vars
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print(
                'Energy balance components time series graphs missing all '
                'variables'
            )
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Surface Energy Balance Components'
            fig = figure(x_axis_label=x_label, y_axis_label=y_label,title=title,
                width=plot_width, height=plot_height, 
                name='energy_balance_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=plt_vars
            )
            monthly_line.append(fig)

        #### 
        # incoming shortwave and ASCE potential clear sky time series plots
        #### 
        plt_vars = ['sw_in', 'rso']
        # only plot if we have both
        if set(plt_vars).issubset(df.columns):
            labels = ['Station Rs', 'ASCE Rso']
            colors = ['black', 'red']
            title =\
                'Daily Incoming Shortwave (Rs) and ASCE Clear Sky Shortwave '+\
                'Radiation (Rso)'
            x_label = 'date'
            y_label = _get_units(plt_vars, units)
            fig = figure(x_axis_label=x_label, y_axis_label=y_label, 
                title=title, width=plot_width, height=plot_height, 
                name='Rs_daily'
            )
            fig = Plot.add_lines(
                fig, df, plt_vars, colors, x_label, daily_source, labels=labels
            )
            if fig is not None:
                daily_line.append(fig)
                ## same for monthly fig (removed for now)
                #title='Monthly Incoming Shortwave and ASCE Potential Radiation'
                #fig = figure(
                #    x_axis_label=x_label,y_axis_label=y_label,title=title,
                #    width=plot_width, height=plot_height
                #)
                #fig = Plot.add_lines(
                #    fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                #    labels=labels
                #)
                #monthly_line.append(fig)
        else:
            print(
                'Shortwave and potential clear sky radiation time series '
                'graphs missing all variables'
            )

        #### 
        # multiple soil heat flux sensor time series plots
        #### 
        # keep user names for these in hover 
        g_re = re.compile('^[gG]_[\d+mean|corr]|G$')
        g_vars = [
            v for v in variables if g_re.match(v) and v in df.columns
        ]
        num_lines = len(g_vars)
        if num_lines > 1:
            rename_dict = {k:variables[k] for k in g_vars}
            tmp_df = df[g_vars].rename(columns=rename_dict)
            tmp_source = ColumnDataSource(tmp_df)
            plt_vars = list(rename_dict.values())
            colors = Viridis256[0:-1:int(256/num_lines)]
            title = 'Daily Soil Heat Flux (Multiple Sensors)'
            x_label = 'date'
            y_label = _get_units(g_vars, units)
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                plot_width=plot_width, plot_height=plot_height, name='G_daily'
            )
            fig = Plot.add_lines(
                fig, tmp_df, plt_vars, colors, x_label, tmp_source, 
                labels=plt_vars
            )
            if fig is not None:
                daily_line.append(fig)
            if fig is not None and monthly:
                # same for monthly fig
                g_vars = [
                    v for v in variables if g_re.match(v) and v in \
                        monthly_df.columns
                ]
                num_lines = len(g_vars)
                if num_lines > 1:
                    tmp_df = monthly_df[g_vars].rename(columns=rename_dict)
                    tmp_source = ColumnDataSource(tmp_df)
                    title = 'Monthly Soil Heat Flux (Multiple Sensors)'
                    fig = figure(
                        x_axis_label=x_label, y_axis_label=y_label,title=title,
                        plot_width=plot_width, plot_height=plot_height, 
                        name='G_monthly'
                    )
                    fig = Plot.add_lines(
                        fig, tmp_df, plt_vars, colors, x_label, tmp_source,
                        labels=plt_vars
                    )
                    monthly_line.append(fig)
            # do not print warning if missing multiple soil heat flux recordings

        #### 
        # radiation time series plots
        #### 
        plt_vars = ['sw_in', 'lw_in', 'sw_out', 'lw_out']
        colors = ['red', 'darkred', 'blue', 'navy']
        title = 'Daily Radiation Components'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='radiation_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=plt_vars
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print(
                'Radiation components time series graphs missing all variables'
            )
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Radiation Components'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='radiation_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=plt_vars
            )
            monthly_line.append(fig)


        #### 
        # temperature time series plot
        #### 
        plt_vars = ['t_max','t_avg','t_min','t_dew']
        colors = ['red','black','blue','green']
        title = 'Daily Average Air Temperature'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='temp_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=plt_vars
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print(
                'Average air temperature time series graphs missing all '
                'variables'
            )
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Average Air Temperature'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label,title=title,
                width=plot_width, height=plot_height, name='temp_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=plt_vars
            )
            monthly_line.append(fig)

        #### 
        # vapor pressure time series plots
        #### 
        plt_vars = ['vp', 'vpd']
        colors = ['black', 'darkred']
        title = 'Daily Average Vapor Pressure and Deficit'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='vap_press_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=plt_vars
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print('Vapor pressure time series graphs missing all variables')
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Average Vapor Pressure'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='vap_press_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=plt_vars
            )
            monthly_line.append(fig)

        #### 
        # windspeed time series plot
        #### 
        plt_vars = ['ws']
        colors = ['black']
        title = 'Daily Average Windspeed'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='wind_daily'
        )
        fig = Plot.add_lines(fig, df, plt_vars, colors, x_label, daily_source)
        if fig is not None:
            daily_line.append(fig)
        else:
            print('Windspeed time series graphs missing all variables')
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Average Windspeed'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='wind_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source
            )
            monthly_line.append(fig)

        #### 
        # precipitation time series plots
        #### 
        plt_vars = ['ppt', 'gridMET_prcp']
        labels = ['station', 'gridMET']
        colors = ['black', 'red']
        title = 'Daily Station and gridMET Precipitation'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='precip_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=labels
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print('Precipitation time series graphs missing all variables')
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Station and gridMET Precipitation'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='precip_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=labels
            )
            monthly_line.append(fig)

        #### 
        # latent energy time series plots
        #### 
        plt_vars = ['LE', 'LE_corr', 'LE_user_corr']
        colors = ['black', 'red', 'darkorange']
        title = 'Daily Average Latent Energy Flux'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='LE_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=plt_vars
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print('Latent energy time series graphs missing all variables')
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Average Latent Energy Flux'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='LE_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=plt_vars
            )
            monthly_line.append(fig)

        #### 
        # ET time series plots
        #### 
        refET = 'ETr' if 'ETrF' in df.columns else 'ETo'
        plt_vars = ['ET', 'ET_corr', 'ET_user_corr', f'gridMET_{refET}']
        labels = plt_vars[0:3] + [refET]
        colors = ['black', 'red', 'darkorange', 'blue']
        title = 'Daily Evapotranspiration'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='ET_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=labels
        )
        if 'ET_fill_val' in df.columns and fig is not None:
            # make gap fill values more visible
            Plot.line_plot(
                fig, 'date', 'ET_fill_val', daily_source, 'green', 
                label='ET_fill_val', line_width=3
            )

        if fig is not None:
            daily_line.append(fig)
        else:
            print(
                'Evapotranspiration time series graphs missing all variables'
            )
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Evapotranspiration'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='ET_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=labels
            )
            monthly_line.append(fig)

        #### 
        # number gap filled days monthly time series plot
        #### 
        if monthly and 'ET_gap' in monthly_df.columns:
            txt = ''
            if 'ET_corr' in df.columns:
                txt = ' Corrected'
            title = 'Number of Gap Filled Days in{} Monthly ET'.format(txt)
            x_label = 'date'
            y_label = 'number of gap-filled days'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='ET_gaps'
            )
            x = 'date'
            y = 'ET_gap'
            color = 'black'
            Plot.line_plot(fig, x, y, monthly_source, color)
            monthly_line.append(fig)
        elif monthly:
            print('Monthly count of gap filled ET days plot missing variable')

        #### 
        # ETrF time series plots
        ####
        plt_vars = [f'{refET}F', f'{refET}F_filtered']
        colors = ['black', 'red']
        title = f'Daily Fraction of Reference ET ({refET}F)'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name=f'{refET}F_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=plt_vars
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print(
                'Fraction of reference ET time series graphs missing all '
                'variables'
            )
        if fig is not None and monthly:
            # same for monthly fig
            title = f'Monthly Fraction of Reference ET ({refET}F)'
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name=f'{refET}F_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=plt_vars
            )
            monthly_line.append(fig)

        #### 
        # energy balance ratio time series plots
        #### 
        plt_vars = ['ebr', 'ebr_corr', 'ebr_user_corr']
        colors = ['black', 'red', 'darkorange']
        title = 'Daily Energy Balance Ratio with Long-term Mean'
        x_label = 'date'
        y_label = _get_units(plt_vars, units)
        # add mean EBR for each time series in legend
        labels = []
        for i, v in enumerate(plt_vars):
            if v in df.columns:
                added_text = ': {}'.format(str(round(df[v].mean(),2)))
                labels.append(plt_vars[i] + added_text)
            else:
                labels.append(None)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_height, name='EBR_daily'
        )
        fig = Plot.add_lines(
            fig, df, plt_vars, colors, x_label, daily_source, labels=labels
        )
        if fig is not None:
            daily_line.append(fig)
        else:
            print(
                'Energy balance ratio time series graphs missing all '
                'variables'
            )
        if fig is not None and monthly:
            # same for monthly fig
            title = 'Monthly Energy Balance Ratio with Long-term Mean'
            # add mean for monthly EBRs to legend
            labels = []
            for i, v in enumerate(plt_vars):
                if v in monthly_df.columns:
                    added_text = ': {}'.format(
                        str(round(monthly_df[v].mean(),2))
                    )
                    labels.append(plt_vars[i] + added_text)
                else:
                    labels.append(None)
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                width=plot_width, height=plot_height, name='EBR_monthly'
            )
            fig = Plot.add_lines(
                fig, monthly_df, plt_vars, colors, x_label, monthly_source,
                labels=labels
            )
            monthly_line.append(fig)

        #### 
        # energy balance closure scatter plots
        #### 
        title = 'Daily Energy Balance Closure, Energy Versus Flux with Slope '\
            'Through Origin'
        unit = _get_units(['LE', 'H', 'Rn', 'G'], units)
        y_label = 'LE + H ({})'.format(unit)
        x_label = 'Rn - G ({})'.format(unit)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_width, name='energy_vs_flux_daily'
        )
        y_vars = ['flux', 'flux_corr', 'flux_user_corr']
        colors = ['black', 'red', 'darkorange']
        labels = ['init', 'corr', 'user_corr']
        # add plot pairs to plot if they exist, add 1:1
        mins_maxs = []
        n_vars_fnd = 0
        for i, v in enumerate(y_vars):
            if v in df.columns and not df[v].isna().all():
                n_vars_fnd += 1
                if v == 'flux_corr' and 'energy_corr' in df.columns:
                    x_var = 'energy_corr'
                else:
                    x_var = 'energy'
                min_max = Plot.scatter_plot(
                    fig, x_var, v, daily_source, colors[i], label=labels[i]
                )
                if min_max is not None:
                    mins_maxs.append(min_max)
        if n_vars_fnd > 0:
            # add scaled one to one line
            mins_maxs = np.array(mins_maxs)
            if not pd.isna(mins_maxs).all():
                x_min = min(mins_maxs[:,0])
                x_max = max(mins_maxs[:,1])
                y_min = min(mins_maxs[:,2])
                y_max = max(mins_maxs[:,3])
                ax_min, ax_max = min([x_min,y_min]), max([x_max,y_max])
                ax_min -= 0.02*abs(ax_max-ax_min)
                ax_max += 0.02*abs(ax_max-ax_min)
                fig.x_range=Range1d(ax_min, ax_max)
                fig.y_range=Range1d(ax_min, ax_max)
                one2one_vals = np.arange(ax_min, ax_max,1)
                fig.line(
                    one2one_vals, one2one_vals, legend_label='1:1 line', 
                    color='black', line_dash='dashed'
                )
                daily_scatter.append(fig)
            if monthly:
                # same for monthly fig
                title = 'Monthly Energy Balance Closure, Energy Versus Flux '\
                    'with Slope Through Origin'
                fig = figure(
                    x_axis_label=x_label, y_axis_label=y_label, title=title,
                    width=plot_width, height=plot_width, 
                    name='energy_vs_flux_monthly'
                )
                mins_maxs = []
                for i, v in enumerate(y_vars):
                    if v in monthly_df.columns:
                        min_max = Plot.scatter_plot(
                            fig, 'energy', v, monthly_source, colors[i], 
                            label=labels[i]
                        )
                        if min_max is not None:
                            mins_maxs.append(min_max)
                mins_maxs = np.array(mins_maxs)
                # check if not all pairs are empty, if not plot 1:1
                if not pd.isna(mins_maxs).all():
                    x_min = min(mins_maxs[:,0])
                    x_max = max(mins_maxs[:,1])
                    y_min = min(mins_maxs[:,2])
                    y_max = max(mins_maxs[:,3])
                    ax_min, ax_max = min([x_min,y_min]), max([x_max,y_max])
                    ax_min -= 0.02*abs(ax_max-ax_min)
                    ax_max += 0.02*abs(ax_max-ax_min)
                    fig.x_range=Range1d(ax_min, ax_max)
                    fig.y_range=Range1d(ax_min, ax_max)
                    one2one_vals = np.arange(ax_min, ax_max,1)
                    fig.line(
                        one2one_vals, one2one_vals, legend_label='1:1 line', 
                        color='black', line_dash='dashed'
                    )
                    monthly_scatter.append(fig)
        else:
            print('Energy balance scatter graphs missing all variables')


        #### 
        # latent energy scatter plots
        #### 
        title = 'Daily Latent Energy, Initial Versus Corrected'
        unit = _get_units(['LE', 'LE_corr', 'LE_user_corr'], units)
        y_label = 'corrected ({})'.format(unit)
        x_label = 'initial ({})'.format(unit)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_width, name='LE_scatter_daily'
        )
        y_vars = ['LE_corr', 'LE_user_corr']
        colors = ['red', 'darkorange']
        labels = ['corr', 'user_corr']
        # add plot pairs to plot if they exist, add 1:1
        mins_maxs = []
        n_vars_fnd = 0
        for i, v in enumerate(y_vars):
            if v in df.columns and not df[v].isna().all():
                n_vars_fnd += 1
                min_max = Plot.scatter_plot(
                    fig, 'LE', v, daily_source, colors[i], label=labels[i]
                )
                mins_maxs.append(min_max)
        if n_vars_fnd > 0:
            # add scaled one to one line
            mins_maxs = np.array(mins_maxs)
            if not pd.isna(mins_maxs).all():
                x_min = min(mins_maxs[:,0])
                x_max = max(mins_maxs[:,1])
                y_min = min(mins_maxs[:,2])
                y_max = max(mins_maxs[:,3])
                ax_min, ax_max = min([x_min,y_min]), max([x_max,y_max])
                ax_min -= 0.02*abs(ax_max-ax_min)
                ax_max += 0.02*abs(ax_max-ax_min)
                fig.x_range=Range1d(ax_min, ax_max)
                fig.y_range=Range1d(ax_min, ax_max)
                one2one_vals = np.arange(ax_min, ax_max,1)
                fig.line(
                    one2one_vals, one2one_vals, legend_label='1:1 line', 
                    color='black', line_dash='dashed'
                )
                daily_scatter.append(fig)
            if monthly:
                # same for monthly fig
                title = 'Monthly Latent Energy, Initial Versus Corrected'
                fig = figure(
                    x_axis_label=x_label, y_axis_label=y_label, title=title,
                    width=plot_width, height=plot_width, 
                    name='LE_scatter_monthly'
                )
                mins_maxs = []
                for i, v in enumerate(y_vars):
                    if v in monthly_df.columns:
                        min_max = Plot.scatter_plot(
                            fig, 'LE', v, monthly_source, colors[i], 
                            label=labels[i]
                        )
                        if min_max is not None:
                            mins_maxs.append(min_max)
                mins_maxs = np.array(mins_maxs)
                # check if not all pairs are empty, if not plot 1:1
                if not pd.isna(mins_maxs).all():
                    x_min = min(mins_maxs[:,0])
                    x_max = max(mins_maxs[:,1])
                    y_min = min(mins_maxs[:,2])
                    y_max = max(mins_maxs[:,3])
                    ax_min, ax_max = min([x_min,y_min]), max([x_max,y_max])
                    ax_min -= 0.02*abs(ax_max-ax_min)
                    ax_max += 0.02*abs(ax_max-ax_min)
                    fig.x_range=Range1d(ax_min, ax_max)
                    fig.y_range=Range1d(ax_min, ax_max)
                    one2one_vals = np.arange(ax_min, ax_max,1)
                    fig.line(
                        one2one_vals, one2one_vals, legend_label='1:1 line', 
                        color='black', line_dash='dashed'
                    )
                    monthly_scatter.append(fig)
        else:
            print('Latent energy scatter graphs missing all variables')

        #### 
        # ET scatter plots
        #### 
        title = 'Daily Evapotranspiration, Initial Versus Corrected'
        unit = _get_units(['ET', 'ET_corr', 'ET_user_corr'], units)
        y_label = 'corrected ({})'.format(unit)
        x_label = 'initial ({})'.format(unit)
        fig = figure(
            x_axis_label=x_label, y_axis_label=y_label, title=title,
            width=plot_width, height=plot_width, name='ET_scatter_daily'
        )
        y_vars = ['ET_corr', 'ET_user_corr']
        colors = ['red', 'darkorange']
        labels = ['corr', 'user_corr']
        # add plot pairs to plot if they exist, add 1:1
        mins_maxs = []
        n_vars_fnd = 0
        for i, v in enumerate(y_vars):
            if v in df.columns and not df[v].isna().all():
                n_vars_fnd += 1
                min_max = Plot.scatter_plot(
                    fig, 'ET', v, daily_source, colors[i], label=labels[i]
                )
                mins_maxs.append(min_max)
        if n_vars_fnd > 0:
            # add scaled one to one line
            mins_maxs = np.array(mins_maxs)
            x_min = min(mins_maxs[:,0])
            x_max = max(mins_maxs[:,1])
            y_min = min(mins_maxs[:,2])
            y_max = max(mins_maxs[:,3])
            ax_min, ax_max = min([x_min,y_min]), max([x_max,y_max])
            ax_min -= 0.02*abs(ax_max-ax_min)
            ax_max += 0.02*abs(ax_max-ax_min)
            fig.x_range=Range1d(ax_min, ax_max)
            fig.y_range=Range1d(ax_min, ax_max)
            one2one_vals = np.arange(ax_min, ax_max,1)
            fig.line(
                one2one_vals, one2one_vals, legend_label='1:1 line', 
                color='black', line_dash='dashed'
            )
            daily_scatter.append(fig)
            if monthly:
                # same for monthly fig
                title = 'Monthly Evapotranspiration, Initial Versus Corrected'
                fig = figure(
                    x_axis_label=x_label, y_axis_label=y_label, title=title,
                    width=plot_width, height=plot_width, 
                    name='ET_scatter_monthly'
                )
                mins_maxs = []
                for i, v in enumerate(y_vars):
                    if v in monthly_df.columns:
                        min_max = Plot.scatter_plot(
                            fig, 'ET', v, monthly_source, colors[i], 
                            label=labels[i]
                        )
                        mins_maxs.append(min_max)
                mins_maxs = np.array(mins_maxs)
                # check if not all pairs are empty, if not plot 1:1
                if not pd.isna(mins_maxs).all():
                    x_min = min(mins_maxs[:,0])
                    x_max = max(mins_maxs[:,1])
                    y_min = min(mins_maxs[:,2])
                    y_max = max(mins_maxs[:,3])
                    ax_min, ax_max = min([x_min,y_min]), max([x_max,y_max])
                    ax_min -= 0.02*abs(ax_max-ax_min)
                    ax_max += 0.02*abs(ax_max-ax_min)
                    fig.x_range=Range1d(ax_min, ax_max)
                    fig.y_range=Range1d(ax_min, ax_max)
                    one2one_vals = np.arange(ax_min, ax_max,1)
                    fig.line(
                        one2one_vals, one2one_vals, legend_label='1:1 line', 
                        color='black', line_dash='dashed'
                    )
                    monthly_scatter.append(fig)
        else:
            print('Evapotranspiration scatter graphs missing all variables')

        #### 
        # multiple soil moisture time series plots
        #### 
        # keep user names for these in hover 
        theta_re = re.compile(r'theta_(\d+|mean)')  # e.g. theta_1, theta_2, theta_mean
        theta_vars = [
            v for v in variables if theta_re.match(v) and v in df.columns
        ]
        num_lines = len(theta_vars)
        if num_lines > 0 and not df[theta_vars].isna().all().all():
            rename_dict = {k:variables[k] for k in theta_vars}
            tmp_df = df[theta_vars].rename(columns=rename_dict)
            tmp_source = ColumnDataSource(tmp_df)
            plt_vars = list(rename_dict.values())
            colors = Viridis256[0:-1:int(256/num_lines)]
            title = 'Daily Soil Moisture (Multiple Sensors)'
            x_label = 'date'
            y_label = _get_units(theta_vars, units)
            fig = figure(
                x_axis_label=x_label, y_axis_label=y_label, title=title,
                plot_width=plot_width, plot_height=plot_height, 
                name='theta_daily'
            )
            fig = Plot.add_lines(
                fig, tmp_df, plt_vars, colors, x_label, tmp_source, 
                labels=plt_vars
            )
            if fig is not None:
                daily_line.append(fig)
            theta_vars = [
                v for v in variables if theta_re.match(v) and v in\
                    df.columns
            ]
            if fig is not None and monthly and len(theta_vars) > 0:
                # same for monthly fig
                tmp_df = monthly_df[theta_vars].rename(columns=rename_dict)
                tmp_source = ColumnDataSource(tmp_df)
                title = 'Monthly Soil Moisture (Multiple Sensors)'
                fig = figure(
                    x_axis_label=x_label, y_axis_label=y_label, title=title,
                    plot_width=plot_width, plot_height=plot_height,
                    name='theta_monthly'
                )
                fig = Plot.add_lines(
                    fig, tmp_df, plt_vars, colors, x_label, tmp_source,
                    labels=plt_vars
                )
                monthly_line.append(fig)
            # do not print warning if missing multiple soil moisture recordings


        # Aggregate plots and output depending on options
        # remove None values in different figure groups 
        daily_line = list(filter(None, daily_line))
        daily_scatter = list(filter(None, daily_scatter))
        monthly_line = list(filter(None, monthly_line))
        monthly_scatter = list(filter(None, monthly_scatter))
        # link axes for time series plots
        if link_x:
            for each in daily_line:
                each.x_range = daily_line[0].x_range
            for each in monthly_line:
                each.x_range = monthly_line[0].x_range
        figs = daily_line + daily_scatter + monthly_line + monthly_scatter
        grid = gridplot(
            figs, ncols=ncols, plot_width=None, plot_height=None, 
            sizing_mode=sizing_mode, merge_tools=merge_tools, **kwargs
        )
        if output_type == 'show':
            show(column(Div(text=suptitle),grid))
        elif output_type == 'notebook':
            from bokeh.io import output_notebook
            output_notebook()
            show(column(Div(text=suptitle),grid))
        elif output_type == 'save':
            save(column(Div(text=suptitle),grid))
        elif output_type == 'return_figs':
            return figs
        elif output_type == 'return_grid':
            return grid

        reset_output()
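
The tail of this routine is where bokeh.plotting.show is finally called: the accumulated figures are packed into a gridplot, a Div holds the super-title, and the result is shown, saved, or returned depending on output_type. Below is a minimal, self-contained sketch of that dispatch pattern using made-up data; the figure titles, the output_type values exercised, and the 'plots.html' filename are placeholders, not part of the project above.

import numpy as np
from bokeh.io import output_file, save
from bokeh.layouts import column, gridplot
from bokeh.models import Div
from bokeh.plotting import figure, show

x = np.arange(30)
figs = []
for name in ('daily series', 'monthly series'):
    f = figure(title=name, width=300, height=300, x_axis_label='day')
    f.line(x, np.random.rand(30))
    figs.append(f)

# arrange the figures in a grid and put a styled title above them
grid = gridplot(figs, ncols=2, merge_tools=False)
layout = column(Div(text='<b>Example Station</b>'), grid)

output_type = 'show'           # placeholder; the real function also handles 'notebook', 'return_figs', ...
if output_type == 'show':
    show(layout)               # browser tab, or inline if output_notebook() was called first
elif output_type == 'save':
    output_file('plots.html')  # hypothetical output path
    save(layout)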

0 View Source File : geoplot.py
License : MIT License
Project Creator : PatrikHlobil

def geoplot(  # noqa C901
    gdf_in,
    geometry_column="geometry",
    figure=None,
    figsize=None,
    title="",
    xlabel="Longitude",
    ylabel="Latitude",
    xlim=None,
    ylim=None,
    color="blue",
    colormap=None,
    colormap_uselog=False,
    colormap_range=None,
    category=None,
    dropdown=None,
    slider=None,
    slider_range=None,
    slider_name="",
    show_colorbar=True,
    colorbar_tick_format=None,
    xrange=None,
    yrange=None,
    hovertool=True,
    hovertool_columns=[],
    hovertool_string=None,
    simplify_shapes=None,
    tile_provider="CARTODBPOSITRON_RETINA",
    tile_provider_url=None,
    tile_attribution="",
    tile_alpha=1,
    panning=True,
    zooming=True,
    toolbar_location="right",
    show_figure=True,
    return_figure=True,
    return_html=False,
    legend=True,
    webgl=True,
    **kwargs,
):
    """Doc-String: TODO"""

    # Imports:
    import bokeh.plotting
    from bokeh.layouts import column, row
    from bokeh.models import (
        BasicTicker,
        BoxZoomTool,
        ColorBar,
        ColumnDataSource,
        GeoJSONDataSource,
        HoverTool,
        LinearColorMapper,
        LogColorMapper,
        LogTicker,
        Select,
        Slider,
        WheelZoomTool,
    )
    from bokeh.models.callbacks import CustomJS
    from bokeh.models.widgets import Dropdown
    from bokeh.palettes import all_palettes
    from bokeh.plotting import show

    # Make a copy of the input geodataframe:
    gdf = gdf_in.copy()

    # Check layertypes:
    if type(gdf) != pd.DataFrame:
        layertypes = []
        if "Point" in str(gdf.geom_type.unique()):
            layertypes.append("Point")
        if "Line" in str(gdf.geom_type.unique()):
            layertypes.append("Line")
        if "Polygon" in str(gdf.geom_type.unique()):
            layertypes.append("Polygon")
        if len(layertypes) > 1:
            raise Exception(
                f"Can only plot GeoDataFrames/Series with single type of geometry (either Point, Line or Polygon). Provided is a GeoDataFrame/Series with types: {layertypes}"
            )
    else:
        layertypes = ["Point"]

    # Get and check provided parameters for geoplot:
    figure_options = {
        "title": title,
        "x_axis_label": xlabel,
        "y_axis_label": ylabel,
        "plot_width": 600,
        "plot_height": 400,
        "toolbar_location": toolbar_location,
        "active_scroll": "wheel_zoom",
        "x_axis_type": "mercator",
        "y_axis_type": "mercator",
        "match_aspect": True,
    }
    if figsize is not None:
        width, height = figsize
        figure_options["plot_width"] = width
        figure_options["plot_height"] = height
    if webgl:
        figure_options["output_backend"] = "webgl"

    if type(gdf) != pd.DataFrame:
        # Convert GeoDataFrame to Web Mercator Projection:
        gdf.to_crs(epsg=3857, inplace=True)

        # Simplify shapes if wanted:
        if isinstance(simplify_shapes, numbers.Number):
            if layertypes[0] in ["Line", "Polygon"]:
                gdf[geometry_column] = gdf[geometry_column].simplify(simplify_shapes)
        elif simplify_shapes is not None:
            raise ValueError(
                "  <  simplify_shapes> parameter only accepts numbers or None."
            )

    # Check for category, dropdown or slider (choropleth map column):
    category_options = 0
    if category is not None:
        category_options += 1
        category_columns = [category]
    if dropdown is not None:
        category_options += 1
        category_columns = dropdown
    if slider is not None:
        category_options += 1
        category_columns = slider
    if category_options > 1:
        raise ValueError(
            "Only one of  < category>,  < dropdown> or  < slider> parameters is allowed to be used at once."
        )

    # Check for category (single choropleth plot):
    if category is None:
        pass
    elif isinstance(category, (list, tuple)):
        raise ValueError(
            "For  < category>, please provide an existing single column of the GeoDataFrame."
        )
    elif category in gdf.columns:
        pass
    else:
        raise ValueError(
            f"Could not find column '{category}' in GeoDataFrame. For  < category>, please provide an existing single column of the GeoDataFrame."
        )

    # Check for dropdown (multiple choropleth plots via dropdown selection):
    if dropdown is None:
        pass
    elif not isinstance(dropdown, (list, tuple)):
        raise ValueError(
            "For  < dropdown>, please provide a list/tuple of existing columns of the GeoDataFrame."
        )
    else:
        for col in dropdown:
            if col not in gdf.columns:
                raise ValueError(
                    f"Could not find column '{col}' for  < dropdown> in GeoDataFrame. "
                )

    # Check for slider (multiple choropleth plots via slider selection):
    if slider is None:
        pass
    elif not isinstance(slider, (list, tuple)):
        raise ValueError(
            "For  < slider>, please provide a list/tuple of existing columns of the GeoDataFrame."
        )
    else:
        for col in slider:
            if col not in gdf.columns:
                raise ValueError(
                    f"Could not find column '{col}' for  < slider> in GeoDataFrame. "
                )

        if slider_range is not None:
            if not isinstance(slider_range, Iterable):
                raise ValueError(
                    " < slider_range> has to be a type that is iterable like list, tuple, range, ..."
                )
            else:
                slider_range = list(slider_range)
                if len(slider_range) != len(slider):
                    raise ValueError(
                        "The number of elements in  < slider_range> has to be the same as in  < slider>."
                    )
                steps = []
                for i in range(len(slider_range) - 1):
                    steps.append(slider_range[i + 1] - slider_range[i])

                if len(set(steps)) > 1:
                    raise ValueError(
                        " < slider_range> has to have equal step size between each elements (like a range-object)."
                    )
                else:
                    slider_step = steps[0]
                    slider_start = slider_range[0]
                    slider_end = slider_range[-1]

    # Check colormap if either <category>, <dropdown> or <slider> is chosen:
    if category_options == 1:
        if colormap is None:
            colormap = blue_colormap
        elif isinstance(colormap, (tuple, list)):
            if len(colormap) > 1:
                pass
            else:
                raise ValueError(
                    f" < colormap> only accepts a list/tuple of at least two colors or the name of one of the following predefined colormaps (see also https://bokeh.pydata.org/en/latest/docs/reference/palettes.html ): {list(all_palettes.keys())}"
                )
        elif isinstance(colormap, str):
            if colormap in all_palettes:
                colormap = all_palettes[colormap]
                colormap = colormap[max(colormap.keys())]
            else:
                raise ValueError(
                    f"Could not find  < colormap> with name {colormap}. The following predefined colormaps are supported (see also https://bokeh.pydata.org/en/latest/docs/reference/palettes.html ): {list(all_palettes.keys())}"
                )
        else:
            raise ValueError(
                f" < colormap> only accepts a list/tuple of at least two colors or the name of one of the following predefined colormaps (see also https://bokeh.pydata.org/en/latest/docs/reference/palettes.html ): {list(all_palettes.keys())}"
            )
    else:
        if isinstance(color, str):
            colormap = [color]
        elif color is None:
            colormap = ["blue"]
        else:
            raise ValueError(
                " < color> has to be a string specifying the fill_color of the map glyph."
            )

    # Check xlim & ylim:
    if xlim is not None:
        if isinstance(xlim, (tuple, list)):
            if len(xlim) == 2:
                xmin, xmax = xlim
                for _ in [xmin, xmax]:
                    if not -180 < _ <= 180:
                        raise ValueError(
                            "Limits for x-axis (=Longitude) have to be between -180 and 180."
                        )
                if not xmin < xmax:
                    raise ValueError("xmin has to be smaller than xmax.")

                from pyproj import Transformer

                transformer = Transformer.from_crs("epsg:4326", "epsg:3857")
                xmin = transformer.transform(0, xmin)[0]
                xmax = transformer.transform(0, xmax)[0]
                figure_options["x_range"] = (xmin, xmax)
            else:
                raise ValueError(
                    "Limits for x-axis (=Longitude) have to be of form [xmin, xmax] with values between -180 and 180."
                )
        else:
            raise ValueError(
                "Limits for x-axis (=Longitude) have to be of form [xmin, xmax] with values between -180 and 180."
            )
    if ylim is not None:
        if isinstance(ylim, (tuple, list)):
            if len(ylim) == 2:
                ymin, ymax = ylim
                for _ in [ymin, ymax]:
                    if not -90 < _ <= 90:
                        raise ValueError(
                            "Limits for y-axis (=Latitude) have to be between -90 and 90."
                        )
                if not ymin < ymax:
                    raise ValueError("ymin has to be smaller than ymax.")

                from pyproj import Transformer

                transformer = Transformer.from_crs("epsg:4326", "epsg:3857")
                ymin = transformer.transform(ymin, 0)[1]
                ymax = transformer.transform(ymax, 0)[1]
                figure_options["y_range"] = (ymin, ymax)
            else:
                raise ValueError(
                    "Limits for y-axis (=Latitude) have to be of form [ymin, ymax] with values between -90 and 90."
                )
        else:
            raise ValueError(
                "Limits for y-axis (=Latitude) have to be of form [ymin, ymax] with values between -90 and 90."
            )

    # Create Figure to draw:
    old_layout = None
    if figure is None:
        figure_options["x_axis_label"] = (
            figure_options["x_axis_label"]
            if figure_options["x_axis_label"] is not None
            else "Longitute"
        )
        figure_options["y_axis_label"] = (
            figure_options["y_axis_label"]
            if figure_options["y_axis_label"] is not None
            else "Latitude"
        )
        p = bokeh.plotting.figure(**figure_options)

        # Add Tile Source as Background:
        p = _add_backgroundtile(
            p, tile_provider, tile_provider_url, tile_attribution, tile_alpha
        )

    elif isinstance(figure, type(bokeh.plotting.figure())):
        p = figure
    elif isinstance(figure, type(column())):
        old_layout = figure
        p = _get_figure(old_layout)
    else:
        raise ValueError(
            "Parameter  < figure> has to be of type bokeh.plotting.figure or bokeh.layouts.column."
        )

    for t in p.tools:
        # Get rid of zoom on axes:
        if isinstance(t, WheelZoomTool):
            t.zoom_on_axis = False
        # Make sure that box zoom matches aspect:
        if isinstance(t, BoxZoomTool):
            t.match_aspect = True

    # Hide legend if wanted:
    legend_input = legend
    if isinstance(legend, str):
        pass
    else:
        legend = "GeoLayer"

    # Define colormapper:
    if len(colormap) == 1:
        kwargs["fill_color"] = colormap[0]

    elif category is not None:
        # Check if category column is numerical:
        if not issubclass(gdf[category].dtype.type, np.number):
            raise NotImplementedError(
                f" < category> plot only yet implemented for numerical columns. Column '{category}' is not numerical."
            )

        field = category
        colormapper_options = {"palette": colormap}
        if colormap_range is not None:
            if not isinstance(colormap_range, (tuple, list)):
                raise ValueError(
                    " < colormap_range> can only be 'None' or a tuple/list of form (min, max)."
                )
            elif len(colormap_range) == 2:
                colormapper_options["low"] = colormap_range[0]
                colormapper_options["high"] = colormap_range[1]
        else:
            colormapper_options["low"] = gdf[field].min()
            colormapper_options["high"] = gdf[field].max()
        if colormap_uselog:
            colormapper = LogColorMapper(**colormapper_options)
        else:
            colormapper = LinearColorMapper(**colormapper_options)
        kwargs["fill_color"] = {"field": "Colormap", "transform": colormapper}
        if not isinstance(legend, str):
            legend = str(field)

    elif dropdown is not None:
        # Check if all columns in dropdown selection are numerical:
        for col in dropdown:
            if not issubclass(gdf[col].dtype.type, np.number):
                raise NotImplementedError(
                    f" < dropdown> plot only yet implemented for numerical columns. Column '{col}' is not numerical."
                )

        field = dropdown[0]
        colormapper_options = {"palette": colormap}
        if colormap_range is not None:
            if not isinstance(colormap_range, (tuple, list)):
                raise ValueError(
                    " < colormap_range> can only be 'None' or a tuple/list of form (min, max)."
                )
            elif len(colormap_range) == 2:
                colormapper_options["low"] = colormap_range[0]
                colormapper_options["high"] = colormap_range[1]
        else:
            colormapper_options["low"] = gdf[dropdown].min().min()
            colormapper_options["high"] = gdf[dropdown].max().max()
        if colormap_uselog:
            colormapper = LogColorMapper(**colormapper_options)
        else:
            colormapper = LinearColorMapper(**colormapper_options)
        kwargs["fill_color"] = {"field": "Colormap", "transform": colormapper}
        legend = " " + field

    elif slider is not None:
        # Check if all columns in dropdown selection are numerical:
        for col in slider:
            if not issubclass(gdf[col].dtype.type, np.number):
                raise NotImplementedError(
                    f" < slider> plot only yet implemented for numerical columns. Column '{col}' is not numerical."
                )

        field = slider[0]
        colormapper_options = {"palette": colormap}
        if colormap_range is not None:
            if not isinstance(colormap_range, (tuple, list)):
                raise ValueError(
                    " < colormap_range> can only be 'None' or a tuple/list of form (min, max)."
                )
            elif len(colormap_range) == 2:
                colormapper_options["low"] = colormap_range[0]
                colormapper_options["high"] = colormap_range[1]
        else:
            colormapper_options["low"] = gdf[slider].min().min()
            colormapper_options["high"] = gdf[slider].max().max()
        if colormap_uselog:
            colormapper = LogColorMapper(**colormapper_options)
        else:
            colormapper = LinearColorMapper(**colormapper_options)
        kwargs["fill_color"] = {"field": "Colormap", "transform": colormapper}
        if not isinstance(legend, str):
            legend = "Geolayer"

    # Check that only hovertool_columns or hovertool_string is used:
    if isinstance(hovertool_columns, (list, tuple, str)):
        if len(hovertool_columns) > 0 and hovertool_string is not None:
            raise ValueError(
                "Either  < hovertool_columns> or  < hovertool_string> can be used, but not both at the same time."
            )
    else:
        raise ValueError(
            " < hovertool_columns> has to be a list of columns of the GeoDataFrame or the string 'all'."
        )

    if hovertool_string is not None:
        hovertool_columns = "all"

    # Check for Hovertool columns:
    if hovertool:
        if not isinstance(hovertool_columns, (list, tuple)):
            if hovertool_columns == "all":
                hovertool_columns = list(
                    filter(lambda col: col != geometry_column, gdf.columns)
                )
            else:
                raise ValueError(
                    " < hovertool_columns> has to be a list of columns of the GeoDataFrame or the string 'all'."
                )
        elif len(hovertool_columns) == 0:
            if category is not None:
                hovertool_columns = [category]
            elif dropdown is not None:
                hovertool_columns = dropdown
            elif slider is not None:
                hovertool_columns = slider
            else:
                hovertool_columns = []
        else:
            for col in hovertool_columns:
                if col not in gdf.columns:
                    raise ValueError(
                        f"Could not find columns '{col}' in GeoDataFrame.  < hovertool_columns> has to be a list of columns of the GeoDataFrame or the string 'all'."
                    )
    else:
        if category is None:
            hovertool_columns = []
        else:
            hovertool_columns = [category]

    # Reduce DataFrame to needed columns:
    if type(gdf) == pd.DataFrame:
        gdf["Geometry"] = 0
        additional_columns = ["x", "y"]
    else:
        additional_columns = [geometry_column]
    for kwarg, value in kwargs.items():
        if isinstance(value, Hashable):
            if value in gdf.columns:
                additional_columns.append(value)
    if category_options == 0:
        gdf = gdf[list(set(hovertool_columns) | set(additional_columns))]
    else:
        gdf = gdf[
            list(
                set(hovertool_columns) | set(category_columns) | set(additional_columns)
            )
        ]
        gdf["Colormap"] = gdf[field]
        field = "Colormap"

    # Create GeoJSON DataSource for Plot:
    if type(gdf) != pd.DataFrame:
        geo_source = GeoJSONDataSource(geojson=gdf.to_json())
    else:
        geo_source = gdf

    # Draw Glyph on Figure:
    layout = None
    if "Point" in layertypes:
        if "line_color" not in kwargs:
            kwargs["line_color"] = kwargs["fill_color"]
        glyph = p.scatter(
            x="x", y="y", source=geo_source, legend_label=legend, **kwargs
        )

    if "Line" in layertypes:
        if "line_color" not in kwargs:
            kwargs["line_color"] = kwargs["fill_color"]
            del kwargs["fill_color"]
        glyph = p.multi_line(
            xs="xs", ys="ys", source=geo_source, legend_label=legend, **kwargs
        )

    if "Polygon" in layertypes:

        if "line_color" not in kwargs:
            kwargs["line_color"] = "black"

        # Creates from a geoDataFrame with Polygons and Multipolygons a Pandas DataFrame
        # with x any y columns specifying the geometry of the Polygons:
        geo_source = ColumnDataSource(
            convert_geoDataFrame_to_patches(gdf, geometry_column)
        )

        # Plot polygons:
        glyph = p.multi_polygons(
            xs="__x__", ys="__y__", source=geo_source, legend_label=legend, **kwargs
        )

    # Add hovertool:
    if hovertool and (category_options == 1 or len(hovertool_columns) > 0):
        my_hover = HoverTool(renderers=[glyph])
        if hovertool_string is None:
            my_hover.tooltips = [(str(col), "@{%s}" % col) for col in hovertool_columns]
        else:
            my_hover.tooltips = hovertool_string
        p.add_tools(my_hover)

    # Add colorbar:
    if show_colorbar and category_options == 1:
        colorbar_options = {
            "color_mapper": colormapper,
            "label_standoff": 12,
            "border_line_color": None,
            "location": (0, 0),
        }
        if colormap_uselog:
            colorbar_options["ticker"] = LogTicker()

        if colorbar_tick_format:
            colorbar_options["formatter"] = get_tick_formatter(colorbar_tick_format)

        colorbar = ColorBar(**colorbar_options)

        p.add_layout(colorbar, "right")

    # Add Dropdown Widget:
    if dropdown is not None:
        # Define Dropdown widget:
        dropdown_widget = Select(
            title="Select Choropleth Layer", options=list(zip(dropdown, dropdown))
        )

        # Define Callback for Dropdown widget:
        callback = CustomJS(
            args=dict(
                dropdown_widget=dropdown_widget,
                geo_source=geo_source,
                legend=p.legend[0].items[0],
            ),
            code="""

                //Change selection of field for Colormapper for choropleth plot:
                geo_source.data["Colormap"] = geo_source.data[dropdown_widget.value];
                geo_source.change.emit();

                //Change label of Legend:
                legend.label["value"] = " " + dropdown_widget.value;

                            """,
        )
        dropdown_widget.js_on_change("value", callback)

        # Add Dropdown widget above the plot:
        if old_layout is None:
            layout = column(dropdown_widget, p)
        else:
            layout = column(dropdown_widget, old_layout)

    # Add Slider Widget:
    if slider is not None:

        if slider_range is None:
            slider_start = 0
            slider_end = len(slider) - 1
            slider_step = 1

        value2name = ColumnDataSource(
            {
                "Values": np.arange(
                    slider_start, slider_end + slider_step, slider_step
                ),
                "Names": slider,
            }
        )

        # Define Slider widget:
        slider_widget = Slider(
            start=slider_start,
            end=slider_end,
            value=slider_start,
            step=slider_step,
            title=slider_name,
        )

        # Define Callback for Slider widget:
        callback = CustomJS(
            args=dict(
                slider_widget=slider_widget,
                geo_source=geo_source,
                value2name=value2name,
            ),
            code="""

                //Change selection of field for Colormapper for choropleth plot:
                var slider_value = slider_widget.value;
                var i;
                for(i=0; i < value2name.data["Names"].length; i++)
                    {
                    if (value2name.data["Values"][i] == slider_value)
                        {
                         var name = value2name.data["Names"][i];
                         }

                    }
                geo_source.data["Colormap"] = geo_source.data[name];
                geo_source.change.emit();

                            """,
        )
        slider_widget.js_on_change("value", callback)

        # Add Slider widget above the plot:
        if old_layout is None:
            layout = column(slider_widget, p)
        else:
            layout = column(slider_widget, old_layout)

    # Hide legend if user wants:
    if legend_input is False:
        p.legend.visible = False

    # Set click policy for legend:
    p.legend.click_policy = "hide"

    # Set panning option:
    if panning is False:
        p.toolbar.active_drag = None

    # Set zooming option:
    if zooming is False:
        p.toolbar.active_scroll = None

    # Display plot and if wanted return plot:
    if layout is None:
        if old_layout is None:
            layout = p
        else:
            layout = old_layout

    # Display plot if wanted
    if show_figure:
        show(layout)

    # Return as (embeddable) HTML if wanted:
    if return_html:
        return embedded_html(layout)

    # Return plot:
    if return_figure:
        return layout
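
The dropdown branch above swaps which column feeds the color mapper entirely on the JavaScript side: the CustomJS writes the chosen column into the "Colormap" field and emits a change event, so the page produced by show(layout) stays fully standalone with no Python callback server. The sketch below reproduces just that pattern with a plain ColumnDataSource and synthetic numbers; the column names 'a' and 'b', the widget title, and the glyph sizing are invented for illustration.

from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, LinearColorMapper, Select
from bokeh.palettes import Viridis256
from bokeh.plotting import figure, show

source = ColumnDataSource(dict(
    x=[0, 1, 2, 3], y=[0, 1, 0, 1],
    a=[1.0, 3.0, 6.0, 9.0], b=[9.0, 6.0, 3.0, 1.0],
    Colormap=[1.0, 3.0, 6.0, 9.0],   # the column actually mapped to color
))
mapper = LinearColorMapper(palette=Viridis256, low=1, high=9)

p = figure(width=400, height=300, title='Column-switching choropleth pattern')
p.scatter('x', 'y', size=25, source=source,
          fill_color={'field': 'Colormap', 'transform': mapper})

select = Select(title='Select layer', value='a', options=['a', 'b'])
select.js_on_change('value', CustomJS(args=dict(source=source, select=select), code="""
    // copy the chosen column into the field that drives the color mapper
    source.data['Colormap'] = source.data[select.value];
    source.change.emit();
"""))

show(column(select, p))   # widget above the plot, all client-side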

0 View Source File : bokeh_timeline.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : pyglet

def make_plot(info, outfile):
    # prepare some data
    (wall_times, pyglet_times, audio_times,
     current_times, frame_nums, rescheds,
     x_vnones, y_vnones,
     x_anones, y_anones) = info

    # output to static HTML file
    output_file(outfile)

    # main plot
    p = figure(
       tools="pan,wheel_zoom,reset,save",
       y_axis_type="linear", y_range=[0.000, wall_times[-1]], title="timeline",
       x_axis_label='wall_time', y_axis_label='time',
       plot_width=600, plot_height=600
    )

    # add some renderers
    p.line(wall_times, wall_times, legend="wall_time")
    #p.line(wall_times, pyglet_times, legend="pyglet_time", line_width=3)
    p.line(wall_times, current_times, legend="current_times", line_color="red")
    p.line(wall_times, audio_times, legend="audio_times", line_color="orange", line_dash="4 4")

    p.circle(x_vnones, y_vnones, legend="current time nones", fill_color="green", size=8)
    p.circle(x_anones, y_anones, legend="audio time nones", fill_color="red", size=6)

    # secondary y-axis for frame_num
    p.extra_y_ranges = {"frame_num": Range1d(start=0, end=frame_nums[-1])}
    p.line(wall_times, frame_nums, legend="frame_num",
           line_color="black", y_range_name="frame_num")
    p.add_layout(LinearAxis(y_range_name="frame_num", axis_label="frame num"), 'left')

    p.legend.location = "bottom_right"
    # show the results
    #show(p)

    # secondary plot for rescheduling times
    q = figure(
       tools="pan,wheel_zoom,reset,save",
       y_axis_type="linear", y_range=[-0.3, 0.3], title="rescheduling time",
       x_axis_label='wall_time', y_axis_label='rescheduling time',
       plot_width=600, plot_height=150
    )
    q.line(wall_times, rescheds)

    show(column(p, q))


def usage():
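
The timeline plot above attaches a second value axis through extra_y_ranges plus an extra LinearAxis, then hands a column of two figures to show so both render on one page. A stripped-down sketch of the same pattern with synthetic numbers (the series names and axis ranges are placeholders, not the pyglet timeline data):

from bokeh.layouts import column
from bokeh.models import LinearAxis, Range1d
from bokeh.plotting import figure, show

t = list(range(20))
p = figure(width=600, height=300, x_axis_label='wall_time', y_axis_label='time')
p.line(t, [v * 0.1 for v in t], legend_label='time-like series')

# secondary y-range for a counter-like series, drawn against its own axis
p.extra_y_ranges = {'frame_num': Range1d(start=0, end=200)}
p.line(t, [v * 10 for v in t], y_range_name='frame_num',
       line_color='black', legend_label='frame_num')
p.add_layout(LinearAxis(y_range_name='frame_num', axis_label='frame num'), 'left')
p.legend.location = 'bottom_right'

# smaller companion plot stacked underneath, as in the rescheduling-time panel
q = figure(width=600, height=150, x_axis_label='wall_time')
q.line(t, [(-1) ** v * 0.1 for v in t])

show(column(p, q))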

0 View Source File : ui.py
License : Apache License 2.0
Project Creator : seeq12

def startSupervised(app, buttons, xsignal, ysignal, buttonClusterSupervised):

    for button in buttons:
        button.close()
    
    #bokeh configuration
    output_notebook(INLINE)

    x, y = xsignal.value, ysignal.value
    if x == y:
        print('Must select different signals for X and Y! Please restart.')
        return

    #get the samples
    query_str = ""
    for sig in [x, y]:
        query_str += "Name == '{}' or ".format(sig)
    query = query_str[:-4] #delete "or" from the end
    
    to_pull = app.signals.query(query)

    datadf = seeqInterface.get_signals_samples(
            to_pull, 
            display_range = app.display_range,
            grid = app.grid, quiet = app.quiet
        )

    datadf.dropna(inplace = True)

    #modify the grid to use only signals in visual clustering:
    grid = seeqInterface.get_minumum_maximum_interpolation_for_signals_df(to_pull, app.display_range)
    app.grid = grid


    X = datadf[x]
    Y = datadf[y]

    #check if we have any constant signals. This is not allowed because clustering works on variability:
    if len(set(X.values)) == 1:
        print('Cannot use constant signals in clustering. Signal "{}" is constant'.format(x))
        return 
    if len(set(Y.values)) == 1:
        print('Cannot use constant signals in clustering. Signal "{}" is constant'.format(y))
        return

    #randomly down sample
    indices = np.random.choice(len(X), 1000)
    Xnew = X[indices]
    Ynew = Y[indices]

    global datasource

    s1 = ColumnDataSource(data=dict(x=Xnew, y=Ynew))
    p1 = figure(plot_width=400, plot_height=400, tools="lasso_select", title="Select Cluster")
    p1.circle('x', 'y', source=s1, alpha=0.1)

    X = datadf[x]
    Y = datadf[y]

    bins = 50

    H, xe, ye = np.histogram2d(X, Y, bins=bins)


    # produce an image of the 2d histogram
                                             ### centering here
    under_hist = p1.image(image=[H.T], x=xe[0]-((xe[1]-xe[0])/2), y=ye[0]-((ye[1]-ye[0])/2), dw=xe[-1] - xe[0], dh=ye[-1] - ye[0], 
                          palette=Blues[9][::-1]) 
    # the number is because the palette is a dict of lists keyed by how many colors
    under_hist.level = 'underlay'

    #build histogram grid:
    xcoords = [(xe[i] + xe[i-1])/2 for i in range(1, len(xe))]
    ycoords = [(ye[i] + ye[i-1])/2 for i in range(1, len(ye))]
    
    hist_grid_points = np.array(list(itertools.product(xcoords, ycoords))) # a set of points for each bixel of the histogram

    s1 = ColumnDataSource(data=dict(x=hist_grid_points[:,0], y=hist_grid_points[:,1]))
    p1.circle('x', 'y', source=s1, selection_color='green', alpha=0.0, selection_alpha = 0.5, nonselection_alpha=0.0)
    
    s2 = ColumnDataSource(data=dict(x=[], y=[]))


    #the following will write a global variable to the ipython kernel

    out = s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code="""          
            var inds = cb_obj.indices;
            var d1 = s1.data;
            var d2 = s2.data;
            d2['x'] = []
            d2['y'] = []
            for (var i = 0; i < inds.length; i++) {
                d2['x'].push(d1['x'][inds[i]])
                d2['y'].push(d1['y'][inds[i]])
            }
            s2.change.emit()
            var command = "__builtins__.indexofselection =" + inds;

            var kernel = IPython.notebook.kernel;
            kernel.execute(command);
        """)
    )
    layout = row(p1)
    show(layout)
    display(VBox([buttonClusterSupervised]))
    datasource = s1

    return datadf, hist_grid_points
    
def clusterSupervised(app, buttons, xsignal, ysignal, clusterExtent, datadf, indexofselection, hist_grid_points, basename, timeOfRun, closeButton):
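
The show call here publishes a figure whose lasso selection is mirrored into a second ColumnDataSource (and, in the notebook, into a kernel variable) purely through a CustomJS callback on selected.indices. Below is a minimal version of just that selection-mirroring part, using random points instead of Seeq signal samples; the source names s1/s2 and the styling are illustrative only.

import numpy as np
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.plotting import figure, show

rng = np.random.default_rng(0)
s1 = ColumnDataSource(data=dict(x=rng.normal(size=500), y=rng.normal(size=500)))
s2 = ColumnDataSource(data=dict(x=[], y=[]))      # receives the selected points

p = figure(width=400, height=400, tools='lasso_select', title='Select Cluster')
p.scatter('x', 'y', source=s1, alpha=0.3,
          selection_color='green', nonselection_alpha=0.1)

# mirror the lasso selection into s2 on the browser side
s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code="""
    const inds = cb_obj.indices;
    const d1 = s1.data, d2 = s2.data;
    d2['x'] = []; d2['y'] = [];
    for (let i = 0; i < inds.length; i++) {
        d2['x'].push(d1['x'][inds[i]]);
        d2['y'].push(d1['y'][inds[i]]);
    }
    s2.change.emit();
"""))

show(row(p))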

0 View Source File : components.py
License : MIT License
Project Creator : SiEPIC

    def plot_sparameters(
        self,
        ports: List[List[int]] = None,
        show_freq: bool = True,
        scale: str = "log",
        interactive: bool = False,
    ):
        """Plot the component's S-parameters.

        Args:
            ports: List of lists that contains the desired\
                 S-parameters, e.g., [[1,1],[1,2],[2,1],[2,2]].\
                      Defaults to None.
            show_freq: Flag to determine whether to plot\
                 with respect to frequency or wavelength. Defaults to True.
            scale: Plotting y axis scale, options available:\
                 ["log", "abs", "abs_sq"]. Defaults to "log".
            interactive: Make the plots interactive or not.
        """

        ports_ = []  # ports the plot

        if show_freq:
            x_data = self.f
            xlabel = "Frequency (Hz)"
        else:
            x_data = self.C * 1e6 / self.f
            xlabel = "Wavelength (um)"

        if ports is None:
            nports = self.s.shape[-1]
            for i in range(nports):
                for j in range(nports):
                    ports_.append("S_%d_%d" % (i, j))
        else:
            ports_ = ["S_%d_%d" % (each[0], each[1]) for each in ports]

        if not interactive:
            for each_port in ports_:
                _, i, j = each_port.split("_")
                if scale == "log":
                    plt.plot(
                        x_data,
                        10 * np.log10(np.square(np.abs(self.s[:, int(i), int(j)]))),
                    )
                    plt.ylabel("Transmission (dB)")
                elif scale == "abs":
                    plt.plot(x_data, np.abs(self.s[:, int(i), int(j)]))
                    plt.ylabel("Transmission (normalized)")
                elif scale == "abs_sq":
                    plt.plot(x_data, np.square(np.abs(self.s[:, int(i), int(j)])))
                    plt.ylabel("Transmission (normalized^2)")
            plt.xlabel(xlabel)
            plt.xlim(left=np.min(x_data), right=np.max(x_data))
            plt.tight_layout()
            plt.legend(ports_)
            plt.show()
        else:
            import holoviews as hv
            import pandas as pd
            from bokeh.plotting import show

            hv.extension("bokeh")
            temp_data = self.get_data(ports=ports, xscale="lambda", yscale=scale)
            filtered_s = dict(
                [[key, temp_data[key]] for key in temp_data.keys() if "unit" not in key]
            )
            df = pd.DataFrame.from_dict(filtered_s)
            master_plot = None
            for each_ydata in ports_:
                if master_plot is None:
                    master_plot = hv.Curve(
                        df, "xdata", each_ydata, label=each_ydata
                    ).opts(tools=["hover"])
                else:
                    curve = hv.Curve(df, "xdata", each_ydata, label=each_ydata).opts(
                        tools=["hover"]
                    )
                    master_plot = master_plot * curve

            master_plot.opts(
                ylabel=temp_data["yunit"],
                xlabel=temp_data["xunit"],
                responsive=True,
                min_height=400,
                min_width=600,
                fontscale=1.5,
                max_width=800,
                max_height=600,
            )

            show(hv.render(master_plot))
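
A stripped-down sketch of the hv.render + bokeh.plotting.show pattern used in this example, with synthetic data standing in for the S-parameter sweep (the column names here are invented):

    import numpy as np
    import pandas as pd
    import holoviews as hv
    from bokeh.plotting import show

    hv.extension("bokeh")

    x = np.linspace(1.5, 1.6, 100)  # e.g., wavelength in um
    df = pd.DataFrame({"xdata": x,
                       "S_0_0": np.sin(40 * x) ** 2,
                       "S_0_1": np.cos(40 * x) ** 2})

    overlay = None
    for col in ("S_0_0", "S_0_1"):
        curve = hv.Curve(df, "xdata", col, label=col).opts(tools=["hover"])
        overlay = curve if overlay is None else overlay * curve

    # hv.render converts the HoloViews overlay into a plain Bokeh figure that show() accepts.
    show(hv.render(overlay))
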

0 View Source File : evaluation.py
License : MIT License
Project Creator : Zumbalamambo

    def visualization(self):
        # Data prepare
        METRICS_MAPPING = {
            'skip_frame': ['vanilla', 'skip1', 'skip2', 'skip3', 'skip4', 'skip5', 'skip6', 'skip7', 'skip8', 'skip9', 'skip10'],
            'downsampling': ['vanilla', 'skip1_downsampling', 'skip2_downsampling', 'skip3_downsampling', 'skip4_downsampling', 'skip5_downsampling', 'skip6_downsampling', 'skip7_downsampling', 'skip8_downsampling', 'skip9_downsampling', 'skip10_downsampling'],
            'prob_driven': ['vanilla', 'skip1_prob', 'skip2_prob', 'skip3_prob', 'skip4_prob', 'skip5_prob', 'skip6_prob', 'skip7_prob', 'skip8_prob', 'skip9_prob', 'skip10_prob'],
            'downsampling_with_prob_driven': ['vanilla', 'skip1_downsampling_prob', 'skip2_downsampling_prob', 'skip3_downsampling_prob', 'skip4_downsampling_prob', 'skip5_downsampling_prob', 'skip6_downsampling_prob', 'skip7_downsampling_prob', 'skip8_downsampling_prob', 'skip9_downsampling_prob', 'skip10_downsampling_prob'],
            'vanilla': ['vanilla'],
            'skip1': ['skip1', 'skip1_prob'],
            'skip2': ['skip2', 'skip2_prob'],
            'skip3': ['skip3', 'skip3_prob'],
            'skip4': ['skip4', 'skip4_prob'],
            'skip5': ['skip5', 'skip5_prob'],
            'skip6': ['skip6', 'skip6_prob'],
            'skip7': ['skip7', 'skip7_prob'],
            'skip8': ['skip8', 'skip8_prob'],
            'skip9': ['skip9', 'skip9_prob'],
            'skip10': ['skip10', 'skip10_prob'],
        }
        METRICS_MAPPING['all'] = METRICS_MAPPING['vanilla'] + METRICS_MAPPING['skip1'] + METRICS_MAPPING['skip2'] + METRICS_MAPPING['skip3'] + METRICS_MAPPING['skip4'] + METRICS_MAPPING['skip5'] + METRICS_MAPPING['skip6'] + METRICS_MAPPING['skip7'] + METRICS_MAPPING['skip8'] + METRICS_MAPPING['skip9'] + METRICS_MAPPING['skip10']

        # Keys = yolov3, mobilenet ssd, squeeze net 1.0
        keys = list(self.data.keys())
        
        FPS_data = {}
        MOTA_data = {}
        for key, value in self.data.items():
            FPS_data[key] = {}
            FPS_data[key]['skip_frame'] = []
            FPS_data[key]['downsampling'] = []
            FPS_data[key]['prob_driven'] = []
            FPS_data[key]['downsampling_with_prob_driven'] = []
            FPS_data[key]['vanilla'] = []
            FPS_data[key]['skip1'] = []
            FPS_data[key]['skip2'] = []
            FPS_data[key]['skip3'] = []
            FPS_data[key]['skip4'] = []
            FPS_data[key]['skip5'] = []
            FPS_data[key]['skip6'] = []
            FPS_data[key]['skip7'] = []
            FPS_data[key]['skip8'] = []
            FPS_data[key]['skip9'] = []
            FPS_data[key]['skip10'] = []
            FPS_data[key]['all'] = []
            #FPS_data[key]['color'] = Viridis6
            
            MOTA_data[key] = {}
            MOTA_data[key]['skip_frame'] = []
            MOTA_data[key]['downsampling'] = []
            MOTA_data[key]['prob_driven'] = []
            MOTA_data[key]['downsampling_with_prob_driven'] = []
            MOTA_data[key]['vanilla'] = []
            MOTA_data[key]['skip1'] = []
            MOTA_data[key]['skip2'] = []
            MOTA_data[key]['skip3'] = []
            MOTA_data[key]['skip4'] = []
            MOTA_data[key]['skip5'] = []
            MOTA_data[key]['skip6'] = []
            MOTA_data[key]['skip7'] = []
            MOTA_data[key]['skip8'] = []
            MOTA_data[key]['skip9'] = []
            MOTA_data[key]['skip10'] = []
            MOTA_data[key]['all'] = []
            #MOTA_data[key]['color'] = Viridis6
            for algorithm, metrics in value.items():
                if(algorithm in METRICS_MAPPING['skip_frame']):
                    FPS_data[key]['skip_frame'].append(metrics['FPS'])
                    MOTA_data[key]['skip_frame'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['downsampling']):
                    FPS_data[key]['downsampling'].append(metrics['FPS'])
                    MOTA_data[key]['downsampling'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['prob_driven']):
                    FPS_data[key]['prob_driven'].append(metrics['FPS'])
                    MOTA_data[key]['prob_driven'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['downsampling_with_prob_driven']):
                    FPS_data[key]['downsampling_with_prob_driven'].append(metrics['FPS'])
                    MOTA_data[key]['downsampling_with_prob_driven'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['all']):
                    FPS_data[key]['all'].append(metrics['FPS'])
                    MOTA_data[key]['all'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['vanilla']):
                    FPS_data[key]['vanilla'].append(metrics['FPS'])
                    MOTA_data[key]['vanilla'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip1']):
                    FPS_data[key]['skip1'].append(metrics['FPS'])
                    MOTA_data[key]['skip1'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip2']):
                    FPS_data[key]['skip2'].append(metrics['FPS'])
                    MOTA_data[key]['skip2'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip3']):
                    FPS_data[key]['skip3'].append(metrics['FPS'])
                    MOTA_data[key]['skip3'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip4']):
                    FPS_data[key]['skip4'].append(metrics['FPS'])
                    MOTA_data[key]['skip4'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip5']):
                    FPS_data[key]['skip5'].append(metrics['FPS'])
                    MOTA_data[key]['skip5'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip6']):
                    FPS_data[key]['skip6'].append(metrics['FPS'])
                    MOTA_data[key]['skip6'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip7']):
                    FPS_data[key]['skip7'].append(metrics['FPS'])
                    MOTA_data[key]['skip7'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip8']):
                    FPS_data[key]['skip8'].append(metrics['FPS'])
                    MOTA_data[key]['skip8'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip9']):
                    FPS_data[key]['skip9'].append(metrics['FPS'])
                    MOTA_data[key]['skip9'].append(metrics['MOTA'])
                if(algorithm in METRICS_MAPPING['skip10']):
                    FPS_data[key]['skip10'].append(metrics['FPS'])
                    MOTA_data[key]['skip10'].append(metrics['MOTA'])
        
        # Normalization
        colors = ['red', 'green', 'blue', 'purple', 'orange']
        KEY_INDEX = 2
        NORM_METHOD = 'diff_of_max_min'

        MAX_MOTA = max(MOTA_data[keys[KEY_INDEX]]['all'])
        MIN_MOTA = min(MOTA_data[keys[KEY_INDEX]]['all'])
        MAX_FPS = max(FPS_data[keys[KEY_INDEX]]['all'])
        MIN_FPS = min(FPS_data[keys[KEY_INDEX]]['all'])

        all_MOTA_arr = np.array(MOTA_data[keys[KEY_INDEX]]['all'])
        all_FPS_arr = np.array(FPS_data[keys[KEY_INDEX]]['all'])
        mean_MOTA = np.mean(all_MOTA_arr, axis=0)
        std_MOTA = np.std(all_MOTA_arr, axis=0)

        mean_FPS = np.mean(all_FPS_arr, axis=0)
        std_FPS = np.std(all_FPS_arr, axis=0)


        # MOTA vs. FPS 
        #print(self.increment_and_decrement(MOTA_data[keys[KEY_INDEX]]['all'], FPS_data[keys[KEY_INDEX]]['all']))

        TOOLS = 'hover,pan,wheel_zoom,reset,save'

        plot_info = ['skip_frame', 'downsampling', 'prob_driven', 'downsampling_with_prob_driven']
        color_info = ['red', 'green', 'blue', 'purple']

        KEY_INDEX = 0
        p_yolov3_mota = figure(title = "YOLOv3 MOTA vs. FPS", tools=[TOOLS])
        for i, info in enumerate(plot_info):
            yolov3_mota_source = ColumnDataSource(data=dict(
                mota=MOTA_data[keys[KEY_INDEX]][info],
                fps=FPS_data[keys[KEY_INDEX]][info],
                desc=[info] * len(MOTA_data[keys[KEY_INDEX]][info]),
                legend=[info] * len(MOTA_data[keys[KEY_INDEX]][info]),
            ))

            p_yolov3_mota.circle('fps', 'mota',source=yolov3_mota_source, legend='legend', fill_color="white", size=4, color=color_info[i])
            p_yolov3_mota.line('fps', 'mota', source=yolov3_mota_source, legend='legend', line_width=4, line_color=color_info[i], line_alpha=0.6, hover_line_color=color_info[i], hover_line_alpha=0.9) 

            p_yolov3_mota.legend.location = "top_right"
            p_yolov3_mota.legend.click_policy="hide"
            p_yolov3_mota.yaxis.axis_label = "MOTA"
            p_yolov3_mota.xaxis.axis_label = "FPS"
        hover = p_yolov3_mota.select(dict(type=HoverTool))
        hover.tooltips = [("FPS", "@fps"),("MOTA", "@mota")]
        hover.mode = 'mouse'

        KEY_INDEX = 1
        p_mobilenetssd_mota = figure(title = "MOBILENET SSD MOTA vs. FPS", tools=[TOOLS])
        for i, info in enumerate(plot_info):
            yolov3_mota_source = ColumnDataSource(data=dict(
                mota=MOTA_data[keys[KEY_INDEX]][info],
                fps=FPS_data[keys[KEY_INDEX]][info],
                desc=[info] * len(MOTA_data[keys[KEY_INDEX]][info]),
                legend=[info] * len(MOTA_data[keys[KEY_INDEX]][info]),
            ))

            p_mobilenetssd_mota.circle('fps', 'mota',source=yolov3_mota_source, legend='legend', fill_color="white", size=4, color=color_info[i])
            p_mobilenetssd_mota.line('fps', 'mota', source=yolov3_mota_source, legend='legend', line_width=4, line_color=color_info[i], line_alpha=0.6, hover_line_color=color_info[i], hover_line_alpha=0.9) 

            p_mobilenetssd_mota.legend.location = "top_right"
            p_mobilenetssd_mota.legend.click_policy="hide"
            p_mobilenetssd_mota.yaxis.axis_label = "MOTA"
            p_mobilenetssd_mota.xaxis.axis_label = "FPS"
        hover = p_mobilenetssd_mota.select(dict(type=HoverTool))
        hover.tooltips = [("FPS", "@fps"),("MOTA", "@mota")]
        hover.mode = 'mouse'
        
        KEY_INDEX = 2
        p_squeezenetv10_mota = figure(title = "SQUEEZENET V1.0 MOTA vs. FPS", tools=[TOOLS])
        for i, info in enumerate(plot_info):
            yolov3_mota_source = ColumnDataSource(data=dict(
                mota=MOTA_data[keys[KEY_INDEX]][info],
                fps=FPS_data[keys[KEY_INDEX]][info],
                desc=[info] * len(MOTA_data[keys[KEY_INDEX]][info]),
                legend=[info] * len(MOTA_data[keys[KEY_INDEX]][info]),
            ))

            p_squeezenetv10_mota.circle('fps', 'mota',source=yolov3_mota_source, legend='legend', fill_color="white", size=4, color=color_info[i])
            p_squeezenetv10_mota.line('fps', 'mota', source=yolov3_mota_source, legend='legend', line_width=4, line_color=color_info[i], line_alpha=0.6, hover_line_color=color_info[i], hover_line_alpha=0.9) 

            p_squeezenetv10_mota.legend.location = "top_right"
            p_squeezenetv10_mota.legend.click_policy="hide"
            p_squeezenetv10_mota.yaxis.axis_label = "MOTA"
            p_squeezenetv10_mota.xaxis.axis_label = "FPS"
        hover = p_squeezenetv10_mota.select(dict(type=HoverTool))
        hover.tooltips = [("FPS", "@fps"),("MOTA", "@mota")]
        hover.mode = 'mouse'

        show(gridplot([[p_yolov3_mota], [p_mobilenetssd_mota], [p_squeezenetv10_mota]], plot_width=1000, plot_height=600))


        """
        # MOTA and FPS comparison plot

        ## YOLOV3
        TOOLS = 'pan,wheel_zoom,reset,save'

        yolov3_mota_source = ColumnDataSource(data=dict(
            x=[[i for i in list(range(4))] for j in range(4)],
            y=[MOTA_data[keys[0]]['skip_frame'],MOTA_data[keys[0]]['downsampling'],MOTA_data[keys[0]]['prob_driven'],MOTA_data[keys[0]]['downsampling_with_prob_driven']],
            desc=list(MOTA_data[keys[0]].keys()),
            color=['red', 'green', 'blue', 'purple'],
            legend=list(MOTA_data[keys[0]].keys()),
        ))

        yolov3_mota_hover = HoverTool(tooltips=[
                            ("index", "$index"),
                            ("MOTA", "$y"),
                            ("desc", "@desc"),], 
                            mode='mouse',
        )

        p_yolov3_mota = figure(title='YOLOv3 MOTA', tools=[TOOLS, yolov3_mota_hover])
        p_yolov3_mota.multi_line('x', 'y', legend="legend", line_width=4, line_color='color', line_alpha=0.6, hover_line_color='color', hover_line_alpha=1.0, source=yolov3_mota_source)
        p_yolov3_mota.legend.location = "top_right"
        p_yolov3_mota.yaxis.axis_label = "MOTA"

        yolov3_fps_source = ColumnDataSource(data=dict(
            x=[[i for i in list(range(4))] for j in range(4)],
            y=[FPS_data[keys[0]]['skip_frame'],FPS_data[keys[0]]['downsampling'],FPS_data[keys[0]]['prob_driven'],FPS_data[keys[0]]['downsampling_with_prob_driven']],
            desc=list(FPS_data[keys[0]].keys()),
            color=['red', 'green', 'blue', 'purple'],
            legend=list(FPS_data[keys[0]].keys()),
        ))

        yolov3_fps_hover = HoverTool(tooltips=[
                            ("index", "$index"),
                            ("FPS", "$y"),
                            ("desc", "@desc"),], 
                            mode='mouse',
        )

        p_yolov3_fps = figure(title='YOLOv3 FPS', tools=[TOOLS, yolov3_fps_hover])
        p_yolov3_fps.multi_line('x', 'y', legend="legend", line_width=4, line_color='color', line_alpha=0.6, hover_line_color='color', hover_line_alpha=1.0, source=yolov3_fps_source)
        p_yolov3_fps.legend.location = "top_right"
        p_yolov3_fps.yaxis.axis_label = "FPS"


        ## Mobilenet SSD
        mobilenet_mota_source = ColumnDataSource(data=dict(
            x=[[i for i in list(range(4))] for j in range(4)],
            y=[MOTA_data[keys[1]]['skip_frame'],MOTA_data[keys[1]]['downsampling'],MOTA_data[keys[1]]['prob_driven'],MOTA_data[keys[1]]['downsampling_with_prob_driven']],
            desc=list(MOTA_data[keys[1]].keys()),
            color=['red', 'green', 'blue', 'purple'],
            legend=list(MOTA_data[keys[1]].keys()),
        ))

        mobilenet_mota_hover = HoverTool(tooltips=[
                            ("index", "$index"),
                            ("MOTA", "$y"),
                            ("desc", "@desc"),], 
                            mode='mouse',
        )

        p_mobilenet_mota = figure(title='Mobilenet MOTA', tools=[TOOLS, mobilenet_mota_hover])
        p_mobilenet_mota.multi_line('x', 'y', legend="legend", line_width=4, line_color='color', line_alpha=0.6, hover_line_color='color', hover_line_alpha=1.0, source=mobilenet_mota_source)
        p_mobilenet_mota.legend.location = "top_right"
        p_mobilenet_mota.yaxis.axis_label = "MOTA"

        mobilenet_fps_source = ColumnDataSource(data=dict(
            x=[[i for i in list(range(4))] for j in range(4)],
            y=[FPS_data[keys[1]]['skip_frame'],FPS_data[keys[1]]['downsampling'],FPS_data[keys[1]]['prob_driven'],FPS_data[keys[1]]['downsampling_with_prob_driven']],
            desc=list(FPS_data[keys[1]].keys()),
            color=['red', 'green', 'blue', 'purple'],
            legend=list(FPS_data[keys[1]].keys()),
        ))

        mobilenet_fps_hover = HoverTool(tooltips=[
                            ("index", "$index"),
                            ("FPS", "$y"),
                            ("desc", "@desc"),], 
                            mode='mouse',
        )

        p_mobilenet_fps = figure(title='Mobilenet FPS', tools=[TOOLS, mobilenet_fps_hover])
        p_mobilenet_fps.multi_line('x', 'y', legend="legend", line_width=4, line_color='color', line_alpha=0.6, hover_line_color='color', hover_line_alpha=1.0, source=mobilenet_fps_source)
        p_mobilenet_fps.legend.location = "top_right"
        p_mobilenet_fps.yaxis.axis_label = "FPS"

        ## Squeezenet 1.0
        squeezenetv1_0_mota_source = ColumnDataSource(data=dict(
            x=[[i for i in list(range(4))] for j in range(4)],
            y=[MOTA_data[keys[2]]['skip_frame'],MOTA_data[keys[2]]['downsampling'],MOTA_data[keys[2]]['prob_driven'],MOTA_data[keys[2]]['downsampling_with_prob_driven']],
            desc=list(MOTA_data[keys[2]].keys()),
            color=['red', 'green', 'blue', 'purple'],
            legend=list(MOTA_data[keys[2]].keys()),
        ))

        squeezenetv1_0_mota_hover = HoverTool(tooltips=[
                                    ("index", "$index"),
                                    ("MOTA", "$y"),
                                    ("desc", "@desc"),], 
                                    mode='mouse',
        )

        p_squeezenetv1_0_mota = figure(title='SqueezeNet v1.0 MOTA', tools=[TOOLS, squeezenetv1_0_mota_hover])
        p_squeezenetv1_0_mota.multi_line('x', 'y', legend="legend", line_width=4, line_color='color', line_alpha=0.6, hover_line_color='color', hover_line_alpha=1.0, source=squeezenetv1_0_mota_source)
        p_squeezenetv1_0_mota.legend.location = "top_right"
        p_squeezenetv1_0_mota.yaxis.axis_label = "MOTA"

        squeezenetv1_0_fps_source = ColumnDataSource(data=dict(
            x=[[i for i in list(range(4))] for j in range(4)],
            y=[FPS_data[keys[2]]['skip_frame'],FPS_data[keys[2]]['downsampling'],FPS_data[keys[2]]['prob_driven'],FPS_data[keys[2]]['downsampling_with_prob_driven']],
            desc=list(FPS_data[keys[2]].keys()),
            color=['red', 'green', 'blue', 'purple'],
            legend=list(FPS_data[keys[2]].keys()),
        ))

        squeezenetv1_0_fps_hover = HoverTool(tooltips=[
                                            ("index", "$index"),
                                            ("FPS", "$y"),
                                            ("desc", "@desc"),], 
                                            mode='mouse',
        )

        p_squeezenetv1_0_fps = figure(title='SqueezeNet v1.0 FPS', tools=[TOOLS, squeezenetv1_0_fps_hover])
        p_squeezenetv1_0_fps.multi_line('x', 'y', legend="legend", line_width=4, line_color='color', line_alpha=0.6, hover_line_color='color', hover_line_alpha=1.0, source=squeezenetv1_0_fps_source)
        p_squeezenetv1_0_fps.legend.location = "top_right"
        p_squeezenetv1_0_fps.yaxis.axis_label = "FPS"



        show(gridplot([[p_yolov3_mota, p_yolov3_fps], [p_mobilenet_mota, p_mobilenet_fps], [p_squeezenetv1_0_mota, p_squeezenetv1_0_fps]], plot_width=600, plot_height=600))
        """

if __name__ == '__main__':
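
For reference, a compact sketch of the gridplot + HoverTool pattern from this example, using invented FPS/MOTA values and the older Bokeh plot_width/plot_height keywords that the snippet itself relies on:

    from bokeh.layouts import gridplot
    from bokeh.models import ColumnDataSource, HoverTool
    from bokeh.plotting import figure, show

    # Made-up numbers purely for illustration.
    source = ColumnDataSource(data=dict(fps=[5, 10, 20, 40],
                                        mota=[0.62, 0.58, 0.51, 0.40],
                                        desc=["skip_frame"] * 4))

    p = figure(title="MOTA vs. FPS", tools="pan,wheel_zoom,reset,save")
    p.line('fps', 'mota', source=source, line_width=4, line_alpha=0.6)
    p.circle('fps', 'mota', source=source, fill_color="white", size=6)
    p.add_tools(HoverTool(tooltips=[("FPS", "@fps"), ("MOTA", "@mota"), ("desc", "@desc")]))
    p.xaxis.axis_label = "FPS"
    p.yaxis.axis_label = "MOTA"

    show(gridplot([[p]], plot_width=600, plot_height=400))
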