Here are examples of the Python API bokeh.models.widgets.Div, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
8 Examples
Example 1 (votes: 0)
Source file: plot.py — from CoffeaTeam
License: BSD 3-Clause "New" or "Revised" License
def bokeh_plot(histo, jup_url="http://127.0.0.1:8889"):
    """Interactively explore a coffea histogram in a Jupyter notebook.

    Builds a bokeh application with a 1D projection of ``histo``: radio
    buttons select which dense axis is plotted and which sparse axis is
    overlaid, range sliders cut on the dense axes, and checkbox groups
    toggle individual categories.  Faint "ghost" histograms show the
    uncut distribution behind the selection.

    Parameters
    ----------
    histo : coffea Hist
        Histogram with at least one sparse (categorical) axis and one
        dense axis.
    jup_url : str, optional
        Notebook server URL passed to ``bokeh.io.show`` so the app can
        call back into the notebook.

    Raises
    ------
    NotImplementedError
        If not running inside a jupyter notebook (``isnotebook()``).
    """
    if not isnotebook():
        raise NotImplementedError("Only usable in jupyter notebook")
    import bokeh.plotting.figure as bk_figure
    from bokeh.io import show
    from bokeh import palettes
    from bokeh.layouts import row, column
    from bokeh.models import ColumnDataSource
    from bokeh.models.widgets import RadioButtonGroup, CheckboxButtonGroup
    from bokeh.models.widgets import RangeSlider, Div
    from bokeh.io import output_notebook  # enables plot interface in J notebook

    # init bokeh
    from bokeh.application import Application
    from bokeh.application.handlers import FunctionHandler
    from bokeh.core.validation import silence
    from bokeh.core.validation.warnings import EMPTY_LAYOUT

    # The config tab starts empty; silence bokeh's empty-layout warning.
    silence(EMPTY_LAYOUT, True)

    output_notebook()

    # Set up widgets
    cfg_labels = ["Ghost"]
    wi_config = CheckboxButtonGroup(labels=cfg_labels, active=[0])
    wi_dense_select = RadioButtonGroup(
        labels=[ax.name for ax in histo.dense_axes()], active=0
    )
    wi_sparse_select = RadioButtonGroup(
        labels=[ax.name for ax in histo.sparse_axes()], active=0
    )

    # Dense widgets: one range slider per dense axis, stepping by the
    # smallest bin width so every bin edge is reachable.
    sliders = {}
    for ax in histo.dense_axes():
        edge_vals = (histo.axis(ax.name).edges()[0], histo.axis(ax.name).edges()[-1])
        _smallest_bin = numpy.min(numpy.diff(histo.axis(ax.name).edges()))
        sliders[ax.name] = RangeSlider(
            title=ax.name,
            value=edge_vals,
            start=edge_vals[0],
            end=edge_vals[1],
            step=_smallest_bin,
            name=ax.name,
        )

    # Cat widgets: one checkbox group per sparse axis listing its categories
    togglers = {}
    for ax in histo.sparse_axes():
        togglers[ax.name] = CheckboxButtonGroup(
            labels=[i.name for i in ax.identifiers()], active=[0], name=ax.name
        )

    # Toggles for all widgets (config tab): per-axis Display/Ghost switches
    configers = {}
    for ax in histo.sparse_axes():
        configers[ax.name] = CheckboxButtonGroup(
            labels=["Display", "Ghost"], active=[0, 1], name=ax.name
        )
    for ax in histo.dense_axes():
        configers[ax.name] = CheckboxButtonGroup(
            labels=["Display"], active=[0], name=ax.name
        )

    # Figure
    fig = bk_figure(
        title="1D Projection",
        plot_width=500,
        plot_height=500,
        min_border=20,
        toolbar_location=None,
    )
    fig.yaxis.axis_label = "N"
    fig.xaxis.axis_label = "Quantity"

    # Iterate over possible overlays
    _max_idents = 0  # Max number of simultaneous histograms
    for ax in histo.sparse_axes():
        _max_idents = max(_max_idents, len([i.name for i in ax.identifiers()]))

    # Data source list: one ColumnDataSource per possible overlay, plus a
    # parallel list for the ghost (uncut) histograms.
    sources = []
    sources_ghost = []
    for i in range(_max_idents):
        sources.append(ColumnDataSource(dict(left=[], top=[], right=[], bottom=[])))
        sources_ghost.append(
            ColumnDataSource(dict(left=[], top=[], right=[], bottom=[]))
        )

    # Hist list: quad glyphs drawn once; update_data only swaps source data.
    hists = []
    hists_ghost = []
    for i in range(_max_idents):
        if _max_idents < 10:
            # Category10 palettes only exist for 3..10 entries
            _color = palettes.Category10[min(max(3, _max_idents), 10)][i]
        else:
            _color = palettes.magma(_max_idents)[i]
        hists.append(
            fig.quad(
                left="left",
                right="right",
                top="top",
                bottom="bottom",
                source=sources[i],
                alpha=0.9,
                color=_color,
            )
        )
        hists_ghost.append(
            fig.quad(
                left="left",
                right="right",
                top="top",
                bottom="bottom",
                source=sources_ghost[i],
                alpha=0.05,
                color=_color,
            )
        )

    def update_data(attrname, old, new):
        """Recompute every overlay's bin data from the current widget state."""
        sparse_active = wi_sparse_select.active
        sparse_name = [ax.name for ax in histo.sparse_axes()][sparse_active]
        sparse_other = [ax.name for ax in histo.sparse_axes() if ax.name != sparse_name]

        dense_active = wi_dense_select.active
        dense_name = [ax.name for ax in histo.dense_axes()][dense_active]
        dense_other = [ax.name for ax in histo.dense_axes() if ax.name != dense_name]

        # Apply cuts in projections
        _h = histo.copy()
        for proj_ax in sparse_other:
            _idents = histo.axis(proj_ax).identifiers()
            _labels = [ident.name for ident in _idents]
            if 0 in configers[proj_ax].active:
                # "Display" on: integrate only the toggled categories
                _h = _h.integrate(
                    proj_ax, [_labels[i] for i in togglers[proj_ax].active]
                )
            else:
                _h = _h.integrate(proj_ax)
        for proj_ax in dense_other:
            _h = _h.integrate(
                proj_ax, slice(sliders[proj_ax].value[0], sliders[proj_ax].value[1])
            )

        for cat_ix in range(_max_idents):
            # Update histo for each toggled overlay
            if cat_ix in togglers[sparse_name].active:
                cat_value = histo.axis(sparse_name).identifiers()[cat_ix]
                h1d = _h.integrate(sparse_name, cat_value)

                # Get shown histogram
                values = h1d.project(dense_name).values()
                if values != {}:
                    h = values[()]
                    bins = h1d.axis(dense_name).edges()

                    # Apply cuts on shown axis: keep only bins fully inside
                    # the slider range
                    bin_los = bins[:-1][bins[:-1] > sliders[dense_name].value[0]]
                    bin_his = bins[1:][bins[1:] < sliders[dense_name].value[1]]
                    new_bins = numpy.intersect1d(bin_los, bin_his)
                    bin_ixs = numpy.searchsorted(bins, new_bins)[:-1]
                    h = h[bin_ixs]

                    sources[cat_ix].data = dict(
                        left=new_bins[:-1],
                        right=new_bins[1:],
                        top=h,
                        bottom=numpy.zeros_like(h),
                    )
                else:
                    sources[cat_ix].data = dict(left=[], right=[], top=[], bottom=[])

                # Add ghosts: same category, but from the uncut histogram
                if 0 in wi_config.active:
                    h1d = histo.integrate(sparse_name, cat_value)
                    for proj_ax in sparse_other:
                        _idents = histo.axis(proj_ax).identifiers()
                        _labels = [ident.name for ident in _idents]
                        if 1 not in configers[proj_ax].active:
                            # "Ghost" off for this axis: respect its cuts
                            h1d = h1d.integrate(
                                proj_ax, [_labels[i] for i in togglers[proj_ax].active]
                            )
                        else:
                            h1d = h1d.integrate(proj_ax)
                    values = h1d.project(dense_name).values()
                    if values != {}:
                        h = h1d.project(dense_name).values()[()]
                        bins = h1d.axis(dense_name).edges()
                        sources_ghost[cat_ix].data = dict(
                            left=bins[:-1],
                            right=bins[1:],
                            top=h,
                            bottom=numpy.zeros_like(h),
                        )
                    else:
                        sources_ghost[cat_ix].data = dict(
                            left=[], right=[], top=[], bottom=[]
                        )
            else:
                # Overlay toggled off: blank both the histogram and its ghost
                sources[cat_ix].data = dict(left=[], right=[], top=[], bottom=[])
                sources_ghost[cat_ix].data = dict(left=[], right=[], top=[], bottom=[])

        # Cosmetics
        fig.xaxis.axis_label = dense_name

    for name, slider in sliders.items():
        slider.on_change("value", update_data)
    for name, toggler in togglers.items():
        toggler.on_change("active", update_data)
    for name, configer in configers.items():
        configer.on_change("active", update_data)
    # Button
    for w in [wi_dense_select, wi_sparse_select, wi_config]:
        w.on_change("active", update_data)

    from bokeh.models.widgets import Panel, Tabs

    layout = row(
        fig,
        column(
            Div(
                text="<b>Overlay Axis:</b>",
                style={"font-size": "100%", "color": "black"},
            ),
            wi_sparse_select,
            Div(
                text="<b>Plot Axis:</b>", style={"font-size": "100%", "color": "black"}
            ),
            wi_dense_select,
            Div(
                text="<b>Categorical Cuts:</b>",
                style={"font-size": "100%", "color": "black"},
            ),
            *[toggler for name, toggler in togglers.items()],
            Div(
                text="<b>Dense Cuts:</b>", style={"font-size": "100%", "color": "black"}
            ),
            *[slider for name, slider in sliders.items()]
        ),
    )

    # Config prep: distribute the per-axis config widgets over three columns.
    # NOTE: integer division (//) is required here -- float division would
    # produce a float list index and raise TypeError for >15 axes.
    incl_lists = [[], [], []]
    for i, key in enumerate(list(configers.keys())):
        _col = i // max(5, len(configers) // 3)
        incl_lists[_col].append(
            Div(
                text="<b>{}:</b>".format(key),
                style={"font-size": "70%", "color": "black"},
            )
        )
        incl_lists[_col].append(configers[key])

    layout_cfgs = column(
        row(
            column(
                Div(
                    text="<b>Configs:</b>",
                    style={"font-size": "100%", "color": "black"},
                ),
                wi_config,
            )
        ),
        Div(
            text="<b>Axis togglers:</b>", style={"font-size": "100%", "color": "black"}
        ),
        row(
            column(incl_lists[0]),
            column(incl_lists[1]),
            column(incl_lists[2]),
        ),
    )

    # Update active buttons
    def update_layout(attrname, old, new):
        """Show/hide widgets in the projection tab per the Display toggles."""
        active_axes = [None]
        for name, wi in configers.items():
            if 0 in wi.active:
                active_axes.append(name)
        for child in layout.children[1].children:
            if child.name not in active_axes:
                child.visible = False
            else:
                child.visible = True

    for name, configer in configers.items():
        configer.on_change("active", update_layout)

    tab1 = Panel(child=layout, title="Projection")
    tab2 = Panel(child=layout_cfgs, title="Configs")
    tabs = Tabs(tabs=[tab1, tab2])

    def modify_doc(doc):
        doc.add_root(row(tabs, width=800))
        doc.title = "Sliders"

    handler = FunctionHandler(modify_doc)
    app = Application(handler)
    show(app, notebook_url=jup_url)
    # Populate the plot once with the initial widget state
    update_data("", "", "")
Example 2 (votes: 0)
Source file: edit.py — from j-brady
License: GNU General Public License v3.0
def setup_plot(self):
    """Build the bokeh figure, widgets, and tabbed document layout.

    Creates the spectrum contour plot with peak-ellipse overlays, the
    editable peak DataTable, and the fitting / log / re-cluster / save
    tabs, wiring all widget callbacks along the way.  Reads spectrum
    geometry and data from ``self.peakipy_data`` and stores every
    created widget on ``self``.
    """
    # make bokeh figure
    tools = [
        "tap",
        "box_zoom",
        "lasso_select",
        "box_select",
        "wheel_zoom",
        "pan",
        "reset",
    ]
    self.p = figure(
        x_range=(self.peakipy_data.f2_ppm_0, self.peakipy_data.f2_ppm_1),
        y_range=(self.peakipy_data.f1_ppm_0, self.peakipy_data.f1_ppm_1),
        x_axis_label=f"{self.peakipy_data.f2_label} - ppm",
        y_axis_label=f"{self.peakipy_data.f1_label} - ppm",
        tools=tools,
        active_drag="pan",
        active_scroll="wheel_zoom",
        active_tap=None,
    )
    if not self.thres:
        # No threshold given: estimate one from the first plane
        self.thres = threshold_otsu(self.peakipy_data.data[0])
    self.contour_start = self.thres  # contour level start value
    self.contour_num = 20  # number of contour levels
    self.contour_factor = 1.20  # scaling factor between contour levels
    cl = self.contour_start * self.contour_factor ** np.arange(self.contour_num)
    if len(cl) > 1 and np.min(np.diff(cl)) <= 0.0:
        # Non-increasing levels (negative threshold): force positive
        print(f"Setting contour levels to np.abs({cl})")
        cl = np.abs(cl)
    self.extent = (
        self.peakipy_data.f2_ppm_0,
        self.peakipy_data.f2_ppm_1,
        self.peakipy_data.f1_ppm_0,
        self.peakipy_data.f1_ppm_1,
    )
    self.spec_source = get_contour_data(
        self.peakipy_data.data[0], cl, extent=self.extent, cmap=viridis
    )
    # negative contours
    self.spec_source_neg = get_contour_data(
        self.peakipy_data.data[0] * -1.0, cl, extent=self.extent, cmap=autumn
    )
    self.p.multi_line(
        xs="xs", ys="ys", line_color="line_color", source=self.spec_source
    )
    self.p.multi_line(
        xs="xs", ys="ys", line_color="line_color", source=self.spec_source_neg
    )
    # contour_num = Slider(title="contour number", value=20, start=1, end=50,step=1)
    # contour_start = Slider(title="contour start", value=100000, start=1000, end=10000000,step=1000)
    self.contour_start = TextInput(
        value="%.2e" % self.thres, title="Contour level:", width=100
    )
    # contour_factor = Slider(title="contour factor", value=1.20, start=1., end=2.,step=0.05)
    self.contour_start.on_change("value", self.update_contour)
    # for w in [contour_num,contour_start,contour_factor]:
    #    w.on_change("value",update_contour)

    # plot mask outlines
    el = self.p.ellipse(
        x="X_PPM",
        y="Y_PPM",
        width="X_DIAMETER_PPM",
        height="Y_DIAMETER_PPM",
        source=self.source,
        fill_color="color",
        fill_alpha=0.1,
        line_dash="dotted",
        line_color="red",
    )
    self.p.add_tools(
        HoverTool(
            tooltips=[
                ("Index", "$index"),
                ("Assignment", "@ASS"),
                ("CLUSTID", "@CLUSTID"),
                ("RADII", "@X_RADIUS_PPM{0.000}, @Y_RADIUS_PPM{0.000}"),
                (
                    f"{self.peakipy_data.f2_label},{self.peakipy_data.f1_label}",
                    "$x{0.000} ppm, $y{0.000} ppm",
                ),
            ],
            mode="mouse",
            # add renderers
            renderers=[el],
        )
    )
    # p.toolbar.active_scroll = "auto"
    # draw border around spectrum area
    spec_border_x = [
        self.peakipy_data.f2_ppm_min,
        self.peakipy_data.f2_ppm_min,
        self.peakipy_data.f2_ppm_max,
        self.peakipy_data.f2_ppm_max,
        self.peakipy_data.f2_ppm_min,
    ]
    spec_border_y = [
        self.peakipy_data.f1_ppm_min,
        self.peakipy_data.f1_ppm_max,
        self.peakipy_data.f1_ppm_max,
        self.peakipy_data.f1_ppm_min,
        self.peakipy_data.f1_ppm_min,
    ]
    self.p.line(
        spec_border_x,
        spec_border_y,
        line_width=1,
        line_color="black",
        line_dash="dotted",
        line_alpha=0.5,
    )
    self.p.circle(x="X_PPM", y="Y_PPM", source=self.source, color="color")

    # plot cluster numbers
    self.p.text(
        x="X_PPM",
        y="Y_PPM",
        text="CLUSTID",
        text_color="color",
        source=self.source,
        text_font_size="8pt",
        text_font_style="bold",
    )

    self.p.on_event(DoubleTap, self.peak_pick_callback)

    # positive / negative contour display selector
    self.pos_neg_contour_dic = {0: "pos/neg", 1: "pos", 2: "neg"}
    self.pos_neg_contour_radiobutton = RadioButtonGroup(
        labels=[
            self.pos_neg_contour_dic[i] for i in self.pos_neg_contour_dic.keys()
        ],
        active=0,
    )
    self.pos_neg_contour_radiobutton.on_change("active", self.update_contour)
    # call fit_peaks
    self.fit_button = Button(label="Fit selected cluster", button_type="primary")
    # lineshape selection
    self.lineshapes = {
        0: "PV",
        1: "V",
        2: "G",
        3: "L",
        4: "PV_PV",
        # 5: "PV_L",
        # 6: "PV_G",
        # 7: "G_L",
    }
    self.radio_button_group = RadioButtonGroup(
        labels=[self.lineshapes[i] for i in self.lineshapes.keys()], active=0
    )
    self.ls_div = Div(
        text="""Choose lineshape you wish to fit. This can be Voigt (V), pseudo-Voigt (PV), Gaussian (G), Lorentzian (L).
        PV_PV fits a PV lineshape with independent "fraction" parameters for the direct and indirect dimensions"""
    )
    self.clust_div = Div(
        text="""If you want to adjust how the peaks are automatically clustered then try changing the
        width/diameter/height (integer values) of the structuring element used during the binary dilation step
        (you can also remove it by selecting 'None'). Increasing the size of the structuring element will cause
        peaks to be more readily incorporated into clusters. Be sure to save your peak list before doing this as
        any manual edits will be lost."""
    )
    self.intro_div = Div(
        text="""<h2>peakipy - interactive fit adjustment</h2>
        """
    )
    self.doc_link = Div(
        text="<h3><a href='https://j-brady.github.io/peakipy/build/usage/instructions.html', target='_blank'> ℹ️ click here for documentation</a></h3>"
    )
    self.fit_reports = ""
    self.fit_reports_div = Div(text="", height=400, style={"overflow": "scroll"})
    # Plane selection
    self.select_planes_list = [
        f"{i}"
        for i in range(self.peakipy_data.data.shape[self.peakipy_data.planes])
    ]
    self.select_plane = Select(
        title="Select plane:",
        value=self.select_planes_list[0],
        options=self.select_planes_list,
    )
    self.select_planes_dic = {
        f"{i}": i
        for i in range(self.peakipy_data.data.shape[self.peakipy_data.planes])
    }
    self.select_plane.on_change("value", self.update_contour)

    self.checkbox_group = CheckboxGroup(
        labels=["fit current plane only"], active=[]
    )

    # not sure this is needed
    selected_df = self.peakipy_data.df.copy()

    self.fit_button.on_event(ButtonClick, self.fit_selected)

    # editable peak-parameter table
    columns = [
        TableColumn(field="ASS", title="Assignment"),
        TableColumn(field="CLUSTID", title="Cluster", editor=IntEditor()),
        TableColumn(
            field="X_PPM",
            title=f"{self.peakipy_data.f2_label}",
            editor=NumberEditor(step=0.0001),
            formatter=NumberFormatter(format="0.0000"),
        ),
        TableColumn(
            field="Y_PPM",
            title=f"{self.peakipy_data.f1_label}",
            editor=NumberEditor(step=0.0001),
            formatter=NumberFormatter(format="0.0000"),
        ),
        TableColumn(
            field="X_RADIUS_PPM",
            title=f"{self.peakipy_data.f2_label} radius (ppm)",
            editor=NumberEditor(step=0.0001),
            formatter=NumberFormatter(format="0.0000"),
        ),
        TableColumn(
            field="Y_RADIUS_PPM",
            title=f"{self.peakipy_data.f1_label} radius (ppm)",
            editor=NumberEditor(step=0.0001),
            formatter=NumberFormatter(format="0.0000"),
        ),
        TableColumn(
            field="XW_HZ",
            title=f"{self.peakipy_data.f2_label} LW (Hz)",
            editor=NumberEditor(step=0.01),
            formatter=NumberFormatter(format="0.00"),
        ),
        TableColumn(
            field="YW_HZ",
            title=f"{self.peakipy_data.f1_label} LW (Hz)",
            editor=NumberEditor(step=0.01),
            formatter=NumberFormatter(format="0.00"),
        ),
        TableColumn(
            field="VOL", title="Volume", formatter=NumberFormatter(format="0.0")
        ),
        TableColumn(
            field="include",
            title="Include",
            editor=SelectEditor(options=["yes", "no"]),
        ),
        TableColumn(field="MEMCNT", title="MEMCNT", editor=IntEditor()),
    ]

    self.data_table = DataTable(
        source=self.source, columns=columns, editable=True, fit_columns=True
    )

    # callback for adding
    # source.selected.on_change('indices', callback)
    self.source.selected.on_change("indices", self.select_callback)

    # Document layout
    fitting_controls = column(
        row(
            column(self.slider_X_RADIUS, self.slider_Y_RADIUS),
            column(
                row(
                    widgetbox(self.contour_start, self.pos_neg_contour_radiobutton)
                ),
                widgetbox(self.fit_button),
            ),
        ),
        row(
            column(widgetbox(self.ls_div), widgetbox(self.radio_button_group)),
            column(widgetbox(self.select_plane), widgetbox(self.checkbox_group)),
        ),
    )

    # reclustering tab
    self.struct_el = Select(
        title="Structuring element:",
        value="disk",
        options=["square", "disk", "rectangle", "None", "mask_method"],
        width=100,
    )
    self.struct_el_size = TextInput(
        value="3",
        title="Size(width/radius or width,height for rectangle):",
        width=100,
    )
    self.recluster = Button(label="Re-cluster", button_type="warning")
    self.recluster.on_event(ButtonClick, self.recluster_peaks)

    # edit_fits tabs
    fitting_layout = fitting_controls
    log_layout = self.fit_reports_div
    recluster_layout = row(
        self.clust_div,
        column(
            self.contour_start, self.struct_el, self.struct_el_size, self.recluster
        ),
    )
    save_layout = column(self.savefilename, self.button, self.exit_button)

    fitting_tab = Panel(child=fitting_layout, title="Peak fitting")
    log_tab = Panel(child=log_layout, title="Log")
    recluster_tab = Panel(child=recluster_layout, title="Re-cluster peaks")
    save_tab = Panel(child=save_layout, title="Save edited peaklist")
    self.tabs = Tabs(
        tabs=[fitting_tab, log_tab, recluster_tab, save_tab],
        sizing_mode="scale_both",
    )
def recluster_peaks(self, event):
Example 3 (votes: 0)
Source file: view.py — from JaneliaSciComp
License: BSD 3-Clause "New" or "Revised" License
def init(_bokeh_document):
global bokeh_document, cluster_dot_palette, snippet_palette, p_cluster, cluster_dots, p_cluster_dots, precomputed_dots, snippets_dy, p_snippets, snippets_label_sources_clustered, snippets_label_sources_annotated, snippets_wave_sources, snippets_wave_glyphs, snippets_gram_sources, snippets_gram_glyphs, snippets_quad_grey, dot_size_cluster, dot_alpha_cluster, cluster_circle_fuchsia, p_waveform, p_spectrogram, p_probability, probability_source, probability_glyph, spectrogram_source, spectrogram_glyph, waveform_span_red, spectrogram_span_red, waveform_quad_grey_clustered, waveform_quad_grey_annotated, waveform_quad_grey_pan, waveform_quad_fuchsia, spectrogram_quad_grey_clustered, spectrogram_quad_grey_annotated, spectrogram_quad_grey_pan, spectrogram_quad_fuchsia, snippets_quad_fuchsia, waveform_source, waveform_glyph, waveform_label_source_clustered, waveform_label_source_annotated, spectrogram_label_source_clustered, spectrogram_label_source_annotated, which_layer, which_species, which_word, which_nohyphen, which_kind, color_picker, circle_radius, dot_size, dot_alpha, zoom_context, zoom_offset, zoomin, zoomout, reset, panleft, panright, allleft, allout, allright, save_indicator, label_count_widgets, label_text_widgets, play, play_callback, video_toggle, video_div, undo, redo, detect, misses, configuration_file, train, leaveoneout, leaveallout, xvalidate, mistakes, activations, cluster, visualize, accuracy, freeze, classify, ethogram, compare, congruence, status_ticker, waitfor, file_dialog_source, file_dialog_source, configuration_contents, logs, logs_folder, model, model_file, wavtfcsvfiles, wavtfcsvfiles_string, groundtruth, groundtruth_folder, validationfiles, testfiles, validationfiles_string, testfiles_string, wantedwords, wantedwords_string, labeltypes, labeltypes_string, prevalences, prevalences_string, copy, labelsounds, makepredictions, fixfalsepositives, fixfalsenegatives, generalize, tunehyperparameters, findnovellabels, examineerrors, testdensely, doit, 
time_sigma_string, time_smooth_ms_string, frequency_n_ms_string, frequency_nw_string, frequency_p_string, frequency_smooth_ms_string, nsteps_string, restore_from_string, save_and_validate_period_string, validate_percentage_string, mini_batch_string, kfold_string, activations_equalize_ratio_string, activations_max_samples_string, pca_fraction_variance_to_retain_string, tsne_perplexity_string, tsne_exaggeration_string, umap_neighbors_string, umap_distance_string, cluster_algorithm, cluster_these_layers, precision_recall_ratios_string, context_ms_string, shiftby_ms_string, representation, window_ms_string, stride_ms_string, mel_dct_string, optimizer, learning_rate_string, replicates_string, batch_seed_string, weights_seed_string, file_dialog_string, file_dialog_table, readme_contents, wordcounts, wizard_buttons, action_buttons, parameter_buttons, parameter_textinputs, wizard2actions, action2parameterbuttons, action2parametertextinputs, status_ticker_update, status_ticker_pre, status_ticker_post, model_parameters
bokeh_document = _bokeh_document
M.cluster_circle_color = M.cluster_circle_color
if '#' in M.cluster_dot_palette:
cluster_dot_palette = ast.literal_eval(M.cluster_dot_palette)
else:
cluster_dot_palette = getattr(palettes, M.cluster_dot_palette)
snippet_palette = getattr(palettes, M.snippets_colormap)
dot_size_cluster = ColumnDataSource(data=dict(ds=[M.state["dot_size"]]))
dot_alpha_cluster = ColumnDataSource(data=dict(da=[M.state["dot_alpha"]]))
cluster_dots = ColumnDataSource(data=dict(dx=[], dy=[], dz=[], dl=[], dc=[]))
cluster_circle_fuchsia = ColumnDataSource(data=dict(cx=[], cy=[], cz=[], cr=[], cc=[]))
p_cluster = ScatterNd(dx='dx', dy='dy', dz='dz', dl='dl', dc='dc',
dots_source=cluster_dots,
cx='cx', cy='cy', cz='cz', cr='cr', cc='cc',
circle_fuchsia_source=cluster_circle_fuchsia,
ds='ds',
dot_size_source=dot_size_cluster,
da='da',
dot_alpha_source=dot_alpha_cluster,
width=M.gui_width_pix//2)
p_cluster.on_change("click_position", lambda a,o,n: C.cluster_tap_callback(n))
precomputed_dots = None
snippets_dy = 2*M.snippets_waveform + 2*M.snippets_spectrogram
p_snippets = figure(plot_width=M.gui_width_pix//2, \
background_fill_color='#FFFFFF', toolbar_location=None)
p_snippets.toolbar.active_drag = None
p_snippets.grid.visible = False
p_snippets.xaxis.visible = False
p_snippets.yaxis.visible = False
snippets_gram_sources=[None]*(M.snippets_nx*M.snippets_ny)
snippets_gram_glyphs=[None]*(M.snippets_nx*M.snippets_ny)
for ixy in range(M.snippets_nx*M.snippets_ny):
snippets_gram_sources[ixy]=[None]*M.audio_nchannels
snippets_gram_glyphs[ixy]=[None]*M.audio_nchannels
for ichannel in range(M.audio_nchannels):
snippets_gram_sources[ixy][ichannel] = ColumnDataSource(data=dict(image=[]))
snippets_gram_glyphs[ixy][ichannel] = p_snippets.image('image',
source=snippets_gram_sources[ixy][ichannel],
palette=M.spectrogram_colormap)
snippets_quad_grey = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_snippets.quad('left','right','top','bottom',source=snippets_quad_grey,
fill_color="lightgrey", fill_alpha=0.5, line_color="lightgrey")
snippets_wave_sources=[None]*(M.snippets_nx*M.snippets_ny)
snippets_wave_glyphs=[None]*(M.snippets_nx*M.snippets_ny)
for ixy in range(M.snippets_nx*M.snippets_ny):
snippets_wave_sources[ixy]=[None]*M.audio_nchannels
snippets_wave_glyphs[ixy]=[None]*M.audio_nchannels
for ichannel in range(M.audio_nchannels):
snippets_wave_sources[ixy][ichannel]=ColumnDataSource(data=dict(x=[], y=[]))
snippets_wave_glyphs[ixy][ichannel]=p_snippets.line(
'x', 'y', source=snippets_wave_sources[ixy][ichannel])
xdata = [(i%M.snippets_nx)*(M.snippets_gap_pix+M.snippets_pix)
for i in range(M.snippets_nx*M.snippets_ny)]
ydata = [-(i//M.snippets_nx*snippets_dy-1)
for i in range(M.snippets_nx*M.snippets_ny)]
text = ['' for i in range(M.snippets_nx*M.snippets_ny)]
snippets_label_sources_clustered = ColumnDataSource(data=dict(x=xdata, y=ydata, text=text))
p_snippets.text('x', 'y', source=snippets_label_sources_clustered, text_font_size='6pt',
text_baseline='top',
text_color='black' if M.snippets_waveform else 'white')
xdata = [(i%M.snippets_nx)*(M.snippets_gap_pix+M.snippets_pix)
for i in range(M.snippets_nx*M.snippets_ny)]
ydata = [-(i//M.snippets_nx*snippets_dy+1+2*(M.snippets_waveform and M.snippets_spectrogram))
for i in range(M.snippets_nx*M.snippets_ny)]
text_annotated = ['' for i in range(M.snippets_nx*M.snippets_ny)]
snippets_label_sources_annotated = ColumnDataSource(data=dict(x=xdata, y=ydata,
text=text_annotated))
p_snippets.text('x', 'y', source=snippets_label_sources_annotated,
text_font_size='6pt',
text_color='white' if M.snippets_spectrogram else 'black')
snippets_quad_fuchsia = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_snippets.quad('left','right','top','bottom',source=snippets_quad_fuchsia,
fill_color=None, line_color="fuchsia")
p_snippets.on_event(Tap, C.snippets_tap_callback)
p_snippets.on_event(DoubleTap, C.snippets_doubletap_callback)
p_waveform = figure(plot_width=M.gui_width_pix,
plot_height=M.context_waveform_height_pix,
background_fill_color='#FFFFFF', toolbar_location=None)
p_waveform.toolbar.active_drag = None
p_waveform.grid.visible = False
if M.context_spectrogram:
p_waveform.xaxis.visible = False
else:
p_waveform.xaxis.axis_label = 'Time (sec)'
p_waveform.yaxis.axis_label = ""
p_waveform.yaxis.ticker = []
p_waveform.x_range.range_padding = p_waveform.y_range.range_padding = 0.0
p_waveform.y_range.start = -1
p_waveform.y_range.end = 1
p_waveform.title.text=' '
waveform_span_red = Span(location=0, dimension='height', line_color='red')
p_waveform.add_layout(waveform_span_red)
waveform_span_red.visible=False
waveform_quad_grey_clustered = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_waveform.quad('left','right','top','bottom',source=waveform_quad_grey_clustered,
fill_color="lightgrey", fill_alpha=0.5, line_color="lightgrey",
level='underlay')
waveform_quad_grey_annotated = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_waveform.quad('left','right','top','bottom',source=waveform_quad_grey_annotated,
fill_color="lightgrey", fill_alpha=0.5, line_color="lightgrey",
level='underlay')
waveform_quad_grey_pan = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_waveform.quad('left','right','top','bottom',source=waveform_quad_grey_pan,
fill_color="lightgrey", fill_alpha=0.5, line_color="lightgrey",
level='underlay')
waveform_quad_fuchsia = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_waveform.quad('left','right','top','bottom',source=waveform_quad_fuchsia,
fill_color=None, line_color="fuchsia", level='underlay')
waveform_source=[None]*M.audio_nchannels
waveform_glyph=[None]*M.audio_nchannels
for ichannel in range(M.audio_nchannels):
waveform_source[ichannel] = ColumnDataSource(data=dict(x=[], y=[]))
waveform_glyph[ichannel] = p_waveform.line('x', 'y', source=waveform_source[ichannel])
waveform_label_source_clustered = ColumnDataSource(data=dict(x=[], y=[], text=[]))
p_waveform.text('x', 'y', source=waveform_label_source_clustered,
text_font_size='6pt', text_align='center', text_baseline='top',
text_line_height=0.8, level='underlay')
waveform_label_source_annotated = ColumnDataSource(data=dict(x=[], y=[], text=[]))
p_waveform.text('x', 'y', source=waveform_label_source_annotated,
text_font_size='6pt', text_align='center', text_baseline='bottom',
text_line_height=0.8, level='underlay')
p_waveform.on_event(DoubleTap, lambda e: C.context_doubletap_callback(e, 0))
p_waveform.on_event(PanStart, C.waveform_pan_start_callback)
p_waveform.on_event(Pan, C.waveform_pan_callback)
p_waveform.on_event(PanEnd, C.waveform_pan_end_callback)
p_waveform.on_event(Tap, C.waveform_tap_callback)
p_spectrogram = figure(plot_width=M.gui_width_pix,
plot_height=M.context_spectrogram_height_pix,
background_fill_color='#FFFFFF', toolbar_location=None)
p_spectrogram.toolbar.active_drag = None
p_spectrogram.x_range.range_padding = p_spectrogram.y_range.range_padding = 0
p_spectrogram.xgrid.visible = False
p_spectrogram.ygrid.visible = True
p_spectrogram.xaxis.axis_label = 'Time (sec)'
p_spectrogram.yaxis.axis_label = 'Frequency (' + M.context_spectrogram_units + ')'
p_spectrogram.yaxis.ticker = list(range(1+M.audio_nchannels))
spectrogram_source = [None]*M.audio_nchannels
spectrogram_glyph = [None]*M.audio_nchannels
for ichannel in range(M.audio_nchannels):
spectrogram_source[ichannel] = ColumnDataSource(data=dict(image=[]))
spectrogram_glyph[ichannel] = p_spectrogram.image('image',
source=spectrogram_source[ichannel],
palette=M.spectrogram_colormap,
level="image")
p_spectrogram.on_event(MouseWheel, C.spectrogram_mousewheel_callback)
p_spectrogram.on_event(DoubleTap,
lambda e: C.context_doubletap_callback(e, M.audio_nchannels/2))
p_spectrogram.on_event(PanStart, C.spectrogram_pan_start_callback)
p_spectrogram.on_event(Pan, C.spectrogram_pan_callback)
p_spectrogram.on_event(PanEnd, C.spectrogram_pan_end_callback)
p_spectrogram.on_event(Tap, C.spectrogram_tap_callback)
spectrogram_span_red = Span(location=0, dimension='height', line_color='red')
p_spectrogram.add_layout(spectrogram_span_red)
spectrogram_span_red.visible=False
spectrogram_quad_grey_clustered = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_spectrogram.quad('left','right','top','bottom',source=spectrogram_quad_grey_clustered,
fill_color="lightgrey", fill_alpha=0.5, line_color="lightgrey",
level='underlay')
spectrogram_quad_grey_annotated = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_spectrogram.quad('left','right','top','bottom',source=spectrogram_quad_grey_annotated,
fill_color="lightgrey", fill_alpha=0.5, line_color="lightgrey",
level='underlay')
spectrogram_quad_grey_pan = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_spectrogram.quad('left','right','top','bottom',source=spectrogram_quad_grey_pan,
fill_color="lightgrey", fill_alpha=0.5, line_color="lightgrey",
level='underlay')
spectrogram_quad_fuchsia = ColumnDataSource(data=dict(left=[], right=[], top=[], bottom=[]))
p_spectrogram.quad('left','right','top','bottom',source=spectrogram_quad_fuchsia,
fill_color=None, line_color="fuchsia", level='underlay')
spectrogram_label_source_clustered = ColumnDataSource(data=dict(x=[], y=[], text=[]))
p_spectrogram.text('x', 'y', source=spectrogram_label_source_clustered,
text_font_size='6pt', text_align='center', text_baseline='top',
text_line_height=0.8, level='underlay', text_color='white')
spectrogram_label_source_annotated = ColumnDataSource(data=dict(x=[], y=[], text=[]))
p_spectrogram.text('x', 'y', source=spectrogram_label_source_annotated,
text_font_size='6pt', text_align='center', text_baseline='bottom',
text_line_height=0.8, level='underlay', text_color='white')
TOOLTIPS = """
< div> < div> < span style="color:@colors;">@labels < /span> < /div> < /div>
"""
p_probability = figure(plot_width=M.gui_width_pix, tooltips=TOOLTIPS,
plot_height=M.context_probability_height_pix,
background_fill_color='#FFFFFF', toolbar_location=None)
p_probability.toolbar.active_drag = None
p_probability.grid.visible = False
p_probability.yaxis.axis_label = "Probability"
p_probability.x_range.range_padding = p_probability.y_range.range_padding = 0.0
p_probability.y_range.start = 0
p_probability.y_range.end = 1
p_probability.xaxis.visible = False
probability_source = ColumnDataSource(data=dict(xs=[], ys=[], colors=[], labels=[]))
probability_glyph = p_probability.multi_line(xs='xs', ys='ys',
source=probability_source, color='colors')
probability_span_red = Span(location=0, dimension='height', line_color='red')
p_probability.add_layout(probability_span_red)
probability_span_red.visible=False
which_layer = Select(title="layer:")
which_layer.on_change('value', lambda a,o,n: C.layer_callback(n))
which_species = Select(title="species:")
which_species.on_change('value', lambda a,o,n: C.species_callback(n))
which_word = Select(title="word:")
which_word.on_change('value', lambda a,o,n: C.word_callback(n))
which_nohyphen = Select(title="no hyphen:")
which_nohyphen.on_change('value', lambda a,o,n: C.nohyphen_callback(n))
which_kind = Select(title="kind:")
which_kind.on_change('value', lambda a,o,n: C.kind_callback(n))
color_picker = ColorPicker(title="color:", disabled=True)
color_picker.on_change("color", lambda a,o,n: C.color_picker_callback(n))
circle_radius = Slider(start=0, end=10, step=1, \
value=M.state["circle_radius"], \
title="circle radius", \
disabled=True)
circle_radius.on_change("value_throttled", C.circle_radius_callback)
dot_size = Slider(start=1, end=24, step=1, \
value=M.state["dot_size"], \
title="dot size", \
disabled=True)
dot_size.on_change("value", C.dot_size_callback)
dot_alpha = Slider(start=0.01, end=1.0, step=0.01, \
value=M.state["dot_alpha"], \
title="dot alpha", \
disabled=True)
dot_alpha.on_change("value", C.dot_alpha_callback)
cluster_update()
zoom_context = TextInput(value=str(M.context_width_ms),
title="context (msec):",
disabled=True)
zoom_context.on_change("value", C.zoom_context_callback)
zoom_offset = TextInput(value=str(M.context_offset_ms),
title="offset (msec):",
disabled=True)
zoom_offset.on_change("value", C.zoom_offset_callback)
zoomin = Button(label='\u2191', disabled=True)
zoomin.on_click(C.zoomin_callback)
zoomout = Button(label='\u2193', disabled=True)
zoomout.on_click(C.zoomout_callback)
reset = Button(label='\u25ef', disabled=True)
reset.on_click(C.zero_callback)
panleft = Button(label='\u2190', disabled=True)
panleft.on_click(C.panleft_callback)
panright = Button(label='\u2192', disabled=True)
panright.on_click(C.panright_callback)
allleft = Button(label='\u21e4', disabled=True)
allleft.on_click(C.allleft_callback)
allout = Button(label='\u2913', disabled=True)
allout.on_click(C.allout_callback)
allright = Button(label='\u21e5', disabled=True)
allright.on_click(C.allright_callback)
save_indicator = Button(label='0')
label_count_callbacks=[]
label_count_widgets=[]
label_text_callbacks=[]
label_text_widgets=[]
for i in range(M.nlabels):
label_count_callbacks.append(lambda i=i: C.label_count_callback(i))
label_count_widgets.append(Button(label='0', css_classes=['hide-label'], width=40))
label_count_widgets[-1].on_click(label_count_callbacks[-1])
label_text_callbacks.append(lambda a,o,n,i=i: C.label_text_callback(n,i))
label_text_widgets.append(TextInput(value=M.state['labels'][i],
css_classes=['hide-label']))
label_text_widgets[-1].on_change("value", label_text_callbacks[-1])
C.label_count_callback(M.ilabel)
play = Button(label='play', disabled=True)
play_callback = CustomJS(args=dict(waveform_span_red=waveform_span_red,
spectrogram_span_red=spectrogram_span_red,
probability_span_red=probability_span_red),
code=C.play_callback_code % ("",""))
play.js_on_event(ButtonClick, play_callback)
play.on_change('disabled', lambda a,o,n: reset_video())
play.js_on_change('disabled', play_callback)
video_toggle = Toggle(label='video', active=False, disabled=True)
video_toggle.on_click(lambda x: context_update())
video_div = Div(text=""" < video id="context_video"> < /video>""", width=0, height=0)
undo = Button(label='undo', disabled=True)
undo.on_click(C.undo_callback)
redo = Button(label='redo', disabled=True)
redo.on_click(C.redo_callback)
detect = Button(label='detect')
detect.on_click(lambda: C.action_callback(detect, C.detect_actuate))
misses = Button(label='misses')
misses.on_click(lambda: C.action_callback(misses, C.misses_actuate))
train = Button(label='train')
train.on_click(lambda: C.action_callback(train, C.train_actuate))
leaveoneout = Button(label='omit one')
leaveoneout.on_click(lambda: C.action_callback(leaveoneout,
lambda: C.leaveout_actuate(False)))
leaveallout = Button(label='omit all')
leaveallout.on_click(lambda: C.action_callback(leaveallout,
lambda: C.leaveout_actuate(True)))
xvalidate = Button(label='x-validate')
xvalidate.on_click(lambda: C.action_callback(xvalidate, C.xvalidate_actuate))
mistakes = Button(label='mistakes')
mistakes.on_click(lambda: C.action_callback(mistakes, C.mistakes_actuate))
activations = Button(label='activations')
activations.on_click(lambda: C.action_callback(activations, C.activations_actuate))
cluster = Button(label='cluster')
cluster.on_click(lambda: C.action_callback(cluster, C.cluster_actuate))
visualize = Button(label='visualize')
visualize.on_click(lambda: C.action_callback(visualize, C.visualize_actuate))
accuracy = Button(label='accuracy')
accuracy.on_click(lambda: C.action_callback(accuracy, C.accuracy_actuate))
freeze = Button(label='freeze')
freeze.on_click(lambda: C.action_callback(freeze, C.freeze_actuate))
classify = Button(label='classify')
classify.on_click(C.classify_callback)
ethogram = Button(label='ethogram')
ethogram.on_click(lambda: C.action_callback(ethogram, C.ethogram_actuate))
compare = Button(label='compare')
compare.on_click(lambda: C.action_callback(compare, C.compare_actuate))
congruence = Button(label='congruence')
congruence.on_click(lambda: C.action_callback(congruence, C.congruence_actuate))
status_ticker_pre=" < div style='overflow:auto; white-space:nowrap; width:"+str(M.gui_width_pix-126)+"px'>status: "
status_ticker_post=" < /div>"
status_ticker = Div(text=status_ticker_pre+status_ticker_post)
file_dialog_source = ColumnDataSource(data=dict(names=[], sizes=[], dates=[]))
file_dialog_source.selected.on_change('indices', C.file_dialog_callback)
file_dialog_columns = [
TableColumn(field="names", title="Name", width=M.gui_width_pix//2-50-115-30),
TableColumn(field="sizes", title="Size", width=50, \
formatter=NumberFormatter(format="0 b")),
TableColumn(field="dates", title="Date", width=115, \
formatter=DateFormatter(format="%Y-%m-%d %H:%M:%S")),
]
file_dialog_table = DataTable(source=file_dialog_source, \
columns=file_dialog_columns, \
height=727, width=M.gui_width_pix//2-11, \
index_position=None,
fit_columns=False)
waitfor = Toggle(label='wait for last job', active=False, disabled=True, width=100)
waitfor.on_click(C.waitfor_callback)
logs = Button(label='logs folder:', width=110)
logs.on_click(C.logs_callback)
logs_folder = TextInput(value=M.state['logs'], title="", disabled=False)
logs_folder.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
model = Button(label='checkpoint file:', width=110)
model.on_click(C.model_callback)
model_file = TextInput(value=M.state['model'], title="", disabled=False)
model_file.on_change('value', model_file_update)
wavtfcsvfiles = Button(label='wav,tf,csv files:', width=110)
wavtfcsvfiles.on_click(C.wavtfcsvfiles_callback)
wavtfcsvfiles_string = TextInput(value=M.state['wavtfcsvfiles'], title="", disabled=False)
wavtfcsvfiles_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
groundtruth = Button(label='ground truth:', width=110)
groundtruth.on_click(C.groundtruth_callback)
groundtruth_folder = TextInput(value=M.state['groundtruth'], title="", disabled=False)
groundtruth_folder.on_change('value', lambda a,o,n: groundtruth_update())
validationfiles = Button(label='validation files:', width=110)
validationfiles.on_click(C.validationfiles_callback)
validationfiles_string = TextInput(value=M.state['validationfiles'], title="", disabled=False)
validationfiles_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
testfiles = Button(label='test files:', width=110)
testfiles.on_click(C.testfiles_callback)
testfiles_string = TextInput(value=M.state['testfiles'], title="", disabled=False)
testfiles_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
wantedwords = Button(label='wanted words:', width=110)
wantedwords.on_click(C.wantedwords_callback)
wantedwords_string = TextInput(value=M.state['wantedwords'], title="", disabled=False)
wantedwords_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
labeltypes = Button(label='label types:', width=110)
labeltypes_string = TextInput(value=M.state['labeltypes'], title="", disabled=False)
labeltypes_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
prevalences = Button(label='prevalences:', width=110)
prevalences.on_click(C.prevalences_callback)
prevalences_string = TextInput(value=M.state['prevalences'], title="", disabled=False)
prevalences_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
copy = Button(label='copy')
copy.on_click(C.copy_callback)
labelsounds = Button(label='label sounds')
labelsounds.on_click(lambda: C.wizard_callback(labelsounds))
makepredictions = Button(label='make predictions')
makepredictions.on_click(lambda: C.wizard_callback(makepredictions))
fixfalsepositives = Button(label='fix false positives')
fixfalsepositives.on_click(lambda: C.wizard_callback(fixfalsepositives))
fixfalsenegatives = Button(label='fix false negatives')
fixfalsenegatives.on_click(lambda: C.wizard_callback(fixfalsenegatives))
generalize = Button(label='test generalization')
generalize.on_click(lambda: C.wizard_callback(generalize))
tunehyperparameters = Button(label='tune h-parameters')
tunehyperparameters.on_click(lambda: C.wizard_callback(tunehyperparameters))
findnovellabels = Button(label='find novel labels')
findnovellabels.on_click(lambda: C.wizard_callback(findnovellabels))
examineerrors = Button(label='examine errors')
examineerrors.on_click(lambda: C.wizard_callback(examineerrors))
testdensely = Button(label='test densely')
testdensely .on_click(lambda: C.wizard_callback(testdensely))
doit = Button(label='do it!', disabled=True)
doit.on_click(C.doit_callback)
time_sigma_string = TextInput(value=M.state['time_sigma'], \
title="time σ", \
disabled=False)
time_sigma_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
time_smooth_ms_string = TextInput(value=M.state['time_smooth_ms'], \
title="time smooth", \
disabled=False)
time_smooth_ms_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
frequency_n_ms_string = TextInput(value=M.state['frequency_n_ms'], \
title="freq N (msec)", \
disabled=False)
frequency_n_ms_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
frequency_nw_string = TextInput(value=M.state['frequency_nw'], \
title="freq NW", \
disabled=False)
frequency_nw_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
frequency_p_string = TextInput(value=M.state['frequency_p'], \
title="freq ρ", \
disabled=False)
frequency_p_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
frequency_smooth_ms_string = TextInput(value=M.state['frequency_smooth_ms'], \
title="freq smooth", \
disabled=False)
frequency_smooth_ms_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
nsteps_string = TextInput(value=M.state['nsteps'], title="# steps", disabled=False)
nsteps_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
restore_from_string = TextInput(value=M.state['restore_from'], title="restore from", disabled=False)
restore_from_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
save_and_validate_period_string = TextInput(value=M.state['save_and_validate_interval'], \
title="validate period", \
disabled=False)
save_and_validate_period_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
validate_percentage_string = TextInput(value=M.state['validate_percentage'], \
title="validate %", \
disabled=False)
validate_percentage_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
mini_batch_string = TextInput(value=M.state['mini_batch'], \
title="mini-batch", \
disabled=False)
mini_batch_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
kfold_string = TextInput(value=M.state['kfold'], title="k-fold", disabled=False)
kfold_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
activations_equalize_ratio_string = TextInput(value=M.state['activations_equalize_ratio'], \
title="equalize ratio", \
disabled=False)
activations_equalize_ratio_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
activations_max_samples_string = TextInput(value=M.state['activations_max_samples'], \
title="max samples", \
disabled=False)
activations_max_samples_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
pca_fraction_variance_to_retain_string = TextInput(value=M.state['pca_fraction_variance_to_retain'], \
title="PCA fraction", \
disabled=False)
pca_fraction_variance_to_retain_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
tsne_perplexity_string = TextInput(value=M.state['tsne_perplexity'], \
title="perplexity", \
disabled=False)
tsne_perplexity_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
tsne_exaggeration_string = TextInput(value=M.state['tsne_exaggeration'], \
title="exaggeration", \
disabled=False)
tsne_exaggeration_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
umap_neighbors_string = TextInput(value=M.state['umap_neighbors'], \
title="neighbors", \
disabled=False)
umap_neighbors_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
umap_distance_string = TextInput(value=M.state['umap_distance'], \
title="distance", \
disabled=False)
umap_distance_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
precision_recall_ratios_string = TextInput(value=M.state['precision_recall_ratios'], \
title="P/Rs", \
disabled=False)
precision_recall_ratios_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
context_ms_string = TextInput(value=M.state['context_ms'], \
title="context (msec)", \
disabled=False)
context_ms_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
shiftby_ms_string = TextInput(value=M.state['shiftby_ms'], \
title="shift by (msec)", \
disabled=False)
shiftby_ms_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
representation = Select(title="representation", height=50, \
value=M.state['representation'], \
options=["waveform", "spectrogram", "mel-cepstrum"])
representation.on_change('value', lambda a,o,n: C.generic_parameters_callback(''))
window_ms_string = TextInput(value=M.state['window_ms'], \
title="window (msec)", \
disabled=False)
window_ms_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
stride_ms_string = TextInput(value=M.state['stride_ms'], \
title="stride (msec)", \
disabled=False)
stride_ms_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
mel_dct_string = TextInput(value=M.state['mel&dct'], \
title="Mel & DCT", \
disabled=False)
mel_dct_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
optimizer = Select(title="optimizer", height=50, \
value=M.state['optimizer'], \
options=[("sgd","SGD"), ("adam","Adam"), ("adagrad","AdaGrad"), \
("rmsprop","RMSProp")])
optimizer.on_change('value', lambda a,o,n: C.generic_parameters_callback(''))
learning_rate_string = TextInput(value=M.state['learning_rate'], \
title="learning rate", \
disabled=False)
learning_rate_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
model_parameters = OrderedDict()
for parameter in M.model_parameters:
if parameter[2]=='':
thisparameter = TextInput(value=M.state[parameter[0]], \
title=parameter[1], \
disabled=False, width=94)
thisparameter.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
else:
thisparameter = Select(value=M.state[parameter[0]], \
title=parameter[1], \
options=parameter[2], \
height=50, width=94)
model_parameters[parameter[0]] = thisparameter
configuration_contents = TextAreaInput(rows=49-3*np.ceil(len(model_parameters)/6).astype(np.int),
max_length=50000, \
disabled=True, css_classes=['fixedwidth'])
if M.configuration_file:
with open(M.configuration_file, 'r') as fid:
configuration_contents.value = fid.read()
cluster_algorithm = Select(title="cluster", height=50, \
value=M.state['cluster_algorithm'], \
options=["PCA 2D", "PCA 3D", \
"tSNE 2D", "tSNE 3D", \
"UMAP 2D", "UMAP 3D"])
cluster_algorithm.on_change('value', lambda a,o,n: C.generic_parameters_callback(''))
cluster_these_layers = MultiSelect(title='layers', \
value=M.state['cluster_these_layers'], \
options=[],
height=108)
cluster_these_layers.on_change('value', lambda a,o,n: C.generic_parameters_callback(''))
cluster_these_layers_update()
replicates_string = TextInput(value=M.state['replicates'], \
title="replicates", \
disabled=False)
replicates_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
batch_seed_string = TextInput(value=M.state['batch_seed'], \
title="batch seed", \
disabled=False)
batch_seed_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
weights_seed_string = TextInput(value=M.state['weights_seed'], \
title="weights seed", \
disabled=False)
weights_seed_string.on_change('value', lambda a,o,n: C.generic_parameters_callback(n))
file_dialog_string = TextInput(disabled=False)
file_dialog_string.on_change("value", C.file_dialog_path_callback)
file_dialog_string.value = M.state['file_dialog_string']
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..','README.md'), 'r', encoding='utf-8') as fid:
contents = fid.read()
html = markdown.markdown(contents, extensions=['tables','toc'])
readme_contents = Div(text=html, style={'overflow':'scroll','width':'600px','height':'1397px'})
wordcounts = Div(text="")
wordcounts_update()
wizard_buttons = set([
labelsounds,
makepredictions,
fixfalsepositives,
fixfalsenegatives,
generalize,
tunehyperparameters,
findnovellabels,
examineerrors,
testdensely])
action_buttons = set([
detect,
train,
leaveoneout,
leaveallout,
xvalidate,
mistakes,
activations,
cluster,
visualize,
accuracy,
freeze,
classify,
ethogram,
misses,
compare,
congruence])
parameter_buttons = set([
logs,
model,
wavtfcsvfiles,
groundtruth,
validationfiles,
testfiles,
wantedwords,
labeltypes,
prevalences])
parameter_textinputs = set([
logs_folder,
model_file,
wavtfcsvfiles_string,
groundtruth_folder,
validationfiles_string,
testfiles_string,
wantedwords_string,
labeltypes_string,
prevalences_string,
time_sigma_string,
time_smooth_ms_string,
frequency_n_ms_string,
frequency_nw_string,
frequency_p_string,
frequency_smooth_ms_string,
nsteps_string,
restore_from_string,
save_and_validate_period_string,
validate_percentage_string,
mini_batch_string,
kfold_string,
activations_equalize_ratio_string,
activations_max_samples_string,
pca_fraction_variance_to_retain_string,
tsne_perplexity_string,
tsne_exaggeration_string,
umap_neighbors_string,
umap_distance_string,
cluster_algorithm,
cluster_these_layers,
precision_recall_ratios_string,
replicates_string,
batch_seed_string,
weights_seed_string,
context_ms_string,
shiftby_ms_string,
representation,
window_ms_string,
stride_ms_string,
mel_dct_string,
optimizer,
learning_rate_string] +
list(model_parameters.values()))
wizard2actions = {
labelsounds: [detect,train,activations,cluster,visualize],
makepredictions: [train, accuracy, freeze, classify, ethogram],
fixfalsepositives: [activations, cluster, visualize],
fixfalsenegatives: [detect, misses, activations, cluster, visualize],
generalize: [leaveoneout, leaveallout, accuracy],
tunehyperparameters: [xvalidate, accuracy, compare],
findnovellabels: [detect, train, activations, cluster, visualize],
examineerrors: [detect, mistakes, activations, cluster, visualize],
testdensely: [detect, activations, cluster, visualize, classify, ethogram, congruence],
None: action_buttons }
action2parameterbuttons = {
detect: [wavtfcsvfiles],
train: [logs, groundtruth, wantedwords, testfiles, labeltypes],
leaveoneout: [logs, groundtruth, validationfiles, testfiles, wantedwords, labeltypes],
leaveallout: [logs, groundtruth, validationfiles, testfiles, wantedwords, labeltypes],
xvalidate: [logs, groundtruth, testfiles, wantedwords, labeltypes],
mistakes: [groundtruth],
activations: [logs, model, groundtruth, wantedwords, labeltypes],
cluster: [groundtruth],
visualize: [groundtruth],
accuracy: [logs],
freeze: [logs, model],
classify: [logs, model, wavtfcsvfiles, wantedwords, prevalences],
ethogram: [model, wavtfcsvfiles],
misses: [wavtfcsvfiles],
compare: [logs],
congruence: [groundtruth, validationfiles, testfiles],
None: parameter_buttons }
action2parametertextinputs = {
detect: [wavtfcsvfiles_string, time_sigma_string, time_smooth_ms_string, frequency_n_ms_string, frequency_nw_string, frequency_p_string, frequency_smooth_ms_string],
train: [context_ms_string, shiftby_ms_string, representation, window_ms_string, stride_ms_string, mel_dct_string, optimizer, learning_rate_string, replicates_string, batch_seed_string, weights_seed_string, logs_folder, groundtruth_folder, testfiles_string, wantedwords_string, labeltypes_string, nsteps_string, restore_from_string, save_and_validate_period_string, validate_percentage_string, mini_batch_string] + list(model_parameters.values()),
leaveoneout: [context_ms_string, shiftby_ms_string, representation, window_ms_string, stride_ms_string, mel_dct_string, optimizer, learning_rate_string, batch_seed_string, weights_seed_string, logs_folder, groundtruth_folder, validationfiles_string, testfiles_string, wantedwords_string, labeltypes_string, nsteps_string, restore_from_string, save_and_validate_period_string, mini_batch_string] + list(model_parameters.values()),
leaveallout: [context_ms_string, shiftby_ms_string, representation, window_ms_string, stride_ms_string, mel_dct_string, optimizer, learning_rate_string, batch_seed_string, weights_seed_string, logs_folder, groundtruth_folder, validationfiles_string, testfiles_string, wantedwords_string, labeltypes_string, nsteps_string, restore_from_string, save_and_validate_period_string, mini_batch_string] + list(model_parameters.values()),
xvalidate: [context_ms_string, shiftby_ms_string, representation, window_ms_string, stride_ms_string, mel_dct_string, optimizer, learning_rate_string, batch_seed_string, weights_seed_string, logs_folder, groundtruth_folder, testfiles_string, wantedwords_string, labeltypes_string, nsteps_string, restore_from_string, save_and_validate_period_string, mini_batch_string, kfold_string] + list(model_parameters.values()),
mistakes: [groundtruth_folder],
activations: [context_ms_string, shiftby_ms_string, representation, window_ms_string, stride_ms_string, mel_dct_string, logs_folder, model_file, groundtruth_folder, wantedwords_string, labeltypes_string, activations_equalize_ratio_string, activations_max_samples_string, mini_batch_string] + list(model_parameters.values()),
cluster: [groundtruth_folder, cluster_algorithm, cluster_these_layers, pca_fraction_variance_to_retain_string, tsne_perplexity_string, tsne_exaggeration_string, umap_neighbors_string, umap_distance_string],
visualize: [groundtruth_folder],
accuracy: [logs_folder, precision_recall_ratios_string],
freeze: [context_ms_string, representation, window_ms_string, stride_ms_string, mel_dct_string, logs_folder, model_file] + list(model_parameters.values()),
classify: [context_ms_string, shiftby_ms_string, representation, stride_ms_string, logs_folder, model_file, wavtfcsvfiles_string, wantedwords_string, prevalences_string] + list(model_parameters.values()),
ethogram: [model_file, wavtfcsvfiles_string],
misses: [wavtfcsvfiles_string],
compare: [logs_folder],
congruence: [groundtruth_folder, validationfiles_string, testfiles_string],
None: parameter_textinputs }
0 votes
Source: interact.py (MIT License, from nasa)
def show_interact_widget(tpf, notebook_url='localhost:8888',
                         max_cadences=30000,
                         aperture_mask='pipeline',
                         exported_filename=None):
    """Display an interactive Jupyter Notebook widget to inspect the pixel data.

    The widget will show both the lightcurve and pixel data. The pixel data
    supports pixel selection via Bokeh tap and box select tools in an
    interactive javascript user interface.

    Note: at this time, this feature only works inside an active Jupyter
    Notebook, and tends to be too slow when more than ~30,000 cadences
    are contained in the TPF (e.g. short cadence data).

    Parameters
    ----------
    tpf : lightkurve.TargetPixelFile
        Target Pixel File to interact with
    notebook_url : str
        Location of the Jupyter notebook page (default: "localhost:8888")
        When showing Bokeh applications, the Bokeh server must be
        explicitly configured to allow connections originating from
        different URLs. This parameter defaults to the standard notebook
        host and port. If you are running on a different location, you
        will need to supply this value for the application to display
        properly. If no protocol is supplied in the URL, e.g. if it is
        of the form "localhost:8888", then "http" will be used.
    max_cadences : int
        Raise a RuntimeError if the number of cadences shown is larger than
        this value. This limit helps keep browsers from becoming unresponsive.
    aperture_mask : array-like or str
        Initial aperture selection, forwarded to
        ``tpf._parse_aperture_mask`` — NOTE(review): accepted string values
        (e.g. 'pipeline') should be confirmed against that helper.
    exported_filename : str or None
        Name of the FITS file written by the "Save Lightcurve" button.
        A default name derived from the TPF is used when None; a ``.fits``
        suffix is appended if missing.
    """
    try:
        import bokeh
        if bokeh.__version__[0] == '0':
            warnings.warn("interact() requires Bokeh version 1.0 or later",
                          LightkurveWarning)
    except ImportError:
        # Bokeh is an optional dependency: bail out gracefully if absent.
        log.error("The interact() tool requires the `bokeh` Python package; "
                  "you can install bokeh using e.g. `conda install bokeh`.")
        return None
    aperture_mask = tpf._parse_aperture_mask(aperture_mask)
    if exported_filename is None:
        exported_filename = make_default_export_name(tpf)
    try:
        exported_filename = str(exported_filename)
    except Exception:  # narrowed from a bare `except:`; still logs and re-raises
        log.error('Invalid input filename type for interact()')
        raise
    if '.fits' not in exported_filename.lower():
        exported_filename += '.fits'

    lc = tpf.to_lightcurve(aperture_mask=aperture_mask)
    npix = tpf.flux[0, :, :].size
    pixel_index_array = np.arange(0, npix, 1).reshape(tpf.flux[0].shape)

    # Bokeh cannot handle many data points
    # https://github.com/bokeh/bokeh/issues/7490
    if len(lc.cadenceno) > max_cadences:
        msg = 'Interact cannot display more than {} cadences.'
        raise RuntimeError(msg.format(max_cadences))

    def create_interact_ui(doc):
        # The data source includes metadata for hover-over tooltips
        lc_source = prepare_lightcurve_datasource(lc)
        tpf_source = prepare_tpf_datasource(tpf, aperture_mask)
        # Create the lightcurve figure and its vertical marker
        fig_lc, vertical_line = make_lightcurve_figure_elements(lc, lc_source)
        # Create the TPF figure and its stretch slider
        pedestal = np.nanmin(tpf.flux)
        fig_tpf, stretch_slider = make_tpf_figure_elements(tpf, tpf_source,
                                                           pedestal=pedestal,
                                                           fiducial_frame=0)
        # Helper lookup table which maps cadence number onto flux array index.
        tpf_index_lookup = {cad: idx for idx, cad in enumerate(tpf.cadenceno)}
        # Interactive slider widgets and buttons to select the cadence number
        cadence_slider = Slider(start=np.min(tpf.cadenceno),
                                end=np.max(tpf.cadenceno),
                                value=np.min(tpf.cadenceno),
                                step=1,
                                title="Cadence Number",
                                width=490)
        r_button = Button(label=">", button_type="default", width=30)
        l_button = Button(label="<", button_type="default", width=30)
        export_button = Button(label="Save Lightcurve",
                               button_type="success", width=120)
        message_on_save = Div(text=' ', width=600, height=15)

        # Callbacks
        def update_upon_pixel_selection(attr, old, new):
            """Callback to take action when pixels are selected."""
            # Check if a selection was "re-clicked", then de-select
            if (sorted(old) == sorted(new)) and (new != []):
                # Trigger recursion
                tpf_source.selected.indices = new[1:]

            if new != []:
                selected_indices = np.array(new)
                selected_mask = np.isin(pixel_index_array, selected_indices)
                lc_new = tpf.to_lightcurve(aperture_mask=selected_mask)
                lc_source.data['flux'] = lc_new.flux
                ylims = get_lightcurve_y_limits(lc_source)
                fig_lc.y_range.start = ylims[0]
                fig_lc.y_range.end = ylims[1]
            else:
                # Nothing selected: flat-line the lightcurve panel
                lc_source.data['flux'] = lc.flux * 0.0
                fig_lc.y_range.start = -1
                fig_lc.y_range.end = 1

            message_on_save.text = " "
            export_button.button_type = "success"

        def update_upon_cadence_change(attr, old, new):
            """Callback to take action when cadence slider changes"""
            if new in tpf.cadenceno:
                frameno = tpf_index_lookup[new]
                fig_tpf.select('tpfimg')[0].data_source.data['image'] = \
                    [tpf.flux[frameno, :, :] - pedestal]
                vertical_line.update(location=tpf.time[frameno])
            else:
                # Cadence not present in the TPF: blank out the image
                fig_tpf.select('tpfimg')[0].data_source.data['image'] = \
                    [tpf.flux[0, :, :] * np.nan]
            lc_source.selected.indices = []

        def go_right_by_one():
            """Step forward in time by a single cadence"""
            existing_value = cadence_slider.value
            if existing_value < np.max(tpf.cadenceno):
                cadence_slider.value = existing_value + 1

        def go_left_by_one():
            """Step back in time by a single cadence"""
            existing_value = cadence_slider.value
            if existing_value > np.min(tpf.cadenceno):
                cadence_slider.value = existing_value - 1

        def save_lightcurve():
            """Save the lightcurve as a fits file with mask as HDU extension"""
            if tpf_source.selected.indices != []:
                selected_indices = np.array(tpf_source.selected.indices)
                selected_mask = np.isin(pixel_index_array, selected_indices)
                lc_new = tpf.to_lightcurve(aperture_mask=selected_mask)
                # `astype(int)` instead of the removed NumPy alias `np.int`
                lc_new.to_fits(exported_filename, overwrite=True,
                               flux_column_name='SAP_FLUX',
                               aperture_mask=selected_mask.astype(int),
                               SOURCE='lightkurve interact',
                               NOTE='custom mask',
                               MASKNPIX=np.nansum(selected_mask))
                if message_on_save.text == " ":
                    text = '<font color="black"><i>Saved file {}</i></font>'
                    message_on_save.text = text.format(exported_filename)
                    export_button.button_type = "success"
                else:
                    # Re-saving the same selection: show the message dimmed
                    text = '<font color="gray"><i>Saved file {}</i></font>'
                    message_on_save.text = text.format(exported_filename)
            else:
                text = '<font color="gray"><i>No pixels selected, no mask saved</i></font>'
                export_button.button_type = "warning"
                message_on_save.text = text

        def jump_to_lightcurve_position(attr, old, new):
            # Clicking a lightcurve point moves the cadence slider there
            if new != []:
                cadence_slider.value = lc.cadenceno[new[0]]

        # Map changes to callbacks
        r_button.on_click(go_right_by_one)
        l_button.on_click(go_left_by_one)
        tpf_source.selected.on_change('indices', update_upon_pixel_selection)
        lc_source.selected.on_change('indices', jump_to_lightcurve_position)
        export_button.on_click(save_lightcurve)
        cadence_slider.on_change('value', update_upon_cadence_change)

        # Layout all of the plots
        sp1, sp2, sp3, sp4 = (Spacer(width=15), Spacer(width=30),
                              Spacer(width=80), Spacer(width=60))
        widgets_and_figures = layout([fig_lc, fig_tpf],
                                     [l_button, sp1, r_button, sp2,
                                      cadence_slider, sp3, stretch_slider],
                                     [export_button, sp4, message_on_save])
        doc.add_root(widgets_and_figures)

    output_notebook(verbose=False, hide_banner=True)
    return show(create_interact_ui, notebook_url=notebook_url)
def show_skyview_widget(tpf, notebook_url='localhost:8888', magnitude_limit=18):
0 votes
Source: uikeras.py (Apache License 2.0, from OpendTect)
def getUiPars(uipars=None):
    """Build (or refresh) the Bokeh UI controls for the Keras training parameters.

    Parameters
    ----------
    uipars : dict or None
        An existing ``{'grp': ..., 'uiobjects': ...}`` group returned by a
        previous call. When None, all widgets are created; otherwise the
        existing widgets are re-populated with the default values.

    Returns
    -------
    dict
        ``{'grp': <bokeh column>, 'uiobjects': {name: widget}}``.
    """
    pars = keras_dict  # renamed: the original local shadowed the builtin `dict`
    learntype = info[dgbkeys.learntypedictstr]
    # The input shape entry is either a plain int (1D) or a sequence of dims.
    if isinstance(info[dgbkeys.inpshapedictstr], int):
        ndim = 1
    else:
        ndim = len(info[dgbkeys.inpshapedictstr])
    modeltypes = getUiModelTypes(learntype, info[dgbkeys.classdictstr], ndim)
    if not modeltypes:
        # No applicable model: show an informational message instead of controls.
        divfld = Div(text="""No Keras models found for this workflow.""")
        parsgrp = column(divfld)
        return {'grp': parsgrp,
                'uiobjects': {
                    'divfld': divfld
                }
               }
    defmodel = modeltypes[0]
    defbatchsz = keras_dict['batch']
    estimatedsz = info[dgbkeys.estimatedsizedictstr]
    if kc.UserModel.isImg2Img(defmodel):
        # Image-to-image models need a smaller batch size by default.
        defbatchsz = 4
    uiobjs = {}
    if not uipars:
        uiobjs = {
            'modeltypfld': Select(title='Type', options=modeltypes),
            'batchfld': Select(title='Batch Size', options=cudacores),
            'epochfld': Slider(start=1, end=1000, title='Epochs'),
            'patiencefld': Slider(start=1, end=100, title='Patience'),
            'lrfld': Slider(start=-10, end=-1, step=1, title='Initial Learning Rate (1e)'),
            'edfld': Slider(start=1, end=100, title='Epoch drop (%)', step=0.1),
            'sizefld': None,
            'dodecimatefld': CheckboxGroup(labels=['Decimate input']),
            'chunkfld': Slider(start=1, end=100, title='Number of Chunks'),
            'rundevicefld': CheckboxGroup(labels=['Train on GPU'], visible=can_use_gpu())
        }
        if estimatedsz:
            uiobjs['sizefld'] = Div(text=getSizeStr(estimatedsz))
        uiobjs['dodecimatefld'].on_click(partial(decimateCB,
                                                 chunkfld=uiobjs['chunkfld'],
                                                 sizefld=uiobjs['sizefld']))
        try:
            uiobjs['chunkfld'].on_change('value_throttled',
                                         partial(chunkfldCB, uiobjs['sizefld']))
        except AttributeError:
            # 'value_throttled' only exists in newer Bokeh versions.
            log_msg('[WARNING] Bokeh version too old, consider updating it.')
        parsgrp = column(*list(uiobjs.values()))
        uipars = {'grp': parsgrp, 'uiobjects': uiobjs}
    else:
        uiobjs = uipars['uiobjects']
    # Populate (or reset) the widget values from the defaults.
    uiobjs['modeltypfld'].value = defmodel
    uiobjs['batchfld'].value = str(defbatchsz)
    uiobjs['epochfld'].value = pars['epoch']
    uiobjs['patiencefld'].value = pars['patience']
    uiobjs['lrfld'].value = np.log10(pars['learnrate'])
    uiobjs['edfld'].value = 100 * pars['epochdrop'] / uiobjs['epochfld'].value
    if estimatedsz:
        uiobjs['sizefld'].text = getSizeStr(estimatedsz)
    uiobjs['dodecimatefld'].active = []
    uiobjs['chunkfld'].value = pars['nbchunk']
    uiobjs['rundevicefld'].active = [0]
    decimateCB(uiobjs['dodecimatefld'].active, uiobjs['chunkfld'], uiobjs['sizefld'])
    return uipars
def chunkfldCB(sizefld,attr,old,new):
0 votes
Source: uitorch.py (Apache License 2.0, from OpendTect)
def getUiPars(uipars=None):
    """Create or refresh the Bokeh widgets for the PyTorch training parameters.

    Parameters
    ----------
    uipars : dict or None
        A previously built ``{'grp': ..., 'uiobjects': ...}`` mapping. When
        None the widgets are created from scratch; otherwise the existing
        widgets are reset to the defaults taken from ``torch_dict``.

    Returns
    -------
    dict
        ``{'grp': <bokeh column layout>, 'uiobjects': {name: widget}}``.
    """
    defaults = torch_dict  # renamed: the original local shadowed the builtin ``dict``
    learntype = info[dgbkeys.learntypedictstr]
    # Input rank (1 for a scalar input shape) selects which model
    # architectures are applicable to this workflow.
    if isinstance(info[dgbkeys.inpshapedictstr], int):
        ndim = 1
    else:
        ndim = len(info[dgbkeys.inpshapedictstr])
    modeltypes = getUiModelTypes(learntype, info[dgbkeys.classdictstr], ndim)
    if len(modeltypes) == 0:
        # Nothing to configure: show an informative message instead of widgets.
        divfld = Div(text="""No PyTorch models found for this workflow.""")
        parsgrp = column(divfld)
        return {'grp': parsgrp,
                'uiobjects': {
                    'divfld': divfld
                }
               }
    defbatchsz = torch_dict['batch_size']
    defmodel = modeltypes[0]
    estimatedsz = info[dgbkeys.estimatedsizedictstr]
    if tc.TorchUserModel.isImg2Img(defmodel):
        defbatchsz = 4  # image-to-image models get a smaller default batch size
    uiobjs = {}
    if not uipars:
        uiobjs = {
            'modeltypfld': Select(title='Type', options=modeltypes),
            'batchfld': Select(title='Batch Size', options=cudacores),
            'epochfld': Slider(start=1, end=1000, title='Epochs'),
            'epochdrop': Slider(start=1, end=100, title='Early Stopping'),
            'lrfld': Slider(start=-10, end=-1, step=1, title='Initial Learning Rate (1e)'),
        }
        if estimatedsz:
            uiobjs['sizefld'] = Div(text=getSizeStr(estimatedsz))
        parsgrp = column(*list(uiobjs.values()))
        uipars = {'grp': parsgrp, 'uiobjects': uiobjs}
    else:
        uiobjs = uipars['uiobjects']
    # Reset widget values to the stored defaults (applies to both the
    # freshly created and the reused widget sets).
    uiobjs['modeltypfld'].value = defmodel
    uiobjs['batchfld'].value = str(defbatchsz)
    uiobjs['epochfld'].value = defaults['epochs']
    uiobjs['lrfld'].value = np.log10(defaults['learnrate'])
    uiobjs['epochdrop'].value = defaults['epochdrop']
    if estimatedsz:
        uiobjs['sizefld'].text = getSizeStr(estimatedsz)
    return uipars
def getUiParams( torchpars ):
0
Source: dashboard.py
with MIT License
from pfnet-research
def __call__(self, doc):
    # type: (bokeh.document.Document) -> None
    """Populate *doc* with the dashboard widgets and, when requested,
    launch the daemon thread that keeps them up to date."""
    self.doc = doc
    self.doc.title = 'Optuna Dashboard (Beta)'

    trials = self.study.trials  # type: Optional[List[optuna.structs.FrozenTrial]]
    self.current_trials = trials
    self.new_trials = None  # type: Optional[List[optuna.structs.FrozenTrial]]

    # Widgets render the snapshot of trials taken above.
    self.complete_trials_widget = _CompleteTrialsWidget(trials)
    self.all_trials_widget = _AllTrialsWidget(trials)

    header_div = bokeh.models.widgets.Div(
        text=_HEADER_FORMAT.format(study_name=self.study.study_name))
    rows = [
        [header_div],
        [self.complete_trials_widget.create_figure()],
        [self.all_trials_widget.create_table()],
    ]
    self.doc.add_root(bokeh.layouts.layout(rows, sizing_mode='scale_width'))

    if self.launch_update_thread:
        updater = threading.Thread(target=self.thread_loop)
        updater.daemon = True  # do not block interpreter shutdown
        updater.start()
def thread_loop(self):
0
Source: outputs.py
with MIT License
from PSLmodels
def create_layout(data, start_year, end_year):
    """
    Function for creating a bokeh layout with all of the data tables.

    Parameters
    ----------
    data : dict
        Must contain "aggr_outputs" (current/reform/change entries, each with
        a "title" and pre-rendered HTML in "renderable") and "tbl_outputs"
        (nested table-type -> sub-table -> grouping -> year entries).
    start_year, end_year : int
        Inclusive range of years to build per-year table tabs for.

    Returns
    -------
    tuple of dict
        (agg_outputs, table_outputs), each with "media_type", "title" and
        serialized bokeh "data", ready for COMP.
    """
    agg_data = data["aggr_outputs"]
    # create aggregate table
    # NOTE: the HTML tags below were garbled (" < h3>") in the scraped copy;
    # restored to valid markup so the Divs render headings correctly.
    clt_title = f"<h3>{agg_data['current']['title']}</h3>"
    current_law_table = Div(text=clt_title + agg_data["current"]["renderable"],
                            width=1000)
    rt_title = f"<h3>{agg_data['reform']['title']}</h3>"
    reform_table = Div(text=rt_title + agg_data["reform"]["renderable"],
                       width=1000)
    ct_title = f"<h3>{agg_data['change']['title']}</h3>"
    change_table = Div(text=ct_title + agg_data["change"]["renderable"],
                       width=1000)
    current_tab = Panel(child=current_law_table,
                        title="Current Law")
    reform_tab = Panel(child=reform_table,
                       title="Reform")
    change_tab = Panel(child=change_table,
                       title="Change")
    agg_tabs = Tabs(tabs=[current_tab, reform_tab, change_tab])
    # Human-readable tab labels for the table/sub-table keys.
    key_map = {
        "current": "Current",
        "reform": "Reform",
        "ind_income": "Income Tax",
        "payroll": "Payroll Tax",
        "combined": "Combined Tax",
        "dist": "Distribution Table",
        "diff": "Differences Table"
    }
    tbl_data = data["tbl_outputs"]
    yr_panels = []
    # loop through each year (start - end year)
    for yr in range(start_year, end_year + 1):
        # loop through each table type: dist, diff
        tbl_panels = []
        for tbl_type, content in tbl_data.items():
            # loop through sub tables: current, reform for dist
            # ind_income, payroll, combined for diff
            content_panels = []
            for key, value in content.items():
                # loop through each grouping: bins, deciles
                grp_panels = []
                for grp, grp_data in value.items():
                    _data = grp_data[yr]
                    # create a data table for this tab
                    title = f"<h3>{_data['title']}</h3>"
                    note = ("<p><i>All monetary totals are in billions. "
                            "All counts are in millions. "
                            "Averages and shares are as shown.</i></p>")
                    tbl = Div(text=title + note + _data["renderable"],
                              width=1000)
                    grp_panel = Panel(child=tbl, title=grp.title())
                    grp_panels.append(grp_panel)
                grp_tab = Tabs(tabs=grp_panels)
                # panel for the sub tables
                content_panel = Panel(child=grp_tab, title=key_map[key])
                content_panels.append(content_panel)
            content_tab = Tabs(tabs=content_panels)
            # panel for the table types
            tbl_panel = Panel(child=content_tab,
                              title=key_map[tbl_type])
            tbl_panels.append(tbl_panel)
        type_tab = Tabs(tabs=tbl_panels)
        # panel for the year
        yr_panel = Panel(child=type_tab, title=str(yr))
        yr_panels.append(yr_panel)
    yr_tabs = Tabs(tabs=yr_panels)
    agg_layout = layout(
        children=[agg_tabs]
    )
    table_layout = layout(
        children=[yr_tabs]
    )
    # Serialize the layouts to embeddable JSON for the COMP front end.
    agg_data = json_item(agg_layout)
    table_data = json_item(table_layout)
    # return a dictionary of outputs ready for COMP
    agg_outputs = {
        "media_type": "bokeh",
        "title": "Aggregate Results",
        "data": agg_data,
    }
    table_outputs = {
        "media_type": "bokeh",
        "title": "Tables",
        "data": table_data,
    }
    # return js, div, cdn_js, cdn_css, widget_js, widget_css
    return agg_outputs, table_outputs