Here are examples of the Python API bokeh.models.widgets.DataTable, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
14 Examples
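All of the snippets below share one core pattern: wrap the data in a ColumnDataSource, describe each column with a TableColumn, and pass both to DataTable. A minimal, self-contained sketch of that pattern (the data and sizes are illustrative, not taken from any project below):

from bokeh.io import show
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn

# Any dict of equal-length lists works as table data.
source = ColumnDataSource({"name": ["a", "b", "c"], "value": [1, 2, 3]})
columns = [
    TableColumn(field="name", title="Name"),
    TableColumn(field="value", title="Value"),
]
data_table = DataTable(source=source, columns=columns, width=400, height=150)
show(data_table)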
3 votes
View Source File : NeoPredViz.py
License : GNU Lesser General Public License v3.0
Project Creator : MathOnco
def EpitopeTable(self):
Columns = [TableColumn(field=Ci, title=Ci) for Ci in self.neosData.columns] # bokeh columns
data_table = DataTable(columns=Columns, source=ColumnDataSource(self.neosData), width=1200, height=200) # bokeh table
return data_table
def SummaryTable(self):
3 votes
View Source File : NeoPredViz.py
License : GNU Lesser General Public License v3.0
Project Creator : MathOnco
def SummaryTable(self):
Columns = [TableColumn(field=Ci, title=Ci) for Ci in self.summaryData.columns] # bokeh columns
data_table = DataTable(columns=Columns, source=ColumnDataSource(self.summaryData), width=1200, height=200) # bokeh table
return data_table
def SummaryBarChart(self):
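Both NeoPredViz methods apply the same one-liner: build a TableColumn per DataFrame column and hand the whole frame to ColumnDataSource. A hedged sketch of the imports and helper these snippets assume (dataframe_table is a hypothetical name; self.neosData and self.summaryData are ordinary pandas DataFrames):

import pandas as pd
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn

def dataframe_table(df: pd.DataFrame) -> DataTable:
    # One TableColumn per DataFrame column, titled by its name.
    columns = [TableColumn(field=c, title=c) for c in df.columns]
    return DataTable(columns=columns, source=ColumnDataSource(df), width=1200, height=200)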
3 votes
View Source File : bokeh_utils.py
License : MIT License
Project Creator : ylabbe
def convert_df(df):
columns = []
for column in df.columns:
if df.dtypes[column].kind == 'f':
formatter = NumberFormatter(format='0.000')
else:
formatter = None
table_col = TableColumn(field=column, title=column, formatter=formatter)
columns.append(table_col)
data_table = DataTable(columns=columns, source=ColumnDataSource(df), height=200)
return data_table
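convert_df gives float columns a three-decimal NumberFormatter and leaves every other dtype on the default formatter. A brief usage sketch, assuming the imports the snippet itself needs (TableColumn, NumberFormatter, DataTable, ColumnDataSource from bokeh.models) are in scope, with an illustrative DataFrame:

import pandas as pd
from bokeh.io import show

df = pd.DataFrame({"label": ["a", "b"], "score": [0.1234, 0.5678]})
show(convert_df(df))  # "score" renders as 0.123 / 0.568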
0 votes
View Source File : plotting.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : FeatureLabs
def dendrogram(D, figargs=None):
'''Creates a dendrogram plot.
This plot can show the full structure of a given dendrogram.
Args:
D (henchman.selection.Dendrogram): An initialized dendrogram object
Examples:
>>> from henchman.selection import Dendrogram
>>> from henchman.plotting import show
>>> import henchman.plotting as hplot
>>> D = Dendrogram(X)
>>> plot = hplot.dendrogram(D)
>>> show(plot)
'''
if figargs is None:
return lambda figargs: dendrogram(D, figargs=figargs)
G = nx.Graph()
vertices_source = ColumnDataSource(
pd.DataFrame({'index': D.columns.keys(),
'desc': list(D.columns.values())}))
edges_source = ColumnDataSource(
pd.DataFrame(D.edges[0]).rename(
columns={1: 'end', 0: 'start'}))
step_source = ColumnDataSource(
pd.DataFrame({'step': [0],
'thresh': [D.threshlist[0]],
'components': [len(D.graphs[0])]}))
G.add_nodes_from([str(x) for x in vertices_source.data['index']])
G.add_edges_from(zip(
[str(x) for x in edges_source.data['start']],
[str(x) for x in edges_source.data['end']]))
graph_renderer = from_networkx(G, nx.circular_layout,
scale=1, center=(0, 0))
graph_renderer.node_renderer.data_source = vertices_source
graph_renderer.node_renderer.view = CDSView(source=vertices_source)
graph_renderer.edge_renderer.data_source = edges_source
graph_renderer.edge_renderer.view = CDSView(source=edges_source)
plot = Plot(plot_width=400, plot_height=400,
x_range=Range1d(-1.1, 1.1),
y_range=Range1d(-1.1, 1.1))
plot.title.text = "Feature Connectivity"
graph_renderer.node_renderer.glyph = Circle(
size=5, fill_color=Spectral4[0])
graph_renderer.node_renderer.selection_glyph = Circle(
size=15, fill_color=Spectral4[2])
graph_renderer.edge_renderer.data_source = edges_source
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#CCCCCC",
line_alpha=0.6,
line_width=.5)
graph_renderer.edge_renderer.selection_glyph = MultiLine(
line_color=Spectral4[2],
line_width=3)
graph_renderer.node_renderer.hover_glyph = Circle(
size=5,
fill_color=Spectral4[1])
graph_renderer.selection_policy = NodesAndLinkedEdges()
graph_renderer.inspection_policy = NodesAndLinkedEdges()
plot.renderers.append(graph_renderer)
plot.add_tools(
HoverTool(tooltips=[("feature", "@desc"),
("index", "@index"), ]),
TapTool(),
BoxZoomTool(),
SaveTool(),
ResetTool())
plot = _modify_plot(plot, figargs)
if figargs['static']:
return plot
def modify_doc(doc, D, figargs):
data_table = DataTable(source=step_source,
columns=[TableColumn(field='step',
title='Step'),
TableColumn(field='thresh',
title='Thresh'),
TableColumn(field='components',
title='Components')],
height=50, width=400)
def callback(attr, old, new):
try:
edges = D.edges[slider.value]
edges_source.data = ColumnDataSource(
pd.DataFrame(edges).rename(columns={1: 'end',
0: 'start'})).data
step_source.data = ColumnDataSource(
{'step': [slider.value],
'thresh': [D.threshlist[slider.value]],
'components': [len(D.graphs[slider.value])]}).data
except Exception as e:
print(e)
slider = Slider(start=0,
end=(len(D.edges) - 1),
value=0,
step=1,
title="Step")
slider.on_change('value', callback)
doc.add_root(column(slider, data_table, plot))
return lambda doc: modify_doc(doc, D, figargs)
def f1(X, y, model, n_precs=1000, n_splits=1, figargs=None):
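Two idioms in dendrogram are worth noting: returning a lambda when figargs is None (so the plot composes with a figure-argument pipeline), and the slider callback, which never mutates a ColumnDataSource in place but assigns a complete new .data dict so BokehJS registers the change. A minimal standalone sketch of the update idiom, assuming it runs inside a Bokeh server app (data illustrative):

from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, Slider, TableColumn

source = ColumnDataSource({"step": [0], "value": [0]})
table = DataTable(source=source,
                  columns=[TableColumn(field="step", title="Step"),
                           TableColumn(field="value", title="Value")],
                  width=400, height=50)

def callback(attr, old, new):
    # Replace .data wholesale; partial in-place edits are not synced to the browser.
    source.data = {"step": [new], "value": [10 * new]}

slider = Slider(start=0, end=9, value=0, step=1, title="Step")
slider.on_change("value", callback)
curdoc().add_root(column(slider, table))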
0 votes
View Source File : laptable.py
License : GNU General Public License v3.0
Project Creator : gotzl
def create():
columns = [
TableColumn(field="name", title="File name"),
TableColumn(field="datetime", title="Datetime"),
TableColumn(field="driver", title="Driver"),
TableColumn(field="track", title="Track"),
TableColumn(field="car", title="Car"),
TableColumn(field="lap", title="Lap"),
TableColumn(field="time", title="Lap time"),
]
source = ColumnDataSource()
filter_source = ColumnDataSource()
data_table = DataTable(source=filter_source, columns=columns, width=800)
### https://gist.github.com/dennisobrien/450d7da20daaba6d39d0
# callback code to be used by all the filter widgets
combined_callback_code = """
var data = {};
var original_data = source.data;
var track = track_select_obj.value;
var car = car_select_obj.value;
for (var key in original_data) {
data[key] = [];
for (var i = 0; i < original_data['track'].length; ++i) {
if ((track === "ALL" || original_data['track'][i] === track) &&
(car === "ALL" || original_data['car'][i] === car)) {
data[key].push(original_data[key][i]);
}
}
}
filter_source.data = data;
filter_source.change.emit();
target_obj.change.emit();
"""
# define the filter widgets
track_select = Select(title="Track:", value='ALL', options=['ALL'])
car_select = Select(title="Car:", value='ALL', options=['ALL'])
# define the callback object
generic_callback = CustomJS(
args=dict(source=source,
filter_source=filter_source,
track_select_obj=track_select,
car_select_obj=car_select,
target_obj=data_table),
code=combined_callback_code
)
# connect the callbacks to the filter widgets
track_select.js_on_change('value', generic_callback)
car_select.js_on_change('value', generic_callback)
filters = row(track_select, car_select)
######
return filters, data_table, source, filter_source, track_select, car_select
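create() hands back both sources empty; the caller is expected to fill source and filter_source with identical columns before the CustomJS filter does anything useful. A hedged sketch of that wiring (the row values are illustrative):

filters, data_table, source, filter_source, track_select, car_select = create()
data = {
    "name": ["lap_01"], "datetime": ["2020-01-01 10:00"], "driver": ["gotzl"],
    "track": ["spa"], "car": ["gt3"], "lap": [1], "time": ["2:19.7"],
}
source.data = dict(data)         # master copy the JS callback reads
filter_source.data = dict(data)  # filtered view the DataTable displays
track_select.options = ["ALL", "spa"]
car_select.options = ["ALL", "gt3"]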
0 votes
View Source File : optbrowser.py
License : GNU General Public License v3.0
Project Creator : happydasch
def _build_optresult_selector(self, optresults):
# 1. build a dict with all params and all user columns
data_dict = defaultdict(list)
for optres in optresults:
for param_name, _ in optres[0].params._getitems():
param_val = optres[0].params._get(param_name)
data_dict[param_name].append(param_val)
for usercol_label, usercol_fnc in self._usercolumns.items():
data_dict[usercol_label].append(usercol_fnc(optres))
# 2. build a pandas DataFrame
df = DataFrame(data_dict)
# 3. now sort and limit result
if self._sortcolumn is not None:
df = df.sort_values(by=[self._sortcolumn], ascending=self._sortasc)
if self._num_result_limit is not None:
df = df.head(self._num_result_limit)
# 4. build column info for Bokeh table
tab_columns = []
for colname in data_dict.keys():
formatter = NumberFormatter(format='0.000')
if (len(data_dict[colname]) > 0
and isinstance(data_dict[colname][0], int)):
formatter = StringFormatter()
tab_columns.append(
TableColumn(
field=colname,
title=f'{colname}',
sortable=False,
formatter=formatter))
cds = ColumnDataSource(df)
selector = DataTable(
source=cds,
columns=tab_columns,
height=150, # fixed height for selector
width=0, # set width to 0 so there is no min_width
sizing_mode='stretch_width',
fit_columns=True)
return selector, cds
def build_optresult_model(self, _=None):
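Two details above are easy to miss: the formatter choice is driven by the first value of each column (ints fall back to StringFormatter, everything else keeps the three-decimal NumberFormatter), and width=0 combined with sizing_mode='stretch_width' removes the implicit minimum width so the selector tracks its parent. A minimal sketch of that sizing idiom (data illustrative):

from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn

selector = DataTable(
    source=ColumnDataSource({"x": [1.0, 2.0]}),
    columns=[TableColumn(field="x", title="x")],
    height=150,
    width=0,                      # drop the implicit min_width ...
    sizing_mode="stretch_width",  # ... so the table follows its container
)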
0 votes
View Source File : ui.py
License : Apache License 2.0
Project Creator : IntelLabs
def _create_ui_components() -> (Figure, ColumnDataSource): # pylint: disable=too-many-statements
global asp_table_source, asp_filter_src, op_table_source, op_filter_src
global stats, aspects, tabs, lexicons_dropdown
stats = pd.DataFrame(columns=["Quantity", "Score"])
aspects = pd.Series([])
def new_col_data_src():
return ColumnDataSource({"file_contents": [], "file_name": []})
large_text = HTMLTemplateFormatter(template="""<div><%= value %></div>""")
def data_column(title):
return TableColumn(
field=title, title='<span class="header">' + title + "</span>", formatter=large_text
)
asp_table_columns = [
data_column("Term"),
data_column("Alias1"),
data_column("Alias2"),
data_column("Alias3"),
]
op_table_columns = [data_column("Term"), data_column("Score"), data_column("Polarity")]
asp_table_source = empty_table("Term", "Alias1", "Alias2", "Alias3")
asp_filter_src = empty_table("Term", "Alias1", "Alias2", "Alias3")
asp_src = new_col_data_src()
op_table_source = empty_table("Term", "Score", "Polarity", "Polarity")
op_filter_src = empty_table("Term", "Score", "Polarity", "Polarity")
op_src = new_col_data_src()
asp_table = DataTable(
source=asp_table_source,
selectable="checkbox",
columns=asp_table_columns,
editable=True,
width=600,
height=500,
)
op_table = DataTable(
source=op_table_source,
selectable="checkbox",
columns=op_table_columns,
editable=True,
width=600,
height=500,
)
asp_examples_box = _create_examples_table()
op_examples_box = _create_examples_table()
asp_layout = layout([[asp_table, asp_examples_box]])
op_layout = layout([[op_table, op_examples_box]])
asp_tab = Panel(child=asp_layout, title="Aspect Lexicon")
op_tab = Panel(child=op_layout, title="Opinion Lexicon")
tabs = Tabs(tabs=[asp_tab, op_tab], width=700, css_classes=["mytab"])
lexicons_menu = [("Open", "open"), ("Save", "save")]
lexicons_dropdown = Dropdown(
label="Edit Lexicons",
button_type="success",
menu=lexicons_menu,
width=140,
height=31,
css_classes=["mybutton"],
)
train_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
train_dropdown = Dropdown(
label="Extract Lexicons",
button_type="success",
menu=train_menu,
width=162,
height=31,
css_classes=["mybutton"],
)
inference_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
inference_dropdown = Dropdown(
label="Classify",
button_type="success",
menu=inference_menu,
width=140,
height=31,
css_classes=["mybutton"],
)
text_status = TextInput(
value="Select training data", title="Train Run Status:", css_classes=["statusText"]
)
text_status.visible = False
train_src = new_col_data_src()
infer_src = new_col_data_src()
with open(join(SOLUTION_DIR, "dropdown.js")) as f:
args = dict(
clicked=lexicons_dropdown,
asp_filter=asp_filter_src,
op_filter=op_filter_src,
asp_src=asp_src,
op_src=op_src,
tabs=tabs,
text_status=text_status,
train_src=train_src,
infer_src=infer_src,
train_clicked=train_dropdown,
infer_clicked=inference_dropdown,
opinion_lex_generic="",
)
code = f.read()
args["train_clicked"] = train_dropdown
train_dropdown.js_on_change("value", CustomJS(args=args, code=code))
args["train_clicked"] = inference_dropdown
inference_dropdown.js_on_change("value", CustomJS(args=args, code=code))
args["clicked"] = lexicons_dropdown
lexicons_dropdown.js_on_change("value", CustomJS(args=args, code=code))
def update_filter_source(table_source, filter_source):
df = table_source.to_df()
sel_inx = sorted(table_source.selected.indices)
df = df.iloc[sel_inx, 1:]
new_source = ColumnDataSource(df)
filter_source.data = new_source.data
def update_examples_box(data, examples_box, old, new):
examples_box.source.data = {"Examples": []}
unselected = list(set(old) - set(new))
selected = list(set(new) - set(old))
if len(selected) <= 1 and len(unselected) <= 1:
examples_box.source.data.update(
{
"Examples": [str(data.iloc[unselected[0], i]) for i in range(4, 24)]
if len(unselected) != 0
else [str(data.iloc[selected[0], i]) for i in range(4, 24)]
}
)
def asp_selected_change(_, old, new):
global asp_filter_src, asp_table_source, aspects_data
update_filter_source(asp_table_source, asp_filter_src)
update_examples_box(aspects_data, asp_examples_box, old, new)
def op_selected_change(_, old, new):
global op_filter_src, op_table_source, opinions_data
update_filter_source(op_table_source, op_filter_src)
update_examples_box(opinions_data, op_examples_box, old, new)
def read_csv(file_src, headers=False, index_cols=False, readCSV=True):
if readCSV:
raw_contents = file_src.data["file_contents"][0]
if len(raw_contents.split(",")) == 1:
b64_contents = raw_contents
else:
# remove the prefix that JS adds
b64_contents = raw_contents.split(",", 1)[1]
file_contents = base64.b64decode(b64_contents)
return pd.read_csv(
io.BytesIO(file_contents),
encoding="ISO-8859-1",
keep_default_na=False,
na_values={None},
engine="python",
index_col=index_cols,
header=0 if headers else None,
)
return file_src
def read_parsed_files(file_content, file_name):
try:
# remove the prefix that JS adds
b64_contents = file_content.split(",", 1)[1]
file_content = base64.b64decode(b64_contents)
with open(SENTIMENT_OUT / file_name, "w") as json_file:
data_dict = json.loads(file_content.decode("utf-8"))
json.dump(data_dict, json_file)
except Exception as e:
print(str(e))
# pylint: disable=unused-argument
def train_file_callback(attr, old, new):
global train_data
SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)
train = TrainSentiment(parse=True, rerank_model=None)
if len(train_src.data["file_contents"]) == 1:
train_data = read_csv(train_src, index_cols=0)
file_name = train_src.data["file_name"][0]
raw_data_path = SENTIMENT_OUT / file_name
train_data.to_csv(raw_data_path, header=False)
print("Running_SentimentTraining on data...")
train.run(data=raw_data_path)
else:
f_contents = train_src.data["file_contents"]
f_names = train_src.data["file_name"]
raw_data_path = SENTIMENT_OUT / train_src.data["file_name"][0].split("/")[0]
if not os.path.exists(raw_data_path):
os.makedirs(raw_data_path)
for f_content, f_name in zip(f_contents, f_names):
read_parsed_files(f_content, f_name)
print("Running_SentimentTraining on data...")
train.run(parsed_data=raw_data_path)
text_status.value = "Lexicon extraction completed"
with io.open(AcquireTerms.acquired_aspect_terms_path, "r") as fp:
aspect_data_csv = fp.read()
file_data = base64.b64encode(str.encode(aspect_data_csv))
file_data = file_data.decode("utf-8")
asp_src.data = {"file_contents": [file_data], "file_name": ["nameFile.csv"]}
out_path = LEXICONS_OUT / "generated_opinion_lex_reranked.csv"
with io.open(out_path, "r") as fp:
opinion_data_csv = fp.read()
file_data = base64.b64encode(str.encode(opinion_data_csv))
file_data = file_data.decode("utf-8")
op_src.data = {"file_contents": [file_data], "file_name": ["nameFile.csv"]}
def show_analysis() -> None:
global stats, aspects, plot, source, tabs
plot, source = _create_plot()
events_table = _create_events_table()
# pylint: disable=unused-argument
def _events_handler(attr, old, new):
_update_events(events_table, events_type.active)
# Toggle display of in-domain / All aspect mentions
events_type = RadioButtonGroup(labels=["All Events", "In-Domain Events"], active=0)
analysis_layout = layout([[plot], [events_table]])
# events_type display toggle disabled
# analysis_layout = layout([[plot],[events_type],[events_table]])
analysis_tab = Panel(child=analysis_layout, title="Analysis")
tabs.tabs.insert(2, analysis_tab)
tabs.active = 2
events_type.on_change("active", _events_handler)
source.selected.on_change("indices", _events_handler) # pylint: disable=no-member
# pylint: disable=unused-argument
def infer_file_callback(attr, old, new):
# run inference on input data and current aspect/opinion lexicons in view
global infer_data, stats, aspects
SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)
df_aspect = pd.DataFrame.from_dict(asp_filter_src.data)
aspect_col_list = ["Term", "Alias1", "Alias2", "Alias3"]
df_aspect = df_aspect[aspect_col_list]
df_aspect.to_csv(SENTIMENT_OUT / "aspects.csv", index=False, na_rep="NaN")
df_opinion = pd.DataFrame.from_dict(op_filter_src.data)
opinion_col_list = ["Term", "Score", "Polarity", "isAcquired"]
df_opinion = df_opinion[opinion_col_list]
df_opinion.to_csv(SENTIMENT_OUT / "opinions.csv", index=False, na_rep="NaN")
solution = SentimentSolution()
if len(infer_src.data["file_contents"]) == 1:
infer_data = read_csv(infer_src, index_cols=0)
file_name = infer_src.data["file_name"][0]
raw_data_path = SENTIMENT_OUT / file_name
infer_data.to_csv(raw_data_path, header=False)
print("Running_SentimentInference on data...")
text_status.value = "Running classification on data..."
stats = solution.run(
data=raw_data_path,
aspect_lex=SENTIMENT_OUT / "aspects.csv",
opinion_lex=SENTIMENT_OUT / "opinions.csv",
)
else:
f_contents = infer_src.data["file_contents"]
f_names = infer_src.data["file_name"]
raw_data_path = SENTIMENT_OUT / infer_src.data["file_name"][0].split("/")[0]
if not os.path.exists(raw_data_path):
os.makedirs(raw_data_path)
for f_content, f_name in zip(f_contents, f_names):
read_parsed_files(f_content, f_name)
print("Running_SentimentInference on data...")
text_status.value = "Running classification on data..."
stats = solution.run(
parsed_data=raw_data_path,
aspect_lex=SENTIMENT_OUT / "aspects.csv",
opinion_lex=SENTIMENT_OUT / "opinions.csv",
)
aspects = pd.read_csv(SENTIMENT_OUT / "aspects.csv", encoding="utf-8")["Term"]
text_status.value = "Classification completed"
show_analysis()
# pylint: disable=unused-argument
def asp_file_callback(attr, old, new):
global aspects_data, asp_table_source
aspects_data = read_csv(asp_src, headers=True)
# Replaces None values by empty string
aspects_data = aspects_data.fillna("")
new_source = ColumnDataSource(aspects_data)
asp_table_source.data = new_source.data
asp_table_source.selected.indices = list(range(len(aspects_data)))
# pylint: disable=unused-argument
def op_file_callback(attr, old, new):
global opinions_data, op_table_source, lexicons_dropdown, df_opinion_generic
df = read_csv(op_src, headers=True)
# Replaces None values by empty string
df = df.fillna("")
# Placeholder for generic opinion lexicons from the given csv file
df_opinion_generic = df[df["isAcquired"] == "N"]
# Update the argument value for the callback customJS
lexicons_dropdown.js_property_callbacks.get("change:value")[0].args[
"opinion_lex_generic"
] = df_opinion_generic.to_dict(orient="list")
opinions_data = df[df["isAcquired"] == "Y"]
new_source = ColumnDataSource(opinions_data)
op_table_source.data = new_source.data
op_table_source.selected.indices = list(range(len(opinions_data)))
# pylint: disable=unused-argument
def txt_status_callback(attr, old, new):
print("Status: " + new)
text_status.on_change("value", txt_status_callback)
asp_src.on_change("data", asp_file_callback)
# pylint: disable=no-member
asp_table_source.selected.on_change("indices", asp_selected_change)
op_src.on_change("data", op_file_callback)
op_table_source.selected.on_change("indices", op_selected_change) # pylint: disable=no-member
train_src.on_change("data", train_file_callback)
infer_src.on_change("data", infer_file_callback)
return layout([[_create_header(train_dropdown, inference_dropdown, text_status)], [tabs]])
def _create_events_table() -> DataTable:
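update_filter_source captures a reusable idiom: materialize a table's current selection as a second ColumnDataSource by round-tripping through pandas. A standalone sketch under the same assumption the original makes, namely that the source's first column is the DataFrame index (selection_to_source is a hypothetical name):

import pandas as pd
from bokeh.models import ColumnDataSource

def selection_to_source(table_source, filter_source):
    df = table_source.to_df()                       # first column assumed to be 'index'
    rows = sorted(table_source.selected.indices)
    df = df.iloc[rows, 1:]                          # keep selected rows, drop the index column
    filter_source.data = ColumnDataSource(df).data  # assign a whole new .data dict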
0 votes
View Source File : ui.py
License : Apache License 2.0
Project Creator : IntelLabs
def _create_events_table() -> DataTable:
"""Utility function for creating and styling the events table."""
formatter = HTMLTemplateFormatter(
template="""
<style>
.AS_POS {color: #0000FF; font-weight: bold;}
.AS_NEG {color: #0000FF; font-weight: bold;}
.OP_POS {color: #1aaa0d; font-style: bold;}
.OP_NEG {color: #f40000; font-style: bold;}
.NEG_POS {font-style: italic;}
.NEG_NEG {color: #f40000; font-style: italic;}
.INT_POS {color: #1aaa0d; font-style: italic;}
.INT_NEG {color: #f40000; font-style: italic;}
</style>
<%= value %>"""
)
columns = [
TableColumn(field="POS_events", title="Positive Examples", formatter=formatter),
TableColumn(field="NEG_events", title="Negative Examples", formatter=formatter),
]
return DataTable(
source=ColumnDataSource(),
columns=columns,
height=400,
index_position=None,
width=2110,
sortable=False,
editable=True,
reorderable=False,
)
def _create_plot() -> (Figure, ColumnDataSource):
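_create_events_table styles cells with HTMLTemplateFormatter, which renders each cell through an Underscore-style template; the <style> block above tags sentiment spans inside the value by CSS class. A minimal sketch of the same technique with an illustrative template and data:

from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.models.widgets.tables import HTMLTemplateFormatter

# Color the cell text by its value; <%= value %> is the cell's raw content.
formatter = HTMLTemplateFormatter(
    template='<span style="color: <%= value === "bad" ? "red" : "green" %>"><%= value %></span>'
)
table = DataTable(
    source=ColumnDataSource({"status": ["good", "bad"]}),
    columns=[TableColumn(field="status", title="Status", formatter=formatter)],
    width=300, height=120,
)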
0 votes
View Source File : ui.py
License : Apache License 2.0
Project Creator : IntelLabs
def _create_examples_table() -> DataTable:
"""Utility function for creating and styling the events table."""
formatter = HTMLTemplateFormatter(
template="""
<style>
.AS {color: #0000FF; font-weight: bold;}
.OP {color: #0000FF; font-weight: bold;}
</style>
<div><%= value %></div>"""
)
columns = [
TableColumn(
field="Examples", title=' < span class="header">Examples < /span>', formatter=formatter
)
]
empty_source = ColumnDataSource()
empty_source.data = {"Examples": []}
return DataTable(
source=empty_source,
columns=columns,
height=500,
index_position=None,
width=1500,
sortable=False,
editable=False,
reorderable=False,
header_row=True,
)
if __name__ == "__main__":
0 votes
View Source File : edit.py
License : GNU General Public License v3.0
Project Creator : j-brady
def setup_plot(self):
"""" code to setup the bokeh plots """
# make bokeh figure
tools = [
"tap",
"box_zoom",
"lasso_select",
"box_select",
"wheel_zoom",
"pan",
"reset",
]
self.p = figure(
x_range=(self.peakipy_data.f2_ppm_0, self.peakipy_data.f2_ppm_1),
y_range=(self.peakipy_data.f1_ppm_0, self.peakipy_data.f1_ppm_1),
x_axis_label=f"{self.peakipy_data.f2_label} - ppm",
y_axis_label=f"{self.peakipy_data.f1_label} - ppm",
tools=tools,
active_drag="pan",
active_scroll="wheel_zoom",
active_tap=None,
)
if not self.thres:
self.thres = threshold_otsu(self.peakipy_data.data[0])
self.contour_start = self.thres # contour level start value
self.contour_num = 20 # number of contour levels
self.contour_factor = 1.20 # scaling factor between contour levels
cl = self.contour_start * self.contour_factor ** np.arange(self.contour_num)
if len(cl) > 1 and np.min(np.diff(cl)) <= 0.0:
print(f"Setting contour levels to np.abs({cl})")
cl = np.abs(cl)
self.extent = (
self.peakipy_data.f2_ppm_0,
self.peakipy_data.f2_ppm_1,
self.peakipy_data.f1_ppm_0,
self.peakipy_data.f1_ppm_1,
)
self.spec_source = get_contour_data(
self.peakipy_data.data[0], cl, extent=self.extent, cmap=viridis
)
# negative contours
self.spec_source_neg = get_contour_data(
self.peakipy_data.data[0] * -1.0, cl, extent=self.extent, cmap=autumn
)
self.p.multi_line(
xs="xs", ys="ys", line_color="line_color", source=self.spec_source
)
self.p.multi_line(
xs="xs", ys="ys", line_color="line_color", source=self.spec_source_neg
)
# contour_num = Slider(title="contour number", value=20, start=1, end=50,step=1)
# contour_start = Slider(title="contour start", value=100000, start=1000, end=10000000,step=1000)
self.contour_start = TextInput(
value="%.2e" % self.thres, title="Contour level:", width=100
)
# contour_factor = Slider(title="contour factor", value=1.20, start=1., end=2.,step=0.05)
self.contour_start.on_change("value", self.update_contour)
# for w in [contour_num,contour_start,contour_factor]:
# w.on_change("value",update_contour)
# plot mask outlines
el = self.p.ellipse(
x="X_PPM",
y="Y_PPM",
width="X_DIAMETER_PPM",
height="Y_DIAMETER_PPM",
source=self.source,
fill_color="color",
fill_alpha=0.1,
line_dash="dotted",
line_color="red",
)
self.p.add_tools(
HoverTool(
tooltips=[
("Index", "$index"),
("Assignment", "@ASS"),
("CLUSTID", "@CLUSTID"),
("RADII", "@X_RADIUS_PPM{0.000}, @Y_RADIUS_PPM{0.000}"),
(
f"{self.peakipy_data.f2_label},{self.peakipy_data.f1_label}",
"$x{0.000} ppm, $y{0.000} ppm",
),
],
mode="mouse",
# restrict hover to the mask-outline renderer
renderers=[el],
)
)
# p.toolbar.active_scroll = "auto"
# draw border around spectrum area
spec_border_x = [
self.peakipy_data.f2_ppm_min,
self.peakipy_data.f2_ppm_min,
self.peakipy_data.f2_ppm_max,
self.peakipy_data.f2_ppm_max,
self.peakipy_data.f2_ppm_min,
]
spec_border_y = [
self.peakipy_data.f1_ppm_min,
self.peakipy_data.f1_ppm_max,
self.peakipy_data.f1_ppm_max,
self.peakipy_data.f1_ppm_min,
self.peakipy_data.f1_ppm_min,
]
self.p.line(
spec_border_x,
spec_border_y,
line_width=1,
line_color="black",
line_dash="dotted",
line_alpha=0.5,
)
self.p.circle(x="X_PPM", y="Y_PPM", source=self.source, color="color")
# plot cluster numbers
self.p.text(
x="X_PPM",
y="Y_PPM",
text="CLUSTID",
text_color="color",
source=self.source,
text_font_size="8pt",
text_font_style="bold",
)
self.p.on_event(DoubleTap, self.peak_pick_callback)
self.pos_neg_contour_dic = {0: "pos/neg", 1: "pos", 2: "neg"}
self.pos_neg_contour_radiobutton = RadioButtonGroup(
labels=[
self.pos_neg_contour_dic[i] for i in self.pos_neg_contour_dic.keys()
],
active=0,
)
self.pos_neg_contour_radiobutton.on_change("active", self.update_contour)
# call fit_peaks
self.fit_button = Button(label="Fit selected cluster", button_type="primary")
# lineshape selection
self.lineshapes = {
0: "PV",
1: "V",
2: "G",
3: "L",
4: "PV_PV",
# 5: "PV_L",
# 6: "PV_G",
# 7: "G_L",
}
self.radio_button_group = RadioButtonGroup(
labels=[self.lineshapes[i] for i in self.lineshapes.keys()], active=0
)
self.ls_div = Div(
text="""Choose lineshape you wish to fit. This can be Voigt (V), pseudo-Voigt (PV), Gaussian (G), Lorentzian (L).
PV_PV fits a PV lineshape with independent "fraction" parameters for the direct and indirect dimensions"""
)
self.clust_div = Div(
text="""If you want to adjust how the peaks are automatically clustered then try changing the
width/diameter/height (integer values) of the structuring element used during the binary dilation step
(you can also remove it by selecting 'None'). Increasing the size of the structuring element will cause
peaks to be more readily incorporated into clusters. Be sure to save your peak list before doing this as
any manual edits will be lost."""
)
self.intro_div = Div(
text=""" < h2>peakipy - interactive fit adjustment < /h2>
"""
)
self.doc_link = Div(
text=" < h3> < a href='https://j-brady.github.io/peakipy/build/usage/instructions.html', target='_blank'> ℹ️ click here for documentation < /a> < /h3>"
)
self.fit_reports = ""
self.fit_reports_div = Div(text="", height=400, style={"overflow": "scroll"})
# Plane selection
self.select_planes_list = [
f"{i}"
for i in range(self.peakipy_data.data.shape[self.peakipy_data.planes])
]
self.select_plane = Select(
title="Select plane:",
value=self.select_planes_list[0],
options=self.select_planes_list,
)
self.select_planes_dic = {
f"{i}": i
for i in range(self.peakipy_data.data.shape[self.peakipy_data.planes])
}
self.select_plane.on_change("value", self.update_contour)
self.checkbox_group = CheckboxGroup(
labels=["fit current plane only"], active=[]
)
# not sure this is needed
selected_df = self.peakipy_data.df.copy()
self.fit_button.on_event(ButtonClick, self.fit_selected)
columns = [
TableColumn(field="ASS", title="Assignment"),
TableColumn(field="CLUSTID", title="Cluster", editor=IntEditor()),
TableColumn(
field="X_PPM",
title=f"{self.peakipy_data.f2_label}",
editor=NumberEditor(step=0.0001),
formatter=NumberFormatter(format="0.0000"),
),
TableColumn(
field="Y_PPM",
title=f"{self.peakipy_data.f1_label}",
editor=NumberEditor(step=0.0001),
formatter=NumberFormatter(format="0.0000"),
),
TableColumn(
field="X_RADIUS_PPM",
title=f"{self.peakipy_data.f2_label} radius (ppm)",
editor=NumberEditor(step=0.0001),
formatter=NumberFormatter(format="0.0000"),
),
TableColumn(
field="Y_RADIUS_PPM",
title=f"{self.peakipy_data.f1_label} radius (ppm)",
editor=NumberEditor(step=0.0001),
formatter=NumberFormatter(format="0.0000"),
),
TableColumn(
field="XW_HZ",
title=f"{self.peakipy_data.f2_label} LW (Hz)",
editor=NumberEditor(step=0.01),
formatter=NumberFormatter(format="0.00"),
),
TableColumn(
field="YW_HZ",
title=f"{self.peakipy_data.f1_label} LW (Hz)",
editor=NumberEditor(step=0.01),
formatter=NumberFormatter(format="0.00"),
),
TableColumn(
field="VOL", title="Volume", formatter=NumberFormatter(format="0.0")
),
TableColumn(
field="include",
title="Include",
editor=SelectEditor(options=["yes", "no"]),
),
TableColumn(field="MEMCNT", title="MEMCNT", editor=IntEditor()),
]
self.data_table = DataTable(
source=self.source, columns=columns, editable=True, fit_columns=True
)
# callback for adding
# source.selected.on_change('indices', callback)
self.source.selected.on_change("indices", self.select_callback)
# Document layout
fitting_controls = column(
row(
column(self.slider_X_RADIUS, self.slider_Y_RADIUS),
column(
row(
widgetbox(self.contour_start, self.pos_neg_contour_radiobutton)
),
widgetbox(self.fit_button),
),
),
row(
column(widgetbox(self.ls_div), widgetbox(self.radio_button_group)),
column(widgetbox(self.select_plane), widgetbox(self.checkbox_group)),
),
)
# reclustering tab
self.struct_el = Select(
title="Structuring element:",
value="disk",
options=["square", "disk", "rectangle", "None", "mask_method"],
width=100,
)
self.struct_el_size = TextInput(
value="3",
title="Size(width/radius or width,height for rectangle):",
width=100,
)
self.recluster = Button(label="Re-cluster", button_type="warning")
self.recluster.on_event(ButtonClick, self.recluster_peaks)
# edit_fits tabs
fitting_layout = fitting_controls
log_layout = self.fit_reports_div
recluster_layout = row(
self.clust_div,
column(
self.contour_start, self.struct_el, self.struct_el_size, self.recluster
),
)
save_layout = column(self.savefilename, self.button, self.exit_button)
fitting_tab = Panel(child=fitting_layout, title="Peak fitting")
log_tab = Panel(child=log_layout, title="Log")
recluster_tab = Panel(child=recluster_layout, title="Re-cluster peaks")
save_tab = Panel(child=save_layout, title="Save edited peaklist")
self.tabs = Tabs(
tabs=[fitting_tab, log_tab, recluster_tab, save_tab],
sizing_mode="scale_both",
)
def recluster_peaks(self, event):
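The peakipy table demonstrates the cell-editing API: set editable=True on the DataTable and give each TableColumn an editor matched to its dtype. A compact, self-contained sketch (fields illustrative):

from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.models.widgets.tables import (
    IntEditor, NumberEditor, NumberFormatter, SelectEditor,
)

source = ColumnDataSource({"cluster": [1], "ppm": [8.1234], "include": ["yes"]})
columns = [
    TableColumn(field="cluster", title="Cluster", editor=IntEditor()),
    TableColumn(field="ppm", title="ppm",
                editor=NumberEditor(step=0.0001),
                formatter=NumberFormatter(format="0.0000")),
    TableColumn(field="include", title="Include",
                editor=SelectEditor(options=["yes", "no"])),
]
table = DataTable(source=source, columns=columns, editable=True)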
0 votes
View Source File : bokeh.py
License : MIT License
Project Creator : karlicoss
def rolling(*, x: str, y: str, df, avgs: Sequence[Avg]=['7D', '30D'], legend_label=None, context: Optional[RollingResult]=None, **kwargs) -> RollingResult:
# TODO maybe use a special logging handler, so everything logged with warning level gets displayed?
errors = []
# todo ugh. the same HPI check would be nice..
tzs = set(df.index.map(lambda x: getattr(x, 'tzinfo', None))) # meh
if len(tzs) > 1:
errors.append(f'WARNING: a mixture of timezones: {tzs}. You might want to unlocalize() them first.')
elif len(tzs) == 1:
[_tz] = tzs
if _tz is not None:
# todo not really sure about that.. maybe it's okay, although UTC might be wrong as well
errors.append(f'WARNING: detected timezone: {_tz}. You might want to unlocalize() first.')
# todo should copy df first??
if legend_label is None:
legend_label = y
# meh... don't think I like it
# TODO def test this
if context is None:
ls = []
plot = date_figure(df=df)
ls.append(plot)
ctx = RollingResult(
layout=column(ls, sizing_mode='stretch_width'),
plots=[],
figures=[plot],
)
else:
ctx = context
plot = ctx.figure
plots = ctx.plots
layouts = ctx.layout.children
# todo assert datetime index? test it too
# todo although in theory it doesn't have to be datetimes with the appropriate avgs??
has_x = df.index.notna()
has_y = df[y].notna()
err = df['error'].notna() if 'error' in df.columns else df.index == 'how_to_make_empty_index?'
# ^^^ todo a bit ugly... think about this better
for_table = ~has_x # case 1 is handled
# case 2: set proper error for ones that don't have y
df.loc[has_x & ~has_y & ~err, 'error'] = f'{y} is nan/null'
# now case 2 and 3 are the same
# case 3
case_3 = has_x & ~has_y
for_table |= case_3
for_marks = case_3
# case 4, 5
ok = has_x & has_y
case_4 = ok & err
for_table |= case_4
for_warn = case_4
dfm = df.loc[for_marks]
dfe = df.loc[for_table]
dfw = df.loc[for_warn]
df = df.loc[ok]
if len(dfm) > 0:
# todo meh.. how to make the position absolute??
some_y = df[y].quantile(0.8) # to display kinda on top, but not too high
if np.isnan(some_y):
# otherwise fails during JSON serialization
some_y = 0.0
plot.scatter(
source=CDS(dfm),
x=x,
y=some_y,
legend_label='errors',
line_color='red',
fill_color='yellow', # ??
marker='circle_cross',
size=10,
)
if len(dfe) > 0:
errors.append(f'Also encountered {len(dfe)} errors:')
from bokeh.models.widgets.markups import Div
# first a summary for the most important warnings/errors
# todo append later stuff as well, there are some warnings during means
for e in errors:
layouts.append(Div(
text=html.escape(e),
style={'color': 'red', 'font-weight': 'strong'},
))
if len(dfe) > 0:
# todo could even keep the 'value' errors and display below too.. but for now it's ok
# ok, if it respected newlines, would be perfect
# for now this is 'fine'...
# todo maybe should display all points, highlight error ones as red (and it sorts anyway so easy to overview?)
# todo would be nice to highlight the corresponding points in table/plot
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from bokeh.models.widgets.tables import DateFormatter, NumberFormatter, HTMLTemplateFormatter
# didn't work at all??
# from bokeh.models.widgets.tables import ScientificFormatter
# todo DataCube?? even more elaborate
dfe = dfe.reset_index() # todo ugh. otherwise doesn't display the index at all?
dfe = dfe.sort_values(by=x)
# todo maybe display 'error' as the first col?
datefmt = DateFormatter(format="%Y-%m-%d")
# todo speed_avg could have less digits (guess by the dispersion or something??)
# TODO horrible, but js bits of bokeh compute some complete bullhit for column widths
# todo set monospace font??
one_char = 10 # pixels
def datatable_columns(df):
for c, t in df.dtypes.items():
formatter = None
# TODO also use col name.. then won't have to handle nans!
width = 15 # in characters
# for fixed width types, we can have something kind of reasonable
if str(t).startswith('float'):
l = df[c].dropna().map(str).str.len().max()
width = 4 if np.isnan(l) else l
if str(t).startswith('datetime'):
formatter = DateFormatter(format='%Y%m%d %H%M%S %Z', nan_format='Nan')
width = 15
elif str(t).startswith('timedelta'):
# TODO warn if df contains stuff with duration >1D?
# without nan_format, it results in NaN:Nan:Nan
formatter = DateFormatter(format='%H:%M:%S', nan_format='Nan')
width = 8
# if c == 'error':
# # meh, but the only easy way to limit and ellipsize it I found
# # aaand it still computes width in some weird way, ends up taking too much space
# formatter = HTMLTemplateFormatter(template='<div style="text-overflow: ellipsis; overflow: hidden; width: 60ch;"><%= value %></div>')
tc = TableColumn(
field=c,
title=c,
**({} if formatter is None else dict(formatter=formatter)),
width=width * one_char,
)
yield tc
# TODO hmm, if we reuse the data source, editing & selection might work?
errors_table = DataTable(
source=CDS(dfe),
columns=list(datatable_columns(dfe)),
# todo ugh. handle this properly, was too narrow on the sleep plots
editable=True,
width=2000,
# default ends up in trimmed table content
autosize_mode='none',
# this might overstretch the parent...
# autosize_mode='fit_viewport',
# this just makes it respect the parent width
# width_policy='fit',
)
layouts.append(errors_table)
# todo
# >>> plot.circle([1,2,3], [4,5,6], name="temp")
# >>> plot.select(name="temp")
# [GlyphRenderer(id='399d53f5-73e9-44d9-9527-544b761c7705', ...)]
if len(dfw) > 0:
plot.circle(source=CDS(dfw), x=x, y=y, legend_label='warnings', size=20, color='yellow')
# todo warn if unsorted?
df = df.sort_index()
if len(df) == 0:
# add a fake point, so at least plotting doesn't fail...
df = pd.DataFrame([{
x: datetime(year=2000, month=1, day=1),
y: 0.0,
}]).set_index(x)
avgs = ['3D' for _ in avgs]
# FIXME need to add this to errors as well, or at least title..
# TODO need to add a better placeholder, timestamp 0 really messes things up
warnings.warn(f'No data points for {df}, empty plot!')
if None not in avgs:
plots.append(plot.scatter(x=x, y=y, source=CDS(df), legend_label=legend_label, **kwargs))
# only stuff without errors/warnings participates in the avg computation
if 'error' in df.columns: # meh
df = df[df['error'].isna()]
for period in [a for a in avgs if a is not None]:
dfy = df[[y]]
if str(dfy.index.dtype) == 'object':
logging.error(f"{dfy.dtypes}: index type is 'object'. You're likely doing something wrong")
if 'datetime64' in str(dfy.index.dtype):
# you're probably doing something wrong otherwise..
# todo warn too?
# check it's a valid period
pd.to_timedelta(period)
# TODO how to fill the missing values??
# a sequence of consts would be a good test for it
# todo why would index be na at this point? probably impossible?
dfa = dfy[dfy.index.notna()].rolling(period).mean()
# TODO assert x in df?? or rolling wrt to x??
# somehow plot.line works if 'x' is index? but df[x] doesn't..
# todo different style by default? thicker line? not sure..
plots.append(plot.line(x=x, y=y, source=CDS(dfa), legend_label=f'{legend_label} ({period} avg)', **kwargs))
plot.title.text = f'x: {x}, y: {y}'
# TODO axis labels instead?
return ctx
# return RollingResult(
# # todo maybe return orig layouts and let the parent wrap into column?
# layout=column(layouts, sizing_mode='stretch_width'),
# plots=plots, # todo rename to 'graphs'?
# figures=[plot],
# )
from bokeh.models import CustomJSHover
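Two details in datatable_columns are worth lifting out: DateFormatter's nan_format avoids the NaN:NaN:NaN rendering for missing timedeltas, and column widths are guessed in characters then scaled to pixels. A reduced sketch of the same per-dtype dispatch, assuming a Bokeh version whose DateFormatter supports nan_format as above (data illustrative):

import pandas as pd
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.models.widgets.tables import DateFormatter

df = pd.DataFrame({"when": pd.to_datetime(["2021-01-01", None]),
                   "speed": [12.5, None]})
columns = []
for name, dtype in df.dtypes.items():
    fmt = None
    if str(dtype).startswith("datetime"):
        # nan_format replaces the default NaN rendering for missing dates
        fmt = DateFormatter(format="%Y%m%d %H%M%S", nan_format="NaN")
    columns.append(TableColumn(field=name, title=name,
                               **({} if fmt is None else {"formatter": fmt})))
table = DataTable(source=ColumnDataSource(df), columns=columns, editable=True)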
0 votes
View Source File : make_plots.py
License : MIT License
Project Creator : PatrikHlobil
def plot_Scatterplot():
df = df_iris()
df = df.sample(frac=1)
# Create Bokeh-Table with DataFrame:
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn
data_table = DataTable(
columns=[TableColumn(field=Ci, title=Ci) for Ci in df.columns],
source=ColumnDataSource(df.head(10)),
)
# Create Scatterplot:
p_scatter = df.plot_bokeh.scatter(
x="petal length (cm)",
y="sepal width (cm)",
category="species",
title="Iris DataSet Visualization",
show_figure=False,
)
# Combine Div and Scatterplot via grid layout:
return pandas_bokeh.plot_grid(
[[data_table, p_scatter]], plot_width=400, plot_height=350, show_plot=False
)
@embeddable
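A brief usage note: with show_plot=False, plot_grid returns the layout instead of rendering it, so the caller picks the output target. A hedged sketch, assuming pandas_bokeh's standard entry points:

import pandas_bokeh
from bokeh.io import show

pandas_bokeh.output_file("scatter_with_table.html")  # or pandas_bokeh.output_notebook()
layout = plot_Scatterplot()
show(layout)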
0 votes
View Source File : test_PandasBokeh.py
License : MIT License
Project Creator : PatrikHlobil
def test_scatterplot(df_iris):
"Test for scatterplot"
# Create Bokeh-Table with DataFrame:
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn
data_table = DataTable(
columns=[TableColumn(field=Ci, title=Ci) for Ci in df_iris.columns],
source=ColumnDataSource(df_iris.head(10)),
)
data_table_accessor = DataTable(
columns=[TableColumn(field=Ci, title=Ci) for Ci in df_iris.columns],
source=ColumnDataSource(df_iris.head(10)),
)
# Create Scatterplot:
arguments = dict(
x="petal length (cm)",
y="sepal width (cm)",
category="species",
title="Iris DataSet Visualization",
show_figure=False,
)
p_scatter = df_iris.plot_bokeh(kind="scatter", **arguments)
p_scatter_accessor = df_iris.plot_bokeh.scatter(**arguments)
p_scatter_pandas_backend = df_iris.plot(kind="scatter", **arguments)
p_scatter_accessor_pandas_backend = df_iris.plot.scatter(**arguments)
# Combine Div and Scatterplot via grid layout:
output = pandas_bokeh.plot_grid(
[
[data_table, p_scatter],
[
data_table_accessor,
p_scatter_accessor,
p_scatter_pandas_backend,
p_scatter_accessor_pandas_backend,
],
],
show_plot=False,
return_html=True,
)
with open(os.path.join(DIRECTORY, "Plots", "Scatterplot.html"), "w") as f:
f.write(output)
assert True
def test_scatterplot_2(df_iris):
0 votes
View Source File : client_demo.py
License : MIT License
Project Creator : wywongbd
def build_widgets_wb(stock_list, metrics):
# CODE SECTION: setup buttons, widgetbox name = controls_wb
WIDGET_WIDTH = 250
# ========== Select Stocks ============= #
select_stk_1 = Select(width = WIDGET_WIDTH, title='Select Stock 1:', value = backtest_params["stk_0"], options=stock_list)
select_stk_2 = Select(width = WIDGET_WIDTH, title='Select Stock 2:', value = backtest_params["stk_1"], options=stock_list)
# ========== Strategy Type ============= #
strategy_list = ['kalman', 'distance', 'cointegration', 'reinforcement learning']
select_strategy = Select(width = WIDGET_WIDTH, title='Select Strategy:', value = backtest_params["strategy_type"], options=strategy_list)
# ========== set start/end date ============= #
# date time variables
MAX_START = datetime.strptime(backtest_params["max_start"], "%Y-%m-%d").date()
MAX_END = datetime.strptime(backtest_params["max_end"], "%Y-%m-%d").date()
DEFAULT_START = datetime.strptime(backtest_params["backtest_start"], "%Y-%m-%d").date()
DEFAULT_END = datetime.strptime(backtest_params["backtest_end"], "%Y-%m-%d").date()
STEP = 1
backtest_dates = DateRangeSlider(width = WIDGET_WIDTH,
start=MAX_START, end=MAX_END,
value=(DEFAULT_START, DEFAULT_END),
step=STEP, title="Backtest Date Range:")
start_bt = Button(label="Backtest", button_type="success", width = WIDGET_WIDTH)
# controls = column(select_stk_1, select_stk_2, select_strategy, backtest_dates, start_bt)
controls_wb = widgetbox(select_stk_1, select_stk_2, select_strategy, backtest_dates, start_bt, width=300)
# CODE SECTION: setup table, widgetbox name = metrics_wb
master_wb = None
if metrics is not None:
metric_source = ColumnDataSource(metrics)
metric_columns = [
TableColumn(field="Metrics", title="Metrics"),
TableColumn(field="Value", title="Performance"),
]
metric_table = DataTable(source=metric_source, columns=metric_columns, width=300)
master_wb = row(controls_wb, widgetbox(metric_table))
else:
logging.info("creating controls without table")
master_wb = row(controls_wb)
return master_wb, select_stk_1, select_stk_2, select_strategy, backtest_dates, start_bt
if FIRST_ITER[0]:
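A closing note on this last example: widgetbox comes from bokeh.layouts and was deprecated and later removed in favor of column/row, so on current Bokeh the same wiring looks roughly like this (names as in the snippet above):

from bokeh.layouts import column, row

controls_wb = column(select_stk_1, select_stk_2, select_strategy,
                     backtest_dates, start_bt, width=300)
master_wb = row(controls_wb, metric_table)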