Here are examples of the Python API bokeh.models.DataRange1d taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
22 Examples
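Before the project snippets, here is a minimal, self-contained sketch of typical DataRange1d usage. Everything in it is illustrative (made-up data and figure), and it assumes a Bokeh version that still accepts plot_width/plot_height, like the snippets below:

from bokeh.models import DataRange1d
from bokeh.plotting import figure, show

# Auto-ranging: start/end are computed from the renderers' data,
# with 10% padding on either side.
auto_x = DataRange1d(range_padding=0.1)

# Explicit limits plus hard bounds that cap panning/zooming.
fixed_y = DataRange1d(start=0, end=10, bounds=(0, 10))

p = figure(x_range=auto_x, y_range=fixed_y, plot_width=400, plot_height=300)
p.line([1, 2, 3, 4], [2, 5, 3, 7])
show(p)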
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_init_with_no_arguments(self):
datarange1d = DataRange1d()
assert datarange1d.start is None
assert datarange1d.end is None
assert datarange1d.bounds is None
def test_init_with_timedelta(self):
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_init_with_timedelta(self):
datarange1d = DataRange1d(start=-dt.timedelta(seconds=5), end=dt.timedelta(seconds=3))
assert datarange1d.start == -dt.timedelta(seconds=5)
assert datarange1d.end == dt.timedelta(seconds=3)
assert datarange1d.bounds is None
def test_init_with_datetime(self):
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_init_with_datetime(self):
datarange1d = DataRange1d(start=dt.datetime(2016, 4, 28, 2, 20, 50), end=dt.datetime(2017, 4, 28, 2, 20, 50))
assert datarange1d.start == dt.datetime(2016, 4, 28, 2, 20, 50)
assert datarange1d.end == dt.datetime(2017, 4, 28, 2, 20, 50)
assert datarange1d.bounds is None
def test_init_with_float(self):
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_init_with_float(self):
datarange1d = DataRange1d(start=-1.0, end=3.0)
assert datarange1d.start == -1.0
assert datarange1d.end == 3.0
assert datarange1d.bounds is None
def test_init_with_int(self):
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_init_with_int(self):
datarange1d = DataRange1d(start=-1, end=3)
assert datarange1d.start == -1
assert datarange1d.end == 3
assert datarange1d.bounds is None
def test_init_with_follow_sets_bounds_to_none(self):
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_init_with_follow_sets_bounds_to_none(self):
datarange1d = DataRange1d(follow="start")
assert datarange1d.follow == "start"
assert datarange1d.bounds is None
def test_init_with_bad_bounds(self):
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_init_with_bad_bounds(self):
with pytest.raises(ValueError):
DataRange1d(1, 2, bounds=(1, 0))
with pytest.raises(ValueError):
DataRange1d(1, 2, bounds=[1, 0])
with pytest.raises(ValueError):
Range1d(1, 2, bounds="21")
class Test_FactorRange(object):
View Source File : bokehstreaming.py
License : MIT License
Project Creator : smartyal
def button2_cb():
global p
global mysource
start = mysource.data["time"][0]
end = mysource.data["time"][-1]
p.x_range = DataRange1d(start, end)
def button3_cb():
View Source File : axes.py
License : Apache License 2.0
Project Creator : spotify
def __init__(self, chart):
self._chart = chart
self._y_range_name = 'second_y'
self._chart.figure.extra_y_ranges = {
self._y_range_name: DataRange1d(bounds='auto')
}
# Add the appropriate axis type to the figure.
axis_class = LinearAxis
if self._chart._second_y_axis_type == 'log':
axis_class = LogAxis
self._chart.figure.add_layout(
axis_class(y_range_name=self._y_range_name), 'right')
self._y_axis_index = 1
self._y_range = self._chart.figure.extra_y_ranges[self._y_range_name]
self._chart.style._apply_settings('second_y_axis')
class SecondAxis:
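The snippet above backs a second y-axis with its own DataRange1d(bounds='auto') and adds a LinearAxis on the right. Here is a standalone, hedged sketch of the same pattern; the figure, data, and legend labels are made up rather than taken from the chartify internals above:

from bokeh.models import DataRange1d, LinearAxis
from bokeh.plotting import figure, show

p = figure(plot_width=600, plot_height=300)
p.line([0, 1, 2, 3], [10, 20, 15, 30], legend_label="primary")

# Register a named extra range; 'auto' bounds let it track the data.
p.extra_y_ranges = {"second_y": DataRange1d(bounds="auto")}
p.add_layout(LinearAxis(y_range_name="second_y"), "right")

# Glyphs that belong to the secondary axis name the range explicitly.
p.line([0, 1, 2, 3], [0.1, 0.4, 0.2, 0.8], color="red",
       y_range_name="second_y", legend_label="secondary")
show(p)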
View Source File : main.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : binder-examples
def make_plot(source, title):
plot = figure(x_axis_type="datetime", plot_width=800, tools="", toolbar_location=None)
plot.title.text = title
plot.quad(top='record_max_temp', bottom='record_min_temp', left='left', right='right',
color=Blues4[2], source=source, legend="Record")
plot.quad(top='average_max_temp', bottom='average_min_temp', left='left', right='right',
color=Blues4[1], source=source, legend="Average")
plot.quad(top='actual_max_temp', bottom='actual_min_temp', left='left', right='right',
color=Blues4[0], alpha=0.5, line_color="black", source=source, legend="Actual")
# fixed attributes
plot.xaxis.axis_label = None
plot.yaxis.axis_label = "Temperature (F)"
plot.axis.axis_label_text_font_style = "bold"
plot.x_range = DataRange1d(range_padding=0.0)
plot.grid.grid_line_alpha = 0.3
return plot
def update_plot(attrname, old, new):
View Source File : plot_expression_va.py
License : Apache License 2.0
Project Creator : EPFL-LCSB
def plot_va(filename, tag, kind, color = DEFAULT_COLOR, orient = 'horizontal'):
bp.curdoc().clear()
title = verbose_kinds[kind] + ' variability analysis for {} iJO1366 model'\
.format(tag)
data = pd.read_csv(os.path.join(outputs_folder, filename), index_col = 0)
if not data.columns[0] in ['minimum','maximum']:
data.columns = ['minimum','maximum']
data += 1e-9 # Resolution of the solver
f = lambda x: np.sqrt(x[0]*x[1])
# data['score'] = data.mean(axis=1)
data['score'] = data.apply(f, axis=1)
data.sort_values(by='score', ascending = False, inplace = True)
data['y'] = range(len(data))
data['name'] = data.index
source = ColumnDataSource(data)
xdr = DataRange1d()
ydr = DataRange1d()
_tools_to_show = 'box_zoom,pan,save,hover,reset,tap,wheel_zoom'
if orient == 'vertical':
p1 = bp.figure( title=title, x_range=xdr, y_range=ydr,
x_axis_type = 'log',
plot_width=600,
plot_height=1000,
tools=_tools_to_show,
# h_symmetry=False, v_symmetry=False,
min_border=0)
glyph = HBar(y="y", right="maximum", left="minimum", height=0.9,
fill_color=color, fill_alpha=0.5,
line_color = None)
p1.add_glyph(source, glyph)
p1.circle(x='score', y='y', fill_color=color, line_color=None,
source=source)
axis1 = p1.xaxis
axis2 = p1.yaxis
elif orient == 'horizontal':
p1 = bp.figure(title=title, x_range=ydr, y_range=xdr,
y_axis_type='log',
plot_width=1000,
plot_height=600,
tools=_tools_to_show,
# h_symmetry=False, v_symmetry=False,
min_border=0)
glyph = VBar(x="y", top="maximum", bottom="minimum", width=0.9,
fill_color=color, fill_alpha=0.5,
line_color=None)
p1.add_glyph(source, glyph)
p1.circle(y='score', x='y', fill_color=color, line_color=None,
source=source)
axis1 = p1.yaxis
axis2 = p1.xaxis
else:
raise ValueError("orient should be 'vertical' or 'horizontal'")
# Fix ticks
label_dict = {}
for i, s in enumerate(data.index):
label_dict[i] = s
axis2.formatter = FuncTickFormatter(code="""
var labels = %s;
return labels[tick];
""" % label_dict)
axis1.axis_label = '[{}]'.format(verbose_kinds[kind])
# p1.yaxis.ticker = [x for x in range(len(data))]
hover = p1.select(dict(type=HoverTool))
hover.tooltips = [(verbose_kinds[kind], "@name"),
("min", "@minimum"),
("max", "@maximum"),
]
hover.mode = 'mouse'
path = os.path.join(plots_folder, 'va_' + filename)
bp.output_file(path + '.html')
bp.show(p1)
p1.output_backend='svg'
export_svgs(p1, filename=path+'.svg')
bp.curdoc().clear()
return data
if __name__ == '__main__':
View Source File : figure.py
License : GNU General Public License v3.0
Project Creator : happydasch
def plot_volume(self, data, alpha=1.0, extra_axis=False):
'''
Plot method for volume
extra_axis: displays a second axis (for overlay on data plotting)
'''
source_id = get_source_id(data)
self.set_cds_col(source_id + 'volume')
# create color columns
volup = convert_color(self._scheme.volup)
voldown = convert_color(self._scheme.voldown)
self.set_cds_col((
source_id + 'colors_volume',
source_id + 'open',
source_id + 'close',
partial(cds_op_color,
color_up=volup,
color_down=voldown)))
# prepare bar kwargs
kwargs = {
'x': 'index',
'width': self._bar_width,
'top': source_id + 'volume',
'bottom': 0,
'fill_color': source_id + 'colors_volume',
'line_color': source_id + 'colors_volume',
'fill_alpha': alpha,
'line_alpha': alpha,
'name': 'Volume',
'legend_label': 'Volume'}
# set axis
ax_formatter = NumeralTickFormatter(format=self._scheme.number_format)
if extra_axis:
source_data_axis = 'axvol'
# use colorup
ax_color = convert_color(self._scheme.volup)
# use only one additional axis to prevent multiple axis being added
# to a single figure
ax = self.figure.select_one({'name': source_data_axis})
if ax is None:
# create new axis if not already available
self.figure.extra_y_ranges = {source_data_axis: DataRange1d(
range_padding=1.0 / self._scheme.volscaling,
start=0)}
ax = LinearAxis(
name=source_data_axis,
y_range_name=source_data_axis,
formatter=ax_formatter,
axis_label_text_color=ax_color,
axis_line_color=ax_color,
major_label_text_color=ax_color,
major_tick_line_color=ax_color,
minor_tick_line_color=ax_color)
self.figure.add_layout(ax, self._scheme.vol_axis_location)
kwargs['y_range_name'] = source_data_axis
else:
self.figure.yaxis.formatter = ax_formatter
# append renderer
self._figure_append_renderer(self.figure.vbar, **kwargs)
# set hover label
self._fp.hover.add_hovertip(
'Volume',
f'@{source_id}volume{{({self._scheme.number_format})}}',
data)
def plot_observer(self, obj):
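In the snippet above the volume bars get their own DataRange1d whose range_padding (1.0 / volscaling) leaves headroom above the data so the bars stay in the lower part of the figure. A hedged, self-contained sketch of that trick; the data and the 'volume_axis' name are invented for illustration:

from bokeh.models import DataRange1d, LinearAxis, NumeralTickFormatter
from bokeh.plotting import figure, show

p = figure(plot_width=600, plot_height=300)
p.line([0, 1, 2, 3, 4], [100, 102, 101, 105, 104])  # price-like series

# A large range_padding adds empty space above the bars, compressing them.
p.extra_y_ranges = {"volume_axis": DataRange1d(start=0, range_padding=4.0)}
p.add_layout(LinearAxis(y_range_name="volume_axis",
                        formatter=NumeralTickFormatter(format="0.0a")), "right")
p.vbar(x=[0, 1, 2, 3, 4], top=[1e6, 2e6, 1.5e6, 3e6, 2.2e6],
       width=0.8, alpha=0.4, y_range_name="volume_axis")
show(p)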
View Source File : dist.py
License : MIT License
Project Creator : justinbois
def histogram(
data=None,
val=None,
cats=None,
palette=None,
order=None,
val_axis="x",
p=None,
show_legend=None,
bins="freedman-diaconis",
density=False,
kind="step_filled",
click_policy="hide",
line_kwargs=None,
fill_kwargs=None,
horizontal=False,
**kwargs,
):
"""
Make a plot of histograms of a data set.
Parameters
----------
data : Pandas DataFrame, 1D Numpy array, or xarray
DataFrame containing tidy data for plotting. If a Numpy array,
a single category is assumed and a histogram generated from
data.
val : hashable
Name of column to use as value variable.
cats : hashable or list of hashables
Name of column(s) to use as categorical variable(s).
val_axis : str, either 'x' or 'y', default 'x'
Axis along which the quantitative value varies.
palette : list of strings of hex colors, or single hex string
If a list, color palette to use. If a single string representing
a hex color, all glyphs are colored with that color. Default is
colorcet.b_glasbey_category10 from the colorcet package.
order : list or None
If not None, must be a list of unique group names when the input
data frame is grouped by `cats`. The order of the list specifies
the ordering of the categorical variables in the legend. If
None, the categories appear in the order in which they appeared
in the inputted data frame.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
show_legend : bool, default False
If True, display legend.
bins : int, array_like, or str, default 'freedman-diaconis'
If int or array_like, setting for `bins` kwarg to be passed to
`np.histogram()`. If 'exact', then each unique value in the
data gets its own bin. If 'integer', then integer data is
assumed and each integer gets its own bin. If 'sqrt', uses the
square root rule to determine number of bins. If
`freedman-diaconis`, uses the Freedman-Diaconis rule for number
of bins.
density : bool, default False
If True, normalize the histograms. Otherwise, base the
histograms on counts.
kind : str, default 'step_filled'
The kind of histogram to display. Allowed values are 'step' and
'step_filled'.
click_policy : str, default 'hide'
Either 'hide', 'mute', or None; how the glyphs respond when the
corresponding category is clicked in the legend.
line_kwargs : dict
Keyword arguments to pass to `p.line()` in constructing the
histograms. By default, {"line_width": 2}.
fill_kwargs : dict
Keyword arguments to pass to `p.patch()` when making the fill
for the step-filled histogram. Ignored if `kind = 'step'`. By
default {"fill_alpha": 0.3, "line_alpha": 0}.
horizontal : bool, default False
Deprecated. Use `val_axis`.
kwargs
Any kwargs to be passed to `bokeh.plotting.figure()` when making
the plot.
Returns
-------
output : Bokeh figure
Figure populated with histograms.
"""
if val_axis not in ("x", "y"):
raise RuntimeError("Invalid `val_axis`. Must by 'x' or 'y'.")
if horizontal and val_axis != "y":
raise RuntimeError(
"`horizontal` and `val_axis` kwargs in disagreement. "
"Use `val_axis`; `horizontal` is deprecated."
)
if palette is None:
palette = colorcet.b_glasbey_category10
df, val, cats, show_legend = utils._data_cats(data, val, cats, show_legend)
if show_legend is None:
if cats is None:
show_legend = False
else:
show_legend = True
if type(bins) == str and bins not in [
"integer",
"exact",
"sqrt",
"freedman-diaconis",
]:
raise RuntimeError("Invalid bin specification.")
if cats is None:
df["__cat"] = "__dummy_cat"
if show_legend:
raise RuntimeError("No legend to show if `cats` is None.")
if order is not None:
raise RuntimeError("No `order` is allowed if `cats` is None.")
cats = "__cat"
cats, cols = utils._check_cat_input(
df, cats, val, None, None, palette, order, kwargs
)
kwargs = utils._fig_dimensions(kwargs)
if line_kwargs is None:
line_kwargs = {"line_width": 2}
if fill_kwargs is None:
fill_kwargs = {}
if "fill_alpha" not in fill_kwargs:
fill_kwargs["fill_alpha"] = 0.3
if "line_alpha" not in fill_kwargs:
fill_kwargs["line_alpha"] = 0
_, df["__label"] = utils._source_and_labels_from_cats(df, cats)
cols += ["__label"]
if order is not None:
if type(cats) in [list, tuple]:
df["__sort"] = df.apply(lambda r: order.index(tuple(r[cats])), axis=1)
else:
df["__sort"] = df.apply(lambda r: order.index(r[cats]), axis=1)
df = df.sort_values(by="__sort")
if type(bins) == str and bins == "exact":
a = np.unique(df[val])
if len(a) == 1:
bins = np.array([a[0] - 0.5, a[0] + 0.5])
else:
bins = np.concatenate(
(
(a[0] - (a[1] - a[0]) / 2,),
(a[1:] + a[:-1]) / 2,
(a[-1] + (a[-1] - a[-2]) / 2,),
)
)
elif type(bins) == str and bins == "integer":
if np.any(df[val] != np.round(df[val])):
raise RuntimeError("'integer' bins chosen, but data are not integer.")
bins = np.arange(df[val].min() - 1, df[val].max() + 1) + 0.5
if p is None:
kwargs = utils._fig_dimensions(kwargs)
if "x_axis_label" not in kwargs:
kwargs["x_axis_label"] = val
if "y_axis_label" not in kwargs:
if density:
kwargs["y_axis_label"] = "density"
else:
kwargs["y_axis_label"] = "count"
if "y_range" not in kwargs:
kwargs["y_range"] = bokeh.models.DataRange1d(start=0)
p = bokeh.plotting.figure(**kwargs)
# Explicitly loop to enable click policies on the legend (not possible with factors)
for i, (name, g) in enumerate(df.groupby(cats, sort=False)):
e0, f0 = _compute_histogram(g[val], bins, density)
line_kwargs["color"] = palette[i % len(palette)]
if val_axis == "y":
p.line(f0, e0, **line_kwargs, legend_label=g["__label"].iloc[0])
else:
p.line(e0, f0, **line_kwargs, legend_label=g["__label"].iloc[0])
if kind == "step_filled":
x2 = [e0.min(), e0.max()]
y2 = [0, 0]
fill_kwargs["color"] = palette[i % len(palette)]
if val_axis == "y":
p = utils._fill_between(
p, f0, e0, y2, x2, legend_label=g["__label"].iloc[0], **fill_kwargs
)
else:
p = utils._fill_between(
p, e0, f0, x2, y2, legend_label=g["__label"].iloc[0], **fill_kwargs
)
if show_legend:
if val_axis == "y":
p.legend.location = "bottom_right"
else:
p.legend.location = "top_right"
p.legend.click_policy = click_policy
else:
p.legend.visible = False
return p
def _staircase_ecdf(p, data, complementary=False, horizontal=False, line_kwargs={}):
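The histogram helper above pins the count axis at zero with bokeh.models.DataRange1d(start=0) while the upper end keeps auto-ranging. A standalone sketch of that idea with synthetic data; nothing here is the justinbois API itself:

import numpy as np
from bokeh.models import DataRange1d
from bokeh.plotting import figure, show

rng = np.random.default_rng(0)
counts, edges = np.histogram(rng.normal(size=500), bins=30)

# start=0 fixes the baseline; the end still follows the data.
p = figure(plot_width=500, plot_height=300,
           y_range=DataRange1d(start=0),
           x_axis_label="value", y_axis_label="count")
p.quad(top=counts, bottom=0, left=edges[:-1], right=edges[1:], alpha=0.5)
show(p)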
View Source File : whole_productivity_per_date.py
License : MIT License
Project Creator : kurusugawa-computer
def plot(self, output_file: Path):
"""
Plot overall production volume and productivity.
Args:
df:
Returns:
"""
def add_velocity_columns(df: pandas.DataFrame):
for denominator in ["input_data_count", "annotation_count"]:
for category in [
"actual",
"monitored",
"monitored_annotation",
"monitored_inspection",
"monitored_acceptance",
"unmonitored",
]:
df[f"{category}_worktime_minute/{denominator}"] = (
df[f"{category}_worktime_hour"] * 60 / df[denominator]
)
df[f"{category}_worktime_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
get_weekly_sum(df[f"{category}_worktime_hour"]) * 60 / get_weekly_sum(df[denominator])
)
df[f"actual_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
df["actual_worktime_hour"]
) / get_weekly_sum(df["task_count"])
df[f"monitored_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
df["monitored_worktime_hour"]
) / get_weekly_sum(df["task_count"])
for column in ["task_count", "input_data_count", "actual_worktime_hour", "monitored_worktime_hour"]:
df[f"{column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_moving_average(df[column])
def create_figure(title: str, y_axis_label: str) -> bokeh.plotting.Figure:
return figure(
plot_width=1200,
plot_height=600,
title=title,
x_axis_label="日",
x_axis_type="datetime",
y_axis_label=y_axis_label,
)
def plot_and_moving_average(fig, y_column_name: str, legend_name: str, source, color, **kwargs):
x_column_name = "dt_date"
# Plot the values
plot_line_and_circle(
fig,
x_column_name=x_column_name,
y_column_name=y_column_name,
source=source,
color=color,
legend_label=legend_name,
**kwargs,
)
# Plot the moving average
plot_moving_average(
fig,
x_column_name=x_column_name,
y_column_name=f"{y_column_name}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}",
source=source,
color=color,
legend_label=f"{legend_name}の1週間移動平均",
**kwargs,
)
def create_task_figure():
y_range_name = "worktime_axis"
fig_task = create_figure(title="日ごとのタスク数と作業時間", y_axis_label="タスク数")
fig_task.add_layout(
LinearAxis(
y_range_name=y_range_name,
axis_label="作業時間[hour]",
),
"right",
)
y_overlimit = 0.05
fig_task.extra_y_ranges = {
y_range_name: DataRange1d(
end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * (1 + y_overlimit)
)
}
plot_and_moving_average(
fig=fig_task,
y_column_name="task_count",
legend_name="タスク数",
source=source,
color=get_color_from_small_palette(0),
)
plot_and_moving_average(
fig=fig_task,
y_column_name="actual_worktime_hour",
legend_name="実績作業時間",
source=source,
color=get_color_from_small_palette(1),
y_range_name=y_range_name,
)
plot_and_moving_average(
fig=fig_task,
y_column_name="monitored_worktime_hour",
legend_name="計測作業時間",
source=source,
color=get_color_from_small_palette(2),
y_range_name=y_range_name,
)
return fig_task
def create_input_data_figure():
y_range_name = "worktime_axis"
fig_input_data = create_figure(title="日ごとの入力データ数と作業時間", y_axis_label="入力データ数")
fig_input_data.add_layout(
LinearAxis(
y_range_name=y_range_name,
axis_label="作業時間[hour]",
),
"right",
)
y_overlimit = 0.05
fig_input_data.extra_y_ranges = {
y_range_name: DataRange1d(
end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * (1 + y_overlimit)
)
}
plot_and_moving_average(
fig=fig_input_data,
y_column_name="input_data_count",
legend_name="入力データ数",
source=source,
color=get_color_from_small_palette(0),
)
plot_and_moving_average(
fig=fig_input_data,
y_column_name="actual_worktime_hour",
legend_name="実績作業時間",
source=source,
color=get_color_from_small_palette(1),
y_range_name=y_range_name,
)
plot_and_moving_average(
fig=fig_input_data,
y_column_name="monitored_worktime_hour",
legend_name="計測作業時間",
source=source,
color=get_color_from_small_palette(2),
y_range_name=y_range_name,
)
return fig_input_data
if not self._validate_df_for_output(output_file):
return
df = self.df.copy()
df["dt_date"] = df["date"].map(lambda e: parse(e).date())
add_velocity_columns(df)
logger.debug(f"{output_file} を出力します。")
phase_prefix = [
("actual_worktime", "実績作業時間"),
("monitored_worktime", "計測作業時間"),
("monitored_annotation_worktime", "計測作業時間(教師付)"),
("monitored_inspection_worktime", "計測作業時間(検査)"),
("monitored_acceptance_worktime", "計測作業時間(受入)"),
]
if df["actual_worktime_hour"].sum() > 0:
# Reason for this branch: when there is no actual worktime, the unmonitored worktime becomes negative and the chart is hard to read, so plot unmonitored worktime only when needed.
phase_prefix.append(("unmonitored_worktime", "非計測作業時間"))
fig_info_list = [
{
"figure": create_figure(title="日ごとの作業時間", y_axis_label="作業時間[hour]"),
"y_info_list": [
{"column": "actual_worktime_hour", "legend": "実績作業時間"},
{"column": "monitored_worktime_hour", "legend": "計測作業時間"},
],
},
{
"figure": create_figure(title="日ごとのタスクあたり作業時間", y_axis_label="タスクあたり作業時間[hour/task]"),
"y_info_list": [
{"column": "actual_worktime_hour/task_count", "legend": "タスクあたり実績作業時間"},
{"column": "monitored_worktime_hour/task_count", "legend": "タスクあたり計測作業時間"},
],
},
{
"figure": create_figure(title="日ごとの入力データあたり作業時間", y_axis_label="入力データあたり作業時間[minute/input_data]"),
"y_info_list": [
{"column": f"{e[0]}_minute/input_data_count", "legend": f"入力データあたり{e[1]}"} for e in phase_prefix
],
},
{
"figure": create_figure(title="日ごとのアノテーションあたり作業時間", y_axis_label="アノテーションあたり作業時間[minute/annotation]"),
"y_info_list": [
{"column": f"{e[0]}_minute/annotation_count", "legend": f"アノテーションあたり{e[1]}"} for e in phase_prefix
],
},
]
source = ColumnDataSource(data=df)
for fig_info in fig_info_list:
y_info_list: list[dict[str, str]] = fig_info["y_info_list"] # type: ignore
for index, y_info in enumerate(y_info_list):
color = get_color_from_small_palette(index)
plot_and_moving_average(
fig=fig_info["figure"],
y_column_name=y_info["column"],
legend_name=y_info["legend"],
source=source,
color=color,
)
tooltip_item = [
"date",
"task_count",
"input_data_count",
"actual_worktime_hour",
"monitored_worktime_hour",
"cumsum_task_count",
"cumsum_input_data_count",
"cumsum_actual_worktime_hour",
"actual_worktime_hour/task_count",
"actual_worktime_minute/input_data_count",
"actual_worktime_minute/annotation_count",
"monitored_worktime_hour/task_count",
"monitored_worktime_minute/input_data_count",
"monitored_worktime_minute/annotation_count",
]
hover_tool = create_hover_tool(tooltip_item)
figure_list = [
create_task_figure(),
create_input_data_figure(),
]
figure_list.extend([info["figure"] for info in fig_info_list])
for fig in figure_list:
fig.add_tools(hover_tool)
add_legend_to_figure(fig)
div_element = self._create_div_element()
write_bokeh_graph(bokeh.layouts.column([div_element] + figure_list), output_file)
def plot_cumulatively(self, output_file: Path):
View Source File : whole_productivity_per_date.py
License : MIT License
Project Creator : kurusugawa-computer
def plot_cumulatively(self, output_file: Path):
"""
Output cumulative line charts of overall production volume and worktime.
"""
def create_figure(title: str, y_axis_label: str) -> bokeh.plotting.Figure:
return figure(
plot_width=1200,
plot_height=600,
title=title,
x_axis_label="日",
x_axis_type="datetime",
y_axis_label=y_axis_label,
)
def create_task_figure():
x_column_name = "dt_date"
y_range_name = "worktime_axis"
fig = create_figure(title="日ごとの累積タスク数と累積作業時間", y_axis_label="タスク数")
fig.add_layout(
LinearAxis(
y_range_name=y_range_name,
axis_label="作業時間[hour]",
),
"right",
)
y_overlimit = 0.05
fig.extra_y_ranges = {
y_range_name: DataRange1d(
end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max())
* (1 + y_overlimit)
)
}
# Plot the values
plot_line_and_circle(
fig,
x_column_name=x_column_name,
y_column_name="cumsum_task_count",
source=source,
color=get_color_from_small_palette(0),
legend_label="タスク数",
)
# Plot the values
plot_line_and_circle(
fig,
x_column_name=x_column_name,
y_column_name="cumsum_actual_worktime_hour",
source=source,
color=get_color_from_small_palette(1),
legend_label="実績作業時間",
y_range_name=y_range_name,
)
plot_line_and_circle(
fig,
x_column_name=x_column_name,
y_column_name="cumsum_monitored_worktime_hour",
source=source,
color=get_color_from_small_palette(2),
legend_label="計測作業時間",
y_range_name=y_range_name,
)
return fig
def create_input_data_figure():
x_column_name = "dt_date"
y_range_name = "worktime_axis"
fig = create_figure(title="日ごとの累積入力データ数と累積作業時間", y_axis_label="入力データ数")
fig.add_layout(
LinearAxis(
y_range_name=y_range_name,
axis_label="作業時間[hour]",
),
"right",
)
y_overlimit = 0.05
fig.extra_y_ranges = {
y_range_name: DataRange1d(
end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max())
* (1 + y_overlimit)
)
}
# Plot the values
plot_line_and_circle(
fig,
x_column_name=x_column_name,
y_column_name="cumsum_input_data_count",
source=source,
color=get_color_from_small_palette(0),
legend_label="入力データ数",
)
# Plot the values
plot_line_and_circle(
fig,
x_column_name=x_column_name,
y_column_name="cumsum_actual_worktime_hour",
source=source,
color=get_color_from_small_palette(1),
legend_label="実績作業時間",
y_range_name=y_range_name,
)
# Plot the values
plot_line_and_circle(
fig,
x_column_name=x_column_name,
y_column_name="cumsum_monitored_worktime_hour",
source=source,
color=get_color_from_small_palette(2),
legend_label="計測作業時間",
y_range_name=y_range_name,
)
return fig
if not self._validate_df_for_output(output_file):
return
df = self.df.copy()
df["dt_date"] = df["date"].map(lambda e: parse(e).date())
df["cumsum_monitored_worktime_hour"] = df["monitored_worktime_hour"].cumsum()
logger.debug(f"{output_file} を出力します。")
source = ColumnDataSource(data=df)
tooltip_item = [
"date",
"task_count",
"input_data_count",
"actual_worktime_hour",
"monitored_worktime_hour",
"cumsum_task_count",
"cumsum_input_data_count",
"cumsum_actual_worktime_hour",
"cumsum_monitored_worktime_hour",
]
hover_tool = create_hover_tool(tooltip_item)
fig_list = [create_task_figure(), create_input_data_figure()]
for fig in fig_list:
fig.add_tools(hover_tool)
add_legend_to_figure(fig)
div_element = self._create_div_element()
write_bokeh_graph(bokeh.layouts.column([div_element] + fig_list), output_file)
def to_csv(self, output_file: Path) -> None:
View Source File : whole_productivity_per_date.py
License : MIT License
Project Creator : kurusugawa-computer
def plot(self, output_file: Path):
"""
Plot overall production volume and productivity.
"""
def add_velocity_and_weekly_moving_average_columns(df):
for column in [
"input_data_count",
"worktime_hour",
"annotation_worktime_hour",
"inspection_worktime_hour",
"acceptance_worktime_hour",
]:
df[f"{column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_moving_average(df[column])
for denominator in ["input_data_count", "annotation_count"]:
for numerator in ["worktime", "annotation_worktime", "inspection_worktime", "acceptance_worktime"]:
df[f"{numerator}_minute/{denominator}"] = df[f"{numerator}_hour"] * 60 / df[denominator]
df[f"{numerator}_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
get_weekly_sum(df[f"{numerator}_hour"]) * 60 / get_weekly_sum(df[denominator])
)
def create_div_element() -> bokeh.models.Div:
"""
Generate the div element placed at the top of the HTML page.
"""
return bokeh.models.Div(
text=""" < h4>注意 < /h4>
< p>「X日の作業時間」とは、「X日に教師付開始したタスクにかけた作業時間」です。
「X日に作業した時間」ではありません。
< /p>
"""
)
def create_figure(title: str, y_axis_label: str) -> bokeh.plotting.Figure:
return figure(
plot_width=1200,
plot_height=600,
title=title,
x_axis_label="教師開始日",
x_axis_type="datetime",
y_axis_label=y_axis_label,
)
def create_input_data_figure():
y_range_name = "worktime_axis"
fig_input_data = create_figure(title="日ごとの入力データ数と作業時間", y_axis_label="入力データ数")
fig_input_data.add_layout(
LinearAxis(
y_range_name=y_range_name,
axis_label="作業時間[hour]",
),
"right",
)
y_overlimit = 0.05
fig_input_data.extra_y_ranges = {
y_range_name: DataRange1d(end=df["worktime_hour"].max() * (1 + y_overlimit))
}
self._plot_and_moving_average(
fig=fig_input_data,
x_column_name="dt_first_annotation_started_date",
y_column_name="input_data_count",
legend_name="入力データ数",
source=source,
color=get_color_from_small_palette(0),
)
self._plot_and_moving_average(
fig=fig_input_data,
x_column_name="dt_first_annotation_started_date",
y_column_name="worktime_hour",
legend_name="作業時間",
source=source,
color=get_color_from_small_palette(1),
y_range_name=y_range_name,
)
return fig_input_data
if not self._validate_df_for_output(output_file):
return
df = self.df.copy()
df["dt_first_annotation_started_date"] = df["first_annotation_started_date"].map(lambda e: parse(e).date())
add_velocity_and_weekly_moving_average_columns(df)
logger.debug(f"{output_file} を出力します。")
fig_list = [
create_figure(title="教師付開始日ごとの作業時間", y_axis_label="作業時間[hour]"),
create_figure(title="教師付開始日ごとの入力データあたり作業時間", y_axis_label="入力データあたり作業時間[minute/input_data]"),
create_figure(title="教師付開始日ごとのアノテーションあたり作業時間", y_axis_label="アノテーションあたり作業時間[minute/annotation]"),
]
fig_info_list = [
{
"x": "dt_first_annotation_started_date",
"y_info_list": [
{"column": "worktime_hour", "legend": "作業時間"},
{"column": "annotation_worktime_hour", "legend": "教師付作業時間"},
{"column": "inspection_worktime_hour", "legend": "検査作業時間"},
{"column": "acceptance_worktime_hour", "legend": "受入作業時間"},
],
},
{
"x": "dt_first_annotation_started_date",
"y_info_list": [
{"column": "worktime_minute/input_data_count", "legend": "入力データあたり作業時間"},
{"column": "annotation_worktime_minute/input_data_count", "legend": "入力データあたり教師付作業時間"},
{"column": "inspection_worktime_minute/input_data_count", "legend": "入力データあたり検査作業時間"},
{"column": "acceptance_worktime_minute/input_data_count", "legend": "入力データあたり受入作業時間"},
],
},
{
"x": "dt_date",
"y_info_list": [
{"column": "worktime_minute/annotation_count", "legend": "アノテーション作業時間"},
{"column": "annotation_worktime_minute/annotation_count", "legend": "アノテーションあたり教師付作業時間"},
{"column": "inspection_worktime_minute/annotation_count", "legend": "アノテーションあたり検査作業時間"},
{"column": "acceptance_worktime_minute/annotation_count", "legend": "アノテーションあたり受入作業時間"},
],
},
]
source = ColumnDataSource(data=df)
for fig, fig_info in zip(fig_list, fig_info_list):
y_info_list: list[dict[str, str]] = fig_info["y_info_list"] # type: ignore
for index, y_info in enumerate(y_info_list):
color = get_color_from_small_palette(index)
self._plot_and_moving_average(
fig=fig,
x_column_name="dt_first_annotation_started_date",
y_column_name=y_info["column"],
legend_name=y_info["legend"],
source=source,
color=color,
)
tooltip_item = [
"first_annotation_started_date",
"task_count",
"input_data_count",
"annotation_count",
"worktime_hour",
"annotation_worktime_hour",
"inspection_worktime_hour",
"acceptance_worktime_hour",
"worktime_minute/input_data_count",
"annotation_worktime_minute/input_data_count",
"inspection_worktime_minute/input_data_count",
"acceptance_worktime_minute/input_data_count",
"worktime_minute/annotation_count",
"annotation_worktime_minute/annotation_count",
"inspection_worktime_minute/annotation_count",
"acceptance_worktime_minute/annotation_count",
]
hover_tool = create_hover_tool(tooltip_item)
fig_list.insert(0, create_input_data_figure())
for fig in fig_list:
fig.add_tools(hover_tool)
add_legend_to_figure(fig)
output_file.parent.mkdir(exist_ok=True, parents=True)
bokeh.plotting.reset_output()
bokeh.plotting.output_file(output_file, title=output_file.stem)
bokeh.plotting.save(bokeh.layouts.column([create_div_element()] + fig_list))
View Source File : visualization.py
License : MIT License
Project Creator : pedromartins4
def plot_macd(stock):
p = figure(x_axis_type="datetime", plot_width=WIDTH_PLOT, plot_height=200, title="MACD (line + histogram)",
tools=TOOLS, toolbar_location='above')
up = [True if val > 0 else False for val in stock.data['macd_histogram']]
down = [True if val < 0 else False for val in stock.data['macd_histogram']]
view_upper = CDSView(source=stock, filters=[BooleanFilter(up)])
view_lower = CDSView(source=stock, filters=[BooleanFilter(down)])
p.vbar(x='date', top='macd_histogram', bottom='zeros', width=30000000, color=GREEN, source=stock, view=view_upper)
p.vbar(x='date', top='zeros', bottom='macd_histogram', width=30000000, color=RED, source=stock, view=view_lower)
# Adding an extra range for the MACD lines, because using the same axis as the histogram
# sometimes flattens them too much
p.extra_y_ranges = {'macd': DataRange1d()}
p.add_layout(LinearAxis(y_range_name='macd'), 'right')
p.line(x='date', y='macd', line_width=2, color=BLUE, source=stock, legend='MACD', muted_color=BLUE,
muted_alpha=0, y_range_name='macd')
p.line(x='date', y='macd_signal', line_width=2, color=BLUE_LIGHT, source=stock, legend='Signal',
muted_color=BLUE_LIGHT, muted_alpha=0, y_range_name='macd')
p.legend.location = "bottom_left"
p.legend.border_line_alpha = 0
p.legend.background_fill_alpha = 0
p.legend.click_policy = "mute"
p.yaxis.ticker = []
p.yaxis.axis_line_alpha = 0
return p
# RSI
def plot_rsi(stock):
View Source File : cpu.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : rapidsai
def resource_timeline(doc):
# Shared X Range for all plots
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
tools = "reset,xpan,xwheel_zoom"
source = ColumnDataSource(
{
"time": [],
"memory": [],
"cpu": [],
"disk-read": [],
"disk-write": [],
"net-read": [],
"net-sent": [],
}
)
memory_fig = figure(
title="Memory",
sizing_mode="stretch_both",
x_axis_type="datetime",
y_range=[0, psutil.virtual_memory().total],
x_range=x_range,
tools=tools,
)
memory_fig.line(source=source, x="time", y="memory")
memory_fig.yaxis.formatter = NumeralTickFormatter(format="0.0b")
cpu_fig = figure(
title="CPU",
sizing_mode="stretch_both",
x_axis_type="datetime",
y_range=[0, 100],
x_range=x_range,
tools=tools,
)
cpu_fig.line(source=source, x="time", y="cpu")
disk_fig = figure(
title="Disk I/O Bandwidth",
sizing_mode="stretch_both",
x_axis_type="datetime",
x_range=x_range,
tools=tools,
)
disk_fig.line(source=source, x="time", y="disk-read", color="blue", legend="Read")
disk_fig.line(source=source, x="time", y="disk-write", color="red", legend="Write")
disk_fig.yaxis.formatter = NumeralTickFormatter(format="0.0b")
disk_fig.legend.location = "top_left"
net_fig = figure(
title="Network I/O Bandwidth",
sizing_mode="stretch_both",
x_axis_type="datetime",
x_range=x_range,
tools=tools,
)
net_fig.line(source=source, x="time", y="net-read", color="blue", legend="Recv")
net_fig.line(source=source, x="time", y="net-sent", color="red", legend="Send")
net_fig.yaxis.formatter = NumeralTickFormatter(format="0.0b")
net_fig.legend.location = "top_left"
doc.title = "Resource Timeline"
doc.add_root(
column(cpu_fig, memory_fig, disk_fig, net_fig, sizing_mode="stretch_both")
)
last_disk_read = psutil.disk_io_counters().read_bytes
last_disk_write = psutil.disk_io_counters().write_bytes
last_net_recv = psutil.net_io_counters().bytes_recv
last_net_sent = psutil.net_io_counters().bytes_sent
last_time = time.time()
def cb():
nonlocal last_disk_read, last_disk_write, last_net_recv, last_net_sent, last_time
now = time.time()
cpu = psutil.cpu_percent()
mem = psutil.virtual_memory().used
disk = psutil.disk_io_counters()
disk_read = disk.read_bytes
disk_write = disk.write_bytes
net = psutil.net_io_counters()
net_read = net.bytes_recv
net_sent = net.bytes_sent
source.stream(
{
"time": [now * 1000], # bokeh measures in ms
"cpu": [cpu],
"memory": [mem],
"disk-read": [(disk_read - last_disk_read) / (now - last_time)],
"disk-write": [(disk_write - last_disk_write) / (now - last_time)],
"net-read": [(net_read - last_net_recv) / (now - last_time)],
"net-sent": [(net_sent - last_net_sent) / (now - last_time)],
},
1000,
)
last_disk_read = disk_read
last_disk_write = disk_write
last_net_recv = net_read
last_net_sent = net_sent
last_time = now
doc.add_periodic_callback(cb, 200)
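The dashboard above shares one DataRange1d(follow="end", follow_interval=20000) across all figures, so they scroll together as the ColumnDataSource streams. A minimal hedged sketch of that pattern for a hypothetical Bokeh server app; the column names and data are synthetic:

import random
import time

from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, DataRange1d
from bokeh.plotting import figure

# follow="end" keeps the view glued to the newest 20 seconds of data.
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
source = ColumnDataSource({"time": [], "a": [], "b": []})

fig_a = figure(x_axis_type="datetime", x_range=x_range, plot_height=200)
fig_a.line(source=source, x="time", y="a")
fig_b = figure(x_axis_type="datetime", x_range=x_range, plot_height=200)
fig_b.line(source=source, x="time", y="b")

def cb():
    now = time.time() * 1000  # Bokeh datetime axes use milliseconds
    source.stream({"time": [now], "a": [random.random()], "b": [random.random()]}, 1000)

curdoc().add_root(column(fig_a, fig_b))
curdoc().add_periodic_callback(cb, 200)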
View Source File : gpu.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : rapidsai
def nvlink_timeline(doc):
# X Range
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
tools = "reset,xpan,xwheel_zoom"
item_dict = {"time": []}
for i in range(ngpus):
item_dict["nvlink-tx-" + str(i)] = []
item_dict["nvlink-rx-" + str(i)] = []
source = ColumnDataSource(item_dict)
def _get_color(ind):
color_list = [
"blue",
"red",
"green",
"black",
"brown",
"cyan",
"orange",
"pink",
"purple",
"gold",
]
return color_list[ind % len(color_list)]
tx_fig = figure(
title="TX NVLink (per Device) [B/s]",
sizing_mode="stretch_both",
x_axis_type="datetime",
x_range=x_range,
tools=tools,
)
rx_fig = figure(
title="RX NVLink (per Device) [B/s]",
sizing_mode="stretch_both",
x_axis_type="datetime",
x_range=x_range,
tools=tools,
)
for i in range(ngpus):
tx_fig.line(
source=source, x="time", y="nvlink-tx-" + str(i), color=_get_color(i)
)
rx_fig.line(
source=source, x="time", y="nvlink-rx-" + str(i), color=_get_color(i)
)
tx_fig.yaxis.formatter = NumeralTickFormatter(format="0.0 b")
rx_fig.yaxis.formatter = NumeralTickFormatter(format="0.0 b")
doc.title = "NVLink Throughput Timeline"
doc.add_root(column(tx_fig, rx_fig, sizing_mode="stretch_both"))
counter = 1
nlinks = pynvml.NVML_NVLINK_MAX_LINKS
nvlink_state = {}
nvlink_state["tx"] = [
sum(
[
pynvml.nvmlDeviceGetNvLinkUtilizationCounter(
gpu_handles[i], j, counter
)["tx"]
for j in range(nlinks)
]
)
for i in range(ngpus)
]
nvlink_state["rx"] = [
sum(
[
pynvml.nvmlDeviceGetNvLinkUtilizationCounter(
gpu_handles[i], j, counter
)["rx"]
for j in range(nlinks)
]
)
for i in range(ngpus)
]
nvlink_state["tx-ref"] = nvlink_state["tx"].copy()
nvlink_state["rx-ref"] = nvlink_state["rx"].copy()
last_time = time.time()
def cb():
nonlocal last_time
nonlocal nvlink_state
now = time.time()
src_dict = {"time": [now * 1000]}
nvlink_state["tx-ref"] = nvlink_state["tx"].copy()
nvlink_state["rx-ref"] = nvlink_state["rx"].copy()
nvlink_state["tx"] = [
sum(
[
pynvml.nvmlDeviceGetNvLinkUtilizationCounter(
gpu_handles[i], j, counter
)["tx"]
for j in range(nlinks)
]
)
for i in range(ngpus)
]
nvlink_state["rx"] = [
sum(
[
pynvml.nvmlDeviceGetNvLinkUtilizationCounter(
gpu_handles[i], j, counter
)["rx"]
for j in range(nlinks)
]
)
for i in range(ngpus)
]
tx_diff = [
max(a - b, 0.0) * 5.0
for (a, b) in zip(nvlink_state["tx"], nvlink_state["tx-ref"])
]
rx_diff = [
max(a - b, 0.0) * 5.0
for (a, b) in zip(nvlink_state["rx"], nvlink_state["rx-ref"])
]
for i in range(ngpus):
src_dict["nvlink-tx-" + str(i)] = [tx_diff[i]]
src_dict["nvlink-rx-" + str(i)] = [rx_diff[i]]
source.stream(src_dict, 1000)
last_time = now
doc.add_periodic_callback(cb, 200)
def gpu_resource_timeline(doc):
View Source File : gpu.py
License : BSD 3-Clause "New" or "Revised" License
Project Creator : rapidsai
def gpu_resource_timeline(doc):
memory_list = [
pynvml.nvmlDeviceGetMemoryInfo(handle).total / (1024 * 1024)
for handle in gpu_handles
]
gpu_mem_max = max(memory_list) * (1024 * 1024)
gpu_mem_sum = sum(memory_list)
# Shared X Range for all plots
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
tools = "reset,xpan,xwheel_zoom"
item_dict = {
"time": [],
"gpu-total": [],
"memory-total": [],
"rx-total": [],
"tx-total": [],
}
for i in range(ngpus):
item_dict["gpu-" + str(i)] = []
item_dict["memory-" + str(i)] = []
source = ColumnDataSource(item_dict)
def _get_color(ind):
color_list = [
"blue",
"red",
"green",
"black",
"brown",
"cyan",
"orange",
"pink",
"purple",
"gold",
]
return color_list[ind % len(color_list)]
memory_fig = figure(
title="Memory Utilization (per Device) [B]",
sizing_mode="stretch_both",
x_axis_type="datetime",
y_range=[0, gpu_mem_max],
x_range=x_range,
tools=tools,
)
for i in range(ngpus):
memory_fig.line(
source=source, x="time", y="memory-" + str(i), color=_get_color(i)
)
memory_fig.yaxis.formatter = NumeralTickFormatter(format="0.0 b")
gpu_fig = figure(
title="GPU Utilization (per Device) [%]",
sizing_mode="stretch_both",
x_axis_type="datetime",
y_range=[0, 100],
x_range=x_range,
tools=tools,
)
for i in range(ngpus):
gpu_fig.line(source=source, x="time", y="gpu-" + str(i), color=_get_color(i))
tot_fig = figure(
title="Total Utilization [%]",
sizing_mode="stretch_both",
x_axis_type="datetime",
y_range=[0, 100],
x_range=x_range,
tools=tools,
)
tot_fig.line(
source=source, x="time", y="gpu-total", color="blue", legend="Total-GPU"
)
tot_fig.line(
source=source, x="time", y="memory-total", color="red", legend="Total-Memory"
)
tot_fig.legend.location = "top_left"
pci_fig = figure(
title="Total PCI Throughput [B/s]",
sizing_mode="stretch_both",
x_axis_type="datetime",
x_range=x_range,
tools=tools,
)
pci_fig.line(source=source, x="time", y="tx-total", color="blue", legend="TX")
pci_fig.line(source=source, x="time", y="rx-total", color="red", legend="RX")
pci_fig.yaxis.formatter = NumeralTickFormatter(format="0.0 b")
pci_fig.legend.location = "top_left"
doc.title = "Resource Timeline"
doc.add_root(
column(gpu_fig, memory_fig, tot_fig, pci_fig, sizing_mode="stretch_both")
)
last_time = time.time()
def cb():
nonlocal last_time
now = time.time()
src_dict = {"time": [now * 1000]}
gpu_tot = 0
mem_tot = 0
tx_tot = 0
rx_tot = 0
for i in range(ngpus):
gpu = pynvml.nvmlDeviceGetUtilizationRates(gpu_handles[i]).gpu
mem = pynvml.nvmlDeviceGetMemoryInfo(gpu_handles[i]).used
tx = (
pynvml.nvmlDeviceGetPcieThroughput(
gpu_handles[i], pynvml.NVML_PCIE_UTIL_TX_BYTES
)
* 1024
)
rx = (
pynvml.nvmlDeviceGetPcieThroughput(
gpu_handles[i], pynvml.NVML_PCIE_UTIL_RX_BYTES
)
* 1024
)
gpu_tot += gpu
mem_tot += mem / (1024 * 1024)
rx_tot += rx
tx_tot += tx
src_dict["gpu-" + str(i)] = [gpu]
src_dict["memory-" + str(i)] = [mem]
src_dict["gpu-total"] = [gpu_tot / ngpus]
src_dict["memory-total"] = [(mem_tot / gpu_mem_sum) * 100]
src_dict["tx-total"] = [tx_tot]
src_dict["rx-total"] = [rx_tot]
source.stream(src_dict, 1000)
last_time = now
doc.add_periodic_callback(cb, 200)
View Source File : test_ranges.py
License : MIT License
Project Creator : rthorst
def test_basic(self):
r = DataRange1d()
check_properties_existence(r, [
"callback",
"names",
"renderers",
"range_padding",
"range_padding_units",
"flipped",
"follow",
"follow_interval",
"default_span",
"start",
"end",
"bounds",
"min_interval",
"max_interval"],
)
def test_init_with_no_arguments(self):
View Source File : test_objects.py
License : MIT License
Project Creator : rthorst
def large_plot(n):
from bokeh.models import (
Plot, LinearAxis, Grid, GlyphRenderer,
ColumnDataSource, DataRange1d, PanTool, ZoomInTool, ZoomOutTool, WheelZoomTool, BoxZoomTool,
BoxSelectTool, SaveTool, ResetTool
)
from bokeh.models.layouts import Column
from bokeh.models.glyphs import Line
col = Column()
objects = set([col])
for i in xrange(n):
source = ColumnDataSource(data=dict(x=[0, i + 1], y=[0, i + 1]))
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
xaxis = LinearAxis()
plot.add_layout(xaxis, "below")
yaxis = LinearAxis()
plot.add_layout(yaxis, "left")
xgrid = Grid(dimension=0)
plot.add_layout(xgrid, "center")
ygrid = Grid(dimension=1)
plot.add_layout(ygrid, "center")
tickers = [xaxis.ticker, xaxis.formatter, yaxis.ticker, yaxis.formatter]
glyph = Line(x='x', y='y')
renderer = GlyphRenderer(data_source=source, glyph=glyph)
plot.renderers.append(renderer)
pan = PanTool()
zoom_in = ZoomInTool()
zoom_out = ZoomOutTool()
wheel_zoom = WheelZoomTool()
box_zoom = BoxZoomTool()
box_select = BoxSelectTool()
save = SaveTool()
reset = ResetTool()
tools = [pan, zoom_in, zoom_out, wheel_zoom, box_zoom, box_select, save, reset]
plot.add_tools(*tools)
col.children.append(plot)
objects |= set([
xdr, ydr,
xaxis, yaxis,
xgrid, ygrid,
renderer, renderer.view, glyph,
source, source.selected, source.selection_policy,
plot, plot.x_scale, plot.y_scale, plot.toolbar, plot.title,
box_zoom.overlay, box_select.overlay,
] + tickers + tools)
return col, objects
class TestMetaModel(object):