Here are the examples of the python api bokeh.embed.components taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
21 Examples
3
Source : container.py
with MIT License
from sfu-db
with MIT License
from sfu-db
def __init__(self, **param: Any) -> None:
    """Initialize the report container from keyword parameters.

    Copies the class-level defaults (``_title``, ``_resources``,
    ``_container_width``) onto the instance, then applies each keyword:

    * ``layout``: a sequence of Bokeh models, embedded via
      ``bokeh.embed.components`` into ``self.components``; an empty
      layout stores the empty pair ``("", [])`` instead.
    * ``meta``: stored verbatim, and additionally filtered into
      ``self.figs`` (every entry except the literal string ``"Stats"``,
      which presumably names a non-figure tab — TODO confirm).
    * anything else: set verbatim as an instance attribute.
    """
    self.title = self._title
    self.resources = self._resources
    self.container_width = self._container_width
    for attr, value in param.items():
        if attr == "layout":
            if len(value) == 0:
                # No figures: empty script plus an empty list of divs,
                # mirroring the shape components() would return.
                setattr(self, "components", ("", []))
            else:
                # (script, [div, ...]) pair for template embedding.
                setattr(self, "components", components(value))
        elif attr == "meta":
            setattr(self, attr, value)
            # Figure tab names are the meta entries minus "Stats".
            figs = [val for val in value if val != "Stats"]
            setattr(self, "figs", figs)
        else:
            setattr(self, attr, value)
def __getitem__(self, key: str) -> Any:
3
Source : timeseries.py
with MIT License
from SolarArbiter
with MIT License
from SolarArbiter
def to_components(f):
    """Decorator: when the wrapped call passes ``return_components=True``,
    convert the returned Bokeh object into a ``(script, div)`` pair via
    ``bokeh.embed.components``; otherwise call through unchanged.

    A ``None`` result is passed back as-is in either mode.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Consume the flag so it never reaches the wrapped function.
        if not kwargs.pop('return_components', False):
            return f(*args, **kwargs)
        result = f(*args, **kwargs)
        if result is None:
            return result
        return components(result)
    return wrapper
@to_components
3
Source : app.py
with MIT License
from wywongbd
with MIT License
from wywongbd
def index():
    """Flask view: build the Bokeh figure and render it into index.html.

    NOTE(review): ``bokeh.embed.components`` accepts a model (plus keyword
    arguments only); the extra ``CDN`` and path positional arguments here
    match the signature of ``bokeh.embed.autoload_static`` instead — verify
    which function was intended against the installed bokeh version.
    """
    # Create the plot
    plot = create_figure()
    # tag here means the tag to reference to the new bokeh chart, saved as a js file
    js, plot_tag = components(plot, CDN, "/Users/brendantham/Desktop/FYP/Flask/static/plots")
    # TODO:
    # 1) fix URLS
    # 2) figure out where to store the js files for future load use
    # with open('/Users/brendantham/Desktop/FYP/Flask/static/plots/plot1.js', 'w') as f:
    #     f.write(js)
    return render_template("index.html", script1 = js, plot1 = plot_tag)
# With debug=True, Flask server will auto-reload
# when there are code changes
if __name__ == '__main__':
0
Source : tsne.py
with MIT License
from evidence-surveillance
with MIT License
from evidence-surveillance
def test_bokeh(matrices):
    """Render a chrome-less scatter over a pre-rendered t-SNE background
    image and return the concatenated Bokeh embed string (script + div).

    ``matrices`` is assumed to be an iterable of dicts with a sized
    ``'score'`` entry and a 2-column ``'vectors'`` array of x/y
    coordinates — TODO confirm against the caller.
    """
    p = figure(plot_width=550, plot_height=550)
    # Hard-coded extents matching the background image's coordinate frame.
    xlim = (-69.551894338989271, 64.381507070922851)
    ylim = (-70.038440855407714, 70.644477995300306)
    # Strip all chrome: axes, border, outline, grid, toolbar.
    p.axis.visible = False
    p.border_fill_color = None
    p.outline_line_color = None
    # NOTE(review): ``None`` also hides the grid, but ``False`` is the
    # conventional value — confirm intent.
    p.grid.visible = None
    p.toolbar.logo = None
    p.toolbar_location = None
    p.x_range = Range1d(start=xlim[0],end =xlim[1])
    p.y_range = Range1d(start=ylim[0],end =ylim[1])
    # p.image_url(url=['color_scatter.png'], x=0, y=0, w=None, h=None, anchor="center")
    # Background image anchored at the top-left of the plot extents; the
    # w/h fudge factors visually align it — presumably tuned by eye.
    p.image_url(url=['static/images/tsne'+LATEST_DATE+'_sml.png'], x=xlim[0]-2.0, y=ylim[1], w=ylim[1]-ylim[0]-2.7, h=(xlim[1]-xlim[0])+6.7, anchor="top_left")
    for i, matrix in enumerate(matrices):
        if len(matrix['score']) > 0:
            # Scored matrices: orange points.
            p.scatter(matrix['vectors'][:, 0], matrix['vectors'][:, 1],
                      fill_color='#fc6e2d', fill_alpha=0.8,
                      line_color=None, size=2)
        else:
            # Unscored matrices: purple, slightly larger points.
            p.scatter(matrix['vectors'][:, 0], matrix['vectors'][:, 1],
                      fill_color='#5f4af9', fill_alpha=0.8,
                      line_color=None, size=2.5)
    # export_png(p, filename='color_scatter.png')
    # output_file("color_scatter.html", title="color_scatter.py example")
    # components() returns (script, div); return them as one HTML snippet.
    comp = components(p)
    return comp[0]+comp[1]
    # return file_html(p, CDN)
    # save(p)
if __name__ == "__main__":
0
Source : dashboard.py
with BSD 3-Clause "New" or "Revised" License
from jcrist
with BSD 3-Clause "New" or "Revised" License
from jcrist
def build_html():
    """Build the html, to be served by IndexHandler.

    Constructs two linked figures (OHLC on top, MACD below) fed by a
    single AjaxDataSource polling ``./data`` every INTERVAL ms, embeds
    them with ``bokeh.embed.components``, and renders the page template
    with CDN resources.
    """
    source = AjaxDataSource(data_url='./data',
                            polling_interval=INTERVAL,
                            method='GET')
    # OHLC plot
    p = figure(plot_height=400,
               title='OHLC',
               sizing_mode='scale_width',
               tools="xpan,xwheel_zoom,xbox_zoom,reset",
               x_axis_type=None,
               y_axis_location="right",
               y_axis_label="Price ($)")
    # Follow the live right edge of the stream, showing the last 100 units.
    p.x_range.follow = "end"
    p.x_range.follow_interval = 100
    p.x_range.range_padding = 0
    # Raw average (faint) and moving average (bold) price lines.
    p.line(x='time', y='average', alpha=0.25, line_width=3, color='black',
           source=source)
    p.line(x='time', y='ma', alpha=0.8, line_width=2, color='steelblue',
           source=source)
    # Candlesticks: thin low/high wick plus thick open/close body whose
    # color comes from the 'color' column of the data source.
    p.segment(x0='time', y0='low', x1='time', y1='high', line_width=2,
              color='black', source=source)
    p.segment(x0='time', y0='open', x1='time', y1='close', line_width=8,
              color='color', source=source, alpha=0.8)
    # MACD plot — shares p's x_range so panning/zooming stays in sync.
    p2 = figure(plot_height=200,
                title='MACD',
                sizing_mode='scale_width',
                x_range=p.x_range,
                x_axis_label='Time (s)',
                tools="xpan,xwheel_zoom,xbox_zoom,reset",
                y_axis_location="right")
    p2.line(x='time', y='macd', color='darkred', line_width=2, source=source)
    p2.line(x='time', y='macd9', color='navy', line_width=2, source=source)
    # MACD histogram drawn as vertical segments from zero.
    p2.segment(x0='time', y0=0, x1='time', y1='macdh', line_width=6, color='steelblue',
               alpha=0.5, source=source)
    # Combine plots together
    plot = gridplot([[p], [p2]], toolbar_location="left", plot_width=1000)
    # Compose html from plots and template
    script, div = components(plot, theme=theme)
    html = template.render(resources=CDN.render(), script=script, div=div)
    return html
class IndexHandler(RequestHandler):
0
Source : html_utils.py
with MIT License
from martno
with MIT License
from martno
def create_charts(uuids):
    """Build an HTML fragment with one Bokeh line chart per scalar name,
    overlaying the scalar series of every experiment in *uuids*.

    Returns the header lines (one per experiment) followed by the
    embedded ``script``/``div`` pairs, joined by blank lines.
    """
    paths = [Path(c.DEFAULT_PARENT_FOLDER)/uuid for uuid in uuids]
    html = ''
    # Header: one colored line per experiment, titled when available.
    # NOTE(review): the literal ' < br>' looks like a scrape-mangled
    # '<br>' tag — confirm against the rendering template before changing.
    for uuid in uuids:
        experiment_json = utils.load_experiment_json(uuid)
        title = experiment_json['title']
        if title:
            html += '{} {} - {}\n < br>'.format(color_circle(uuid), utils.get_short_uuid(uuid), title)
        else:
            html += '{} {}\n < br>'.format(color_circle(uuid), utils.get_short_uuid(uuid))
    scalar_names = get_all_scalar_names(paths)
    # Shared hover tool showing step/value at the cursor's x position.
    hover = HoverTool(
        tooltips=[
            ('step', '@x'),
            ('value', '@y'),
        ],
        mode='vline'
    )
    plots = []
    for scalar_name in scalar_names:
        plot = figure(
            tools=[hover, 'reset', 'pan', 'wheel_zoom', 'box_zoom'],
            title=scalar_name,
            x_axis_label='Step',
            width=FIGURE_WIDTH,
            height=FIGURE_HEIGHT,
        )
        # One line per experiment that recorded this scalar.
        for uuid, path in zip(uuids, paths):
            scalar_file = path / c.SCALARS_FOLDER / '{}.csv'.format(scalar_name)
            if scalar_file.is_file():
                df = pd.read_csv(scalar_file)
                source = ColumnDataSource(data={
                    'x': df['step'],
                    'y': df['value'],
                })
                # Deterministic per-experiment color derived from the uuid.
                color = colorhash.ColorHash(uuid).rgb
                # NOTE(review): 'legend=' is deprecated in newer bokeh
                # (use legend_label=) — confirm the pinned bokeh version.
                plot.line('x', 'y',
                          source=source,
                          line_color=bokeh.colors.RGB(*color),
                          legend=utils.get_short_uuid(uuid),
                          line_width=2,
                          )
        plot.legend.location = "top_left"
        plot.legend.click_policy = "hide"
        # Embed each figure independently as a script/div pair.
        script, div = components(plot)
        plots.append('{}\n{}'.format(script, div))
    return html + '\n\n'.join(plots)
def get_all_scalar_names(paths):
0
Source : views.py
with MIT License
from mwang87
with MIT License
from mwang87
def compoundenrichment():
    """Flask POST handler: compute attribute-term enrichment for a compound.

    For each (attribute, term) pair in the metadata (minus a blacklist),
    computes what fraction of the files carrying that term also contain
    the requested compound, then renders one tabbed bar chart per
    attribute. Returns a JSON payload with the enrichment table and the
    Bokeh ``script``/``div`` embed strings.
    """
    # Metadata attributes excluded from enrichment (identifiers,
    # free-text fields, etc.).
    blacklist_attributes = ["ATTRIBUTE_DatasetAccession", "ATTRIBUTE_Curated_BodyPartOntologyIndex", "filename", "UniqueSubjectID", "UBERONOntologyIndex", "SubjectIdentifierAsRecorded", "SampleCollectionDateandTime", "LatitudeandLongitude", "InternalStandardsUsed", "DepthorAltitudeMeters", "DOIDOntologyIndex", "Country", "ComorbidityListDOIDIndex", "AgeInYears"]
    compoundname = request.form['compoundname']
    compound_db = Compound.select().where(Compound.compoundname == compoundname)
    # All files in which this compound was observed.
    compound_filenames = [filename.filepath for filename in Filename.select().join(CompoundFilenameConnection).where(CompoundFilenameConnection.compound==compound_db)]
    enrichment_list = []
    # Optional filename filter from the request; empty or absent means
    # "all files".
    if "filenames" in request.form:
        filter_filenames = set(json.loads(request.form["filenames"]))
        if len(filter_filenames) == 0:
            filter_filenames = set([filename.filepath for filename in Filename.select()])
    else:
        filter_filenames = set([filename.filepath for filename in Filename.select()])
    # Count of files per (attribute, term) pair.
    all_metadata = FilenameAttributeConnection.select(Attribute.categoryname, AttributeTerm.term, fn.COUNT(FilenameAttributeConnection.filename).alias('ct')).join(Attribute).switch(FilenameAttributeConnection).join(AttributeTerm).group_by(Attribute.categoryname, AttributeTerm.term).dicts()
    for attribute_term_pair in all_metadata:
        # if attribute_term_pair["categoryname"].find("ATTRIBUTE_") == -1:
        #     continue
        if attribute_term_pair["categoryname"] in blacklist_attributes:
            continue
        attribute_files_db = Filename.select().join(FilenameAttributeConnection).where(FilenameAttributeConnection.attributeterm == attribute_term_pair["term"]).where(FilenameAttributeConnection.attribute == attribute_term_pair["categoryname"])
        attribute_filenames = set([filename.filepath for filename in attribute_files_db]).intersection(filter_filenames)
        if len(attribute_filenames) > 0:
            # Fraction of this term's files that also contain the compound.
            intersection_filenames = set(compound_filenames).intersection(set(attribute_filenames)).intersection(filter_filenames)
            enrichment_dict = {}
            enrichment_dict["attribute_name"] = attribute_term_pair["categoryname"]
            enrichment_dict["attribute_term"] = attribute_term_pair["term"]
            enrichment_dict["totalfiles"] = len(attribute_filenames)
            enrichment_dict["compoundfiles"] = len(intersection_filenames)
            enrichment_dict["percentage"] = len(intersection_filenames)/float(len(attribute_filenames))
            enrichment_list.append(enrichment_dict)
    enrichment_list = sorted(enrichment_list, key=lambda list_object: list_object["percentage"], reverse=True)
    # Creating Bokeh Plot Here
    enrichment_df = pd.DataFrame(enrichment_list)
    # Finding all non-zero entries
    enrichment_df = enrichment_df[enrichment_df["totalfiles"] != 0]
    all_attributes = list(set(list(enrichment_df["attribute_name"])))
    from bokeh.models import Panel, Tabs
    from bokeh.plotting import figure
    from bokeh.embed import components
    all_tabs = []
    # One tab per attribute, bar chart of per-term percentages > 0.
    for attribute in all_attributes:
        filtered_df = enrichment_df[enrichment_df["attribute_name"] == attribute]
        filtered_df = filtered_df[filtered_df["percentage"] > 0]
        all_terms = list(filtered_df["attribute_term"])
        all_percentage = list(filtered_df["percentage"])
        plot = figure(x_range=all_terms, plot_height=300, plot_width=1200, sizing_mode="scale_width", title="{} Percentage of Terms".format(attribute))
        plot.vbar(x=all_terms, top=all_percentage, width=0.9)
        tab = Panel(child=plot, title=attribute)
        all_tabs.append(tab)
    tabs = Tabs(tabs=all_tabs)
    # Embed the whole tab widget as one script/div pair.
    script, div = components(tabs)
    drawing_dict = {}
    drawing_dict["div"] = div
    drawing_dict["script"] = script
    return_dict = {}
    return_dict["enrichment_list"] = enrichment_list
    return_dict["drawings"] = drawing_dict
    return json.dumps(return_dict)
@app.route('/filesenrichment', methods=['POST'])
0
Source : base.py
with MIT License
from PatrikHlobil
with MIT License
from PatrikHlobil
def embedded_html(fig, resources="CDN"):
    """Return an HTML string embedding the Bokeh figure *fig*.

    The string contains the figure's div and script (from
    ``bokeh.embed.components``), optionally preceded by the CDN
    JS/CSS resource tags.

    Parameters
    ----------
    fig : a Bokeh figure to embed.
    resources : "CDN" to prepend CDN resource tags, None to omit them;
        "raw" is reserved and currently unimplemented.

    Raises
    ------
    NotImplementedError : if resources == "raw".
    ValueError : for any other unrecognized resources value.
    """
    pieces = []
    if resources == "CDN":
        # CDN <script>/<link> tags must precede the plot script.
        pieces.append(get_bokeh_resources())
    elif resources == "raw":
        raise NotImplementedError(" < resources> is not yet implemented")
    elif resources is not None:
        raise ValueError(" < resources> only accept 'CDN', 'raw' or None.")
    # Add plot script and div
    script, div = components(fig)
    pieces.append("\n\n" + div + "\n\n" + script)
    return "".join(pieces)
def get_bokeh_resources():
0
Source : twitter_sent.py
with MIT License
from rthorst
with MIT License
from rthorst
def plot_tweets(tweets, sentiment_scores):
    """
    Create a histogram-style barplot of tweets and their sentiment.
    Return a bokeh plot object, expressed as a tuple of (resources, script, div).
    Where :
        resources: some CSS, etc. that goes in the head of the webpage for styling the plot.
        script: javascript for the plot to function. expressed as string.
        div: html div container for the plot. expressed as string.
    """
    # Sort tweets from negative to positive.
    # This step is not strictly necessary, but makes it easier to see the overall shape of the data.
    sorted_indices = np.argsort(sentiment_scores)
    sentiment_scores = np.array(sentiment_scores)[sorted_indices]
    tweets = np.array(tweets)[sorted_indices]
    # Express the data as a bokeh data source object.
    source = ColumnDataSource(data={
        "text": tweets,
        "sentiment": sentiment_scores,
        "x": np.arange(len(tweets)),
    })
    """
    Create plot.
    """
    # Create plot object.
    width = 0.9
    p = figure(x_axis_label="Tweet", y_axis_label="Sentiment (0 = Neutral)")
    # One bar per tweet at integer x positions.
    p.vbar(source=source, x="x", top="sentiment", width=width)
    # Add hover tool, allowing mouseover to view text and sentiment.
    hover = HoverTool(
        tooltips=[
            ("text", "@text"),
            ("sentiment", "@sentiment")
        ],
        formatters={
            "text": "printf",
            "sentiment": "printf"
        },
        mode="vline"
    )
    p.add_tools(hover)
    """
    Format plot.
    """
    # axis font size
    p.xaxis.axis_label_text_font_size = "15pt"
    p.yaxis.axis_label_text_font_size = "15pt"
    # remove tick marks from axes
    p.xaxis.major_tick_line_color = None
    p.xaxis.minor_tick_line_color = None
    p.yaxis.major_tick_line_color = None
    p.yaxis.minor_tick_line_color = None
    # adjust plot width, height
    scale = 1.5
    p.plot_height = int(250 * scale)
    p.plot_width = int(450 * scale)
    # remove toolbar (e.g. move, resize, etc) from right of plot.
    p.toolbar.logo = None
    p.toolbar_location = None
    # remove gridlines
    p.xgrid.visible = False
    p.ygrid.visible = False
    # remove x axis tick labels (done by setting label fontsize to 0 pt)
    p.xaxis.major_label_text_font_size = '0pt'
    """
    Export plot
    """
    # Create resources string, which is CSS, etc. that goes in the head of
    # the page (bokeh INLINE resources rendered to HTML tags).
    resources = INLINE.render()
    # Get javascript (script) and HTML div (div) for the plot.
    script, div = components(p)
    return (resources, script, div)
def plot_reason(tweets, sentiment_scores):
0
Source : twitter_sent.py
with MIT License
from rthorst
with MIT License
from rthorst
def plot_reason(tweets, sentiment_scores):
    """
    Plot the top words that lead us to the classification as positive or negative.
    Return:
        script : javascript for the plot, expressed as string.
        div : html container for the plot, expressed as string.
    NOTE: requires the shared resources attribute from plot_tweets() in the HTML header.
    """
    """
    Calculate the sentiment of each individual token in the tweets.
    """
    # list tokens, keeping only unique tokens (e.g. remove repeated words).
    all_toks = []
    for tweet in tweets:
        toks = tweet.lower().split()
        all_toks.extend(toks)
    all_toks = [tok for tok in set(all_toks)]  # remove duplicates.
    # calculate sentiment of each token.
    sm = VaderSentimentModel()
    toks_sentiment = [sm.classify_sentiment(tok) for tok in all_toks]
    """
    sort tokens by sentiment.
    if overall valence is negative, sort negative to postitive.
    if overall valence is positive, sort positive to negative.
    thus, in any case, the earliest elements in the list are the most informative words.
    """
    nwords = 20
    # negative? sort neg -> positive.
    if np.mean(sentiment_scores) < 0:
        sorted_indices = np.argsort(toks_sentiment)
    # else (positive)? sort positive -> negative
    else:
        sorted_indices = np.argsort(toks_sentiment)[::-1]
    # toks_to_plot: shape (nwords, ) list of informative tokens.
    # sentiment_to_plot: shape (nwords, ) list of sentiment of these tokens.
    toks_to_plot = np.array(all_toks)[sorted_indices][:nwords]
    sentiment_to_plot = np.array(toks_sentiment)[sorted_indices][:nwords]
    # convert all sentiment scores to positive values.
    # this is for DISPLAY only, to make all plots go from left to right.
    # we still retain the correct tokens and sorting order.
    sentiment_to_plot = np.array([abs(v) for v in sentiment_to_plot])
    """
    Set up plot.
    - create data source object.
    - define formatting variables.
    """
    # Horizontal offset so word labels sit just past the bar ends.
    text_offset = 0.1
    source = ColumnDataSource(data={
        "token": toks_to_plot,
        "sentiment": sentiment_to_plot,
        # Reversed so the most informative word appears at the top.
        "x": np.arange(len(toks_to_plot))[::-1],
        "label_x": sentiment_to_plot + text_offset
    })
    """
    Make plot.
    """
    # Create initial plot.
    width = 0.9
    # Pad the x range by 1 so the longest bar's label still fits.
    xrange = [0, max(sentiment_to_plot) + 1]
    p2 = figure(x_axis_label="Sentiment", y_axis_label="Word", x_range=xrange)
    p2.hbar(source=source, y="x", right="sentiment", height=width)
    """
    Format plot.
    """
    # Annotate each bar with the word being represented.
    glyph = Text(x="label_x", y="x", text="token")
    p2.add_glyph(source, glyph)
    # Axis labels.
    p2.xaxis.axis_label_text_font_size = "15pt"
    p2.yaxis.axis_label_text_font_size = "15pt"
    # Remove ticks.
    p2.xaxis.major_tick_line_color = None
    p2.xaxis.minor_tick_line_color = None
    p2.yaxis.major_tick_line_color = None
    p2.yaxis.minor_tick_line_color = None
    # Remove y axis tick labels.
    p2.yaxis.major_label_text_font_size = '0pt'
    # Plot width, height.
    scale = 1.5
    p2.plot_height = int(250 * scale)
    p2.plot_width = int(250 * scale)
    # remove toolbar (e.g. move, resize, etc) from right of plot.
    p2.toolbar.logo = None
    p2.toolbar_location = None
    # remove gridlines
    p2.xgrid.visible = False
    p2.ygrid.visible = False
    # remove x axis tick labels (set font to 0pt)
    p2.xaxis.major_label_text_font_size = '0pt'
    # get bokeh component for plot 2.
    script2, div2 = components(p2)
    return (script2, div2)
class MainPage(webapp2.RequestHandler):
0
Source : diff_formatter.py
with MIT License
from sfu-db
with MIT License
from sfu-db
def format_basic(df_list: List[pd.DataFrame], cfg: Config) -> Dict[str, Any]:
    """
    Format basic version.
    Parameters
    ----------
    df_list
        The DataFrames for which data are calculated.
    cfg
        The config dict user passed in. E.g. config = {"hist.bins": 20}
        Without user's specifications, the default is "auto"
    Returns
    -------
    Dict[str, Any]
        A dictionary in which formatted data is stored.
        This variable acts like an API in passing data to the template engine.
    """
    # pylint: disable=too-many-locals,too-many-statements,too-many-branches
    # aggregate all computations
    final_results: Dict[str, Any] = {"dfs": []}
    delayed_results: List[Any] = []
    figs_var: List[Figure] = []
    dask_results = {}
    # Stage the per-DataFrame computations lazily; nothing is computed
    # until the single dask.compute call below.
    for df in df_list:
        df = EDAFrame(df)
        setattr(getattr(cfg, "plot"), "report", True)
        data = basic_computations(df, cfg)
        with catch_warnings():
            # Suppress known-benign numpy warnings raised while the
            # delayed graph is assembled.
            filterwarnings(
                "ignore",
                "invalid value encountered in true_divide",
                category=RuntimeWarning,
            )
            filterwarnings(
                "ignore",
                "overflow encountered in long_scalars",
                category=RuntimeWarning,
            )
            # data = dask.compute(data)
            delayed_results.append(data)
    res_plots = dask.delayed(_format_plots)(cfg=cfg, df_list=df_list)
    dask_results["df_computations"] = delayed_results
    dask_results["plots"] = res_plots
    # Evaluate all delayed work in one pass; result is a 1-tuple.
    dask_results = dask.compute(dask_results)
    # Format overview + variables sections per DataFrame.
    for df, data in zip(df_list, dask_results[0]["df_computations"]):  # type: ignore
        res_overview = _format_overview(data, cfg)  # type: ignore
        res_variables = _format_variables(EDAFrame(df), cfg, data)  # type: ignore
        res = {**res_overview, **res_variables}
        final_results["dfs"].append(res)
    layout = dask_results[0]["plots"]["layout"]  # type: ignore
    # Tabs may wrap their figure in a container; unwrap when possible.
    for tab in layout:
        try:
            fig = tab.children[0]
        except AttributeError:
            fig = tab
        figs_var.append(fig)
    # Embed all figures in one components() call -> (script, [divs]).
    plots = components(figs_var)
    final_results["graphs"] = plots
    # NOTE(review): "legend_lables" is a (misspelled) template key — the
    # template must use the same spelling, so it cannot be renamed here
    # in isolation.
    final_results["legend_lables"] = [
        {"label": label, "color": color}
        for label, color in zip(cfg.diff.label, CATEGORY10[: len(cfg.diff.label)])  # type: ignore
    ]
    return final_results
def basic_computations(df: EDAFrame, cfg: Config) -> Dict[str, Any]:
0
Source : diff_formatter.py
with MIT License
from sfu-db
with MIT License
from sfu-db
def _format_variables(df: EDAFrame, cfg: Config, data: Dict[str, Any]) -> Dict[str, Any]:
    """Formatting of variables section.

    For each column, builds an Intermediate for its EDA dtype, renders
    the figures, and embeds them via ``bokeh.embed.components``. Returns
    a dict with ``has_variables`` and a per-column ``variables`` mapping.
    """
    res: Dict[str, Any] = {}
    # variables
    if not cfg.variables.enable:
        res["has_variables"] = False
        return res
    res["variables"] = {}
    res["has_variables"] = True
    for col in df.columns:
        try:
            stats: Any = None  # needed for pylint
            dtp = df.get_eda_dtype(col)
            tab_names: List[str] = []
            # Dispatch on the column's EDA dtype to pick the visual type,
            # the stats formatter, and the tab names shown in the report.
            if isinstance(dtp, Continuous):
                itmdt = Intermediate(col=col, data=data[col], visual_type="numerical_column")
                stats = format_num_stats(data[col])
                tab_names = ["Stats", "Histogram", "KDE Plot", "Normal Q-Q Plot"]
            elif type(dtp) in [Nominal, SmallCardNum, GeoGraphy, GeoPoint]:
                itmdt = Intermediate(col=col, data=data[col], visual_type="categorical_column")
                stats = format_cat_stats(
                    data[col]["stats"], data[col]["len_stats"], data[col]["letter_stats"]
                )
                tab_names = ["Stats", "Word Length", "Pie Chart", "Word Cloud", "Word Frequency"]
            elif isinstance(dtp, DateTime):
                itmdt = Intermediate(
                    col=col,
                    data=data[col]["stats"],
                    line=data[col]["line"],
                    visual_type="datetime_column",
                )
                stats = stats_viz_dt(data[col]["stats"])
            else:
                raise RuntimeError(f"the type of column {col} is unknown: {type(dtp)}")
            rndrd = render(itmdt, cfg)
            layout = rndrd["layout"]
            figs_var: List[Figure] = []
            # Tabs may wrap their figure in a container; unwrap when possible.
            for tab in layout:
                try:
                    fig = tab.children[0]
                except AttributeError:
                    fig = tab
                # fig.title = Title(text=tab.title, align="center")
                figs_var.append(fig)
            # One components() call per column -> (script, [divs]).
            comp = components(figs_var)
            res["variables"][col] = {
                "tabledata": stats,
                "col_type": itmdt.visual_type.replace("_column", ""),
                "tab_names": tab_names,
                "plots": comp,
            }
        except:
            # Identify the failing column before re-raising unchanged.
            print(f"error happended in column:{col}", file=sys.stderr)
            raise
    return res
def _format_plots(
0
Source : formatter.py
with MIT License
from sfu-db
with MIT License
from sfu-db
def _format_variables(df: EDAFrame, cfg: Config, data: Dict[str, Any]) -> Dict[str, Any]:
    """Format the variables section of the report.

    For each column, builds an Intermediate for its EDA dtype, renders
    the figures, and embeds them via ``bokeh.embed.components``. Returns
    a dict with ``has_variables`` and a per-column ``variables`` mapping
    (stats table, embedded plots, tab metadata, and insights).
    """
    res: Dict[str, Any] = {}
    # variables
    if not cfg.variables.enable:
        res["has_variables"] = False
        return res
    res["variables"] = {}
    res["has_variables"] = True
    for col in df.columns:
        try:
            stats: Any = None  # needed for pylint
            dtp = df.get_eda_dtype(col)
            # Dispatch on the column's EDA dtype to pick the visual type
            # and the matching stats formatter.
            if isinstance(dtp, Continuous):
                itmdt = Intermediate(col=col, data=data[col], visual_type="numerical_column")
                stats = format_num_stats(data[col])
            elif type(dtp) in [Nominal, SmallCardNum, GeoGraphy, GeoPoint]:
                itmdt = Intermediate(col=col, data=data[col], visual_type="categorical_column")
                stats = format_cat_stats(
                    data[col]["stats"], data[col]["len_stats"], data[col]["letter_stats"]
                )
            elif isinstance(dtp, DateTime):
                itmdt = Intermediate(
                    col=col,
                    data=data[col]["stats"],
                    line=data[col]["line"],
                    visual_type="datetime_column",
                )
                stats = stats_viz_dt(data[col]["stats"])
            else:
                raise RuntimeError(f"the type of column {col} is unknown: {type(dtp)}")
            rndrd = render(itmdt, cfg)
            layout = rndrd["layout"]
            figs_var: List[Figure] = []
            # Tabs may wrap their figure in a container; unwrap when possible.
            for tab in layout:
                try:
                    fig = tab.children[0]
                except AttributeError:
                    fig = tab
                # fig.title = Title(text=tab.title, align="center")
                figs_var.append(fig)
            # One components() call per column -> (script, [divs]).
            comp = components(figs_var)
            # First two insight keys are skipped — presumably non-tab
            # entries; verify against render()'s output shape.
            insight_keys = list(rndrd["insights"].keys())[2:] if rndrd["insights"] else []
            res["variables"][col] = {
                "tabledata": stats,
                "plots": comp,
                "col_type": itmdt.visual_type.replace("_column", ""),
                "tab_name": rndrd["meta"],
                "plots_tab": zip(comp[1][1:], rndrd["meta"][1:], insight_keys),
                "insights_tab": rndrd["insights"],
            }
        except:
            # Identify the failing column before re-raising unchanged.
            print(f"error happended in column:{col}", file=sys.stderr)
            raise
    return res
def _format_interaction(data: Dict[str, Any], cfg: Config) -> Dict[str, Any]:
0
Source : formatter.py
with MIT License
from sfu-db
with MIT License
from sfu-db
def _format_interaction(data: Dict[str, Any], cfg: Config) -> Dict[str, Any]:
    """Format of Interaction section.

    When enabled, renders the correlation crossfilter layout and embeds
    it as a (script, div) pair under ``res["interactions"]``.
    """
    res: Dict[str, Any] = {}
    # interactions
    if cfg.interactions.enable:
        res["has_interaction"] = True
        itmdt = Intermediate(
            scatter_source=data["interaction.scatter_source"],
            other_plots=data["interaction.other_plots"],
            num_cols=data["num_cols"],
            all_cols=data["all_cols"],
            visual_type="correlation_crossfilter",
        )
        rndrd = render_correlation(itmdt, cfg)
        # Let the embedded layout expand to the template's width.
        rndrd.sizing_mode = "stretch_width"
        res["interactions"] = components(rndrd)
    else:
        res["has_interaction"] = False
    return res
def _format_correlation(data: Dict[str, Any], cfg: Config) -> Dict[str, Any]:
0
Source : formatter.py
with MIT License
from sfu-db
with MIT License
from sfu-db
def _format_correlation(data: Dict[str, Any], cfg: Config) -> Dict[str, Any]:
    """Format of Correlation section.

    When there are numerical columns and correlations are enabled,
    builds the upper-triangle correlation data per method, renders the
    heatmap tabs, and embeds all figures in one ``components`` call.
    """
    res: Dict[str, Any] = {}
    if len(data["num_cols"]) > 0:
        # correlations
        if cfg.correlations.enable:
            res["has_correlation"] = True
            dfs: Dict[str, pd.DataFrame] = {}
            for method, corr in data["corrs"].items():
                ndf = pd.DataFrame(
                    {
                        "x": data["num_cols"][data["cordx"]],
                        "y": data["num_cols"][data["cordy"]],
                        "correlation": corr.ravel(),
                    }
                )
                # Keep only the strict upper triangle (y index > x index)
                # to avoid duplicated pairs and the diagonal.
                dfs[method.name] = ndf[data["cordy"] > data["cordx"]]
            itmdt = Intermediate(
                data=dfs,
                axis_range=list(data["num_cols"]),
                visual_type="correlation_heatmaps",
            )
            rndrd = render_correlation(itmdt, cfg)
            res["correlation_names"] = []
            figs_corr: List[Figure] = []
            # pylint: disable = not-an-iterable
            for tab in rndrd.tabs:
                fig = tab.child
                fig.sizing_mode = "stretch_width"
                figs_corr.append(fig)
                res["correlation_names"].append(tab.title)
            # (script, [divs]) for all heatmap figures at once.
            res["correlations"] = components(figs_corr)
    else:
        res["has_correlation"] = False
    return res
def _format_missing(
0
Source : formatter.py
with MIT License
from sfu-db
with MIT License
from sfu-db
def _format_missing(
    data: Dict[str, Any], cfg: Config, completions: Optional[Dict[str, Any]], ncols: int
) -> Dict[str, Any]:
    """Format of Missing section.

    When enabled and completion callbacks are provided, renders the
    missing-value figures, embeds them via ``components``, and lists
    the tab names (Dendrogram only for multi-column frames).
    """
    res: Dict[str, Any] = {}
    # missing
    if cfg.missingvalues.enable and completions is not None:
        res["has_missing"] = True
        itmdt = completions["miss"](data["miss"])
        rndrd = render_missing(itmdt, cfg)
        figs_missing: List[Figure] = []
        for fig in rndrd["layout"]:
            fig.sizing_mode = "stretch_both"
            figs_missing.append(fig)
        # (script, [divs]) for all missing-value figures at once.
        res["missing"] = components(figs_missing)
        res["missing_tabs"] = ["Bar Chart", "Spectrum", "Heat Map"]
        # only display dendrogram when df has more than one column
        if ncols > 1:
            res["missing_tabs"].append("Dendrogram")
    return res
def _format_overview(data: Dict[str, Any], cfg: Config) -> Dict[str, Any]:
0
Source : bokeh_figures.py
with MIT License
from SolarArbiter
with MIT License
from SolarArbiter
def raw_report_plots(report, metrics):
    """Create a RawReportPlots object from the metrics of a report.
    Parameters
    ----------
    report: :py:class:`solarforecastarbiter.datamodel.Report`
    metrics: tuple of :py:class:`solarforecastarbiter.datamodel.MetricResult`
    Returns
    -------
    :py:class:`solarforecastarbiter.datamodel.RawReportPlots`
    """
    cds = construct_metrics_cds(metrics, rename=abbreviate)
    # Create initial bar figures
    figure_dict = {}
    # Components for other metrics
    # One figure per (category, metric[, subdivision]) keyed as
    # 'category::metric::name' so the key can be split back apart below.
    for category in report.report_parameters.categories:
        for metric in report.report_parameters.metrics:
            if category == 'total':
                fig = bar(cds, metric)
                figure_dict[f'total::{metric}::all'] = fig
            else:
                figs = bar_subdivisions(cds, category, metric)
                for name, fig in figs.items():
                    figure_dict[f'{category}::{metric}::{name}'] = fig
    # Passing a dict to components() yields one shared script and a dict
    # of divs keyed like figure_dict.
    script, divs = components(figure_dict)
    mplots = []
    # Reuse one webdriver for all SVG exports to avoid per-figure startup.
    with _make_webdriver() as driver:
        for k, v in divs.items():
            # maxsplit=2 restores the three '::'-joined key parts.
            cat, met, name = k.split('::', 2)
            fig = figure_dict[k]
            svg = output_svg(fig, driver=driver)
            mplots.append(datamodel.BokehReportFigure(
                name=name, category=cat, metric=met, div=v, svg=svg,
                figure_type='bar'))
    out = datamodel.RawReportPlots(bokeh_version=bokeh_version, script=script,
                                   figures=tuple(mplots))
    return out
def timeseries_plots(report):
0
Source : bokeh_figures.py
with MIT License
from SolarArbiter
with MIT License
from SolarArbiter
def timeseries_plots(report):
    """Return the bokeh components (script and div element) for timeseries
    and scatter plots of the processed forecasts and observations.
    Parameters
    ----------
    report: :py:class:`solarforecastarbiter.datamodel.Report`
    Returns
    -------
    script: str
        A script element to insert into an html template
    div: str
        A div element to insert into an html template.
    """
    value_cds, meta_cds = construct_timeseries_cds(report)
    pfxobs = report.raw_report.processed_forecasts_observations
    # Axis units taken from the first forecast — presumably all
    # forecasts in a report share units; verify upstream.
    units = pfxobs[0].original.forecast.units
    tfig = timeseries(value_cds, meta_cds, report.report_parameters.start,
                      report.report_parameters.end, units,
                      report.raw_report.timezone)
    sfig = scatter(value_cds, meta_cds, units)
    # Stack timeseries above scatter in a single-column grid, then embed
    # both as one (script, div) pair.
    layout = gridplot((tfig, sfig), ncols=1)
    script, div = components(layout)
    return script, div
0
Source : ci_data.py
with GNU General Public License v3.0
from TaylorPrewitt
with GNU General Public License v3.0
from TaylorPrewitt
def all_index():
    """Flask view: render the footprint page with a Plotly region map and
    a Bokeh DataTable of Azure regions and their MOER values.

    The Plotly figure is serialized to HTML directly; the Bokeh table is
    embedded via ``components`` with INLINE JS/CSS resources passed to
    the template.
    """
    mappy = get_mappy()
    print(f"mappy = {mappy}")
    #mappy.to_csv("Region_Info.csv", encoding='utf-8', index=False )
    # Scatter Map
    # Scatter Map
    fig5 = px.scatter_geo(mappy, lat=mappy['Latitude'], lon=mappy['Longitude'], hover_name=mappy['Azure Region'],
                          hover_data=[mappy['Emission Percent'], mappy['MOER Value']], color=mappy['MOER Value'], title='Global Azure Regions in WattTime Balancing Authorities', template='none')
    fig5 = plotly.io.to_html(fig5)
    # Data table
    # Using bokeh
    data_for_table = mappy[['Azure Region', 'MOER Value']]
    source = ColumnDataSource(data_for_table)
    columns = [
        TableColumn(field="Azure Region", title="Azure Region"),
        TableColumn(field="MOER Value", title="MOER Value"),
    ]
    data_table = DataTable(
        source=source, columns=columns, width=400, height=280)
    # grab the static resources
    js_resources = INLINE.render_js()
    css_resources = INLINE.render_css()
    # render template
    script, div = components(data_table)
    return render_template('footprint.html', plot1=fig5,
                           plot_script=script,
                           plot_div=div,
                           js_resources=js_resources,
                           css_resources=css_resources,)
# /overview api
@ci_bp.route('/overview', methods=["GET", "POST"])
0
Source : app-reference.py
with GNU General Public License v3.0
from TaylorPrewitt
with GNU General Public License v3.0
from TaylorPrewitt
def all_index():
    """Flask view: render the footprint page with a Plotly region map and
    a Bokeh DataTable of Azure regions and their MOER values.

    The Plotly figure is serialized to HTML directly; the Bokeh table is
    embedded via ``components`` with INLINE JS/CSS resources passed to
    the template.
    """
    mappy = get_mappy()
    print(f"mappy = {mappy}")
    #mappy.to_csv("Region_Info.csv", encoding='utf-8', index=False )
    # Scatter Map
    # Scatter Map
    fig5 = px.scatter_geo(mappy, lat=mappy['Latitude'], lon=mappy['Longitude'], hover_name=mappy['Azure Region'],
                          hover_data=[mappy['Emission Percent'], mappy['MOER Value']], color=mappy['MOER Value'], title='Global Azure Regions in WattTime Balancing Authorities', template='none')
    fig5 = plotly.io.to_html(fig5)
    # Data table
    # Using bokeh
    data_for_table = mappy[['Azure Region', 'MOER Value']]
    source = ColumnDataSource(data_for_table)
    columns = [
        TableColumn(field="Azure Region", title="Azure Region"),
        TableColumn(field="MOER Value", title="MOER Value"),
    ]
    data_table = DataTable(
        source=source, columns=columns, width=400, height=280)
    # grab the static resources
    js_resources = INLINE.render_js()
    css_resources = INLINE.render_css()
    # render template
    script, div = components(data_table)
    return render_template('footprint.html', plot1=fig5,
                           plot_script=script,
                           plot_div=div,
                           js_resources=js_resources,
                           css_resources=css_resources,)
# For /overview route
# 1. Real-time carbon info for BA - from the /get_index_api
# @app.route('/get_index_data', methods=["GET"])
def get_realtime_data(ba):
0
Source : timeline_visualizer.py
with Apache License 2.0
from xldrx
with Apache License 2.0
from xldrx
def _export_to_html(self, plot):
    """Render *plot* into a standalone UTF-8 HTML page.

    Embeds the Bokeh plot's script/div together with INLINE JS/CSS
    resources into the instance's main template, titled
    "TensorFlow Timeline" with the current timestamp as header.
    """
    script, div = components(plot)
    page = self._main_template.render(
        plot_script=script,
        plot_div=div,
        js_resources=INLINE.render_js(),
        css_resources=INLINE.render_css(),
        title="TensorFlow Timeline",
        header=str(datetime.now()),
        custom_css='',
        custom_header='',
        custom_js=''
    )
    return encode_utf8(page)
class DataLoader: