Here are examples of the Python API sqlalchemy.Float taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
46 Examples
4
Example 1
Project: SickGear Source File: test_types.py
@testing.requires.floats_to_four_decimals
def test_float_as_decimal(self):
    # Round-trip a Float column configured to hand values back as Decimal.
    inputs = [15.7563, decimal.Decimal("15.7563"), None]
    expected = [decimal.Decimal("15.7563"), None]
    self._do_test(
        Float(precision=8, asdecimal=True),
        inputs,
        expected,
    )
3
Example 2
def upgrade():
    """Add GPS/GSM/WiFi analytics columns and drop the old connectivity flag."""
    table = 'radio_stationanalytic'
    for name, col_type in (
        ('gps_lat', sa.Float()),
        ('gps_lon', sa.Float()),
        ('gsm_signal', sa.Float()),
        ('wifi_connected', sa.Boolean()),
    ):
        op.add_column(table, sa.Column(name, col_type, nullable=True))
    op.drop_column(table, u'gsm_connectivity')
3
Example 3
def test_render_literal_float(self):
    # Literal rendering of Float(4); compare after rounding to 5 places.
    def _round5(value):
        return value is not None and round(value, 5) or None

    self._literal_round_trip(
        Float(4),
        [15.7563, decimal.Decimal("15.7563")],
        [15.7563],
        filter_=_round5,
    )
3
Example 4
@testing.requires.precision_generic_float_type
def test_float_custom_scale(self):
    # decimal_return_scale=7 should preserve seven digits on the round trip.
    values = [15.7563827, decimal.Decimal("15.7563827")]
    self._do_test(
        Float(None, decimal_return_scale=7, asdecimal=True),
        values,
        [decimal.Decimal("15.7563827")],
        check_scale=True,
    )
3
Example 5
Project: SickGear Source File: test_types.py
def test_float_as_float(self):
    # Float(precision=8) should round-trip plain Python floats.
    def _round5(value):
        return value is not None and round(value, 5) or None

    self._do_test(
        Float(precision=8),
        [15.7563, decimal.Decimal("15.7563")],
        [15.7563],
        filter_=_round5,
    )
3
Example 6
Project: gourmet Source File: data_plugin.py
def setup_nutritionconversions_table(self):
    """Create the nutritionconversions table and bind a mapped object to it."""
    self.db.nutritionconversions_table = Table(
        'nutritionconversions', self.db.metadata,
        Column('id', Integer(), primary_key=True),
        Column('ingkey', String(length=255)),
        Column('unit', String(length=255)),
        # factor: the multiplier taking one `unit` to 100 grams
        Column('factor', Float()),
    )  # NUTRITION_CONVERSIONS

    class NutritionConversion(object):
        pass

    self.db._setup_object_for_table(
        self.db.nutritionconversions_table, NutritionConversion)
3
Example 7
Project: alembic Source File: test_postgresql.py
def test_compare_float_no_diff1(self):
    # text("5.2") vs the Python string "5.2" must compare as equal defaults.
    self._compare_default_roundtrip(
        Float(), text("5.2"), "5.2", diff_expected=False)
3
Example 8
Project: alembic Source File: test_postgresql.py
def test_compare_float_no_diff2(self):
    # Same comparison with the operands swapped: string first, then text().
    self._compare_default_roundtrip(
        Float(), "5.2", text("5.2"), diff_expected=False)
3
Example 9
Project: alembic Source File: test_postgresql.py
def test_compare_float_no_diff3(self):
    # "5" and "5.0" as SQL text should be numerically equivalent defaults.
    self._compare_default_roundtrip(
        Float(), text("5"), text("5.0"), diff_expected=False)
3
Example 10
Project: alembic Source File: test_postgresql.py
def test_compare_float_no_diff4(self):
    # Plain strings "5" and "5.0" should also compare as equal defaults.
    self._compare_default_roundtrip(
        Float(), "5", "5.0", diff_expected=False)
3
Example 11
Project: alembic Source File: test_postgresql.py
def test_compare_float_no_diff5(self):
    # Mixed forms: SQL text "5" vs Python string "5.0" — still no diff.
    self._compare_default_roundtrip(
        Float(), text("5"), "5.0", diff_expected=False)
3
Example 12
Project: alembic Source File: test_postgresql.py
def test_compare_float_no_diff6(self):
    # Mixed forms the other way round: string "5" vs SQL text "5.0".
    self._compare_default_roundtrip(
        Float(), "5", text("5.0"), diff_expected=False)
3
Example 13
def test_render_literal_float(self):
    # Render Float(4) literals; round to five decimal places for comparison.
    round_filter = lambda n: n is not None and round(n, 5) or None
    self._literal_round_trip(
        Float(4),
        [15.7563, decimal.Decimal("15.7563")],
        [15.7563],
        filter_=round_filter,
    )
3
Example 14
@testing.requires.precision_generic_float_type
def test_float_custom_scale(self):
    # A custom decimal_return_scale should survive the round trip intact.
    self._do_test(
        Float(None, decimal_return_scale=7, asdecimal=True),
        [15.7563827, decimal.Decimal("15.7563827")],
        [decimal.Decimal("15.7563827")],
        check_scale=True,
    )
3
Example 15
Project: holmes-api Source File: 4779cd2391f7_create_aopic_score_and_lambda_page.py
Function: upgrade
def upgrade():
    """Add pages.score, create the settings table, seed it, and index score."""
    op.add_column(
        'pages',
        sa.Column('score', sa.Float, nullable=False,
                  server_default=sa.text('0.0'))
    )
    op.create_table(
        'settings',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('lambda_score', sa.Float,
                  server_default=sa.text('0.0'), nullable=False)
    )
    # Seed a single settings row so lambda_score has an initial value.
    connection = op.get_bind()
    connection.execute('INSERT INTO settings(lambda_score) VALUES(0.0)')
    op.create_index('idx_pages_score', 'pages', ['score'])
3
Example 16
def test_supports_step_as_info_arg(self):
    # The 'step' info key should surface as an HTML step attribute.
    self.init(type_=sa.Float, info={'step': '0.2'})
    rendered = str(self.form_class().test_column)
    assert 'step="0.2"' in rendered
3
Example 17
Project: incubator-airflow Source File: 2e541a1dcfed_task_duration.py
def upgrade():
    """Widen task_instance.duration from INTEGER to Float."""
    # batch_alter_table is required to support the SQLite ALTER workaround.
    with op.batch_alter_table("task_instance") as batch_op:
        batch_op.alter_column(
            'duration',
            existing_type=mysql.INTEGER(display_width=11),
            type_=sa.Float(),
            existing_nullable=True,
        )
3
Example 18
def upgrade(migrate_engine):
    """Widen float columns to double precision (Float(53))."""
    meta = MetaData(bind=migrate_engine)
    for table_name, column_name in (('metadata_float', 'value'),
                                    ('trait', 't_float')):
        table = Table(table_name, meta, autoload=True)
        table.c[column_name].alter(type=Float(53))
0
Example 19
Project: headphones Source File: sqlalchemy.py
def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
             pickle_protocol=pickle.HIGHEST_PROTOCOL):
    """Set up the job store's engine and jobs table.

    One of ``engine`` or ``url`` must be supplied; an explicit engine wins.
    """
    super(SQLAlchemyJobStore, self).__init__()
    self.pickle_protocol = pickle_protocol
    metadata = maybe_ref(metadata) or MetaData()

    if engine:
        self.engine = maybe_ref(engine)
    elif url:
        self.engine = create_engine(url)
    else:
        raise ValueError('Need either "engine" or "url" defined')

    # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
    # 25 = precision that translates to an 8-byte float
    self.jobs_t = Table(
        tablename, metadata,
        Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
        Column('next_run_time', Float(25), index=True),
        Column('job_state', LargeBinary, nullable=False)
    )
    self.jobs_t.create(self.engine, True)
0
Example 20
def upgrade():
    """Add radio_station.analytic_update_frequency as a nullable Float."""
    op.add_column(
        'radio_station',
        sa.Column('analytic_update_frequency', sa.Float(), nullable=True))
0
Example 21
Project: pyspider Source File: projectdb.py
def __init__(self, url):
    """Create the project table, ensuring the target database exists first."""
    self.table = Table(self.__tablename__, MetaData(),
                       Column('name', String(64)),
                       Column('group', String(64)),
                       Column('status', String(16)),
                       Column('script', Text),
                       Column('comments', String(1024)),
                       Column('rate', Float(11)),
                       Column('burst', Float(11)),
                       Column('updatetime', Float(32)),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8'
                       )
    self.url = make_url(url)
    if self.url.database:
        database = self.url.database
        # Temporarily strip the database so we can connect server-level.
        self.url.database = None
        try:
            engine = create_engine(self.url, pool_recycle=3600)
            conn = engine.connect()
            conn.execute("commit")
            # NOTE(review): the database name is interpolated straight into
            # SQL; it comes from the configured URL — confirm it is trusted.
            conn.execute("CREATE DATABASE %s" % database)
        except sqlalchemy.exc.SQLAlchemyError:
            # Best effort: the database may already exist.
            pass
        self.url.database = database
    self.engine = create_engine(url, pool_recycle=3600)
    self.table.create(self.engine, checkfirst=True)
0
Example 22
Project: pyspider Source File: resultdb.py
def __init__(self, url):
    """Create the result table, ensuring the target database exists first."""
    self.table = Table('__tablename__', MetaData(),
                       Column('taskid', String(64), primary_key=True, nullable=False),
                       Column('url', String(1024)),
                       Column('result', LargeBinary),
                       Column('updatetime', Float(32)),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8'
                       )
    self.url = make_url(url)
    if self.url.database:
        database = self.url.database
        # Connect without a database selected so we can create it.
        self.url.database = None
        try:
            engine = create_engine(self.url, convert_unicode=True,
                                   pool_recycle=3600)
            # NOTE(review): database name interpolated into SQL; it comes
            # from the configured URL — confirm it is trusted.
            engine.execute("CREATE DATABASE IF NOT EXISTS %s" % database)
        except sqlalchemy.exc.SQLAlchemyError:
            # Best effort: ignore failures (e.g. insufficient privileges).
            pass
        self.url.database = database
    self.engine = create_engine(url, convert_unicode=True,
                                pool_recycle=3600)
    self._list_project()
0
Example 23
Project: pyspider Source File: taskdb.py
def __init__(self, url):
    """Create the task table, ensuring the target database exists first."""
    self.table = Table('__tablename__', MetaData(),
                       Column('taskid', String(64), primary_key=True, nullable=False),
                       Column('project', String(64)),
                       Column('url', String(1024)),
                       Column('status', Integer),
                       Column('schedule', LargeBinary),
                       Column('fetch', LargeBinary),
                       Column('process', LargeBinary),
                       Column('track', LargeBinary),
                       Column('lastcrawltime', Float(32)),
                       Column('updatetime', Float(32)),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8'
                       )
    self.url = make_url(url)
    if self.url.database:
        database = self.url.database
        # Temporarily strip the database so we can connect server-level.
        self.url.database = None
        try:
            engine = create_engine(self.url, pool_recycle=3600)
            conn = engine.connect()
            conn.execute("commit")
            # NOTE(review): database name interpolated into SQL; it comes
            # from the configured URL — confirm it is trusted.
            conn.execute("CREATE DATABASE %s" % database)
        except sqlalchemy.exc.SQLAlchemyError:
            # Best effort: the database may already exist.
            pass
        self.url.database = database
    self.engine = create_engine(url, pool_recycle=3600)
    self._list_project()
0
Example 24
Project: alembic Source File: test_postgresql.py
def test_compare_float_str(self):
    # A plain string default should round-trip without producing a diff.
    self._compare_default_roundtrip(Float(), "5.2")
0
Example 25
Project: alembic Source File: test_postgresql.py
def test_compare_float_text(self):
    # A SQL text() default should round-trip without producing a diff.
    self._compare_default_roundtrip(Float(), text("5.2"))
0
Example 26
Project: sqlalchemy Source File: test_zoomark.py
def _baseline_1_create_tables(self):
    """Define and create the Zoo/Animal benchmark tables."""
    Table(
        'Zoo',
        self.metadata,
        Column('ID', Integer, Sequence('zoo_id_seq'),
               primary_key=True, index=True),
        Column('Name', Unicode(255)),
        Column('Founded', Date),
        Column('Opens', Time),
        Column('LastEscape', DateTime),
        Column('Admission', Float),
    )
    Table(
        'Animal',
        self.metadata,
        Column('ID', Integer, Sequence('animal_id_seq'),
               primary_key=True),
        Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True),
        Column('Name', Unicode(100)),
        Column('Species', Unicode(100)),
        Column('Legs', Integer, default=4),
        Column('LastEscape', DateTime),
        Column('Lifespan', Float(4)),
        Column('MotherID', Integer, ForeignKey('Animal.ID')),
        Column('PreferredFoodID', Integer),
        Column('AlternateFoodID', Integer),
    )
    self.metadata.create_all()
0
Example 27
Project: sqlalchemy Source File: test_zoomark_orm.py
def _baseline_1_create_tables(self):
    """Define the Zoo/Animal tables and map classes onto them."""
    zoo = Table(
        'Zoo',
        self.metadata,
        Column('ID', Integer, Sequence('zoo_id_seq'),
               primary_key=True, index=True),
        Column('Name', Unicode(255)),
        Column('Founded', Date),
        Column('Opens', Time),
        Column('LastEscape', DateTime),
        Column('Admission', Float),
    )
    animal = Table(
        'Animal',
        self.metadata,
        Column('ID', Integer, Sequence('animal_id_seq'),
               primary_key=True),
        Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True),
        Column('Name', Unicode(100)),
        Column('Species', Unicode(100)),
        Column('Legs', Integer, default=4),
        Column('LastEscape', DateTime),
        Column('Lifespan', Float(4)),
        Column('MotherID', Integer, ForeignKey('Animal.ID')),
        Column('PreferredFoodID', Integer),
        Column('AlternateFoodID', Integer),
    )
    self.metadata.create_all()

    # Expose the mapped classes at module scope for the benchmark harness.
    global Zoo, Animal

    class Zoo(object):
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)

    class Animal(object):
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)

    mapper(Zoo, zoo)
    mapper(Animal, animal)
0
Example 28
Project: sqlalchemy Source File: test_types.py
def test_float(self):
    """Insert a wide range of float magnitudes into a Float column.

    The original wrapped the loop in ``try: ... except Exception as e:
    raise e`` — a swallow-and-rethrow that adds nothing (and in Python 2
    truncates the traceback); the wrapper is removed.
    """
    float_table = Table(
        'float_table', metadata,
        Column(
            'id', Integer,
            Sequence('numeric_id_seq', optional=True), primary_key=True),
        Column('floatcol', Float()))
    metadata.create_all()
    # Values spanning large/small magnitudes, both signs, and exponent
    # notation, all parsed through float() first.
    test_items = [float(d) for d in (
        '1500000.00000000000000000000',
        '-1500000.00000000000000000000',
        '1500000',
        '0.0000000000000000002',
        '0.2',
        '-0.0000000000000000002',
        '156666.458923543',
        '-156666.458923543',
        '1',
        '-1',
        '1234',
        '2E-12',
        '4E8',
        '3E-6',
        '3E-7',
        '4.1',
        '1E-1',
        '1E-2',
        '1E-3',
        '1E-4',
        '1E-5',
        '1E-6',
        '1E-7',
        '1E-8',
    )]
    for value in test_items:
        float_table.insert().execute(floatcol=value)
0
Example 29
Project: sqlalchemy Source File: test_text.py
def test_typing_construction(self):
    # Each bindparams() call should refine exactly one parameter's type,
    # leaving the others untouched.
    t = text("select * from table :foo :bar :bat")
    self._assert_type_map(
        t, {"foo": NullType(), "bar": NullType(), "bat": NullType()})

    t = t.bindparams(bindparam('foo', type_=String))
    self._assert_type_map(
        t, {"foo": String(), "bar": NullType(), "bat": NullType()})

    t = t.bindparams(bindparam('bar', type_=Integer))
    self._assert_type_map(
        t, {"foo": String(), "bar": Integer(), "bat": NullType()})

    # Binding a concrete float value should infer Float for 'bat'.
    t = t.bindparams(bat=45.564)
    self._assert_type_map(
        t, {"foo": String(), "bar": Integer(), "bat": Float()})
0
Example 30
Project: caravel Source File: __init__.py
def load_energy():
"""Loads an energy related dataset to use with sankey and graphs"""
tbl_name = 'energy_usage'
# Read the gzipped JSON dataset shipped with the repo into a DataFrame.
with gzip.open(os.path.join(DATA_FOLDER, 'energy.json.gz')) as f:
pdf = pd.read_json(f)
# Load it into the main DB, replacing any previous copy of the table.
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'source': String(255),
'target': String(255),
'value': Float(),
},
index=False)
print("Creating table [wb_health_population] reference")
# Register (or update) the table's metadata record.
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = "Energy consumption"
tbl.is_featured = True
tbl.database = utils.get_or_create_main_db(caravel)
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
# Sankey slice over the energy flows.
slc = Slice(
slice_name="Energy Sankey",
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"collapsed_fieldsets": "",
"datasource_id": "3",
"datasource_name": "energy_usage",
"datasource_type": "table",
"flt_col_0": "source",
"flt_eq_0": "",
"flt_op_0": "in",
"groupby": [
"source",
"target"
],
"having": "",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Energy Sankey",
"viz_type": "sankey",
"where": ""
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
# Directed-force slice over the same data.
slc = Slice(
slice_name="Energy Force Layout",
viz_type='directed_force',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"charge": "-500",
"collapsed_fieldsets": "",
"datasource_id": "1",
"datasource_name": "energy_usage",
"datasource_type": "table",
"flt_col_0": "source",
"flt_eq_0": "",
"flt_op_0": "in",
"groupby": [
"source",
"target"
],
"having": "",
"link_length": "200",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Force",
"viz_type": "directed_force",
"where": ""
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
# Heatmap slice of source vs target.
slc = Slice(
slice_name="Heatmap",
viz_type='heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"all_columns_x": "source",
"all_columns_y": "target",
"canvas_image_rendering": "pixelated",
"collapsed_fieldsets": "",
"datasource_id": "1",
"datasource_name": "energy_usage",
"datasource_type": "table",
"flt_col_0": "source",
"flt_eq_0": "",
"flt_op_0": "in",
"having": "",
"linear_color_scheme": "blue_white_yellow",
"metric": "sum__value",
"normalize_across": "heatmap",
"slice_name": "Heatmap",
"viz_type": "heatmap",
"where": "",
"xscale_interval": "1",
"yscale_interval": "1"
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
0
Example 31
Project: caravel Source File: __init__.py
def load_unicode_test_data():
"""Loading unicode test dataset from a csv file in the repo"""
df = pd.read_csv(os.path.join(DATA_FOLDER, 'unicode_utf8_unixnl_test.csv'),
encoding="utf-8")
# generate date/numeric data
df['date'] = datetime.datetime.now().date()
df['value'] = [random.randint(1, 100) for _ in range(len(df))]
# Load the frame into the main DB, replacing any existing copy.
df.to_sql(
'unicode_test',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'phrase': String(500),
'short_phrase': String(10),
'with_missing': String(100),
'date': Date(),
'value': Float(),
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table [unicode_test] reference")
# Register (or update) the table's metadata record.
obj = db.session.query(TBL).filter_by(table_name='unicode_test').first()
if not obj:
obj = TBL(table_name='unicode_test')
obj.main_dttm_col = 'date'
obj.database = utils.get_or_create_main_db(caravel)
obj.is_featured = False
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
# Parameters for a word-cloud slice over the unicode phrases.
slice_data = {
"datasource_id": "3",
"datasource_name": "unicode_test",
"datasource_type": "table",
"flt_op_1": "in",
"granularity": "date",
"groupby": [],
"metric": 'sum__value',
"row_limit": config.get("ROW_LIMIT"),
"since": "100 years ago",
"until": "now",
"where": "",
"viz_type": "word_cloud",
"size_from": "10",
"series": "short_phrase",
"size_to": "70",
"rotation": "square",
"limit": "100",
}
print("Creating a slice")
slc = Slice(
slice_name="Unicode Cloud",
viz_type='word_cloud',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
print("Creating a dashboard")
# Create (or reuse) a one-slice dashboard holding the word cloud.
dash = db.session.query(Dash).filter_by(dashboard_title="Unicode Test").first()
if not dash:
dash = Dash()
pos = {
"size_y": 4,
"size_x": 4,
"col": 1,
"row": 1,
"slice_id": slc.id,
}
dash.dashboard_title = "Unicode Test"
dash.position_json = json.dumps([pos], indent=4)
dash.slug = "unicode-test"
dash.slices = [slc]
db.session.merge(dash)
db.session.commit()
0
Example 32
Project: caravel Source File: __init__.py
def load_long_lat_data():
"""Loading lat/long data from a csv file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'san_francisco.csv.gz')) as f:
pdf = pd.read_csv(f, encoding="utf-8")
# Synthesize date/occupancy/radius columns for demo purposes.
pdf['date'] = datetime.datetime.now().date()
pdf['occupancy'] = [random.randint(1, 6) for _ in range(len(pdf))]
pdf['radius_miles'] = [random.uniform(1, 3) for _ in range(len(pdf))]
# Load the frame into the main DB, replacing any existing copy.
pdf.to_sql(
'long_lat',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'longitude': Float(),
'latitude': Float(),
'number': Float(),
'street': String(100),
'unit': String(10),
'city': String(50),
'district': String(50),
'region': String(50),
'postcode': Float(),
'id': String(100),
'date': Date(),
'occupancy': Float(),
'radius_miles': Float(),
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table reference")
# Register (or update) the table's metadata record.
obj = db.session.query(TBL).filter_by(table_name='long_lat').first()
if not obj:
obj = TBL(table_name='long_lat')
obj.main_dttm_col = 'date'
obj.database = utils.get_or_create_main_db(caravel)
obj.is_featured = False
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
# Parameters for a mapbox slice over the lat/long points.
slice_data = {
"datasource_id": "7",
"datasource_name": "long_lat",
"datasource_type": "table",
"granularity": "day",
"since": "2014-01-01",
"until": "2016-12-12",
"where": "",
"viz_type": "mapbox",
"all_columns_x": "LON",
"all_columns_y": "LAT",
"mapbox_style": "mapbox://styles/mapbox/light-v9",
"all_columns": ["occupancy"],
"row_limit": 500000,
}
print("Creating a slice")
slc = Slice(
slice_name="Mapbox Long/Lat",
viz_type='mapbox',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
0
Example 33
Project: superset Source File: __init__.py
def load_energy():
"""Loads an energy related dataset to use with sankey and graphs"""
tbl_name = 'energy_usage'
# Read the gzipped JSON dataset shipped with the repo into a DataFrame.
with gzip.open(os.path.join(DATA_FOLDER, 'energy.json.gz')) as f:
pdf = pd.read_json(f)
# Load it into the main DB, replacing any previous copy of the table.
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'source': String(255),
'target': String(255),
'value': Float(),
},
index=False)
print("Creating table [wb_health_population] reference")
# Register (or update) the table's metadata record.
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = "Energy consumption"
tbl.is_featured = True
tbl.database = utils.get_or_create_main_db(superset)
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
# Sankey slice over the energy flows.
slc = Slice(
slice_name="Energy Sankey",
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"collapsed_fieldsets": "",
"datasource_id": "3",
"datasource_name": "energy_usage",
"datasource_type": "table",
"flt_col_0": "source",
"flt_eq_0": "",
"flt_op_0": "in",
"groupby": [
"source",
"target"
],
"having": "",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Energy Sankey",
"viz_type": "sankey",
"where": ""
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
# Directed-force slice over the same data.
slc = Slice(
slice_name="Energy Force Layout",
viz_type='directed_force',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"charge": "-500",
"collapsed_fieldsets": "",
"datasource_id": "1",
"datasource_name": "energy_usage",
"datasource_type": "table",
"flt_col_0": "source",
"flt_eq_0": "",
"flt_op_0": "in",
"groupby": [
"source",
"target"
],
"having": "",
"link_length": "200",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Force",
"viz_type": "directed_force",
"where": ""
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
# Heatmap slice of source vs target.
slc = Slice(
slice_name="Heatmap",
viz_type='heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"all_columns_x": "source",
"all_columns_y": "target",
"canvas_image_rendering": "pixelated",
"collapsed_fieldsets": "",
"datasource_id": "1",
"datasource_name": "energy_usage",
"datasource_type": "table",
"flt_col_0": "source",
"flt_eq_0": "",
"flt_op_0": "in",
"having": "",
"linear_color_scheme": "blue_white_yellow",
"metric": "sum__value",
"normalize_across": "heatmap",
"slice_name": "Heatmap",
"viz_type": "heatmap",
"where": "",
"xscale_interval": "1",
"yscale_interval": "1"
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
0
Example 34
Project: superset Source File: __init__.py
def load_unicode_test_data():
"""Loading unicode test dataset from a csv file in the repo"""
df = pd.read_csv(os.path.join(DATA_FOLDER, 'unicode_utf8_unixnl_test.csv'),
encoding="utf-8")
# generate date/numeric data
df['date'] = datetime.datetime.now().date()
df['value'] = [random.randint(1, 100) for _ in range(len(df))]
# Load the frame into the main DB, replacing any existing copy.
df.to_sql(
'unicode_test',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'phrase': String(500),
'short_phrase': String(10),
'with_missing': String(100),
'date': Date(),
'value': Float(),
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table [unicode_test] reference")
# Register (or update) the table's metadata record.
obj = db.session.query(TBL).filter_by(table_name='unicode_test').first()
if not obj:
obj = TBL(table_name='unicode_test')
obj.main_dttm_col = 'date'
obj.database = utils.get_or_create_main_db(superset)
obj.is_featured = False
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
# Parameters for a word-cloud slice over the unicode phrases.
slice_data = {
"datasource_id": "3",
"datasource_name": "unicode_test",
"datasource_type": "table",
"flt_op_1": "in",
"granularity": "date",
"groupby": [],
"metric": 'sum__value',
"row_limit": config.get("ROW_LIMIT"),
"since": "100 years ago",
"until": "now",
"where": "",
"viz_type": "word_cloud",
"size_from": "10",
"series": "short_phrase",
"size_to": "70",
"rotation": "square",
"limit": "100",
}
print("Creating a slice")
slc = Slice(
slice_name="Unicode Cloud",
viz_type='word_cloud',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
print("Creating a dashboard")
# Create (or reuse) a one-slice dashboard holding the word cloud.
dash = db.session.query(Dash).filter_by(dashboard_title="Unicode Test").first()
if not dash:
dash = Dash()
pos = {
"size_y": 4,
"size_x": 4,
"col": 1,
"row": 1,
"slice_id": slc.id,
}
dash.dashboard_title = "Unicode Test"
dash.position_json = json.dumps([pos], indent=4)
dash.slug = "unicode-test"
dash.slices = [slc]
db.session.merge(dash)
db.session.commit()
0
Example 35
Project: superset Source File: __init__.py
def load_long_lat_data():
"""Loading lat/long data from a csv file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'san_francisco.csv.gz')) as f:
pdf = pd.read_csv(f, encoding="utf-8")
# Synthesize date/occupancy/radius columns for demo purposes.
pdf['date'] = datetime.datetime.now().date()
pdf['occupancy'] = [random.randint(1, 6) for _ in range(len(pdf))]
pdf['radius_miles'] = [random.uniform(1, 3) for _ in range(len(pdf))]
# Load the frame into the main DB, replacing any existing copy.
pdf.to_sql(
'long_lat',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'longitude': Float(),
'latitude': Float(),
'number': Float(),
'street': String(100),
'unit': String(10),
'city': String(50),
'district': String(50),
'region': String(50),
'postcode': Float(),
'id': String(100),
'date': Date(),
'occupancy': Float(),
'radius_miles': Float(),
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table reference")
# Register (or update) the table's metadata record.
obj = db.session.query(TBL).filter_by(table_name='long_lat').first()
if not obj:
obj = TBL(table_name='long_lat')
obj.main_dttm_col = 'date'
obj.database = utils.get_or_create_main_db(superset)
obj.is_featured = False
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
# Parameters for a mapbox slice over the lat/long points.
slice_data = {
"datasource_id": "7",
"datasource_name": "long_lat",
"datasource_type": "table",
"granularity": "day",
"since": "2014-01-01",
"until": "2016-12-12",
"where": "",
"viz_type": "mapbox",
"all_columns_x": "LON",
"all_columns_y": "LAT",
"mapbox_style": "mapbox://styles/mapbox/light-v9",
"all_columns": ["occupancy"],
"row_limit": 500000,
}
print("Creating a slice")
slc = Slice(
slice_name="Mapbox Long/Lat",
viz_type='mapbox',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
0
Example 36
def upgrade():
    """Add a nullable Float 'rate' column to Users."""
    op.add_column('Users', sa.Column('rate', sa.Float(), nullable=True))
0
Example 37
def upgrade():
    """Add Users.efficiency and backfill existing rows with 1.0."""
    op.add_column('Users', sa.Column('efficiency', sa.Float(), nullable=True))
    # Backfill so pre-existing users get the neutral default value.
    op.execute('update "Users" set efficiency = 1.0')
0
Example 38
def upgrade():
    """Add a nullable Float 'rate' column to Project_Users."""
    op.add_column('Project_Users',
                  sa.Column('rate', sa.Float(), nullable=True))
0
Example 39
def upgrade():
    """Add a nullable Float 'language_confidence' column to jobpost."""
    op.add_column('jobpost',
                  sa.Column('language_confidence', sa.Float(), nullable=True))
0
Example 40
Project: wtforms-alchemy Source File: test_field_parameters.py
def test_min_and_max_info_attributes_with_float_field(self):
    # min/max info keys should translate into a NumberRange validator.
    self.init(type_=sa.Float, info={'min': 1, 'max': 100})
    number_range = self.get_validator('test_column', NumberRange)
    assert number_range.min == 1
    assert number_range.max == 100
0
Example 41
Project: wtforms-alchemy Source File: test_select_field.py
def test_float_coerces_values_to_floats(self):
    # Select choices posted as strings should be coerced to float data.
    options = [(u'1.0', '1.0'), (u'2.0', '2.0')]
    self.init(type_=sa.Float, info={'choices': options})
    form = self.form_class(MultiDict({'test_column': '2.0'}))
    assert form.test_column.data == 2.0
0
Example 42
Project: wtforms-alchemy Source File: test_types.py
def test_float_converts_to_float_field(self):
    # sa.Float columns should map onto a wtforms FloatField.
    self.init(type_=sa.Float)
    self.assert_type('test_column', FloatField)
0
Example 43
Project: gemini Source File: database.py
def create_tables(path, effect_fields=None):
"""
Create our master DB tables
"""
# Optional per-effect columns: *_num fields become floats, others TEXT.
if effect_fields:
effect_string = "".join(e + (" float,\n" if e.endswith("_num") else " TEXT,\n") for e in effect_fields)
else:
effect_string = ""
# Raw schema for each table as comma/newline separated "name type" pairs;
# parsed below into SQLAlchemy Column objects.
db = dict(variants="""
chrom varchar(15),
start integer,
end integer,
vcf_id text,
variant_id integer,
anno_id integer,
ref text,
alt text,
qual float,
filter text,
type varchar(15),
sub_type text,
gts blob,
gt_types blob,
gt_phases blob,
gt_depths blob,
gt_ref_depths blob,
gt_alt_depths blob,
gt_quals blob,
gt_copy_numbers blob,
gt_phred_ll_homref blob,
gt_phred_ll_het blob,
gt_phred_ll_homalt blob,
call_rate float,
max_aaf_all float,
in_dbsnp bool,
rs_ids text default NULL,
sv_cipos_start_left integer,
sv_cipos_end_left integer,
sv_cipos_start_right integer,
sv_cipos_end_right integer,
sv_length integer,
sv_is_precise bool,
sv_tool text,
sv_evidence_type text,
sv_event_id text,
sv_mate_id text,
sv_strand text,
in_omim bool,
clinvar_sig text default NULL,
clinvar_disease_name text default NULL,
clinvar_dbsource text default NULL,
clinvar_dbsource_id text default NULL,
clinvar_origin text default NULL,
clinvar_dsdb text default NULL,
clinvar_dsdbid text default NULL,
clinvar_disease_acc text default NULL,
clinvar_in_locus_spec_db bool,
clinvar_on_diag_assay bool,
clinvar_causal_allele text,
clinvar_gene_phenotype text,
geno2mp_hpo_ct integer,
pfam_domain text,
cyto_band text default NULL,
rmsk text default NULL,
in_cpg_island bool,
in_segdup bool,
is_conserved bool,
gerp_bp_score float,
gerp_element_pval float,
num_hom_ref integer,
num_het integer,
num_hom_alt integer,
num_unknown integer,
aaf real,
hwe decimal(2,7),
inbreeding_coeff decimal(2,7),
pi decimal(2,7),
recomb_rate decimal(2,7),
gene varchar(60),
transcript varchar(60),
is_exonic bool,
is_coding bool,
is_splicing bool,
is_lof bool,
exon text,
codon_change text,
aa_change text,
aa_length text,
biotype text,
impact varchar(60) default NULL,
impact_so text default NULL,
impact_severity varchar(15),
polyphen_pred text,
polyphen_score float,
sift_pred text,
sift_score float,
anc_allele text,
rms_bq float,
cigar text,
depth integer default NULL,
strand_bias float default NULL,
rms_map_qual float default NULL,
in_hom_run integer default NULL,
num_mapq_zero integer default NULL,
num_alleles integer default NULL,
num_reads_w_dels float default NULL,
haplotype_score float default NULL,
qual_depth float default NULL,
allele_count integer default NULL,
allele_bal float default NULL,
in_hm2 bool,
in_hm3 bool,
is_somatic bool,
somatic_score float,
in_esp bool,
aaf_esp_ea decimal(2,7),
aaf_esp_aa decimal(2,7),
aaf_esp_all decimal(2,7),
exome_chip bool,
in_1kg bool,
aaf_1kg_amr decimal(2,7),
aaf_1kg_eas decimal(2,7),
aaf_1kg_sas decimal(2,7),
aaf_1kg_afr decimal(2,7),
aaf_1kg_eur decimal(2,7),
aaf_1kg_all decimal(2,7),
grc text default NULL,
gms_illumina float,
gms_solid float,
gms_iontorrent float,
in_cse bool,
encode_tfbs text,
encode_dnaseI_cell_count integer,
encode_dnaseI_cell_list text,
encode_consensus_gm12878 text,
encode_consensus_h1hesc text,
encode_consensus_helas3 text,
encode_consensus_hepg2 text,
encode_consensus_huvec text,
encode_consensus_k562 text,
vista_enhancers text,
cosmic_ids text,
info blob,
cadd_raw float,
cadd_scaled float,
fitcons float,
in_exac bool,
aaf_exac_all decimal(2,7),
aaf_adj_exac_all decimal(2,7),
aaf_adj_exac_afr decimal(2,7),
aaf_adj_exac_amr decimal(2,7),
aaf_adj_exac_eas decimal(2,7),
aaf_adj_exac_fin decimal(2,7),
aaf_adj_exac_nfe decimal(2,7),
aaf_adj_exac_oth decimal(2,7),
aaf_adj_exac_sas decimal(2,7),
exac_num_het int,
exac_num_hom_alt int,
exac_num_chroms int,
%s""" % effect_string.rstrip(","),
variant_impacts="""
variant_id integer,
anno_id integer,
gene varchar(60),
transcript varchar(60),
is_exonic bool,
is_coding bool,
is_lof bool,
exon text,
codon_change text,
aa_change text,
aa_length text,
biotype text,
impact varchar(60),
impact_so text,
impact_severity varchar(15),
polyphen_pred text,
polyphen_score float,
sift_pred text,
sift_score float,
%s""" % effect_string.rstrip(","),
sample_genotypes="""
sample_id integer,
gt_types BLOB""",
sample_genotype_counts="""
sample_id integer,
num_hom_ref integer,
num_het integer,
num_hom_alt integer,
num_unknown integer""",
resources="""
name text,
resource text""",
version="""version text""",
gene_detailed="""
uid integer,
chrom varchar(60),
gene varchar(60),
is_hgnc bool,
ensembl_gene_id text,
transcript varchar(60),
biotype text,
transcript_status text,
ccds_id varchar(60),
hgnc_id text,
entrez_id text,
cds_length text,
protein_length text,
transcript_start text,
transcript_end text,
strand text,
synonym text,
rvis_pct float,
mam_phenotype_id text""",
gene_summary="""
uid integer,
chrom varchar(60),
gene varchar(60),
is_hgnc bool,
ensembl_gene_id text,
hgnc_id text,
transcript_min_start integer,
transcript_max_end integer,
strand text,
synonym text,
rvis_pct float,
mam_phenotype_id text,
in_cosmic_census bool,
""",
vcf_header="""vcf_header text""")
# in the future this will be replaced by reading from the conf file.
# Maps the raw SQL type names above to SQLAlchemy types.
lookup = {'real': sql.Float(),
'float': sql.Float(),
'text': sql.Text(),
'bool': sql.Boolean(),
'blob': sql.LargeBinary(),
'decimal(2,7)': sql.Float(), #sql.DECIMAL(precision=7, scale=2, asdecimal=False),
'integer': sql.Integer(),
'varchar(15)': sql.String(20),
'varchar(60)': sql.String(60),
'int': sql.Integer(),
}
# Parse each schema string into (name, type) pairs and build Column objects.
for table in db:
db[table] = db[table].strip().strip(",").split(",\n")
db[table] = [x.strip().split() for x in db[table]]
cols = [sql.Column(c[0], lookup[c[1].lower()]) for c in db[table]]
# variant_id / sample_id / uid serve as primary keys everywhere except
# in variant_impacts (which has multiple rows per variant).
if table != "variant_impacts":
for c in cols:
if c.name in ("variant_id", "sample_id", "uid"):
c.primary_key = True
if c.name == "variant_id" and table == "variants":
c.autoincrement = False
db[table] = cols
e = sql.create_engine(get_path(path), isolation_level=None)
# Have the underlying sqlite connection return str for text values.
e.connect().connection.connection.text_factory = str
metadata = sql.MetaData(bind=e)
session = create_session(bind=e, autocommit=False, autoflush=False)
mapped = {}
# 'variants' first, then the rest in sorted order.
tables = ['variants'] + [x for x in sorted(db) if x != 'variants']
otables = [sql.Table(tbl, metadata, *db[tbl]) for tbl in tables]
# Drop any stale copies before recreating the schema from scratch.
metadata.drop_all(tables=otables)
for t in otables:
mapped[t.name] = t
session.commit()
metadata.create_all()
return session, metadata
0
Example 44
def upgrade(migrate_engine):
    """Widen meter.counter_volume to double precision (Float(53))."""
    meta = MetaData(bind=migrate_engine)
    meter_table = Table('meter', meta, autoload=True)
    meter_table.c.counter_volume.alter(type=Float(53))
0
Example 45
def upgrade(migrate_engine):
    """Create the unique_name, event, and trait tables."""
    meta = MetaData(bind=migrate_engine)

    unique_name = Table(
        'unique_name', meta,
        Column('id', Integer, primary_key=True),
        Column('key', String(32), index=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    unique_name.create()

    # Timestamps are stored as Float(asdecimal=True) columns.
    event = Table(
        'event', meta,
        Column('id', Integer, primary_key=True),
        Column('generated', Float(asdecimal=True), index=True),
        Column('unique_name_id', Integer, ForeignKey('unique_name.id')),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    event.create()

    # One typed value column per possible trait type (string/float/int/time).
    trait = Table(
        'trait', meta,
        Column('id', Integer, primary_key=True),
        Column('name_id', Integer, ForeignKey('unique_name.id')),
        Column('t_type', Integer, index=True),
        Column('t_string', String(32), nullable=True, default=None,
               index=True),
        Column('t_float', Float, nullable=True, default=None, index=True),
        Column('t_int', Integer, nullable=True, default=None, index=True),
        Column('t_datetime', Float(asdecimal=True), nullable=True,
               default=None, index=True),
        Column('event_id', Integer, ForeignKey('event.id')),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    trait.create()
0
Example 46
Project: subunit2sql Source File: 163fd5aa1380_create_avg_runtime_column_in_test_table.py
Function: upgrade
def upgrade():
    """Add a nullable Float 'run_time' column to the tests table."""
    op.add_column('tests', sa.Column('run_time', sa.Float(), nullable=True))