Here are examples of the Python API sqlalchemy.Float taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
79 Examples
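Before the project snippets, a minimal sketch of how sqlalchemy.Float is typically declared on a column. The table and column names here are hypothetical; precision, asdecimal, and decimal_return_scale are the standard constructor arguments that appear throughout the examples below:

import sqlalchemy as sa

metadata = sa.MetaData()

# Hypothetical table for illustration only.
# Float(precision=8) maps to the dialect's floating-point type;
# asdecimal=True returns values as decimal.Decimal instead of Python float.
readings = sa.Table(
    "readings",
    metadata,
    sa.Column("id", sa.Integer, primary_key=True),
    sa.Column("temperature", sa.Float(precision=8), nullable=True),
    sa.Column("price", sa.Float(asdecimal=True, decimal_return_scale=2), nullable=True),
)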
3
Source : 1c0739669926_add_last_pm25_to_subscriptions.py
with MIT License
from airq-dev
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("subscriptions", sa.Column("last_pm25", sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
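The listing cuts each snippet off where the next function starts, so the downgrade() bodies are not shown. For a migration like the one above, the inverse is usually a single op.drop_column call; a sketch of that standard Alembic pattern (not the project's verbatim code):

def downgrade():
    # Hypothetical inverse of the upgrade above: drop the column it added.
    op.drop_column("subscriptions", "last_pm25")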
3
Source : 7de8a31a8e57_track_pm_cf_1.py
with MIT License
from airq-dev
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("clients", sa.Column("last_pm_cf_1", sa.Float(), nullable=True))
    op.add_column(
        "sensors", sa.Column("pm_cf_1", sa.Float(), server_default="0", nullable=False)
    )
    op.add_column(
        "zipcodes", sa.Column("pm_cf_1", sa.Float(), server_default="0", nullable=False)
    )
    # ### end Alembic commands ###
def downgrade():
3
Source : bc92ae15a407_track_humidity.py
with MIT License
from airq-dev
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("clients", sa.Column("last_humidity", sa.Float(), nullable=True))
    op.add_column("sensors", sa.Column("humidity", sa.Float(), nullable=True))
    op.execute("UPDATE sensors SET humidity = 0")
    op.alter_column("sensors", "humidity", nullable=False)
    op.add_column(
        "zipcodes",
        sa.Column("humidity", sa.Float(), server_default="0", nullable=False),
    )
    # ### end Alembic commands ###
def downgrade():
3
Source : test_types.py
with MIT License
from analyzeDFIR
def test_render_literal_float(self):
    self._literal_round_trip(
        Float(4),
        [15.7563, decimal.Decimal("15.7563")],
        [15.7563, ],
        filter_=lambda n: n is not None and round(n, 5) or None
    )
@testing.requires.precision_generic_float_type
3
Source : test_types.py
with MIT License
from analyzeDFIR
def test_float_custom_scale(self):
    self._do_test(
        Float(None, decimal_return_scale=7, asdecimal=True),
        [15.7563827, decimal.Decimal("15.7563827")],
        [decimal.Decimal("15.7563827"), ],
        check_scale=True
    )
def test_numeric_as_decimal(self):
3
Source : test_types.py
with MIT License
from analyzeDFIR
def test_float_as_decimal(self):
    self._do_test(
        Float(precision=8, asdecimal=True),
        [15.7563, decimal.Decimal("15.7563"), None],
        [decimal.Decimal("15.7563"), None],
    )
def test_float_as_float(self):
3
Source : test_types.py
with MIT License
from analyzeDFIR
def test_float_as_float(self):
    self._do_test(
        Float(precision=8),
        [15.7563, decimal.Decimal("15.7563")],
        [15.7563],
        filter_=lambda n: n is not None and round(n, 5) or None
    )
def test_float_coerce_round_trip(self):
3
Source : model_fields.py
with MIT License
from collerek
def get_column_type(cls, **kwargs: Any) -> Any:
"""
Return proper type of db column for given field type.
Accepts required and optional parameters that each column type accepts.
:param kwargs: key, value pairs of sqlalchemy options
:type kwargs: Any
:return: initialized column with proper options
:rtype: sqlalchemy Column
"""
return sqlalchemy.Float()
if TYPE_CHECKING: # pragma: nocover
3
Source : b9df7cd058d7_add_extended_data_about_tower.py
with GNU General Public License v3.0
from EFForg
def upgrade():
    op.add_column('tower_data', sa.Column('cfo', sa.Float(), nullable=True))
    op.add_column('tower_data', sa.Column('raw_sib1', sa.String(255), nullable=True))
    op.add_column('tower_data', sa.Column('rssi', sa.Float(), nullable=True))
    op.drop_column('tower_data', 'rsrp')
def downgrade():
3
Source : 3ac2bc1897ce_adding_weight_to_tradedocuments.py
with GNU Affero General Public License v3.0
from eReuse
def upgrade():
op.add_column("trade_document", sa.Column("weight", sa.Float(decimal_return_scale=2), nullable=True), schema=f'{get_inv()}')
# DataWipeDocument table
op.create_table('move_on_document',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("weight", sa.Float(decimal_return_scale=2), nullable=True),
sa.Column('container_from_id', sa.BigInteger(), nullable=False),
sa.Column('container_to_id', sa.BigInteger(), nullable=False),
sa.ForeignKeyConstraint(['container_from_id'], [f'{get_inv()}.trade_document.id'], ),
sa.ForeignKeyConstraint(['container_to_id'], [f'{get_inv()}.trade_document.id'], ),
sa.ForeignKeyConstraint(['id'], [f'{get_inv()}.action.id'], ),
sa.PrimaryKeyConstraint('id'),
schema=f'{get_inv()}'
)
def downgrade():
3
Source : sql_data_generator.py
with Apache License 2.0
from ethyca
def sqlalchemy_datatype(fides_data_type: DataType, **kwargs):
    return {
        DataType.string: Column(String(**kwargs)),
        DataType.integer: Column(Integer(**kwargs)),
        DataType.float: Column(Float(**kwargs)),
        DataType.boolean: Column(Boolean(**kwargs)),
        DataType.object_id: None, # not a sqlalchemy supported type
    }[fides_data_type]
def create_sample_value(field: Field):
3
Source : 037_2c5f898d5dd7_add_kinchranks.py
with GNU General Public License v3.0
from euphwes
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user_site_rankings', schema=None) as batch_op:
        batch_op.add_column(sa.Column('all_kinchrank', sa.Float(), nullable=True))
        batch_op.add_column(sa.Column('wca_kinchrank', sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
3
Source : 038_66f166a908a4_add_non_wca_only_kinchrank.py
with GNU General Public License v3.0
from euphwes
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user_site_rankings', schema=None) as batch_op:
        batch_op.add_column(sa.Column('non_wca_kinchrank', sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
3
Source : test_types.py
with MIT License
from fbla-competitive-events
def test_float_as_float(self):
    self._do_test(
        Float(precision=8),
        [15.7563, decimal.Decimal("15.7563")],
        [15.7563],
        filter_=lambda n: n is not None and round(n, 5) or None
    )
@testing.requires.precision_numerics_general
3
Source : 2e541a1dcfed_task_duration.py
with Apache License 2.0
from flink-extended
def upgrade(): # noqa: D103
    # use batch_alter_table to support SQLite workaround
    with op.batch_alter_table("task_instance") as batch_op:
        batch_op.alter_column(
            'duration',
            existing_type=mysql.INTEGER(display_width=11),
            type_=sa.Float(),
            existing_nullable=True,
        )
def downgrade(): # noqa: D103
3
Source : test_types.py
with Apache License 2.0
from gethue
def test_asdecimal_int_to_numeric(self):
expr = column("a", Integer) * column("b", Numeric(asdecimal=False))
is_(expr.type.asdecimal, False)
expr = column("a", Integer) * column("b", Numeric())
is_(expr.type.asdecimal, True)
expr = column("a", Integer) * column("b", Float())
is_(expr.type.asdecimal, False)
assert isinstance(expr.type, Float)
def test_asdecimal_numeric_to_int(self):
3
Source : test_types.py
with Apache License 2.0
from gethue
def test_asdecimal_numeric_to_int(self):
expr = column("a", Numeric(asdecimal=False)) * column("b", Integer)
is_(expr.type.asdecimal, False)
expr = column("a", Numeric()) * column("b", Integer)
is_(expr.type.asdecimal, True)
expr = column("a", Float()) * column("b", Integer)
is_(expr.type.asdecimal, False)
assert isinstance(expr.type, Float)
def test_null_comparison(self):
3
Source : bde7db78a4fb_add_time_statistics.py
with MIT License
from hpi-epic
def upgrade():
    op.add_column('result', sa.Column('execution_time', sa.Float()))
    op.add_column('result', sa.Column('dataset_loading_time', sa.Float()))
def downgrade():
3
Source : 423495a41dd6_db_changes_for_system_config_updates.py
with MIT License
from jamespfennell
def upgrade():
op.add_column("feed", sa.Column("http_timeout", sa.Float(), nullable=True))
op.add_column("feed", sa.Column("parser_options", sa.String(), nullable=True))
op.alter_column("system", "name", existing_type=sa.VARCHAR(), nullable=False)
def downgrade():
3
Source : load.py
with MIT License
from josh-lang
def write_businesses(**kwargs):
    '''Write Yelp business data to postgres for further processing'''
    ti = kwargs['ti']
    businesses = ti.xcom_pull(task_ids = 'clean_businesses')
    businesses.to_sql(
        name = 'yelp_businesses',
        con = py_engine,
        schema = 'staging',
        if_exists = 'replace',
        index = False,
        index_label = 'business_id',
        dtype = {
            'business_id': String(22),
            'review_count': Integer(),
            'rating': Float(),
            'geometry': Geometry('POINT', 4326)
        }
    )
3
Source : 11b6ef362f98_.py
with GNU Affero General Public License v3.0
from minetest
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('package', sa.Column('score', sa.Float(), nullable=False, server_default="0.0"))
    # ### end Alembic commands ###
def downgrade():
3
Source : dd27f1311a90_.py
with GNU Affero General Public License v3.0
from minetest
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('package', sa.Column('score_downloads', sa.Float(), nullable=False, server_default="0"))
    op.execute("""
        UPDATE "package" SET "score_downloads"="score";
    """)
    # ### end Alembic commands ###
def downgrade():
3
Source : 07b769722b91_improve_job_data_types.py
with Apache License 2.0
from Open-EO
def upgrade():
    job_status_type.create(op.get_bind())
    op.alter_column('jobs', 'status', type_=job_status_type, postgresql_using='status::job_status', nullable=False)
    op.alter_column('jobs', 'budget', type_=sa.Float, postgresql_using='budget::float')
    op.alter_column('jobs', 'current_costs', type_=sa.Float, postgresql_using='current_costs::float')
def downgrade():
3
Source : 8abb844b1c27_.py
with MIT License
from rsrdesarrollo
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('finding_template', sa.Column('cvss_v3_score', sa.Float(), nullable=False, server_default="0"))
    op.add_column('finding_template', sa.Column('cvss_v3_vector', sa.String(length=128), nullable=True))
    # ### end Alembic commands ###
def downgrade():
3
Source : d0d60a20bf61_.py
with MIT License
from rsrdesarrollo
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('finding', sa.Column('cvss_v3_score', sa.Float(), nullable=False, server_default="0"))
    # ### end Alembic commands ###
def downgrade():
3
Source : test_fields.py
with MIT License
from shosca
def test_float_field(self):
    column = fields.FloatField()
    self.assertIsInstance(column.type, sa.Float)
    self.assertIsNone(column.type.precision)
    self.assertFalse(column.type.asdecimal)
    column = fields.FloatField(precision=10)
    self.assertEqual(column.type.precision, 10)
    column = fields.FloatField(max_digits=10)
    self.assertEqual(column.type.precision, 10)
    form_field = meta.column_info(column).formfield()
    self.assertIsInstance(form_field, djangofields.FloatField)
def test_integer_field(self):
3
Source : test_postgresql.py
with MIT License
from sqlalchemy
def test_compare_float_no_diff1(self):
    self._compare_default_roundtrip(
        Float(), text("5.2"), "5.2", diff_expected=False
    )
def test_compare_float_no_diff2(self):
3
Source : test_postgresql.py
with MIT License
from sqlalchemy
def test_compare_float_no_diff2(self):
    self._compare_default_roundtrip(
        Float(), "5.2", text("5.2"), diff_expected=False
    )
def test_compare_float_no_diff3(self):
3
Source : test_postgresql.py
with MIT License
from sqlalchemy
def test_compare_float_no_diff3(self):
    self._compare_default_roundtrip(
        Float(), text("5"), text("5.0"), diff_expected=False
    )
def test_compare_float_no_diff4(self):
3
Source : test_postgresql.py
with MIT License
from sqlalchemy
def test_compare_float_no_diff4(self):
    self._compare_default_roundtrip(
        Float(), "5", "5.0", diff_expected=False
    )
def test_compare_float_no_diff5(self):
3
Source : test_postgresql.py
with MIT License
from sqlalchemy
def test_compare_float_no_diff5(self):
    self._compare_default_roundtrip(
        Float(), text("5"), "5.0", diff_expected=False
    )
def test_compare_float_no_diff6(self):
3
Source : test_postgresql.py
with MIT License
from sqlalchemy
def test_compare_float_no_diff6(self):
    self._compare_default_roundtrip(
        Float(), "5", text("5.0"), diff_expected=False
    )
def test_compare_numeric_no_diff(self):
3
Source : test_types.py
with MIT License
from sqlalchemy
def test_no_variants_of_variants(self):
    t = Integer().with_variant(Float(), "postgresql")
    with expect_raises_message(
        exc.ArgumentError,
        r"can't pass a type that already has variants as a "
        r"dialect-level type to with_variant\(\)",
    ):
        String().with_variant(t, "mysql")
def test_compile(self):
3
Source : 33e16e437be7_.py
with GNU Affero General Public License v3.0
from tchx84
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('measurements', sa.Column('humidity', sa.Float(), nullable=True))
    op.add_column('measurements', sa.Column('pressure', sa.Float(), nullable=True))
    op.add_column('measurements', sa.Column('temperature', sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
3
Source : cd509524593e_co2_support.py
with GNU Affero General Public License v3.0
from tchx84
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('measurements', sa.Column('co2', sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
3
Source : 590c08fa2cbd_.py
with GNU General Public License v3.0
from teamsempo
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('currency_conversion', sa.Column('usd_equiv', sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
3
Source : 96862ec4ff72_.py
with GNU General Public License v3.0
from teamsempo
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('currency_conversion', sa.Column('rate', sa.Float(), nullable=True))
    op.drop_column('currency_conversion', 'usd_equiv')
    # ### end Alembic commands ###
def downgrade():
3
Source : [09-20-2020--11-05]__add_price_bought.py
with BSD 3-Clause "New" or "Revised" License
from turbulette
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("app_1_book", sa.Column("price_bought", sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
0
Source : test_sql.py
with GNU General Public License v3.0
from adityaprakash-bobby
def test_double_precision(self):
    V = 1.23456789101112131415
    df = DataFrame({'f32': Series([V, ], dtype='float32'),
                    'f64': Series([V, ], dtype='float64'),
                    'f64_as_f32': Series([V, ], dtype='float64'),
                    'i32': Series([5, ], dtype='int32'),
                    'i64': Series([5, ], dtype='int64'),
                    })
    df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
              dtype={'f64_as_f32': sqlalchemy.Float(precision=23)})
    res = sql.read_sql_table('test_dtypes', self.conn)
    # check precision of float64
    assert (np.round(df['f64'].iloc[0], 14) ==
            np.round(res['f64'].iloc[0], 14))
    # check sql types
    meta = sqlalchemy.schema.MetaData(bind=self.conn)
    meta.reflect()
    col_dict = meta.tables['test_dtypes'].columns
    assert str(col_dict['f32'].type) == str(col_dict['f64_as_f32'].type)
    assert isinstance(col_dict['f32'].type, sqltypes.Float)
    assert isinstance(col_dict['f64'].type, sqltypes.Float)
    assert isinstance(col_dict['i32'].type, sqltypes.Integer)
    assert isinstance(col_dict['i64'].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
0
Source : rels.py
with Apache License 2.0
from amakelov
def setup_storage(self, conn:Connection=None):
    rel_meta = RelMeta(op_adapter=OpAdapter(), type_adapter=TypeAdapter())
    self.rel_storage.update_meta(value=rel_meta, conn=conn)
    self.val_adapter.set_type_adapter(value=rel_meta.type_adapter)
    var_spec = RelSpec(col_objs=[
        Column(self.VAR_INDEX, String(32), primary_key=True),
        Column(self.VAR_PARTITION, String(32))
    ],
        indices=[], extend_existing=True)
    self.rel_storage.create_relation(name=self.VAR_TABLE,
                                     rel_spec=var_spec,
                                     allow_exist=True, conn=conn)
    ### provenance table
    if CoreConfig.track_provenance:
        #! note that dtypes are not supported by some queries
        var_qtable = self.rel_storage.get_qtable(name=self.VAR_TABLE)
        var_qindex = f'{var_qtable}.{self.VAR_INDEX}'
        provenance_spec = RelSpec(col_objs=[
            Column(Prov.call_uid, String(32),),
            Column(Prov.op_name, String(40),),
            Column(Prov.op_version, String(32), ),
            Column(Prov.is_super, Boolean()),
            Column(Prov.vref_name, String(40), ),
            Column(Prov.vref_uid, String(32), ForeignKey(var_qindex, ondelete='CASCADE')),
            Column(Prov.is_input, Boolean(), ),
            Column(Prov.call_start, Float(), ),
            Column(Prov.call_end, Float(), ),
        ],
            indices=[[Prov.call_uid, Prov.vref_name, Prov.is_input]],
            extend_existing=True)
        self.rel_storage.create_relation(name=self.PROVENANCE_TABLE, rel_spec=provenance_spec, allow_exist=True, conn=conn)
    if CoreConfig.decompose_struct_as_many:
        builtin_ops = [GetItemList(), ConstructList(), GetKeyDict(), ConstructDict()]
    else:
        builtin_ops = [ConstructList(), GetKeyDict(), ConstructDict(), DeconstructList()]
    for op in builtin_ops:
        self.synchronize_many(ops=[op], conn=conn)
@property
0
Source : syntax.py
with MIT License
from arrowresearch
def float_make_value(v: float):
    return Literal(value=v, type=sa.Float())
@make_value.register
0
Source : fields.py
with BSD 3-Clause "New" or "Revised" License
from awesometoolbox
def Float(
    *,
    primary_key: bool = False,
    allow_null: bool = False,
    index: bool = False,
    unique: bool = False,
    minimum: float = None,
    maximum: float = None,
    multiple_of: int = None,
) -> Type[int]:
    namespace = dict(
        primary_key=primary_key,
        allow_null=allow_null,
        index=index,
        unique=unique,
        ge=minimum,
        le=maximum,
        multiple_of=multiple_of,
        column_type=sqlalchemy.Float(),
    )
    return type("Float", (pydantic.ConstrainedFloat, ColumnFactory), namespace)
def Boolean(
0
Source : energy.py
with Apache License 2.0
from CloudmindsRobot
def load_energy(only_metadata: bool = False, force: bool = False) -> None:
"""Loads an energy related dataset to use with sankey and graphs"""
tbl_name = "energy_usage"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("energy.json.gz")
pdf = pd.read_json(data)
pdf.to_sql(
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={"source": String(255), "target": String(255), "value": Float()},
index=False,
)
print("Creating table [wb_health_population] reference")
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = "Energy consumption"
tbl.database = database
if not any(col.metric_name == "sum__value" for col in tbl.metrics):
col = str(column("value").compile(db.engine))
tbl.metrics.append(
SqlMetric(metric_name="sum__value", expression=f"SUM({col})")
)
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
slc = Slice(
slice_name="Energy Sankey",
viz_type="sankey",
datasource_type="table",
datasource_id=tbl.id,
params=textwrap.dedent(
"""\
{
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Energy Sankey",
"viz_type": "sankey"
}
"""
),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name="Energy Force Layout",
viz_type="directed_force",
datasource_type="table",
datasource_id=tbl.id,
params=textwrap.dedent(
"""\
{
"charge": "-500",
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"link_length": "200",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Force",
"viz_type": "directed_force"
}
"""
),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name="Heatmap",
viz_type="heatmap",
datasource_type="table",
datasource_id=tbl.id,
params=textwrap.dedent(
"""\
{
"all_columns_x": "source",
"all_columns_y": "target",
"canvas_image_rendering": "pixelated",
"collapsed_fieldsets": "",
"linear_color_scheme": "blue_white_yellow",
"metric": "sum__value",
"normalize_across": "heatmap",
"slice_name": "Heatmap",
"viz_type": "heatmap",
"xscale_interval": "1",
"yscale_interval": "1"
}
"""
),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
0
Source : long_lat.py
with Apache License 2.0
from CloudmindsRobot
def load_long_lat_data(only_metadata: bool = False, force: bool = False) -> None:
"""Loading lat/long data from a csv file in the repo"""
tbl_name = "long_lat"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("san_francisco.csv.gz", make_bytes=True)
pdf = pd.read_csv(data, encoding="utf-8")
start = datetime.datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0
)
pdf["datetime"] = [
start + datetime.timedelta(hours=i * 24 / (len(pdf) - 1))
for i in range(len(pdf))
]
pdf["occupancy"] = [random.randint(1, 6) for _ in range(len(pdf))]
pdf["radius_miles"] = [random.uniform(1, 3) for _ in range(len(pdf))]
pdf["geohash"] = pdf[["LAT", "LON"]].apply(lambda x: geohash.encode(*x), axis=1)
pdf["delimited"] = pdf["LAT"].map(str).str.cat(pdf["LON"].map(str), sep=",")
pdf.to_sql( # pylint: disable=no-member
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"longitude": Float(),
"latitude": Float(),
"number": Float(),
"street": String(100),
"unit": String(10),
"city": String(50),
"district": String(50),
"region": String(50),
"postcode": Float(),
"id": String(100),
"datetime": DateTime(),
"occupancy": Float(),
"radius_miles": Float(),
"geohash": String(12),
"delimited": String(60),
},
index=False,
)
print("Done loading table!")
print("-" * 80)
print("Creating table reference")
obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not obj:
obj = TBL(table_name=tbl_name)
obj.main_dttm_col = "datetime"
obj.database = database
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity_sqla": "day",
"since": "2014-01-01",
"until": "now",
"viz_type": "mapbox",
"all_columns_x": "LON",
"all_columns_y": "LAT",
"mapbox_style": "mapbox://styles/mapbox/light-v9",
"all_columns": ["occupancy"],
"row_limit": 500000,
}
print("Creating a slice")
slc = Slice(
slice_name="Mapbox Long/Lat",
viz_type="mapbox",
datasource_type="table",
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
0
Source : unicode_test_data.py
with Apache License 2.0
from CloudmindsRobot
def load_unicode_test_data(only_metadata: bool = False, force: bool = False) -> None:
"""Loading unicode test dataset from a csv file in the repo"""
tbl_name = "unicode_test"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data(
"unicode_utf8_unixnl_test.csv", is_gzip=False, make_bytes=True
)
df = pd.read_csv(data, encoding="utf-8")
# generate date/numeric data
df["dttm"] = datetime.datetime.now().date()
df["value"] = [random.randint(1, 100) for _ in range(len(df))]
df.to_sql( # pylint: disable=no-member
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"phrase": String(500),
"short_phrase": String(10),
"with_missing": String(100),
"dttm": Date(),
"value": Float(),
},
index=False,
)
print("Done loading table!")
print("-" * 80)
print("Creating table [unicode_test] reference")
obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not obj:
obj = TBL(table_name=tbl_name)
obj.main_dttm_col = "dttm"
obj.database = database
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity_sqla": "dttm",
"groupby": [],
"metric": {
"aggregate": "SUM",
"column": {"column_name": "value"},
"expressionType": "SIMPLE",
"label": "Value",
},
"row_limit": config["ROW_LIMIT"],
"since": "100 years ago",
"until": "now",
"viz_type": "word_cloud",
"size_from": "10",
"series": "short_phrase",
"size_to": "70",
"rotation": "square",
"limit": "100",
}
print("Creating a slice")
slc = Slice(
slice_name="Unicode Cloud",
viz_type="word_cloud",
datasource_type="table",
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
print("Creating a dashboard")
dash = db.session.query(Dashboard).filter_by(slug="unicode-test").first()
if not dash:
dash = Dashboard()
js = """\
{
"CHART-Hkx6154FEm": {
"children": [],
"id": "CHART-Hkx6154FEm",
"meta": {
"chartId": 2225,
"height": 30,
"sliceName": "slice 1",
"width": 4
},
"type": "CHART"
},
"GRID_ID": {
"children": [
"ROW-SyT19EFEQ"
],
"id": "GRID_ID",
"type": "GRID"
},
"ROOT_ID": {
"children": [
"GRID_ID"
],
"id": "ROOT_ID",
"type": "ROOT"
},
"ROW-SyT19EFEQ": {
"children": [
"CHART-Hkx6154FEm"
],
"id": "ROW-SyT19EFEQ",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"DASHBOARD_VERSION_KEY": "v2"
}
"""
    dash.dashboard_title = "Unicode Test"
    pos = json.loads(js)
    update_slice_ids(pos, [slc])
    dash.position_json = json.dumps(pos, indent=4)
    dash.slug = "unicode-test"
    dash.slices = [slc]
    db.session.merge(dash)
    db.session.commit()
0
Source : test_sql.py
with Apache License 2.0
from dashanji
def test_double_precision(self):
    V = 1.23456789101112131415
    df = DataFrame(
        {
            "f32": Series([V], dtype="float32"),
            "f64": Series([V], dtype="float64"),
            "f64_as_f32": Series([V], dtype="float64"),
            "i32": Series([5], dtype="int32"),
            "i64": Series([5], dtype="int64"),
        }
    )
    df.to_sql(
        "test_dtypes",
        self.conn,
        index=False,
        if_exists="replace",
        dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
    )
    res = sql.read_sql_table("test_dtypes", self.conn)
    # check precision of float64
    assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
    # check sql types
    meta = sqlalchemy.schema.MetaData(bind=self.conn)
    meta.reflect()
    col_dict = meta.tables["test_dtypes"].columns
    assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
    assert isinstance(col_dict["f32"].type, sqltypes.Float)
    assert isinstance(col_dict["f64"].type, sqltypes.Float)
    assert isinstance(col_dict["i32"].type, sqltypes.Integer)
    assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
0
Source : fields.py
with BSD 3-Clause "New" or "Revised" License
from encode
def get_column_type(self):
    return sqlalchemy.Float()
class BigInteger(ModelField):
0
Source : database.py
with MIT License
from fakedrtom
def create_tables(path, effect_fields=None, pls=True):
"""
Create our master DB tables
"""
if pls:
pls = """\
gt_phred_ll_homref blob,
gt_phred_ll_het blob,
gt_phred_ll_homalt blob,
"""
else:
pls = ""
if effect_fields:
effect_string = "".join(e + (" float,\n" if e.endswith("_num") else " TEXT,\n") for e in effect_fields)
else:
effect_string = ""
db = dict(variants="""
chrom varchar(15),
start integer,
end integer,
vcf_id text,
variant_id integer,
anno_id integer,
ref text,
alt text,
qual float,
filter text,
type varchar(15),
sub_type text,
gts blob,
gt_types blob,
gt_phases blob,
gt_depths blob,
gt_ref_depths blob,
gt_alt_depths blob,
gt_alt_freqs blob,
gt_quals blob,
gt_copy_numbers blob,
%s,
call_rate float,
max_aaf_all float,
in_dbsnp bool,
rs_ids text default NULL,
sv_cipos_start_left integer,
sv_cipos_end_left integer,
sv_cipos_start_right integer,
sv_cipos_end_right integer,
sv_length integer,
sv_is_precise bool,
sv_tool text,
sv_evidence_type text,
sv_event_id text,
sv_mate_id text,
sv_strand text,
in_omim bool,
clinvar_sig text default NULL,
clinvar_disease_name text default NULL,
clinvar_dbsource text default NULL,
clinvar_dbsource_id text default NULL,
clinvar_origin text default NULL,
clinvar_dsdb text default NULL,
clinvar_dsdbid text default NULL,
clinvar_disease_acc text default NULL,
clinvar_in_locus_spec_db bool,
clinvar_on_diag_assay bool,
clinvar_causal_allele text,
clinvar_gene_phenotype text,
geno2mp_hpo_ct integer,
pfam_domain text,
cyto_band text default NULL,
rmsk text default NULL,
in_cpg_island bool,
in_segdup bool,
is_conserved bool,
gerp_bp_score float,
gerp_element_pval float,
num_hom_ref integer,
num_het integer,
num_hom_alt integer,
num_unknown integer,
aaf real,
hwe decimal(2,7),
inbreeding_coeff decimal(2,7),
pi decimal(2,7),
recomb_rate decimal(2,7),
gene varchar(60),
transcript varchar(60),
is_exonic bool,
is_coding bool,
is_splicing bool,
is_lof bool,
exon text,
codon_change text,
aa_change text,
aa_length text,
biotype text,
impact varchar(60) default NULL,
impact_so text default NULL,
impact_severity varchar(15),
polyphen_pred text,
polyphen_score float,
sift_pred text,
sift_score float,
anc_allele text,
rms_bq float,
cigar text,
depth integer default NULL,
strand_bias float default NULL,
rms_map_qual float default NULL,
in_hom_run integer default NULL,
num_mapq_zero integer default NULL,
num_alleles integer default NULL,
num_reads_w_dels float default NULL,
haplotype_score float default NULL,
qual_depth float default NULL,
allele_count integer default NULL,
allele_bal float default NULL,
in_hm2 bool,
in_hm3 bool,
is_somatic bool,
somatic_score float,
in_esp bool,
aaf_esp_ea decimal(2,7),
aaf_esp_aa decimal(2,7),
aaf_esp_all decimal(2,7),
exome_chip bool,
in_1kg bool,
aaf_1kg_amr decimal(2,7),
aaf_1kg_eas decimal(2,7),
aaf_1kg_sas decimal(2,7),
aaf_1kg_afr decimal(2,7),
aaf_1kg_eur decimal(2,7),
aaf_1kg_all decimal(2,7),
grc text default NULL,
gms_illumina float,
gms_solid float,
gms_iontorrent float,
in_cse bool,
encode_tfbs text,
encode_dnaseI_cell_count integer,
encode_dnaseI_cell_list text,
encode_consensus_gm12878 text,
encode_consensus_h1hesc text,
encode_consensus_helas3 text,
encode_consensus_hepg2 text,
encode_consensus_huvec text,
encode_consensus_k562 text,
vista_enhancers text,
cosmic_ids text,
info blob,
cadd_raw float,
cadd_scaled float,
fitcons float,
in_exac bool,
aaf_exac_all decimal(2,7),
aaf_adj_exac_all decimal(2,7),
aaf_adj_exac_afr decimal(2,7),
aaf_adj_exac_amr decimal(2,7),
aaf_adj_exac_eas decimal(2,7),
aaf_adj_exac_fin decimal(2,7),
aaf_adj_exac_nfe decimal(2,7),
aaf_adj_exac_oth decimal(2,7),
aaf_adj_exac_sas decimal(2,7),
exac_num_het int,
exac_num_hom_alt int,
exac_num_chroms int,
aaf_gnomad_all decimal(2,7),
aaf_gnomad_afr decimal(2,7),
aaf_gnomad_amr decimal(2,7),
aaf_gnomad_asj decimal(2,7),
aaf_gnomad_eas decimal(2,7),
aaf_gnomad_fin decimal(2,7),
aaf_gnomad_nfe decimal(2,7),
aaf_gnomad_oth decimal(2,7),
aaf_gnomad_sas decimal(2,7),
gnomad_num_het int,
gnomad_num_hom_alt int,
gnomad_num_chroms int,
%s""" % (pls, effect_string.rstrip(",")),
variant_impacts="""
variant_id integer,
anno_id integer,
gene varchar(60),
transcript varchar(60),
is_exonic bool,
is_coding bool,
is_lof bool,
exon text,
codon_change text,
aa_change text,
aa_length text,
biotype text,
impact varchar(60),
impact_so text,
impact_severity varchar(15),
polyphen_pred text,
polyphen_score float,
sift_pred text,
sift_score float,
%s""" % effect_string.rstrip(","),
sample_genotypes="""
sample_id integer,
gt_types BLOB""",
sample_genotype_counts="""
sample_id integer,
num_hom_ref integer,
num_het integer,
num_hom_alt integer,
num_unknown integer""",
resources="""
name text,
resource text""",
version="""version text""",
gene_detailed="""
uid integer,
chrom varchar(60),
gene varchar(60),
is_hgnc bool,
ensembl_gene_id text,
transcript varchar(60),
biotype text,
transcript_status text,
ccds_id varchar(60),
hgnc_id text,
entrez_id text,
cds_length text,
protein_length text,
transcript_start text,
transcript_end text,
strand text,
synonym text,
rvis_pct float,
mam_phenotype_id text""",
gene_summary="""
uid integer,
chrom varchar(60),
gene varchar(60),
is_hgnc bool,
ensembl_gene_id text,
hgnc_id text,
transcript_min_start integer,
transcript_max_end integer,
strand text,
synonym text,
rvis_pct float,
mam_phenotype_id text,
in_cosmic_census bool,
""",
vcf_header="""vcf_header text""")
    # in the future this will be replaced by reading from the conf file.
    lookup = {'real': sql.Float(),
              'float': sql.Float(),
              'text': sql.Text(),
              'bool': sql.Boolean(),
              'blob': sql.LargeBinary(),
              'decimal(2,7)': sql.Float(), #sql.DECIMAL(precision=7, scale=2, asdecimal=False),
              'integer': sql.Integer(),
              'varchar(15)': sql.String(20),
              'varchar(60)': sql.String(60),
              'int': sql.Integer(),
              }
    for table in db:
        db[table] = db[table].strip().strip(",").split(",\n")
        db[table] = [x.strip().split() for x in db[table] if x.strip()]
        cols = [sql.Column(c[0], lookup[c[1].lower()]) for c in db[table]]
        if table != "variant_impacts":
            for c in cols:
                if c.name in ("variant_id", "sample_id", "uid"):
                    c.primary_key = True
                if c.name == "variant_id" and table == "variants":
                    c.autoincrement = False
        db[table] = cols
    e = sql.create_engine(get_path(path), isolation_level=None)
    e.connect().connection.connection.text_factory = str
    metadata = sql.MetaData(bind=e)
    session = create_session(bind=e, autocommit=False, autoflush=False)
    mapped = {}
    tables = ['variants'] + [x for x in sorted(db) if x != 'variants']
    otables = [sql.Table(tbl, metadata, *db[tbl]) for tbl in tables]
    metadata.drop_all(tables=otables)
    for t in otables:
        mapped[t.name] = t
    session.commit()
    metadata.create_all()
    return session, metadata
def create_sample_table(cursor, metadata, args):
0
Source : test_zoomark.py
with Apache License 2.0
from gethue
def _baseline_1_create_tables(self):
    Table(
        "Zoo",
        self.metadata,
        Column(
            "ID",
            Integer,
            Sequence("zoo_id_seq"),
            primary_key=True,
            index=True,
        ),
        Column("Name", Unicode(255)),
        Column("Founded", Date),
        Column("Opens", Time),
        Column("LastEscape", DateTime),
        Column("Admission", Float),
    )
    Table(
        "Animal",
        self.metadata,
        Column("ID", Integer, Sequence("animal_id_seq"), primary_key=True),
        Column("ZooID", Integer, ForeignKey("Zoo.ID"), index=True),
        Column("Name", Unicode(100)),
        Column("Species", Unicode(100)),
        Column("Legs", Integer, default=4),
        Column("LastEscape", DateTime),
        Column("Lifespan", Float(4)),
        Column("MotherID", Integer, ForeignKey("Animal.ID")),
        Column("PreferredFoodID", Integer),
        Column("AlternateFoodID", Integer),
    )
    self.metadata.create_all()
def _baseline_1a_populate(self):
0
Source : test_zoomark_orm.py
with Apache License 2.0
from gethue
def _baseline_1_create_tables(self):
    zoo = Table(
        "Zoo",
        self.metadata,
        Column(
            "ID",
            Integer,
            Sequence("zoo_id_seq"),
            primary_key=True,
            index=True,
        ),
        Column("Name", Unicode(255)),
        Column("Founded", Date),
        Column("Opens", Time),
        Column("LastEscape", DateTime),
        Column("Admission", Float),
    )
    animal = Table(
        "Animal",
        self.metadata,
        Column("ID", Integer, Sequence("animal_id_seq"), primary_key=True),
        Column("ZooID", Integer, ForeignKey("Zoo.ID"), index=True),
        Column("Name", Unicode(100)),
        Column("Species", Unicode(100)),
        Column("Legs", Integer, default=4),
        Column("LastEscape", DateTime),
        Column("Lifespan", Float(4)),
        Column("MotherID", Integer, ForeignKey("Animal.ID")),
        Column("PreferredFoodID", Integer),
        Column("AlternateFoodID", Integer),
    )
    self.metadata.create_all()
    global Zoo, Animal

    class Zoo(object):
        def __init__(self, **kwargs):
            for k, v in kwargs.items():
                setattr(self, k, v)

    class Animal(object):
        def __init__(self, **kwargs):
            for k, v in kwargs.items():
                setattr(self, k, v)

    mapper(Zoo, zoo)
    mapper(Animal, animal)
def _baseline_1a_populate(self):