Here are examples of the Python API `sqlalchemy.case`, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
81 Examples
3
Source : dictlike-polymorphic.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def _case(self):
    """Build a CASE expression switching on ``self.cls.type``.

    Each (attribute, discriminator) pair in the type map contributes a
    WHEN clause mapping the quoted discriminator string to the mapped
    attribute cast to String; NULL is returned when nothing matches.
    """
    pairs = set(self.cls.type_map.values())
    whens = []
    for attr_name, discriminator in pairs:
        if attr_name is None:
            continue
        whens.append(
            (
                literal_column("'%s'" % discriminator),
                cast(getattr(self.cls, attr_name), String),
            )
        )
    # Keyword form of the legacy positional call case(whens, value, else_).
    return case(whens, value=self.cls.type, else_=null())
def __eq__(self, other):
3
Source : test_basic.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_polymorphic_on_expr_explicit_map(self):
    """Joined inheritance: polymorphic_on is a standalone case()
    expression that is also mapped explicitly as a column_property."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    # 'p' / 'c' values stored in t1.x select the parent/child identities.
    expr = case([(t1.c.x == "p", "parent"), (t1.c.x == "c", "child")])
    mapper(
        Parent,
        t1,
        properties={"discriminator": column_property(expr)},
        polymorphic_identity="parent",
        polymorphic_on=expr,
    )
    mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_expr_implicit_map_no_label_joined(self):
3
Source : test_basic.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_polymorphic_on_expr_implicit_map_no_label_joined(self):
    """Joined inheritance: polymorphic_on is an unlabeled, otherwise
    unmapped case() expression; mapping must still work implicitly."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([(t1.c.x == "p", "parent"), (t1.c.x == "c", "child")])
    mapper(Parent, t1, polymorphic_identity="parent", polymorphic_on=expr)
    mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_expr_implicit_map_w_label_joined(self):
3
Source : test_basic.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_polymorphic_on_expr_implicit_map_no_label_single(self):
    """test that single_table_criterion is propagated
    with a standalone expr"""
    # Single-table inheritance: Child is mapped without its own table.
    t1 = self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([(t1.c.x == "p", "parent"), (t1.c.x == "c", "child")])
    mapper(Parent, t1, polymorphic_identity="parent", polymorphic_on=expr)
    mapper(Child, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_expr_implicit_map_w_label_single(self):
3
Source : test_basic.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_polymorphic_on_column_prop(self):
    """polymorphic_on accepts the column_property object itself (not
    just the raw expression it wraps)."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([(t1.c.x == "p", "parent"), (t1.c.x == "c", "child")])
    cprop = column_property(expr)
    mapper(
        Parent,
        t1,
        properties={"discriminator": cprop},
        polymorphic_identity="parent",
        polymorphic_on=cprop,
    )
    mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_column_str_prop(self):
3
Source : test_basic.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_polymorphic_on_column_str_prop(self):
    """polymorphic_on accepts the string name of a mapped
    column_property ("discriminator")."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case([(t1.c.x == "p", "parent"), (t1.c.x == "c", "child")])
    cprop = column_property(expr)
    mapper(
        Parent,
        t1,
        properties={"discriminator": cprop},
        polymorphic_identity="parent",
        polymorphic_on="discriminator",
    )
    mapper(Child, t2, inherits=Parent, polymorphic_identity="child")
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_synonym(self):
3
Source : test_case_statement.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_literal_interpretation(self):
    """Plain literals in the whens list compile to bound parameters,
    both in the "CASE <value> WHEN" shorthand and the searched form."""
    t = table("test", column("col1"))
    self.assert_compile(
        case([("x", "y")], value=t.c.col1),
        "CASE test.col1 WHEN :param_1 THEN :param_2 END",
    )
    self.assert_compile(
        case([(t.c.col1 == 7, "y")], else_="z"),
        "CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END",
    )
def test_text_doesnt_explode(self):
3
Source : test_generative.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_table_to_alias_7(self):
    """ClauseAdapter rewrites t1 columns inside a searched CASE to the
    alias's columns."""
    t1alias = t1.alias("t1alias")
    vis = sql_util.ClauseAdapter(t1alias)
    self.assert_compile(
        vis.traverse(case([(t1.c.col1 == 5, t1.c.col2)], else_=t1.c.col1)),
        "CASE WHEN (t1alias.col1 = :col1_1) THEN "
        "t1alias.col2 ELSE t1alias.col1 END",
    )
def test_table_to_alias_8(self):
3
Source : test_generative.py
with Apache License 2.0
from gethue
with Apache License 2.0
from gethue
def test_table_to_alias_8(self):
    """ClauseAdapter also rewrites the `value` operand of the
    "CASE <value> WHEN" shorthand form."""
    t1alias = t1.alias("t1alias")
    vis = sql_util.ClauseAdapter(t1alias)
    self.assert_compile(
        vis.traverse(
            case([(5, t1.c.col2)], value=t1.c.col1, else_=t1.c.col1)
        ),
        "CASE t1alias.col1 WHEN :param_1 THEN "
        "t1alias.col2 ELSE t1alias.col1 END",
    )
def test_table_to_alias_9(self):
3
Source : sqlalchemy.py
with MIT License
from konstantint
with MIT License
from konstantint
def case_greatest(element, compiler, **kw):
    """Compile greatest(a, b) as "CASE WHEN a > b THEN a ELSE b END"
    for backends without a native GREATEST function."""
    first, second = element.clauses
    expr = sa.case([(first > second, first)], else_=second)
    return compiler.process(expr, **kw)
# Utilities ----------------------------------
import sqlalchemy.dialects
3
Source : search.py
with GNU General Public License v2.0
from makedeb
with GNU General Public License v2.0
from makedeb
def _sort_by_voted(self, order: str):
    """Order the package query by whether the current user has voted
    (CASE 1/0), then by package name.

    `order` is the name of the ordering method ("asc"/"desc",
    presumably — confirm against callers) fetched via getattr.
    """
    # FIXME: Currently, PHP is destroying this implementation
    # in terms of performance. We should improve this; there's no
    # reason it should take _longer_.
    column = getattr(
        case([(models.PackageVote.UsersID == self.user.ID, 1)], else_=0), order
    )
    name = getattr(models.Package.Name, order)
    self.query = self.query.order_by(column(), name())
    return self
def _sort_by_notify(self, order: str):
3
Source : search.py
with GNU General Public License v2.0
from makedeb
with GNU General Public License v2.0
from makedeb
def _sort_by_notify(self, order: str):
    """Order the package query by whether the current user has a
    notification (CASE 1/0), then by package name; same `order`
    convention as _sort_by_voted."""
    # FIXME: Currently, PHP is destroying this implementation
    # in terms of performance. We should improve this; there's no
    # reason it should take _longer_.
    column = getattr(
        case([(models.PackageNotification.UserID == self.user.ID, 1)], else_=0),
        order,
    )
    name = getattr(models.Package.Name, order)
    self.query = self.query.order_by(column(), name())
    return self
def _sort_by_maintainer(self, order: str):
3
Source : db.py
with GNU Affero General Public License v3.0
from maubot
with GNU Affero General Public License v3.0
from maubot
def get_event_stats(cls, direction, limit: int = 10) -> Iterable['EventKarmaStats']:
    """Yield per-event karma stats ordered by `direction` over the
    summed total; positive/negative split the value column by sign."""
    c = cls.c
    return (EventKarmaStats(*row) for row in cls.db.execute(
        select([c.given_in, c.given_for, c.given_to, c.content,
                func.sum(c.value).label("total"),
                # Sum of only the positive values.
                func.sum(case([(c.value > 0, c.value)], else_=0)).label("positive"),
                # Absolute sum of only the negative values.
                func.abs(func.sum(case([(c.value < 0, c.value)], else_=0))).label("negative")])
        .group_by(c.given_for)
        .order_by(direction("total"), asc(c.given_for))
        .limit(limit)))
@classmethod
3
Source : db.py
with GNU Affero General Public License v3.0
from maubot
with GNU Affero General Public License v3.0
from maubot
def get_user_stats(cls, direction, limit: int = 10) -> Iterable['UserKarmaStats']:
    """Yield per-recipient karma stats ordered by `direction` over the
    summed total; positive/negative split the value column by sign."""
    c = cls.c
    return (UserKarmaStats(*row) for row in cls.db.execute(
        select([c.given_to,
                func.sum(c.value).label("total"),
                func.sum(case([(c.value > 0, c.value)], else_=0)).label("positive"),
                func.abs(func.sum(case([(c.value < 0, c.value)], else_=0))).label("negative")])
        .group_by(c.given_to)
        .order_by(direction("total"), asc(c.given_to))
        .limit(limit)))
@classmethod
3
Source : db.py
with GNU Affero General Public License v3.0
from maubot
with GNU Affero General Public License v3.0
from maubot
def get_karma(cls, user_id: UserID) -> Optional['UserKarmaStats']:
    """Return karma totals for a single user, or None when the user has
    no rows (the aggregate query yields nothing to unpack)."""
    c = cls.c
    rows = cls.db.execute(
        select([c.given_to,
                func.sum(c.value).label("total"),
                func.sum(case([(c.value > 0, c.value)], else_=0)).label("positive"),
                func.abs(func.sum(case([(c.value < 0, c.value)], else_=0))).label("negative")]
        ).where(c.given_to == user_id))
    try:
        return UserKarmaStats(*next(rows))
    except StopIteration:
        return None
@classmethod
3
Source : occasion.py
with MIT License
from OneGov
with MIT License
from OneGov
def total_cost(self):
    """SQL expression: occasion cost plus, for periods that are not
    all-inclusive, the booking cost (occasion override first, then the
    period default, then 0)."""
    from onegov.activity.models.period import Period
    # `== True` / `== False` are required here: these build SQL
    # comparison expressions, not Python booleans.
    return coalesce(Occasion.cost, 0) + case([
        (Period.all_inclusive == True, 0),
        (Period.all_inclusive == False, func.coalesce(
            Occasion.booking_cost, Period.booking_cost, 0
        )),
    ])
def compute_duration(self, dates):
3
Source : occasion.py
with MIT License
from OneGov
with MIT License
from OneGov
def available_spots(cls):
    """SQL expression: remaining spots for a non-cancelled occasion —
    upper bound of the `spots` range minus 1 (exclusive upper bound,
    presumably — confirm range semantics) minus attendee_count;
    cancelled occasions report 0."""
    return case((
        (
            cls.cancelled == False,
            func.upper(cls.spots) - 1 - cls.attendee_count
        ),
    ), else_=0)
@property
3
Source : publication.py
with MIT License
from OneGov
with MIT License
from OneGov
def publication_started(cls):
    """SQL expression: True when publication has started — either no
    start date is set, or the start date is now or in the past."""
    # FIX: the operator was mangled to "< =" by HTML extraction in the
    # scraped source; restored to <=.
    return case((
        (
            cls.publication_start == None,
            True
        ),
    ), else_=cls.publication_start <= func.now())
@hybrid_property
3
Source : publication.py
with MIT License
from OneGov
with MIT License
from OneGov
def publication_ended(cls):
    """SQL expression: True once publication_end lies strictly in the
    past; a missing end date means the publication never ends."""
    return case((
        (
            cls.publication_end == None,
            False
        ),
    ), else_=cls.publication_end < func.now())
@hybrid_property
3
Source : file.py
with MIT License
from OneGov
with MIT License
from OneGov
def signature_timestamp(self):
    """SQL expression: the signing timestamp (parsed from the JSON
    signature_metadata) for signed files, NULL otherwise; coerced to
    UTCDateTime for the ORM."""
    return type_coerce(case(
        [(
            File.signed == True,
            # Raw Postgres: parse the ISO timestamp string out of the
            # JSONB metadata into a naive timestamp.
            text("""
            (
            to_timestamp(
            signature_metadata->>'timestamp',
            'YYYY-MM-DD"T"HH24:MI:SS.US'
            )::timestamp without time zone
            )
            """)
        )],
        else_=text('NULL')
    ), UTCDateTime)
@observes('reference')
3
Source : submission.py
with MIT License
from OneGov
with MIT License
from OneGov
def registration_state(cls):
    """SQL expression mapping claimed/spots counts to a state string:
    no spots -> NULL, nothing claimed yet -> 'open', zero claimed ->
    'cancelled', fully claimed -> 'confirmed', partially -> 'partial'.
    WHEN order defines precedence."""
    return case((
        (cls.spots == 0, None),
        (cls.claimed == None, 'open'),
        (cls.claimed == 0, 'cancelled'),
        (cls.claimed == cls.spots, 'confirmed'),
        (cls.claimed < cls.spots, 'partial')
    ), else_=None)
def get_email_field_data(self, form=None):
3
Source : models.py
with MIT License
from packit
with MIT License
from packit
def get_merged_chroots(cls, first: int, last: int) -> Iterable["PipelineModel"]:
    """Return the [first, last) slice of merged pipeline runs, newest
    merged_id first.

    Rows sharing an SRPM build collapse into one group (CASE yields a
    constant 0); rows without an SRPM build group by their own id.
    """
    with get_sa_session() as session:
        return (
            cls.__query_merged_runs(session)
            .group_by(
                PipelineModel.srpm_build_id,
                case(
                    [(PipelineModel.srpm_build_id.isnot(null()), 0)],
                    else_=PipelineModel.id,
                ),
            )
            .order_by(desc("merged_id"))
            .slice(first, last)
        )
@classmethod
3
Source : models.py
with MIT License
from packit
with MIT License
from packit
def get_merged_run(cls, first_id: int) -> Optional[Iterable["PipelineModel"]]:
    """Return the first merged pipeline run with id in
    [first_id, first_id + 100], or None when there is no match."""
    with get_sa_session() as session:
        return (
            cls.__query_merged_runs(session)
            .filter(
                # FIX: "<=" was mangled to "< =" by HTML extraction in
                # the scraped source.
                PipelineModel.id >= first_id, PipelineModel.id <= first_id + 100
            )
            .group_by(
                PipelineModel.srpm_build_id,
                # Same grouping as get_merged_chroots: rows sharing an
                # SRPM build merge; standalone rows group by their id.
                case(
                    [(PipelineModel.srpm_build_id.isnot(null()), 0)],
                    else_=PipelineModel.id,
                ),
            )
            .first()
        )
@classmethod
3
Source : models.py
with Mozilla Public License 2.0
from pass-culture
with Mozilla Public License 2.0
from pass-culture
def timezone(cls):  # pylint: disable=no-self-argument
    """SQL expression for the venue's timezone: when the venue has no
    departementCode, look up the managing offerer's code (correlated
    scalar subquery) in CUSTOM_TIMEZONES; otherwise look up the venue's
    own code. Both lookups fall back to METROPOLE_TIMEZONE."""
    offerer_alias = aliased(Offerer)
    return case(
        [
            (
                cls.departementCode.is_(None),
                # Shorthand CASE switching on the offerer's departement
                # code fetched via the scalar subquery.
                case(
                    CUSTOM_TIMEZONES,
                    value=db.session.query(offerer_alias.departementCode)
                    .filter(cls.managingOffererId == offerer_alias.id)
                    .as_scalar(),
                    else_=METROPOLE_TIMEZONE,
                ),
            )
        ],
        else_=case(CUSTOM_TIMEZONES, value=cls.departementCode, else_=METROPOLE_TIMEZONE),
    )
def field_exists_and_has_changed(self, field: str, value: typing.Any) -> typing.Any:
3
Source : models.py
with Mozilla Public License 2.0
from pass-culture
with Mozilla Public License 2.0
from pass-culture
def departementCode(cls):  # pylint: disable=no-self-argument
    """SQL expression deriving the departement code from postalCode:
    overseas codes (first two digits >= OVERSEAS_DEPARTEMENT_CODE_START)
    use three digits, metropolitan codes use two."""
    return case(
        [
            (
                cast(func.substring(cls.postalCode, 1, 2), Integer) >= OVERSEAS_DEPARTEMENT_CODE_START,
                func.substring(cls.postalCode, 1, 3),
            )
        ],
        else_=func.substring(cls.postalCode, 1, 2),
    )
@cached_property
3
Source : offer_mixin.py
with Mozilla Public License 2.0
from pass-culture
with Mozilla Public License 2.0
from pass-culture
def status(cls):  # pylint: disable=no-self-argument
    """SQL expression for the derived offer status. WHEN order defines
    precedence: validation states (REJECTED/PENDING/DRAFT) win first,
    then inactive, expired and sold-out flags; otherwise ACTIVE."""
    return sa.case(
        [
            (cls.validation == OfferValidationStatus.REJECTED.name, OfferStatus.REJECTED.name),
            (cls.validation == OfferValidationStatus.PENDING.name, OfferStatus.PENDING.name),
            (cls.validation == OfferValidationStatus.DRAFT.name, OfferStatus.DRAFT.name),
            (cls.isActive.is_(False), OfferStatus.INACTIVE.name),
            (cls.hasBookingLimitDatetimesPassed.is_(True), OfferStatus.EXPIRED.name),
            (cls.isSoldOut.is_(True), OfferStatus.SOLD_OUT.name),
        ],
        else_=OfferStatus.ACTIVE.name,
    )
class ValidationMixin:
3
Source : dictlike-polymorphic.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def _case(self):
    """Build a CASE switching on ``self.cls.type``: each mapped
    attribute's discriminator string maps to that attribute cast to
    String; NULL when no discriminator matches."""
    pairs = set(self.cls.type_map.values())
    whens = [
        (
            literal_column("'%s'" % discriminator),
            cast(getattr(self.cls, attribute), String),
        )
        for attribute, discriminator in pairs
        if attribute is not None
    ]
    return case(whens, value=self.cls.type, else_=null())
def __eq__(self, other):
3
Source : test_basic.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_polymorphic_on_expr_explicit_map(self):
    """Joined inheritance (2.0-style positional WHEN tuples):
    polymorphic_on is a standalone case() also mapped explicitly as a
    column_property."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case((t1.c.x == "p", "parent"), (t1.c.x == "c", "child"))
    self.mapper_registry.map_imperatively(
        Parent,
        t1,
        properties={"discriminator": column_property(expr)},
        polymorphic_identity="parent",
        polymorphic_on=expr,
    )
    self.mapper_registry.map_imperatively(
        Child, t2, inherits=Parent, polymorphic_identity="child"
    )
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_expr_implicit_map_no_label_joined(self):
3
Source : test_basic.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_polymorphic_on_expr_implicit_map_no_label_joined(self):
    """Joined inheritance: polymorphic_on is an unlabeled, otherwise
    unmapped case(); the imperative mapping must handle it."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case((t1.c.x == "p", "parent"), (t1.c.x == "c", "child"))
    self.mapper_registry.map_imperatively(
        Parent, t1, polymorphic_identity="parent", polymorphic_on=expr
    )
    self.mapper_registry.map_imperatively(
        Child, t2, inherits=Parent, polymorphic_identity="child"
    )
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_expr_implicit_map_w_label_joined(self):
3
Source : test_basic.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_polymorphic_on_expr_implicit_map_no_label_single(self):
    """test that single_table_criterion is propagated
    with a standalone expr"""
    # Single-table inheritance: Child is mapped without its own table.
    t1 = self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case((t1.c.x == "p", "parent"), (t1.c.x == "c", "child"))
    self.mapper_registry.map_imperatively(
        Parent, t1, polymorphic_identity="parent", polymorphic_on=expr
    )
    self.mapper_registry.map_imperatively(
        Child, inherits=Parent, polymorphic_identity="child"
    )
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_expr_implicit_map_w_label_single(self):
3
Source : test_basic.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_polymorphic_on_column_prop(self):
    """polymorphic_on accepts the column_property object itself."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case((t1.c.x == "p", "parent"), (t1.c.x == "c", "child"))
    cprop = column_property(expr)
    self.mapper_registry.map_imperatively(
        Parent,
        t1,
        properties={"discriminator": cprop},
        polymorphic_identity="parent",
        polymorphic_on=cprop,
    )
    self.mapper_registry.map_imperatively(
        Child, t2, inherits=Parent, polymorphic_identity="child"
    )
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_column_str_prop(self):
3
Source : test_basic.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_polymorphic_on_column_str_prop(self):
    """polymorphic_on accepts the string name of a mapped
    column_property ("discriminator")."""
    t2, t1 = self.tables.t2, self.tables.t1
    Parent, Child = self.classes.Parent, self.classes.Child
    expr = case((t1.c.x == "p", "parent"), (t1.c.x == "c", "child"))
    cprop = column_property(expr)
    self.mapper_registry.map_imperatively(
        Parent,
        t1,
        properties={"discriminator": cprop},
        polymorphic_identity="parent",
        polymorphic_on="discriminator",
    )
    self.mapper_registry.map_imperatively(
        Child, t2, inherits=Parent, polymorphic_identity="child"
    )
    self._roundtrip(parent_ident="p", child_ident="c")
def test_polymorphic_on_synonym(self):
3
Source : test_case_statement.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_literal_interpretation_one(self):
    """note this is modified as of #7287 to accept strings, tuples
    and other literal values as input
    where they are interpreted as bound values just like any other
    expression.
    Previously, an exception would be raised that the literal was
    ambiguous.
    """
    # A bare ("x", "y") tuple becomes WHEN :param_1 THEN :param_2.
    self.assert_compile(
        case(("x", "y")),
        "CASE WHEN :param_1 THEN :param_2 END",
        checkparams={"param_1": "x", "param_2": "y"},
    )
def test_literal_interpretation_two(self):
3
Source : test_case_statement.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_literal_interpretation_three(self):
    """Literals bind as parameters in both the shorthand (with `value`)
    and searched forms, using 2.0-style positional WHEN tuples."""
    t = table("test", column("col1"))
    self.assert_compile(
        case(("x", "y"), value=t.c.col1),
        "CASE test.col1 WHEN :param_1 THEN :param_2 END",
    )
    self.assert_compile(
        case((t.c.col1 == 7, "y"), else_="z"),
        "CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END",
    )
@testing.combinations(
3
Source : test_case_statement.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_when_dicts(self, test_case, expected):
    """Parametrized: WHEN criteria supplied as a dict (with `value`)
    compile to the expected SQL; inputs come from a combinations
    fixture and are resolved via testing.resolve_lambda."""
    t = table("test", column("col1"))
    when_dict, value, else_ = testing.resolve_lambda(test_case, t=t)
    self.assert_compile(
        case(when_dict, value=value, else_=else_), expected
    )
def test_text_doesnt_explode(self, connection):
3
Source : test_case_statement.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_text_doenst_explode_even_in_whenlist(self):
    """test #7287

    A text() construct used directly as a WHEN criterion renders inline
    rather than raising.
    """
    self.assert_compile(
        case(
            (text(":case = 'upper'"), func.upper(literal_column("q"))),
            else_=func.lower(literal_column("q")),
        ),
        "CASE WHEN :case = 'upper' THEN upper(q) ELSE lower(q) END",
    )
def testcase_with_dict(self):
3
Source : test_external_traversal.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_annotated_label_cases(self, case, expected, label_, annotate):
    """test #6550

    NOTE(review): the `case` parameter here is a factory supplied by
    the combinations fixture (it shadows sqlalchemy.case), presumably
    building the expression under test from t1 — confirm against the
    decorator above this method.
    """
    t1 = table("t1", column("col1"))
    a1 = t1.alias()
    expr = case(t1=t1)
    if label_:
        expr = expr.label(None)
    if annotate:
        # Private API: attach annotations to exercise adaption of
        # annotated expressions.
        expr = expr._annotate({"foo": "bar"})
    adapted = sql_util.ClauseAdapter(a1).traverse(expr)
    self.assert_compile(adapted, expected)
@testing.combinations((null(),), (true(),))
3
Source : test_external_traversal.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_table_to_alias_7(self):
    """ClauseAdapter rewrites t1 columns inside a searched CASE
    (2.0-style positional tuples) to the alias's columns."""
    t1alias = t1.alias("t1alias")
    vis = sql_util.ClauseAdapter(t1alias)
    self.assert_compile(
        vis.traverse(case((t1.c.col1 == 5, t1.c.col2), else_=t1.c.col1)),
        "CASE WHEN (t1alias.col1 = :col1_1) THEN "
        "t1alias.col2 ELSE t1alias.col1 END",
    )
def test_table_to_alias_8(self):
3
Source : test_external_traversal.py
with MIT License
from sqlalchemy
with MIT License
from sqlalchemy
def test_table_to_alias_8(self):
    """ClauseAdapter also rewrites the `value` operand of the
    "CASE <value> WHEN" shorthand (2.0-style positional tuples)."""
    t1alias = t1.alias("t1alias")
    vis = sql_util.ClauseAdapter(t1alias)
    self.assert_compile(
        vis.traverse(
            case((5, t1.c.col2), value=t1.c.col1, else_=t1.c.col1)
        ),
        "CASE t1alias.col1 WHEN :param_1 THEN "
        "t1alias.col2 ELSE t1alias.col1 END",
    )
def test_table_to_alias_9(self):
3
Source : credit_transfer.py
with GNU General Public License v3.0
from teamsempo
with GNU General Public License v3.0
from teamsempo
def public_transfer_type(cls):
    """Publicly exposed transfer type as a string: STANDARD subtypes
    show the transfer type, PAYMENT types show the subtype, and
    everything else falls back to the transfer type."""
    from sqlalchemy import case, cast, String

    type_as_string = cast(cls.transfer_type, String)
    subtype_as_string = cast(cls.transfer_subtype, String)
    return case(
        [
            (cls.transfer_subtype == TransferSubTypeEnum.STANDARD, type_as_string),
            (cls.transfer_type == TransferTypeEnum.PAYMENT, subtype_as_string),
        ],
        else_=type_as_string,
    )
def send_blockchain_payload_to_worker(self, is_retry=False, queue='high-priority'):
3
Source : models.py
with GNU General Public License v3.0
from teamsempo
with GNU General Public License v3.0
from teamsempo
def status(cls):
    """SQL expression: overall task status string, obtained by mapping
    the minimum status_code of this task's blockchain transactions
    through STATUS_INT_TO_STRING; falls back to 'UNSTARTED' (which also
    covers a NULL minimum when there are no transactions, presumably —
    confirm CASE NULL semantics for this backend)."""
    return (
        case(
            STATUS_INT_TO_STRING,
            value=(
                select([func.min(BlockchainTransaction.status_code)])
                .where(BlockchainTransaction.blockchain_task_id == cls.id)
                .label('lowest_status')
            ),
            else_='UNSTARTED'
        )
    )
def add_prior_tasks(self, prior_tasks: UUIDList):
3
Source : models.py
with GNU General Public License v3.0
from teamsempo
with GNU General Public License v3.0
from teamsempo
def status_code(cls):
    """SQL expression: integer code for ``_status`` via the
    STATUS_STRING_TO_INT lookup; unknown statuses map to 99."""
    lookup = case(
        STATUS_STRING_TO_INT,
        value=cls._status,
        else_=99,
    )
    return lookup
def __repr__(self):
0
Source : CrudCompra.py
with MIT License
from andrersp
with MIT License
from andrersp
def Pagar(self):
    """Register a payment on the purchase identified by self.id and
    update its payment status ('1' = fully paid, '2' = partial).

    FIX: the original closed the session only on success, leaking it
    when IntegrityError fired; cleanup now happens in finally, with a
    rollback on error.
    """
    # Open session
    conecta = Conexao()
    sessao = conecta.Session()
    try:
        # Select by id
        row = sessao.query(Compra).get(self.id)
        # Payment status: paid in full ('1') or still open ('2'),
        # evaluated server-side against the updated value.
        status = case([
            (Compra.valor_pago >= Compra.valor_total, '1')
        ], else_='2'
        )
        row.valor_pago = Compra.valor_pago + self.valorPago
        row.pagamento = status
        # Execute the query
        sessao.commit()
    except IntegrityError as err:
        sessao.rollback()
        print(err)
    finally:
        # Close the connection in all cases
        sessao.close()
# Lista de Pedidos a receber hoje
def pedidosAReceber(self):
0
Source : CrudContaAPagar.py
with MIT License
from andrersp
with MIT License
from andrersp
def pagarConta(self):
    """Pay an account payable: record payment method, date and amount,
    and set status '1' when the paid total reaches the installment
    value, '2' otherwise.

    FIX: the session leaked on IntegrityError in the original; cleanup
    is now in finally, with a rollback on error.
    """
    # Open session
    conecta = Conexao()
    sessao = conecta.Session()
    try:
        # Select by id
        row = sessao.query(ContaAPagar).get(self.id)
        # Status '1' when the paid amount reaches the installment value
        status = case([
            (ContaAPagar.valor_pago >= ContaAPagar.valor, '1')
        ], else_='2'
        )
        # New values
        row.forma_pagamento = self.formaPagamento
        row.data_pagamento = self.dataPagamento
        row.valor_pago = ContaAPagar.valor_pago + self.valorPago
        row.pagamento = status
        # Execute the query
        sessao.commit()
    except IntegrityError as err:
        sessao.rollback()
        print(err)
    finally:
        # Close the connection in all cases
        sessao.close()
""" Obtendo Movimentação financeira """
0
Source : CrudContaAReceber.py
with MIT License
from andrersp
with MIT License
from andrersp
def receberConta(self):
    """Receive an account receivable: record method, date and amount,
    and set status '1' when the received total reaches the installment
    value, '2' otherwise.

    FIX: the session leaked on IntegrityError in the original; cleanup
    is now in finally, with a rollback on error.
    """
    # Open session
    conecta = Conexao()
    sessao = conecta.Session()
    try:
        # Select by id
        row = sessao.query(ContaAReceber).get(self.id)
        # NOTE(review): compares against row.valor (instance value)
        # while the sibling methods use the class column — confirm
        # this asymmetry is intentional.
        status = case([
            (ContaAReceber.valor_recebido >= row.valor, '1')
        ], else_='2'
        )
        # New values
        row.forma_pagamento = self.formaPagamento
        row.data_recebimento = self.dataRecebimento
        row.valor_recebido = ContaAReceber.valor_recebido + self.valorRecebido
        row.pagamento = status
        # Execute the query
        sessao.commit()
    except IntegrityError as err:
        sessao.rollback()
        print(err)
    finally:
        # Close the connection in all cases
        sessao.close()
""" Obtendo Movimentação financeira """
0
Source : CrudVenda.py
with MIT License
from andrersp
with MIT License
from andrersp
def Receber(self):
    """Register a receipt on the sale identified by self.id and update
    its payment status ('1' = fully received, '2' = partial).

    FIX: the session leaked on IntegrityError in the original; cleanup
    is now in finally, with a rollback on error.
    """
    # Open session
    conecta = Conexao()
    sessao = conecta.Session()
    try:
        # Select by id
        row = sessao.query(Venda).get(self.id)
        # Payment status evaluated server-side against updated totals
        status = case([
            (Venda.valor_recebido >= Venda.valor_total, '1')
        ], else_='2'
        )
        row.valor_recebido = Venda.valor_recebido + self.valorRecebido
        row.pagamento = status
        # Execute the query
        sessao.commit()
    except IntegrityError as err:
        sessao.rollback()
        print(err)
    finally:
        # Close the connection in all cases
        sessao.close()
# Relatório Vendas por periodo
def relatValorDia(self):
0
Source : comparators.py
with MIT License
from bali-framework
with MIT License
from bali-framework
def __clause_element__(self):
    """Render this comparator as a CASE expression over self.whens.

    NOTE(review): the second positional argument to case() is `value`
    in the legacy 1.x signature — confirm self.expression is meant as
    the switch operand for the SQLAlchemy version in use.
    """
    return case(self.whens, self.expression)
def __eq__(self, other):
0
Source : crawler.py
with MIT License
from blakebjorn
with MIT License
from blakebjorn
def calculate_pending_nodes(start_time):
    """Select nodes due for a crawl: never-checked nodes plus nodes
    whose recheck interval (scaled by how recently they were seen) has
    elapsed, excluding anything already checked since `start_time`.
    Marks the returned nodes' last_checked when row locking is enabled.
    """
    now = datetime.datetime.utcnow()
    # Get a list of all never checked nodes, and nodes that have been checked recently:
    q = session.query(Node)
    q = q.filter(or_(
        Node.first_checked == None,
        Node.last_checked == None,
        # Assume 30m interval
        # If it hasn't been seen before, check every 6h
        and_(Node.last_seen == None, Node.last_checked != None,
             Node.last_checked < now - datetime.timedelta(minutes=CONF['crawl_interval'] * 12)),
        # If it has been seen in the last 6 hours, check it every 30 minutes
        and_(Node.last_seen != None, Node.last_seen > now - datetime.timedelta(hours=6),
             Node.last_checked < now - datetime.timedelta(minutes=CONF['crawl_interval'])),
        # If it has been seen in the last 2 weeks, check it every 12 hours
        and_(Node.last_seen != None, Node.last_seen > now - datetime.timedelta(hours=24 * 14),
             Node.last_checked < now - datetime.timedelta(minutes=CONF['crawl_interval'] * 24)),
        # Otherwise every day
        and_(Node.last_seen != None,
             Node.last_checked < now - datetime.timedelta(minutes=CONF['crawl_interval'] * 48))
    )).filter(not_(and_(Node.last_checked != None, Node.last_checked > start_time)))
    if CONF['crawl_order']:
        # Order networks by their position in crawl_order via CASE;
        # unknown networks sort by their raw network name.
        case_order = []
        for i in range(len(CONF['crawl_order'])):
            case_order.append((Node.network == CONF['crawl_order'][i], str(i)))
        q = q.order_by(case(case_order, else_=Node.network), Node.seen.desc(), Node.last_checked)
    else:
        q = q.order_by(Node.seen.desc(), Node.last_checked)
    if CONF['max_queue'] > 0:
        count = q.count()
        q = q.limit(CONF['max_queue'])
    else:
        count = q.count()
    if count > CONF['max_queue']:
        logging.info(f"{count} nodes pending")
    if CONF['database_concurrency']:
        # Lock the rows, stamp last_checked now, then hand them out.
        nodes = q.with_for_update().all()
        session.bulk_update_mappings(Node, [
            {'id': x.id, 'last_checked': now} for x in nodes])
        session.commit()
        return nodes
    return q.all()
def process_pending_nodes(node_addresses, node_processing_queue, recent_heights, thread_pool, mnodes=None):
0
Source : crawler.py
with MIT License
from blakebjorn
with MIT License
from blakebjorn
def dump_summary():
    """Export crawl results to the static/ directory: per-network CSV,
    JSON and text dumps, uptime ratios over several windows, network
    height summaries, and deduplicated per-address listings.
    """
    # Set updated countries
    for n in session.query(Node).all():
        n.country, n.city, n.aso, n.asn = geocode_ip(n.address, )
    # Get and set dash masternodes
    if CONF['get_dash_masternodes']:
        mnodes = update_masternode_list()
        set_master_nodes(mnodes)
        logging.info("masternodes updated")
    # Nodes seen within the last week become the working DataFrame.
    q = session.query(Node.id, Node.network, Node.address, Node.port, Node.user_agent, Node.version, Node.asn, Node.aso,
                      Node.country, Node.city, Node.last_seen, Node.last_height, Node.is_masternode) \
        .filter(Node.seen == True) \
        .filter(Node.last_seen >= datetime.datetime.utcnow() - datetime.timedelta(days=7))
    nodes = pd.read_sql(q.statement, q.session.bind)
    nodes[['port', 'version', 'last_height']] = nodes[['port', 'version', 'last_height']].fillna(0)
    nodes = nodes.fillna("")
    if nodes.empty:
        logging.warning("Nodes table is empty, no results to dump")
        return
    # Exclude user agents
    if CONF['excluded_user_agents']:
        for agent_re in CONF['excluded_user_agents']:
            agent_re = re.compile(agent_re)
            nodes = nodes[~nodes['user_agent'].str.match(agent_re)].copy()
    now = datetime.datetime.utcnow()
    labels = []
    # Success ratio (successful visits / total visits) per lookback window.
    for age, label in [(2, "2h"), (8, "8h"), (24, "24h"), (24 * 7, "7d"), (24 * 30, "30d")]:
        stt = time.time()
        q = session.query(Node.id,
                          func.sum(case([(NodeVisitation.success, 1)], else_=0)).label("success"),
                          func.count(NodeVisitation.parent_id).label("total")) \
            .join(NodeVisitation, Node.id == NodeVisitation.parent_id) \
            .group_by(Node.id) \
            .filter(Node.last_seen > now - datetime.timedelta(hours=age)) \
            .filter(NodeVisitation.timestamp >= now - datetime.timedelta(hours=age))
        df = pd.read_sql(q.statement, q.session.bind)
        df[label] = (df['success'] / df['total']).fillna(0.0)
        nodes = nodes.merge(df[['id', label]], how="left")
        labels.append(label)
        logging.info(f"done {label} in {round(time.time() - stt, 3)}s")
    # NOTE(review): positional axis argument to drop() is deprecated in
    # newer pandas; kept as-is.
    nodes = nodes.drop(['id'], 1)
    nodes[labels] = nodes[labels].fillna(0.0).round(3)
    nodes[['network', 'address']] = nodes[['network', 'address']].fillna("")
    nodes['address_type'] = nodes['address'].apply(code_ip_type)
    # Bitcoin-cash nodes advertising " SV" agents are relabeled bitcoin-sv.
    nodes['network'] = nodes[['network', 'user_agent']].apply(
        lambda x: "bitcoin-sv" if x['network'] == 'bitcoin-cash' and ' SV' in x['user_agent'] else x['network'], axis=1)
    networks = nodes['network'].unique()
    # Calculate summaries
    summaries = {}
    for network in networks:
        summary_df = nodes[(nodes['network'] == network) &
                           (nodes['last_seen'] > datetime.datetime.utcnow() - datetime.timedelta(
                               hours=8))]
        if summary_df.empty:
            continue
        # Height distribution statistics for nodes seen in the last 8h.
        summaries[network] = {
            "min": int(summary_df['last_height'].fillna(np.inf).min()),
            "max": int(summary_df['last_height'].fillna(0.0).max()),
            "mean": float(summary_df['last_height'].mean()),
            "stdev": float(summary_df['last_height'].std()),
            "med": float(summary_df['last_height'].median()),
            "1q": float(np.percentile(summary_df['last_height'], 25)),
            "3q": float(np.percentile(summary_df['last_height'], 75)),
            "2.5pct": float(np.percentile(summary_df['last_height'], 1)),
            "97.5pct": float(np.percentile(summary_df['last_height'], 99)),
            "age_min": nodes[nodes['network'] == network]['last_seen'].min().timestamp(),
            "age_max": summary_df['last_seen'].max().timestamp()
        }
        summaries[network]['iqr'] = summaries[network]['3q'] - summaries[network]['1q']
        summaries[network]['95_range'] = summaries[network]['97.5pct'] - summaries[network]['2.5pct']
    summaries["_timestamp"] = datetime.datetime.utcnow().isoformat()
    with open("static/network_summaries.json", 'w') as f:
        json.dump(summaries, f)
    # Activity threshold per network: IQR-scaled or fixed deviation.
    if CONF['inactive_use_iqr']:
        deviations = {network: summaries[network]['iqr'] * (
            CONF['inactive_threshold'][network] if network in CONF['inactive_threshold'] else
            CONF['inactive_threshold']['default']) for network in networks}
    else:
        deviations = {net: CONF['inactive_threshold'][net] if net in CONF['inactive_threshold'] else \
            CONF['inactive_threshold']['default'] for net in networks}
    for i in deviations:
        deviations[i] = (deviations[i], summaries[i]['3q'])
    nodes['is_active'] = nodes[['network', 'last_height']] \
        .apply(lambda x: check_active(x['last_height'], deviations[x['network']]), axis=1)
    if not CONF['export_inactive_nodes']:
        nodes = nodes[nodes['is_active']].copy()
    # Epoch seconds for export.
    nodes['last_seen'] = nodes['last_seen'].values.astype(np.int64) // 10 ** 9
    nodes.to_csv("static/data.csv", index=False)
    with open("static/data.txt", "w") as f:
        f.write(space_sep_df(nodes))
    for network in nodes['network'].unique():
        net_df = nodes[nodes['network'] == network].copy()
        net_df = net_df.drop(['network'], 1)
        net_df.to_csv(f"static/data_{network}.csv", index=False)
        with open(os.path.join("static", f"data_{network}.json"), "w") as f:
            json.dump({'data': net_df.to_dict(orient="records")}, f)
        with open(os.path.join("static", f"data_{network}.txt"), "w") as f:
            f.write(space_sep_df(net_df))
    nodes = nodes.drop(['user_agent', 'version', 'last_height'], 1)
    with open(os.path.join("static", "data.json"), "w") as f:
        json.dump({'data': nodes.to_dict(orient="records")}, f)

    # Write unique addresses only
    def group_nets(x):
        # Collapse the networks an address serves into a sorted list.
        return ", ".join(sorted(set(x)))
    nodes = nodes.groupby(by=['address', 'asn', 'aso', 'country', 'city', 'address_type'], as_index=False).agg(
        {"network": group_nets, "2h": "mean", "8h": "mean", "24h": "mean", "7d": "mean", "30d": "mean"})
    nodes.to_csv("static/data_unique.csv", index=False)
    with open(os.path.join("static", "data_unique.json"), "w") as f:
        json.dump({'data': nodes.to_dict(orient="records")}, f)
    with open(os.path.join("static", "data_unique.txt"), "w") as f:
        f.write(space_sep_df(nodes))
    for network in networks:
        net_df = nodes[nodes['network'].str.contains(network)]
        net_df = net_df.drop(['network'], 1)
        net_df.to_csv(os.path.join("static", f"data_{network}_unique.csv"), index=False)
        with open(os.path.join("static", f"data_{network}_unique.json"), "w") as f:
            json.dump({'data': net_df.to_dict(orient="records")}, f)
        with open(os.path.join("static", f"data_{network}_unique.txt"), "w") as f:
            f.write(space_sep_df(net_df))
def space_sep_df(df, spacing=3):
0
Source : crawler.py
with MIT License
from blakebjorn
with MIT License
from blakebjorn
def generate_historic_data():
    """Backfill CrawlSummary rows, one per historic interval: active
    node and masternode counts per network, then export per-network
    history JSON files.

    FIX: two comparison operators were mangled to "< =" by HTML
    extraction in the scraped source; restored to <=.
    """
    networks = [x[0] for x in session.query(Node.network).distinct()]
    sd = session.query(func.min(Node.first_seen)).one()[0]
    # Align the start to the configured interval boundary.
    start_date = datetime.datetime(sd.year, sd.month, sd.day,
                                   sd.hour // CONF['historic_interval'] * CONF['historic_interval'], 0, 0)
    end_date = session.query(func.max(Node.last_seen)).one()[0]
    historic_interval = datetime.timedelta(hours=CONF['historic_interval'])
    # Advance last_date to the first interval boundary past end_date.
    last_date = start_date
    while last_date < end_date:
        last_date += historic_interval
    interval_end = start_date + historic_interval
    # Re-summarize the most recent 1.5 intervals.
    session.query(CrawlSummary).filter(
        CrawlSummary.timestamp >= (last_date - datetime.timedelta(hours=CONF['historic_interval'] * 1.5))).delete()
    session.commit()
    while interval_end < end_date:
        if session.query(CrawlSummary).filter(CrawlSummary.timestamp == interval_end).count() >= 1:
            interval_end += historic_interval
            continue
        logging.info(f"Summarizing period starting with {interval_end - historic_interval}")
        # User agents containing " SV" are relabeled bitcoin-sv.
        sv_sq = session.query(UserAgent.id).filter(UserAgent.user_agent.ilike("% SV%")).subquery()
        case_stmt = case([(sv_sq.c.id != None, 'bitcoin-sv')], else_=Node.network)
        q = session.query(NodeVisitation.parent_id.label("id"),
                          case_stmt.label("network"),
                          func.max(NodeVisitation.height).label("height"),
                          func.max(case([(NodeVisitation.is_masternode, 1)], else_=0)).label("is_masternode")) \
            .join(sv_sq, NodeVisitation.user_agent_id == sv_sq.c.id) \
            .join(Node, Node.id == NodeVisitation.parent_id) \
            .filter(NodeVisitation.timestamp >= interval_end - historic_interval) \
            .filter(NodeVisitation.timestamp <= interval_end) \
            .filter(NodeVisitation.success == True) \
            .filter(Node.first_seen <= interval_end) \
            .filter(Node.last_seen >= interval_end - historic_interval) \
            .group_by(NodeVisitation.parent_id, case_stmt)
        df = pd.read_sql(q.statement, q.session.bind)
        df['height'] = df['height'].astype(int)
        if not df.empty:
            networks = df['network'].unique()
            medians = df.groupby(by=['network']).agg({"height": "median"})
            deviations = {network: CONF['inactive_threshold'][network] if network in CONF['inactive_threshold'] else \
                CONF['inactive_threshold']['default'] for network in networks}
            for i in list(deviations.keys()):
                if i in medians.index:
                    deviations[i] = (deviations[i], medians.loc[i]['height'])
                else:
                    deviations.pop(i)
            # Drop nodes whose height deviates too far from the median.
            df['active'] = df[['network', 'height']].apply(
                lambda x: check_active(x['height'], deviations[x['network']]), axis=1)
            df = df[df['active']].drop(['active'], 1)
            for network in networks:
                net_df = df[df['network'] == network]
                cs = CrawlSummary(timestamp=interval_end,
                                  network=network,
                                  node_count=len(net_df),
                                  masternode_count=sum(net_df['is_masternode']),
                                  lookback_hours=CONF['historic_interval'])
                session.add(cs)
        session.commit()
        interval_end += datetime.timedelta(hours=CONF['historic_interval'])
    # Export the accumulated summaries per network.
    q = session.query(CrawlSummary).order_by(CrawlSummary.timestamp)
    df = pd.read_sql(q.statement, q.session.bind)
    df['timestamp'] = df['timestamp'].values.astype(np.int64) // 10 ** 9
    for network in networks:
        df[df['network'] == network][['timestamp', 'node_count', 'masternode_count']] \
            .to_json(os.path.join("static", f"history_{network}.json"), orient='records')
def prune_database():
See More Examples