Here are examples of the Python API sqlalchemy.asc, taken from open source projects.
82 Examples
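Before the examples, a quick orientation: sqlalchemy.asc() wraps a column, SQL expression, or string column label in an ascending ORDER BY clause, and is typically passed to Query.order_by() or Select.order_by(); sqlalchemy.desc() is its descending counterpart. A minimal, self-contained sketch (the User model and in-memory SQLite engine here are hypothetical, for illustration only):

from sqlalchemy import Column, Integer, String, asc, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "users"  # hypothetical table, for illustration
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")  # in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([User(name="carol"), User(name="alice"), User(name="bob")])
    session.commit()
    # asc() accepts a mapped column; several examples below pass a string label instead
    users = session.query(User).order_by(asc(User.name)).all()
    print([u.name for u in users])  # ['alice', 'bob', 'carol']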

Source : response.py
with GNU Affero General Public License v3.0
from andrewcooke
def _range(self, s, statistic_name, value, finish_time, period):
    jtype = TYPE_TO_JOURNAL_CLASS[type(value.value)]
    start_time = finish_time - period if period else to_time(0.0)
    q = s.query(jtype). \
        filter(jtype.statistic_name == statistic_name,
               jtype.time >= start_time,
               jtype.time < finish_time)
    return (q.order_by(asc(jtype.value)).first(),
            q.order_by(desc(jtype.value)).first())

Source : statistic.py
with GNU Affero General Public License v3.0
from andrewcooke
def after(cls, s, time, name, owner, activity_group):
    from . import ActivityGroup, Source
    return s.query(StatisticJournal).join(StatisticName).join(Source). \
        filter(StatisticName.name == name,
               StatisticJournal.time >= time,
               StatisticName.owner == owner,
               Source.activity_group == ActivityGroup.from_name(s, activity_group)). \
        order_by(asc(StatisticJournal.time)).limit(1).one_or_none()

@classmethod

Source : notifications_dao.py
with MIT License
from cds-snc
def get_notifications_for_job(service_id, job_id, filter_dict=None, page=1, page_size=None):
    if page_size is None:
        page_size = current_app.config["PAGE_SIZE"]
    query = Notification.query.filter_by(service_id=service_id, job_id=job_id)
    query = _filter_query(query, filter_dict)
    return query.order_by(asc(Notification.job_row_number)).paginate(page=page, per_page=page_size)

@statsd(namespace="dao")

Source : provider_details_dao.py
with MIT License
from cds-snc
def get_current_provider(notification_type):
    return (
        ProviderDetails.query.filter_by(notification_type=notification_type, active=True)
        .order_by(asc(ProviderDetails.priority))
        .first()
    )

def dao_get_provider_versions(provider_id):

Source : provider_details_dao.py
with MIT License
from cds-snc
def get_provider_details_by_notification_type(notification_type, supports_international=False):
    filters = [ProviderDetails.notification_type == notification_type]
    if supports_international:
        filters.append(ProviderDetails.supports_international == supports_international)
    return ProviderDetails.query.filter(*filters).order_by(asc(ProviderDetails.priority)).all()

@transactional

Source : provider_details_dao.py
with MIT License
from cds-snc
def dao_get_sms_provider_with_equal_priority(identifier, priority):
    provider = (
        db.session.query(ProviderDetails)
        .filter(
            ProviderDetails.identifier != identifier,
            ProviderDetails.notification_type == "sms",
            ProviderDetails.priority == priority,
            ProviderDetails.active,
        )
        .order_by(asc(ProviderDetails.priority))
        .first()
    )
    return provider

def dao_get_provider_stats():

Source : settings.py
with MIT License
from ctxis
def all(self):
    results = ConfigModel.query.order_by(asc(ConfigModel.name)).all()
    settings = {}
    for result in results:
        settings[result.name] = self.__process_return_value(result.value, None, ',')
    return settings

Source : sqlalchemy_store.py
with Apache License 2.0
from flink-extended
def apply(self, criterion, query, value):
    if value == 'ascend':
        return query.order_by(asc(getattr(criterion, self.column_name)))
    elif value == 'descend':
        return query.order_by(desc(getattr(criterion, self.column_name)))

class SqlAlchemyStore(AbstractStore):

Source : event_based_scheduler_job.py
with Apache License 2.0
from flink-extended
def get_unprocessed_message(last_scheduling_id: int) -> List[IdentifiedMessage]:
    with create_session() as session:
        results: List[MSG] = session.query(MSG).filter(
            MSG.scheduling_job_id == last_scheduling_id,
            MSG.state == MessageState.QUEUED
        ).order_by(asc(MSG.id)).all()
        unprocessed: List[IdentifiedMessage] = []
        for msg in results:
            unprocessed.append(IdentifiedMessage(msg.data, msg.id, msg.queue_time))
        return unprocessed

def _find_dagrun(self, dag_id, execution_date, session) -> DagRun:

Source : test_text.py
with Apache License 2.0
from gethue
def test_asc(self):
    stmt = select([table1.c.myid]).order_by(asc("name"), "description")
    self.assert_compile(
        stmt,
        "SELECT mytable.myid FROM mytable "
        "ORDER BY mytable.name ASC, mytable.description",
    )

def test_group_by_subquery(self):

Source : queries.py
with GNU General Public License v3.0
from GIScience
def query_oldest_country_price(country):
    query = (db.session.query(CountryDataModel.country_alpha_2,
                              EurostatCountryPriceModel.date).filter(
        CountryDataModel.country_alpha_2 == country).join(
        EurostatCountryPriceModel,
        CountryDataModel.country_alpha_2 == EurostatCountryPriceModel.country_alpha_2).order_by(
        asc(EurostatCountryPriceModel.date)).first())
    return query

def query_general_price() -> GeneralPrice:

Source : routes.py
with Apache License 2.0
from google
def get_pending_proposals_paged():
    entries = db.session.query(Vulnerability, Nvd)
    entries = entries.filter(Vulnerability.state != VulnerabilityState.PUBLISHED)
    entries = entries.outerjoin(Vulnerability, Nvd.cve_id == Vulnerability.cve_id)
    entries = entries.order_by(asc(Vulnerability.state), desc(Nvd.id))
    bookmarked_page = parse_pagination_param("review_p")
    per_page = 10
    entries_full = entries.options(default_nvd_view_options)
    review_vulns = get_page(entries_full, per_page, page=bookmarked_page)
    review_vulns = VulnViewTypesetPaginationObjectWrapper(review_vulns.paging)
    return review_vulns

def get_reviewed_proposals_paged():

Source : blocks.py
with Apache License 2.0
from grin-pool
def get_latest(cls, n=None):
    highest = database.db.getSession().query(func.max(Blocks.height)).scalar()
    if n == None:
        return database.db.getSession().query(Blocks).filter(Blocks.height == highest).first()
    else:
        return list(database.db.getSession().query(Blocks).filter(Blocks.height >= highest-n).order_by(asc(Blocks.height)))

# Get record(s) by height and range
@classmethod

Source : blocks.py
with Apache License 2.0
from grin-pool
def get_by_height(cls, height, range=None):
    if range == None:
        return database.db.getSession().query(Blocks).filter(Blocks.height == height).first()
    else:
        h_start = height-(range-1)
        h_end = height
        return list(database.db.getSession().query(Blocks).filter(and_(Blocks.height >= h_start, Blocks.height <= h_end)).order_by(asc(Blocks.height)))

# Get stats records falling within requested range
@classmethod

Source : grin_stats.py
with Apache License 2.0
from grin-pool
def get_latest(cls, range=None):
    highest = database.db.getSession().query(func.max(Grin_stats.height)).scalar()
    if range == None:
        return database.db.getSession().query(Grin_stats).filter(Grin_stats.height == highest).first()
    else:
        h_start = highest-(range-1)
        return list(database.db.getSession().query(Grin_stats).filter(Grin_stats.height >= h_start).order_by(asc(Grin_stats.height)))

# Get record(s) by height and optional historical range
@classmethod

Source : grin_stats.py
with Apache License 2.0
from grin-pool
def get_by_height(cls, height, range=None):
    if range == None:
        return database.db.getSession().query(Grin_stats).filter(Grin_stats.height == height).first()
    else:
        h_start = height-(range-1)
        h_end = height
        return list(database.db.getSession().query(Grin_stats).filter(and_(Grin_stats.height >= h_start, Grin_stats.height <= h_end)).order_by(asc(Grin_stats.height)))

# Get stats records falling within requested time range
@classmethod

Source : grin_stats.py
with Apache License 2.0
from grin-pool
def get_by_time(cls, ts, range=None):
    if range == None:
        # XXX Get a range, sort, and give closest?
        return database.db.getSession().query(Grin_stats).filter(Grin_stats.timestamp <= ts).first()
    else:
        ts_start = ts-range
        ts_end = ts
        return list(database.db.getSession().query(Grin_stats).filter(and_(Grin_stats.timestamp >= ts_start, Grin_stats.timestamp <= ts_end)).order_by(asc(Grin_stats.height)))

Source : pool_blocks.py
with Apache License 2.0
from grin-pool
def get_by_time(cls, ts, range):
    if range == None:
        # XXX TODO: Test this
        return database.db.getSession().query(Pool_blocks).filter(Pool_blocks.timestamp <= ts).first()
    else:
        ts_start = ts-range
        ts_end = ts
        return list(database.db.getSession().query(Pool_blocks).filter(and_(Pool_blocks.timestamp >= ts_start, Pool_blocks.timestamp <= ts_end)).order_by(asc(Pool_blocks.height)))

Source : pool_stats.py
with Apache License 2.0
from grin-pool
def get_latest(cls, n=None):
    highest = database.db.getSession().query(func.max(Pool_stats.height)).scalar()
    if n == None:
        return database.db.getSession().query(Pool_stats).filter(Pool_stats.height == highest).first()
    else:
        return list(database.db.getSession().query(Pool_stats).filter(Pool_stats.height >= highest-n).order_by(asc(Pool_stats.height)))

# Get record(s) by height
@classmethod

Source : pool_stats.py
with Apache License 2.0
from grin-pool
def get_by_height(cls, height, range=None):
    if height == 0:
        height = database.db.getSession().query(func.max(Pool_stats.height)).scalar()
    if range == None:
        return database.db.getSession().query(Pool_stats).filter(Pool_stats.height == height).first()
    else:
        h_start = height-(range-1)
        h_end = height
        return list(database.db.getSession().query(Pool_stats).filter(and_(Pool_stats.height >= h_start, Pool_stats.height <= h_end)).order_by(asc(Pool_stats.height)))

# Get stats by timestamp
@classmethod

Source : pool_stats.py
with Apache License 2.0
from grin-pool
def get_by_time(cls, ts, range):
    if range == None:
        # XXX TODO: Test this
        return database.db.getSession().query(Pool_stats).filter(Pool_stats.timestamp <= ts).first()
    else:
        ts_start = ts-range
        ts_end = ts
        return list(database.db.getSession().query(Pool_stats).filter(and_(Pool_stats.timestamp >= ts_start, Pool_stats.timestamp <= ts_end)).order_by(asc(Pool_stats.height)))

# Get the earliest dirty stat
@classmethod

Source : worker_stats.py
with Apache License 2.0
from grin-pool
def get_by_height(cls, height, range=None):
    if range == None:
        return list(database.db.getSession().query(Worker_stats).filter(Worker_stats.height == height))
    else:
        h_start = height-(range-1)
        h_end = height
        return list(database.db.getSession().query(Worker_stats).filter(and_(Worker_stats.height >= h_start, Worker_stats.height <= h_end)).order_by(asc(Worker_stats.height)))

# Get record(s) by height for a single worker id
@classmethod

Source : worker_stats.py
with Apache License 2.0
from grin-pool
def get_by_height_and_id(cls, id, height, range=None):
    if range == None:
        return database.db.getSession().query(Worker_stats).filter(and_(Worker_stats.height == height, Worker_stats.user_id == id)).one_or_none()
    else:
        h_start = height-(range-1)
        h_end = height
        return list(database.db.getSession().query(Worker_stats).filter(and_(Worker_stats.height >= h_start, Worker_stats.height <= h_end, Worker_stats.user_id == id)).order_by(asc(Worker_stats.height)))

# Get stats by timestamp
@classmethod

Source : worker_stats.py
with Apache License 2.0
from grin-pool
def get_by_time(cls, id, ts, range):
    if range == None:
        # XXX TODO: Test this
        return list(database.db.getSession().query(Worker_stats).filter(and_(Worker_stats.timestamp <= ts, Worker_stats.user_id == id)))
    else:
        ts_start = ts-range
        ts_end = ts
        return list(database.db.getSession().query(Worker_stats).filter(and_(Worker_stats.timestamp >= ts_start, Worker_stats.timestamp <= ts_end, Worker_stats.user_id == id)).order_by(asc(Worker_stats.height)))

# Get the earliest dirty stat
@classmethod

Source : topic_mysql_template.py
with MIT License
from Indexical-Metrics-Measure-Advisory
def build_mysql_order(table, order_: list):
    result = []
    if order_ is None:
        return result
    else:
        for item in order_:
            if isinstance(item, tuple):
                if item[1] == "desc":
                    new_ = desc(table.c[item[0].lower()])
                    result.append(new_)
                if item[1] == "asc":
                    new_ = asc(table.c[item[0].lower()])
                    result.append(new_)
        return result

def clear_metadata(self):

Source : topic_oracle_template.py
with MIT License
from Indexical-Metrics-Measure-Advisory
def build_oracle_order(self, table, order_: list):
    result = []
    if order_ is None:
        return result
    else:
        for item in order_:
            if isinstance(item, tuple):
                if item[1] == "desc":
                    new_ = desc(table.c[item[0].lower()])
                    result.append(new_)
                if item[1] == "asc":
                    new_ = asc(table.c[item[0].lower()])
                    result.append(new_)
        return result

'''

Source : db.py
with GNU Affero General Public License v3.0
from maubot
def get_event_stats(cls, direction, limit: int = 10) -> Iterable['EventKarmaStats']:
    c = cls.c
    return (EventKarmaStats(*row) for row in cls.db.execute(
        select([c.given_in, c.given_for, c.given_to, c.content,
                func.sum(c.value).label("total"),
                func.sum(case([(c.value > 0, c.value)], else_=0)).label("positive"),
                func.abs(func.sum(case([(c.value < 0, c.value)], else_=0))).label("negative")])
        .group_by(c.given_for)
        .order_by(direction("total"), asc(c.given_for))
        .limit(limit)))

@classmethod

Source : db.py
with GNU Affero General Public License v3.0
from maubot
def get_user_stats(cls, direction, limit: int = 10) -> Iterable['UserKarmaStats']:
    c = cls.c
    return (UserKarmaStats(*row) for row in cls.db.execute(
        select([c.given_to,
                func.sum(c.value).label("total"),
                func.sum(case([(c.value > 0, c.value)], else_=0)).label("positive"),
                func.abs(func.sum(case([(c.value < 0, c.value)], else_=0))).label("negative")])
        .group_by(c.given_to)
        .order_by(direction("total"), asc(c.given_to))
        .limit(limit)))

@classmethod

Source : db.py
with GNU Affero General Public License v3.0
from maubot
def find_index_from_top(cls, user_id: UserID) -> int:
    c = cls.c
    rows = cls.db.execute(select([c.given_to])
                          .group_by(c.given_to)
                          .order_by(desc(func.sum(c.value)), asc(c.given_to)))
    for i, row in enumerate(rows):
        if row[0] == user_id:
            return i
    return -1

@classmethod

Source : base_list_view.py
with Apache License 2.0
from Nextdoor
def get_order_by(self):
    if self.order == 'asc':
        return asc(self.sort)
    else:
        return desc(self.sort)

@auth.login_required

Source : models.py
with MIT License
from optuna
def find_min_value_trial(
    cls, study_id: int, objective: int, session: orm.Session
) -> "TrialModel":
    trial = (
        session.query(cls)
        .filter(cls.study_id == study_id)
        .filter(cls.state == TrialState.COMPLETE)
        .join(TrialValueModel)
        .filter(TrialValueModel.objective == objective)
        .order_by(asc(TrialValueModel.value))
        .limit(1)
        .one_or_none()
    )
    if trial is None:
        raise ValueError(NOT_FOUND_MSG)
    return trial

@classmethod

Source : models.py
with MIT License
from optuna
def where_trial_id(cls, trial_id: int, session: orm.Session) -> List["TrialValueModel"]:
    trial_values = (
        session.query(cls).filter(cls.trial_id == trial_id).order_by(asc(cls.objective)).all()
    )
    return trial_values

class TrialIntermediateValueModel(BaseModel):

Source : test_text.py
with MIT License
from sqlalchemy
def test_asc(self):
    stmt = select(table1.c.myid).order_by(asc("name"), "description")
    self.assert_compile(
        stmt,
        "SELECT mytable.myid FROM mytable "
        "ORDER BY mytable.name ASC, mytable.description",
    )

def test_group_by_subquery(self):

Source : review_repo.py
with GNU General Public License v3.0
from Toaster192
def get_tierboard(self, type, sem, degree, year, offset=0):
    subquery = self.gen_tierboard_subquery(type, sem, degree, year)
    return (
        session.query(subquery.c.shortcut, subquery.c.avg_tier)
        .filter(subquery.c.avg_tier != None)
        .order_by(asc("avg_tier"))
        .offset(offset)
        .limit(10)
        .all()
    )

def get_tierboard_page_count(self, type, sem, degree, year):

Source : review_repo.py
with GNU General Public License v3.0
from Toaster192
def get_tierboard_page_count(self, type, sem, degree, year):
    subquery = self.gen_tierboard_subquery(type, sem, degree, year)
    return math.ceil((
        session.query(subquery.c.shortcut, subquery.c.avg_tier)
        .filter(subquery.c.avg_tier != None)
        .order_by(asc("avg_tier"))
        .count()
    )/10)

def set_subject_details(self, shortcut, name, credits, semester, end, card, type, for_year, degree):

Source : models.py
with BSD 3-Clause "New" or "Revised" License
from aminalaee
async def list(
    self, page: int, page_size: int, search: str, sort_by: str, sort: str
) -> Pagination:
    page_size = min(page_size or self.page_size, max(self.page_size_options))
    count = await self.count()
    stmt = select(self.model).limit(page_size).offset((page - 1) * page_size)
    for _, relation in self._list_relations:
        stmt = stmt.options(selectinload(relation.key))
    sort_field = self.get_model_attr(sort_by) if sort_by else self.pk_column
    if sort == "desc":
        stmt = stmt.order_by(desc(sort_field))
    else:
        stmt = stmt.order_by(asc(sort_field))
    if search:
        expressions = [attr.ilike(f"%{search}%") for attr in self._search_fields]
        stmt = stmt.filter(or_(*expressions))
    rows = await self._run_query(stmt)
    pagination = Pagination(
        rows=rows,
        page=page,
        page_size=page_size,
        count=count,
    )
    return pagination

async def get_model_by_pk(self, value: Any) -> Any:

Source : query.py
with GNU Affero General Public License v3.0
from andrewcooke
def std_health_statistics(s, freq='1h'):
    from ..pipeline.owners import ResponseCalculator, ActivityCalculator
    # 2 days to skip constants etc with time zones
    start = s.query(StatisticJournal.time). \
        filter(StatisticJournal.time > TIME_ZERO + dt.timedelta(days=2)). \
        order_by(asc(StatisticJournal.time)).limit(1).scalar()
    finish = s.query(StatisticJournal.time).order_by(desc(StatisticJournal.time)).limit(1).scalar()
    # convert to UTC because we may have postgres timezones (at UTC, but incompatible)
    stats = pd.DataFrame(index=pd.date_range(start=start.replace(tzinfo=pytz.UTC),
                                             end=finish.replace(tzinfo=pytz.UTC), freq=freq))
    set_times_from_index(stats)
    stats = Statistics(s). \
        by_name(ResponseCalculator, N.DEFAULT_ANY, like=True).with_. \
        drop_prefix(N.DEFAULT + SPACE).into(stats, tolerance='30m')
    stats = Statistics(s). \
        by_name(ActivityCalculator, N._delta(N.DEFAULT_ANY), like=True).with_. \
        rename_with_units(N.REST_HR).into(stats, tolerance='30m')
    stats = Statistics(s). \
        by_group(ActivityCalculator, N.ACTIVE_TIME, N.ACTIVE_DISTANCE).with_. \
        coalesce_groups(N.ACTIVE_TIME, N.ACTIVE_DISTANCE). \
        rename_with_units(N.ACTIVE_TIME, N.ACTIVE_DISTANCE). \
        copy({N.ACTIVE_TIME_S: N.ACTIVE_TIME_H}, scale=1 / 3600). \
        into(stats, tolerance='30m')
    return stats

def std_activity_statistics(s, activity_journal, activity_group=None):

Source : query.py
with GNU Affero General Public License v3.0
from andrewcooke
def interpolate(s, source, statistic_name, statistic_owner, time, activity_group=None):
    if not isinstance(source, Source):
        source = ActivityJournal.at(s, source, activity_group=activity_group)
    before = s.query(StatisticJournal). \
        join(StatisticName). \
        filter(StatisticName.name == statistic_name,
               StatisticName.owner == statistic_owner,
               StatisticJournal.time <= time,
               StatisticJournal.source == source). \
        order_by(desc(StatisticJournal.time)).first()
    after = s.query(StatisticJournal). \
        join(StatisticName). \
        filter(StatisticName.name == statistic_name,
               StatisticName.owner == statistic_owner,
               StatisticJournal.time >= time,
               StatisticJournal.source == source). \
        order_by(asc(StatisticJournal.time)).first()
    if before is None or after is None:
        return None
    if before.time == time:
        return before.value
    if after.time == time:
        return after.value
    dt = (after.time - before.time).total_seconds()
    ta = (after.time - time).total_seconds()
    tb = (time - before.time).total_seconds()
    return (ta * before.value + tb * after.value) / dt

Source : view_models.py
with GNU Affero General Public License v3.0
from BluABK
def sort_playback_view_videos(self):
    """
    Applies a sort-by rule to the PlaybackGridView videos list.
    update_sort is a tuple of priority sort categories, first element is highest, last is lowest.
    update_sort += operations requires at least two items on rhs.
    :return:
    """
    sort_by_ascending_date = read_config('PlaySort', 'ascending_date')
    sort_by_channel = read_config('PlaySort', 'by_channel')
    self.logger.info("Sorting PlaybackGridView Videos: date = {} | channel = {}".format(sort_by_ascending_date,
                                                                                        sort_by_channel))
    update_sort = (asc(Video.watch_prio),)
    # Sort-by ascending date
    if sort_by_ascending_date:
        update_sort += (asc(Video.date_downloaded), asc(Video.date_published))
    # Sort-by channel name (implied by default: then descending date)
    if sort_by_channel:
        update_sort += (desc(Video.channel_title),)
    # Sort-by channel name then ascending date  # FIXME: Implement handling both sorts toggled
    if sort_by_channel and sort_by_ascending_date:
        # update_sort += (asc(Video.channel_title),)
        self.logger.debug5("By-Channel|By-date update_sort: {}".format(str(update_sort)))
        for t in update_sort:
            self.logger.debug5(t.compile(dialect=postgresql.dialect()))
        # FIXME: workaround for not handling both: disable channel sort if both toggled, and run date sort
        set_config('PlaySort', 'by_channel', format(not read_config('PlaySort', 'by_channel')))
        sort_by_channel = read_config('PlaySort', 'by_channel')
        update_sort += (asc(Video.date_downloaded), asc(Video.date_published))
    # DEFAULT: Sort-by descending date
    else:
        update_sort += (desc(Video.date_downloaded), desc(Video.date_published))
    self.logger.info("Sorted PlaybackGridView Videos: date = {} | channel = {}".format(sort_by_ascending_date,
                                                                                       sort_by_channel))
    return update_sort

Source : jobs_dao.py
with MIT License
from cds-snc
def dao_set_scheduled_jobs_to_pending():
    """
    Sets all past scheduled jobs to pending, and then returns them for further processing.
    this is used in the run_scheduled_jobs task, so we put a FOR UPDATE lock on the job table for the duration of
    the transaction so that if the task is run more than once concurrently, one task will block the other select
    from completing until it commits.
    """
    jobs = (
        Job.query.filter(
            Job.job_status == JOB_STATUS_SCHEDULED,
            Job.scheduled_for < datetime.utcnow(),
        )
        .order_by(asc(Job.scheduled_for))
        .with_for_update()
        .all()
    )
    for job in jobs:
        job.job_status = JOB_STATUS_PENDING
    db.session.add_all(jobs)
    db.session.commit()
    return jobs

def dao_get_future_scheduled_job_by_id_and_service_id(job_id, service_id):

Source : templates_dao.py
with MIT License
from cds-snc
def dao_get_all_templates_for_service(service_id, template_type=None):
    if template_type is not None:
        return (
            Template.query.filter_by(
                service_id=service_id,
                template_type=template_type,
                hidden=False,
                archived=False,
            )
            .order_by(
                asc(Template.name),
                asc(Template.template_type),
            )
            .all()
        )
    return (
        Template.query.filter_by(service_id=service_id, hidden=False, archived=False)
        .order_by(
            asc(Template.name),
            asc(Template.template_type),
        )
        .all()
    )

def dao_get_template_versions(service_id, template_id):

Source : conftest.py
with MIT License
from cds-snc
def current_sms_provider():
    return ProviderDetails.query.filter_by(notification_type="sms").order_by(asc(ProviderDetails.priority)).first()

@pytest.fixture(scope="function")

Source : test_provider_details_dao.py
with MIT License
from cds-snc
def test_toggle_sms_provider_switches_provider_stores_notify_user_id_in_history(restore_provider_details, sample_user, mocker):
    mocker.patch("app.provider_details.switch_providers.get_user_by_id", return_value=sample_user)
    old_provider = get_current_provider("sms")
    dao_toggle_sms_provider(old_provider.identifier)
    new_provider = get_current_provider("sms")
    old_provider_from_history = (
        ProviderDetailsHistory.query.filter_by(identifier=old_provider.identifier, version=old_provider.version)
        .order_by(asc(ProviderDetailsHistory.priority))
        .first()
    )
    new_provider_from_history = (
        ProviderDetailsHistory.query.filter_by(identifier=new_provider.identifier, version=new_provider.version)
        .order_by(asc(ProviderDetailsHistory.priority))
        .first()
    )
    assert old_provider.version == old_provider_from_history.version
    assert new_provider.version == new_provider_from_history.version
    assert new_provider_from_history.created_by_id == sample_user.id
    assert old_provider_from_history.created_by_id == sample_user.id

def test_can_get_all_provider_history(restore_provider_details, current_sms_provider):

Source : create-event-contact-associations.py
with GNU Affero General Public License v3.0
from closeio
def process_shard(shard_id, dry_run, id_start=0):
    # At 500K events, we need to process 6 events per second to finish within a day.
    batch_size = 100
    rps = 6 / batch_size
    window = 5
    throttle = limitlion.throttle_wait(
        "create-event-contact-associations", rps=rps, window=window
    )
    with session_scope_by_shard_id(shard_id) as db_session:
        # NOTE: The session is implicitly autoflushed, which ensures no
        # duplicate contacts are created.
        n = 0
        n_skipped = 0
        n_updated = 0
        while True:
            event_query = list(
                db_session.query(Event)
                .filter(Event.id > id_start)
                .order_by(asc(Event.id))
                .limit(batch_size)
            )
            if not event_query:
                break
            for event in event_query:
                n += 1
                id_start = event.id
                if n % batch_size == 0:
                    log.info(
                        "progress",
                        shard_id=shard_id,
                        id_start=id_start,
                        n=n,
                        n_skipped=n_skipped,
                        n_updated=n_updated,
                    )
                if event.contacts:
                    continue
                if not dry_run:
                    update_contacts_from_event(db_session, event, event.namespace_id)
                    n_updated += 1
                    if n_updated % batch_size == 0:
                        db_session.commit()
                        log.info(
                            "committed",
                            shard_id=shard_id,
                            n=n,
                            n_skipped=n_skipped,
                            n_updated=n_updated,
                        )
                throttle()
    log.info(
        "finished", shard_id=shard_id, n=n, n_skipped=n_skipped, n_updated=n_updated
    )

@click.command()

Source : filtering.py
with GNU Affero General Public License v3.0
from closeio
def files(
    namespace_id,
    message_public_id,
    filename,
    content_type,
    limit,
    offset,
    view,
    db_session,
):
    if view == "count":
        query = db_session.query(func.count(Block.id))
    elif view == "ids":
        query = db_session.query(Block.public_id)
    else:
        query = db_session.query(Block)
    query = query.filter(Block.namespace_id == namespace_id)
    # limit to actual attachments (no content-disposition == not a real
    # attachment)
    query = query.outerjoin(Part)
    query = query.filter(or_(Part.id.is_(None), Part.content_disposition.isnot(None)))
    if content_type is not None:
        query = query.filter(
            or_(
                Block._content_type_common == content_type,
                Block._content_type_other == content_type,
            )
        )
    if filename is not None:
        query = query.filter(Block.filename == filename)
    # Handle the case of fetching attachments on a particular message.
    if message_public_id is not None:
        query = query.join(Message).filter(Message.public_id == message_public_id)
    if view == "count":
        return {"count": query.one()[0]}
    query = query.order_by(asc(Block.id)).distinct().limit(limit)
    if offset:
        query = query.offset(offset)
    if view == "ids":
        return [x[0] for x in query.all()]
    else:
        return query.all()

def filter_event_query(

Source : filtering.py
with GNU Affero General Public License v3.0
from closeio
def events(
    namespace_id,
    event_public_id,
    calendar_public_id,
    title,
    description,
    location,
    busy,
    title_email,
    description_email,
    owner_email,
    participant_email,
    any_email,
    starts_before,
    starts_after,
    ends_before,
    ends_after,
    limit,
    offset,
    view,
    expand_recurring,
    show_cancelled,
    db_session,
):
    query = db_session.query(Event)
    if not expand_recurring:
        if view == "count":
            query = db_session.query(func.count(Event.id))
        elif view == "ids":
            query = db_session.query(Event.public_id)
    filters = [
        namespace_id,
        event_public_id,
        calendar_public_id,
        title,
        description,
        location,
        busy,
    ]
    query = filter_event_query(query, Event, *filters)
    event_criteria = []
    if starts_before is not None:
        event_criteria.append(Event.start < starts_before)
    if starts_after is not None:
        event_criteria.append(Event.start > starts_after)
    if ends_before is not None:
        event_criteria.append(Event.end < ends_before)
    if ends_after is not None:
        event_criteria.append(Event.end > ends_after)
    if not show_cancelled:
        if expand_recurring:
            event_criteria.append(Event.status != "cancelled")
        else:
            # It doesn't make sense to hide cancelled events
            # when we're not expanding recurring events,
            # so don't do it.
            # We still need to show cancelled recurring events
            # for those users who want to do event expansion themselves.
            event_criteria.append(
                (Event.discriminator == "recurringeventoverride")
                | (
                    (Event.status != "cancelled")
                    & (Event.discriminator != "recurringeventoverride")
                )
            )
    if title_email is not None:
        title_email_query = (
            db_session.query(EventContactAssociation.event_id)
            .join(Contact, EventContactAssociation.contact_id == Contact.id)
            .filter(
                Contact.email_address == title_email,
                Contact.namespace_id == namespace_id,
                EventContactAssociation.field == "title",
            )
            .subquery()
        )
        event_criteria.append(Event.id.in_(title_email_query))
    if description_email is not None:
        description_email_query = (
            db_session.query(EventContactAssociation.event_id)
            .join(Contact, EventContactAssociation.contact_id == Contact.id)
            .filter(
                Contact.email_address == description_email,
                Contact.namespace_id == namespace_id,
                EventContactAssociation.field == "description",
            )
            .subquery()
        )
        event_criteria.append(Event.id.in_(description_email_query))
    if owner_email is not None:
        owner_email_query = (
            db_session.query(EventContactAssociation.event_id)
            .join(Contact, EventContactAssociation.contact_id == Contact.id)
            .filter(
                Contact.email_address == owner_email,
                Contact.namespace_id == namespace_id,
                EventContactAssociation.field == "owner",
            )
            .subquery()
        )
        event_criteria.append(Event.id.in_(owner_email_query))
    if participant_email is not None:
        participant_email_query = (
            db_session.query(EventContactAssociation.event_id)
            .join(Contact, EventContactAssociation.contact_id == Contact.id)
            .filter(
                Contact.email_address == participant_email,
                Contact.namespace_id == namespace_id,
                EventContactAssociation.field == "participant",
            )
            .subquery()
        )
        event_criteria.append(Event.id.in_(participant_email_query))
    if any_email is not None:
        any_email_query = (
            db_session.query(EventContactAssociation.event_id)
            .join(Contact, EventContactAssociation.contact_id == Contact.id)
            .filter(
                Contact.email_address == any_email, Contact.namespace_id == namespace_id
            )
            .subquery()
        )
        event_criteria.append(Event.id.in_(any_email_query))
    event_predicate = and_(*event_criteria)
    query = query.filter(event_predicate)
    if expand_recurring:
        expanded = recurring_events(
            filters,
            starts_before,
            starts_after,
            ends_before,
            ends_after,
            db_session,
            show_cancelled=show_cancelled,
        )
        # Combine non-recurring events with expanded recurring ones
        all_events = query.filter(Event.discriminator == "event").all() + expanded
        if view == "count":
            return {"count": len(all_events)}
        all_events = sorted(all_events, key=lambda e: e.start)
        if limit:
            offset = offset or 0
            all_events = all_events[offset : offset + limit]
    else:
        if view == "count":
            return {"count": query.one()[0]}
        query = query.order_by(asc(Event.start)).limit(limit)
        if offset:
            query = query.offset(offset)
        # Eager-load some objects in order to make constructing API
        # representations faster.
        all_events = query.all()
    if view == "ids":
        return [x[0] for x in all_events]
    else:
        return all_events

def messages_for_contact_scores(db_session, namespace_id, starts_after=None):

Source : filtering.py
with GNU Affero General Public License v3.0
from closeio
def metadata_for_app(app_id, limit, last, query_value, query_type, db_session):
    if app_id is None:
        raise ValueError("Must specify an app_id")
    query = db_session.query(Metadata).filter(Metadata.app_id == app_id)
    if last is not None:
        query = query.filter(Metadata.id > last)
    if query_type is not None:
        if query_type not in METADATA_QUERY_OPERATORS:
            raise ValueError(
                "Invalid query operator for metadata query_type. Must be "
                "one of {}".format(", ".join(METADATA_QUERY_OPERATORS))
            )
        operator_filter = METADATA_QUERY_OPERATORS[query_type](query_value)
        query = query.filter(operator_filter)
    query = query.order_by(asc(Metadata.id)).limit(limit)
    return query.all()

def page_over_shards(Model, cursor, limit, get_results=lambda q: q.all()):

Source : filtering.py
with GNU Affero General Public License v3.0
from closeio
def page_over_shards(Model, cursor, limit, get_results=lambda q: q.all()):
    # TODO revisit passing lambda, and cursor format
    cursor = int(cursor)
    start_shard_id = engine_manager.shard_key_for_id(cursor)
    results = []
    remaining_limit = limit
    next_cursor = None
    for shard_id in sorted(engine_manager.engines):
        if shard_id < start_shard_id:
            continue
        if len(results) >= limit:
            break
        with session_scope_by_shard_id(shard_id) as mailsync_session:
            latest_cursor = cursor if shard_id == start_shard_id else None
            query = mailsync_session.query(Model)
            if latest_cursor:
                query = query.filter(Model.id > latest_cursor)
            query = query.order_by(asc(Model.id)).limit(remaining_limit)
            latest_results = get_results(query)
            if latest_results:
                results.extend(latest_results)
                last = latest_results[-1]
                if hasattr(last, "id"):
                    next_cursor = last.id
                elif "id" in last:
                    next_cursor = last["id"]
                else:
                    raise ValueError(
                        "Results returned from get_query must" "have an id"
                    )
                # Handle invalid ids
                cursor_implied_shard = next_cursor >> 48
                if shard_id != 0 and cursor_implied_shard == 0:
                    next_cursor += shard_id << 48
                remaining_limit -= len(latest_results)
    return results, str(next_cursor)

METADATA_QUERY_OPERATORS = {

Source : ns_api.py
with GNU Affero General Public License v3.0
from closeio
def folders_labels_query_api():
    category_type = g.namespace.account.category_type
    rule = request.url_rule.rule
    valid_category_type(category_type, rule)
    g.parser.add_argument("view", type=bounded_str, location="args")
    args = strict_parse_args(g.parser, request.args)
    if args["view"] == "count":
        results = g.db_session.query(func.count(Category.id))
    elif args["view"] == "ids":
        results = g.db_session.query(Category.public_id)
    else:
        results = g.db_session.query(Category)
    results = results.filter(
        Category.namespace_id == g.namespace.id, Category.deleted_at == EPOCH
    )
    results = results.order_by(asc(Category.id))
    if args["view"] == "count":
        return g.encoder.jsonify({"count": results.scalar()})
    results = results.limit(args["limit"]).offset(args["offset"]).all()
    if args["view"] == "ids":
        return g.encoder.jsonify([r for r, in results])
    return g.encoder.jsonify(results)

@app.route("/folders/<public_id>")

Source : ns_api.py
with GNU Affero General Public License v3.0
from closeio
def contact_api():
    g.parser.add_argument("filter", type=bounded_str, default="", location="args")
    g.parser.add_argument("view", type=bounded_str, location="args")
    args = strict_parse_args(g.parser, request.args)
    if args["view"] == "count":
        results = g.db_session.query(func.count(Contact.id))
    elif args["view"] == "ids":
        results = g.db_session.query(Contact.public_id)
    else:
        results = g.db_session.query(Contact)
    results = results.filter(Contact.namespace_id == g.namespace.id)
    if args["filter"]:
        results = results.filter(Contact.email_address == args["filter"])
    results = results.with_hint(Contact, "USE INDEX (idx_namespace_created)").order_by(
        asc(Contact.created_at)
    )
    if args["view"] == "count":
        return g.encoder.jsonify({"count": results.scalar()})
    if args["view"] != "ids":
        results = results.options(
            load_only("public_id", "_raw_address", "name"),
            joinedload(Contact.phone_numbers),
        )
    results = results.limit(args["limit"]).offset(args["offset"]).all()
    if args["view"] == "ids":
        return g.encoder.jsonify([r for r, in results])
    return g.encoder.jsonify(results)

@app.route("/contacts/search", methods=["GET"])