Here are examples of the Python API sqlalchemy.sql.expression.desc, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
36 Examples
3
Example 1
def failures(self):
    """Return docs for the 50 most recent finished events with a non-zero
    return code (i.e. failures), newest first."""
    failed = (
        self.session.query(Event)
        .filter_by(type=self.FINISHED)
        .filter(Event.return_code != 0)
        .order_by(desc(Event.id))
        .limit(50)
    )
    return self.event_models_to_docs(failed)
3
Example 2
Project: dodotable Source File: condition.py
def __query__(self):
    """Build the ORDER BY expression for this condition.

    :returns: an ``asc()`` or ``desc()`` SQLAlchemy expression over
        ``self.attribute``, depending on ``self.order``.
    :raises ValueError: if ``self.order`` is neither ``DESCENDANT`` nor
        ``ASCENDANT``.  (The original left ``query`` unbound in that case
        and crashed with an obscure ``UnboundLocalError``.)
    """
    if self.order == self.DESCENDANT:
        return desc(self.attribute)
    if self.order == self.ASCENDANT:
        return asc(self.attribute)
    raise ValueError('unsupported sort order: %r' % (self.order,))
3
Example 3
Project: tvb-framework Source File: datatype_dao.py
def get_last_data_with_uid(self, uid, datatype_class=model.DataType):
    """Retrieve the GID of the most recent dataType which has its UID
    (user_tag_1) field equal to the passed parameter, or None if nothing
    is found or a database error occurs.

    :param uid: value matched against ``user_tag_1``
    :param datatype_class: mapped class to query (defaults to model.DataType)
    :returns: the GID string of the newest match, or None
    """
    try:
        result = self.session.query(datatype_class.gid
                    ).filter(datatype_class.user_tag_1 == uid
                    ).order_by(desc(datatype_class.id)).all()
        # query over a single column returns a list of 1-tuples;
        # take the GID of the newest row, if any.
        if result:
            return result[0][0]
        return None
    except SQLAlchemyError as excep:  # 'as' form works on Python 2.6+ and 3.x
        self.logger.exception(excep)
        return None
3
Example 4
Project: tvb-framework Source File: datatype_dao.py
def try_load_last_entity_of_type(self, project_id, datatype_class):
    """Return the newest entity of `datatype_class` visible from the given
    project (launched in it, or linked into it), or None if none exists."""
    visible_in_project = or_(model.Operation.fk_launched_in == project_id,
                             model.Links.fk_to_project == project_id)
    query = (self.session.query(datatype_class)
             .join((model.Operation,
                    datatype_class.fk_from_operation == model.Operation.id))
             .outerjoin(model.Links)
             .filter(visible_in_project)
             .order_by(desc(datatype_class.id))
             .limit(1))
    rows = query.all()
    return rows[0] if rows else None
3
Example 5
Project: tvb-framework Source File: project_dao.py
def get_projects_for_user(self, user_id, page_start=0, page_size=20, is_count=False):
    """
    Return all projects a given user can access (administrator or not).

    :param user_id: ID of the user whose projects are retrieved
    :param page_start: offset of the first row (ignored when counting)
    :param page_size: maximum number of rows (ignored when counting)
    :param is_count: when True return only the number of matching projects
    :returns: an int count when `is_count` is True, otherwise a list of
        model.Project entities, newest first
    """
    # First load projects that current user is administrator for.
    query = self.session.query(model.Project).join((model.User, model.Project.fk_admin == model.User.id)
                    ).outerjoin((model.User_to_Project,
                                 and_(model.Project.id == model.User_to_Project.fk_project,
                                      model.User_to_Project.fk_user == user_id))
                    ).filter(or_(model.User.id == user_id, model.User_to_Project.fk_user == user_id)
                    ).order_by(desc(model.Project.id))
    if is_count:
        result = query.count()
    else:
        result = query.offset(max(page_start, 0)).limit(max(page_size, 0)).all()
        # Touch the lazy 'administrator' relation while the session is still
        # active; the comprehension result is intentionally discarded.
        [project.administrator.username for project in result]
    return result
3
Example 6
Project: tvb-framework Source File: workflow_dao.py
def get_bursts_for_project(self, project_id, page_start=0, page_size=None, count=False):
    """Return BurstConfiguration entities for a project, newest first.

    (The original docstring claimed "latest 50", but no such limit is
    applied unless `page_size` is given.)

    :param project_id: project whose bursts are listed
    :param page_start: offset of the first burst returned (when paging)
    :param page_size: maximum number of bursts returned; None means all
    :param count: when True return only the number of bursts
    :returns: an int count, a list of BurstConfiguration entities ordered
        by start_time descending, or None on database error
    """
    try:
        bursts = self.session.query(model.BurstConfiguration
                    ).filter_by(fk_project=project_id
                    ).order_by(desc(model.BurstConfiguration.start_time))
        if count:
            return bursts.count()
        if page_size is not None:
            bursts = bursts.offset(max(page_start, 0)).limit(page_size)
        bursts = bursts.all()
    except SQLAlchemyError as excep:  # 'as' form works on Python 2.6+ and 3.x
        self.logger.exception(excep)
        bursts = None
    return bursts
3
Example 7
def upgradeDb():
    """Bring the database schema up to `latestDatabaseVersion` by running
    the required migrateVersionN() steps in ascending order."""
    # Highest recorded version is the current schema version.
    currentVersion = Session.query(DbVersion).order_by(desc(DbVersion.version)).first()
    if currentVersion:
        if currentVersion.version == latestDatabaseVersion:
            log.debug('Database is up to date.')
            return
        if currentVersion.version < 2: migrateVersion2()
        if currentVersion.version < 3: migrateVersion3()
        if currentVersion.version < 4: migrateVersion4()
    else: # assume version 2
        # NOTE(review): unlike the branch above, this path never calls
        # migrateVersion4() -- confirm that is intended.
        migrateVersion3()
3
Example 8
Project: CouchPotatoV1 Source File: movie.py
@cherrypy.expose
@cherrypy.tools.mako(filename = "movie/index.html")
def index(self):
    '''
    Show all wanted, snatched, downloaded movies
    '''
    # Normalize a bare '/' request onto the movie index.
    if cherrypy.request.path_info == '/':
        return redirect('movie/')
    base_query = Db.query(Movie)
    wanted_filter = or_(Movie.status == u'want', Movie.status == u'waiting')
    movies = base_query.order_by(Movie.name).filter(wanted_filter).all()
    recent_first = desc(Movie.dateChanged)
    snatched = base_query.order_by(recent_first, Movie.name).filter_by(status = u'snatched').all()
    downloaded = base_query.order_by(recent_first, Movie.name).filter_by(status = u'downloaded').all()
    return self.render({'movies': movies, 'snatched': snatched, 'downloaded': downloaded})
3
Example 9
def migration_log(application_name=None):
    """Return migrations for one application (highest signature first) or,
    when no name is given, a dict mapping every application name to its
    migration list."""
    if application_name is not None:
        db_session = _migration_session(application_name)
        return db_session.query(Migration).order_by(desc(Migration.signature)).all()
    # No specific application: collect the log of every known application.
    return dict((name, migration_log(name)) for name in _application_list())
3
Example 10
Project: dms Source File: cmdline_engine.py
def list_failed_events(self, last_limit=None):
    """
    List failed events.

    :param last_limit: maximum number of events returned; falls back to the
        'list_events_last_limit' setting when falsy.
    :returns: list of brief JSON representations, newest event first.
    """
    self._begin_op()
    if not last_limit:
        last_limit = get_numeric_setting('list_events_last_limit',
                                         float)
    db_query_slice = get_numeric_setting('db_query_slice', int)
    db_session = self.db_session
    query = db_session.query(Event).filter(Event.state == ESTATE_FAILURE)\
        .order_by(desc(Event.id_)).limit(last_limit)\
        .yield_per(db_query_slice)
    # Comprehension instead of a manual append loop (same iteration order).
    results = [event.to_engine_brief(time_format=self.time_format)
               for event in query]
    self._finish_op()
    return results
3
Example 11
Project: dms Source File: cmdline_engine.py
def list_events(self, last_limit=None):
    """
    List all events, newest first.

    (The original docstring said "List failed events" -- copied from
    list_failed_events(); this method applies no state filter.)

    :param last_limit: maximum number of events returned; falls back to the
        'list_events_last_limit' setting when falsy.
    :returns: list of brief JSON representations of the events.
    """
    self._begin_op()
    if not last_limit:
        last_limit = get_numeric_setting('list_events_last_limit',
                                         float)
    db_query_slice = get_numeric_setting('db_query_slice', int)
    db_session = self.db_session
    query = db_session.query(Event)\
        .order_by(desc(Event.id_)).limit(last_limit)\
        .yield_per(db_query_slice)
    # Comprehension instead of a manual append loop (same iteration order).
    results = [event.to_engine_brief(time_format=self.time_format)
               for event in query]
    self._finish_op()
    return results
3
Example 12
Project: synnefo Source File: 3dd56e750a3_fix_latest_version.py
def upgrade():
    """Alembic migration: repair nodes.latest_version so it holds the serial
    of each node's most recently modified version."""
    # Lightweight table stubs: just enough column metadata for the UPDATE.
    n = table('nodes',
              column('node', sa.Integer),
              column('latest_version', sa.Integer)
              )
    v = table('versions',
              column('node', sa.Integer),
              column('mtime', sa.Integer),
              column('serial', sa.Integer),
              )
    # Correlated subquery: the serial of the newest (highest mtime) version
    # belonging to the node being updated.
    s = sa.select([v.c.serial]).where(
        n.c.node == v.c.node).order_by(desc(v.c.mtime)).limit(1)
    op.execute(
        n.update().
        values({'latest_version': s})
    )
3
Example 13
Project: sqlalchemy-continuum Source File: test_concrete_inheritance.py
def test_transaction_changed_entities(self):
    """The newest transaction should record the committed Article change."""
    article = self.Article()
    article.name = u'Text 1'
    self.session.add(article)
    self.session.commit()
    Transaction = versioning_manager.transaction_cls
    # Newest transaction first: the one created by the commit above.
    transaction = (
        self.session.query(Transaction)
        .order_by(sa.sql.expression.desc(Transaction.issued_at))
    ).first()
    assert transaction.entity_names == [u'Article']
    assert transaction.changed_entities
3
Example 14
Project: masakari Source File: api.py
@_session_handle
def get_one_vm_list_by_uuid_create_at_last(session, uuid):
    """Return the newest VmList row (highest create_at) for `uuid`, or None."""
    # SELECT progress, create_at, retry_cnt FROM vm_list \
    # WHERE uuid = :uuid ORDER BY create_at DESC LIMIT 1
    with _sqlalchemy_error():
        rows = session.query(VmList).filter_by(uuid=uuid)
        newest_first = rows.order_by(desc(VmList.create_at))
        return newest_first.first()
3
Example 15
Project: masakari Source File: api.py
@_session_handle
def get_one_vm_list_by_uuid_and_progress_create_at_last(session,
                                                        notification_uuid):
    """Return the newest in-progress VmList row (progress 0 or 1) for the
    given notification UUID, or None."""
    # SELECT * FROM vm_list WHERE uuid = :notification_uuid \
    # AND (progress = 0 OR progress = 1) \
    # ORDER BY create_at DESC LIMIT 1
    with _sqlalchemy_error():
        in_progress = or_(VmList.progress == 0, VmList.progress == 1)
        query = (session.query(VmList)
                 .filter_by(uuid=notification_uuid)
                 .filter(in_progress)
                 .order_by(desc(VmList.create_at)))
        return query.first()
3
Example 16
Project: masakari Source File: api.py
@_session_handle
def get_vm_list_by_uuid_and_progress_sorted(session, notification_uuid):
    """Return all in-progress VmList rows (progress 0 or 1) for the given
    UUID, ordered by recover_by ascending then create_at descending."""
    # sql = "SELECT id, uuid FROM vm_list " \
    # "WHERE uuid = '%s' " \
    # "AND (progress = 0 OR progress = 1) " \
    # "ORDER BY recover_by ASC, create_at DESC" \
    # % (row.get("uuid"))
    with _sqlalchemy_error():
        res = session.query(VmList).filter_by(
            uuid=notification_uuid).filter(or_(
                VmList.progress == 0, VmList.progress == 1)).order_by(
            asc(VmList.recover_by), desc(VmList.create_at)
        ).all()
        return res
3
Example 17
Project: masakari Source File: api.py
@_session_handle
def get_reprocessing_records_list(session, notification_uuid):
    """Return unprocessed (progress == 0) notification rows for the given
    notification UUID, newest create_at first, ties broken by id DESC."""
    # sql = "SELECT id, notification_id, notification_hostname, "
    # "notification_uuid, notification_cluster_port, recover_by "
    # "FROM notification_list "
    # "WHERE progress = 0 AND notification_uuid = '%s' "
    # "ORDER BY create_at DESC, id DESC"
    # % (row.get("notification_uuid"))
    with _sqlalchemy_error():
        res = session.query(NotificationList).filter_by(
            progress=0).filter_by(notification_uuid=notification_uuid).order_by(
            desc(NotificationList.create_at),
            desc(NotificationList.id)).all()
        return res
3
Example 18
Project: masakari Source File: api.py
@_session_handle
def get_notification_list_by_hostname(session, notification_hostname):
    """Return unprocessed (progress == 0) notification rows for the given
    hostname, newest create_at first, ties broken by id DESC."""
    # sql = "SELECT id, notification_id, notification_hostname, "
    # "notification_uuid, notification_cluster_port, recover_by "
    # "FROM notification_list "
    # "WHERE progress = 0 AND notification_hostname = '%s' "
    # "ORDER BY create_at DESC, id DESC"
    # % ("notification_hostname")
    with _sqlalchemy_error():
        res = session.query(NotificationList).filter_by(progress=0).filter_by(
            notification_hostname=notification_hostname).order_by(
            desc(NotificationList.create_at),
            desc(NotificationList.id)).all()
        return res
0
Example 19
Project: sqlalchemy-datatables Source File: __init__.py
def sorting(self):
    """Construct the query: sorting.

    Add sorting(ORDER BY) on the columns needed to be applied on.
    Reads the DataTables ordering parameters from self.request_values
    (legacy 1.9 names or current names) and appends order_by clauses to
    self.query.
    """
    sorting = []
    Order = namedtuple('order', ['name', 'dir', 'nullsOrder'])
    # Parameter name templates differ between DataTables versions.
    if self.legacy:
        columnOrder = 'iSortCol_%s'
        dirOrder = 'sSortDir_%s'
    else:
        columnOrder = 'order[%s][column]'
        dirOrder = 'order[%s][dir]'
    # NOTE(review): only the first ordering column (i == 0) is honoured;
    # multi-column sort requests are ignored -- confirm intended.
    i = 0
    if self.request_values.get(columnOrder % i) is not None:
        sorting.append(
            Order(
                self.columns[
                    int(self.request_values[columnOrder % i])].column_name,
                self.request_values[dirOrder % i],
                self.columns[
                    int(self.request_values[columnOrder % i])]
                .nulls_order))
    for sort in sorting:
        tmp_sort_name = sort.name.split('.')
        for tmp_name in tmp_sort_name:
            # iterate over the list so we can support things like x.y.z.a
            if tmp_sort_name.index(tmp_name) == 0:
                obj = getattr(self.sqla_object, tmp_name)
                parent = self.sqla_object
            elif isinstance(obj.property, RelationshipProperty):
                # otherwise try and see if we can percolate down the list
                # for relationships of relationships.
                parent = obj.property.mapper.class_
                obj = getattr(parent, tmp_name)
            if not hasattr(obj, 'property'):  # hybrid_property or property
                sort_name = tmp_name
                if hasattr(parent, '__tablename__'):
                    tablename = parent.__tablename__
                else:
                    tablename = parent.__table__.name
            # Ex: ForeignKey
            elif isinstance(obj.property, RelationshipProperty):
                # Ex: address.description => description =>
                # addresses.description
                sort_name = tmp_name
                # NOTE(review): sort_name was assigned just above, so this
                # branch is dead unless tmp_name is empty -- confirm.
                if not sort_name:
                    # Find first primary key
                    sort_name = obj.property.table.primary_key.columns \
                        .values()[0].name
                tablename = obj.property.table.name
            else:  # -> ColumnProperty
                sort_name = tmp_name
                if hasattr(parent, '__tablename__'):
                    tablename = parent.__tablename__
                else:
                    tablename = parent.__table__.name
        # Qualify with the resolved table name and apply direction + nulls.
        sort_name = '%s.%s' % (tablename, sort_name)
        ordering = asc(text(sort_name)) if sort.dir == 'asc' else desc(
            text(sort_name))
        if sort.nullsOrder:
            ordering = nullsMethods[sort.nullsOrder](ordering)
        self.query = self.query.order_by(ordering)
0
Example 20
Project: cronq Source File: mysql.py
def last_event_chunks_for_job(self, job_id, number):
    """Return run 'chunks' built from the last `number` events of a job.

    Each chunk groups the 'starting' event ('first') and the terminating
    event ('last') of one run_id; chunks are returned newest-run first.
    NOTE: Python 2 only (dict.iteritems, sorted(cmp=...)).
    """
    events = self.session.query(Event).filter_by(job_id=job_id).\
        order_by(desc(Event.id)).limit(number)
    events = [doc for doc in self.event_models_to_docs(events)]
    if len(events) == 0:
        return []
    chunks = {}
    for event in events:
        if event['run_id'] not in chunks:
            chunks[event['run_id']] = {
                'first': None,
                'last': None,
                'job_id': event['job_id']
            }
        if event['type'] == 'starting':
            chunks[event['run_id']]['first'] = event
        else:
            chunks[event['run_id']]['last'] = event
    # Optionally decorate chunks with log URLs from the environment.
    log_url_template = os.getenv('CRONQ_LOG_URL_TEMPLATE', None)
    if log_url_template:
        self._add_log_urls(chunks, log_url_template)
    docs = [chunk for i, chunk in chunks.iteritems()]

    def chunk_compare(x, y):
        # Order two chunks by their 'first' event datetimes.
        # NOTE(review): the 'first' key always exists (possibly None), so
        # the .get default never applies -- confirm intended.
        first = x.get('first', x.get('last'))
        last = y.get('first', y.get('last'))
        if first is None and last is None:
            return 0
        if first is None:
            return -1
        if last is None:
            return 1
        return int((first['datetime'] - last['datetime']).total_seconds())
    # newest to oldest
    docs = sorted(docs, cmp=chunk_compare, reverse=True)
    return docs
0
Example 21
def get_list(self, page=0, sort=None, sort_desc=None, execute=False, search_query=None):
    """Return (total_count, queryset) for a list view, applying search
    filtering, ordering and pagination in that order."""
    qs = self.get_queryset()
    # Narrow by the search query when search is configured.
    if search_query and self.search_fields:
        qs = self.apply_search(qs, search_query)
    # Total rows before pagination is applied.
    count = qs.count()
    # Ordering, optionally descending.
    if sort:
        ordering = desc(sort) if sort_desc else sort
        qs = qs.order_by(ordering)
    # Pagination.
    if page is not None:
        offset = page * self.list_per_page
        qs = qs.offset(offset).limit(self.list_per_page)
    if execute:
        qs = qs.all()
    return count, qs
0
Example 22
Project: tvb-framework Source File: datatype_dao.py
def get_values_of_datatype(self, project_id, datatype_class, filters=None, page_size=50):
    """
    Retrieve a list of dataTypes matching a filter inside a project.
    :returns: (results, total_count) maximum page_end rows are returned, to avoid endless time when loading a page
    """
    result = []
    count = 0
    if not issubclass(datatype_class, model.Base):
        self.logger.warning("Trying to filter not DB class:" + str(datatype_class))
        return result, count
    try:
        #Prepare generic query:
        query = self.session.query(datatype_class.id,
                                   func.max(datatype_class.type),
                                   func.max(datatype_class.gid),
                                   func.max(datatype_class.subject),
                                   func.max(model.Operation.completion_date),
                                   func.max(model.Operation.user_group),
                                   func.max(text('"OPERATION_GROUPS_1".name')),
                                   func.max(model.DataType.user_tag_1)
                    ).join((model.Operation, datatype_class.fk_from_operation == model.Operation.id)
                    ).outerjoin(model.Links
                    ).outerjoin((model.OperationGroup, model.Operation.fk_operation_group ==
                                 model.OperationGroup.id), aliased=True
                    ).filter(model.DataType.invalid == False
                    ).filter(or_(model.Operation.fk_launched_in == project_id,
                                 model.Links.fk_to_project == project_id))
        if filters:
            filter_str = filters.get_sql_filter_equivalent(datatype_to_check='datatype_class')
            if filter_str is not None:
                # SECURITY NOTE(review): filter_str is eval()-ed; it must never
                # contain user-controlled text -- confirm FilterChain guarantees this.
                query = query.filter(eval(filter_str))
        #Retrieve the results
        query = query.group_by(datatype_class.id).order_by(desc(datatype_class.id))
        result = query.limit(max(page_size, 0)).all()
        count = query.count()
    except Exception, excep:
        self.logger.exception(excep)
    return result, count
0
Example 23
Project: tvb-framework Source File: operation_dao.py
def get_filtered_operations(self, project_id, filter_chain, page_start=0, page_size=20, is_count=False):
    """
    :param project_id: current project ID
    :param filter_chain: instance of FilterChain
    :param is_count: when True, return a number, otherwise the list of operation entities
    :return a list of filtered operation in current project, page by page, or the total count for them.
    """
    try:
        select_clause = self.session.query(func.min(model.Operation.id))
        if not is_count:
            # Do not add select columns in case of COUNT, as they will be ignored anyway
            select_clause = self.session.query(func.min(model.Operation.id), func.max(model.Operation.id),
                                               func.count(model.Operation.id),
                                               func.max(model.Operation.fk_operation_group),
                                               func.min(model.Operation.fk_from_algo),
                                               func.max(model.Operation.fk_launched_by),
                                               func.min(model.Operation.create_date),
                                               func.min(model.Operation.start_date),
                                               func.max(model.Operation.completion_date),
                                               func.min(model.Operation.status),
                                               func.max(model.Operation.additional_info),
                                               func.min(case_([(model.Operation.visible, 1)], else_=0)),
                                               func.min(model.Operation.user_group),
                                               func.min(model.Operation.gid))
        query = select_clause.join(model.Algorithm).join(
            model.AlgorithmCategory).filter(model.Operation.fk_launched_in == project_id)
        if filter_chain is not None:
            filter_string = filter_chain.get_sql_filter_equivalent()
            # SECURITY NOTE(review): filter_string is eval()-ed; it must never
            # contain user-controlled text -- confirm FilterChain guarantees this.
            query = query.filter(eval(filter_string))
        # Collapse whole operation groups into a single row (negative group
        # id as the grouping key); ungrouped operations stay one row each.
        query = query.group_by(case_([(model.Operation.fk_operation_group > 0,
                                       - model.Operation.fk_operation_group)], else_=model.Operation.id))
        if is_count:
            return query.count()
        return query.order_by(desc(func.max(model.Operation.id))).offset(page_start).limit(page_size).all()
    except SQLAlchemyError, excep:
        self.logger.exception(excep)
        return 0 if is_count else None
0
Example 24
Project: tvb-framework Source File: operation_dao.py
def get_previews(self, project_id, user_id=None, selected_session_name='all_sessions'):
    """
    This method returns a tuple of 2 elements. The first element represents a dictionary
    of form {'$session_name': [list_of_figures]}. This dictionary contains data only for the selected self.session.
    If the selected session is 'all_sessions' than it will contain data for all the sessions.
    The second element of the returned tuple is a dictionary of form
    {'$session_name': $no_of_figures_in_this_session, ...}.
    This dictionary contains information about all the sessions.

    selected_session_name - represents the name of the session for which you
    want to obtain the stored figures.
    """
    try:
        previews_info = self._get_previews_info(project_id, user_id)
        if selected_session_name == 'all_sessions':
            # Python 2 idiom: keys() returns a list, sorted in place.
            session_names = previews_info.keys()
            session_names.sort()
        else:
            session_names = [selected_session_name]
        result = {}
        for session_name in session_names:
            figures_list = self.session.query(model.ResultFigure
                            ).filter_by(fk_in_project=project_id
                            ).filter_by(session_name=session_name)
            if user_id is not None:
                figures_list = figures_list.filter_by(fk_for_user=user_id)
            figures_list = figures_list.order_by(desc(model.ResultFigure.id)).all()
            # Force loading of project and operation - needed to compute image path
            for figure in figures_list:
                figure.project
                figure.operation
            result[session_name] = figures_list
        return result, previews_info
    except SQLAlchemyError, excep:
        self.logger.exception(excep)
        return {}, {}
0
Example 25
Project: tvb-framework Source File: workflow_dao.py
def get_all_datatypes_in_burst(self, burst_id):
    """
    Get all dataTypes in burst, order by their creation, desc.

    :param burst_id BurstConfiguration Identifier.
    :returns: list dataType GIDs or empty list.
    """
    try:
        # DataTypeGroups produced by workflow steps belonging to this burst.
        groups = self.session.query(model.DataTypeGroup,
                    ).join(model.Operation, model.DataTypeGroup.fk_from_operation == model.Operation.id
                    ).join(model.WorkflowStep, model.Operation.id == model.WorkflowStep.fk_operation
                    ).join(model.Workflow).filter(model.Workflow.fk_burst == burst_id
                    ).order_by(desc(model.DataTypeGroup.id)).all()
        # Stand-alone DataTypes of the burst (not part of any group).
        result = self.session.query(model.DataType
                    ).filter(model.DataType.fk_parent_burst == burst_id
                    ).filter(model.DataType.fk_datatype_group == None
                    ).filter(model.DataType.type != self.EXCEPTION_DATATYPE_GROUP
                    ).order_by(desc(model.DataType.id)).all()
        result.extend(groups)
    except SQLAlchemyError, exc:
        self.logger.exception(exc)
        result = []
    return result
0
Example 26
Project: blitzdb Source File: queryset.py
def sort(self, keys, direction=None, explicit_nullsfirst=False):
    """Set the ordering for this queryset.

    :param keys: either a single sort key (when `direction` is given) or an
        iterable of (key, direction) pairs; direction > 0 means ascending.
    :param direction: optional direction for the single-key form.
    :param explicit_nullsfirst: when set, force NULLs first on ascending
        sorts and NULLs last on descending sorts.
    :returns: self; also invalidates any cached objects.
    """
    # we sort by a single argument
    if direction:
        keys = ((keys, direction),)
    order_bys = []
    for key, direction in keys:
        if direction > 0:
            # when sorting in ascending direction, NULL values should come first
            if explicit_nullsfirst:
                direction = lambda *args, **kwargs: nullsfirst(asc(*args, **kwargs))
            else:
                direction = asc
        else:
            # when sorting in descending direction, NULL values should come last
            if explicit_nullsfirst:
                direction = lambda *args, **kwargs: nullslast(desc(*args, **kwargs))
            else:
                direction = desc
        order_bys.append((key, direction))
    self.order_bys = order_bys
    self.objects = None
    return self
0
Example 27
def _order_by(self, query, joins, sort_field, sort_desc):
    """
    Apply order_by to the query

    :param query:
        Query
    :param joins:
        Joins set
    :param sort_field:
        Sort field (string, InstrumentedAttribute or Column)
    :param sort_desc:
        Ascending or descending
    :returns: (query, joins) with the ordering (and any automatic join)
        applied
    """
    # TODO: Preprocessing for joins
    # Try to handle it as a string
    if isinstance(sort_field, string_types):
        # Create automatic join against a table if column name
        # contains dot.
        if '.' in sort_field:
            parts = sort_field.split('.', 1)
            if parts[0] not in joins:
                query = query.join(parts[0])
                joins.add(parts[0])
    elif isinstance(sort_field, InstrumentedAttribute):
        # SQLAlchemy 0.8+ uses 'parent' as a name
        mapper = getattr(sort_field, 'parent', None)
        if mapper is None:
            # SQLAlchemy 0.7.x uses parententity
            mapper = getattr(sort_field, 'parententity', None)
        if mapper is not None:
            table = mapper.tables[0]
            if self._need_join(table) and table.name not in joins:
                query = query.join(table)
                joins.add(table.name)
    elif isinstance(sort_field, Column):
        # Plain Column needs no automatic join handling.
        pass
    else:
        raise TypeError('Wrong argument type')
    if sort_field is not None:
        if sort_desc:
            query = query.order_by(desc(sort_field))
        else:
            query = query.order_by(sort_field)
    return query, joins
0
Example 28
Project: PyDev.Debugger Source File: sql.py
@Transactional
def find(self,
         signature=None, order=0,
         since=None, until=None,
         offset=None, limit=None):
    """
    Retrieve all crash dumps in the database, optionally filtering them by
    signature and timestamp, and/or sorting them by timestamp.

    Results can be paged to avoid consuming too much memory if the database
    is large.

    @see: L{find_by_example}

    @type signature: object
    @param signature: (Optional) Return only through crashes matching
        this signature. See L{Crash.signature} for more details.

    @type order: int
    @param order: (Optional) Sort by timestamp.
        If C{== 0}, results are not sorted.
        If C{> 0}, results are sorted from older to newer.
        If C{< 0}, results are sorted from newer to older.

    @type since: datetime
    @param since: (Optional) Return only the crashes after and
        including this date and time.

    @type until: datetime
    @param until: (Optional) Return only the crashes before this date
        and time, not including it.

    @type offset: int
    @param offset: (Optional) Skip the first I{offset} results.

    @type limit: int
    @param limit: (Optional) Return at most I{limit} results.

    @rtype: list(L{Crash})
    @return: List of Crash objects.
    """
    # Validate the parameters.
    if since and until and since > until:
        warnings.warn("CrashDAO.find() got the 'since' and 'until'"
                      " arguments reversed, corrected automatically.")
        since, until = until, since
    if limit is not None and not limit:
        warnings.warn("CrashDAO.find() was set a limit of 0 results,"
                      " returning without executing a query.")
        return []

    # Build the SQL query.
    query = self._session.query(CrashDTO)
    if signature is not None:
        # Signatures are stored pickled (protocol 0), so compare the
        # pickled form rather than the object itself.
        sig_pickled = pickle.dumps(signature, protocol=0)
        query = query.filter(CrashDTO.signature == sig_pickled)
    if since:
        query = query.filter(CrashDTO.timestamp >= since)
    if until:
        query = query.filter(CrashDTO.timestamp < until)
    if order:
        if order > 0:
            query = query.order_by(asc(CrashDTO.timestamp))
        else:
            query = query.order_by(desc(CrashDTO.timestamp))
    else:
        # Default ordering is by row ID, to get consistent results.
        # Also some database engines require ordering when using offsets.
        query = query.order_by(asc(CrashDTO.id))
    if offset:
        query = query.offset(offset)
    if limit:
        query = query.limit(limit)

    # Execute the SQL query and convert the results.
    try:
        return [dto.toCrash() for dto in query.all()]
    except NoResultFound:
        return []
0
Example 29
def _order_by(self, query, joins, sort_joins, sort_field, sort_desc):
    """
    Apply order_by to the query

    :param query:
        Query
    :param joins:
        Current joins
    :param sort_joins:
        Sort joins (properties or tables)
    :param sort_field:
        Sort field
    :param sort_desc:
        Ascending or descending
    """
    if sort_field is None:
        return query, joins
    # Resolve any joins needed to reach the sort column.
    query, joins, alias = self._apply_path_joins(query, joins, sort_joins, inner_join=False)
    if alias is None:
        column = sort_field
    else:
        column = getattr(alias, sort_field.key)
    # Normalize to a tuple of columns so both shapes share one code path.
    columns = column if isinstance(column, tuple) else (column,)
    if sort_desc:
        query = query.order_by(*[desc(col) for col in columns])
    else:
        query = query.order_by(*columns)
    return query, joins
0
Example 30
Project: Flexget Source File: proper_movies.py
def on_task_filter(self, task, config):
    """Accept entries that are proper re-releases of movies already
    downloaded, optionally only within a configured time window.

    :param task: the Flexget task whose entries are inspected
    :param config: bool (False disables the plugin, True disables the
        timeframe) or a time-interval string parsed by parse_timedelta
    """
    log.debug('check for enforcing')
    # parse config
    if isinstance(config, bool):
        # configured a boolean false, disable plugin
        if not config:
            return
        # configured a boolean true, disable timeframe
        timeframe = None
    else:
        # parse time window
        log.debug('interval: %s' % config)
        try:
            timeframe = parse_timedelta(config)
        except ValueError:
            raise plugin.PluginError('Invalid time format', log)
    # throws DependencyError if not present aborting task
    imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance
    for entry in task.entries:
        parser = get_plugin_by_name('parsing').instance.parse_movie(entry['title'])
        # if we have imdb_id already evaluated
        if entry.get('imdb_id', None, eval_lazy=False) is None:
            try:
                # TODO: fix imdb_id_lookup, cumbersome that it returns None and or throws exception
                # Also it's crappy name!
                imdb_id = imdb_lookup.imdb_id_lookup(movie_title=parser.name,
                                                     movie_year=parser.year,
                                                     raw_title=entry['title'])
                if imdb_id is None:
                    continue
                entry['imdb_id'] = imdb_id
            except plugin.PluginError as pe:
                log_once(pe.value)
                continue
        quality = parser.quality.name
        log.debug('quality: %s' % quality)
        log.debug('imdb_id: %s' % entry['imdb_id'])
        log.debug('current proper count: %s' % parser.proper_count)
        # Highest proper_count previously downloaded for this movie+quality.
        proper_movie = task.session.query(ProperMovie). \
            filter(ProperMovie.imdb_id == entry['imdb_id']). \
            filter(ProperMovie.quality == quality). \
            order_by(desc(ProperMovie.proper_count)).first()
        if not proper_movie:
            log.debug('no previous download recorded for %s' % entry['imdb_id'])
            continue
        highest_proper_count = proper_movie.proper_count
        log.debug('highest_proper_count: %i' % highest_proper_count)
        accept_proper = False
        if parser.proper_count > highest_proper_count:
            log.debug('proper detected: %s ' % proper_movie)
            if timeframe is None:
                accept_proper = True
            else:
                # Only accept propers released within the timeframe window.
                expires = proper_movie.added + timeframe
                log.debug('propers timeframe: %s' % timeframe)
                log.debug('added: %s' % proper_movie.added)
                log.debug('propers ignore after: %s' % str(expires))
                if datetime.now() < expires:
                    accept_proper = True
                else:
                    log.verbose('Proper `%s` has past it\'s expiration time' % entry['title'])
        if accept_proper:
            log.info('Accepting proper version previously downloaded movie `%s`' % entry['title'])
            # TODO: does this need to be called?
            # fire_event('forget', entry['imdb_url'])
            fire_event('forget', entry['imdb_id'])
            entry.accept('proper version of previously downloaded movie')
0
Example 31
Project: nowin_core Source File: radio.py
def get_sites(
    self,
    offset=None,
    limit=None,
    on_air=True,
    keywords=None,
    radio_names=None,
    public=True,
    locations=None,
    exclude_locations=None,
    tags=None,
    ids=None,
    order_by=None,
    return_count=False,
    load_user=True,
    load_tags=True,
):
    """Get query of on air sites

    if on_air is True, only on air radio sites will be returned; if it's
    False, only offline radio sites will be returned; otherwise all
    sites will be returned.

    order_by can be 'listener_count', 'online_time' or None
    radio_names is a list of user names of radios to filter
    public works like on_air but filters public and private sites
    locations is the list of site locations to filter
    ids is a list of user_id
    if return_count is True, the result will be the count of data only
    load_user indicates whether to load user eagerly
    load_tags indicates whether to load tags eagerly
    """
    from sqlalchemy.sql.expression import func, not_, desc, or_
    from sqlalchemy.orm import joinedload
    from sqlalchemy.sql.functions import sum
    assert order_by in [None, 'listener_count', 'online_time']
    # table short cut
    User = tables.User
    Site = tables.Site
    OnAir = tables.OnAir
    ProxyConnection = tables.ProxyConnection
    Tag = tables.Tag
    SiteTag = tables.SiteTag
    listener_count = func.ifnull(sum(ProxyConnection.listener), 0) \
        .label('listener_count')
    sites = self.session \
        .query(Site, listener_count) \
        .outerjoin((OnAir, OnAir.user_id == Site.user_id)) \
        .outerjoin((ProxyConnection, ProxyConnection.user_id == Site.user_id)) \
        .group_by(Site.user_id)
    # only on air sites
    # BUG FIX: the original used Python identity tests
    # (`OnAir.user_id is not None` / `is None`) which evaluate to constant
    # True/False in Python instead of SQL `IS [NOT] NULL`, so the on-air
    # filter never worked. Compare at SQL level instead.
    if on_air is True:
        sites = sites.filter(OnAir.user_id != None)  # noqa: E711
    elif on_air is False:
        sites = sites.filter(OnAir.user_id == None)  # noqa: E711
    # conditions
    if keywords is not None:
        # Match keywords against title/brief/description, plus exact
        # user names and tag names.
        ors = []
        for keyword in keywords:
            ors.append(Site.title.like('%%%s%%' % keyword))
            ors.append(Site.brief.like('%%%s%%' % keyword))
            ors.append(Site.description.like('%%%s%%' % keyword))
        user_ids = self.session.query(User.user_id) \
            .filter(User.user_name.in_(keywords)) \
            .subquery()
        user_ids_by_tag = self.session \
            .query(Site.user_id) \
            .join((SiteTag,
                   SiteTag.user_id == Site.user_id)) \
            .join((Tag, SiteTag.tag_id == Tag.id)) \
            .filter(Tag.name.in_(keywords)) \
            .subquery()
        ors.append(Site.user_id.in_(user_ids))
        ors.append(Site.user_id.in_(user_ids_by_tag))
        sites = sites.filter(or_(*ors))
    if ids is not None:
        sites = sites.filter(Site.user_id.in_(ids))
    if radio_names is not None:
        user_ids = self.session.query(User.user_id) \
            .filter(User.user_name.in_(radio_names)) \
            .subquery()
        sites = sites.filter(Site.user_id.in_(user_ids))
    # BUG FIX: `Site.public is True` is a Python identity test that is
    # always False for a column object; compare at SQL level instead.
    if public is True:
        sites = sites.filter(Site.public == True)  # noqa: E712
    elif public is False:
        sites = sites.filter(Site.public == False)  # noqa: E712
    if locations is not None:
        locations = map(lambda x: x.upper(), locations)
        sites = sites.filter(Site.location.in_(locations))
    if exclude_locations is not None:
        sites = sites.filter(not_(Site.location.in_(exclude_locations)))
    if tags is not None:
        ids = self.session \
            .query(Site.user_id) \
            .join((SiteTag,
                   SiteTag.user_id == Site.user_id)) \
            .join((Tag, SiteTag.tag_id == Tag.id)) \
            .filter(Tag.name.in_(tags)) \
            .subquery()
        sites = sites.filter(Site.user_id.in_(ids))
    # all we need is count of data
    if return_count:
        return sites.count()
    # set the order
    if order_by is not None:
        if order_by == 'listener_count':
            sites = sites.order_by(desc('listener_count'))
        elif order_by == 'online_time':
            sites = sites.order_by(desc(OnAir.online_time))
    # offset and limit
    if offset is not None:
        sites = sites.offset(offset)
    if limit is not None:
        sites = sites.limit(limit)
    sites = sites.all()
    # eager load: pre-fetch users/tags for the selected sites in one query.
    ids = [site.user_id for site, _ in sites]
    query = self.session.query(Site)
    if load_user:
        query = query.options(joinedload('user'))
    if load_tags:
        query = query.options(joinedload('tags'))
    query = query.filter(Site.user_id.in_(ids))
    query.all()
    return sites
0
Example 32
Project: nowin_core Source File: radio.py
def get_country_list(self, limit=30):
    """Get country list ordered by online listener count, busiest first.

    :param limit: maximum number of countries returned
    :returns: list of (location, listener_count) rows
    """
    from sqlalchemy.sql.expression import func, desc
    from sqlalchemy.sql.functions import sum
    # table short cut
    Site = tables.Site
    OnAir = tables.OnAir
    ProxyConnection = tables.ProxyConnection
    listener_count = func.ifnull(sum(ProxyConnection.listener), 0) \
        .label('listener_count')
    # BUG FIX: the original filtered on `Site.public is True`, a Python
    # identity test that is always False for a column object, so no SQL
    # boolean comparison was ever emitted. Compare at SQL level instead.
    locations = self.session \
        .query(Site.location, listener_count) \
        .join((OnAir, OnAir.user_id == Site.user_id)) \
        .filter(Site.public == True) \
        .outerjoin((ProxyConnection, ProxyConnection.user_id == Site.user_id)) \
        .group_by(Site.location) \
        .order_by(desc('listener_count')) \
        .limit(limit)
    return locations.all()
0
Example 33
Project: nowin_core Source File: stats.py
def get_country_stats(self):
    """Per-country statistics, ordered by online listener count: rows of
    (location, listener_count, radio_count), busiest countries first."""
    from sqlalchemy import distinct
    from sqlalchemy.sql.expression import func, desc
    from sqlalchemy.sql.functions import sum
    # Table shortcuts.
    Site = tables.Site
    OnAir = tables.OnAir
    ProxyConnection = tables.ProxyConnection
    # Aggregate columns: listeners and distinct radios per location.
    listeners = func.ifnull(sum(ProxyConnection.listener), 0) \
        .label('listener_count')
    radios = func.ifnull(func.count(distinct(Site.user_id)), 0) \
        .label('radio_count')
    query = self.session.query(Site.location, listeners, radios)
    query = query.join((OnAir, OnAir.user_id == Site.user_id))
    query = query.outerjoin((ProxyConnection,
                             ProxyConnection.user_id == Site.user_id))
    query = query.group_by(Site.location)
    query = query.order_by(desc('listener_count'))
    return query.all()
0
Example 34
Project: kokoropy Source File: kokoro.py
def _migration_max(application_name):
    """Return the migration with the highest signature for the given
    application, or None if it has no migrations."""
    session = _migration_session(application_name)
    newest_first = session.query(Migration).order_by(desc(Migration.signature))
    return newest_first.first()
0
Example 35
Project: dms Source File: cmdline_engine.py
def vacuum_zis(self, age_days=None, zi_max_num=None):
    """
    Age ZIs according to age_days and zi_max_num

    For each non-deleted zone that has more than `zi_max_num` zone
    instances (ZIs), delete ZIs older than `age_days`, always keeping the
    `zi_max_num` most recent ones and never the published/candidate ZI.

    :param age_days: minimum age in days before a ZI may be deleted;
        defaults to the 'zi_max_age' zone_cfg setting.
    :param zi_max_num: number of most recent ZIs always kept per zone;
        defaults to the 'zi_max_num' zone_cfg setting.
    :returns: dict with the 'num_deleted' count.
    """
    self._begin_op()
    db_session = self.db_session
    db_query_slice = get_numeric_setting('db_query_slice', int)
    if age_days is None:
        age_days = float(zone_cfg.get_row_exc(db_session,
                                              key='zi_max_age'))
    age_days = timedelta(days=age_days)
    if zi_max_num is None:
        zi_max_num = int(zone_cfg.get_row_exc(db_session,
                                              key='zi_max_num'))
    # Per-zone ZI counts, used to select only zones over the keep limit.
    stmt = db_session.query(ZoneInstance.zone_id,
                            func.count(ZoneInstance.id_).label('zi_count'))\
        .group_by(ZoneInstance.zone_id).subquery()
    zone_sm_query = db_session.query(ZoneSM)\
        .filter(ZoneSM.state != ZSTATE_DELETED)\
        .outerjoin(stmt, ZoneSM.id_ == stmt.c.zone_id)\
        .filter(stmt.c.zi_count > zi_max_num)\
        .yield_per(db_query_slice)
    count = 0
    for zone_sm in zone_sm_query:
        # The zi_max_num most recently modified ZIs are always kept.
        zi_keep = db_session.query(ZoneInstance.id_)\
            .filter(ZoneInstance.zone_id == zone_sm.id_)\
            .order_by(desc(ZoneInstance.mtime))\
            .limit(zi_max_num)
        # Candidates: old enough, not published, not in the keep set.
        zi_query = db_session.query(ZoneInstance)\
            .filter(ZoneInstance.zone_id == zone_sm.id_)\
            .filter(ZoneInstance.id_ != zone_sm.zi_id)\
            .filter(not_(ZoneInstance.id_.in_(zi_keep)))\
            .filter(ZoneInstance.mtime < (func.now() - age_days))
        for zi in zi_query:
            if (zi.id_ == zone_sm.zi_id
                    or zi.id_ == zone_sm.zi_candidate_id):
                # Skip if this ZI has been selected for republishing in
                # the mean time
                continue
            db_session.delete(zi)
            count += 1
    result = {'num_deleted': count}
    self._finish_op()
    return result
0
Example 36
Project: ostip Source File: my_datatables.py
def sorting(self):
    """Construct the query: sorting.

    Add sorting(ORDER BY) on the columns needed to be applied on.
    Variant of the sqlalchemy-datatables implementation that also supports
    hybrid properties via getattr() instead of a table-qualified name.
    """
    sorting = []
    Order = namedtuple('order', ['name', 'dir', 'nullsOrder'])
    # Parameter name templates differ between DataTables versions.
    if self.legacy:
        columnOrder = 'iSortCol_%s'
        dirOrder = 'sSortDir_%s'
    else:
        columnOrder = 'order[%s][column]'
        dirOrder = 'order[%s][dir]'
    # NOTE(review): only the first ordering column (i == 0) is honoured.
    i = 0
    if self.request_values.get(columnOrder % i) is not None:
        sorting.append(
            Order(
                self.columns[
                    int(self.request_values[columnOrder % i])].column_name,
                self.request_values[dirOrder % i],
                self.columns[
                    int(self.request_values[columnOrder % i])]
                .nulls_order))
    for sort in sorting:
        tmp_sort_name = sort.name.split('.')
        for tmp_name in tmp_sort_name:
            # iterate over the list so we can support things like x.y.z.a
            if tmp_sort_name.index(tmp_name) == 0:
                obj = getattr(self.sqla_object, tmp_name)
                parent = self.sqla_object
            elif isinstance(obj.property, RelationshipProperty):
                # otherwise try and see if we can percolate down the list
                # for relationships of relationships.
                parent = obj.property.mapper.class_
                obj = getattr(parent, tmp_name)
            if not hasattr(obj, 'property'):  # hybrid_property or property
                sort_name = tmp_name
                if hasattr(parent, '__tablename__'):
                    tablename = parent.__tablename__
                else:
                    tablename = parent.__table__.name
                # NOTE(review): the tablename computed just above is
                # immediately discarded, forcing the getattr() branch
                # below for hybrid properties -- confirm intended.
                tablename = None
            # Ex: ForeignKey
            elif isinstance(obj.property, RelationshipProperty):
                # Ex: address.description => description =>
                # addresses.description
                sort_name = tmp_name
                # NOTE(review): sort_name was assigned just above, so this
                # branch is dead unless tmp_name is empty -- confirm.
                if not sort_name:
                    # Find first primary key
                    sort_name = obj.property.table.primary_key.columns \
                        .values()[0].name
                tablename = obj.property.table.name
            else:  # -> ColumnProperty
                sort_name = tmp_name
                if hasattr(parent, '__tablename__'):
                    tablename = parent.__tablename__
                else:
                    tablename = parent.__table__.name
        # Table-qualified text for plain columns; attribute access for
        # hybrid properties (tablename is None in that case).
        if tablename:
            sort_name = text('%s.%s' % (tablename, sort_name))
        else:
            sort_name = getattr(parent, sort_name)
        ordering = asc(sort_name) if sort.dir == 'asc' else desc(sort_name)
        if sort.nullsOrder:
            ordering = nullsMethods[sort.nullsOrder](ordering)
        self.query = self.query.order_by(ordering)