Here are examples of the Python API django.db.models.Subquery taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
84 Examples
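Most of the examples below follow the same basic pattern: correlate an inner queryset with the outer row via OuterRef, slice it to a single column and row, and wrap it in Subquery inside annotate() or filter(). As a minimal sketch, assuming hypothetical Post and Comment models where Comment has a post foreign key and a created_at field:

from django.db.models import OuterRef, Subquery

# Correlate the inner queryset with the outer Post row.
newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created_at")
# Annotate each post with the body of its newest comment; the subquery must
# return a single column and at most one row, hence .values(...)[:1].
posts = Post.objects.annotate(newest_comment=Subquery(newest.values("body")[:1]))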
3 votes
Source : browser_metadata_base.py (GNU General Public License v3.0, from ajslater)
def _annotate_cover_path(self, queryset, model):
"""Annotate the query set for the coverpath for the sort."""
# Select comics for the children by an outer ref for annotation
# Order the descendant comics by the sort arguments
if model == Comic:
cover_path = F("cover_path")
else:
order_by = self.get_order_by(model, for_cover_path=True)
cover_path = Subquery(
queryset.filter(pk=OuterRef("pk"))
.order_by(*order_by)
.values("comic__cover_path")[:1]
)
obj_list = queryset.annotate(**{f"{UNIONFIX_PREFIX}cover_path": cover_path})
return obj_list
def _annotate_page_count(self, obj_list):
3 votes
Source : managers.py (MIT License, from Arx-Game)
def annotate_highest_treatment(self, treatment_type, attr_name):
"""This annotates the queryset with the highest value for a given type of treatments"""
from world.conditions.models import TreatmentAttempt
subquery_queryset = (
TreatmentAttempt.objects.filter(
target=OuterRef("pk"),
treatment_type=treatment_type,
uses_remaining__gt=0,
)
.annotate(highest_value=Max("value", output_field=IntegerField(default=0)))
.values("highest_value")[:1]
)
query_kwargs = {
attr_name: Subquery(subquery_queryset, output_field=IntegerField(default=0))
}
return self.annotate(**query_kwargs)
def annotate_recovery_treatment(self):
3 votes
Source : 0071_connection_organisation.py (MIT License, from betagouv)
def populate_organisation(apps, _):
Connection = apps.get_model("aidants_connect_web", "Connection")
Connection.objects.filter(aidant__isnull=False).update(
organisation=Subquery(
Connection.objects.filter(pk=OuterRef("pk")).values_list(
"aidant__organisation"
)[:1]
)
)
class Migration(migrations.Migration):
3 votes
Source : models.py (GNU Affero General Public License v3.0, from betagouv)
def to_users_qs(memberships):
"""
Return a User QuerySet. Useful to iterate over User objects instead of Membership ones.
"""
memberships = memberships.filter(user=OuterRef("pk"))
user_model = memberships.model._meta.get_field("user").related_model
return user_model.objects.filter(pk=Subquery(memberships.values("user")))
class MembershipAbstract(models.Model):
3 votes
Source : models.py (GNU Affero General Public License v3.0, from betagouv)
def with_count_active_job_descriptions(self):
"""
Count the number of active job descriptions by SIAE.
"""
# A subquery is way more efficient here than a join.
# See `self.with_count_recent_received_job_apps`.
sub_query = Subquery(
(
SiaeJobDescription.objects.filter(is_active=True, siae=OuterRef("id"))
.values("siae")
.annotate(count=Count("pk"))
.values("count")
),
output_field=models.IntegerField(),
)
return self.annotate(count_active_job_descriptions=Coalesce(sub_query, 0))
def with_job_app_score(self):
3 votes
Source : views.py (GNU Affero General Public License v3.0, from Dwarni)
def get_species_in_query(region_query):
return Q(species__in=Subquery(Distribution
.objects
.filter(region_query)
.values('species__pk')))
class AntsByRegionDiffView(APIView):
3 votes
Source : test_json.py (Apache License 2.0, from gethue)
def test_obj_subquery_lookup(self):
qs = JSONModel.objects.annotate(
value=Subquery(JSONModel.objects.filter(pk=OuterRef('pk')).values('field')),
).filter(value__a='b')
self.assertSequenceEqual(qs, [self.objs[7], self.objs[8]])
def test_deep_lookup_objs(self):
3 votes
Source : managers.py (Apache License 2.0, from heartexlabs)
def annotate_completed_at(queryset):
from tasks.models import Annotation
newest = Annotation.objects.filter(task=OuterRef("pk"), task__is_labeled=True).distinct().order_by("-created_at")
return queryset.annotate(completed_at=Subquery(newest.values("created_at")[:1]))
def annotate_annotations_results(queryset):
3 votes
Source : models.py (BSD 3-Clause "New" or "Revised" License, from ietf-tools)
def with_requested_by(self):
"""Annotate session with requested_by field
Adds requested_by field - pk of the Person who made the request
"""
return self.annotate(
requested_by=Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'time', 'id'
).values('by')[:1]),
)
def with_requested_time(self):
3 votes
Source : models.py (BSD 3-Clause "New" or "Revised" License, from ietf-tools)
def with_requested_time(self):
"""Annotate session with requested_time field"""
return self.annotate(
requested_time=Subquery(
SchedulingEvent.objects.filter(
session=OuterRef('pk')
).order_by(
'time', 'id'
).values('time')[:1]),
)
def not_canceled(self):
3 votes
Source : test_model_field.py (BSD 3-Clause "New" or "Revised" License, from laymonage)
def test_obj_subquery_lookup(self):
qs = NullableJSONModel.objects.annotate(
field=Subquery(NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")),
).filter(field__a="b")
self.assertSequenceEqual(qs, [self.objs[3], self.objs[4]])
def test_deep_lookup_objs(self):
3 votes
Source : serializers.py (BSD 2-Clause "Simplified" License, from macports)
def generate_time_range_query(self):
is_valid = self.validate_context()
if not is_valid:
return Submission.objects.none()
end_date = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=self.days_ago)
start_date = end_date - datetime.timedelta(days=self.days)
submissions = Submission.objects.only('id').filter(timestamp__range=[start_date, end_date]).order_by('user', '-timestamp').distinct('user')
return Submission.objects.filter(id__in=Subquery(submissions.values('id')))
def validate_properties(self):
3 votes
Source : port_installs.py (BSD 2-Clause "Simplified" License, from macports)
def get_install_count(port_name, days):
last_x_days = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=days)
submissions_last_x_days = Submission.objects.filter(timestamp__gte=last_x_days).order_by('user', '-timestamp').distinct('user')
installations = PortInstallation.objects.filter(submission_id__in=Subquery(submissions_last_x_days.values('id')),port__iexact=port_name).select_related('submission').defer('submission__raw_json')
count = installations.aggregate(requested=Count('submission__user_id', filter=Q(requested=True)), all=Count('submission__user_id'))
return count
3 votes
Source : utilities.py (BSD 2-Clause "Simplified" License, from macports)
def get_ports_context(request, req_ports, builder):
builds = BuildHistory.objects.filter(port_name=OuterRef('name'), builder_name__name=builder).order_by('time_start')
req_ports = req_ports.order_by(Lower('name')).select_related('livecheck').annotate(build=Subquery(builds.values_list('status')[:1]))
req_ports = apply_filters(request, req_ports)
# Paginate the req_ports
ports = paginate(request, req_ports, 100)
return ports
def get_ports_by_email(emails_list):
3 votes
Source : run_queries.py (MIT License, from paul-wolf)
def q_subquery_outerref(**kwargs):
qs = Book.objects.filter(
publisher__in=Subquery(
Publisher.objects.filter(pk=OuterRef("publisher")).only("pk")
)
)
if kwargs.get("sql"):
print(sql(qs))
a = []
for rec in qs:
a.append(rec)
@timeit
3 votes
Source : models.py (MIT License, from pennlabs)
def sections_with_reviews(queryset):
return review_averages(
queryset,
{
"review__section__course__full_code": OuterRef("course__full_code"),
# get all the reviews for instructors in the Section.instructors many-to-many
"review__instructor__in": Subquery(
Instructor.objects.filter(section=OuterRef(OuterRef("id"))).values("id").order_by()
),
},
extra_metrics=False,
).order_by("code")
def course_reviews(queryset):
3 votes
Source : match.py (MIT License, from pythrick)
def get_queryset(self):
sub_query = (
ProfileLike.objects.filter(
movie=OuterRef("movie"), from_user=self.request.user, like=True
)
.annotate(match_user=F("to_user"))
.values("to_user")
)
return ProfileLike.objects.filter(
from_user__in=Subquery(sub_query),
to_user=self.request.user,
like=True,
).all()
3 votes
Source : sorters.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def qs_with_payment(queryset: QuerySet, **_kwargs) -> QuerySet:
subquery = Subquery(
Payment.objects.filter(order_id=OuterRef("pk"))
.order_by("-pk")
.values_list("charge_status")[:1]
)
return queryset.annotate(
last_charge_status=ExpressionWrapper(subquery, output_field=CharField())
)
class OrderSortingInput(SortInputObjectType):
3 votes
Source : sorters.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def qs_with_product_count(queryset: QuerySet, **_kwargs) -> QuerySet:
return queryset.annotate(
product_count=Coalesce(
Subquery(
Category.tree.add_related_count(
queryset, Product, "category", "p_c", cumulative=True
)
.values("p_c")
.filter(pk=OuterRef("pk"))[:1]
),
0,
output_field=IntegerField(),
)
)
@staticmethod
3 votes
Source : sorters.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def qs_with_availability(queryset: QuerySet, channel_slug: str) -> QuerySet:
subquery = Subquery(
CollectionChannelListing.objects.filter(
collection_id=OuterRef("pk"), channel__slug=str(channel_slug)
).values_list("is_published")[:1]
)
return queryset.annotate(
is_published=ExpressionWrapper(subquery, output_field=BooleanField())
)
@staticmethod
3 votes
Source : sorters.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def qs_with_publication_date(queryset: QuerySet, channel_slug: str) -> QuerySet:
subquery = Subquery(
CollectionChannelListing.objects.filter(
collection_id=OuterRef("pk"), channel__slug=str(channel_slug)
).values_list("publication_date")[:1]
)
return queryset.annotate(
publication_date=ExpressionWrapper(subquery, output_field=DateField())
)
class CollectionSortingInput(ChannelSortInputObjectType):
3 votes
Source : sorters.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def qs_with_published(queryset: QuerySet, channel_slug: str) -> QuerySet:
subquery = Subquery(
ProductChannelListing.objects.filter(
product_id=OuterRef("pk"), channel__slug=str(channel_slug)
).values_list("is_published")[:1]
)
return queryset.annotate(
is_published=ExpressionWrapper(subquery, output_field=BooleanField())
)
@staticmethod
3 votes
Source : sorters.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def qs_with_publication_date(queryset: QuerySet, channel_slug: str) -> QuerySet:
subquery = Subquery(
ProductChannelListing.objects.filter(
product_id=OuterRef("pk"), channel__slug=str(channel_slug)
).values_list("publication_date")[:1]
)
return queryset.annotate(
publication_date=ExpressionWrapper(subquery, output_field=DateField())
)
@staticmethod
3 votes
Source : models.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def annotate_is_published(self, channel_slug: str):
query = Subquery(
ProductChannelListing.objects.filter(
product_id=OuterRef("pk"), channel__slug=str(channel_slug)
).values_list("is_published")[:1]
)
return self.annotate(
is_published=ExpressionWrapper(query, output_field=BooleanField())
)
def annotate_publication_date(self, channel_slug: str):
3 votes
Source : models.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def annotate_publication_date(self, channel_slug: str):
query = Subquery(
ProductChannelListing.objects.filter(
product_id=OuterRef("pk"), channel__slug=str(channel_slug)
).values_list("publication_date")[:1]
)
return self.annotate(
publication_date=ExpressionWrapper(query, output_field=DateField())
)
def annotate_visible_in_listings(self, channel_slug):
3 votes
Source : models.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def annotate_visible_in_listings(self, channel_slug):
query = Subquery(
ProductChannelListing.objects.filter(
product_id=OuterRef("pk"), channel__slug=str(channel_slug)
).values_list("visible_in_listings")[:1]
)
return self.annotate(
visible_in_listings=ExpressionWrapper(query, output_field=BooleanField())
)
def sort_by_attribute(
3 votes
Source : models.py (GNU General Public License v3.0, from Saleor-Multi-Vendor)
def applicable_shipping_methods_by_channel(shipping_methods, channel_id):
query = ShippingMethodChannelListing.objects.filter(
shipping_method=OuterRef("pk"), channel_id=channel_id
).values_list("price_amount")
return shipping_methods.annotate(price_amount=Subquery(query)).order_by(
"price_amount"
)
def exclude_shipping_methods_for_excluded_products(
3 votes
Source : django-tests-postgres_tests-test_hstore.py (Apache License 2.0, from SMAT-Lab)
def test_obj_subquery_lookup(self):
qs = HStoreModel.objects.annotate(
value=Subquery(HStoreModel.objects.filter(pk=OuterRef('pk')).values('field')),
).filter(value__a='b')
self.assertSequenceEqual(qs, self.objs[:2])
@isolate_apps('postgres_tests')
3 votes
Source : statistics.py (GNU Affero General Public License v3.0, from TicketVise)
def get(self, request, inbox_id):
inbox = get_object_or_404(Inbox, pk=inbox_id)
first_close_event = TicketStatusEvent.objects.filter(ticket=OuterRef('pk'),
status=Status.CLOSED).order_by("-date_created")
data = Ticket.objects.filter(inbox=inbox) \
.annotate(first_close_event_created=Subquery(first_close_event.values("date_created")[:1])) \
.filter(first_close_event_created__isnull=False) \
.annotate(time_to_close=F('date_created') - F('first_close_event_created')) \
.aggregate(avg_time_to_close=Avg("time_to_close"))
return JsonResponse(data, safe=False)
class LabelWithCountSerializer(ModelSerializer):
3 votes
Source : subquery.py (BSD 3-Clause "New" or "Revised" License, from W1ldPo1nter)
def _build_subquery(self, queryset):
from django.db.models import Subquery
return Subquery(queryset.values(self.field_name)[:1], output_field=self.output_field)
class SubqueryExistenceCheckProperty(SubqueryMixin, QueryableProperty):
0 votes
Source : admin.py (MIT License, from abrookins)
def goals_dashboard_view_sql(self, request):
completed_tasks = Subquery( # < 1>
TaskStatus.objects.filter(
task__goal=OuterRef('pk'), # < 2>
status=TaskStatus.DONE
).values(
'task__goal'
).annotate( # < 3>
count=Count('pk')
).values('count'),
output_field=IntegerField()) # < 4>
goals = Goal.objects.all().annotate(
completed_tasks=Coalesce(completed_tasks, 0)
).order_by('-completed_tasks')[:10]
return render(request, "admin/goal_dashboard.html",
{"goals": goals})
# end::counting-with-sql[]
# tag::caching-view-in-redis[]
def goals_dashboard_view_redis(self, request):
0 votes
Source : admin.py (MIT License, from abrookins)
def goals_avg_completions_view(self, request):
completed_tasks = Subquery(
TaskStatus.objects.filter(
task__goal=OuterRef('pk'),
status=TaskStatus.DONE
).values(
'task__goal'
).annotate(
count=Count('pk')
).values('count'),
output_field=IntegerField())
goals = Goal.objects.all().annotate(
completed_tasks=Coalesce(completed_tasks, 0))
top_ten_goals = goals.order_by('-completed_tasks')[:10]
average_completions = goals.aggregate(
Avg('completed_tasks')) # < 1>
avg = int(average_completions['completed_tasks__avg'])
other_stats = (
{
'name': 'Average Completed Tasks',
'stat': avg
},
)
return render(request, "admin/goal_dashboard.html", {
"goals": top_ten_goals,
"other_stats": other_stats
})
# end::aggregations[]
# tag::querying-materialized-views[]
def goals_dashboard_view_materialized(self, request):
0 votes
Source : metadata.py (GNU General Public License v3.0, from ajslater)
def _intersection_annotate(
self,
simple_qs,
qs,
fields,
related_suffix="",
annotation_prefix="",
):
"""Annotate the intersection of value and fk fields."""
for field in fields:
if self.is_model_comic:
comic_rel_field = field
else:
comic_rel_field = f"comic__{field}"
# Annotate variant counts
# Have to use simple_qs because every annotation in the loop
# corrupts the main qs
val = Subquery(
simple_qs.values("id")
.order_by() # magic order_by saves the day again
.annotate(count=Count(comic_rel_field, distinct=True))
.values("count")
)
field_variants = field.replace("__", "_") + "_variants"
then = comic_rel_field + related_suffix
qs = qs.annotate(**{field_variants: val})
# If 1 variant, annotate value, otherwise None
ann_field = (annotation_prefix + field).replace("__", "_")
condition = {field_variants: 1}
lookup = Case(
When(**condition, then=then),
default=Value(None),
)
qs = qs.annotate(**{ann_field: lookup})
return qs
@classmethod
0 votes
Source : staff_commands.py (MIT License, from Arx-Game)
def view_revelation_tables(self):
"""Table of all revelations, or clues comprising a specified revelation."""
def get_gmnote(thing):
gm_notes = thing.gm_notes or ""
if gm_notes and len(gm_notes) > 20:
gm_notes = "%s+" % gm_notes[:20]
return gm_notes
if self.args:
rev = self.get_taggable_thing(self.args, "revelation")
table = PrettyTable(["|w%s|n" % rev, "About", "Disco", "GM Notes"])
clues = rev.clues.annotate(
disco=Count("discoveries", distinct=True)
).order_by("clue_type")
if not clues:
raise CommandError("No clues exist for %s." % rev)
for clue in clues:
gmnote = get_gmnote(clue)
if clue.clue_type == Clue.CHARACTER_SECRET:
clue_type = (
clue.tangible_object.key if clue.tangible_object else "secret"
)
elif clue.clue_type == Clue.VISION:
clue_type = "vision"
else:
clue_type = "lore"
table.add_row([clue.name, clue_type, clue.disco, gmnote])
else:
secrets = Subquery(
Clue.objects.filter(
clue_type=Clue.CHARACTER_SECRET, revelations=OuterRef("id")
)
.values("revelations")
.annotate(cnt=Count("id"))
.values("cnt"),
output_field=IntegerField(),
)
revs = Revelation.objects.annotate(
clue_total=Count("clues_used", distinct=True)
).annotate(secret_total=secrets)
table = PrettyTable(
["{w#{n", "{wRevelation{n", "{wClu{n", "{wSecrt{n", "{wGM Notes{n"]
)
for r in revs:
gmnote = get_gmnote(r)
table.add_row([r.id, r.name, r.clue_total, r.secret_total, gmnote])
self.msg(str(table))
def view_plot_connections(self):
0 votes
Source : admin.py (MIT License, from Arx-Game)
def queryset(self, request, queryset):
"""
This performs a total tally of all clues for a given revelation, annotating the
queryset accordingly, which is used in a subquery to determine if the requirements
for this revelation can actually be met by players or not.
"""
qs = queryset
clues = (
Clue.objects.filter(revelations=OuterRef("id"))
.order_by()
.values("revelations")
)
total_rating = clues.annotate(total=Sum("rating")).values("total")
if self.value() == "true":
qs = qs.filter(required_clue_value__lte=Subquery(total_rating))
if self.value() == "false":
qs = qs.filter(required_clue_value__gt=Subquery(total_rating))
return qs
class RevelationAdmin(BaseCharAdmin):
0 votes
Source : views.py (GNU Affero General Public License v3.0, from avantifellows)
def list(self, request):
queryset = self.get_queryset()
# personal workspace
if not self.is_organizational_workspace:
queryset = queryset.filter(created_by=self.request.user)
else:
# organizational workspace
if OrganizationUser.objects.filter(
organization__shortcode=self.organization_shortcode,
user=self.request.user.id,
).exists():
# user should be a part of the org
queryset = queryset.filter(
Q(is_public=True)
| (Q(is_public=False) & Q(created_by=self.request.user))
)
else:
# otherwise, they don't have access to any plio
queryset = Plio.objects.none()
num_plios = queryset.count()
# add the number of unique viewers to the queryset
plio_session_group = Session.objects.filter(plio__uuid=OuterRef("uuid")).values(
"plio__uuid"
)
plios_unique_users_count = plio_session_group.annotate(
count_unique_users=Count("user__id", distinct=True)
).values("count_unique_users")
# annotate the plio's queryset with the count of unique users
queryset = queryset.annotate(
unique_viewers=Coalesce(Subquery(plios_unique_users_count), 0)
)
queryset = self.filter_queryset(queryset)
# adds the video URL to the queryset
queryset = queryset.annotate(video_url=F("video__url"))
page = self.paginate_queryset(queryset.values())
if page is not None:
return self.get_paginated_response({"data": page, "raw_count": num_plios})
# return an empty response in the paginated format if pagination fails
return Response(
{
"count": 0,
"raw_count": 0,
"page_size": self.get_page_size(self.request),
"next": None,
"previous": None,
"results": [],
}
)
@action(
0 votes
Source : models.py (GNU Affero General Public License v3.0, from betagouv)
def eligible_as_employee_record(self, siae):
"""
List job applications that will have to be transferred to ASP
via the employee record app.
These job applications must:
- be definitely accepted
- have no one-to-one relationship with an employee record
- have been created after production date
An eligible job application *may* or *may not* have an employee record object linked
to it.
For instance, when creating a new employee record from an eligible job application
and NOT finishing the entire creation process.
(employee record object creation occurs half-way of the "tunnel")
"""
# Exclude existing employee records with same approval and asp_id
# Rule: you can only create *one* employee record for a given asp_id / approval pair
subquery = Subquery(
self.exclude(to_siae=siae).filter(
employee_record__asp_id=siae.asp_id,
employee_record__approval_number=OuterRef("approval__number"),
)
)
return (
# Job application without approval are out of scope
self.exclude(approval=None)
# Prevent employee records creation (batch import for example).
.filter(create_employee_record=True)
# See `subquery` above : exclude possible ASP duplicates
.exclude(Exists(subquery))
# Only ACCEPTED job applications can be transformed into employee records
.accepted()
# Accept only job applications without linked or processed employee record
.filter(Q(employee_record__status="NEW") | Q(employee_record__isnull=True))
.filter(
# Only for current SIAE
to_siae=siae,
# Hiring must start after production date:
hiring_start_at__gte=settings.EMPLOYEE_RECORD_FEATURE_AVAILABILITY_DATE,
)
.select_related("job_seeker", "approval")
.order_by("-hiring_start_at")
)
class JobApplication(xwf_models.WorkflowEnabled, models.Model):
0 votes
Source : move_siae_data.py (GNU Affero General Public License v3.0, from betagouv)
def move_siae_data(from_id, to_id, dry_run=False, only_job_applications=False):
if from_id == to_id:
logger.error("Unable to use the same siae as source and destination (ID %s)", from_id)
return
from_siae_qs = siaes_models.Siae.objects.filter(pk=from_id)
try:
from_siae = from_siae_qs.get()
except siaes_models.Siae.DoesNotExist:
logger.error("Unable to find the siae ID %s", from_id)
return
to_siae_qs = siaes_models.Siae.objects.filter(pk=to_id)
try:
to_siae = to_siae_qs.get()
except siaes_models.Siae.DoesNotExist:
logger.error("Unable to find the siae ID %s", to_id)
return
if from_siae.kind != to_siae.kind:
logger.error("Both siaes should have the same kind but they don't")
return
# Intermediate variable for better readability
move_all_data = not only_job_applications
logger.info(
"MOVE %s OF siae.id=%s - %s %s - %s",
"DATA" if move_all_data else "JOB APPLICATIONS",
from_siae.pk,
from_siae.kind,
from_siae.siret,
from_siae.display_name,
)
job_applications_sent = job_applications_models.JobApplication.objects.filter(sender_siae_id=from_id)
logger.info("| Job applications sent: %s", job_applications_sent.count())
job_applications_received = job_applications_models.JobApplication.objects.filter(to_siae_id=from_id)
logger.info("| Job applications received: %s", job_applications_received.count())
if move_all_data:
# Move job descriptions not already present in the destination siae; related job applications will be attached to
# the job descriptions present in the destination siae
appellation_subquery = Subquery(
siaes_models.SiaeJobDescription.objects.filter(siae_id=to_id, appellation_id=OuterRef("appellation_id"))
)
job_descriptions = siaes_models.SiaeJobDescription.objects.filter(siae_id=from_id).exclude(
Exists(appellation_subquery)
)
logger.info("| Job descriptions: %s", job_descriptions.count())
# Move users not already present in siae destination
members = siaes_models.SiaeMembership.objects.filter(siae_id=from_id).exclude(
user__in=users_models.User.objects.filter(siaemembership__siae_id=to_id)
)
logger.info("| Members: %s", members.count())
diagnoses = eligibility_models.EligibilityDiagnosis.objects.filter(author_siae_id=from_id)
logger.info("| Diagnoses: %s", diagnoses.count())
prolongations = approvals_models.Prolongation.objects.filter(declared_by_siae_id=from_id)
logger.info("| Prolongations: %s", prolongations.count())
suspensions = approvals_models.Suspension.objects.filter(siae_id=from_id)
logger.info("| Suspensions: %s", suspensions.count())
# Don't move invitations for existing members
# The goal is to keep information about the original information
invitations = invitations_models.SiaeStaffInvitation.objects.filter(siae_id=from_id).exclude(
email__in=users_models.User.objects.filter(siaemembership__siae_id=to_id).values_list("email", flat=True)
)
logger.info("| Invitations: %s", invitations.count())
logger.info(
"INTO siae.id=%s - %s %s - %s",
to_siae.pk,
to_siae.kind,
to_siae.siret,
to_siae.display_name,
)
dest_siae_job_applications_sent = job_applications_models.JobApplication.objects.filter(sender_siae_id=to_id)
logger.info("| Job applications sent: %s", dest_siae_job_applications_sent.count())
dest_siae_job_applications_received = job_applications_models.JobApplication.objects.filter(to_siae_id=to_id)
logger.info("| Job applications received: %s", dest_siae_job_applications_received.count())
if move_all_data:
logger.info(f"| Brand '{to_siae.brand}' will be updated with '{from_siae.display_name}'")
logger.info(f"| Description \n{to_siae.description}\nwill be updated with\n{from_siae.description}")
logger.info(f"| Phone '{to_siae.phone}' will be updated with '{from_siae.phone}'")
logger.info(f"| Coords '{to_siae.coords}' will be updated with '{from_siae.coords}'")
logger.info(f"| Geoscore '{to_siae.geocoding_score}' will be updated with '{from_siae.geocoding_score}'")
if dry_run:
logger.info("Nothing to do in dry run mode.")
return
with transaction.atomic():
# If we move the job applications without moving the job descriptions as well, we need to unlink them,
# as job applications will be attached to siae B but job descriptions will stay attached to siae A.
if only_job_applications:
for job_application in job_applications_sent:
job_application.selected_jobs.clear()
for job_application in job_applications_received:
job_application.selected_jobs.clear()
# If we move job_description, we have to take care of existing job_description linked to siae B (destination),
# because we can't have 2 job_applications with the same Appellation for one siae. Job applications linked to
# this kind of job_description have to be unlinked to be transferred. Job_description can be different enough
# to be irrelevant.
if move_all_data:
# find Appellation linked to job_description siae B
to_siae_appellation_id = siaes_models.SiaeJobDescription.objects.filter(siae_id=to_id).values_list(
"appellation_id", flat=True
)
# find job_applications in siae A, linked with job_description which Appellation is found in siae B
job_applications_to_clear = job_applications_models.JobApplication.objects.filter(
to_siae_id=from_id,
selected_jobs__in=siaes_models.SiaeJobDescription.objects.filter(
siae_id=from_id, appellation_id__in=to_siae_appellation_id
),
)
# clean job_applications to let them be transfered in siae B
for job_application in job_applications_to_clear:
job_application.selected_jobs.clear()
job_applications_sent.update(sender_siae_id=to_id)
job_applications_received.update(to_siae_id=to_id)
if move_all_data:
# do not move duplicated job_descriptions
job_descriptions.exclude(appellation_id__in=to_siae_appellation_id).update(siae_id=to_id)
members.update(siae_id=to_id)
diagnoses.update(author_siae_id=to_id)
prolongations.update(declared_by_siae_id=to_id)
suspensions.update(siae_id=to_id)
invitations.update(siae_id=to_id)
to_siae_qs.update(
brand=from_siae.display_name,
description=from_siae.description,
phone=from_siae.phone,
coords=from_siae.coords,
geocoding_score=from_siae.geocoding_score,
)
from_siae_qs.update(
block_job_applications=True,
job_applications_blocked_at=timezone.now(),
# Make sure the old siae no longer appears in results
coords=None,
geocoding_score=None,
)
logger.info("MOVE %s OF siae.id=%s FINISHED", "DATA" if move_all_data else "JOB APPLICATIONS", from_siae.pk)
orig_job_applications_sent = job_applications_models.JobApplication.objects.filter(sender_siae_id=from_id)
logger.info("| Job applications sent: %s", orig_job_applications_sent.count())
orig_job_applications_received = job_applications_models.JobApplication.objects.filter(to_siae_id=from_id)
logger.info("| Job applications received: %s", orig_job_applications_received.count())
logger.info("INTO siae.id=%s", to_siae.pk)
dest_siae_job_applications_sent = job_applications_models.JobApplication.objects.filter(sender_siae_id=to_id)
logger.info("| Job applications sent: %s", dest_siae_job_applications_sent.count())
dest_siae_job_applications_received = job_applications_models.JobApplication.objects.filter(to_siae_id=to_id)
logger.info("| Job applications received: %s", dest_siae_job_applications_received.count())
class Command(BaseCommand):
0 votes
Source : models.py (GNU Affero General Public License v3.0, from betagouv)
def with_count_recent_received_job_apps(self):
"""
Count the number of recently received job applications.
The count with a `Subquery` instead of a `join` is way more efficient here.
We generate this SQL using the Django ORM:
SELECT
*,
COALESCE((
SELECT COUNT(U0."id") AS "count"
FROM "job_applications_jobapplication" U0
WHERE (U0."created_at" >= 2021-06-10 08:45:51.998244 + 00:00 AND U0."to_siae_id" = "siaes_siae"."id")
GROUP BY U0."to_siae_id"), 0) AS "count_recent_received_job_apps"
FROM
"siaes_siae"
See https://github.com/martsberger/django-sql-utils
"""
# Avoid a circular import
job_application_model = self.model._meta.get_field("jobapplication").related_model
sub_query = Subquery(
(
job_application_model.objects.filter(
to_siae=OuterRef("id"),
created_at__gte=timezone.now()
- timezone.timedelta(weeks=job_application_model.WEEKS_BEFORE_CONSIDERED_OLD),
)
.values("to_siae") # group job apps by to_siae
.annotate(count=Count("pk"))
.values("count")
),
output_field=models.IntegerField(),
)
# `Coalesce` will return the first not null value or zero.
return self.annotate(count_recent_received_job_apps=Coalesce(sub_query, 0))
def with_count_active_job_descriptions(self):
0 votes
Source : querysets.py (GNU General Public License v2.0, from bkmakerspace)
def borrowing_by_user(self, user: User, exclude_own=False) -> QuerySet:
"""Return a QS of all tools a user is currently borrowing
Args:
user (User): The use who we are filtering tools to what they are currently borrowing
exclude_own (bool, optional): Description
Returns:
QuerySet
"""
from tools.models import ToolHistory
history_qs = ToolHistory.objects.filter(tool=OuterRef("pk"), user=user).order_by(
"-created"
)
qs = self
if exclude_own:
qs = qs.exclude(user=user)
return qs.annotate(
last_history_action=Subquery(history_qs.values("action")[:1]),
last_history_date=Subquery(history_qs.values("created")[:1]),
).filter(
state=self.model.States.in_use.value,
last_history_action=self.model.Transitions.borrow.value,
)
class ToolHistoryQuerySet(QuerySet):
0 votes
Source : region.py (Apache License 2.0, from digitalfabrik)
def get_pages(
self,
archived=False,
return_unrestricted_queryset=False,
prefetch_translations=False,
prefetch_public_translations=False,
annotate_language_tree=False,
):
"""
This method returns either all archived or all non-archived pages of this region.
To retrieve all pages independently of their archived-state, use the reverse foreign key
:attr:`~integreat_cms.cms.models.regions.region.Region.pages`.
Per default, the returned queryset has some limitations because of the usage of
:meth:`~django.db.models.query.QuerySet.difference` and :meth:`~django.db.models.query.QuerySet.union`.
To perform the extra effort of returning an unrestricted queryset, set the parameter
``return_unrestricted_queryset`` to ``True``.
:param archived: Whether or not only archived pages should be returned (default: ``False``)
:type archived: bool
:param return_unrestricted_queryset: Whether or not the result should be returned as unrestricted queryset.
(default: ``False``)
:type return_unrestricted_queryset: bool
:param prefetch_translations: Whether the latest translations for each language should be prefetched
(default: ``False``)
:type prefetch_translations: bool
:param prefetch_public_translations: Whether the latest public translations for each language should be prefetched
(default: ``False``)
:type prefetch_public_translations: bool
:param annotate_language_tree: Whether the pages should be annotated with the region's language tree
(default: ``False``)
:type annotate_language_tree: bool
:return: Either the archived or the non-archived pages of this region
:rtype: ~treebeard.ns_tree.NS_NodeQuerySet [ ~integreat_cms.cms.models.pages.page.Page ]
"""
if archived:
pages = self.archived_pages
else:
pages = self.non_archived_pages
if (
return_unrestricted_queryset
or prefetch_translations
or prefetch_public_translations
):
# Generate a new unrestricted queryset containing the same pages
page_ids = [page.id for page in pages]
pages = self.pages.filter(id__in=page_ids)
if prefetch_translations:
pages = pages.prefetch_translations()
if prefetch_public_translations:
pages = pages.prefetch_public_translations()
if annotate_language_tree:
pages = pages.annotate(language_tree=models.Subquery(self.language_tree))
return pages
def get_root_pages(self):
0 votes
Source : event_actions.py (Apache License 2.0, from digitalfabrik)
def search_poi_ajax(request, region_slug):
"""
AJAX endpoint for searching POIs
:param request: Object representing the user call
:type request: ~django.http.HttpRequest
:param region_slug: The current regions slug
:type region_slug: str
:return: The rendered template response
:rtype: ~django.template.response.TemplateResponse
"""
data = json.loads(request.body.decode("utf-8"))
poi_query = data.get("query_string")
create_poi_option = data.get("create_poi_option")
logger.debug('Ajax call: Live search for POIs with query "%r"', poi_query)
region = get_object_or_404(Region, slug=data.get("region_slug"))
# All latest revisions of a POI (one for each language)
latest_public_poi_revisions = (
POITranslation.objects.filter(poi=OuterRef("pk"), status=status.PUBLIC)
.order_by("language__pk", "-version")
.distinct("language")
.values("id")
)
# All POIs which are not archived and have a latest public revision which contains the query
poi_query_result = (
region.pois.prefetch_related("translations")
.filter(
archived=False,
translations__in=Subquery(latest_public_poi_revisions),
translations__title__icontains=poi_query,
)
.distinct()
)
return render(
request,
"events/_poi_query_result.html",
{
"poi_query": poi_query,
"poi_query_result": poi_query_result,
"create_poi_option": create_poi_option,
"region": region,
},
)
@require_POST
0 votes
Source : 0058_auto_20180620_1346.py (Apache License 2.0, from genomicsengland)
def populate_stats(apps, schema_editor):
GenePanelSnapshot = apps.get_model("panels", "GenePanelSnapshot")
for panel in (
GenePanelSnapshot.objects.filter(
pk__in=Subquery(
GenePanelSnapshot.objects.exclude(
panel__status=GenePanel.STATUS.deleted
)
.distinct("panel__pk")
.values("pk")
.order_by("panel__pk", "-major_version", "-minor_version")
)
)
.annotate(child_panels_count=Count("child_panels"))
.annotate(superpanels_count=Count("genepanelsnapshot"))
.annotate(
is_super_panel=Case(
When(child_panels_count__gt=0, then=Value(True)),
default=Value(False),
output_field=models.BooleanField(),
),
is_child_panel=Case(
When(superpanels_count__gt=0, then=Value(True)),
default=Value(False),
output_field=models.BooleanField(),
),
)
):
pks = [panel.pk]
if panel.is_super_panel:
pks = panel.child_panels.values_list("pk", flat=True)
keys = [
"gene_reviewers",
"number_of_evaluated_genes",
"number_of_genes",
"number_of_ready_genes",
"number_of_green_genes",
"str_reviewers",
"number_of_evaluated_strs",
"number_of_strs",
"number_of_ready_strs",
"number_of_green_strs",
]
out = {"gene_reviewers": [], "str_reviewers": []}
for pk in pks:
# This is ~ 3000x faster than `.filter(pk__in=pks)`. Not a typo: 24 seconds vs 4ms per pk
info = (
GenePanelSnapshot.objects.filter(pk=pk)
.prefetch_related(
"str",
"str__evaluation",
"str__evaluation__user",
"genepanelentrysnapshot",
"genepanelentrysnapshot__evaluation",
"genepanelentrysnapshot__evaluation__user",
)
.aggregate(
gene_reviewers=ArrayAgg(
"genepanelentrysnapshot__evaluation__user__pk", distinct=True
),
number_of_evaluated_genes=Count(
Case(
# Count unique genes if that gene has more than 1 evaluation
When(
genepanelentrysnapshot__evaluation__isnull=False,
then=models.F("genepanelentrysnapshot__pk"),
)
),
distinct=True,
),
number_of_genes=Count("genepanelentrysnapshot__pk", distinct=True),
number_of_ready_genes=Count(
Case(
When(
genepanelentrysnapshot__ready=True,
then=models.F("genepanelentrysnapshot__pk"),
)
),
distinct=True,
),
number_of_green_genes=Count(
Case(
When(
genepanelentrysnapshot__saved_gel_status__gte=3,
then=models.F("genepanelentrysnapshot__pk"),
)
),
distinct=True,
),
str_reviewers=ArrayAgg("str__evaluation__user__pk", distinct=True),
number_of_evaluated_strs=Count(
Case(
# Count unique genes if that gene has more than 1 evaluation
When(
str__evaluation__isnull=False, then=models.F("str__pk")
)
),
distinct=True,
),
number_of_strs=Count("str__pk", distinct=True),
number_of_ready_strs=Count(
Case(When(str__ready=True, then=models.F("str__pk"))),
distinct=True,
),
number_of_green_strs=Count(
Case(
When(str__saved_gel_status__gte=3, then=models.F("str__pk"))
),
distinct=True,
),
)
)
for key in keys:
out[key] = out.get(key, 0) + info.get(key, 0)
out["gene_reviewers"] = [r for r in out["gene_reviewers"] if r] # remove None
out["str_reviewers"] = [r for r in out["str_reviewers"] if r] # remove None
out["entity_reviewers"] = list(
set(out["gene_reviewers"] + out["str_reviewers"])
)
out["number_of_reviewers"] = len(out["entity_reviewers"])
out["number_of_evaluated_entities"] = (
out["number_of_evaluated_genes"] + out["number_of_evaluated_strs"]
)
out["number_of_entities"] = out["number_of_genes"] + out["number_of_strs"]
out["number_of_ready_entities"] = (
out["number_of_ready_genes"] + out["number_of_ready_strs"]
)
out["number_of_green_entities"] = (
out["number_of_green_genes"] + out["number_of_green_strs"]
)
panel.stats = out
panel.save(update_fields=["stats"])
class Migration(migrations.Migration):
0 votes
Source : 0069_auto_20190109_1428.py (Apache License 2.0, from genomicsengland)
def update_stats(apps, schema_editor):
keys = [
"gene_reviewers",
"number_of_evaluated_genes",
"number_of_genes",
"number_of_ready_genes",
"number_of_green_genes",
"str_reviewers",
"number_of_evaluated_strs",
"number_of_strs",
"number_of_ready_strs",
"number_of_green_strs",
"region_reviewers",
"number_of_evaluated_regions",
"number_of_regions",
"number_of_ready_regions",
"number_of_green_regions",
]
GenePanelSnapshot = apps.get_model("panels", "GenePanelSnapshot")
for panel in (
GenePanelSnapshot.objects.filter(
pk__in=Subquery(
GenePanelSnapshot.objects.exclude(
panel__status=GenePanel.STATUS.deleted
)
.distinct("panel_id")
.values("pk")
.order_by("panel_id", "-major_version", "-minor_version")
)
)
.annotate(child_panels_count=Count("child_panels"))
.annotate(superpanels_count=Count("genepanelsnapshot"))
.annotate(
is_super_panel=Case(
When(child_panels_count__gt=0, then=Value(True)),
default=Value(False),
output_field=models.BooleanField(),
),
is_child_panel=Case(
When(superpanels_count__gt=0, then=Value(True)),
default=Value(False),
output_field=models.BooleanField(),
),
)
):
pks = [panel.pk]
if panel.is_super_panel:
pks = panel.child_panels.values_list("pk", flat=True)
# another way to refactor below info: when copying data, just count the numbers...
info = GenePanelSnapshot.objects.filter(pk__in=pks).aggregate(
gene_reviewers=ArrayAgg(
"genepanelentrysnapshot__evaluation__user", distinct=True
),
number_of_evaluated_genes=Count(
Case(
# Count unique genes if that gene has more than 1 evaluation
When(
genepanelentrysnapshot__evaluation__isnull=False,
then=models.F("genepanelentrysnapshot"),
)
),
distinct=True,
),
number_of_genes=Count("genepanelentrysnapshot", distinct=True),
number_of_ready_genes=Count(
Case(
When(
genepanelentrysnapshot__ready=True,
then=models.F("genepanelentrysnapshot"),
)
),
distinct=True,
),
number_of_green_genes=Count(
Case(
When(
genepanelentrysnapshot__saved_gel_status__gte=3,
then=models.F("genepanelentrysnapshot"),
)
),
distinct=True,
),
str_reviewers=ArrayAgg("str__evaluation__user", distinct=True),
number_of_evaluated_strs=Count(
Case(
# Count unique genes if that gene has more than 1 evaluation
When(str__evaluation__isnull=False, then=models.F("str"))
),
distinct=True,
),
number_of_strs=Count("str", distinct=True),
number_of_ready_strs=Count(
Case(When(str__ready=True, then=models.F("str"))), distinct=True
),
number_of_green_strs=Count(
Case(When(str__saved_gel_status__gte=3, then=models.F("str"))),
distinct=True,
),
region_reviewers=ArrayAgg("region__evaluation__user", distinct=True),
number_of_evaluated_regions=Count(
Case(
# Count unique genes if that gene has more than 1 evaluation
When(region__evaluation__isnull=False, then=models.F("region"))
),
distinct=True,
),
number_of_regions=Count("region", distinct=True),
number_of_ready_regions=Count(
Case(When(region__ready=True, then=models.F("region"))), distinct=True
),
number_of_green_region=Count(
Case(When(region__saved_gel_status__gte=3, then=models.F("region"))),
distinct=True,
),
)
out = {"gene_reviewers": [], "str_reviewers": [], "region_reviewers": []}
for key in keys:
out[key] = out.get(key, 0) + info.get(key, 0)
out["gene_reviewers"] = list(
set([r for r in out["gene_reviewers"] if r])
) # remove None
out["str_reviewers"] = list(
set([r for r in out["str_reviewers"] if r])
) # remove None
out["region_reviewers"] = list(
set([r for r in out["region_reviewers"] if r])
) # remove None
out["entity_reviewers"] = list(
set(out["gene_reviewers"] + out["str_reviewers"] + out["region_reviewers"])
)
out["number_of_reviewers"] = len(out["entity_reviewers"])
out["number_of_evaluated_entities"] = (
out["number_of_evaluated_genes"]
+ out["number_of_evaluated_strs"]
+ out["number_of_evaluated_regions"]
)
out["number_of_entities"] = (
out["number_of_genes"] + out["number_of_strs"] + out["number_of_regions"]
)
out["number_of_ready_entities"] = (
out["number_of_ready_genes"]
+ out["number_of_ready_strs"]
+ out["number_of_ready_regions"]
)
out["number_of_green_entities"] = (
out["number_of_green_genes"]
+ out["number_of_green_strs"]
+ out["number_of_green_regions"]
)
panel.stats = out
panel.save(update_fields=["stats"])
class Migration(migrations.Migration):
0 votes
Source : entity.py (Apache License 2.0, from genomicsengland)
def get_active(self, deleted=False, gene_symbol=None, name=None, pks=None):
"""Get active Entities"""
if pks:
qs = super().get_queryset().filter(panel_id__in=pks)
else:
qs = (
super()
.get_queryset()
.filter(panel_id__in=Subquery(self.get_latest_ids(deleted)))
)
if name:
qs = qs.filter(name=name)
if gene_symbol:
qs = qs.filter(gene__gene_symbol=gene_symbol)
return (
qs.annotate(
number_of_reviewers=Count("evaluation__user", distinct=True),
number_of_evaluated_entities=Count("evaluation"),
number_of_entities=Count("pk"),
)
.prefetch_related(
"evaluation",
"tags",
"evidence",
"panel",
"panel__level4title",
"panel__panel",
)
.order_by("panel_id", "-panel__major_version", "-panel__minor_version")
)
def get_gene_panels(self, gene_symbol, deleted=False, pks=None):
0 votes
Source : genepanelentrysnapshot.py (Apache License 2.0, from genomicsengland)
def get_active(self, deleted=False, gene_symbol=None, pks=None, panel_types=None):
"""Get active Gene Entry Snapshots"""
if pks:
qs = super().get_queryset().filter(panel_id__in=pks)
else:
qs = (
super()
.get_queryset()
.filter(panel_id__in=Subquery(self.get_latest_ids(deleted)))
)
if gene_symbol:
if type(gene_symbol) == list:
qs = qs.filter(gene_core__gene_symbol__in=gene_symbol)
else:
qs = qs.filter(gene_core__gene_symbol=gene_symbol)
if panel_types:
qs = qs.filter(panel__panel__types__slug__in=panel_types)
return (
qs.annotate(
entity_type=V("gene", output_field=models.CharField()),
entity_name=models.F("gene_core__gene_symbol"),
)
.prefetch_related(
"tags",
"evidence",
"panel",
"panel__level4title",
"panel__panel",
"panel__panel__types",
)
.order_by(
"panel_id",
"-panel__major_version",
"-panel__minor_version",
"-panel__modified",
"-panel_id",
)
)
def get_gene_panels(self, gene_symbol, deleted=False, pks=None):
0 votes
Source : genepanelsnapshot.py (Apache License 2.0, from genomicsengland)
def get_active(
self,
all=False,
deleted=False,
internal=False,
name=None,
panel_types=None,
superpanels=True,
):
"""Get active panels
Parameters:
- all
Setting it to False will only return `public` panels
- deleted
Whether to include the deleted panels
- internal
If we want to include `internal` panels in the list
"""
qs = super().get_queryset()
if not all:
qs = qs.filter(
Q(panel__status=GenePanel.STATUS.public)
| Q(panel__status=GenePanel.STATUS.promoted)
)
if panel_types:
qs = qs.filter(panel__types__slug__in=panel_types)
if not internal:
qs = qs.exclude(panel__status=GenePanel.STATUS.internal)
if not superpanels:
# exclude super panels when incrementing versions for all panels
qs = qs.annotate(child_panels_count=Count("child_panels")).exclude(
child_panels_count__gt=0
)
qs = qs.filter(pk__in=Subquery(self.get_latest_ids(deleted)))
if name:
if name.isdigit():
filters = Q(panel_id=name)
else:
filters = (
Q(panel__old_pk=name)
| Q(panel__name=name)
| Q(old_panels__contains=[name])
| Q(panel__name__icontains=name)
)
qs = qs.filter(filters)
return qs.prefetch_related(
"panel", "panel__types", "child_panels", "level4title"
).order_by(
"level4title__name", "-major_version", "-minor_version", "-modified", "-pk"
)
def get_active_annotated(
0 votes
Source : region.py (Apache License 2.0, from genomicsengland)
def get_active(
self, deleted=False, name=None, gene_symbol=None, pks=None, panel_types=None
):
"""Get active Regions"""
if pks:
qs = super().get_queryset().filter(panel_id__in=pks)
else:
qs = (
super()
.get_queryset()
.filter(panel_id__in=Subquery(self.get_latest_ids(deleted)))
)
if name:
if isinstance(name, list):
qs = qs.filter(name__in=name)
else:
qs = qs.filter(name=name)
if gene_symbol:
if isinstance(gene_symbol, list):
qs = qs.filter(gene_core__gene_symbol__in=gene_symbol)
else:
qs = qs.filter(gene_core__gene_symbol=gene_symbol)
if panel_types:
qs = qs.filter(panel__panel__types__slug__in=panel_types)
return (
qs.annotate(
number_of_reviewers=Count("evaluation__user", distinct=True),
number_of_evaluations=Count("evaluation"),
entity_type=V("region", output_field=models.CharField()),
entity_name=models.F("name"),
)
.prefetch_related(
"evaluation",
"tags",
"evidence",
"panel",
"panel__level4title",
"panel__panel",
"panel__panel__types",
)
.order_by("panel_id", "-panel__major_version", "-panel__minor_version")
)
def get_region_panels(self, name, deleted=False, pks=None):
0 votes
Source : strs.py (Apache License 2.0, from genomicsengland)
def get_active(
self, deleted=False, name=None, gene_symbol=None, pks=None, panel_types=None
):
"""Get active STRs"""
# TODO (Oleg) there is a lot of similar logic between entities models, simplify
if pks:
qs = super().get_queryset().filter(panel_id__in=pks)
else:
qs = (
super()
.get_queryset()
.filter(panel_id__in=Subquery(self.get_latest_ids(deleted)))
)
if name:
if isinstance(name, list):
qs = qs.filter(name__in=name)
else:
qs = qs.filter(name=name)
if gene_symbol:
if isinstance(gene_symbol, list):
qs = qs.filter(gene_core__gene_symbol__in=gene_symbol)
else:
qs = qs.filter(gene_core__gene_symbol=gene_symbol)
if panel_types:
qs = qs.filter(panel__panel__types__slug__in=panel_types)
return (
qs.annotate(
number_of_reviewers=Count("evaluation__user", distinct=True),
number_of_evaluations=Count("evaluation"),
entity_type=V("str", output_field=models.CharField()),
entity_name=models.F("name"),
)
.prefetch_related(
"evaluation",
"tags",
"evidence",
"panel",
"panel__level4title",
"panel__panel",
"panel__panel__types",
)
.order_by("panel_id", "-panel__major_version", "-panel__minor_version")
)
def get_str_panels(self, name, deleted=False, pks=None):
0 votes
Source : tests.py (Apache License 2.0, from gethue)
def test_orders_nulls_first_on_filtered_subquery(self):
Article.objects.filter(headline='Article 1').update(author=self.author_1)
Article.objects.filter(headline='Article 2').update(author=self.author_1)
Article.objects.filter(headline='Article 4').update(author=self.author_2)
Author.objects.filter(name__isnull=True).delete()
author_3 = Author.objects.create(name='Name 3')
article_subquery = Article.objects.filter(
author=OuterRef('pk'),
headline__icontains='Article',
).order_by().values('author').annotate(
last_date=Max('pub_date'),
).values('last_date')
self.assertQuerysetEqualReversible(
Author.objects.annotate(
last_date=Subquery(article_subquery, output_field=DateTimeField())
).order_by(
F('last_date').asc(nulls_first=True)
).distinct(),
[author_3, self.author_1, self.author_2],
)
def test_stop_slicing(self):
See More Examples