Here are examples of the Python API django.db.models.Count, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
159 Examples
3
Example 1
Project: write-it Source File: stats.py
@property
def public_messages_with_answers(self):
    """Number of this instance's public messages that have at least one answer."""
    answered = self.writeitinstance.message_set.annotate(num_answers=Count('answers'))
    return answered.filter(public=True, num_answers__gt=0).count()
3
Example 2
Project: Weeb.FM Source File: views.py
@detail_route(methods=['GET'])
def top(self, request, pk=None):
    """Return the member's top-10 artists by scrobble count, each row
    annotated with its percentage of the member's total scrobbles."""
    member = Member.objects.get(nick_name__iexact=pk)
    scrobbles = Scrobble.objects.filter(member_id=member.pk)
    total_scrobbles = scrobbles.count()
    rows = scrobbles.filter(member=member).values(
        'member', 'song__artist', 'song__artist__name').annotate(
        total=Count('id')).order_by('-total')[:10]
    for row in rows:
        row['percent'] = 100 * (row['total'] / total_scrobbles)
    return Response(status=200, data=rows)
3
Example 3
Project: 3bot Source File: __init__.py
@login_required
def index(request, template='threebot/index.html'):
    """Dashboard view: the five most-run workflows plus recent workflow logs
    for the user's organizations and for the user personally."""
    orgs = get_my_orgs(request)
    top_workflows = (Workflow.objects.filter(owner__in=orgs)
                     .annotate(issue_count=Count('workflowlog'))
                     .order_by('-issue_count')
                     .filter(issue_count__gte=1)[:5])
    context = {
        'request': request,
        'counts': top_workflows,
        'team_logs': filter_workflow_log_history(teams=orgs, quantity=5),
        'my_logs': filter_workflow_log_history(teams=orgs, user=request.user, quantity=5),
    }
    return render_to_response(template, context, context_instance=RequestContext(request))
3
Example 4
Project: djangocms-blog Source File: managers.py
def tag_cloud(self, other_model=None, queryset=None, published=True, on_site=False):
    """Return the tags used by this model's items, each given a ``count``
    attribute, sorted by descending count."""
    from taggit.models import TaggedItem
    if on_site:
        queryset = queryset.on_site()
    tag_ids = self._taglist(other_model, queryset)
    lookup = {}
    if published:
        # Restrict the tag counting to published items only.
        lookup = TaggedItem.bulk_lookup_kwargs(self.model.objects.published())
    lookup['tag_id__in'] = tag_ids
    counts_by_tag = dict(
        TaggedItem.objects.filter(**lookup)
        .values('tag')
        .annotate(count=models.Count('tag'))
        .values_list('tag', 'count')
    )
    annotated = []
    for tag in TaggedItem.tag_model().objects.filter(pk__in=counts_by_tag.keys()):
        tag.count = counts_by_tag[tag.pk]
        annotated.append(tag)
    # Stable sort, highest count first (matches sorted(..., key=-count)).
    annotated.sort(key=lambda t: t.count, reverse=True)
    return annotated
3
Example 5
def get_query(self, request, term):
    """Group products whose collection starts with ``term`` by collection,
    most-populated collections first."""
    matches = Product.objects.filter(
        collection__isnull=False,
        collection__istartswith=term,
    ).select_subclasses()
    return (matches.values('collection')
            .annotate(products=Count('collection'))
            .order_by('-products'))
3
Example 6
Project: pootle Source File: utils.py
def get_duplicate_emails():
    """Get a list of emails that occur more than once in user accounts.
    """
    users = get_user_model().objects.hide_meta()
    duplicates = (users.values('email')
                  .annotate(Count('email'))
                  .filter(email__count__gt=1))
    return duplicates.values_list("email", flat=True)
3
Example 7
def get_categories(self, language=None):
    """
    Returns all categories used in posts and the amount, ordered by amount.
    """
    posts = self.filter_by_language(language) if language else self
    posts = posts.distinct()
    if not posts:
        return Category.objects.none()
    return (Category.objects.filter(post__in=posts)
            .annotate(count=models.Count('post'))
            .order_by('-count'))
3
Example 8
Project: bugle_project Source File: views.py
def stats(request):
    """Render the stats page: users ranked by blast count, and per-day blast
    totals in chronological order."""
    blast_dates = list(Blast.objects.values_list('created', flat=True))
    date_counts = {}
    for created in blast_dates:
        day = created.date()
        date_counts[day] = date_counts.get(day, 0) + 1
    # dict.items() returns a view object in Python 3, which has no .sort()
    # method; sorted() works identically on Python 2 and 3.
    top_dates = sorted(date_counts.items(), key=lambda item: item[0])
    return render(request, 'stats.html', {
        'top_users': User.objects.annotate(
            num_blasts=Count('blasts')
        ).order_by('-num_blasts'),
        'top_dates': top_dates,
    })
3
Example 9
Project: speakerfight Source File: models.py
def get_votes_to_export(self):
    """Proposal rows (id, title, author) annotated with their rating sum
    and vote count, suitable for export."""
    base = self.proposals.values(
        'id', 'title', 'author__username', 'author__email')
    return base.annotate(Sum('votes__rate')).annotate(Count('votes'))
3
Example 10
Project: econsensus Source File: models.py
def get_feedback_statistics(self):
    """Map each rating name to the number of feedback items with that rating.

    Names with no feedback are included with a zero count.
    """
    statistics = {unicode(name): 0 for name in Feedback.rating_names}
    for row in self.feedback_set.values('rating').annotate(Count('rating')):
        name = unicode(Feedback.rating_names[row['rating']])
        statistics[name] = row['rating__count']
    return statistics
3
Example 11
Project: storybase Source File: banner.py
def get_random_topic(self, min_count):
    """Return a topic (Category) having at least ``min_count`` published
    stories, or None when no topic qualifies.

    NOTE(review): despite the name, this returns the first matching topic,
    not a random one — confirm whether randomness is expected by callers.
    """
    # Limit eligible topics to only those with at least ``min_count``
    # associated published stories.
    topics = (Category.objects.filter(stories__status='published')
              .annotate(num_stories=Count('stories'))
              .filter(num_stories__gte=min_count))
    # .exists() issues a cheap LIMIT-1 query instead of the full COUNT(*)
    # that .count() runs just to test for emptiness.
    if not topics.exists():
        # No topics with enough stories.
        return None
    return topics[0]
3
Example 12
def tt(self, tt_date=None):
    """Active items annotated with their hit count, hottest then newest first.

    When ``tt_date`` is given, only hits on or after that date are counted.
    """
    queryset = self.active()
    if tt_date:
        queryset = queryset.filter(hits__date__gte=tt_date)
    queryset = queryset.annotate(tt_hits=models.Count('hits'))
    return queryset.order_by('-tt_hits', '-date')
3
Example 13
@register.as_tag
def blog_categories(*args):
    """
    Put a list of categories for blog posts into the template context.
    """
    published_posts = BlogProxy.secondary.published()
    used = BlogCategory.objects.filter(blogposts__in=published_posts)
    return list(used.annotate(post_count=Count("blogposts")))
3
Example 14
def handle(self, *args, **options):
    """Prune stale accounts and return a summary string.

    Deletes (1) accounts older than 31 days that never logged in, and
    (2) accounts with zero checks whose last login was over 31 days ago.
    """
    cutoff = timezone.now() - timedelta(days=31)
    # Old accounts that never logged in.
    never_logged_in = User.objects.filter(date_joined__lt=cutoff, last_login=None)
    n1, _ = never_logged_in.delete()
    # Inactive for a month and owning no checks.
    idle = User.objects.annotate(n_checks=Count("check"))
    idle = idle.filter(last_login__lt=cutoff, n_checks=0)
    n2, _ = idle.delete()
    return "Done! Pruned %d user accounts." % (n1 + n2)
3
Example 15
Project: euscan Source File: views.py
@render_to("euscan/statistics.html")
def statistics(request):
handlers = (
Version.objects.values("handler")
.filter(overlay="")
.annotate(n=models.Count("handler"),
avg_conf=models.Avg("confidence"))
)
for i in xrange(len(handlers)):
if not handlers[i]['handler']:
handlers[i]['handler'] = "unknown"
return {"handlers": handlers}
3
Example 16
def get_queryset(self, request, *args, **kwargs):
    """Modifies the queryset: annotates each group with its user count."""
    base = super(GroupAdmin, self).get_queryset(request, *args, **kwargs)
    return base.annotate(user_count=Count("user"))
3
Example 17
def get_count(self, using):
    """
    Performs a COUNT() query using the current filter constraints.
    """
    clone = self.clone()
    clone.add_annotation(Count('pk'), alias='x_sf_count', is_summary=True)
    total = clone.get_aggregation(using, ['x_sf_count'])['x_sf_count']
    # The aggregation may yield None on an empty result; report 0 instead.
    return 0 if total is None else total
3
Example 18
def licenses(self):
    """Return all podcast licenses as (license, count) pairs, most common first."""
    rows = (Podcast.objects.exclude(license__isnull=True)
            .values("license")
            .annotate(Count("id"))
            .order_by())
    license_counts = Counter({row['license']: row['id__count'] for row in rows})
    return license_counts.most_common()
3
Example 19
Project: postcodeinfo Source File: models.py
def most_common_in_area(self, postcode_area):
    """Most frequent (local authority, country) GSS-code pair among the
    postcodes of ``postcode_area``, or None when the area has none."""
    postcode_indexes = (Address.objects
                        .filter(postcode_area=postcode_area)
                        .values_list('postcode_index', flat=True)
                        .distinct())
    ranked = (PostcodeGssCode.objects
              .filter(postcode_index__in=postcode_indexes)
              .values('local_authority_gss_code', 'country_gss_code')
              .annotate(count=Count('local_authority_gss_code'))
              .order_by("-count"))
    return ranked.first()
3
Example 20
Project: dj-stripe Source File: managers.py
def started_plan_summary_for(self, year, month):
    """Per-plan counts of subscriptions started during the given month."""
    started = self.started_during(year, month)
    return (started.values("current_subscription__plan")
            .order_by()
            .annotate(count=models.Count("current_subscription__plan")))
3
Example 21
Project: public-contracts Source File: analysis.py
def contracts_statistics():
    """Count and total price of contracts overall, this year, and this month."""
    contracts = models.Contract.objects.all()
    today = date.today()
    this_year = contracts.filter(signing_date__year=today.year)
    this_month = this_year.filter(signing_date__month=today.month)

    def summarize(queryset):
        # One query per scope: contract count and price sum together.
        return queryset.aggregate(count=Count('price'), sum=Sum('price'))

    total = summarize(contracts)
    year = summarize(this_year)
    month = summarize(this_month)
    return {'total_sum': total['sum'],
            'total_count': total['count'],
            'year_sum': year['sum'],
            'year_count': year['count'],
            'month_sum': month['sum'],
            'month_count': month['count']}
3
Example 22
def sponsors(request):
    """Context processor exposing sponsor levels with at least one sponsor."""
    populated_levels = (SponsorLevel.objects
                        .annotate(num_sponsors=Count('sponsor'))
                        .filter(num_sponsors__gt=0))
    return {'levels': populated_levels}
3
Example 23
Project: pylyglot Source File: context_processors.py
def total_languages(request):
    """Context processor: number of languages with more than one translation."""
    languages = Language.objects.annotate(trans=Count('translation'))
    return {'total_languages': languages.filter(trans__gt=1).count()}
3
Example 24
Project: zentral Source File: models.py
def current_machine_group_sources(self):
    """Sources whose machine groups appear in a current (latest, non-archived)
    machine snapshot, annotated with their machine-group count."""
    current = self.filter(
        machinegroup__isnull=False,
        machinegroup__machinesnapshot__mt_next__isnull=True,
        machinegroup__machinesnapshot__archived_at__isnull=True,
    )
    return (current.annotate(num_machine_groups=Count('machinegroup'))
            .order_by('module', 'name'))
0
Example 25
Project: rapidpro Source File: omnibox.py
def omnibox_query(org, **kwargs):
    """
    Performs an omnibox query based on the given arguments.

    Exactly one lookup branch runs, chosen by which kwarg is present;
    the return type differs per branch (Contact, ContactGroup, or
    ContactURN queryset, or the mixed-search result).
    """
    # determine what type of group/contact/URN lookup is being requested
    contact_uuids = kwargs.get('c', None)    # contacts with ids
    step_uuid = kwargs.get('s', None)        # contacts in flow step with UUID
    message_ids = kwargs.get('m', None)      # contacts with message ids
    label_id = kwargs.get('l', None)         # contacts with messages in this label
    group_uuids = kwargs.get('g', None)      # groups with ids
    urn_ids = kwargs.get('u', None)          # URNs with ids
    search = kwargs.get('search', None)      # search of groups, contacts and URNs
    types = kwargs.get('types', None)        # limit search to types (g | s | c | u)
    simulation = kwargs.get('simulation', 'false') == 'true'

    # these lookups return a Contact queryset
    if contact_uuids or step_uuid or message_ids or label_id:
        qs = Contact.objects.filter(org=org, is_blocked=False, is_active=True, is_test=simulation)
        if contact_uuids:
            qs = qs.filter(uuid__in=contact_uuids.split(","))
        elif step_uuid:
            # Imported locally, presumably to avoid a circular import — verify.
            from temba.flows.models import FlowStep
            steps = FlowStep.objects.filter(run__is_active=True, step_uuid=step_uuid,
                                            left_on=None, run__flow__org=org).distinct('contact').select_related('contact')
            contact_uuids = [f.contact.uuid for f in steps]
            qs = qs.filter(uuid__in=contact_uuids)
        elif message_ids:
            qs = qs.filter(msgs__in=message_ids.split(","))
        elif label_id:
            label = Label.label_objects.get(pk=label_id)
            qs = qs.filter(msgs__in=label.get_messages())
        return qs.distinct().order_by('name')

    # this lookup returns a ContactGroup queryset
    elif group_uuids:
        qs = ContactGroup.user_groups.filter(org=org, uuid__in=group_uuids.split(","))
        return qs.annotate(members=Count('contacts')).order_by('name')

    # this lookup returns a ContactURN queryset
    elif urn_ids:
        qs = ContactURN.objects.filter(org=org, id__in=urn_ids.split(",")).select_related('contact')
        return qs.order_by('path')

    # searching returns something which acts enough like a queryset to be paged
    return omnibox_mixed_search(org, search, types)
0
Example 26
Project: SmartElect Source File: data_pull_ed.py
def generate_election_day_hq_reports(election):
    """Generate election day HQ reports for the election day HQ view."""
    # Ignore centers which are inactive for polling for this election,
    # except when building the list of all centers.
    inactive_center_db_ids = [
        inactivity.registration_center.id
        for inactivity in CenterClosedForElection.objects.filter(election=election)
    ]

    # For a given center there are usually 4 polling reports, one for each reporting period.
    # Sometimes a center has multiple reports per period, and occasionally no report for a
    # period. Since PollingReports are cumulative, we only want the most recent report for each
    # center, and that's what this query does.
    # The use of .distinct('registration_center__id') at the end activates a Postgres-specific
    # 'DISTINCT ON' clause. See the queryset API ref for distinct, also see here:
    # http://stackoverflow.com/questions/3800551/select-first-row-in-each-group-by-group
    polling_reports = PollingReport.objects.filter(election=election,
                                                   registration_center__deleted=False) \
        .exclude(registration_center__id__in=inactive_center_db_ids) \
        .order_by('registration_center__id', '-period_number', '-modification_date') \
        .distinct('registration_center__id')

    open_centers = RegistrationCenter.objects.filter(centeropen__isnull=False) \
        .exclude(id__in=inactive_center_db_ids) \
        .distinct()

    all_centers = RegistrationCenter.objects.all() \
        .annotate(n_registrations=Count('registration'))

    # OK, all the queries are done. The next step is to loop through the results and count.
    all_offices = sorted(set([center.office for center in all_centers]))

    REPORT_TYPES = ('by_office', 'by_region', 'by_center_type', )

    # You can picture this dict of dicts as a tree. The root dict has children that are the
    # report types (plus national, which behaves a bit differently), the grandchildren are
    # all offices, all regions, and all center types, and the g-grandchildren are the leaf nodes,
    # or very nearly so. They're either 'v' (for vote count) which is a leaf node, or 'r' (for
    # registration count) which has leaf nodes 'open' and 'active'.
    d = {}
    for report_type in REPORT_TYPES:
        # Using an ordered dict here means that downstream consumers (like the template) get
        # results that are already sorted.
        d[report_type] = OrderedDict()
        if report_type == 'by_office':
            for office in all_offices:
                d[report_type][str(office.id)] = {'v': 0, 'r': {'open': 0, 'active': 0, }}
        elif report_type == 'by_region':
            for region in Office.ALL_REGIONS:
                d[report_type][str(region)] = {'v': 0, 'r': {'open': 0, 'active': 0, }}
        elif report_type == 'by_center_type':
            for center_type in RegistrationCenter.Types.ALL:
                d[report_type][str(center_type)] = {'v': 0, 'r': {'open': 0, 'active': 0, }}

    d['national'] = {}
    d['national']['r'] = {}
    d['national']['r']['open'] = 0
    d['national']['r']['active'] = 0
    d['national']['v'] = 0

    # Tally registrations
    for center in all_centers:
        is_open = center in open_centers
        for report_type in REPORT_TYPES:
            if report_type == 'by_office':
                key = str(center.office.id)
            elif report_type == 'by_region':
                key = str(center.office.region)
            elif report_type == 'by_center_type':
                key = str(center.center_type)
            d[report_type][key]['r']['active'] += center.n_registrations
            if report_type == 'by_region':
                d['national']['r']['active'] += center.n_registrations
            if is_open:
                d[report_type][key]['r']['open'] += center.n_registrations
                if report_type == 'by_region':
                    d['national']['r']['open'] += center.n_registrations

    # Re-key by center id for the O(1) lookups in the vote tally below.
    all_centers = {center.id: center for center in all_centers}

    # Tally votes
    for polling_report in polling_reports:
        center = all_centers[polling_report.registration_center_id]
        for report_type in REPORT_TYPES:
            if report_type == 'by_office':
                key = str(center.office.id)
            elif report_type == 'by_region':
                key = str(center.office.region)
            elif report_type == 'by_center_type':
                if center.center_type == RegistrationCenter.Types.COPY:
                    # Copy center stats are rolled into the original center's stats in this context.
                    # See:
                    # https://github.com/hnec-vr/libya-elections/issues/1008#issuecomment-70111548
                    key = str(center.copy_of.center_type)
                else:
                    key = str(center.center_type)
            d[report_type][key]['v'] += polling_report.num_voters

    # Now that copy center stats have been counted and rolled into other categories, I delete them
    # so that they don't clutter up the report.
    del d['by_center_type'][str(RegistrationCenter.Types.COPY)]

    # NOTE(review): the national totals below overwrite the values tallied in
    # the registration loop above, recomputing them from the by_region rows.
    d['national']['r']['open'] = 0
    d['national']['r']['active'] = 0
    d['national']['v'] = 0
    d['national']['r']['open'] = sum([d['by_region'][str(region)]['r']['open'] for region in
                                      Office.ALL_REGIONS])
    d['national']['r']['active'] = sum([d['by_region'][str(region)]['r']['active'] for region in
                                        Office.ALL_REGIONS])
    d['national']['v'] = sum([d['by_region'][str(region)]['v'] for region in Office.ALL_REGIONS])
    return d
0
Example 27
Project: mezzanine Source File: keyword_tags.py
@register.as_tag
def keywords_for(*args):
    """
    Return a list of ``Keyword`` objects for the given model instance
    or a model class. In the case of a model class, retrieve all
    keywords for all instances of the model and apply a ``weight``
    attribute that can be used to create a tag cloud.
    """
    # Handle a model instance.
    if isinstance(args[0], Model):
        obj = args[0]
        if getattr(obj, "content_model", None):
            obj = obj.get_content_model()
        keywords_name = obj.get_keywordsfield_name()
        keywords_queryset = getattr(obj, keywords_name).all()
        # Keywords may have been prefetched already. If not, we
        # need select_related for the actual keywords.
        prefetched = getattr(obj, "_prefetched_objects_cache", {})
        if keywords_name not in prefetched:
            keywords_queryset = keywords_queryset.select_related("keyword")
        return [assigned.keyword for assigned in keywords_queryset]
    # Handle a model class, given as an "app_label.model" string.
    try:
        app_label, model = args[0].split(".", 1)
    except ValueError:
        # Not a dotted model path; nothing to look up.
        return []
    content_type = ContentType.objects.get(app_label=app_label, model=model)
    assigned = AssignedKeyword.objects.filter(content_type=content_type)
    keywords = Keyword.objects.filter(assignments__in=assigned)
    keywords = keywords.annotate(item_count=Count("assignments"))
    if not keywords:
        return []
    # Scale each keyword's usage count onto 1..TAG_CLOUD_SIZES for cloud display.
    counts = [keyword.item_count for keyword in keywords]
    min_count, max_count = min(counts), max(counts)
    factor = (settings.TAG_CLOUD_SIZES - 1.)
    if min_count != max_count:
        factor /= (max_count - min_count)
    for kywd in keywords:
        kywd.weight = int(round((kywd.item_count - min_count) * factor)) + 1
    return keywords
0
Example 28
Project: djangopackages Source File: views.py
def homepage(request, template_name="homepage.html"):
categories = []
for category in Category.objects.annotate(package_count=Count("package")):
element = {
"title": category.title,
"description": category.description,
"count": category.package_count,
"slug": category.slug,
"title_plural": category.title_plural,
"show_pypi": category.show_pypi,
}
categories.append(element)
# get up to 5 random packages
package_count = Package.objects.count()
random_packages = []
if package_count > 1:
package_ids = set([])
# Get 5 random keys
package_ids = sample(
range(1, package_count + 1), # generate a list from 1 to package_count +1
min(package_count, 5) # Get a sample of the smaller of 5 or the package count
)
# Get the random packages
random_packages = Package.objects.filter(pk__in=package_ids)[:5]
try:
potw = Dpotw.objects.latest().package
except Dpotw.DoesNotExist:
potw = None
except Package.DoesNotExist:
potw = None
try:
gotw = Gotw.objects.latest().grid
except Gotw.DoesNotExist:
gotw = None
except Grid.DoesNotExist:
gotw = None
# Public Service Announcement on homepage
try:
psa_body = PSA.objects.latest().body_text
except PSA.DoesNotExist:
psa_body = '<p>There are currently no announcements. To request a PSA, tweet at <a href="http://twitter.com/open_comparison">@Open_Comparison</a>.</p>'
# Latest Django Packages blog post on homepage
feed_result = get_feed()
if len(feed_result.entries):
blogpost_title = feed_result.entries[0].title
blogpost_body = feed_result.entries[0].summary
else:
blogpost_title = ''
blogpost_body = ''
return render(request,
template_name, {
"latest_packages": Package.objects.all().order_by('-created')[:5],
"random_packages": random_packages,
"potw": potw,
"gotw": gotw,
"psa_body": psa_body,
"blogpost_title": blogpost_title,
"blogpost_body": blogpost_body,
"categories": categories,
"package_count": package_count,
"py3_compat": Package.objects.filter(version__supports_python3=True).select_related().distinct().count(),
"latest_python3": Version.objects.filter(supports_python3=True).select_related("package").distinct().order_by("-created")[0:5]
}
)
0
Example 29
Project: reviewboard Source File: managers.py
def _migrate_legacy_fdd(self, legacy_data_items, count, batch_size):
    """Migrates data from LegacyFileDiffData to RawFileDiffData.

    This will go through every LegacyFileDiffData and convert them to
    RawFileDiffData entries, removing the old versions. All associated
    FileDiffs are then updated to point to the new RawFileDiffData entry
    instead of the old LegacyFileDiffData.

    Yields per-batch tuples of (batch length, total diff size, bytes saved,
    filediff hashes, parent filediff hashes, all hashes) as it goes.
    """
    from reviewboard.diffviewer.models import RawFileDiffData

    cursor = connection.cursor()

    # Annotate usage counts so each batch knows which hashes need their
    # FileDiff references transitioned below.
    legacy_data_items = legacy_data_items.annotate(
        num_filediffs=Count('filediffs'),
        num_parent_filediffs=Count('parent_filediffs'))

    for batch in self._iter_batches(legacy_data_items, count, batch_size):
        batch_total_diff_size = 0
        batch_total_bytes_saved = 0
        raw_fdds = []
        all_diff_hashes = []
        filediff_hashes = []
        parent_filediff_hashes = []

        for legacy_fdd in batch:
            raw_fdd = RawFileDiffData.objects.create_from_legacy(
                legacy_fdd, save=False)
            raw_fdds.append(raw_fdd)

            binary_hash = legacy_fdd.binary_hash

            old_diff_size = len(legacy_fdd.get_binary_base64())
            batch_total_diff_size += old_diff_size
            batch_total_bytes_saved += old_diff_size - len(raw_fdd.binary)

            # Update all associated FileDiffs to use the new objects
            # instead of the old ones.
            if legacy_fdd.num_filediffs > 0:
                filediff_hashes.append(binary_hash)

            if legacy_fdd.num_parent_filediffs > 0:
                parent_filediff_hashes.append(binary_hash)

            all_diff_hashes.append(binary_hash)

        try:
            # Attempt to create all the entries we want in one go.
            RawFileDiffData.objects.bulk_create(raw_fdds)
        except IntegrityError:
            # One or more entries in the batch conflicted with an existing
            # entry, meaning it was already created. We'll just need to
            # operate on the contents of this batch one-by-one.
            for raw_fdd in raw_fdds:
                try:
                    raw_fdd.save()
                except IntegrityError:
                    raw_fdd = RawFileDiffData.objects.get(
                        binary_hash=raw_fdd.binary_hash)

        if filediff_hashes:
            self._transition_hashes(cursor, 'diff_hash', filediff_hashes)

        if parent_filediff_hashes:
            self._transition_hashes(cursor, 'parent_diff_hash',
                                    parent_filediff_hashes)

        # Remove the migrated legacy rows for this batch.
        legacy_data_items.filter(pk__in=all_diff_hashes).delete()

        yield (len(batch), batch_total_diff_size,
               batch_total_bytes_saved, filediff_hashes,
               parent_filediff_hashes, all_diff_hashes)
0
Example 30
def get_context_data(self, **kwargs):
    """Add categories (non-empty first) and per-category document counts
    by document type to the template context."""
    context = super(DocuementcategoryList, self).get_context_data(**kwargs)

    # Get all categories
    categories = list(models.DocuementCategory.objects.all()
                      .prefetch_related('lecturers')
                      .prefetch_related('courses'))

    # To reduce number of queries, prefetch aggregated count values from the
    # docuement model. The query returns the count for each (category, dtype) pair.
    category_counts = models.Docuement.objects.values('category', 'dtype') \
        .order_by().annotate(count=Count('dtype'))

    # Create counts dictionary ({category_id: {dtype: count, dtype: count, ...}})
    counts = defaultdict(lambda: defaultdict(int))
    for item in category_counts:
        category = item['category']
        dtype = item['dtype']
        counts[category][dtype] = item['count']

    # Add counts to category objects; missing dtypes default to 0 via the
    # inner defaultdict.
    simplecounts = defaultdict(dict)
    empty_categories = []
    nonempty_categories = []
    for c in categories:
        d = simplecounts[c.pk]
        d['summary'] = counts[c.pk][models.Docuement.DTypes.SUMMARY]
        d['exam'] = counts[c.pk][models.Docuement.DTypes.EXAM]
        d['other'] = counts[c.pk][models.Docuement.DTypes.SOFTWARE] + \
            counts[c.pk][models.Docuement.DTypes.LEARNING_AID] + \
            counts[c.pk][models.Docuement.DTypes.ATTESTATION]
        d['total'] = sum(d.values())
        # Sort by category activity
        if d['total'] == 0:
            empty_categories.append(c)
        else:
            nonempty_categories.append(c)

    context['categories'] = nonempty_categories + empty_categories
    context['counts'] = simplecounts
    return context
0
Example 31
def get_context_data(self, **kwargs):
    """Add top/flop lecturer ratings per category, most-quoted lecturers,
    and per-user activity leaders to the stats page context."""
    context = super(Stats, self).get_context_data(**kwargs)

    # Lecturers: only those with more than 5 ratings in the category qualify.
    # '%c' is substituted with the rating category letter below.
    base_query = "SELECT lecturer_id AS id \
        FROM lecturers_lecturerrating \
        WHERE category = '%c' \
        GROUP BY lecturer_id HAVING COUNT(id) > 5"
    base_query_top = base_query + " ORDER BY AVG(rating) DESC, COUNT(id) DESC"
    base_query_flop = base_query + " ORDER BY AVG(rating) ASC, COUNT(id) DESC"

    def fetchfirst(queryset):
        # First row of a (raw) queryset, or None when it is empty.
        try:
            return queryset[0]
        except IndexError:
            return None

    context['lecturer_top_d'] = fetchfirst(
        lecturer_models.Lecturer.objects.raw(base_query_top % 'd'))
    context['lecturer_top_m'] = fetchfirst(
        lecturer_models.Lecturer.objects.raw(base_query_top % 'm'))
    context['lecturer_top_f'] = fetchfirst(
        lecturer_models.Lecturer.objects.raw(base_query_top % 'f'))
    context['lecturer_flop_d'] = fetchfirst(
        lecturer_models.Lecturer.objects.raw(base_query_flop % 'd'))
    context['lecturer_flop_m'] = fetchfirst(
        lecturer_models.Lecturer.objects.raw(base_query_flop % 'm'))
    context['lecturer_flop_f'] = fetchfirst(
        lecturer_models.Lecturer.objects.raw(base_query_flop % 'f'))
    context['lecturer_quotes'] = lecturer_models.Lecturer.objects \
        .annotate(quotes_count=Count('Quote')) \
        .order_by('-quotes_count')[:3]

    # Users
    context['user_topratings'] = fetchfirst(
        models.User.objects.raw('''
            SELECT u.id AS id, COUNT(DISTINCT lr.lecturer_id) AS lrcount
            FROM front_user u
            JOIN lecturers_lecturerrating lr
            ON u.id = lr.user_id
            GROUP BY u.id
            ORDER BY lrcount DESC'''))
    context['user_topuploads'] = fetchfirst(
        models.User.objects
        .exclude(username='spimport')
        .annotate(uploads_count=Count('Docuement'))
        .order_by('-uploads_count'))
    context['user_topevents'] = fetchfirst(
        models.User.objects
        .annotate(events_count=Count('Event'))
        .order_by('-events_count'))
    context['user_topquotes'] = fetchfirst(
        models.User.objects
        .exclude(username='spimport')
        .annotate(quotes_count=Count('Quote'))
        .order_by('-quotes_count'))
    return context
0
Example 32
Project: rocket-league-replays Source File: replays.py
@register.assignment_tag
def steam_stats(uid):
    """Build a dict of leaderboard statistics for the Steam player ``uid``
    for the current default season: winning/last-minute/overtime goals,
    preferred match size and role, biggest win, and per-stat maxima."""
    data = {}
    season_id = get_default_season()

    # Winning goals scored.
    data['winning_goals'] = Goal.objects.filter(
        player__platform__in=['OnlinePlatform_Steam', '1'],
        player__online_id=uid,
        replay__show_leaderboard=True,
        replay__season_id=season_id,
        number=F('replay__team_0_score') + F('replay__team_1_score')
    ).count()

    # Last minute goals (literally, goals scored within the last minute of the game)
    data['last_minute_goals'] = Goal.objects.filter(
        player__platform__in=['OnlinePlatform_Steam', '1'],
        player__online_id=uid,
        replay__show_leaderboard=True,
        replay__season_id=season_id,
        frame__gte=F('replay__num_frames') - (60 * F('replay__record_fps'))
    ).count()

    # Number of times the player has scored a goal which equalised the game and
    # forced it into overtime.
    data['overtime_triggering_goals'] = 0
    data['overtime_triggering_and_winning_goals'] = 0
    data['overtime_trigger_and_team_win'] = 0

    # Find replays which went into overtime.
    # NOTE(review): this queryset is immediately overwritten by the next
    # assignment and never used.
    replays = Replay.objects.filter(
        show_leaderboard=True,
        season_id=season_id,
        goal__frame__gte=(60 * 5 * F('record_fps')),
    )

    # Get all games with overtime goals.
    replays = Replay.objects.annotate(
        num_goals=F('team_0_score') + F('team_1_score'),
    ).filter(
        season_id=season_id,
        num_frames__gt=60 * 5 * F('record_fps'),
        show_leaderboard=True,
        player__platform__in=['OnlinePlatform_Steam', '1'],
        player__online_id=uid,
        num_goals__gte=2,
    ).prefetch_related('goal_set')

    for replay in replays:
        # Who scored the 2nd to last goal?
        try:
            goal = replay.goal_set.get(
                number=replay.num_goals - 1,
                player__platform__in=['OnlinePlatform_Steam', '1'],
                player__online_id=uid,
            )
            data['overtime_triggering_goals'] += 1

            # Did the team win?
            team = goal.player.team
            if (
                team == 0 and replay.team_0_score > replay.team_1_score or
                team == 1 and replay.team_1_score > replay.team_0_score
            ):
                data['overtime_trigger_and_team_win'] += 1

            # Did they also score the winning goal?
            replay.goal_set.get(
                number=replay.num_goals,
                player__platform__in=['OnlinePlatform_Steam', '1'],
                player__online_id=uid,
            )
            data['overtime_triggering_and_winning_goals'] += 1
        except Goal.DoesNotExist:
            pass
        except Goal.MultipleObjectsReturned:
            pass

    # Which match size does this player appear most in?
    data['preferred_match_size'] = None
    sizes = Replay.objects.filter(
        season_id=season_id,
        show_leaderboard=True,
        player__platform__in=['OnlinePlatform_Steam', '1'],
        player__online_id=uid,
    ).values('team_sizes').annotate(
        Count('team_sizes'),
    ).order_by('-team_sizes__count')

    if len(sizes) > 0:
        data['preferred_match_size'] = sizes[0]['team_sizes']

    # What's this player's preferred role within a team?
    data['preferred_role'] = None
    role_query = Player.objects.filter(
        replay__show_leaderboard=True,
        replay__season_id=season_id,
        platform__in=['OnlinePlatform_Steam', '1'],
        online_id=uid,
    ).aggregate(
        goals=Sum('goals'),
        assists=Sum('assists'),
        saves=Sum('saves'),
    )

    # All aggregates empty/zero -> no role can be determined.
    if not any([v[1] for v in role_query.items()]):
        data['preferred_role'] = None
    else:
        max_stat = max(role_query, key=lambda k: role_query[k])

        if max_stat == 'goals':
            data['preferred_role'] = 'Goalscorer'
        elif max_stat == 'assists':
            data['preferred_role'] = 'Assister'
        elif max_stat == 'saves':
            data['preferred_role'] = 'Goalkeeper'

    # The block below is disabled (a bare string literal, effectively
    # commented-out code) for carries/dominations/biggest-win stats.
    """
    # Number of times the player's score was higher than everyone else on their
    # team put together.
    data['carries'] = 0
    # Number of times the player's score was higher than everyone else put together.
    data['dominations'] = 0
    replays = Replay.objects.filter(
        team_sizes__gte=2,
        show_leaderboard=True,
        player__platform__in=['OnlinePlatform_Steam', '1'],
        player__online_id=uid,
    )
    for replay in replays:
        # Which team was the player on? Split screen players will break a .get()
        # here, so we have to filter().
        player = replay.player_set.filter(
            platform='OnlinePlatform_Steam',
            online_id=uid,
        )[0]
        # What was the total score for this team?
        team_score = Player.objects.filter(
            replay=replay,
            team=player.team,
        ).exclude(
            pk=player.pk,
        ).aggregate(
            score=Sum('score'),
        )['score']
        if player.score > team_score:
            data['carries'] += 1
        # What was the total score for the other team?
        other_team_score = Player.objects.filter(
            replay=replay,
        ).exclude(
            team=player.team,
        ).aggregate(
            score=Sum('score'),
        )['score']
        if not team_score:
            team_score = 0
        if not other_team_score:
            other_team_score = 0
        if player.score > team_score + other_team_score:
            data['dominations'] += 1
    # The biggest gap in a win involving the player.
    data['biggest_win'] = None
    replays = Replay.objects.filter(
        team_sizes__gte=2,
        show_leaderboard=True,
        player__platform__in=['OnlinePlatform_Steam', '1'],
        player__online_id=uid,
    ).extra(select={
        'goal_diff': 'abs("team_0_score" - "team_1_score")'
    }).order_by('-goal_diff')
    """

    # NOTE(review): because the goal_diff ordering above is disabled, this
    # loop iterates the earlier overtime-games queryset, so 'biggest_win' is
    # the first overtime win found rather than the largest goal gap — confirm
    # whether that is intended.
    for replay in replays:
        # Which team was the player on? Split screen players will break a .get()
        # here, so we have to filter().
        player = replay.player_set.filter(
            platform__in=['OnlinePlatform_Steam', '1'],
            online_id=uid,
        )[0]
        # Check if the player was on the winning team.
        if (
            player.team == 0 and replay.team_0_score > replay.team_1_score or
            player.team == 1 and replay.team_1_score > replay.team_0_score
        ):
            data['biggest_win'] = mark_safe('<a href="{}">{} - {}</a>'.format(
                replay.get_absolute_url(),
                replay.team_0_score,
                replay.team_1_score,
            ))
            break

    # Season-wide personal bests.
    data.update(Player.objects.filter(
        replay__season_id=season_id,
        platform__in=['OnlinePlatform_Steam', '1'],
        online_id=uid,
    ).aggregate(
        highest_score=Max('score'),
        most_goals=Max('goals'),
        most_shots=Max('shots'),
        most_assists=Max('assists'),
        most_saves=Max('saves'),
    ))
    return data
0
Example 33
Project: amy Source File: trainings_completion_rates.py
def handle(self, *args, **options):
    """Write a CSV report of training completion rates to stdout.

    One row per training event: learner counts, how many learners
    completed the badge via this training vs. via another event, how
    many got no badge at all, and how many badge earners from this
    training went on to teach at least once.
    """
    # Column order doubles as the CSV header row.
    fields = [
        'start', 'slug', 'online', 'badge', 'learners',
        'completed this', 'completed this [%]',
        'completed other', 'completed other [%]',
        'no badge', 'no badge [%]',
        'taught at least once', 'taught at least once [%]',
    ]
    writer = csv.DictWriter(self.stdout, fieldnames=fields)
    writer.writeheader()
    for training in self.trainings():
        # The training's tags determine which badge it awards
        # (presumably instructor-training badges — confirm in badge_type).
        badge = self.badge_type(training.tags.all())
        learners = self.learners(training)
        learners_len = learners.count()
        # Completed: the badge's award is tied to this very event.
        completed_len = learners.filter(badges=badge,
                                        award__event=training).count()
        # Completed the badge, but through some other event.
        completed_other_len = learners.filter(badges=badge) \
            .exclude(award__event=training) \
            .count()
        no_badge_len = learners.exclude(badges=badge).count()
        # Django tries to optimize every query; for example here I had to
        # cast to list explicitly to achieve a query without any
        # WHEREs to task__role__name (which self.learners() unfortunately
        # has to add).
        learners2 = Person.objects.filter(
            pk__in=list(learners.values_list('pk', flat=True)))
        # 1. Grab people who received a badge for this training
        # 2. Count how many times each of them taught
        instructors = learners2.filter(award__badge=badge,
                                       award__event=training)\
            .annotate(
                num_taught=Count(
                    Case(
                        When(
                            task__role__name='instructor',
                            # task__event__start__gte=training.start,
                            then=Value(1)
                        ),
                        output_field=IntegerField()
                    )
                )
            )
        # 3. Get only people who taught at least once
        # 4. And count them
        instructors_taught_at_least_once = instructors \
            .filter(num_taught__gt=0) \
            .aggregate(Count('num_taught'))['num_taught__count'] or 0
        # Map computed values back onto the header names by position.
        record = {
            fields[0]: universal_date_format(training.start),
            fields[1]: training.slug,
            fields[2]: int(self.online_tag in training.tags.all()),
            fields[3]: badge.title,
            fields[4]: learners_len,
            fields[5]: completed_len,
            fields[6]: self.percent(completed_len, learners_len),
            fields[7]: completed_other_len,
            fields[8]: self.percent(completed_other_len, learners_len),
            fields[9]: no_badge_len,
            fields[10]: self.percent(no_badge_len, learners_len),
            fields[11]: instructors_taught_at_least_once,
            fields[12]: self.percent(instructors_taught_at_least_once,
                                     learners_len),
        }
        writer.writerow(record)
0
Example 34
def get_context_data(self, **kwargs):
    """Build cached JSON time series for the event statistics page.

    Populates three context keys, each memoized in the event's cache:

    * ``obd_data`` -- orders placed / orders paid, per calendar day
    * ``obp_data`` -- per-product counts of ordered vs. paid positions
    * ``rev_data`` -- cumulative revenue of paid orders, per day

    ``?latest`` in the query string forces a cache flush first.
    """
    ctx = super().get_context_data(**kwargs)
    tz = timezone.get_current_timezone()
    if 'latest' in self.request.GET:
        clear_cache(self.request.event)
    cache = self.request.event.get_cache()
    # Orders by day
    ctx['obd_data'] = cache.get('statistics_obd_data')
    if not ctx['obd_data']:
        ordered_by_day = {}
        for o in Order.objects.filter(event=self.request.event).values('datetime'):
            day = o['datetime'].astimezone(tz).date()
            ordered_by_day[day] = ordered_by_day.get(day, 0) + 1
        paid_by_day = {}
        for o in Order.objects.filter(event=self.request.event,
                                      payment_date__isnull=False).values('payment_date'):
            day = o['payment_date'].astimezone(tz).date()
            paid_by_day[day] = paid_by_day.get(day, 0) + 1
        data = []
        for d in dateutil.rrule.rrule(
                dateutil.rrule.DAILY,
                dtstart=min(ordered_by_day.keys()) if ordered_by_day else datetime.date.today(),
                until=max(
                    # BUG FIX: this guard previously tested ``paid_by_day``
                    # while taking max() of ``ordered_by_day`` -- raising
                    # "max() arg is an empty sequence" when there were paid
                    # orders but no ordered ones, and producing a wrong end
                    # date in the opposite case.
                    max(ordered_by_day.keys() if ordered_by_day else [datetime.date.today()]),
                    max(paid_by_day.keys() if paid_by_day else [datetime.date(1970, 1, 1)])
                )):
            d = d.date()
            data.append({
                'date': d.strftime('%Y-%m-%d'),
                'ordered': ordered_by_day.get(d, 0),
                'paid': paid_by_day.get(d, 0)
            })
        ctx['obd_data'] = json.dumps(data)
        cache.set('statistics_obd_data', ctx['obd_data'])
    # Orders by product
    ctx['obp_data'] = cache.get('statistics_obp_data')
    if not ctx['obp_data']:
        num_ordered = {
            p['item']: p['cnt']
            for p in (OrderPosition.objects
                      .filter(order__event=self.request.event)
                      .values('item')
                      .annotate(cnt=Count('id')))
        }
        num_paid = {
            p['item']: p['cnt']
            for p in (OrderPosition.objects
                      .filter(order__event=self.request.event, order__status=Order.STATUS_PAID)
                      .values('item')
                      .annotate(cnt=Count('id')))
        }
        item_names = {
            i.id: str(i.name)
            for i in Item.objects.filter(event=self.request.event)
        }
        ctx['obp_data'] = json.dumps([
            {
                'item': item_names[item],
                'ordered': cnt,
                # Paid is a subset of ordered, so missing keys mean zero.
                'paid': num_paid.get(item, 0)
            } for item, cnt in num_ordered.items()
        ])
        cache.set('statistics_obp_data', ctx['obp_data'])
    # Revenue (cumulative) by day
    ctx['rev_data'] = cache.get('statistics_rev_data')
    if not ctx['rev_data']:
        rev_by_day = {}
        for o in Order.objects.filter(event=self.request.event,
                                      status=Order.STATUS_PAID,
                                      payment_date__isnull=False).values('payment_date', 'total'):
            day = o['payment_date'].astimezone(tz).date()
            rev_by_day[day] = rev_by_day.get(day, 0) + o['total']
        data = []
        total = 0
        for d in dateutil.rrule.rrule(
                dateutil.rrule.DAILY,
                dtstart=min(rev_by_day.keys() if rev_by_day else [datetime.date.today()]),
                until=max(rev_by_day.keys() if rev_by_day else [datetime.date.today()])):
            d = d.date()
            total += float(rev_by_day.get(d, 0))
            data.append({
                'date': d.strftime('%Y-%m-%d'),
                'revenue': round(total, 2),
            })
        ctx['rev_data'] = json.dumps(data)
        cache.set('statistics_rev_data', ctx['rev_data'])
    ctx['has_orders'] = self.request.event.orders.exists()
    return ctx
0
Example 35
Project: taiga-back Source File: services.py
def get_users_public_stats():
    """Return an OrderedDict of public user-registration statistics.

    Keys: ``total``, ``today``, ``average_last_seven_days``,
    ``average_last_five_working_days`` and ``counts_last_year_per_week``
    (a cumulative count of active users keyed by ISO week-start date).
    Only active, non-system users are counted.
    """
    model = get_user_model()
    queryset = model.objects.filter(is_active=True, is_system=False)
    stats = OrderedDict()
    today = timezone.now()
    yesterday = today - timedelta(days=1)
    seven_days_ago = yesterday - timedelta(days=7)
    a_year_ago = today - timedelta(days=365)
    stats["total"] = queryset.count()
    stats["today"] = queryset.filter(date_joined__year=today.year,
                                     date_joined__month=today.month,
                                     date_joined__day=today.day).count()
    stats["average_last_seven_days"] = (queryset.filter(date_joined__range=(seven_days_ago, yesterday))
                                        .count()) / 7
    # In Django's week_day lookup, 1 is Sunday and 7 is Saturday.
    stats["average_last_five_working_days"] = (queryset.filter(date_joined__range=(seven_days_ago, yesterday))
                                               .exclude(Q(date_joined__week_day=1) |
                                                        Q(date_joined__week_day=7))
                                               .count()) / 5
    # Graph: users last year
    # increments ->
    #     SELECT date_trunc('week', "filtered_users"."date_joined") AS "week",
    #            count(*)
    #       FROM (SELECT *
    #               FROM "users_user"
    #              WHERE "users_user"."is_active" = TRUE
    #                AND "users_user"."is_system" = FALSE
    #                AND "users_user"."date_joined" >= %s) AS "filtered_users"
    #   GROUP BY "week"
    #   ORDER BY "week";
    increments = (queryset.filter(date_joined__gte=a_year_ago)
                  .extra({"week": "date_trunc('week', date_joined)"})
                  .values("week")
                  .order_by("week")
                  .annotate(count=Count("id")))
    counts_last_year_per_week = OrderedDict()
    # BUG FIX: guard against an empty queryset -- ``increments[0]`` raised
    # IndexError when no user joined within the last year. In that case the
    # per-week graph is simply empty.
    if increments:
        # Baseline: users who joined before the first week on the graph, so
        # every weekly figure is a running total of all users ever.
        sumatory = queryset.filter(date_joined__lt=increments[0]["week"]).count()
        for inc in increments:
            sumatory += inc["count"]
            counts_last_year_per_week[str(inc["week"].date())] = sumatory
    stats["counts_last_year_per_week"] = counts_last_year_per_week
    return stats
0
Example 36
Project: django-form-designer Source File: __init__.py
def export(self, request, queryset=None):
    """Stream the given form-log queryset as CSV and return the response."""
    self.init_response()
    self.init_writer()
    # How many distinct form definitions are represented in this export?
    distinct_forms = self.aggregate_distinct(queryset) if False else \
        queryset.aggregate(Count('form_definition', distinct=True))['form_definition__count']
    include_created = settings.CSV_EXPORT_INCLUDE_CREATED
    include_pk = settings.CSV_EXPORT_INCLUDE_PK
    # A header row only makes sense for a single form; an extra "form"
    # column only when several forms are mixed into one export.
    include_header = settings.CSV_EXPORT_INCLUDE_HEADER and distinct_forms == 1
    include_form = settings.CSV_EXPORT_INCLUDE_FORM and distinct_forms > 1
    if queryset.count():
        fields = queryset[0].form_definition.get_field_dict()
        if include_header:
            header = []
            if include_form:
                header.append(_('Form'))
            if include_created:
                header.append(_('Created'))
            if include_pk:
                header.append(_('ID'))
            # Form fields might have been changed and not match
            # existing form logs anymore.
            # Hence, use current form definition for header.
            header.extend(field.label if field.label else field.key
                          for field in fields.values())
            self.writerow([smart_str(cell, encoding=settings.CSV_EXPORT_ENCODING)
                           for cell in header])
        for entry in queryset:
            row = []
            if include_form:
                row.append(entry.form_definition)
            if include_created:
                row.append(entry.created)
            if include_pk:
                row.append(entry.pk)
            row.extend(
                smart_str(friendly(item['value'],
                                   null_value=settings.CSV_EXPORT_NULL_VALUE),
                          encoding=settings.CSV_EXPORT_ENCODING)
                for item in entry.data)
            self.writerow(row)
    self.close()
    return self.response
0
Example 37
Project: tendenci Source File: tasks.py
def run(self, **kwargs):
    """Export all active events (with place, config, speakers, organizers
    and pricings) into a flat, denormalized 'events.csv' file.

    Variable-length related sets (speakers, organizers, pricings) are
    padded to the site-wide maximum so every row has the same width.
    """
    # Scalar event columns, exported in this exact order.
    event_fields = [
        'entity',
        'type',
        'title',
        'description',
        'all_day',
        'start_dt',
        'end_dt',
        'timezone',
        'private_slug',
        'password',
        'on_weekend',
        'external_url',
        'image',
        'tags',
        'allow_anonymous_view',
        'allow_user_view',
        'allow_member_view',
        'allow_user_edit',
        'allow_member_edit',
        'create_dt',
        'update_dt',
        'creator',
        'creator_username',
        'owner',
        'owner_username',
        'status',
        'status_detail',
    ]
    place_fields = [
        'name',
        'description',
        'address',
        'city',
        'state',
        'zip',
        'country',
        'url',
    ]
    configuration_fields = [
        'payment_method',
        'payment_required',
        'limit',
        'enabled',
        'is_guest_price',
        'use_custom_reg_form',
        'reg_form',
        'bind_reg_form_to_conf_only',
    ]
    speaker_fields = [
        'name',
        'description',
    ]
    organizer_fields = [
        'name',
        'description',
    ]
    pricing_fields = [
        'title',
        'quantity',
        'group',
        'price',
        'reg_form',
        'start_dt',
        'end_dt',
        'allow_anonymous',
        'allow_user',
        'allow_member',
        'status',
    ]
    events = Event.objects.filter(status=True)
    # Widest related sets across all events; used to pad short rows.
    max_speakers = events.annotate(num_speakers=Count('speaker')).aggregate(Max('num_speakers'))['num_speakers__max']
    max_organizers = events.annotate(num_organizers=Count('organizer')).aggregate(Max('num_organizers'))['num_organizers__max']
    max_pricings = events.annotate(num_pricings=Count('registration_configuration__regconfpricing')).aggregate(Max('num_pricings'))['num_pricings__max']
    file_name = 'events.csv'
    data_row_list = []
    for event in events:
        data_row = []
        # event setup
        event_d = full_model_to_dict(event, fields=event_fields)
        for field in event_fields:
            value = None
            # entity/type are FKs: export a human-readable name instead
            # of the raw model dict value.
            if field == 'entity':
                if event.entity:
                    value = event.entity.entity_name
            elif field == 'type':
                if event.type:
                    value = event.type.name
            elif field in event_d:
                value = event_d[field]
            # Flatten newlines so every value stays on one CSV line.
            value = unicode(value).replace(os.linesep, ' ').rstrip()
            data_row.append(value)
        if event.place:
            # place setup
            place_d = full_model_to_dict(event.place)
            for field in place_fields:
                value = place_d[field]
                value = unicode(value).replace(os.linesep, ' ').rstrip()
                data_row.append(value)
        if event.registration_configuration:
            # config setup
            conf_d = full_model_to_dict(event.registration_configuration)
            for field in configuration_fields:
                if field == "payment_method":
                    # M2M field: export the related queryset's repr.
                    value = event.registration_configuration.payment_method.all()
                else:
                    value = conf_d[field]
                value = unicode(value).replace(os.linesep, ' ').rstrip()
                data_row.append(value)
        if event.speaker_set.all():
            # speaker setup
            for speaker in event.speaker_set.all():
                speaker_d = full_model_to_dict(speaker)
                for field in speaker_fields:
                    value = speaker_d[field]
                    value = unicode(value).replace(os.linesep, ' ').rstrip()
                    data_row.append(value)
        # fill out the rest of the speaker columns
        if event.speaker_set.all().count() < max_speakers:
            for i in range(0, max_speakers - event.speaker_set.all().count()):
                for field in speaker_fields:
                    data_row.append('')
        if event.organizer_set.all():
            # organizer setup
            for organizer in event.organizer_set.all():
                organizer_d = full_model_to_dict(organizer)
                for field in organizer_fields:
                    value = organizer_d[field]
                    value = unicode(value).replace(os.linesep, ' ').rstrip()
                    data_row.append(value)
        # fill out the rest of the organizer columns
        if event.organizer_set.all().count() < max_organizers:
            for i in range(0, max_organizers - event.organizer_set.all().count()):
                for field in organizer_fields:
                    data_row.append('')
        reg_conf = event.registration_configuration
        if reg_conf and reg_conf.regconfpricing_set.all():
            # pricing setup
            for pricing in reg_conf.regconfpricing_set.all():
                pricing_d = full_model_to_dict(pricing)
                for field in pricing_fields:
                    value = pricing_d[field]
                    value = unicode(value).replace(os.linesep, ' ').rstrip()
                    data_row.append(value)
        # fill out the rest of the pricing columns
        if reg_conf and reg_conf.regconfpricing_set.all().count() < max_pricings:
            for i in range(0, max_pricings - reg_conf.regconfpricing_set.all().count()):
                for field in pricing_fields:
                    data_row.append('')
        data_row_list.append(data_row)
    # Build the header: fixed columns, then one numbered group per
    # speaker/organizer/pricing slot, mirroring the row layout above.
    fields = event_fields + ["place %s" % f for f in place_fields]
    fields = fields + ["config %s" % f for f in configuration_fields]
    for i in range(0, max_speakers):
        fields = fields + ["speaker %s %s" % (i, f) for f in speaker_fields]
    for i in range(0, max_organizers):
        fields = fields + ["organizer %s %s" % (i, f) for f in organizer_fields]
    for i in range(0, max_pricings):
        fields = fields + ["pricing %s %s" % (i, f) for f in pricing_fields]
    return render_csv(file_name, fields, data_row_list)
0
Example 38
Project: tendenci Source File: tasks.py
def run(self, **kwargs):
    """Export all active forms (with their fields and pricings) into a
    flat, denormalized 'forms.csv' file.

    Variable-length related sets (fields, pricings) are padded to the
    site-wide maximum so every row has the same width.
    """
    # Scalar form columns, exported in this exact order.
    form_fields = [
        'title',
        'slug',
        'intro',
        'response',
        'email_text',
        'subject_template',
        'send_email',
        'email_from',
        'email_copies',
        'completion_url',
        'custom_payment',
        'payment_methods',
        'allow_anonymous_view',
        'allow_user_view',
        'allow_member_view',
        'allow_user_edit',
        'allow_member_edit',
        'create_dt',
        'update_dt',
        'creator',
        'creator_username',
        'owner',
        'owner_username',
        'status',
        'status_detail',
    ]
    field_fields = [
        'label',
        'field_type',
        'field_function',
        'required',
        'visible',
        'choices',
        'position',
        'default',
    ]
    pricing_fields = [
        'label',
        'price',
    ]
    forms = Form.objects.filter(status=True)
    # Widest related sets across all forms; used to pad short rows.
    max_fields = forms.annotate(num_fields=Count('fields')).aggregate(Max('num_fields'))['num_fields__max']
    max_pricings = forms.annotate(num_pricings=Count('pricing')).aggregate(Max('num_pricings'))['num_pricings__max']
    file_name = 'forms.csv'
    data_row_list = []
    for form in forms:
        data_row = []
        # form setup
        form_d = full_model_to_dict(form)
        for field in form_fields:
            if field == 'payment_methods':
                # M2M field: export the human-readable method names.
                value = [m.human_name for m in form.payment_methods.all()]
            else:
                value = form_d[field]
            # Flatten newlines so every value stays on one CSV line.
            value = unicode(value).replace(os.linesep, ' ').rstrip()
            data_row.append(value)
        if form.fields.all():
            # field setup
            for field in form.fields.all():
                field_d = full_model_to_dict(field)
                for f in field_fields:
                    value = field_d[f]
                    value = unicode(value).replace(os.linesep, ' ').rstrip()
                    data_row.append(value)
        # fill out the rest of the field columns
        if form.fields.all().count() < max_fields:
            for i in range(0, max_fields - form.fields.all().count()):
                for f in field_fields:
                    data_row.append('')
        if form.pricing_set.all():
            # field setup
            for pricing in form.pricing_set.all():
                pricing_d = full_model_to_dict(pricing)
                for f in pricing_fields:
                    value = pricing_d[f]
                    value = unicode(value).replace(os.linesep, ' ').rstrip()
                    data_row.append(value)
        # fill out the rest of the field columns
        if form.pricing_set.all().count() < max_pricings:
            for i in range(0, max_pricings - form.pricing_set.all().count()):
                for f in pricing_fields:
                    data_row.append('')
        data_row_list.append(data_row)
    # Build the header: fixed columns, then one numbered group per
    # field/pricing slot, mirroring the row layout above.
    fields = form_fields
    for i in range(0, max_fields):
        fields = fields + ["field %s %s" % (i, f) for f in field_fields]
    for i in range(0, max_pricings):
        fields = fields + ["pricing %s %s" % (i, f) for f in pricing_fields]
    return render_csv(file_name, fields, data_row_list)
0
Example 39
Project: SchoolIdolAPI Source File: generate_settings.py
def generate_settings(opt={}):
    """Regenerate schoolidolapi/generated_settings.py.

    Precomputes expensive site-wide statistics (donator count, current
    contests/events, user age histogram, card metadata) and writes them
    out as a Python module so page views can import them cheaply.
    Python 2 code (print statements, unicode()).

    NOTE(review): the ``opt`` mutable default argument is never used in
    this body; kept for interface compatibility.
    """
    print 'Get total donators'
    # Donators are users with a non-empty status string.
    total_donators = unicode(models.UserPreferences.objects.filter(status__isnull=False).exclude(status__exact='').count())
    print 'Check the current contest'
    current_contests = get_current_contests()
    if not current_contests:
        # Fallback placeholder shown when no contest is running.
        current_contests = [{
            'url': 'http://schoolido.lu/contest/',
            'image': 'http://i.schoolido.lu/static/currentcontest_no.png',
            'homepage_image': 'http://i.schoolido.lu/static/currentcontest_no.png',
            'name': None,
        }]
    else:
        # Each contest gets its own image, falling back to a generic one.
        current_contests = [{
            'url': 'http://schoolido.lu/contest/' + str(current_contest.id) + '/' + tourldash(current_contest.name) + '/',
            'image': (u'%s%s' % (settings.IMAGES_HOSTING_PATH, current_contest.image)) if current_contest.image else 'http://i.schoolido.lu/static/currentcontest.png',
            'homepage_image': (u'%s%s' % (settings.IMAGES_HOSTING_PATH, current_contest.homepage_image)) if current_contest.homepage_image else ((u'%s%s' % (settings.IMAGES_HOSTING_PATH, current_contest.image)) if current_contest.image else 'http://i.schoolido.lu/static/currentcontest.png'),
            'name': current_contest.name,
        } for current_contest in current_contests]
    print 'Check the current events'
    try:
        try:
            # NOTE(review): filters end__lte=now, i.e. the most recently
            # *ended* event — confirm this is the intended "current" event.
            current_jp = models.Event.objects.filter(end__lte=timezone.now()).order_by('-beginning')[0]
        except IndexError:
            current_jp = models.Event.objects.order_by('-beginning')[0]
        current_jp = {
            'japanese_name': current_jp.japanese_name,
            'slide_position': len(current_contests) + 1,
            'image': '{}{}'.format(settings.IMAGES_HOSTING_PATH, current_jp.image),
        }
    except:
        # Best-effort: no event data at all is acceptable.
        current_jp = None
    try:
        try:
            current_en = models.Event.objects.filter(english_beginning__isnull=False).filter(end__lte=timezone.now()).order_by('-english_beginning')[0]
        except IndexError:
            current_en = models.Event.objects.filter(english_beginning__isnull=False).order_by('-english_beginning')[0]
        current_en = {
            'japanese_name': current_en.japanese_name,
            'slide_position': len(current_contests),
            'image': '{}{}'.format(settings.IMAGES_HOSTING_PATH, current_en.english_image if current_en.english_image else current_en.image),
        }
    except:
        current_en = None
    print 'Get ages'
    # Pre-seed 10-29 with zeros so the histogram has no gaps in that range.
    ages = {}
    for i in range(10,30):
        ages[i] = 0
    prefs = models.UserPreferences.objects.filter(birthdate__isnull=False)
    total_ages = prefs.count()
    for p in prefs:
        age = p.age
        # Discard implausible ages (kept as stored: 1..87 exclusive bounds).
        if age > 0 and age < 88:
            if age in ages:
                ages[age] += 1
            else:
                ages[age] = 1
    ages = OrderedDict(sorted(ages.items()))
    print 'Get cardsinfo dictionary'
    # Serialized (via unicode()) into the generated module verbatim.
    cards_info = unicode({
        'max_stats': {
            'Smile': models.Card.objects.order_by('-idolized_maximum_statistics_smile')[:1][0].idolized_maximum_statistics_smile,
            'Pure': models.Card.objects.order_by('-idolized_maximum_statistics_pure')[:1][0].idolized_maximum_statistics_pure,
            'Cool': models.Card.objects.order_by('-idolized_maximum_statistics_cool')[:1][0].idolized_maximum_statistics_cool,
        },
        'songs_max_stats': models.Song.objects.order_by('-expert_notes')[0].expert_notes,
        'idols': ValuesQuerySetToDict(models.Card.objects.values('name', 'idol__japanese_name').annotate(total=Count('name')).order_by('-total', 'name')),
        'sub_units': [card['sub_unit'] for card in models.Idol.objects.filter(sub_unit__isnull=False).values('sub_unit').distinct()],
        'years': [idol['year'] for idol in models.Idol.objects.filter(year__isnull=False).values('year').distinct()],
        'schools': [idol['school'] for idol in models.Idol.objects.filter(school__isnull=False).values('school').distinct()],
        'collections': ValuesQuerySetToDict(models.Card.objects.filter(japanese_collection__isnull=False).exclude(japanese_collection__exact='').values('japanese_collection').annotate(total=Count('name')).order_by('-total', 'japanese_collection')),
        'translated_collections': ValuesQuerySetToDict(models.Card.objects.filter(translated_collection__isnull=False).exclude(translated_collection__exact='').values('translated_collection').annotate(total=Count('name')).order_by('-total', 'translated_collection')),
        'skills': ValuesQuerySetToDict(models.Card.objects.filter(skill__isnull=False).values('skill').annotate(total=Count('skill')).order_by('-total')),
        'total_cards': models.Card.objects.order_by('-id')[0].id,
        'en_cards': [int(c.id) for c in models.Card.objects.filter(japan_only=False)],
    })
    print 'Save generated settings'
    # Assemble the generated module's source; backslash continuations keep
    # each generated line unindented inside the string literal.
    s = u'\
from collections import OrderedDict\n\
import datetime\n\
TOTAL_DONATORS = ' + total_donators + u'\n\
CURRENT_CONTESTS = ' + unicode(current_contests) + u'\n\
CURRENT_EVENT_JP = ' + unicode(current_jp) + u'\n\
CURRENT_EVENT_EN = ' + unicode(current_en) + u'\n\
USERS_AGES = ' + unicode(ages) + u'\n\
USERS_TOTAL_AGES = ' + unicode(total_ages) + u'\n\
GENERATED_DATE = datetime.datetime.fromtimestamp(' + unicode(time.time()) + u')\n\
CARDS_INFO = ' + cards_info + u'\n\
'
    print s
    f = open('schoolidolapi/generated_settings.py', 'w')
    print >> f, s
    f.close()
0
Example 40
Project: tendenci Source File: tasks.py
def run(self, **kwargs):
    """Export all active navs (with their nav items) into a flat,
    denormalized 'navs.csv' file.

    The variable-length nav-item set is padded to the site-wide maximum
    so every row has the same width.
    """
    # Scalar nav columns, exported in this exact order.
    nav_fields = [
        'title',
        'description',
        'megamenu',
        'allow_anonymous_view',
        'allow_user_view',
        'allow_member_view',
        'allow_user_edit',
        'allow_member_edit',
        'create_dt',
        'update_dt',
        'creator',
        'creator_username',
        'owner',
        'owner_username',
        'status',
        'status_detail',
    ]
    nav_item_fields = [
        'label',
        'title',
        'new_window',
        'css',
        'position',
        'level',
        'page',
        'url',
    ]
    navs = Nav.objects.filter(status=True)
    # Widest nav-item set across all navs; used to pad short rows.
    max_nav_items = navs.annotate(num_navitems=Count('navitem')).aggregate(Max('num_navitems'))['num_navitems__max']
    file_name = 'navs.csv'
    data_row_list = []
    for nav in navs:
        data_row = []
        # nav setup
        nav_d = full_model_to_dict(nav)
        for field in nav_fields:
            value = nav_d[field]
            # Flatten newlines so every value stays on one CSV line.
            value = unicode(value).replace(os.linesep, ' ').rstrip()
            data_row.append(value)
        if nav.navitem_set.all():
            # nav_item setup
            for nav_item in nav.navitem_set.all():
                nav_item_d = full_model_to_dict(nav_item)
                for field in nav_item_fields:
                    value = nav_item_d[field]
                    value = unicode(value).replace(os.linesep, ' ').rstrip()
                    data_row.append(value)
        # fill out the rest of the nav_item columns
        if nav.navitem_set.all().count() < max_nav_items:
            for i in range(0, max_nav_items - nav.navitem_set.all().count()):
                for field in nav_item_fields:
                    data_row.append('')
        data_row_list.append(data_row)
    # Build the header: fixed columns, then one numbered group per
    # nav-item slot, mirroring the row layout above.
    fields = nav_fields
    for i in range(0, max_nav_items):
        fields = fields + ["nav_item %s %s" % (i, f) for f in nav_item_fields]
    return render_csv(file_name, fields, data_row_list)
0
Example 41
Project: ion Source File: serializers.py
@transaction.atomic
def fetch_activity_list_with_metadata(self, block):
    """Build the activity list for an eighth-period block, with rosters,
    sponsors and rooms resolved in bulk.

    Returns a FallbackDict keyed by activity id. Uses batched queries
    (values_list / annotate / through-table lookups) instead of
    per-activity queries to keep the SQL count low. Scheduled-activity
    overrides (sponsors, rooms, capacity) replace the activity defaults.
    """
    user = self.context.get("user", self.context["request"].user)
    favorited_activities = set(user.favorited_activity_set.values_list("id", flat=True))
    available_restricted_acts = EighthActivity.restricted_activities_available_to_user(user)
    # FallbackDicts lazily compute entries for ids not filled in below.
    activity_list = FallbackDict(functools.partial(self.get_activity, user, favorited_activities, available_restricted_acts))
    scheduled_activity_to_activity_map = FallbackDict(self.get_scheduled_activity)
    # Find all scheduled activities that don't correspond to
    # deleted activities
    scheduled_activities = (block.eighthscheduledactivity_set.exclude(activity__deleted=True).select_related("activity"))
    for scheduled_activity in scheduled_activities:
        # Avoid re-fetching scheduled_activity.
        activity_info = self.get_activity(user, favorited_activities, available_restricted_acts, None, scheduled_activity)
        activity = scheduled_activity.activity
        scheduled_activity_to_activity_map[scheduled_activity.id] = activity.id
        activity_list[activity.id] = activity_info
    # Find the number of students signed up for every activity
    # in this block
    activities_with_signups = (EighthSignup.objects.filter(scheduled_activity__block=block).exclude(
        scheduled_activity__activity__deleted=True).values_list("scheduled_activity__activity_id")
        .annotate(user_count=Count("scheduled_activity")))
    for activity, user_count in activities_with_signups:
        activity_list[activity]["roster"]["count"] = user_count
    # One bulk fetch of all sponsors, keyed by sponsor id.
    sponsors_dict = (EighthSponsor.objects.values_list("id", "user_id", "first_name", "last_name", "show_full_name"))
    all_sponsors = dict((sponsor[0], {"user_id": sponsor[1],
                                      "name": sponsor[2] + " " + sponsor[3] if sponsor[4] else sponsor[3]}) for sponsor in sponsors_dict)
    activity_ids = scheduled_activities.values_list("activity__id")
    # Default sponsors come from the activity's M2M through table ...
    sponsorships = (EighthActivity.sponsors.through.objects.filter(eighthactivity_id__in=activity_ids).select_related("sponsors").values(
        "eighthactivity", "eighthsponsor"))
    scheduled_activity_ids = scheduled_activities.values_list("id", flat=True)
    # ... and can be overridden per scheduled activity.
    overidden_sponsorships = (EighthScheduledActivity.sponsors.through.objects.filter(
        eighthscheduledactivity_id__in=scheduled_activity_ids).values("eighthscheduledactivity", "eighthsponsor"))
    for sponsorship in sponsorships:
        activity_id = int(sponsorship["eighthactivity"])
        sponsor_id = sponsorship["eighthsponsor"]
        sponsor = all_sponsors[sponsor_id]
        if sponsor["user_id"]:
            # We're not using User.get_user() here since we only want
            # a value from LDAP that is probably already cached.
            # This eliminates several hundred SQL queries on some
            # pages.
            dn = User.dn_from_id(sponsor["user_id"])
            if dn is not None:
                name = User(dn=dn).last_name
            else:
                name = None
        else:
            name = None
        if activity_id in activity_list:
            activity_list[activity_id]["sponsors"].append(sponsor["name"] or name)
    # Overridden sponsors fully replace the defaults, so clear the list
    # the first time an override is seen for an activity.
    activities_sponsors_overidden = []
    for sponsorship in overidden_sponsorships:
        scheduled_activity_id = sponsorship["eighthscheduledactivity"]
        activity_id = scheduled_activity_to_activity_map[scheduled_activity_id]
        sponsor_id = sponsorship["eighthsponsor"]
        sponsor = all_sponsors[sponsor_id]
        if activity_id not in activities_sponsors_overidden:
            activities_sponsors_overidden.append(activity_id)
            del activity_list[activity_id]["sponsors"][:]
        if sponsor["user_id"]:
            # See a few lines up for why we're not using User.get_user()
            dn = User.dn_from_id(sponsor["user_id"])
            if dn is not None:
                name = User(dn=dn).last_name
            else:
                name = None
        else:
            name = None
        activity_list[activity_id]["sponsors"].append(sponsor["name"] or name)
    # Rooms follow the same default/override pattern as sponsors.
    roomings = (EighthActivity.rooms.through.objects.filter(eighthactivity_id__in=activity_ids).select_related("eighthroom", "eighthactivity"))
    overidden_roomings = (EighthScheduledActivity.rooms.through.objects.filter(
        eighthscheduledactivity_id__in=scheduled_activity_ids).select_related("eighthroom", "eighthscheduledactivity"))
    for rooming in roomings:
        activity_id = rooming.eighthactivity.id
        activity_cap = rooming.eighthactivity.default_capacity
        room_name = rooming.eighthroom.name
        activity_list[activity_id]["rooms"].append(room_name)
        if activity_cap:
            # use activity default capacity instead of sum of activity rooms
            activity_list[activity_id]["roster"]["capacity"] = activity_cap
        else:
            activity_list[activity_id]["roster"]["capacity"] += rooming.eighthroom.capacity
    activities_rooms_overidden = []
    for rooming in overidden_roomings:
        scheduled_activity_id = rooming.eighthscheduledactivity.id
        activity_id = scheduled_activity_to_activity_map[scheduled_activity_id]
        if activity_id not in activities_rooms_overidden:
            # First override for this activity: reset rooms and capacity.
            activities_rooms_overidden.append(activity_id)
            del activity_list[activity_id]["rooms"][:]
            activity_list[activity_id]["roster"]["capacity"] = 0
        room_name = rooming.eighthroom.name
        activity_list[activity_id]["rooms"].append(room_name)
        activity_list[activity_id]["roster"]["capacity"] += rooming.eighthroom.capacity
    # An explicit per-scheduled-activity capacity wins over everything.
    for scheduled_activity in scheduled_activities:
        if scheduled_activity.capacity is not None:
            capacity = scheduled_activity.capacity
            sched_act_id = scheduled_activity.activity.id
            activity_list[sched_act_id]["roster"]["capacity"] = capacity
    return activity_list
0
Example 42
Project: qualitio Source File: views.py
@permission_required('USER')
@json_response
def testcaserun_setstatus(request, testcaserun_id, **kwargs):
    """Set a test-case run's status from POST data and return JSON.

    On success the response includes the run's new status plus the
    parent test run's updated pass-rate breakdown; on validation
    failure it returns the form's error list.
    """
    testcaserun = TestCaseRun.objects.get(pk=testcaserun_id)
    testcaserun_status_form = forms.TestCaseRunStatus(request.POST, instance=testcaserun)
    if testcaserun_status_form.is_valid():
        testcaserun = testcaserun_status_form.save()
        # Record the status change in the parent test run's history.
        log = history.History(request.user, testcaserun.parent)
        log.add_form(testcaserun_status_form, capture=["status"], prefix=True)
        log.save()
        # TODO: move this to testrun? method. Check also templatetags
        passrate_ratio = []
        testrun = testcaserun.parent
        testcaseruns_count = testrun.testcases.count()
        # Per-status share of all runs in this test run.
        statuses = TestCaseRunStatus.objects.filter(testcaserun__parent=testrun).annotate(count=Count('testcaserun'))
        for status in statuses:
            passrate_ratio.append({
                "ratio": float(status.count) / float(testcaseruns_count) * 100,
                "name": status.name,
                "color": status.color,
            })
        return success(message=testcaserun.status.name,
                       data=dict(id=testcaserun.pk,
                                 status_id=testcaserun.status.id,
                                 name=testcaserun.status.name,
                                 color=testcaserun.status.color,
                                 passrate=testcaserun.parent.passrate,
                                 passrate_ratio=passrate_ratio))
    else:
        return failed(message=testcaserun.status.name,
                      data=testcaserun_status_form.errors_list())
0
Example 43
Project: coursys Source File: views.py
def view_doc(request, doc_slug):
    """Render a documentation page, pre-filling the template context with
    examples drawn from the requesting user's own course offerings.

    Each doc_slug branch tries to pick a realistic course/activity for
    the current user; when none exists it falls back to a sample slug.
    Raises Http404 when no template exists for the slug.
    """
    context = {'BASE_ABS_URL': settings.BASE_ABS_URL}
    # set up useful context variables for this doc
    if doc_slug in ["submission", "pages-api"]:
        instructor = Member.objects.filter(person__userid=request.user.username, offering__graded=True, role__in=["INST","TA"])
        offerings = [m.offering for m in instructor]
        activities = Activity.objects.filter(offering__in=offerings).annotate(Count('submissioncomponent')).order_by('-offering__semester', '-due_date')
        # decorate to prefer (1) submission configured, (2) has due date.
        activities = [(a.submissioncomponent__count==0, not bool(a.due_date), a) for a in activities]
        activities.sort()
        if activities:
            context['activity'] = activities[0][2]
            context['course'] = context['activity'].offering
        elif offerings:
            context['course'] = offerings[0]
        else:
            # No real data for this user: offer a plausible sample slug.
            sem = Semester.current()
            context['cslug'] = sem.slugform() + '-cmpt-001-d1' # a sample contemporary course slug
        context['userid'] = request.user.username or 'userid'
    elif doc_slug == "impersonate":
        instructor = Member.objects.filter(person__userid=request.user.username, offering__graded=True, role__in=["INST","TA"])
        offerings = [(Member.objects.filter(offering=m.offering, role="STUD"), m.offering) for m in instructor]
        # decorate-sort-undecorate: prefer offerings that have students,
        # then the most recent semester.
        offerings = [(students.count()>0, course.semester.name, students, course) for students, course in offerings]
        offerings.sort()
        offerings.reverse()
        if offerings:
            nonempty, semester, students, course = offerings[0]
            context['course'] = course
            if students:
                context['student'] = students[0]
        else:
            sem = Semester.current()
            context['cslug'] = sem.slugform() + '-cmpt-001-d1' # a sample contemporary course slug
    elif doc_slug == "calc_numeric":
        instructor = Member.objects.filter(person__userid=request.user.username, offering__graded=True, role__in=["INST","TA"])
        offering_ids = [m.offering.id for m in instructor]
        offerings = CourseOffering.objects.filter(id__in=offering_ids).annotate(Count('activity'))
        # decorate to prefer (1) recent offerings, (2) many activities
        offerings = [(o.semester, o.activity__count, o) for o in offerings if o.activity__count>0]
        offerings.sort()
        if offerings:
            sem, count, course = offerings[0]
            context['course'] = course
            activities = NumericActivity.objects.filter(offering=course, deleted=False)
            context['activities'] = activities
            # The doc needs two sample activities when available.
            if activities.count() > 1:
                context['act1'] = activities[0]
                context['act2'] = activities[1]
            elif activities.count() > 0:
                context['act1'] = activities[0]
                context['act2'] = None
            else:
                context['act1'] = None
                context['act2'] = None
        else:
            context['course'] = None
            context['act1'] = None
            context['act2'] = None
    elif doc_slug == "search":
        context['two_years'] = datetime.date.today().year - 2
    # Template name is derived from the slug; a missing template means
    # the requested doc simply doesn't exist.
    try:
        res = render(request, "docs/doc_" + doc_slug + ".html", context)
    except TemplateDoesNotExist:
        raise Http404
    return res
0
Example 44
Project: ion Source File: attendance.py
@eighth_admin_required
def delinquent_students_view(request):
    """List students whose eighth-period absences fall between the ``lower`` and
    ``upper`` GET limits, optionally restricted by grade and date range.

    Renders an HTML admin page when reached via the
    ``eighth_admin_view_delinquent_students`` URL, otherwise a CSV download.
    """
    lower_absence_limit = request.GET.get("lower", "")
    upper_absence_limit = request.GET.get("upper", "")
    include_freshmen = (request.GET.get("freshmen", "off") == "on")
    # NOTE(review): "sophumores" is misspelled, but the spelling is part of the
    # query-string and template contract, so it is preserved throughout.
    include_sophumores = (request.GET.get("sophumores", "off") == "on")
    include_juniors = (request.GET.get("juniors", "off") == "on")
    include_seniors = (request.GET.get("seniors", "off") == "on")
    # With no query string at all, default to including every grade.
    if not request.META["QUERY_STRING"]:
        include_freshmen = True
        include_sophumores = True
        include_juniors = True
        include_seniors = True
    start_date = request.GET.get("start", "")
    end_date = request.GET.get("end", "")
    # Non-numeric limits fall back to defaults (1..1000); the raw value is
    # blanked so the form redisplays empty.
    if not lower_absence_limit.isdigit():
        lower_absence_limit = ""
        lower_absence_limit_filter = 1
    else:
        lower_absence_limit_filter = lower_absence_limit
    if not upper_absence_limit.isdigit():
        upper_absence_limit = ""
        upper_absence_limit_filter = 1000
    else:
        upper_absence_limit_filter = upper_absence_limit
    # Unparseable dates widen the range to (effectively) all time.
    try:
        start_date = datetime.strptime(start_date, "%Y-%m-%d")
        start_date_filter = start_date
    except ValueError:
        start_date = ""
        start_date_filter = date(MINYEAR, 1, 1)
    try:
        end_date = datetime.strptime(end_date, "%Y-%m-%d")
        end_date_filter = end_date
    except ValueError:
        end_date = ""
        end_date_filter = date(MAXYEAR, 12, 31)
    context = {
        "lower_absence_limit": lower_absence_limit,
        "upper_absence_limit": upper_absence_limit,
        "include_freshmen": include_freshmen,
        "include_sophumores": include_sophumores,
        "include_juniors": include_juniors,
        "include_seniors": include_seniors,
        "start_date": start_date,
        "end_date": end_date
    }
    query_params = ["lower", "upper", "freshmen", "sophumores", "juniors", "seniors", "start", "end"]
    # Only run the (expensive) report when at least one known parameter was
    # actually supplied; otherwise the page shows the empty form.
    if set(request.GET.keys()).intersection(set(query_params)):
        # attendance MUST have been taken on the activity for the absence to be valid
        non_delinquents = []
        delinquents = []
        # A limit of 0 means "also include students with zero absences":
        # compute that set as all students minus those with any absence.
        if int(upper_absence_limit_filter) == 0 or int(lower_absence_limit_filter) == 0:
            users_with_absence = (EighthSignup.objects.filter(
                was_absent=True, scheduled_activity__attendance_taken=True, scheduled_activity__block__date__gte=start_date_filter,
                scheduled_activity__block__date__lte=end_date_filter).values("user").annotate(absences=Count("user")).filter(absences__gte=1)
                .values("user", "absences").order_by("user"))
            uids_with_absence = [row["user"] for row in users_with_absence]
            all_students = User.objects.get_students().values_list("id")
            uids_all_students = [row[0] for row in all_students]
            uids_without_absence = set(uids_all_students) - set(uids_with_absence)
            users_without_absence = User.objects.filter(id__in=uids_without_absence).order_by("id")
            non_delinquents = []
            for usr in users_without_absence:
                non_delinquents.append({"absences": 0, "user": usr})
            logger.debug(non_delinquents)
        if int(upper_absence_limit_filter) > 0:
            delinquents = (EighthSignup.objects.filter(
                was_absent=True, scheduled_activity__attendance_taken=True, scheduled_activity__block__date__gte=start_date_filter,
                scheduled_activity__block__date__lte=end_date_filter).values("user").annotate(absences=Count("user"))
                .filter(absences__gte=lower_absence_limit_filter,
                        absences__lte=upper_absence_limit_filter).values("user", "absences").order_by("user"))
            user_ids = [d["user"] for d in delinquents]
            delinquent_users = User.objects.filter(id__in=user_ids).order_by("id")
            # Replace user ids with User objects in-place; both querysets are
            # ordered by user id, so the positions line up.
            for index, user in enumerate(delinquent_users):
                delinquents[index]["user"] = user
            logger.debug(delinquents)
        delinquents = list(delinquents)
        delinquents += non_delinquents

        def filter_by_grade(delinquent):
            # Keep only students in a grade the caller asked to include.
            grade = delinquent["user"].grade.number
            include = False
            if include_freshmen:
                include |= (grade == 9)
            if include_sophumores:
                include |= (grade == 10)
            if include_juniors:
                include |= (grade == 11)
            if include_seniors:
                include |= (grade == 12)
            return include

        delinquents = list(filter(filter_by_grade, delinquents))
        # most absences at top
        delinquents = sorted(delinquents, key=lambda x: (-1 * x["absences"], x["user"].last_name))
        logger.debug(delinquents)
    else:
        # No report parameters supplied.
        delinquents = None
    context["delinquents"] = delinquents
    if request.resolver_match.url_name == "eighth_admin_view_delinquent_students":
        context["admin_page_title"] = "Delinquent Students"
        return render(request, "eighth/admin/delinquent_students.html", context)
    else:
        # Any other URL name serves the same data as a CSV attachment.
        response = http.HttpResponse(content_type="text/csv")
        response["Content-Disposition"] = "attachment; filename=\"delinquent_students.csv\""
        writer = csv.writer(response)
        writer.writerow(["Start Date", "End Date", "Absences", "Last Name", "First Name", "Student ID", "Grade", "Counselor", "TJ Email",
                         "Other Email"])
        for delinquent in delinquents:
            row = []
            row.append(str(start_date).split(" ", 1)[0])
            row.append(str(end_date).split(" ", 1)[0])
            row.append(delinquent["absences"])
            row.append(delinquent["user"].last_name)
            row.append(delinquent["user"].first_name)
            row.append(delinquent["user"].student_id)
            row.append(delinquent["user"].grade.number)
            counselor = delinquent["user"].counselor
            row.append(counselor.last_name if counselor else "")
            row.append("{}".format(delinquent["user"].tj_email))
            row.append(delinquent["user"].emails[0] if delinquent["user"].emails and len(delinquent["user"].emails) > 0 else "")
            writer.writerow(row)
        return response
Example 45 (0 votes) — Project: baruwa, Source File: sendpdfreports.py
def handle(self, *args, **options):
    """Build and email PDF usage reports (Python 2 management command).

    Depending on the options, reports are generated either per domain
    (``--by-domain`` / ``--domain-name``) or per user profile with
    ``send_report`` enabled.  Each report contains top-sender/recipient
    charts and message totals, rendered with ReportLab and emailed as a
    PDF attachment.
    """
    if len(args) != 0:
        raise CommandError(_("Command doesn't accept any arguments"))
    by_domain = options.get('by_domain')
    domain_name = options.get('domain_name')
    copy_admin = options.get('copy_admin')
    period = options.get('period')
    include_daily = options.get('include_daily')
    startdate = options.get('startdate')
    end_date = options.get('enddate')
    enddate = None
    # Explicit date range takes precedence; both bounds must validate.
    if startdate and end_date:
        if not checkdate(startdate) or not checkdate(end_date):
            raise CommandError(_("The startdate, enddate specified is invalid"))
        daterange = (startdate, end_date)
    else:
        daterange = None
    # Accepts e.g. "2 weeks", "1 day", "3 months" (trailing "s" optional).
    period_re = re.compile(r"(?P<num>(\d+))\s+(?P<period>(day|week|month))(?:s)?")
    if period:
        match = period_re.match(period)
        if not match:
            raise CommandError(_("The period you specified is invalid"))
        num = match.group('num')
        ptype = match.group('period')
        if not ptype.endswith('s'):
            # timedelta keyword args are plural (days/weeks); months is NOT a
            # valid timedelta keyword -- NOTE(review): "3 months" would raise
            # TypeError here; confirm intended behaviour.
            ptype = ptype + 's'
        delta = datetime.timedelta(**{ptype: int(num)})
        enddate = datetime.date.today() - delta
    # Shared ReportLab table style for all chart tables.
    table_style = TableStyle([
        ('FONT', (0, 0), (-1, -1), 'Helvetica'),
        ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
        ('FONTSIZE', (0, 0), (-1, -1), 8),
        ('GRID', (0, 0), (-1, -1), 0.15, colors.black),
        ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
        ('ALIGN', (4, 1), (-1, -1), 'CENTER'),
        ('ALIGN', (0, 0), (0, -1), 'CENTER'),
        ('VALIGN', (4, 1), (-1, -1), 'MIDDLE'),
        ('SPAN', (4, 1), (-1, -1)),
    ])
    styles = getSampleStyleSheet()
    # Each report spec: [group-by column, exclude kwargs, ordering metric, title].
    reports = [
        [
            'from_address', {'from_address__exact': ""}, 'num_count',
            'Top senders by quantity'],
        [
            'from_address', {'from_address__exact': ""}, 'total_size',
            'Top senders by volume'],
        [
            'from_domain', {'from_domain__exact': ""}, 'num_count',
            'Top sender domains by quantity'],
        [
            'from_domain', {'from_domain__exact': ""}, 'total_size',
            'Top sender domains by volume'],
        [
            'to_address', {'to_address__exact': ""}, 'num_count',
            'Top recipients by quantity'],
        [
            'to_address', {'to_address__exact': ""}, 'total_size',
            'Top recipients by volume'],
        [
            'to_domain', {'to_domain__exact': "",
            'to_domain__isnull': False}, 'num_count',
            'Top recipient domains by quantity'],
        [
            'to_domain', {'to_domain__exact': "",
            'to_domain__isnull': False}, 'total_size',
            'Top recipient domains by volume'],
    ]
    emails = []
    admin_addrs = []
    if copy_admin:
        # CC every superuser on each report email.
        mails = User.objects.values('email').filter(is_superuser=True)
        admin_addrs = [mail['email'] for mail in mails]
    from_email = getattr(settings, 'DEFAULT_FROM_EMAIL',
                         'postmaster@localhost')
    url = getattr(settings, 'QUARANTINE_REPORT_HOSTURL', '')
    logo_dir = getattr(settings, 'MEDIA_ROOT', '')
    img = Image(logo_dir + '/imgs/css/logo.jpg')

    def build_chart(data, column, order, title):
        "build chart"
        # One heading row, ten data rows (padded with blanks), plus a pie
        # chart spanned into the last column (see 'SPAN' in table_style).
        headings = [('', _('Address'), _('Count'), _('Volume'), '')]
        rows = [[draw_square(PIE_CHART_COLORS[index]),
                 tds_trunc(row[column], 45), row['num_count'],
                 filesizeformat(row['total_size']), '']
                for index, row in enumerate(data)]
        if len(rows) != 10:
            missing = 10 - len(rows)
            add_rows = [
                ('', '', '', '', '') for ind in range(missing)
            ]
            rows.extend(add_rows)
        headings.extend(rows)
        dat = [row[order] for row in data]
        total = sum(dat)
        labels = [
            ("%.1f%%" % ((1.0 * row[order] / total) * 100))
            for row in data
        ]
        pie = PieChart()
        pie.chart.labels = labels
        pie.chart.data = dat
        # Place the pie chart in the first data row's spanned cell.
        headings[1][4] = pie
        table_with_style = Table(headings, [0.2 * inch,
                                 2.8 * inch, 0.5 * inch, 0.7 * inch, 3.2 * inch])
        table_with_style.setStyle(table_style)
        paragraph = Paragraph(title, styles['Heading1'])
        return [paragraph, table_with_style]

    def build_parts(account, enddate, isdom=None, daterange=None):
        "build parts"
        # Builds the full flowable list for one account (domain or user).
        # NOTE(review): in the non-domain branch this closure reads `user` and
        # `profile` from the enclosing scope, not `account`; they are only
        # bound inside the per-profile loop below -- confirm intended.
        parts = []
        sentry = 0
        for report in reports:
            column = report[0]
            exclude_kwargs = report[1]
            order_by = "-%s" % report[2]
            order = report[2]
            title = report[3]
            if isdom:
                #dom
                data = Message.objects.values(column).\
                    filter(Q(from_domain=account.address) | \
                    Q(to_domain=account.address)).\
                    exclude(**exclude_kwargs).annotate(
                        num_count=Count(column), total_size=Sum('size')
                    ).order_by(order_by)
                if daterange:
                    # NOTE(review): QuerySet.filter returns a NEW queryset;
                    # the result is discarded here, so these date filters have
                    # no effect -- likely should be `data = data.filter(...)`.
                    data.filter(date__range=(daterange[0], daterange[1]))
                elif enddate:
                    data.filter(date__gt=enddate)
                data = data[:10]
            else:
                #all users
                data = Message.report.all(user, enddate, daterange).values(
                    column).exclude(**exclude_kwargs).annotate(
                    num_count=Count(column), total_size=Sum('size')
                ).order_by(order_by)
                data = data[:10]
            if data:
                sentry += 1
                pgraphs = build_chart(data, column, order, title)
                parts.extend(pgraphs)
                parts.append(Spacer(1, 70))
                # Page break after every second populated chart.
                if (sentry % 2) == 0:
                    parts.append(PageBreak())
        parts.append(Paragraph(_('Message Totals'), styles['Heading1']))
        if isdom:
            #doms
            msg_totals = MessageTotals.objects.doms(account.address, enddate)
        else:
            #norm
            filters = []
            addrs = [
                addr.address for addr in UserAddresses.objects.filter(
                    user=account
                ).exclude(enabled__exact=0)]
            if enddate:
                efilter = {
                    'filter': 3,
                    'field': 'date',
                    'value': str(enddate)
                }
                filters.append(efilter)
            msg_totals = MessageTotals.objects.all(
                account, filters, addrs,
                profile.account_type,
                daterange)
        mail_total = []
        spam_total = []
        virus_total = []
        dates = []
        if include_daily:
            # Legend row for the optional per-day totals table.
            rows = [(
                Table([[draw_square(colors.white),
                        Paragraph('Date', styles["Heading6"])]],
                      [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.green),
                        Paragraph('Mail totals', styles["Heading6"])]],
                      [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.pink),
                        Paragraph('Spam totals', styles["Heading6"])]],
                      [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.red),
                        Paragraph('Virus totals', styles["Heading6"])]],
                      [0.35 * inch, 1.50 * inch, ]),
            )]
        for ind, msgt in enumerate(msg_totals):
            # Label only every 10th tick to keep the axis readable.
            if ind % 10:
                dates.append('')
            else:
                dates.append(str(msgt.date))
            mail_total.append(int(msgt.mail_total))
            spam_total.append(int(msgt.spam_total))
            virus_total.append(int(msgt.virus_total))
            if include_daily:
                rows.append((str(msgt.date), msgt.mail_total,
                             msgt.spam_total, msgt.virus_total))
        graph = BarChart()
        graph.chart.data = [
            tuple(mail_total), tuple(spam_total),
            tuple(virus_total)
        ]
        graph.chart.categoryAxis.categoryNames = dates
        graph_table = Table([[graph]], [7.4 * inch])
        parts.append(graph_table)
        if include_daily:
            rows.append(('Totals', sum(mail_total), sum(spam_total),
                         sum(virus_total)))
            parts.append(Spacer(1, 20))
            graph_table = Table(rows, [1.85 * inch, 1.85 * inch,
                                       1.85 * inch, 1.85 * inch, ])
            graph_table.setStyle(TableStyle([
                ('FONTSIZE', (0, 0), (-1, -1), 8),
                ('FONT', (0, 0), (-1, -1), 'Helvetica'),
                ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
                ('GRID', (0, 0), (-1, -1), 0.15, colors.black),
                ('FONT', (0, -1), (-1, -1), 'Helvetica-Bold'),
                #('BACKGROUND', (0, -1), (-1, -1), colors.green),
            ]))
            parts.append(graph_table)
        return parts

    def build_pdf(charts):
        "Build a PDF"
        # Renders the flowables into an in-memory PDF (Python 2 StringIO).
        pdf = StringIO()
        doc = SimpleDocTemplate(pdf, topMargin=50, bottomMargin=18)
        logo = [(img, _('Baruwa mail report'))]
        logo_table = Table(logo, [2.0 * inch, 5.4 * inch])
        logo_table.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('ALIGN', (0, 0), (-1, 0), 'LEFT'),
            ('ALIGN', (1, 0), (-1, 0), 'RIGHT'),
            ('FONTSIZE', (1, 0), (-1, 0), 10),
            ('LINEBELOW', (0, 0), (-1, -1), 0.15, colors.black),
        ]))
        parts = [logo_table]
        parts.append(Spacer(1, 20))
        parts.extend(charts)
        try:
            doc.build(parts)
        except IndexError:
            # NOTE(review): swallows ReportLab IndexErrors and returns a
            # possibly-empty PDF -- confirm this best-effort is intended.
            pass
        return pdf

    def gen_email(pdf, user, owner):
        "generate and return email"
        # Prefer user.email over user.username as recipient when both look
        # like email addresses.
        text_content = render_to_string('reports/pdf_report.txt',
                                        {'user': user, 'url': url})
        subject = _('Baruwa usage report for: %(user)s') % {
            'user': owner}
        if email_re.match(user.username):
            toaddr = user.username
        if email_re.match(user.email):
            toaddr = user.email
        if admin_addrs:
            msg = EmailMessage(subject, text_content, from_email, [toaddr], admin_addrs)
        else:
            msg = EmailMessage(subject, text_content, from_email, [toaddr])
        msg.attach('baruwa.pdf', pdf.getvalue(), "application/pdf")
        print _("* Queue %(user)s's report to: %(addr)s") % {
            'user': owner, 'addr': toaddr}
        pdf.close()
        return msg

    print _("=================== Processing reports ======================")
    if by_domain:
        #do domain query
        #print "camacamlilone"
        domains = UserAddresses.objects.filter(Q(enabled=1), Q(address_type=1))
        if domain_name != 'all':
            domains = domains.filter(address=domain_name)
            if not domains:
                print _("========== domain name %(dom)s does not exist ==========") % {
                    'dom': domain_name
                }
        for domain in domains:
            # Only send if the domain owner has a valid email address.
            if email_re.match(domain.user.email):
                parts = build_parts(domain, enddate, True, daterange)
                if parts:
                    pdf = build_pdf(parts)
                    email = gen_email(pdf, domain.user, domain.address)
                    emails.append(email)
    else:
        #do normal query
        profiles = UserProfile.objects.filter(send_report=1)
        for profile in profiles:
            try:
                user = profile.user
                if email_re.match(user.email) or email_re.match(user.username):
                    parts = build_parts(user, enddate, False, daterange)
                    if parts:
                        pdf = build_pdf(parts)
                        email = gen_email(pdf, user, user.username)
                        emails.append(email)
            except User.DoesNotExist:
                pass
    # Send everything over a single SMTP connection.
    if emails:
        try:
            conn = SMTPConnection()
            conn.send_messages(emails)
            print _("====== sending %(num)s messages =======") % {
                'num': str(len(emails))}
        except Exception, exception:
            print _("Sending failed ERROR: %(error)s") % {'error': str(exception)}
Example 46 (0 votes) — Project: wagtail, Source File: pages.py
def index(request, parent_page_id=None):
    """Explorer listing of the children of ``parent_page_id`` (or the root)."""
    if parent_page_id:
        parent_page = get_object_or_404(Page, id=parent_page_id).specific
    else:
        parent_page = Page.get_first_root_node().specific

    pages = parent_page.get_children().prefetch_related('content_type')

    # Resolve the requested ordering, falling back to newest-edited-first.
    valid_orderings = (
        'title', '-title',
        'content_type', '-content_type',
        'live', '-live',
        'latest_revision_created_at', '-latest_revision_created_at',
        'ord',
    )
    ordering = request.GET.get('ordering', '-latest_revision_created_at')
    if ordering not in valid_orderings:
        ordering = '-latest_revision_created_at'

    if ordering in ('latest_revision_created_at', '-latest_revision_created_at'):
        # Special-case NULL revision dates: Count('latest_revision_created_at')
        # yields 0 for NULL entries, which lets us push them to the top
        # (ascending) or the bottom (descending) of the listing.
        descending = ordering.startswith('-')
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by(
            '-null_position' if descending else 'null_position',
            ordering,
        )
    elif ordering != 'ord':
        pages = pages.order_by(ordering)
    # 'ord' keeps the native ordering from get_children() untouched.

    # Sorting by explicit page order disables pagination: every page must be
    # visible for drag-and-drop reordering.
    do_paginate = ordering != 'ord'
    if do_paginate:
        # Retrieve pages in their most specific form, but only for paginated
        # listings -- .specific() can be very expensive on a large queryset.
        pages = pages.specific()

    # allow hooks to modify the queryset
    for hook in hooks.get_hooks('construct_explorer_page_queryset'):
        pages = hook(parent_page, pages, request)

    if do_paginate:
        paginator, pages = paginate(request, pages, per_page=50)

    return render(request, 'wagtailadmin/pages/index.html', {
        'parent_page': parent_page.specific,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'pages': pages,
        'do_paginate': do_paginate,
    })
Example 47 (0 votes) — Project: pretix, Source File: stats.py
def order_overview(event: Event) -> Tuple[List[Tuple[ItemCategory, List[Item]]], Dict[str, Tuple[Decimal, Decimal]]]:
    """Aggregate order statistics for an event.

    Returns a tuple of:
      * items grouped by category (plus a synthetic "Payment method fees"
        category), each item/variation annotated with ``num_total``,
        ``num_pending``, ``num_canceled``, ``num_refunded`` and ``num_paid``
        as (count, money) pairs;
      * a totals dict with the same keys summed over all categories.
    """
    items = event.items.all().select_related(
        'category',  # for re-grouping
    ).prefetch_related(
        'variations'
    ).order_by('category__position', 'category_id', 'name')
    # One aggregate row per (item, variation, order status).
    counters = OrderPosition.objects.filter(
        order__event=event
    ).values(
        'item', 'variation', 'order__status'
    ).annotate(cnt=Count('id'), price=Sum('price')).order_by()
    # Index the counter rows by (item id, variation id) per status.
    num_canceled = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_CANCELED
    }
    num_refunded = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_REFUNDED
    }
    num_paid = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_PAID
    }
    num_s_pending = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_PENDING
    }
    num_expired = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_EXPIRED
    }
    # "Pending" in the overview includes expired orders; "total" excludes
    # canceled/refunded.
    num_pending = dictsum(num_s_pending, num_expired)
    num_total = dictsum(num_pending, num_paid)
    for item in items:
        item.all_variations = list(item.variations.all())
        item.has_variations = (len(item.all_variations) > 0)
        if item.has_variations:
            # Per-variation counters, then sum them up onto the item.
            for var in item.all_variations:
                variid = var.id
                var.num_total = num_total.get((item.id, variid), (0, 0))
                var.num_pending = num_pending.get((item.id, variid), (0, 0))
                var.num_canceled = num_canceled.get((item.id, variid), (0, 0))
                var.num_refunded = num_refunded.get((item.id, variid), (0, 0))
                var.num_paid = num_paid.get((item.id, variid), (0, 0))
            item.num_total = tuplesum(var.num_total for var in item.all_variations)
            item.num_pending = tuplesum(var.num_pending for var in item.all_variations)
            item.num_canceled = tuplesum(var.num_canceled for var in item.all_variations)
            item.num_refunded = tuplesum(var.num_refunded for var in item.all_variations)
            item.num_paid = tuplesum(var.num_paid for var in item.all_variations)
        else:
            # Variation-less items are keyed with variation=None.
            item.num_total = num_total.get((item.id, None), (0, 0))
            item.num_pending = num_pending.get((item.id, None), (0, 0))
            item.num_canceled = num_canceled.get((item.id, None), (0, 0))
            item.num_refunded = num_refunded.get((item.id, None), (0, 0))
            item.num_paid = num_paid.get((item.id, None), (0, 0))
    nonecat = ItemCategory(name=_('Uncategorized'))
    # Regroup those by category
    items_by_category = sorted(
        [
            # a group is a tuple of a category and a list of items
            (cat if cat is not None else nonecat, [i for i in items if i.category == cat])
            for cat in set([i.category for i in items])
            # insert categories into a set for uniqueness
            # a set is unsorted, so sort again by category
        ],
        key=lambda group: (group[0].position, group[0].id) if (
            group[0] is not None and group[0].id is not None) else (0, 0)
    )
    # Roll item counters up onto each category object.
    for c in items_by_category:
        c[0].num_total = tuplesum(item.num_total for item in c[1])
        c[0].num_pending = tuplesum(item.num_pending for item in c[1])
        c[0].num_canceled = tuplesum(item.num_canceled for item in c[1])
        c[0].num_refunded = tuplesum(item.num_refunded for item in c[1])
        c[0].num_paid = tuplesum(item.num_paid for item in c[1])
    # Payment fees
    # Synthetic category listing per-provider payment fees alongside items.
    payment_cat_obj = DummyObject()
    payment_cat_obj.name = _('Payment method fees')
    payment_items = []
    counters = event.orders.values('payment_provider', 'status').annotate(
        cnt=Count('id'), payment_fee=Sum('payment_fee')
    ).order_by()
    num_canceled = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_CANCELED
    }
    num_refunded = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_REFUNDED
    }
    num_s_pending = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_PENDING
    }
    num_expired = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_EXPIRED
    }
    num_paid = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_PAID
    }
    num_pending = dictsum(num_s_pending, num_expired)
    num_total = dictsum(num_pending, num_paid)
    # Map provider identifiers to their human-readable names.
    provider_names = {}
    responses = register_payment_providers.send(event)
    for receiver, response in responses:
        provider = response(event)
        provider_names[provider.identifier] = provider.verbose_name
    for pprov, total in num_total.items():
        ppobj = DummyObject()
        ppobj.name = provider_names.get(pprov, pprov)
        ppobj.provider = pprov
        ppobj.has_variations = False
        ppobj.num_total = total
        ppobj.num_canceled = num_canceled.get(pprov, (0, 0))
        ppobj.num_refunded = num_refunded.get(pprov, (0, 0))
        ppobj.num_pending = num_pending.get(pprov, (0, 0))
        ppobj.num_paid = num_paid.get(pprov, (0, 0))
        payment_items.append(ppobj)
    # Dontsum('') keeps the fee column out of count totals while summing money.
    payment_cat_obj.num_total = (Dontsum(''), sum(i.num_total[1] for i in payment_items))
    payment_cat_obj.num_canceled = (Dontsum(''), sum(i.num_canceled[1] for i in payment_items))
    payment_cat_obj.num_refunded = (Dontsum(''), sum(i.num_refunded[1] for i in payment_items))
    payment_cat_obj.num_pending = (Dontsum(''), sum(i.num_pending[1] for i in payment_items))
    payment_cat_obj.num_paid = (Dontsum(''), sum(i.num_paid[1] for i in payment_items))
    payment_cat = (payment_cat_obj, payment_items)
    items_by_category.append(payment_cat)
    total = {
        'num_total': tuplesum(c.num_total for c, i in items_by_category),
        'num_pending': tuplesum(c.num_pending for c, i in items_by_category),
        'num_canceled': tuplesum(c.num_canceled for c, i in items_by_category),
        'num_refunded': tuplesum(c.num_refunded for c, i in items_by_category),
        'num_paid': tuplesum(c.num_paid for c, i in items_by_category)
    }
    return items_by_category, total
Example 48 (0 votes)
def get_context_data(self, **kwargs):
    """Build the template context for redeeming ``self.voucher``.

    Collects the currently-available items (restricted to the voucher's item
    or quota when set), computes availability and voucher-adjusted prices per
    item/variation, and groups the result by category.
    """
    context = super().get_context_data(**kwargs)
    context['voucher'] = self.voucher
    # Fetch all items
    # Only active items inside their availability window.
    items = self.request.event.items.all().filter(
        Q(active=True)
        & Q(Q(available_from__isnull=True) | Q(available_from__lte=now()))
        & Q(Q(available_until__isnull=True) | Q(available_until__gte=now()))
    )
    # Items hidden without a voucher become visible for the voucher's own item.
    vouchq = Q(hide_without_voucher=False)
    if self.voucher.item_id:
        vouchq |= Q(pk=self.voucher.item_id)
        items = items.filter(pk=self.voucher.item_id)
    elif self.voucher.quota_id:
        items = items.filter(quotas__in=[self.voucher.quota_id])
    # Drop items without any quota (quotac) -- they can never be sold.
    items = items.filter(vouchq).select_related(
        'category',  # for re-grouping
    ).prefetch_related(
        'quotas', 'variations__quotas', 'quotas__event'  # for .availability()
    ).annotate(quotac=Count('quotas')).filter(
        quotac__gt=0
    ).distinct().order_by('category__position', 'category_id', 'position', 'name')
    for item in items:
        item.available_variations = list(item.variations.filter(active=True, quotas__isnull=False).distinct())
        # A variation-specific voucher limits the list to that one variation.
        if self.voucher.item_id and self.voucher.variation_id:
            item.available_variations = [v for v in item.available_variations if v.pk == self.voucher.variation_id]
        item.has_variations = item.variations.exists()
        if not item.has_variations:
            if self.voucher.allow_ignore_quota or self.voucher.block_quota:
                # Voucher guarantees (or reserves) a spot: report as available.
                item.cached_availability = (Quota.AVAILABILITY_OK, 1)
            else:
                item.cached_availability = item.check_quotas()
            # Voucher price overrides the item's default price when set.
            if self.voucher.price is not None:
                item.price = self.voucher.price
            else:
                item.price = item.default_price
        else:
            for var in item.available_variations:
                if self.voucher.allow_ignore_quota or self.voucher.block_quota:
                    var.cached_availability = (Quota.AVAILABILITY_OK, 1)
                else:
                    var.cached_availability = list(var.check_quotas())
                if self.voucher.price is not None:
                    var.price = self.voucher.price
                else:
                    # Fall back to the item's default when the variation has none.
                    var.price = var.default_price if var.default_price is not None else item.default_price
            if len(item.available_variations) > 0:
                item.min_price = min([v.price for v in item.available_variations])
                item.max_price = max([v.price for v in item.available_variations])
    # Hide items whose variations were all filtered away.
    items = [item for item in items if len(item.available_variations) > 0 or not item.has_variations]
    context['options'] = sum([(len(item.available_variations) if item.has_variations else 1)
                              for item in items])
    # Regroup those by category
    context['items_by_category'] = item_group_by_category(items)
    return context
Example 49 (0 votes)
def calculate_counts(apps, schema_editor):
    """
    Iterate across all our channels, calculate our message counts for each category

    Data migration helper: for every Channel, deletes any previous daily
    ChannelCount rows and recreates them from Msg, one row per day for each
    of the four count types (incoming/outgoing x message/voice).
    """
    ChannelCount = apps.get_model('channels', 'ChannelCount')
    Channel = apps.get_model('channels', 'Channel')
    Msg = apps.get_model('msgs', 'Msg')

    def add_daily_counts(count_channel, count_type, count_totals):
        # Persist one ChannelCount row per daily total.
        for daily_count in count_totals:
            print("Adding %d - %s - %s" % (count_channel.id, count_type, str(daily_count)))
            # BUG FIX: the original passed channel=channel (the enclosing loop
            # variable) instead of the count_channel parameter; use the
            # parameter so the helper is correct regardless of caller context.
            ChannelCount.objects.create(channel=count_channel, count_type=count_type,
                                        day=daily_count['created'], count=daily_count['count'])

    # (count_type, direction, is_voice) -- the four categories we rebuild.
    count_specs = [
        ('IM', 'I', False),  # incoming msgs
        ('OM', 'O', False),  # outgoing msgs
        ('IV', 'I', True),   # incoming voice
        ('OV', 'O', True),   # outgoing voice
    ]

    for channel in Channel.objects.all():
        # remove any previous counts
        ChannelCount.objects.filter(channel=channel, count_type__in=['IM', 'OM', 'IV', 'OV']).delete()
        for count_type, direction, is_voice in count_specs:
            daily_counts = Msg.all_messages.filter(channel=channel, contact__is_test=False, direction=direction)
            # Voice counts keep msg_type 'V'; message counts exclude it.
            if is_voice:
                daily_counts = daily_counts.filter(msg_type='V')
            else:
                daily_counts = daily_counts.exclude(msg_type='V')
            daily_counts = daily_counts\
                .extra({'created': "date(msgs_msg.created_on)"})\
                .values('created')\
                .annotate(count=Count('id'))\
                .order_by('created')
            add_daily_counts(channel, count_type, daily_counts)
Example 50 (0 votes) — Project: baruwa, Source File: views.py
@login_required
def index(request):
    """index

    Reports landing page: manages the session-stored message filter list,
    applies it to the user's messages, and returns either aggregate stats as
    JSON (AJAX) or the rendered reports index page.
    """
    errors = ''
    success = True
    active_filters = []
    saved_filters = []
    data = Message.messages.for_user(request)
    filters = SavedFilter.objects.all().filter(user=request.user)
    filter_form = FilterForm()
    if request.method == 'POST':
        # POST adds a new filter to the session's filter list.
        filter_form = FilterForm(request.POST)
        if filter_form.is_valid():
            cleaned_data = filter_form.cleaned_data
            in_field = force_escape(cleaned_data['filtered_field'])
            in_value = force_escape(cleaned_data['filtered_value'])
            in_filtered_by = int(cleaned_data['filtered_by'])
            if not request.session.get('filter_by', False):
                # First filter for this session.
                request.session['filter_by'] = []
                request.session['filter_by'].append(
                    {'field': in_field, 'filter': in_filtered_by,
                     'value': in_value})
            else:
                # Only append if this exact filter is not already active.
                fitem = {'field': in_field, 'filter': in_filtered_by,
                         'value': in_value}
                if not fitem in request.session['filter_by']:
                    request.session['filter_by'].append(fitem)
                    # Mutating a nested list does not mark the session dirty
                    # automatically, so flag it explicitly.
                    request.session.modified = True
                else:
                    success = False
                    errors = _("The requested filter is already being used")
            filter_list = request.session.get('filter_by')
            data = gen_dynamic_query(data, filter_list, active_filters)
        else:
            success = False
            # NOTE(review): indexing .values()[0] works on Python 2 dicts
            # only; on Python 3 dict views are not indexable -- confirm the
            # project's Python version.
            error_list = filter_form.errors.values()[0]
            errors = error_list[0]
            if request.session.get('filter_by', False):
                filter_list = request.session.get('filter_by')
                data = gen_dynamic_query(data, filter_list, active_filters)
    else:
        # GET: just re-apply whatever filters are already in the session.
        filter_form = FilterForm()
        if request.session.get('filter_by', False):
            filter_list = request.session.get('filter_by')
            data = gen_dynamic_query(data, filter_list, active_filters)
    # Collapse the (possibly filtered) queryset into summary stats.
    data = data.aggregate(count=Count('timestamp'), newest=Max('timestamp'),
                          oldest=Min('timestamp'))
    if filters.count() > 0:
        if request.session.get('filter_by', False):
            filter_list = request.session.get('filter_by')
        else:
            filter_list = []
        # Mark each saved filter as loaded if it matches an active one.
        for filt in filters:
            loaded = 0
            if filter_list:
                loaded = 0
                for fitem in filter_list:
                    if fitem['filter'] == filt.op_field and (
                            fitem['value'] == filt.value and
                            fitem['field'] == filt.field):
                        loaded = 1
                        break
            saved_filters.append(
                {'filter_id': filt.id, 'filter_name': force_escape(filt.name),
                 'is_loaded': loaded})
    if request.is_ajax():
        # JSON response for AJAX polling; timestamps formatted for display.
        if not data['newest'] is None and not data['oldest'] is None:
            data['newest'] = data['newest'].strftime("%a %d %b %Y @ %H:%M %p")
            data['oldest'] = data['oldest'].strftime("%a %d %b %Y @ %H:%M %p")
        else:
            data['newest'] = ''
            data['oldest'] = ''
        response = anyjson.dumps({'success': success, 'data': data,
                                  'errors': errors, 'active_filters': active_filters,
                                  'saved_filters': saved_filters})
        return HttpResponse(response,
                            content_type='application/javascript; charset=utf-8')
    return render_to_response('reports/index.html', {'form': filter_form,
                              'data': data, 'errors': errors, 'active_filters': active_filters,
                              'saved_filters': saved_filters},
                              context_instance=RequestContext(request))