django.utils.translation._

Here are examples of the Python API django.utils.translation._ taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

199 Examples
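
The _ in these examples is simply the conventional module-level alias that each project binds to one of Django's gettext functions. A minimal sketch of the usual imports and call patterns (function names as in current Django; older projects typically bound the since-removed ugettext/ugettext_lazy variants instead, but the call sites look the same):

# Minimal sketch of how the "_" alias used in the examples below is typically bound.
from django.utils.translation import gettext as _     # translate immediately, using the active locale
from django.utils.translation import gettext_lazy     # defer translation until the string is rendered
from django.utils.translation import pgettext         # translate with a disambiguating context string

page_label = _("Page %d") % 3                          # e.g. "Seite 3" when the active language is German
heading = pgettext('invoice', 'Invoice')               # the 'invoice' context keeps this apart from other uses of "Invoice"
lazy_label = gettext_lazy("Users")                     # evaluated lazily, so safe in settings, model fields and migrations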

Example 1

Project: pretix
Source File: invoices.py
def _invoice_generate_german(invoice, f):
    _invoice_register_fonts()
    styles = _invoice_get_stylesheet()
    pagesize = pagesizes.A4

    def on_page(canvas, doc):
        canvas.saveState()
        canvas.setFont('OpenSans', 8)
        canvas.drawRightString(pagesize[0] - 20 * mm, 10 * mm, _("Page %d") % (doc.page,))

        for i, line in enumerate(invoice.footer_text.split('\n')[::-1]):
            canvas.drawCentredString(pagesize[0] / 2, 25 + (3.5 * i) * mm, line.strip())

        canvas.restoreState()

    def on_first_page(canvas, doc):
        canvas.setCreator('pretix.eu')
        canvas.setTitle(pgettext('invoice', 'Invoice {num}').format(num=invoice.number))

        canvas.saveState()
        canvas.setFont('OpenSans', 8)
        canvas.drawRightString(pagesize[0] - 20 * mm, 10 * mm, _("Page %d") % (doc.page,))

        for i, line in enumerate(invoice.footer_text.split('\n')[::-1]):
            canvas.drawCentredString(pagesize[0] / 2, 25 + (3.5 * i) * mm, line.strip())

        textobject = canvas.beginText(25 * mm, (297 - 15) * mm)
        textobject.setFont('OpenSansBd', 8)
        textobject.textLine(pgettext('invoice', 'Invoice from').upper())
        textobject.moveCursor(0, 5)
        textobject.setFont('OpenSans', 10)
        textobject.textLines(invoice.invoice_from.strip())
        canvas.drawText(textobject)

        textobject = canvas.beginText(25 * mm, (297 - 50) * mm)
        textobject.setFont('OpenSansBd', 8)
        textobject.textLine(pgettext('invoice', 'Invoice to').upper())
        textobject.moveCursor(0, 5)
        textobject.setFont('OpenSans', 10)
        textobject.textLines(invoice.invoice_to.strip())
        canvas.drawText(textobject)

        textobject = canvas.beginText(125 * mm, (297 - 50) * mm)
        textobject.setFont('OpenSansBd', 8)
        if invoice.is_cancellation:
            textobject.textLine(pgettext('invoice', 'Cancellation number').upper())
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSans', 10)
            textobject.textLine(invoice.number)
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSansBd', 8)
            textobject.textLine(pgettext('invoice', 'Original invoice').upper())
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSans', 10)
            textobject.textLine(invoice.refers.number)
        else:
            textobject.textLine(pgettext('invoice', 'Invoice number').upper())
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSans', 10)
            textobject.textLine(invoice.number)
        textobject.moveCursor(0, 5)

        if invoice.is_cancellation:
            textobject.setFont('OpenSansBd', 8)
            textobject.textLine(pgettext('invoice', 'Cancellation date').upper())
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSans', 10)
            textobject.textLine(date_format(invoice.date, "DATE_FORMAT"))
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSansBd', 8)
            textobject.textLine(pgettext('invoice', 'Original invoice date').upper())
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSans', 10)
            textobject.textLine(date_format(invoice.refers.date, "DATE_FORMAT"))
            textobject.moveCursor(0, 5)
        else:
            textobject.setFont('OpenSansBd', 8)
            textobject.textLine(pgettext('invoice', 'Invoice date').upper())
            textobject.moveCursor(0, 5)
            textobject.setFont('OpenSans', 10)
            textobject.textLine(date_format(invoice.date, "DATE_FORMAT"))
            textobject.moveCursor(0, 5)

        canvas.drawText(textobject)

        textobject = canvas.beginText(165 * mm, (297 - 50) * mm)
        textobject.setFont('OpenSansBd', 8)
        textobject.textLine(_('Order code').upper())
        textobject.moveCursor(0, 5)
        textobject.setFont('OpenSans', 10)
        textobject.textLine(invoice.order.full_code)
        textobject.moveCursor(0, 5)
        textobject.setFont('OpenSansBd', 8)
        textobject.textLine(_('Order date').upper())
        textobject.moveCursor(0, 5)
        textobject.setFont('OpenSans', 10)
        textobject.textLine(date_format(invoice.order.datetime, "DATE_FORMAT"))
        canvas.drawText(textobject)

        textobject = canvas.beginText(125 * mm, (297 - 15) * mm)
        textobject.setFont('OpenSansBd', 8)
        textobject.textLine(_('Event').upper())
        textobject.moveCursor(0, 5)
        textobject.setFont('OpenSans', 10)
        textobject.textLine(str(invoice.event.name))
        if invoice.event.settings.show_date_to:
            textobject.textLines(
                _('{from_date}\nuntil {to_date}').format(from_date=invoice.event.get_date_from_display(),
                                                         to_date=invoice.event.get_date_to_display()))
        else:
            textobject.textLine(invoice.event.get_date_from_display())
        canvas.drawText(textobject)

        canvas.restoreState()

    doc = BaseDocTemplate(f.name, pagesize=pagesizes.A4,
                          leftMargin=25 * mm, rightMargin=20 * mm,
                          topMargin=20 * mm, bottomMargin=15 * mm)

    footer_length = 3.5 * len(invoice.footer_text.split('\n')) * mm
    frames_p1 = [
        Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height - 75 * mm,
              leftPadding=0, rightPadding=0, topPadding=0, bottomPadding=footer_length,
              id='normal')
    ]
    frames = [
        Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height,
              leftPadding=0, rightPadding=0, topPadding=0, bottomPadding=footer_length,
              id='normal')
    ]
    doc.addPageTemplates([
        PageTemplate(id='FirstPage', frames=frames_p1, onPage=on_first_page, pagesize=pagesize),
        PageTemplate(id='OtherPages', frames=frames, onPage=on_page, pagesize=pagesize)
    ])
    story = [
        NextPageTemplate('FirstPage'),
        Paragraph(pgettext('invoice', 'Invoice')
                  if not invoice.is_cancellation
                  else pgettext('invoice', 'Cancellation'),
                  styles['Heading1']),
        Spacer(1, 5 * mm),
        NextPageTemplate('OtherPages'),
    ]

    if invoice.introductory_text:
        story.append(Paragraph(invoice.introductory_text, styles['Normal']))
        story.append(Spacer(1, 10 * mm))

    taxvalue_map = defaultdict(Decimal)
    grossvalue_map = defaultdict(Decimal)

    tstyledata = [
        ('ALIGN', (1, 0), (-1, -1), 'RIGHT'),
        ('FONTNAME', (0, 0), (-1, 0), 'OpenSansBd'),
        ('FONTNAME', (0, -1), (-1, -1), 'OpenSansBd'),
        ('LEFTPADDING', (0, 0), (0, -1), 0),
        ('RIGHTPADDING', (-1, 0), (-1, -1), 0),
    ]
    tdata = [(
        pgettext('invoice', 'Description'),
        pgettext('invoice', 'Tax rate'),
        pgettext('invoice', 'Net'),
        pgettext('invoice', 'Gross'),
    )]
    total = Decimal('0.00')
    for line in invoice.lines.all():
        tdata.append((
            line.description,
            lformat("%.2f", line.tax_rate) + " %",
            lformat("%.2f", line.net_value) + " " + invoice.event.currency,
            lformat("%.2f", line.gross_value) + " " + invoice.event.currency,
        ))
        taxvalue_map[line.tax_rate] += line.tax_value
        grossvalue_map[line.tax_rate] += line.gross_value
        total += line.gross_value

    tdata.append([pgettext('invoice', 'Invoice total'), '', '', lformat("%.2f", total) + " " + invoice.event.currency])
    colwidths = [a * doc.width for a in (.55, .15, .15, .15)]
    table = Table(tdata, colWidths=colwidths, repeatRows=1)
    table.setStyle(TableStyle(tstyledata))
    story.append(table)

    story.append(Spacer(1, 15 * mm))

    if invoice.payment_provider_text:
        story.append(Paragraph(invoice.payment_provider_text, styles['Normal']))

    if invoice.additional_text:
        story.append(Paragraph(invoice.additional_text, styles['Normal']))
        story.append(Spacer(1, 15 * mm))

    tstyledata = [
        ('SPAN', (1, 0), (-1, 0)),
        ('ALIGN', (2, 1), (-1, -1), 'RIGHT'),
        ('LEFTPADDING', (0, 0), (0, -1), 0),
        ('RIGHTPADDING', (-1, 0), (-1, -1), 0),
        ('FONTSIZE', (0, 0), (-1, -1), 8),
    ]
    tdata = [('', pgettext('invoice', 'Included taxes'), '', '', ''),
             ('', pgettext('invoice', 'Tax rate'),
              pgettext('invoice', 'Net value'), pgettext('invoice', 'Gross value'), pgettext('invoice', 'Tax'))]

    for rate, gross in grossvalue_map.items():
        if rate == 0:
            continue
        tax = taxvalue_map[rate]
        tdata.append((
            '',
            lformat("%.2f", rate) + " %",
            lformat("%.2f", (gross - tax)) + " " + invoice.event.currency,
            lformat("%.2f", gross) + " " + invoice.event.currency,
            lformat("%.2f", tax) + " " + invoice.event.currency,
        ))

    if len(tdata) > 2:
        colwidths = [a * doc.width for a in (.45, .10, .15, .15, .15)]
        table = Table(tdata, colWidths=colwidths, repeatRows=2)
        table.setStyle(TableStyle(tstyledata))
        story.append(table)

    doc.build(story)
    return doc
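
Note how Example 1 marks user-visible strings either with _ or with pgettext('invoice', ...); the context argument only changes which catalog entry is looked up. A small illustrative sketch of that difference, assuming a compiled German locale that actually contains these entries (not part of the excerpt):

# Illustrative only: assumes a "de" translation catalog providing these entries.
from django.utils.translation import override, pgettext, gettext as _

with override('de'):                          # temporarily activate German for this block
    print(_("Page %d") % 2)                   # plain lookup of msgid "Page %d"
    print(pgettext('invoice', 'Invoice'))     # lookup of msgid "Invoice" under msgctxt "invoice"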

Example 2

Project: pretix
Source File: stats.py
def order_overview(event: Event) -> Tuple[List[Tuple[ItemCategory, List[Item]]], Dict[str, Tuple[Decimal, Decimal]]]:
    items = event.items.all().select_related(
        'category',  # for re-grouping
    ).prefetch_related(
        'variations'
    ).order_by('category__position', 'category_id', 'name')

    counters = OrderPosition.objects.filter(
        order__event=event
    ).values(
        'item', 'variation', 'order__status'
    ).annotate(cnt=Count('id'), price=Sum('price')).order_by()

    num_canceled = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_CANCELED
    }
    num_refunded = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_REFUNDED
    }
    num_paid = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_PAID
    }
    num_s_pending = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_PENDING
    }
    num_expired = {
        (p['item'], p['variation']): (p['cnt'], p['price'])
        for p in counters if p['order__status'] == Order.STATUS_EXPIRED
    }
    num_pending = dictsum(num_s_pending, num_expired)
    num_total = dictsum(num_pending, num_paid)

    for item in items:
        item.all_variations = list(item.variations.all())
        item.has_variations = (len(item.all_variations) > 0)
        if item.has_variations:
            for var in item.all_variations:
                variid = var.id
                var.num_total = num_total.get((item.id, variid), (0, 0))
                var.num_pending = num_pending.get((item.id, variid), (0, 0))
                var.num_canceled = num_canceled.get((item.id, variid), (0, 0))
                var.num_refunded = num_refunded.get((item.id, variid), (0, 0))
                var.num_paid = num_paid.get((item.id, variid), (0, 0))
            item.num_total = tuplesum(var.num_total for var in item.all_variations)
            item.num_pending = tuplesum(var.num_pending for var in item.all_variations)
            item.num_canceled = tuplesum(var.num_canceled for var in item.all_variations)
            item.num_refunded = tuplesum(var.num_refunded for var in item.all_variations)
            item.num_paid = tuplesum(var.num_paid for var in item.all_variations)
        else:
            item.num_total = num_total.get((item.id, None), (0, 0))
            item.num_pending = num_pending.get((item.id, None), (0, 0))
            item.num_canceled = num_canceled.get((item.id, None), (0, 0))
            item.num_refunded = num_refunded.get((item.id, None), (0, 0))
            item.num_paid = num_paid.get((item.id, None), (0, 0))

    nonecat = ItemCategory(name=_('Uncategorized'))
    # Regroup those by category
    items_by_category = sorted(
        [
            # a group is a tuple of a category and a list of items
            (cat if cat is not None else nonecat, [i for i in items if i.category == cat])
            for cat in set([i.category for i in items])
            # insert categories into a set for uniqueness
            # a set is unsorted, so sort again by category
        ],
        key=lambda group: (group[0].position, group[0].id) if (
            group[0] is not None and group[0].id is not None) else (0, 0)
    )

    for c in items_by_category:
        c[0].num_total = tuplesum(item.num_total for item in c[1])
        c[0].num_pending = tuplesum(item.num_pending for item in c[1])
        c[0].num_canceled = tuplesum(item.num_canceled for item in c[1])
        c[0].num_refunded = tuplesum(item.num_refunded for item in c[1])
        c[0].num_paid = tuplesum(item.num_paid for item in c[1])

    # Payment fees
    payment_cat_obj = DummyObject()
    payment_cat_obj.name = _('Payment method fees')
    payment_items = []

    counters = event.orders.values('payment_provider', 'status').annotate(
        cnt=Count('id'), payment_fee=Sum('payment_fee')
    ).order_by()

    num_canceled = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_CANCELED
    }
    num_refunded = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_REFUNDED
    }
    num_s_pending = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_PENDING
    }
    num_expired = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_EXPIRED
    }
    num_paid = {
        o['payment_provider']: (o['cnt'], o['payment_fee'])
        for o in counters if o['status'] == Order.STATUS_PAID
    }
    num_pending = dictsum(num_s_pending, num_expired)
    num_total = dictsum(num_pending, num_paid)

    provider_names = {}
    responses = register_payment_providers.send(event)
    for receiver, response in responses:
        provider = response(event)
        provider_names[provider.identifier] = provider.verbose_name

    for pprov, total in num_total.items():
        ppobj = DummyObject()
        ppobj.name = provider_names.get(pprov, pprov)
        ppobj.provider = pprov
        ppobj.has_variations = False
        ppobj.num_total = total
        ppobj.num_canceled = num_canceled.get(pprov, (0, 0))
        ppobj.num_refunded = num_refunded.get(pprov, (0, 0))
        ppobj.num_pending = num_pending.get(pprov, (0, 0))
        ppobj.num_paid = num_paid.get(pprov, (0, 0))
        payment_items.append(ppobj)

    payment_cat_obj.num_total = (Dontsum(''), sum(i.num_total[1] for i in payment_items))
    payment_cat_obj.num_canceled = (Dontsum(''), sum(i.num_canceled[1] for i in payment_items))
    payment_cat_obj.num_refunded = (Dontsum(''), sum(i.num_refunded[1] for i in payment_items))
    payment_cat_obj.num_pending = (Dontsum(''), sum(i.num_pending[1] for i in payment_items))
    payment_cat_obj.num_paid = (Dontsum(''), sum(i.num_paid[1] for i in payment_items))
    payment_cat = (payment_cat_obj, payment_items)

    items_by_category.append(payment_cat)

    total = {
        'num_total': tuplesum(c.num_total for c, i in items_by_category),
        'num_pending': tuplesum(c.num_pending for c, i in items_by_category),
        'num_canceled': tuplesum(c.num_canceled for c, i in items_by_category),
        'num_refunded': tuplesum(c.num_refunded for c, i in items_by_category),
        'num_paid': tuplesum(c.num_paid for c, i in items_by_category)
    }

    return items_by_category, total

Example 3

Project: DistrictBuilder
Source File: forms.py
    def clean(self):
        """
        Check the validity of the form. If all immediate fields are present,
        this method kicks off a celery task to verify the counts of elements
        in the uploaded file.
        """

        subject_upload = self.cleaned_data['subject_upload']
        if not subject_upload is None and isinstance(subject_upload,UploadedFile):
            self.ul_file = subject_upload.name
            task_id = ''

            # Create a new record of uploaded subjects
            sup = SubjectUpload(upload_filename=subject_upload.name, status='UL')
            sup.save()

            # try saving the uploaded file via stream to the file system
            try:
                localstore = tempfile.NamedTemporaryFile(mode='w+', delete=False)
                for chunk in subject_upload.chunks():
                    localstore.write(chunk)
            except Exception, ex:
                sup.status = 'ER'
                sup.save()

                raise forms.ValidationError(_('Could not store uploaded Subject template.'))

            sup.processing_filename = self.ps_file = localstore.name
            sup.save()

            # Check if the subject exists.
            localstore.seek(0)

            reader = csv.DictReader(localstore)

            if len(reader.fieldnames) < 2:
                localstore.close()

                sup.status = 'ER'
                sup.save()

                raise forms.ValidationError(_('The uploaded file is missing subject data.'))

            try:
                clean_name = self._clean_name(reader.fieldnames[1][0:50])
                sup.subject_name = self.temp_subject_name = clean_name
                sup.save()
            except Exception, ex:
                raise forms.ValidationError(_('The new subject name could not be determined.'))
            finally:
                localstore.close()

        # If the subject_upload is not a file, it was probably already uploaded.
        # Check the processing_file field to see if that file exists on the file system.
        elif self.cleaned_data['processing_file'] != '':
            self.ps_file = self.cleaned_data['processing_file']
            self.ul_file = self.cleaned_data['uploaded_file']
            self.temp_subject_name = self._clean_name(self.cleaned_data['subject_name'])

            sup = SubjectUpload.objects.get(upload_filename=self.ul_file,
                processing_filename=self.ps_file)

            # the processing file must be in the /tmp/ folder, and may not contain any ".."
            if not self.temp_path_re.match(self.ps_file) or \
                self.dotdot_path_re.match(self.ps_file) or \
                not os.path.exists(self.ps_file):
                raise forms.ValidationError(_('Uploaded file cannot be found.'))
        else:
            self._errors['subject_upload'] = self.error_class([_('Uploaded file is required.')])
            return self.cleaned_data

        # path for data dir is adjacent to the web_temp setting
        pathspec = settings.WEB_TEMP.split('/')
        pathspec[-1] = 'data'
        pathspec = '/'.join(pathspec)

        saved_ul = '%s/%s' % (pathspec, self.temp_subject_name)

        collisions = Subject.objects.filter(name=self.temp_subject_name)
        if collisions.count() > 0:
            if not self.cleaned_data['force_overwrite']:
                self.temp_subject_name = sup.subject_name
                self._errors = {}
                self._errors['subject_name'] = self.error_class([_('Please specify a unique subject name.')])
                self._errors['force_overwrite'] = self.error_class([_('Check this box to overwrite the existing subject with the same name.')])
                return self.cleaned_data
            saved_ul = '%s_%s.csv' % (saved_ul, str(collisions[0].version))
        else:
            saved_ul = '%s_1.csv' % saved_ul

        # move the uploaded subject file to the data directory
        os.rename(self.ps_file, saved_ul)

        # update the location of the processing_file
        sup.processing_filename = self.ps_file = saved_ul

        sup.subject_name = self.temp_subject_name
        sup.status = 'CH'
        sup.save()

        # verify_count begins a cascade of validation operations
        task = verify_count.delay(sup.id, self.ps_file, language=get_language())
        sup.task_id = task.task_id

        sup.save()

        self.cleaned_data['task_uuid'] = sup.task_id
        self.cleaned_data['processing_file'] = sup.upload_filename

        return self.cleaned_data

Example 4

Project: django-bulk-admin
Source File: admin.py
    @csrf_protect_m
    @transaction.atomic
    def bulk_view(self, request, form_url='', extra_context=None):
        to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
        if to_field and not self.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)

        model = self.model
        opts = model._meta

        continue_requested = request.POST.get('_continue', request.GET.get('_continue'))
        force_continue = False
        inline = self.get_bulk_inline(request)
        formset_class = inline.get_formset(request)
        formset_params = {}
        prefix = formset_class.get_default_prefix()
        queryset = inline.get_queryset(request)

        if not self.has_add_permission(request):
            formset_class.max_num = 0

        if request.method == 'GET':
            if 'pks' in request.GET and self.has_change_permission(request):
                pks = [opts.pk.to_python(pk) for pk in request.GET.get('pks').split(',')]
                queryset = queryset.filter(pk__in=pks)
            else:
                queryset = queryset.none()

        elif request.method == 'POST':
            management_form = ManagementForm(request.POST, prefix=prefix)

            if not management_form.is_valid():
                raise ValidationError(
                    _('ManagementForm data is missing or has been tampered with'),
                    code='missing_management_form',
                )

            if not self.has_add_permission(request) and management_form.cleaned_data[INITIAL_FORM_COUNT] < management_form.cleaned_data[TOTAL_FORM_COUNT]:
                raise PermissionDenied

            if not self.has_change_permission(request) and management_form.cleaned_data[INITIAL_FORM_COUNT] > 0:
                raise PermissionDenied

            queryset = self.transform_queryset(request, queryset, management_form, prefix)

            post, files, force_continue = self.transform_post_and_files(request, prefix)
            formset_params.update({
                'data': post,
                'files': files,
            })

        formset_params['queryset'] = queryset

        formset = formset_class(**formset_params)

        if request.method == 'POST':
            if formset.is_valid():
                self.save_formset(request, form=None, formset=formset, change=False)

                if continue_requested or force_continue:
                    # The implementation of ModelAdmin redirects to the change view if valid and continue was requested
                    # The change view then reads the edited model again from database
                    # In our case, we can't make a redirect as we would lose the information about which models should be edited
                    # Thus, we create a new formset with the edited models and continue as this would have been a usual GET request

                    if self.has_change_permission(request):
                        queryset = _ListQueryset(queryset)
                        queryset.extend(formset.new_objects)
                    else:
                        queryset = _ListQueryset()

                    formset_params.update({
                        'data': None,
                        'files': None,
                        'queryset': queryset,
                    })

                    formset = formset_class(**formset_params)

                    msg = _('The %s were bulk added successfully. You may edit them again below.') % (force_text(opts.verbose_name_plural),)
                    self.message_user(request, msg, messages.SUCCESS)

                else:
                    return self.response_bulk(request, formset)

        media = self.media

        inline_formsets = self.get_inline_formsets(request, [formset], [inline], obj=None)
        for inline_formset in inline_formsets:
            media = media + inline_formset.media

        errors = ErrorList()

        if formset.is_bound:
            errors.extend(formset.non_form_errors())
            for formset_errors in formset.errors:
                errors.extend(list(six.itervalues(formset_errors)))

        context = dict(
            self.admin_site.each_context(request) if django.VERSION >= (1, 8) else self.admin_site.each_context(),
            bulk=True,
            bulk_formset_prefix=prefix,
            bulk_upload_fields=self.get_bulk_upload_fields(request),
            title=_('Bulk add %s') % force_text(opts.verbose_name_plural),
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            to_field=to_field,
            media=media,
            inline_admin_formsets=inline_formsets,
            errors=errors,
            preserved_filters=self.get_preserved_filters(request),
        )

        context.update(extra_context or {})

        return self.render_change_form(request, context, add=True, change=False, obj=None, form_url=form_url)

Example 5

Project: Misago
Source File: 0003_default_roles.py
def create_default_roles(apps, schema_editor):
    Role = apps.get_model('misago_acl', 'Role')

    role = Role(name=_("Member"), special_role='authenticated')
    pickle_permissions(
        role,
        {
            # account
            'misago.users.permissions.account': {
                'name_changes_allowed': 2,
                'name_changes_expire': 180,
                'can_have_signature': 0,
                'allow_signature_links': 0,
                'allow_signature_images': 0,
            },

            # profiles
            'misago.users.permissions.profiles': {
                'can_browse_users_list': 1,
                'can_search_users': 1,
                'can_follow_users': 1,
                'can_be_blocked': 1,
                'can_see_users_name_history': 0,
                'can_see_users_emails': 0,
                'can_see_users_ips': 0,
                'can_see_hidden_users': 0,
            },

            # attachments
            'misago.threads.permissions.attachments': {
                'max_attachment_size': 4 * 1024,
                'can_download_other_users_attachments': True,
            },

            # polls
            'misago.threads.permissions.polls': {
                'can_start_polls': 1,
                'can_edit_polls': 1
            },

            # delete users
            'misago.users.permissions.delete': {
                'can_delete_users_newer_than': 0,
                'can_delete_users_with_less_posts_than': 0,
            },
        })
    role.save()

    role = Role(name=_("Guest"), special_role='anonymous')
    pickle_permissions(
        role,
        {
            # account
            'misago.users.permissions.account': {
                'name_changes_allowed': 0,
                'name_changes_expire': 0,
                'can_have_signature': 0,
                'allow_signature_links': 0,
                'allow_signature_images': 0,
            },

            # profiles
            'misago.users.permissions.profiles': {
                'can_browse_users_list': 1,
                'can_search_users': 1,
                'can_see_users_name_history': 0,
                'can_see_users_emails': 0,
                'can_see_users_ips': 0,
                'can_see_hidden_users': 0,
            },

            # attachments
            'misago.threads.permissions.attachments': {
                'can_download_other_users_attachments': True,
            },

            # delete users
            'misago.users.permissions.delete': {
                'can_delete_users_newer_than': 0,
                'can_delete_users_with_less_posts_than': 0,
            },
        })
    role.save()

    role = Role(name=_("Moderator"))
    pickle_permissions(
        role,
        {
            # account
            'misago.users.permissions.account': {
                'name_changes_allowed': 5,
                'name_changes_expire': 14,
                'can_have_signature': 1,
                'allow_signature_links': 1,
                'allow_signature_images': 0,
            },

            # profiles
            'misago.users.permissions.profiles': {
                'can_browse_users_list': 1,
                'can_search_users': 1,
                'can_be_blocked': 0,
                'can_see_users_name_history': 1,
                'can_see_ban_details': 1,
                'can_see_users_emails': 1,
                'can_see_users_ips': 1,
                'can_see_hidden_users': 1,
            },

            # warnings
            'misago.users.permissions.warnings': {
                'can_see_other_users_warnings': 1,
                'can_warn_users': 1,
                'can_cancel_warnings': 1,
                'can_be_warned': 0,
            },

            # attachments
            'misago.threads.permissions.attachments': {
                'max_attachment_size': 8 * 1024,
                'can_download_other_users_attachments': True,
                'can_delete_other_users_attachments': True,
            },

            # polls
            'misago.threads.permissions.polls': {
                'can_start_polls': 2,
                'can_edit_polls': 2,
                'can_delete_polls': 2,
                'can_always_see_poll_voters': 1
            },

            # moderation
            'misago.threads.permissions.threads': {
                'can_see_unapproved_content_lists': True,
                'can_see_reported_content_lists': True,
                'can_omit_flood_protection': True,
            },
            'misago.users.permissions.moderation': {
                'can_warn_users': 1,
                'can_moderate_avatars': 1,
                'can_moderate_signatures': 1,
            },

            # delete users
            'misago.users.permissions.delete': {
                'can_delete_users_newer_than': 0,
                'can_delete_users_with_less_posts_than': 0,
            },
        })
    role.save()

    role = Role(name=_("See warnings"))
    pickle_permissions(
        role,
        {
            # warnings
            'misago.users.permissions.warnings': {
                'can_see_other_users_warnings': 1,
            },
        })
    role.save()

    role = Role(name=_("Renaming users"))
    pickle_permissions(
        role,
        {
            # rename users
            'misago.users.permissions.moderation': {
                'can_rename_users': 1,
            },
        })
    role.save()

    role = Role(name=_("Banning users"))
    pickle_permissions(
        role,
        {
            # ban users
            'misago.users.permissions.profiles': {
                'can_see_ban_details': 1,
            },

            'misago.users.permissions.moderation': {
                'can_ban_users': 1,
                'max_ban_length': 14,
                'can_lift_bans': 1,
                'max_lifted_ban_length': 14,
            },
        })
    role.save()

    role = Role(name=_("Deleting users"))
    pickle_permissions(
        role,
        {
            # delete users
            'misago.users.permissions.delete': {
                'can_delete_users_newer_than': 3,
                'can_delete_users_with_less_posts_than': 7,
            },
        })
    role.save()

    role = Role(name=_("Can't be blocked"))
    pickle_permissions(
        role,
        {
            # profiles
            'misago.users.permissions.profiles': {
                'can_be_blocked': 0,
            },
        })
    role.save()

    role = Role(name=_("Private threads"))
    pickle_permissions(
        role,
        {
            # private threads
            'misago.threads.permissions.privatethreads': {
                'can_use_private_threads': 1,
                'can_start_private_threads': 1,
                'max_private_thread_participants': 3,
                'can_add_everyone_to_private_threads': 0,
                'can_report_private_threads': 1,
                'can_moderate_private_threads': 0,
            },
        })
    role.save()

    role = Role(name=_("Private threads moderator"))
    pickle_permissions(
        role,
        {
            # private threads
            'misago.threads.permissions.privatethreads': {
                'can_use_private_threads': 1,
                'can_start_private_threads': 1,
                'max_private_thread_participants': 15,
                'can_add_everyone_to_private_threads': 1,
                'can_report_private_threads': 1,
                'can_moderate_private_threads': 1,
            },
        })
    role.save()

Example 6

Project: Misago
Source File: 0003_categories_roles.py
def create_default_categories_roles(apps, schema_editor):
    """
    Create roles
    """
    CategoryRole = apps.get_model('misago_categories', 'CategoryRole')

    see_only = CategoryRole(name=_('See only'))
    pickle_permissions(see_only,
        {
            # categories perms
            'misago.categories.permissions': {
                'can_see': 1,
                'can_browse': 0
            },
        })
    see_only.save()

    read_only = CategoryRole(name=_('Read only'))
    pickle_permissions(read_only,
        {
            # categories perms
            'misago.categories.permissions': {
                'can_see': 1,
                'can_browse': 1
            },

            # threads perms
            'misago.threads.permissions.threads': {
                'can_see_all_threads': 1,
                'can_see_posts_likes': 2,
                'can_download_other_users_attachments': 1,
                'can_like_posts': 1
            },
        })
    read_only.save()

    reply_only = CategoryRole(name=_('Reply to threads'))
    pickle_permissions(reply_only,
        {
            # categories perms
            'misago.categories.permissions': {
                'can_see': 1,
                'can_browse': 1
            },

            # threads perms
            'misago.threads.permissions.threads': {
                'can_see_all_threads': 1,
                'can_reply_threads': 1,
                'can_edit_posts': 1,
                'can_download_other_users_attachments': 1,
                'max_attachment_size': 500,
                'can_see_posts_likes': 2,
                'can_like_posts': 1
            },
        })
    reply_only.save()

    standard = CategoryRole(name=_('Start and reply threads'))
    pickle_permissions(standard,
        {
            # categories perms
            'misago.categories.permissions': {
                'can_see': 1,
                'can_browse': 1
            },

            # threads perms
            'misago.threads.permissions.threads': {
                'can_see_all_threads': 1,
                'can_start_threads': 1,
                'can_reply_threads': 1,
                'can_edit_threads': 1,
                'can_edit_posts': 1,
                'can_download_other_users_attachments': 1,
                'max_attachment_size': 500,
                'can_see_posts_likes': 2,
                'can_like_posts': 1
            },
        })
    standard.save()

    standard_with_polls = CategoryRole(name=_('Start and reply threads, make polls'))
    pickle_permissions(standard_with_polls,
        {
            # categories perms
            'misago.categories.permissions': {
                'can_see': 1,
                'can_browse': 1,
            },

            # threads perms
            'misago.threads.permissions.threads': {
                'can_see_all_threads': 1,
                'can_start_threads': 1,
                'can_reply_threads': 1,
                'can_edit_threads': 1,
                'can_edit_posts': 1,
                'can_download_other_users_attachments': 1,
                'max_attachment_size': 500,
                'can_see_posts_likes': 2,
                'can_like_posts': 1
            },
        })
    standard_with_polls.save()

    moderator = CategoryRole(name=_('Moderator'))
    pickle_permissions(moderator,
        {
            # categories perms
            'misago.categories.permissions': {
                'can_see': 1,
                'can_browse': 1
            },

            # threads perms
            'misago.threads.permissions.threads': {
                'can_see_all_threads': 1,
                'can_start_threads': 1,
                'can_reply_threads': 1,
                'can_edit_threads': 2,
                'can_edit_posts': 2,
                'can_hide_own_threads': 2,
                'can_hide_own_posts': 2,
                'thread_edit_time': 0,
                'post_edit_time': 0,
                'can_hide_threads': 2,
                'can_hide_posts': 2,
                'can_protect_posts': 1,
                'can_move_posts': 1,
                'can_merge_posts': 1,
                'can_announce_threads': 1,
                'can_pin_threads': 2,
                'can_close_threads': 1,
                'can_move_threads': 1,
                'can_merge_threads': 1,
                'can_approve_content': 1,
                'can_download_other_users_attachments': 1,
                'max_attachment_size': 2500,
                'can_delete_other_users_attachments': 1,
                'can_see_posts_likes': 2,
                'can_like_posts': 1,
                'can_report_content': 1,
                'can_see_reports': 1,
                'can_hide_events': 2
            },
        })
    moderator.save()

    """
    Assign category roles to roles
    """
    Category = apps.get_model('misago_categories', 'Category')
    Role = apps.get_model('misago_acl', 'Role')
    RoleCategoryACL = apps.get_model('misago_categories', 'RoleCategoryACL')

    moderators = Role.objects.get(name=_('Moderator'))
    members = Role.objects.get(special_role='authenticated')
    guests = Role.objects.get(special_role='anonymous')

    category = Category.objects.get(tree_id=1, level=1)

    RoleCategoryACL.objects.bulk_create([
        RoleCategoryACL(
            role=moderators,
            category=category,
            category_role=moderator
        ),

        RoleCategoryACL(
            role=members,
            category=category,
            category_role=standard
        ),

        RoleCategoryACL(
            role=guests,
            category=category,
            category_role=read_only
        ),
    ])

Example 7

Project: Misago
Source File: 0002_users_settings.py
def create_users_settings_group(apps, schema_editor):
    migrate_settings_group(
        apps,
        {
            'key': 'users',
            'name': _("Users"),
            'description': _("Those settings control user accounts default behaviour and features availability."),
            'settings': (
                {
                    'setting': 'account_activation',
                    'name': _("New accounts activation"),
                    'legend': _("New accounts"),
                    'value': 'none',
                    'form_field': 'select',
                    'field_extra': {
                        'choices': (
                            ('none', _("No activation required")),
                            ('user', _("Activation Token sent to User")),
                            ('admin', _("Activation by Administrator")),
                            ('closed', _("Don't allow new registrations"))
                        )
                    },
                    'is_public': True,
                },
                {
                    'setting': 'username_length_min',
                    'name': _("Minimum length"),
                    'description': _("Minimum allowed username length."),
                    'legend': _("User names"),
                    'python_type': 'int',
                    'value': 3,
                    'field_extra': {
                        'min_value': 2,
                        'max_value': 20,
                    },
                    'is_public': True,
                },
                {
                    'setting': 'username_length_max',
                    'name': _("Maximum length"),
                    'description': _("Maximum allowed username length."),
                    'python_type': 'int',
                    'value': 14,
                    'field_extra': {
                        'min_value': 2,
                        'max_value': 20,
                    },
                    'is_public': True,
                },
                {
                    'setting': 'password_length_min',
                    'name': _("Minimum length"),
                    'description': _("Minimum allowed user password length."),
                    'legend': _("Passwords"),
                    'python_type': 'int',
                    'value': 5,
                    'field_extra': {
                        'min_value': 2,
                        'max_value': 255,
                    },
                    'is_public': True,
                },
                {
                    'setting': 'allow_custom_avatars',
                    'name': _("Allow custom avatars"),
                    'legend': _("Avatars"),
                    'description': _("Turning this option off will forbid "
                                     "forum users from using avatars from "
                                     "outside forums. Good for forums "
                                     "addressed at young users."),
                    'python_type': 'bool',
                    'value': True,
                    'form_field': 'yesno',
                },
                {
                    'setting': 'default_avatar',
                    'name': _("Default avatar"),
                    'value': 'gravatar',
                    'form_field': 'select',
                    'field_extra': {
                        'choices': (
                            ('dynamic', _("Individual")),
                            ('gravatar', _("Gravatar")),
                            ('gallery', _("Random avatar from gallery")),
                        ),
                    },
                },
                {
                    'setting': 'default_gravatar_fallback',
                    'name': _("Fallback for default gravatar"),
                    'description': _("Select which avatar to use when user "
                                     "has no gravatar associated with his "
                                     "e-mail address."),
                    'value': 'dynamic',
                    'form_field': 'select',
                    'field_extra': {
                        'choices': (
                            ('dynamic', _("Individual")),
                            ('gallery', _("Random avatar from gallery")),
                        ),
                    },
                },
                {
                    'setting': 'avatar_upload_limit',
                    'name': _("Maximum size of uploaded avatar"),
                    'description': _("Enter maximum allowed file size "
                                     "(in KB) for avatar uploads"),
                    'python_type': 'int',
                    'value': 750,
                    'field_extra': {
                        'min_value': 0,
                    },
                    'is_public': True,
                },
                {
                    'setting': 'signature_length_max',
                    'name': _("Maximum length"),
                    'legend': _("Signatures"),
                    'description': _("Maximum allowed signature length."),
                    'python_type': 'int',
                    'value': 256,
                    'field_extra': {
                        'min_value': 10,
                        'max_value': 5000,
                    },
                    'is_public': True,
                },
                {
                    'setting': 'subscribe_start',
                    'name': _("Started threads"),
                    'legend': _("Default subscriptions settings"),
                    'value': 'watch_email',
                    'form_field': 'select',
                    'field_extra': {
                        'choices': (
                            ('no', _("Don't watch")),
                            ('watch', _("Put on watched threads list")),
                            ('watch_email', _("Put on watched threads "
                                              "list and e-mail user when "
                                              "somebody replies")),
                        ),
                    },
                },
                {
                    'setting': 'subscribe_reply',
                    'name': _("Replied threads"),
                    'value': 'watch_email',
                    'form_field': 'select',
                    'field_extra': {
                        'choices': (
                            ('no', _("Don't watch")),
                            ('watch', _("Put on watched threads list")),
                            ('watch_email', _("Put on watched threads "
                                              "list and e-mail user when "
                                              "somebody replies")),
                        ),
                    },
                },
            )
        })

    migrate_settings_group(
        apps,
        {
            'key': 'captcha',
            'name': _("CAPTCHA"),
            'description': _("Those settings allow you to combat automatic "
                             "registrations on your forum."),
            'settings': (
                {
                    'setting': 'captcha_type',
                    'name': _("Select CAPTCHA type"),
                    'legend': _("CAPTCHA type"),
                    'value': 'no',
                    'form_field': 'select',
                    'field_extra': {
                        'choices': (
                            ('no', _("No CAPTCHA")),
                            ('re', _("reCaptcha")),
                            ('qa', _("Question and answer")),
                        ),
                    },
                    'is_public': True,
                },
                {
                    'setting': 'recaptcha_site_key',
                    'name': _("Site key"),
                    'legend': _("reCAPTCHA"),
                    'value': '',
                    'field_extra': {
                        'required': False,
                        'max_length': 100,
                    },
                    'is_public': True,
                },
                {
                    'setting': 'recaptcha_secret_key',
                    'name': _("Secret key"),
                    'value': '',
                    'field_extra': {
                        'required': False,
                        'max_length': 100,
                    },
                },
                {
                    'setting': 'qa_question',
                    'name': _("Test question"),
                    'legend': _("Question and answer"),
                    'value': '',
                    'field_extra': {
                        'required': False,
                        'max_length': 250,
                    },
                },
                {
                    'setting': 'qa_help_text',
                    'name': _("Question help text"),
                    'value': '',
                    'field_extra': {
                        'required': False,
                        'max_length': 250,
                    },
                },
                {
                    'setting': 'qa_answers',
                    'name': _("Valid answers"),
                    'description': _("Enter each answer in new line. "
                                     "Answers are case-insensitive."),
                    'value': '',
                    'form_field': 'textarea',
                    'field_extra': {
                        'rows': 4,
                        'required': False,
                        'max_length': 250,
                    },
                },
            )
        })

Example 8

Project: rapidpro
Source File: models.py
    @classmethod
    def get_filtered_value_summary(cls, ruleset=None, contact_field=None, filters=None, return_contacts=False, filter_contacts=None):
        """
        Return summary results for the passed in values, optionally filtering by a passed in filter on the contact.

        This will try to aggregate results based on the values found.

        Filters expected in the following formats:
            { ruleset: rulesetId, categories: ["Red", "Blue", "Yellow"] }
            { groups: 12,124,15 }
            { location: 1515, boundary: "f1551" }
            { contact_field: fieldId, values: ["UK", "RW"] }
        """
        from temba.flows.models import RuleSet, FlowStep
        from temba.contacts.models import Contact

        start = time.time()

        # caller may identify either a ruleset or contact field to summarize
        if (not ruleset and not contact_field) or (ruleset and contact_field):
            raise ValueError("Must define either a RuleSet or ContactField to summarize values for")

        if ruleset:
            (categories, uuid_to_category) = ruleset.build_uuid_to_category_map()

        org = ruleset.flow.org if ruleset else contact_field.org

        # this is for the case when we are filtering across our own categories, we build up the category uuids we will
        # pay attention then filter before we grab the actual values
        self_filter_uuids = []

        org_contacts = Contact.objects.filter(org=org, is_test=False, is_active=True)

        if filters:
            if filter_contacts is None:
                contacts = org_contacts
            else:
                contacts = Contact.objects.filter(pk__in=filter_contacts)

            for contact_filter in filters:
                # empty filters are no-ops
                if not contact_filter:
                    continue

                # we are filtering by another rule
                if 'ruleset' in contact_filter:
                    # load the ruleset for this filter
                    filter_ruleset = RuleSet.objects.get(pk=contact_filter['ruleset'])
                    (filter_cats, filter_uuids) = filter_ruleset.build_uuid_to_category_map()

                    uuids = []
                    for (uuid, category) in filter_uuids.items():
                        if category in contact_filter['categories']:
                            uuids.append(uuid)

                    contacts = contacts.filter(values__rule_uuid__in=uuids)

                    # this is a self filter, save the uuids for later filtering
                    if ruleset and ruleset.pk == filter_ruleset.pk:
                        self_filter_uuids = uuids

                # we are filtering by one or more groups
                elif 'groups' in contact_filter:
                    # filter our contacts by that group
                    for group_id in contact_filter['groups']:
                        contacts = contacts.filter(all_groups__pk=group_id)

                # we are filtering by one or more admin boundaries
                elif 'boundary' in contact_filter:
                    boundaries = contact_filter['boundary']
                    if not isinstance(boundaries, list):
                        boundaries = [boundaries]

                    # filter our contacts by those that are in that location boundary
                    contacts = contacts.filter(values__contact_field__id=contact_filter['location'],
                                               values__location_value__osm_id__in=boundaries)

                # we are filtering by a contact field
                elif 'contact_field' in contact_filter:
                    contact_query = Q()

                    # we can't use __in as we want case insensitive matching
                    for value in contact_filter['values']:
                        contact_query |= Q(values__contact_field__id=contact_filter['contact_field'],
                                           values__string_value__iexact=value)

                    contacts = contacts.filter(contact_query)

                else:
                    raise ValueError("Invalid filter definition, must include 'group', 'ruleset', 'contact_field' or 'boundary'")

            contacts = set([c['id'] for c in contacts.values('id')])

        else:
            # no filter, default either to all contacts or our filter contacts
            if filter_contacts:
                contacts = filter_contacts
            else:
                contacts = set([c['id'] for c in org_contacts.values('id')])

        # we are summarizing a flow ruleset
        if ruleset:
            filter_uuids = set(self_filter_uuids)

            # grab all the flow steps for this ruleset, this gets us the most recent run for each contact
            steps = [fs for fs in FlowStep.objects.filter(step_uuid=ruleset.uuid)
                                                  .values('arrived_on', 'rule_uuid', 'contact')
                                                  .order_by('-arrived_on')]

            # this will build up sets of contacts for each rule uuid
            seen_contacts = set()
            value_contacts = defaultdict(set)
            for step in steps:
                contact = step['contact']
                if contact in contacts:
                    if contact not in seen_contacts:
                        value_contacts[step['rule_uuid']].add(contact)
                        seen_contacts.add(contact)

            results = defaultdict(set)
            for uuid, contacts in value_contacts.items():
                if uuid and (not filter_uuids or uuid in filter_uuids):
                    category = uuid_to_category.get(uuid, None)
                    if category:
                        results[category] |= contacts

            # now create an ordered array of our results
            set_contacts = set()
            for category in categories:
                contacts = results.get(category['label'], set())
                if return_contacts:
                    category['contacts'] = contacts

                category['count'] = len(contacts)
                set_contacts |= contacts

            # how many runs actually entered a response?
            set_contacts = set_contacts
            unset_contacts = value_contacts[None]

        # we are summarizing based on contact field
        else:
            values = Value.objects.filter(contact_field=contact_field)

            if contact_field.value_type == Value.TYPE_TEXT:
                values = values.values('string_value', 'contact')
                categories, set_contacts = cls._filtered_values_to_categories(contacts, values, 'string_value',
                                                                              return_contacts=return_contacts)

            elif contact_field.value_type == Value.TYPE_DECIMAL:
                values = values.values('decimal_value', 'contact')
                categories, set_contacts = cls._filtered_values_to_categories(contacts, values, 'decimal_value',
                                                                              formatter=format_decimal,
                                                                              return_contacts=return_contacts)

            elif contact_field.value_type == Value.TYPE_DATETIME:
                values = values.extra({'date_value': "date_trunc('day', datetime_value)"}).values('date_value', 'contact')
                categories, set_contacts = cls._filtered_values_to_categories(contacts, values, 'date_value',
                                                                              return_contacts=return_contacts)

            elif contact_field.value_type in [Value.TYPE_STATE, Value.TYPE_DISTRICT, Value.TYPE_WARD]:
                values = values.values('location_value__osm_id', 'contact')
                categories, set_contacts = cls._filtered_values_to_categories(contacts, values, 'location_value__osm_id',
                                                                              return_contacts=return_contacts)

            else:
                raise ValueError(_("Summary of contact fields with value type of %s is not supported" % contact_field.get_value_type_display()))

            set_contacts = contacts & set_contacts
            unset_contacts = contacts - set_contacts

        print "RulesetSummary [%f]: %s contact_field: %s with filters: %s" % (time.time() - start, ruleset, contact_field, filters)

        if return_contacts:
            return (set_contacts, unset_contacts, categories)
        else:
            return (len(set_contacts), len(unset_contacts), categories)
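
Note on the _() calls above: the last ValueError interpolates the value into the string before it is passed to _(), so the already-formatted text is what gets looked up in the message catalog and translators never see a stable msgid. The usual gettext pattern keeps the placeholder inside the translated string and interpolates afterwards. A minimal sketch of that pattern (the helper function is made up for illustration, assuming the Django 1.x ugettext alias these projects use):

from django.utils.translation import ugettext as _

def unsupported_type_error(type_display):
    # The msgid in the catalog keeps the literal %s placeholder; the value is
    # substituted only after the translation lookup has happened.
    return ValueError(_("Summary of contact fields with value type of %s is not supported") % type_display)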

Example 9

Project: rapidpro
Source File: models.py
View license
    @classmethod
    def get_value_summary(cls, ruleset=None, contact_field=None, filters=None, segment=None):
        """
        Returns the results for the passed in ruleset or contact field given the passed in filters and segments.

        Filters are expected in the following formats:
            { field: rulesetId, categories: ["Red", "Blue", "Yellow"] }

        Segments are expected in these formats instead:
            { ruleset: 1515, categories: ["Red", "Blue"] }  // segmenting by another field, for those categories
            { groups: 124,151,151 }                         // segment by each group in the passed in ids
            { location: "State", parent: null }             // segment for each admin boundary within the parent
            { contact_field: "Country", values: ["US", "EN", "RW"] } // segment by a contact field for these values
        """
        from temba.contacts.models import ContactGroup, ContactField
        from temba.flows.models import TrueTest, RuleSet

        # start = time.time()
        results = []

        if (not ruleset and not contact_field) or (ruleset and contact_field):
            raise ValueError("Must specify either a RuleSet or Contact field.")

        org = ruleset.flow.org if ruleset else contact_field.org

        open_ended = ruleset and ruleset.ruleset_type == RuleSet.TYPE_WAIT_MESSAGE and len(ruleset.get_rules()) == 1

        # default our filters to an empty list if None are passed in
        if filters is None:
            filters = []

        # build the kwargs for our subcall
        kwargs = dict(ruleset=ruleset, contact_field=contact_field, filters=filters)

        # this is our list of dependencies, that is things that will blow away our results
        dependencies = set()
        fingerprint_dict = dict(filters=filters, segment=segment)
        if ruleset:
            fingerprint_dict['ruleset'] = ruleset.id
            dependencies.add(RULESET_KEY % ruleset.id)
        if contact_field:
            fingerprint_dict['contact_field'] = contact_field.id
            dependencies.add(CONTACT_KEY % contact_field.id)

        for contact_filter in filters:
            if 'ruleset' in contact_filter:
                dependencies.add(RULESET_KEY % contact_filter['ruleset'])
            if 'groups' in contact_filter:
                for group_id in contact_filter['groups']:
                    dependencies.add(GROUP_KEY % group_id)
            if 'location' in contact_filter:
                field = ContactField.get_by_label(org, contact_filter['location'])
                dependencies.add(CONTACT_KEY % field.id)

        if segment:
            if 'ruleset' in segment:
                dependencies.add(RULESET_KEY % segment['ruleset'])
            if 'groups' in segment:
                for group_id in segment['groups']:
                    dependencies.add(GROUP_KEY % group_id)
            if 'location' in segment:
                field = ContactField.get_by_label(org, segment['location'])
                dependencies.add(CONTACT_KEY % field.id)

        # our final redis key will contain each dependency as well as a HASH representing the fingerprint of the
        # kwargs passed to this method, generate that hash
        fingerprint = hash(dict_to_json(fingerprint_dict))

        # generate our key
        key = VALUE_SUMMARY_CACHE_KEY + ":" + str(org.id) + ":".join(sorted(list(dependencies))) + ":" + str(fingerprint)

        # does our value exist?
        r = get_redis_connection()
        cached = r.get(key)

        if cached is not None:
            try:
                return json_to_dict(cached)
            except Exception:
                # failed decoding, oh well, go calculate it instead
                pass

        if segment:
            # segmenting a result is the same as calculating the result with the addition of each
            # category as a filter so we expand upon the passed in filters to do this
            if 'ruleset' in segment and 'categories' in segment:
                for category in segment['categories']:
                    category_filter = list(filters)
                    category_filter.append(dict(ruleset=segment['ruleset'], categories=[category]))

                    # calculate our results for this segment
                    kwargs['filters'] = category_filter
                    (set_count, unset_count, categories) = cls.get_filtered_value_summary(**kwargs)
                    results.append(dict(label=category, open_ended=open_ended, set=set_count, unset=unset_count, categories=categories))

            # segmenting by groups instead, same principle but we add group filters
            elif 'groups' in segment:
                for group_id in segment['groups']:
                    # load our group
                    group = ContactGroup.user_groups.get(org=org, pk=group_id)

                    category_filter = list(filters)
                    category_filter.append(dict(groups=[group_id]))

                    # calculate our results for this segment
                    kwargs['filters'] = category_filter
                    (set_count, unset_count, categories) = cls.get_filtered_value_summary(**kwargs)
                    results.append(dict(label=group.name, open_ended=open_ended, set=set_count, unset=unset_count, categories=categories))

            # segmenting by a contact field, only for passed in categories
            elif 'contact_field' in segment and 'values' in segment:
                # look up the contact field
                field = ContactField.get_by_label(org, segment['contact_field'])

                for value in segment['values']:
                    value_filter = list(filters)
                    value_filter.append(dict(contact_field=field.pk, values=[value]))

                    # calculate our results for this segment
                    kwargs['filters'] = value_filter
                    (set_count, unset_count, categories) = cls.get_filtered_value_summary(**kwargs)
                    results.append(dict(label=value, open_ended=open_ended, set=set_count, unset=unset_count, categories=categories))

            # segmenting by a location field
            elif 'location' in segment:
                # look up the contact field
                field = ContactField.get_by_label(org, segment['location'])

                # make sure they are segmenting on a location type that makes sense
                if field.value_type not in [Value.TYPE_STATE, Value.TYPE_DISTRICT, Value.TYPE_WARD]:
                    raise ValueError(_("Cannot segment on location for field that is not a State or District type"))

                # make sure our org has a country for location based responses
                if not org.country:
                    raise ValueError(_("Cannot segment by location until country has been selected for organization"))

                # the boundaries we will segment by
                parent = org.country

                # figure out our parent
                parent_osm_id = segment.get('parent', None)
                if parent_osm_id:
                    parent = AdminBoundary.objects.get(osm_id=parent_osm_id)

                # get all the boundaries we are segmenting on
                boundaries = list(AdminBoundary.objects.filter(parent=parent).order_by('name'))

                # if the field is a district field, they need to specify the parent state
                if not parent_osm_id and field.value_type == Value.TYPE_DISTRICT:
                    raise ValueError(_("You must specify a parent state to segment results by district"))

                if not parent_osm_id and field.value_type == Value.TYPE_WARD:
                    raise ValueError(_("You must specify a parent state to segment results by ward"))

                # if this is a district, we can speed things up by only including those districts in our parent, build
                # the filter for that
                if parent and field.value_type in [Value.TYPE_DISTRICT, Value.TYPE_WARD]:
                    location_filters = [filters, dict(location=field.pk, boundary=[b.osm_id for b in boundaries])]
                else:
                    location_filters = filters

                # get all the contacts segment by location first
                (location_set_contacts, location_unset_contacts, location_results) = \
                    cls.get_filtered_value_summary(contact_field=field, filters=location_filters, return_contacts=True)

                # now get the contacts for our primary query
                kwargs['return_contacts'] = True
                kwargs['filter_contacts'] = location_set_contacts
                (primary_set_contacts, primary_unset_contacts, primary_results) = cls.get_filtered_value_summary(**kwargs)

                # build a map of osm_id to location_result
                osm_results = {lr['label']: lr for lr in location_results}
                empty_result = dict(contacts=list())

                for boundary in boundaries:
                    location_result = osm_results.get(boundary.osm_id, empty_result)

                    # clone our primary results
                    segmented_results = dict(label=boundary.name,
                                             boundary=boundary.osm_id,
                                             open_ended=open_ended)

                    location_categories = list()
                    location_contacts = set(location_result['contacts'])

                    for category in primary_results:
                        category_contacts = set(category['contacts'])

                        intersection = location_contacts & category_contacts
                        location_categories.append(dict(label=category['label'], count=len(intersection)))

                    segmented_results['set'] = len(location_contacts & primary_set_contacts)
                    segmented_results['unset'] = len(location_contacts & primary_unset_contacts)
                    segmented_results['categories'] = location_categories
                    results.append(segmented_results)

                results = sorted(results, key=lambda r: r['label'])

        else:
            (set_count, unset_count, categories) = cls.get_filtered_value_summary(**kwargs)

            # Check whether we have an OPEN ENDED ruleset
            if ruleset and len(ruleset.get_rules()) == 1 and isinstance(ruleset.get_rules()[0].test, TrueTest):
                cursor = connection.cursor()

                custom_sql = """SELECT w.label, count(*) AS count FROM (
                    SELECT
                      regexp_split_to_table(LOWER(text), E'[^[:alnum:]_]') AS label
                    FROM msgs_msg INNER JOIN contacts_contact ON ( msgs_msg.contact_id = contacts_contact.id )
                    WHERE msgs_msg.id IN (
                      SELECT
                        msg_id
                        FROM flows_flowstep_messages, flows_flowstep
                        WHERE flowstep_id = flows_flowstep.id AND
                        flows_flowstep.step_uuid = '%s'
                      ) AND contacts_contact.is_test = False
                  ) w group by w.label order by count desc;""" % ruleset.uuid

                cursor.execute(custom_sql)
                unclean_categories = get_dict_from_cursor(cursor)
                categories = []

                org_languages = [lang.name.lower() for lang in org.languages.filter(orgs=None).distinct()]

                if 'english' not in org_languages:
                    org_languages.append('english')

                ignore_words = []
                for lang in org_languages:
                    ignore_words += safe_get_stop_words(lang)

                for category in unclean_categories:
                    if len(category['label']) > 1 and category['label'] not in ignore_words and len(categories) < 100:
                        categories.append(dict(label=category['label'], count=int(category['count'])))

                # sort by count, then alphabetically
                categories = sorted(categories, key=lambda c: (-c['count'], c['label']))

            results.append(dict(label=unicode(_("All")), open_ended=open_ended, set=set_count, unset=unset_count, categories=categories))

        # for each of our dependencies, add our key as something that depends on it
        pipe = r.pipeline()
        for dependency in dependencies:
            pipe.sadd(dependency, key)
            pipe.expire(dependency, VALUE_SUMMARY_CACHE_TIME)

        # and finally set our result
        pipe.set(key, dict_to_json(results), VALUE_SUMMARY_CACHE_TIME)
        pipe.execute()

        # leave me: nice for profiling..
        # from django.db import connection as db_connection, reset_queries
        # print "=" * 80
        # for query in db_connection.queries:
        #    print "%s - %s" % (query['time'], query['sql'][:1000])
        # print "-" * 80
        # print "took: %f" % (time.time() - start)
        # print "=" * 80
        # reset_queries()

        return results
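
For orientation, a minimal sketch of how this classmethod might be called, following the filter and segment formats described in the docstring (the Value model and the ruleset instance come from the surrounding code; the ids and category names are made up, and a populated rapidpro database is assumed):

# Summarize responses to a ruleset, keep only contacts who answered another
# ruleset with "Red" or "Blue", and break the result out per contact group.
results = Value.get_value_summary(
    ruleset=ruleset,
    filters=[dict(ruleset=1515, categories=["Red", "Blue"])],
    segment=dict(groups=[124, 151]),
)
# Each entry looks roughly like:
# {'label': 'Reporters', 'open_ended': False, 'set': 10, 'unset': 3, 'categories': [...]}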

Example 10

Project: django-modelclone
Source File: admin.py
View license
    def clone_view(self, request, object_id, form_url='', extra_context=None):
        opts = self.model._meta

        if not self.has_add_permission(request):
            raise PermissionDenied

        original_obj = self.get_object(request, unquote(object_id))

        if original_obj is None:
            raise Http404(_('{name} object with primary key {key} does not exist.'.format(
                name=force_text(opts.verbose_name),
                key=repr(escape(object_id))
            )))

        ModelForm = self.get_form(request)
        formsets = []

        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES)
            if form.is_valid():
                new_object = self.save_form(request, form, change=False)
                form_validated = True
            else:
                new_object = self.model()
                form_validated = False

            prefixes = {}
            for FormSet, inline in self.get_formsets_with_inlines(request):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1
                if prefixes[prefix] != 1 or not prefix:
                    prefix = "%s-%s" % (prefix, prefixes[prefix])
                formset = FormSet(data=request.POST, files=request.FILES,
                                  instance=new_object,
                                  save_as_new="_saveasnew" in request.POST,   # ????
                                  prefix=prefix)
                formsets.append(formset)

            if all_valid(formsets) and form_validated:

                # if original model has any file field, save new model
                # with same paths to these files
                for name in vars(original_obj):
                    field = getattr(original_obj, name)
                    if isinstance(field, FieldFile) and name not in request.FILES:
                        setattr(new_object, name, field)

                self.save_model(request, new_object, form, False)
                self.save_related(request, form, formsets, False)
                try:
                    self.log_addition(request, new_object)
                except TypeError:
                    # In Django 1.9 we need one more param
                    self.log_addition(request, new_object, "Cloned object")

                return self.response_add(request, new_object, None)

        else:
            initial = model_to_dict(original_obj)
            initial = self.tweak_cloned_fields(initial)
            form = ModelForm(initial=initial)

            prefixes = {}
            for FormSet, inline in self.get_formsets_with_inlines(request):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1
                if prefixes[prefix] != 1 or not prefix:
                    prefix = "%s-%s" % (prefix, prefixes[prefix])
                initial = []

                queryset = inline.get_queryset(request).filter(
                    **{FormSet.fk.name: original_obj})
                for obj in queryset:
                    initial.append(model_to_dict(obj, exclude=[obj._meta.pk.name,
                                                               FormSet.fk.name]))
                initial = self.tweak_cloned_inline_fields(prefix, initial)
                formset = FormSet(prefix=prefix, initial=initial)
                # Since there is no way to customize the `extra` in the constructor,
                # construct the forms again...
                # most of this view is a hack, but this is the ugliest one
                formset.extra = len(initial) + formset.extra
                # _construct_forms() was removed on django 1.6
                # see https://github.com/django/django/commit/ef79582e8630cb3c119caed52130c9671188addd
                if hasattr(formset, '_construct_forms'):
                    formset._construct_forms()
                formsets.append(formset)

        admin_form = helpers.AdminForm(
            form,
            list(self.get_fieldsets(request)),
            self.get_prepopulated_fields(request),
            self.get_readonly_fields(request),
            model_admin=self
        )
        media = self.media + admin_form.media

        inline_admin_formsets = []
        for inline, formset in zip(self.get_inline_instances(request), formsets):
            fieldsets = list(inline.get_fieldsets(request, original_obj))
            readonly = list(inline.get_readonly_fields(request, original_obj))
            prepopulated = dict(inline.get_prepopulated_fields(request, original_obj))
            inline_admin_formset = InlineAdminFormSetFakeOriginal(inline, formset,
                fieldsets, prepopulated, readonly, model_admin=self)
            inline_admin_formsets.append(inline_admin_formset)
            media = media + inline_admin_formset.media


        title = u'{0} {1}'.format(self.clone_verbose_name, opts.verbose_name)

        context = {
            'title': title,
            'original': title,
            'adminform': admin_form,
            'is_popup': "_popup" in getattr(request, 'REQUEST', request.GET),
            'show_delete': False,
            'media': media,
            'inline_admin_formsets': inline_admin_formsets,
            'errors': helpers.AdminErrorList(form, formsets),
            'app_label': opts.app_label,
        }
        context.update(extra_context or {})

        return self.render_change_form(request,
            context,
            form_url=form_url,
            change=False
        )
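
A note on the Http404 message above: .format() runs before _(), so the catalog lookup sees the fully substituted text rather than a reusable msgid. When the translation should come first, the common pattern is to translate the template and then substitute; a minimal sketch, not the library's own code:

from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.translation import ugettext as _

def missing_object_message(opts, object_id):
    # The msgid keeps the {name} and {key} placeholders, so translators can reorder them.
    return _('{name} object with primary key {key} does not exist.').format(
        name=force_text(opts.verbose_name),
        key=repr(escape(object_id)),
    )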

Example 11

Project: virtmgr
Source File: views.py
View license
def pool(request, host_id, pool):

	if not request.user.is_authenticated():
		return HttpResponseRedirect('/')

	kvm_host = Host.objects.get(user=request.user.id, id=host_id)

	def add_error(msg, type_err):
		error_msg = Log(host_id=host_id, 
			            type=type_err, 
			            message=msg, 
			            user_id=request.user.id
			            )
		error_msg.save()

	def get_vms():
		try:
			vname = {}
			for id in conn.listDomainsID():
				id = int(id)
				dom = conn.lookupByID(id)
				vname[dom.name()] = dom.info()[0]
			for id in conn.listDefinedDomains():
				dom = conn.lookupByName(id)
				vname[dom.name()] = dom.info()[0]
			return vname
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_networks():
		try:
			networks = {}
			for name in conn.listNetworks():
				net = conn.networkLookupByName(name)
				status = net.isActive()
				networks[name] = status
			for name in conn.listDefinedNetworks():
				net = conn.networkLookupByName(name)
				status = net.isActive()
				networks[name] = status
			return networks
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def vm_conn():
		try:
			flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
			auth = [flags, creds, None]
			uri = 'qemu+tcp://' + kvm_host.ipaddr + '/system'
			conn = libvirt.openAuth(uri, auth, 0)
			return conn
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	if not kvm_host.login or not kvm_host.passwd:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = request.session['login_kvm']
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = request.session['passwd_kvm']
				else:
					return -1
			return 0
	else:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = kvm_host.login
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = kvm_host.passwd
				else:
					return -1
			return 0
			
	def get_conn_pool(pool):
		try:
			net = conn.networkLookupByName(pool)
			return net
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def pool_start():
		try:
			net.create()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def pool_stop():
		try:
			net.destroy()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def pool_delete():
		try:
			net.undefine()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def net_set_autostart(pool):
		try:
			net = conn.networkLookupByName(pool)
			net.setAutostart(1)
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_net_info(get):
		try:
			if get == "bridge":
				return net.bridgeName()
			elif get == "status":
				return net.isActive()
			elif get == "start":
				return net.autostart()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_ipv4_net():
		try:
			net = conn.networkLookupByName(pool)
			xml = net.XMLDesc(0)
			addrStr = util.get_xml_path(xml, "/network/ip/@address")
			netmaskStr = util.get_xml_path(xml, "/network/ip/@netmask")

			netmask = IP(netmaskStr)
			gateway = IP(addrStr)

			network = IP(gateway.int() & netmask.int())
			return IP(str(network) + "/" + netmaskStr)
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_ipv4_dhcp_range():
		try:
			net = conn.networkLookupByName(pool)
			xml = net.XMLDesc(0)
			dhcpstart = util.get_xml_path(xml, "/network/ip/dhcp/range[1]/@start")
			dhcpend = util.get_xml_path(xml, "/network/ip/dhcp/range[1]/@end")
			if not dhcpstart or not dhcpend:
				return None
			
			return [IP(dhcpstart), IP(dhcpend)]
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_ipv4_forward():
		try:
			xml = net.XMLDesc(0)
			fw = util.get_xml_path(xml, "/network/forward/@mode")
			forwardDev = util.get_xml_path(xml, "/network/forward/@dev")
			return [fw, forwardDev]
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def create_net_pool(name_pool, forward, ipaddr, netmask, dhcp, s_dhcp, e_dhcp):
		try:
			xml = """
				<network>
					<name>%s</name>""" % (name_pool)

			if forward == "nat" or forward == "route":
				xml += """<forward mode='%s'/>""" % (forward)

			xml += """<bridge stp='on' delay='0' />
						<ip address='%s' netmask='%s'>""" % (gw_ipaddr, netmask)

			if dhcp == "yes":
				xml += """<dhcp>
							<range start='%s' end='%s' />
						</dhcp>""" % (s_dhcp, e_dhcp)
					
			xml += """</ip>
				</network>"""
			conn.networkDefineXML(xml)
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	conn = vm_conn()

	if conn == "error":
		return HttpResponseRedirect('/overview/%s/' % (host_id))

	pools = get_networks()
	all_vm = get_vms()
	errors = []

	if pool != 'new_net_pool':
		net = get_conn_pool(pool)
		bridge = get_net_info('bridge')
		status = get_net_info('status')
		if status == 1:
			start = get_net_info('start')
			network = get_ipv4_net()
			dhcprange = get_ipv4_dhcp_range()
			netmode = get_ipv4_forward()

	if request.method == 'POST':
		if request.POST.get('new_net_pool',''):
			name_pool = request.POST.get('name_pool','')
			net_addr = request.POST.get('net_addr','')
			forward = request.POST.get('forward','')
			dhcp = request.POST.get('dhcp','')
			simbol = re.search('[^a-zA-Z0-9\_]+', name_pool)
			if len(name_pool) > 20:
				msg = _('The name of the network pool must not exceed 20 characters')
				errors.append(msg)
			if simbol:
				msg = _('The name of the network pool must not contain any special characters or Russian characters')
				errors.append(msg)
			if not name_pool:
				msg = _('Enter the name of the pool')
				errors.append(msg)
			if not net_addr:
				msg = _('Enter the IP subnet')
				errors.append(msg)
			try:
				netmask = IP(net_addr).strNetmask()
				ipaddr = IP(net_addr)
				gw_ipaddr = ipaddr[1].strNormal()
				start_dhcp = ipaddr[2].strNormal()
				end_dhcp = ipaddr[254].strNormal()
			except:
				msg = _('IP subnet must be 192.168.1.0/24 or 192.168.1.0/26')
				errors.append(msg)
			if errors:
				return render_to_response('network.html', locals())
			if not errors:
				if create_net_pool(name_pool, forward, gw_ipaddr, netmask, dhcp, start_dhcp, end_dhcp) == "error":
					msg = _('Such a pool already exists')
					errors.append(msg)
				if not errors:
					net_set_autostart(name_pool)
					net = get_conn_pool(name_pool)
					if pool_start() == "error":
						msg = _('The pool was created, but starting it failed; you may have specified an existing network')
						errors.append(msg)
					else:
						msg = _('Creating a network pool: ') 
						msg = msg + name_pool
						add_error(msg, 'user')
						return HttpResponseRedirect('/network/%s/%s/' % (host_id, name_pool))
					if errors:
						return render_to_response('network.html', locals())
		if request.POST.get('stop_pool',''):
			msg = _('Stop network pool: ')
			msg = msg + pool
			pool_stop()
			add_error(msg, 'user')
		if request.POST.get('start_pool',''):
			msg = _('Start network pool: ')
			msg = msg + pool
			pool_start()
			add_error(msg, 'user')
		if request.POST.get('del_pool',''):
			msg = _('Delete network pool: ')
			msg = msg + pool
			pool_delete()
			add_error(msg, 'user')
			return HttpResponseRedirect('/network/%s/' % (host_id))
		return HttpResponseRedirect('/network/%s/%s/' % (host_id, pool))

	conn.close()

	return render_to_response('network.html', locals())
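
Several log messages in this view are built by concatenating a translated prefix with a value, e.g. msg = _('Creating a network pool: ') followed by msg = msg + name_pool. That works, but a placeholder inside a single msgid usually gives translators more freedom with word order; a minimal sketch of the same message (the helper name is made up):

from django.utils.translation import ugettext as _

def network_pool_created_message(name_pool):
    # One msgid with a named placeholder instead of a translated prefix plus concatenation.
    return _('Creating a network pool: %(name)s') % {'name': name_pool}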

Example 12

Project: virtmgr
Source File: views.py
View license
def index(request, host_id):

	if not request.user.is_authenticated():
		return HttpResponseRedirect('/user/login')

	kvm_host = Host.objects.get(user=request.user.id, id=host_id)

	def add_error(msg, type_err):
		error_msg = Log(host_id=host_id, 
			            type=type_err, 
			            message=msg, 
			            user_id=request.user.id
			            )
		error_msg.save()

	if not kvm_host.login or not kvm_host.passwd:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = request.session['login_kvm']
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = request.session['passwd_kvm']
				else:
					return -1
			return 0
	else:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = kvm_host.login
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = kvm_host.passwd
				else:
					return -1
			return 0

	def vm_conn():
		flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
		auth = [flags, creds, None]
		uri = 'qemu+tcp://' + kvm_host.ipaddr + '/system'
		try:
			conn = libvirt.openAuth(uri, auth, 0)
			return conn
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_all_vm():
		try:
			vname = {}
			for id in conn.listDomainsID():
				id = int(id)
				dom = conn.lookupByID(id)
				vname[dom.name()] = dom.info()[0]
			for id in conn.listDefinedDomains():
				dom = conn.lookupByName(id)
				vname[dom.name()] = dom.info()[0]
			return vname
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"
	
	def get_all_stg():
		try:
			storages = []
			for name in conn.listStoragePools():
				storages.append(name)
			for name in conn.listDefinedStoragePools():
				storages.append(name)
			return storages
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_all_net():
		try:
			networks = []
			for name in conn.listNetworks():
				networks.append(name)
			for name in conn.listDefinedNetworks():
				networks.append(name)
			# Not supported on all distros, only Fedora!!!
			#for ifcfg in conn.listInterfaces():
			#	if ifcfg != 'lo' and not re.findall("eth", ifcfg):
			#		networks.append(ifcfg)
			#for ifcfg in conn.listDefinedInterfaces():
			#	if ifcfg != 'lo' and not re.findall("eth", ifcfg):
			#		networks.append(ifcfg)
			return networks
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"
	
	def find_all_iso():
		try:
			iso = []
			for storage in storages:
				stg = conn.storagePoolLookupByName(storage)
				stg.refresh(0)
				for img in stg.listVolumes():
					if re.findall(".iso", img) or re.findall(".ISO", img):
						iso.append(img)
			return iso
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def find_all_img():
		try:		
			disk = []
			for storage in storages:
				stg = conn.storagePoolLookupByName(storage)
				stg.refresh(0)
				for img in stg.listVolumes():
					if re.findall(".img", img) or re.findall(".IMG", img):
						disk.append(img)
			return disk
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"
	
	def get_img_path(vol):
		try:
			for storage in storages:
				stg = conn.storagePoolLookupByName(storage)
				for img in stg.listVolumes():
					if vol == img:
						vl = stg.storageVolLookupByName(vol)
						return vl.path()
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_img_format(vol):
		try:
			for storage in storages:
				stg = conn.storagePoolLookupByName(storage)
				for img in stg.listVolumes():
					if vol == img:
						vl = stg.storageVolLookupByName(vol)
						xml = vl.XMLDesc(0)
						format = util.get_xml_path(xml, "/volume/target/format/@type")
						return format
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_cpus():
		try:
			return conn.getInfo()[2]
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"
	
	def get_mem():
		try:
			return conn.getInfo()[1]
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_emulator():
		try:
			emulator = []
			xml = conn.getCapabilities()
			arch = conn.getInfo()[0]
			if arch == 'x86_64':
				emulator.append(util.get_xml_path(xml,"/capabilities/guest[1]/arch/emulator"))
				emulator.append(util.get_xml_path(xml,"/capabilities/guest[2]/arch/emulator"))
			else:
				emulator = util.get_xml_path(xml,"/capabilities/guest/arch/emulator")
			return emulator
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_machine():
		try:
			xml = conn.getCapabilities()
			machine = util.get_xml_path(xml,"/capabilities/guest/arch/machine/@canonical")
			return machine
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"
			
	def create_volume(stg_pool, img, size_max):
		try:
			size_max = int(size_max) * 1073741824
			xml = """
				<volume>
					<name>%s.img</name>
					<capacity>%s</capacity>
					<allocation>0</allocation>
					<target>
						<format type='qcow2'/>
					</target>
				</volume>""" % (img, size_max)
			stg = conn.storagePoolLookupByName(stg_pool)
			stg.createXML(xml,0)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"
	
	def add_vm(name, mem, cpus, machine, emul, img, iso, bridge):
		try:
			arch = conn.getInfo()[0]
			if not iso:
				iso = ''
			memaloc = mem
			xml = """<domain type='kvm'>
					  <name>%s</name>
					  <memory>%s</memory>
					  <currentMemory>%s</currentMemory>
					  <vcpu>%s</vcpu>
					  <os>
					    <type arch='%s' machine='%s'>hvm</type>
					    <boot dev='hd'/>
					    <boot dev='cdrom'/>
					    <bootmenu enable='yes'/>
					  </os>
					  <features>
					    <acpi/>
					    <apic/>
					    <pae/>
					  </features>
					  <clock offset='utc'/>
					  <on_poweroff>destroy</on_poweroff>
					  <on_reboot>restart</on_reboot>
					  <on_crash>restart</on_crash>
					  <devices>""" % (name, mem, memaloc, cpus, arch, machine)
				
			if arch == 'x86_64':
				xml += """<emulator>%s</emulator>""" % (emul[1])
			else:
				xml += """<emulator>%s</emulator>""" % (emul)

			xml += """<disk type='file' device='disk'>
					      <driver name='qemu' type='qcow2'/>
					      <source file='%s'/>
					      <target dev='hda' bus='ide'/>
					    </disk>
					    <disk type='file' device='cdrom'>
					      <driver name='qemu' type='raw'/>
					      <source file='%s'/>
					      <target dev='hdc' bus='ide'/>
					      <readonly/>
					    </disk>
					    <controller type='ide' index='0'>
					      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
					    </controller>
					    """ % (img, iso)

			if re.findall("br", bridge):
				xml += """<interface type='bridge'>
						<source bridge='%s'/>""" % (bridge)
			else:
				xml += """<interface type='network'>
						<source network='%s'/>""" % (bridge)
				
			xml += """<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
					    </interface>
					    <input type='tablet' bus='usb'/>
					    <input type='mouse' bus='ps2'/>
					    <graphics type='vnc' port='-1' autoport='yes'/>
					    <video>
					      <model type='cirrus' vram='9216' heads='1'/>
					      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
					    </video>
					    <memballoon model='virtio'>
					      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
					    </memballoon>
					  </devices>
					</domain>"""
			conn.defineXML(xml)
			dom = conn.lookupByName(name)
			dom.setAutostart(1)
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	conn = vm_conn()

	if conn == "error":
		return HttpResponseRedirect('/overview/' + host_id + '/')

	errors = []
	cores = get_cpus()
	all_vm = get_all_vm()
	storages = get_all_stg()
	all_iso = find_all_iso()
	all_img = find_all_img()
	if all_iso == "error" or all_img == "error":
		msg = _('Storage pools are not available or are not active')
		errors.append(msg)	
	bridge = get_all_net()
	if bridge == "error":
		msg = _('Network pools are not available or are not active')
		errors.append(msg)			
	emul = get_emulator()
	machine = get_machine()
	addmem = get_mem()
	
	cpus = []
	for cpu in range(1,cores+1):
		cpus.append(cpu)

	if request.method == 'POST':
		name = request.POST.get('name','')
		setmem = request.POST.get('memory','')
		cpus = request.POST.get('cpus','')
		iso = request.POST.get('iso','')		
		img = request.POST.get('img','')
		netbr = request.POST.get('bridge','')
		setmem = int(setmem) * 1024
		hdd = get_img_path(img)
		cdrom = get_img_path(iso)
		hdd_frmt = get_img_format(img)
		simbol = re.search('[^a-zA-Z0-9\_]+', name)
		if name in all_vm:
			msg = _('A virtual machine with this name already exists')
			errors.append(msg)
		if len(name) > 20:
			msg = _('The name of the virtual machine must not exceed 20 characters')
			errors.append(msg)
		if simbol:
			msg = _('The name of the virtual machine must not contain any special characters or Russian characters')
			errors.append(msg)
		if not request.POST.get('hdd','') and not img or img == 'error':
			msg = _('No HDD images are available for the virtual machine. You need to create an HDD image')
			errors.append(msg)
		if not name:
			msg = _('Enter the name of the virtual machine')
			errors.append(msg)
		if not errors:
			if request.POST.get('hdd',''):
				size = request.POST.get('hdd','')
				stg_pool = request.POST.get('stg_pool','')
				create_volume(stg_pool, name, size)
				img = name + '.img'
				hdd = get_img_path(img)
			if add_vm(name, setmem, cpus, machine, emul, hdd, cdrom, netbr) == 'error':
				msg = _('Hardware acceleration is not found')
				errors.append(msg)
			else:
				msg = _('Creating a virtual machine: ')
				msg = msg + name
				add_error(msg,'user')
				return HttpResponseRedirect('/vm/%s/%s/' % (host_id, name))


	conn.close()

	return render_to_response('newvm.html', locals())
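
All the validation messages in this view go through _(), so they render in whichever language is active for the request. Outside a request (tests, a shell session), the active language can be switched explicitly. A minimal sketch using Django's translation.override, assuming a catalog with a German entry for this message exists:

from django.utils import translation
from django.utils.translation import ugettext as _

with translation.override('de'):
    # Prints the German msgstr if the catalog has one, otherwise the original English text.
    print _('Enter the name of the virtual machine')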

Example 13

Project: virtmgr
Source File: views.py
View license
def index(request, host_id):

	if not request.user.is_authenticated():
		return HttpResponseRedirect('/user/login/')

	def add_error(msg, type_err):
		error_msg = Log(host_id=host_id, 
                      type=type_err, 
                      message=msg, 
                      user_id=request.user.id
                      )
		error_msg.save()

	kvm_host = Host.objects.get(user=request.user.id, id=host_id)

	def vm_conn():
		flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
		auth = [flags, creds, None]
		uri = 'qemu+tcp://' + kvm_host.ipaddr + '/system'
		try:
			conn = libvirt.openAuth(uri, auth, 0)
			return conn
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	if not kvm_host.login or not kvm_host.passwd:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = request.session['login_kvm']
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = request.session['passwd_kvm']
				else:
					return -1
			return 0
	else:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = kvm_host.login
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = kvm_host.passwd
				else:
					return -1
			return 0

	def get_all_vm():
		try:
			vname = {}
			for id in conn.listDomainsID():
				id = int(id)
				dom = conn.lookupByID(id)
				vname[dom.name()] = dom.info()[0]
			for id in conn.listDefinedDomains():
				dom = conn.lookupByName(id)
				vname[dom.name()] = dom.info()[0]
			return vname
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_info():
		try:
			info = []
			xml_inf = conn.getSysinfo(0)
			info.append(conn.getHostname())
			info.append(conn.getInfo()[0])
			info.append(conn.getInfo()[2])
			info.append(util.get_xml_path(xml_inf, "/sysinfo/processor/entry[6]"))
			return info
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_mem_usage():
		try:
			allmem = conn.getInfo()[1] * 1048576
			get_freemem = conn.getMemoryStats(-1,0)
			if type(get_freemem) == dict:
				freemem = (get_freemem.values()[0] + get_freemem.values()[2] + get_freemem.values()[3]) * 1024
				percent = (freemem * 100) / allmem
				percent = 100 - percent
				memusage = (allmem - freemem)
			else:
				memusage = None
				percent = None
			return allmem, memusage, percent
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	def get_cpu_usage():
		try:
			prev_idle = 0
			prev_total = 0
			cpu = conn.getCPUStats(-1,0)
			if type(cpu) == dict:
				for num in range(2):
					idle = conn.getCPUStats(-1,0).values()[1]
					total = sum(conn.getCPUStats(-1,0).values())
					diff_idle = idle - prev_idle
					diff_total = total - prev_total
					diff_usage = (1000 * (diff_total - diff_idle) / diff_total + 5) / 10
					prev_total = total
					prev_idle = idle
					if num == 0:
						time.sleep(1)
					else:
						if diff_usage < 0:
							diff_usage = 0
			else:
				diff_usage = None
			return diff_usage
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"
	
	def get_dom(vname):
		try:
			dom = conn.lookupByName(vname)
			return dom
		except libvirt.libvirtError as e:
			add_error(e, 'libvirt')
			return "error"

	errors = []
		
	if request.method == 'POST':
		login_kvm = request.POST.get('login_kvm','')
		passwd_kvm = request.POST.get('passwd_kvm','')
		if login_kvm or passwd_kvm:
			request.session['login_kvm'] = login_kvm 
			request.session['passwd_kvm'] = passwd_kvm
			return HttpResponseRedirect('/overview/%s/' % (host_id))
		vname = request.POST.get('vname','')
		if vname:
			conn = vm_conn()
			dom = get_dom(vname)
		if request.POST.get('suspend',''):
			try:
				dom.suspend()
				msg = _('Suspend VM: ')
				msg = msg + vname
				add_error(msg, 'user')
			except libvirt.libvirtError as e:
				add_error(e, 'libvirt')
				msg = _('Error: VM already suspended')
				errors.append(msg)
		if request.POST.get('resume',''):
			try:
				dom.resume()
				msg = _('Resume VM: ')
				msg = msg + vname
				add_error(msg, 'user')
			except libvirt.libvirtError as e:
				add_error(e, 'libvirt')
				msg = _('Error: VM already resumed')
				errors.append(msg)
		if request.POST.get('start',''):
			try:
				dom.create()
				msg = _('Start VM: ')
				msg = msg + vname
				add_error(msg, 'user')
			except libvirt.libvirtError as e:
				add_error(e, 'libvirt')
				msg = _('Error: VM already started')
				errors.append(msg)
		if request.POST.get('shutdown',''):
			try:
				dom.shutdown()
				msg = _('Shutdown VM: ')
				msg = msg + vname
				add_error(msg, 'user')
			except libvirt.libvirtError as e:
				add_error(e, 'libvirt')
				msg = _('Error: VM already shut down')
				errors.append(msg)
		if request.POST.get('destroy',''):
			try:
				dom.destroy()
				msg = _('Force shutdown VM: ')
				msg = msg + vname
				add_error(msg, 'user')
			except libvirt.libvirtError as e:
				add_error(e, 'libvirt')
				msg = _('Error: VM already shut down')
				errors.append(msg)
            
		if not errors:
			return HttpResponseRedirect('/overview/%s/' % (host_id))

	conn = vm_conn()

	if conn != "error":
		all_vm = get_all_vm()
		host_info = get_info()
		mem_usage = get_mem_usage()
		cpu_usage = get_cpu_usage()
		lib_virt_ver = conn.getLibVersion()
		conn_type = conn.getURI()
		conn.close()
	else:
		msg = _('Error connecting: Check the KVM login and KVM password')
		errors.append(msg)
		
	return render_to_response('overview.html', locals())
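
These views call _() at request time, so the immediate ugettext alias is sufficient. For strings evaluated at import time, such as model field labels, Django provides ugettext_lazy, which defers the catalog lookup until the value is actually rendered. A minimal sketch (the model and field are purely illustrative, not the project's own):

from django.db import models
from django.utils.translation import ugettext_lazy as _

class KvmHost(models.Model):
    # The verbose_name is evaluated lazily, so the language active at render
    # time decides which translation is shown.
    ipaddr = models.CharField(_('IP address'), max_length=64)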

Example 14

Project: virtmgr
Source File: views.py
View license
def pool(request, host_id, pool):

	if not request.user.is_authenticated():
		return HttpResponseRedirect('/')

	kvm_host = Host.objects.get(user=request.user.id, id=host_id)

	def add_error(msg, type_err):
		error_msg = Log(host_id=host_id, 
			            type=type_err, 
			            message=msg, 
			            user_id=request.user.id
			            )
		error_msg.save()

	def get_storages():
		try:
			storages = {}
			for name in conn.listStoragePools():
				stg = conn.storagePoolLookupByName(name)
				status = stg.isActive()
				storages[name] = status
			for name in conn.listDefinedStoragePools():
				stg = conn.storagePoolLookupByName(name)
				status = stg.isActive()
				storages[name] = status
			return storages
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def vm_conn():
		try:
			flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
			auth = [flags, creds, None]
			uri = 'qemu+tcp://' + kvm_host.ipaddr + '/system'
			conn = libvirt.openAuth(uri, auth, 0)
			return conn
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def get_vms():
		try:
			vname = {}
			for id in conn.listDomainsID():
				id = int(id)
				dom = conn.lookupByID(id)
				vname[dom.name()] = dom.info()[0]
			for id in conn.listDefinedDomains():
				dom = conn.lookupByName(id)
				vname[dom.name()] = dom.info()[0]
			return vname
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	if not kvm_host.login or not kvm_host.passwd:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = request.session['login_kvm']
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = request.session['passwd_kvm']
				else:
					return -1
			return 0
	else:
		def creds(credentials, user_data):
			for credential in credentials:
				if credential[0] == libvirt.VIR_CRED_AUTHNAME:
					credential[4] = kvm_host.login
					if len(credential[4]) == 0:
						credential[4] = credential[3]
				elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
					credential[4] = kvm_host.passwd
				else:
					return -1
			return 0

	def get_conn_pool(pool):
		try:
			stg = conn.storagePoolLookupByName(pool)
			return stg
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def pool_start():
		try:
			stg.create(0)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def pool_stop():
		try:
			stg.destroy()
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def pool_delete():
		try:
			stg.undefine()
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"
			
	def pool_refresh():
		try:
			stg.refresh(0)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def get_stg_info(get):
		try:
			if get == "info":
				if stg.info()[3] == 0:
					percent = 0
				else:
					percent = (stg.info()[2] * 100) / stg.info()[1]
				stg_info = stg.info()
				stg_info.append(percent)
				return stg_info
			elif get == "status":
				return stg.isActive()
			elif get == "start":
				return stg.autostart()
			elif get == "list":
				return stg.listVolumes()
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def get_type():
		try:
			xml = stg.XMLDesc(0)
			return util.get_xml_path(xml, "/pool/@type")
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def get_target_path():
		try:
			xml = stg.XMLDesc(0)
			return util.get_xml_path(xml, "/pool/target/path")
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def delete_volume(img):
		try:
			vol = stg.storageVolLookupByName(img)
			vol.delete(0)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def stg_set_autostart(pool):
		try:
			stg = conn.storagePoolLookupByName(pool)
			stg.setAutostart(1)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def create_volume(img, size_max):
		try:
			size_max = int(size_max) * 1073741824
			xml = """
				<volume>
					<name>%s.img</name>
					<capacity>%s</capacity>
					<allocation>0</allocation>
					<target>
						<format type='qcow2'/>
					</target>
				</volume>""" % (img, size_max)
			stg.createXML(xml,0)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def create_stg_pool(name_pool, path_pool):
		try:
			xml = """
				<pool type='dir'>
					<name>%s</name>
						<target>
							<path>%s</path>
						</target>
				</pool>""" % (name_pool, path_pool)
			conn.storagePoolDefineXML(xml,0)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def clone_volume(img, new_img):
		try:
			vol = stg.storageVolLookupByName(img)
			xml = """
				<volume>
					<name>%s</name>
					<capacity>0</capacity>
					<allocation>0</allocation>
					<target>
						<format type='qcow2'/>
					</target>
				</volume>""" % (new_img)
			stg.createXMLFrom(xml, vol, 0)
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"

	def get_vl_info(listvol):
		try:
			volinfo = {}
			if stg.isActive() != 0:
				for name in listvol:
					vol = stg.storageVolLookupByName(name)
					xml = vol.XMLDesc(0)
					size = vol.info()[1]
					format = util.get_xml_path(xml, "/volume/target/format/@type")
	 				volinfo[name] = size,format
			return volinfo
		except libvirt.libvirtError as e:
			add_error(e,'libvirt')
			return "error"
	
	conn = vm_conn()
	errors = []

	if conn == "error":
		return HttpResponseRedirect('/overview/%s/' % (host_id))

	pools = get_storages()
	all_vm = get_vms()
	
	if pool != 'new_stg_pool':
		stg = get_conn_pool(pool)
		status = get_stg_info('status')
		if status == 1:
			pool_refresh()
			info = get_stg_info('info')
			stype = get_type()
			spath = get_target_path()
			start = get_stg_info('start')
			listvol = get_stg_info('list')
			volinfo = get_vl_info(listvol)
			hdd_size = range(1,321)
		errors = []

	if request.method == 'POST':
		if request.POST.get('new_stg_pool',''):
			name_pool = request.POST.get('name_pool','')
			path_pool = request.POST.get('path_pool','')
			simbol = re.search('[^a-zA-Z0-9\_]+', name_pool)
			if len(name_pool) > 20:
				msg = _('The name of the storage pool must not exceed 20 characters')
				errors.append(msg)
			if simbol:
				msg = _('The name of the storage pool must not contain any special characters or Russian characters')
				errors.append(msg)
			if not name_pool:
				msg = _('Enter the name of the pool')
				errors.append(msg)
			if not path_pool:
				msg = _('Enter the path of the pool')
				errors.append(msg)
			if not errors:
				if create_stg_pool(name_pool, path_pool) == "error":
					msg = _('Such a pool already exists')
					errors.append(msg)
				else:
					stg = get_conn_pool(name_pool)
					stg_set_autostart(name_pool)
					if pool_start() == "error":
						msg = _('The pool was created, but starting it failed; the path you specified may not exist')
						errors.append(msg)
						return HttpResponseRedirect('/storage/%s/%s/' % (host_id, name_pool))
					else:
						msg = _('Creating a storage pool: ')
						msg = msg + name_pool
						add_error(msg,'user')
						return HttpResponseRedirect('/storage/%s/%s/' % (host_id, name_pool))
				if errors:
					return render_to_response('storage.html', locals())
		if request.POST.get('stop_pool',''):
			pool_stop()
			msg = _('Stop storage pool: ')
			msg = msg + pool
			add_error(msg,'user')
			return HttpResponseRedirect('/storage/%s/%s/' % (host_id, pool))
		if request.POST.get('start_pool',''):
			pool_start()
			msg = _('Start storage pool: ')
			msg = msg + pool
			add_error(msg,'user')
			return HttpResponseRedirect('/storage/%s/%s/' % (host_id, pool))
		if request.POST.get('del_pool',''):
			pool_delete()
			msg = _('Delete storage pool: ')
			msg = msg + pool
			add_error(msg,'user')
			return HttpResponseRedirect('/storage/%s/' % (host_id))
		if request.POST.get('vol_del',''):
			img = request.POST['img']
			delete_volume(img)
			msg = _('Delete image: ')
			msg = msg + img
			add_error(msg,'user')
			return HttpResponseRedirect('/storage/%s/%s/' % (host_id, pool))
		if request.POST.get('vol_add',''):
			img = request.POST.get('img','')
			size_max = request.POST.get('size_max','')
			simbol = re.search('[^a-zA-Z0-9\_]+', img)
			if len(img) > 20:
				msg = _('The name of the image must not exceed 20 characters')
				errors.append(msg)
			if simbol:
				msg = _('The name of the image must not contain any special characters or Russian characters')
				errors.append(msg)
			if not img:
				msg = _('Enter image name')
				errors.append(msg)
			if not size_max:
				msg = _('Enter image size')
				errors.append(msg)
			if not errors:
				create_volume(img, size_max)
				msg = _('Create image: ')
				msg = msg + img + '.img'
				add_error(msg,'user')
				return HttpResponseRedirect('/storage/%s/%s/' % (host_id, pool))
		if request.POST.get('vol_clone',''):
			img = request.POST.get('img','')
			new_img = request.POST.get('new_img','')
			simbol = re.search('[^a-zA-Z0-9\_]+', new_img)
			new_img = new_img + '.img'
			if new_img == '.img':
				msg = _('Enter image name')
				errors.append(msg)
			if len(new_img) > 20:
				msg = _('The name of the image must not exceed 20 characters')
				errors.append(msg)
			if simbol:
				msg = _('The name of the image must not contain any special characters or Russian characters')
				errors.append(msg)
			if new_img in listvol:
				msg = _('An image with the same name already exists')
				errors.append(msg)
			if re.search('.ISO', img) or re.search('.iso', img):
				msg = _('You can only clone virtual machine images')
				errors.append(msg)
			if not errors:
				clone_volume(img, new_img)
				msg = _('Cloning image: ')
				msg = msg + img + ' => ' + new_img
				add_error(msg,'user')
				return HttpResponseRedirect('/storage/%s/%s/' % (host_id, pool))

	conn.close()
				
	return render_to_response('storage.html', locals())

Example 15

Project: virtmgr
Source File: views.py
View license
def index(request, host_id, vname):

   if not request.user.is_authenticated():
      return HttpResponseRedirect('/')

   kvm_host = Host.objects.get(user=request.user.id, id=host_id)

   def add_error(msg, type_err):
      error_msg = Log(host_id=host_id, 
                      type=type_err, 
                      message=msg, 
                      user_id=request.user.id
                      )
      error_msg.save()

   def get_vms():
      try:
         vname = {}
         for id in conn.listDomainsID():
            id = int(id)
            dom = conn.lookupByID(id)
            vname[dom.name()] = dom.info()[0]
         for id in conn.listDefinedDomains():
            dom = conn.lookupByName(id)
            vname[dom.name()] = dom.info()[0]
         return vname
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_storages():
      try:
         storages = []
         for name in conn.listStoragePools():
            storages.append(name)
         for name in conn.listDefinedStoragePools():
            storages.append(name)
         return storages
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def vm_conn():
      try:
         flags = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE]
         auth = [flags, creds, None]
         uri = 'qemu+tcp://' + kvm_host.ipaddr + '/system'
         conn = libvirt.openAuth(uri, auth, 0)
         return conn
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_dom(vname):
      try:
         dom = conn.lookupByName(vname)
         return dom
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   if not kvm_host.login or not kvm_host.passwd:
      def creds(credentials, user_data):
         for credential in credentials:
            if credential[0] == libvirt.VIR_CRED_AUTHNAME:
               credential[4] = request.session['login_kvm']
               if len(credential[4]) == 0:
                  credential[4] = credential[3]
            elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
               credential[4] = request.session['passwd_kvm']
            else:
               return -1
         return 0
   else:
      def creds(credentials, user_data):
         for credential in credentials:
            if credential[0] == libvirt.VIR_CRED_AUTHNAME:
               credential[4] = kvm_host.login
               if len(credential[4]) == 0:
                  credential[4] = credential[3]
            elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
               credential[4] = kvm_host.passwd
            else:
               return -1
         return 0

   def get_vm_active():
      try:
         state = dom.isActive()
         return state
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_uuid():
      try:
         xml = dom.XMLDesc(0)
         uuid = util.get_xml_path(xml, "/domain/uuid")
         return uuid
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_xml():
      try:
         xml = dom.XMLDesc(0)
         xml_spl = xml.split('\n')
         return xml_spl
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_mem():
      try:
         xml = dom.XMLDesc(0)
         mem = util.get_xml_path(xml, "/domain/currentMemory")
         mem = int(mem) * 1024
         return mem
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_core():
      try:
         xml = dom.XMLDesc(0)
         cpu = util.get_xml_path(xml, "/domain/vcpu")
         return cpu
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_vnc():
      try:
         xml = dom.XMLDesc(0)
         vnc = util.get_xml_path(xml, "/domain/devices/graphics/@port")
         return vnc
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_hdd():
      try:
         xml = dom.XMLDesc(0)
         hdd_path = util.get_xml_path(xml, "/domain/devices/disk[1]/source/@file")
         hdd_fmt = util.get_xml_path(xml, "/domain/devices/disk[1]/driver/@type")
         #image = re.sub('\/.*\/', '', hdd_path)
         size = dom.blockInfo(hdd_path, 0)[0]
         #return image, size, hdd_fmt
         return hdd_path, size, hdd_fmt
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_cdrom():
      try:
         xml = dom.XMLDesc(0)
         cdr_path = util.get_xml_path(xml, "/domain/devices/disk[2]/source/@file")
         if cdr_path:
            #image = re.sub('\/.*\/', '', cdr_path)
            size = dom.blockInfo(cdr_path, 0)[0]
            #return image, cdr_path, size
            return cdr_path, cdr_path, size
         else:
            return cdr_path
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_boot_menu():
      try:
         xml = dom.XMLDesc(0)
         boot_menu = util.get_xml_path(xml, "/domain/os/bootmenu/@enable")
         return boot_menu
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"
   
   def get_vm_arch():
      try:
         xml = dom.XMLDesc(0)
         arch = util.get_xml_path(xml, "/domain/os/type/@arch")
         return arch
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_nic():
      try:
         xml = dom.XMLDesc(0)
         mac = util.get_xml_path(xml, "/domain/devices/interface/mac/@address")
         nic = util.get_xml_path(xml, "/domain/devices/interface/source/@network")
         if nic is None:
         	nic = util.get_xml_path(xml, "/domain/devices/interface/source/@bridge")
         return mac, nic
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"
      
   def mnt_iso_on(vol):
      try:
         for storage in storages:
            stg = conn.storagePoolLookupByName(storage)
            for img in stg.listVolumes():
               if vol == img:
                  vl = stg.storageVolLookupByName(vol)
         xml = """<disk type='file' device='cdrom'>
                     <driver name='qemu' type='raw'/>
                     <target dev='hdc' bus='ide'/>
                     <source file='%s'/>
                     <readonly/>
                  </disk>""" % vl.path()
         dom.attachDevice(xml)
         xmldom = dom.XMLDesc(0)
         conn.defineXML(xmldom)
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def mnt_iso_off(vol):
      try:
         for storage in storages:
            stg = conn.storagePoolLookupByName(storage)
            for img in stg.listVolumes():
               if vol == img:
                  vl = stg.storageVolLookupByName(vol)
         xml = dom.XMLDesc(0)
         iso = "<disk type='file' device='cdrom'>\n      <driver name='qemu' type='raw'/>\n      <source file='%s'/>" % vl.path()
         xmldom = xml.replace("<disk type='file' device='cdrom'>\n      <driver name='qemu' type='raw'/>", iso)
         conn.defineXML(xmldom)
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def umnt_iso_on():
      try:
         xml = """<disk type='file' device='cdrom'>
                     <driver name="qemu" type='raw'/>
                     <target dev='hdc' bus='ide'/>
                     <readonly/>
                  </disk>"""
         dom.attachDevice(xml)
         xmldom = dom.XMLDesc(0)
         conn.defineXML(xmldom)
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def umnt_iso_off():
      try:
         xml = dom.XMLDesc(0)
         cdrom = get_vm_cdrom()[1]
         xmldom = xml.replace("<source file='%s'/>\n" % cdrom,"")
         conn.defineXML(xmldom)
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def find_all_iso():
      try:
         iso = []
         for storage in storages:
            stg = conn.storagePoolLookupByName(storage)
            stg.refresh(0)
            for img in stg.listVolumes():
               if re.findall(".iso", img) or re.findall(".ISO", img):
                  iso.append(img)
         return iso
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"
   
   def get_vm_autostart():
      try:
         return dom.autostart()
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def page_refresh():
      try:
         return HttpResponseRedirect('/vm/' + host_id + '/' + vname + '/' )
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_vm_state():
      try:
         return dom.info()[0]
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def vm_cpu_usage():
      try:
         nbcore = conn.getInfo()[2]
         cpu_use_ago = dom.info()[4]
         time.sleep(1) 
         cpu_use_now = dom.info()[4]
         diff_usage = cpu_use_now - cpu_use_ago
         cpu_usage = 100 * diff_usage / (1 * nbcore * 10**9L)
         return cpu_usage
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_memusage():
      try:
         allmem = conn.getInfo()[1] * 1048576
         dom_mem = dom.info()[1] * 1024
         percent = (dom_mem * 100) / allmem
         return allmem, percent
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_all_core():
      try:
         allcore = conn.getInfo()[2]
         return allcore
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def vm_create_snapshot():
      try:
         xml = """<domainsnapshot>\n
                     <name>%d</name>\n
                     <state>shutoff</state>\n
                     <creationTime>%d</creationTime>\n""" % (time.time(), time.time())
         xml += dom.XMLDesc(0)
         xml += """<active>0</active>\n
                  </domainsnapshot>"""
         dom.snapshotCreateXML(xml, 0)
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   def get_snapshot_num():
      try:
         return dom.snapshotNum(0)
      except libvirt.libvirtError as e:
         add_error(e, 'libvirt')
         return "error"

   conn = vm_conn()
   errors = []

   if conn == 'error':
      return HttpResponseRedirect('/overview/' + host_id + '/')

   all_vm = get_vms()
   dom = get_dom(vname)
   active = get_vm_active()
   state = get_vm_state()
   uuid = get_vm_uuid()
   memory = get_vm_mem()
   core =  get_vm_core()
   autostart = get_vm_autostart()
   vnc_port = get_vm_vnc()
   hdd = get_vm_hdd()
   boot_menu = get_vm_boot_menu()
   vm_arch = get_vm_arch()
   vm_nic = get_vm_nic()
   cdrom = get_vm_cdrom()
   storages = get_storages()
   isos = find_all_iso()
   all_core = get_all_core()
   cpu_usage = vm_cpu_usage()
   mem_usage = get_memusage()
   num_snapshot = get_snapshot_num()
   vm_xml = get_vm_xml()

   # Post form html
   if request.method == 'POST':
      if request.POST.get('suspend',''):
         try:
            dom.suspend()
            msg = _('Suspend VM: ')
            msg = msg + vname
            add_error(msg, 'user')
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            msg = _('Error: VM already suspended')
            errors.append(msg)
      if request.POST.get('resume',''):
         try:
            dom.resume()
            msg = _('Resume VM: ')
            msg = msg + vname
            add_error(msg, 'user')
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            msg = _('Error: VM already resumed')
            errors.append(msg)
      if request.POST.get('start',''):
         try:
            dom.create()
            msg = _('Start VM: ')
            msg = msg + vname
            add_error(msg, 'user')
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            msg = _('Error: VM already started')
            errors.append(msg)
      if request.POST.get('shutdown',''):
         try:
            dom.shutdown()
            msg = _('Shutdown VM: ')
            msg = msg + vname
            add_error(msg, 'user')
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            msg = _('Error: VM already shut down')
            errors.append(msg)
      if request.POST.get('destroy',''):
         try:
            dom.destroy()
            msg = _('Force shutdown VM: ')
            msg = msg + vname
            add_error(msg, 'user')
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            msg = _('Error: VM already shut down')
            errors.append(msg)
      if request.POST.get('snapshot',''):
         try:
            msg = _('Create snapshot for VM: ')
            msg = msg + vname
            add_error(msg, 'user')
            vm_create_snapshot()
            message = _('Snapshot created successfully')
            return render_to_response('vm.html', locals())
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            msg = _('Error: failed to create snapshot')
            errors.append(msg)
      if request.POST.get('auto_on',''):
         try:
            msg = _('Enable autostart for VM: ')
            msg = msg + vname
            add_error(msg, 'user')
            dom.setAutostart(1)
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            return "error"
      if request.POST.get('auto_off',''):
         try:
            msg = _('Disable autostart for VM: ')
            msg = msg + vname
            add_error(msg, 'user')
            dom.setAutostart(0)
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            return "error"
      if request.POST.get('disconnect',''):
         iso = request.POST.get('iso_img','')
         if state == 1:
            umnt_iso_on()
         else:
            umnt_iso_off()
      if request.POST.get('connect',''):
         iso = request.POST.get('iso_img','')     
         if state == 1:
            mnt_iso_on(iso)
         else:
            mnt_iso_off(iso)
      if request.POST.get('undefine',''):
         try:
            dom.undefine()
            msg = _('Undefine VM: ')
            msg = msg + vname
            add_error(msg, 'user')
            return HttpResponseRedirect('/overview/%s/' % (host_id))
         except libvirt.libvirtError as e:
            add_error(e, 'libvirt')
            return "error"
      if not errors:
         return HttpResponseRedirect('/vm/%s/%s/' % (host_id, vname))
      else:
         return render_to_response('vm.html', locals())

   conn.close()
   
   return render_to_response('vm.html', locals())
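
The view above wraps every user-facing message in _() at the point where it is built, inside the request. A minimal sketch (assumed names, not virtmgr code) of the related case: strings defined at module import time should use gettext_lazy and be forced to text only once the active language is known:

from django.utils.translation import gettext_lazy

# Evaluated lazily; translation happens when the proxy is coerced to str.
SUSPEND_LOG = gettext_lazy('Suspend VM: %(name)s')

def suspend_log_message(vname):
    # str() resolves the lazy proxy against the language active for this
    # request before the VM name is interpolated.
    return str(SUSPEND_LOG) % {'name': vname}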

Example 16

Project: reviewboard
Source File: siteconfig.py
View license
def load_site_config(full_reload=False):
    """Load stored site configuration settings.

    This populates the Django settings object with any keys that need to be
    there.
    """
    def apply_setting(settings_key, db_key, default=None):
        """Apply the given siteconfig value to the Django settings object."""
        db_value = siteconfig.settings.get(db_key)

        if db_value:
            setattr(settings, settings_key, db_value)
        elif default:
            setattr(settings, settings_key, default)

    def update_haystack_settings():
        """Update the haystack settings in site config."""
        search_backend_id = (siteconfig.get('search_backend_id') or
                             defaults['search_backend_id'])
        search_backend = search_backend_registry.get_search_backend(
            search_backend_id)

        if not search_backend:
            raise ImproperlyConfigured(
                _('The search engine "%s" could not be found. If this is '
                  'provided by an extension, you will have to make sure that '
                  'extension is enabled.')
                % search_backend_id)

        apply_setting(
            'HAYSTACK_CONNECTIONS', None,
            {
                'default': search_backend.configuration,
            })

        # Re-initialize Haystack's connection information to use the updated
        # settings.
        connections.connections_info = settings.HAYSTACK_CONNECTIONS
        connections._connections = {}

    # If siteconfig needs to be saved back to the DB, set dirty=true
    dirty = False
    try:
        siteconfig = SiteConfiguration.objects.get_current()
    except SiteConfiguration.DoesNotExist:
        raise ImproperlyConfigured(
            "The site configuration entry does not exist in the database. "
            "Re-run `./manage.py` syncdb to fix this.")
    except Exception as e:
        # We got something else. Likely, this doesn't exist yet and we're
        # doing a syncdb or something, so silently ignore.
        logging.error('Could not load siteconfig: %s' % e)
        return

    # Populate defaults if they weren't already set.
    if not siteconfig.get_defaults():
        siteconfig.add_defaults(defaults)

    # The default value for DEFAULT_FROM_EMAIL (webmaster@localhost)
    # is less than good, so use a better one if it's set to that or if
    # we haven't yet set this value in siteconfig.
    mail_default_from = \
        siteconfig.settings.get('mail_default_from',
                                global_settings.DEFAULT_FROM_EMAIL)

    if (not mail_default_from or
            mail_default_from == global_settings.DEFAULT_FROM_EMAIL):
        domain = siteconfig.site.domain.split(':')[0]
        siteconfig.set('mail_default_from', 'noreply@' + domain)

    # STATIC_* and MEDIA_* must be different paths, and differ in meaning.
    # If site_static_* is empty or equal to media_static_*, we're probably
    # migrating from an earlier Review Board install.
    site_static_root = siteconfig.settings.get('site_static_root', '')
    site_media_root = siteconfig.settings.get('site_media_root')

    if site_static_root == '' or site_static_root == site_media_root:
        siteconfig.set('site_static_root', settings.STATIC_ROOT)

    site_static_url = siteconfig.settings.get('site_static_url', '')
    site_media_url = siteconfig.settings.get('site_media_url')

    if site_static_url == '' or site_static_url == site_media_url:
        siteconfig.set('site_static_url', settings.STATIC_URL)

    # Populate the settings object with anything relevant from the siteconfig.
    apply_django_settings(siteconfig, settings_map)

    if full_reload and not getattr(settings, 'RUNNING_TEST', False):
        # Logging may have changed, so restart logging.
        restart_logging()

    # Now for some more complicated stuff...

    update_haystack_settings()

    # Site administrator settings
    apply_setting("ADMINS", None, (
        (siteconfig.get("site_admin_name", ""),
         siteconfig.get("site_admin_email", "")),
    ))

    apply_setting("MANAGERS", None, settings.ADMINS)

    # Explicitly base this off the STATIC_URL
    apply_setting("ADMIN_MEDIA_PREFIX", None, settings.STATIC_URL + "admin/")

    # Set the auth backends
    auth_backend_id = siteconfig.settings.get("auth_backend", "builtin")
    builtin_backend_obj = auth_backends.get('backend_id', 'builtin')
    builtin_backend = "%s.%s" % (builtin_backend_obj.__module__,
                                 builtin_backend_obj.__name__)

    if auth_backend_id == "custom":
        custom_backends = siteconfig.settings.get("auth_custom_backends")

        if isinstance(custom_backends, six.string_types):
            custom_backends = (custom_backends,)
        elif isinstance(custom_backends, list):
            custom_backends = tuple(custom_backends)

        settings.AUTHENTICATION_BACKENDS = custom_backends

        if builtin_backend not in custom_backends:
            settings.AUTHENTICATION_BACKENDS += (builtin_backend,)
    else:
        backend = auth_backends.get('backend_id', auth_backend_id)

        if backend and backend is not builtin_backend_obj:
            settings.AUTHENTICATION_BACKENDS = \
                ("%s.%s" % (backend.__module__, backend.__name__),
                 builtin_backend)
        else:
            settings.AUTHENTICATION_BACKENDS = (builtin_backend,)

        # If we're upgrading from a 1.x LDAP configuration, populate
        # ldap_uid and clear ldap_uid_mask
        if auth_backend_id == "ldap":
            if not hasattr(settings, 'LDAP_UID'):
                if hasattr(settings, 'LDAP_UID_MASK'):
                    # Get the username attribute from the old UID mask
                    # LDAP attributes can contain only alphanumeric
                    # characters and the hyphen and must lead with an
                    # alphabetic character. This is not dependent upon
                    # locale.
                    m = re.search("([a-zA-Z][a-zA-Z0-9-]+)=%s",
                                  settings.LDAP_UID_MASK)
                    if m:
                        # Assign LDAP_UID the value of the retrieved attribute
                        settings.LDAP_UID = m.group(1)
                    else:
                        # Couldn't match the old value?
                        # This should be impossible, but in this case, let's
                        # just guess a sane default and hope for the best.
                        settings.LDAP_UID = 'uid'

                else:
                    # Neither the old nor new value?
                    # This should be impossible, but in this case, let's just
                    # guess a sane default and hope for the best.
                    settings.LDAP_UID = 'uid'

                # Remove the LDAP_UID_MASK value
                settings.LDAP_UID_MASK = None

                siteconfig.set('auth_ldap_uid', settings.LDAP_UID)
                siteconfig.set('auth_ldap_uid_mask', settings.LDAP_UID_MASK)
                # Set the dirty flag so we save this back
                dirty = True

    # Add APITokenBackend to the list of auth backends. This one is always
    # present, and is used only for API requests.
    settings.AUTHENTICATION_BACKENDS += (
        'reviewboard.webapi.auth_backends.TokenAuthBackend',
    )

    # Set the storage backend
    storage_backend = siteconfig.settings.get('storage_backend', 'builtin')

    if storage_backend in storage_backend_map:
        settings.DEFAULT_FILE_STORAGE = storage_backend_map[storage_backend]
    else:
        settings.DEFAULT_FILE_STORAGE = storage_backend_map['builtin']

    # These blow up if they're not the perfectly right types
    settings.AWS_QUERYSTRING_AUTH = siteconfig.get('aws_querystring_auth')
    settings.AWS_ACCESS_KEY_ID = six.text_type(
        siteconfig.get('aws_access_key_id'))
    settings.AWS_SECRET_ACCESS_KEY = six.text_type(
        siteconfig.get('aws_secret_access_key'))
    settings.AWS_STORAGE_BUCKET_NAME = six.text_type(
        siteconfig.get('aws_s3_bucket_name'))
    try:
        settings.AWS_CALLING_FORMAT = int(siteconfig.get('aws_calling_format'))
    except ValueError:
        settings.AWS_CALLING_FORMAT = 0

    settings.SWIFT_AUTH_URL = six.text_type(
        siteconfig.get('swift_auth_url'))
    settings.SWIFT_USERNAME = six.text_type(
        siteconfig.get('swift_username'))
    settings.SWIFT_KEY = six.text_type(
        siteconfig.get('swift_key'))
    try:
        settings.SWIFT_AUTH_VERSION = int(siteconfig.get('swift_auth_version'))
    except:
        settings.SWIFT_AUTH_VERSION = 1
    settings.SWIFT_CONTAINER_NAME = six.text_type(
        siteconfig.get('swift_container_name'))

    if siteconfig.settings.get('site_domain_method', 'http') == 'https':
        os.environ[b'HTTPS'] = b'on'
    else:
        os.environ[b'HTTPS'] = b'off'

    # Save back changes if they have been made
    if dirty:
        siteconfig.save()

    site_settings_loaded.send(sender=None)

    return siteconfig
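
One detail worth calling out in load_site_config(): the search-backend error interpolates its value only after the _() lookup, so the literal "%s" stays in the msgid and the catalogue entry matches at runtime. A minimal sketch of that ordering (standalone, not Review Board code):

from django.utils.translation import gettext as _

def missing_backend_error(backend_id):
    # Translate first, substitute second; doing the % inside _() would pass
    # an already-interpolated string that no .po entry can match.
    return _('The search engine "%s" could not be found.') % backend_id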

Example 17

Project: reviewboard
Source File: diffutils.py
View license
def get_diff_files(diffset, filediff=None, interdiffset=None,
                   interfilediff=None, request=None):
    """Return a list of files that will be displayed in a diff.

    This will go through the given diffset/interdiffset, or a given filediff
    within that diffset, and generate the list of files that will be
    displayed. This file list will contain a bunch of metadata on the files,
    such as the index, original/modified names, revisions, associated
    filediffs/diffsets, and so on.

    This can be used along with :py:func:`populate_diff_chunks` to build a full
    list containing all diff chunks used for rendering a side-by-side diff.

    Args:
        diffset (reviewboard.diffviewer.models.DiffSet):
            The diffset containing the files to return.

        filediff (reviewboard.diffviewer.models.FileDiff, optional):
            A specific file in the diff to return information for.

        interdiffset (reviewboard.diffviewer.models.DiffSet, optional):
            A second diffset used for an interdiff range.

        interfilediff (reviewboard.diffviewer.models.FileDiff, optional):
            A second specific file in ``interdiffset`` used to return
            information for. This should be provided if ``filediff`` and
            ``interdiffset`` are both provided. If it's ``None`` in this
            case, then the diff will be shown as reverted for this file.

    Returns:
        list of dict:
        A list of dictionaries containing information on the files to show
        in the diff, in the order in which they would be shown.
    """
    if filediff:
        filediffs = [filediff]

        if interdiffset:
            log_timer = log_timed("Generating diff file info for "
                                  "interdiffset ids %s-%s, filediff %s" %
                                  (diffset.id, interdiffset.id, filediff.id),
                                  request=request)
        else:
            log_timer = log_timed("Generating diff file info for "
                                  "diffset id %s, filediff %s" %
                                  (diffset.id, filediff.id),
                                  request=request)
    else:
        filediffs = list(diffset.files.select_related().all())

        if interdiffset:
            log_timer = log_timed("Generating diff file info for "
                                  "interdiffset ids %s-%s" %
                                  (diffset.id, interdiffset.id),
                                  request=request)
        else:
            log_timer = log_timed("Generating diff file info for "
                                  "diffset id %s" % diffset.id,
                                  request=request)

    # Filediffs that were created with leading slashes stripped won't match
    # those created with them present, so we need to compare them without in
    # order for the filenames to match up properly.
    tool = diffset.repository.get_scmtool()

    if interdiffset:
        if not filediff:
            interfilediffs = list(interdiffset.files.all())
        elif interfilediff:
            interfilediffs = [interfilediff]
        else:
            interfilediffs = []

        filediff_parts = []
        matched_filediffs = get_matched_interdiff_files(
            tool=tool,
            filediffs=filediffs,
            interfilediffs=interfilediffs)

        for temp_filediff, temp_interfilediff in matched_filediffs:
            if temp_filediff:
                filediff_parts.append((temp_filediff, temp_interfilediff,
                                       True))
            elif temp_interfilediff:
                filediff_parts.append((temp_interfilediff, None, False))
            else:
                logging.error(
                    'get_matched_interdiff_files returned an entry with an '
                    'empty filediff and interfilediff for diffset=%r, '
                    'interdiffset=%r, filediffs=%r, interfilediffs=%r',
                    diffset, interdiffset, filediffs, interfilediffs)

                raise ValueError(
                    'Internal error: get_matched_interdiff_files returned an '
                    'entry with an empty filediff and interfilediff! Please '
                    'report this along with information from the server '
                    'error log.')
    else:
        # We're not working with interdiffs. We can easily create the
        # filediff_parts directly.
        filediff_parts = [
            (temp_filediff, None, False)
            for temp_filediff in filediffs
        ]

    # Now that we have all the bits and pieces we care about for the filediffs,
    # we can start building information about each entry on the diff viewer.
    files = []

    for parts in filediff_parts:
        filediff, interfilediff, force_interdiff = parts

        newfile = filediff.is_new

        if interdiffset:
            # First, find out if we want to even process this one.
            # If the diffs are identical, or the patched files are identical,
            # or if the files were deleted in both cases, then we can be
            # absolutely sure that there's nothing interesting to show to
            # the user.
            if (filediff and interfilediff and
                (filediff.diff == interfilediff.diff or
                 (filediff.deleted and interfilediff.deleted) or
                 (filediff.patched_sha1 is not None and
                  filediff.patched_sha1 == interfilediff.patched_sha1))):
                continue

            source_revision = _("Diff Revision %s") % diffset.revision

        else:
            source_revision = get_revision_str(filediff.source_revision)

        if interfilediff:
            dest_revision = _('Diff Revision %s') % interdiffset.revision
        else:
            if force_interdiff:
                dest_revision = (_('Diff Revision %s - File Reverted') %
                                 interdiffset.revision)
            elif newfile:
                dest_revision = _('New File')
            else:
                dest_revision = _('New Change')

        if interfilediff:
            raw_depot_filename = filediff.dest_file
            raw_dest_filename = interfilediff.dest_file
        else:
            raw_depot_filename = filediff.source_file
            raw_dest_filename = filediff.dest_file

        depot_filename = tool.normalize_path_for_display(raw_depot_filename)
        dest_filename = tool.normalize_path_for_display(raw_dest_filename)

        f = {
            'depot_filename': depot_filename,
            'dest_filename': dest_filename or depot_filename,
            'revision': source_revision,
            'dest_revision': dest_revision,
            'filediff': filediff,
            'interfilediff': interfilediff,
            'force_interdiff': force_interdiff,
            'binary': filediff.binary,
            'deleted': filediff.deleted,
            'moved': filediff.moved,
            'copied': filediff.copied,
            'moved_or_copied': filediff.moved or filediff.copied,
            'newfile': newfile,
            'index': len(files),
            'chunks_loaded': False,
            'is_new_file': (newfile and not interfilediff and
                            not filediff.parent_diff),
        }

        if force_interdiff:
            f['force_interdiff_revision'] = interdiffset.revision

        files.append(f)

    log_timer.done()

    if len(files) == 1:
        return files
    else:
        return get_sorted_filediffs(
            files,
            key=lambda f: f['interfilediff'] or f['filediff'])
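
get_diff_files() marks short revision labels such as 'New File' and 'New Change' for translation with plain _(). A minimal sketch (hypothetical context string, not Review Board's) of pgettext from the same module, which is useful when a short label needs a disambiguating context for translators:

from django.utils.translation import pgettext

def revision_label(newfile):
    # The first argument is a translator-facing context; it keeps this
    # 'New File' separate from any other 'New File' in the project.
    if newfile:
        return pgettext('diff viewer revision label', 'New File')
    return pgettext('diff viewer revision label', 'New Change')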

Example 18

Project: reviewboard
Source File: difftags.py
View license
@register.simple_tag
def diff_lines(index, chunk, standalone, line_fmt, anchor_fmt='',
               begin_collapse_fmt='', end_collapse_fmt='', moved_fmt=''):
    """Renders the lines of a diff.

    This will render each line in the diff viewer. The function expects
    some basic data on what will be rendered, as well as printf-formatted
    templates for the contents.

    printf-formatted templates are used instead of standard Django templates
    because they're much faster to render, which makes a huge difference
    when rendering thousands of lines or more.
    """
    lines = chunk['lines']
    num_lines = len(lines)
    chunk_index = chunk['index']
    change = chunk['change']
    is_equal = False
    is_replace = False
    is_insert = False
    is_delete = False

    if change == 'equal':
        is_equal = True
    elif change == 'replace':
        is_replace = True
    elif change == 'insert':
        is_insert = True
    elif change == 'delete':
        is_delete = True

    result = []

    for i, line in enumerate(lines):
        row_classes = []
        cell_1_classes = ['l']
        cell_2_classes = ['r']
        row_class_attr = ''
        cell_1_class_attr = ''
        cell_2_class_attr = ''
        line1 = line[2]
        line2 = line[5]
        linenum1 = line[1]
        linenum2 = line[4]
        show_collapse = False
        anchor = None

        if not is_equal:
            if i == 0:
                row_classes.append('first')
                anchor = '%s.%s' % (index, chunk_index)

            if i == num_lines - 1:
                row_classes.append('last')

            if line[7]:
                row_classes.append('whitespace-line')

            if is_replace:
                if len(line1) < DiffChunkGenerator.STYLED_MAX_LINE_LEN:
                    line1 = highlightregion(line1, line[3])

                if len(line2) < DiffChunkGenerator.STYLED_MAX_LINE_LEN:
                    line2 = highlightregion(line2, line[6])
        else:
            show_collapse = (i == 0 and standalone)

        if (not is_insert and
                len(line1) < DiffChunkGenerator.STYLED_MAX_LINE_LEN):
            line1 = showextrawhitespace(line1)

        if (not is_delete and
                len(line2) < DiffChunkGenerator.STYLED_MAX_LINE_LEN):
            line2 = showextrawhitespace(line2)

        moved_from = {}
        moved_to = {}
        is_moved_row = False
        is_first_moved_row = False

        if len(line) > 8 and isinstance(line[8], dict):
            moved_info = line[8]

            if 'from' in moved_info:
                moved_from_linenum, moved_from_first = moved_info['from']
                is_moved_row = True

                cell_2_classes.append('moved-from')

                if moved_from_first:
                    # This is the start of a new move range.
                    is_first_moved_row = True
                    cell_2_classes.append('moved-from-start')
                    moved_from = {
                        'class': 'moved-flag',
                        'line': mark_safe(moved_from_linenum),
                        'target': mark_safe(linenum2),
                        'text': _('Moved from line %s') % moved_from_linenum,
                    }

            if 'to' in moved_info:
                moved_to_linenum, moved_to_first = moved_info['to']
                is_moved_row = True

                cell_1_classes.append('moved-to')

                if moved_to_first:
                    # This is the start of a new move range.
                    is_first_moved_row = True
                    cell_1_classes.append('moved-to-start')
                    moved_to = {
                        'class': 'moved-flag',
                        'line': mark_safe(moved_to_linenum),
                        'target': mark_safe(linenum1),
                        'text': _('Moved to line %s') % moved_to_linenum,
                    }

        if is_moved_row:
            row_classes.append('moved-row')

        if is_first_moved_row:
            row_classes.append('moved-row-start')

        if row_classes:
            row_class_attr = ' class="%s"' % ' '.join(row_classes)

        if cell_1_classes:
            cell_1_class_attr = ' class="%s"' % ' '.join(cell_1_classes)

        if cell_2_classes:
            cell_2_class_attr = ' class="%s"' % ' '.join(cell_2_classes)

        anchor_html = ''
        begin_collapse_html = ''
        end_collapse_html = ''
        moved_from_html = ''
        moved_to_html = ''

        context = {
            'chunk_index': chunk_index,
            'row_class_attr': row_class_attr,
            'cell_1_class_attr': cell_1_class_attr,
            'cell_2_class_attr': cell_2_class_attr,
            'linenum_row': line[0],
            'linenum1': linenum1,
            'linenum2': linenum2,
            'line1': line1,
            'line2': line2,
            'moved_from': moved_from,
            'moved_to': moved_to,
        }

        if anchor:
            anchor_html = anchor_fmt % {
                'anchor': anchor,
            }

        if show_collapse:
            begin_collapse_html = begin_collapse_fmt % context
            end_collapse_html = end_collapse_fmt % context

        if moved_from:
            moved_from_html = moved_fmt % moved_from

        if moved_to:
            moved_to_html = moved_fmt % moved_to

        context.update({
            'anchor_html': anchor_html,
            'begin_collapse_html': begin_collapse_html,
            'end_collapse_html': end_collapse_html,
            'moved_from_html': moved_from_html,
            'moved_to_html': moved_to_html,
        })

        result.append(line_fmt % context)

    return ''.join(result)
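
The docstring explains why diff_lines() takes printf-formatted templates instead of Django templates: one %-style format string is filled per line, so the template engine is never invoked inside the hot loop. A minimal sketch of that idea with a made-up format string (the real markup lives in Review Board's own templates):

# Hypothetical row template; the tag receives something like this as line_fmt.
line_fmt = (
    '<tr%(row_class_attr)s>'
    '<td>%(linenum1)s</td><td>%(line1)s</td>'
    '<td>%(linenum2)s</td><td>%(line2)s</td>'
    '</tr>'
)

row = line_fmt % {
    'row_class_attr': ' class="first"',
    'linenum1': 10, 'line1': 'old text',
    'linenum2': 12, 'line2': 'new text',
}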

Example 19

Project: reviewboard
Source File: cvs.py
View license
    @classmethod
    def build_cvsroot(cls, cvsroot, username, password, validate=True):
        """Parse and construct a CVSROOT from the given arguments.

        This will take a repository path or CVSROOT provided by the caller,
        optionally validate it, and return both a new CVSROOT and the path
        within it.

        If a username/password are provided as arguments, but do not exist in
        ``cvsroot``, then the resulting CVSROOT will contain the
        username/password.

        If data is provided that is not supported by the type of protocol
        specified in ``cvsroot``, then it will raise a
        :py:class:`~django.core.exceptions.ValidationError` (if validating)
        or strip the data from the CVSROOT.

        Args:
            cvsroot (unicode):
                A CVSROOT string, or a bare repository path to turn into one.

            username (unicode):
                Optional username for the CVSROOT.

            password (unicode):
                Optional password for the CVSROOT (only supported for
                ``pserver`` types).

            validate (bool, optional):
                Whether to validate the provided CVSROOT and username/password.

                If set, and the resulting CVSROOT would be invalid, then an
                error is raised.

                If not set, the resulting CVSROOT will have the invalid data
                stripped.

                This will check for ports, usernames, and passwords, depending
                on the type of CVSROOT provided.

        Returns:
            unicode:
            The resulting validated CVSROOT.

        Raises:
            django.core.exceptions.ValidationError:
                The provided data had a validation error. This is only raised
                if ``validate`` is set.
        """
        # CVS supports two types of CVSROOTs: Remote and local.
        #
        # The remote repositories share the same CVSROOT format (defined by
        # CVSTool.remove_cvsroot_re), and the local repositories share their
        # own format (CVSTool.local_cvsroot_re), but the two formats differ
        # in many ways.
        #
        # We'll be testing both formats to see if the path matches, starting
        # with remote repositories (the most common).
        m = cls.remote_cvsroot_re.match(cvsroot)

        if m:
            # The user either specified a valid remote repository path, or
            # simply hostname:port/path. In either case, we'll want to
            # construct our own CVSROOT based on that information and the
            # provided username and password, favoring the credentials in the
            # CVSROOT and falling back on those provided in the repository
            # configuration.
            #
            # There are some restrictions, depending on the type of protocol:
            #
            # * Only "pserver" supports passwords.
            # * Only "pserver", "gserver", and "kserver" support ports.
            protocol = m.group('protocol') or 'pserver'
            username = m.group('username') or username
            password = m.group('password') or password
            port = m.group('port') or None
            path = m.group('path')

            # Apply the restrictions, validating if necessary.
            if password and protocol != 'pserver':
                if validate:
                    raise ValidationError(
                        _('"%s" CVSROOTs do not support passwords.')
                        % protocol)

                password = None

            if port and protocol not in ('pserver', 'gserver', 'kserver'):
                if validate:
                    raise ValidationError(
                        _('"%s" CVSROOTs do not support specifying ports.')
                        % protocol)

                port = None

            # Inject any credentials into the string.
            if username:
                if password:
                    credentials = '%s:%s@' % (username, password)
                else:
                    credentials = '%s@' % (username)
            else:
                credentials = ''

            cvsroot = ':%s:%s%s:%s%s' % (protocol,
                                         credentials,
                                         m.group('hostname'),
                                         port or '',
                                         path)
        else:
            m = cls.local_cvsroot_re.match(cvsroot)

            if m:
                # This is a local path (either :local: or :fork). It's much
                # easier to deal with. We're only dealing with a path.
                path = m.group('path')

                if validate:
                    if username:
                        raise ValidationError(
                            _('"%s" CVSROOTs do not support usernames.')
                            % m.group('protocol'))

                    if password:
                        raise ValidationError(
                            _('"%s" CVSROOTs do not support passwords.')
                            % m.group('protocol'))
            else:
                # We couldn't parse this as a standard CVSROOT. It might be
                # something a lot more specific. We'll treat it as-is, but
                # this might cause some small issues in the diff viewer (files
                # may show up as read-only, since we can't strip the path,
                # for example).
                #
                # We could in theory treat this as a validation error, but
                # we might break special cases with specialized protocols
                # (which do exist but are rare).
                path = cvsroot

        return cvsroot, path
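
The string assembly at the end of the remote branch is easy to misread, so here is a minimal standalone sketch (simplified, not CVSTool itself) of how the credential and port pieces combine into a CVSROOT such as ':pserver:alice:secret@cvs.example.com:2401/cvsroot/project':

def build_remote_cvsroot(protocol, username, password, hostname, port, path):
    # Credentials end with '@' so they can be dropped in directly before the
    # hostname; an empty string means anonymous access.
    if username:
        if password:
            credentials = '%s:%s@' % (username, password)
        else:
            credentials = '%s@' % username
    else:
        credentials = ''
    return ':%s:%s%s:%s%s' % (protocol, credentials, hostname, port or '', path)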

Example 20

View license
    def _move_child_within_tree(self, node, target, position):
        """
        Moves child node ``node`` within its current tree relative to
        the given ``target`` node as specified by ``position``.

        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        width = right - left + 1
        tree_id = getattr(node, self.tree_id_attr)
        target_left = getattr(target, self.left_attr)
        target_right = getattr(target, self.right_attr)
        target_level = getattr(target, self.level_attr)

        if position == 'last-child' or position == 'first-child':
            if node == target:
                raise InvalidMove(_('A node may not be made a child of itself.'))
            elif left < target_left < right:
                raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
            if position == 'last-child':
                if target_right > right:
                    new_left = target_right - width
                    new_right = target_right - 1
                else:
                    new_left = target_right
                    new_right = target_right + width - 1
            else:
                if target_left > left:
                    new_left = target_left - width + 1
                    new_right = target_left
                else:
                    new_left = target_left + 1
                    new_right = target_left + width
            level_change = level - target_level - 1
            parent = target
        elif position == 'left' or position == 'right':
            if node == target:
                raise InvalidMove(_('A node may not be made a sibling of itself.'))
            elif left < target_left < right:
                raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
            if position == 'left':
                if target_left > left:
                    new_left = target_left - width
                    new_right = target_left - 1
                else:
                    new_left = target_left
                    new_right = target_left + width - 1
            else:
                if target_right > right:
                    new_left = target_right - width + 1
                    new_right = target_right
                else:
                    new_left = target_right + 1
                    new_right = target_right + width
            level_change = level - target_level
            parent = getattr(target, self.parent_attr)
        else:
            raise ValueError(_('An invalid position was given: %s.') % position)

        left_boundary = min(left, new_left)
        right_boundary = max(right, new_right)
        left_right_change = new_left - left
        gap_size = width
        if left_right_change > 0:
            gap_size = -gap_size

        opts = self.model._meta
        # The level update must come before the left update to keep
        # MySQL happy - left seems to refer to the updated value
        # immediately after its update has been specified in the query
        # with MySQL, but not with SQLite or Postgres.
        move_subtree_query = """
        UPDATE %(table)s
        SET %(level)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                  THEN %(level)s - %%s
                ELSE %(level)s END,
            %(left)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                  THEN %(left)s + %%s
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                  THEN %(left)s + %%s
                ELSE %(left)s END,
            %(right)s = CASE
                WHEN %(right)s >= %%s AND %(right)s <= %%s
                  THEN %(right)s + %%s
                WHEN %(right)s >= %%s AND %(right)s <= %%s
                  THEN %(right)s + %%s
                ELSE %(right)s END,
            %(parent)s = CASE
                WHEN %(pk)s = %%s
                  THEN %%s
                ELSE %(parent)s END
        WHERE %(tree_id)s = %%s""" % {
            'table': qn(opts.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
            'parent': qn(opts.get_field(self.parent_attr).column),
            'pk': qn(opts.pk.column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
        }

        cursor = connection.cursor()
        cursor.execute(move_subtree_query, [
            left, right, level_change,
            left, right, left_right_change,
            left_boundary, right_boundary, gap_size,
            left, right, left_right_change,
            left_boundary, right_boundary, gap_size,
            node.pk, parent.pk,
            tree_id])

        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, new_left)
        setattr(node, self.right_attr, new_right)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.parent_attr, parent)
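
The validation at the top of _move_child_within_tree() raises InvalidMove with _()-wrapped messages. A minimal sketch (hypothetical exception class, not the project's actual one) showing that the catalogue lookup happens in the language active at the moment the exception is raised:

from django.utils.translation import gettext as _

class InvalidMove(Exception):
    """Raised when a requested node move would corrupt the tree."""

def check_move(node, target):
    # The msgid is constant; the rendered text depends on the language
    # active when this line executes.
    if node == target:
        raise InvalidMove(_('A node may not be made a child of itself.'))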

Example 21

Project: classic.rhizome.org
Source File: managers.py
View license
    def _move_child_within_tree(self, node, target, position):
        """
        Moves child node ``node`` within its current tree relative to
        the given ``target`` node as specified by ``position``.

        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        width = right - left + 1
        tree_id = getattr(node, self.tree_id_attr)
        target_left = getattr(target, self.left_attr)
        target_right = getattr(target, self.right_attr)
        target_level = getattr(target, self.level_attr)

        if position == 'last-child' or position == 'first-child':
            if node == target:
                raise InvalidMove(_('A node may not be made a child of itself.'))
            elif left < target_left < right:
                raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
            if position == 'last-child':
                if target_right > right:
                    new_left = target_right - width
                    new_right = target_right - 1
                else:
                    new_left = target_right
                    new_right = target_right + width - 1
            else:
                if target_left > left:
                    new_left = target_left - width + 1
                    new_right = target_left
                else:
                    new_left = target_left + 1
                    new_right = target_left + width
            level_change = level - target_level - 1
            parent = target
        elif position == 'left' or position == 'right':
            if node == target:
                raise InvalidMove(_('A node may not be made a sibling of itself.'))
            elif left < target_left < right:
                raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
            if position == 'left':
                if target_left > left:
                    new_left = target_left - width
                    new_right = target_left - 1
                else:
                    new_left = target_left
                    new_right = target_left + width - 1
            else:
                if target_right > right:
                    new_left = target_right - width + 1
                    new_right = target_right
                else:
                    new_left = target_right + 1
                    new_right = target_right + width
            level_change = level - target_level
            parent = getattr(target, self.parent_attr)
        else:
            raise ValueError(_('An invalid position was given: %s.') % position)

        left_boundary = min(left, new_left)
        right_boundary = max(right, new_right)
        left_right_change = new_left - left
        gap_size = width
        if left_right_change > 0:
            gap_size = -gap_size

        opts = self.model._meta
        # The level update must come before the left update to keep
        # MySQL happy - left seems to refer to the updated value
        # immediately after its update has been specified in the query
        # with MySQL, but not with SQLite or Postgres.
        move_subtree_query = """
        UPDATE %(table)s
        SET %(level)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                  THEN %(level)s - %%s
                ELSE %(level)s END,
            %(left)s = CASE
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                  THEN %(left)s + %%s
                WHEN %(left)s >= %%s AND %(left)s <= %%s
                  THEN %(left)s + %%s
                ELSE %(left)s END,
            %(right)s = CASE
                WHEN %(right)s >= %%s AND %(right)s <= %%s
                  THEN %(right)s + %%s
                WHEN %(right)s >= %%s AND %(right)s <= %%s
                  THEN %(right)s + %%s
                ELSE %(right)s END,
            %(parent)s = CASE
                WHEN %(pk)s = %%s
                  THEN %%s
                ELSE %(parent)s END
        WHERE %(tree_id)s = %%s""" % {
            'table': qn(opts.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
            'parent': qn(opts.get_field(self.parent_attr).column),
            'pk': qn(opts.pk.column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
        }

        cursor = connection.cursor()
        cursor.execute(move_subtree_query, [
            left, right, level_change,
            left, right, left_right_change,
            left_boundary, right_boundary, gap_size,
            left, right, left_right_change,
            left_boundary, right_boundary, gap_size,
            node.pk, parent.pk,
            tree_id])

        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, new_left)
        setattr(node, self.right_attr, new_right)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.parent_attr, parent)

Example 22

Project: django-adminactions
Source File: mass_update.py
View license
def mass_update(modeladmin, request, queryset):  # noqa
    """
        mass update queryset
    """

    def not_required(field, **kwargs):
        """ force all fields as not required"""
        kwargs['required'] = False
        return field.formfield(**kwargs)

    def _doit():
        errors = {}
        updated = 0
        for record in queryset:
            for field_name, value_or_func in list(form.cleaned_data.items()):
                if callable(value_or_func):
                    old_value = getattr(record, field_name)
                    setattr(record, field_name, value_or_func(old_value))
                else:
                    setattr(record, field_name, value_or_func)
            if clean:
                record.clean()
            record.save()
            updated += 1
        if updated:
            messages.info(request, _("Updated %s records") % updated)

        if len(errors):
            messages.error(request, "%s records not updated due errors" % len(errors))
        adminaction_end.send(sender=modeladmin.model,
                             action='mass_update',
                             request=request,
                             queryset=queryset,
                             modeladmin=modeladmin,
                             form=form,
                             errors=errors,
                             updated=updated)

    opts = modeladmin.model._meta
    perm = "{0}.{1}".format(opts.app_label, get_permission_codename('adminactions_massupdate', opts))
    if not request.user.has_perm(perm):
        messages.error(request, _('Sorry you do not have rights to execute this action'))
        return

    try:
        adminaction_requested.send(sender=modeladmin.model,
                                   action='mass_update',
                                   request=request,
                                   queryset=queryset,
                                   modeladmin=modeladmin)
    except ActionInterrupted as e:
        messages.error(request, str(e))
        return

    # Allows a custom mass update Form to be specified on the ModelAdmin
    mass_update_form = getattr(modeladmin, 'mass_update_form', MassUpdateForm)

    MForm = modelform_factory(modeladmin.model, form=mass_update_form,
                              exclude=('pk',),
                              formfield_callback=not_required)
    grouped = defaultdict(lambda: [])
    selected_fields = []
    initial = {'_selected_action': request.POST.getlist(helpers.ACTION_CHECKBOX_NAME),
               'select_across': request.POST.get('select_across') == '1',
               'action': 'mass_update'}

    if 'apply' in request.POST:
        form = MForm(request.POST)
        if form.is_valid():
            try:
                adminaction_start.send(sender=modeladmin.model,
                                       action='mass_update',
                                       request=request,
                                       queryset=queryset,
                                       modeladmin=modeladmin,
                                       form=form)
            except ActionInterrupted as e:
                messages.error(request, str(e))
                return HttpResponseRedirect(request.get_full_path())

            # need_transaction = form.cleaned_data.get('_unique_transaction', False)
            validate = form.cleaned_data.get('_validate', False)
            clean = form.cleaned_data.get('_clean', False)

            if validate:
                with compat.atomic():
                    _doit()

            else:
                values = {}
                for field_name, value in list(form.cleaned_data.items()):
                    if isinstance(form.fields[field_name], ModelMultipleChoiceField):
                        messages.error(request, "Unable no mass update ManyToManyField without 'validate'")
                        return HttpResponseRedirect(request.get_full_path())
                    elif callable(value):
                        messages.error(request, "Unable no mass update using operators without 'validate'")
                        return HttpResponseRedirect(request.get_full_path())
                    elif field_name not in ['_selected_action', '_validate', 'select_across', 'action',
                                            '_unique_transaction', '_clean']:
                        values[field_name] = value
                queryset.update(**values)

            return HttpResponseRedirect(request.get_full_path())
    else:
        initial.update({'action': 'mass_update', '_validate': 1})
        # form = MForm(initial=initial)
        prefill_with = request.POST.get('prefill-with', None)
        prefill_instance = None
        try:
            # Gets the instance directly from the queryset for data security
            prefill_instance = queryset.get(pk=prefill_with)
        except ObjectDoesNotExist:
            pass

        form = MForm(initial=initial, instance=prefill_instance)

    for el in queryset.all()[:10]:
        for f in modeladmin.model._meta.fields:
            if f.name not in form._no_sample_for:
                if hasattr(f, 'flatchoices') and f.flatchoices:
                    grouped[f.name] = list(dict(getattr(f, 'flatchoices')).values())
                elif hasattr(f, 'choices') and f.choices:
                    grouped[f.name] = list(dict(getattr(f, 'choices')).values())
                elif isinstance(f, df.BooleanField):
                    grouped[f.name] = [True, False]
                else:
                    value = getattr(el, f.name)
                    if value is not None and value not in grouped[f.name]:
                        grouped[f.name].append(value)
                    initial[f.name] = initial.get(f.name, value)

    adminForm = helpers.AdminForm(form, modeladmin.get_fieldsets(request), {}, [], model_admin=modeladmin)
    media = modeladmin.media + adminForm.media
    dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.date) else str(obj)
    tpl = 'adminactions/mass_update.html'
    ctx = {'adminform': adminForm,
           'form': form,
           'action_short_description': mass_update.short_description,
           'title': u"%s (%s)" % (
               mass_update.short_description.capitalize(),
               smart_text(modeladmin.opts.verbose_name_plural),
           ),
           'grouped': grouped,
           'fieldvalues': json.dumps(grouped, default=dthandler),
           'change': True,
           'selected_fields': selected_fields,
           'is_popup': False,
           'save_as': False,
           'has_delete_permission': False,
           'has_add_permission': False,
           'has_change_permission': True,
           'opts': modeladmin.model._meta,
           'app_label': modeladmin.model._meta.app_label,
           # 'action': 'mass_update',
           # 'select_across': request.POST.get('select_across')=='1',
           'media': mark_safe(media),
           'selection': queryset}
    if django.VERSION[:2] > (1, 7):
        ctx.update(modeladmin.admin_site.each_context(request))
    else:
        ctx.update(modeladmin.admin_site.each_context())

    if django.VERSION[:2] > (1, 8):
        return render(request, tpl, context=ctx)
    else:
        return render_to_response(tpl, RequestContext(request, ctx))

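The action above resolves its form with getattr(modeladmin, 'mass_update_form', MassUpdateForm), so a ModelAdmin can supply its own. A minimal sketch of that hook, assuming a hypothetical Book model and that MassUpdateForm and mass_update are importable from adminactions.mass_update (not taken from the project above):

from django.contrib import admin
from adminactions.mass_update import MassUpdateForm, mass_update  # assumed import path

from myapp.models import Book  # hypothetical model


class BookMassUpdateForm(MassUpdateForm):
    """Custom form picked up by mass_update() through the 'mass_update_form' attribute."""


@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    mass_update_form = BookMassUpdateForm
    actions = [mass_update]  # expose the action on this ModelAdmin
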
Example 23

Project: django-adminactions
Source File: merge.py
View license
def merge(modeladmin, request, queryset):  # noqa
    """
    Merge two model instances. Move all foreign keys.

    """

    opts = modeladmin.model._meta
    perm = "{0}.{1}".format(opts.app_label, get_permission_codename('adminactions_merge', opts))
    if not request.user.has_perm(perm):
        messages.error(request, _('Sorry you do not have rights to execute this action'))
        return

    def raw_widget(field, **kwargs):
        """ force all fields as not required"""
        kwargs['widget'] = TextInput({'class': 'raw-value'})
        return field.formfield(**kwargs)

    merge_form = getattr(modeladmin, 'merge_form', MergeForm)
    MForm = modelform_factory(modeladmin.model,
                              form=merge_form,
                              exclude=('pk', ),
                              formfield_callback=raw_widget)
    OForm = modelform_factory(modeladmin.model,
                              exclude=('pk', ),
                              formfield_callback=raw_widget)

    tpl = 'adminactions/merge.html'
    # transaction_supported = model_supports_transactions(modeladmin.model)
    ctx = {
        '_selected_action': request.POST.getlist(helpers.ACTION_CHECKBOX_NAME),
        'transaction_supported': 'Un',
        'select_across': request.POST.get('select_across') == '1',
        'action': request.POST.get('action'),
        'fields': [f for f in queryset.model._meta.fields if not f.primary_key and f.editable],
        'app_label': queryset.model._meta.app_label,
        'result': '',
        'opts': queryset.model._meta}

    if 'preview' in request.POST:
        master = queryset.get(pk=request.POST.get('master_pk'))
        original = clone_instance(master)
        other = queryset.get(pk=request.POST.get('other_pk'))
        formset = formset_factory(OForm)(initial=[model_to_dict(master), model_to_dict(other)])
        with transaction.nocommit():
            form = MForm(request.POST, instance=master)
            other.delete()
            form_is_valid = form.is_valid()
        if form_is_valid:
            ctx.update({'original': original})
            tpl = 'adminactions/merge_preview.html'
        else:
            master = queryset.get(pk=request.POST.get('master_pk'))
            other = queryset.get(pk=request.POST.get('other_pk'))

    elif 'apply' in request.POST:
        master = queryset.get(pk=request.POST.get('master_pk'))
        other = queryset.get(pk=request.POST.get('other_pk'))
        formset = formset_factory(OForm)(initial=[model_to_dict(master), model_to_dict(other)])
        with transaction.nocommit():
            form = MForm(request.POST, instance=master)
            stored_pk = other.pk
            other.delete()
            ok = form.is_valid()
            other.pk = stored_pk
        if ok:
            if form.cleaned_data['dependencies'] == MergeForm.DEP_MOVE:
                related = api.ALL_FIELDS
            else:
                related = None
            fields = form.cleaned_data['field_names']
            api.merge(master, other, fields=fields, commit=True, related=related)
            return HttpResponseRedirect(request.path)
        else:
            messages.error(request, form.errors)
    else:
        try:
            master, other = queryset.all()
            # Django 1.4 needs the trailing microseconds removed
            for field in master._meta.fields:
                if isinstance(field, models.DateTimeField):
                    for target in (master, other):
                        raw_value = getattr(target, field.name)
                        if raw_value:
                            fixed_value = datetime(
                                raw_value.year,
                                raw_value.month,
                                raw_value.day,
                                raw_value.hour,
                                raw_value.minute,
                                raw_value.second)
                            setattr(target, field.name, fixed_value)
        except ValueError:
            messages.error(request, _('Please select exactly 2 records'))
            return

        initial = {'_selected_action': request.POST.getlist(helpers.ACTION_CHECKBOX_NAME),
                   'select_across': 0,
                   'generic': MergeForm.GEN_IGNORE,
                   'dependencies': MergeForm.DEP_MOVE,
                   'action': 'merge',
                   'master_pk': master.pk,
                   'other_pk': other.pk}
        formset = formset_factory(OForm)(initial=[model_to_dict(master), model_to_dict(other)])
        form = MForm(initial=initial, instance=master)

    adminForm = helpers.AdminForm(form, modeladmin.get_fieldsets(request), {}, [], model_admin=modeladmin)
    media = modeladmin.media + adminForm.media
    ctx.update({'adminform': adminForm,
                'formset': formset,
                'media': mark_safe(media),
                'action_short_description': merge.short_description,
                'title': u"%s (%s)" % (
                    merge.short_description.capitalize(),
                    smart_text(modeladmin.opts.verbose_name_plural),
                ),
                'master': master,
                'other': other})
    if django.VERSION[:2] > (1, 7):
        ctx.update(modeladmin.admin_site.each_context(request))
    else:
        ctx.update(modeladmin.admin_site.each_context())
    if django.VERSION[:2] > (1, 8):
        return render(request, tpl, context=ctx)
    else:
        return render_to_response(tpl, RequestContext(request, ctx))

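The 'apply' branch above delegates the actual work to api.merge(). A minimal sketch of calling the same helper outside the admin, with hypothetical Author instances and field names; the arguments mirror the ones built from the form's cleaned_data:

from adminactions import api

master = Author.objects.get(pk=1)  # 'Author' is a hypothetical model
other = Author.objects.get(pk=2)

api.merge(master, other,
          fields=['name', 'email'],  # only these values are copied from 'other'
          related=api.ALL_FIELDS,    # move every FK pointing at 'other' onto 'master'
          commit=True)               # persist the merge, as the admin action above does
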
Example 24

Project: django-admin-timeline
Source File: views.py
View license
@csrf_exempt
@never_cache
@staff_member_required
def log(request, template_name=TEMPLATE_NAME, \
        template_name_ajax=TEMPLATE_NAME_AJAX):
    """
    Get a number of log entries. Serves both non-AJAX and AJAX-driven requests.

    Since we have a breakdown of entries per day and an AJAX-driven infinite
    scroll, and we want to avoid duplicated date headers, we always pass a
    variable named "last_date" when making another request to our main
    AJAX-driven view. So... this is our scenario:

    The initial timeline is rendered as normal HTML (non-AJAX request) from a
    list of log entries. We also send the date of the last element as
    "last_date" to the context, where it is used as the initial value of a
    global JavaScript variable. Later on that date is sent to the AJAX-driven
    view and used in rendering (the "render_to_string" method). After rendering
    the HTML to send back, we take the date of the last element and return it
    along with the rendered HTML in the JSON response. When receiving the JSON
    response, we update the above-mentioned global JavaScript variable with the
    value given.

    :param request: django.http.HttpRequest
    :param template_name: str
    :param template_name_ajax: str
    :return: django.http.HttpResponse

    This view accepts the following POST variables (all optional).
    :param page: int - Page number to get.
    :param user_id: int - If set, used to filter the user by.
    :param last_date: str - Example value "2012-05-24".
    :param start_date: str - If set, used as a start date to filter the actions
        with. Example value "2012-05-24".
    :param end_date: str - If set, used as an end date to filter the actions
        with. Example value "2012-05-24".

    NOTE: If filtering gets too complicated, we need forms to validate and
    process the POST data.
    """
    def _get_date_from_string(s):
        """
        Gets a date from the given string.

        :param s: str - date in string format
        :return: datetime.datetime
        """
        try:
            return datetime.date(*map(lambda x: int(x), s.split("-")))
        except Exception as e:
            return ""

    try:
        page = int(request.POST.get('page', 1))
        if page < 1:
            page = 1
    except Exception as e:
        page = 1

    users = []
    content_types = []
    filter_form = None

    if 'POST' == request.method:
        post = dict(request.POST)
        if 'users[]' in post:
            post['users'] = post.pop('users[]')
        if 'content_types[]' in post:
            post['content_types'] = post.pop('content_types[]')

        filter_form = FilterForm(post)
        if filter_form.is_valid():
            users = filter_form.cleaned_data['users']
            content_types = filter_form.cleaned_data['content_types']
        else:
            pass # Anything to do here?
    else:
        filter_form = FilterForm()

    # Some kind of a pagination
    start = (page - 1) * NUMBER_OF_ENTRIES_PER_PAGE
    end = page * NUMBER_OF_ENTRIES_PER_PAGE

    # Getting admin log entries, taking the page number into consideration.
    log_entries = LogEntry.objects.all().select_related('content_type', 'user')

    start_date = _get_date_from_string(request.POST.get('start_date'))
    end_date = _get_date_from_string(request.POST.get('end_date'))

    if start_date:
        log_entries = log_entries.filter(action_time__gte=start_date) # TODO

    if end_date:
        log_entries = log_entries.filter(action_time__lte=end_date) # TODO

    # If users given, filtering by users
    if users:
        log_entries = log_entries.filter(user__id__in=users)

    # If content types given, filtering by content types
    if content_types:
        log_entries = log_entries.filter(content_type__id__in=content_types)

    # Applying limits / freezing the queryset
    log_entries = log_entries[start:end]

    if log_entries:
        last_date = date_format(
            log_entries[len(log_entries) - 1].action_time, "Y-m-d"
            )
    else:
        last_date = request.POST.get('last_date', None)

    # Using different template for AJAX driven requests
    if request.is_ajax():
        # Context to render the AJAX driven HTML with
        context = {
            'admin_log': log_entries,
            'number_of_entries_per_page': NUMBER_OF_ENTRIES_PER_PAGE,
            'page': page,
            'last_date': request.POST.get('last_date', None),
            'SINGLE_LOG_ENTRY_DATE_FORMAT': SINGLE_LOG_ENTRY_DATE_FORMAT,
            'LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT': \
                LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT
        }

        # Rendering HTML for an AJAX driven request
        html = render_to_string(
            template_name_ajax,
            context,
            context_instance=RequestContext(request)
        )

        # Context to send back to user in a JSON response
        context = {
            'html': html,
            'last_date': last_date,
            'success': 1 if len(log_entries) else 0
        }
        return HttpResponse(json.dumps(context))

    # Context for a non-AJAX request
    context = {
        'admin_log': log_entries,
        'number_of_entries_per_page': NUMBER_OF_ENTRIES_PER_PAGE,
        'page': page,
        'last_date': last_date,
        'start_date': date_format(start_date, "Y-m-d") if start_date else "",
        'end_date': date_format(end_date, "Y-m-d") if end_date else "",
        'users': [int(u) for u in users],
        'content_types': [int(ct) for ct in content_types],
        'filter_form': filter_form,
        'SINGLE_LOG_ENTRY_DATE_FORMAT': SINGLE_LOG_ENTRY_DATE_FORMAT,
        'LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT': \
            LOG_ENTRIES_DAY_HEADINGS_DATE_FORMAT,
        'title': _("Timeline") # For template breadcrumbs, etc.
    }

    return render_to_response(
        template_name, context, context_instance=RequestContext(request)
        )

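A quick way to exercise the view above is Django's test client; this sketch passes the optional POST parameters documented in the docstring. The URL and the staff_user object are assumptions, and the X-Requested-With header is what makes request.is_ajax() take the JSON branch:

from django.test import Client

client = Client()
client.force_login(staff_user)  # the view is @staff_member_required; 'staff_user' is hypothetical

response = client.post('/admin-timeline/', {    # URL is an assumption
    'page': 2,                    # page number to get
    'last_date': '2012-05-24',    # avoids duplicated date headers on infinite scroll
    'start_date': '2012-05-01',
    'end_date': '2012-05-31',
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')      # triggers the AJAX (JSON) response
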
Example 25

Project: django-slim
Source File: fields.py
View license
    def contribute_to_class(self, cls, name):
        """
        Language field consists of more than one database record. We have ``language`` (CharField)
        and ``translation_of`` (ForeignKey to ``cls``) in order to identify translated and
        primary objects.

        We have a set of very useful methods implemented in order to get translations easily.
        """
        self.name = name
        self.translation_of = models.ForeignKey(cls, blank=True, null=True, verbose_name=_('Translation of'), \
                                                related_name='translations', \
                                                limit_choices_to={'language': default_language}, \
                                                help_text=_('Leave this empty for entries in the primary language.'))
        cls.add_to_class('translation_of', self.translation_of)
        super(LanguageField, self).contribute_to_class(cls, name)

        if ENABLE_MONKEY_PATCHING:
            @monkeypatch_property(cls)
            def is_multilingual(self):
                """
                Simple flag to use on objects to find out whether they are multilingual
                or not.

                :return bool: Always returns boolean True
                """
                return True

            @monkeypatch_method(cls)
            def get_redirect_to_target(self, request):
                """
                Find an acceptable redirect target. If this is a local link, then try
                to find the page this redirect references and translate it according
                to the user's language. This way, one can easily implement a localized
                "/"-url to welcome page redirection.
                """
                target = self.redirect_to
                if target and target.find('//') == -1: # Not an offsite link http://bla/blubb
                    try:
                        page = cls.objects.page_for_path(target)
                        page = page.get_translation(getattr(request, 'LANGUAGE_CODE', None))
                        target = page.get_absolute_url()
                    except cls.DoesNotExist:
                        pass
                return target

            @monkeypatch_method(cls)
            def available_translations(self):
                """
                Returns available translations.

                :return iterable: At this moment a list of objects.
                """
                if not self.id: # New, unsaved pages have no translations
                    return []
                if is_primary_language(self.language):
                    return self.translations.all()
                elif self.translation_of:
                    return [self.translation_of] + list(self.translation_of.translations.exclude(
                        language=self.language))
                else:
                    return []

            @monkeypatch_method(cls)
            def get_original_translation(self, *args, **kwargs):
                """
                Gets original translation of current object.

                :return obj: Object of the same class as the one queried.
                """
                if is_primary_language(self.language):
                    return self
                return self.translation_of

            @monkeypatch_method(cls)
            def translation_admin(self, *args, **kwargs):
                """
                Gets HTML with a URL to the original translation, if available. For admin use.

                :return str:
                """
                if self.translation_of:
                    if not PY3:
                        url_title = unicode(self.translation_of)
                    else:
                        url_title = self.translation_of

                    return admin_change_url(
                        self._meta.app_label,
                        self._meta.module_name,
                        self.translation_of.id,
                        url_title = url_title
                        )
                return ''
            translation_admin.allow_tags = True
            translation_admin.short_description = _('Translation of')

            @monkeypatch_method(cls)
            def _available_translations_admin(self, include_self=True):
                """
                Gets HTML with all available translation URLs for the current object, if available. For admin use.

                :return str:
                """
                try:
                    original_translation = self.original_translation
                    available_translations = list(self.available_translations())
                    languages_keys = get_languages_keys()
                    languages = dict(get_languages())

                    if include_self:
                        available_translations.append(self)

                    output = []
                    # Processing all available translations. Adding edit links.
                    if available_translations:
                        for translation in available_translations:
                            if not PY3:
                                url_title = unicode(languages[translation.language])
                            else:
                                url_title = languages[translation.language]
                            output.append(
                                admin_change_url(
                                    translation._meta.app_label,
                                    translation._meta.module_name,
                                    translation.id,
                                    url_title = url_title
                                    )
                                )
                            languages_keys.remove(translation.language)

                    if self.pk and self.language in languages_keys:
                        languages_keys.remove(self.language)

                    # For all languages that are still available (the original object has no translations for them)
                    for language in languages_keys:
                        url = admin_add_url(
                                self._meta.app_label,
                                self._meta.module_name,
                                '?translation_of=%s&amp;language=%s' % (str(original_translation.id), language)
                                )
                        if not PY3:
                            name = unicode(languages[language])
                        else:
                            name = languages[language]

                        output.append(u'<a href="%(url)s" style="color:#baa">%(name)s</a>' % {'url': url, 'name': name})
                    return ' | '.join(output)
                except Exception as e:
                    return ''

            @monkeypatch_method(cls)
            def available_translations_admin(self, *args, **kwargs):
                """
                Gets HTML with all available translation URLs for the current object, if available. For admin use.

                :return str:
                """
                return self._available_translations_admin(include_self=True, *args, **kwargs)
            available_translations_admin.allow_tags = True
            available_translations_admin.short_description = _('Translations')

            @monkeypatch_method(cls)
            def available_translations_exclude_current_admin(self, *args, **kwargs):
                """
                Same as `available_translations_admin` but does not include the current object in the list.

                :return str:
                """
                return self._available_translations_admin(include_self=False, *args, **kwargs)
            available_translations_exclude_current_admin.allow_tags = True
            available_translations_exclude_current_admin.short_description = _('Translations')

            @monkeypatch_property(cls)
            def original_translation(self):
                """
                Property for ``get_original_translation`` method.

                :return obj: Object of the same class as the one queried.
                """
                return self.get_original_translation()

            @monkeypatch_method(cls)
            def get_translation_for(self, language):
                """
                Get the translation of the article in the given language.

                :param str language: Which shall be one of the languages specified in ``LANGUAGES``
                    in `settings.py`.
                :return obj: Either an object of the same class, or None if no translations
                    are available for the given ``language``.
                """
                if language not in get_languages_keys():
                    return None
                if str(self.language) == str(language):
                    return self
                if str(self.original_translation.language) == str(language):
                    return self.original_translation
                try:
                    return self.original_translation.translations.get(language=language)
                except Exception as e:
                    return None

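A minimal sketch of the model-level API those monkey-patched methods expose, assuming a hypothetical Article model; the import path for LanguageField is an assumption and may differ between django-slim versions:

from django.db import models
from slim import LanguageField  # assumed import path


class Article(models.Model):
    title = models.CharField(max_length=100)
    language = LanguageField()  # contributes 'translation_of' and the helpers patched above


# article.available_translations()   -> translations of this object
# article.get_translation_for('de')  -> the German translation, or None
# article.original_translation       -> the object in the primary language
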
Example 26

Project: baruwa
Source File: sendpdfreports.py
View license
    def handle(self, *args, **options):
        if len(args) != 0:
            raise CommandError(_("Command doesn't accept any arguments"))

        by_domain = options.get('by_domain')
        domain_name = options.get('domain_name')
        copy_admin = options.get('copy_admin')
        period = options.get('period')
        include_daily = options.get('include_daily')
        startdate = options.get('startdate')
        end_date = options.get('enddate')
        enddate = None

        if startdate and end_date:
            if not checkdate(startdate) or not checkdate(end_date):
                raise CommandError(_("The startdate, enddate specified is invalid"))
            daterange = (startdate, end_date)
        else:
            daterange = None

        period_re = re.compile(r"(?P<num>(\d+))\s+(?P<period>(day|week|month))(?:s)?")
        if period:
            match = period_re.match(period)
            if not match:
                raise CommandError(_("The period you specified is invalid"))
            num = match.group('num')
            ptype = match.group('period')
            if not ptype.endswith('s'):
                ptype = ptype + 's'
            delta = datetime.timedelta(**{ptype: int(num)})
            enddate = datetime.date.today() - delta

        table_style = TableStyle([
            ('FONT', (0, 0), (-1, -1), 'Helvetica'),
            ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('FONTSIZE', (0, 0), (-1, -1), 8),
            ('GRID', (0, 0), (-1, -1), 0.15, colors.black),
            ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
            ('ALIGN', (4, 1), (-1, -1), 'CENTER'),
            ('ALIGN', (0, 0), (0, -1), 'CENTER'),
            ('VALIGN', (4, 1), (-1, -1), 'MIDDLE'),
            ('SPAN', (4, 1), (-1, -1)),
        ])

        styles = getSampleStyleSheet()

        reports = [
            [
                'from_address', {'from_address__exact': ""}, 'num_count',
                'Top senders by quantity'],
            [
                'from_address', {'from_address__exact': ""}, 'total_size',
                'Top senders by volume'],
            [
                'from_domain', {'from_domain__exact': ""}, 'num_count',
                'Top sender domains by quantity'],
            [
                'from_domain', {'from_domain__exact': ""}, 'total_size',
                'Top sender domains by volume'],
            [
                'to_address', {'to_address__exact': ""}, 'num_count',
                'Top recipients by quantity'],
            [
                'to_address', {'to_address__exact': ""}, 'total_size',
                'Top recipients by volume'],
            [
                'to_domain', {'to_domain__exact': "",
                'to_domain__isnull': False}, 'num_count',
                'Top recipient domains by quantity'],
            [
                'to_domain', {'to_domain__exact': "",
                'to_domain__isnull': False}, 'total_size',
                'Top recipient domains by volume'],
        ]

        emails = []
        admin_addrs = []
        if copy_admin:
            mails = User.objects.values('email').filter(is_superuser=True)
            admin_addrs = [mail['email'] for mail in mails]

        from_email = getattr(settings, 'DEFAULT_FROM_EMAIL',
            '[email protected]')
        url = getattr(settings, 'QUARANTINE_REPORT_HOSTURL', '')
        logo_dir = getattr(settings, 'MEDIA_ROOT', '')
        img = Image(logo_dir + '/imgs/css/logo.jpg')

        def build_chart(data, column, order, title):
            "build chart"
            headings = [('', _('Address'), _('Count'), _('Volume'), '')]
            rows = [[draw_square(PIE_CHART_COLORS[index]),
            tds_trunc(row[column], 45), row['num_count'],
            filesizeformat(row['total_size']), '']
            for index, row in enumerate(data)]

            if len(rows) != 10:
                missing = 10 - len(rows)
                add_rows = [
                    ('', '', '', '', '') for ind in range(missing)
                    ]
                rows.extend(add_rows)

            headings.extend(rows)
            dat = [row[order] for row in data]
            total = sum(dat)
            labels = [
                    ("%.1f%%" % ((1.0 * row[order] / total) * 100))
                    for row in data
                ]

            pie = PieChart()
            pie.chart.labels = labels
            pie.chart.data = dat
            headings[1][4] = pie

            table_with_style = Table(headings, [0.2 * inch,
                2.8 * inch, 0.5 * inch, 0.7 * inch, 3.2 * inch])
            table_with_style.setStyle(table_style)

            paragraph = Paragraph(title, styles['Heading1'])

            return [paragraph, table_with_style]

        def build_parts(account, enddate, isdom=None, daterange=None):
            "build parts"
            parts = []
            sentry = 0
            for report in reports:
                column = report[0]
                exclude_kwargs = report[1]
                order_by = "-%s" % report[2]
                order = report[2]
                title = report[3]

                if isdom:
                    #dom
                    data = Message.objects.values(column).\
                    filter(Q(from_domain=account.address) | \
                    Q(to_domain=account.address)).\
                    exclude(**exclude_kwargs).annotate(
                        num_count=Count(column), total_size=Sum('size')
                    ).order_by(order_by)
                    if daterange:
                        data = data.filter(date__range=(daterange[0], daterange[1]))
                    elif enddate:
                        data = data.filter(date__gt=enddate)
                    data = data[:10]
                else:
                    #all users
                    data = Message.report.all(user, enddate, daterange).values(
                            column).exclude(**exclude_kwargs).annotate(
                            num_count=Count(column), total_size=Sum('size')
                            ).order_by(order_by)
                    data = data[:10]

                if data:
                    sentry += 1
                    pgraphs = build_chart(data, column, order, title)
                    parts.extend(pgraphs)
                    parts.append(Spacer(1, 70))
                    if (sentry % 2) == 0:
                        parts.append(PageBreak())
            parts.append(Paragraph(_('Message Totals'), styles['Heading1']))
            if isdom:
                #doms
                msg_totals = MessageTotals.objects.doms(account.address, enddate)
            else:
                #norm
                filters = []
                addrs = [
                    addr.address for addr in UserAddresses.objects.filter(
                        user=account
                    ).exclude(enabled__exact=0)]
                if enddate:
                    efilter = {
                                'filter': 3,
                                'field': 'date',
                                'value': str(enddate)
                               }
                    filters.append(efilter)
                msg_totals = MessageTotals.objects.all(
                                account, filters, addrs,
                                profile.account_type,
                                daterange)

            mail_total = []
            spam_total = []
            virus_total = []
            dates = []
            if include_daily:
                rows = [(
                Table([[draw_square(colors.white),
                Paragraph('Date', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.green),
                Paragraph('Mail totals', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.pink),
                Paragraph('Spam totals', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                Table([[draw_square(colors.red),
                Paragraph('Virus totals', styles["Heading6"])]],
                [0.35 * inch, 1.50 * inch, ]),
                )]
            for ind, msgt in enumerate(msg_totals):
                if ind % 10:
                    dates.append('')
                else:
                    dates.append(str(msgt.date))

                mail_total.append(int(msgt.mail_total))
                spam_total.append(int(msgt.spam_total))
                virus_total.append(int(msgt.virus_total))
                if include_daily:
                    rows.append((str(msgt.date), msgt.mail_total,
                    msgt.spam_total, msgt.virus_total))

            graph = BarChart()
            graph.chart.data = [
                    tuple(mail_total), tuple(spam_total),
                    tuple(virus_total)
                ]
            graph.chart.categoryAxis.categoryNames = dates
            graph_table = Table([[graph]], [7.4 * inch])
            parts.append(graph_table)
            if include_daily:
                rows.append(('Totals', sum(mail_total), sum(spam_total),
                sum(virus_total)))
                parts.append(Spacer(1, 20))
                graph_table = Table(rows, [1.85 * inch, 1.85 * inch,
                1.85 * inch, 1.85 * inch, ])
                graph_table.setStyle(TableStyle([
                ('FONTSIZE', (0, 0), (-1, -1), 8),
                ('FONT', (0, 0), (-1, -1), 'Helvetica'),
                ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
                ('GRID', (0, 0), (-1, -1), 0.15, colors.black),
                ('FONT', (0, -1), (-1, -1), 'Helvetica-Bold'),
                #('BACKGROUND', (0, -1), (-1, -1), colors.green),
                ]))
                parts.append(graph_table)
            return parts

        def build_pdf(charts):
            "Build a PDF"
            pdf = StringIO()
            doc = SimpleDocTemplate(pdf, topMargin=50, bottomMargin=18)
            logo = [(img, _('Baruwa mail report'))]
            logo_table = Table(logo, [2.0 * inch, 5.4 * inch])
            logo_table.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
            ('ALIGN', (0, 0), (-1, 0), 'LEFT'),
            ('ALIGN', (1, 0), (-1, 0), 'RIGHT'),
            ('FONTSIZE', (1, 0), (-1, 0), 10),
            ('LINEBELOW', (0, 0), (-1, -1), 0.15, colors.black),
            ]))
            parts = [logo_table]
            parts.append(Spacer(1, 20))
            parts.extend(charts)
            try:
                doc.build(parts)
            except IndexError:
                pass
            return pdf

        def gen_email(pdf, user, owner):
            "generate and return email"
            text_content = render_to_string('reports/pdf_report.txt',
                {'user': user, 'url': url})
            subject = _('Baruwa usage report for: %(user)s') % {
                        'user': owner}
            if email_re.match(user.username):
                toaddr = user.username
            if email_re.match(user.email):
                toaddr = user.email

            if admin_addrs:
                msg = EmailMessage(subject, text_content, from_email, [toaddr], admin_addrs)
            else:
                msg = EmailMessage(subject, text_content, from_email, [toaddr])
            msg.attach('baruwa.pdf', pdf.getvalue(), "application/pdf")
            print _("* Queue %(user)s's report to: %(addr)s") % {
                'user': owner, 'addr': toaddr}
            pdf.close()
            return msg

        print _("=================== Processing reports ======================")
        if by_domain:
            #do domain query
            #print "camacamlilone"
            domains = UserAddresses.objects.filter(Q(enabled=1), Q(address_type=1))
            if domain_name != 'all':
                domains = domains.filter(address=domain_name)
                if not domains:
                    print _("========== domain name %(dom)s does not exist ==========") % {
                    'dom': domain_name
                    }
            for domain in domains:
                if email_re.match(domain.user.email):
                    parts = build_parts(domain, enddate, True, daterange)
                    if parts:
                        pdf = build_pdf(parts)
                        email = gen_email(pdf, domain.user, domain.address)
                        emails.append(email)
        else:
            #do normal query
            profiles = UserProfile.objects.filter(send_report=1)
            for profile in profiles:
                try:
                    user = profile.user
                    if email_re.match(user.email) or email_re.match(user.username):
                        parts = build_parts(user, enddate, False, daterange)
                        if parts:
                            pdf = build_pdf(parts)
                            email = gen_email(pdf, user, user.username)
                            emails.append(email)
                except User.DoesNotExist:
                    pass

        if emails:
            try:
                conn = SMTPConnection()
                conn.send_messages(emails)
                print _("====== sending %(num)s messages =======") % {
                        'num': str(len(emails))}
            except Exception, exception:
                print _("Sending failed ERROR: %(error)s") % {'error': str(exception)}

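The --period option above is parsed with a small regex and turned into a timedelta that is subtracted from today's date. A worked sketch of that parsing with a made-up value; note that a 'month' period would raise TypeError here, since timedelta accepts no months argument:

import re
import datetime

period_re = re.compile(r"(?P<num>(\d+))\s+(?P<period>(day|week|month))(?:s)?")

match = period_re.match("2 weeks")
num = match.group('num')        # '2'
ptype = match.group('period')   # 'week'
if not ptype.endswith('s'):
    ptype = ptype + 's'         # 'weeks'
delta = datetime.timedelta(**{ptype: int(num)})  # timedelta(weeks=2)
enddate = datetime.date.today() - delta          # reports cover everything after this date
print(enddate)
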
Example 27

Project: baruwa
Source File: views.py
View license
@login_required
def report(request, report_kind):
    "displays a report"
    report_kind = int(report_kind)
    template = "reports/piereport.html"
    active_filters = []
    if report_kind == 1:
        data = run_query('from_address', {'from_address__exact': ""},
            '-num_count', request, active_filters)
        pie_data = pack_json_data(data, 'from_address', 'num_count')
        report_title = _("Top senders by quantity")
    elif report_kind == 2:
        data = run_query('from_address', {'from_address__exact': ""},
            '-total_size', request, active_filters)
        pie_data = pack_json_data(data, 'from_address', 'total_size')
        report_title = _("Top senders by volume")
    elif report_kind == 3:
        data = run_query('from_domain', {'from_domain__exact': ""},
            '-num_count', request, active_filters)
        pie_data = pack_json_data(data, 'from_domain', 'num_count')
        report_title = _("Top sender domains by quantity")
    elif report_kind == 4:
        data = run_query('from_domain', {'from_domain__exact': ""},
            '-total_size', request, active_filters)
        pie_data = pack_json_data(data, 'from_domain', 'total_size')
        report_title = _("Top sender domains by volume")
    elif report_kind == 5:
        data = run_query('to_address', {'to_address__exact': ""},
            '-num_count', request, active_filters)
        pie_data = pack_json_data(data, 'to_address', 'num_count')
        report_title = _("Top recipients by quantity")
    elif report_kind == 6:
        data = run_query('to_address', {'to_address__exact': ""},
            '-total_size', request, active_filters)
        pie_data = pack_json_data(data, 'to_address', 'total_size')
        report_title = _("Top recipients by volume")
    elif report_kind == 7:
        data = run_query('to_domain', {'to_domain__exact': "",
            'to_domain__isnull': False}, '-num_count', request,
            active_filters)
        pie_data = pack_json_data(data, 'to_domain', 'num_count')
        report_title = _("Top recipient domains by quantity")
    elif report_kind == 8:
        data = run_query('to_domain', {'to_domain__exact': "",
            'to_domain__isnull': False}, '-total_size',
            request, active_filters)
        pie_data = pack_json_data(data, 'to_domain', 'total_size')
        report_title = _("Top recipient domains by volume")
    elif report_kind == 9:
        from baruwa.messages.models import SpamScores

        filter_list = []
        addrs = []
        counts = []
        scores = []
        act = 3

        if not request.user.is_superuser:
            addrs = request.session['user_filter']['addresses']
            act = request.session['user_filter']['account_type']

        if request.session.get('filter_by', False):
            filter_list = request.session.get('filter_by')
            get_active_filters(filter_list, active_filters)

        data = SpamScores.objects.all(request.user, filter_list, addrs, act)
        for index, row in enumerate(data):
            value = index + 1
            scores.append({'value': value, 'text': str(row.score)})
            counts.append({'y': int(row.count),
            'tooltip': 'Score ' + str(row.score) + ': ' + str(row.count)})

        if request.is_ajax():
            data = [obj.obj_to_dict() for obj in data]

        pie_data = {'scores': scores, 'count': counts}
        template = "reports/barreport.html"
        report_title = _("Spam Score distribution")
    elif report_kind == 10:
        data = run_hosts_query(request, active_filters)
        pie_data = pack_json_data(data, 'clientip', 'num_count')
        if request.is_ajax():
            from baruwa.messages.templatetags.messages_extras import \
                tds_geoip, tds_hostname
            for row in data:
                row['country'] = tds_geoip(row['clientip'])
                row['hostname'] = tds_hostname(row['clientip'])
        report_title = _("Top mail hosts by quantity")
        template = "reports/relays.html"
    elif report_kind == 11:
        from baruwa.messages.models import MessageTotals

        filter_list = []
        addrs = []
        dates = []
        mail_total = []
        spam_total = []
        virus_total = []
        size_total = []
        act = 3

        if not request.user.is_superuser:
            addrs = request.session['user_filter']['addresses']
            act = request.session['user_filter']['account_type']

        if request.session.get('filter_by', False):
            filter_list = request.session.get('filter_by')
            get_active_filters(filter_list, active_filters)

        data = MessageTotals.objects.all(request.user, filter_list, addrs, act)

        for row in data:
            dates.append(str(row.date))
            mail_total.append(int(row.mail_total))
            spam_total.append(int(row.spam_total))
            virus_total.append(int(row.virus_total))
            size_total.append(int(row.size_total))

        pie_data = {'dates': [{'value': index + 1, 'text': date}
                    for index, date in enumerate(dates)],
                    'mail': [{'y': total,
                    'tooltip': 'Mail totals on ' + dates[index] + ': ' + str(total)}
                    for index, total in enumerate(mail_total)],
                    'spam': [{'y': total,
                    'tooltip': 'Spam totals on ' + dates[index] + ': ' + str(total)}
                    for index, total in enumerate(spam_total)],
                    'virii': [{'y': total,
                    'tooltip': 'Virus totals on ' + dates[index] + ': ' + str(total)}
                    for index, total in enumerate(virus_total)],
                    'volume': size_total,
                    #'volume_labels': [{'value': total,
                    #'text': str(filesizeformat(total))} for total in size_total],
                    'mail_total': sum(mail_total),
                    'spam_total': sum(spam_total),
                    'virus_total': sum(virus_total),
                    'volume_total': sum(size_total)}
        try:
            vpct = "%.1f" % ((1.0 * sum(virus_total) / sum(mail_total)) * 100)
            spct = "%.1f" % ((1.0 * sum(spam_total) / sum(mail_total)) * 100)
        except ZeroDivisionError:
            vpct = "0.0"
            spct = "0.0"
        pie_data['vpct'] = vpct
        pie_data['spct'] = spct
        #graph_totals = {}
        if request.is_ajax():
            data = [obj.obj_to_dict() for obj in data]

        report_title = _("Total messages [ After SMTP ]")
        template = "reports/listing.html"
    filter_form = FilterForm()

    if request.is_ajax():
        response = anyjson.dumps({'items': list(data), 'pie_data': pie_data})
        return HttpResponse(response,
            content_type='application/javascript; charset=utf-8')
    else:
        if report_kind not in [9, 11]:
            pie_data = anyjson.dumps(pie_data)
        return render_to_response(template, {'pie_data': pie_data,
            'top_items': data, 'report_title': report_title,
            'report_kind': report_kind, 'active_filters': active_filters,
            'form': filter_form}, context_instance=RequestContext(request))

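For report_kind == 9 the view assembles the bar-chart payload by hand. An illustrative sketch of the structure pie_data ends up with, using made-up score/count rows:

rows = [(5.2, 120), (7.8, 45)]  # hypothetical (score, count) pairs
scores, counts = [], []
for index, (score, count) in enumerate(rows):
    scores.append({'value': index + 1, 'text': str(score)})
    counts.append({'y': int(count),
                   'tooltip': 'Score ' + str(score) + ': ' + str(count)})
pie_data = {'scores': scores, 'count': counts}
# -> {'scores': [{'value': 1, 'text': '5.2'}, {'value': 2, 'text': '7.8'}],
#     'count':  [{'y': 120, 'tooltip': 'Score 5.2: 120'},
#                {'y': 45, 'tooltip': 'Score 7.8: 45'}]}
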
Example 28

Project: django-page-cms
Source File: forms.py
View license
def make_form(model_, placeholders):

    # a new form is needed every single time as some
    # initial data are bound
    class PageForm(forms.ModelForm):
        """Form for page creation"""

        def __init__(self, *args, **kwargs):
            super(PageForm, self).__init__(*args, **kwargs)
            for p in placeholders:
                if p.ctype not in self.fields:
                    self.fields[p.ctype] = forms.CharField()

        target = forms.IntegerField(required=False, widget=forms.HiddenInput)
        position = forms.CharField(required=False, widget=forms.HiddenInput)

        class Meta:
            model = model_
            exclude = ('author', 'last_modification_date', 'parent')

        title = forms.CharField(
            label=_('Title'),
            widget=forms.TextInput(),
        )
        slug = forms.CharField(
            label=_('Slug'),
            widget=forms.TextInput(),
            help_text=_('The slug will be used to create the page URL, it must be unique among the other pages of the same level.')
        )

        language = forms.ChoiceField(
            label=_('Language'),
            choices=settings.PAGE_LANGUAGES,
            widget=LanguageChoiceWidget()
        )
        template = forms.ChoiceField(
            required=False,
            label=_('Template'),
            choices=settings.get_page_templates(),
        )
        delegate_to = forms.ChoiceField(
            required=False,
            label=_('Delegate to application'),
            choices=get_choices(),
        )
        freeze_date = forms.DateTimeField(
            required=False,
            label=_('Freeze'),
            help_text=_("Don't publish any content after this date. Format is 'Y-m-d H:M:S'")
            # those make tests fail miserably
            #widget=widgets.AdminSplitDateTime()
            #widget=widgets.AdminTimeWidget()
        )

        def clean_slug(self):
            """Handle move action on the pages"""

            slug = slugify(self.cleaned_data['slug'])
            target = self.data.get('target', None)
            position = self.data.get('position', None)

            # this enforces a unique slug for every page
            if settings.PAGE_AUTOMATIC_SLUG_RENAMING:
                def is_slug_safe(slug):
                    content = Content.objects.get_content_slug_by_slug(slug)
                    if content is None:
                        return True
                    if self.instance.id:
                        if content.page.id == self.instance.id:
                            return True
                    else:
                        return False

                return automatic_slug_renaming(slug, is_slug_safe)

            if settings.PAGE_UNIQUE_SLUG_REQUIRED:
                # We can return here as no further checks
                # are necessary
                return unique_slug_required(self, slug)

            intersects_sites = intersect_sites_method(self)

            if not settings.PAGE_UNIQUE_SLUG_REQUIRED:
                if target and position:
                    target = Page.objects.get(pk=target)
                    if position in ['right', 'left']:
                        slugs = [sibling.slug() for sibling in
                                target.get_siblings()
                                if intersects_sites(sibling)]
                        slugs.append(target.slug())
                        if slug in slugs:
                            raise forms.ValidationError(error_dict['sibling_position_error'])
                    if position == 'first-child':
                        if slug in [sibling.slug() for sibling in
                                    target.get_children()
                                    if intersects_sites(sibling)]:
                            raise forms.ValidationError(error_dict['child_error'])
                else:
                    if self.instance.id:
                        if (slug in [sibling.slug() for sibling in
                            self.instance.get_siblings().exclude(
                                id=self.instance.id
                            ) if intersects_sites(sibling)]):
                            raise forms.ValidationError(error_dict['sibling_error'])
                    else:
                        if slug in [sibling.slug() for sibling in
                                    Page.objects.root()
                                    if intersects_sites(sibling)]:
                            raise forms.ValidationError(error_dict['sibling_root_error'])
            return slug

    return PageForm

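A minimal sketch of how such a factory could be used from a view; Page, placeholders, and request are assumed to be in scope, and none of this is taken from django-page-cms itself:

PageForm = make_form(Page, placeholders)  # fresh form class per call, as the comment above notes
form = PageForm(request.POST or None)
if form.is_valid():
    page = form.save(commit=False)
    page.author = request.user            # 'author' is excluded from the form's Meta above
    page.save()
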
Example 29

Project: djangobb
Source File: views.py
View license
def search(request):
    # TODO: use forms in every search type

    def _render_search_form(form=None):
        return render(request, 'djangobb_forum/search_form.html', {'categories': Category.objects.all(),
                'form': form,
                })

    if 'action' not in request.GET:
        return _render_search_form(form=PostSearchForm())

    if request.GET.get("show_as") == "posts":
        show_as_posts = True
        template_name = 'djangobb_forum/search_posts.html'
    else:
        show_as_posts = False
        template_name = 'djangobb_forum/search_topics.html'

    context = {}

    # Create 'user viewable' pre-filtered topics/posts querysets
    viewable_category = Category.objects.all()
    topics = Topic.objects.all().order_by("-last_post__created")
    posts = Post.objects.all().order_by('-created')
    user = request.user
    if not user.is_superuser:
        user_groups = user.groups.all() or [] # need 'or []' for anonymous user otherwise: 'EmptyManager' object is not iterable
        viewable_category = viewable_category.filter(Q(groups__in=user_groups) | Q(groups__isnull=True))

        topics = Topic.objects.filter(forum__category__in=viewable_category)
        posts = Post.objects.filter(topic__forum__category__in=viewable_category)

    base_url = None
    _generic_context = True

    action = request.GET['action']
    if action == 'show_24h':
        date = timezone.now() - timedelta(days=1)
        if show_as_posts:
            context["posts"] = posts.filter(Q(created__gte=date) | Q(updated__gte=date))
        else:
            context["topics"] = topics.filter(Q(last_post__created__gte=date) | Q(last_post__updated__gte=date))
        _generic_context = False
    elif action == 'show_new':
        if not user.is_authenticated():
            raise Http404("Search 'show_new' not available for anonymous user.")
        try:
            last_read = PostTracking.objects.get(user=user).last_read
        except PostTracking.DoesNotExist:
            last_read = None

        if last_read:
            if show_as_posts:
                context["posts"] = posts.filter(Q(created__gte=last_read) | Q(updated__gte=last_read))
            else:
                context["topics"] = topics.filter(Q(last_post__created__gte=last_read) | Q(last_post__updated__gte=last_read))
            _generic_context = False
        else:
            # searching for more than forum_settings.SEARCH_PAGE_SIZE in this way is not a good idea :]
            topics_id = [topic.id for topic in topics[:forum_settings.SEARCH_PAGE_SIZE] if forum_extras.has_unreads(topic, user)]
            topics = Topic.objects.filter(id__in=topics_id) # to create QuerySet

    elif action == 'show_unanswered':
        topics = topics.filter(post_count=1)
    elif action == 'show_subscriptions':
        topics = topics.filter(subscribers__id=user.id)
    elif action == 'show_user':
        # Show all posts from user or topics started by user
        if not user.is_authenticated():
            raise Http404("Search 'show_user' not available for anonymous user.")

        user_id = request.GET.get("user_id", user.id)
        try:
            user_id = int(user_id)
        except ValueError:
            raise SuspiciousOperation()

        if user_id != user.id:
            try:
                search_user = User.objects.get(id=user_id)
            except User.DoesNotExist:
                messages.error(request, _("Error: User unknown!"))
                return HttpResponseRedirect(request.path)
            messages.info(request, _("Filter by user '%(username)s'.") % {'username': search_user.username})

        if show_as_posts:
            posts = posts.filter(user__id=user_id)
        else:
            # show as topic
            topics = topics.filter(posts__user__id=user_id).order_by("-last_post__created").distinct()

        base_url = "?action=show_user&user_id=%s&show_as=" % user_id
    elif action == 'search':
        form = PostSearchForm(request.GET)
        if not form.is_valid():
            return _render_search_form(form)

        keywords = form.cleaned_data['keywords']
        author = form.cleaned_data['author']
        forum = form.cleaned_data['forum']
        search_in = form.cleaned_data['search_in']
        sort_by = form.cleaned_data['sort_by']
        sort_dir = form.cleaned_data['sort_dir']

        query = SearchQuerySet().models(Post)

        if author:
            query = query.filter(author__username=author)

        if forum != '0':
            query = query.filter(forum__id=forum)

        if keywords:
            if search_in == 'all':
                query = query.filter(SQ(topic=keywords) | SQ(text=keywords))
            elif search_in == 'message':
                query = query.filter(text=keywords)
            elif search_in == 'topic':
                query = query.filter(topic=keywords)

        order = {'0': 'created',
                 '1': 'author',
                 '2': 'topic',
                 '3': 'forum'}.get(sort_by, 'created')
        if sort_dir == 'DESC':
            order = '-' + order

        post_pks = query.values_list("pk", flat=True)

        if not show_as_posts:
            # TODO: We have a problem here getting a list of topics without duplicate entries.
            # Maybe we need to add a search index over topics?

            # Info: If the whoosh backend is used, set HAYSTACK_ITERATOR_LOAD_PER_QUERY
            #    to a higher number to speed things up
            context["topics"] = topics.filter(posts__in=post_pks).distinct()
        else:
            # FIXME: How to use the pre-filtered query from above?
            posts = posts.filter(pk__in=post_pks).order_by(order)
            context["posts"] = posts

        get_query_dict = request.GET.copy()
        get_query_dict.pop("show_as")
        base_url = "?%s&show_as=" % get_query_dict.urlencode()
        _generic_context = False

    if _generic_context:
        if show_as_posts:
            context["posts"] = posts.filter(topic__in=topics).order_by('-created')
        else:
            context["topics"] = topics

    if base_url is None:
        base_url = "?action=%s&show_as=" % action

    if show_as_posts:
        context['posts_page'] = get_page(context['posts'], request, forum_settings.SEARCH_PAGE_SIZE)
        context["as_topic_url"] = base_url + "topics"
        post_count = context["posts"].count()
        messages.success(request, _("Found %i posts.") % post_count)
    else:
        context['topics_page'] = get_page(context['topics'], request, forum_settings.SEARCH_PAGE_SIZE)
        context["as_post_url"] = base_url + "posts"
        topic_count = context["topics"].count()
        messages.success(request, _("Found %i topics.") % topic_count)

    return render(request, template_name, context)

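The action == 'search' branch above goes through Haystack rather than the ORM. A minimal sketch of the same query chain, assuming the djangobb Post model is indexed; the author name and keyword are made up:

from haystack.query import SearchQuerySet, SQ

query = SearchQuerySet().models(Post)                          # 'Post' assumed registered with Haystack
query = query.filter(author__username='alice')                 # hypothetical author filter
query = query.filter(SQ(topic='django') | SQ(text='django'))   # the search_in == 'all' case above
post_pks = query.values_list('pk', flat=True)                  # keys fed back into the ORM querysets
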
Example 30

Project: xadmin
Source File: filters.py
View license
    def get_list_queryset(self, queryset):
        lookup_params = dict([(smart_str(k)[len(FILTER_PREFIX):], v) for k, v in self.admin_view.params.items()
                              if smart_str(k).startswith(FILTER_PREFIX) and v != ''])
        for p_key, p_val in lookup_params.iteritems():
            if p_val == "False":
                lookup_params[p_key] = False
        use_distinct = False

        # for clean filters
        self.admin_view.has_query_param = bool(lookup_params)
        self.admin_view.clean_query_url = self.admin_view.get_query_string(remove=
                                                                           [k for k in self.request.GET.keys() if k.startswith(FILTER_PREFIX)])

        # Make sure the requested filter lookups are allowed
        if not self.free_query_filter:
            for key, value in lookup_params.items():
                if not self.lookup_allowed(key, value):
                    raise SuspiciousOperation(
                        "Filtering by %s not allowed" % key)

        self.filter_specs = []
        if self.list_filter:
            for list_filter in self.list_filter:
                if callable(list_filter):
                    # This is simply a custom list filter class.
                    spec = list_filter(self.request, lookup_params,
                                       self.model, self)
                else:
                    field_path = None
                    field_parts = []
                    if isinstance(list_filter, (tuple, list)):
                        # This is a custom FieldListFilter class for a given field.
                        field, field_list_filter_class = list_filter
                    else:
                        # This is simply a field name, so use the default
                        # FieldListFilter class that has been registered for
                        # the type of the given field.
                        field, field_list_filter_class = list_filter, filter_manager.create
                    if not isinstance(field, models.Field):
                        field_path = field
                        field_parts = get_fields_from_path(
                            self.model, field_path)
                        field = field_parts[-1]
                    spec = field_list_filter_class(
                        field, self.request, lookup_params,
                        self.model, self.admin_view, field_path=field_path)

                    if len(field_parts) > 1:
                        # Add related model name to title
                        spec.title = "%s %s" % (field_parts[-2].name, spec.title)

                    # Check if we need to use distinct()
                    use_distinct = (use_distinct or
                                    lookup_needs_distinct(self.opts, field_path))
                if spec and spec.has_output():
                    try:
                        new_qs = spec.do_filte(queryset)
                    except ValidationError, e:
                        new_qs = None
                        self.admin_view.message_user(_("<b>Filtering error:</b> %s") % e.messages[0], 'error')
                    if new_qs is not None:
                        queryset = new_qs

                    self.filter_specs.append(spec)

        self.has_filters = bool(self.filter_specs)
        self.admin_view.filter_specs = self.filter_specs
        self.admin_view.used_filter_num = len(
            filter(lambda f: f.is_used, self.filter_specs))

        try:
            for key, value in lookup_params.items():
                use_distinct = (
                    use_distinct or lookup_needs_distinct(self.opts, key))
        except FieldDoesNotExist, e:
            raise IncorrectLookupParameters(e)

        try:
            queryset = queryset.filter(**lookup_params)
        except (SuspiciousOperation, ImproperlyConfigured):
            raise
        except Exception, e:
            raise IncorrectLookupParameters(e)

        query = self.request.GET.get(SEARCH_VAR, '')

        # Apply keyword searches.
        def construct_search(field_name):
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name

        if self.search_fields and query:
            orm_lookups = [construct_search(str(search_field))
                           for search_field in self.search_fields]
            for bit in query.split():
                or_queries = [models.Q(**{orm_lookup: bit})
                              for orm_lookup in orm_lookups]
                queryset = queryset.filter(reduce(operator.or_, or_queries))
            if not use_distinct:
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.opts, search_spec):
                        use_distinct = True
                        break
            self.admin_view.search_query = query

        if use_distinct:
            return queryset.distinct()
        else:
            return queryset
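
The construct_search() helper above encodes the admin search-field prefix convention. The standalone sketch below restates just that convention against plain Django ORM calls; apply_keyword_search and its arguments are hypothetical names, not part of xadmin.

import operator
from functools import reduce

from django.db import models

def apply_keyword_search(queryset, query, search_fields):
    # '^field' -> field__istartswith, '=field' -> field__iexact,
    # '@field' -> field__search, anything else -> field__icontains
    def construct_search(field_name):
        if field_name.startswith('^'):
            return "%s__istartswith" % field_name[1:]
        elif field_name.startswith('='):
            return "%s__iexact" % field_name[1:]
        elif field_name.startswith('@'):
            return "%s__search" % field_name[1:]
        return "%s__icontains" % field_name

    orm_lookups = [construct_search(f) for f in search_fields]
    # every whitespace-separated term must match at least one field
    for bit in query.split():
        or_queries = [models.Q(**{lookup: bit}) for lookup in orm_lookups]
        queryset = queryset.filter(reduce(operator.or_, or_queries))
    return queryset

# e.g. apply_keyword_search(User.objects.all(), "alice", ('^username', '=email'))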

Example 31

Project: froide
Source File: views.py
View license
@require_POST
def submit_request(request, public_body=None):
    error = False
    foi_law = None
    if public_body is not None:
        public_body = get_object_or_404(PublicBody,
                slug=public_body)
        if not public_body.email:
            raise Http404
        all_laws = FoiLaw.objects.filter(jurisdiction=public_body.jurisdiction)
    else:
        all_laws = FoiLaw.objects.all()
    context = {"public_body": public_body}

    request_form = RequestForm(user=request.user,
                               list_of_laws=all_laws,
                               default_law=FoiLaw.get_default_law(),
                               data=request.POST)
    context['request_form'] = request_form
    context['public_body_form'] = PublicBodyForm()
    if (public_body is None and
            request.POST.get('public_body') == "new"):
        pb_form = PublicBodyForm(request.POST)
        context["public_body_form"] = pb_form
        if pb_form.is_valid():
            data = pb_form.cleaned_data
            data['confirmed'] = False
            # Take the first jurisdiction there is
            data['jurisdiction'] = Jurisdiction.objects.all()[0]
            data['slug'] = slugify(data['name'])
            public_body = PublicBody(**data)
        else:
            error = True

    if not request_form.is_valid():
        error = True
    else:
        if (public_body is None and
                request_form.cleaned_data['public_body'] != '' and
                request_form.cleaned_data['public_body'] != 'new'):
            public_body = request_form.public_body_object

    context['user_form'] = None
    user = None
    if not request.user.is_authenticated:
        user_form = NewUserForm(request.POST)
        context['user_form'] = user_form
        if not user_form.is_valid():
            error = True
    else:
        user = request.user

    if error:
        messages.add_message(request, messages.ERROR,
            _('There were errors in your form submission. Please review and submit again.'))
        return render(request, 'foirequest/request.html', context, status=400)

    password = None
    if user is None:
        user, password = AccountManager.create_user(**user_form.cleaned_data)
    sent_to_pb = 1
    if public_body is not None and public_body.pk is None:
        public_body._created_by = user
        public_body.save()
        sent_to_pb = 2
    elif public_body is None:
        sent_to_pb = 0

    if foi_law is None:
        if public_body is not None:
            foi_law = public_body.default_law
        else:
            foi_law = request_form.foi_law

    kwargs = registry.run_hook('pre_request_creation', request,
        user=user,
        public_body=public_body,
        foi_law=foi_law,
        form_data=request_form.cleaned_data,
        post_data=request.POST
    )
    foi_request = FoiRequest.from_request_form(**kwargs)

    special_redirect = None
    if request_form.cleaned_data['redirect_url']:
        redirect_url = request_form.cleaned_data['redirect_url']
        if is_safe_url(redirect_url, allowed_hosts=settings.ALLOWED_REDIRECT_HOSTS):
            special_redirect = redirect_url

    if user.is_active:
        if sent_to_pb == 0:
            messages.add_message(request, messages.INFO,
                _('Others can now suggest the Public Bodies for your request.'))
        elif sent_to_pb == 2:
            messages.add_message(request, messages.INFO,
                _('Your request will be sent as soon as the newly created Public Body has been confirmed by an administrator.'))
        else:
            messages.add_message(request, messages.INFO,
                _('Your request has been sent.'))
        req_url = u'%s%s' % (foi_request.get_absolute_url(), _('?request-made'))
        return redirect(special_redirect or req_url)
    else:
        AccountManager(user).send_confirmation_mail(request_id=foi_request.pk,
                password=password)
        messages.add_message(request, messages.INFO,
                _('Please check your inbox for mail from us to confirm your mail address.'))
        # user cannot access the request yet, redirect to custom URL or homepage
        return redirect(special_redirect or "/")
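
The redirect handling above only follows redirect_url when is_safe_url() accepts it. A minimal sketch of that pattern, separated from the rest of the view, follows; safe_redirect is a hypothetical helper, ALLOWED_REDIRECT_HOSTS is the froide setting used in the example, and on Django 3.0+ the helper is named url_has_allowed_host_and_scheme.

from django.conf import settings
from django.shortcuts import redirect
from django.utils.http import is_safe_url

def safe_redirect(request, fallback="/"):
    # Follow the user-supplied URL only when it points at an allowed host,
    # otherwise fall back to a known-safe location.
    target = request.POST.get("redirect_url", "")
    if target and is_safe_url(target, allowed_hosts=settings.ALLOWED_REDIRECT_HOSTS):
        return redirect(target)
    return redirect(fallback)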

Example 32

Project: django-treebeard
Source File: ns_tree.py
View license
    def move(self, target, pos=None):
        """
        Moves the current node and all its descendants to a new position
        relative to another node.
        """

        pos = self._prepare_pos_var_for_move(pos)
        cls = get_result_class(self.__class__)

        parent = None

        if pos in ('first-child', 'last-child', 'sorted-child'):
            # moving to a child
            if target.is_leaf():
                parent = target
                pos = 'last-child'
            else:
                target = target.get_last_child()
                pos = {'first-child': 'first-sibling',
                       'last-child': 'last-sibling',
                       'sorted-child': 'sorted-sibling'}[pos]

        if target.is_descendant_of(self):
            raise InvalidMoveToDescendant(
                _("Can't move node to a descendant."))

        if self == target and (
            (pos == 'left') or
            (pos in ('right', 'last-sibling') and
             target == target.get_last_sibling()) or
            (pos == 'first-sibling' and
             target == target.get_first_sibling())):
            # special cases, not actually moving the node so no need to UPDATE
            return

        if pos == 'sorted-sibling':
            siblings = list(target.get_sorted_pos_queryset(
                target.get_siblings(), self))
            if siblings:
                pos = 'left'
                target = siblings[0]
            else:
                pos = 'last-sibling'
        if pos in ('left', 'right', 'first-sibling'):
            siblings = list(target.get_siblings())

            if pos == 'right':
                if target == siblings[-1]:
                    pos = 'last-sibling'
                else:
                    pos = 'left'
                    found = False
                    for node in siblings:
                        if found:
                            target = node
                            break
                        elif node == target:
                            found = True
            if pos == 'left':
                if target == siblings[0]:
                    pos = 'first-sibling'
            if pos == 'first-sibling':
                target = siblings[0]

        # ok let's move this
        cursor = self._get_database_cursor('write')
        move_right = cls._move_right
        gap = self.rgt - self.lft + 1
        sql = None
        target_tree = target.tree_id

        # first make a hole
        if pos == 'last-child':
            newpos = parent.rgt
            sql, params = move_right(target.tree_id, newpos, False, gap)
        elif target.is_root():
            newpos = 1
            if pos == 'last-sibling':
                target_tree = target.get_siblings().reverse()[0].tree_id + 1
            elif pos == 'first-sibling':
                target_tree = 1
                sql, params = cls._move_tree_right(1)
            elif pos == 'left':
                sql, params = cls._move_tree_right(target.tree_id)
        else:
            if pos == 'last-sibling':
                newpos = target.get_parent().rgt
                sql, params = move_right(target.tree_id, newpos, False, gap)
            elif pos == 'first-sibling':
                newpos = target.lft
                sql, params = move_right(target.tree_id,
                                         newpos - 1, False, gap)
            elif pos == 'left':
                newpos = target.lft
                sql, params = move_right(target.tree_id, newpos, True, gap)

        if sql:
            cursor.execute(sql, params)

        # we reload 'self' because lft/rgt may have changed

        fromobj = cls.objects.get(pk=self.pk)

        depthdiff = target.depth - fromobj.depth
        if parent:
            depthdiff += 1

        # move the tree to the hole
        sql = "UPDATE %(table)s "\
              " SET tree_id = %(target_tree)d, "\
              "     lft = lft + %(jump)d , "\
              "     rgt = rgt + %(jump)d , "\
              "     depth = depth + %(depthdiff)d "\
              " WHERE tree_id = %(from_tree)d AND "\
              "     lft BETWEEN %(fromlft)d AND %(fromrgt)d" % {
                  'table': connection.ops.quote_name(cls._meta.db_table),
                  'from_tree': fromobj.tree_id,
                  'target_tree': target_tree,
                  'jump': newpos - fromobj.lft,
                  'depthdiff': depthdiff,
                  'fromlft': fromobj.lft,
                  'fromrgt': fromobj.rgt}
        cursor.execute(sql, [])

        # close the gap
        sql, params = cls._get_close_gap_sql(fromobj.lft,
                                             fromobj.rgt, fromobj.tree_id)
        cursor.execute(sql, params)
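
For context, a small usage sketch of the move() method above; the Category model and the node names are assumptions, only NS_Node and the position keywords come from django-treebeard.

from django.db import models
from treebeard.ns_tree import NS_Node

class Category(NS_Node):
    name = models.CharField(max_length=50)

# Reparent the whole "FAQ" subtree under "Docs"; the raw SQL in move()
# rewrites lft/rgt/depth/tree_id for every node in the moved subtree.
faq = Category.objects.get(name="FAQ")
docs = Category.objects.get(name="Docs")
faq.move(docs, pos="last-child")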

Example 33

Project: taiga-back
Source File: api.py
View license
    @method_decorator(atomic)
    def create(self, request, *args, **kwargs):
        self.check_permissions(request, 'import_project', None)

        data = request.DATA.copy()
        data['owner'] = data.get('owner', request.user.email)

        # Validate if the project can be imported
        is_private = data.get('is_private', False)
        total_memberships = len([m for m in data.get("memberships", []) if m.get("email", None) != data["owner"]])
        total_memberships = total_memberships + 1  # 1 is the owner
        (enough_slots, error_message) = users_services.has_available_slot_for_import_new_project(
            self.request.user,
            is_private,
            total_memberships
        )
        if not enough_slots:
            raise exc.NotEnoughSlotsForProject(is_private, total_memberships, error_message)

        # Create Project
        project_serialized = services.store.store_project(data)

        if not project_serialized:
            raise exc.BadRequest(services.store.get_errors())

        # Create roles
        roles_serialized = None
        if "roles" in data:
            roles_serialized = services.store.store_roles(project_serialized.object, data)

        if not roles_serialized:
            raise exc.BadRequest(_("We needed at least one role"))

        # Create memberships
        if "memberships" in data:
            services.store.store_memberships(project_serialized.object, data)

        try:
            owner_membership = project_serialized.object.memberships.get(user=project_serialized.object.owner)
            owner_membership.is_admin = True
            owner_membership.save()
        except Membership.DoesNotExist:
            Membership.objects.create(
                project=project_serialized.object,
                email=project_serialized.object.owner.email,
                user=project_serialized.object.owner,
                role=project_serialized.object.roles.all().first(),
                is_admin=True
            )

        # Create project values choices
        if "points" in data:
            services.store.store_project_attributes_values(project_serialized.object, data,
                                                           "points", validators.PointsExportValidator)
        if "issue_types" in data:
            services.store.store_project_attributes_values(project_serialized.object, data,
                                                           "issue_types",
                                                           validators.IssueTypeExportValidator)
        if "issue_statuses" in data:
            services.store.store_project_attributes_values(project_serialized.object, data,
                                                           "issue_statuses",
                                                           validators.IssueStatusExportValidator,)
        if "us_statuses" in data:
            services.store.store_project_attributes_values(project_serialized.object, data,
                                                           "us_statuses",
                                                           validators.UserStoryStatusExportValidator,)
        if "task_statuses" in data:
            services.store.store_project_attributes_values(project_serialized.object, data,
                                                           "task_statuses",
                                                           validators.TaskStatusExportValidator)
        if "priorities" in data:
            services.store.store_project_attributes_values(project_serialized.object, data,
                                                           "priorities",
                                                           validators.PriorityExportValidator)
        if "severities" in data:
            services.store.store_project_attributes_values(project_serialized.object, data,
                                                           "severities",
                                                           validators.SeverityExportValidator)

        if ("points" in data or "issues_types" in data or
                "issues_statuses" in data or "us_statuses" in data or
                "task_statuses" in data or "priorities" in data or
                "severities" in data):
            services.store.store_default_project_attributes_values(project_serialized.object, data)

        # Created custom attributes
        if "userstorycustomattributes" in data:
            services.store.store_custom_attributes(project_serialized.object, data,
                                                   "userstorycustomattributes",
                                                   validators.UserStoryCustomAttributeExportValidator)

        if "taskcustomattributes" in data:
            services.store.store_custom_attributes(project_serialized.object, data,
                                                   "taskcustomattributes",
                                                   validators.TaskCustomAttributeExportValidator)

        if "issuecustomattributes" in data:
            services.store.store_custom_attributes(project_serialized.object, data,
                                                   "issuecustomattributes",
                                                   validators.IssueCustomAttributeExportValidator)

        # Is there any error?
        errors = services.store.get_errors()
        if errors:
            raise exc.BadRequest(errors)

        # Importer process is OK
        response_data = serializers.ProjectExportSerializer(project_serialized.object).data
        response_data['id'] = project_serialized.object.id
        headers = self.get_success_headers(response_data)
        return response.Created(response_data, headers=headers)
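
The view above relies on @method_decorator(atomic) so that the many store_* calls commit or roll back as one unit. A stripped-down sketch of that transactional pattern follows; the class name and body are placeholders, only the two decorator imports are real Django APIs.

from django.db.transaction import atomic
from django.utils.decorators import method_decorator

class ProjectImportView(object):  # hypothetical stand-in for the real viewset
    @method_decorator(atomic)
    def create(self, request, *args, **kwargs):
        # Everything stored in here runs inside one database transaction:
        # if any import step raises (e.g. BadRequest when errors are found),
        # the partially imported project is rolled back automatically.
        pass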

Example 34

Project: tendenci
Source File: views.py
View license
@ssl_required
def register(request, success_url=None,
             form_class=RegistrationForm, profile_callback=None,
             template_name='registration/registration_form.html',
             event_id=None,
             extra_context=None):
    """
    Allow a new user to register an account.

    Following successful registration, issue a redirect; by default,
    this will be whatever URL corresponds to the named URL pattern
    ``registration_complete``, which will be
    ``/accounts/register/complete/`` if using the included URLConf. To
    change this, point that named pattern at another URL, or pass your
    preferred URL as the keyword argument ``success_url``.

    By default, ``registration.forms.RegistrationForm`` will be used
    as the registration form; to change this, pass a different form
    class as the ``form_class`` keyword argument. The form class you
    specify must have a method ``save`` which will create and return
    the new ``User``, and that method must accept the keyword argument
    ``profile_callback`` (see below).

    To enable creation of a site-specific user profile object for the
    new user, pass a function which will create the profile object as
    the keyword argument ``profile_callback``. See
    ``RegistrationManager.create_inactive_user`` in the file
    ``models.py`` for details on how to write this function.

    By default, use the template
    ``registration/registration_form.html``; to change this, pass the
    name of a template as the keyword argument ``template_name``.

    **Required arguments**

    None.

    **Optional arguments**

    ``form_class``
        The form class to use for registration.

    ``extra_context``
        A dictionary of variables to add to the template context. Any
        callable object in this dictionary will be called to produce
        the end result which appears in the context.

    ``profile_callback``
        A function which will be used to create a site-specific
        profile instance for the new ``User``.

    ``success_url``
        The URL to redirect to on successful registration.

    ``template_name``
        A custom template to use.

    **Context:**

    ``form``
        The registration form.

    Any extra variables supplied in the ``extra_context`` argument
    (see above).

    **Template:**

    registration/registration_form.html or ``template_name`` keyword
    argument.

    """
    # check if this site allows self registration, if not, redirect to login page
    allow_self_registration = get_setting('module', 'users', 'selfregistration')
    if not allow_self_registration:
        return HttpResponseRedirect(reverse('auth_login'))

    form_params = {}
    if request.session.get('form_params', None):
        form_params = request.session.pop('form_params')

    if request.method == 'POST':
        form = form_class(data=request.POST, files=request.FILES, **form_params)
        if form.is_valid():
            # This is for including a link in the reg email back to the event viewed
            event = None
            if event_id: # the user signed up via an event
                from tendenci.apps.events.models import Event
                event = get_object_or_404(Event, pk=event_id)

            new_user = form.save(profile_callback=profile_callback, event=event)
            # success_url needs to be dynamically generated here; setting
            # a default value using reverse() will cause circular-import
            # problems with the default URLConf for this application, which
            # imports this file.

            # add to the default group(s)
            default_user_groups =[g.strip() for g in (get_setting('module', 'users', 'defaultusergroup')).split(',')]
            if default_user_groups:
                from tendenci.apps.user_groups.models import Group, GroupMembership
                from django.db.models import Q
                for group_name in default_user_groups:
                    groups = Group.objects.filter(Q(name=group_name) | Q(label=group_name)).filter(allow_self_add=1, status=1, status_detail='active')
                    if groups:
                        group = groups[0]
                    else:
                        # group does not exist, so create the group
                        group = Group()
                        group.name  = group_name
                        group.label = group_name
                        group.type = 'distribution'
                        group.show_as_option = 1
                        group.allow_self_add = 1
                        group.allow_self_remove = 1
                        group.creator = new_user
                        group.creator_username = new_user.username
                        group.owner =  new_user
                        group.owner_username = new_user.username
                        try:
                            group.save()
                        except:
                            group = None

                    if group:
                        gm = GroupMembership()
                        gm.group = group
                        gm.member = new_user
                        gm.creator_id = new_user.id
                        gm.creator_username = new_user.username
                        gm.owner_id =  new_user.id
                        gm.owner_username = new_user.username
                        gm.save()


            EventLog.objects.log(instance=new_user)

            return HttpResponseRedirect(success_url or reverse('registration_complete'))
        elif form.similar_email_found:
            messages.add_message(
                request, messages.INFO,
                _(u"An account already exists for the email %(email)s." % {
                    'email': request.POST.get('email_0') or request.POST.get('email_1')}))

            querystring = 'registration=True'
            return HttpResponseRedirect(reverse('auth_password_reset')+ "?%s" % querystring)

    else:
        allow_same_email = request.GET.get('registration_approved', False)
        form_params = {'allow_same_email' : allow_same_email }
        request.session['form_params'] = form_params
        form = form_class(**form_params)

    if extra_context is None:
        extra_context = {}
    context = RequestContext(request)
    for key, value in extra_context.items():
        context[key] = callable(value) and value() or value
    return render_to_response(template_name,
                              { 'form': form },
                              context_instance=context)
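
The docstring above describes how form_class, success_url and template_name are meant to be passed in. A hedged URLconf sketch follows; the import paths and MyRegistrationForm are assumptions, and only the keyword arguments themselves come from the docstring.

from django.conf.urls import url

from tendenci.apps.accounts.views import register   # module path assumed
from myapp.forms import MyRegistrationForm           # hypothetical custom form

urlpatterns = [
    url(r'^accounts/register/$', register, {
        'form_class': MyRegistrationForm,
        'success_url': '/welcome/',
        'template_name': 'registration/registration_form.html',
    }, name='registration_register'),
]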

Example 35

Project: tendenci
Source File: views.py
View license
def multi_register(request, event_id, template_name="events/registration/multi_register.html"):
    """
    This view is where a user defines the specifics of his/her registrations.
    A registration set is a set of registrants (one or many) that comply with a specific pricing.
    A pricing defines the maximum number of registrants in a registration set.
    A user can make use of multiple registration sets.

    If the site setting anonymousmemberpricing is enabled,
    anonymous users can select non-public pricings in their registration sets,
    provided that the e-mail address of the first registrant in every registration set is validated to belong to an existing user.

    If the site setting anonymousmemberpricing is disabled,
    anonymous users will not be able to see non-public prices.
    """

    # redirect to default registration if anonymousmemberpricing not enabled
    if not get_setting('module', 'events', 'anonymousmemberpricing'):
        return redirect('event.multi_register', event_id)

    # clear user list session
    request.session['user_list'] = []

    event = get_object_or_404(Event, pk=event_id)

    # check if event allows registration
    if (event.registration_configuration is None or not event.registration_configuration.enabled):
        raise Http404

    # check if it is still open, always open for admin users
    if not request.user.profile.is_superuser:
        status = reg_status(event, request.user)
        if status == REG_FULL:
            messages.add_message(request, messages.ERROR, _('Registration is full.'))
            return redirect('event', event.pk)
        elif status == REG_CLOSED:
            messages.add_message(request, messages.ERROR, _('Registration is closed.'))
            return redirect('event', event.pk)

    user = AnonymousUser()
    # get available pricings
    active_pricings = get_active_pricings(event)
    event_pricings = event.registration_configuration.regconfpricing_set.all()

    # get available addons
    active_addons = get_active_addons(event)

    # check whether a custom reg form is used
    custom_reg_form = None
    reg_conf = event.registration_configuration
    if reg_conf.use_custom_reg_form:
        if reg_conf.bind_reg_form_to_conf_only:
            custom_reg_form = reg_conf.reg_form

    #custom_reg_form = None

    if custom_reg_form:
        RF = FormForCustomRegForm
    else:
        RF = RegistrantForm

    # start the form set factory
    RegistrantFormSet = formset_factory(
        RF,
        formset=RegistrantBaseFormSet,
        can_delete=True,
        extra=0,
    )

    RegAddonFormSet = formset_factory(
        RegAddonForm,
        formset=RegAddonBaseFormSet,
        extra=0,
    )

    if request.method == "POST":
        # process the submitted forms
        params = {'prefix': 'registrant',
                 'event': event,
                 'extra_params': {
                                  'pricings':event_pricings,
                                  }
                  }
        if custom_reg_form:
            params['extra_params'].update({"custom_reg_form": custom_reg_form,
                                           'event': event})
        reg_formset = RegistrantFormSet(request.POST,
                            **params)

        reg_form = RegistrationForm(event, request.user, request.POST,
                    reg_count = len(reg_formset.forms))

        # This form is just here to preserve the data in case of invalid registrants.
        # The real validation of addons happens after the registrants are validated.
        addon_formset = RegAddonFormSet(request.POST,
                            prefix='addon',
                            event=event,
                            extra_params={
                                'addons':active_addons,
                                'valid_addons':active_addons,
                            })
        addon_formset.is_valid()

        # validate the form and formset
        if False not in (reg_form.is_valid(), reg_formset.is_valid()):
            valid_addons = get_addons_for_list(event, reg_formset.get_user_list())
            # validate the addons
            addon_formset = RegAddonFormSet(request.POST,
                            prefix='addon',
                            event=event,
                            extra_params={
                                'addons':active_addons,
                                'valid_addons':valid_addons,
                            })
            if addon_formset.is_valid():
                # process the registration
                # this will create the registrants and apply the discount
                reg8n = process_registration(reg_form, reg_formset,  addon_formset, custom_reg_form=custom_reg_form)

                self_reg8n = get_setting('module', 'users', 'selfregistration')
                is_credit_card_payment = (reg8n.payment_method and
                    (reg8n.payment_method.machine_name).lower() == 'credit-card'
                    and reg8n.amount_paid > 0)
                registrants = reg8n.registrant_set.all().order_by('id')
                for registrant in registrants:
                    #registrant.assign_mapped_fields()
                    if registrant.custom_reg_form_entry:
                        registrant.name = registrant.custom_reg_form_entry.__unicode__()
                    else:
                        registrant.name = ' '.join([registrant.first_name, registrant.last_name])

                if is_credit_card_payment: # online payment
                    # email the admins as well
                    email_admins(event, reg8n.amount_paid, self_reg8n, reg8n, registrants)
                    # get invoice; redirect to online pay
                    return redirect('payment.pay_online',
                        reg8n.invoice.id, reg8n.invoice.guid)
                else:
                    # offline payment
                    # email the registrant
                    send_registrant_email(reg8n, self_reg8n)
                    # email the admins as well
                    email_admins(event, reg8n.amount_paid, self_reg8n, reg8n, registrants)

                EventLog.objects.log(instance=event)

                # redirect to confirmation
                return redirect('event.registration_confirmation', event_id, reg8n.registrant.hash)
    else:
        params = {'prefix': 'registrant',
                    'event': event,
                    'extra_params': {
                        'pricings':event_pricings,
                    }
                  }
        if custom_reg_form:
            params['extra_params'].update({"custom_reg_form": custom_reg_form,
                                           'event': event})
        # initialize empty forms
        reg_formset = RegistrantFormSet(**params)
        reg_form = RegistrationForm(event, request.user)
        addon_formset = RegAddonFormSet(
                            prefix='addon',
                            event=event,
                            extra_params={
                                'addons':active_addons,
                            })

    sets = reg_formset.get_sets()

    # build our hidden form dynamically
    hidden_form = None
    if custom_reg_form:
        params = {'prefix': 'registrant',
                  'event': event,
                  'extra_params': {
                        'pricings':event_pricings,
                        'custom_reg_form': custom_reg_form,
                        'event': event
                    }
                  }
        FormSet = formset_factory(
            FormForCustomRegForm,
            formset=RegistrantBaseFormSet,
            can_delete=True,
            extra=1,
        )
        formset = FormSet(**params)
        hidden_form = formset.forms[0]


    return render_to_response(template_name, {
            'event':event,
            'reg_form':reg_form,
            'custom_reg_form': custom_reg_form,
            'hidden_form': hidden_form,
            'registrant': reg_formset,
            'addon_formset': addon_formset,
            'sets': sets,
            'addons':active_addons,
            'pricings':active_pricings,
            'anon_pricing':True,
            'total_price':reg_formset.get_total_price()+addon_formset.get_total_price(),
            'allow_memberid_pricing':get_setting('module', 'events', 'memberidpricing'),
            'shared_pricing':get_setting('module', 'events', 'sharedpricing'),
            }, context_instance=RequestContext(request))
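
The registration sets described in the docstring are built with formset_factory() plus a 'registrant' prefix. A reduced, hedged illustration of that setup follows; SimpleRegistrantForm is a stand-in, not the project's RegistrantForm/RegistrantBaseFormSet classes.

from django import forms
from django.forms.formsets import formset_factory

class SimpleRegistrantForm(forms.Form):  # stand-in for the project's RegistrantForm
    first_name = forms.CharField()
    last_name = forms.CharField()
    email = forms.EmailField()

# Mirrors the factory call above: no blank extra forms, deletable rows.
RegistrantFormSet = formset_factory(SimpleRegistrantForm, can_delete=True, extra=0)

# In the view the formset is bound with the same "registrant" prefix:
#   formset = RegistrantFormSet(request.POST, prefix='registrant')
#   if formset.is_valid():
#       registrants = [f.cleaned_data for f in formset if not f.cleaned_data.get('DELETE')]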

Example 36

Project: tendenci
Source File: views.py
View license
@is_enabled('forms')
def form_detail(request, slug, template="forms/form_detail.html"):
    """
    Display a built form and handle submission.
    """
    published = Form.objects.published(for_user=request.user)
    form = get_object_or_404(published, slug=slug)

    if not has_view_perm(request.user,'forms.view_form',form):
        raise Http403

    # If form has a recurring payment, make sure the user is logged in
    if form.recurring_payment:
        [email_field] = form.fields.filter(field_type__iexact='EmailVerificationField')[:1] or [None]
        if request.user.is_anonymous() and not email_field:
            # anonymous user - if we don't have the email field, redirect to login
            response = redirect('auth_login')
            response['Location'] += '?next=%s' % form.get_absolute_url()
            return response
        if request.user.is_superuser and not email_field:
            messages.add_message(request, messages.WARNING,
                    'Please edit the form to include an email field ' + \
                    'as it is required for setting up a recurring ' + \
                    'payment for anonymous users.')

    form_for_form = FormForForm(form, request.user, request.POST or None, request.FILES or None)
    for field in form_for_form.fields:
        field_default = request.GET.get(field, None)
        if field_default:
            form_for_form.fields[field].initial = field_default

    if request.method == "POST":
        if form_for_form.is_valid():
            entry = form_for_form.save()
            entry.entry_path = request.POST.get("entry_path", "")
            if request.user.is_anonymous():
                if entry.get_email_address():
                    emailfield = entry.get_email_address()
                    firstnamefield = entry.get_first_name()
                    lastnamefield = entry.get_last_name()
                    phonefield = entry.get_phone_number()
                    password = ''
                    for i in range(0, 10):
                        password += random.choice(string.ascii_lowercase + string.ascii_uppercase)

                    user_list = User.objects.filter(email=emailfield).order_by('-last_login')
                    if user_list:
                        anonymous_creator = user_list[0]
                    else:
                        anonymous_creator = User(username=emailfield[:30], email=emailfield,
                                                 first_name=firstnamefield, last_name=lastnamefield)
                        anonymous_creator.set_password(password)
                        anonymous_creator.is_active = False
                        anonymous_creator.save()
                        anonymous_profile = Profile(user=anonymous_creator, owner=anonymous_creator,
                                                    creator=anonymous_creator, phone=phonefield)
                        anonymous_profile.save()
                    entry.creator = anonymous_creator
            else:
                entry.creator = request.user
            entry.save()
            entry.set_group_subscribers()

            # Email
            subject = generate_email_subject(form, entry)
            email_headers = {}  # content type specified below
            if form.email_from:
                email_headers.update({'Reply-To':form.email_from})

            # Email to submitter
            # fields aren't included in submitter body to prevent spam
            submitter_body = generate_submitter_email_body(entry, form_for_form)
            email_from = form.email_from or settings.DEFAULT_FROM_EMAIL
            email_to = form_for_form.email_to()
            is_spam = Email.is_blocked(email_to)
            if is_spam:
                # log the spam
                description = "Email \"{0}\" blocked because it is listed in email_blocks.".format(email_to)
                EventLog.objects.log(instance=form, description=description)
                
                if form.completion_url:
                    return HttpResponseRedirect(form.completion_url)
                return redirect("form_sent", form.slug)
                
            email = Email()
            email.subject = subject
            email.reply_to = form.email_from

            if email_to and form.send_email and form.email_text:
                # Send message to the person who submitted the form.
                email.recipient = email_to
                email.body = submitter_body
                email.send(fail_silently=True)

            # Email copies to admin
            admin_body = generate_admin_email_body(entry, form_for_form)
            email_from = email_to or email_from # Send from the email entered.
            email_headers = {}  # Reset the email_headers
            email_headers.update({'Reply-To':email_from})
            email_copies = [e.strip() for e in form.email_copies.split(',') if e.strip()]

            subject = subject.encode(errors='ignore')
            email_recipients = entry.get_function_email_recipients()
            # reply_to of admin emails goes to submitter
            email.reply_to = email_to

            if email_copies or email_recipients:
                # prepare attachments
                attachments = []
                try:
                    for f in form_for_form.files.values():
                        f.seek(0)
                        attachments.append((f.name, f.read()))
                except ValueError:
                    attachments = []
                    for field_entry in entry.fields.all():
                        if field_entry.field.field_type == 'FileField':
                            try:
                                f = default_storage.open(field_entry.value)
                            except IOError:
                                pass
                            else:
                                f.seek(0)
                                attachments.append((f.name.split('/')[-1], f.read()))

                # Send message to the email addresses listed in the copies
                if email_copies:
                    email.body = admin_body
                    email.recipient = email_copies
                    email.send(fail_silently=True, attachments=attachments)

                # Email copies to recipient list indicated in the form
                if email_recipients:
                    email.body = admin_body
                    email.recipient = email_recipients
                    email.send(fail_silently=True, attachments=attachments)

            # payment redirect
            if (form.custom_payment or form.recurring_payment) and entry.pricing:
                # get the pricing's price, custom or otherwise
                price = entry.pricing.price or form_for_form.cleaned_data.get('custom_price')

                if form.recurring_payment:
                    if request.user.is_anonymous():
                        rp_user = entry.creator
                    else:
                        rp_user = request.user
                    billing_start_dt = datetime.datetime.now()
                    trial_period_start_dt = None
                    trial_period_end_dt = None
                    if entry.pricing.has_trial_period:
                        trial_period_start_dt = datetime.datetime.now()
                        trial_period_end_dt = trial_period_start_dt + datetime.timedelta(1)
                        billing_start_dt = trial_period_end_dt
                    # Create recurring payment
                    rp = RecurringPayment(
                             user=rp_user,
                             description=form.title,
                             billing_period=entry.pricing.billing_period,
                             billing_start_dt=billing_start_dt,
                             num_days=entry.pricing.num_days,
                             due_sore=entry.pricing.due_sore,
                             payment_amount=price,
                             taxable=entry.pricing.taxable,
                             tax_rate=entry.pricing.tax_rate,
                             has_trial_period=entry.pricing.has_trial_period,
                             trial_period_start_dt=trial_period_start_dt,
                             trial_period_end_dt=trial_period_end_dt,
                             trial_amount=entry.pricing.trial_amount,
                             creator=rp_user,
                             creator_username=rp_user.username,
                             owner=rp_user,
                             owner_username=rp_user.username,
                         )
                    rp.save()
                    rp.add_customer_profile()

                    # redirect to recurring payments
                    messages.add_message(request, messages.SUCCESS, _('Successful transaction.'))
                    return redirect('recurring_payment.view_account', rp.id, rp.guid)
                else:
                    # create the invoice
                    invoice = make_invoice_for_entry(entry, custom_price=price)
                    # log an event for invoice add

                    EventLog.objects.log(instance=form)

                    # redirect to billing form
                    return redirect('form_entry_payment', invoice.id, invoice.guid)

            # default redirect
            if form.completion_url:
                return HttpResponseRedirect(form.completion_url)
            return redirect("form_sent", form.slug)

    # set form's template to forms/base.html if no template or template doesn't exist
    if not form.template or not template_exists(form.template):
        form.template = "forms/base.html"

    # NOTE: Temporarily use forms/base.html for now
    form.template = "forms/base.html"

    context = {
        "form": form,
        "form_for_form": form_for_form,
        'form_template': form.template,
    }
    return render_to_response(template, context, RequestContext(request))

Example 37

Project: tendenci
Source File: get_email.py
View license
def ticket_from_message(message, queue, quiet):
    # 'message' must be an RFC822 formatted message.
    msg = message
    message = email.message_from_string(msg)
    subject = message.get('subject', _('Created from e-mail'))
    subject = decode_mail_headers(decodeUnknown(message.get_charset(), subject))
    subject = subject.replace("Re: ", "").replace("Fw: ", "").replace("RE: ", "").replace("FW: ", "").replace("Automatic reply: ", "").strip()

    sender = message.get('from', _('Unknown Sender'))
    sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))

    sender_email = parseaddr(sender)[1]

    body_plain, body_html = '', ''

    for ignore in IgnoreEmail.objects.filter(Q(queues=queue) | Q(queues__isnull=True)):
        if ignore.test(sender_email):
            if ignore.keep_in_mailbox:
                # Returning 'False' keeps the message in the mailbox,
                # while returning 'True' causes the message to be deleted.
                return False
            return True

    matchobj = re.match(r".*\["+queue.slug+"-(?P<id>\d+)\]", subject)
    if matchobj:
        # This is a reply or forward.
        ticket = matchobj.group('id')
    else:
        ticket = None

    counter = 0
    files = []

    for part in message.walk():
        if part.get_content_maintype() == 'multipart':
            continue

        name = part.get_param("name")
        if name:
            name = collapse_rfc2231_value(name)

        if part.get_content_maintype() == 'text' and name == None:
            if part.get_content_subtype() == 'plain':
                body_plain = EmailReplyParser.parse_reply(decodeUnknown(part.get_content_charset(), part.get_payload(decode=True)))
            else:
                body_html = part.get_payload(decode=True)
        else:
            if not name:
                ext = mimetypes.guess_extension(part.get_content_type())
                name = "part-%i%s" % (counter, ext)

            files.append({
                'filename': name,
                'content': part.get_payload(decode=True),
                'type': part.get_content_type()},
                )

        counter += 1

    if body_plain:
        body = body_plain
    else:
        body = _('No plain-text email body available. Please see attachment email_html_body.html.')

    if body_html:
        files.append({
            'filename': _("email_html_body.html"),
            'content': body_html,
            'type': 'text/html',
        })

    now = timezone.now()

    if ticket:
        try:
            t = Ticket.objects.get(id=ticket)
            new = False
        except Ticket.DoesNotExist:
            ticket = None

    priority = 3

    smtp_priority = message.get('priority', '')
    smtp_importance = message.get('importance', '')

    high_priority_types = ('high', 'important', '1', 'urgent')

    if smtp_priority in high_priority_types or smtp_importance in high_priority_types:
        priority = 2

    if ticket == None:
        t = Ticket(
            title=subject,
            queue=queue,
            submitter_email=sender_email,
            created=now,
            description=body,
            priority=priority,
        )
        t.save()
        new = True
        update = ''

    elif t.status == Ticket.CLOSED_STATUS:
        t.status = Ticket.REOPENED_STATUS
        t.save()

    f = FollowUp(
        ticket = t,
        title = _('E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}),
        date = timezone.now(),
        public = True,
        comment = body,
    )

    if t.status == Ticket.REOPENED_STATUS:
        f.new_status = Ticket.REOPENED_STATUS
        f.title = _('Ticket Re-Opened by E-Mail Received from %(sender_email)s' % {'sender_email': sender_email})
    
    f.save()

    if not quiet:
        print (" [%s-%s] %s" % (t.queue.slug, t.id, t.title,)).encode('ascii', 'replace')

    for file in files:
        if file['content']:
            filename = file['filename'].encode('ascii', 'replace').replace(' ', '_')
            filename = re.sub('[^a-zA-Z0-9._-]+', '', filename)
            a = Attachment(
                followup=f,
                filename=filename,
                mime_type=file['type'],
                size=len(file['content']),
                )
            a.file.save(filename, ContentFile(file['content']), save=False)
            a.save()
            if not quiet:
                print "    - %s" % filename


    context = safe_template_context(t)

    if new:

        if sender_email:
            send_templated_mail(
                'newticket_submitter',
                context,
                recipients=sender_email,
                sender=queue.from_address,
                fail_silently=True,
                )

        if queue.new_ticket_cc:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=queue.new_ticket_cc,
                sender=queue.from_address,
                fail_silently=True,
                )

        if queue.updated_ticket_cc and queue.updated_ticket_cc != queue.new_ticket_cc:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=queue.updated_ticket_cc,
                sender=queue.from_address,
                fail_silently=True,
                )

    else:
        context.update(comment=f.comment)

        if t.status == Ticket.REOPENED_STATUS:
            update = _(' (Reopened)')
        else:
            update = _(' (Updated)')

        if t.assigned_to:
            send_templated_mail(
                'updated_owner',
                context,
                recipients=t.assigned_to.email,
                sender=queue.from_address,
                fail_silently=True,
                )

        if queue.updated_ticket_cc:
            send_templated_mail(
                'updated_cc',
                context,
                recipients=queue.updated_ticket_cc,
                sender=queue.from_address,
                fail_silently=True,
                )

    return t
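
ticket_from_message() above expects raw RFC 822 text and walks its MIME parts. The sketch below isolates just the standard-library parsing steps; split_message is a hypothetical helper and the header handling is simplified (no charset decoding or header unfolding).

import email
from email.utils import parseaddr

def split_message(raw_message):
    # Parse the raw RFC 822 text, then separate subject, sender address,
    # the first plain-text body and any remaining parts as attachments.
    message = email.message_from_string(raw_message)
    subject = message.get('subject', '')
    sender_email = parseaddr(message.get('from', ''))[1]
    body, attachments = '', []
    for part in message.walk():
        if part.get_content_maintype() == 'multipart':
            continue  # containers only; their children carry the payloads
        name = part.get_param('name')
        if part.get_content_maintype() == 'text' and name is None:
            body = body or part.get_payload(decode=True)
        else:
            attachments.append((name, part.get_payload(decode=True)))
    return subject, sender_email, body, attachments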

Example 38

Project: tendenci
Source File: widgets.py
View license
    def render(self, name, value, attrs=None):
        if not isinstance(value, list):
            value = self.decompress(value)

        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)

        custom_field_final_attrs = final_attrs.copy()
        if 'class' in custom_field_final_attrs:
            classes = custom_field_final_attrs["class"].split(" ")
            custom_field_final_attrs['class'] = " ".join([cls for cls in classes if cls != "form-control"])

        # period type
        period_type_widget = self.pos_d['period_type'][1]
        period_type_widget.choices = PERIOD_CHOICES
        #self.widgets.append(period_type_widget)
        rendered_period_type = self.render_widget(period_type_widget,
                                                  name, value, final_attrs, self.pos_d['period_type'][0], id_)

        # period
        period_widget = self.pos_d['period'][1]
        period_widget.attrs = {'size':'8'}
        rendered_period = self.render_widget(period_widget, name, value, final_attrs,
                                             self.pos_d['period'][0], id_)

        # period_unit
        period_unit_widget = self.pos_d['period_unit'][1]
        period_unit_widget.choices = PERIOD_UNIT_CHOICE
        rendered_period_unit = self.render_widget(period_unit_widget,
                                                  name, value, final_attrs, self.pos_d['period_unit'][0], id_)
        # expiration_method_day
        rolling_option1_day_widget = self.pos_d['rolling_option1_day'][1]
        rolling_option1_day_widget.attrs = {'size':'8'}
        rendered_rolling_option1_day = self.render_widget(rolling_option1_day_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['rolling_option1_day'][0], id_)
        # expiration_method
        JOIN_EXP_METHOD_CHOICE = (
                                  ("0", _("End of full period")),
                                  ("1", mark_safe("%s day(s) at signup month" % \
                                                  rendered_rolling_option1_day)),)
        rolling_option_widget = self.pos_d['rolling_option'][1]
        rolling_option_widget.choices=JOIN_EXP_METHOD_CHOICE
        rendered_rolling_option = self.render_widget(rolling_option_widget,
                                                  name, value, final_attrs,
                                                  self.pos_d['rolling_option'][0], id_)

        # rolling_renew_option1_day
        rolling_renew_option1_day_widget = self.pos_d['rolling_renew_option1_day'][1]
        rolling_renew_option1_day_widget.attrs = {'size':'8'}
        rendered_rolling_renew_option1_day = self.render_widget(rolling_renew_option1_day_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['rolling_renew_option1_day'][0], id_)
        # renew_expiration_day2
        rolling_renew_option2_day_widget = self.pos_d['rolling_renew_option2_day'][1]
        rolling_renew_option2_day_widget.attrs = {'size':'8'}
        rendered_rolling_renew_option2_day = self.render_widget(rolling_renew_option2_day_widget,
                                                            name, value, final_attrs,
                                                           self.pos_d['rolling_renew_option2_day'][0], id_)
        # renew_expiration_method
        RENEW_EXP_METHOD_CHOICE = (
                                  ("0", _("End of full period")),
                                  ("1", mark_safe("%s day(s) at signup month" % \
                                                  rendered_rolling_renew_option1_day)),
                                  ("2", mark_safe("%s day(s) at renewal month" % \
                                                  rendered_rolling_renew_option2_day)),)
        rolling_renew_option_widget = self.pos_d['rolling_renew_option'][1]
        rolling_renew_option_widget.choices=RENEW_EXP_METHOD_CHOICE
        rendered_rolling_renew_option = self.render_widget(rolling_renew_option_widget,
                                                  name, value, final_attrs,
                                                  self.pos_d['rolling_renew_option'][0], id_)
        # fixed_option1_day
        fixed_option1_day_widget = self.pos_d['fixed_option1_day'][1]
        fixed_option1_day_widget.choices=DAYS_CHOICES
        rendered_fixed_option1_day = self.render_widget(fixed_option1_day_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['fixed_option1_day'][0], id_)
        # fixed_option1_month
        fixed_option1_month_widget = self.pos_d['fixed_option1_month'][1]
        fixed_option1_month_widget.choices=MONTHS_CHOICES
        rendered_fixed_option1_month = self.render_widget(fixed_option1_month_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['fixed_option1_month'][0], id_)
        # dynamically generate the year choices for fixed_option1_year
        fixed_option1_year = ''
        if value:
            try:
                fixed_option1_year = int(value[self.pos_d['fixed_option1_year'][0]])
            except:
                pass
        if not fixed_option1_year:
            fixed_option1_year = int(datetime.date.today().year)
        years = [(year, year) for year in range(fixed_option1_year-1, fixed_option1_year+20)]

        # fixed_option1_year
        fixed_option1_year_widget =  self.pos_d['fixed_option1_year'][1]
        fixed_option1_year_widget.choices=years
        rendered_fixed_option1_year = self.render_widget(fixed_option1_year_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['fixed_option1_year'][0], id_)
        # fixed_option2_day
        fixed_option2_day_widget = self.pos_d['fixed_option2_day'][1]
        fixed_option2_day_widget.choices=DAYS_CHOICES
        rendered_fixed_option2_day = self.render_widget(fixed_option2_day_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['fixed_option2_day'][0], id_)
        #fixed_option2_month
        fixed_option2_month_widget = self.pos_d['fixed_option2_month'][1]
        fixed_option2_month_widget.choices=MONTHS_CHOICES
        rendered_fixed_option2_month = self.render_widget(fixed_option2_month_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['fixed_option2_month'][0], id_)
        FIXED_EXP_METHOD_CHOICE = (
                                  ("0", mark_safe("%s %s %s" % (rendered_fixed_option1_month,
                                                      rendered_fixed_option1_day,
                                                      rendered_fixed_option1_year))),
                                  ("1", mark_safe("%s %s of current year" % \
                                                  (rendered_fixed_option2_month,
                                                   rendered_fixed_option2_day))))

        # fixed_option
        fixed_option_widget = self.pos_d['fixed_option'][1]
        fixed_option_widget.choices=FIXED_EXP_METHOD_CHOICE
        rendered_fixed_option = self.render_widget(fixed_option_widget,
                                                  name, value, final_attrs,
                                                  self.pos_d['fixed_option'][0], id_)
        # fixed_option2_rollover_days
        fixed_option2_rollover_days_widget = self.pos_d['fixed_option2_rollover_days'][1]
        fixed_option2_rollover_days_widget.attrs={'size':'8'}
        rendered_fixed_option2_rollover_days = self.render_widget(fixed_option2_rollover_days_widget,
                                                            name, value, final_attrs,
                                                            self.pos_d['fixed_option2_rollover_days'][0], id_)
        # fixed_option2_can_rollover
        fixed_option2_can_rollover_widget = self.pos_d['fixed_option2_can_rollover'][1]
        can_rollover_attrs = final_attrs.copy()
        if "class" in can_rollover_attrs:
            can_rollover_attrs["class"] = "%s checkbox" % can_rollover_attrs["class"]
        else:
            can_rollover_attrs["class"] = "checkbox"
        rendered_fixed_option2_can_rollover = self.render_widget(fixed_option2_can_rollover_widget,
                                                       name, value, can_rollover_attrs,
                                                       self.pos_d['fixed_option2_can_rollover'][0], id_)

        output_html = """
                        <div id="exp-method-box">
                            <div>%s</div>

                            <div style="margin: 1em 0 0 3em;">
                                <div id="rolling-box" class="form-group">
                                    <div class="form-inline"><label for="%s_%s">Period</label> %s %s</div>
                                    <div><label for="%s_%s">Expires On</label> %s</div>
                                    <div><label for="%s_%s">Renew Expires On</label> %s</div>
                                </div>

                                <div id="fixed-box" class="form-group">
                                    <div><label for="%s_%s">Expires On</label> %s</div>
                                    <div class="form-inline">%s For option 2, grace period %s day(s) before expiration then expires in the next year</div>
                                </div>
                            </div>

                        </div>
                      """ % (rendered_period_type,
                           name, self.pos_d['period'][0],
                           rendered_period, rendered_period_unit,
                           name, self.pos_d['rolling_option'][0], rendered_rolling_option,
                           name, self.pos_d['rolling_renew_option'][0], rendered_rolling_renew_option,
                           name, self.pos_d['fixed_option'][0], rendered_fixed_option,
                           rendered_fixed_option2_can_rollover, rendered_fixed_option2_rollover_days)

        return mark_safe(output_html)
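
The widget above builds its choice labels by interpolating already-rendered sub-widgets and wrapping the result in mark_safe. A minimal sketch of the translation side of that pattern, not taken from the project (choice values and labels are illustrative): labels declared with the lazy alias stay untranslated until the form is rendered, so each request resolves them in its own active language.

from django.utils.translation import ugettext_lazy as _

# Hypothetical expiration-method choices; the lazy labels are only resolved
# when the widget renders them, i.e. in the per-request language.
EXP_METHOD_CHOICES = (
    ("0", _("End of full period")),
    ("1", _("Fixed number of days after the signup month")),
)

Labels that embed raw HTML, as the example does with the rendered day inputs, additionally have to go through mark_safe, since translated strings are otherwise escaped like any other text.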

Example 39

Project: hubplus
Source File: views.py
View license
@secure_resource(TgGroup)
def group(request, group, template_name="plus_groups/group.html", current_app='plus_groups', **kwargs):

    if not group :
        raise Http404(_('There is no group with this id'))

    user = request.user

    can_join = False
    apply = False
    leave = False
    invite = False
    can_comment = False
    message = False
    add_link = False
    can_tag = False
    can_change_avatar = False
    has_accept = False
    can_delete = False

    is_following = Following.objects.is_following(request.user, group.get_inner())

    editable_group_type = group.group_type != settings.GROUP_HUB_TYPE

    if user.is_authenticated():
        if user.is_direct_member_of(group.get_inner()):
            # can now leave group if you aren't the last one out
            if group.get_no_members() > 1 :
                leave = True
            try :
                group.invite_member
                invite = True
            except Exception, e :# user doesn't have invite permission
                pass

        else :
            try :
                group.join 
                can_join = True
            except Exception, e: # user doesn't have join permission
                pass
            try :
                if not can_join :
                    group.apply
                    apply = True
            except Exception, e : # user doesn't have apply permission
                pass

        try : 
            group.comment
            can_comment = True
            can_tag = True # XXX commenter interface governs who can tag. Do we need a special tag interface?
        except :
            pass


        try :
            group.message_members
            message = True
        except :
            pass
        
        try :
            group.create_Link
            add_link = True
        except Exception, e :
            print e
            pass

        try :
            group.change_avatar
            can_change_avatar  = True
        except Exception, e:
            pass

        try :
            dummy = group.delete
            can_delete = True
        except :
            pass

        if has_access(request.user, None, 'Application.Accept', group._inner.get_security_context()):
            has_accept = True
        else:
            has_accept = False
    
    tweets = FeedItem.feed_manager.get_from_permissioned(group, request.user)

    try:
        group.get_all_sliders
        perms_bool = True
    except PlusPermissionsNoAccessException:
        perms_bool = False

    if kwargs['type'] == 'hub':
        type_name = hub_name()
    else:
        type_name = "Group"

    search_types = narrow_search_types(type_name)
    side_search = side_search_args(current_app + ':groups', search_types[0][1][2])

    search = request.GET.get('search', '')
    order = request.GET.get('order', '')
    resource_search = resources(group=group, search=search, order=order)
    resource_listing_args = listing_args(current_app + ':group_resources', current_app + ':group_resources_tag', tag_string='', search_terms=search, multitabbed=False, order=order, template_base='plus_lib/listing_frag.html', search_type_label='resources')
    resource_listing_args['group_id'] = group.id

    ##############Here we should use the "plus_search" function from plus_explore as above########

    member_search = a_member_search(group=group, search=search, order=order, 
                                    member_profile_ids=[x.get_profile().get_ref().id for x in group.users.all()])
    host_search = a_member_search(group=group.get_admin_group(), search=search, order=order, 
                                  member_profile_ids=[x.get_profile().get_ref().id for x in group.get_admin_group().users.all()])
    member_listing_args = listing_args(current_app+':group_members', current_app+':group_members_tag', tag_string='', search_terms=search, multitabbed=False, order=order, template_base='plus_lib/listing_frag.html', search_type_label='members', group_id=group.id)

    host_listing_args = listing_args(current_app+':group_hosts', current_app+':group_hosts_tag', tag_string='', search_terms=search, multitabbed=False, order=order, template_base='plus_lib/listing_frag.html', search_type_label='hosts', group_id=group.id)
    member_count = group.users.all().count()
    host_count = group.get_admin_group().users.all().count()


    ##############################################################################################

    return render_to_response(template_name, {
            "head_title" : "%s" % group.get_display_name(),
            "status_type" : 'group',
            #"status_since" : status_since,
            "group" : TemplateSecureWrapper(group),
            "target_class" : ContentType.objects.get_for_model(group.get_inner()).id,
            "target_id" : group.get_inner().id,
            #"members" : members,
            "member_count" : member_count,
            "leave": leave,
            "can_join" : can_join, 
            "apply" : apply, 
            "invite" : invite, 
            "can_comment" : can_comment, 
            "message" : message,
            "add_link" : add_link,
            "can_tag" : can_tag,
            "can_change_avatar" : can_change_avatar,
            "can_delete" : can_delete, 

            "is_following" : is_following,
            #"hosts": hosts,
            "host_group_id":group.get_admin_group().id,
            "host_group_app_label":group.get_admin_group().group_app_label() + ':group',
            "is_host":user.is_admin_of(group.get_inner()),
            "host_count": host_count,
            "tweets" : tweets,
            "permissions": perms_bool,
            'side_search_args':side_search,
            'resource_search':resource_search,
            'resource_listing_args':resource_listing_args,
            'member_search':member_search,
            'member_listing_args':member_listing_args,
            'host_search':host_search,
            'host_listing_args':host_listing_args,
            'group_id':group.id,
            'search_types':search_types,
            'tagged_url':current_app + ':groups_tag',
            'has_accept':has_accept,
            'editable_group_type':editable_group_type,
            }, context_instance=RequestContext(request, current_app=current_app)
    )
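
The view never checks permissions explicitly; under the hubplus security wrapper, merely reading a protected attribute such as group.join or group.invite_member raises when the requesting user lacks that capability, which is why every check is a bare attribute access inside try/except. A condensed, self-contained sketch of that idea (the helper name and logging are illustrative, not part of hubplus):

import logging

logger = logging.getLogger(__name__)

def has_capability(secured_obj, attr_name):
    # The security proxy is expected to raise on attribute access when the
    # current user is not allowed to use the wrapped method.
    try:
        getattr(secured_obj, attr_name)
        return True
    except Exception as exc:
        logger.debug("capability %s denied: %s", attr_name, exc)
        return False

# e.g. can_join = has_capability(group, 'join')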

Example 40

Project: hubplus
Source File: models.py
View license
def patch_user_class():
    """access biz_type, introduced_by and postcode through prefixing 'md_' e.g user.md_biz_type.
    """
    from apps.plus_groups.models import TgGroup, User_Group  
    User._meta.db_table = 'tg_user'
    # Patching the User class
    User.add_to_class('__getattr__',  __getattr__)
    User.add_to_class('__setattr__',  __setattr__)

    # EXPERIMENT ... make username alias of user_name
    User.add_to_class('user_name', UserNameField(unique=True, max_length=255))
    #User.add_to_class('user_name',models.CharField(max_length=255,unique=True))
    #del User.username
    #User.add_to_class('username',AliasOf('user_name'))                      
    # EXPERIMENT END

    User.add_to_class('email_address', models.CharField(max_length=255,unique=True))

    #remove the existing django groups relation  
    gr = User._meta.get_field('groups')
    User._meta.local_many_to_many.remove(gr)
    del User.groups
    # add our new relation for groups
    User.add_to_class('groups', models.ManyToManyField(TgGroup, through=User_Group, related_name='users'))

    User.add_to_class('description', models.TextField())
    User.add_to_class('organisation', models.CharField(max_length=255)) 
    User.add_to_class('title', models.CharField(max_length=255,null=True))
    User.add_to_class('mobile', models.CharField(max_length=30))
    User.add_to_class('work', models.CharField(max_length=30))
    User.add_to_class('home', models.CharField(max_length=30))
    User.add_to_class('fax', models.CharField(max_length=30))
    User.add_to_class('place', models.CharField(max_length=150, null=True))

    User.add_to_class('created',models.DateTimeField(default=datetime.datetime.now))
    User.add_to_class('email2',models.CharField(max_length=255))
    User.add_to_class('email3',models.CharField(max_length=255))
    User.add_to_class('skype_id',models.TextField())
    User.add_to_class('sip_id',models.TextField())
    User.add_to_class('website',models.TextField())
    User.add_to_class('homeplace', models.ForeignKey(Location, null=True))
    User.add_to_class('address', models.TextField())
    User.add_to_class('country', models.CharField(null=True, default="", max_length=2))

    User.add_to_class('homehub', models.ForeignKey("plus_groups.TgGroup", null=True)) # we need this for PSN, XXX need to decide how to make it compatible with hubspace for hub+ 

    User.add_to_class('psn_id', models.CharField(max_length=50,null=True))
    User.add_to_class('psn_password_hmac_key', models.CharField(max_length=50, null=True)) #this is just for the bizarre HMAC-ing of psn passwords by a field formerly known as "fullname"

    User.add_to_class('cc_messages_to_email',models.BooleanField(default=False)) # internal messages get reflected to email

    User.add_to_class('has_avatar', models.BooleanField(default=False))

    User.email = AliasOf('email_address')
    if settings.PROJECT_THEME == 'plus':
       User.post_or_zip = AliasOf('md_postcode')
       User.add_to_class('public_field', models.SmallIntegerField(null=True)) # this will be phased out as it is redundant with the new permissions system
    else:
       User.add_to_class('post_or_zip', models.CharField(null=True, default="", max_length=30))
    
    User.is_active = AliasOf('active') # This takes precedence over the existing is_active field in django.contrib.auth.models
    User.add_to_class('active', models.SmallIntegerField(null=True)) # not currently shown, however setting this to 0 will stop the user logging in

    #User.add_to_class('image', models.CharField(null=True, default="", max_length= XXXX)


    User.set_password = set_password
    User.check_password = check_password
    User.is_member_of = is_member_of
    User.is_direct_member_of = is_direct_member_of
    User.get_enclosures = get_enclosures
    User.get_enclosure_set = get_enclosure_set
    User.is_group = lambda(self) : False
    User.is_user = lambda(self) : True

    User.save = user_save
    User.post_save = post_save

    User.is_admin_of = lambda self, group : self.is_member_of(group.get_admin_group())

    def is_site_admin(self) :
       from apps.plus_permissions.default_agents import get_all_members_group
       return self.is_admin_of(get_all_members_group())
    User.is_site_admin = is_site_admin

    def send_tweet(self, msg) :
       from apps.microblogging.models import send_tweet
       return send_tweet(self, msg)
    User.send_tweet = send_tweet

    def message(self, sender, subject, body, message_extra="") :

       if not self.active :
          return # don't send messages to inactive members

       from messages.models import Message
       from django.core.mail import send_mail
       from django.core.urlresolvers import reverse
       from django.utils.translation import ugettext_lazy as _, ugettext
       
       m = Message(subject=subject, body=body, sender = sender, recipient=self)
       m.save()

       if self.cc_messages_to_email :
          # recipient wants emails cc-ed 
          link = 'http://' + settings.DOMAIN_NAME + reverse('messages_all')
          settings_link = 'http://' + settings.DOMAIN_NAME + reverse('acct_settings')
          main = _(""" 
%(sender)s has sent you a new message on %(account_name)s .

---------------------------------------------------------------

%(body)s

---------------------------------------------------------------


Click %(link)s to see your account.

If you do not want to receive emails when you receive messages on %(account_name)s, please change your settings here : %(settings_link)s

%(message_extra)s

""") % {'account_name':settings.SITE_NAME, 'body':body, 'link':link, 'sender':sender.get_display_name(), 'settings_link':settings_link, "message_extra":message_extra}

          self.email_user(subject, main, settings.SERVER_EMAIL)

       return m

    User.message = message


    def group_invite_message(self, group, invited_by, accept_url, special_message='') :

       self.message(invited_by, 
                    Template(settings.GROUP_INVITE_SUBJECT_TEMPLATE).render(
             Context({'group_name':group.get_display_name() })),
                    Template(settings.GROUP_INVITE_TEMPLATE).render(
             Context({
                   'first_name':self.first_name,
                   'last_name':self.last_name,
                   'sponsor':invited_by.get_display_name(),
                   'group_name':group.get_display_name(),
                   'site_name':settings.SITE_NAME,
                   'special_message':special_message,
                   'signup_link':accept_url,
                   })
             )+"""
%s""" % accept_url
                    )
    
    User.group_invite_message = group_invite_message


    User.hubs = lambda self : self.groups.filter(group_type=settings.GROUP_HUB_TYPE,level='member')

    User.change_avatar = lambda self : True

    def is_following(self, other_user) :
       from apps.microblogging.models import Following
       return Following.objects.is_following(self, other_user)

    User.is_following = is_following

    def is_followed_by(self, other_user) :
       from apps.microblogging.models import Following
       return Following.objects.is_following(other_user, self)

    User.is_followed_by = is_followed_by

    AnonymousUser.is_member_of = lambda *args, **kwargs : False
    AnonymousUser.is_direct_member_of = lambda *args, **kwarg : False

Example 41

Project: RatticWeb
Source File: views.py
View license
@login_required
def list(request, cfilter='special', value='all', sortdir='ascending', sort='title', page=1):
    # Setup basic stuff
    viewdict = {
        'credtitle': _('All passwords'),
        'alerts': [],
        'filter': unicode(cfilter).lower(),
        'value': unicode(value).lower(),
        'sort': unicode(sort).lower(),
        'sortdir': unicode(sortdir).lower(),
        'page': unicode(page).lower(),
        'groups': request.user.groups,

        # Default buttons
        'buttons': {
            'add': True,
            'delete': True,
            'changeq': True,
            'tagger': True,
            'export': False,
        }
    }

    # Get groups if required
    get_groups = request.GET.getlist('group')

    if len(get_groups) > 0:
        groups = Group.objects.filter(id__in=get_groups)
    else:
        groups = Group.objects.all()

    # Perform the search
    (search_object, cred_list) = cred_search(request.user, cfilter, value, sortdir, sort, groups)

    # Apply the filters
    if cfilter == 'tag':
        viewdict['credtitle'] = _('Passwords tagged with %(tagname)s') % {'tagname': search_object.name, }
        viewdict['buttons']['export'] = True

    elif cfilter == 'group':
        viewdict['credtitle'] = _('Passwords in group %(groupname)s') % {'groupname': search_object.name, }
        viewdict['buttons']['export'] = True

    elif cfilter == 'search':
        viewdict['credtitle'] = _('Passwords for search "%(searchstring)s"') % {'searchstring': search_object, }
        viewdict['buttons']['export'] = True

    elif cfilter == 'history':
        viewdict['credtitle'] = _('Versions of: "%(credtitle)s"') % {'credtitle': search_object.title, }
        viewdict['buttons']['add'] = False
        viewdict['buttons']['delete'] = False
        viewdict['buttons']['changeq'] = False
        viewdict['buttons']['tagger'] = False

    elif cfilter == 'changeadvice':
        alert = {}
        alert['message'] = _("That user is now disabled. Here is a list of passwords that they have viewed that have not since been changed. You probably want to add them all to the change queue.")
        alert['type'] = 'info'

        viewdict['credtitle'] = _('Changes required for "%(username)s"') % {'username': search_object.username}
        viewdict['buttons']['add'] = False
        viewdict['buttons']['delete'] = True
        viewdict['buttons']['changeq'] = True
        viewdict['buttons']['tagger'] = False
        viewdict['alerts'].append(alert)

    elif cfilter == 'special' and value == 'all':
        viewdict['buttons']['export'] = True

    elif cfilter == 'special' and value == 'trash':
        viewdict['credtitle'] = _('Passwords in the trash')
        viewdict['buttons']['add'] = False
        viewdict['buttons']['undelete'] = True
        viewdict['buttons']['changeq'] = False
        viewdict['buttons']['tagger'] = False
        viewdict['buttons']['export'] = True

    elif cfilter == 'special' and value == 'changeq':
        viewdict['credtitle'] = _('Passwords on the Change Queue')
        viewdict['buttons']['add'] = False
        viewdict['buttons']['delete'] = False
        viewdict['buttons']['changeq'] = False
        viewdict['buttons']['tagger'] = False

    else:
        raise Http404

    # Apply the sorting rules
    if sortdir == 'ascending':
        viewdict['revsortdir'] = 'descending'
    elif sortdir == 'descending':
        viewdict['revsortdir'] = 'ascending'
    else:
        raise Http404

    # Get the page
    paginator = Paginator(cred_list, request.user.profile.items_per_page)
    try:
        cred = paginator.page(page)
    except PageNotAnInteger:
        cred = paginator.page(1)
    except EmptyPage:
        cred = paginator.page(paginator.num_pages)

    # Get variables to give the template
    viewdict['credlist'] = cred

    # Create the form for exporting
    viewdict['exportform'] = ExportForm()

    return render(request, 'cred_list.html', viewdict)
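
Every user-facing title in this view goes through _() with named placeholders, so translators can reorder the sentence in another language without breaking the interpolation. A two-line sketch of the idiom (the tag value is illustrative):

from django.utils.translation import ugettext as _

title = _('Passwords tagged with %(tagname)s') % {'tagname': 'routers'}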

Example 42

Project: django-common
Source File: admin.py
View license
    @csrf_protect_m
    @atomic_decorator
    def add_view(self, request, form_url='', extra_context=None):
        """The 'add' admin view for this model."""
        model = self.model
        opts = model._meta

        if not self.has_add_permission(request):
            raise PermissionDenied

        ModelForm = self.get_form(request)
        formsets = []

        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES)

            if form.is_valid():
                new_object = self.save_form(request, form, change=False)
                form_validated = True
            else:
                form_validated = False
                new_object = self.model()

            prefixes = {}

            for FormSet, inline in zip(self.get_formsets(request),
                                       self.get_inline_instances(request)):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1

                if prefixes[prefix] != 1:
                    prefix = "{0}-{1}".format(prefix, prefixes[prefix])

                formset = FormSet(data=request.POST, files=request.FILES,
                                  instance=new_object,
                                  save_as_new="_saveasnew" in request.POST,
                                  prefix=prefix, queryset=inline.queryset(request))

                formsets.append(formset)

                for inline in self.get_inline_instances(request):
                    # If this is the inline that matches this formset, and
                    # we have some nested inlines to deal with, then we need
                    # to get the relevant formset for each of the forms in
                    # the current formset.
                    if inline.inlines and inline.model == formset.model:
                        for nested in inline.inline_instances:
                            for the_form in formset.forms:
                                InlineFormSet = nested.get_formset(request, the_form.instance)
                                prefix = "{0}-{1}".format(the_form.prefix,
                                                          InlineFormSet.get_default_prefix())
                                formsets.append(InlineFormSet(request.POST, request.FILES,
                                                              instance=the_form.instance,
                                                              prefix=prefix))
            if all_valid(formsets) and form_validated:
                self.save_model(request, new_object, form, change=False)
                form.save_m2m()

                for formset in formsets:
                    self.save_formset(request, form, formset, change=False)

                self.log_addition(request, new_object)

                return self.response_add(request, new_object)
        else:
            # Prepare the dict of initial data from the request.
            # We have to special-case M2Ms as a list of comma-separated PKs.
            initial = dict(request.GET.items())

            for k in initial:
                try:
                    f = opts.get_field(k)
                except models.FieldDoesNotExist:
                    continue

                if isinstance(f, models.ManyToManyField):
                    initial[k] = initial[k].split(",")

            form = ModelForm(initial=initial)
            prefixes = {}

            for FormSet, inline in zip(self.get_formsets(request),
                                       self.get_inline_instances(request)):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1

                if prefixes[prefix] != 1:
                    prefix = "{0}-{1}".format(prefix, prefixes[prefix])

                formset = FormSet(instance=self.model(), prefix=prefix,
                                  queryset=inline.queryset(request))
                formsets.append(formset)

        adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)),
                                      self.prepopulated_fields, self.get_readonly_fields(request),
                                      model_admin=self)

        media = self.media + adminForm.media
        inline_admin_formsets = []

        for inline, formset in zip(self.get_inline_instances(request), formsets):
            fieldsets = list(inline.get_fieldsets(request))
            readonly = list(inline.get_readonly_fields(request))
            inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
                                                              fieldsets, readonly,
                                                              model_admin=self)
            if inline.inlines:
                for form in formset.forms:
                    if form.instance.pk:
                        instance = form.instance
                    else:
                        instance = None

                    form.inlines = inline.get_inlines(request, instance, prefix=form.prefix)

                inline_admin_formset.inlines = inline.get_inlines(request)

            inline_admin_formsets.append(inline_admin_formset)
            media = media + inline_admin_formset.media

        context = {
            'title': _('Add %s') % force_unicode(opts.verbose_name),
            'adminform': adminForm,
            'is_popup': "_popup" in request.REQUEST,
            'show_delete': False,
            'media': mark_safe(media),
            'inline_admin_formsets': inline_admin_formsets,
            'errors': helpers.AdminErrorList(form, formsets),
            'app_label': opts.app_label,
        }

        context.update(extra_context or {})

        return self.render_change_form(request, context, form_url=form_url, add=True)
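
Repeated inline formsets are disambiguated here by numbering their prefixes, so two inlines over the same model still post distinct field names; the change view in the next example repeats the same bookkeeping. The counting logic, pulled out into a standalone sketch (the helper name is illustrative):

def unique_prefix(prefixes, default_prefix):
    # Count how many formsets already claimed this prefix and suffix the
    # duplicates, mirroring the loop in add_view/change_view.
    prefixes[default_prefix] = prefixes.get(default_prefix, 0) + 1
    if prefixes[default_prefix] != 1:
        return "{0}-{1}".format(default_prefix, prefixes[default_prefix])
    return default_prefix

# prefixes = {}
# unique_prefix(prefixes, 'item')   ->  'item'
# unique_prefix(prefixes, 'item')   ->  'item-2'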

Example 43

Project: django-common
Source File: admin.py
View license
    @csrf_protect_m
    @atomic_decorator
    def change_view(self, request, object_id, extra_context=None, **kwargs):
        "The 'change' admin view for this model."
        model = self.model
        opts = model._meta
        obj = self.get_object(request, unquote(object_id))

        if not self.has_change_permission(request, obj):
            raise PermissionDenied

        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
                          {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})

        if request.method == 'POST' and "_saveasnew" in request.POST:
            return self.add_view(request, form_url='../add/')

        ModelForm = self.get_form(request, obj)
        formsets = []

        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES, instance=obj)

            if form.is_valid():
                form_validated = True
                new_object = self.save_form(request, form, change=True)
            else:
                form_validated = False
                new_object = obj

            prefixes = {}

            for FormSet, inline in zip(self.get_formsets(request, new_object),
                                       self.get_inline_instances(request)):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1

                if prefixes[prefix] != 1:
                    prefix = "{0}-{1}".format(prefix, prefixes[prefix])
                formset = FormSet(request.POST, request.FILES,
                                  instance=new_object, prefix=prefix,
                                  queryset=inline.queryset(request))

                formsets.append(formset)

                for inline in self.get_inline_instances(request):
                    # If this is the inline that matches this formset, and
                    # we have some nested inlines to deal with, then we need
                    # to get the relevant formset for each of the forms in
                    # the current formset.
                    if inline.inlines and inline.model == formset.model:
                        for nested in inline.inline_instances:
                            for the_form in formset.forms:
                                InlineFormSet = nested.get_formset(request, the_form.instance)
                                prefix = "{0}-{1}".format(the_form.prefix,
                                                          InlineFormSet.get_default_prefix())
                                formsets.append(InlineFormSet(request.POST, request.FILES,
                                                              instance=the_form.instance,
                                                              prefix=prefix))
            if all_valid(formsets) and form_validated:
                self.save_model(request, new_object, form, change=True)
                form.save_m2m()

                for formset in formsets:
                    self.save_formset(request, form, formset, change=True)

                change_message = self.construct_change_message(request, form, formsets)
                self.log_change(request, new_object, change_message)

                return self.response_change(request, new_object)

        else:
            form = ModelForm(instance=obj)
            prefixes = {}

            for FormSet, inline in zip(self.get_formsets(request, obj),
                                       self.get_inline_instances(request)):
                prefix = FormSet.get_default_prefix()
                prefixes[prefix] = prefixes.get(prefix, 0) + 1
                if prefixes[prefix] != 1:
                    prefix = "{0}-{1}".format(prefix, prefixes[prefix])
                formset = FormSet(instance=obj, prefix=prefix,
                                  queryset=inline.queryset(request))
                formsets.append(formset)

        adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
                                      self.prepopulated_fields,
                                      self.get_readonly_fields(request, obj),
                                      model_admin=self)
        media = self.media + adminForm.media
        inline_admin_formsets = []

        for inline, formset in zip(self.get_inline_instances(request), formsets):
            fieldsets = list(inline.get_fieldsets(request, obj))
            readonly = list(inline.get_readonly_fields(request, obj))
            inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets,
                                                              readonly, model_admin=self)
            if inline.inlines:
                for form in formset.forms:
                    if form.instance.pk:
                        instance = form.instance
                    else:
                        instance = None

                    form.inlines = inline.get_inlines(request, instance, prefix=form.prefix)

                inline_admin_formset.inlines = inline.get_inlines(request)

            inline_admin_formsets.append(inline_admin_formset)
            media = media + inline_admin_formset.media

        context = {
            'title': _('Change %s') % force_unicode(opts.verbose_name),
            'adminform': adminForm,
            'object_id': object_id,
            'original': obj,
            'is_popup': "_popup" in request.REQUEST,
            'media': mark_safe(media),
            'inline_admin_formsets': inline_admin_formsets,
            'errors': helpers.AdminErrorList(form, formsets),
            'app_label': opts.app_label,
        }

        context.update(extra_context or {})

        return self.render_change_form(request, context, change=True, obj=obj)

Example 44

Project: pyas2
Source File: as2lib.py
View license
def save_message(message, payload, raw_payload):
    """ Function decompresses, decrypts and verifies the received AS2 message
     Takes an AS2 message as input and returns the actual payload, e.g. an X12 message. """

    try:
        # Initialize variables
        mic_content = None
        mic_alg = None
        filename = payload.get_filename()

        # Search for the organization and partner, raise an error if none exists.
        models.Log.objects.create(message=message, status='S', text=_(u'Begin Processing of received AS2 message'))

        if not models.Organization.objects.filter(as2_name=as2utils.unescape_as2name(payload.get('as2-to'))).exists():
            raise as2utils.As2PartnerNotFound('Unknown AS2 organization with id %s' % payload.get('as2-to'))
        message.organization = models.Organization.objects.get(
            as2_name=as2utils.unescape_as2name(payload.get('as2-to')))

        if not models.Partner.objects.filter(as2_name=as2utils.unescape_as2name(payload.get('as2-from'))).exists():
            raise as2utils.As2PartnerNotFound('Unknown AS2 Trading partner with id %s' % payload.get('as2-from'))
        message.partner = models.Partner.objects.get(as2_name=as2utils.unescape_as2name(payload.get('as2-from')))
        models.Log.objects.create(
            message=message,
            status='S',
            text=_(u'Message is for Organization "%s" from partner "%s"' % (message.organization, message.partner))
         )

        # Check if message from this partner are expected to be encrypted
        if message.partner.encryption and payload.get_content_type() != 'application/pkcs7-mime':
            raise as2utils.As2InsufficientSecurity(
                u'Incoming messages from AS2 partner {0:s} are defined to be encrypted'.format(
                    message.partner.as2_name))

        # Check if payload is encrypted and if so decrypt it
        if payload.get_content_type() == 'application/pkcs7-mime' \
                and payload.get_param('smime-type') == 'enveloped-data':
            models.Log.objects.create(message=message, status='S', text=_(
                u'Decrypting the payload using private key {0:s}'.format(message.organization.encryption_key)))
            message.encrypted = True

            # Check if encrypted data is base64 encoded, if not then encode
            try:
                payload.get_payload().encode('ascii')
            except UnicodeDecodeError:
                payload.set_payload(payload.get_payload().encode('base64'))

            # Decrypt the base64 encoded data using the partners public key
            pyas2init.logger.debug(u'Decrypting the payload :\n{0:s}'.format(payload.get_payload()))
            try:
                decrypted_content = as2utils.decrypt_payload(
                    as2utils.mimetostring(payload, 78),
                    str(message.organization.encryption_key.certificate.path),
                    str(message.organization.encryption_key.certificate_passphrase)
                )
                raw_payload = decrypted_content
                payload = email.message_from_string(decrypted_content)

                # Check if decrypted content is the actual content i.e. no compression and no signatures
                if payload.get_content_type() == 'text/plain':
                    payload = email.Message.Message()
                    payload.set_payload(decrypted_content)
                    payload.set_type('application/edi-consent')
                    if filename:
                        payload.add_header('Content-Disposition', 'attachment', filename=filename)
            except Exception, msg:
                raise as2utils.As2DecryptionFailed('Failed to decrypt message, exception message is %s' % msg)

        # Check if message from this partner are expected to be signed
        if message.partner.signature and payload.get_content_type() != 'multipart/signed':
            raise as2utils.As2InsufficientSecurity(
                u'Incoming messages from AS2 partner {0:s} are defined to be signed'.format(message.partner.as2_name))

        # Check if message is signed and if so verify it
        if payload.get_content_type() == 'multipart/signed':
            if not message.partner.signature_key:
                raise as2utils.As2InsufficientSecurity('Partner has no signature verification key defined')
            models.Log.objects.create(message=message, status='S', text=_(
                u'Message is signed, Verifying it using public key {0:s}'.format(message.partner.signature_key)))
            pyas2init.logger.debug('Verifying the signed payload:\n{0:s}'.format(payload.as_string()))
            message.signed = True
            mic_alg = (payload.get_param('micalg') or 'sha1').lower()
            # main_boundary = '--' + payload.get_boundary()

            # Get the partners public and ca certificates
            cert = str(message.partner.signature_key.certificate.path)
            ca_cert = cert
            if message.partner.signature_key.ca_cert:
                ca_cert = str(message.partner.signature_key.ca_cert.path)
            verify_cert = message.partner.signature_key.verify_cert

            # Extract the signature and signed content from the mime message
            raw_sig = None
            for part in payload.walk():
                if part.get_content_type() == "application/pkcs7-signature":
                    try:
                        raw_sig = part.get_payload().encode('ascii').strip()
                    except UnicodeDecodeError:
                        raw_sig = part.get_payload().encode('base64').strip()
                else:
                    payload = part

            # Verify message using raw payload received from partner
            try:
                as2utils.verify_payload(raw_payload, None, cert, ca_cert, verify_cert)
            except Exception:
                # Verify message using extracted signature and stripped message
                try:
                    as2utils.verify_payload(as2utils.canonicalize2(payload), raw_sig, cert, ca_cert, verify_cert)
                except Exception, e:
                    raise as2utils.As2InvalidSignature(
                        'Signature Verification Failed, exception message is {0:s}'.format(e))

            mic_content = as2utils.canonicalize2(payload)

        # Check if the message has been compressed and if so decompress it
        if payload.get_content_type() == 'application/pkcs7-mime' \
                and payload.get_param('smime-type') == 'compressed-data':
            models.Log.objects.create(message=message, status='S', text=_(u'Decompressing the payload'))
            message.compressed = True

            # Decode the data to binary if its base64 encoded
            compressed_content = payload.get_payload()
            try:
                compressed_content.encode('ascii')
                compressed_content = base64.b64decode(payload.get_payload())
            except UnicodeDecodeError:
                pass

            pyas2init.logger.debug('Decompressing the payload:\n%s' % compressed_content)
            try:
                decompressed_content = as2utils.decompress_payload(compressed_content)
                payload = email.message_from_string(decompressed_content)
            except Exception, e:
                raise as2utils.As2DecompressionFailed('Failed to decompress message,exception message is %s' % e)

        # Saving the message mic for sending it in the MDN
        if mic_content:
            pyas2init.logger.debug("Calculating MIC with alg {0:s} for content:\n{1:s}".format(mic_alg, mic_content))
            calculate_mic = getattr(hashlib, mic_alg.replace('-', ''), hashlib.sha1)
            message.mic = '%s, %s' % (calculate_mic(mic_content).digest().encode('base64').strip(), mic_alg)

        return payload
    finally:
        message.save()
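
save_message ends by computing the received-content MIC that later goes into the MDN: the micalg value from the signature headers is mapped onto a hashlib constructor and the digest is base64-encoded. That step as a standalone sketch (the function name and the sha1 fallback mirror the getattr fallback above; both are illustrative):

import base64
import hashlib

def calculate_mic(content, mic_alg='sha1'):
    # 'sha-1' / 'sha-256' style names become hashlib attribute names;
    # unknown algorithms fall back to sha1 as in the example above.
    digest_cls = getattr(hashlib, mic_alg.replace('-', ''), hashlib.sha1)
    digest = base64.b64encode(digest_cls(content).digest()).decode('ascii')
    return '%s, %s' % (digest, mic_alg)

# calculate_mic(b'EDI payload', 'sha-1')  ->  '<base64 digest>, sha-1'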

Example 45

Project: pyas2
Source File: as2lib.py
View license
def build_mdn(message, status, **kwargs):
    """ Function builds AS2 MDN report for the received message.
    Takes message status as input and returns the mdn content."""

    try:
        # Initialize variables
        mdn_body, mdn_message = None, None

        # Set the confirmation text message here
        confirmation_text = str()
        if message.organization and message.organization.confirmation_message:
            confirmation_text = message.organization.confirmation_message
        # overwrite with partner specific message
        if message.partner and message.partner.confirmation_message:
            confirmation_text = message.partner.confirmation_message
        # default message
        if confirmation_text.strip() == '':
            confirmation_text = _(u'The AS2 message has been processed. '
                                  u'Thank you for exchanging AS2 messages with Pyas2.')

        # Update message status and send mail here based on the created MDN
        if status != 'success':
            as2utils.senderrorreport(message, _(u'Failure in processing message from partner,\n '
                                                u'Basic status : %s \n Advanced Status: %s' %
                                                (kwargs['adv_status'], kwargs['status_message'])))
            confirmation_text = _(u'The AS2 message could not be processed. '
                                  u'The disposition-notification report has additional details.')
            models.Log.objects.create(message=message, status='E', text=kwargs['status_message'])
            message.status = 'E'
        else:
            message.status = 'S'

        # In case no MDN is requested exit from process
        header_parser = HeaderParser()
        message_header = header_parser.parsestr(message.headers)
        if not message_header.get('disposition-notification-to'):
            models.Log.objects.create(message=message, status='S',
                                      text=_(u'MDN not requested by partner, closing request.'))
            return mdn_body, mdn_message

        # Build the MDN report
        models.Log.objects.create(message=message, status='S', text=_(u'Building the MDN response to the request'))
        mdn_report = MIMEMultipart('report', report_type="disposition-notification")

        # Build the text message with confirmation text and add to report
        mdn_text = email.Message.Message()
        mdn_text.set_payload("%s\n" % confirmation_text)
        mdn_text.set_type('text/plain')
        mdn_text.set_charset('us-ascii')
        del mdn_text['MIME-Version']
        mdn_report.attach(mdn_text)

        # Build the MDN message and add to report
        mdn_base = email.Message.Message()
        mdn_base.set_type('message/disposition-notification')
        mdn_base.set_charset('us-ascii')
        mdn = 'Reporting-UA: Bots Opensource EDI Translator\n'
        mdn += 'Original-Recipient: rfc822; %s\n' % message_header.get('as2-to')
        mdn += 'Final-Recipient: rfc822; %s\n' % message_header.get('as2-to')
        mdn += 'Original-Message-ID: <%s>\n' % message.message_id
        if status != 'success':
            mdn += 'Disposition: automatic-action/MDN-sent-automatically; ' \
                   'processed/%s: %s\n' % (status, kwargs['adv_status'])
        else:
            mdn += 'Disposition: automatic-action/MDN-sent-automatically; processed\n'
        if message.mic:
            mdn += 'Received-content-MIC: %s\n' % message.mic
        mdn_base.set_payload(mdn)
        del mdn_base['MIME-Version']
        mdn_report.attach(mdn_base)
        del mdn_report['MIME-Version']

        # If signed MDN is requested by partner then sign the MDN and attach to report
        pyas2init.logger.debug('MDN for message %s created:\n%s' % (message.message_id, mdn_report.as_string()))
        mdn_signed = False
        if message_header.get('disposition-notification-options') and message.organization \
                and message.organization.signature_key:
            models.Log.objects.create(message=message,
                                      status='S',
                                      text=_(u'Signing the MDN using private key {0:s}'.format(
                                          message.organization.signature_key)))
            mdn_signed = True
            # options = message_header.get('disposition-notification-options').split(";")
            # algorithm = options[1].split(",")[1].strip()
            signed_report = MIMEMultipart('signed', protocol="application/pkcs7-signature")
            signed_report.attach(mdn_report)
            mic_alg, signature = as2utils.sign_payload(
                    as2utils.canonicalize(as2utils.mimetostring(mdn_report, 0)+'\n'),
                    str(message.organization.signature_key.certificate.path),
                    str(message.organization.signature_key.certificate_passphrase)
            )
            pyas2init.logger.debug('Signature for MDN created:\n%s' % signature.as_string())
            signed_report.set_param('micalg', mic_alg)
            signed_report.attach(signature)
            mdn_message = signed_report
        else:
            mdn_message = mdn_report

        # Extract the MDN body from the MDN message.
        # Add new line between the MDN message and the signature,
        # Found that without this MDN signature verification fails on Mendelson AS2
        main_boundary = '--' + mdn_report.get_boundary() + '--'
        mdn_body = as2utils.canonicalize(
            as2utils.extractpayload(mdn_message).replace(main_boundary, main_boundary+'\n'))

        # Add the relevant headers to the MDN message
        mdn_message.add_header('ediint-features', 'CEM')
        mdn_message.add_header('as2-from', message_header.get('as2-to'))
        mdn_message.add_header('as2-to', message_header.get('as2-from'))
        mdn_message.add_header('AS2-Version', '1.2')
        mdn_message.add_header('date', email.Utils.formatdate(localtime=True))
        mdn_message.add_header('Message-ID', email.utils.make_msgid())
        mdn_message.add_header('user-agent', 'PYAS2, A pythonic AS2 server')

        # Save the MDN to the store
        filename = mdn_message.get('message-id').strip('<>') + '.mdn'
        full_filename = as2utils.storefile(pyas2init.gsettings['mdn_send_store'], filename, mdn_body, True)

        # Extract the MDN headers as string
        mdn_headers = ''
        for key in mdn_message.keys():
            mdn_headers += '%s: %s\n' % (key, mdn_message[key])

        # If an async MDN is requested, mark the MDN as pending and return None
        if message_header.get('receipt-delivery-option'):
            message.mdn = models.MDN.objects.create(message_id=filename,
                                                    file=full_filename,
                                                    status='P',
                                                    signed=mdn_signed,
                                                    headers=mdn_headers,
                                                    return_url=message_header['receipt-delivery-option'])
            message.mdn_mode = 'ASYNC'
            mdn_body, mdn_message = None, None
            models.Log.objects.create(message=message,
                                      status='S',
                                      text=_(u'Asynchronous MDN requested, setting status to pending'))

        # Else mark MDN as sent and return the MDN message
        else:
            message.mdn = models.MDN.objects.create(message_id=filename,
                                                    file=full_filename,
                                                    status='S',
                                                    signed=mdn_signed,
                                                    headers=mdn_headers)
            message.mdn_mode = 'SYNC'
            models.Log.objects.create(message=message,
                                      status='S',
                                      text=_(u'MDN created successfully and sent to partner'))
        return mdn_body, mdn_message
    finally:
        message.save()
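
The Disposition field assembled in build_mdn is the part of the MDN that tells the partner whether processing succeeded; on failure the modifier carries the machine-readable reason alongside the human-readable text part. A small sketch of just that formatting (the function name is illustrative; the field layout follows the example above):

def disposition_line(status, adv_status=None):
    base = 'Disposition: automatic-action/MDN-sent-automatically; processed'
    if status != 'success':
        # e.g. '...; processed/error: insufficient-message-security'
        return '%s/%s: %s\n' % (base, status, adv_status)
    return base + '\n'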

Example 46

Project: pyas2
Source File: views.py
View license
@csrf_exempt
def as2receive(request, *args, **kwargs):
    """
       Function receives AS2 requests from partners.
       Checks whether it's an AS2 message or an MDN and acts accordingly.
    """
    if request.method == 'POST':
        # Process the posted AS2 message
        request_body = request.read()

        # Create separate raw_payload with only message-id and content type as M2Crypto's signature
        # verification method does not like too many headers
        raw_payload = '%s: %s\n' % ('message-id', request.META['HTTP_MESSAGE_ID'])
        raw_payload += '%s: %s\n\n' % ('content-type', request.META['CONTENT_TYPE'])
        raw_payload += request_body

        # Extract all the relevant headers from the http request
        as2headers = ''
        for key in request.META:
            if key.startswith('HTTP') or key.startswith('CONTENT'):
                as2headers += '%s: %s\n' % (key.replace("HTTP_", "").replace("_", "-").lower(), request.META[key])

        pyas2init.logger.debug('Received an HTTP POST from %s with payload :\n%s' %
                               (request.META['REMOTE_ADDR'], as2headers + '\n' + request_body))
        try:
            pyas2init.logger.debug("Check payload to see if it's an AS2 Message or ASYNC MDN.")
            # Load the request header and body as a MIME Email Message
            payload = email.message_from_string(as2headers + '\n' + request_body)
            # Get the message sender and receiver AS2 IDs
            message_org = as2utils.unescape_as2name(payload.get('as2-to'))
            message_partner = as2utils.unescape_as2name(payload.get('as2-from'))
            message = None

            # Check if this is an MDN message
            mdn_message = None
            if payload.get_content_type() == 'multipart/report':
                mdn_message = payload
            elif payload.get_content_type() == 'multipart/signed':
                for part in payload.walk():
                    if part.get_content_type() == 'multipart/report':
                        mdn_message = part

            # If this is an MDN, get the message ID and check if it exists
            if mdn_message:
                msg_id = None

                for part in mdn_message.walk():
                    if part.get_content_type() == 'message/disposition-notification':
                        msg_id = part.get_payload().pop().get('Original-Message-ID')
                pyas2init.logger.info('Asynchronous MDN received for AS2 message %s to organization %s '
                                      'from partner %s' % (msg_id, message_org, message_partner))
                try:
                    # Get the related organization, partner and message from the db.
                    org = get_object_or_404(models.Organization, as2_name=message_org)
                    partner = get_object_or_404(models.Partner, as2_name=message_partner)
                    message = get_object_or_404(models.Message, message_id=msg_id.strip('<>'), organization=org, partner=partner)
                    models.Log.objects.create(message=message,
                                              status='S',
                                              text=_(u'Processing asynchronous mdn received from partner'))
                    as2lib.save_mdn(message, raw_payload)

                except Http404:
                    # Send 404 response
                    pyas2init.logger.error('Unknown Asynchronous MDN AS2 message %s. '
                                           'Either the partner, org or message was not found in the system' % msg_id)
                    return HttpResponseServerError(_(u'Unknown AS2 MDN received. Will not be processed'))

                except Exception, e:
                    message.status = 'E'
                    models.Log.objects.create(message=message,
                                              status='E',
                                              text=_(u'Failed to send message, error is %s' % e))

                    # Send mail here
                    as2utils.senderrorreport(message, _(u'Failed to send message, error is %s' % e))

                finally:
                    # Save message and send response to HTTP request
                    if message:
                        message.save()
                    return HttpResponse(_(u'AS2 ASYNC MDN has been received'))

            else:
                try:
                    # Process the received AS2 message from partner
                    # Initialize the processing status variables
                    status, adv_status, status_message = '', '', ''

                    pyas2init.logger.info('Received an AS2 message with id %s for organization %s from partner %s' %
                                          (payload.get('message-id'), message_org, message_partner))

                    # Raise duplicate message error in case message already exists in the system
                    # TODO: Create composite key (message_id, organization, partner)
                    if models.Message.objects.filter(message_id=payload.get('message-id').strip('<>')).exists():
                        message = models.Message.objects.create(
                            message_id='%s_%s' % (payload.get('message-id').strip('<>'), payload.get('date')),
                            direction='IN',
                            status='IP',
                            headers=as2headers
                        )
                        raise as2utils.As2DuplicateDocument(_(u'An identical message has already '
                                                              u'been sent to our server'))

                    # Create a new message in the system
                    message = models.Message.objects.create(
                        message_id=payload.get('message-id').strip('<>'),
                        direction='IN',
                        status='IP',
                        headers=as2headers)

                    # Process the received payload to extract the actual message from partner
                    payload = as2lib.save_message(message, payload, raw_payload)

                    # Get the inbox folder for this partner and organization
                    output_dir = as2utils.join(pyas2init.gsettings['root_dir'],
                                               'messages',
                                               message.organization.as2_name,
                                               'inbox',
                                               message.partner.as2_name)

                    # Get the filename from the header and if not there set to message id
                    if message.partner.keep_filename and payload.get_filename():
                        filename = payload.get_filename()
                    else:
                        filename = '%s.msg' % message.message_id

                    # Save the message content to the store and inbox
                    content = payload.get_payload(decode=True)
                    full_filename = as2utils.storefile(output_dir, filename, content, False)
                    store_filename = as2utils.storefile(pyas2init.gsettings['payload_receive_store'],
                                                        message.message_id,
                                                        content,
                                                        True)

                    models.Log.objects.create(message=message,
                                              status='S',
                                              text=_(u'Message has been saved successfully to %s' % full_filename))
                    message.payload = models.Payload.objects.create(name=filename,
                                                                    file=store_filename,
                                                                    content_type=payload.get_content_type())

                    # Set processing status and run the post receive command.
                    status = 'success'
                    as2lib.run_post_receive(message, full_filename)
                    message.save()

                # Catch each of the possible exceptions while processing an as2 message
                except as2utils.As2DuplicateDocument, e:
                    status = 'warning'
                    adv_status = 'duplicate-document'
                    status_message = _(u'An error occurred during the AS2 message processing: %s' % e)

                except as2utils.As2PartnerNotFound, e:
                    status = 'error'
                    adv_status = 'unknown-trading-partner'
                    status_message = _(u'An error occurred during the AS2 message processing: %s' % e)

                except as2utils.As2InsufficientSecurity, e:
                    status = 'error'
                    adv_status = 'insufficient-message-security'
                    status_message = _(u'An error occurred during the AS2 message processing: %s' % e)

                except as2utils.As2DecryptionFailed, e:
                    status = 'decryption-failed'
                    adv_status = 'error'
                    status_message = _(u'An error occurred during the AS2 message processing: %s' % e)

                except as2utils.As2DecompressionFailed, e:
                    status = 'error'
                    adv_status = 'decompression-failed'
                    status_message = _(u'An error occurred during the AS2 message processing: %s' % e)

                except as2utils.As2InvalidSignature, e:
                    status = 'error'
                    adv_status = 'integrity-check-failed'
                    status_message = _(u'An error occurred during the AS2 message processing: %s' % e)

                except Exception, e:
                    txt = traceback.format_exc(None).decode('utf-8', 'ignore')
                    pyas2init.logger.error(_(u'Unexpected error while processing message %(msg)s, '
                                             u'error:\n%(txt)s'), {'txt': txt, 'msg': message.message_id})
                    status = 'error'
                    adv_status = 'unexpected-processing-error'
                    status_message = _(u'An error occurred during the AS2 message processing: %s' % e)
                finally:
                    # Build the mdn for the message based on processing status
                    mdn_body, mdn_message = as2lib.build_mdn(message,
                                                             status,
                                                             adv_status=adv_status,
                                                             status_message=status_message)

                    # Create the mdn response body and return the MDN to the http request
                    if mdn_body:
                        mdn_response = HttpResponse(mdn_body, content_type=mdn_message.get_content_type())
                        for key, value in mdn_message.items():
                            mdn_response[key] = value
                        return mdn_response
                    else:
                        return HttpResponse(_(u'AS2 message has been received'))

        # Catch all exception in case of any kind of error in the system.
        except Exception:
            txt = traceback.format_exc(None).decode('utf-8', 'ignore')
            report_txt = _(u'Fatal error while processing message %(msg)s, '
                           u'error:\n%(txt)s') % {'txt': txt, 'msg': request.META.get('HTTP_MESSAGE_ID').strip('<>')}
            pyas2init.logger.error(report_txt)
            return HttpResponseServerError(report_txt)
            # Send mail here
            # mail_managers(_(u'[pyAS2 Error Report] Fatal
            # error%(time)s')%{'time':request.META.get('HTTP_DATE')}, reporttxt)

    elif request.method == 'GET':
        return HttpResponse(_('To submit an AS2 message, you must POST the message to this URL '))

    elif request.method == 'OPTIONS':
        response = HttpResponse()
        response['allow'] = ','.join(['POST', 'GET'])
        return response
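
A note on the pattern above: several of the pyas2 strings interpolate before translating, e.g. _(u'Failed to send message, error is %s' % e), so the text handed to gettext changes with every error and never matches a catalog entry. A minimal sketch of the placeholder style that keeps the msgid stable (the helper name and the log-model argument are illustrative, not pyas2 APIs):

from django.utils.translation import ugettext as _

def log_send_failure(log_model, message, error):
    # Keep the literal passed to _() constant; interpolate after translation
    # so translators see one msgid with a named placeholder they can move.
    text = _(u'Failed to send message, error is %(error)s') % {'error': error}
    log_model.objects.create(message=message, status='E', text=text)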

Example 47

Project: transifex
Source File: views.py
View license
@login_required
def translate(request, project_slug, lang_code, resource_slug=None,
                     *args, **kwargs):
    """
    Main lotte view.
    """

    # Permissions handling
    # Project should always be available
    project = get_object_or_404(Project, slug=project_slug)
    team = Team.objects.get_or_none(project, lang_code)
    check = ProjectPermission(request.user)
    if not check.submit_translations(team or project) and not\
        check.maintain(project):
        return permission_denied(request)

    resources = []
    if resource_slug:
        resource_list = [get_object_or_404(Resource, slug=resource_slug,
            project=project)]
    else:
        resource_list = Resource.objects.filter(project=project)

        # Return a page explaining that the project has multiple source langs and
        # cannot be translated as a whole.
        if resource_list.values('source_language').distinct().count() > 1:
            messages.info(request,_(
                          "There are multiple source languages for this project. "
                          "You will only be able to translate resources for one "
                          "source language at a time."))
            return HttpResponseRedirect(reverse('project_detail',
                                        args=[project_slug]),)

    # Filter resources that are not accepting translations
    for resource in resource_list:
        if resource.accept_translations:
            resources.append(resource)

    # If no resource accepting translations, raise a 403
    if not resources:
        return permission_denied(request)

    target_language = Language.objects.by_code_or_alias_or_404(lang_code)

    # If it is an attempt to edit the source language, redirect the user to
    # resource_detail and show him a message explaining the reason.
    if target_language == get_source_language(resources):
        messages.error(request,_(
                       "Cannot edit the source language because this would "
                       "result in translation mismatches! If you want to "
                       "update the source strings consider using the transifex "
                       "command-line client."))
        if resource_slug:
            return HttpResponseRedirect(reverse('resource_detail',
                                                args=[project_slug,
                                                      resource_slug]),)
        else:
            return HttpResponseRedirect(reverse('project_detail',
                                                args=[project_slug]),)

    total_strings = SourceEntity.objects.filter(
        resource__in = resources).count()

    translated_strings = Translation.objects.filter(
        resource__in=resources,
        language=target_language,
        source_entity__pluralized=False,
        rule=5).count()

    reviewed_strings = Translation.objects.filter(
        resource__in=resources,
        language=target_language,
        source_entity__pluralized=False,
        rule=5,
        reviewed=True).count()

    # Include counting of pluralized entities
    for pluralized_entity in SourceEntity.objects.filter(resource__in = resources,
                                                         pluralized=True):
        plurals_translated = Translation.objects.filter(
            language=target_language,
            source_entity=pluralized_entity).count()
        if plurals_translated == len(target_language.get_pluralrules()):
            translated_strings += 1

    if len(resources) > 1:
        translation_resource = None
    else:
        translation_resource = resources[0]

    contributors = User.objects.filter(pk__in=Translation.objects.filter(
        resource__in = resources,
        language = target_language,
        rule = 5).values_list("user", flat=True))

    lotte_init.send(None, request=request, resources=resources,
        language=target_language)

    if target_language in [team.language for team in project.available_teams]:
        team_language = True
    else:
        team_language = False

    GtModel = get_model('gtranslate', 'Gtranslate')
    try:
        auto_translate = GtModel.objects.get(project=project)
    except GtModel.DoesNotExist:
        auto_translate = None
    """
    if cache.get('lotte_%s' % request.session.session_key, None):
        cache.delete('lotte_%s' % request.session.session_key)
    """

    #Set rtl to True if target_language is an RTL language
    rtl = False
    if target_language.code in settings.RTL_LANGUAGE_CODES:
        rtl = True

    return render_to_response("translate.html", {
        'project': project,
        'resource': translation_resource,
        'target_language': target_language,
        'translated_strings': translated_strings,
        'reviewed_strings': reviewed_strings,
        'untranslated_strings': total_strings - translated_strings,
        'contributors': contributors,
        'resources': resources,
        'resource_slug': resource_slug,
        'languages': Language.objects.all(),
        'auto_translate': auto_translate,
        'spellcheck_supported_langs': SPELLCHECK_SUPPORTED_LANGS,
        'team_language': team_language,
        'RTL': rtl,
    }, context_instance = RequestContext(request))
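
The messages.info() and messages.error() calls above rely on Python joining adjacent string literals, so makemessages extracts each multi-line message as a single msgid. A minimal sketch of the same pattern with a dynamic value kept out of the translatable literal (the count placeholder is added for illustration and is not part of the transifex view):

from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _

def redirect_with_notice(request, project_slug, source_count):
    # Adjacent literals are concatenated at compile time, so gettext sees one
    # msgid; the count is interpolated after translation via a placeholder.
    messages.info(request, _(
        "There are %(count)s source languages for this project. "
        "You will only be able to translate resources for one "
        "source language at a time.") % {'count': source_count})
    return HttpResponseRedirect(reverse('project_detail', args=[project_slug]))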

Example 48

Project: transifex
Source File: qt.py
View license
    def _parse(self, is_source, lang_rules):
        """
        Parses Qt file and exports all entries as GenericTranslations.
        """
        def clj(s, w):
            return s[:w].replace("\n", " ").ljust(w)

        if lang_rules:
            nplural = len(lang_rules)
        else:
            nplural = self.language.get_pluralrules_numbers()

        try:
            doc = xml.dom.minidom.parseString(
                self.content.encode(self.format_encoding)
            )
        except Exception, e:
            logger.warning("QT parsing: %s" % e.message, exc_info=True)
            raise LinguistParseError(_(
                "Your file doesn't seem to contain valid xml: %s!" % e.message
            ))
        if hasattr(doc, 'doctype') and hasattr(doc.doctype, 'name'):
            if doc.doctype.name != "TS":
                raise LinguistParseError(_("Incorrect doctype!"))
        else:
            raise LinguistParseError(_("Uploaded file has no Doctype!"))
        root = doc.documentElement
        if root.tagName != "TS":
            raise LinguistParseError(_("Root element is not 'TS'"))

        # This needed to be commented out due to the 'is_source' parameter.
        # When is_source=True we return the value of the <source> node as the
        # translation for the given file, instead of the <translation> node(s).
        #stringset.target_language = language
        #language = get_attribute(root, "language", die = STRICT)

        i = 1
        # There can be many <message> elements, they might have
        # 'encoding' or 'numerus' = 'yes' | 'no' attributes
        # if 'numerus' = 'yes' then 'translation' element contains 'numerusform' elements
        for context in root.getElementsByTagName("context"):
            context_name_element = _getElementByTagName(context, "name")
            if context_name_element.firstChild:
                if context_name_element.firstChild.nodeValue:
                    context_name = escape_context(
                        [context_name_element.firstChild.nodeValue])
                else:
                    context_name = []
            else:
                context_name = []

            for message in context.getElementsByTagName("message"):
                occurrences = []

                # NB! There can be zero to many <location> elements, but all
                # of them must have 'filename' and 'line' attributes
                for location in message.getElementsByTagName("location"):
                    if location.attributes.has_key("filename") and \
                        location.attributes.has_key("line"):
                        occurrences.append("%s:%i" % (
                            location.attributes["filename"].value,
                            int(location.attributes["line"].value)))
                    elif STRICT:
                        raise LinguistParseError(_("Malformed 'location' element"))

                pluralized = False
                if message.attributes.has_key("numerus") and \
                    message.attributes['numerus'].value=='yes':
                    pluralized = True

                source = _getElementByTagName(message, "source")
                try:
                    translation = _getElementByTagName(message, "translation")
                except LinguistParseError:
                    translation = None
                try:
                    ec_node = _getElementByTagName(message, "extracomment")
                    extracomment = _getText(ec_node.childNodes)
                except LinguistParseError, e:
                    extracomment = None

                # <comment> in ts files are also used to distinguish entries,
                # so we append it to the context to make the entry unique
                try:
                    c_node = _getElementByTagName(message, "comment")
                    comment_text = _getText(c_node.childNodes)
                    if comment_text:
                        comment = escape_context([comment_text])
                    else:
                        comment = []
                except LinguistParseError, e:
                    comment = []

                status = None
                if source.firstChild:
                    sourceString = _getText(source.childNodes)
                else:
                    sourceString = None # WTF?

                # Check whether the message is using logical id
                if message.attributes.has_key("id"):
                    sourceStringText = sourceString
                    sourceString = message.attributes['id'].value
                else:
                    sourceStringText = None

                same_nplural = True
                obsolete, fuzzy = False, False
                messages = []

                if is_source:
                    if translation and translation.attributes.has_key("variants") and \
                      translation.attributes['variants'].value == 'yes':
                        logger.error("Source file has unsupported"
                            " variants.")
                        raise LinguistParseError(_("Qt Linguist variants are"
                            " not yet supported."))

                    # Skip obsolete strings.
                    if translation and translation.attributes.has_key("type"):
                        status = translation.attributes["type"].value.lower()
                        if status == "obsolete":
                            continue

                    translation_text = None
                    if translation:
                        translation_text = _getText(translation.childNodes)
                    messages = [(5, translation_text or sourceStringText or sourceString)]
                    # remove unfinished/obsolete attrs from template
                    if translation and translation.attributes.has_key("type"):
                        status = translation.attributes["type"].value.lower()
                        if status == "unfinished":
                            del translation.attributes["type"]
                    if pluralized:
                        if translation:
                            try:
                                numerusforms = translation.getElementsByTagName('numerusform')
                                messages = []
                                for n,f in enumerate(numerusforms):
                                    if numerusforms[n].attributes.has_key("variants") and \
                                      numerusforms[n].attributes['variants'].value == 'yes':
                                        logger.error("Source file has unsupported"
                                            " variants.")
                                        raise LinguistParseError(_("Source file"
                                            " could not be imported: Qt Linguist"
                                            " variants are not supported."))
                                for n,f in enumerate(numerusforms):
                                    if numerusforms[n].attributes.has_key("variants") and \
                                      numerusforms[n].attributes['variants'].value == 'yes':
                                        continue
                                for n,f in enumerate(numerusforms):
                                    nf=numerusforms[n]
                                    messages.append((nplural[n], _getText(nf.childNodes)
                                        or sourceStringText or sourceString ))
                            except LinguistParseError, e:
                                pass
                        else:
                            plural_numbers = self.language.get_pluralrules_numbers()
                            for p in plural_numbers:
                                if p != 5:
                                    messages.append((p, sourceStringText or sourceString))

                elif translation and translation.firstChild:
                    # For messages with variants set to 'yes', we skip them
                    # altogether. We can't support variants at the moment...
                    if translation.attributes.has_key("variants") and \
                      translation.attributes['variants'].value == 'yes':
                        continue

                    # Skip obsolete strings.
                    if translation.attributes.has_key("type"):
                        status = translation.attributes["type"].value.lower()
                        if status == "obsolete":
                            continue

                    if translation.attributes.has_key("type"):
                        status = translation.attributes["type"].value.lower()
                        if status == "unfinished" and\
                          not pluralized:
                            suggestion = GenericTranslation(sourceString,
                                _getText(translation.childNodes),
                                context=context_name + comment,
                                occurrences= ";".join(occurrences))
                            self.suggestions.add(suggestion)
                        else:
                            logger.error("Element 'translation' attribute "\
                                "'type' is neither 'unfinished' nor 'obsolete'")

                        continue

                    if not pluralized:
                        messages = [(5, _getText(translation.childNodes))]
                    else:
                        numerusforms = translation.getElementsByTagName('numerusform')
                        try:
                            for n,f  in enumerate(numerusforms):
                                if numerusforms[n].attributes.has_key("variants") and \
                                  numerusforms[n].attributes['variants'].value == 'yes':
                                    raise StopIteration
                        except StopIteration:
                            continue
                        if nplural:
                            nplural_file = len(numerusforms)
                            if len(nplural) != nplural_file:
                                logger.error("Passed plural rules has nplurals=%s"
                                    ", but '%s' file has nplurals=%s. String '%s'"
                                    "skipped." % (nplural, self.filename,
                                     nplural_file, sourceString))
                                same_nplural = False
                        else:
                            same_nplural = False

                        if not same_nplural:
                            # If we're missing plurals, skip them altogether
                            continue

                        for n,f  in enumerate(numerusforms):
                            nf=numerusforms[n]
                            if nf.firstChild:
                                messages.append((nplural[n], _getText(nf.childNodes)))

                    # NB! If <translation> doesn't have type attribute, it means that string is finished

                if sourceString and messages:
                    for msg in messages:
                        self._add_translation_string(
                            sourceString, msg[1],
                            context = context_name + comment, rule=msg[0],
                            occurrences = ";".join(occurrences),
                            pluralized=pluralized, fuzzy=fuzzy,
                            comment=extracomment, obsolete=obsolete)
                i += 1

                if is_source:
                    if sourceString is None:
                        continue
                    if message.attributes.has_key("numerus") and \
                        message.attributes['numerus'].value=='yes' and translation:
                            numerusforms = translation.getElementsByTagName('numerusform')
                            for n,f in enumerate(numerusforms):
                                f.appendChild(doc.createTextNode(
                                        "%(hash)s_pl_%(key)s" %
                                        {
                                            'hash': hash_tag(sourceString,
                                                context_name + comment),
                                            'key': n
                                        }
                                ))
                    else:
                        if not translation:
                            translation = doc.createElement("translation")

                        # Delete all child nodes. This is useful for xml-like
                        # strings (e.g. html) where the translation text is split
                        # in multiple nodes.
                        translation.childNodes = []

                        translation.appendChild(doc.createTextNode(
                                ("%(hash)s_tr" % {'hash': hash_tag(
                                    sourceString, context_name + comment)})
                        ))
        return doc
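
The parser above raises LinguistParseError with messages wrapped in _(), which is the right choice for text built at raise time (a lazy translation would buy nothing here), but it again formats inside the call: _("Your file doesn't seem to contain valid xml: %s!" % e.message). A minimal sketch keeping the placeholder inside the literal (ParseError is a stand-in class, not the transifex exception):

from django.utils.translation import ugettext as _

class ParseError(Exception):
    """Stand-in for a parse error carrying a user-facing message."""

def require_doctype(doc, expected="TS"):
    # Translate a constant literal, then interpolate the dynamic values.
    name = getattr(getattr(doc, 'doctype', None), 'name', None)
    if name != expected:
        raise ParseError(
            _("Incorrect doctype: expected %(expected)s, got %(found)s") % {
                'expected': expected, 'found': name})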

Example 49

Project: treeio
Source File: modules.py
View license
@contextfilter
def humanize_datetime(context, value):
    """
    Finds the difference between the datetime value given and now()
    and returns appropriate humanize form
    """

    request = context['request']

    user = None
    if request.user.username:
        try:
            user = request.user.profile
        except:
            pass

    # timezone
    default_timezone = settings.HARDTREE_SERVER_DEFAULT_TIMEZONE
    try:
        conf = ModuleSetting.get('default_timezone')[0]
        default_timezone = conf.value
    except:
        pass

    try:
        conf = ModuleSetting.get('default_timezone', user=user)[0]
        default_timezone = conf.value
    except Exception:
        default_timezone = getattr(
            settings, 'HARDTREE_SERVER_TIMEZONE')[default_timezone][0]

    all_timezones = getattr(settings, 'HARDTREE_SERVER_TIMEZONE', [
                            (1, '(GMT-11:00) International Date Line West')])
    title = all_timezones[int(default_timezone)][1]
    GMT = title[4:10]  # with sign e.g. +06:00
    sign = GMT[0:1]  # + or -
    hours = int(GMT[1:3])  # e.g. 06
    mins = int(GMT[4:6])

    now = datetime.now()

    if value:
        if sign == "-":
            value = value - timedelta(hours=hours, minutes=mins)
            now = now - timedelta(hours=hours, minutes=mins)
        else:
            value = value + timedelta(hours=hours, minutes=mins)
            now = now + timedelta(hours=hours, minutes=mins)

    if isinstance(value, timedelta):
        delta = value
    elif isinstance(value, datetime):
        delta = now - value
    else:
        delta = None

    if delta:
        if delta.days > 6:                                      # May 15, 17:55
            month = None
            if value.strftime("%b") == 'Jan':
                month = _("Jan")
            elif value.strftime("%b") == 'Feb':
                month = _("Feb")
            elif value.strftime("%b") == 'Mar':
                month = _("Mar")
            elif value.strftime("%b") == 'Apr':
                month = _("Apr")
            elif value.strftime("%b") == 'May':
                month = _("May")
            elif value.strftime("%b") == 'Jun':
                month = _("Jun")
            elif value.strftime("%b") == 'Jul':
                month = _("Jul")
            elif value.strftime("%b") == 'Aug':
                month = _("Aug")
            elif value.strftime("%b") == 'Sep':
                month = _("Sep")
            elif value.strftime("%b") == 'Oct':
                month = _("Oct")
            elif value.strftime("%b") == 'Nov':
                month = _("Nov")
            elif value.strftime("%b") == 'Dec':
                month = _("Dec")
            return month + value.strftime(" %d, %H:%M")

        if delta.days > 1:                                      # Wednesday
            if value.strftime("%A") == 'Monday':
                return _("Monday")
            elif value.strftime("%A") == 'Tuesday':
                return _("Tuesday")
            elif value.strftime("%A") == 'Wednesday':
                return _("Wednesday")
            elif value.strftime("%A") == 'Thursday':
                return _("Thursday")
            elif value.strftime("%A") == 'Friday':
                return _("Friday")
            elif value.strftime("%A") == 'Saturday':
                return _("Saturday")
            elif value.strftime("%A") == 'Sunday':
                return _("Sunday")

        elif delta.days == 1:
            return _("yesterday")                               # yesterday
        elif delta.seconds >= 7200:
            return str(delta.seconds / 3600) + _(" hours ago")  # 3 hours ago
        elif delta.seconds >= 3600:
            return _("1 hour ago")                              # 1 hour ago
        elif delta.seconds > MOMENT:
            # 29 minutes ago
            return str(delta.seconds / 60) + _(" minutes ago")
        else:
            return _("a moment ago")                            # a moment ago
        return djangodate(value)
    else:
        return str(value)
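
The elif ladders above exist only to route each English month and weekday name through _() so it lands in the .po file. Django ships lazily translated name tables in django.utils.dates that reduce this to a dictionary lookup; a minimal sketch, assuming the same datetime value and noting that MONTHS holds full names ("January") rather than the abbreviations used above:

from django.utils.dates import MONTHS, WEEKDAYS  # lazily translated names

def month_timestamp(value):
    # MONTHS is keyed 1..12 and already wrapped for translation, so no
    # per-name _() calls are needed.
    return u"%s%s" % (MONTHS[value.month], value.strftime(" %d, %H:%M"))

def weekday_name(value):
    # WEEKDAYS is keyed 0..6 with Monday == 0, matching datetime.weekday().
    return u"%s" % WEEKDAYS[value.weekday()]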

Example 50

Project: daywatch
Source File: views.py
View license
@catch_error
@log_activity
@login_required
@permission_required
def history_listings_div(request):
    #prepare the params
    context = get_status(request)
    form = HistoryPanelExportForm(user=request.user, data=request.GET)

    if form.is_valid():
        request.session['form_session'] = form.cleaned_data
        period = form.cleaned_data['period']
        style = get_style()
        style_ref = get_style_ref(style)
        user = request.user

        # Convert date parameters
        end_date = datetime.now()
        if period == 'last_30_d':
            start_date = datetime.now() - timedelta(days=30)
        elif period == 'last_15_d':
            start_date = datetime.now() - timedelta(days=15)
        elif period == 'last_7_d':
            start_date = datetime.now() - timedelta(days=7)
        elif period == 'custom':
            d = form.cleaned_data['start_date']
            start_date = datetime(d.year, d.month, d.day)
            d = form.cleaned_data['end_date']
            end_date = datetime(d.year, d.month, d.day, 23, 59)

        country = form.cleaned_data['country']
        context['use_local_currency'] = country in LOCAL_CURRENCY_COUNTRIES
        context['local_currency'] = CURRENCY_DICT[country]

        history_limit = 0
        out_of_range_error = False
        out_of_range_warning = False
        if not user.has_full_access_for_country(country):
            if user.week_history_limit > 0:
                #user is history limited, limit start and end dates
                week_limit = user.week_history_limit
                history_limit = datetime.now() - timedelta(weeks=week_limit)
                if end_date < history_limit:
                    out_of_range_error = True
                elif start_date < history_limit:
                    start_date = history_limit
                    out_of_range_warning = True
                history_limit = history_limit.date()

        # Get deals for this query
        if not out_of_range_error:
            player_ids = form.cleaned_data['players']
            player_ids = [int(p_id) for p_id in player_ids]

            if form.cleaned_data['all_categories']:
                items = DayWatchItem.objects.filter(
                    site__id__in=player_ids,
                    date_time__gte=start_date,
                    date_time__lte=end_date
                )
                categories = Category.objects.all()
                category_ids = []
                for category in categories:
                    if category.name != 'root':
                        category_ids.append(category.id)
            else:
                category_ids = form.cleaned_data['categories']
                category_ids = [int(c_id) for c_id in category_ids]
                items = DayWatchItem.objects.filter(
                    site__id__in=player_ids,
                    category__id__in=category_ids,
                    date_time__gte=start_date,
                    date_time__lte=end_date
                )
        else:
            items = DayWatchItem.objects.none()

        # Prepare and return results to upper layers
        context['items'] = items
        context['country'] = country
        context['style_ref'] = style_ref

        context['history_limit'] = history_limit
        context['out_of_range_error'] = out_of_range_error
        context['out_of_range_warning'] = out_of_range_warning

    # excel button clicked
    if form.data.get('excel'):
        if not request.user.premium_access:
            msg = " Sorry, Excel exports are limited to Premium Users."
            return warningResponse(request, _(msg))

        if not user.is_staff:
            # We limit exportable deals to a month and a half from today
            floor_date = datetime.now() - timedelta(weeks=7)
            context['items'] = context['items'].filter(
                                        start_date_time__gte=floor_date)

            if start_date < floor_date:
                context['floor_date_warn'] = floor_date

        filename = "DayWatch_report_%s" % (
            datetime.now().strftime("%d-%m-%Y_%H-%M"),
        )
        result = render_to_string(
            'includes/history_table_xls.html',
            context,
            context_instance=RequestContext(request)
        )
        response = HttpResponse(
            result,
            content_type='application/vnd.ms-excel;charset=utf-8'
        )
        content_disposition = 'attachment; filename="%s.xls"' % (filename,)
        response['Content-Disposition'] = content_disposition

        return response

    # Normal results rendering
    # col_index_name_map is required for correct sorting behavior
    index_name_map = {
        0: 'offer',
        1: 'company',
        2: 'start_date_time',
        3: 'end_date_time',
        4: 'price',
        5: 'price_usd',
        6: 'discount',
        7: 'category',
        8: 'is_main_deal',
        9: 'sold_count',
        10: 'total_sales_usd',
        11: 'merchant_name',
    }
    if context['use_local_currency']:
        index_name_map[10] = 'total_sales_local'

    json_template = 'includes/history_table_json.txt'

    return get_datatables_records(
        request, context['items'],
        index_name_map, context, json_template
    )
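
One detail in the excel branch above: the string is assigned to msg and then passed as _(msg). makemessages only extracts literal arguments to _(), so that message will not appear in the .po file unless it is marked somewhere else. A minimal sketch of the two usual fixes (warningResponse is passed in here only to keep the sketch self-contained; in the daywatch view it is an imported helper):

from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop

# Either pass the literal directly so it is both extracted and translated...
def deny_export(request, warningResponse):
    return warningResponse(
        request, _("Sorry, Excel exports are limited to Premium Users."))

# ...or mark the literal with ugettext_noop() for extraction and translate
# the variable at display time.
EXPORT_LIMIT_MSG = ugettext_noop(
    "Sorry, Excel exports are limited to Premium Users.")

def deny_export_deferred(request, warningResponse):
    return warningResponse(request, _(EXPORT_LIMIT_MSG))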