django.template.defaultfilters.slugify

Here are examples of the Python API django.template.defaultfilters.slugify taken from open source projects.

185 Examples
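
For orientation, here is a minimal sketch of what the filter itself does (lowercases, drops non-alphanumerics, collapses whitespace and hyphens); the inputs are illustrative and it should run in any environment with Django installed:

from django.template.defaultfilters import slugify

print(slugify("Hello, Django World!"))   # hello-django-world
print(slugify(" Ünicode & Spaces "))     # unicode-spaces (non-ASCII is normalized or dropped)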

Example 101

Project: ella Source File: models.py
Function: save
    def save(self, **kwargs):
        """Overrides models.Model.save.

        - Generates slug.
        - Saves image file.
        """
        if not self.width or not self.height:
            self.width, self.height = self.image.width, self.image.height

        # prefilling the slug with the ID requires a double save
        if not self.id:
            img = self.image

            # store dummy values first...
            w, h = self.width, self.height
            self.image = ''
            self.width, self.height = w, h
            self.slug = ''

            super(Photo, self).save(force_insert=True)

            # ... so that we can generate the slug
            self.slug = str(self.id) + '-' + slugify(self.title)
            # truncate slug in order to fit in an ImageField and/or paths in Redirects
            self.slug = self.slug[:64]
            # ... that will be used in the image's upload_to function
            self.image = img
            # and the image will be saved properly
            super(Photo, self).save(force_update=True)
        else:
            try:
                old = Photo.objects.get(pk=self.pk)

                force_update = True
                # delete the related formatedphoto objects if a new image was uploaded
                if old.image != self.image:
                    for f_photo in self.formatedphoto_set.all():
                        f_photo.delete()
            except Photo.DoesNotExist:
                # somebody is just trying to create a new object with the given PK
                force_update = False

            super(Photo, self).save(force_update=force_update)
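
Illustratively, a Photo that ends up with pk 42 and the title "Summer at the Lake" (both values hypothetical) would get:

from django.template.defaultfilters import slugify

slug = str(42) + '-' + slugify("Summer at the Lake")   # '42-summer-at-the-lake'
slug = slug[:64]                                       # truncated to fit image paths / Redirects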

Example 102

Project: wagtailpress Source File: wpimport.py
    def import_entry(self, title, content, item_node):
        """
        Import an entry. Some data are missing, such as related entries,
        start_publication and end_publication.
        start_publication and creation_date will use the same value,
        which is always WordPress's $post->post_date.
        """
        creation_date = datetime.strptime(
            item_node.find('{%s}post_date' % WP_NS).text,
            '%Y-%m-%d %H:%M:%S')
        if settings.USE_TZ:
            creation_date = timezone.make_aware(
                creation_date, pytz.timezone('GMT'))

        excerpt = strip_tags(item_node.find(
            '{%sexcerpt/}encoded' % WP_NS).text or '')
        if not excerpt:
            if self.auto_excerpt:
                excerpt = Truncator(strip_tags(content)).words(50)
            else:
                excerpt = ''

        # Prefer this over item_node.find('{%s}post_name' % WP_NS).text
        # because the stored slug may not be well formatted.
        slug = slugify(title)[:255] or 'post-%s' % item_node.find(
            '{%s}post_id' % WP_NS).text

        entry_dict = {
            'title': title,
            'content': content,
            'excerpt': excerpt,
            'tags': ', '.join(self.get_entry_tags(item_node.findall(
                'category'))),
            'status': self.REVERSE_STATUS[item_node.find(
                '{%s}status' % WP_NS).text],
            'comment_enabled': item_node.find(
                '{%s}comment_status' % WP_NS).text == 'open',
            'pingback_enabled': item_node.find(
                '{%s}ping_status' % WP_NS).text == 'open',
            'featured': item_node.find('{%s}is_sticky' % WP_NS).text == '1',
            'password': item_node.find('{%s}post_password' % WP_NS).text or '',
            'login_required': item_node.find(
                '{%s}status' % WP_NS).text == 'private',
            'last_update': timezone.now()}
        entry_dict['trackback_enabled'] = entry_dict['pingback_enabled']

        entry, created = Entry.objects.get_or_create(
            slug=slug, creation_date=creation_date,
            defaults=entry_dict)
        if created:
            entry.categories.add(*self.get_entry_categories(
                item_node.findall('category')))
            entry.authors.add(self.authors[item_node.find(
                '{http://purl.org/dc/elements/1.1/}creator').text])
            entry.sites.add(self.ROOT_PAGE)

        return entry, created

Example 103

Project: write-it Source File: messages_test.py
    def test_two_messages_with_the_same_subject_but_different_slug(self):
        message1 = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="[email protected]",
            subject='Same subject hey',
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
            )

        message2 = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="[email protected]",
            subject='Same subject hey',
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
            )

        message3 = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="[email protected]",
            subject='Same subject hey',
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
            )

        self.assertEquals(message1.slug, slugify(message1.subject))
        self.assertEquals(message2.slug, slugify(message2.subject) + "-2")
        self.assertEquals(message3.slug, slugify(message3.subject) + "-3")
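
Concretely, the assertions above expect the following values; this small check is illustrative and not part of the write-it test suite:

from django.template.defaultfilters import slugify

base = slugify("Same subject hey")                 # 'same-subject-hey'
expected_slugs = [base, base + "-2", base + "-3"]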

Example 104

Project: django-feedme Source File: utils.py
Function: unique_slugify
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
                   slug_separator='-'):
    """
    Calculates and stores a unique slug of ``value`` for an instance.

    ``slug_field_name`` should be a string matching the name of the field to
    store the slug in (and the field to check against for uniqueness).

    ``queryset`` usually doesn't need to be explicitly provided - it'll default
    to using the ``.all()`` queryset from the model's default manager.
    """
    slug_field = instance._meta.get_field(slug_field_name)

    slug = getattr(instance, slug_field.attname)
    slug_len = slug_field.max_length

    # Sort out the initial slug, limiting its length if necessary.
    slug = slugify(value)
    if slug_len:
        slug = slug[:slug_len]
    slug = _slug_strip(slug, slug_separator)
    original_slug = slug

    # Create the queryset if one wasn't explicitly provided and exclude the
    # current instance from the queryset.
    if queryset is None:
        queryset = instance.__class__._default_manager.all()
    if instance.pk:
        queryset = queryset.exclude(pk=instance.pk)

    # Find a unique slug. If one matches, add '-2' to the end and try again
    # (then '-3', etc).
    next = 2
    while not slug or queryset.filter(**{slug_field_name: slug}):
        slug = original_slug
        end = '%s%s' % (slug_separator, next)
        if slug_len and len(slug) + len(end) > slug_len:
            slug = slug[:slug_len-len(end)]
            slug = _slug_strip(slug, slug_separator)
        slug = '%s%s' % (slug, end)
        next += 1

    setattr(instance, slug_field.attname, slug)
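
A hypothetical way to wire this helper into a model's save(); the Article model below is illustrative only and not part of django-feedme:

from django.db import models


class Article(models.Model):
    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=50, blank=True)

    def save(self, *args, **kwargs):
        if not self.slug:
            # fills self.slug with a unique, length-limited slug of the title
            unique_slugify(self, self.title)
        super(Article, self).save(*args, **kwargs)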

Example 105

Project: django-cities Source File: cities.py
    def import_city(self):
        uptodate = self.download_once('city')
        if uptodate and not self.force:
            return
        data = self.get_data('city')

        total = sum(1 for _ in data)

        data = self.get_data('city')

        self.build_country_index()
        self.build_region_index()

        self.logger.info("Importing city data")
        for item in tqdm(data, total=total, desc="Importing cities"):
            if not self.call_hook('city_pre', item):
                continue

            if item['featureCode'] not in city_types:
                continue

            city = City()
            try:
                city.id = int(item['geonameid'])
            except:
                continue
            city.name = item['name']
            city.kind = item['featureCode']
            city.name_std = item['asciiName']
            city.slug = slugify(city.name_std)
            city.location = Point(float(item['longitude']), float(item['latitude']))
            city.population = int(item['population'])
            city.timezone = item['timezone']
            try:
                city.elevation = int(item['elevation'])
            except:
                pass

            country_code = item['countryCode']
            try:
                country = self.country_index[country_code]
                city.country = country
            except:
                self.logger.warning("City: %s: Cannot find country: %s -- skipping",
                                    city.name, country_code)
                continue

            region_code = item['admin1Code']
            try:
                region = self.region_index[country_code + "." + region_code]
                city.region = region
            except:
                if IGNORE_EMPTY_REGIONS:
                    city.region = None
                else:
                    print("{}: {}: Cannot find region: {} -- skipping", country_code, city.name, region_code)
                    self.logger.warning("%s: %s: Cannot find region: %s -- skipping",
                                        country_code, city.name, region_code)
                    continue

            subregion_code = item['admin2Code']
            try:
                subregion = self.region_index[country_code + "." + region_code + "." + subregion_code]
                city.subregion = subregion
            except:
                if subregion_code:
                    self.logger.warning("%s: %s: Cannot find subregion: %s -- skipping",
                                        country_code, city.name, subregion_code)
                pass

            if not self.call_hook('city_post', city, item):
                continue
            city.save()
            self.logger.debug("Added city: %s", city)

Example 106

Project: django-supertagging Source File: __init__.py
def _processTopics(field, data, obj, ctype, tags, date):
    """
    Process topics. This operation is similar to _processEntities; the only
    difference is that there are no instances.
    """
    processed_tags = []
    for di in data:
        di.pop('__reference')
        
        calais_id = re.match(REF_REGEX, str(di.pop('category'))).group('key')
        stype = 'Topic'
        display_name = di.pop('categoryName', '')
        name = display_name.lower()
        
        if tags and name not in tags:
            continue
        rel = int(float(str(di.pop('score', '0'))) * 1000)
        
        slug = slugify(name)
        tag = None
        try:
            tag = SuperTag.objects.get_by_name(name__iexact=name)
        except SuperTag.DoesNotExist:
            try:
                tag = SuperTag.objects.get(calais_id=calais_id)
            except SuperTag.DoesNotExist:
                kwargs = {
                    'calais_id': calais_id,
                    'slug': slug,
                    'stype': stype,
                    'name': name,
                }
                if settings.INCLUDE_DISPLAY_FIELDS:
                    kwargs['display_name'] = display_name
                tag = SuperTag.objects.create_alternate(**kwargs)
        except SuperTag.MultipleObjectsReturned:
            tag = SuperTag.objects.filter(name__iexact=name)[0]
            
        tag = tag.substitute or tag
        
        if not tag.enabled:
            continue

        tag.properties = di
        tag.save()

        SuperTaggedItem.objects.create(tag=tag, content_type=ctype, 
            object_id=obj.pk, field=field, relevance=rel, item_date=date)

        processed_tags.append(tag)
    return processed_tags

Example 107

Project: feedhq Source File: views.py
def save_outline(user, category, outline, existing):
    count = 0
    try:
        opml_tag = outline._tree.getroot().tag == 'opml'
    except AttributeError:
        opml_tag = False
    if (
        not hasattr(outline, 'xmlUrl') and
        hasattr(outline, 'title') and
        outline._outlines
    ):
        if opml_tag:
            cat = None
            created = False
        else:
            slug = slugify(outline.title)
            if not slug:
                slug = 'unknown'
            title = truncate(outline.title, 1023)
            slug = slug[:50]
            cat, created = user.categories.get_or_create(
                slug=slug, defaults={'name': title},
            )
        for entry in outline._outlines:
            count += save_outline(user, cat, entry, existing)
        if created and cat.feeds.count() == 0:
            cat.delete()

    for entry in outline:
        count += save_outline(user, category, entry, existing)

    if (hasattr(outline, 'xmlUrl')):
        if outline.xmlUrl not in existing:
            existing.add(outline.xmlUrl)
            title = getattr(outline, 'title',
                            getattr(outline, 'text', _('No title')))
            title = truncate(title, 1023)
            user.feeds.create(category=category, url=outline.xmlUrl,
                              name=title)
            count += 1
    return count

Example 108

Project: zorna Source File: views.py
@login_required()
def add_new_story(request):
    allowed_objects = get_allowed_objects(
        request.user, ArticleCategory, 'writer')
    if len(allowed_objects) == 0:
        return HttpResponseRedirect('/')

    if request.method == 'POST':
        form_story = ArticleStoryForm(request.POST, request.FILES, request=request)
        fa_set = formset_factory(ArticleAttachmentsForm, extra=2)
        form_attachments_set = fa_set(request.POST, request.FILES)
        if form_story.is_valid():
            if 'image' in request.FILES:
                image_file = request.FILES['image']
                mimetype = image_file.content_type
            else:
                image_file = None
                mimetype = ''

            if image_file:
                story = form_story.save()
                upload_path = get_upload_articles_images()
                path_src = u"%s/%s" % (upload_path, story.image)
                path_dest = u"%s/%s" % (upload_path, story.pk)
                os.makedirs(path_dest)
                shutil.move(path_src, path_dest)
                s = os.path.splitext(image_file.name)
                filename = u"%s%s" % (slugify(s[0]), s[1])
                story.image = "%s/%s" % (story.pk, filename)
            else:
                story = form_story.save(commit=False)

            story.mimetype = mimetype
            story.owner = request.user
            story.save()

            story.categories.clear()
            selected_categories = request.POST.getlist('_selected_action')
            story.categories = selected_categories

            if form_attachments_set.is_valid():
                for i in range(0, form_attachments_set.total_form_count()):
                    form = form_attachments_set.forms[i]
                    try:
                        file = request.FILES['form-' + str(
                            i) + '-attached_file']
                        attachment = ArticleAttachments(description=form.cleaned_data[
                                                        'description'], mimetype=file.content_type)
                        attachment.article = story
                        attachment.save()
                        attachment.attached_file.save(file.name, file)
                    except:
                        pass

            tags = map(int, request.POST.getlist('article_tags[]'))
            tags = ArticleTags.objects.filter(pk__in=tags)
            story.tags.add(*tags)

            if story.categories:
                notify_users(request, story, story.categories.all(), True)

            return HttpResponseRedirect(reverse('writer_stories_list', args=[]))
    else:
        form_story = ArticleStoryForm(request=request)
        fa_set = formset_factory(ArticleAttachmentsForm, extra=2)
        form_attachments_set = fa_set()

    tags = ArticleTags.objects.all()
    context = RequestContext(request)
    extra_context = {'form_story': form_story,
                    'form_attachments': form_attachments_set,
                    'tags': tags,
                    }
    return render_to_response('articles/new_article.html', extra_context, context_instance=context)
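
For reference, the filename slugification above behaves roughly like this (the uploaded file name is hypothetical):

import os
from django.template.defaultfilters import slugify

s = os.path.splitext('Summer Vacation 2015.JPG')
filename = u"%s%s" % (slugify(s[0]), s[1])   # 'summer-vacation-2015.JPG' (extension kept as-is)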

Example 109

Project: geonode Source File: views.py
@login_required
def layer_upload(request, template='upload/layer_upload.html'):
    if request.method == 'GET':
        mosaics = Layer.objects.filter(is_mosaic=True).order_by('name')
        ctx = {
            'mosaics': mosaics,
            'charsets': CHARSETS,
            'is_layer': True,
        }
        return render_to_response(template, RequestContext(request, ctx))
    elif request.method == 'POST':
        form = NewLayerUploadForm(request.POST, request.FILES)
        tempdir = None
        errormsgs = []
        out = {'success': False}
        if form.is_valid():
            title = form.cleaned_data["layer_title"]
            # Replace dots in the filename (GeoServer REST API upload bug)
            # and avoid any other invalid characters.
            # Use the title if possible, otherwise fall back to the filename.
            if title is not None and len(title) > 0:
                name_base = title
            else:
                name_base, __ = os.path.splitext(
                    form.cleaned_data["base_file"].name)
            name = slugify(name_base.replace(".", "_"))
            try:
                # Moved this inside the try/except block because it can raise
                # exceptions when unicode characters are present.
                # This should be followed up in upstream Django.
                tempdir, base_file = form.write_files()
                saved_layer = file_upload(
                    base_file,
                    name=name,
                    user=request.user,
                    overwrite=False,
                    charset=form.cleaned_data["charset"],
                    abstract=form.cleaned_data["abstract"],
                    title=form.cleaned_data["layer_title"],
                    metadata_uploaded_preserve=form.cleaned_data["metadata_uploaded_preserve"]
                )
            except Exception as e:
                exception_type, error, tb = sys.exc_info()
                logger.exception(e)
                out['success'] = False
                out['errors'] = str(error)
                # Assign the error message to the latest UploadSession from that user.
                latest_uploads = UploadSession.objects.filter(user=request.user).order_by('-date')
                if latest_uploads.count() > 0:
                    upload_session = latest_uploads[0]
                    upload_session.error = str(error)
                    upload_session.traceback = traceback.format_exc(tb)
                    upload_session.context = log_snippet(CONTEXT_LOG_FILE)
                    upload_session.save()
                    out['traceback'] = upload_session.traceback
                    out['context'] = upload_session.context
                    out['upload_session'] = upload_session.id
            else:
                out['success'] = True
                if hasattr(saved_layer, 'info'):
                    out['info'] = saved_layer.info
                out['url'] = reverse(
                    'layer_detail', args=[
                        saved_layer.service_typename])
                upload_session = saved_layer.upload_session
                upload_session.processed = True
                upload_session.save()
                permissions = form.cleaned_data["permissions"]
                if permissions is not None and len(permissions.keys()) > 0:
                    saved_layer.set_permissions(permissions)
            finally:
                if tempdir is not None:
                    shutil.rmtree(tempdir)
        else:
            for e in form.errors.values():
                errormsgs.extend([escape(v) for v in e])
            out['errors'] = form.errors
            out['errormsgs'] = errormsgs
        if out['success']:
            status_code = 200
        else:
            status_code = 400
        return HttpResponse(
            json.dumps(out),
            content_type='application/json',
            status=status_code)
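
For a hypothetical layer title, the name computed above comes out as follows (slugify keeps underscores, so the dot-to-underscore swap survives):

from django.template.defaultfilters import slugify

name_base = "Roads.2016"                      # illustrative layer title
name = slugify(name_base.replace(".", "_"))   # 'roads_2016'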

Example 110

Project: zorna Source File: views.py
def fm_upload_view(request):
    pathfile = request.REQUEST.get('dir', None)
    ret = {}
    if pathfile:
        ppath = urllib.unquote(pathfile.rstrip('/'))
        path_dest = clean_path(ppath)
        if path_dest != ppath:
            ret['message'] = gettext(u"Incorrect path")
            ret['status'] = 'error'
        else:
            buser, bmanager = get_user_access_to_path(request.user, path_dest)
            folder = get_shared_folder(path_dest)
            if bmanager is False:
                ret['message'] = gettext(u"Access denied")
                ret['status'] = 'error'
            else:
                cdir_components = get_path_components(path_dest)
                if request.method == 'POST':
                    root_path = get_upload_library()
                    fa_set = formset_factory(ZornaFileAddForm, extra=2)
                    form_set = fa_set(request.POST, request.FILES)
                    if form_set.is_valid():
                        upload_files = {}
                        ifiles = []
                        for i in range(0, form_set.total_form_count()):
                            form = form_set.forms[i]
                            try:
                                f = request.FILES['form-' + str(i) + '-file']
                                s = os.path.splitext(f.name)
                                fname = u"%s%s" % (slugify(s[0]), s[1])
                                upload_files[fname] = {'pk': '', 'file': f, 'description':
                                                       form.cleaned_data['description'], 'tags': form.cleaned_data['tags']}
                            except:
                                continue
                        path = u"%s/%s" % (
                            root_path, urllib.unquote(path_dest))
                        bupload = False
                        for f, info in upload_files.iteritems():
                            file = ZornaFile(owner=request.user, modifier=request.user, description=info[
                                             'description'], tags=info['tags'], folder=path_dest.split('/')[0])
                            file.save()
                            destination = open(u"%s/%s,%s" % (
                                path, file.pk, f), 'wb+')
                            bupload = True
                            for chunk in info['file'].chunks():
                                destination.write(chunk)
                            destination.close()
                            ifiles.append({'name': f, 'description': info[
                                          'description']})
                        if bupload:
                            ret['message'] = gettext(
                                u"Files uploaded successfully")
                            # notify users
                            bnotify = request.POST.get('notify_users', 0)
                            if folder and folder.email_notification and (folder.email_notification == 1 or bnotify):
                                notify_users(request, folder, ifiles, True)
                        else:
                            ret['message'] = gettext(u"No file uploaded")
                        ret['status'] = 'success'
                        json_data = simplejson.dumps(ret)
                        return HttpResponse('<textarea>' + json_data + '</textarea>')
                    else:
                        ret['message'] = gettext(u"Invalid form")
                        ret['status'] = 'error'
                        t = loader.get_template(
                            'fileman/upload_docuements.html')
                        c = RequestContext(request, {'form_set':
                                           form_set, 'cdir_components': cdir_components, 'folder_dest': pathfile, 'folder_content_url': get_url_folder_content(pathfile)})
                        ret['data'] = t.render(c)
                        json_data = simplejson.dumps(ret)
                        return HttpResponse('<textarea>' + json_data + '</textarea>')
                else:
                    fa_set = formset_factory(ZornaFileAddForm, extra=2)
                    form_set = fa_set()
                    ret['status'] = 'success'

                t = loader.get_template('fileman/upload_docuements.html')
                c = RequestContext(request, {'form_set': form_set,
                                             'cdir_components': cdir_components,
                                             'folder_dest': pathfile,
                                             'folder_content_url': get_url_folder_content(pathfile),
                                             'folder': folder,
                                             'manager': bmanager, })
                ret['data'] = t.render(c)
    else:
        ret['message'] = gettext(u"Invalid file path")
        ret['status'] = 'error'

    json_data = simplejson.dumps(ret)
    return HttpResponse(json_data)

Example 111

Project: django-leonardo Source File: forms.py
Function: init
    def __init__(self, *args, **kwargs):
        request = kwargs.pop('request', None)
        model = kwargs.pop('model', None)

        super(WidgetForm, self).__init__(*args, **kwargs)

        if isinstance(model, Page):
            self.fields['parent'] = PageSelectField(
                label=_("Parent"), help_text=_("Parent Page"))
        else:
            self.fields['parent'].widget = forms.widgets.HiddenInput()

        initial = kwargs.get('initial', None)

        if initial and initial.get('id', None):
            widget = self._meta.model.objects.get(
                id=initial['id'])
            data = widget.dimensions

            self.init_content_themes()

        elif 'instance' in kwargs:
            widget = kwargs['instance']
            data = widget.dimensions

            self.init_content_themes()
        else:
            data = []
            widget = None

            # set defaults and delete id field
            self.init_themes()
            del self.fields['id']

        # get all fields for widget
        main_fields = self._meta.model.fields()
        main_fields.update({'label': 'label'})
        main_fields.pop("parent", None)

        self.helper.layout = Layout(
            TabHolder(
                Tab(self._meta.model._meta.verbose_name.capitalize(),
                    *self.get_main_fields(main_fields),
                    css_id='field-{}'.format(slugify(self._meta.model))
                    ),
                Tab(_('Styles'),
                    'base_theme', 'content_theme', 'color_scheme',
                    'prerendered_content',
                    Fieldset(_('Positions'), 'layout', 'align',
                             'vertical_align', 'parent'),
                    *self.get_id_field(),
                    css_id='theme-widget-settings'
                    ),
                Tab(_('Effects'),
                    'enter_effect_style', 'enter_effect_duration',
                    'enter_effect_delay', 'enter_effect_offset',
                    'enter_effect_iteration',
                    css_id='theme-widget-effects'
                    ),
            ),
            HTML(render_to_string('widget/_update_preview.html',
                                  {'class_name': ".".join([
                                      self._meta.model._meta.app_label,
                                      self._meta.model._meta.model_name])
                                   }))

        )

        self.fields['label'].widget = forms.TextInput(
            attrs={'placeholder': self._meta.model._meta.verbose_name})

        if request:
            _request = copy.copy(request)
            _request.POST = {}
            _request.method = 'GET'
            from .tables import WidgetDimensionTable
            dimensions = Tab(_('Dimensions'),
                             HTML(
                WidgetDimensionTable(_request,
                                     widget=widget,
                                     data=data).render()),
                             )
            self.helper.layout[0].append(dimensions)

        # hide label
        if 'text' in self.fields:
            self.fields['text'].label = ''

        # finally add custom tabs
        self.init_custom_tabs()

Example 112

Project: codesters Source File: utils.py
Function: unique_slugify
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
                   slug_separator='-'):
    """
    Calculates and stores a unique slug of ``value`` for an instance.

    ``slug_field_name`` should be a string matching the name of the field to
    store the slug in (and the field to check against for uniqueness).

    ``queryset`` usually doesn't need to be explicitly provided - it'll default
    to using the ``.all()`` queryset from the model's default manager.
    """
    slug_field = instance._meta.get_field(slug_field_name)

    slug = getattr(instance, slug_field.attname)
    slug_len = slug_field.max_length

    # Sort out the initial slug, limiting its length if necessary.
    slug = slugify(value)
    if slug_len:
        slug = slug[:slug_len]
    slug = _slug_strip(slug, slug_separator)
    original_slug = slug

    # Create the queryset if one wasn't explicitly provided and exclude the
    # current instance from the queryset.
    if queryset is None:
        queryset = instance.__class__._default_manager.all()
    if instance.pk:
        queryset = queryset.exclude(pk=instance.pk)

    # Find a unique slug. If one matches, add '-2' to the end and try again
    # (then '-3', etc).
    next = 2
    while not slug or queryset.filter(**{slug_field_name: slug}):
        slug = original_slug
        end = '%s%s' % (slug_separator, next)
        if slug_len and len(slug) + len(end) > slug_len:
            slug = slug[:slug_len-len(end)]
            slug = _slug_strip(slug, slug_separator)
        slug = '%s%s' % (slug, end)
        next += 1

    setattr(instance, slug_field.attname, slug)

Example 113

Project: django-staff Source File: models.py
def get_staff_updater(cls):
    """
    This returns a function for passing to a signal.
    """
    from django.core.exceptions import ImproperlyConfigured
    if not issubclass(cls, BaseStaffMember):
        raise ImproperlyConfigured("%s is not a subclass of StaffMember" % cls)

    def update_staff_member(sender, instance, created, *args, **kwargs):
        """
        Update the Staff Member instance when a User object is changed.
        """
        if instance.is_staff and not cls.objects.filter(user=instance).count():
            staffmember = cls(
                user=instance,
                is_active=True)
            staffmember.save()
        elif instance.is_staff:
            staffmembers = cls.objects.filter(user=instance)
            if len(staffmembers):
                staffmember = staffmembers[0]
                staffmember.is_active = True
                if instance.first_name != staffmember.first_name:
                    staffmember.first_name = instance.first_name
                    staffmember.slug = slugify('%s %s' % (
                        instance.first_name, instance.last_name))
                if instance.last_name != staffmember.last_name:
                    staffmember.last_name = instance.last_name
                    staffmember.slug = slugify('%s %s' % (
                        instance.first_name, instance.last_name))
                if instance.email != staffmember.email:
                    staffmember.email = instance.email
                staffmember.save()
        elif not instance.is_staff:
            # Make sure we deactivate any staff members associated with this user
            for staffmember in cls.objects.filter(user=instance):
                staffmember.is_active = False
                staffmember.save()
        from django.db import transaction
        transaction.commit_unless_managed()

    return update_staff_member

Example 114

Project: open-context-py Source File: models.py
    def make_slug(self, uri):
        """
        Makes a slug for the URI of the linked entity
        """
        actual_uri = uri
        uri_prefixes = {'http://www.cidoc-crm.org/rdfs/cidoc-crm': 'crm-rdf',
                        'http://erlangen-crm.org/current': 'cidoc-crm',
                        'http://collection.britishmuseum.org/description/thesauri': 'bm-thes',
                        'http://collection.britishmuseum.org/id/thesauri': 'bm-thes',
                        'http://concordia.atlantides.org': 'concordia',
                        'http://gawd.atlantides.org/terms': 'gawd',
                        'http://purl.org/dc/terms': 'dc-terms',
                        'http://dbpedia.org/resource': 'dbpedia',
                        'http://www.wikidata.org/wiki': 'wikidata',
                        'http://eol.org/pages': 'eol-p',
                        'http://opencontext.org/vocabularies/dinaa': 'dinaa',
                        'http://opencontext.org/vocabularies/oc-general': 'oc-gen',
                        'http://opencontext.org/vocabularies/open-context-zooarch': 'oc-zoo',
                        'http://orcid.org': 'orcid',
                        'http://pleiades.stoa.org/places': 'pleiades-p',
                        'http://pleiades.stoa.org/vocabularies/time-periods': 'pleiades-tp',
                        'http://purl.obolibrary.org/obo': 'obo',
                        'http://purl.org/NET/biol/ns': 'biol',
                        'http://sw.opencyc.org': 'opencyc',
                        'http://www.freebase.com/view/en': 'freebase',
                        'http://en.wiktionary.org/wiki': 'wiktionary',
                        'http://www.geonames.org': 'geonames',
                        'http://www.w3.org/2000/01/rdf-schema': 'rdfs',
                        'http://www.w3.org/2003/01/geo/wgs84_pos': 'geo',
                        'http://www.w3.org/2004/02/skos/core': 'skos',
                        'http://en.wikipedia.org/wiki': 'wiki',
                        'http://id.loc.gov/authorities/subjects': 'loc-sh',
                        'http://core.tdar.org/browse/site-name': 'tdar-kw-site',
                        'http://purl.org/ontology/bibo': 'bibo',
                        'http://creativecommons.org/ns#': 'cc',
                        'http://www.w3.org/2002/07/owl#': 'owl',
                        'http://creativecommons.org/licenses': 'cc-license',
                        'http://creativecommons.org/publicdomain': 'cc-publicdomain',
                        'http://n2t.net/ark:/99152/p0': 'periodo-p0',
                        'http://vocab.getty.edu/aat': 'getty-aat',
                        'http://nomisma.org/ontology': 'nmo',
                        'http://numismatics.org/ocre/id': 'ocre',
                        'http://portal.vertnet.org': 'vertnet-rec',
                        }
        for uri_root, uri_prefix in uri_prefixes.items():
            # replace the start of the URI with a prefix
            uri = uri.replace(uri_root, uri_prefix)
        uri = uri.replace('https://www.', '')
        uri = uri.replace('http://www.', '')
        uri = uri.replace('https://', '')
        uri = uri.replace('http://', '')
        uri = uri.replace('/', '-')
        uri = uri.replace('.', '-')
        uri = uri.replace('#', '-')
        uri = uri.replace('_', ' ')
        raw_slug = slugify(unidecode(uri[:55]))
        raw_slug = raw_slug.replace('---', '--')  # make sure there are no triple dashes; they conflict with Solr
        if(raw_slug[-1:] == '-'):
            raw_slug = raw_slug[:-1]
        if(raw_slug[-1:] == '-'):
            raw_slug = raw_slug + 'x'  # slugs don't end with dashes
        raw_slug = re.sub(r'([-]){3,}', r'--', raw_slug)  # collapse runs of 3 or more dashes
        slug = raw_slug
        try:
            slug_in = LinkEntity.objects.get(slug=raw_slug)
            if slug_in.uri != actual_uri:
                slug_exists = True
            else:
                slug_exists = False
        except LinkEntity.DoesNotExist:
            slug_exists = False
        if(slug_exists):
            try:
                slug_count = LinkEntity.objects.filter(slug__startswith=raw_slug).count()
            except LinkEntity.DoesNotExist:
                slug_count = 0
            if(slug_count > 0):
                slug = raw_slug + "-" + str(slug_count + 1)  # ok because a slug does not end in a dash
        return slug
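
A worked trace of make_slug for one URI (illustrative, and assuming no existing LinkEntity already uses the resulting slug):

# make_slug('http://purl.org/dc/terms/creator')
#   prefix substitution ('http://purl.org/dc/terms' -> 'dc-terms')   -> 'dc-terms/creator'
#   '/' replaced with '-'                                            -> 'dc-terms-creator'
#   slugify(unidecode(uri[:55]))                                     -> 'dc-terms-creator'
#   no triple dashes, no trailing dash, no collision                 -> slug = 'dc-terms-creator'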

Example 115

Project: django-knowledge Source File: views.py
Function: test_thread
    def test_thread(self):
        c = Client()

        question_url = reverse('knowledge_thread', args=[self.question.id, slugify(self.question.title)])

        r = c.get(reverse('knowledge_thread', args=[123456, 'a-big-long-slug']))
        self.assertEquals(r.status_code, 404)

        # this is private by default
        r = c.get(reverse('knowledge_thread', args=[self.question.id, 'a-big-long-slug']))
        self.assertEquals(r.status_code, 404)
    
        r = c.get(question_url)
        self.assertEquals(r.status_code, 404)

        c.login(username='joe', password='secret')

        r = c.get(reverse('knowledge_thread', args=[self.question.id, 'a-big-long-slug']))
        self.assertEquals(r.status_code, 301)

        r = c.get(question_url)
        self.assertEquals(r.status_code, 200)


        RESPONSE_POST = {
            'body': 'This is the response body friend!'
        }

        r = c.post(question_url, RESPONSE_POST)
        self.assertEquals(r.status_code, 302)

        # back to an anon user
        c.logout()

        # let's make it public...
        self.question.public()
    
        r = c.get(question_url)
        self.assertEquals(r.status_code, 200)

        # invalid response POSTs are basically ignored...
        r = c.post(question_url, RESPONSE_POST)
        self.assertEquals(r.status_code, 200)

Example 116

Project: CommunityCellularManager Source File: staff.py
    def get(self, request):
        """"Handles GET requests."""
        user_profile = models.UserProfile.objects.get(user=request.user)
        if not user_profile.user.is_staff:
            return response.Response('', status=status.HTTP_404_NOT_FOUND)
        # Build up the context and initial form data.
        initial_form_data = {}
        context = {
            'networks': get_objects_for_user(request.user, 'view_network', klass=models.Network),
            'user_profile': user_profile,
        }
        network_pk = request.GET.get('network', None)
        if network_pk:
            initial_form_data['network'] = network_pk
            network = models.Network.objects.get(pk=network_pk)
            # Attach the associated UserProfile to the network for reference.
            network.user_profile = models.UserProfile.objects.get(
                network=network)
            context['network'] = network
            # Count subs and numbers.
            context['subscriber_count'] = models.Subscriber.objects.filter(
                network=network).count()
            context['number_count'] = models.Number.objects.filter(
                network=network).count()
            # Build up the data for the price comparison table.
            context['prices'] = []
            context['grand_total_op_profit'] = 0
            context['grand_total_e_profit'] = 0
            tiers = self.get_ordered_tiers(network)
            for traffic_type in ('call', 'sms'):
                for tier in tiers:
                    # Determine costs.  The URL params (keys) are of the form
                    # <call/sms>_<sluggified_tier_name>_proposed_<entity>_cost.
                    # We'll fill in the form with something that was POSTed or
                    # with a default value from the tier itself.
                    if traffic_type == 'call':
                        # Subscriber costs.
                        actual_sub_cost = tier.cost_to_subscriber_per_min
                        key = 'call_%s_proposed_%s_cost' % (
                            slugify(tier.name), 'sub')
                        proposed_sub_cost = int(request.GET.get(
                            key, actual_sub_cost))
                        # Operator costs.
                        actual_op_cost = tier.cost_to_operator_per_min
                        key = 'call_%s_proposed_%s_cost' % (
                            slugify(tier.name), 'op')
                        proposed_op_cost = int(request.GET.get(
                            key, actual_op_cost))
                        # Endaga costs.
                        actual_e_cost = self.get_cost(tier, 'call', 'e')
                        key = 'call_%s_proposed_%s_cost' % (
                            slugify(tier.name), 'e')
                        proposed_e_cost = int(request.GET.get(
                            key, actual_e_cost))
                    elif traffic_type == 'sms':
                        # Subscriber costs.
                        actual_sub_cost = tier.cost_to_subscriber_per_sms
                        key = 'sms_%s_proposed_%s_cost' % (
                            slugify(tier.name), 'sub')
                        proposed_sub_cost = int(request.GET.get(
                            key, actual_sub_cost))
                        # Operator costs.
                        actual_op_cost = tier.cost_to_operator_per_sms
                        key = 'sms_%s_proposed_%s_cost' % (
                            slugify(tier.name), 'op')
                        proposed_op_cost = int(request.GET.get(
                            key, actual_op_cost))
                        # Endaga costs.
                        actual_e_cost = self.get_cost(tier, 'sms', 'e')
                        key = 'sms_%s_proposed_%s_cost' % (
                            slugify(tier.name), 'e')
                        proposed_e_cost = int(request.GET.get(
                            key, actual_e_cost))
                    # Calculate margins.
                    op_margin = proposed_sub_cost - proposed_op_cost
                    e_margin = proposed_op_cost - proposed_e_cost
                    # Find the number of these kinds of events.
                    occurrences = self.count_usage_events(traffic_type, tier)
                    # Calculate profits in dollars.
                    total_op_profit = occurrences * op_margin / (1000 * 100.)
                    total_e_profit = occurrences * e_margin / (1000 * 100.)
                    context['grand_total_op_profit'] += total_op_profit
                    context['grand_total_e_profit'] += total_e_profit
                    # Now that we've done the math, format the values.
                    if traffic_type == 'call':
                        occurrences = '%0.2f' % occurrences
                    # Use all of this to set more of the context.
                    context['prices'].append({
                        'directionality': tier.directionality,
                        'tier': tier.name,
                        'traffic_type': traffic_type,
                        'proposed_sub_cost': proposed_sub_cost,
                        'actual_sub_cost': actual_sub_cost,
                        'proposed_op_cost': proposed_op_cost,
                        'actual_op_cost': actual_op_cost,
                        'proposed_e_cost': proposed_e_cost,
                        'actual_e_cost': actual_e_cost,
                        'op_margin': op_margin,
                        'e_margin': e_margin,
                        'occurrences': occurrences,
                        'total_op_profit': total_op_profit,
                        'total_e_profit': total_e_profit,
                    })
        # Attach the network selection form with any specified initial data.
        select_network_form = SelectNetworkForm(initial=initial_form_data)
        select_network_form.helper.form_action = (
            '/dashboard/staff/margin-analysis')
        context['select_network_form'] = select_network_form
        # Render the template.
        margin_template = template.loader.get_template(
            'dashboard/staff/margin-analysis.html')
        html = margin_template.render(context, request)
        return http.HttpResponse(html)
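
The URL-parameter keys built above look like the following for a hypothetical tier named "Off-Network Sending":

from django.template.defaultfilters import slugify

tier_name = "Off-Network Sending"   # illustrative tier name
key = 'call_%s_proposed_%s_cost' % (slugify(tier_name), 'sub')
# key == 'call_off-network-sending_proposed_sub_cost'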

Example 117

Project: cdr-stats Source File: create_admin.py
    def handle(self, *args, **kwargs):
        first_name = last_name = None
        name, email = settings.ADMINS[0]
        names = name.split(' ')
        if len(names) > 1:
            username = ''.join(
                [c[0].lower() for c in names[:-1]]) + names[-1].lower()
            first_name = names[0]
            last_name = names[-1]
        else:
            username = slugify(name)
        # Overwrite the generated username with a fixed value ('root')
        username = 'root'
        password = User.objects.make_random_password(length=14)

        try:
            User.objects.get(username=username)
            print "Admin {} already exists".format(username)
        except User.DoesNotExist:
            u = User.objects.create_user(username, email, password)
            u.is_staff = True
            u.is_superuser = True
            if first_name:
                u.first_name = first_name
            if last_name:
                u.last_name = last_name
            u.save()
            print 'Created admin with username {} and password {}'.format(
                username,
                password
            )

Example 118

Project: feincms Source File: zip.py
def export_zipfile(site, queryset):
    now = timezone.now()
    zip_name = "export_%s_%04d%02d%02d.zip" % (
        slugify(site.domain), now.year, now.month, now.day)

    zip_data = open(os.path.join(django_settings.MEDIA_ROOT, zip_name), "w")
    zip_file = zipfile.ZipFile(zip_data, 'w', allowZip64=True)

    # Save the used categories in the zip file's global comment
    used_categories = set()
    for mf in queryset:
        for cat in mf.categories.all():
            used_categories.update(cat.path_list())

    info = {
        'export_magic': export_magic,
        'categories': [{
            'id': cat.id,
            'title': cat.title,
            'slug': cat.slug,
            'parent': cat.parent_id or 0,
            'level': len(cat.path_list()),
        } for cat in used_categories],
    }
    zip_file.comment = json.dumps(info)

    for mf in queryset:
        ctime = time.localtime(os.stat(mf.file.path).st_ctime)
        info = json.dumps({
            'copyright': mf.copyright,
            'categories': [cat.id for cat in mf.categories.all()],
            'translations': [{
                'lang': t.language_code,
                'caption': t.caption,
                'description': t.description,
            } for t in mf.translations.all()],
        })

        with open(mf.file.path, "r") as file_data:
            zip_info = zipfile.ZipInfo(
                filename=mf.file.name,
                date_time=(
                    ctime.tm_year,
                    ctime.tm_mon,
                    ctime.tm_mday,
                    ctime.tm_hour,
                    ctime.tm_min,
                    ctime.tm_sec))
            zip_info.comment = info
            zip_file.writestr(zip_info, file_data.read())

    return zip_name
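
For a hypothetical site domain and export date, the zip name built above comes out as:

from django.template.defaultfilters import slugify

zip_name = "export_%s_%04d%02d%02d.zip" % (slugify('www.example.com'), 2016, 5, 9)
# zip_name == 'export_wwwexamplecom_20160509.zip' (slugify strips the dots)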

Example 119

Project: django-multi-gtfs Source File: exportgtfs.py
    def handle(self, *args, **options):
        if len(args) == 0:
            raise CommandError('You must pass in feed ID to export.')
        if len(args) > 1:
            raise CommandError('You can only export one feed at a time.')

        # Setup logging
        verbosity = int(options['verbosity'])
        console = logging.StreamHandler(self.stderr)
        formatter = logging.Formatter('%(levelname)s - %(message)s')
        logger_name = 'multigtfs'
        if verbosity == 0:
            level = logging.WARNING
        elif verbosity == 1:
            level = logging.INFO
        elif verbosity == 2:
            level = logging.DEBUG
        else:
            level = logging.DEBUG
            logger_name = ''
            formatter = logging.Formatter(
                '%(name)s - %(levelname)s - %(message)s')
        console.setLevel(level)
        console.setFormatter(formatter)
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        logger.addHandler(console)

        # Disable database query logging
        if settings.DEBUG:
            connection.use_debug_cursor = False

        try:
            feed_id = int(args[0])
        except ValueError:
            raise CommandError('"%s" is not a valid feed ID' % args[0])
        try:
            feed = Feed.objects.get(id=feed_id)
        except Feed.DoesNotExist:
            raise CommandError('Feed %s not found' % feed_id)
        out_name = options.get('name') or slugify(feed.name)
        if not out_name.endswith('.zip'):
            out_name += '.zip'
        self.stdout.write(
            "Exporting Feed %s to %s...\n" % (feed_id, out_name))
        feed.export_gtfs(out_name)
        self.stdout.write(
            "Successfully exported Feed %s to %s\n" % (feed_id, out_name))

Example 120

Project: django-raster Source File: views.py
    def get(self, request):
        # Initiate algebra parser
        parser = RasterAlgebraParser()
        # Get formula from request
        formula = request.GET.get('formula')
        # Get id list from request
        ids = self.get_ids()
        # Compute tile index range
        zoom, xmin, ymin, xmax, ymax = self.get_tile_range()
        # Check maximum size of target raster in pixels
        max_pixels = getattr(settings, 'RASTER_EXPORT_MAX_PIXELS', EXPORT_MAX_PIXELS)
        if WEB_MERCATOR_TILESIZE * (xmax - xmin) * WEB_MERCATOR_TILESIZE * (ymax - ymin) > max_pixels:
            raise RasterAlgebraException('Export raster too large.')
        # Construct an empty raster with the output dimensions
        result_raster = self.construct_raster(zoom, xmin, xmax, ymin, ymax)
        target = result_raster.bands[0]
        # Get raster data as 1D arrays and store in dict that can be used
        # for formula evaluation.
        for xindex, x in enumerate(range(xmin, xmax + 1)):
            for yindex, y in enumerate(range(ymin, ymax + 1)):
                data = {}
                for name, layerid in ids.items():
                    tile = get_raster_tile(layerid, zoom, x, y)
                    if tile:
                        data[name] = tile
                # Ignore this tile if data is not found for all layers
                if len(data) != len(ids):
                    continue
                # Evaluate raster algebra expression, return 400 if not successful
                try:
                    # Evaluate raster algebra expression
                    tile_result = parser.evaluate_raster_algebra(data, formula)
                except:
                    raise RasterAlgebraException('Failed to evaluate raster algebra.')
                # Update nodata value on target
                target.nodata_value = tile_result.bands[0].nodata_value
                # Update results raster with algebra
                target.data(
                    data=tile_result.bands[0].data(),
                    size=(WEB_MERCATOR_TILESIZE, WEB_MERCATOR_TILESIZE),
                    offset=(xindex * WEB_MERCATOR_TILESIZE, yindex * WEB_MERCATOR_TILESIZE),
                )
        # Create filename base with datetime stamp
        filename_base = 'algebra_export'
        # Add name slug to filename if provided
        if request.GET.get('filename', ''):
            # Slugify the name
            slug = slugify(request.GET.get('filename'))
            # Remove all unwanted characters
            slug = "".join([c for c in slug if re.match(r'\w|\-', c)])
            # Limit length of custom name slug
            slug = slug[:MAX_EXPORT_NAME_LENGTH]
            # Add name slug to filename base
            filename_base += '_' + slug
        filename_base += '_{0}'.format(datetime.now().strftime('%Y_%m_%d_%H_%M'))
        # Compress resulting raster file into a zip archive
        raster_workdir = getattr(settings, 'RASTER_WORKDIR', None)
        dest = NamedTemporaryFile(dir=raster_workdir, suffix='.zip')
        dest_zip = zipfile.ZipFile(dest.name, 'w', allowZip64=True)
        dest_zip.write(
            filename=self.exportfile.name,
            arcname=filename_base + '.tif',
            compress_type=zipfile.ZIP_DEFLATED,
        )
        # Write README.txt and COLORMAP.txt files to zip file
        self.write_readme(dest_zip)
        self.write_colormap(dest_zip)
        # Close zip file before returning
        dest_zip.close()
        # Create file based response containing zip file and return for download
        response = FileResponse(
            open(dest.name, 'rb'),
            content_type='application/zip'
        )
        response['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename_base + '.zip')
        return response

Example 121

Project: iCQA Source File: users.py
def user_view(template, tab_name, tab_title, tab_description, private=False, tabbed=True, render_to=None, weight=500):
    def decorator(fn):
        def params(request, id, slug=None):
            user = get_object_or_404(User, id=id)
            if private and not (user == request.user or request.user.is_superuser):
                raise ReturnImediatelyException(HttpResponseUnauthorized(request))

            if render_to and (not render_to(user)):
                raise ReturnImediatelyException(HttpResponseRedirect(user.get_profile_url()))

            return [request, user], {}

        decorated = decorate.params.withfn(params)(fn)

        def result(context, request, user):
            rev_page_title = user.username + " - " + tab_description

            context.update({
                "tab": "users",
                "active_tab" : tab_name,
                "tab_description" : tab_description,
                "page_title" : rev_page_title,
                "can_view_private": (user == request.user) or request.user.is_superuser
            })
            return render_to_response(template, context, context_instance=RequestContext(request))

        decorated = decorate.result.withfn(result, needs_params=True)(decorated)

        if tabbed:
            def url_getter(vu):
                try:
                    return reverse(fn.__name__, kwargs={'id': vu.id, 'slug': slugify(vu.username)})
                except NoReverseMatch:
                    return reverse(fn.__name__, kwargs={'id': vu.id})

            ui.register(ui.PROFILE_TABS, ui.ProfileTab(
                tab_name, tab_title, tab_description,url_getter, private, render_to, weight
            ))

        return decorated
    return decorator

Example 122

Project: django-cities Source File: cities.py
    def import_country(self):
        uptodate = self.download('country')
        if uptodate and not self.force:
            return

        data = self.get_data('country')

        total = sum(1 for _ in data)

        data = self.get_data('country')

        neighbours = {}
        countries = {}

        continents = {c.code: c for c in Continent.objects.all()}

        # If the continent attribute on Country is a ForeignKey, import
        # continents as ForeignKeys to the Continent model; otherwise assume
        # it is still a CharField(max_length=2) and import it the old way.
        import_continents_as_fks = type(Country._meta.get_field('continent')) == ForeignKey

        self.logger.info("Importing country data")
        for item in tqdm([d for d in data if d['code'] not in NO_LONGER_EXISTENT_COUNTRY_CODES],
                         total=total,
                         desc="Importing countries..."):
            self.logger.info(item)
            if not self.call_hook('country_pre', item):
                continue

            country = Country()
            try:
                country.id = int(item['geonameid'])
            except:
                continue

            country.name = item['name']
            country.slug = slugify(country.name)
            country.code = item['code']
            country.code3 = item['code3']
            country.population = item['population']
            country.continent = continents[item['continent']] if import_continents_as_fks else item['continent']
            country.tld = item['tld'][1:]  # strip the leading .
            country.phone = item['phone']
            country.currency = item['currencyCode']
            country.currency_name = item['currencyName']

            # These fields shouldn't impact saving older models (that don't
            # have these attributes)
            try:
                country.currency_symbol = CURRENCY_SYMBOLS.get(item['currencyCode'], None)
                country.postal_code_format = item['postalCodeFormat']
                country.postal_code_regex = item['postalCodeRegex']
            except AttributeError:
                pass

            country.capital = item['capital']
            country.area = int(float(item['area'])) if item['area'] else None
            if hasattr(country, 'language_codes'):
                country.language_codes = item['languages']
            elif type(Country._meta.get_field('languages')) == CharField:
                country.languages = item['languages']

            neighbours[country] = item['neighbours'].split(",")
            countries[country.code] = country

            if not self.call_hook('country_post', country, item):
                continue
            country.save()

        for country, neighbour_codes in list(neighbours.items()):
            neighbours = [x for x in [countries.get(x) for x in neighbour_codes if x] if x]
            country.neighbours.add(*neighbours)
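
The country slug above is just Django's default slugify applied to the GeoNames name field. A minimal, hedged sanity check of that behaviour (the sample names below are illustrative, not taken from django-cities):

from django.template.defaultfilters import slugify

# Django's default slugify normalises to NFKD, drops non-ASCII characters,
# lowercases, and collapses whitespace into hyphens.
for name in (u"United States", u"Côte d'Ivoire", u"São Tomé and Príncipe"):
    print(u'%s -> %s' % (name, slugify(name)))
# expected roughly: united-states, cote-divoire, sao-tome-and-principe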

Example 123

Project: django-djobberbase Source File: models.py
Function: save
    def save(self, *args, **kwargs):
        #saving auth code
        if not self.auth:
            self.auth = md5(unicode(self.id) + \
                            unicode(uuid.uuid1()) + \
                            unicode(time.time())).hexdigest()
        #saving company slug
        self.company_slug = slugify(self.company)

        #saving job url
        self.joburl = slugify(self.title) + \
                        '-' + djobberbase_settings.DJOBBERBASE_AT_URL + \
                        '-' + slugify(self.company)

        #saving with textile
        if djobberbase_settings.DJOBBERBASE_MARKUP_LANGUAGE == 'textile':
            import textile
            self.description_html = mark_safe(
                                        force_unicode(
                                            textile.textile(
                                                smart_str(self.description))))
        #or markdown
        elif djobberbase_settings.DJOBBERBASE_MARKUP_LANGUAGE == 'markdown':
            import markdown
            self.description_html = mark_safe(
                                        force_unicode(
                                            markdown.markdown(
                                                smart_str(self.description))))
        else:
            self.description_html = self.description

        super(Job, self).save(*args, **kwargs)
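
The resulting joburl is simply two slugs joined around DJOBBERBASE_AT_URL. A hedged sketch of the value produced, with the separator assumed to be 'at' (the real default lives in djobberbase_settings):

from django.template.defaultfilters import slugify

DJOBBERBASE_AT_URL = 'at'  # assumed value, for illustration only

title, company = 'Senior Python Developer', 'ACME Corp.'
joburl = slugify(title) + '-' + DJOBBERBASE_AT_URL + '-' + slugify(company)
print(joburl)  # expected: senior-python-developer-at-acme-corp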

Example 124

Project: django-cities Source File: cities.py
    def import_district(self):
        uptodate = self.download_once('city')
        if uptodate and not self.force:
            return

        data = self.get_data('city')

        total = sum(1 for _ in data)

        data = self.get_data('city')

        self.build_country_index()
        self.build_region_index()
        self.build_hierarchy()

        self.logger.info("Building city index")
        city_index = {}
        for obj in City.objects.all():
            city_index[obj.id] = obj

        self.logger.info("Importing district data")
        for item in tqdm(data, total=total, desc="Importing districts"):
            if not self.call_hook('district_pre', item):
                continue

            type = item['featureCode']
            if type not in district_types:
                continue

            district = District()
            district.name = item['name']
            district.name_std = item['asciiName']
            try:
                district.code = item['admin3Code']
            except AttributeError:
                pass
            district.slug = slugify(district.name_std)
            district.location = Point(float(item['longitude']), float(item['latitude']))
            district.population = int(item['population'])

            # Find city
            city = None
            try:
                city = city_index[self.hierarchy[district.id]]
            except:
                self.logger.warning("District: %s: Cannot find city in hierarchy, using nearest", district.name)
                city_pop_min = 100000
                # we are going to try to find the closest city using a native
                # database .distance(...) query, but if that fails we fall back
                # to a degree-based search; MySQL has no native support, and
                # neither does SpatiaLite with SRID 4326.
                try:
                    if django.VERSION < (1, 9):
                        city = City.objects.filter(population__gt=city_pop_min)\
                                   .distance(district.location)\
                                   .order_by('distance')[0]
                    else:
                        city = City.objects.filter(population__gt=city_pop_min)\
                            .annotate(distance=Distance('location', district.location))\
                            .order_by('distance')[0]
                except:  # TODO: Restrict what this catches
                    self.logger.warning(
                        "District: %s: DB backend does not support native '.distance(...)' query "
                        "falling back to two degree search",
                        district.name
                    )
                    search_deg = 2
                    min_dist = float('inf')
                    bounds = Envelope(
                        district.location.x - search_deg, district.location.y - search_deg,
                        district.location.x + search_deg, district.location.y + search_deg)
                    for e in City.objects.filter(population__gt=city_pop_min).filter(
                            location__intersects=bounds.wkt):
                        dist = geo_distance(district.location, e.location)
                        if dist < min_dist:
                            min_dist = dist
                            city = e

            if not city:
                self.logger.warning("District: %s: Cannot find city -- skipping", district.name)
                continue

            district.city = city

            if not self.call_hook('district_post', district, item):
                continue
            district.save()
            self.logger.debug("Added district: %s", district)

Example 125

Project: zorna Source File: views.py
def fm_properties_view(request):
    pathfile = request.REQUEST.get('dir', None)
    file_name = request.REQUEST.get('file_name', None)
    file_id = request.REQUEST.get('file_id', 0)
    ret = {}
    if pathfile:
        ppath = urllib.unquote(pathfile.rstrip('/'))
        path_dest = clean_path(ppath)
        if path_dest != ppath:
            ret['message'] = gettext(u"Incorrect path")
            ret['status'] = 'error'
        else:
            buser, bmanager = get_user_access_to_path(request.user, path_dest)
            if bmanager is False:
                ret['message'] = gettext(u"Access denied")
                ret['status'] = 'error'
            else:
                folder = get_shared_folder(path_dest)
                try:
                    fo = ZornaFile.objects.get(pk=int(file_id))
                except:
                    ret['message'] = gettext(u"Access denied")
                    ret['status'] = 'error'
                else:
                    cdir_components = get_path_components(path_dest)
                    if request.method == 'POST':
                        root_path = get_upload_library()
                        form = ZornaFileForm(request.POST, instance=fo)
                        if form.is_valid():
                            fo = form.save(commit=False)
                            fo.modifier = request.user
                            fo.save()
                            if request.FILES:
                                f = request.FILES['file']
                                s = os.path.splitext(f.name)
                                fname = u"%s%s" % (slugify(s[0]), s[1])
                                os.remove(u"%s/%s/%s,%s" % (
                                    root_path, path_dest, file_id, file_name))
                                destination = open(u"%s/%s/%s,%s" % (
                                    root_path, path_dest, file_id, fname), 'wb+')
                                for chunk in f.chunks():
                                    destination.write(chunk)
                                destination.close()
                                # notify users
                                bnotify = request.POST.get('notify_users', 0)
                                if folder and folder.email_notification and (folder.email_notification == 1 or bnotify):
                                    notify_users(request, folder, [{
                                                 'name': fname, 'description': fo.description}], False)

                            ret['message'] = gettext(
                                u"File updated successfully")
                            ret['status'] = 'success'
                            json_data = simplejson.dumps(ret)
                            return HttpResponse('<textarea>' + json_data + '</textarea>')
                        else:
                            ret['message'] = gettext(u"Invalid form")
                            ret['status'] = 'error'
                    else:
                        form = ZornaFileForm(instance=fo)
                        ret['status'] = 'success'

                    t = loader.get_template('fileman/file_properties.html')
                    ec = {
                        'file': fo, 'form': form, 'file_path': pathfile, 'file_name': file_name,
                        'file_id': file_id, 'cdir_components': cdir_components,
                        'folder_content_url': get_url_folder_content(pathfile), 'folder': folder}
                    c = RequestContext(request, ec)
                    ret['data'] = t.render(c)
    else:
        ret['message'] = gettext(u"Invalid file path")
        ret['status'] = 'error'

    json_data = simplejson.dumps(ret)
    return HttpResponse(json_data)

Example 126

Project: django-rolodex Source File: slug.py
Function: unique_slugify
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
                   slug_separator='-'):
    """
    Calculates and stores a unique slug of ``value`` for an instance.

    ``slug_field_name`` should be a string matching the name of the field to
    store the slug in (and the field to check against for uniqueness).

    ``queryset`` usually doesn't need to be explicitly provided - it'll default
    to using the ``.all()`` queryset from the model's default manager.
    """
    slug_field = instance._meta.get_field(slug_field_name)

    slug = getattr(instance, slug_field.attname)
    slug_len = slug_field.max_length

    # Sort out the initial slug, limiting its length if necessary.
    slug = slugify(value)
    if slug_len:
        slug = slug[:slug_len]
    slug = _slug_strip(slug, slug_separator)
    original_slug = slug

    # Create the queryset if one wasn't explicitly provided and exclude the
    # current instance from the queryset.
    if queryset is None:
        queryset = instance.__class__._default_manager.all()
    if instance.pk:
        queryset = queryset.exclude(pk=instance.pk)

    # Find a unique slug. If one matches, add '-2' to the end and try again
    # (then '-3', etc).
    next = 2
    while not slug or queryset.filter(**{slug_field_name: slug}):
        slug = original_slug
        end = '%s%s' % (slug_separator, next)
        if slug_len and len(slug) + len(end) > slug_len:
            slug = slug[:slug_len - len(end)]
            slug = _slug_strip(slug, slug_separator)
        slug = '%s%s' % (slug, end)
        next += 1

    setattr(instance, slug_field.attname, slug)
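
A hedged usage sketch for the helper above: a hypothetical model whose save() delegates slug generation to unique_slugify (the model and its fields are illustrative, not part of django-rolodex):

from django.db import models

class Contact(models.Model):  # hypothetical model for illustration
    name = models.CharField(max_length=200)
    slug = models.SlugField(max_length=50, blank=True)

    def save(self, *args, **kwargs):
        # unique_slugify sets self.slug in place, appending -2, -3, ... on clashes
        unique_slugify(self, self.name)
        super(Contact, self).save(*args, **kwargs)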

Example 127

Project: 1flow Source File: admin.py
    def csv_export(self, request, queryset, *args, **kwargs):
        response = HttpResponse(mimetype='text/csv')

        response['Content-Disposition'] = "attachment; filename={}.csv".format(
            slugify(self.model.__name__)
        )

        headers = list(self.list_display) + list(
            self.get_extra_csv_fields(request)
        )

        # BOM (Excel needs it to open UTF-8 file properly)
        response.write(u'\ufeff'.encode('utf8'))
        writer = csv.DictWriter(response, headers)

        # Write header.
        header_data = {}
        for name in headers:
            if hasattr(self, name) \
                    and hasattr(getattr(self, name), 'short_description'):
                header_data[name] = getattr(
                    getattr(self, name), 'short_description')

            else:
                field = self.model._meta.get_field_by_name(name)

                if field and field[0].verbose_name:
                    header_data[name] = field[0].verbose_name

                else:
                    header_data[name] = name

            header_data[name] = header_data[name].encode('utf-8', 'ignore')

        writer.writerow(header_data)

        # Write records.
        for row in queryset[:self.csv_record_limit]:
            data = {}
            for name in headers:
                if hasattr(row, name):
                    data[name] = getattr(row, name)
                elif hasattr(self, name):
                    data[name] = getattr(self, name)(row)
                else:
                    raise Exception('Unknown field: %s' % (name,))

                if callable(data[name]):
                    data[name] = data[name]()

                if isinstance(data[name], unicode):
                    data[name] = data[name].encode('utf-8', 'ignore')
                else:
                    data[name] = unicode(data[name]).encode('utf-8', 'ignore')

            writer.writerow(data)

        return response

Example 128

Project: django-report-builder Source File: unique_slugify.py
Function: unique_slugify
def unique_slugify(instance, value, slug_field_name='slug', queryset=None,
                   slug_separator='-'):
    """
    Calculates and stores a unique slug of ``value`` for an instance.

    ``slug_field_name`` should be a string matching the name of the field to
    store the slug in (and the field to check against for uniqueness).

    ``queryset`` usually doesn't need to be explicitly provided - it'll default
    to using the ``.all()`` queryset from the model's default manager.
    """
    slug_field = instance._meta.get_field(slug_field_name)

    slug = getattr(instance, slug_field.attname)
    slug_len = slug_field.max_length

    # Sort out the initial slug, limiting its length if necessary.
    slug = slugify(value)
    if slug_len:
        slug = slug[:slug_len]
    slug = _slug_strip(slug, slug_separator)
    original_slug = slug

    # Create the queryset if one wasn't explicitly provided and exclude the
    # current instance from the queryset.
    if queryset is None:
        queryset = instance.__class__._default_manager.all()
    if instance.pk:
        queryset = queryset.exclude(pk=instance.pk)

    # Find a unique slug. If one matches, add '-2' to the end and try again
    # (then '-3', etc).
    next = 2
    while not slug or queryset.filter(**{slug_field_name: slug}):
        slug = original_slug
        end = '%s%s' % (slug_separator, next)
        if slug_len and len(slug) + len(end) > slug_len:
            slug = slug[:slug_len - len(end)]
            slug = _slug_strip(slug, slug_separator)
        slug = '%s%s' % (slug, end)
        next += 1

    setattr(instance, slug_field.attname, slug)

Example 129

Project: onlineweb4 Source File: events.py
    def dehydrate(self, bundle):

        # Setting slug-field
        bundle.data['slug'] = slugify(unidecode(bundle.data['title']))

        # If image is set
        if 'image' in bundle.data and bundle.data['image']:
            # Parse to FileObject used by Filebrowser
            temp_image = FileObject(bundle.data['image'])

            # Iterate the different versions (by key)
            for ver in VERSIONS.keys():
                # Check if the key starts with events_ (if it does, we want to crop to that size)
                if ver.startswith('events_'):
                    # Adding the new image to the object
                    bundle.data['image_' + ver] = temp_image.version_generate(ver).url

            # Unset the image-field
            del(bundle.data['image'])

        # Do the same thing for the company image
        if bundle.data['company_event']:
            for company in bundle.data['company_event']:
                temp_image = FileObject(company.data['companies'].data['old_image'])
                for ver in VERSIONS.keys():
                    if ver.startswith('companies_thumb'):
                        company.data['companies'].data['old_image_' + ver] = temp_image.version_generate(ver).url
                del(company.data['companies'].data['old_image'])

        # Returning washed object
        return bundle
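
The unidecode() call is what keeps letters that Django's slugify would otherwise drop entirely (slugify only strips accents that NFKD can decompose). A small, hedged illustration with an assumed title:

from django.template.defaultfilters import slugify
from unidecode import unidecode  # third-party package, as used by the project

title = u'Bedriftsbesøk i København'
print(slugify(title))             # roughly: bedriftsbesk-i-kbenhavn (the ø is dropped)
print(slugify(unidecode(title)))  # roughly: bedriftsbesok-i-kobenhavn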

Example 130

Project: btb Source File: generate.py
def generate_colation(mailing):
    """
    Generates a zip file containing all of the letters and envelopes for a
    particular mailing.  The output has the following structure:
    mailings-YYYY-MM-DD/ 
      letters/ (all letters, individually)
      envelopes/ (all envelopes)
      postcards/ (any other postcard type)
    
      all_letters.pdf -- all letters of all kinds (not postcards) combined for 
                         double-sided printing
      manifest.json   -- JSON file with details for every letter and postcard
                         (recipient, sender, type, and zipfile-relative path)
    """
    
    tmpdir = tempfile.mkdtemp(prefix="colation")
    outname = "mailings-%s_%s" % (
        datetime.datetime.now().strftime("%Y-%m-%d"),
        mailing.pk,
    )
    outdir = os.path.join(tmpdir, outname)
    os.makedirs(outdir)  # creates the dated mailing directory inside tmpdir

    manifest = {"letters": [], "postcards": []}
    for letter in mailing.letters.all():
        if not letter.get_file():
            continue
        details = {
            "recipient": letter.get_recipient_address(),
            "sender": letter.org.mailing_address,
            "type": letter.type,
            "file": letter.get_file(),
            "id": letter.id,
        }
        details["slug"] = slugify(details["recipient"].split("\n")[0])
        if letter.is_postcard:
            manifest["postcards"].append(details)
        else:
            manifest["letters"].append(details)

    # Write envelopes
    if manifest["letters"]:
        envelope_dir = os.path.join(outdir, "envelopes")
        os.makedirs(envelope_dir)
        unique_envelopes = set(
            (d["slug"], d["recipient"], d["sender"]) for d in manifest["letters"]
        )
        for slug, addr, from_address in unique_envelopes:
            env_fh = utils.build_envelope(from_address=from_address, to_address=addr)
            path = os.path.join(envelope_dir, "%s-envelope.jpg" % slug)
            with open(path, 'w') as fh:
                fh.write(env_fh.getvalue())

    # Write postcards
    for key in ("postcards", "letters"):
        if not manifest[key]:
            continue

        file_dir = os.path.join(outdir, key)
        os.makedirs(file_dir)
        for details in manifest[key]:
            dest = os.path.join(file_dir,
                "{0}-{1}{2}.{3}".format(
                    details["slug"],
                    details["type"],
                    details["id"],
                    ("jpg" if key == "postcards" else "pdf")
                ))
            shutil.copy(details["file"], dest)
            # Replace 'file' in manifest with zipfile-relative path
            details["file"] = os.path.relpath(dest, outdir)
            
    # Make combined letters pdf.
    if manifest["letters"]:
        sorted_pdfs = sorted(glob.glob(os.path.join(outdir, "letters", "*.pdf")))
        utils.combine_pdfs(*sorted_pdfs,
            add_blanks=True,
            filename=os.path.join(outdir, "all_letters.pdf")
        )

    # Write manifest file
    with open(os.path.join(outdir, "manifest.json"), 'w') as fh:
        json.dump(manifest, fh, indent=2)

    # Zip
    tmp_zip_path = "{0}.zip".format(outdir)
    zipbase = os.path.basename(outdir)
    proc = subprocess.Popen(["/usr/bin/zip", "-r", zipbase, zipbase],
            cwd=tmpdir) # zip adds ".zip"
    proc.communicate()

    # Clean up 
    dest = os.path.join(settings.MEDIA_ROOT, "mailings",
            os.path.basename(outname) + ".zip")
    try:
        os.makedirs(os.path.dirname(dest))
    except OSError:
        pass
    shutil.move(tmp_zip_path, dest)
    proc = subprocess.Popen(["rm", "-r", tmpdir])
    return os.path.relpath(dest, settings.MEDIA_ROOT)
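
The only slugify usage above is turning the first line of a recipient address into a filesystem-safe name for the envelope and letter files. A quick, hedged illustration with a made-up address:

from django.template.defaultfilters import slugify

# Hypothetical multi-line address, as get_recipient_address() might return
recipient = "Jane Q. Doe #12345\nPO Box 99\nSpringfield, ST 00000"
slug = slugify(recipient.split("\n")[0])
print("%s-envelope.jpg" % slug)  # expected: jane-q-doe-12345-envelope.jpg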

Example 131

Project: fumblerooski Source File: games.py
def game_updater(year, teams, week, nostats=False):
    """
    Loads a team's games for a given year, creating new games as it finds them and updating scores. If needed,
    it will create new College records for opponents. The function accepts an optional QuerySet of teams.
    It can be run in nostats mode, in which case only the score is updated. Otherwise (nostats=False),
    after each game is updated the function calls other functions to load player and drive statistics. After completing,
    this function calls update_college_year for the given year, which updates the win-loss record of each team.
    
    >>> game_updater(2010, teams, 12)
    """
    if not teams:
        teams = CollegeYear.objects.filter(season=year, college__updated=True).order_by('id')
    
    games = []
    
    for team in teams:
        unmatched = []
        url = "http://web1.ncaa.org/football/exec/rankingSummary?org=%s&year=%s&week=%s" % (team.college.id, year, week)
        html = urllib.urlopen(url).read()
        soup = BeautifulSoup(html)
        try:
            t = soup.findAll('table')[2]
            rows = t.findAll('tr')[2:]
            base_url = "http://web1.ncaa.org/d1mfb/%s/Internet/worksheets/" % year
            for row in rows:
                try:
                    game_file = row.findAll('td')[0].find('a')['href'].split('game=')[1]
                    stringdate = row.findAll('td')[0].find('a').contents[0][4:]
                    team1_score, team2_score = [int(x) for x in row.findAll('td')[2].contents[0].split(' - ')]
                    if len(row.findAll('td')[3].contents[0].strip().split(' ')) == 2:
                        t1_result, ot = row.findAll('td')[3].contents[0].strip().split(' ')
                    else:
                        t1_result = row.findAll('td')[3].contents[0].strip()
                        ot = None

                except:
                    game_file = None
                    stringdate = row.findAll('td')[0].contents[0][4:]
                    team1_score = None
                    team2_score = None
                    t1_result = None
                    ot = None
                date = datetime.date(*(time.strptime(stringdate, '%m/%d/%Y')[0:3]))
                try:
                    t2 = int(row.findAll('td')[1].find('a')['href'].split('=')[1].split('&')[0])
                    try:
                        if t2 == 115:   # hack job to cover for ncaa change
                            team2 = CollegeYear.objects.get(college__id=30416, season=year)
                        elif t2 == 357: # another one like the above - Lincoln Univ. PA
                            team2 = CollegeYear.objects.get(college__id=30417, season=year)
                        else:
                            team2 = CollegeYear.objects.get(college__id=t2, season=year)
                    except:
                        name = row.findAll('td')[1].contents[0].replace("*","").strip().title()
                        slug = slugify(name)
                        new_college, created = College.objects.get_or_create(name=name, slug=slug)
                        team2 = CollegeYear.objects.get_or_create(college=new_college, season=year)
                except:
                    if len(row.findAll('td')[1].contents) > 0 and row.findAll('td')[1].contents[0] != '':
                        name = row.findAll('td')[1].contents[0].replace("*","").strip().title()
                        slug = slugify(name)
                        new_college, created = College.objects.get_or_create(name=name, slug=slug)
                        team2, created = CollegeYear.objects.get_or_create(college=new_college, season=year)
                    else:
                        continue
                print team, team2, date, team1_score, team2_score, t1_result
                g, new_game = Game.objects.get_or_create(season=year, team1=team, team2=team2, date=date)
                g.team1_score = team1_score
                g.team2_score=team2_score
                g.t1_result=t1_result
                g.overtime=ot
                if game_file:
                    g.ncaa_xml = game_file.split('.xml')[0].strip()
                    games.append(g)
                    if not nostats:
                        load_ncaa_game_xml(g)
                        g.has_stats = True
                        player_game_stats(g)
                        g.has_player_stats = True
                        game_drive_loader(g)
                        g.has_drives = True
                else:
                    pass
                if ot:
                    g.ot = 't'
                if "@" in row.findAll('td')[1].contents[0]:
                    g.t1_game_type = 'A'
                elif "^" in row.findAll('td')[1].contents[0]:
                    g.t1_game_type = 'N'
                elif row.findAll('td')[1].find('a') and "@" in row.findAll('td')[1].find('a').contents[0]:
                    g.t1_game_type = 'A'
                elif row.findAll('td')[1].find('a') and "^" in row.findAll('td')[1].find('a').contents[0]:
                    g.t1_game_type = 'N'
                else:
                    g.t1_game_type = 'H'
                if new_game:
                    populate_head_coaches(g)
                g.save()
        except:
             unmatched.append(team.college.id)
    update_college_year(year)
    for team in unmatched:
        print "Could not find games for %s" % team.id

Example 132

Project: django-supertagging Source File: __init__.py
def _processEntities(field, data, obj, ctype, process_type, tags, date):
    """
    Process Entities.
    """
    processed_tags = []
    for e in data:
        entity = e.copy()
        # Here we convert the given float value to an integer
        rel = int(float(str(entity.pop('relevance', '0'))) * 1000)
        
        # Only process tags and items whose relevance is greater than
        # or equal to the MIN_RELEVANCE setting
        if rel < settings.MIN_RELEVANCE:
            continue
            
        inst = entity.pop('instances', {})
        ## Tidy up the encoding
        for i, j in enumerate(inst):
            for k, v in j.items():
                if isinstance(v, unicode):
                    inst[i][k] = v.encode('utf-8')
                else:
                    inst[i][k] = v


        calais_id = re.match(REF_REGEX, str(entity.pop('__reference'))).group('key')
        stype = entity.pop('_type', '')

        # if type is in EXCLUSIONS, continue to the next item.
        if stype.lower() in map(lambda s: s.lower(), settings.EXCLUSIONS):
            continue
        
        display_name = entity.pop('name', '')
        name = display_name.lower()
        if tags and name not in tags:
            continue
        
        slug = slugify(name)
        tag = None
        try:
            tag = SuperTag.objects.get_by_name(name__iexact=name, stype=stype)
        except SuperTag.DoesNotExist:
            try:
                tag = SuperTag.objects.get(calais_id=calais_id)
            except SuperTag.DoesNotExist:
                kwargs = {
                    'calais_id': calais_id,
                    'slug': slug,
                    'stype': stype,
                    'name': name,
                }
                if settings.INCLUDE_DISPLAY_FIELDS:
                    kwargs['display_name'] = display_name
                tag = SuperTag.objects.create_alternate(**kwargs)
        except SuperTag.MultipleObjectsReturned:
            tag = SuperTag.objects.filter(name__iexact=name)[0]
            
        tag = tag.substitute or tag
        
        # If this tag was added to the exclude list, move on to the next item.
        if not tag.enabled:
            continue
            
        tag.properties = entity
        tag.save()
            
        # Check to make sure that the entity is not already attached
        # to the content object, if it is, just append the instances. This
        # should eliminate entities returned with different names such as
        # 'Washington' and 'Washington DC' but same id
        try:
            it = SuperTaggedItem.objects.get(tag=tag, content_type=ctype, 
                object_id=obj.pk, field=field)
            it.instances.append(inst)
            it.item_date = date
            # Take the higher relevance
            if rel > it.relevance:
                it.relevance = rel
            it.save()
        except SuperTaggedItem.DoesNotExist:
            # Create the record that will associate content to tags
            it = SuperTaggedItem.objects.create(tag=tag, 
                content_type=ctype, object_id=obj.pk, field=field, 
                process_type=process_type, relevance=rel, instances=inst, 
                item_date=date)

        processed_tags.append(tag)
    return processed_tags

Example 133

Project: ella Source File: views.py
    def get_context(self, request, category, slug, year, month, day, id):
        try:
            cat = Category.objects.get_by_tree_path(category)
        except Category.DoesNotExist:
            # non-static url, no way to recover
            if year:
                raise Http404("Category with tree_path '%s' doesn't exist." % category)
            else:
                cat = None

        if year:
            start_date = localize(datetime(int(year), int(month), int(day)))
            end_date = start_date + timedelta(days=1)

            lookup = {
                'publish_from__gte': start_date,
                'publish_from__lt': end_date,
                'category': cat,
                'slug': slug,
                'static': False
            }
            try:
                publishable = get_cached_object(Publishable, published=True, **lookup)
            except Publishable.DoesNotExist:
                # Fallback for staff members in case there are multiple
                # objects with same URL.
                if request.user.is_staff:
                    try:
                        # Make sure we return specific publishable subclass
                        # like when using `get_cached_object` if possible.
                        p = Publishable.objects.filter(published=False, **lookup)[0]
                        publishable = p.content_type.model_class()._default_manager.get(pk=p.pk)
                    except IndexError:
                        raise Http404
                else:
                    raise Http404
        else:
            publishable = get_cached_object_or_404(Publishable, pk=id)

        if not (publishable.is_published() or request.user.is_staff):
            # future publish, render if accessed by logged in staff member
            raise Http404

        if not year:
            if cat is None:
                raise self.WrongUrl('Category with tree_path %r does not exist.' % category, publishable)
            elif not publishable.static:
                raise self.WrongUrl('%s is not static.' % publishable, publishable)
            elif slug != publishable.slug:
                raise self.WrongUrl('Wrong slug in URL (%r).' % slug, publishable)
            elif publishable.category_id != cat.pk:
                raise self.WrongUrl('Wrong category for %s.' % publishable, publishable)

        # save existing object to preserve memory and SQL
        publishable.category = cat

        context = {
            'object': publishable,
            'category': cat,
            'content_type_name': slugify(publishable.content_type.model_class()._meta.verbose_name_plural),
            'content_type': publishable.content_type
        }

        return context

Example 134

Project: django-profile Source File: avatars.original.py
    def render(self, context):
        try:
            # If size is not an int, then it's a Variable, so try to resolve it.
            if not isinstance(self.size, int):
                self.size = int(self.size.resolve(context))
            self.user = self.get_user(context)
        except Exception, e:
            print e
            return '' # just die...
        if self.size > _settings.DEFAULT_AVATAR_WIDTH:
            return '' # unacceptable
        profile = self.get_profile()
        if not profile:
            return ''
        # Avatar's heaven, where all the avatars go.
        avatars_root = path.join(_settings.AVATARS_DIR,
                                 slugify(self.user.username))
        file_root, file_name, defaulting = self.get_file(profile)
        if defaulting:
            file_root = _settings.AVATARS_DIR
            if self.size_equals():
                return self.as_url(path.join(file_root, file_name))
        file_path = path.join(file_root, file_name)
        # I don't return the default because I have to resize it.
        if not defaulting:
            if path.exists(file_path) and self.size_equals(file_path):
                return self.as_url(file_path)
            else:
                if not profile.avatar:
                    file_root = _settings.AVATARS_DIR
                    file_path = path.join(file_root, _settings.DEFAULT_AVATAR)
        # Oops, I didn't find it, let's try to generate it.
        if path.exists(file_path):
            orig_file = Image(file_path)
            dest_root = path.join(avatars_root, str(self.size))
            try:
                makedirs(dest_root)
            except Exception, e:
                print e
            # Save the new path for later...
            dest_path = path.join(dest_root, file_name)
        else:
            # Did my best...
            return '' # fail silently
        orig_file.scale(self.size)
        if orig_file.write(dest_path):
            return self.as_url(dest_path)
        else:
            print '=== ERROR ==='
            return '' # damn! Close but no cigar...

Example 135

Project: aldryn-blog Source File: utils.py
Function: generate_slugs
def generate_slugs(users):
    """
    Takes a queryset of users and creates nice slugs.
    Returns a list of the same users, each with a slug attribute set.
    """

    slugs = []
    slugged_users = []

    for user in users:
        slug = ''
        _slug = slugify(user.get_full_name())
        if not _slug:
            slug = user.get_username()

        elif _slug not in slugs:
            slug = _slug

        else:
            for i in xrange(2, 100):
                if not '%s-%i' % (_slug, i) in slugs:
                    slug = '%s-%i' % (_slug, i)
                    break

        if not slug:
            slug = user.get_username()

        slugs.append(slug)
        user.slug = slug
        slugged_users.append(user)

    return slugged_users
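
A quick way to see the de-duplication above in action is to feed it duck-typed stand-ins for Django users (the real function receives a user queryset; the objects below are purely illustrative):

class FakeUser(object):
    """Minimal stand-in exposing the two methods generate_slugs relies on."""
    def __init__(self, full_name, username):
        self._full_name, self._username = full_name, username

    def get_full_name(self):
        return self._full_name

    def get_username(self):
        return self._username

users = [FakeUser('Jane Doe', 'jane'), FakeUser('Jane Doe', 'jane.b'), FakeUser('', 'ghost')]
for user in generate_slugs(users):
    print('%s -> %s' % (user.get_username(), user.slug))
# expected: jane -> jane-doe, jane.b -> jane-doe-2, ghost -> ghost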

Example 136

Project: django-supertagging Source File: __init__.py
def _processSocialTags(field, data, obj, ctype, tags, date):
    """
    Process social tags. This operation is similar to _processEntities; the only
    difference is that there are no instances.
    """
    rel_map = {'1': 900, '2': 700}
    processed_tags = []
    for di in data:
        di.pop('__reference')
        
        calais_id = re.match(REF_REGEX, str(di.pop('socialTag'))).group('key')
        stype = 'Social Tag'
        display_name = di.pop('name', '')
        name = display_name.lower()
        if tags and name not in tags:
            continue
        rel = rel_map.get(di.get('importance', '3'), 500)
        slug = slugify(name)
        tag = None
        try:
            tag = SuperTag.objects.get_by_name(name__iexact=name)
        except SuperTag.DoesNotExist:
            try:
                tag = SuperTag.objects.get(calais_id=calais_id)
            except SuperTag.DoesNotExist:
                kwargs = {
                    'calais_id': calais_id,
                    'slug': slug,
                    'stype': stype,
                    'name': name,
                }
                if settings.INCLUDE_DISPLAY_FIELDS:
                    kwargs['display_name'] = display_name
                tag = SuperTag.objects.create_alternate(**kwargs)
        except SuperTag.MultipleObjectsReturned:
            tag = SuperTag.objects.filter(name__iexact=name)[0]
            
        tag = tag.substitute or tag
        
        if not tag.enabled:
            continue

        tag.properties = di
        tag.save()

        SuperTaggedItem.objects.create(tag=tag, content_type=ctype, 
            object_id=obj.pk, field=field, relevance=rel, item_date=date)

        processed_tags.append(tag)
    return processed_tags

Example 137

Project: django-blog-zinnia Source File: metaweblog.py
@xmlrpc_func(returns='string', args=['string', 'string', 'string',
                                     'struct', 'boolean'])
def new_post(blog_id, username, password, post, publish):
    """
    metaWeblog.newPost(blog_id, username, password, post, publish)
    => post_id
    """
    user = authenticate(username, password, 'zinnia.add_entry')
    if post.get('dateCreated'):
        creation_date = datetime.strptime(
            post['dateCreated'].value[:18], '%Y-%m-%dT%H:%M:%S')
        if settings.USE_TZ:
            creation_date = timezone.make_aware(
                creation_date, timezone.utc)
    else:
        creation_date = timezone.now()

    entry_dict = {'title': post['title'],
                  'content': post['description'],
                  'excerpt': post.get('mt_excerpt', ''),
                  'publication_date': creation_date,
                  'creation_date': creation_date,
                  'last_update': creation_date,
                  'comment_enabled': post.get('mt_allow_comments', 1) == 1,
                  'pingback_enabled': post.get('mt_allow_pings', 1) == 1,
                  'trackback_enabled': post.get('mt_allow_pings', 1) == 1,
                  'featured': post.get('sticky', 0) == 1,
                  'tags': 'mt_keywords' in post and post['mt_keywords'] or '',
                  'slug': 'wp_slug' in post and post['wp_slug'] or slugify(
                      post['title']),
                  'password': post.get('wp_password', '')}
    if user.has_perm('zinnia.can_change_status'):
        entry_dict['status'] = publish and PUBLISHED or DRAFT

    entry = Entry.objects.create(**entry_dict)

    author = user
    if 'wp_author_id' in post and user.has_perm('zinnia.can_change_author'):
        if int(post['wp_author_id']) != user.pk:
            author = Author.objects.get(pk=post['wp_author_id'])
    entry.authors.add(author)

    entry.sites.add(Site.objects.get_current())
    if 'categories' in post:
        entry.categories.add(*[
            Category.objects.get_or_create(
                title=cat, slug=slugify(cat))[0]
            for cat in post['categories']])

    return entry.pk

Example 138

Project: django-municipios Source File: ibge.py
def convert_shapefile(shapefilename, srid=4674):
    """
    shapefilename: uses the IBGE shapefile naming convention to determine whether the
                   file contains UF (states) or Municípios (municipalities),
                   e.g. 55UF2500GC_SIR.shp for UF and 55MU2500GC_SIR.shp for Municípios
    srid: 4674 (SIRGAS 2000 projection)
    """
    # /home/nando/Desktop/IBGE/2010/55MU2500GC_SIR.shp
    ds = DataSource(shapefilename)

    is_uf = shapefilename.upper().find('UF') != -1

    transform_coord = None
    if srid != SRID:
        transform_coord = CoordTransform(SpatialReference(srid), SpatialReference(SRID))

    if is_uf:
        model = UF
    else:
        model = Municipio

    ct = 0
    for f in ds[0]:

        # 3D to 2D if necessary
        if f.geom.coord_dim != 2:
            f.geom.coord_dim = 2

        # convert to MultiPolygon if necessary
        if isinstance(f.geom, Polygon):
            g = OGRGeometry(OGRGeomType('MultiPolygon'))
            g.add(f.geom)
        else:
            g = f.geom

        # transform coordinates if necessary
        if transform_coord:
            g.transform(transform_coord)

        # force 2D
        g.coord_dim = 2
        kwargs = {}

        if is_uf:
            kwargs['nome'] = capitalize_name(unicode(f.get(CAMPO_NOME_UF), 'latin1'))
            kwargs['geom'] = g.ewkt
            kwargs['id_ibge'] = f.get(CAMPO_GEOCODIGO_UF)
            kwargs['regiao'] = capitalize_name(unicode(f.get(CAMPO_REGIAO_UF), 'latin1'))
            kwargs['uf'] = UF_SIGLAS_DICT.get(kwargs['id_ibge'])
        else:
            kwargs['nome'] = capitalize_name(unicode(f.get(CAMPO_NOME_MU), 'latin1'))
            kwargs['geom'] = g.ewkt
            kwargs['id_ibge'] = f.get(CAMPO_GEOCODIGO_MU)
            kwargs['uf'] = UF.objects.get(pk=f.get(CAMPO_GEOCODIGO_MU)[:2])
            kwargs['uf_sigla'] = kwargs['uf'].uf
            kwargs['nome_abreviado'] = slugify(kwargs['nome'])
            # try to fix duplicate names (there are around 242 repeated names)
            # by appending the state abbreviation at the end
            if Municipio.objects.filter(nome_abreviado=kwargs['nome_abreviado']).count() > 0:
                kwargs['nome_abreviado'] = u'%s-%s' % (kwargs['nome_abreviado'], kwargs['uf_sigla'].lower())

        instance = model(**kwargs)
        instance.save()

        ct += 1

    print(ct, (is_uf and "Unidades Federativas criadas" or "Municipios criados"))

Example 139

Project: feincms Source File: zip.py
def import_zipfile(category_id, overwrite, data):
    """
    Import a collection of media files from a zip file.

    category_id: if set, the pk of a Category that will be added to all
        uploaded files (e.g. a category "newly uploaded files")
    overwrite: attempt to overwrite existing files. This might
        not work with non-trivial storage handlers
    """
    category = None
    if category_id:
        category = Category.objects.get(pk=int(category_id))

    z = zipfile.ZipFile(data)

    # Peek into zip file to find out whether it contains meta information
    is_export_file = False
    info = {}
    try:
        info = json.loads(z.comment)
        if info['export_magic'] == export_magic:
            is_export_file = True
    except:
        pass

    # If meta information, do we need to create any categories?
    # Also build translation map for category ids.
    category_id_map = {}
    if is_export_file:
        for cat in sorted(
                info.get('categories', []),
                key=lambda k: k.get('level', 999)):
            new_cat, created = Category.objects.get_or_create(
                slug=cat['slug'],
                title=cat['title'])
            category_id_map[cat['id']] = new_cat
            if created and cat.get('parent', 0):
                parent_cat = category_id_map.get(cat.get('parent', 0), None)
                if parent_cat:
                    new_cat.parent = parent_cat
                    new_cat.save()

    count = 0
    for zi in z.infolist():
        if not zi.filename.endswith('/'):
            bname = os.path.basename(zi.filename)
            if bname and not bname.startswith(".") and "." in bname:
                fname, ext = os.path.splitext(bname)
                wanted_dir = os.path.dirname(zi.filename)
                target_fname = slugify(fname) + ext.lower()

                info = {}
                if is_export_file:
                    info = json.loads(zi.comment)

                mf = None
                if overwrite:
                    full_path = os.path.join(wanted_dir, target_fname)
                    try:
                        mf = MediaFile.objects.get(file=full_path)
                        mf.file.delete(save=False)
                    except MediaFile.DoesNotExist:
                        mf = None

                if mf is None:
                    mf = MediaFile()
                if overwrite:
                    mf.file.field.upload_to = wanted_dir
                mf.copyright = info.get('copyright', '')
                mf.file.save(
                    target_fname,
                    ContentFile(z.read(zi.filename)),
                    save=False)
                mf.save()

                found_metadata = False
                if is_export_file:
                    try:
                        for tr in info['translations']:
                            found_metadata = True
                            mt, mt_created =\
                                MediaFileTranslation.objects.get_or_create(
                                    parent=mf, language_code=tr['lang'])
                            mt.caption = tr['caption']
                            mt.description = tr.get('description', None)
                            mt.save()

                        # Add categories
                        mf.categories = (
                            category_id_map[cat_id]
                            for cat_id in info.get('categories', []))
                    except Exception:
                        pass

                if not found_metadata:
                    mt = MediaFileTranslation()
                    mt.parent = mf
                    mt.caption = fname.replace('_', ' ')
                    mt.save()

                if category:
                    mf.categories.add(category)

                count += 1

    return count
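
The slugify detail worth noting above is that only the file stem is slugged; the original extension is kept, just lower-cased. A rough illustration with an assumed archive member name:

import os
from django.template.defaultfilters import slugify

member = 'Photos 2014/Strand & Meer (1).JPG'  # assumed zip member path
fname, ext = os.path.splitext(os.path.basename(member))
print(slugify(fname) + ext.lower())  # expected: strand-meer-1.jpg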

Example 140

Project: idea-box Source File: models.py
Function: unique_slug
def unique_slug(item, slug_source, slug_field):
    """
    Ensures a unique slug field by appending an integer counter to duplicate
    slugs.

    The item's slug field is first prepopulated by slugify-ing the source
    field. If that value already exists, a counter is appended to the slug,
    and the counter incremented upward until the value is unique.

    For instance, if you save an object titled Daily Roundup, and the slug
    daily-roundup is already taken, this function will try daily-roundup-2,
    daily-roundup-3, daily-roundup-4, etc, until a unique value is found.

    Call from within a model's custom save() method like so:
    unique_slug(item, slug_source='field1', slug_field='field2')
    where the value of field slug_source will be used to prepopulate the value
    of slug_field.
    """
    if not getattr(item, slug_field):  # if it already has slug, do nothing.
        from django.template.defaultfilters import slugify
        slug = slugify(getattr(item, slug_source))
        itemModel = item.__class__
        # the following gets all existing slug values
        allSlugs = [sl.values()[0]
                    for sl in itemModel.objects.values(slug_field)]
        if slug in allSlugs:
            import re
            counterFinder = re.compile(r'-\d+$')
            counter = 2
            slug = "%s-%i" % (slug, counter)
            while slug in allSlugs:
                slug = re.sub(counterFinder, "-%i" % counter, slug)
                counter += 1
        setattr(item, slug_field, slug)
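
Following the docstring above, a minimal save() override wiring unique_slug in (the model is hypothetical and only meant to show the call site):

from django.db import models

class Roundup(models.Model):  # hypothetical model for illustration
    title = models.CharField(max_length=200)
    slug = models.SlugField(blank=True)

    def save(self, *args, **kwargs):
        # fills self.slug from self.title, appending -2, -3, ... on duplicates
        unique_slug(self, slug_source='title', slug_field='slug')
        super(Roundup, self).save(*args, **kwargs)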

Example 141

Project: djKarma Source File: import_archive.py
    def handle(self, *args, **options):
        archives = self.load_archives()

        # assuming everything is Harvard
        sch = School.objects.filter(name = "Harvard")[0]
        year = 2009

        # first we add the Simplest item required: Instructors
        # we will search for existing ones, and add those that don't exist
        orphanednotes = []
        for note in archives['archivednotes']:
                course_id = note['course_id']
                course = archives['archivedcourses'].get(course_id)
                if course == None:
                        orphanednotes.append(note)
                        continue #if a note does not properly link to a course, we add it to a list, then skip over the note
                subject_id = course['subject_id']
                subject = archives['archivedsubjects'][subject_id]
                temp_instructor, created = Instructor.objects.get_or_create(name=course['instructor'], school=sch)
                if created:
                        self.stdout.write('instructor created')

                # now we look for courses that do not exist, and add them
                course_slug = slugify(course['name'])
                #this is done in a TRY because uniqueness requirements get weird
                try:
                        temp_course, created = Course.objects.get_or_create(slug = course_slug, title = course['name'])
                        if created:
                                self.stdout.write('course created')
                except:
                        self.stdout.write('course already exists')
                # then, once it IS created, we add data.  Otherwise, we assume it already has data attached
                if created:
                        temp_course.school = sch
                        temp_course.field = subject['name']
                        temp_course.academic_year = year
                        temp_course.instructor = temp_instructor
                        temp_course.save()

                # now for actually uploading notes
                temp_note, created = File.objects.get_or_create(title = note['topic'],
                                                                                        description = note['topic'],
                                                                                        course = temp_course,
                                                                                        school = sch,
                                                                                        html = note['text'])
                if created:
                        self.stdout.write('note created')

Example 142

Project: transifex Source File: upload_manager_tags.py
@register.inclusion_tag("resources/upload_create_resource_form.html")
def upload_create_resource_form(request, project, prefix='create_form'):
    """Form for creating a new resource."""
    resource = None
    display_form = False
    if request.method == 'POST' and request.POST.get('create_resource', None):
        cr_form = CreateResourceForm(
            request.POST, request.FILES, prefix=prefix
        )
        if cr_form.is_valid():
            name = cr_form.cleaned_data['name']
            slug = slugify(name)

            # Check if we already have a resource with this slug in the db.
            try:
                Resource.objects.get(slug=slug, project=project)
            except Resource.DoesNotExist:
                pass
            else:
                # if the resource exists, modify slug in order to force the
                # creation of a new resource.
                slug = slugify(name)
                identifier = Resource.objects.filter(
                    project=project, slug__icontains="%s_" % slug
                ).count() + 1
                slug = "%s_%s" % (slug, identifier)
            method = cr_form.cleaned_data['i18n_method']
            content = content_from_uploaded_file(request.FILES)
            filename = filename_of_uploaded_file(request.FILES)
            rb = ResourceBackend()
            try:
                with transaction.commit_on_success():
                    rb.create(
                        project, slug, name, method, project.source_language,
                        content, user=request.user,
                        extra_data={'filename': filename}
                    )
            except ResourceBackendError, e:
                cr_form._errors['source_file'] = ErrorList([e.message, ])
                display_form=True
            else:
                display_form = False
                resource = Resource.objects.get(slug=slug, project=project)
        else:
            display_form=True
    else:
        cr_form = CreateResourceForm(prefix=prefix)
        display_form = False

    return {
          'project' : project,
          'resource': resource,
          'create_resource_form': cr_form,
          'display_form': display_form,
    }
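
The collision handling above can be restated without the ORM: if the slug already exists, count the already-suffixed variants and append the next '_N'. A hedged, standalone sketch of that idea (the helper name and sample data are invented):

from django.template.defaultfilters import slugify

def deduplicated_slug(name, existing_slugs):
    slug = slugify(name)
    if slug not in existing_slugs:
        return slug
    taken = [s for s in existing_slugs if s.startswith('%s_' % slug)]
    return '%s_%s' % (slug, len(taken) + 1)

print(deduplicated_slug('Main strings', ['ui-strings']))                      # main-strings
print(deduplicated_slug('Main strings', ['main-strings']))                    # main-strings_1
print(deduplicated_slug('Main strings', ['main-strings', 'main-strings_1']))  # main-strings_2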

Example 143

Project: 3bot Source File: tasks.py
@background(schedule=1)
def run_workflow(workflow_log_id):
    """
    expects an empty workflow_log,
    performs its tasks on the given worker(s) and
    returns the updated workflow_log
    """
    outputs = {}
    protocol = "tcp"

    workflow_log = WorkflowLog.objects.get(id=workflow_log_id)
    worker = workflow_log.performed_on

    WORKER_ENDPOINT = "%s://%s:%s" % (protocol, worker.ip, str(worker.port))
    WORKER_SECRET_KEY = worker.secret_key

    conn = BotConnection(WORKER_ENDPOINT, WORKER_SECRET_KEY)
    conn.connect()

    # Make a JSON
    request_header = {'workflow_log_id': workflow_log.id,
                      'workflow': slugify(workflow_log.workflow.title),
                      'workflow_log_time': workflow_log.date_created.strftime('%Y%m%d-%H%M%S'),
                      'script': {},
                      'hooks': {},  # see doc/HOOKS.md
                      }

    # hooks for this workflow
    if workflow_log.workflow.pre_task:
        request_header['hooks']['pre_task'] = workflow_log.workflow.pre_task

    if workflow_log.workflow.post_task:
        request_header['hooks']['post_task'] = workflow_log.workflow.post_task

    ordered_workflows = order_workflow_tasks(workflow_log.workflow)

    workflow_log.date_started = timezone.now()
    for idx, workflow_task in enumerate(ordered_workflows):
        template = render_template(workflow_log, workflow_task)

        if workflow_task.task.is_builtin:
            m = importCode(template, "test")
            output = {}
            output['stdout'] = str(m.run())
            output['exit_code'] = workflow_log.SUCCESS
        else:
            request = request_header
            request['script']['id'] = idx
            request['script']['body'] = template

            output = send_script(request, conn)

        outputs['%i_%s' % (workflow_task.id, workflow_task.task.title)] = output

        # loop over all next wf_tasks and add this script's output to their inputs
        current = workflow_task
        while current.next_workflow_task:
            current = current.next_workflow_task

            # deepcopy dict to prevent runtime error
            inp = deepcopy(workflow_log.inputs)
            # loop over key, value pairs and check whether this output needs to be set as an input
            for key, value in inp[str(current.id)]['string'].iteritems():
                if value == 'output_%s' % str(workflow_task.id):
                    workflow_log.inputs[str(current.id)]['string'][key] = output['stdout']

        if 'exit_code' not in output or output['exit_code'] is not workflow_log.SUCCESS:
            workflow_log.exit_code = workflow_log.ERROR
            workflow_log.save()
            break
        else:
            workflow_log.exit_code = workflow_log.SUCCESS

    conn.close()

    workflow_log.date_finished = timezone.now()
    workflow_log.outputs = outputs
    workflow_log.save()

    # Notify user in case of failure
    if workflow_log.exit_code == workflow_log.ERROR:
        subject = "[3BOT] Workflow '%s' has failed" % (workflow_log.workflow.title)
        message = "Your workflow %s%s has failed.\n -- 3bot" % (Site.objects.get_current(), workflow_log.get_absolute_url())
        workflow_log.performed_by.email_user(subject, message)

Example 144

Project: iCQA Source File: readers.py
@decorators.render("question.html", 'questions')
def question(request, id, slug='', answer=None):
    try:
        question = Question.objects.get(id=id)
    except:
        if slug:
            question = match_question_slug(id, slug)
            if question is not None:
                return HttpResponseRedirect(question.get_absolute_url())

        raise Http404()

    if question.nis.deleted and not request.user.can_view_deleted_post(question):
        raise Http404

    if request.GET.get('type', None) == 'rss':
        return RssAnswerFeed(request, question, include_comments=request.GET.get('comments', None) == 'yes')(request)

    if answer:
        answer = get_object_or_404(Answer, id=answer)

        if (question.nis.deleted and not request.user.can_view_deleted_post(question)) or answer.question != question:
            raise Http404

        if answer.marked:
            return HttpResponsePermanentRedirect(question.get_absolute_url())

        return answer_redirect(request, answer)

    if settings.FORCE_SINGLE_URL and (slug != slugify(question.title)):
        return HttpResponsePermanentRedirect(question.get_absolute_url())

    if request.POST:
        answer_form = AnswerForm(request.POST, user=request.user)
    else:
        answer_form = AnswerForm(user=request.user)

    answers = request.user.get_visible_answers(question)

    update_question_view_times(request, question)

    if request.user.is_authenticated():
        try:
            subscription = QuestionSubscription.objects.get(question=question, user=request.user)
        except:
            subscription = False
    else:
        subscription = False

    from django.db import connection, transaction
    cursor = connection.cursor()
    cursor.execute("SELECT a.user_id, a.score FROM analytics_answerer_recommendation a WHERE a.question_id=%s AND NOT EXISTS (SELECT * FROM forum_node f WHERE f.id=a.question_id AND f.author_id=a.user_id) AND NOT EXISTS (SELECT * FROM forum_node f2 WHERE f2.parent_id=a.question_id AND f2.author_id=a.user_id) AND NOT EXISTS (SELECT * FROM forum_node f3 WHERE f3.parent_id=a.question_id AND f3.marked=1) ORDER BY score ASC LIMIT 12", [question.id])
    row_list = cursor.fetchall()
    user_list = []

    for row in row_list:
        user = User.objects.get(pk=row[0])
        if (row[1] > 0):
            user_list.append({"user": user, "score": row[1]})

    return pagination.paginated(request, ('answers', AnswerPaginatorContext()), {
        "question": question,
        "answer": answer_form,
        "answers": answers,
        "similar_questions": question.get_related_questions(),
        "subscription": subscription,
        "recommendedanswerers": user_list,
    })
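
The FORCE_SINGLE_URL branch is the slug-relevant part: the view compares the slug from the URL against slugify(question.title) and issues a permanent redirect when they differ. A small standalone sketch of that check (hypothetical function name, not from iCQA):

from django.template.defaultfilters import slugify


def needs_canonical_redirect(requested_slug, title, force_single_url=True):
    # True when the slug in the requested URL no longer matches the slug
    # derived from the current title, i.e. a 301 to the canonical URL is due.
    return force_single_url and requested_slug != slugify(title)


print(needs_canonical_redirect('old-question-title', 'New question title'))  # True
print(needs_canonical_redirect('new-question-title', 'New question title'))  # False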

Example 145

Project: zorna Source File: forms.py
    def save(self, request):
        message = self.cleaned_data['message']
        send_to = self.cleaned_data['send_to']
        upload_to = []
        calendar_owners = []
        dest = []
        ao = get_allowed_objects(request.user, Community, ['manage', 'member'])
        if send_to:
            send_to = send_to.split(',')
            for item in send_to:
                item = item.split('-')
                if item[0] == 'u':  # user
                    user = User.objects.get(pk=item[1])
                    # check whether the recipient is a member of any of the current user's communities
                    ao_member_user = get_allowed_objects(
                        user, Community, ['member', 'manage'])
                    inter = [k for k in ao if k in ao_member_user]
                    if len(inter):
                        dest.append(user)
                        calendar_owners.append(user)
                        upload_to.append(u"U%s" % user.pk)
                else:
                    community = Community.objects.get(pk=item[1])
                    if community.pk in ao:
                        dest.append(community)
                        calendar_owners.append(community)
                        upload_to.append(u"C%s" % community.pk)
            users_emails = []
            if len(dest):
                m = MessageCommunity(message=message)
                m.owner = m.modifier = request.user
                m.save()
                for k in dest:
                    if isinstance(k, User):
                        m.users.add(k)
                        users_emails.append(k.email)
                    else:
                        m.communities.add(k)
                        if k.email_notification:
                            users = list(chain(get_acl_by_object(
                                k, 'member'), get_acl_by_object(k, 'manage')))
                            users_emails.extend([u.email for u in users])
            else:
                return None

            files = request.FILES.getlist("attachments")
            if len(upload_to) and len(files):
                try:
                    path_library = get_upload_library()
                    path = os.path.join(get_upload_communities(), "%s" % m.pk)
                    if not os.path.isdir(path):
                        os.makedirs(path)
                    for f in request.FILES.getlist("attachments"):
                        s = os.path.splitext(f.name)
                        fname = slugify(s[0])
                        fext = s[1]
                        destination = open(u"%s/%s" % (
                            path, u"%s%s" % (fname, fext)), 'wb+')
                        for chunk in f.chunks():
                            destination.write(chunk)
                        destination.close()
                        for d in upload_to:
                            destpath = os.path.join(path_library, "%s" % d)
                            if not os.path.isdir(destpath):
                                os.makedirs(destpath)
                            try:
                                libfile = ZornaFile(
                                    owner=request.user, modifier=request.user)
                                libfile.save()
                                fsrc = u"%s/%s/%s,%s%s" % (
                                    path_library, d, str(libfile.pk), fname, fext)
                                shutil.copy2(u"%s/%s" % (
                                    path, u"%s%s" % (fname, fext)), fsrc)
                            except Exception as e:
                                print(e)
                except Exception as e:
                    pass
            # send email notification
            if len(users_emails):
                users_emails = list(set(users_emails))
                if users_emails:
                    email = ZornaEmail()
                    url = request.build_absolute_uri(reverse(
                        'communities_home_page', args=[])) + '?all_msg=message&message_id=%s' % m.pk
                    ec = {"message": m, 'url': url, 'user': request.user}
                    body_text = render_to_string(
                        'communities/email_notification_text.html', ec)
                    body_html = render_to_string(
                        'communities/email_notification_html.html', ec)
                    subject = _(
                        u'A new message has been posted in communities')
                    step = getattr(settings, "ZORNA_MAIL_MAXPERPACKET", 25)
                    for n in range(0, len(users_emails) / step + 1):
                        email.append(subject, body_text, body_html, settings.DEFAULT_FROM_EMAIL, bcc=users_emails[
                                     n * step:(n + 1) * step])
                    email.send()
            return m
        return None
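
For the attachments, slugify is applied to the file's base name only, while the original extension is kept as uploaded. The same idea in isolation (hypothetical helper, assumes Django is installed):

import os

from django.template.defaultfilters import slugify


def safe_filename(original_name):
    # Slugify only the base name; keep the extension exactly as uploaded.
    base, ext = os.path.splitext(original_name)
    return u"%s%s" % (slugify(base), ext)


print(safe_filename("Quarterly Report (final).PDF"))  # quarterly-report-final.PDF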

Example 146

Project: django-bulbs Source File: test_content_search.py
    def setUp(self):
        super(PolyContentTestCase, self).setUp()
        """
        Normally, the "Content" class picks up available doctypes from installed apps, but
        in this case, our test models don't exist in a real app, so we'll hack them on.
        """

        # generate some data
        one_hour_ago = timezone.now() - datetime.timedelta(hours=1)
        two_days_ago = timezone.now() - datetime.timedelta(days=2)
        words = ['spam', 'driver', 'dump truck', 'restaurant']
        self.num_subclasses = 2
        self.combos = list(itertools.combinations(words, 2))
        self.all_tags = []
        ft_one = FeatureType.objects.create(name="Obj one", slug="obj-one")
        ft_two = FeatureType.objects.create(name="Obj two", slug="obj-two")

        for i, combo in enumerate(self.combos):
            tags = []
            for atom in combo:
                tag, created = Tag.objects.get_or_create(name=atom, slug=slugify(atom))
                tags.append(tag)
                self.all_tags.append(tag)

            obj = make_content(
                TestContentObj,
                published=one_hour_ago,
                feature_type=ft_one,
                tunic_campaign_id=1
            )
            obj.tags.add(*tags)
            obj.index()

            obj2 = make_content(TestContentObjTwo, published=two_days_ago, feature_type=ft_two)
            obj2.tags.add(*tags)
            obj2.index()

        obj = TestContentObj.objects.create(
            title="Unpublished draft",
            description="Just to throw a wrench",
            foo="bar",
            feature_type=ft_one
        )
        Content.search_objects.refresh()
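
Here slugify simply keeps the Tag fixtures consistent: each tag's slug is derived from its display name before get_or_create is called. A tiny sketch of that convention without a database (hypothetical helper name):

from django.template.defaultfilters import slugify


def tag_kwargs(name):
    # Mirrors the get_or_create call above: the slug always follows the name.
    return {'name': name, 'slug': slugify(name)}


print(tag_kwargs('dump truck'))  # {'name': 'dump truck', 'slug': 'dump-truck'}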

Example 147

Project: tri.table Source File: __init__.py
def table_context(request,
                  table,
                  links=None,
                  paginate_by=None,
                  page=None,
                  extra_context=None,
                  context_processors=None,
                  paginator=None,
                  show_hits=False,
                  hit_label='Items'):
    """
    :type table: Table
    """
    if extra_context is None:  # pragma: no cover
        extra_context = {}

    assert table.data is not None

    grouped_links = {}
    if links is not None:
        links = evaluate_recursive(links, table=table)
        links = [link for link in links if link.show and link.url]

        grouped_links = groupby((link for link in links if link.group is not None), key=lambda l: l.group)
        grouped_links = [(g, slugify(g), list(lg)) for g, lg in grouped_links]  # because django templates are crap!

        links = [link for link in links if link.group is None]

    base_context = {
        'links': links,
        'grouped_links': grouped_links,
        'table': table,
    }

    if paginate_by:
        try:
            paginate_by = int(request.GET.get('page_size', paginate_by))
        except ValueError:  # pragma: no cover
            pass
        if paginator is None:
            paginator = Paginator(table.data, paginate_by)
            object_list = None
        else:  # pragma: no cover
            object_list = table.data
        if not page:
            page = request.GET.get('page', 1)
        try:
            page = int(page)
            if page < 1:  # pragma: no cover
                page = 1
            if page > paginator.num_pages:  # pragma: no cover
                page = paginator.num_pages
            if object_list is None:
                table.data = paginator.page(page).object_list
        except (InvalidPage, ValueError):  # pragma: no cover
            if page == 1:
                table.data = []
            else:
                raise Http404

        base_context.update({
            'request': request,
            'is_paginated': paginator.num_pages > 1,
            'results_per_page': paginate_by,
            'has_next': paginator.num_pages > page,
            'has_previous': page > 1,
            'page_size': paginate_by,
            'page': page,
            'next': page + 1,
            'previous': page - 1,
            'pages': paginator.num_pages,
            'hits': paginator.count,
            'show_hits': show_hits,
            'hit_label': hit_label})
    else:  # pragma: no cover
        base_context.update({
            'is_paginated': False})

    base_context.update(extra_context)
    return RequestContext(request, base_context, context_processors)
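
The grouped_links line is where slugify is used: the group name is slugified once in Python so the template can use it as an id/anchor without extra filter calls. The same trick as a standalone snippet (sample data is made up):

from itertools import groupby

from django.template.defaultfilters import slugify

# Made-up (group, label) pairs, already ordered by group as groupby() requires.
links = [('Export data', 'CSV'), ('Export data', 'Excel'), ('Admin tools', 'Delete')]

grouped_links = [(group, slugify(group), [label for _, label in items])
                 for group, items in groupby(links, key=lambda link: link[0])]

print(grouped_links)
# [('Export data', 'export-data', ['CSV', 'Excel']),
#  ('Admin tools', 'admin-tools', ['Delete'])]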

Example 148

Project: btb Source File: import_test_data.py
def load_test_data():
    data_file = os.path.join(settings.MEDIA_ROOT, "test", "test_data.yaml")
    uploader = User.objects.get(username='uploader')
    commenter = User.objects.create(username="commenter")
    with open(data_file) as fh:
        data = yaml.safe_load(fh)

    orgs = {}

    print "Setting site..."
    site = Site.objects.get_current()
    site.domain = data['site']['domain']
    site.name = data['site']['name']
    site.save()

    print "Adding admins..."
    for admin_data in data['admins']:
        user, created = User.objects.get_or_create(
                username=admin_data['username'],
                is_superuser=True,
                is_staff=True,
        )
        user.set_password(admin_data['password'])
        user.save()

    print "Adding orgs..."
    for org_data in data['orgs']:
        org, created = Organization.objects.get_or_create(
                name=org_data['name'],
                personal_contact=org_data['personal_contact'],
                slug=slugify(org_data['name']),
                public=org_data['public'],
                mailing_address=org_data['mailing_address'],
                about=org_data.get('about', ''),
                footer=org_data.get('footer', ''),
        )
        orgs[org_data['name']] = org
        for mod_data in org_data['moderators']:
            u, created = User.objects.get_or_create(
                    username=mod_data['username']
            )
            u.set_password(mod_data['password'])
            u.save()
            org.moderators.add(u)
            Group.objects.get(name='moderators').user_set.add(u)
    for org_data in data['orgs']:
        mail_handled_by = org_data.get('outgoing_mail_handled_by', None)
        if mail_handled_by:
            org = Organization.objects.get(name=org_data['name'])
            mailer = Organization.objects.get(name=mail_handled_by)
            org.outgoing_mail_handled_by = mailer
            org.save()

    print "Building pdfs and users..."
    for user_data in data['users']:
        user, created = User.objects.get_or_create(
                username=slugify(user_data['name'])
        )
        if user_data.get('managed', False):
            random_mailing_address = "\n".join([
                # Prisoner number
                "#%s" % "".join(random.choice(string.digits) for a in range(8)),
                # Street
                "%s Cherry Tree Lane" % "".join(
                    random.choice(string.digits) for a in range(3)),
                # City, state, zip
                "City Name, %s  %s" % (
                    random.choice(US_STATES)[0],
                    "".join(random.choice(string.digits) for a in range(5)),
                )
            ])
        else:
            random_mailing_address = ""

        user.profile.display_name = user_data['name']
        user.profile.mailing_address = random_mailing_address
        user.profile.blogger = user_data.get('blogger', False)
        user.profile.managed = user_data.get('managed', False)
        user.profile.consent_form_received = user_data.get('consent_form_received', False)
        user.profile.blog_name = user_data.get('blog_name', None) or ''
        user.profile.save()

        for org_name in user_data['orgs']:
            orgs[org_name].members.add(user)

        for corresp in user_data['correspondence']:
            direction, content = corresp.items()[0]
            if direction == "received":
                # Build Scan
                pdf = build_pdf(content['parts'], user.profile) 
                path = tasks.move_scan_file(filename=pdf)
                scan = Scan.objects.create(
                        uploader=uploader,
                        org=orgs[org_name],
                        author=user,
                        pdf=os.path.relpath(path, settings.MEDIA_ROOT),
                        under_construction=True,
                        processing_complete=True,
                        created=content['date'])
                # execute synchronously
                tasks.split_scan(scan_id=scan.pk)
                # Build Documents
                page_count = 1 # ignore envelope
                for part in content['parts']:
                    page_count += part["pages"]
                    if part["type"] == "ignore":
                        continue
                    document = Document.objects.create(
                            scan=scan,
                            editor=uploader,
                            author=user,
                            type=part["type"],
                            date_written=content["date"],
                            created=content["date"],
                            title=part.get("title", None) or "",
                    )
                    for i, page_index in enumerate(
                            range(page_count - part["pages"], page_count)):
                        scanpage = scan.scanpage_set.get(order=page_index)
                        DocumentPage.objects.create(
                                document=document,
                                scan_page=scanpage,
                                order=i)
                    # execute synchronously
                    if part["type"] in ("profile", "post"):
                        docuement.status = "published"
                    else:
                        docuement.status = "unpublishable"
                    docuement.highlight_transform = '{"docuement_page_id": %s, "crop": [44.5, 58.66667175292969, 582.5, 288.6666717529297]}' % docuement.docuementpage_set.all()[0].pk
                    docuement.save()
                    tasks.update_docuement_images(docuement.pk)
                    for comment in part.get('comments', []):
                        Comment.objects.create(
                                user=commenter,
                                comment="Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante hendrerit. Donec et mollis dolor. Praesent et diam eget libero egestas mattis sit amet vitae augue. Nam tincidunt congue enim, ut porta lorem lacinia consectetur. Donec ut libero sed arcu vehicula ultricies a non tortor. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean ut gravida lorem. Ut turpis felis, pulvinar a semper sed, adipiscing id dolor. Pellentesque auctor nisi id magna consequat sagittis.",
                                document=document,
                                created=comment['date'],
                        )
        # Finish received scans before processing sent letters, so that the
        # comments etc. they reference already exist.
        for corresp in user_data['correspondence']:
            direction, content = corresp.items()[0]
            if direction == "sent":
                letter = Letter(type=content['type'], 
                        auto_generated=True, 
                        sender=uploader,
                        created=content['date'],
                        sent=content['date'],
                        recipient=user,
                        org=Organization.objects.get(name=user_data['orgs'][0]))
                if content['type'] == "comments":
                    letter.save()
                    comments = Comment.objects.unmailed().filter(
                            document__author=user,
                            created__lt=content['date']
                    )
                    for comment in comments:
                        letter.comments.add(comment)
                elif content['type'] == "letter":
                    letter.body = content['body']
                letter.save()
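
In this fixture loader, slugify turns human-readable names from the YAML file into usernames and organization slugs. The naming convention in isolation (sample data is invented; output shown for a recent Django):

from django.template.defaultfilters import slugify

# Invented fixture entries standing in for data['users'] / data['orgs'].
names = ["Jane Q. Public", "Prisoners' Legal Services"]

for name in names:
    print(slugify(name))
# jane-q-public
# prisoners-legal-services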

Example 149

Project: zorna Source File: forms.py
    def save(self, request, **kwargs):
        """
        Create a FormEntry instance and related FieldEntry instances for each
        form field.
        """
        entry = super(FormForForm, self).save(commit=False)
        entry.form = self.form
        if not request.user.is_anonymous():
            entry.modifier = request.user
            entry.owner = request.user
        if self.form.bind_to_account and not self.instance.id:
            try:
                entry.account_id = int(self.cleaned_data['zorna_owner-id'])
            except:
                pass
        if self.form.bind_to_entry:
            try:
                entry.entry_id = int(self.cleaned_data[
                                     self.form.bind_to_entry_slug])
            except:
                pass
        entry.save()
        fields_values = {}
        for field in self.form_fields:
            if field.field_type < fc.ZORNA_USER_SINGLETON:
                bdelete_file = False
                field_key = field.slug
                if not field_key in self.cleaned_data:
                    if field.is_a(fc.REFERENCE):
                        now = datetime.datetime.now()
                        value = now.strftime(field.default_value)
                        value = value.replace('{ID}', str(entry.pk))
                    else:
                        continue
                else:
                    value = self.cleaned_data[field_key]
                if field.is_a(fc.ZORNA_USER):
                    value = self.cleaned_data[field_key + '-id']
                if value and hasattr(value, 'name') and self.fields[field_key].widget.needs_multipart_form:
                    s = os.path.splitext(value.name)
                    filename = u"%s%s" % (slugify(s[0]), s[1])
                    value = fs.save(join(str(self.form.pk), str(
                        entry.pk), str(uuid4()), filename), value)
                    bdelete_file = True
                if isinstance(value, list):
                    value = ",".join([v.strip() for v in value])
                if self.instance.id:
                    try:
                        fe = entry.fields.get(field=field.pk,
                                              form_entry=self.instance)
                        if bdelete_file:
                            fs.delete(fe.value)
                            os.rmdir(os.path.dirname(fs.path(fe.value)))
                        fe.value = value if value else ''
                        fe.save()
                    except FormsFieldEntry.DoesNotExist:
                        if value:
                            entry.fields.create(field_id=field.id, value=value)
                elif value:
                    entry.fields.create(field_id=field.id, value=value)
                try:
                    v = float(value)
                except:
                    pass
                else:
                    fields_values[field.slug] = value

        if self.form.bind_to_entry:
            r = self.form.bind_to_entry.split('.')
            if len(r) == 2:
                try:
                    col, row = FormsFieldEntry.objects.forms_get_entries(r[
                                                                         0], entries=[int(self.cleaned_data[self.form.bind_to_entry_slug])])
                    row = row[0]
                    for v in row['fields']:
                        try:
                            float(v['value'])
                        except:
                            pass
                        else:
                            if v.has_key('form_bind'):
                                fields_values[v['form_bind'] + '.' + v[
                                    'field_bind']] = v['value']
                            else:
                                fields_values[r[0] + '.' + v[
                                    'slug']] = v['value']
                except Exception as e:
                    pass

        for field in self.form.fields.not_visible():
            value = None
            if field.reference and (field.is_a(fc.DECIMAL) or field.is_a(fc.INTEGER)):
                # tre = re.compile(r'([a-z0-9-_]+\.[a-z0-9-_]+)')
                value = field.reference
                for s, v in fields_values.iteritems():
                    value = re.sub(s, str(v), value)
                value = eval(value, {"__builtins__": None}, {
                             "__builtins__": None})
                value = '%.2f' % round(value, 2)
            else:
                value = field.default_value
            if value is not None and self.instance.id:
                try:
                    fe = entry.fields.get(field=field.pk,
                                          form_entry=self.instance)
                    fe.value = value
                    fe.save()
                except FormsFieldEntry.DoesNotExist:
                    entry.fields.create(field_id=field.id, value=value)
            elif value is not None:
                entry.fields.create(field_id=field.id, value=value)

        # if there are no fields, delete the entry
        if not entry.fields.all():
            entry.delete()
            return None

        cols, row = FormsFieldEntry.objects.forms_get_entry(entry)
        form_entry_post_save.send(sender=entry, cols=cols, row=row)
        ec = {}
        for v in row['fields']:
            ec[v['slug']] = {'label': v['label'], 'value': v['value']}
        if self.form.send_email:
            c = RequestContext(request, ec)
            if entry.form.email_message:
                t = Template(entry.form.email_message)
                body = t.render(c)
            else:
                fields = ["%s: %s" % (v.label, self.cleaned_data[k])
                          for (k, v) in self.fields.items()]
                body = "\n".join(fields)

            subject = self.form.email_subject
            if not subject:
                subject = "%s - %s" % (self.form.name, entry.time_created)
            else:
                t = Template(subject)
                subject = t.render(c)

            email_from = self.form.email_from or settings.DEFAULT_FROM_EMAIL
            t = Template(self.form.email_copies)
            email_copies = t.render(c)
            email_copies = [e.strip() for e in email_copies.split(",")
                            if e.strip()]
            if email_copies:
                msg = EmailMessage(subject, body, email_from, email_copies)
                for f in self.files.values():
                    f.seek(0)
                    msg.attach(f.name, f.read())
                msg.send()
        return entry
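
For uploaded form fields, the stored path combines the form id, the entry id, a uuid4 directory and a slugified base name, so the file name is filesystem-safe and collisions are avoided. A condensed sketch of that path scheme (hypothetical helper, not the project's API):

import os
from os.path import join
from uuid import uuid4

from django.template.defaultfilters import slugify


def upload_path(form_pk, entry_pk, original_name):
    # <form>/<entry>/<uuid>/<slugified-base><ext>
    base, ext = os.path.splitext(original_name)
    return join(str(form_pk), str(entry_pk), str(uuid4()),
                u"%s%s" % (slugify(base), ext))


print(upload_path(3, 17, "Expense report 2016.xlsx"))
# e.g. 3/17/<random-uuid>/expense-report-2016.xlsx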

Example 150

Project: django-wordpress Source File: import_to_blogango.py
    def handle(self, *args, **kwargs):
        if 'blogango' not in settings.INSTALLED_APPS:
            raise CommandError('Add blogango to installed apps to import posts from wordpress')
        
        # get the offset id from blogango
        blog_entries = BlogEntry.objects.all().order_by('-id')
        offset = blog_entries.count() and blog_entries[0].id or 0
        wp_posts = Post.objects.filter(id__gt=offset, post_status='publish', post_type='post')
        
        for wp_post in wp_posts:
            # insert into BlogEntry
            print wp_post.post_date

            blog_entry = BlogEntry.objects.create(id=wp_post.id,
                                                  title=wp_post.post_title,
                                                  slug=slugify(wp_post.post_title),
                                                  text=wp_post.post_content,
                                                  created_by=get_auth_user(wp_post.post_author))
            blog_entry.created_on = wp_post.post_date
            blog_entry.save()
            
            tables = ['wp_term_taxonomy', 'wp_term_relationships']
            where = ['wp_term_relationships.object_id = %s', 
                     'wp_term_taxonomy.term_taxonomy_id = wp_term_relationships.term_taxonomy_id', 
                     'wp_term_taxonomy.term_id = wp_terms.term_id', 
                     'wp_term_taxonomy.taxonomy = %s']
                
            # get categories
            categories = Term.objects.extra(tables=tables, where=where, params=[wp_post.id, 'category'])
            for category in categories:
                blog_entry.tags.add(category.name)
            
            # get tags
            # tags = Term.objects.extra(tables=tables, where=where, params=[wp_post.id, 'post_tag'])
            # for tag in tags:blog_entry.tags.add(tag.name)
            
            # add comments
            wp_comments = WpComment.objects.filter(comment_post_id=wp_post.id, comment_approved=1)
            for wp_comment in wp_comments:
                if wp_comment.comment_type == 'pingback':
                    continue
                if wp_comment.comment_agent in COMMENT_AGENTS:
                    comment = Reaction.objects.create(text=wp_comment.comment_content,
                                                       comment_for=blog_entry,
                                                       user_name=wp_comment.comment_author,
                                                       user_url=wp_comment.comment_author_url,
                                                       source=wp_comment.comment_agent.lstrip('btc_'))
                else:
                    comment = Comment.objects.create(text=wp_comment.comment_content,
                                                     comment_for=blog_entry,
                                                     user_name=wp_comment.comment_author,
                                                     user_url=wp_comment.comment_author_url,
                                                     email_id=wp_comment.comment_author_email)
                comment.created_on = wp_comment.comment_date
                comment.is_public = True
                comment.save()
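
The slug for each imported BlogEntry is derived straight from the WordPress post title. A quick standalone check of what that migration produces (made-up titles; note that real slug fields usually also need a length cap and a uniqueness check, which this command does not add):

from django.template.defaultfilters import slugify

wp_titles = ["Hello world!", "10 Things I Learned in 2012", "Django & WordPress"]

for title in wp_titles:
    print(slugify(title))
# hello-world
# 10-things-i-learned-in-2012
# django-wordpress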