django.core.files.File

Here are examples of the Python API django.core.files.File, taken from open source projects.

160 Examples
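
Before the project examples, here is a minimal sketch of the pattern most of them share: wrapping an ordinary Python file object in django.core.files.File so it can be handed to Django's storage layer. The Document model and its file field are hypothetical stand-ins for whatever model the examples use.

from django.core.files import File
from myapp.models import Document  # hypothetical model with a FileField named 'file'

with open('/tmp/report.pdf', 'rb') as fp:
    doc = Document()
    # FieldFile.save() copies the wrapped file into the configured storage
    # backend and, by default, saves the model instance as well.
    doc.file.save('report.pdf', File(fp))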

Example 101

Project: hydroshare Source File: utils.py
def add_file_to_resource(resource, f, fed_res_file_name_or_path='', fed_copy_or_move=None):
    """
    Add a ResourceFile to a Resource.  Adds the 'format' metadata element to the resource.
    :param resource: Resource to which file should be added
    :param f: File-like object to add to a resource
    :param fed_res_file_name_or_path: the logical file name of the resource content file for
                                      a federated iRODS resource, or the federated zone name.
                                      Empty by default. A non-empty value indicates that the
                                      file needs to be added into the federated zone, either
                                      from local disk (f holds the file uploaded from local
                                      disk) or directly from the federated zone (f is empty
                                      and fed_res_file_name_or_path holds the full iRODS path
                                      of the data object in the federated zone).
    :param fed_copy_or_move: whether the file should be copied or moved from the private user
                             account to the proxy user account in the federated zone. 'copy'
                             means the file is copied; 'move' means it is moved rather than
                             copied. The default is None, meaning not applicable: the file
                             does not come from a federated zone, so neither operation
                             applies. Any value other than 'copy' or 'move' is also treated
                             as not applicable.

    :return: The identifier of the ResourceFile added.
    """
    if f:
        if fed_res_file_name_or_path:
            ret = ResourceFile.objects.create(content_object=resource,
                                              resource_file=None,
                                              fed_resource_file=File(f) if not isinstance(
                                                  f, UploadedFile) else f)
        else:
            ret = ResourceFile.objects.create(content_object=resource,
                                              resource_file=File(f) if not isinstance(
                                                  f, UploadedFile) else f,
                                              fed_resource_file=None)
        # add format metadata element if necessary
        file_format_type = get_file_mime_type(f.name)
    elif fed_res_file_name_or_path and fed_copy_or_move in ('copy', 'move'):
        size = get_fed_zone_file_size(fed_res_file_name_or_path)
        ret = ResourceFile.objects.create(content_object=resource, resource_file=None,
                                          fed_resource_file=None,
                                          fed_resource_file_name_or_path=fed_res_file_name_or_path,
                                          fed_resource_file_size=size)
        try:
            from_fname = fed_res_file_name_or_path
            filename = from_fname.rsplit('/')[-1]

            if resource.resource_federation_path:
                to_fname = '{base_path}/{res_id}/data/contents/{file_name}'
                to_fname = to_fname.format(base_path=resource.resource_federation_path,
                                           res_id=resource.short_id, file_name=filename)
                istorage = IrodsStorage('federated')
            else:
                to_fname = '{res_id}/data/contents/{file_name}'.format(res_id=resource.short_id,
                                                                       file_name=filename)
                istorage = IrodsStorage()
            if fed_copy_or_move == 'copy':
                istorage.copyFiles(from_fname, to_fname)
            else:
                istorage.moveFile(from_fname, to_fname)
            # update file path now that file has been copied or moved to HydroShare proxy
            # account space
            ret.fed_resource_file_name_or_path = 'data/contents/{file_name}'.format(
                file_name=filename)
            ret.save()
        except SessionException as ex:
            # delete the file added if there is any exception
            ret.delete()
            # re-raise the exception so the calling function can report the error in the page interface
            raise SessionException(ex.exitcode, ex.stdout, ex.stderr)

        file_format_type = get_file_mime_type(fed_res_file_name_or_path)
    else:
        raise ValueError('An invalid input parameter was passed to this add_file_to_resource() '
                         'function')

    if file_format_type not in [mime.value for mime in resource.metadata.formats.all()]:
        resource.metadata.create_element('format', value=file_format_type)

    return ret

Example 102

Project: airmozilla Source File: test_authmigrate.py
    def test_migrate_user(self):
        user = self.user
        event = Event.objects.get(title='Test event')
        event.creator = user
        event.save()

        Event.objects.create(
            title='Different',
            creator=User.objects.create(username='else'),
            modified_user=user,
            start_time=event.start_time,
        )

        EventEmail.objects.create(
            event=event,
            user=user,
            to='[email protected]',
        )

        EventRevision.objects.create(
            event=event,
            user=user,
            title='title'
        )

        EventTweet.objects.create(
            event=event,
            text='Hi',
            creator=user,
        )

        Approval.objects.create(
            event=event,
            user=user,
        )

        assignment = EventAssignment.objects.create(
            event=event
        )
        assignment.users.add(user)

        suggested_event = SuggestedEvent.objects.create(
            user=user,
            title='Something',
        )

        SuggestedEventComment.objects.create(
            suggested_event=suggested_event,
            comment='hi',
            user=user,
        )

        Comment.objects.create(
            event=event,
            comment='Hi!',
            user=user,
        )

        with open(self.main_image, 'rb') as fp:
            Picture.objects.create(
                event=event,
                file=File(fp),
                modified_user=user,
            )

        Chapter.objects.create(
            event=event,
            timestamp=1,
            user=user,
        )

        LoggedSearch.objects.create(
            term='foo',
            user=user,
        )

        SavedSearch.objects.create(
            user=user,
            filters={'key': 'value'}
        )

        StarredEvent.objects.create(
            event=event,
            user=user,
        )

        survey = Survey.objects.create(
            name='name'
        )
        question = Question.objects.create(
            survey=survey,
            question={'key': 'value'},
        )
        Answer.objects.create(
            user=user,
            question=question,
            answer={'foo': 'bar'}
        )

        Upload.objects.create(
            user=user,
            url='https://',
            size=1234,
        )

        discussion = Discussion.objects.create(event=event)
        discussion.moderators.add(user)
        assert user in discussion.moderators.all()

        Unsubscription.objects.create(
            user=user,
            discussion=discussion,
        )

        sug_discussion = SuggestedDiscussion.objects.create(
            event=suggested_event
        )
        sug_discussion.moderators.add(user)
        assert user in sug_discussion.moderators.all()

        ClosedCaptions.objects.create(
            event=event,
            created_user=user,
        )

        RevOrder.objects.create(
            event=event,
            created_user=user,
            input=RevInput.objects.create(url='https://'),
            output_file_formats=['dfxp'],
        )

        new = User.objects.create(
            username='new',
            email='[email protected]'
        )

        # MOMENT OF TRUTH!
        things = merge_user(user, new)
        assert things

        # Events
        ok_(not Event.objects.filter(creator=user))
        ok_(Event.objects.filter(creator=new))
        ok_(not Event.objects.filter(modified_user=user))
        ok_(Event.objects.filter(modified_user=new))

        # EventEmail
        ok_(not EventEmail.objects.filter(user=user))
        ok_(EventEmail.objects.filter(user=new))

        # Suggested events
        ok_(not SuggestedEvent.objects.filter(user=user))
        ok_(SuggestedEvent.objects.filter(user=new))

        # Comments
        ok_(not Comment.objects.filter(user=user))
        ok_(Comment.objects.filter(user=new))

        # Discussion moderators
        ok_(new in discussion.moderators.all())
        ok_(user not in discussion.moderators.all())

        # Suggested Discussion moderators
        ok_(new in sug_discussion.moderators.all())
        ok_(user not in sug_discussion.moderators.all())

        # Unsubscriptions
        ok_(not Unsubscription.objects.filter(user=user))
        ok_(Unsubscription.objects.filter(user=new))

        # Suggested event
        ok_(not SuggestedEvent.objects.filter(user=user))
        ok_(SuggestedEvent.objects.filter(user=new))

        # Closed captions
        ok_(not ClosedCaptions.objects.filter(created_user=user))
        ok_(ClosedCaptions.objects.filter(created_user=new))

        # Rev orders
        ok_(not RevOrder.objects.filter(created_user=user))
        ok_(RevOrder.objects.filter(created_user=new))

        # Event revisions
        ok_(not EventRevision.objects.filter(user=user))
        ok_(EventRevision.objects.filter(user=new))

        # Event assignments
        ok_(new in assignment.users.all())
        ok_(user not in assignment.users.all())

        # Suggested event comments
        ok_(not SuggestedEventComment.objects.filter(user=user))
        ok_(SuggestedEventComment.objects.filter(user=new))

        # Event tweets
        ok_(not EventTweet.objects.filter(creator=user))
        ok_(EventTweet.objects.filter(creator=new))

        # Approvals
        ok_(not Approval.objects.filter(user=user))
        ok_(Approval.objects.filter(user=new))

        # Pictures
        ok_(not Picture.objects.filter(modified_user=user))
        ok_(Picture.objects.filter(modified_user=new))

        # Chapters
        ok_(not Chapter.objects.filter(user=user))
        ok_(Chapter.objects.filter(user=new))

        # Logged search
        ok_(not LoggedSearch.objects.filter(user=user))
        ok_(LoggedSearch.objects.filter(user=new))

        # Saved search
        ok_(not SavedSearch.objects.filter(user=user))
        ok_(SavedSearch.objects.filter(user=new))

        # Starred event
        ok_(not StarredEvent.objects.filter(user=user))
        ok_(StarredEvent.objects.filter(user=new))

        # (survey) Answers
        ok_(not Answer.objects.filter(user=user))
        ok_(Answer.objects.filter(user=new))

        # Uploads
        ok_(not Upload.objects.filter(user=user))
        ok_(Upload.objects.filter(user=new))

Example 103

Project: django-mail-queue Source File: test_messages.py
Function: setup_files
    def _setUp_files(self):
        self.large_file = File(open(os.path.join(self.TEST_ROOT, "attachments", "big.pdf"), "rb"))
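
A caveat with Example 103: it opens the PDF and never closes the handle. A variant that cleans up after itself, assuming the fixture lives on a unittest.TestCase (so addCleanup is available), might look like this sketch:

    def _setUp_files(self):
        fp = open(os.path.join(self.TEST_ROOT, "attachments", "big.pdf"), "rb")
        self.addCleanup(fp.close)  # close the underlying handle once the test finishes
        self.large_file = File(fp)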

Example 104

Project: hellolily Source File: message.py
    def _create_attachment(self, part, headers):
        """
        Create an attachment for the given part

        Args:
            part (dict): with attachment info
            headers (dict): headers for message part

        Raises:
            Attachment exception if attachment couldn't be created
        """
        logger.debug('Storing attachment for message %s' % self.message.message_id)
        headers = {name.lower(): value for name, value in headers.iteritems()}

        # Check if attachment is inline
        inline = False
        if headers and headers.get('content-id', False):
            inline = True

        # Get file data from part or from remote
        if 'data' in part['body']:
            file_data = part['body']['data']
        elif 'attachmentId' in part['body']:
            file_data = self.manager.get_attachment(self.message.message_id, part['body']['attachmentId'])
            if file_data:
                file_data = file_data.get('data')
            else:
                logger.warning('No attachment could be downloaded, not storing anything')
                return
        else:
            logger.warning('No attachment, not storing anything')
            return

        file_data = base64.urlsafe_b64decode(file_data.encode('UTF-8'))

        # create as string file
        file = StringIO.StringIO(file_data)
        if headers and 'content-type' in headers:
            file.content_type = headers['content-type'].split(';')[0]
        else:
            file.content_type = 'application/octet-stream'

        file.size = len(file_data)
        file.name = part.get('filename', '').rsplit('\\')[-1]
        if len(file.name) > 200:
            file.name = None

        # No filename in part, create a name
        if not file.name:
            extensions = get_extensions_for_type(file.content_type)
            if part.get('partId'):
                file.name = 'attachment-%s%s' % (part.get('partId'), extensions.next())
            else:
                logger.warning('No part id, no filename')
                file.name = 'attachment-%s-%s' % (
                    len(self.attachments) + len(self.inline_attachments),
                    extensions.next()
                )

        final_file = File(file, file.name)

        # Create an EmailAttachment object
        attachment = EmailAttachment()
        attachment.attachment = final_file
        attachment.size = file.size
        attachment.inline = inline
        attachment.tenant_id = self.manager.email_account.tenant_id

        # Check if inline attachment
        if inline:
            attachment.cid = headers.get('content-id')

        self.attachments.append(attachment)

Example 105

Project: akvo-rsr Source File: links.py
    def do_import(self):
        """
        Retrieve and store the current image, as well as the image caption and credit.
        The image will be extracted from the 'url' attribute of the first 'document-link'
        element whose file has one of the extensions in VALID_IMAGE_EXTENSIONS. If an image
        is successfully retrieved, the image caption will be based on the underlying 'title'
        element and the image credit on the akvo photo-credit attribute of the
        'document-link' element.

        :return: List; contains fields that have changed
        """

        changes = []
        image_meta_changes = []

        for document_link_element in self.parent_elem.findall('document-link'):
            url = self.get_attrib(document_link_element, 'url', 'current_image')
            if url:
                filename, extension = file_info_from_url(url)
                if extension not in VALID_IMAGE_EXTENSIONS:
                    continue
                # get content length of uncompressed cargo
                header_query = requests.head(url, headers={'Accept-Encoding': 'identity'})
                content_length = int(header_query.headers.get('content-length', '0'))
                # If we have no image or the size of the image URL differs from the stored one,
                # we go get. This _may_ in unlucky cases lead to a new image not being fetched.
                # TODO: add a timestamp to the image for better comparison criteria
                if not self.project.current_image or (
                        self.project.current_image.size != content_length):
                    request = requests.get(url, stream=True)
                    if request.status_code == 200:
                        tmp_file = NamedTemporaryFile()
                        for chunk in request.iter_content(1024):
                            if not chunk:
                                break
                            tmp_file.write(chunk)
                        tmp_file.flush()
                        self.project.current_image.save(filename, File(tmp_file))
                        changes.append('current_image')
                    else:
                        self.add_log('document-link', 'current_image',
                                     'Error trying to fetch image: {}'.format(url))

                current_image_caption = self.get_child_element_text(
                        document_link_element, 'title', 'current_image_caption')
                if current_image_caption:
                    self.project.current_image_caption = current_image_caption
                    image_meta_changes.append('current_image_caption')
                current_image_credit = self.get_attrib(
                        document_link_element, akvo_ns('photo-credit'),
                        'current_image_credit')
                if current_image_credit:
                    self.project.current_image_credit = current_image_credit
                    image_meta_changes.append('current_image_credit')
                if image_meta_changes:
                    self.project.save(update_fields=image_meta_changes)

        return changes + image_meta_changes

Example 106

Project: storybase Source File: __init__.py
    def test_is_file_true(self):
        f = File(sys.stdout)
        self.assertTrue(is_file(f))
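
Example 106 illustrates that File accepts any file-like object, not just the result of open(). A minimal sketch of the same idea with an in-memory buffer (io.BytesIO here is an assumption; the original test wraps sys.stdout):

import io
from django.core.files import File

buf = io.BytesIO(b"some bytes")
f = File(buf, name="buffer.bin")
# File falls back to seek()/tell() to compute size when the wrapped
# object has no size attribute of its own.
print(f.size)  # 10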

Example 107

Project: libravatar Source File: views.py
@transaction.atomic
@login_required
def successfully_authenticated(request):
    if request.user.ldap_user:
        try:
            confirmed = ConfirmedEmail.objects.get(email=request.user.email)
        except ConfirmedEmail.DoesNotExist:
            confirmed = ConfirmedEmail()
            confirmed.user = request.user
            confirmed.email = request.user.email
            confirmed.save()

            # remove unconfirmed email address if necessary
            try:
                unconfirmed = UnconfirmedEmail.objects.get(email=request.user.email)
                unconfirmed.delete()
            except UnconfirmedEmail.DoesNotExist:
                pass

            # add photo to database, bung LDAP photo into the expected file
            photo_contents = request.user.ldap_user.attrs[settings.AUTH_LDAP_USER_PHOTO][0]
            file_ptr = StringIO(photo_contents)  # file pointer to in-memory string buffer
            image = File(file_ptr)
            photo = Photo()
            photo.user = request.user
            photo.save(image)
            return HttpResponseRedirect(reverse('libravatar.account.views.crop_photo', args=[photo.id]))

    return HttpResponseRedirect(reverse('libravatar.account.views.profile'))

Example 108

Project: django-admin-cli Source File: cli.py
Function: add
    def _add(self, modeladmin, fields, filefields):
        """
        Create a model instance from the given field values and files.

        :param modeladmin: ModelAdmin of model
        :type modeladmin: :class:`admin.ModelAdmin`

        :param fields: Fields to define
        :type fields: ``dict``

        :param filefields: File fields to update
        :type filefields: ``dict``

        :raises CommandError: If data is invalid or files cannot be found
        """
        instance = modeladmin.model()
        initial = dict([
            (field.name, getattr(instance, field.name))
            for field in modeladmin.model._meta.fields
        ])
        used_fields = dict([f.split('=') for f in fields])
        data = initial.copy()
        data.update(used_fields)
        for field_name, value in data.items():
            try:
                modelfield = modeladmin.model._meta.get_field(field_name)
            except models.FieldDoesNotExist:
                continue
            if isinstance(modelfield, models.ManyToManyField):
                data[field_name] = value.split(',')
        files = {}
        for filename, path in filefields.items():
            try:
                files[filename] = File(open(path, 'rb'))
            except IOError as err:
                raise CommandError(err.args[0])
        form_class = modeladmin.add_form if hasattr(modeladmin, 'add_form') \
            else modeladmin.get_form(FALSE_REQ)
        form = form_class(data=data, files=files)
        if form.is_valid():
            obj = form.save()
            self.stdout.write("Created '%s'" % obj)
        else:
            error_msg = '\n'+'\n'.join([
                ('%s: %s' % (field, err))
                for field in form.errors
                for err in form.errors[field]
            ])
            raise CommandError(error_msg)

Example 109

Project: pombola Source File: south_africa_import_scraped_photos.py
Function: handle_label
    def handle_label(self, path, **options):

        matched = 0
        unmatched = 0

        for filename in os.listdir(path):

            # Strip out the .jpg
            name = re.sub('\.jpg', '', filename)

            # Strip any non-alpha trailing characters
            name = re.sub('[^a-zA-Z]*$', '', name)

            # Strip any more trailing whitespace that may have snuck in
            name = name.strip()

            # Make the name unicode so we can actually work with it in the DB
            name = unicode(name)

            # Slice the name into two
            name = name.split('_')

            if len(name) == 2:

                # Match up the person
                person = match_person(name[1] + ' ' + name[0])

                if person is None:
                    print BRIGHT + 'Unable to match "' + filename + '" to a person!'+ ENDC
                    unmatched += 1
                else:
                    print 'Matched ' + person.name.encode('utf-8')

                    Image.objects.create(
                        object_id=person.id,
                        content_type=self.content_type_person,
                        is_primary=True,
                        source='http://www.parliament.gov.za',
                        image=File(open(os.path.join(path, filename), 'rb'))
                    )

                    matched += 1

            else:

                # This name doesn't have two bits, complain.
                print BRIGHT + '"' + filename + '" does not parse to a first and last name.'+ ENDC
                unmatched += 1

        print 'Done! Matched ' + str(matched) + ', failed to match ' + str(unmatched)

Example 110

Project: django-calaccess-raw-data Source File: cleancalaccessrawfile.py
    def clean(self):
        """
        Cleans the provided source TSV file and writes it out in CSV format.
        """
        # Up the CSV data limit
        csv.field_size_limit(1000000000)

        # Input and output paths
        tsv_path = os.path.join(self.tsv_dir, self.file_name)
        csv_path = os.path.join(
            self.csv_dir,
            self.file_name.lower().replace("tsv", "csv")
        )

        # Reader
        tsv_file = open(tsv_path, 'rbU')

        # Writer
        csv_file = open(csv_path, 'w')
        csv_writer = CSVKitWriter(csv_file)

        # Pull and clean the headers
        try:
            headers = tsv_file.readline()
        except StopIteration:
            return
        headers = headers.decode("ascii", "replace")
        headers_csv = CSVKitReader(StringIO(headers), delimiter=str('\t'))
        try:
            headers_list = next(headers_csv)
        except StopIteration:
            return
        headers_count = len(headers_list)
        csv_writer.writerow(headers_list)

        log_rows = []

        # Loop through the rest of the data
        line_number = 1
        for tsv_line in tsv_file:
            line_number += 1
            # Log empty lines, then skip
            if (
                tsv_line.decode("ascii", "replace") == '\n' or
                tsv_line.decode("ascii", "replace") == '\r\n'
            ):
                if self.verbosity > 2:
                    msg = '  Line %s is empty'
                    self.failure(msg % (
                        line_number,
                    ))
                continue

            # Goofing around with the encoding while we're in there.
            tsv_line = tsv_line.decode("ascii", "replace")
            if six.PY2:
                tsv_line = tsv_line.replace('\ufffd', '?')

            # Nuke any null bytes
            null_bytes = tsv_line.count('\x00')
            if null_bytes:
                tsv_line = tsv_line.replace('\x00', ' ')

            # Nuke ASCII 26 char, the "substitute character"
            # or chr(26) in python
            sub_char = tsv_line.count('\x1a')
            if sub_char:
                tsv_line = tsv_line.replace('\x1a', '')

            # Remove any extra newline chars
            tsv_line = tsv_line.replace("\r\n", "").replace("\r", "").replace("\n", "")

            # Split on tabs so we can later spit it back out as CSV
            csv_field_list = tsv_line.split("\t")

            # Check if our values line up with our headers
            # and if not, see if CSVkit can sort out the problems
            if not len(csv_field_list) == headers_count:
                csv_field_list = next(CSVKitReader(
                    StringIO(tsv_line),
                    delimiter=str('\t')
                ))

                if not len(csv_field_list) == headers_count:
                    if self.verbosity > 2:
                        msg = '  Bad parse of line %s (%s headers, %s values)'
                        self.failure(msg % (
                            line_number,
                            len(headers_list),
                            len(csv_field_list)
                        ))
                    log_rows.append([
                        line_number,
                        len(headers_list),
                        len(csv_field_list),
                        ','.join(csv_field_list)
                    ])
                    continue

            # Write out the row
            csv_writer.writerow(csv_field_list)

        # Log errors if there are any
        if log_rows:
            if self.verbosity > 1:
                msg = '  %s errors logged (not including empty lines)'
                self.failure(msg % (len(log_rows)))
            self.log_errors(log_rows)

        # Add counts to raw_file_record
        self.raw_file.download_columns_count = headers_count
        self.raw_file.download_records_count = line_number - 1
        self.raw_file.clean_columns_count = headers_count
        self.raw_file.clean_records_count = line_number - 1 - len(log_rows)
        self.raw_file.error_count = len(log_rows)

        # Shut it down
        tsv_file.close()
        csv_file.close()

        # Add file size to the raw_file_record
        self.raw_file.download_file_size = os.path.getsize(tsv_path) or 0
        self.raw_file.clean_file_size = os.path.getsize(csv_path) or 0

        if getattr(settings, 'CALACCESS_STORE_ARCHIVE', False):
            if self.verbosity > 2:
                self.log(" Archiving {0}".format(os.path.basename(csv_path)))
            # Remove previous .CSV and error log files
            self.raw_file.clean_file_archive.delete()
            self.raw_file.error_log_archive.delete()

            # Open up the .CSV file so we can wrap it in the Django File obj
            with open(csv_path, 'rb') as csv_file:
                # Save the .CSV on the raw data file
                self.raw_file.clean_file_archive.save(
                    self.file_name.lower().replace("tsv", "csv"),
                    File(csv_file),
                )
            # if there are any errors, archive the log too
            if log_rows:
                if self.verbosity > 2:
                    self.log(" Archiving {0}".format(
                        os.path.basename(self.error_log_path)
                    ))
                with open(self.error_log_path, 'rb') as error_file:
                    self.raw_file.error_log_archive.save(
                        os.path.basename(self.error_log_path),
                        File(error_file),
                    )

Example 111

Project: django-raster Source File: parser.py
    def reproject_rasterfile(self):
        """
        Reproject the rasterfile into web mercator.
        """
        # Return if reprojected rasterfile already exists.
        if hasattr(self.rasterlayer, 'reprojected') and self.rasterlayer.reprojected.rasterfile.name:
            return

        # Return if the raster already has the right projection
        # and nodata value is acceptable.
        if self.dataset.srs.srid == WEB_MERCATOR_SRID:
            # SRID was not manually specified.
            if self.rasterlayer.nodata in ('', None):
                return
            # All bands from dataset already have the same nodata value as the
            # one that was manually specified.
            if all([self.rasterlayer.nodata == band.nodata_value
                    for band in self.dataset.bands]):
                return
        else:
            # Log projection change if original raster is not in web mercator.
            self.log(
                'Transforming raster to SRID {0}'.format(WEB_MERCATOR_SRID),
                status=self.rasterlayer.parsestatus.REPROJECTING_RASTER,
            )

        # Reproject the dataset.
        self.dataset = self.dataset.transform(
            WEB_MERCATOR_SRID,
            driver=INTERMEDIATE_RASTER_FORMAT,
        )

        # Manually override nodata value if necessary
        if self.rasterlayer.nodata not in ('', None):
            self.log(
                'Setting no data values to {0}.'.format(self.rasterlayer.nodata),
                status=self.rasterlayer.parsestatus.REPROJECTING_RASTER,
            )
            for band in self.dataset.bands:
                band.nodata_value = float(self.rasterlayer.nodata)

        # Compress reprojected raster file and store it
        if self.rasterlayer.store_reprojected:
            dest = tempfile.NamedTemporaryFile(dir=self.tmpdir, suffix='.zip')
            dest_zip = zipfile.ZipFile(dest.name, 'w', allowZip64=True)
            dest_zip.write(
                filename=self.dataset.name,
                arcname=os.path.basename(self.dataset.name),
                compress_type=zipfile.ZIP_DEFLATED,
            )
            dest_zip.close()

            # Store zip file in reprojected raster model
            self.rasterlayer.reprojected.rasterfile = File(
                open(dest_zip.filename, 'rb'),
                name=os.path.basename(dest_zip.filename)
            )
            self.rasterlayer.reprojected.save()

        self.log('Finished transforming raster.')

Example 112

Project: filepicker-django Source File: utils.py
Function: get_file
    def get_file(self, additional_params=None):
        '''
        Downloads the file from filepicker.io and returns a
        Django File wrapper object.
        additional_params should include key/values such as:
        {
          'data-fp-signature': HEXDIGEST,
          'data-fp-policy': HEXDIGEST,
        }
        (in other words, parameters should look like additional_params
        of the models)
        '''
        # clean up any old downloads that are still hanging around
        self.cleanup()

        # Fetch any fields possibly required for fetching files for reading.
        query_params = {}

        if additional_params:
            for field in ('policy','signature'):
                longfield = 'data-fp-{0}'.format(field)
                if longfield in additional_params:
                    query_params[field] = additional_params[longfield]

        # iterate through one or more file urls
        result = list()
        for url in self.url.split(","):
            # Append the fields as GET query parameters to the URL in data.
            r = requests.get(url, params=query_params, stream=True)
            header = r.headers
            name = ''
            disposition = header.get('Content-Disposition')
            if disposition:
                name = disposition.rpartition("filename=")[2].strip('" ')
            filename = header.get('X-File-Name')
            if filename:
                name = filename

            # Create a temporary file to save to it later
            tmp = tempfile.NamedTemporaryFile(mode='w+b')
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    tmp.write(chunk) # Write the chunk
                    tmp.flush()

            # Wrap the temporary file in a Django File object
            file = File(tmp, name=name)
            result.append(file)
        return result

Example 113

Project: votainteligente-portal-electoral Source File: incoming_mail_tests.py
Function: set_up
    def setUp(self):
        super(AnswerHandlerTestCase, self).setUp()
        self.photo_fiera = File(open(__attachrments_dir__ + "fiera_parque.jpg", 'rb'))
        self.pdf_file = File(open(__attachrments_dir__ + "hello.pd.pdf", 'rb'))

Example 114

Project: hydroshare Source File: spatialite.py
    @classmethod
    def join_data_with_existing_geometry(
            cls, title, parent_dataresource,
            new_data, join_field_in_existing_data, join_field_in_new_data,
            parent=None, geometry_column_name='GEOMETRY', srid=4326, geometry_type='GEOMETRY', owner=None):
        from ga_resources.models import DataResource
        from uuid import uuid4

        parent_dataresource.resource.ready_data_resource()
        pconn = parent_dataresource.resource._connection() # FIXME assumes the spatialite driver for the parent, but much faster
        c = pconn.cursor()
        c.execute('select OGC_FID, AsBinary(Transform({geom}, {srid})), {join_field_in_existing_data} from {table}'.format(
            geom=parent_dataresource.resource._geometry_field,
            table=parent_dataresource.resource._table_name,
            srid=srid,
            join_field_in_existing_data=join_field_in_existing_data))
        records = c.fetchall()

        filename = os.path.join('/tmp', uuid4().hex + '.sqlite')
        conn = db.connect(filename)
        conn.enable_load_extension(True)
        conn.execute("select load_extension('libspatialite.so')")
        conn.executescript("""
                    select initspatialmetadata();
                    create table layer (
                        OGC_FID INTEGER PRIMARY KEY
                    );
                    select AddGeometryColumn('layer', '{geometry_column_name}', {srid}, '{geometry_type}', 2, 1);
                    select CreateSpatialIndex('layer','{geometry_column_name}');
                    create index layer_{join_field_in_existing_data} on layer ({join_field_in_existing_data});
                """.format(**locals()))


        conn.executemany('insert into layer (OGC_FID, {geometry_column_name}, {join_field_in_existing_data}) values (?, GeomFromWKB(?, {srid}), ?)'.format(**locals()), records)
        conn.commit()

        for column in new_data.keys():
            if new_data[column].dtype == numpy.float64:
                datatype = 'REAL'
            elif new_data[column].dtype == numpy.int64:
                datatype = 'INTEGER'
            else:
                datatype = 'TEXT'

            conn.execute(
                'alter table layer add column {column} {datatype}'.format(column=column, datatype=datatype))

        columns = list(new_data.keys())
        conn.executemany("""
            UPDATE layer SET
            {columns}
            WHERE {join_field_in_existing_data}=?
        """.format(
            columns=','.join(k + '=?' for k in columns),
            join_field_in_existing_data=join_field_in_existing_data
        ), [[r[c] for c in columns] + [r[join_field_in_new_data]] for _, r in new_data.iterrows()] )

        conn.close()

        ds = DataResource.objects.create(
            title=title,
            parent=parent,
            driver='ga_resources.drivers.spatialite',
            resource_file=File(open(filename, 'rb'), filename),
            in_menus=[],
            owner=owner
        )
        ds.resource.compute_fields()
        os.unlink(filename)
        return ds

Example 115

Project: Wooey Source File: tasks.py
@celery_app.task(base=WooeyTask)
def submit_script(**kwargs):
    job_id = kwargs.pop('wooey_job')
    resubmit = kwargs.pop('wooey_resubmit', False)
    from .backend import utils
    from .models import WooeyJob, UserFile
    job = WooeyJob.objects.get(pk=job_id)

    command = utils.get_job_commands(job=job)
    if resubmit:
        # clone ourselves, setting pk=None seems hackish but it works
        job.pk = None

    # This is the directory the script runs from. It is stored relative to MEDIA_ROOT,
    # since the media root (and with it, where user uploads are stored) may change between setups.
    cwd = job.get_output_path()

    abscwd = os.path.abspath(os.path.join(settings.MEDIA_ROOT, cwd))
    job.command = ' '.join(command)
    job.save_path = cwd

    utils.mkdirs(abscwd)
    # make sure we have the script, otherwise download it. This can happen if we have an ephemeral file system or are
    # executing jobs on a worker node.
    script_path = job.script_version.script_path
    if not utils.get_storage(local=True).exists(script_path.path):
        utils.get_storage(local=True).save(script_path.path, script_path.file)

    job.status = WooeyJob.RUNNING
    job.save()

    stdout, stderr = '', ''
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=abscwd, bufsize=0)

    # We need to use subprocesses to capture the IO, otherwise they will block one another
    # i.e. a check against stderr will sit waiting on stderr before returning
    # we use Queues to communicate
    qout, qerr = Queue(), Queue()
    pout = output_monitor_queue(qout, proc.stdout)
    perr = output_monitor_queue(qerr, proc.stderr)

    prev_std = None

    def check_output(job, stdout, stderr, prev_std):
        # Check for updates from either (non-blocking)
        stdout = update_from_output_queue(qout, stdout)
        stderr = update_from_output_queue(qerr, stderr)

        # If there are changes, update the db
        if (stdout, stderr) != prev_std:
            job.update_realtime(stdout=stdout, stderr=stderr)
            prev_std = (stdout, stderr)

        return stdout, stderr, prev_std

    # Loop until the process is complete + both stdout/stderr have EOFd
    while proc.poll() is None or pout.is_alive() or perr.is_alive():
        stdout, stderr, prev_std = check_output(job, stdout, stderr, prev_std)

    # Catch any remaining output
    try:
        proc.stdout.flush()
    except ValueError:  # Handle if stdout is closed
        pass
    stdout, stderr, prev_std = check_output(job, stdout, stderr, prev_std)

    # tar/zip up the generated content for bulk downloads
    def get_valid_file(cwd, name, ext):
        out = os.path.join(cwd, name)
        index = 0
        while os.path.exists(six.u('{}.{}').format(out, ext)):
            index += 1
            out = os.path.join(cwd, six.u('{}_{}').format(name, index))
        return six.u('{}.{}').format(out, ext)

    # fetch the job again in case the database connection was lost during the job or something else changed.
    job = WooeyJob.objects.get(pk=job_id)
    # if there are files generated, make zip/tar files for download
    if len(os.listdir(abscwd)):
        tar_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'tar.gz')
        tar = tarfile.open(tar_out, "w:gz")
        tar_name = os.path.splitext(os.path.splitext(os.path.split(tar_out)[1])[0])[0]
        tar.add(abscwd, arcname=tar_name)
        tar.close()

        zip_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'zip')
        zip = zipfile.ZipFile(zip_out, "w")
        arcname = os.path.splitext(os.path.split(zip_out)[1])[0]
        zip.write(abscwd, arcname=arcname)
        for root, folders, filenames in os.walk(os.path.split(zip_out)[0]):
            for filename in filenames:
                path = os.path.join(root, filename)
                if path == tar_out:
                    continue
                if path == zip_out:
                    continue
                try:
                    zip.write(path, arcname=os.path.join(arcname, filename))
                except:
                    stderr = '{}\n{}'.format(stderr, traceback.format_exc())
        try:
            zip.close()
        except:
            stderr = '{}\n{}'.format(stderr, traceback.format_exc())

        # save all the files generated as well to our default storage for ephemeral storage setups
        if wooey_settings.WOOEY_EPHEMERAL_FILES:
            for root, folders, files in os.walk(abscwd):
                for filename in files:
                    filepath = os.path.join(root, filename)
                    s3path = os.path.join(root[root.find(cwd):], filename)
                    remote = utils.get_storage(local=False)
                    exists = remote.exists(s3path)
                    filesize = remote.size(s3path) if exists else 0
                    if not exists or (exists and filesize == 0):
                        if exists:
                            remote.delete(s3path)
                        remote.save(s3path, File(open(filepath, 'rb')))
    utils.create_job_fileinfo(job)

    job.stdout = stdout
    job.stderr = stderr
    job.status = WooeyJob.COMPLETED
    job.update_realtime(delete=True)
    job.save()

    return (stdout, stderr)

Example 116

Project: ionyweb Source File: functions.py
def version_generator(value, version_prefix, root, force=None):
    """
    Generate Version for an Image.
    value has to be a serverpath relative to MEDIA_ROOT.
    """
    
    # Workaround for PIL's "Suspension not allowed here" error:
    # see http://mail.python.org/pipermail/image-sig/1999-August/000816.html
    try:
        from PIL import ImageFile
    except ImportError:
        import ImageFile
    ImageFile.MAXBLOCK = settings.IMAGE_MAXBLOCK # default is 64k


    if storage.exists(value):
        tmpfile = File(NamedTemporaryFile())
        try:
            orig_file = storage.open(value)
            im = Image.open(orig_file)
            version_path = get_version_path(value, version_prefix, root)
            path, version_basename = os.path.split(version_path)
            root, ext = os.path.splitext(version_basename)
            version = scale_and_crop(im, settings.VERSIONS[version_prefix]['width'], 
                                     settings.VERSIONS[version_prefix]['height'], 
                                     settings.VERSIONS[version_prefix]['opts'])
            if not version:
                version = im
            if 'methods' in settings.VERSIONS[version_prefix].keys():
                for method in settings.VERSIONS[version_prefix]['methods']:
                    if callable(method):
                        version = method(version)
            try:
                version.save(tmpfile, format=Image.EXTENSION[ext], 
                             quality=settings.VERSION_QUALITY, 
                             optimize=(os.path.splitext(version_path)[1].lower() != '.gif'))
            except IOError:
                version.save(tmpfile, format=Image.EXTENSION[ext], quality=settings.VERSION_QUALITY)
            # Remove the old version, if there's any
            if version_path != storage.get_available_name(version_path):
                storage.delete(version_path)
            storage.save(version_path, tmpfile)
            return version_path
        except:
            raise
        finally:
            tmpfile.close()
            try:
                orig_file.close()
            except:
                pass
    return None

Example 117

Project: djangogirls Source File: fetch_stories.py
Function: handle
    def handle(self, *args, **options):

        rss_url = 'http://blog.djangogirls.org/rss'

        response = requests.get(rss_url)
        rss = ElementTree.fromstring(response.content)

        for post in rss.iter('item'):
            title = post.find('title').text
            if 'Your Django Story: Meet' in title:
                name = title.replace('Your Django Story: Meet ', '')
                is_story = True
            else:
                name = title
                is_story = False

            if not Story.objects.filter(name=name).exists():
                post_url = post.find('link').text
                post = pq(post.find('description').text)
                image_url = post('img').attr.src
                story = Story(name=name, post_url=post_url, content=post,
                              is_story=is_story)

                if image_url:
                    img = NamedTemporaryFile(delete=True)
                    img.write(urlopen(image_url).read())
                    img.flush()
                    story.image.save(image_url.split('/')[-1], File(img))

                story.save()

                if is_story:
                    print('Story of %s has been fetched' % name)
                else:
                    print('Blogpost "%s" has been fetched' % name)

Example 118

Project: airmozilla Source File: test_views.py
    def test_download_from_dfxp(self):
        filepath = os.path.join(TEST_DIRECTORY, 'example.dfxp')
        with open(filepath) as f:
            item = ClosedCaptions.objects.create(
                event=self.event,
                file=File(f),
            )

        # txt
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'txt'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'text/plain')
        ok_(response.content.startswith('Language: en-US\n'))

        # dfxp
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'dfxp'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'application/ttml+xml; charset=utf-8')
        ok_(response.content.startswith(
            '<?xml version="1.0" encoding="utf-8"?>\n<tt'
        ))

        # srt
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'srt'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'text/plain')
        ok_(response.content.startswith(
            '1\n00:00:00,983 -->'
        ))

        # vtt
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'vtt'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'text/vtt')
        ok_(response.content.startswith(
            'WEBVTT\n\n00:00.983 -->'
        ))

Example 119

Project: fileconveyor Source File: transporter.py
Function: run
    def run(self):
        while not self.die:
            # Sleep a little bit if there's no work.
            if self.queue.qsize() == 0:
                time.sleep(0.5)
            else:
                self.lock.acquire()
                (src, dst, action, callback, error_callback) = self.queue.get()
                self.lock.release()

                self.logger.debug("Running the transporter '%s' to sync '%s'." % (self.name, src))
                try:
                    # Sync the file: either add/modify it, or delete it.
                    if action == Transporter.ADD_MODIFY:
                        # Sync the file.
                        f = File(open(src, "rb"))
                        if self.storage.exists(dst):
                            self.storage.delete(dst)
                        self.storage.save(dst, f)
                        f.close()
                        # Calculate the URL.
                        url = self.storage.url(dst)
                        url = self.alter_url(url)
                    else:
                        if self.storage.exists(dst):
                            self.storage.delete(dst)
                        url = None

                    self.logger.debug("The transporter '%s' has synced '%s'." % (self.name, src))

                    # Call the callback function. Use the callback function
                    # defined for this Transporter (self.callback), unless
                    # an alternative one was defined for this file (callback).
                    if callback is not None:
                        callback(src, dst, url, action)
                    else:
                        self.callback(src, dst, url, action)

                except Exception, e:
                    self.logger.error("The transporter '%s' has failed while transporting the file '%s' (action: %d). Error: '%s'." % (self.name, src, action, e))

                    # Call the error_callback function. Use the error_callback
                    # function defined for this Transporter
                    # (self.error_callback), unless an alternative one was
                    # defined for this file (error_callback).
                    if error_callback is not None:
                        error_callback(src, dst, action)
                    else:
                        self.error_callback(src, dst, action)

Example 120

Project: pombola Source File: import_people_from_json.py
def process(filename):
    data = json.loads( open(filename, 'r').read() )
    # pprint.pprint( data )
    print "%s (%s) - %s" % (data['name'], data['slug'], filename)

    slug = data['slug']
    
    try:
        person = models.Person.objects.get(slug=slug)
        return # don't try to update the person        
    except models.Person.DoesNotExist:
        person = models.Person(slug=slug)

    person.legal_name    = data['name']
    person.summary       = data['summary']
    person.date_of_birth = data['date_of_birth']

    person.save()
    
    content_type = ContentType.objects.get_for_model(person)
    
    if data.get('profile_url'):
        models.Contact.objects.get_or_create(
            content_type = content_type,
            object_id    = person.id,
            value        = re.sub('\s', '%20', data['profile_url'] ),
            kind         = profile_url_kind,
        )
    
    if data.get('email'):
        models.Contact.objects.get_or_create(
            content_type = content_type,
            object_id    = person.id,
            value        = data['email'],
            kind         = email_kind,
        )

    # import image
    if data.get('image') and 'img_not_found' not in data['image']:

        image_url = re.sub(r'\s', '%20', data['image'])

        photo, created = Image.objects.get_or_create(
            content_type = content_type,
            object_id    = person.id,
            source       = image_url,
        )

        if created:

            print "  Fetching " + image_url
            try:
                img_temp = NamedTemporaryFile(delete=True)
                img_temp.write( urllib2.urlopen(image_url).read() )
                img_temp.flush()
                
                photo.image.save( person.slug, File(img_temp) )
                photo.save()
            except urllib2.HTTPError:
                print "  ...failed!"

Example 121

Project: django-machina Source File: test_fields.py
Function: set_up
    @pytest.yield_fixture(autouse=True)
    def setup(self):
        # Set up some images used for doing image tests
        images_dict = {}

        # Fetch an image aimed to be resized
        f = open(settings.MEDIA_ROOT + "/to_be_resized_image.png", "rb")
        images_dict['to_be_resized_image'] = File(f)

        # Fetch a big image
        f = open(settings.MEDIA_ROOT + "/too_large_image.jpg", "rb")
        images_dict['too_large_image'] = File(f)

        # Fetch a wide image
        f = open(settings.MEDIA_ROOT + "/too_wide_image.jpg", "rb")
        images_dict['too_wide_image'] = File(f)

        # Fetch a high image
        f = open(settings.MEDIA_ROOT + "/too_high_image.jpg", "rb")
        images_dict['too_high_image'] = File(f)

        self.images_dict = images_dict

        yield

        # teardown
        # --

        for img in self.images_dict.values():
            img.close()
        tests = DummyModel.objects.all()
        for test in tests:
            try:
                test.resized_image.delete()
            except:
                pass
            try:
                test.validated_image.delete()
            except:
                pass

Example 122

Project: akvo-rsr Source File: links.py
    def do_import(self):
        """
        :return: List; contains fields that have changed
        """
        from . import same_data

        changes = []

        photo_id = self.get_attrib(self.parent_elem, akvo_ns('photo-id'), 'current_image')
        current_image = file_from_zip_archive(
                self.iati_import_job.iati_xml_file, "out_proj/{}.jpg".format(photo_id))
        if current_image:
            tmp_file = NamedTemporaryFile()
            for line in current_image.readlines():
                tmp_file.write(line)
            tmp_file.flush()
            # update current image if it's different from the existing one
            try:
                old_file = self.project.current_image.file
            except (IOError, ValueError):
                old_file = None
            new_file = File(tmp_file)
            if not same_data(old_file, new_file):
                filename = model_and_instance_based_filename(
                        'Project', self.project.pk, 'current_image', 'image.jpg')
                new_file.seek(0)
                self.project.current_image.save(filename, new_file)
                changes += ['current_image']

        current_image_caption = self.get_attrib(
                self.parent_elem, akvo_ns('image-caption'), 'current_image_caption')
        if current_image_caption:
            changes += self.update_project_field('current_image_caption', current_image_caption)

        current_image_credit = self.get_attrib(
                self.parent_elem, akvo_ns('photo-credit'), 'current_image_credit')
        if current_image_credit:
            changes += self.update_project_field('current_image_credit', current_image_credit)

        return changes

Example 123

Project: Wooey Source File: utils.py
def create_job_fileinfo(job):
    parameters = job.get_parameters()
    from ..models import WooeyFile, UserFile
    # first, create references to files the script explicitly created via its parameters
    files = []
    local_storage = get_storage(local=True)
    for field in parameters:
        try:
            if field.parameter.form_field == 'FileField':
                value = field.value
                if value is None:
                    continue
                if isinstance(value, six.string_types):
                    # check if this was ever created and make a fileobject if so
                    if local_storage.exists(value):
                        if not get_storage(local=False).exists(value):
                            get_storage(local=False).save(value, File(local_storage.open(value)))
                        value = field.value
                    else:
                        field.force_value(None)
                        try:
                            with transaction.atomic():
                                field.save()
                        except:
                            sys.stderr.write('{}\n'.format(traceback.format_exc()))
                        continue
                d = {'parameter': field, 'file': value}
                if field.parameter.is_output:
                    full_path = os.path.join(job.save_path, os.path.split(local_storage.path(value))[1])
                    checksum = get_checksum(value, extra=[job.pk, full_path, 'output'])
                    d['checksum'] = checksum
                files.append(d)
        except ValueError:
            continue

    known_files = {i['file'].name for i in files}
    # add the user_output files; these may be missed by the model fields
    # because the script generated them without an explicit argument reference
    file_groups = {'archives': []}
    absbase = os.path.join(settings.MEDIA_ROOT, job.save_path)
    for root, dirs, dir_files in os.walk(absbase):
        for filename in dir_files:
            new_name = os.path.join(job.save_path, filename)
            if any([i.endswith(new_name) for i in known_files]):
                continue
            try:
                filepath = os.path.join(root, filename)
                if os.path.isdir(filepath):
                    continue
                full_path = os.path.join(job.save_path, filename)
                # this is to make the job output have a unique checksum. If this file is then re-uploaded, it will create
                # a new file to reference in the uploads directory and not link back to the job output.
                checksum = get_checksum(filepath, extra=[job.pk, full_path, 'output'])
                try:
                    storage_file = get_storage_object(full_path)
                except:
                    sys.stderr.write('Error in accessing stored file {}:\n{}'.format(full_path, traceback.format_exc()))
                    continue
                d = {'name': filename, 'file': storage_file, 'size_bytes': storage_file.size, 'checksum': checksum}
                if filename.endswith('.tar.gz') or filename.endswith('.zip'):
                    file_groups['archives'].append(d)
                else:
                    files.append(d)
            except IOError:
                sys.stderr.write('{}'.format(traceback.format_exc()))
                continue

    # establish grouping by inferring common things
    file_groups['all'] = files
    file_groups['image'] = []
    file_groups['tabular'] = []
    file_groups['fasta'] = []

    for filemodel in files:
        fileinfo = get_file_info(filemodel['file'].path)
        filetype = fileinfo.get('type')
        if filetype is not None:
            file_groups[filetype].append(dict(filemodel, **{'preview': fileinfo.get('preview')}))
        else:
            filemodel['preview'] = json.dumps(None)

    # Create our WooeyFile models

    # mark files that belong to a group so we don't also add them to the 'all' category (reduces redundancy)
    grouped = set([i['file'].path for file_type, groups in six.iteritems(file_groups) for i in groups if file_type != 'all'])
    for file_type, group_files in six.iteritems(file_groups):
        for group_file in group_files:
            if file_type == 'all' and group_file['file'].path in grouped:
                continue
            try:
                preview = group_file.get('preview')
                size_bytes = group_file.get('size_bytes')

                filepath = group_file['file'].path
                save_path = job.get_relative_path(filepath)
                parameter = group_file.get('parameter')

                # get the checksum of the file to see if we need to save it
                checksum = group_file.get('checksum', get_checksum(filepath))
                try:
                    wooey_file = WooeyFile.objects.get(checksum=checksum)
                    file_created = False
                except ObjectDoesNotExist:
                    wooey_file = WooeyFile(
                        checksum=checksum,
                        filetype=file_type,
                        filepreview=preview,
                        size_bytes=size_bytes,
                        filepath=save_path
                    )
                    file_created = True
                userfile_kwargs = {
                    'job': job,
                    'parameter': parameter,
                    'system_file': wooey_file,
                    'filename': os.path.split(filepath)[1]
                }
                try:
                    with transaction.atomic():
                        if file_created:
                            wooey_file.save()
                        job.save()
                        UserFile.objects.get_or_create(**userfile_kwargs)
                except:
                    sys.stderr.write('Error in saving DJFile: {}\n'.format(traceback.format_exc()))
            except:
                sys.stderr.write('Error in saving DJFile: {}\n'.format(traceback.format_exc()))
                continue
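
The local-to-remote handoff near the top of create_job_fileinfo is worth isolating: check a second storage backend for the file and, if it is missing, copy it over by wrapping the locally stored file in File. A sketch of just that step, assuming both arguments are ordinary Django storage backends (as Wooey's get_storage helper returns):

from django.core.files import File


def sync_to_remote(name, local_storage, remote_storage):
    # Copy a stored file between two Django storage backends,
    # skipping the upload when the remote copy already exists.
    if local_storage.exists(name) and not remote_storage.exists(name):
        with local_storage.open(name) as fh:
            remote_storage.save(name, File(fh))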

Example 124

Project: akvo-rsr Source File: test_iati_import.py
    def test_iati_cordaid_import(self):
        """
        Test an IATI import for Cordaid.
        """
        # Create business unit, Cordaid and Other organisations
        Organisation.objects.create(
            id=959,
            name="Cordaid business unit",
            long_name="Cordaid business unit",
            iati_org_id="NL-KVK-0987654321-business"
        )
        Organisation.objects.create(
            id=273,
            name="Cordaid",
            long_name="Cordaid",
            iati_org_id="NL-KVK-cordaid"
        )
        Organisation.objects.create(
            id=1653,
            name="Cordaid - Others",
            long_name="Cordaid - Others",
            iati_org_id="NL-KVK-others"
        )

        iati_cordaid_import = IatiImport.objects.create(
            label="Test IATI Cordaid import", user=self.user, mapper_prefix="Cordaid")
        iati_cordaid_xml_file = NamedTemporaryFile(delete=True)
        iati_cordaid_xml_file.write(IATI_CORDAID_STRING)
        iati_cordaid_xml_file.flush()
        iati_cordaid_import_job = IatiImportJob.objects.create(
            iati_import=iati_cordaid_import, iati_xml_file=File(iati_cordaid_xml_file))
        iati_cordaid_import_job.run()

        project_cordaid = Project.objects.get(iati_activity_id="NL-KVK-0987654321-cordaid")
        self.assertIsInstance(project_cordaid, Project)
        self.assertEqual(project_cordaid.language, "en")
        self.assertEqual(project_cordaid.currency, "USD")
        self.assertEqual(project_cordaid.hierarchy, 1)
        self.assertEqual(project_cordaid.title, "Test project for IATI Cordaid import")
        self.assertEqual(project_cordaid.partners.count(), 4)
        self.assertEqual(project_cordaid.reporting_org.iati_org_id, "NL-KVK-0987654321")

Example 125

Project: django-calaccess-raw-data Source File: updatecalaccessrawdata.py
    def clean(self):
        """
        Clean up the raw data files from the state so they are ready to get loaded into the database.
        """
        if self.verbosity:
            self.header("Cleaning data files")

        tsv_list = [
            f for f in os.listdir(self.tsv_dir) if '.TSV' in f.upper()
        ]

        if self.resume:
            # get finished clean command logs of last update
            prev_cleaned = [
                x.file_name + '.TSV'
                for x in self.version.files.filter(
                    clean_finish_datetime__isnull=False
                )
            ]
            self.log("{} files already cleaned.".format(len(prev_cleaned)))
            # remove these from tsv_list
            tsv_list = [x for x in tsv_list if x not in prev_cleaned]

        # Loop through all the files in the source directory
        if self.verbosity:
            tsv_list = progress.bar(tsv_list)
        for name in tsv_list:
            call_command(
                "cleancalaccessrawfile",
                name,
                verbosity=self.verbosity,
                keep_file=self.keep_files,
            )

        # if archive setting is enabled, zip up all of the csv and error logs
        if getattr(settings, 'CALACCESS_STORE_ARCHIVE', False):
            # skip if resuming and the clean zip file is already saved
            if self.resume and self.version.clean_zip_archive:
                pass
            else:
                if self.verbosity:
                    self.header("Zipping cleaned files")
                # Remove previous zip file
                self.version.clean_zip_archive.delete()
                clean_zip_path = os.path.join(self.data_dir, 'clean.zip')

                # enable zipfile compression
                compression = ZIP_DEFLATED

                try:
                    zf = ZipFile(clean_zip_path, 'w', compression, allowZip64=True)
                except RuntimeError:
                    logger.error('Zip file cannot be compressed (check zlib module).')
                    compression = ZIP_STORED
                    zf = ZipFile(clean_zip_path, 'w', compression, allowZip64=True)

                # loop over and save files in csv dir
                for f in os.listdir(self.csv_dir):
                    csv_path = os.path.join(self.csv_dir, f)
                    zf.write(csv_path, f)

                # same for errors dir
                errors_dir = os.path.join(self.data_dir, 'log')
                for f in os.listdir(errors_dir):
                    error_path = os.path.join(errors_dir, f)
                    zf.write(error_path, f)

                # close the zip file
                zf.close()

                if not self.test_mode:
                    # save the clean zip size
                    self.version.clean_zip_size = os.path.getsize(clean_zip_path)
                    with open(clean_zip_path, 'rb') as zf:
                        # Save the zip on the raw data version
                        self.version.clean_zip_archive.save(
                            os.path.basename(clean_zip_path), File(zf)
                        )
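
Two details in the archiving step above generalize well: ZipFile raises RuntimeError when zlib is unavailable, so the command falls back from ZIP_DEFLATED to ZIP_STORED rather than failing, and the finished archive is reopened in binary mode so a FileField can copy it into storage. A condensed sketch of both; build_and_attach_zip and its arguments are illustrative names:

import os
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED

from django.core.files import File


def build_and_attach_zip(instance, zip_path, members):
    # Prefer compression, but degrade to uncompressed entries
    # if zlib is missing from this Python build.
    try:
        zf = ZipFile(zip_path, 'w', ZIP_DEFLATED, allowZip64=True)
    except RuntimeError:
        zf = ZipFile(zip_path, 'w', ZIP_STORED, allowZip64=True)
    with zf:
        for path, arcname in members:
            zf.write(path, arcname)
    # Reopen in binary mode and let the FileField copy it into storage.
    with open(zip_path, 'rb') as fh:
        instance.clean_zip_archive.save(os.path.basename(zip_path), File(fh))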

Example 126

Project: django-scrape Source File: items.py
Function: save
    def save(self, pipeline, spider):
        # Convert all our values as needed (mostly for addresses).
        for field in self._model_fields:
            value = self.get(field.name)
            if value not in ['', None, []]:
                self[field.name] = get_field_value(self, field, value, pipeline, spider)

        # Create a search filter, beginning by adding all my unique fields.
        fltr = {}
        for field in self._model_fields:
            if field.unique or isinstance(field, FileField):
                query = get_field_query(self, field, self.get(field.name, None), pipeline, spider)
                if query:
                    fltr.update({field.name + query[0]: query[1]})

        # Now add all the fields that must be unique combined.
        for unique_set in self.django_model._meta.unique_together:
            cur_fltr = {}
            for field_name in unique_set:
                field = self.django_model._meta.get_field_by_name(field_name)[0]
                query = get_field_query(self, field, self.get(field.name, None), pipeline, spider, use_null=True)
                cur_fltr.update({field.name + query[0]: query[1]})
            if cur_fltr:
                fltr.update(cur_fltr)

        # Either get an existing object or create a new one.
        if fltr:
            obj, created = self.django_model.objects.get_or_create(**fltr)
        else:

            # If we're making a new object, be sure to fill required fields.
            required = {}
            for field in self._model_fields:
                if field.blank == False:
                    value = self.get(field.name)
                    if value is None:
                        print self
                    assert value is not None
                    required[field.name] = value
            obj, created = self.django_model.objects.create(**required), True

        # We perform a fill operation even on new objects because of the possibility
        # that the filter we used to find an existing object contained '__' notations,
        # e.g. any many-to-many field.
        for field in self._model_fields:
            name = field.name
            cur_value = getattr(obj, name)

            # If the value to set is empty, skip it.
            if self.get(name) in [None, '']:
                continue

            # If we're using a ScrapeModel, first check if the field has already been
            # validated.
            if self._scrape_model:
                if getattr(obj, name + '_valid') == True:
                    continue  # already validated, skip

            # If the field is an m2m, perform a merge.
            modified = False
            if isinstance(field, ManyToManyField):
                to_insert = arg_to_iter(self.get(name))
                existing = cur_value.all()
                to_insert = [t for t in to_insert if t not in existing]
                for itm in to_insert:
                    cur_value.add(itm)
                    modified = True

            # If we have a file field we need special consideration.
            elif isinstance(field, FileField):

                # Only change it if it does not already have a value. If we don't
                # observe this, we end up with duplicate files.
                if cur_value in ['', None]:
                    path = self.get(name)
                    filename = os.path.basename(path)
                    cur_value.save(filename, File(open(path, 'rb')))
                    modified = True

            # # Otherwise just check if a value already exists.
            # elif cur_value in ['', None]:
            #     setattr(obj, name, self.get(name))
            #     modified = True

            # Otherwise, stomp on existing value, bearing in mind that we've already
            # checked if the value we're writing is empty.
            else:
                setattr(obj, name, self.get(name))
                modified = True

            # If we're using a scrape model and we changed the field, update the scrape data. Or,
            # if there is no existing value for either the source or timestamp, fill them in.
            if self._scrape_model:

                # Timestamp.
                cur_name = name + '_timestamp'
                cur_val = getattr(obj, cur_name)
                if not cur_val or modified:
                    setattr(obj, cur_name, datetime.now())#to_datetime(self[cur_name]))

                # Source
                cur_name = name + '_source'
                cur_val = getattr(obj, cur_name)
                if not cur_val or modified:
                    setattr(obj, cur_name, self['scrape_url'])#self[cur_name])

        # Save both the object and the scrape object.
        obj.save()

        return obj
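
The FileField branch above encodes a guard that is easy to miss: FieldFile.save() always writes a new file into storage (renaming on name collisions), so re-saving on every scrape would accumulate duplicates. The guard, reduced to a sketch with hypothetical names:

import os

from django.core.files import File


def set_file_once(obj, field_name, path):
    field_file = getattr(obj, field_name)
    # A FieldFile with no name is falsy; saving unconditionally would
    # create another copy in storage on every run.
    if not field_file:
        with open(path, 'rb') as fh:
            field_file.save(os.path.basename(path), File(fh))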

Example 127

Project: wolnelektury Source File: models.py
    @classmethod
    def from_xml_file(cls, xml_file, image_file=None, image_store=None, overwrite=False):
        """
        Import an XML file and its accompanying image file.
        If the image file is missing, it will be fetched by librarian.picture.ImageStore,
        which looks for an image file in the same directory as the XML, with an extension
        matching its MIME type.
        """
        from sortify import sortify
        from django.core.files import File
        from librarian.picture import WLPicture, ImageStore
        close_xml_file = False
        close_image_file = False

        if image_file is not None and not isinstance(image_file, File):
            image_file = File(open(image_file))
            close_image_file = True

        if not isinstance(xml_file, File):
            xml_file = File(open(xml_file))
            close_xml_file = True

        with transaction.atomic():
            # use librarian to parse meta-data
            if image_store is None:
                image_store = ImageStore(picture_storage.path('images'))
            picture_xml = WLPicture.from_file(xml_file, image_store=image_store)

            picture, created = Picture.objects.get_or_create(slug=picture_xml.slug[:120])
            if not created and not overwrite:
                raise Picture.AlreadyExists('Picture %s already exists' % picture_xml.slug)

            picture.areas.all().delete()
            picture.title = unicode(picture_xml.picture_info.title)
            picture.extra_info = picture_xml.picture_info.to_dict()

            picture_tags = set(catalogue.models.Tag.tags_from_info(picture_xml.picture_info))
            motif_tags = set()
            thing_tags = set()

            area_data = {'themes': {}, 'things': {}}

            # Treat all names in picture XML as in default language.
            lang = settings.LANGUAGE_CODE

            for part in picture_xml.partiter():
                if picture_xml.frame:
                    c = picture_xml.frame[0]
                    part['coords'] = [[p[0] - c[0], p[1] - c[1]] for p in part['coords']]
                if part.get('object', None) is not None:
                    _tags = set()
                    for objname in part['object'].split(','):
                        objname = objname.strip().capitalize()
                        tag, created = catalogue.models.Tag.objects.get_or_create(
                            slug=slughifi(objname), category='thing')
                        if created:
                            tag.name = objname
                            setattr(tag, 'name_%s' % lang, tag.name)
                            tag.sort_key = sortify(tag.name)
                            tag.save()
                        # thing_tags.add(tag)
                        area_data['things'][tag.slug] = {
                            'object': objname,
                            'coords': part['coords'],
                            }

                        _tags.add(tag)
                    area = PictureArea.rectangle(picture, 'thing', part['coords'])
                    area.save()
                    area.tags = _tags
                else:
                    _tags = set()
                    for motifs in part['themes']:
                        for motif in motifs.split(','):
                            tag, created = catalogue.models.Tag.objects.get_or_create(
                                slug=slughifi(motif), category='theme')
                            if created:
                                tag.name = motif
                                tag.sort_key = sortify(tag.name)
                                tag.save()
                            # motif_tags.add(tag)
                            _tags.add(tag)
                            area_data['themes'][tag.slug] = {
                                'theme': motif,
                                'coords': part['coords']
                                }

                    logging.debug("coords for theme: %s" % part['coords'])
                    area = PictureArea.rectangle(picture, 'theme', part['coords'])
                    area.save()
                    area.tags = _tags.union(picture_tags)

            picture.tags = picture_tags.union(motif_tags).union(thing_tags)
            picture.areas_json = area_data

            if image_file is not None:
                img = image_file
            else:
                img = picture_xml.image_file()

            modified = cls.crop_to_frame(picture_xml, img)
            modified = cls.add_source_note(picture_xml, modified)

            picture.width, picture.height = modified.size

            modified_file = StringIO()
            modified.save(modified_file, format='JPEG', quality=95)
            # FIXME: hardcoded extension - detect from DC format or original filename
            picture.image_file.save(path.basename(picture_xml.image_path), File(modified_file))

            picture.xml_file.save("%s.xml" % picture.slug, File(xml_file))
            picture.save()
            tasks.generate_picture_html(picture.id)

        if close_xml_file:
            xml_file.close()
        if close_image_file:
            image_file.close()

        return picture
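
Example 127 wraps an in-memory StringIO in File before handing it to FieldFile.save(). When the data is already in memory, Django's ContentFile (from django.core.files.base) does the same job without an explicit buffer. Both forms below are equivalent; image_file stands in for a hypothetical ImageField:

import io

from django.core.files import File
from django.core.files.base import ContentFile


def save_rendered(instance, jpeg_bytes):
    # File over an in-memory buffer, as in the example above...
    instance.image_file.save('picture.jpg', File(io.BytesIO(jpeg_bytes)))


def save_rendered_short(instance, jpeg_bytes):
    # ...or ContentFile, which wraps raw bytes (or text) directly.
    instance.image_file.save('picture.jpg', ContentFile(jpeg_bytes))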

Example 128

Project: django-djangui Source File: tasks.py
@celery_app.task(base=DjanguiTask)
def submit_script(**kwargs):
    job_id = kwargs.pop('djangui_job')
    resubmit = kwargs.pop('djangui_resubmit', False)
    rerun = kwargs.pop('rerun', False)
    from .backend import utils
    from .models import DjanguiJob, DjanguiFile
    job = DjanguiJob.objects.get(pk=job_id)

    command = utils.get_job_commands(job=job)
    if resubmit:
        # clone ourselves, setting pk=None seems hackish but it works
        job.pk = None

    # This is the directory the script works from; it is stored relative to MEDIA_ROOT,
    # since the absolute root may change between setups/where our user uploads are stored.
    cwd = job.get_output_path()

    abscwd = os.path.abspath(os.path.join(settings.MEDIA_ROOT, cwd))
    job.command = ' '.join(command)
    job.save_path = cwd

    if rerun:
        # clean up the old files; we need to be somewhat aggressive here.
        local_storage = utils.get_storage(local=True)
        remote_storage = utils.get_storage(local=False)
        to_delete = []
        with atomic():
            for dj_file in DjanguiFile.objects.filter(job=job):
                if dj_file.parameter is None or dj_file.parameter.parameter.is_output:
                    to_delete.append(dj_file)
                    path = local_storage.path(dj_file.filepath.name)
                    dj_file.filepath.delete(False)
                    if local_storage.exists(path):
                        local_storage.delete(path)
                    # TODO: This needs to be tested to make sure it's being nuked
                    if remote_storage.exists(path):
                        remote_storage.delete(path)
            for i in to_delete:
                i.delete()

    utils.mkdirs(abscwd)
    # make sure we have the script, otherwise download it. This can happen if we have an ephemeral file system or are
    # executing jobs on a worker node.
    script_path = job.script.script_path
    if not utils.get_storage(local=True).exists(script_path.path):
        utils.get_storage(local=True).save(script_path.path, script_path.file)

    job.status = DjanguiJob.RUNNING
    job.save()

    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=abscwd)

    stdout, stderr = proc.communicate()
    # tar/zip up the generated content for bulk downloads
    def get_valid_file(cwd, name, ext):
        out = os.path.join(cwd, name)
        index = 0
        while os.path.exists(six.u('{}.{}').format(out, ext)):
            index += 1
            out = os.path.join(cwd, six.u('{}_{}').format(name, index))
        return six.u('{}.{}').format(out, ext)

    # fetch the job again in case the database connection was lost during the job or something else changed.
    job = DjanguiJob.objects.get(pk=job_id)

    # if there are files generated, make zip/tar files for download
    if len(os.listdir(abscwd)):
        tar_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'tar.gz')
        tar = tarfile.open(tar_out, "w:gz")
        tar_name = os.path.splitext(os.path.splitext(os.path.split(tar_out)[1])[0])[0]
        tar.add(abscwd, arcname=tar_name)
        tar.close()

        zip_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'zip')
        zip = zipfile.ZipFile(zip_out, "w")
        arcname = os.path.splitext(os.path.split(zip_out)[1])[0]
        zip.write(abscwd, arcname=arcname)
        for root, folders, filenames in os.walk(os.path.split(zip_out)[0]):
            for filename in filenames:
                path = os.path.join(root, filename)
                if path == tar_out:
                    continue
                if path == zip_out:
                    continue
                zip.write(path, arcname=os.path.join(arcname, filename))
        zip.close()

        # save all the files generated as well to our default storage for ephemeral storage setups
        if djangui_settings.DJANGUI_EPHEMERAL_FILES:
            for root, folders, files in os.walk(abscwd):
                for filename in files:
                    filepath = os.path.join(root, filename)
                    s3path = os.path.join(root[root.find(cwd):], filename)
                    remote = utils.get_storage(local=False)
                    exists = remote.exists(s3path)
                    filesize = remote.size(s3path) if exists else 0
                    if not exists or (exists and filesize == 0):
                        if exists:
                            remote.delete(s3path)
                        remote.save(s3path, File(open(filepath, 'rb')))

    utils.create_job_fileinfo(job)


    job.stdout = stdout
    job.stderr = stderr
    job.status = DjanguiJob.COMPLETED
    job.save()

    return (stdout, stderr)

Example 129

Project: geonode Source File: utils.py
def file_upload(filename, name=None, user=None, title=None, abstract=None,
                keywords=None, category=None, regions=None, date=None,
                skip=True, overwrite=False, charset='UTF-8',
                metadata_uploaded_preserve=False):
    """Saves a layer in GeoNode asking as little information as possible.
       Only filename is required, user and title are optional.
    """
    if keywords is None:
        keywords = []
    if regions is None:
        regions = []

    # Get a valid user
    theuser = get_valid_user(user)

    # Create a new upload session
    upload_session = UploadSession.objects.create(user=theuser)

    # Get all the files uploaded with the layer
    files = get_files(filename)

    # Set a default title that looks nice ...
    if title is None:
        basename = os.path.splitext(os.path.basename(filename))[0]
        title = basename.title().replace('_', ' ')

    # Create a name from the title if it is not passed.
    if name is None:
        name = slugify(title).replace('-', '_')

    if category is not None:
        categories = TopicCategory.objects.filter(Q(identifier__iexact=category) | Q(gn_description__iexact=category))
        if len(categories) == 1:
            category = categories[0]
        else:
            category = None

    # Generate a name that is not taken if overwrite is False.
    valid_name = get_valid_layer_name(name, overwrite)

    # Add them to the upload session (new file fields are created).
    assigned_name = None
    for type_name, fn in files.items():
        with open(fn, 'rb') as f:
            upload_session.layerfile_set.create(name=type_name,
                                                file=File(f, name='%s.%s' % (assigned_name or valid_name, type_name)))
            # save the system assigned name for the remaining files
            if not assigned_name:
                the_file = upload_session.layerfile_set.all()[0].file.name
                assigned_name = os.path.splitext(os.path.basename(the_file))[0]

    # Get a bounding box
    bbox_x0, bbox_x1, bbox_y0, bbox_y1 = get_bbox(filename)

    # by default, if RESOURCE_PUBLISHING=True then layer.is_published
    # must be set to False
    is_published = True
    if settings.RESOURCE_PUBLISHING:
        is_published = False

    defaults = {
        'upload_session': upload_session,
        'title': title,
        'abstract': abstract,
        'owner': user,
        'charset': charset,
        'bbox_x0': bbox_x0,
        'bbox_x1': bbox_x1,
        'bbox_y0': bbox_y0,
        'bbox_y1': bbox_y1,
        'is_published': is_published,
        'category': category
    }

    # set metadata
    if 'xml' in files:
        with open(files['xml']) as f:
            xml_file = f.read()
        defaults['metadata_uploaded'] = True
        defaults['metadata_uploaded_preserve'] = metadata_uploaded_preserve

        # get model properties from XML
        identifier, vals, regions, keywords = set_metadata(xml_file)

        if defaults['metadata_uploaded_preserve']:
            defaults['metadata_xml'] = xml_file
            defaults['uuid'] = identifier

        for key, value in vals.items():
            if key == 'spatial_representation_type':
                value = SpatialRepresentationType(identifier=value)
            elif key == 'topic_category':
                value, created = TopicCategory.objects.get_or_create(
                    identifier=value.lower(),
                    defaults={'description': '', 'gn_description': value})
                key = 'category'
                defaults[key] = value
            else:
                defaults[key] = value

    regions_resolved, regions_unresolved = resolve_regions(regions)
    keywords.extend(regions_unresolved)

    if getattr(settings, 'NLP_ENABLED', False):
        try:
            from geonode.contrib.nlp.utils import nlp_extract_metadata_dict
            nlp_metadata = nlp_extract_metadata_dict({
                'title': defaults.get('title', None),
                'abstract': defaults.get('abstract', None),
                'purpose': defaults.get('purpose', None)})
            if nlp_metadata:
                regions_resolved.extend(nlp_metadata.get('regions', []))
                keywords.extend(nlp_metadata.get('keywords', []))
        except:
            print "NLP extraction failed."

    # If it is a vector file, create the layer in postgis.
    if is_vector(filename):
        defaults['storeType'] = 'dataStore'

    # If it is a raster file, get the resolution.
    if is_raster(filename):
        defaults['storeType'] = 'coverageStore'

    # Create a Django object.
    layer, created = Layer.objects.get_or_create(
        name=valid_name,
        defaults=defaults
    )

    # If overwrite is true and the layer was not just created, delete
    # the old layer files, then process the layer again by doing a
    # layer.save()
    if not created and overwrite:
        if layer.upload_session:
            layer.upload_session.layerfile_set.all().delete()
        layer.upload_session = upload_session
        # Pass the parameter overwrite to tell whether the
        # geoserver_post_save_signal should upload the new file or not
        layer.overwrite = overwrite
        layer.save()

    # Assign the keywords (needs to be done after saving)
    keywords = list(set(keywords))
    if keywords:
        layer.keywords.add(*keywords)

    # Assign the regions (needs to be done after saving)
    regions_resolved = list(set(regions_resolved))
    if regions_resolved:
        layer.regions.add(*regions_resolved)

    if date is not None:
        layer.date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
        layer.save()

    return layer
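
A typical invocation, with a placeholder path and metadata (only the filename is required, per the docstring):

layer = file_upload(
    '/tmp/roads.shp',
    title='Road network',
    keywords=['roads', 'transport'],
    overwrite=False,
)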

Example 130

Project: Wooey Source File: 0019_userfile_data.py
def setup_wooey_files(apps, schema_editor):
    from six.moves import StringIO
    from django.core.files import File
    from wooey.backend.utils import get_storage

    storage = get_storage()

    WooeyFile = apps.get_model('wooey', 'WooeyFile')
    Favorite = apps.get_model('wooey', 'Favorite')
    WooeyJob = apps.get_model('wooey', 'WooeyJob')
    ScriptParameter = apps.get_model('wooey', 'ScriptParameter')
    ScriptParameters = apps.get_model('wooey', 'ScriptParameters')
    ScriptParameterGroup = apps.get_model('wooey', 'ScriptParameterGroup')
    ScriptVersion = apps.get_model('wooey', 'ScriptVersion')
    Script = apps.get_model('wooey', 'Script')
    User = apps.get_model('auth', 'User')
    update_all_contenttypes()
    ContentType = apps.get_model("contenttypes", "ContentType")
    ctype = ContentType.objects.get(model='wooeyfile')

    user = User.objects.create(username='test user')

    script = Script.objects.create(
        script_name='Test'
    )

    script_version = ScriptVersion.objects.create(
        script=script,
        script_path=get_storage().save('fake_script', File(StringIO('nonsense'))),
    )

    script_parameter_group = ScriptParameterGroup.objects.create(
        group_name='blahh',
        script_version=script_version,
    )

    script_parameter = ScriptParameter.objects.create(
        script_version=script_version,
        short_param='blah',
        script_param='--blah',
        form_field='FileField',
        input_type='file',
        parameter_group=script_parameter_group,
        is_output=False,
    )

    job = WooeyJob.objects.create(
        script_version=script_version,
        job_name='job1',
    )

    job2 = WooeyJob.objects.create(
        script_version=script_version,
        job_name='job2',
    )

    # make wooey files
    buffer = StringIO('file1')
    file1 = get_storage().save('file1', File(buffer))
    file2 = get_storage().save('file1', File(buffer))

    script_parameters = ScriptParameters.objects.create(
        parameter=script_parameter,
        job=job,
        _value=file1,
    )

    script_parameters2 = ScriptParameters.objects.create(
        parameter=script_parameter,
        job=job2,
        _value=file2,
    )


    wooey_file1 = WooeyFile.objects.create(
        filepath=file1,
        job=job,
        parameter=script_parameters,
        checksum='abc123',
    )

    wooey_file1_copy = WooeyFile.objects.create(
        filepath=file2,
        job=job2,
        parameter=script_parameters2,
        checksum='abc123',
    )

    # make the second a favorite file
    Favorite.objects.create(
        content_type=ctype,
        object_id=wooey_file1_copy.pk,
        user=user
    )

Example 131

Project: hydroshare Source File: spatialite.py
    @classmethod
    def create_dataset_with_parent_geometry(cls, title, parent_dataresource, parent=None, geometry_column_name='GEOMETRY', srid=4326,
                                            geometry_type='GEOMETRY', owner=None, columns_definitions=()):
        from ga_resources.models import DataResource
        from uuid import uuid4

        parent_dataresource.resource.ready_data_resource()
        pconn = parent_dataresource.resource._connection() # FIXME assumes the spatialite driver for the parent, but much faster
        c = pconn.cursor()
        c.execute('select OGC_FID, AsBinary(Transform({geom}, {srid})) from {table}'.format(geom=parent_dataresource.resource._geometry_field, table=parent_dataresource.resource._table_name, srid=srid))
        records = c.fetchall()

        filename = os.path.join('/tmp', uuid4().hex + '.sqlite')
        conn = db.connect(filename)
        conn.enable_load_extension(True)
        conn.execute("select load_extension('libspatialite.so')")
        conn.executescript("""
                    select initspatialmetadata();
                    create table layer (
                        OGC_FID INTEGER PRIMARY KEY
                    );
                    select AddGeometryColumn('layer', '{geometry_column_name}', {srid}, '{geometry_type}', 2, 1);
                    select CreateSpatialIndex('layer','{geometry_column_name}');
                """.format(**locals()))


        conn.executemany('insert into layer (OGC_FID, {geometry_column_name}) values (?, GeomFromWKB(?, {srid}))'.format(**locals()), records)
        conn.commit()

        for column, datatype in columns_definitions:
            conn.execute(
                'alter table layer add column {column} {datatype}'.format(column=column, datatype=datatype))

        conn.close()

        ds = DataResource.objects.create(
            title=title,
            parent=parent,
            driver='ga_resources.drivers.spatialite',
            resource_file=File(open(filename), filename),
            in_menus=[],
            owner=owner
        )
        ds.resource.compute_fields()
        for name,ctype in columns_definitions:
            ds.resource.add_column(name, ctype)
        os.unlink(filename)
        return ds

Example 132

Project: django-responsive-images Source File: utils.py
def get_sized_images(image, sizes, crop=(50, 50)):
    (orig, c) = OriginalImage.objects.get_or_create(image_file=image.name)

    # filter out duplicate sizes and sizes larger than the original
    sizes_set = set()
    for (width, height) in sizes:
        width = min(width, image.width)
        height = min(height, image.height)
        if not crop:
            orig_aspect = image.width / float(image.height)
            req_aspect = width / float(height)
            if orig_aspect > req_aspect:
                ratio = width / float(image.width)
            else:
                ratio = height / float(image.height)
            width = int(image.width * ratio + 0.5)
            height = int(image.height * ratio + 0.5)
        sizes_set.add((width, height))
    sizes = sorted(sizes_set)

    if sizes[0] == orig.size:
        # smallest size is original image
        return [orig]

    # common info to all resized images
    if crop:
        crop_type = '{}-{}'.format(*crop)
    else:
        crop_type = 'nocrop'
    split_ext = image.name.rsplit('.', 1)
    if len(split_ext) > 1:
        ext = '.' + split_ext[-1]
    else:
        ext = ''

    # open the original image
    image.open()
    orig_image = Image.open(image)
    orig_image.load()
    image.close()

    # create the resized images
    resized = []
    for (width, height) in sizes:
        if (width, height) == (image.width, image.height):
            resized.append(orig)
            continue
        try:
            found = ResizedImage.objects.get(
                original=orig,
                width=width,
                height=height,
                crop=crop_type
            )
        except ResizedImage.DoesNotExist:
            pass
        else:
            resized.append(found)
            continue

        if crop:
            new_image = ImageOps.fit(
                orig_image,
                (width, height),
                method=Image.BICUBIC,
                centering=(crop[0] / 100.0, crop[1] / 100.0)
            )
        else:
            new_image = orig_image.resize(
                (width, height),
                resample=Image.BICUBIC
            )

        data = IO()
        new_image.save(data, orig_image.format)
        resized_path = default_storage.save(
            os.path.join(
                'responsive_images',
                image.name,
                '{}x{}_{}{}'.format(width, height, crop_type, ext)),
            File(data)
        )
        resized.append(ResizedImage.objects.create(
            original=orig,
            image_file=resized_path,
            crop=crop_type
        ))

    return resized
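
The closing steps above, rendering a Pillow image into a buffer and pushing it through default_storage, reduce to a few lines. A sketch assuming Pillow and the default storage backend; the function and argument names are illustrative:

import io

from django.core.files import File
from django.core.files.storage import default_storage
from PIL import Image


def save_resized(image_path, size, dest_name):
    # Resize with Pillow, render into an in-memory buffer, then let
    # default_storage persist the buffer under dest_name.
    img = Image.open(image_path).convert('RGB')  # JPEG requires RGB
    img = img.resize(size, resample=Image.BICUBIC)
    buf = io.BytesIO()
    img.save(buf, format='JPEG')
    buf.seek(0)  # File.chunks() rewinds anyway, but be explicit
    return default_storage.save(dest_name, File(buf))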

Example 133

Project: hydroshare Source File: users.py
def update_account(user, **kwargs):
    """
    Update an existing user within the HydroShare system. The user calling this method must have write access to the
    account details.

    REST URL:  PUT /accounts/{userID}

    Parameters: userID - ID of the existing user to be modified

    user - An object containing the modified attributes of the user to be modified

    Returns: The userID of the user that was modified

    Return Type: userID

    Raises:
    Exceptions.NotAuthorized - The user is not authorized
    Exceptions.NotFound - The user identified by userID does not exist
    Exceptions.InvalidContent - The content of the user object is invalid
    Exception.ServiceFailure - The service is unable to process the request

    Note:  This would be done via a JSON object (user) that is in the PUT request.

    """
    from django.contrib.auth.models import Group

    groups = kwargs.get('groups', [])
    if groups:
        if len(groups) == 1:
            groups = [(Group.objects.get_or_create(name=groups)
                      if isinstance(groups, basestring) else groups)[0]]
        else:
            groups = zip(
                *(Group.objects.get_or_create(name=g)
                  if isinstance(g, basestring) else g
                  for g in groups))[0]

    if 'password' in kwargs:
        user.set_password(kwargs['password'])

    blacklist = {'username', 'password', 'groups'}  # handled separately or cannot change
    for k in blacklist.intersection(kwargs.keys()):
        del kwargs[k]

    try:
        profile = get_profile(user)
        profile_update = dict()
        update_keys = filter(lambda x: hasattr(profile, str(x)), kwargs.keys())
        for key in update_keys:
            profile_update[key] = kwargs[key]
        for k, v in profile_update.items():
            if k == 'picture':
                profile.picture = File(v) if not isinstance(v, UploadedFile) else v
            elif k == 'cv':
                profile.cv = File(v) if not isinstance(v, UploadedFile) else v
            else:
                setattr(profile, k, v)
        profile.save()
    except AttributeError as e:
        raise exceptions.ValidationError(e.message)  # ignore deprecated user profile module when we upgrade to 1.7

    user_update = dict()
    update_keys = filter(lambda x: hasattr(user, str(x)), kwargs.keys())
    for key in update_keys:
        user_update[key] = kwargs[key]
    for k, v in user_update.items():
        setattr(user, k, v)
    user.save()

    user.groups = groups
    return user.username
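
A hypothetical call matching the docstring's contract; the profile picture is passed as an open file object, which the function wraps in File unless it is already an UploadedFile (the attribute names here are placeholders):

with open('/tmp/avatar.png', 'rb') as avatar:
    update_account(
        user,
        first_name='Jane',
        picture=avatar,
    )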

Example 134

Project: wger Source File: download-exercise-images.py
    def handle(self, **options):

        if not settings.MEDIA_ROOT:
            raise ImproperlyConfigured('Please set MEDIA_ROOT in your settings file')

        remote_url = options['remote_url']
        try:
            val = URLValidator()
            val(remote_url)
        except ValidationError:
            raise CommandError('Please enter a valid URL')

        exercise_api = "{0}/api/v2/exercise/?limit=999"
        image_api = "{0}/api/v2/exerciseimage/?exercise={1}"
        thumbnail_api = "{0}/api/v2/exerciseimage/{1}/thumbnails/"

        headers = {'User-agent': default_user_agent('wger/{} + requests'.format(get_version()))}

        # Get all exercises
        result = requests.get(exercise_api.format(remote_url), headers=headers).json()
        for exercise_json in result['results']:
            exercise_name = exercise_json['name'].encode('utf-8')
            exercise_uuid = exercise_json['uuid']
            exercise_id = exercise_json['id']

            self.stdout.write('')
            self.stdout.write(u"*** Processing {0} (ID: {1}, UUID: {2})".format(exercise_name,
                                                                                exercise_id,
                                                                                exercise_uuid))

            try:
                exercise = Exercise.objects.get(uuid=exercise_uuid)
            except Exercise.DoesNotExist:
                self.stdout.write('    Remote exercise not found in local DB, skipping...')
                continue

            # Get all images
            images = requests.get(image_api.format(remote_url, exercise_id), headers=headers).json()

            if images['count']:

                for image_json in images['results']:
                    image_id = image_json['id']
                    result = requests.get(thumbnail_api.format(remote_url, image_id),
                                          headers=headers).json()

                    image_name = os.path.basename(result['original'])
                    self.stdout.write('    Fetching image {0} - {1}'.format(image_id, image_name))

                    try:
                        image = ExerciseImage.objects.get(pk=image_id)
                        self.stdout.write('    --> Image already present locally, skipping...')
                        continue
                    except ExerciseImage.DoesNotExist:
                        self.stdout.write('    --> Image not found in local DB, creating now...')
                        image = ExerciseImage()
                        image.pk = image_id

                    # Save the downloaded image, see link for details
                    # http://stackoverflow.com/questions/1308386/programmatically-saving-image-to-
                    retrieved_image = requests.get(result['original'], headers=headers)
                    img_temp = NamedTemporaryFile(delete=True)
                    img_temp.write(retrieved_image.content)
                    img_temp.flush()

                    image.exercise = exercise
                    image.is_main = image_json['is_main']
                    image.status = image_json['status']
                    image.image.save(
                        os.path.basename(image_name),
                        File(img_temp),
                    )
                    image.save()

            else:
                self.stdout.write('    No images for this exercise, nothing to do')

Example 135

Project: ideascube Source File: import_medias.py
    def add(self, metadata):
        title = metadata.get('title')
        if not title:
            return self.report.error('Missing title', metadata)
        title = smart_truncate(title)
        metadata['title'] = title

        if not metadata.get('lang'):
            metadata['lang'] = settings.LANGUAGE_CODE

        original = metadata.get('path')
        if not original:
            return self.report.error('Missing path', metadata)
        kind = metadata.get('kind')
        content_type, encoding = mimetypes.guess_type(original)
        if not kind or not hasattr(Document, kind.upper()):
            kind = guess_kind_from_content_type(content_type) or Document.OTHER
            metadata['kind'] = kind

        instance = Document.objects.filter(title=title, kind=kind).last()
        if instance and not self.update:
            return self.report.warning('Document exists (Use --update for '
                                       'reimport)', title)

        path = os.path.join(self.ROOT, original)
        if not os.path.exists(path):
            return self.report.error(u'Path not found', path)
        with open(path, 'rb') as f:
            original = File(f, name=original)

            preview = metadata.get('preview')
            if preview:
                path = os.path.join(self.ROOT, preview)
                with open(path, 'rb') as f:
                    preview = File(f, name=preview)
                    self.save(metadata, original, instance, preview)
            else:
                self.save(metadata, original, instance)

Example 136

Project: silk Source File: collector.py
    def finalise(self):
        if hasattr(self, 'pythonprofiler'):
            s = StringIO()
            ps = pstats.Stats(self.pythonprofiler, stream=s).sort_stats('cumulative')
            ps.print_stats()
            profile_text = s.getvalue()
            profile_text = "\n".join(
                profile_text.split("\n")[0:256])  # don't record too much because it can overflow the field storage size
            self.request.pyprofile = profile_text

            if SilkyConfig().SILKY_PYTHON_PROFILER_BINARY:
                file_name = "{}.prof".format(
                    os.path.join(
                        self.profiler_result_path,
                        str(self.request.id)
                    )
                )
                with open(file_name, 'w+b') as f:
                    ps.dump_stats(f.name)
                    self.request.prof_file.save(f.name, File(f))

        for _, query in self.queries.items():
            query_model = models.SQLQuery.objects.create(**query)
            query['model'] = query_model
        for _, profile in self.profiles.items():
            profile_query_models = []
            if TYP_QUERIES in profile:
                profile_queries = profile[TYP_QUERIES]
                del profile[TYP_QUERIES]
                for query_temp_id in profile_queries:
                    try:
                        query = self.queries[query_temp_id]
                        try:
                            profile_query_models.append(query['model'])
                        except KeyError:
                            raise SilkInternalInconsistency('Profile references a query dictionary that has not '
                                                            'been converted into a Django model. This should '
                                                            'never happen, please file a bug report')
                    except KeyError:
                        raise SilkInternalInconsistency('Profile references a query temp_id that does not exist. '
                                                        'This should never happen, please file a bug report')
            profile = models.Profile.objects.create(**profile)
            if profile_query_models:
                profile.queries = profile_query_models
                profile.save()
        self._record_meta_profiling()

Example 137

Project: django-validated-file Source File: tests.py
Function: add_element
    def _add_element(self, container, orig_filename, dest_filename):
        return container.test_elements.create(
                the_file = File(self._get_sample_file(orig_filename), dest_filename)
            )

Example 138

Project: django-radioportal Source File: receive.py
    def podcast_feed(self, data):
        tree = etree.fromstring(data["content"])
        pf = PodcastFeed.objects.get(show__slug=data["show"])
        show = pf.show
        for field, value in filter(lambda x: x[0].endswith("_enabled"), vars(pf).iteritems()):
            # print field, value
            if not value:
                continue
            if field[:-8] + "_xpath" in vars(pf):
                xpath = vars(pf)[field[:-8] + "_xpath"]
                value = tree.xpath(xpath, namespaces=tree.nsmap)
                if not value:
                    continue
                value = value[0]
            regex = None
            if field[:-8] + "_regex" in vars(pf):
                regex = vars(pf)[field[:-8] + "_regex"]
            if regex:
                match = re.search(regex, value)
                if match and "value" in match.groupdict():
                    value = match.group("value")
            # print field[:-8], value
            if field[:-8] == "icon":
                headers = {}
                local_modtime = None
                if os.path.exists(show.icon.path):
                    if show.icon_url == value and show.icon_etag:
                        headers["If-None-Match"] = show.icon_etag
                    local_modtime = os.path.getmtime(show.icon.path)
                    headers["If-Modified-Since"] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.localtime(local_modtime))
                r = requests.head(value, headers=headers)
                if r.status_code == 200:
                    if local_modtime and "last-modified" in r.headers:
                        time_s = r.headers['last-modified']
                        time_d = dateutil.parser.parse(time_s)
                        remote_modtime = int(time_d.strftime("%s"))
                        if remote_modtime < local_modtime:
                            continue
                    r = requests.get(value, headers=headers)
                    if r.status_code != 200:
                        continue
                    img_temp = NamedTemporaryFile(delete=True)
                    img_temp.write(r.content)
                    img_temp.flush()
                    show.icon.save("%s.jpg" % show.slug, File(img_temp), save=True)
                    generate_all_aliases(show.icon, include_global=True)
                    if "etag" in r.headers:
                        show.icon_etag = r.headers["etag"]
                    show.icon_url = r.url
                    show.save()
            else:
                setattr(show, field[:-8], value)
        show.save()
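
The icon branch above leans on HTTP conditional requests: it sends If-None-Match with the stored ETag and If-Modified-Since derived from the local mtime, then skips the download when the copy on disk is still current. That negotiation, stripped of the model bookkeeping (note gmtime, since HTTP dates are expressed in GMT):

import os
import time

import requests


def fetch_if_newer(url, local_path, etag=None):
    # Build cache validators from what we already have on disk.
    headers = {}
    if etag:
        headers['If-None-Match'] = etag
    if os.path.exists(local_path):
        mtime = os.path.getmtime(local_path)
        headers['If-Modified-Since'] = time.strftime(
            '%a, %d %b %Y %H:%M:%S GMT', time.gmtime(mtime))
    response = requests.get(url, headers=headers)
    if response.status_code == 304:
        return None  # our copy is current
    response.raise_for_status()
    return response.content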

Example 139

Project: airmozilla Source File: test_views.py
    def test_download_from_srt(self):
        filepath = os.path.join(TEST_DIRECTORY, 'example.srt')
        with open(filepath) as f:
            item = ClosedCaptions.objects.create(
                event=self.event,
                file=File(f),
            )

        # txt
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'txt'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'text/plain')
        ok_(response.content.startswith('Language: en-US\n'))

        # dfxp
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'dfxp'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'application/ttml+xml; charset=utf-8')
        ok_(response.content.startswith(
            '<?xml version="1.0" encoding="utf-8"?>\n<tt'
        ))

        # srt
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'srt'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'text/plain')
        ok_(response.content.startswith(
            '1\n00:00:09,209 -->'
        ))

        # vtt
        url = reverse('closedcaptions:download', args=(
            item.filename_hash,
            item.id,
            self.event.slug,
            'vtt'
        ))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        eq_(response['Content-Type'], 'text/vtt')
        ok_(response.content.startswith(
            'WEBVTT\n\n00:09.209 -->'
        ))

Example 140

Project: satchmo Source File: tests.py
    def setUp(self):
        self.site = Site.objects.get_current()

        #
        # setup the protected dir; since we're using the default storage class,
        # this will point to
        #
        #   /path/to/static/protected/
        #
        # where "/path/to/static/" is your settings.MEDIA_ROOT and "protected"
        # is your PRODUCT.PROTECTED_DIR setting.
        #
        self.protected_dir = default_storage.path(
            config_value('PRODUCT', 'PROTECTED_DIR')
        )
        if not os.path.exists(self.protected_dir):
            os.makedirs(self.protected_dir)

        # setup a temporary file in the protected dir: this is the file that
        # django will use during this test, but we won't use it; close and
        # remove it.
        _file, _abs_path = mkstemp(dir=self.protected_dir)
        os.close(_file)
        os.remove(_abs_path)
        self.file_name = os.path.basename(_abs_path)

        # setup a temporary source dir and source file, using the same file name
        # generated earlier.
        self.dir = mkdtemp()
        self.file = open(os.path.join(self.dir, self.file_name), "w")

        # a fake SHA
        self.key = "".join(["12abf" for i in range(8)])

        # setup a contact
        c, _created = Contact.objects.get_or_create(
            first_name="Jim",
            last_name="Tester",
            email="[email protected]",
        )
        ad, _created = AddressBook.objects.get_or_create(
            contact=c, description="home",
            street1 = "test", state="OR", city="Portland",
            country = Country.objects.get(iso2_code__iexact = 'US'),
            is_default_shipping=True,
            is_default_billing=True,
        )

        # setup an order
        o, _created = Order.objects.get_or_create(
            contact=c, shipping_cost=Decimal('6.00'), site=self.site
        )

        # setup download
        self.product, _created = DownloadableProduct.objects.get_or_create(
            product=Product.objects.get(slug='dj-rocks'),
            file=File(self.file),
            num_allowed_downloads=3,
            expire_minutes=1,
        )
        self.product_link, _created = DownloadLink.objects.get_or_create(
            downloadable_product=self.product,
            order=o, key=self.key, num_attempts=0,
            time_stamp=datetime.datetime.now()
        )

        # setup client
        self.domain = 'satchmoserver'
        self.client = Client(SERVER_NAME=self.domain)

        # go through the verification step
        self.pd_url = urlresolvers.reverse(
            'satchmo_download_send', kwargs= {'download_key': self.key}
        )
        pd_process_url = urlresolvers.reverse(
            'satchmo_download_process', kwargs= {'download_key': self.key}
        )

        # first, hit the url.
        response = self.client.get(self.pd_url)
        self.assertEqual(response['Location'],
            'http://%s%s' % (self.domain, pd_process_url)
        )

        # follow the redirect to "process" the key.
        response = self.client.get(response['Location'])
        self.assertEqual(self.client.session.get('download_key', None), self.key)

Example 141

Project: airmozilla Source File: test_eventedit.py
    def test_view_revision_change(self):
        event = Event.objects.get(title='Test event')
        event.tags.add(Tag.objects.create(name='testing'))
        self._attach_file(event, self.main_image)

        # base revision
        base_revision = EventRevision.objects.create_from_event(event)

        # change the event without saving so we can make a new revision
        event.title = 'Different title'
        event.description = 'New description'
        event.short_description = 'New short description'
        event.additional_links = 'New additional links'
        event.save()
        user = User.objects.create_user(
            'bob', '[email protected]', 'secret'
        )
        user_revision = EventRevision.objects.create_from_event(
            event,
            user=user
        )
        user_revision.tags.add(Tag.objects.create(name='newtag'))
        user_revision.channels.remove(Channel.objects.get(name='Main'))
        user_revision.channels.add(
            Channel.objects.create(name='Web dev', slug='webdev')
        )
        with open(self.other_image, 'rb') as f:
            img = File(f)
            user_revision.placeholder_img.save(
                os.path.basename(self.other_image),
                img
            )

        # view the change
        url = reverse('main:event_change', args=(event.slug, user_revision.pk))
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Different title' in response.content)
        ok_('New description' in response.content)
        ok_('New short description' in response.content)
        ok_('New additional links' in response.content)
        ok_('Web dev' in response.content)
        ok_('newtag, testing' in response.content)

        event.tags.add(Tag.objects.create(name='newtag'))
        event.channels.remove(Channel.objects.get(name='Main'))
        event.channels.add(
            Channel.objects.get(name='Web dev')
        )

        # view the difference
        url = reverse(
            'main:event_difference',
            args=(event.slug, base_revision.pk))
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Different title' in response.content)
        ok_('New description' in response.content)
        ok_('New short description' in response.content)
        ok_('New additional links' in response.content)
        ok_('Web dev' in response.content)
        ok_('newtag, testing' in response.content)
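
The with open(...) / File(f) idiom in this test is the canonical way to feed placeholder_img.save(). Reading a stored file back goes through the same wrapper API; a sketch, where checksum_stored_file is our own helper name:

import hashlib

def checksum_stored_file(field_file):
    # FieldFile opens lazily from its storage; chunks() streams the content
    # so the whole file never has to fit in memory.
    digest = hashlib.sha256()
    field_file.open('rb')
    try:
        for chunk in field_file.chunks():
            digest.update(chunk)
    finally:
        field_file.close()
    return digest.hexdigest()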

Example 142

Project: betty-cropper Source File: models.py
    def create_from_path(self, path, filename=None, name=None, credit=None):
        """Creates an image object from a TemporaryUploadedFile insance"""

        with open(path, 'rb') as fh:
            image_buffer = io.BytesIO(fh.read())

        im = PILImage.open(image_buffer)
        if filename is None:
            filename = os.path.split(path)[1]
        if name is None:
            name = filename

        image = self.create(
            name=name,
            credit=credit,
            width=im.size[0],
            height=im.size[1]
        )

        # Copy temp image file to S3
        image_buffer.seek(0)
        image.source.save(filename, File(image_buffer))

        # If the image is a GIF, we need to do some special stuff
        if im.format == "GIF":
            image.animated = True

        image.save()

        # Use temp image path (instead of pulling from S3)
        image_buffer.seek(0)
        optimize_image(image_model=image, image_buffer=image_buffer, filename=filename)

        if settings.BETTY_JPEG_QUALITY_RANGE:
            search_image_quality.apply_async(args=(image.id,))

        return image
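
When the content already lives in memory, File(io.BytesIO(...)) as used above works, but django.core.files.base.ContentFile is the purpose-built shortcut for raw bytes. A hedged sketch (save_bytes is our own helper name):

from django.core.files.base import ContentFile

def save_bytes(image, data, filename):
    # Equivalent to image.source.save(filename, File(io.BytesIO(data)));
    # ContentFile wraps the bytes without an explicit buffer object.
    image.source.save(filename, ContentFile(data))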

Example 143

Project: airmozilla Source File: views.py
@require_POST
@login_required
@json_view
@transaction.atomic
def youtube_create(request):
    try:
        body = json.loads(request.body)
    except ValueError:
        # it wasn't sent as a JSON request body
        return http.HttpResponseBadRequest('Missing JSON request body')
    if not body.get('id'):
        return http.HttpResponseBadRequest('Missing id')

    # extract all the details again
    data = youtube.extract_metadata_by_id(body['id'])

    for template in Template.objects.filter(name__icontains='YouTube'):
        break
    else:
        template = Template.objects.create(
            name='YouTube',
            content=(
                '<iframe width="896" height="504" src="https://www.youtube-noc'
                'ookie.com/embed/{{ id }}?rel=0&amp;showinfo=0" '
                'frameborder="0" allowfullscreen></iframe>'
            )
        )

    youtube_url = 'https://www.youtube.com/watch?v=' + data['id']
    additional_links = u'On YouTube™ {}'.format(youtube_url)

    event = Event.objects.create(
        title=data['title'],
        description=data['description'],
        template=template,
        template_environment={'id': data['id']},
        creator=request.user,
        status=Event.STATUS_INITIATED,
        privacy=Event.PRIVACY_PUBLIC,
        start_time=timezone.now(),
        additional_links=additional_links,
        archive_time=timezone.now(),
    )
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(requests.get(data['thumbnail_url']).content)
    img_temp.flush()
    event.placeholder_img.save(
        os.path.basename(data['thumbnail_url']),
        File(img_temp)
    )
    for tag in data['tags']:
        for this_tag in Tag.objects.filter(name__iexact=tag):
            break
        else:
            this_tag = Tag.objects.create(name=tag)
        event.tags.add(this_tag)

    # first get the parent of all YouTube channels
    youtube_parent, __ = Channel.objects.get_or_create(
        name=u'YouTube™',
        slug='youtube',
        never_show=True,
    )
    try:
        channel = Channel.objects.get(
            parent=youtube_parent,
            youtube_id=data['channel']['id'],
            name=data['channel']['title'],
        )
    except Channel.DoesNotExist:
        # If it doesn't exist under the "YouTube parent",
        # see if it exists globally.
        try:
            channel = Channel.objects.get(
                youtube_id=data['channel']['id'],
                name=data['channel']['title'],
            )
        except Channel.DoesNotExist:
            channel = Channel.objects.create(
                parent=youtube_parent,
                youtube_id=data['channel']['id'],
                name=data['channel']['title'],
                slug=slugify(data['channel']['title'])
            )
        if data['channel']['thumbnail_url']:
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(
                requests.get(data['channel']['thumbnail_url']).content
            )
            img_temp.flush()
            channel.image.save(
                os.path.basename(data['channel']['thumbnail_url']),
                File(img_temp)
            )
    event.channels.add(channel)
    # also put it in the other default channels
    for channel in Channel.objects.filter(default=True):
        event.channels.add(channel)
    return serialize_event(event)
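
The thumbnail handling here is a common recipe: write the downloaded bytes to a NamedTemporaryFile, flush, then wrap it in File. A generalised sketch (save_remote_file is our own name; the timeout is added as good practice):

import os
import requests
from tempfile import NamedTemporaryFile
from django.core.files import File

def save_remote_file(field_file, url):
    # delete=True removes the temp file once closed; seek(0) rewinds so the
    # storage backend reads from the start rather than the write position.
    with NamedTemporaryFile(delete=True) as tmp:
        tmp.write(requests.get(url, timeout=10).content)
        tmp.flush()
        tmp.seek(0)
        field_file.save(os.path.basename(url), File(tmp))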

Example 144

Project: django-machina Source File: test_views.py
    @pytest.yield_fixture(autouse=True)
    def setup(self):
        # Permission handler
        self.perm_handler = PermissionHandler()

        # Set up a top-level forum
        self.top_level_forum = create_forum()

        # Set up a topic and some posts
        self.topic = create_topic(forum=self.top_level_forum, poster=self.user)
        self.post = PostFactory.create(topic=self.topic, poster=self.user)

        # Set up an attachment
        f = open(settings.MEDIA_ROOT + '/attachment.jpg', 'rb')
        self.attachment_file = File(f)
        self.attachment = AttachmentFactory.create(
            post=self.post, file=self.attachment_file)

        # Mark the forum as read
        ForumReadTrackFactory.create(forum=self.top_level_forum, user=self.user)

        # Assign some permissions
        assign_perm('can_read_forum', self.user, self.top_level_forum)
        assign_perm('can_download_file', self.user, self.top_level_forum)

        yield

        # teardown
        # --

        self.attachment_file.close()
        attachments = Attachment.objects.all()
        for attachment in attachments:
            try:
                attachment.file.delete()
            except:
                pass
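
The bare except in this teardown swallows every possible error. A slightly safer sketch that only ignores the failures expected during cleanup (delete_attachment_files is our own name):

def delete_attachment_files(attachments):
    for attachment in attachments:
        try:
            # delete(save=False) removes the stored file without re-saving
            # the model; ValueError means no file was ever attached.
            attachment.file.delete(save=False)
        except (ValueError, OSError):
            pass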

Example 145

Project: django-admin-cli Source File: cli.py
Function: update
    def _update(self, modeladmin, fields, filters, filefields, confirm=True):
        """
        Update one or more fields of all instances filtered.

        :param modeladmin: ModelAdmin of model
        :type modeladmin: :class:`admin.ModelAdmin`

        :param fields: Fields to update
        :type fields: ``dict``

        :param filters: Lookups for filters
        :type filters: ``dict``

        :param filefields: File fields to update
        :type filefields: ``dict``

        :param confirm: Ask for confirmation before performing the operation
        :type confirm: ``bool``

        :raises CommandError: If the data is invalid or a file cannot be found
        """
        for filename, path in filefields.items():
            try:
                fields[filename] = File(open(path, 'rb'))
            except IOError as err:
                raise CommandError(err.args[0])
        for obj in modeladmin.model.objects.filter(**filters):
            if confirm:
                res = raw_input("Update '%s' ? [Yes|No|All|Cancel] " % obj)\
                    .lower()
                if not res or res.startswith('n'):
                    continue
                elif res.startswith('c'):
                    break
                elif res.startswith('a'):
                    confirm = False
            try:
                filtr = {obj._meta.pk.name: getattr(obj, obj._meta.pk.name)}
                modeladmin.model.objects.filter(**filtr).update(**fields)
                obj = modeladmin.model.objects.get(**filtr)
                self.stdout.write("Updated '%s'" % obj)
            except Exception as err:
                msg = "%s: %s" % (err.__class__.__name__, err.args[0])
                self.stderr.write(msg)
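
Two caveats in this command: the File(open(path, 'rb')) handles are never closed, and queryset.update() issues SQL directly without going through the field's storage machinery. A hedged per-instance alternative (set_file_field is our own name):

import os
from django.core.files import File

def set_file_field(instance, field_name, path):
    # FieldFile.save() routes the content through the field's storage
    # backend, and the with-block closes the handle afterwards.
    with open(path, 'rb') as fh:
        getattr(instance, field_name).save(
            os.path.basename(path), File(fh), save=True
        )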

Example 146

Project: betty-cropper Source File: models.py
def optimize_image(image_model, image_buffer, filename):

    im = PILImage.open(image_buffer)

    # Let's cache some important stuff
    format = im.format
    icc_profile = im.info.get("icc_profile")
    quantization = getattr(im, "quantization", None)
    subsampling = None
    if format == "JPEG":
        try:
            subsampling = JpegImagePlugin.get_sampling(im)
        except IndexError:
            # Ignore if sampling fails
            logger.debug('JPEG sampling failed, ignoring')
        except:
            # mparent(2016-03-25): Eventually eliminate "catch all", but need to log errors to see
            # if we're missing any other exception types in the wild
            logger.exception('JPEG sampling error')

    if im.size[0] > settings.BETTY_MAX_WIDTH:
        # If the image is really large, we'll save a more reasonable version as the "original"
        height = settings.BETTY_MAX_WIDTH * float(im.size[1]) / float(im.size[0])
        im = im.resize((settings.BETTY_MAX_WIDTH, int(round(height))), PILImage.ANTIALIAS)

        out_buffer = io.BytesIO()
        if format == "JPEG" and im.mode == "RGB":
            # For JPEG files, we need to make sure that we keep the quantization profile
            try:
                im.save(
                    out_buffer,
                    icc_profile=icc_profile,
                    qtables=quantization,
                    subsampling=subsampling,
                    format="JPEG")
            except ValueError as e:
                # Maybe the image already had an invalid quant table?
                if e.args[:1] == ('Invalid quantization table',):
                    out_buffer = io.BytesIO()  # Make sure it's empty after failed save attempt
                    im.save(
                        out_buffer,
                        icc_profile=icc_profile,
                        format=format,
                    )
                else:
                    raise
        else:
            im.save(out_buffer,
                    icc_profile=icc_profile,
                    format=format)

        image_model.optimized.save(filename, File(out_buffer))

    else:
        # No modifications, just save original as optimized
        image_buffer.seek(0)
        image_model.optimized.save(filename, File(image_buffer))

    image_model.save()
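
The seek(0) calls before each File(...) here are deliberate: the buffer's position sits at EOF after PIL reads or writes it. Django's File.chunks() rewinds seekable files on its own, but not every read path goes through chunks(), so the explicit rewind is good hygiene. A two-line illustration:

import io
from django.core.files import File

buf = io.BytesIO(b'payload')
buf.read()   # position is now at EOF
buf.seek(0)  # without this, a plain read() would return b''
assert File(buf).read() == b'payload'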

Example 147

Project: wagtail Source File: models.py
    def get_rendition(self, filter):
        if isinstance(filter, string_types):
            filter, created = Filter.objects.get_or_create(spec=filter)

        cache_key = filter.get_cache_key(self)
        Rendition = self.get_rendition_model()

        try:
            rendition = self.renditions.get(
                filter=filter,
                focal_point_key=cache_key,
            )
        except Rendition.DoesNotExist:
            # Generate the rendition image
            generated_image = filter.run(self, BytesIO())

            # Generate filename
            input_filename = os.path.basename(self.file.name)
            input_filename_without_extension, input_extension = os.path.splitext(input_filename)

            # A mapping of image formats to extensions
            FORMAT_EXTENSIONS = {
                'jpeg': '.jpg',
                'png': '.png',
                'gif': '.gif',
            }

            output_extension = filter.spec.replace('|', '.') + FORMAT_EXTENSIONS[generated_image.format_name]
            if cache_key:
                output_extension = cache_key + '.' + output_extension

            # Truncate filename to prevent it going over 60 chars
            output_filename_without_extension = input_filename_without_extension[:(59 - len(output_extension))]
            output_filename = output_filename_without_extension + '.' + output_extension

            rendition, created = self.renditions.get_or_create(
                filter=filter,
                focal_point_key=cache_key,
                defaults={'file': File(generated_image.f, name=output_filename)}
            )

        return rendition
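
One detail worth noting: File accepts an explicit name, which matters when the wrapped object (the generated image's buffer here) has no useful .name of its own. A standalone illustration:

from io import BytesIO
from django.core.files import File

buf = BytesIO(b'generated bytes')
wrapped = File(buf, name='render.jpg')
assert wrapped.name == 'render.jpg'  # used when saving to a FileField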

Example 148

Project: Wooey Source File: addscript.py
Function: handle
    def handle(self, *args, **options):
        script = options.get('script')
        if not script:
            if len(args):
                script = args[-1]
            else:
                raise CommandError('You must provide a script path or directory containing scripts.')
        if not os.path.exists(script):
            raise CommandError('{0} does not exist.'.format(script))
        group = options.get('group', 'Wooey Scripts')
        scripts = [os.path.join(script, i) for i in os.listdir(script)] if os.path.isdir(script) else [script]
        converted = 0
        for script in scripts:
            if script.endswith('.pyc') or '__init__' in script:
                continue
            if script.endswith('.py'):
                sys.stdout.write('Converting {}\n'.format(script))
                # copy the script to our storage
                base_name = os.path.splitext(os.path.split(script)[1])[0]
                with open(script, 'r') as f:
                    script = default_storage.save(os.path.join(wooey_settings.WOOEY_SCRIPT_DIR, os.path.split(script)[1]), File(f))
                    if wooey_settings.WOOEY_EPHEMERAL_FILES:
                        # save it locally as well (the default_storage will default to the remote store)
                        local_storage = get_storage(local=True)
                        local_storage.save(os.path.join(wooey_settings.WOOEY_SCRIPT_DIR, os.path.split(script)[1]), File(f))
                add_kwargs = {
                    'script_path': script,
                    'group': group,
                    'script_name': base_name,
                }
                add_script = True
                if options.get('update'):
                    from wooey.models import Script
                    existing_script = Script.objects.filter(script_name=base_name)
                    if len(existing_script) == 1:
                        script_version = existing_script[0].latest_version
                        script_version.script_path = script
                        script_version.default_version = False
                        add_script = False
                        script_version.save()
                        converted += 1
                        # add_kwargs['script_version'] = script_version
                if add_script:
                    res = add_wooey_script(**add_kwargs)
                    if res['valid']:
                        converted += 1
        sys.stdout.write('Converted {} scripts\n'.format(converted))
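
Note how the command keeps the return value of default_storage.save(): storage backends may alter the name to avoid collisions, so the returned name, not the requested one, is what should be recorded. A minimal sketch (store_copy is our own name):

from django.core.files import File
from django.core.files.storage import default_storage

def store_copy(path, dest_name):
    # save() returns the name actually used, which may differ from
    # dest_name if that name is already taken.
    with open(path, 'rb') as fh:
        return default_storage.save(dest_name, File(fh))

One caveat in the original: the second local_storage.save() reuses a handle the first save already read to EOF; that works only because File.chunks() rewinds seekable files, so an explicit f.seek(0) between the two saves would be more defensive.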

Example 149

Project: django-djangui Source File: utils.py
@transaction.atomic
def create_job_fileinfo(job):
    parameters = job.get_parameters()
    from ..models import DjanguiFile
    # first, create a reference to things the script explicitly created that is a parameter
    files = []
    for field in parameters:
        try:
            if field.parameter.form_field == 'FileField':
                value = field.value
                if value is None:
                    continue
                if isinstance(value, six.string_types):
                    # check if this was ever created and make a fileobject if so
                    if get_storage(local=True).exists(value):
                        if not get_storage(local=False).exists(value):
                            get_storage(local=False).save(value, File(get_storage(local=True).open(value)))
                        value = field.value
                    else:
                        field.force_value(None)
                        field.save()
                        continue
                d = {'parameter': field, 'file': value}
                files.append(d)
        except ValueError:
            continue

    known_files = {i['file'].name for i in files}
    # add the user_output files, these are things which may be missed by the model fields because the script
    # generated them without an explicit arguments reference in the script
    file_groups = {'archives': []}
    absbase = os.path.join(settings.MEDIA_ROOT, job.save_path)
    for filename in os.listdir(absbase):
        new_name = os.path.join(job.save_path, filename)
        if any([i.endswith(new_name) for i in known_files]):
            continue
        try:
            filepath = os.path.join(absbase, filename)
            if os.path.isdir(filepath):
                continue
            d = {'name': filename, 'file': get_storage_object(os.path.join(job.save_path, filename))}
            if filename.endswith('.tar.gz') or filename.endswith('.zip'):
                file_groups['archives'].append(d)
            else:
                files.append(d)
        except IOError:
            sys.stderr.write('{}'.format(traceback.format_exc()))
            continue

    # establish grouping by inferring common things
    file_groups['all'] = files
    import imghdr
    file_groups['images'] = []
    for filemodel in files:
        if imghdr.what(filemodel['file'].path):
            file_groups['images'].append(filemodel)
    file_groups['tabular'] = []
    file_groups['fasta'] = []

    for filemodel in files:
        fileinfo = get_file_info(filemodel['file'].path)
        filetype = fileinfo.get('type')
        if filetype is not None:
            file_groups[filetype].append(dict(filemodel, **{'preview': fileinfo.get('preview')}))
        else:
            filemodel['preview'] = json.dumps(None)

    # Create our DjanguiFile models

    # mark things that are in groups so we don't add this to the 'all' category too to reduce redundancy
    grouped = set([i['file'].path for file_type, groups in six.iteritems(file_groups) for i in groups if file_type != 'all'])
    for file_type, group_files in six.iteritems(file_groups):
        for group_file in group_files:
            if file_type == 'all' and group_file['file'].path in grouped:
                continue
            try:
                preview = group_file.get('preview')
                dj_file = DjanguiFile(job=job, filetype=file_type, filepreview=preview,
                                    parameter=group_file.get('parameter'))
                filepath = group_file['file'].path
                save_path = job.get_relative_path(filepath)
                dj_file.filepath.name = save_path
                dj_file.save()
            except:
                sys.stderr.write('Error in saving DJFile: {}\n'.format(traceback.format_exc()))
                continue
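
The local-to-remote sync near the top of this function is a general pattern for copying between storage backends. A hedged standalone sketch (mirror_file is our own name):

from django.core.files import File

def mirror_file(name, source_storage, dest_storage):
    # Wrap the source handle in File so the destination storage can stream
    # it through Django's normal file API.
    if source_storage.exists(name) and not dest_storage.exists(name):
        with source_storage.open(name) as fh:
            dest_storage.save(name, File(fh))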

Example 150

Project: django-pagebits Source File: templatetags.py
Function: set_up
    def setUp(self):
        self.group = BitGroup.objects.create(name='testgroup')
        self.bit1 = PageBit.objects.create(
            name='header',
            context_name='header',
            type=0,
            group=self.group
        )
        self.bit1.data.data = 'Test Page Header'
        self.bit1.data.save()

        self.bit2 = PageBit.objects.create(
            name='page block',
            context_name='page_block',
            type=1,
            group=self.group
        )

        self.bit2.data.data = '<p>Block</p>'
        self.bit2.data.save()

        self.bit3 = PageBit.objects.create(
            name='logo image',
            context_name='logo_image',
            type=2,
            group=self.group
        )
        f = open(os.path.join(os.path.dirname(__file__), 'test-image.jpg'), 'rb')
        myfile = File(f)

        self.bit3.data.image = myfile
        self.bit3.data.save()
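
Unlike the explicit field.save(name, File(...)) calls in earlier examples, this test assigns the File to the field and then saves the model; FileField's pre_save hook copies the content into storage at that point. A context-managed sketch of the same flow (names are placeholders):

from django.core.files import File

def set_logo(bit, path):
    # Assignment alone stores nothing; the copy into storage happens
    # inside save(), via FileField.pre_save.
    with open(path, 'rb') as fh:
        bit.data.image = File(fh, name='logo.jpg')
        bit.data.save()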