django.utils.encoding.force_bytes

Here are examples of the Python API django.utils.encoding.force_bytes taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

146 Examples

Example 101

Project: django-downloadview Source File: test.py
    def assert_content(self, test_case, response, value):
        """Assert value equals response's content (byte comparison)."""
        parts = [force_bytes(s) for s in response.streaming_content]
        test_case.assertEqual(b''.join(parts), force_bytes(value))
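
force_bytes returns bytes input untouched and encodes text using UTF-8 by default, which is why it can be applied uniformly to every chunk of streaming_content as well as to the expected value. A minimal standalone sketch of that behavior:

    from django.utils.encoding import force_bytes

    # bytes pass through unchanged; text is encoded (UTF-8 by default)
    assert force_bytes(b'abc') == b'abc'
    assert force_bytes(u'abc') == b'abc'
    assert b''.join(force_bytes(s) for s in [u'a', b'b']) == b'ab'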

Example 102

Project: django-memoize Source File: __init__.py
    def _memoize_make_cache_key(self, make_name=None, timeout=DEFAULT_TIMEOUT):
        """
        Function used to create the cache_key for memoized functions.
        """
        def make_cache_key(f, *args, **kwargs):
            _timeout = getattr(timeout, 'cache_timeout', timeout)
            fname, version_data = self._memoize_version(f, args=args,
                                                        timeout=_timeout)

            #: this has to come after version_data, so that it
            #: does not break the delete_memoized functionality.
            if callable(make_name):
                altfname = make_name(fname)
            else:
                altfname = fname

            if callable(f):
                keyargs, keykwargs = self._memoize_kwargs_to_args(
                    f, *args, **kwargs
                )
            else:
                keyargs, keykwargs = args, kwargs

            cache_key = hashlib.md5(
                force_bytes((altfname, keyargs, keykwargs))
            ).hexdigest()
            cache_key += version_data

            if self.cache_prefix:
                cache_key = '%s:%s' % (self.cache_prefix, cache_key)

            return cache_key
        return make_cache_key
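
hashlib.md5 accepts only bytes, and the key material here is a tuple rather than a string; force_bytes copes with that by calling str() on non-string input before encoding, so equal tuples always hash to the same key. A hedged sketch of the key construction in isolation (the function name and arguments are hypothetical):

    import hashlib
    from django.utils.encoding import force_bytes

    altfname = 'myapp.utils.expensive'           # hypothetical memoized function
    keyargs, keykwargs = (1, 2), {'flag': True}

    # str() of the tuple, encoded to UTF-8, then hashed
    cache_key = hashlib.md5(
        force_bytes((altfname, keyargs, keykwargs))
    ).hexdigest()
    assert len(cache_key) == 32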

Example 103

Project: django-adv-cache-tag Source File: tag.py
Function: init
    def __init__(self, node, context):
        """
        Constructor of the Cache class:
            * preparing fields to be used later,
            * prepare the templatetag parameters
            * create the cache key
        """
        super(CacheTag, self).__init__()

        # the actual Node object
        self.node = node

        # the context used for the rendering
        self.context = context

        # indicate that we force regenerating the cache, even if it exists
        self.regenerate = bool(self.context.get('__regenerate__', False))

        # indicate if we only want html without parsing the nocache parts
        self.partial = bool(self.context.get('__partial__', False))

        # the content of the template, will be used through the whole process
        self.content = ''
        # the version used in the cached templatetag
        self.content_version = None

        # Final "INTERNAL_VERSION"
        if self.options.internal_version:
            self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
                                               self.options.internal_version)
        else:
            self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)

        self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)

        # prepare all parameters passed to the templatetag
        self.expire_time = None
        self.version = None
        self.prepare_params()

        # get the cache and cache key
        self.cache = self.get_cache_object()
        self.cache_key = self.get_cache_key()
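
Note the b'%s|%s' interpolation above: bytes %-formatting exists on Python 2 and returned in Python 3.5 (PEP 461), and both operands must already be bytes, hence the force_bytes calls on the class-level constants. A minimal sketch with hypothetical version values:

    from django.utils.encoding import force_bytes

    base = force_bytes('1')           # stand-in for CacheTag.INTERNAL_VERSION
    extra = force_bytes('2017-01')    # stand-in for options.internal_version
    assert b'%s|%s' % (base, extra) == b'1|2017-01'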

Example 104

Project: zulip Source File: send_password_reset_email.py
Function: send
    def send(self, users,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.txt',
             use_https=True, token_generator=default_token_generator,
             from_email=None, html_email_template_name=None):
        # type: (List[UserProfile], str, str, bool, PasswordResetTokenGenerator, Optional[Text], Optional[str]) -> None
        """Sends one-use only links for resetting password to target users

        """
        for user_profile in users:
            context = {
                'email': user_profile.email,
                'domain': user_profile.realm.host,
                'site_name': "zulipo",
                'uid': urlsafe_base64_encode(force_bytes(user_profile.pk)),
                'user': user_profile,
                'token': token_generator.make_token(user_profile),
                'protocol': 'https' if use_https else 'http',
            }

            logging.warning("Sending %s email to %s" % (email_template_name, user_profile.email,))
            self.send_mail(subject_template_name, email_template_name,
                           context, from_email, user_profile.email,
                           html_email_template_name=html_email_template_name)
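
The uid placed in the email context is the user's primary key serialized to bytes and base64-encoded so it can travel safely inside a URL; the password reset view later reverses the transformation. A round-trip sketch (42 stands in for user_profile.pk; on older Django versions urlsafe_base64_encode returns bytes, so force_text normalizes it):

    from django.utils.encoding import force_bytes, force_text
    from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode

    uid = force_text(urlsafe_base64_encode(force_bytes(42)))
    assert int(force_text(urlsafe_base64_decode(uid))) == 42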

Example 105

Project: cgstudiomap Source File: source.py
    def __init__(self, ds_input, write=False):
        self._write = 1 if write else 0
        Driver.ensure_registered()

        # Preprocess json inputs. This converts json strings to dictionaries,
        # which are parsed below the same way as direct dictionary inputs.
        if isinstance(ds_input, six.string_types) and json_regex.match(ds_input):
            ds_input = json.loads(ds_input)

        # If input is a valid file path, try setting file as source.
        if isinstance(ds_input, six.string_types):
            if not os.path.exists(ds_input):
                raise GDALException('Unable to read raster source input "{}"'.format(ds_input))
            try:
                # GDALOpen will auto-detect the data source type.
                self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
            except GDALException as err:
                raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
        elif isinstance(ds_input, dict):
            # A new raster needs to be created in write mode
            self._write = 1

            # Create driver (in memory by default)
            driver = Driver(ds_input.get('driver', 'MEM'))

            # For out of memory drivers, check filename argument
            if driver.name != 'MEM' and 'name' not in ds_input:
                raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))

            # Check if width and height were specified
            if 'width' not in ds_input or 'height' not in ds_input:
                raise GDALException('Specify width and height attributes for JSON or dict input.')

            # Check if srid was specified
            if 'srid' not in ds_input:
                raise GDALException('Specify srid for JSON or dict input.')

            # Create GDAL Raster
            self._ptr = capi.create_ds(
                driver._ptr,
                force_bytes(ds_input.get('name', '')),
                ds_input['width'],
                ds_input['height'],
                ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
                ds_input.get('datatype', 6),
                None
            )

            # Set band data if provided
            for i, band_input in enumerate(ds_input.get('bands', [])):
                band = self.bands[i]
                band.data(band_input['data'])
                if 'nodata_value' in band_input:
                    band.nodata_value = band_input['nodata_value']

            # Set SRID
            self.srs = ds_input.get('srid')

            # Set additional properties if provided
            if 'origin' in ds_input:
                self.origin.x, self.origin.y = ds_input['origin']

            if 'scale' in ds_input:
                self.scale.x, self.scale.y = ds_input['scale']

            if 'skew' in ds_input:
                self.skew.x, self.skew.y = ds_input['skew']
        elif isinstance(ds_input, c_void_p):
            # Instantiate the object using an existing pointer to a gdal raster.
            self._ptr = ds_input
        else:
            raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))

Example 106

Project: cgstudiomap Source File: base.py
Function: init
    def __init__(self, path=None, cache=0, country=None, city=None):
        """
        Initializes the GeoIP object; no parameters are required to use default
        settings.  Keyword arguments may be passed in to customize the locations
        of the GeoIP data sets.

        * path: Base directory to where GeoIP data is located or the full path
            to where the city or country data files (*.dat) are located.
            Assumes that both the city and country data sets are located in
            this directory; overrides the GEOIP_PATH settings attribute.

        * cache: The cache settings when opening up the GeoIP datasets,
            and may be an integer in (0, 1, 2, 4, 8) corresponding to
            the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
            GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
            settings,  respectively.  Defaults to 0, meaning that the data is read
            from the disk.

        * country: The name of the GeoIP country data file.  Defaults to
            'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.

        * city: The name of the GeoIP city data file.  Defaults to
            'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
        """

        warnings.warn(
            "django.contrib.gis.geoip is deprecated in favor of "
            "django.contrib.gis.geoip2 and the MaxMind GeoLite2 database "
            "format.", RemovedInDjango20Warning, 2
        )

        # Checking the given cache option.
        if cache in self.cache_options:
            self._cache = cache
        else:
            raise GeoIPException('Invalid GeoIP caching option: %s' % cache)

        # Getting the GeoIP data path.
        if not path:
            path = GEOIP_SETTINGS.get('GEOIP_PATH')
            if not path:
                raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
        if not isinstance(path, six.string_types):
            raise TypeError('Invalid path type: %s' % type(path).__name__)

        if os.path.isdir(path):
            # Constructing the GeoIP database filenames using the settings
            # dictionary.  If the database files for the GeoLite country
            # and/or city datasets exist, then try and open them.
            country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
            if os.path.isfile(country_db):
                self._country = GeoIP_open(force_bytes(country_db), cache)
                self._country_file = country_db

            city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
            if os.path.isfile(city_db):
                self._city = GeoIP_open(force_bytes(city_db), cache)
                self._city_file = city_db
        elif os.path.isfile(path):
            # Otherwise, some detective work will be needed to figure
            # out whether the given database path is for the GeoIP country
            # or city databases.
            ptr = GeoIP_open(force_bytes(path), cache)
            info = GeoIP_database_info(ptr)
            if lite_regex.match(info):
                # GeoLite City database detected.
                self._city = ptr
                self._city_file = path
            elif free_regex.match(info):
                # GeoIP Country database detected.
                self._country = ptr
                self._country_file = path
            else:
                raise GeoIPException('Unable to recognize database edition: %s' % info)
        else:
            raise GeoIPException('GeoIP path must be a valid file or directory.')

Example 107

Project: django-mutant Source File: test_commands.py
Function: write
    def write(self, data):
        self._stream.write(force_bytes(data))

Example 108

Project: drf-extensions Source File: test.py
    def _encode_data(self, data, format=None, content_type=None):
        """
        Encode the data returning a two tuple of (bytes, content_type)
        """

        if not data:
            return ('', None)

        assert format is None or content_type is None, (
            'You may not set both `format` and `content_type`.'
        )

        if content_type:
            # Content type specified explicitly, treat data as a raw bytestring
            ret = force_bytes(data, settings.DEFAULT_CHARSET)

        else:
            format = format or self.default_format

            assert format in self.renderer_classes, (
                "Invalid format '{0}'."
                "Available formats are {1}. Set TEST_REQUEST_RENDERER_CLASSES "
                "to enable extra request formats.".format(
                    format,
                    ', '.join(
                        ["'" + fmt + "'" for fmt in self.renderer_classes.keys()])
                )
            )

            # Use format and render the data into a bytestring
            renderer = self.renderer_classes[format]()
            ret = renderer.render(data)

            # Determine the content-type header from the renderer
            content_type = "{0}; charset={1}".format(
                renderer.media_type, renderer.charset
            )

            # Coerce text to bytes if required.
            if isinstance(ret, six.text_type):
                ret = bytes(ret.encode(renderer.charset))

        return ret, content_type
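
The second argument to force_bytes selects the target encoding, which is how the raw-bytestring branch above honors settings.DEFAULT_CHARSET instead of assuming UTF-8. A minimal standalone sketch:

    from django.utils.encoding import force_bytes

    # encoding defaults to 'utf-8'; any other codec can be passed explicitly
    assert force_bytes(u'café') == b'caf\xc3\xa9'
    assert force_bytes(u'café', 'latin-1') == b'caf\xe9'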

Example 109

Project: hue Source File: base.py
Function: init
    def __init__(self, path=None, cache=0, country=None, city=None):
        """
        Initializes the GeoIP object; no parameters are required to use default
        settings.  Keyword arguments may be passed in to customize the locations
        of the GeoIP data sets.

        * path: Base directory to where GeoIP data is located or the full path
            to where the city or country data files (*.dat) are located.
            Assumes that both the city and country data sets are located in
            this directory; overrides the GEOIP_PATH settings attribute.

        * cache: The cache settings when opening up the GeoIP datasets,
            and may be an integer in (0, 1, 2, 4, 8) corresponding to
            the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
            GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
            settings,  respectively.  Defaults to 0, meaning that the data is read
            from the disk.

        * country: The name of the GeoIP country data file.  Defaults to
            'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.

        * city: The name of the GeoIP city data file.  Defaults to
            'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
        """
        # Checking the given cache option.
        if cache in self.cache_options:
            self._cache = cache
        else:
            raise GeoIPException('Invalid GeoIP caching option: %s' % cache)

        # Getting the GeoIP data path.
        if not path:
            path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
            if not path:
                raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
        if not isinstance(path, six.string_types):
            raise TypeError('Invalid path type: %s' % type(path).__name__)

        if os.path.isdir(path):
            # Constructing the GeoIP database filenames using the settings
            # dictionary.  If the database files for the GeoLite country
            # and/or city datasets exist, then try and open them.
            country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
            if os.path.isfile(country_db):
                self._country = GeoIP_open(force_bytes(country_db), cache)
                self._country_file = country_db

            city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
            if os.path.isfile(city_db):
                self._city = GeoIP_open(force_bytes(city_db), cache)
                self._city_file = city_db
        elif os.path.isfile(path):
            # Otherwise, some detective work will be needed to figure
            # out whether the given database path is for the GeoIP country
            # or city databases.
            ptr = GeoIP_open(force_bytes(path), cache)
            info = GeoIP_database_info(ptr)
            if lite_regex.match(info):
                # GeoLite City database detected.
                self._city = ptr
                self._city_file = path
            elif free_regex.match(info):
                # GeoIP Country database detected.
                self._country = ptr
                self._country_file = path
            else:
                raise GeoIPException('Unable to recognize database edition: %s' % info)
        else:
            raise GeoIPException('GeoIP path must be a valid file or directory.')

Example 110

Project: hue Source File: storage.py
Function: post_process
    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given SortedDict of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return

        # where to store the new paths
        hashed_paths = {}

        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):

            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:

                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)

                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(settings.FILE_CHARSET)
                    for patterns in self._patterns.values():
                        for pattern, template in patterns:
                            converter = self.url_converter(name, template)
                            try:
                                content = pattern.sub(converter, content)
                            except ValueError as exc:
                                yield name, None, exc
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(force_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(saved_name.replace('\\', '/'))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(saved_name.replace('\\', '/'))

                # and then set the cache accordingly
                hashed_paths[self.cache_key(name.replace('\\', '/'))] = hashed_name
                yield name, hashed_name, processed

        # Finally set the cache
        self.cache.set_many(hashed_paths)

Example 111

Project: hue Source File: crypto.py
Function: pbkdf2
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
    """
    Implements PBKDF2 as defined in RFC 2898, section 5.2

    HMAC+SHA256 is used as the default pseudo random function.

    As of 2011, 10,000 iterations was the recommended default which
    took 100ms on a 2.2Ghz Core 2 Duo. This is probably the bare
    minimum for security given 1000 iterations was recommended in
    2001. This code is very well optimized for CPython and is only
    four times slower than openssl's implementation. Look in
    django.contrib.auth.hashers for the present default.
    """
    assert iterations > 0
    if not digest:
        digest = hashlib.sha256
    password = force_bytes(password)
    salt = force_bytes(salt)
    hlen = digest().digest_size
    if not dklen:
        dklen = hlen
    if dklen > (2 ** 32 - 1) * hlen:
        raise OverflowError('dklen too big')
    l = -(-dklen // hlen)
    r = dklen - (l - 1) * hlen

    hex_format_string = "%%0%ix" % (hlen * 2)

    inner, outer = digest(), digest()
    if len(password) > inner.block_size:
        password = digest(password).digest()
    password += b'\x00' * (inner.block_size - len(password))
    inner.update(password.translate(hmac.trans_36))
    outer.update(password.translate(hmac.trans_5C))

    def F(i):
        def U():
            u = salt + struct.pack(b'>I', i)
            for j in xrange(int(iterations)):
                dig1, dig2 = inner.copy(), outer.copy()
                dig1.update(u)
                dig2.update(dig1.digest())
                u = dig2.digest()
                yield _bin_to_long(u)
        return _long_to_bin(reduce(operator.xor, U()), hex_format_string)

    T = [F(x) for x in range(1, l + 1)]
    return b''.join(T[:-1]) + T[-1][:r]
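
Both password and salt are normalized with force_bytes before entering the HMAC loop, so callers may pass text or bytes interchangeably. A hedged cross-check against the standard library, assuming the pbkdf2 function above is in scope (hashlib.pbkdf2_hmac exists since Python 2.7.8/3.4; with the defaults above the two should agree byte for byte):

    import hashlib

    expected = hashlib.pbkdf2_hmac('sha256', b'secret', b'salt', 10000)
    assert pbkdf2('secret', 'salt', 10000) == expected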

Example 112

Project: django-measurement Source File: test_fields.py
    def test_deconstruct_old_migrations(self, fieldname, measure_cls):
        field = MeasurementTestModel._meta.get_field(fieldname)

        name, path, args, kwargs = field.deconstruct()

        # replace str class with binary
        kwargs['measurement_class'] = force_bytes(kwargs['measurement_class'])

        new_cls = module_loading.import_string(path)
        new_field = new_cls(name=name, *args, **kwargs)

        assert type(field) == type(new_field)

        _, _, _, kwargs_new = field.deconstruct()

        # kwargs get corrected, cls is a str again
        assert (
            kwargs_new['measurement_class'] ==
            force_text(kwargs['measurement_class'])
        )

Example 113

Project: django Source File: geometry.py
    def __init__(self, geo_input, srid=None):
        """
        The base constructor for GEOS geometry objects; it may take the
        following inputs:

         * strings:
            - WKT
            - HEXEWKB (a PostGIS-specific canonical form)
            - GeoJSON (requires GDAL)
         * buffer:
            - WKB

        The `srid` keyword is used to specify the Spatial Reference Identifier
        (SRID) number for this Geometry.  If not set, the SRID will be None.
        """
        if isinstance(geo_input, bytes):
            geo_input = force_text(geo_input)
        if isinstance(geo_input, six.string_types):
            wkt_m = wkt_regex.match(geo_input)
            if wkt_m:
                # Handling WKT input.
                if wkt_m.group('srid'):
                    srid = int(wkt_m.group('srid'))
                g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
            elif hex_regex.match(geo_input):
                # Handling HEXEWKB input.
                g = wkb_r().read(force_bytes(geo_input))
            elif json_regex.match(geo_input):
                # Handling GeoJSON input.
                g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
            else:
                raise ValueError('String or unicode input unrecognized as WKT, EWKT, or HEXEWKB.')
        elif isinstance(geo_input, GEOM_PTR):
            # When the input is a pointer to a geometry (GEOM_PTR).
            g = geo_input
        elif isinstance(geo_input, six.memoryview):
            # When the input is a buffer (WKB).
            g = wkb_r().read(geo_input)
        elif isinstance(geo_input, GEOSGeometry):
            g = capi.geom_clone(geo_input.ptr)
        else:
            # Invalid geometry type.
            raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))

        if g:
            # Setting the pointer object with a valid pointer.
            self.ptr = g
        else:
            raise GEOSException('Could not initialize GEOS Geometry with given input.')

        # Post-initialization setup.
        self._post_init(srid)

Example 114

Project: django Source File: storage.py
Function: post_process
    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given OrderedDict of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return

        # where to store the new paths
        hashed_files = OrderedDict()

        # build a list of adjustable files
        adjustable_paths = [
            path for path in paths
            if matches_patterns(path, self._patterns.keys())
        ]

        # then sort the files by the directory level
        def path_level(name):
            return len(name.split(os.sep))

        for name in sorted(paths.keys(), key=path_level, reverse=True):

            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:

                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)

                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(settings.FILE_CHARSET)
                    for extension, patterns in iteritems(self._patterns):
                        if matches_patterns(path, (extension,)):
                            for pattern, template in patterns:
                                converter = self.url_converter(name, template)
                                try:
                                    content = pattern.sub(converter, content)
                                except ValueError as exc:
                                    yield name, None, exc
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(force_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(self.clean_name(saved_name))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(self.clean_name(saved_name))

                # and then set the cache accordingly
                hashed_files[self.hash_key(name)] = hashed_name
                yield name, hashed_name, processed

        # Finally store the processed paths
        self.hashed_files.update(hashed_files)
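
ContentFile needs encoded content to hand to the backing storage, while the pattern substitution above operates on text decoded with FILE_CHARSET; force_bytes re-encodes the adjusted content on the way back out. A minimal sketch of that last step (the CSS snippet is hypothetical):

    from django.core.files.base import ContentFile
    from django.utils.encoding import force_bytes

    content = u'body { background: url("img.abc123.png"); }'
    content_file = ContentFile(force_bytes(content))
    assert content_file.read() == force_bytes(content)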

Example 115

Project: django Source File: debug.py
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash

    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        if (not tried or (                  # empty URLconf
            request.path == '/' and
            len(tried) == 1 and             # default URLconf
            len(tried[0]) == 1 and
            getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
        )):
            return default_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Resolver404:
        pass
    else:
        obj = resolver_match.func

        if hasattr(obj, '__name__'):
            caller = obj.__name__
        elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
            caller = obj.__class__.__name__

        if hasattr(obj, '__module__'):
            module = obj.__module__
            caller = '%s.%s' % (module, caller)

    t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
        'raising_view_name': caller,
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
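
Passing errors='replace' makes force_bytes substitute a placeholder instead of raising UnicodeEncodeError when str(exception) contains something the codec cannot represent. A minimal Python 3 sketch using a lone surrogate, which UTF-8 refuses to encode:

    from django.utils.encoding import force_bytes

    # 'replace' swaps the unencodable character for '?' instead of raising
    assert force_bytes(u'bad \udcff path', errors='replace') == b'bad ? path'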

Example 116

Project: django-extensions Source File: modelviz.py
Function: add_relation
    def add_relation(self, field, model, extras=""):
        if self.verbose_names and field.verbose_name:
            label = force_bytes(field.verbose_name)
            if label.islower():
                label = label.capitalize()
        else:
            label = field.name

        # show related field name
        if hasattr(field, 'related_query_name'):
            related_query_name = field.related_query_name()
            if self.verbose_names and related_query_name.islower():
                related_query_name = related_query_name.replace('_', ' ').capitalize()
            label = '{} ({})'.format(label, force_bytes(related_query_name))

        # handle self-relationships and lazy-relationships
        if isinstance(field.rel.to, six.string_types):
            if field.rel.to == 'self':
                target_model = field.model
            else:
                if '.' in field.rel.to:
                    app_label, model_name = field.rel.to.split('.', 1)
                else:
                    app_label = field.model._meta.app_label
                    model_name = field.rel.to
                target_model = apps.get_model(app_label, model_name)
        else:
            target_model = field.rel.to

        _rel = self.get_relation_context(target_model, field, label, extras)

        if _rel not in model['relations'] and self.use_model(_rel['target']):
            return _rel

Example 117

Project: django-leonardo Source File: debug.py
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash

    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        if (not tried                           # empty URLconf
            or (request.path == '/'
                and len(tried) == 1             # default URLconf
                and len(tried[0]) == 1
                and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
            return default_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Resolver404:
        pass
    else:
        obj = resolver_match.func

        if hasattr(obj, '__name__'):
            caller = obj.__name__
        elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
            caller = obj.__class__.__name__

        if hasattr(obj, '__module__'):
            module = obj.__module__
            caller = '%s.%s' % (module, caller)

    feincms_page = slug = template = None

    try:
        from leonardo.module.web.models import Page
        feincms_page = Page.objects.for_request(request, best_match=True)
        template = feincms_page.theme.template
    except:
        if Page.objects.exists():
            feincms_page = Page.objects.filter(parent=None).first()
            template = feincms_page.theme.template
    else:
        # nested path is not allowed for this time
        try:
            slug = request.path_info.split("/")[-2:-1][0]
        except KeyError:
            raise Exception("Nested path is not allowed !")

    c = RequestContext(request, {
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
        'raising_view_name': caller,
        'feincms_page': feincms_page,
        'template': template or 'base.html',
        'standalone': True,
        'slug': slug,
    })

    try:
        t = render_to_string('404_technical.html', c)
    except:
        from django.views.debug import TECHNICAL_404_TEMPLATE
        t = Template(TECHNICAL_404_TEMPLATE).render(c)
    return HttpResponseNotFound(t, content_type='text/html')

Example 118

Project: eulfedora Source File: test_views.py
    def test_index_data(self):
        # create a test object for testing index data view
        repo = Repository()
        testobj = repo.get_object(type=SimpleObject)
        testobj.label = 'test object'
        testobj.owner = 'tester'
        testobj.save()
        self.pids.append(testobj.pid)

        # test with request IP not allowed to access the service
        with override_settings(EUL_INDEXER_ALLOWED_IPS=['0.13.23.134']):
            response = index_data(self.request, testobj.pid)
            expected, got = 403, response.status_code
            self.assertEqual(expected, got,
                'Expected %s but returned %s for index_data view with request IP not in configured list' \
                % (expected, got))

        # test with request IP allowed to hit the service
        with override_settings(EUL_INDEXER_ALLOWED_IPS=[self.request_ip]):
            response = index_data(self.request, testobj.pid)
            expected, got = 200, response.status_code
            self.assertEqual(expected, got,
                'Expected %s but returned %s for index_data view' \
                % (expected, got))
            expected, got = 'application/json', response['Content-Type']
            self.assertEqual(expected, got,
                'Expected %s but returned %s for mimetype on index_data view' \
                % (expected, got))
            response_data = json.loads(response.content.decode('utf-8'))
            self.assertEqual(testobj.index_data(), response_data,
               'Response content loaded from JSON should be equal to object indexdata')

            # test with basic auth
            testuser, testpass = 'testuser', 'testpass'
            token = base64.b64encode(force_bytes('%s:%s' % (testuser, testpass)))
            self.request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % force_text(token)
            with patch('eulfedora.indexdata.views.TypeInferringRepository') as typerepo:
                typerepo.return_value.get_object.return_value.index_data.return_value = {}
                index_data(self.request, testobj.pid)
                typerepo.assert_called_with(username=testuser, password=testpass)

            # non-existent pid should generate a 404
            self.assertRaises(Http404, index_data, self.request, 'bogus:testpid')
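
base64.b64encode requires bytes on Python 3, so the 'user:password' string is pushed through force_bytes first, and force_text turns the encoded token back into text for the header value. A standalone sketch of the same round trip:

    import base64
    from django.utils.encoding import force_bytes, force_text

    # placeholder credentials
    token = base64.b64encode(force_bytes('testuser:testpass'))
    header = 'Basic %s' % force_text(token)
    assert header == 'Basic dGVzdHVzZXI6dGVzdHBhc3M='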

Example 119

Project: feedhq Source File: models.py
    def update_feed(self, url, etag=None, last_modified=None, subscribers=1,
                    backoff_factor=1, previous_error=None, link=None,
                    title=None, hub=None):
        url = URLObject(url)
        # Check if this domain has rate-limiting rules
        ratelimit_key = 'ratelimit:{0}'.format(
            url.netloc.without_auth().without_port())
        retry_at = cache.get(ratelimit_key)
        if retry_at:
            retry_in = (epoch_to_utc(retry_at) - timezone.now()).seconds
            schedule_job(url, schedule_in=retry_in,
                         connection=get_redis_connection())
            return

        if subscribers == 1:
            subscribers_text = '1 subscriber'
        else:
            subscribers_text = '{0} subscribers'.format(subscribers)

        headers = {
            'User-Agent': USER_AGENT % subscribers_text,
            'Accept': feedparser.ACCEPT_HEADER,
        }

        if last_modified:
            headers['If-Modified-Since'] = force_bytes(last_modified)
        if etag:
            headers['If-None-Match'] = force_bytes(etag)
        if last_modified or etag:
            headers['A-IM'] = force_bytes('feed')

        if settings.TESTS:
            # Make sure requests.get is properly mocked during tests
            if str(type(requests.get)) != "<class 'unittest.mock.MagicMock'>":
                raise ValueError("Not Mocked")

        auth = None
        if url.auth != (None, None):
            auth = url.auth

        start = datetime.datetime.now()
        error = None
        try:
            response = requests.get(
                six.text_type(url.without_auth()), headers=headers, auth=auth,
                timeout=UniqueFeed.request_timeout(backoff_factor))
        except (requests.RequestException, socket.timeout, socket.error,
                IncompleteRead, DecodeError) as e:
            logger.debug("Error fetching %s, %s" % (url, str(e)))
            if isinstance(e, IncompleteRead):
                error = UniqueFeed.CONNECTION_ERROR
            elif isinstance(e, DecodeError):
                error = UniqueFeed.DECODE_ERROR
            else:
                error = UniqueFeed.TIMEOUT
            self.backoff_feed(url, error, backoff_factor)
            return
        except LocationParseError:
            logger.debug(u"Failed to parse URL for %s", url)
            self.mute_feed(url, UniqueFeed.PARSE_ERROR)
            return

        elapsed = (datetime.datetime.now() - start).seconds

        ctype = response.headers.get('Content-Type', None)
        if (response.history and
            url != response.url and ctype is not None and (
                ctype.startswith('application') or
                ctype.startswith('text/xml') or
                ctype.startswith('text/rss'))):
            redirection = None
            for index, redirect in enumerate(response.history):
                if redirect.status_code != 301:
                    break
                # Actual redirection is next request's url
                try:
                    redirection = response.history[index + 1].url
                except IndexError:  # next request is final request
                    redirection = response.url

            if redirection is not None and redirection != url:
                self.handle_redirection(url, redirection)

        update = {'last_update': int(time.time())}

        if response.status_code == 410:
            logger.debug(u"Feed gone, %s", url)
            self.mute_feed(url, UniqueFeed.GONE)
            return

        elif response.status_code in [400, 401, 403, 404, 500, 502, 503]:
            self.backoff_feed(url, str(response.status_code), backoff_factor)
            return

        elif response.status_code not in [200, 204, 226, 304]:
            logger.debug(u"%s returned %s", url, response.status_code)

            if response.status_code == 429:
                # Too Many Requests
                # Prevent next jobs from fetching the URL before retry-after
                retry_in = int(response.headers.get('Retry-After', 60))
                retry_at = timezone.now() + datetime.timedelta(
                    seconds=retry_in)
                cache.set(ratelimit_key,
                          int(retry_at.strftime('%s')),
                          retry_in)
                schedule_job(url, schedule_in=retry_in)
                return

        else:
            # Avoid going back to 1 directly if it isn't safe given the
            # actual response time.
            if previous_error and error is None:
                update['error'] = None
            backoff_factor = min(backoff_factor, self.safe_backoff(elapsed))
            update['backoff_factor'] = backoff_factor

        if response.status_code == 304:
            schedule_job(url,
                         schedule_in=UniqueFeed.delay(backoff_factor, hub),
                         connection=get_redis_connection(), **update)
            return

        if 'etag' in response.headers:
            update['etag'] = response.headers['etag']
        else:
            update['etag'] = None

        if 'last-modified' in response.headers:
            update['modified'] = response.headers['last-modified']
        else:
            update['modified'] = None

        try:
            if not response.content:
                content = ' '  # chardet won't detect encoding on empty strings
            else:
                content = response.content
        except socket.timeout:
            logger.debug(u'%s timed out', url)
            self.backoff_feed(url, UniqueFeed.TIMEOUT, backoff_factor)
            return

        parsed = feedparser.parse(content)

        if not is_feed(parsed):
            self.backoff_feed(url, UniqueFeed.NOT_A_FEED,
                              UniqueFeed.MAX_BACKOFF)
            return

        if 'link' in parsed.feed and parsed.feed.link != link:
            update['link'] = parsed.feed.link

        if 'title' in parsed.feed and parsed.feed.title != title:
            update['title'] = parsed.feed.title

        if 'links' in parsed.feed:
            for link in parsed.feed.links:
                if link.rel == 'hub':
                    update['hub'] = link.href
        if 'hub' not in update:
            update['hub'] = None
        else:
            subs_key = u'pshb:{0}'.format(url)
            enqueued = cache.get(subs_key)
            if not enqueued and not settings.DEBUG:
                cache.set(subs_key, True, 3600 * 24)
                enqueue(ensure_subscribed, args=[url, update['hub']],
                        queue='low')

        schedule_job(url,
                     schedule_in=UniqueFeed.delay(
                         update.get('backoff_factor', backoff_factor),
                         update['hub']),
                     connection=get_redis_connection(), **update)

        entries = list(filter(
            None,
            [self.entry_data(entry, parsed) for entry in parsed.entries]
        ))
        if len(entries):
            enqueue(store_entries, args=[url, entries], queue='store')

Example 120

Project: courtlistener Source File: cl_scrape_opinions.py
Function: scrape_court
    def scrape_court(self, site, full_crawl=False):
        download_error = False
        # Get the court object early for logging
        # opinions.united_states.federal.ca9_u --> ca9
        court_str = site.court_id.split('.')[-1].split('_')[0]
        court = Court.objects.get(pk=court_str)

        dup_checker = DupChecker(court, full_crawl=full_crawl)
        abort = dup_checker.abort_by_url_hash(site.url, site.hash)
        if not abort:
            if site.cookies:
                logger.info("Using cookies: %s" % site.cookies)
            for i, item in enumerate(site):
                msg, r = get_binary_content(
                    item['download_urls'],
                    site.cookies,
                    site._get_adapter_instance(),
                    method=site.method
                )
                if msg:
                    logger.warn(msg)
                    ErrorLog(log_level='WARNING',
                             court=court,
                             message=msg).save()
                    continue

                content = site.cleanup_content(r.content)

                current_date = item['case_dates']
                try:
                    next_date = site[i + 1]['case_dates']
                except IndexError:
                    next_date = None

                # request.content is sometimes a str, sometimes unicode, so
                # force it all to be bytes, pleasing hashlib.
                sha1_hash = hashlib.sha1(force_bytes(content)).hexdigest()
                if (court_str == 'nev' and
                        item['precedential_statuses'] == 'Unpublished'):
                    # Nevada's non-precedential cases have different SHA1
                    # sums every time.
                    lookup_params = {'lookup_value': item['download_urls'],
                                     'lookup_by': 'download_url'}
                else:
                    lookup_params = {'lookup_value': sha1_hash,
                                     'lookup_by': 'sha1'}

                onwards = dup_checker.press_on(Opinion, current_date, next_date,
                                               **lookup_params)
                if dup_checker.emulate_break:
                    break

                if onwards:
                    # Not a duplicate, carry on
                    logger.info('Adding new document found at: %s' %
                                item['download_urls'].encode('utf-8'))
                    dup_checker.reset()

                    docket, opinion, cluster, error = self.make_objects(
                        item, court, sha1_hash, content
                    )

                    if error:
                        download_error = True
                        continue

                    self.save_everything(
                        items={
                            'docket': docket,
                            'opinion': opinion,
                            'cluster': cluster
                        },
                        index=False
                    )
                    extract_doc_content.delay(
                        opinion.pk,
                        callback=subtask(extract_by_ocr),
                        citation_countdown=random.randint(0, 3600)
                    )

                    logger.info("Successfully added doc {pk}: {name}".format(
                        pk=opinion.pk,
                        name=item['case_names'].encode('utf-8'),
                    ))

            # Update the hash if everything finishes properly.
            logger.info("%s: Successfully crawled opinions." % site.court_id)
            if not download_error and not full_crawl:
                # Only update the hash if no errors occurred.
                dup_checker.update_site_hash(site.hash)
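
As the inline comment notes, hashlib refuses text input on Python 3, so the content is coerced with force_bytes before hashing. A minimal sketch:

    import hashlib
    from django.utils.encoding import force_bytes

    content = u'court opinion text'   # may arrive as str or bytes
    sha1_hash = hashlib.sha1(force_bytes(content)).hexdigest()
    assert len(sha1_hash) == 40       # 160-bit digest, 40 hex characters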

Example 121

Project: courtlistener Source File: cl_scrape_oral_arguments.py
Function: scrape_court
    def scrape_court(self, site, full_crawl=False):
        download_error = False
        # Get the court object early for logging
        # opinions.united_states.federal.ca9_u --> ca9
        court_str = site.court_id.split('.')[-1].split('_')[0]
        court = Court.objects.get(pk=court_str)

        dup_checker = DupChecker(court, full_crawl=full_crawl)
        abort = dup_checker.abort_by_url_hash(site.url, site.hash)
        if not abort:
            if site.cookies:
                logger.info("Using cookies: %s" % site.cookies)
            for i, item in enumerate(site):
                msg, r = get_binary_content(
                    item['download_urls'],
                    site.cookies,
                    site._get_adapter_instance(),
                    method=site.method
                )
                if msg:
                    logger.warn(msg)
                    ErrorLog(log_level='WARNING',
                             court=court,
                             message=msg).save()
                    continue

                content = site.cleanup_content(r.content)

                current_date = item['case_dates']
                try:
                    next_date = site[i + 1]['case_dates']
                except IndexError:
                    next_date = None

                # request.content is sometimes a str, sometimes unicode, so
                # force it all to be bytes, pleasing hashlib.
                sha1_hash = hashlib.sha1(force_bytes(content)).hexdigest()
                onwards = dup_checker.press_on(
                    Audio,
                    current_date,
                    next_date,
                    lookup_value=sha1_hash,
                    lookup_by='sha1'
                )
                if dup_checker.emulate_break:
                    break

                if onwards:
                    # Not a duplicate, carry on
                    logger.info('Adding new document found at: %s' %
                                item['download_urls'].encode('utf-8'))
                    dup_checker.reset()

                    docket, audio_file, error = self.make_objects(
                        item, court, sha1_hash, content,
                    )

                    if error:
                        download_error = True
                        continue

                    self.save_everything(
                        items={
                            'docket': docket,
                            'audio_file': audio_file,
                        },
                        index=False,
                    )
                    process_audio_file.apply_async(
                        (audio_file.pk,),
                        countdown=random.randint(0, 3600)
                    )

                    logger.info(
                        "Successfully added audio file {pk}: {name}".format(
                            pk=audio_file.pk,
                            name=item['case_names'].encode('utf-8')
                        )
                    )

            # Update the hash if everything finishes properly.
            logger.info("%s: Successfully crawled oral arguments." %
                        site.court_id)
            if not download_error and not full_crawl:
                # Only update the hash if no errors occurred.
                dup_checker.update_site_hash(site.hash)

Example 122

Project: django-cryptography Source File: crypto.py
Function: salted_hmac
def salted_hmac(key_salt, value, secret=None):
    """
    Returns the HMAC-HASH of 'value', using a key generated from key_salt and a
    secret (which defaults to settings.SECRET_KEY).

    A different key_salt should be passed in for every application of HMAC.

    :type key_salt: any
    :type value: any
    :type secret: any
    :rtype: HMAC
    """
    if secret is None:
        secret = settings.SECRET_KEY

    key_salt = force_bytes(key_salt)
    secret = force_bytes(secret)

    # We need to generate a derived key from our base key.  We can do this by
    # passing the key_salt and our base key through a pseudo-random function;
    # SHA1 works nicely.
    digest = hashes.Hash(settings.CRYPTOGRAPHY_DIGEST,
                         backend=settings.CRYPTOGRAPHY_BACKEND)
    digest.update(key_salt + secret)
    key = digest.finalize()

    # If len(key_salt + secret) > sha_constructor().block_size, the above
    # line is redundant and could be replaced by key = key_salt + secret, since
    # the hmac module does the same thing for keys longer than the block size.
    # However, we need to ensure that we *always* do this.
    h = HMAC(key, settings.CRYPTOGRAPHY_DIGEST,
             backend=settings.CRYPTOGRAPHY_BACKEND)
    h.update(force_bytes(value))
    return h
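
Because key_salt, secret, and value all pass through force_bytes, callers may supply text, bytes, or any object with a meaningful str(). A hedged usage sketch, assuming the salted_hmac above is in scope (the cryptography-backed HMAC object it returns exposes its digest via finalize(); the salt and value are hypothetical):

    mac = salted_hmac('myapp.signer', 'value-to-sign')
    signature = mac.finalize()          # raw digest bytes
    assert isinstance(signature, bytes)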

Example 123

Project: sentry Source File: api.py
Function: dispatch
    @csrf_exempt
    @never_cache
    def dispatch(self, request, project_id=None, *args, **kwargs):
        helper = self.helper_cls(
            agent=request.META.get('HTTP_USER_AGENT'),
            project_id=project_id,
            ip_address=request.META['REMOTE_ADDR'],
        )
        origin = None

        try:
            origin = helper.origin_from_request(request)

            response = self._dispatch(request, helper, project_id=project_id,
                                      origin=origin,
                                      *args, **kwargs)
        except APIError as e:
            context = {
                'error': force_bytes(e.msg, errors='replace'),
            }
            if e.name:
                context['error_name'] = e.name

            response = HttpResponse(json.dumps(context),
                                    content_type='application/json',
                                    status=e.http_status)
            # Set X-Sentry-Error as in many cases it is easier to inspect the headers
            response['X-Sentry-Error'] = context['error']

            if isinstance(e, APIRateLimited) and e.retry_after is not None:
                response['Retry-After'] = six.text_type(e.retry_after)

        except Exception as e:
            # TODO(dcramer): test failures are not outputting the log message
            # here
            if settings.DEBUG:
                content = traceback.format_exc()
            else:
                content = ''
            logger.exception(e)
            response = HttpResponse(content,
                                    content_type='text/plain',
                                    status=500)

        # TODO(dcramer): it'd be nice if we had an incr_multi method so
        # tsdb could optimize this
        metrics.incr('client-api.all-versions.requests')
        metrics.incr('client-api.all-versions.responses.%s' % (
            response.status_code,
        ))
        metrics.incr('client-api.all-versions.responses.%sxx' % (
            six.text_type(response.status_code)[0],
        ))

        if helper.context.version:
            metrics.incr('client-api.v%s.requests' % (
                helper.context.version,
            ))
            metrics.incr('client-api.v%s.responses.%s' % (
                helper.context.version, response.status_code
            ))
            metrics.incr('client-api.v%s.responses.%sxx' % (
                helper.context.version, six.text_type(response.status_code)[0]
            ))

        if response.status_code != 200 and origin:
            # We allow all origins on errors
            response['Access-Control-Allow-Origin'] = '*'

        if origin:
            response['Access-Control-Allow-Headers'] = \
                'X-Sentry-Auth, X-Requested-With, Origin, Accept, ' \
                'Content-Type, Authentication'
            response['Access-Control-Allow-Methods'] = \
                ', '.join(self._allowed_methods())

        return response

Example 124

Project: synnefo Source File: __init__.py
    def generic(self, method, path, data='',
                content_type='application/octet-stream', secure=False,
                **extra):
        """Constructs an arbitrary HTTP request."""
        from django.utils.encoding import force_bytes
        import six
        parsed = urlparse(path)
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'REQUEST_METHOD': str(method),
            'SERVER_PORT': str('443') if secure else str('80'),
            'wsgi.url_scheme': str('https') if secure else str('http'),
        }
        # The original Django function used the check 'if data:', but that is
        # not enough, as it does not account for zero-length payloads
        if data is not None:
            r.update({
                'CONTENT_LENGTH': len(data),
                'CONTENT_TYPE': str(content_type),
                'wsgi.input': FakePayload(data),
            })
        r.update(extra)
        # If QUERY_STRING is absent or empty, we want to extract it from the URL.
        if not r.get('QUERY_STRING'):
            query_string = force_bytes(parsed[4])
            # WSGI requires latin-1 encoded strings. See get_path_info().
            if six.PY3:
                query_string = query_string.decode('iso-8859-1')
            r['QUERY_STRING'] = query_string
        return self.request(**r)

Example 125

Project: django-user-management Source File: mixins.py
Function: generate_uid
    def generate_uid(self):
        """Generate user uid for password reset."""
        return urlsafe_base64_encode(force_bytes(self.pk))
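
The inverse operation is urlsafe_base64_decode plus a text coercion. A minimal round trip (pk value hypothetical; older Django versions return bytes from urlsafe_base64_encode, newer ones return str, and force_text was later renamed force_str):

from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode

uid = urlsafe_base64_encode(force_bytes(42))   # 'NDI' (or b'NDI' on old versions)
pk = force_text(urlsafe_base64_decode(uid))    # '42'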

Example 126

Project: DjangoUnleashed-1.8 Source File: utils.py
Function: get_context_data
    def get_context_data(
            self, request, user, context=None):
        if context is None:
            context = dict()
        current_site = get_current_site(request)
        if request.is_secure():
            protocol = 'https'
        else:
            protocol = 'http'
        token = token_generator.make_token(user)
        uid = urlsafe_base64_encode(
            force_bytes(user.pk))
        context.update({
            'domain': current_site.domain,
            'protocol': protocol,
            'site_name': current_site.name,
            'token': token,
            'uid': uid,
            'user': user,
        })
        return context

Example 127

Project: django-admin2 Source File: utils.py
def type_str(text):
    if six.PY2:
        return force_bytes(text)
    else:
        return force_text(text)
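
In other words, type_str coerces a value to whatever the native str type is: bytes on Python 2, text on Python 3. A one-line sanity check (assuming the helper above is importable):

isinstance(type_str(u'café'), str)   # True on both Python 2 and Python 3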

Example 128

Project: django-sass-processor Source File: processor.py
Function: call
    def __call__(self, path):
        basename, ext = os.path.splitext(path)
        filename = find_file(path)
        if filename is None:
            raise FileNotFoundError("Unable to locate file {path}".format(path=path))

        if ext not in self.sass_extensions:
            # return the given path, since it ends neither in `.scss` nor in `.sass`
            return urljoin(self.prefix, path)

        # Compare the timestamp of the sourcemap file against all of its dependencies to decide whether we must recompile.
        css_filename = basename + '.css'
        url = urljoin(self.prefix, css_filename)
        if not self.processor_enabled:
            return url
        sourcemap_filename = css_filename + '.map'
        if self.is_latest(sourcemap_filename):
            return url

        # With offline compilation, raise an error if the CSS file cannot be found.
        if sass is None:
            msg = "Offline compiled file `{}` is missing and libsass has not been installed."
            raise ImproperlyConfigured(msg.format(css_filename))

        # add a function to be used from inside SASS
        custom_functions = {'get-setting': get_setting}

        # otherwise compile the SASS/SCSS file into .css and store it
        sourcemap_url = self.storage.url(sourcemap_filename)
        compile_kwargs = {
            'filename': filename,
            'source_map_filename': sourcemap_url,
            'include_paths': self.include_paths + APPS_INCLUDE_DIRS,
            'custom_functions': custom_functions,
        }
        if self.sass_precision:
            compile_kwargs['precision'] = self.sass_precision
        if self.sass_output_style:
            compile_kwargs['output_style'] = self.sass_output_style
        content, sourcemap = sass.compile(**compile_kwargs)
        content = force_bytes(content)
        sourcemap = force_bytes(sourcemap)
        if self.storage.exists(css_filename):
            self.storage.delete(css_filename)
        self.storage.save(css_filename, ContentFile(content))
        if self.storage.exists(sourcemap_filename):
            self.storage.delete(sourcemap_filename)
        self.storage.save(sourcemap_filename, ContentFile(sourcemap))
        return url
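
The two force_bytes calls matter because libsass hands back text, while the storage backend should receive raw bytes to write verbatim. The same pattern in isolation (content hypothetical):

from django.core.files.base import ContentFile
from django.utils.encoding import force_bytes

content = ContentFile(force_bytes(u'body { color: red; }'))
content.read()   # b'body { color: red; }'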

Example 129

Project: julython.org Source File: forms.py
    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None, html_email_template_name=None):
        """
        Generates a one-use only link for resetting password and sends to the
        user.
        """
        email = self.cleaned_data["email"]
        user = User.get_by_auth_id("email:%s" % email)
        if not user:
            return
        current_site = get_current_site(request)
        site_name = current_site.name
        domain = current_site.domain
        c = {
            'email': email,
            'domain': domain,
            'site_name': site_name,
            'uid': urlsafe_base64_encode(force_bytes(user.pk)),
            'user': user,
            'token': token_generator.make_token(user),
            'protocol': 'https' if use_https else 'http',
        }
        subject = loader.render_to_string(subject_template_name, c)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        mail = loader.render_to_string(email_template_name, c)

        if html_email_template_name:
            html_email = loader.render_to_string(html_email_template_name, c)
        else:
            html_email = None
        send_mail(subject, mail, from_email, [email])

Example 130

Project: kobocat Source File: test_connect_viewset.py
Function: test_reset_user_password
    def test_reset_user_password(self):
        # set user.last_login, ensures we get same/valid token
        # https://code.djangoproject.com/ticket/10265
        self.user.last_login = now()
        self.user.save()
        token = default_token_generator.make_token(self.user)
        new_password = "bobbob1"
        data = {'token': token, 'new_password': new_password}
        # missing uid, should fail
        request = self.factory.post('/', data=data)
        response = self.view(request)
        self.assertEqual(response.status_code, 400)

        data['uid'] = urlsafe_base64_encode(force_bytes(self.user.pk))
        # with uid, should be successful
        request = self.factory.post('/', data=data)
        response = self.view(request)
        self.assertEqual(response.status_code, 204)
        user = User.objects.get(email=self.user.email)
        self.assertTrue(user.check_password(new_password))

        request = self.factory.post('/', data=data)
        response = self.view(request)
        self.assertEqual(response.status_code, 400)

Example 131

Project: django-smartfields Source File: test_files.py
    def test_file_field(self):
        instance = FileTesting.objects.create()
        # test default static
        self.assertEqual(instance.field_1_foo.url, "/static/defaults/foo.txt")
        self.assertEqual(instance.bar.url, "/static/defaults/bar.txt")
        # test default FieldFile set and processed
        self.assertEqual(instance.field_1_foo.read(), force_bytes("FOO\n"))
        self.assertEqual(instance.bar.read(), force_bytes("BAR\n"))
        self.assertEqual(instance.field_2.read(), force_bytes("foo\n"))
        field_2_path = instance.field_2.path
        self.assertTrue(os.path.isfile(field_2_path))
        # test assignment of file
        foo_bar = File(open(add_base("static/defaults/foo-bar.txt"), 'r'))
        instance.field_1 = foo_bar
        instance.save()
        foo_bar.close()
        # make sure default file was not removed
        self.assertTrue(os.path.isfile(field_2_path))
        # check new content
        self.assertEqual(instance.field_1.read(), force_bytes("FOO BAR\n"))
        self.assertEqual(instance.field_1_foo.read(), force_bytes("FOO BAR\n"))
        instance.field_2.seek(0)
        self.assertEqual(instance.field_2.read(), force_bytes("foo\n"))
        # testing setting default value again
        instance.field_2 = None
        instance.save()
        # make sure previous file was removed
        self.assertFalse(os.path.isfile(field_2_path))
        self.assertEqual(instance.field_2.read(), force_bytes("foo bar\n"))
        # test deletion of file together with instance
        field_1_path = instance.field_1.path
        field_1_foo_path = instance.field_1_foo.path
        field_2_path = instance.field_2.path
        self.assertTrue(os.path.isfile(field_1_path))
        self.assertTrue(os.path.isfile(field_1_foo_path))
        self.assertTrue(os.path.isfile(field_2_path))
        instance.delete()
        self.assertFalse(os.path.isfile(field_1_path))
        self.assertFalse(os.path.isfile(field_1_foo_path))
        self.assertFalse(os.path.isfile(field_2_path))

Example 132

Project: django-daguerre Source File: models.py
Function: upload_to
def upload_to(instance, filename):
    """
    Construct the directory path where the adjusted images will be saved,
    using the MD5 hash algorithm.

    Can be customized using the DAGUERRE_PATH setting in the project's
    settings. If left unspecified, the default value 'dg' is used.
    WARNING: The maximum length of the specified string is 13 characters.

    Example:
    * Default: dg/ce/2b/7014c0bdbedea0e4f4bf.jpeg
    * DAGUERRE_PATH = 'img': img/ce/2b/7014c0bdbedea0e4f4bf.jpeg

    Known issue:
    * If the hash-derived directory name is 'ad', ad blockers will block the
      image. All occurrences of 'ad' are therefore replaced with 'ag', which
      cannot occur in an MD5 hex digest (MD5 only produces letters 'a' to 'f').
    """

    image_path = getattr(
        settings, 'DAGUERRE_ADJUSTED_IMAGE_PATH', DEFAULT_ADJUSTED_IMAGE_PATH)

    if len(image_path) > 13:
        msg = ('The DAGUERRE_PATH value is more than 13 characters long! '
               'Falling back to the default '
               'value: "{}".'.format(DEFAULT_ADJUSTED_IMAGE_PATH))
        warnings.warn(msg)
        image_path = DEFAULT_ADJUSTED_IMAGE_PATH

    # Avoid TypeError on Py3 by forcing the string to bytestring
    # https://docs.djangoproject.com/en/dev/_modules/django/contrib/auth/hashers/
    # https://github.com/django/django/blob/master/django/contrib/auth/hashers.py#L524
    str_for_hash = force_bytes('{} {}'.format(filename, datetime.utcnow()))
    # Replace all occurrences of 'ad' with 'ag' to avoid ad blockers
    hash_for_dir = hashlib.md5(str_for_hash).hexdigest().replace('ad', 'ag')
    return '{0}/{1}/{2}/{3}'.format(
        image_path, hash_for_dir[0:2], hash_for_dir[2:4], filename)
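
The hashing logic is easy to trace by hand. A stdlib-only sketch mirroring the function above (filename hypothetical, encode('utf-8') standing in for force_bytes):

import hashlib
from datetime import datetime

filename = 'photo.jpeg'
str_for_hash = '{} {}'.format(filename, datetime.utcnow()).encode('utf-8')
hash_for_dir = hashlib.md5(str_for_hash).hexdigest().replace('ad', 'ag')
'dg/{0}/{1}/{2}'.format(hash_for_dir[0:2], hash_for_dir[2:4], filename)
# e.g. 'dg/ce/2b/photo.jpeg'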

Example 133

Project: django-ca Source File: managers.py
Function: init
    def init(self, name, key_size, key_type, algorithm, expires, parent, pathlen, subject,
             issuer_url=None, issuer_alt_name=None, crl_url=None, ocsp_url=None,
             ca_issuer_url=None, ca_crl_url=None, ca_ocsp_url=None,
             name_constraints=None, password=None):
        """Create a Certificate Authority."""

        # NOTE: This is already verified by KeySizeAction, so none of these checks should ever be
        #       True in the real world. Nonetheless they are here as a safety precaution.
        if not is_power2(key_size):
            raise RuntimeError("%s: Key size must be a power of two." % key_size)
        elif key_size < ca_settings.CA_MIN_KEY_SIZE:
            raise RuntimeError("%s: Key size must be least %s bits."
                               % (key_size, ca_settings.CA_MIN_KEY_SIZE))

        private_key = crypto.PKey()
        private_key.generate_key(getattr(crypto, 'TYPE_%s' % key_type), key_size)

        # set basic properties
        cert = get_basic_cert(expires)
        for key, value in sort_subject_dict(subject):
            setattr(cert.get_subject(), key, force_bytes(value))
        cert.set_pubkey(private_key)

        basicConstraints = 'CA:TRUE'
        if pathlen is not False:
            basicConstraints += ', pathlen:%s' % pathlen

        san = force_bytes('DNS:%s' % subject['CN'])
        cert.add_extensions([
            crypto.X509Extension(b'basicConstraints', True, basicConstraints.encode('utf-8')),
            crypto.X509Extension(b'keyUsage', 0, b'keyCertSign,cRLSign'),
            crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=cert),
            crypto.X509Extension(b'subjectAltName', 0, san),
        ])

        extensions = self.get_common_extensions(ca_issuer_url, ca_crl_url, ca_ocsp_url)
        if name_constraints:
            name_constraints = ','.join(name_constraints).encode('utf-8')
            extensions.append(crypto.X509Extension(b'nameConstraints', True, name_constraints))

        if parent is None:
            cert.set_issuer(cert.get_subject())
            extensions.append(crypto.X509Extension(b'authorityKeyIdentifier', False,
                                                   b'keyid:always', issuer=cert))
        else:
            cert.set_issuer(parent.x509.get_subject())
            extensions.append(crypto.X509Extension(b'authorityKeyIdentifier', False,
                                                   b'keyid,issuer', issuer=parent.x509))
        cert.add_extensions(extensions)

        # sign the certificate
        if parent is None:
            cert.sign(private_key, algorithm)
        else:
            cert.sign(parent.key, algorithm)

        if crl_url is not None:
            crl_url = '\n'.join(crl_url)

        # create certificate in database
        ca = self.model(name=name, issuer_url=issuer_url, issuer_alt_name=issuer_alt_name,
                        ocsp_url=ocsp_url, crl_url=crl_url, parent=parent)
        ca.x509 = cert
        ca.private_key_path = os.path.join(ca_settings.CA_DIR, '%s.key' % ca.serial)
        ca.save()

        dump_args = []
        if password is not None:  # pragma: no cover
            dump_args = ['des3', password]

        # write private key to file
        oldmask = os.umask(247)
        with open(ca.private_key_path, 'w') as key_file:
            key = crypto.dump_privatekey(crypto.FILETYPE_PEM, private_key, *dump_args)
            key_file.write(key.decode('utf-8'))
        os.umask(oldmask)

        return ca

Example 134

Project: django-ca Source File: managers.py
Function: init
    def init(self, ca, csr, expires, algorithm, subject=None, cn_in_san=True,
             csr_format=crypto.FILETYPE_PEM, subjectAltName=None, keyUsage=None,
             extendedKeyUsage=None, tlsfeature=None):
        """Create a signed certificate from a CSR.

        X509 extensions (`keyUsage`, `extendedKeyUsage`) may either be None (in which case they are
        not added) or a tuple with the first value being a bool indicating if the value is critical
        and the second value being a byte-array indicating the extension value. Example::

            (True, b'value')

        Parameters
        ----------

        ca : django_ca.models.CertificateAuthority
            The certificate authority to sign the certificate with.
        csr : str
            A valid CSR in PEM format. If none is given, `self.csr` will be used.
        expires : int
            When the certificate should expire (passed to :py:func:`get_basic_cert`).
        algorithm : {'sha512', 'sha256', ...}
            Algorithm used to sign the certificate. The default is the CA_DIGEST_ALGORITHM setting.
        subject : dict, optional
            The Subject to use in the certificate.  The keys of this dict are the fields of an X509
            subject, that is `"C"`, `"ST"`, `"L"`, `"OU"` and `"CN"`. If omitted or if the value
            does not contain a `"CN"` key, the first value of the `subjectAltName` parameter is
            used as CommonName (and is obviously mandatory in this case).
        cn_in_san : bool, optional
            Whether the CommonName should also be included as subjectAlternativeName. The default is
            `True`, but the parameter is ignored if no CommonName is given. This is typically set
            to `False` when creating a client certificate, where the subject's CommonName has no
            meaningful value as a subjectAltName.
        csr_format : int, optional
            The format of the submitted CSR request. One of the OpenSSL.crypto.FILETYPE_*
            constants. The default is PEM.
        subjectAltName : list of str, optional
            A list of values for the subjectAltName extension. Values are passed to
            `get_subjectAltName`, see the function documentation for how this value is parsed.
        keyUsage : tuple or None
            Value for the `keyUsage` X509 extension. See description for format details.
        extendedKeyUsage : tuple or None
            Value for the `extendedKeyUsage` X509 extension. See description for format details.
        tlsfeature : tuple or None
            Value for the `tlsfeature` extension. See description for format details.

        Returns
        -------

        OpenSSL.crypto.X509
            The signed certificate.
        """
        if subject is None:
            subject = {}
        if not subject.get('CN') and not subjectAltName:
            raise ValueError("Must at least cn or subjectAltName parameter.")

        req = crypto.load_certificate_request(csr_format, csr)

        # Process CommonName and subjectAltName extension.
        if subject.get('CN') is None:
            subject['CN'] = re.sub('^%s' % SAN_OPTIONS_RE, '', subjectAltName[0])
            subjectAltName = get_subjectAltName(subjectAltName)
        elif cn_in_san is True:
            if subjectAltName:
                subjectAltName = get_subjectAltName(subjectAltName, cn=subject['CN'])
            else:
                subjectAltName = get_subjectAltName([subject['CN']])

        # subjectAltName might still be None, in which case the extension is not added.
        elif subjectAltName:
            subjectAltName = get_subjectAltName(subjectAltName)

        # Create signed certificate
        cert = get_basic_cert(expires)
        cert.set_issuer(ca.x509.get_subject())
        for key, value in sort_subject_dict(subject):
            setattr(cert.get_subject(), key, force_bytes(value))
        cert.set_pubkey(req.get_pubkey())

        extensions = self.get_common_extensions(ca.issuer_url, ca.crl_url, ca.ocsp_url)
        extensions += [
            crypto.X509Extension(b'subjectKeyIdentifier', 0, b'hash', subject=cert),
            crypto.X509Extension(b'authorityKeyIdentifier', 0, b'keyid,issuer', issuer=ca.x509),
            crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'),
        ]

        if keyUsage is not None:
            extensions.append(crypto.X509Extension(b'keyUsage', *keyUsage))
        if extendedKeyUsage is not None:
            extensions.append(crypto.X509Extension(b'extendedKeyUsage', *extendedKeyUsage))

        if tlsfeature is not None:  # pragma: no cover
            extensions.append(crypto.X509Extension(b'tlsFeature', *tlsfeature))

        # Add subjectAltNames, always also contains the CommonName
        if subjectAltName:
            extensions.append(crypto.X509Extension(b'subjectAltName', 0, subjectAltName))

        # Add issuerAltName
        if ca.issuer_alt_name:
            issuerAltName = force_bytes('URI:%s' % ca.issuer_alt_name)
        else:
            issuerAltName = b'issuer:copy'
        extensions.append(crypto.X509Extension(b'issuerAltName', 0, issuerAltName, issuer=ca.x509))

        # Add collected extensions
        cert.add_extensions(extensions)

        # Finally sign the certificate:
        cert.sign(ca.key, str(algorithm))  # str() to force py2 unicode to str

        return cert

Example 135

Project: django-ca Source File: views.py
    def get_ocsp_response(self, data):
        try:
            ocsp_request = asn1crypto.ocsp.OCSPRequest.load(data)

            tbs_request = ocsp_request['tbs_request']
            request_list = tbs_request['request_list']
            if len(request_list) != 1:
                log.error('Received OCSP request with multiple sub requests')
                raise NotImplementedError('Combined requests not yet supported')
            single_request = request_list[0]  # TODO: Support more than one request
            req_cert = single_request['req_cert']
            serial = serial_from_int(req_cert['serial_number'].native)
        except Exception as e:
            log.exception('Error parsing OCSP request: %s', e)
            return self.fail(u'malformed_request')

        # Get CA and certificate
        ca = CertificateAuthority.objects.get(serial=self.ca)
        try:
            cert = Certificate.objects.filter(ca=ca).get(serial=serial)
        except Certificate.DoesNotExist:
            log.warn('OCSP request for unknown cert received.')
            return self.fail(u'internal_error')

        # load ca cert and responder key/cert
        ca_cert = load_certificate(force_bytes(ca.pub))
        responder_key = load_private_key(self.responder_key)
        responder_cert = load_certificate(self.responder_cert)

        builder = OCSPResponseBuilder(
            response_status=u'successful',  # ResponseStatus.successful.value,
            certificate=load_certificate(force_bytes(cert.pub)),
            certificate_status=force_text(cert.ocsp_status),
            revocation_date=cert.revoked_date,
        )

        # Parse extensions
        for extension in tbs_request['request_extensions']:
            extn_id = extension['extn_id'].native
            critical = extension['critical'].native
            value = extension['extn_value'].parsed

            # Tracks whether the current extension is unknown (reset on each iteration)
            unknown = False

            # Handle nonce extension
            if extn_id == 'nonce':
                builder.nonce = value.native

            # That's all we know
            else:  # pragma: no cover
                unknown = True

            # If an unknown critical extension is encountered (which should not
            # usually happen, according to RFC 6960 4.1.2), we should throw our
            # hands up in despair and run.
            if unknown is True and critical is True:  # pragma: no cover
                log.warning('Could not parse unknown critical extension: %r',
                        dict(extension.native))
                return self._fail('internal_error')

            # If it's an unknown non-critical extension, we can safely ignore it.
            elif unknown is True:  # pragma: no cover
                log.info('Ignored unknown non-critical extension: %r', dict(extension.native))

        builder.certificate_issuer = ca_cert
        builder.next_update = datetime.utcnow() + timedelta(seconds=self.expires)
        return builder.build(responder_key, responder_cert)

Example 136

Project: towel Source File: forms.py
Function: persist
    def persist(self, request):
        """
        Persist the search in the session, or load saved search if user
        isn't searching right now.
        """

        session_key = 'sf_%s.%s' % (
            self.__class__.__module__,
            self.__class__.__name__)

        if 'clear' in request.GET or 'n' in request.GET:
            if session_key in request.session:
                del request.session[session_key]

        if self.original_data and (
                set(self.original_data.keys()) & set(self.fields.keys())):
            data = self.data.copy()
            if 's' in data:
                del data['s']
                request.session[session_key] = data.urlencode()

        elif request.method == 'GET' and 's' not in request.GET:
            # try to get saved search from session
            if session_key in request.session:
                session_data = force_bytes(request.session[session_key])
                self.data = QueryDict(session_data, encoding='utf-8')
                self.persistency = True

            else:
                self.filtered = False

        elif request.method == 'POST' and 's' not in request.POST:
            # It wasn't the search form which was POSTed, hopefully :-)
            self.filtered = False

Example 137

Project: django-firebird Source File: operations.py
    def convert_binaryfield_value(self, value, expression, connection, context):
        if value is not None:
            value = force_bytes(value)
        return value
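
Database drivers often return binary columns as buffer or memoryview objects rather than bytes; force_bytes normalizes them to plain bytes. For instance (recent Django versions handle memoryview directly):

from django.utils.encoding import force_bytes

force_bytes(memoryview(b'\x00\x01'))   # b'\x00\x01'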

Example 138

Project: django-users2 Source File: utils.py
def send_activation_email(
        user=None, request=None, from_email=None,
        subject_template='users/activation_email_subject.html',
        email_template='users/activation_email.html', html_email_template=None):

    if not user.is_active and settings.USERS_VERIFY_EMAIL:
        token_generator = EmailActivationTokenGenerator()

        current_site = get_current_site(request)

        context = {
            'email': user.email,
            'site': current_site,
            'expiration_days': settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS,
            'user': user,
            'uid': urlsafe_base64_encode(force_bytes(user.pk)),
            'token': token_generator.make_token(user=user),
            'protocol': 'https' if request.is_secure() else 'http',
        }

        subject = render_to_string(subject_template, context)
        # email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        body = render_to_string(email_template, context)

        email_message = EmailMultiAlternatives(subject, body, from_email, [user.email])
        if html_email_template is not None:
            html_email = render_to_string(html_email_template, context)
            email_message.attach_alternative(html_email, 'text/html')

        email_message.send()

Example 139

Project: django-all-access Source File: fields.py
Function: get_signature
    def get_signature(self, value):
        return force_bytes(hmac.new(self.get_key(), value).hexdigest())
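
Note that hmac.new without a digestmod defaults to MD5, a default Python 3.8 removed, and on Python 3 both the key and the message must be bytes. An explicit equivalent (key and payload hypothetical):

import hashlib
import hmac

from django.utils.encoding import force_bytes

signature = force_bytes(hmac.new(b'secret-key', b'payload', hashlib.md5).hexdigest())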

Example 140

Project: addons-server Source File: middleware.py
Function: process_request
    def process_request(self, request):
        # Find locale, app
        prefixer = urlresolvers.Prefixer(request)
        if settings.DEBUG:
            redirect_type = HttpResponseRedirect
        else:
            redirect_type = HttpResponsePermanentRedirect
        urlresolvers.set_url_prefix(prefixer)
        full_path = prefixer.fix(prefixer.shortened_path)

        if (prefixer.app == amo.MOBILE.short and
                request.path.rstrip('/').endswith('/' + amo.MOBILE.short)):
            # TODO: Eventually put MOBILE in RETIRED_APPS, but not yet.
            return redirect_type(request.path.replace('/mobile', '/android'))

        if ('lang' in request.GET and not prefixer.shortened_path.startswith(
                settings.SUPPORTED_NONAPPS_NONLOCALES_PREFIX)):
            # Blank out the locale so that we can set a new one.  Remove lang
            # from query params so we don't have an infinite loop.
            prefixer.locale = ''
            new_path = prefixer.fix(prefixer.shortened_path)
            query = dict((force_bytes(k), request.GET[k]) for k in request.GET)
            query.pop('lang')
            return redirect_type(urlparams(new_path, **query))

        if full_path != request.path:
            query_string = request.META.get('QUERY_STRING', '')
            full_path = urllib.quote(full_path.encode('utf-8'))

            if query_string:
                query_string = query_string.decode('utf-8', 'ignore')
                full_path = u'%s?%s' % (full_path, query_string)

            response = redirect_type(full_path)
            # Cache the redirect for a year.
            if not settings.DEBUG:
                patch_cache_control(response, max_age=60 * 60 * 24 * 365)

            # Vary on Accept-Language or User-Agent if we changed the locale or
            # app.
            old_app = prefixer.app
            old_locale = prefixer.locale
            new_locale, new_app, _ = prefixer.split_path(full_path)

            if old_locale != new_locale:
                patch_vary_headers(response, ['Accept-Language'])
            if old_app != new_app:
                patch_vary_headers(response, ['User-Agent'])
            return response

        request.path_info = '/' + prefixer.shortened_path
        request.LANG = prefixer.locale or prefixer.get_language()
        activate(request.LANG)
        request.APP = amo.APPS.get(prefixer.app, amo.FIREFOX)

Example 141

Project: fjord Source File: analyzer_views.py
def _analytics_search_export(request, opinions_s):
    """Handles CSV export for analytics search

    This exports at most MAX_OPINIONS rows. It adds a note at the top
    if the results are truncated.

    """
    MAX_OPINIONS = 1000

    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="{0}"'.format(
        datetime.now().strftime('%Y%m%d_%H%M_search_export.csv'))

    keys = Response.get_export_keys(confidential=True)
    total_opinions = opinions_s.count()

    opinions_s = opinions_s.fields('id')[:MAX_OPINIONS].execute()

    # We convert what we get back from ES to what's in the db so we
    # can get all the information.
    opinions = Response.objects.filter(
        id__in=[mem['id'][0] for mem in opinions_s])

    writer = csv.writer(response)

    # Specify what this search is
    writer.writerow(['URL: {0}'.format(request.get_full_path())])
    writer.writerow(['Params: ' +
                     ' '.join(['{0}: {1}'.format(key, val)
                               for key, val in request.GET.items()])])

    # Add note if we truncated.
    if total_opinions > MAX_OPINIONS:
        writer.writerow(['Truncated {0} rows.'.format(
            total_opinions - MAX_OPINIONS)])

    # Write headers row.
    writer.writerow(keys)

    # Write opinion rows.
    for op in opinions:
        writer.writerow([force_bytes(getattr(op, key)) for key in keys])

    return response

Example 142

Project: standup Source File: test_views.py
Function: post_json
    def post_json(self, path, payload, secure=False, **extra):
        payload = force_bytes(json.dumps(payload))
        return self.generic('POST', path, payload, content_type='application/json',
                            secure=secure, **extra)

Example 143

Project: standup Source File: test_views.py
Function: delete_json
    def delete_json(self, path, payload, secure=False, **extra):
        payload = force_bytes(json.dumps(payload))
        return self.generic('DELETE', path, payload, content_type='application/json',
                            secure=secure, **extra)
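
In both helpers force_bytes is what turns the text returned by json.dumps into the bytes that a WSGI payload requires:

import json

from django.utils.encoding import force_bytes

payload = force_bytes(json.dumps({'status': 'ok'}))   # b'{"status": "ok"}'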

Example 144

Project: django-redis Source File: pickle.py
Function: loads
    def loads(self, value):
        return pickle.loads(force_bytes(value))

Example 145

Project: django-redis Source File: json.py
Function: dumps
    def dumps(self, value):
        return force_bytes(json.dumps(value))
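
Together with the pickle loader above, this shows the symmetry force_bytes provides for cache serializers: values go into Redis as bytes and come back bytes-like. A round trip through the JSON flavor (value hypothetical):

import json

from django.utils.encoding import force_bytes, force_text

stored = force_bytes(json.dumps({'n': 1}))
json.loads(force_text(stored))   # {'n': 1}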

Example 146

Project: connect Source File: forms.py
    def save(self, domain_override=None,
             subject_template_name='accounts/emails/password_reset_subject.txt',  # NoQA
             email_template_name='accounts/emails/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None, html_email_template_name=None):
        """
        Generates a one-use only link for resetting password and sends to the
        user.
        """
        from django.core.mail import send_mail
        email = self.cleaned_data["email"]
        active_users = User._default_manager.filter(
            email__iexact=email, is_active=True)

        for user in active_users:
            # Make sure that no email is sent to a user that actually has
            # a password marked as unusable
            if not user.has_usable_password():
                continue
            if not domain_override:
                site = get_current_site(request)
                domain = site.domain
            else:
                site = Site(name=domain_override,
                            domain=domain_override)
                domain = site.domain

            context = {
                'email': user.email,
                'domain': domain,
                'site': site,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': 'https' if use_https else 'http',
                # TODO: dynamically retrieve color from CSS
                'link_color': 'e51e41'
            }

            subject = loader.render_to_string(subject_template_name, context)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())

            email = loader.render_to_string(email_template_name, context)

            if html_email_template_name:
                html_email = loader.render_to_string(html_email_template_name,
                                                     context)
            else:
                html_email = None

            send_mail(subject, email, from_email,
                      [user.email], html_message=html_email)