Here are examples of how to use the Python API django.utils.encoding.force_bytes, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
146 Examples
3
Example 51
Project: drf-json-api Source File: utils.py
def dump_json(data):
    """Serialize ``data`` to a JSON byte string, sorted and 4-space indented.

    Mirrors Django REST framework's output conventions: from DRF 3 onward
    the separators keep a space after the comma and colon.
    """
    import rest_framework
    version = rest_framework.__version__.split(".")
    json_kwargs = {
        "sort_keys": True,
        "indent": 4,
    }
    # Compare the major version numerically: the original string
    # comparison would treat "10" as < "3".
    if int(version[0]) >= 3:
        json_kwargs["separators"] = (", ", ": ")
    return force_bytes(json.dumps(data, **json_kwargs))
3
Example 52
Project: kuma Source File: models.py
def get_recovery_url(self):
    """
    Creates a recovery URL for the user.
    The recovery URL uses the password reset workflow, which requires the
    user has a password on their account. Users without a password get a
    randomly generated password.
    """
    # Give password-less accounts a random password so the reset-token
    # machinery (which hashes the password) has something to work with.
    if not self.has_usable_password():
        self.set_password(uuid.uuid4().hex)
        self.save()
    # uidb64 encodes the primary key; token is the one-use reset token.
    uidb64 = urlsafe_base64_encode(force_bytes(self.pk))
    token = default_token_generator.make_token(self)
    link = reverse('users.recover',
                   kwargs={'token': token, 'uidb64': uidb64},
                   force_locale=True)
    return link
3
Example 53
def get_db_prep_value(self, value, connection=None, prepared=False):
    """Return the encrypted text representation of ``value`` for the DB.

    Nullable fields normalize falsy values to None (stored as NULL);
    already-encrypted payloads are passed through unchanged.
    """
    if self.null:
        value = value or None  # '' / 0 / empty collections become NULL
    if value is None:
        return None
    raw = force_bytes(value)
    if self.cipher.is_encrypted(raw):
        return force_text(raw)
    return force_text(self.cipher.encrypt(raw))
3
Example 54
Project: django-mama-cas Source File: utils.py
def add_query_params(url, params):
    """
    Inject additional query parameters into an existing URL. If
    parameters already exist with the same name, they will be
    overwritten. Parameters with empty values are ignored. Return
    the modified URL as a string.
    """
    charset = settings.DEFAULT_CHARSET
    # Encode keys and values up front, dropping empty values.
    encoded = {force_bytes(k, charset): force_bytes(v, charset)
               for k, v in params.items() if v}
    pieces = list(urlparse(url))
    query = dict(parse_qsl(pieces[4]))
    query.update(encoded)
    pieces[4] = urlencode(query)
    return urlunparse(pieces)
3
Example 55
Project: golismero Source File: client.py
def encode_file(boundary, key, file):
    """Return the multipart/form-data lines encoding a single file field."""
    def to_bytes(s):
        return force_bytes(s, settings.DEFAULT_CHARSET)
    guessed = mimetypes.guess_type(file.name)[0]
    content_type = guessed if guessed is not None else 'application/octet-stream'
    disposition = 'Content-Disposition: form-data; name="%s"; filename="%s"' \
        % (key, os.path.basename(file.name))
    return [
        to_bytes('--%s' % boundary),
        to_bytes(disposition),
        to_bytes('Content-Type: %s' % content_type),
        b'',
        file.read()
    ]
3
Example 56
Project: karaage Source File: emails.py
def send_reset_password_email(person):
    """Sends an email to user allowing them to set their password."""
    # urlsafe_base64_encode returns bytes here, hence the explicit decode.
    uid = urlsafe_base64_encode(force_bytes(person.pk)).decode("ascii")
    token = default_token_generator.make_token(person)
    url = '%s/persons/reset/%s/%s/' % (
        settings.REGISTRATION_BASE_URL, uid, token)
    # Copy the shared context so per-call keys don't leak between emails.
    context = CONTEXT.copy()
    context.update({
        'url': url,
        'receiver': person,
    })
    to_email = person.email
    subject, body = render_email('reset_password', context)
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
3
Example 57
def from_db_value(self, value, expression, connection, context):
    """Convert a raw DB value back to text, decrypting when necessary."""
    if value is None:
        return value
    raw = force_bytes(value)
    if self.cipher.is_encrypted(raw):
        raw = self.cipher.decrypt(raw)
    return force_text(raw)
3
Example 58
def xor(s, pad):
    '''XOR a given string ``s`` with the one-time-pad ``pad``'''
    from itertools import cycle
    data = bytearray(force_bytes(s, encoding='latin-1'))
    key = bytearray(force_bytes(pad, encoding='latin-1'))
    # The pad repeats (cycle) so it can be shorter than the input.
    mixed = bytearray(a ^ b for a, b in zip(data, cycle(key)))
    return binary_type(mixed)
3
Example 59
Project: unisubs Source File: plugin.py
def patch_for_rest_framework(self):
    # patch some of old django code to be compatible with the rest
    # framework testing tools
    # restframeworkcompat is the compat module from django-rest-framework
    # 3.0.3
    # NOTE: this monkey-patches module-level attributes globally; anything
    # that looks these names up after this call sees the patched versions.
    from utils.test_utils import restframeworkcompat
    import django.test.client
    import django.utils.encoding
    django.test.client.RequestFactory = restframeworkcompat.RequestFactory
    django.utils.encoding.force_bytes = restframeworkcompat.force_bytes_or_smart_bytes
3
Example 60
def get_db_prep_save(self, value, connection):
    """Encrypt the prepared value and wrap it for binary column storage."""
    value = super(
        EncryptedField, self
    ).get_db_prep_save(value, connection)
    if value is not None:
        retval = self.fernet.encrypt(force_bytes(value))
        # Wrap in the driver's Binary type so it is stored as raw bytes.
        return connection.Database.Binary(retval)
    # Implicitly returns None when value is None.
3
Example 61
@property
def guid(self, language=None):
    # NOTE(review): property getters are always invoked without arguments,
    # so ``language`` is effectively always None and the branch below
    # always runs — confirm before relying on the parameter.
    if not language:
        language = self.get_current_language()
    # Composite of language, slug and namespace keeps the GUID stable per
    # translation of this object.
    base_string = '-{0}-{2}-{1}-'.format(
        language, self.app_config.namespace,
        self.safe_translation_getter('slug', language_code=language, any_language=True)
    )
    return hashlib.sha256(force_bytes(base_string)).hexdigest()
3
Example 62
Project: django-mobile Source File: loader.py
def cache_key(self, template_name, template_dirs, *args):
    """Build a flavour-prefixed cache key for a template lookup.

    Extra positional args mean Django >= 1.9, where the parent class
    computes the key itself; otherwise derive it from the template name
    plus a hash of the search directories (when given).
    """
    if args:  # Django >= 1.9
        key = super(CachedLoader, self).cache_key(template_name, template_dirs, *args)
    elif template_dirs:
        dirs_digest = hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()
        key = '-'.join([template_name, dirs_digest])
    else:
        key = template_name
    return '{0}:{1}'.format(get_flavour(), key)
3
Example 63
def test_options(self):
    """OPTIONS on the reset view succeeds for a valid uid/token pair."""
    user = UserFactory.create()
    token = default_token_generator.make_token(user)
    uid = urlsafe_base64_encode(force_bytes(user.pk))
    request = self.create_request('options', auth=False)
    view = self.view_class.as_view()
    response = view(request, uidb64=uid, token=token)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
3
Example 64
Project: drf-json-api Source File: test_delete.py
def test_delete(client):
    """Deleting a person returns 204 No Content with an empty body."""
    person = models.Person.objects.create(name="test")
    response = client.delete(reverse("person-detail", args=[person.id]))
    assert response.status_code == 204
    # response.content is bytes; force_bytes("") yields the matching b"".
    assert response.content == force_bytes("")
3
Example 65
def test_insert(self, db, model, vals):
    """Data stored in DB is actually encrypted."""
    field = model._meta.get_field('value')
    model.objects.create(value=vals[0])
    # Read the raw column back with SQL, bypassing the ORM, and decrypt
    # manually to prove the stored bytes are ciphertext.
    with connection.cursor() as cur:
        cur.execute('SELECT value FROM %s' % model._meta.db_table)
        data = [
            force_text(field.fernet.decrypt(force_bytes(r[0])))
            for r in cur.fetchall()
        ]
    assert list(map(field.to_python, data)) == [vals[0]]
3
Example 66
def handle_uploaded_files(form):
    """Persist the form's uploaded files and swap in stored references.

    Each file is saved under a name containing a random hash so stored
    filenames cannot be guessed; the cleaned_data entry is replaced with
    a StoredUploadedFile pointing at the saved path.
    """
    # Guard clause (idiomatic truthiness instead of len(...) check).
    if not form.file_fields:
        return
    for field in form.file_fields:
        uploaded_file = form.cleaned_data.get(field)  # .get defaults to None
        if uploaded_file is None:
            continue
        valid_file_name = file_storage.get_valid_name(uploaded_file.name)
        root, ext = os.path.splitext(valid_file_name)
        # Random UUID hash keeps saved names unique and unguessable.
        secret_hash = hashlib.sha1(force_bytes(uuid.uuid4())).hexdigest()
        filename = file_storage.get_available_name(os.path.join(
            settings.DJANGOCMS_FORMS_FILE_STORAGE_DIR,
            form.form_definition.upload_to,
            '{root}_{hash}{ext}'.format(root=root, hash=secret_hash, ext=ext)))
        file_storage.save(filename, uploaded_file)
        form.cleaned_data[field] = StoredUploadedFile(filename)
3
Example 67
Project: Django-Merged-Inlines Source File: tests.py
def assertStringOrder(self, response, check_list):
    """
    Check that a list of strings is properly ordered in the content
    """
    positions = []
    for needle in check_list:
        # .index raises ValueError if the string is missing entirely.
        positions.append(response.content.index(force_bytes(needle)))
    self.assertEqual(positions, sorted(positions))
3
Example 68
Project: addons-server Source File: views.py
def parse_next_path(state_parts):
    """Extract and validate the redirect path from OAuth state parts.

    Returns the decoded path, or None when it is absent, undecodable,
    or not a safe redirect target.
    """
    next_path = None
    if len(state_parts) == 2:
        # The = signs will be stripped off so we need to add them back
        # but it only cares if there are too few so add 4 of them.
        encoded_path = state_parts[1] + '===='
        try:
            next_path = base64.urlsafe_b64decode(
                force_bytes(encoded_path)).decode('utf-8')
        except (TypeError, ValueError):
            # Python 3 raises binascii.Error (a ValueError subclass) for
            # bad base64 and UnicodeDecodeError (also ValueError) for bad
            # UTF-8; Python 2 raised TypeError. Catch all of them.
            log.info('Error decoding next_path {}'.format(
                encoded_path))
    if not is_safe_url(next_path):
        next_path = None
    return next_path
3
Example 69
Project: django-avatar Source File: utils.py
def get_cache_key(user_or_username, size, prefix):
    """
    Returns a cache key consisting of a username and image size.
    """
    if isinstance(user_or_username, get_user_model()):
        user_or_username = get_username(user_or_username)
    raw_key = six.u('%s_%s_%s') % (prefix, user_or_username, size)
    # Slug is truncated to 100 chars; the md5 digest keeps the key unique.
    digest = hashlib.md5(force_bytes(raw_key)).hexdigest()
    return six.u('%s_%s') % (slugify(raw_key)[:100], digest)
3
Example 70
def encode(self, password, salt):
    """Hash ``password`` with bcrypt and return the '<algorithm>$<hash>' string."""
    bcrypt = self._load_library()
    # Need to reevaluate the force_bytes call once bcrypt is supported on
    # Python 3
    data = bcrypt.hashpw(force_bytes(password), salt)
    return "%s$%s" % (self.algorithm, data)
3
Example 71
@register.filter(is_safe=True)
def restructuredtext(value):
    """Render ``value`` as reStructuredText to a safe HTML fragment.

    Without docutils installed: raises TemplateSyntaxError in DEBUG,
    otherwise degrades to returning the plain text.
    """
    try:
        from docutils.core import publish_parts
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError(
                "Error in 'restructuredtext' filter: The Python docutils library isn't installed.")
        return force_text(value)
    else:
        docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {})
        parts = publish_parts(source=force_bytes(value), writer_name="html4css1", settings_overrides=docutils_settings)
        return mark_safe(force_text(parts["fragment"]))
3
Example 72
Project: golismero Source File: smtp.py
def _send(self, email_message):
    """A helper method that does the actual sending.

    Returns True on success; False when there are no recipients or when
    sending failed and fail_silently is set.
    """
    if not email_message.recipients():
        return False
    from_email = sanitize_address(email_message.from_email, email_message.encoding)
    recipients = [sanitize_address(addr, email_message.encoding)
                  for addr in email_message.recipients()]
    message = email_message.message()
    # Encode the payload using the message's declared charset, UTF-8 otherwise.
    charset = message.get_charset().get_output_charset() if message.get_charset() else 'utf-8'
    try:
        self.connection.sendmail(from_email, recipients,
                                 force_bytes(message.as_string(), charset))
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are never swallowed by fail_silently.
        if not self.fail_silently:
            raise
        return False
    return True
3
Example 73
def make_template_fragment_key(fragm_name, vary_on):
    """Build the cache key for a template fragment from its vary-on values."""
    # URL-quote each vary-on value, coerce to bytes, and hash the joined blob.
    joined = b':'.join(force_bytes(urlquote(var)) for var in vary_on)
    args_hash = hashlib.md5(joined).hexdigest()
    return 'template.cache.{0}.{1}'.format(fragm_name, args_hash)
3
Example 74
Project: django-fernet-fields Source File: hkdf.py
def derive_fernet_key(input_key):
    """Derive a 32-byte, urlsafe-b64-encoded Fernet key from arbitrary input key."""
    # HKDF-SHA256 stretches/compresses the input key material to exactly
    # 32 bytes; salt, info and backend come from module-level configuration.
    hkdf = HKDF(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        info=info,
        backend=backend,
    )
    return base64.urlsafe_b64encode(hkdf.derive(force_bytes(input_key)))
3
Example 75
def write(self, content):
    """Write ``content`` (coerced to bytes) to the underlying file.

    Raises AttributeError when the file was opened read-only.
    """
    if 'w' not in self._mode:
        raise AttributeError("File was opened for read-only access.")
    self.file.write(force_bytes(content))
    self._is_dirty = True
    # NOTE(review): setting _is_read on a *write* looks odd — presumably it
    # marks the buffer as current so a later read skips a refetch; confirm.
    self._is_read = True
3
Example 76
Project: django-user-management Source File: test_views.py
def test_put_invalid_token(self):
    """PUT with a token generated for a different user is rejected (400)."""
    user = UserFactory.create()
    other_user = UserFactory.create()
    # Token belongs to other_user while the uid identifies user: mismatch.
    token = default_token_generator.make_token(other_user)
    uid = urlsafe_base64_encode(force_bytes(user.pk))
    request = self.create_request('put', auth=False)
    view = self.view_class.as_view()
    response = view(request, uidb64=uid, token=token)
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
0
Example 77
def _get_cache_key(self, request):
    """
    Generate cache key that's exactly unique enough.
    Assumes that the response is determined by the request.method, authenticated user, and URL path.
    """
    # HTTP method
    method = request.method
    # Authenticated username ('*' when anonymous or auth is ignored)
    if not request.user.is_authenticated() or self.cache_ignore_auth:
        username = '*'
    else:
        username = request.user.username
    # URL path
    url = force_bytes(iri_to_uri(request.get_full_path()))
    # NOTE(review): ``url`` is bytes while the other join parts are text;
    # the join (and md5 over a str below) only works on Python 2 —
    # confirm this never runs under Python 3.
    # build a cache key out of that
    key = '#'.join(('CacheMixin', self.key_prefix, username, method, url))
    if len(key) > MAX_KEY_LENGTH:
        # make sure keys don't get too long
        key = key[:(MAX_KEY_LENGTH - 33)] + '-' + hashlib.md5(key).hexdigest()
    return key
0
Example 78
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
    """
    Implements PBKDF2 as defined in RFC 2898, section 5.2
    HMAC+SHA256 is used as the default pseudo random function.
    As of 2011, 10,000 iterations was the recommended default which
    took 100ms on a 2.2Ghz Core 2 Duo. This is probably the bare
    minimum for security given 1000 iterations was recommended in
    2001. This code is very well optimized for CPython and is only
    four times slower than openssl's implementation. Look in
    django.contrib.auth.hashers for the present default.
    """
    assert iterations > 0
    if not digest:
        digest = hashlib.sha256
    password = force_bytes(password)
    salt = force_bytes(salt)
    hlen = digest().digest_size
    if not dklen:
        dklen = hlen
    if dklen > (2 ** 32 - 1) * hlen:
        raise OverflowError('dklen too big')
    # l: number of hlen-sized blocks needed; r: length of the final block.
    l = -(-dklen // hlen)
    r = dklen - (l - 1) * hlen
    hex_format_string = "%%0%ix" % (hlen * 2)
    # Precompute the inner/outer HMAC pad states once; per-iteration
    # ``.copy()`` of these is the main speed trick over hmac.new().
    inner, outer = digest(), digest()
    if len(password) > inner.block_size:
        password = digest(password).digest()
    password += b'\x00' * (inner.block_size - len(password))
    inner.update(password.translate(hmac.trans_36))
    outer.update(password.translate(hmac.trans_5C))
    def F(i):
        # F(i) = U1 ^ U2 ^ ... ^ Uc for block index i (RFC 2898, 5.2).
        u = salt + struct.pack(b'>I', i)
        result = 0
        for j in xrange(int(iterations)):  # xrange/hmac.trans_36: Python 2 code
            dig1, dig2 = inner.copy(), outer.copy()
            dig1.update(u)
            dig2.update(dig1.digest())
            u = dig2.digest()
            result ^= _bin_to_long(u)
        return _long_to_bin(result, hex_format_string)
    T = [F(x) for x in range(1, l)]
    return b''.join(T) + F(l)[:r]
0
Example 79
def __init__(self, text, title=None, url=None, kind="info", dismissal_url=None, datetime=None):
    """
    :param text: The notification's text.
    :type text: str
    :param title: An optional title for the notification.
    :type title: str|None
    :param url: The optional main URL for the notification.
    :type url: str|None
    :param kind: The kind of the notification (see KINDS)
    :type kind: str
    :param dismissal_url: An optional dismissal URL for the notification.
                          The admin framework will add a button that will
                          cause an AJAX post into this URL.
    :type dismissal_url: str|None
    :param datetime: An optional date+time for this notification.
    :type datetime: datetime
    """
    self.title = title
    self.text = text
    self._url = url
    self.dismissal_url = dismissal_url
    self.kind = kind
    self.datetime = datetime
    # Derive a stable id from all attribute values (sorted for determinism)
    # so identical notifications hash to the same id.
    bits = [force_text(v) for (k, v) in sorted(vars(self).items())]
    self.id = hashlib.md5(force_bytes("+".join(bits))).hexdigest()
0
Example 80
Project: splunk-webframework Source File: geometry.py
def __init__(self, geo_input, srid=None):
    """
    The base constructor for GEOS geometry objects, and may take the
    following inputs:
    * strings:
     - WKT
     - HEXEWKB (a PostGIS-specific canonical form)
     - GeoJSON (requires GDAL)
    * buffer:
     - WKB
    The `srid` keyword is used to specify the Source Reference Identifier
    (SRID) number for this Geometry. If not set, the SRID will be None.
    """
    # Normalize bytes input to text before format detection.
    if isinstance(geo_input, bytes):
        geo_input = force_text(geo_input)
    if isinstance(geo_input, six.string_types):
        wkt_m = wkt_regex.match(geo_input)
        if wkt_m:
            # Handling WKT input.
            if wkt_m.group('srid'): srid = int(wkt_m.group('srid'))
            g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
        elif hex_regex.match(geo_input):
            # Handling HEXEWKB input.
            g = wkb_r().read(force_bytes(geo_input))
        elif gdal.HAS_GDAL and json_regex.match(geo_input):
            # Handling GeoJSON input.
            g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
        else:
            raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
    elif isinstance(geo_input, GEOM_PTR):
        # When the input is a pointer to a geomtry (GEOM_PTR).
        g = geo_input
    elif isinstance(geo_input, memoryview):
        # When the input is a buffer (WKB).
        g = wkb_r().read(geo_input)
    elif isinstance(geo_input, GEOSGeometry):
        g = capi.geom_clone(geo_input.ptr)
    else:
        # Invalid geometry type.
        raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
    if bool(g):
        # Setting the pointer object with a valid pointer.
        self.ptr = g
    else:
        raise GEOSException('Could not initialize GEOS Geometry with given input.')
    # Post-initialization setup.
    self._post_init(srid)
0
Example 81
def __init__(self, path=None, cache=0, country=None, city=None):
    """
    Initializes the GeoIP object, no parameters are required to use default
    settings. Keyword arguments may be passed in to customize the locations
    of the GeoIP data sets.
    * path: Base directory to where GeoIP data is located or the full path
        to where the city or country data files (*.dat) are located.
        Assumes that both the city and country data sets are located in
        this directory; overrides the GEOIP_PATH settings attribute.
    * cache: The cache settings when opening up the GeoIP datasets,
        and may be an integer in (0, 1, 2, 4, 8) corresponding to
        the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
        GEOIP_INDEX_CACHE, and GEOIP_MMAP_CACHE, `GeoIPOptions` C API
        settings, respectively. Defaults to 0, meaning that the data is read
        from the disk.
    * country: The name of the GeoIP country data file. Defaults to
        'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
    * city: The name of the GeoIP city data file. Defaults to
        'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
    """
    # Checking the given cache option.
    if cache in self.cache_options:
        self._cache = cache
    else:
        raise GeoIPException('Invalid GeoIP caching option: %s' % cache)
    # Getting the GeoIP data path.
    if not path:
        path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
        if not path:
            raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
    if not isinstance(path, six.string_types):
        raise TypeError('Invalid path type: %s' % type(path).__name__)
    if os.path.isdir(path):
        # Constructing the GeoIP database filenames using the settings
        # dictionary. If the database files for the GeoLite country
        # and/or city datasets exist, then try and open them.
        country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
        if os.path.isfile(country_db):
            self._country = GeoIP_open(force_bytes(country_db), cache)
            self._country_file = country_db
        city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
        if os.path.isfile(city_db):
            self._city = GeoIP_open(force_bytes(city_db), cache)
            self._city_file = city_db
    elif os.path.isfile(path):
        # Otherwise, some detective work will be needed to figure
        # out whether the given database path is for the GeoIP country
        # or city databases.
        ptr = GeoIP_open(force_bytes(path), cache)
        info = GeoIP_database_info(ptr)
        if lite_regex.match(info):
            # GeoLite City database detected.
            self._city = ptr
            self._city_file = path
        elif free_regex.match(info):
            # GeoIP Country database detected.
            self._country = ptr
            self._country_file = path
        else:
            raise GeoIPException('Unable to recognize database edition: %s' % info)
    else:
        raise GeoIPException('GeoIP path must be a valid file or directory.')
0
Example 82
Project: readthedocs.org Source File: core_tags.py
@register.filter(is_safe=True)
def restructuredtext(value, short=False):
    """Render ``value`` as reStructuredText to a safe HTML fragment.

    When ``short`` is true only the first line of the rendered output is
    returned. Without docutils installed: raises TemplateSyntaxError in
    DEBUG, otherwise degrades to the plain text.
    """
    try:
        from docutils.core import publish_parts
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError(
                "Error in 'restructuredtext' filter: "
                "The Python docutils library isn't installed."
            )
        return force_text(value)
    docutils_settings = {
        'raw_enabled': False,
        'file_insertion_enabled': False,
    }
    docutils_settings.update(getattr(settings, 'RESTRUCTUREDTEXT_FILTER_SETTINGS', {}))
    parts = publish_parts(source=force_bytes(value), writer_name="html4css1",
                          settings_overrides=docutils_settings)
    out = force_text(parts["fragment"])
    if short:
        # str.split always yields at least one element, so the original
        # try/except IndexError (and its exception-swallowing ``return``
        # inside ``finally``) was dead weight — removed.
        out = out.split("\n")[0]
    return mark_safe(out)
0
Example 83
def post_process(self, paths, dry_run=False, **options):
    """
    Post process the given SortedDict of files (called from collectstatic).
    Processing is actually two separate operations:
    1. renaming files to include a hash of their content for cache-busting,
       and copying those files to the target storage.
    2. adjusting files which contain references to other files so they
       refer to the cache-busting filenames.
    If either of these are performed on a file, then that file is considered
    post-processed.
    """
    # don't even dare to process the files if we're in dry run mode
    if dry_run:
        return
    # where to store the new paths
    hashed_paths = {}
    # build a list of adjustable files
    matches = lambda path: matches_patterns(path, self._patterns.keys())
    adjustable_paths = [path for path in paths if matches(path)]
    # then sort the files by the directory level
    # (deepest first, so referenced files are hashed before referrers)
    path_level = lambda name: len(name.split(os.sep))
    for name in sorted(paths.keys(), key=path_level, reverse=True):
        # use the original, local file, not the copied-but-unprocessed
        # file, which might be somewhere far away, like S3
        storage, path = paths[name]
        with storage.open(path) as original_file:
            # generate the hash with the original content, even for
            # adjustable files.
            hashed_name = self.hashed_name(name, original_file)
            # then get the original's file content..
            if hasattr(original_file, 'seek'):
                original_file.seek(0)
            hashed_file_exists = self.exists(hashed_name)
            processed = False
            # ..to apply each replacement pattern to the content
            if name in adjustable_paths:
                content = original_file.read().decode(settings.FILE_CHARSET)
                for patterns in self._patterns.values():
                    for pattern, template in patterns:
                        converter = self.url_converter(name, template)
                        content = pattern.sub(converter, content)
                if hashed_file_exists:
                    self.delete(hashed_name)
                # then save the processed result
                content_file = ContentFile(force_bytes(content))
                saved_name = self._save(hashed_name, content_file)
                hashed_name = force_text(saved_name.replace('\\', '/'))
                processed = True
            else:
                # or handle the case in which neither processing nor
                # a change to the original file happened
                if not hashed_file_exists:
                    processed = True
                    saved_name = self._save(hashed_name, original_file)
                    hashed_name = force_text(saved_name.replace('\\', '/'))
            # and then set the cache accordingly
            hashed_paths[self.cache_key(name.replace('\\', '/'))] = hashed_name
            yield name, hashed_name, processed
    # Finally set the cache
    self.cache.set_many(hashed_paths)
0
Example 84
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
    """
    Implements PBKDF2 as defined in RFC 2898, section 5.2
    HMAC+SHA256 is used as the default pseudo random function.
    Right now 10,000 iterations is the recommended default which takes
    100ms on a 2.2Ghz Core 2 Duo. This is probably the bare minimum
    for security given 1000 iterations was recommended in 2001. This
    code is very well optimized for CPython and is only four times
    slower than openssl's implementation.
    """
    assert iterations > 0
    if not digest:
        digest = hashlib.sha256
    password = force_bytes(password)
    salt = force_bytes(salt)
    hlen = digest().digest_size
    if not dklen:
        dklen = hlen
    if dklen > (2 ** 32 - 1) * hlen:
        raise OverflowError('dklen too big')
    # l: number of hlen-sized blocks; r: length of the final block.
    l = -(-dklen // hlen)
    r = dklen - (l - 1) * hlen
    hex_format_string = "%%0%ix" % (hlen * 2)
    # Precompute inner/outer HMAC pad states once; copies are cheap.
    inner, outer = digest(), digest()
    if len(password) > inner.block_size:
        password = digest(password).digest()
    password += b'\x00' * (inner.block_size - len(password))
    inner.update(password.translate(hmac.trans_36))
    outer.update(password.translate(hmac.trans_5C))
    def F(i):
        # Generator U yields U1..Uc; F(i) xors them via reduce (RFC 2898 5.2).
        def U():
            u = salt + struct.pack(b'>I', i)
            for j in xrange(int(iterations)):  # xrange/reduce: Python 2 code
                dig1, dig2 = inner.copy(), outer.copy()
                dig1.update(u)
                dig2.update(dig1.digest())
                u = dig2.digest()
                yield _bin_to_long(u)
        return _long_to_bin(reduce(operator.xor, U()), hex_format_string)
    T = [F(x) for x in range(1, l + 1)]
    return b''.join(T[:-1]) + T[-1][:r]
0
Example 85
Project: Django--an-app-at-a-time Source File: geometry.py
def __init__(self, geo_input, srid=None):
    """
    The base constructor for GEOS geometry objects, and may take the
    following inputs:
    * strings:
     - WKT
     - HEXEWKB (a PostGIS-specific canonical form)
     - GeoJSON (requires GDAL)
    * buffer:
     - WKB
    The `srid` keyword is used to specify the Source Reference Identifier
    (SRID) number for this Geometry. If not set, the SRID will be None.
    """
    # Normalize bytes input to text before format detection.
    if isinstance(geo_input, bytes):
        geo_input = force_text(geo_input)
    if isinstance(geo_input, six.string_types):
        wkt_m = wkt_regex.match(geo_input)
        if wkt_m:
            # Handling WKT input.
            if wkt_m.group('srid'):
                srid = int(wkt_m.group('srid'))
            g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
        elif hex_regex.match(geo_input):
            # Handling HEXEWKB input.
            g = wkb_r().read(force_bytes(geo_input))
        elif json_regex.match(geo_input):
            # Handling GeoJSON input.
            if not gdal.HAS_GDAL:
                raise ValueError('Initializing geometry from JSON input requires GDAL.')
            g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
        else:
            raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
    elif isinstance(geo_input, GEOM_PTR):
        # When the input is a pointer to a geometry (GEOM_PTR).
        g = geo_input
    elif isinstance(geo_input, six.memoryview):
        # When the input is a buffer (WKB).
        g = wkb_r().read(geo_input)
    elif isinstance(geo_input, GEOSGeometry):
        g = capi.geom_clone(geo_input.ptr)
    else:
        # Invalid geometry type.
        raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
    if g:
        # Setting the pointer object with a valid pointer.
        self.ptr = g
    else:
        raise GEOSException('Could not initialize GEOS Geometry with given input.')
    # Post-initialization setup.
    self._post_init(srid)
0
Example 86
Project: djoser Source File: utils.py
def encode_uid(pk):
    """Return the urlsafe base64 encoding of ``pk`` as a text string."""
    encoded = urlsafe_base64_encode(force_bytes(pk))
    return encoded.decode()
0
Example 87
Project: django-password-policies Source File: __init__.py
def save(self, domain_override=None,
         subject_template_name='registration/password_reset_subject.txt',
         email_template_name='registration/password_reset_email.txt',
         email_html_template_name='registration/password_reset_email.html',
         use_https=False, from_email=None, request=None):
    """
    Generates a one-use only link for resetting password and sends to the
    user.
    :arg str domain_override: A string that changes the site name and
        domain if needed.
    :arg str email_template_name: A relative path to a template in the root of a
        template directory to generate the body of the mail.
    :arg str email_html_template_name: A relative path to a template in the root of
        a template directory to generate the HTML attachment of the mail.
    :arg str from_email: The email address to use as sender of the email.
    :arg request: A HttpRequest instance.
    :arg str subject_template_name: A relative path to a template in the root of
        a template directory to generate the subject of the mail.
    :arg bool use_https: Determines wether to use HTTPS while generating
        the one-use only link for resetting passwords.
    """
    from django.core.mail import EmailMultiAlternatives
    context = self.get_context_data(request,
                                    domain_override,
                                    use_https)
    signer = signing.TimestampSigner()
    for user in self.users_cache:
        c = context
        # TimestampSigner output is 'value:timestamp:signature'; split it
        # so the timestamp and signature can be embedded separately.
        var = signer.sign(user.password)
        var = var.split(':')
        c['email'] = user.email
        c['signature'] = var[2]
        c['timestamp'] = var[1]
        c['uid'] = urlsafe_base64_encode(force_bytes(user.id))
        c['user'] = user
        subject = loader.render_to_string(subject_template_name, c)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        email = loader.render_to_string(email_template_name, c)
        html = loader.render_to_string(email_html_template_name, c)
        msg = EmailMultiAlternatives(subject, email, from_email, [user.email])
        msg.attach_alternative(html, "text/html")
        msg.send()
0
Example 88
def post_process(self, paths, dry_run=False, **options):
    """
    Post process the given list of files (called from collectstatic).
    Processing is actually two separate operations:
    1. renaming files to include a hash of their content for cache-busting,
       and copying those files to the target storage.
    2. adjusting files which contain references to other files so they
       refer to the cache-busting filenames.
    If either of these are performed on a file, then that file is considered
    post-processed.
    """
    # don't even dare to process the files if we're in dry run mode
    if dry_run:
        return
    # where to store the new paths
    hashed_paths = {}
    # build a list of adjustable files
    matches = lambda path: matches_patterns(path, self._patterns.keys())
    adjustable_paths = [path for path in paths if matches(path)]
    # then sort the files by the directory level
    # (deepest first, so referenced files are hashed before referrers)
    path_level = lambda name: len(name.split(os.sep))
    for name in sorted(paths.keys(), key=path_level, reverse=True):
        # use the original, local file, not the copied-but-unprocessed
        # file, which might be somewhere far away, like S3
        storage, path = paths[name]
        with storage.open(path) as original_file:
            # generate the hash with the original content, even for
            # adjustable files.
            hashed_name = self.hashed_name(name, original_file)
            # then get the original's file content..
            if hasattr(original_file, 'seek'):
                original_file.seek(0)
            hashed_file_exists = self.exists(hashed_name)
            processed = False
            # ..to apply each replacement pattern to the content
            if name in adjustable_paths:
                content = original_file.read().decode(settings.FILE_CHARSET)
                for patterns in self._patterns.values():
                    for pattern, template in patterns:
                        converter = self.url_converter(name, template)
                        content = pattern.sub(converter, content)
                if hashed_file_exists:
                    self.delete(hashed_name)
                # then save the processed result
                content_file = ContentFile(force_bytes(content))
                saved_name = self._save(hashed_name, content_file)
                hashed_name = force_text(saved_name.replace('\\', '/'))
                processed = True
            else:
                # or handle the case in which neither processing nor
                # a change to the original file happened
                if not hashed_file_exists:
                    processed = True
                    saved_name = self._save(hashed_name, original_file)
                    hashed_name = force_text(saved_name.replace('\\', '/'))
            # and then set the cache accordingly
            hashed_paths[self.cache_key(name.replace('\\', '/'))] = hashed_name
            yield name, hashed_name, processed
    # Finally set the cache
    self.cache.set_many(hashed_paths)
0
Example 89
Project: PyClassLessons Source File: geometry.py
def __init__(self, geo_input, srid=None):
    """
    The base constructor for GEOS geometry objects, and may take the
    following inputs:
    * strings:
     - WKT
     - HEXEWKB (a PostGIS-specific canonical form)
     - GeoJSON (requires GDAL)
    * buffer:
     - WKB
    The `srid` keyword is used to specify the Source Reference Identifier
    (SRID) number for this Geometry. If not set, the SRID will be None.
    """
    # Normalize bytes input to text before format detection.
    if isinstance(geo_input, bytes):
        geo_input = force_text(geo_input)
    if isinstance(geo_input, six.string_types):
        wkt_m = wkt_regex.match(geo_input)
        if wkt_m:
            # Handling WKT input.
            if wkt_m.group('srid'):
                srid = int(wkt_m.group('srid'))
            g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
        elif hex_regex.match(geo_input):
            # Handling HEXEWKB input.
            g = wkb_r().read(force_bytes(geo_input))
        elif gdal.HAS_GDAL and json_regex.match(geo_input):
            # Handling GeoJSON input.
            g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
        else:
            raise ValueError('String or unicode input unrecognized as WKT EWKT, and HEXEWKB.')
    elif isinstance(geo_input, GEOM_PTR):
        # When the input is a pointer to a geometry (GEOM_PTR).
        g = geo_input
    elif isinstance(geo_input, memoryview):
        # When the input is a buffer (WKB).
        g = wkb_r().read(geo_input)
    elif isinstance(geo_input, GEOSGeometry):
        g = capi.geom_clone(geo_input.ptr)
    else:
        # Invalid geometry type.
        raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
    if g:
        # Setting the pointer object with a valid pointer.
        self.ptr = g
    else:
        raise GEOSException('Could not initialize GEOS Geometry with given input.')
    # Post-initialization setup.
    self._post_init(srid)
0
Example 90
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
    """
    Implements PBKDF2 as defined in RFC 2898, section 5.2
    HMAC+SHA256 is used as the default pseudo random function.
    As of 2014, 100,000 iterations was the recommended default which took
    100ms on a 2.7Ghz Intel i7 with an optimized implementation. This is
    probably the bare minimum for security given 1000 iterations was
    recommended in 2001. This code is very well optimized for CPython and
    is about five times slower than OpenSSL's implementation. Look in
    django.contrib.auth.hashers for the present default, it is lower than
    the recommended 100,000 because of the performance difference between
    this and an optimized implementation.
    """
    assert iterations > 0
    if not digest:
        digest = hashlib.sha256
    password = force_bytes(password)
    salt = force_bytes(salt)
    hlen = digest().digest_size
    if not dklen:
        dklen = hlen
    if dklen > (2 ** 32 - 1) * hlen:
        raise OverflowError('dklen too big')
    # l: number of hlen-sized blocks; r: length of the final block.
    l = -(-dklen // hlen)
    r = dklen - (l - 1) * hlen
    hex_format_string = "%%0%ix" % (hlen * 2)
    # Precompute inner/outer HMAC pad states once; per-iteration
    # ``.copy()`` of these is the main optimization over hmac.new().
    inner, outer = digest(), digest()
    if len(password) > inner.block_size:
        password = digest(password).digest()
    password += b'\x00' * (inner.block_size - len(password))
    inner.update(password.translate(hmac.trans_36))
    outer.update(password.translate(hmac.trans_5C))
    def F(i):
        # F(i) = U1 ^ U2 ^ ... ^ Uc for block index i (RFC 2898, 5.2).
        u = salt + struct.pack(b'>I', i)
        result = 0
        for j in range(int(iterations)):
            dig1, dig2 = inner.copy(), outer.copy()
            dig1.update(u)
            dig2.update(dig1.digest())
            u = dig2.digest()
            result ^= _bin_to_long(u)
        return _long_to_bin(result, hex_format_string)
    T = [F(x) for x in range(1, l)]
    return b''.join(T) + F(l)[:r]
0
Example 91
def save(self, email_template_name='registration/password_reset_email_user_list.html', **kwargs):
    """
    Generates a one-use only link for resetting password and sends to the
    designated email.

    The email will contain links for resetting passwords for all accounts
    associated to the email (``self.users_cache``).

    Recognized keyword arguments:
        domain_override -- site name to display instead of the configured one
        use_https       -- build ``https://`` links when True
        token_generator -- alternative token generator
                           (default: Django's ``default_token_generator``)
    """
    from django.core.mail import send_mail
    # BUG FIX: previously `email_template_name` was unconditionally
    # reassigned here, silently ignoring any template a caller passed in.
    domain_override = kwargs.get('domain_override', False)
    use_https = kwargs.get('use_https', False)
    token_generator = kwargs.get('token_generator', default_token_generator)
    # One entry per account tied to this email address; each account gets
    # its own uid/token pair for the reset link.
    user_list = [{
        'uid': urlsafe_base64_encode(force_bytes(user.pk)),
        'user': user,
        'token': token_generator.make_token(user),
    } for user in self.users_cache]
    if domain_override:
        site_name = domain_override
    else:
        site_name = get_setting('site', 'global', 'sitedisplayname')
    site_url = get_setting('site', 'global', 'siteurl')
    t = loader.get_template(email_template_name)
    c = {
        'email': self.email,
        'site_url': site_url,
        'site_name': site_name,
        'user_list': user_list,
        'protocol': use_https and 'https' or 'http',
    }
    from_email = get_setting('site', 'global', 'siteemailnoreplyaddress') or settings.DEFAULT_FROM_EMAIL
    # BUG FIX: send to the address this form was bound to.  The old code
    # used `[user.email]`, which leaked the loop variable from building
    # user_list -- it raised NameError when users_cache was empty and
    # otherwise addressed only the last user iterated.
    send_mail(_("Password reset on %s") % site_name,
              t.render(Context(c)), from_email, [self.email])
0
Example 92
Project: Django--an-app-at-a-time Source File: debug.py
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    # Path that failed to resolve: prefer the one the Http404 carried,
    # falling back to the request's own path info.
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash

    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        # A bare project -- empty URLconf, or '/' with only the default
        # admin entry -- gets the friendly welcome page instead of the
        # technical 404.
        is_default_admin_only = (
            request.path == '/'
            and len(tried) == 1
            and len(tried[0]) == 1
            and getattr(tried[0][0], 'app_name', '') ==
                getattr(tried[0][0], 'namespace', '') == 'admin'
        )
        if not tried or is_default_admin_only:
            return default_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    # Best-effort "module.name" of the view that raised the 404.
    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Resolver404:
        pass
    else:
        view = resolver_match.func
        if hasattr(view, '__name__'):
            caller = view.__name__
        elif hasattr(view, '__class__') and hasattr(view.__class__, '__name__'):
            caller = view.__class__.__name__
        if hasattr(view, '__module__'):
            caller = '%s.%s' % (view.__module__, caller)

    template = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
    context = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
        'raising_view_name': caller,
    })
    return HttpResponseNotFound(template.render(context), content_type='text/html')
0
Example 93
Project: django-rest-framework Source File: test.py
def _encode_data(self, data, format=None, content_type=None):
    """
    Encode the data returning a two tuple of (bytes, content_type)

    At most one of `format` and `content_type` may be supplied; an
    explicit content type treats `data` as a raw bytestring, otherwise
    the matching renderer serializes it.
    """
    if data is None:
        return ('', content_type)

    assert format is None or content_type is None, (
        'You may not set both `format` and `content_type`.'
    )

    if content_type:
        # Content type specified explicitly, treat data as a raw bytestring
        return (force_bytes(data, settings.DEFAULT_CHARSET), content_type)

    format = format or self.default_format
    assert format in self.renderer_classes, (
        "Invalid format '{0}'. Available formats are {1}. "
        "Set TEST_REQUEST_RENDERER_CLASSES to enable "
        "extra request formats.".format(
            format,
            ', '.join(["'" + fmt + "'" for fmt in self.renderer_classes.keys()])
        )
    )

    # Use format and render the data into a bytestring
    renderer = self.renderer_classes[format]()
    rendered = renderer.render(data)

    # Determine the content-type header from the renderer
    content_type = "{0}; charset={1}".format(
        renderer.media_type, renderer.charset
    )

    # Coerce text to bytes if required.
    if isinstance(rendered, six.text_type):
        rendered = bytes(rendered.encode(renderer.charset))

    return rendered, content_type
0
Example 94
def post_process(self, paths, dry_run=False, **options):
    """
    Post process the given OrderedDict of files (called from collectstatic).

    Processing is actually two separate operations:

    1. renaming files to include a hash of their content for cache-busting,
       and copying those files to the target storage.
    2. adjusting files which contain references to other files so they
       refer to the cache-busting filenames.

    If either of these are performed on a file, then that file is considered
    post-processed.

    This is a generator: it yields ``(name, hashed_name, processed)``
    tuples, or ``(name, None, exc)`` when a URL substitution fails.
    """
    # don't even dare to process the files if we're in dry run mode
    if dry_run:
        return

    # where to store the new paths
    hashed_files = OrderedDict()

    # build a list of adjustable files (those matching self._patterns,
    # i.e. files whose contents may reference other static files)
    matches = lambda path: matches_patterns(path, self._patterns.keys())
    adjustable_paths = [path for path in paths if matches(path)]

    # then sort the files by the directory level, deepest first, so that
    # referenced files are hashed before the files that refer to them
    path_level = lambda name: len(name.split(os.sep))
    for name in sorted(paths.keys(), key=path_level, reverse=True):

        # use the original, local file, not the copied-but-unprocessed
        # file, which might be somewhere far away, like S3
        storage, path = paths[name]
        with storage.open(path) as original_file:

            # generate the hash with the original content, even for
            # adjustable files.
            hashed_name = self.hashed_name(name, original_file)

            # then get the original's file content..
            # (hashed_name() may have consumed the file; rewind first)
            if hasattr(original_file, 'seek'):
                original_file.seek(0)

            hashed_file_exists = self.exists(hashed_name)
            processed = False

            # ..to apply each replacement pattern to the content
            if name in adjustable_paths:
                content = original_file.read().decode(settings.FILE_CHARSET)
                for patterns in self._patterns.values():
                    for pattern, template in patterns:
                        converter = self.url_converter(name, template)
                        try:
                            content = pattern.sub(converter, content)
                        except ValueError as exc:
                            # report the substitution failure but keep going
                            yield name, None, exc
                # replace any stale hashed copy before saving the new one
                if hashed_file_exists:
                    self.delete(hashed_name)
                # then save the processed result
                content_file = ContentFile(force_bytes(content))
                saved_name = self._save(hashed_name, content_file)
                hashed_name = force_text(self.clean_name(saved_name))
                processed = True
            else:
                # or handle the case in which neither processing nor
                # a change to the original file happened
                if not hashed_file_exists:
                    processed = True
                    saved_name = self._save(hashed_name, original_file)
                    hashed_name = force_text(self.clean_name(saved_name))

            # and then set the cache accordingly
            hashed_files[self.hash_key(name)] = hashed_name
            yield name, hashed_name, processed

    # Finally store the processed paths
    self.hashed_files.update(hashed_files)
0
Example 95
Project: djmail Source File: utils.py
def deserialize_email_message(data):
    """Rebuild an email message object from its base64-encoded pickle form."""
    raw = base64.b64decode(force_bytes(data))
    # NOTE(review): pickle.loads is only safe here because `data` comes
    # from this application's own serialized messages, never user input.
    return pickle.loads(raw)
0
Example 96
def deserialize(self, value):
    """Unpickle *value*, coercing it to bytes first."""
    payload = force_bytes(value)
    # NOTE(review): pickle is only safe for trusted, self-produced data.
    return pickle.loads(payload)
0
Example 97
def deserialize_instance(model, data):
    """
    Rebuild an unsaved ``model`` instance from a serialized ``data`` dict.

    Values are converted back per field type (datetimes/times/dates are
    parsed from ISO strings, binary fields are base64-decoded).  Keys
    prefixed with ``SERIALIZED_DB_FIELD_PREFIX`` are restored through the
    field's ``from_db_value``.  Keys that do not match a model field are
    set as plain attributes unchanged.

    Raises ImproperlyConfigured when a db-value field cannot be restored.
    """
    ret = model()
    for k, v in data.items():
        is_db_value = False
        if k.startswith(SERIALIZED_DB_FIELD_PREFIX):
            # Strip the marker prefix; remember to route through
            # from_db_value() below.
            k = k[len(SERIALIZED_DB_FIELD_PREFIX):]
            is_db_value = True
        if v is not None:
            try:
                f = model._meta.get_field(k)
                if isinstance(f, DateTimeField):
                    v = dateparse.parse_datetime(v)
                elif isinstance(f, TimeField):
                    v = dateparse.parse_time(v)
                elif isinstance(f, DateField):
                    v = dateparse.parse_date(v)
                elif isinstance(f, BinaryField):
                    v = force_bytes(
                        base64.b64decode(
                            force_bytes(v)))
                elif is_db_value:
                    try:
                        # This is quite an ugly hack, but will cover most
                        # use cases...
                        v = f.from_db_value(v, None, None, None)
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt; catch only real errors.
                    except Exception:
                        raise ImproperlyConfigured(
                            "Unable to auto serialize field '{}', custom"
                            " serialization override required".format(k)
                        )
            except FieldDoesNotExist:
                # Not a model field -- assign the raw value below.
                pass
        setattr(ret, k, v)
    return ret
0
Example 98
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.

    Returns the encoded body as bytes, parts joined by CRLF.
    """
    lines = []

    # Named defs instead of lambda assignments (PEP 8 / E731).
    def to_bytes(s):
        return force_bytes(s, settings.DEFAULT_CHARSET)

    # Not by any means perfect, but good enough for our purposes.
    def is_file(thing):
        return hasattr(thing, "read") and callable(thing.read)

    def encode_form_value(key, value):
        # One ordinary (non-file) form field, as a list of encoded lines.
        # Extracted because it was duplicated for scalar and list items.
        return [to_bytes(val) for val in [
            '--%s' % boundary,
            'Content-Disposition: form-data; name="%s"' % key,
            '',
            value
        ]]

    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, six.string_types) and is_iterable(value):
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend(encode_form_value(key, item))
        else:
            lines.extend(encode_form_value(key, value))

    lines.extend([
        to_bytes('--%s--' % boundary),
        b'',
    ])
    return b'\r\n'.join(lines)
0
Example 99
def __init__(self, param, cursor, strings_only=False):
    """
    Wrap a query parameter for binding to an Oracle cursor.

    Sets two attributes read by the cursor code:
      self.force_bytes -- the value actually transmitted to the database
      self.input_size  -- an explicit bind size/type hint, or None
    """
    # With raw SQL queries, datetimes can reach this function
    # without being converted by DateTimeField.get_db_prep_value.
    if settings.USE_TZ and (isinstance(param, datetime.datetime) and
                            not isinstance(param, Oracle_datetime)):
        if timezone.is_naive(param):
            warnings.warn("Oracle received a naive datetime (%s)"
                          " while time zone support is active." % param,
                          RuntimeWarning)
            default_timezone = timezone.get_default_timezone()
            param = timezone.make_aware(param, default_timezone)
        # Normalize to UTC before handing to the driver's datetime type.
        param = Oracle_datetime.from_datetime(param.astimezone(timezone.utc))

    string_size = 0
    # Oracle doesn't recognize True and False correctly in Python 3.
    # The conversion done below works both in 2 and 3.
    if param is True:
        param = "1"
    elif param is False:
        param = "0"
    if hasattr(param, 'bind_parameter'):
        # Object knows how to bind itself (custom field value).
        self.force_bytes = param.bind_parameter(cursor)
    elif isinstance(param, Database.Binary):
        # Raw binary passes through untouched.
        self.force_bytes = param
    else:
        # To transmit to the database, we need Unicode if supported
        # To get size right, we must consider bytes.
        self.force_bytes = convert_unicode(param, cursor.charset,
                                           strings_only)
        if isinstance(self.force_bytes, six.string_types):
            # We could optimize by only converting up to 4000 bytes here
            string_size = len(force_bytes(param, cursor.charset, strings_only))
    if hasattr(param, 'input_size'):
        # If parameter has `input_size` attribute, use that.
        self.input_size = param.input_size
    elif string_size > 4000:
        # Mark any string param greater than 4000 characters as a CLOB.
        self.input_size = Database.CLOB
    else:
        self.input_size = None
0
Example 100
def serialize(self, value):
    """Serialize *value* as JSON and coerce the result to bytes."""
    encoded = json.dumps(value)
    return force_bytes(encoded)