Here are examples of the Python API django.utils.encoding.smart_str, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
172 Examples
0
Example 151
Project: micro-finance Source File: views.py
def get(self, request, *args, **kwargs):
    """Stream a client's loan-account ledger as a UTF-8 CSV attachment.

    Looks up the client and loan account from the URL kwargs, skips
    receipts where *both* demand amounts are zero, and writes one row
    per receipt with demand, collection and outstanding-balance figures.
    Returns the error message as a plain response on failure.
    """
    client = get_object_or_404(Client, id=kwargs.get("client_id"))
    loanaccount = get_object_or_404(LoanAccount, id=self.kwargs.get("loanaccount_id"))
    receipts_list = Receipts.objects.filter(
        client=client,
        member_loan_account=loanaccount
    ).exclude(
        # Excludes only rows where BOTH demand amounts are zero.
        demand_loanprinciple_amount_atinstant=0,
        demand_loaninterest_amount_atinstant=0
    )
    try:
        response = HttpResponse(content_type='application/x-download')
        # NOTE(review): filename is built from unquoted user names — spaces or
        # punctuation could break the header; confirm whether quoting is needed.
        response['Content-Disposition'] = (
            'attachment; filename=' +
            client.first_name + client.last_name + "_ledger.csv")
        writer = csv.writer(response, csv.excel)
        # UTF-8 BOM so spreadsheet apps pick the right encoding.
        response.write(u'\ufeff'.encode('utf8'))
        group = client.group_set.get()
        writer.writerow([
            smart_str(client.id),
            smart_str(client.first_name),
            smart_str(group.name),
        ])
        # Fixed header typos: "Recepit" -> "Receipt", "Collecton" -> "Collection".
        writer.writerow([
            smart_str(u"Date"),
            smart_str(u"Receipt No"),
            smart_str(u"Demand Principal"),
            smart_str(u"Demand Interest"),
            smart_str(u"Collection Principal"),
            smart_str(u"Collection Interest"),
            smart_str(u"Balance Principal"),
            smart_str(u"Balance Interest"),
            smart_str(u"Loan Outstanding"),
        ])
        for receipt in receipts_list:
            # Treat NULL/0 amounts as zero; balances never go negative
            # (same behaviour as the previous var1..var5 comparisons).
            demand_principal = (d(receipt.demand_loanprinciple_amount_atinstant)
                                if receipt.demand_loanprinciple_amount_atinstant else 0)
            paid_principal = (d(receipt.loanprinciple_amount)
                              if receipt.loanprinciple_amount else 0)
            balance_principle = max(demand_principal - paid_principal, 0)
            demand_interest = (d(receipt.demand_loaninterest_amount_atinstant)
                               if receipt.demand_loaninterest_amount_atinstant else 0)
            paid_interest = (d(receipt.loaninterest_amount)
                             if receipt.loaninterest_amount else 0)
            balance_interest = max(demand_interest - paid_interest, 0)
            writer.writerow([
                smart_str(receipt.date),
                smart_str(receipt.receipt_number),
                smart_str(receipt.demand_loanprinciple_amount_atinstant),
                smart_str(receipt.demand_loaninterest_amount_atinstant),
                smart_str(receipt.loanprinciple_amount),
                smart_str(receipt.loaninterest_amount),
                smart_str(balance_principle),
                smart_str(balance_interest),
                smart_str(receipt.principle_loan_balance_atinstant),
            ])
        return response
    except Exception as err:
        # Best-effort error reporting: show the message instead of a 500.
        errmsg = "%s" % (err)
        return HttpResponse(errmsg)
0
Example 152
Project: django-speedbar Source File: middleware.py
def process_response(self, request, response):
    """Attach speedbar metrics to the outgoing response.

    Adds metric headers and/or a trace URL when requested, and for HTML
    responses (when enabled) substitutes the template-tag placeholders
    with rendered metric values and panel URLs.
    """
    if not getattr(settings, 'SPEEDBAR_ENABLE', True):
        return response
    trace = RequestTrace.instance()
    # TODO: Do we also need to stash this on in case of exception?
    trace.response = response
    module_metrics = {name: mod.get_metrics() for name, mod in trace.modules.items()}
    if self.should_return_response_headers(request):
        self.add_response_headers(response, module_metrics)
    if self.should_return_trace_header(request):
        response['X-TraceUrl'] = reverse('speedbar_trace', args=[trace.id])
        trace.persist_log = True
    if self.should_replace_template_tags(request):
        gzipped = 'gzip' in response.get('Content-Encoding', '')
        mime_type = response.get('Content-Type', '').split(';')[0]
        if not gzipped and mime_type in HTML_TYPES:
            # Force render of response (from lazy TemplateResponses) before
            # speedbar is injected.
            if hasattr(response, 'render'):
                response.render()
            body = smart_unicode(response.content)
            body = self.replace_templatetag_placeholders(body, module_metrics)
            # Note: The URLs returned here do not exist at this point. The
            # relevant data is added to the cache by a signal handler once all
            # page processing is finally done, so the summary values shown and
            # the detailed breakdown may not quite correspond.
            if getattr(settings, 'SPEEDBAR_PANEL', True):
                panel_url = reverse('speedbar_panel', args=[trace.id])
                placeholder = reverse('speedbar_details_for_this_request')
                body = body.replace(placeholder, panel_url)
                trace.persist_details = True
            response.content = smart_str(body)
            if response.get('Content-Length', None):
                response['Content-Length'] = len(response.content)
    return response
0
Example 153
Project: django-mongoengine Source File: util.py
def label_for_field(name, model, model_admin=None, return_attr=False):
    """Return a human-readable column label for *name* on *model*.

    Real fields use the field name with underscores replaced by spaces;
    '__unicode__'/'__str__' fall back to the model's verbose_name; any
    other name is resolved as a callable, an admin attribute, or a model
    attribute. With *return_attr* set, a (label, attr) pair is returned.

    Raises AttributeError when the name cannot be resolved at all.
    """
    attr = None
    try:
        field = model._meta.get_field_by_name(name)[0]
        label = field.name.replace('_', ' ')
    except FieldDoesNotExist:
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
        elif name == "__str__":
            label = smart_str(model._meta.verbose_name)
        else:
            # Resolve the attribute: callable, admin attribute, or model attribute.
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                msg = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    msg = "%s or %s" % (msg, model_admin.__class__.__name__)
                raise AttributeError(msg)
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif callable(attr):
                # Lambdas have no useful name to prettify.
                label = "--" if attr.__name__ == "<lambda>" else pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    return (label, attr) if return_attr else label
0
Example 154
def process_request(self, request):
    """Canonicalise the locale prefix of the URL.

    Redirects permanently when the language is being switched or when the
    fixed path differs from the requested one; otherwise rewrites the path
    in place and activates the resolved locale.
    """
    prefixer = urlresolvers.Prefixer(request)
    urlresolvers.set_url_prefix(prefixer)
    fixed_path = prefixer.fix(prefixer.shortened_path)
    if self._is_lang_change(request):
        # Blank out the locale so a fresh one can be chosen, and drop
        # 'lang' from the query string to avoid an endless redirect loop.
        prefixer.locale = ''
        target = prefixer.fix(prefixer.shortened_path)
        params = {smart_str(key): request.GET[key] for key in request.GET}
        del params['lang']
        return HttpResponsePermanentRedirect(urlparams(target, **params))
    if fixed_path != request.path:
        qs = request.META.get('QUERY_STRING', '')
        location = urllib.quote(fixed_path.encode('utf-8'))
        if qs:
            location = '%s?%s' % (location, qs)
        redirect = HttpResponsePermanentRedirect(location)
        # Vary on Accept-Language if the redirect changed the locale.
        new_locale, _ = urlresolvers.split_path(location)
        if prefixer.locale != new_locale:
            redirect['Vary'] = 'Accept-Language'
        return redirect
    request.path_info = '/' + prefixer.shortened_path
    request.locale = prefixer.locale
    tower.activate(prefixer.locale)
0
Example 155
Project: betafarm Source File: middleware.py
def process_request(self, request):
    """Canonicalise the locale prefix of the URL (betafarm variant).

    A ?lang= parameter triggers a permanent redirect with the locale reset;
    a non-canonical path triggers a permanent redirect to the fixed path;
    otherwise the path is rewritten in place and the locale activated.
    """
    prefixer = urlresolvers.Prefixer(request)
    urlresolvers.set_url_prefix(prefixer)
    fixed_path = prefixer.fix(prefixer.shortened_path)
    if 'lang' in request.GET:
        # Blank out the locale so that we can set a new one; 'lang' is
        # dropped from the query so we don't redirect forever.
        prefixer.locale = ''
        target = prefixer.fix(prefixer.shortened_path)
        params = {smart_str(key): request.GET[key]
                  for key in request.GET if key != 'lang'}
        return HttpResponsePermanentRedirect(urlparams(target, **params))
    if fixed_path != request.path:
        qs = request.META.get('QUERY_STRING', '')
        location = urllib.quote(fixed_path.encode('utf-8'))
        if qs:
            location = '%s?%s' % (location, qs)
        redirect = HttpResponsePermanentRedirect(location)
        # Vary on Accept-Language if the redirect changed the locale.
        # Note: this variant uses prefixer.split_path, not the module helper.
        new_locale, _ = prefixer.split_path(location)
        if prefixer.locale != new_locale:
            redirect['Vary'] = 'Accept-Language'
        return redirect
    request.path_info = '/' + prefixer.shortened_path
    request.locale = prefixer.locale
    tower.activate(prefixer.locale)
0
Example 156
Project: fjord Source File: middleware.py
def process_request(self, request):
    """Canonicalise the locale prefix of the URL (fjord variant).

    Same flow as the other locale middlewares, but always activates a real
    locale code — falling back to settings.LANGUAGE_CODE — so a blank
    prefixer locale can't leak the previous request's translations.
    """
    prefixer = urlresolvers.Prefixer(request)
    urlresolvers.set_url_prefix(prefixer)
    fixed_path = prefixer.fix(prefixer.shortened_path)
    if self._is_lang_change(request):
        # Blank out the locale so that we can set a new one. Remove lang
        # from the query params so we don't have an infinite loop.
        prefixer.locale = ''
        target = prefixer.fix(prefixer.shortened_path)
        params = {smart_str(key): request.GET[key] for key in request.GET}
        params.pop('lang')
        return HttpResponsePermanentRedirect(urlparams(target, **params))
    if fixed_path != request.path:
        qs = request.META.get('QUERY_STRING', '')
        location = urllib.quote(fixed_path.encode('utf-8'))
        if qs:
            location = '%s?%s' % (location, qs)
        redirect = HttpResponsePermanentRedirect(location)
        # Vary on Accept-Language if the redirect changed the locale.
        new_locale, _ = urlresolvers.split_path(location)
        if prefixer.locale != new_locale:
            redirect['Vary'] = 'Accept-Language'
        return redirect
    request.path_info = '/' + prefixer.shortened_path
    request.locale = prefixer.locale
    # prefixer.locale can be '', but we need a real locale code to activate,
    # otherwise the request reuses the previous request's translations.
    translation.activate(prefixer.locale or settings.LANGUAGE_CODE)
0
Example 157
Project: kitsune Source File: middleware.py
def process_request(self, request):
    """Canonicalise the locale prefix of the URL (kitsune variant).

    An explicit ?lang= selection (validated against SUMO_LANGUAGES)
    triggers a redirect with the locale reset and, for anonymous users,
    is remembered in the session; a non-canonical path also redirects.
    Otherwise the path is rewritten in place and the locale activated.
    """
    prefixer = Prefixer(request)
    set_url_prefixer(prefixer)
    fixed_path = prefixer.fix(prefixer.shortened_path)
    if request.GET.get('lang', '') in settings.SUMO_LANGUAGES:
        # Blank out the locale so that we can set a new one; drop 'lang'
        # from the query params so we don't have an infinite loop.
        prefixer.locale = ''
        target = prefixer.fix(prefixer.shortened_path)
        params = {smart_str(k): v
                  for k, v in request.GET.iteritems() if k != 'lang'}
        # 'lang' is only used on the language selection page. If this is
        # present it is safe to set language preference for the current
        # user.
        if request.user.is_anonymous():
            request.session[settings.LANGUAGE_COOKIE_NAME] = request.GET['lang']
        return HttpResponseRedirect(urlparams(target, **params))
    if fixed_path != request.path:
        qs = request.META.get('QUERY_STRING', '')
        location = urllib.quote(fixed_path.encode('utf-8'))
        if qs:
            location = '%s?%s' % (location, qs)
        response = HttpResponseRedirect(location)
        # Vary on Accept-Language if the redirect changed the locale.
        new_locale, _ = split_path(location)
        if prefixer.locale != new_locale:
            response['Vary'] = 'Accept-Language'
        return response
    request.path_info = '/' + prefixer.shortened_path
    request.LANGUAGE_CODE = prefixer.locale
    translation.activate(prefixer.locale)
0
Example 158
Project: kuma Source File: utils.py
def urlparams(url_, fragment=None, query_dict=None, **query):
    """
    Add a fragment and/or query parameters to a URL.

    New query params are appended to the existing ones, except that a
    duplicated name replaces the old value(s).
    """
    parsed = urlparse.urlparse(url_)
    frag = parsed.fragment if fragment is None else fragment
    if parsed.query:
        params = QueryDict(smart_str(parsed.query), mutable=True)
    else:
        params = QueryDict('', mutable=True)
    if query_dict:
        for key, values in query_dict.lists():
            params[key] = None  # Replace, don't append.
            for value in values:
                params.appendlist(key, value)
    for key, value in query.items():
        # Replace, don't append.
        if isinstance(value, list):
            params.setlist(key, value)
        else:
            params[key] = value
    encoded = urlencode([(key, value)
                         for key, values in params.lists()
                         for value in values if value is not None])
    rebuilt = urlparse.ParseResult(parsed.scheme, parsed.netloc, parsed.path,
                                   parsed.params, encoded, frag)
    return rebuilt.geturl()
0
Example 159
Project: mozilla-ignite Source File: __init__.py
def foreignkey_autocomplete(self, request):
    """
    Search the fields of the given related model and return the result as
    a simple string consumable by the jQuery Autocomplete plugin.

    Query parameters:
        q             whitespace-separated search terms
        app_label     app containing the target model
        model_name    target model's name
        search_fields comma-separated field names; '^', '=' and '@'
                      prefixes select istartswith/iexact/search lookups
        object_pk     look up a single object by primary key instead

    Returns 404 when the required parameters are missing.
    """
    query = request.GET.get('q', None)
    app_label = request.GET.get('app_label', None)
    model_name = request.GET.get('model_name', None)
    search_fields = request.GET.get('search_fields', None)
    object_pk = request.GET.get('object_pk', None)
    try:
        to_string_function = self.related_string_functions[model_name]
    except KeyError:
        to_string_function = lambda x: x.__unicode__()
    if search_fields and app_label and model_name and (query or object_pk):
        def construct_search(field_name):
            # Map the prefix notation onto the matching ORM lookup.
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name
        model = models.get_model(app_label, model_name)
        queryset = model._default_manager.all()
        data = ''
        if query:
            # AND together one OR-group of field lookups per search term.
            for bit in query.split():
                or_queries = [models.Q(**{construct_search(
                    smart_str(field_name)): smart_str(bit)})
                    for field_name in search_fields.split(',')]
                other_qs = QuerySet(model)
                other_qs.dup_select_related(queryset)
                other_qs = other_qs.filter(reduce(operator.or_, or_queries))
                queryset = queryset & other_qs
            data = ''.join([u'%s|%s\n' % (
                to_string_function(f), f.pk) for f in queryset])
        elif object_pk:
            try:
                obj = queryset.get(pk=object_pk)
            except (model.DoesNotExist, ValueError):
                # Was a bare ``except:``; only a missing object or a
                # malformed primary key should fall through to empty data,
                # not every conceivable error.
                pass
            else:
                data = to_string_function(obj)
        return HttpResponse(data)
    return HttpResponseNotFound()
0
Example 160
def process_request(self, request):
    """Canonicalise the locale prefix of the URL.

    Identical flow to the sibling middlewares, finishing with a bare
    activate() call on the resolved locale.
    """
    prefixer = urlresolvers.Prefixer(request)
    urlresolvers.set_url_prefix(prefixer)
    fixed_path = prefixer.fix(prefixer.shortened_path)
    if self._is_lang_change(request):
        # Blank out the locale so that we can set a new one; drop 'lang'
        # from the query params so we don't have an infinite loop.
        prefixer.locale = ''
        target = prefixer.fix(prefixer.shortened_path)
        params = {smart_str(key): request.GET[key] for key in request.GET}
        params.pop('lang')
        return HttpResponsePermanentRedirect(urlparams(target, **params))
    if fixed_path != request.path:
        qs = request.META.get('QUERY_STRING', '')
        location = urllib.quote(fixed_path.encode('utf-8'))
        if qs:
            location = '%s?%s' % (location, qs)
        redirect = HttpResponsePermanentRedirect(location)
        # Vary on Accept-Language if the redirect changed the locale.
        new_locale, _ = urlresolvers.split_path(location)
        if prefixer.locale != new_locale:
            redirect['Vary'] = 'Accept-Language'
        return redirect
    request.path_info = '/' + prefixer.shortened_path
    request.locale = prefixer.locale
    activate(prefixer.locale)
0
Example 161
Project: popcorn_maker Source File: db.py
# Debug-toolbar cursor wrapper: execute the SQL on the wrapped cursor, then
# (in the finally block, so it runs even when execution raises) record timing,
# stack trace, parameters and engine metadata with the panel logger.
def execute(self, sql, params=()):
__traceback_hide__ = True
# Wall-clock timing around the underlying cursor call.
start = datetime.now()
try:
return self.cursor.execute(sql, params)
finally:
stop = datetime.now()
duration = ms_from_timedelta(stop - start)
enable_stacktraces = getattr(settings,
'DEBUG_TOOLBAR_CONFIG', {}) \
.get('ENABLE_STACKTRACES', True)
if enable_stacktraces:
stacktrace = tidy_stacktrace(reversed(get_stack()))
else:
stacktrace = []
# Best-effort JSON rendering of the query parameters for display.
_params = ''
try:
_params = simplejson.dumps(
[force_unicode(x, strings_only=True) for x in params]
)
except TypeError:
pass # object not JSON serializable
# Walk up the call stack looking for a template Node.render frame so the
# query can be attributed to a template location.
template_info = None
cur_frame = sys._getframe().f_back
try:
while cur_frame is not None:
if cur_frame.f_code.co_name == 'render':
node = cur_frame.f_locals['self']
if isinstance(node, Node):
template_info = get_template_info(node.source)
break
cur_frame = cur_frame.f_back
except:
# NOTE(review): bare except keeps attribution best-effort, but
# ``except Exception`` would be safer (doesn't swallow KeyboardInterrupt).
pass
# Drop the frame reference to break the reference cycle promptly.
del cur_frame
alias = getattr(self.db, 'alias', 'default')
conn = connections[alias].connection
# HACK: avoid imports
if conn:
engine = conn.__class__.__module__.split('.', 1)[0]
else:
engine = 'unknownown' if False else 'unknown'
params = {
'engine': engine,
'alias': alias,
'sql': self.db.ops.last_executed_query(self.cursor, sql,
self._quote_params(params)),
'duration': duration,
'raw_sql': sql,
'params': _params,
# Hash identifies repeated queries across requests; keyed with SECRET_KEY.
'hash': sha_constructor(settings.SECRET_KEY \
+ smart_str(sql) \
+ _params).hexdigest(),
'stacktrace': stacktrace,
'start_time': start,
'stop_time': stop,
'is_slow': (duration > SQL_WARNING_THRESHOLD),
'is_select': sql.lower().strip().startswith('select'),
'template_info': template_info,
}
# Extra transaction metadata is only available on psycopg2 connections.
if engine == 'psycopg2':
params.update({
'trans_id': self.logger.get_transaction_id(alias),
'trans_status': conn.get_transaction_status(),
'iso_level': conn.isolation_level,
'encoding': conn.encoding,
})
# We keep `sql` to maintain backwards compatibility
self.logger.record(**params)
0
Example 162
Project: armstrong.esi Source File: http_client.py
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.
    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.
    """
    def to_str(value):
        return smart_str(value, settings.DEFAULT_CHARSET)

    # Not by any means perfect, but good enough for our purposes.
    def is_file(thing):
        return hasattr(thing, "read") and callable(thing.read)

    def text_part(key, value):
        # One plain form-data section: boundary, headers, blank line, body.
        return [
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % to_str(key),
            '',
            to_str(value),
        ]

    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    parts = []
    for key, value in data.items():
        if is_file(value):
            parts.extend(encode_file(boundary, key, value))
        elif not isinstance(value, basestring) and is_iterable(value):
            for item in value:
                if is_file(item):
                    parts.extend(encode_file(boundary, key, item))
                else:
                    parts.extend(text_part(key, item))
        else:
            parts.extend(text_part(key, value))
    parts.extend(['--' + boundary + '--', ''])
    return '\r\n'.join(parts)
0
Example 163
Project: mapit Source File: mapit_global_find_differences.py
# Management command: compare freshly generated KML boundary files under
# *directory_name* against the areas stored in the database, writing one CSV
# row of difference metrics (areas, symmetric difference, GEOS equality) per
# OSM way/relation.
def handle_label(self, directory_name, **options):
if not os.path.isdir(directory_name):
raise Exception("'%s' is not a directory" % (directory_name,))
os.chdir(directory_name)
# Set skip_up_to to a filename fragment to resume a partial run.
skip_up_to = None
# skip_up_to = 'relation-80370'
skipping = bool(skip_up_to)
osm_elements_seen_in_new_data = set([])
# NOTE(review): output path is hard-coded to one developer's home directory
# — should be a command option.
with open("/home/mark/difference-results.csv", 'w') as fp:
csv_writer = csv.writer(fp)
csv_writer.writerow(["ElementType",
"ElementID",
"ExistedPreviously",
"PreviousEmpty",
"PreviousArea",
"NewEmpty",
"NewArea",
"SymmetricDifferenceArea",
"GEOSEquals",
"GEOSEqualsExact"])
for admin_directory in sorted(x for x in os.listdir('.') if os.path.isdir(x)):
# NOTE(review): only warns on an unexpected directory name — there is no
# ``continue``, so the directory is still processed; confirm intent.
if not re.search('^[A-Z0-9]{3}$', admin_directory):
print("Skipping a directory that doesn't look like a MapIt type:", admin_directory)
if not os.path.exists(admin_directory):
continue
files = sorted(os.listdir(admin_directory))
for i, e in enumerate(files):
if skipping:
if skip_up_to in e:
skipping = False
else:
continue
if not e.endswith('.kml'):
continue
# Filenames look like 'way-1234-...' / 'relation-1234-...'.
m = re.search(r'^(way|relation)-(\d+)-', e)
if not m:
raise Exception("Couldn't extract OSM element type and ID from: " + e)
osm_type, osm_id = m.groups()
osm_elements_seen_in_new_data.add((osm_type, osm_id))
kml_filename = os.path.join(admin_directory, e)
# Need to parse the KML manually to get the ExtendedData
kml_data = KML()
print("parsing", kml_filename)
xml.sax.parse(kml_filename, kml_data)
useful_names = [n for n in kml_data.data.keys() if not n.startswith('Boundaries for')]
if len(useful_names) == 0:
raise Exception("No useful names found in KML data")
elif len(useful_names) > 1:
raise Exception("Multiple useful names found in KML data")
name = useful_names[0]
print(" ", smart_str(name))
if osm_type == 'relation':
code_type_osm = CodeType.objects.get(code='osm_rel')
elif osm_type == 'way':
code_type_osm = CodeType.objects.get(code='osm_way')
else:
raise Exception("Unknown OSM element type: " + osm_type)
ds = DataSource(kml_filename)
if len(ds) != 1:
raise Exception("We only expect one layer in a DataSource")
layer = ds[0]
if len(layer) != 1:
raise Exception("We only expect one feature in each layer")
feat = layer[0]
# Any earlier DB areas for this OSM element, oldest generation first.
osm_codes = list(Code.objects.filter(type=code_type_osm, code=osm_id))
osm_codes.sort(key=lambda e: e.area.generation_high.created)
new_area = None
new_empty = None
previous_area = None
previous_empty = None
symmetric_difference_area = None
g = feat.geom.transform(4326, clone=True)
# A ring with fewer than 4 points can't be a valid closed polygon.
for polygon in g:
if polygon.point_count < 4:
new_empty = True
if not new_empty:
new_geos_geometry = g.geos.simplify(tolerance=0)
new_area = new_geos_geometry.area
new_empty = new_geos_geometry.empty
geos_equals = None
geos_equals_exact = None
most_recent_osm_code = None
if osm_codes:
most_recent_osm_code = osm_codes[-1]
previous_geos_geometry = most_recent_osm_code.area.polygons.collect()
previous_empty = previous_geos_geometry is None
if not previous_empty:
# simplify(tolerance=0) is used to clean/validate the geometry.
previous_geos_geometry = previous_geos_geometry.simplify(tolerance=0)
previous_area = previous_geos_geometry.area
if not new_empty:
symmetric_difference_area = previous_geos_geometry.sym_difference(
new_geos_geometry).area
geos_equals = previous_geos_geometry.equals(new_geos_geometry)
geos_equals_exact = previous_geos_geometry.equals_exact(new_geos_geometry)
csv_writer.writerow([osm_type,
osm_id,
bool(osm_codes), # ExistedPreviously
empty_if_none(previous_empty),
empty_if_none(previous_area),
empty_if_none(new_empty),
empty_if_none(new_area),
empty_if_none(symmetric_difference_area),
empty_if_none(geos_equals),
empty_if_none(geos_equals_exact)])
0
Example 164
Project: mapit Source File: mapit_global_import.py
# Management command: import admin boundaries from KML files under
# *directory_name* into a new MapIt generation. Areas whose geometry is
# unchanged from the current generation just get generation_high raised;
# otherwise a new Area (plus names and an OSM Code) is created.
def handle_label(self, directory_name, **options):
current_generation = Generation.objects.current()
new_generation = Generation.objects.new()
if not new_generation:
raise Exception("No new generation to be used for import!")
if not os.path.isdir(directory_name):
raise Exception("'%s' is not a directory" % (directory_name,))
os.chdir(directory_name)
# MapIt type directories are exactly three characters, e.g. O11, OWA.
mapit_type_glob = smart_text("[A-Z0-9][A-Z0-9][A-Z0-9]")
if not glob(mapit_type_glob):
raise Exception(
"'%s' did not contain any directories that look like MapIt types (e.g. O11, OWA, etc.)" % (
directory_name,))
# Print progress messages only when --verbosity > 1.
def verbose(s):
if int(options['verbosity']) > 1:
print(smart_str(s))
verbose("Loading any admin boundaries from " + directory_name)
verbose("Finding language codes...")
# Map both ISO 639-2 two- and three-letter codes to English language names.
language_code_to_name = {}
code_keys = ('two_letter', 'three_letter')
for row in get_iso639_2_table():
english_name = getattr(row, 'english_name')
for k in code_keys:
code = getattr(row, k)
if not code:
continue
language_code_to_name[code] = english_name
global_country = Country.objects.get(code='G')
# print json.dumps(language_code_to_name, sort_keys=True, indent=4)
# Set skip_up_to to a filename fragment to resume a partial run.
skip_up_to = None
# skip_up_to = 'relation-80370'
skipping = bool(skip_up_to)
for type_directory in sorted(glob(mapit_type_glob)):
verbose("Loading type " + type_directory)
if not os.path.exists(type_directory):
verbose("Skipping the non-existent " + type_directory)
continue
verbose("Loading all KML in " + type_directory)
files = sorted(os.listdir(type_directory))
total_files = len(files)
for i, e in enumerate(files):
progress = "[%d%% complete] " % ((i * 100) / total_files,)
if skipping:
if skip_up_to in e:
skipping = False
else:
continue
if not e.endswith('.kml'):
verbose("Ignoring non-KML file: " + e)
continue
# Filenames look like 'way-1234-...' / 'relation-1234-...'.
m = re.search(r'^(way|relation)-(\d+)-', e)
if not m:
raise Exception("Couldn't extract OSM element type and ID from: " + e)
osm_type, osm_id = m.groups()
kml_filename = os.path.join(type_directory, e)
verbose(progress + "Loading " + os.path.realpath(kml_filename))
# Need to parse the KML manually to get the ExtendedData
kml_data = KML()
xml.sax.parse(smart_str(kml_filename), kml_data)
useful_names = [n for n in kml_data.data.keys() if not n.startswith('Boundaries for')]
if len(useful_names) == 0:
raise Exception("No useful names found in KML data")
elif len(useful_names) > 1:
raise Exception("Multiple useful names found in KML data")
name = useful_names[0]
print(smart_str(" %s" % name))
if osm_type == 'relation':
code_type_osm = CodeType.objects.get(code='osm_rel')
elif osm_type == 'way':
code_type_osm = CodeType.objects.get(code='osm_way')
else:
raise Exception("Unknown OSM element type: " + osm_type)
ds = DataSource(kml_filename)
layer = ds[0]
if len(layer) != 1:
raise Exception("We only expect one feature in each layer")
# NOTE(review): BUG — len(layer) == 1 was just enforced, so the only valid
# index is 0; layer[1] raises IndexError. Should be ``feat = layer[0]``
# (as in the sibling find-differences command above).
feat = layer[1]
g = feat.geom.transform(4326, clone=True)
if g.geom_count == 0:
# Just ignore any KML files that have no polygons in them:
verbose(' Ignoring that file - it contained no polygons')
continue
# Nowadays, in generating the data we should have
# excluded any "polygons" with less than four points
# (the final one being the same as the first), but
# just in case:
polygons_too_small = 0
for polygon in g:
if polygon.num_points < 4:
polygons_too_small += 1
if polygons_too_small:
message = "%d out of %d polygon(s) were too small" % (polygons_too_small, g.geom_count)
verbose(' Skipping, since ' + message)
continue
g_geos = g.geos
if not g_geos.valid:
verbose(" Invalid KML:" + kml_filename)
fixed_multipolygon = fix_invalid_geos_multipolygon(g_geos)
if len(fixed_multipolygon) == 0:
verbose(" Invalid polygons couldn't be fixed")
continue
g = fixed_multipolygon.ogr
area_type = Type.objects.get(code=type_directory)
# The lte/gte pair pins the lookup to areas whose generation_high is
# exactly the current generation.
try:
osm_code = Code.objects.get(type=code_type_osm,
code=osm_id,
area__generation_high__lte=current_generation,
area__generation_high__gte=current_generation)
except Code.DoesNotExist:
verbose(' No area existed in the current generation with that OSM element type and ID')
osm_code = None
was_the_same_in_current = False
if osm_code:
m = osm_code.area
# First, we need to check if the polygons are
# still the same as in the previous generation:
previous_geos_geometry = m.polygons.collect()
if previous_geos_geometry is None:
verbose(' In the current generation, that area was empty - skipping')
else:
# Simplify it to make sure the polygons are valid:
previous_geos_geometry = shapely.wkb.loads(
str(previous_geos_geometry.simplify(tolerance=0).ewkb))
new_geos_geometry = shapely.wkb.loads(str(g.geos.simplify(tolerance=0).ewkb))
if previous_geos_geometry.almost_equals(new_geos_geometry, decimal=7):
was_the_same_in_current = True
else:
verbose(' In the current generation, the boundary was different')
if was_the_same_in_current:
# Extend the high generation to the new one:
verbose(' The boundary was identical in the previous generation; raising generation_high')
m.generation_high = new_generation
else:
# Otherwise, create a completely new area:
m = Area(
name=name,
type=area_type,
country=global_country,
parent_area=None,
generation_low=new_generation,
generation_high=new_generation,
)
poly = [g]
if options['commit']:
m.save()
verbose(' Area ID: ' + str(m.id))
if name not in kml_data.data:
print(json.dumps(kml_data.data, sort_keys=True, indent=4))
raise Exception("Will fail to find '%s' in the dictionary" % (name,))
# Sync translated names: update/create one Name per language key in the
# KML ExtendedData, then delete names whose language disappeared.
old_lang_codes = set(n.type.code for n in m.names.all())
for k, translated_name in kml_data.data[name].items():
language_name = None
if k == 'name':
lang = 'default'
language_name = "OSM Default"
else:
name_match = re.search(r'^name:(.+)$', k)
if name_match:
lang = name_match.group(1)
if lang in language_code_to_name:
language_name = language_code_to_name[lang]
if not language_name:
continue
old_lang_codes.discard(lang)
# Otherwise, make sure that a NameType for this language exists:
NameType.objects.update_or_create(code=lang, defaults={'description': language_name})
name_type = NameType.objects.get(code=lang)
m.names.update_or_create(type=name_type, defaults={'name': translated_name})
if old_lang_codes:
verbose('Removing deleted languages codes: ' + ' '.join(old_lang_codes))
m.names.filter(type__code__in=old_lang_codes).delete()
# If the boundary was the same, the old Code
# object will still be pointing to the same Area,
# which just had its generation_high incremented.
# In every other case, there's a new area object,
# so create a new Code and save it:
if not was_the_same_in_current:
new_code = Code(area=m, type=code_type_osm, code=osm_id)
new_code.save()
save_polygons({'dummy': (m, poly)})
0
Example 165
Project: mapit Source File: mapit_global_oneoff_fix_gen4.py
# One-off fix-up command: re-read KML boundaries for the current generation
# and, where the stored geometry differs, re-save the polygons (when --commit
# is given). Shares most of its structure with the import command above.
def handle_label(self, directory_name, **options):
current_generation = Generation.objects.current()
if not os.path.isdir(directory_name):
raise Exception("'%s' is not a directory" % (directory_name,))
os.chdir(directory_name)
# MapIt type directories are exactly three characters, e.g. O11, OWA.
mapit_type_glob = smart_text("[A-Z0-9][A-Z0-9][A-Z0-9]")
if not glob(mapit_type_glob):
raise Exception(
"'%s' did not contain any directories that look like MapIt types (e.g. O11, OWA, etc.)" % (
directory_name,))
# Print progress messages only when --verbosity > 1.
def verbose(s):
if int(options['verbosity']) > 1:
print(smart_str(s))
verbose("Loading any admin boundaries from " + directory_name)
for type_directory in sorted(glob(mapit_type_glob)):
verbose("Loading type " + type_directory)
if not os.path.exists(type_directory):
verbose("Skipping the non-existent " + type_directory)
continue
verbose("Loading all KML in " + type_directory)
files = sorted(os.listdir(type_directory))
total_files = len(files)
for i, e in enumerate(files):
progress = "[%d%% complete] " % ((i * 100) / total_files,)
if not e.endswith('.kml'):
verbose("Ignoring non-KML file: " + e)
continue
# Filenames look like 'way-1234-...' / 'relation-1234-...'.
m = re.search(r'^(way|relation)-(\d+)-', e)
if not m:
raise Exception("Couldn't extract OSM element type and ID from: " + e)
osm_type, osm_id = m.groups()
kml_filename = os.path.join(type_directory, e)
verbose(progress + "Loading " + os.path.realpath(kml_filename))
if osm_type == 'relation':
code_type_osm = CodeType.objects.get(code='osm_rel')
elif osm_type == 'way':
code_type_osm = CodeType.objects.get(code='osm_way')
else:
raise Exception("Unknown OSM element type: " + osm_type)
ds = DataSource(kml_filename)
layer = ds[0]
if len(layer) != 1:
raise Exception("We only expect one feature in each layer")
# NOTE(review): BUG — len(layer) == 1 was just enforced, so the only valid
# index is 0; layer[1] raises IndexError. Should be ``feat = layer[0]``.
feat = layer[1]
g = feat.geom.transform(4326, clone=True)
if g.geom_count == 0:
verbose(' Ignoring that file - it contained no polygons')
continue
# Rings with fewer than 4 points can't be valid closed polygons.
polygons_too_small = 0
for polygon in g:
if polygon.num_points < 4:
polygons_too_small += 1
if polygons_too_small:
message = "%d out of %d polygon(s) were too small" % (polygons_too_small, g.geom_count)
verbose(' Skipping, since ' + message)
continue
g_geos = g.geos
if not g_geos.valid:
verbose(" Invalid KML:" + kml_filename)
fixed_multipolygon = fix_invalid_geos_multipolygon(g_geos)
if len(fixed_multipolygon) == 0:
verbose(" Invalid polygons couldn't be fixed")
continue
g = fixed_multipolygon.ogr
# The lte/gte pair pins the lookup to areas whose generation_high is
# exactly the current generation; DoesNotExist is NOT handled here.
osm_code = Code.objects.get(
type=code_type_osm,
code=osm_id,
area__generation_high__lte=current_generation,
area__generation_high__gte=current_generation)
m = osm_code.area
previous_geos_geometry = m.polygons.collect()
# simplify(tolerance=0) cleans the geometries before comparison.
previous_geos_geometry = shapely.wkb.loads(str(previous_geos_geometry.simplify(tolerance=0).ewkb))
new_geos_geometry = shapely.wkb.loads(str(g.geos.simplify(tolerance=0).ewkb))
if previous_geos_geometry.almost_equals(new_geos_geometry, decimal=7):
verbose(' Boundary unchanged')
else:
verbose(' In the current generation, the boundary was different')
poly = [g]
if options['commit']:
save_polygons({'dummy': (m, poly)})
0
Example 166
Project: theyworkforyou Source File: admin_list.py
# Generator yielding one header dict per column in cl.list_display for the
# admin change-list: {"text": ...} for unsortable columns, plus "sortable",
# "url" and "class_attrib" keys for sortable ones.
# NOTE(review): uses Python-2-only ``raise E, msg`` syntax below.
def result_headers(cl):
lookup_opts = cl.lookup_opts
for i, field_name in enumerate(cl.list_display):
attr = None
try:
f = lookup_opts.get_field(field_name)
admin_order_field = None
except models.FieldDoesNotExist:
# For non-field list_display values, check for the function
# attribute "short_description". If that doesn't exist, fall back
# to the method name. And __str__ and __unicode__ are special-cases.
if field_name == '__unicode__':
header = force_unicode(lookup_opts.verbose_name)
elif field_name == '__str__':
header = smart_str(lookup_opts.verbose_name)
else:
if callable(field_name):
attr = field_name # field_name can be a callable
else:
# Prefer an attribute on the ModelAdmin, then on the model itself.
try:
attr = getattr(cl.model_admin, field_name)
except AttributeError:
try:
attr = getattr(cl.model, field_name)
except AttributeError:
raise AttributeError, \
"'%s' model or '%s' objects have no attribute '%s'" % \
(lookup_opts.object_name, cl.model_admin.__class__, field_name)
try:
header = attr.short_description
except AttributeError:
if callable(field_name):
header = field_name.__name__
else:
header = field_name
header = header.replace('_', ' ')
# It is a non-field, but perhaps one that is sortable
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
yield {"text": header}
continue
# So this _is_ a sortable non-field. Go to the yield
# after the else clause.
else:
header = f.verbose_name
# Sortable column: flag current sort state and compute the toggled order.
th_classes = []
new_order_type = 'asc'
if field_name == cl.order_field or admin_order_field == cl.order_field:
th_classes.append('sorted %sending' % cl.order_type.lower())
new_order_type = {'asc': 'desc', 'desc': 'asc'}[cl.order_type.lower()]
yield {"text": header,
"sortable": True,
"url": cl.get_query_string({ORDER_VAR: i, ORDER_TYPE_VAR: new_order_type}),
"class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')}
0
Example 167
Project: theyworkforyou Source File: main.py
def get_query_set(self):
    """Build the change-list queryset.

    Applies the query-string lookups, enables ``select_related()`` when a
    displayed field is a relation, sets ordering, and applies keyword
    searches over ``self.search_fields``.

    Raises IncorrectLookupParameters when the query-string lookups cannot
    be applied to the queryset.
    """
    qs = self.root_query_set
    lookup_params = self.params.copy() # a dictionary of the query string
    # Strip the control parameters the view handles itself.
    for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR):
        if i in lookup_params:
            del lookup_params[i]
    # .items() returns a list under Python 2, so mutating the dict inside
    # the loop is safe here.
    for key, value in lookup_params.items():
        if not isinstance(key, str):
            # 'key' will be used as a keyword argument later, so Python
            # requires it to be a string.  Rebind 'key' to the converted
            # name so the '__in' handling below updates the entry we just
            # stored (previously it re-inserted the deleted unicode key
            # with the unsplit value).
            del lookup_params[key]
            key = smart_str(key)
            lookup_params[key] = value
        # if key ends with __in, split parameter into separate values
        if key.endswith('__in'):
            lookup_params[key] = value.split(',')
    # Apply lookup parameters from the query string.
    try:
        qs = qs.filter(**lookup_params)
    # Naked except! Because we don't have any other way of validating "params".
    # They might be invalid if the keyword arguments are incorrect, or if the
    # values are not in the correct type, so we might get FieldError, ValueError,
    # ValidationError, or ? from a custom field that raises yet something else
    # when handed impossible data.
    except:
        raise IncorrectLookupParameters
    # Use select_related() if one of the list_display options is a field
    # with a relationship and the provided queryset doesn't already have
    # select_related defined.
    if not qs.query.select_related:
        if self.list_select_related:
            qs = qs.select_related()
        else:
            for field_name in self.list_display:
                try:
                    f = self.lookup_opts.get_field(field_name)
                except models.FieldDoesNotExist:
                    pass
                else:
                    if isinstance(f.rel, models.ManyToOneRel):
                        qs = qs.select_related()
                        break
    # Set ordering.
    if self.order_field:
        qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
    # Apply keyword searches.
    def construct_search(field_name):
        # '^' => prefix match, '=' => exact, '@' => full-text search,
        # otherwise case-insensitive containment.
        if field_name.startswith('^'):
            return "%s__istartswith" % field_name[1:]
        elif field_name.startswith('='):
            return "%s__iexact" % field_name[1:]
        elif field_name.startswith('@'):
            return "%s__search" % field_name[1:]
        else:
            return "%s__icontains" % field_name
    if self.search_fields and self.query:
        for bit in self.query.split():
            or_queries = [models.Q(**{construct_search(str(field_name)): bit}) for field_name in self.search_fields]
            qs = qs.filter(reduce(operator.or_, or_queries))
        for field_name in self.search_fields:
            if '__' in field_name:
                # Searching across a join can produce duplicate rows.
                qs = qs.distinct()
                break
    return qs
0
Example 168
Project: Nitrate Source File: files.py
@user_passes_test(lambda u: u.has_perm('management.add_testattachment'))
def upload_file(request):
    """Store an uploaded file on disk and register it as a TestAttachment.

    Validates the file name encoding and size, refuses to overwrite an
    existing stored file, writes the upload to FILE_UPLOAD_DIR, creates a
    TestAttachment row, and links it to the test plan or test case given
    via ``to_plan_id``/``to_case_id``, redirecting to the matching
    attachment view.  With no uploaded file, just redirects back.
    """
    if request.FILES.get('upload_file'):
        import os
        from datetime import datetime
        from django.conf import settings
        from tcms.core.views import Prompt
        from tcms.management.models import TestAttachment
        upload_file = request.FILES['upload_file']
        try:
            upload_file.name.encode('utf8')
        except UnicodeEncodeError:
            return HttpResponse(Prompt.render(
                request=request,
                info_type=Prompt.Alert,
                info='Upload File name is not legal.',
                next='javascript:window.history.go(-1);',
            ))
        now = datetime.now()
        # Prefix with the user name and a timestamp so different uploads
        # of the same file name do not collide on disk.
        stored_name = '%s-%s-%s' % (
            request.user.username,
            now,
            upload_file.name
        )
        stored_file_name = os.path.join(
            settings.FILE_UPLOAD_DIR, stored_name
        ).replace('\\', '/')
        stored_file_name = smart_str(stored_file_name)
        if upload_file._size > settings.MAX_UPLOAD_SIZE:
            return HttpResponse(Prompt.render(
                request=request,
                info_type=Prompt.Alert,
                info='You upload entity is too large. \
                    Please ensure the file is less than %s bytes. \
                    ' % settings.MAX_UPLOAD_SIZE,
                next='javascript:window.history.go(-1);',
            ))
        # Create the upload directory when it doesn't exist yet.
        try:
            os.listdir(settings.FILE_UPLOAD_DIR)
        except OSError:
            os.mkdir(settings.FILE_UPLOAD_DIR)
        # Refuse to clobber an existing stored file.  The previous probe
        # used open(..., 'ro'), which leaked the file handle and uses a
        # mode string Python 3 rejects.
        if os.path.exists(stored_file_name):
            return HttpResponse(Prompt.render(
                request=request,
                info_type=Prompt.Alert,
                info='File named \'%s\' already exist in upload folder, \
                    please rename to another name for solve conflict.\
                    ' % upload_file.name,
                next='javascript:window.history.go(-1);',
            ))
        # Write the uploaded chunks to disk; 'with' guarantees the handle
        # is closed even if a chunk write fails.
        with open(stored_file_name, 'wb+') as dest:
            for chunk in upload_file.chunks():
                dest.write(chunk)
        # Register the file in the database.
        ta = TestAttachment.objects.create(
            submitter_id=request.user.id,
            description=request.REQUEST.get('description', None),
            file_name=upload_file.name,
            stored_name=stored_name,
            create_date=now,
            mime_type=upload_file.content_type
        )
        if request.REQUEST.get('to_plan_id'):
            from tcms.testplans.models import TestPlanAttachment
            # Fail loudly (ValueError propagates) on a malformed id rather
            # than create a dangling attachment link.
            int(request.REQUEST['to_plan_id'])
            TestPlanAttachment.objects.create(
                plan_id=request.REQUEST.get('to_plan_id'),
                attachment_id=ta.attachment_id,
            )
            return HttpResponseRedirect(
                reverse('tcms.testplans.views.attachment',
                        args=[request.REQUEST['to_plan_id']])
            )
        elif request.REQUEST.get('to_case_id'):
            from tcms.testcases.models import TestCaseAttachment
            int(request.REQUEST['to_case_id'])
            TestCaseAttachment.objects.create(
                attachment_id=ta.attachment_id,
                case_id=request.REQUEST['to_case_id']
            )
            return HttpResponseRedirect(
                reverse('tcms.testcases.views.attachment',
                        args=[request.REQUEST['to_case_id']])
            )
    else:
        # Nothing uploaded: bounce back to the attachment view we came from.
        try:
            return HttpResponseRedirect(
                reverse('tcms.testplans.views.attachment',
                        args=[request.REQUEST['to_plan_id']])
            )
        except KeyError:
            return HttpResponseRedirect(
                reverse('tcms.testcases.views.attachment',
                        args=[request.REQUEST['to_case_id']])
            )
    # A file was uploaded but neither to_plan_id nor to_case_id was given.
    raise NotImplementedError
0
Example 169
Project: django-pluggables Source File: pluggables.py
def render(self, context):
    """Resolve this node's arguments against *context* and reverse a URL.

    Behaves like the stock ``{% url %}`` tag, except that when the request
    carries a ``pluggable`` attribute the view name is prefixed and the
    parent args/kwargs are merged in before reversing.
    """
    resolved_args = [node.resolve(context) for node in self.args]
    resolved_kwargs = {}
    for key, node in self.kwargs.items():
        # Keyword names must be plain ascii strings to be usable as
        # keyword arguments to reverse().
        resolved_kwargs[smart_str(key, 'ascii')] = node.resolve(context)
    request = self.request.resolve(context)
    view_name = self.view_name
    if hasattr(request, 'pluggable'):
        prefix = request.pluggable.prefix
        if prefix:
            view_name = '%s_%s' % (prefix, self.view_name)
        else:
            view_name = '%s' % self.view_name
        parent_args, parent_kwargs = request.pluggable.parent_arguments
        if parent_args:
            resolved_args = parent_args + resolved_args
        if parent_kwargs:
            resolved_kwargs.update(parent_kwargs)
    # Try to look up the URL twice: once given the view name, and again
    # relative to what we guess is the "main" app. If they both fail,
    # re-raise the NoReverseMatch unless we're using the
    # {% url ... as var %} construct in which case return nothing.
    url = ''
    try:
        url = reverse(view_name, args=resolved_args, kwargs=resolved_kwargs)
    except NoReverseMatch:
        project_name = settings.SETTINGS_MODULE.split('.')[0]
        try:
            url = reverse(project_name + '.' + view_name,
                          args=resolved_args, kwargs=resolved_kwargs)
        except NoReverseMatch:
            if self.asvar is None:
                raise
    if self.asvar:
        context[self.asvar] = url
        return ''
    return url
0
Example 170
def check_spam(field):
    """Decorator to check if there is spam in the form"""
    def decorator(view_func):
        @functools.wraps(view_func)
        def wrapper(request, *args, **kwargs):
            if openode_settings.USE_AKISMET and openode_settings.AKISMET_API_KEY == "":
                raise ImproperlyConfigured('You have not set AKISMET_API_KEY')
            if openode_settings.USE_AKISMET and request.method == "POST":
                comment = smart_str(request.POST[field])
                data = {
                    'user_ip': request.META["REMOTE_ADDR"],
                    'user_agent': request.environ['HTTP_USER_AGENT'],
                    'comment_author': smart_str(request.user.username),
                }
                if request.user.is_authenticated():
                    data['comment_author_email'] = request.user.email
                from akismet import Akismet
                api = Akismet(
                    openode_settings.AKISMET_API_KEY,
                    smart_str(openode_settings.APP_URL),
                    "Openode/%s" % get_version()
                )
                # build_data=False: we pass the prepared dict ourselves.
                if api.comment_check(comment, data, build_data=False):
                    logging.debug(
                        'Spam detected in %s post at: %s',
                        request.user.username,
                        datetime.datetime.now()
                    )
                    spam_message = _(
                        'Spam was detected on your post, sorry '
                        'for if this is a mistake'
                    )
                    if request.is_ajax():
                        return HttpResponseForbidden(
                            spam_message,
                            mimetype="application/json"
                        )
                    request.user.message_set.create(message=spam_message)
                    return HttpResponseRedirect(reverse('index'))
            return view_func(request, *args, **kwargs)
        return wrapper
    return decorator
0
Example 171
Project: horizon Source File: tests.py
def _test_usage(self, nova_stu_enabled=True, tenant_deleted=False,
                overview_days_range=1):
    """Render the admin overview page and verify the global usage table.

    Mox replays API expectations in the exact order recorded below, so
    the statement order must mirror the order the view makes its calls.

    nova_stu_enabled -- whether the SimpleTenantUsage nova extension is
        reported as supported; when False, no usage rows are expected.
    tenant_deleted -- when True, keystone returns only the first tenant,
        so the second usage row must render as "3 (Deleted)".
    overview_days_range -- width (in days) of the queried date range.
    """
    self._stub_api_calls(nova_stu_enabled)
    api.nova.extension_supported(
        'SimpleTenantUsage', IsA(http.HttpRequest)) \
        .AndReturn(nova_stu_enabled)
    usage_list = [api.nova.NovaUsage(u) for u in self.usages.list()]
    # A deleted tenant is simply absent from keystone's tenant list.
    if tenant_deleted:
        api.keystone.tenant_list(IsA(http.HttpRequest)) \
            .AndReturn([[self.tenants.first()], False])
    else:
        api.keystone.tenant_list(IsA(http.HttpRequest)) \
            .AndReturn([self.tenants.list(), False])
    if nova_stu_enabled:
        # The view queries usage from midnight of the range start through
        # 23:59:59 of "now".
        start_day, now = self._get_start_end_range(overview_days_range)
        api.nova.usage_list(IsA(http.HttpRequest),
                            datetime.datetime(start_day.year,
                                              start_day.month,
                                              start_day.day, 0, 0, 0, 0),
                            datetime.datetime(now.year,
                                              now.month,
                                              now.day, 23, 59, 59, 0)) \
            .AndReturn(usage_list)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .AndReturn(self.limits['absolute'])
    api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                       'security-group').AndReturn(True)
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
        .AndReturn(self.floating_ips.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.q_secgroups.list())
    api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.cinder_limits['absolute'])
    self.mox.ReplayAll()
    res = self.client.get(reverse('horizon:admin:overview:index'))
    self.assertTemplateUsed(res, 'admin/overview/usage.html')
    self.assertIsInstance(res.context['usage'], usage.GlobalUsage)
    self.assertEqual(nova_stu_enabled,
                     res.context['simple_tenant_usage_enabled'])
    # Expected HTML row for the first (live) tenant; assertContains with
    # html=True compares parsed HTML, so whitespace is not significant.
    usage_table = encoding.smart_str(u'''
        <tr class="" data-object-id="1" id="global_usage__row__1">
        <td class="sortable normal_column">test_tenant</td>
        <td class="sortable normal_column">%s</td>
        <td class="sortable normal_column">%s</td>
        <td class="sortable normal_column">%s</td>
        <td class="sortable normal_column">%.2f</td>
        <td class="sortable normal_column">%.2f</td>
        <td class="sortable normal_column">%.2f</td>
        </tr>
        ''' % (usage_list[0].vcpus,
               sizeformat.diskgbformat(usage_list[0].local_gb),
               sizeformat.mb_float_format(usage_list[0].memory_mb),
               usage_list[0].vcpu_hours,
               usage_list[0].disk_gb_hours,
               usage_list[0].memory_mb_hours)
        )
    # test for deleted project
    usage_table_deleted = encoding.smart_str(u'''
        <tr class="" data-object-id="3" id="global_usage__row__3">
        <td class="sortable normal_column">3 (Deleted)</td>
        <td class="sortable normal_column">%s</td>
        <td class="sortable normal_column">%s</td>
        <td class="sortable normal_column">%s</td>
        <td class="sortable normal_column">%.2f</td>
        <td class="sortable normal_column">%.2f</td>
        <td class="sortable normal_column">%.2f</td>
        </tr>
        ''' % (usage_list[1].vcpus,
               sizeformat.diskgbformat(usage_list[1].local_gb),
               sizeformat.mb_float_format(usage_list[1].memory_mb),
               usage_list[1].vcpu_hours,
               usage_list[1].disk_gb_hours,
               usage_list[1].memory_mb_hours)
        )
    if nova_stu_enabled:
        self.assertContains(res, usage_table, html=True)
        if tenant_deleted:
            self.assertContains(res, usage_table_deleted, html=True)
        else:
            self.assertNotContains(res, usage_table_deleted, html=True)
    else:
        self.assertNotContains(res, usage_table, html=True)
0
Example 172
@login_required
def review(request, proposalid):
    """Topic-lead review of a session proposal.

    Only the lead of the proposal's topic may access this view.  On a
    valid POST: saves the review form, records any reviewer comment, and
    emails the proposer when the status changed.  On GET (or an invalid
    form) renders the review page.
    """
    proposal = Proposal.objects.get(id=proposalid)
    if not topiclead(request.user, proposal.topic):
        return HttpResponseForbidden("Forbidden")
    previous_status = proposal.status
    previous_status_long = proposal.get_status_display()
    if request.method == 'POST':
        form = ProposalReviewForm(request.POST, instance=proposal)
        if form.is_valid():
            form.save()
            reviewer_notes = ''
            if form.cleaned_data['comment']:
                reviewer_notes = form.cleaned_data['comment']
                note = Comment()
                note.proposal = proposal
                note.author = request.user
                note.content = reviewer_notes
                note.save()
            status_changed = previous_status != proposal.status
            if settings.SEND_MAIL and status_changed:
                lead = User.objects.get(username=proposal.topic.lead_username)
                if lead.email and proposal.proposer.email:
                    message = """
This is an automated email.
If needed, you should reply directly to the topic lead (%s).
On your session proposal: %s
The topic lead (%s) changed status from %s to %s.
Reviewer's notes:
%s
You can access your proposal at: %s/cfp/details/%s""" \
                        % (proposal.topic.lead_username,
                           smart_str(proposal.title),
                           proposal.topic.lead_username,
                           previous_status_long, proposal.get_status_display(),
                           smart_str(reviewer_notes),
                           settings.SITE_ROOT, proposalid)
                    mail = EmailMessage(settings.EMAIL_PREFIX +
                                        "Status change on your session proposal",
                                        message, settings.EMAIL_FROM,
                                        [proposal.proposer.email, ], [],
                                        headers={'Reply-To': lead.email})
                    mail.send()
            return HttpResponseRedirect('/cfp/topic/%d' % proposal.topic.id)
    else:
        form = ProposalReviewForm(instance=proposal)
    comments = Comment.objects.filter(proposal=proposal)
    return TemplateResponse(request, 'cfpreview.html',
                            {'form': form,
                             'proposal': proposal,
                             'comments': comments,
                             'blueprints': linkify(proposal.blueprints)})