Here are examples of the Python API `django.utils.datastructures.SortedDict`, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
144 Examples
0
Example 101
Project: elijah-openstack Source File: views.py
def get_instances_data(self):
    """Return synthesized cloudlet instances with flavor info attached.

    Fetches the instance list from nova, correlates each instance with
    its flavor (falling back to a per-flavor nova call when the flavor
    is not in the cached list), then keeps only instances whose
    cloudlet type is a base disk ("Resumed Base VM") or an overlay
    ("Provisioned VM"), tagging each with a ``cloudlet_type`` attribute.
    """
    # Gather synthesized instances
    try:
        (instances, self._more_instances) = api.nova.server_list(self.request)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; horizon's exceptions.handle still
        # reports API failures to the user.
        instances = []
        exceptions.handle(self.request,
                          _('Unable to retrieve instances.'))
    # Gather our flavors and correlate our instances to them
    filtered_instances = []
    if instances:
        try:
            flavors = api.nova.flavor_list(self.request)
        except Exception:
            flavors = []
            exceptions.handle(self.request, ignore=True)
        full_flavors = SortedDict([(str(flavor.id), flavor)
                                   for flavor in flavors])
        # Loop through instances to get flavor info.
        for instance in instances:
            try:
                flavor_id = instance.flavor["id"]
                if flavor_id in full_flavors:
                    instance.full_flavor = full_flavors[flavor_id]
                else:
                    # If the flavor_id is not in full_flavors list,
                    # get it via nova api.
                    instance.full_flavor = api.nova.flavor_get(
                        self.request, flavor_id)
            except Exception:
                msg = _('Unable to retrieve instance size information.')
                exceptions.handle(self.request, msg)
        # Keep only cloudlet-relevant instances, tagging each with a
        # human-readable type label.
        for instance in instances:
            instance_type = get_cloudlet_type(instance)
            if instance_type == CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
                filtered_instances.append(instance)
                instance.cloudlet_type = "Resumed Base VM"
            if instance_type == CLOUDLET_TYPE.IMAGE_TYPE_OVERLAY:
                filtered_instances.append(instance)
                instance.cloudlet_type = "Provisioned VM"
    return filtered_instances
0
Example 102
def __init__(self, meta, app_label=None):
    """Initialize model meta-options with their default values.

    :param meta: the inner ``Meta`` class (or ``None``) declared on the model.
    :param app_label: optional application label this model belongs to.
    """
    # Store many-to-many fields separately from the other fields --
    # what is the reason??? (translated from the original Chinese note)
    self.local_fields, self.local_many_to_many = [], []
    self.virtual_fields = []
    self.module_name, self.verbose_name = None, None
    self.verbose_name_plural = None
    self.db_table = ''
    self.ordering = []
    self.unique_together = []
    self.index_together = []
    self.permissions = []
    self.object_name, self.app_label = None, app_label
    self.get_latest_by = None
    self.order_with_respect_to = None
    self.db_tablespace = settings.DEFAULT_TABLESPACE
    self.admin = None
    self.meta = meta
    self.pk = None
    self.has_auto_field, self.auto_field = False, None
    self.abstract = False
    self.managed = True
    self.proxy = False
    # For any class that is a proxy (including automatically created
    # classes for deferred object loading), proxy_for_model tells us
    # which class this model is proxying. Note that proxy_for_model
    # can create a chain of proxy models. For non-proxy models, the
    # variable is always None.
    self.proxy_for_model = None
    # For any non-abstract class, the concrete class is the model
    # in the end of the proxy_for_model chain. In particular, for
    # concrete models, the concrete_model is always the class itself.
    self.concrete_model = None
    self.swappable = None
    self.parents = SortedDict()
    # When two fields both relate to the same external tables -- e.g.
    # two foreign keys -- a collection must be created to track the
    # duplicate targets. (translated from the original Chinese note)
    self.duplicate_targets = {}
    self.auto_created = False
    # To handle various inheritance situations, we need to track where
    # managers came from (concrete or abstract base classes).
    self.abstract_managers = []
    self.concrete_managers = []
    # List of all lookups defined in ForeignKey 'limit_choices_to' options
    # from *other* models. Needed for some admin checks. Internal use only.
    self.related_fkey_lookups = []
0
Example 103
Project: djangorest-alchemy Source File: serializers.py
def get_fields(self):
    """Build the serializer field map for the wrapped SQLAlchemy model.

    Insertion order: the ``href`` URI field (when a primary key can be
    found), then one field per mapped column, then one related field
    per relationship.
    """
    result = SortedDict()
    model_cls = self.cls.__class__
    mapper = class_mapper(model_cls)
    request = self.context['request']
    absolute_path = request.build_absolute_uri(request.path)
    try:
        # Expose the primary key as an absolute-URI "href" field.
        result['href'] = AlchemyUriField(
            source=primary_key(model_cls),
            path=absolute_path,
            read_only=True,
        )
    except KeyNotFoundException:
        # No discoverable primary key -- skip the href field.
        pass
    # One serializer field per mapped column.
    for prop in mapper.iterate_properties:
        if not isinstance(prop, ColumnProperty):
            continue
        name = str(prop).split('.')[1]
        column_type = prop.columns[0].type.__class__
        assert column_type in self.field_mapping, \
            "Field %s has not been mapped" % column_type
        result[name] = self.field_mapping[column_type]()
    # One related field per relationship property.
    for prop in mapper.iterate_properties:
        if not isinstance(prop, RelationshipProperty):
            continue
        name = str(prop).split('.')[1]
        related_kwargs = {'path': absolute_path, 'read_only': True}
        # "many" mirrors uselist so RelatedField can iterate the queryset.
        if prop.uselist:
            related_kwargs['many'] = True
        result[name] = AlchemyRelatedField(**related_kwargs)
    return result
0
Example 104
Project: nexus Source File: nexus_helpers.py
def show_navigation(context):
    """Template helper: build per-category navigation links for a nexus
    site, honouring module permissions and flagging the active link.
    """
    site = context.get('nexus_site', NexusModule.get_global('site'))
    request = NexusModule.get_request()
    # Seed one bucket per declared category, preserving declaration order.
    category_link_set = SortedDict([
        (key, {'label': label, 'links': []})
        for key, label in site.get_categories()
    ])
    for namespace, entry in site._registry.iteritems():
        module, category = entry
        if module.permission and not request.user.has_perm(module.permission):
            continue  # current user may not see this module
        home_url = None
        if 'request' in context:
            home_url = module.get_home_url(context['request'])
        if not home_url:
            continue  # nothing to link to
        active = request.path.startswith(home_url)
        if category not in category_link_set:
            # Lazily add categories not returned by get_categories().
            category_link_set[category] = {
                'label': site.get_category_label(category) if category else None,
                'links': [],
            }
        bucket = category_link_set[category]
        bucket['links'].append((module.get_title(), home_url, active))
        bucket['active'] = active
    return {
        'nexus_site': site,
        'category_link_set': category_link_set.itervalues(),
    }
0
Example 105
Project: django-dataforms Source File: forms.py
def _generate_bound_fields(self):
    """Wrap every form field in a BoundField, preserving field order."""
    bound = SortedDict()
    for field_name, field in self.fields.items():
        bound[field_name] = BoundField(self, field, field_name)
    self.bound_fields = bound
0
Example 106
Project: django-dataforms Source File: forms.py
def _create_form(form, title=None, description=None):
    """
    Creates a form class object.

    Usage::

        FormClass = _create_form(dataform="myForm")
        form = FormClass(data=request.POST)

    :param form: a data form slug or object
    :param title: optional title; pulled from DB by default
    :param description: optional description; pulled from DB by default
    :param readonly: optional readonly; converts form fields to be readonly.
        Useful for display only logic.
        (NOTE(review): ``readonly`` is documented here but not accepted by
        this function's signature -- confirm against callers.)

    :returns: a ``(DataFormClass, query_data)`` tuple; ``query_data`` holds
        the querysets/lists used to build the class so they can be re-used.
    """
    # Make sure the form definition exists before continuing
    # Slightly evil, do type checking to see if form is a DataForm object or string
    # If form object is a slug then get the form object and reassign
    if isinstance(form, str) or isinstance(form, unicode):
        try:
            form = DataForm.objects.get(visible=True, slug=form)
        except DataForm.DoesNotExist:
            raise DataForm.DoesNotExist('DataForm %s does not exist. Make sure the slug name is correct and the form is visible.' % form)
    # Otherwise it should be a form model object, if not raise
    elif not isinstance(form, DataForm):
        raise AttributeError('Dataform %s is not a valid data form object.' % form)

    meta = {}
    # ``form`` was normalized to a DataForm above, so this takes the
    # ``form.slug`` branch in practice.
    slug = form if isinstance(form, str) or isinstance(form, unicode) else form.slug

    final_fields = SortedDict()
    choices_dict = defaultdict(tuple)

    # Class attributes for the form class built at the bottom;
    # 'declared_fields' and 'base_fields' intentionally share one dict.
    attrs = {
        'declared_fields' : final_fields,
        'base_fields' : final_fields,
        'meta' : meta,
        'slug' : slug,
    }

    # Parse the slug and create a class title
    form_class_title = create_form_class_title(slug)

    # Set the title and/or the description from the DB (but only if it wasn't given)
    meta['title'] = safe(form.title if not title else title)
    meta['description'] = safe(form.description if not description else description)
    meta['slug'] = form.slug

    # Get all the fields
    fields_qs = Field.objects.filter(
        dataformfield__data_form__slug=slug,
        visible=True
    ).order_by('dataformfield__order')

    fields = [field for field in fields_qs.values()]

    if not fields:
        raise Field.DoesNotExist('Field for %s do not exist. Make sure the slug name is correct and the fields are visible.' % slug)

    # Get all the choices associated to fields
    choices_qs = (
        FieldChoice.objects.select_related('choice', 'field').filter(
            field__dataformfield__data_form__slug=slug,
            field__visible=True
        ).order_by('order')
    )

    # Get the bindings for use in the Field Loop
    bindings = get_bindings(form=form)

    # Add a hidden field used for passing information to the JavaScript bindings function
    fields.append({
        'field_type': 'HiddenInput',
        'slug': 'js_dataform_bindings',
        'initial': safe(force_escape(json.dumps(bindings))),
        'required': False,
    })

    # Populate our choices dictionary (field pk -> tuple of (value, title) pairs)
    for row in choices_qs:
        choices_dict[row.field.pk] += (row.choice.value, safe(row.choice.title)),

    # Process the field mappings and import any modules specified by string name
    for key in FIELD_MAPPINGS:
        # Replace the string arguments with the actual modules or classes
        for sub_key in ('class', 'widget'):
            if not FIELD_MAPPINGS[key].has_key(sub_key):
                continue

            value = FIELD_MAPPINGS[key][sub_key]

            if isinstance(value, str) or isinstance(value, unicode):
                names = value.split(".")
                module_name = ".".join(names[:-1])
                class_name = names[-1]
                module = __import__(module_name, fromlist=[class_name])
                # Replace the string with a class pointer (mutates the
                # module-level FIELD_MAPPINGS, so this only happens once)
                FIELD_MAPPINGS[key][sub_key] = getattr(module, class_name)

        # Handle widget arguments
        if not FIELD_MAPPINGS[key].has_key('widget_kwargs'):
            # Initialize all field-mappings that don't have a 'widget_kwargs' key
            FIELD_MAPPINGS[key]['widget_kwargs'] = {}

    # ----- Field Loop -----
    # Populate our fields dictionary for this form
    for row in fields:
        form_field_name = _field_for_form(name=row['slug'], form=slug)

        field_kwargs = {}
        field_map = FIELD_MAPPINGS[row['field_type']]
        widget_attrs = field_map.get('widget_attrs', {})

        if row.has_key('label'):
            field_kwargs['label'] = safe(row['label'])
        if row.has_key('help_text'):
            field_kwargs['help_text'] = safe(row['help_text'])
        if row.has_key('initial'):
            field_kwargs['initial'] = row['initial']
        if row.has_key('required'):
            field_kwargs['required'] = row['required']

        additional_field_kwargs = {}
        if row.has_key('arguments') and row['arguments'].strip():
            # Parse any additional field arguments as JSON and include them in field_kwargs
            temp_args = json.loads(str(row['arguments']))
            for arg in temp_args:
                additional_field_kwargs[str(arg)] = temp_args[arg]

        # Update the field arguments with the "additional arguments" JSON in the DB
        field_kwargs.update(additional_field_kwargs)

        # Get the choices for single and multiple choice fields
        if row['field_type'] in CHOICE_FIELDS:
            choices = ()

            # We add a separator for select boxes
            if row['field_type'] == 'Select':
                choices += ('', '--------'),

            choices_func = getattr(choices_module, row['slug'].replace('-', '_'), None)

            # Populate our choices tuple
            if choices_func:
                choices += choices_func()
            else:
                choices += choices_dict[row['id']]
            field_kwargs['choices'] = choices

            # NOTE(review): the code below assumes field_kwargs['initial']
            # exists for choice fields (i.e. the DB row always carries an
            # 'initial' value) -- confirm, otherwise this raises KeyError.
            if row['field_type'] in MULTI_CHOICE_FIELDS:
                # Get all of the specified default selected values (as a list, even if one element)
                field_kwargs['initial'] = (
                    field_kwargs['initial'].split(',')
                    if ',' in field_kwargs['initial']
                    else [field_kwargs['initial'], ]
                )
                # Remove whitespace so the user can use spaces
                field_kwargs['initial'] = [element.strip() for element in field_kwargs['initial']]
            else:
                field_kwargs['initial'] = ''.join(field_kwargs['initial'])

        # Add our additional css classes
        if row.has_key('classes'):
            existing_widget_attrs = widget_attrs.get('class', '')
            widget_attrs['class'] = existing_widget_attrs + ' '.join(row['classes'].split(',')).strip()

        # Add bindings css class
        #FIXME: Should we be adding this on the widget or field?
        if row['field_type'] != 'HiddenInput':
            # NOTE(review): assumes widget_attrs already has a 'class' key
            # here -- confirm FIELD_MAPPINGS always provides one, otherwise
            # this raises KeyError for rows without 'classes'.
            if not 'dataform-field' in widget_attrs['class']:
                widget_attrs['class'] += " dataform-field"

        # Instantiate the widget that this field will use
        # TODO: Possibly create logic that passes submissionid to file upload widget to handle file
        # paths without enforcing a redirect.
        if field_map.has_key('widget'):
            field_kwargs['widget'] = field_map['widget'](attrs=widget_attrs, **field_map['widget_kwargs'])

        # Add this field, including any widgets and additional arguments
        # (initial, label, required, help_text, etc)
        final_field = field_map['class'](**field_kwargs)
        final_field.is_checkbox = (row['field_type'] == 'CheckboxInput')
        final_field.dataform_key = row['field_type']
        final_fields[form_field_name] = final_field

    # Grab the dynamic validation function from validation.py
    if validation_module:
        validate = getattr(validation_module, form_class_title, None)

        if validate:
            # Pull the "clean_" functions from the validation
            # for this form and inject them into the form object
            for attr_name in dir(validate):
                if attr_name.startswith('clean'):
                    attrs[attr_name] = getattr(validate, attr_name)

    # Return a class object of this form with all attributes
    DataFormClass = type(form_class_title, (BaseDataForm,), attrs)

    # Also return the querysets so that they can be re-used
    query_data = {
        'dataform_query' : form,
        'choice_query' : choices_qs,
        'field_query' : fields_qs,
        'fields_list' : fields,
    }

    return DataFormClass, query_data
0
Example 107
Project: django-accounting Source File: checks.py
def get_checking_fields(self, special_exclude=('id',)):
    """
    Returns the set of fields on which we perform checkings.

    :param special_exclude: field attnames always skipped regardless of
        ``self.opts`` (default: ``('id',)``); any container supporting
        ``in`` works.  Changed from a mutable default list to an
        immutable tuple -- sharing a mutable default across calls is a
        classic Python pitfall.
    :returns: a ``SortedDict`` mapping field/accessor names to field
        objects, restricted and ordered by ``self.opts.fields`` and
        pruned by ``self.opts.exclude`` when those are set.
    :raises NotImplementedError: for reverse to-one relationships.
    """
    ret = SortedDict()
    for f in self._meta.fields:
        # avoid special_exclude fields
        if f.attname in special_exclude:
            continue
        ret[f.attname] = f

    # Deal with reverse relationships
    reverse_rels = self._meta.get_all_related_objects()
    # reverse_rels += self._meta.get_all_related_many_to_many_objects()
    for relation in reverse_rels:
        accessor_name = relation.get_accessor_name()
        to_many = relation.field.rel.multiple
        # only consider reverse relations explicitly listed in opts.fields
        if not self.opts.fields or accessor_name not in self.opts.fields:
            continue
        if not to_many:
            # reverse to-one checking is not supported yet
            raise NotImplementedError
        ret[accessor_name] = PrimaryKeyRelatedField()

    # If 'fields' is specified, use those fields, in that order.
    if self.opts.fields:
        new = SortedDict()
        for key in self.opts.fields:
            new[key] = ret[key]
        ret = new

    # Remove anything in 'exclude'
    if self.opts.exclude:
        for key in self.opts.exclude:
            ret.pop(key, None)

    return ret
0
Example 108
def url(self):
    """Assemble and return the Google Chart API URL for this chart.

    Fills in the data range if unset, picks text vs. extended data
    encoding, applies option defaults, then appends axis parameters.

    NOTE(review): this mutates ``self.options``, ``self.datasets`` and
    ``self.datarange`` as side effects, so repeated calls are not
    idempotent for map charts (``cht=t`` pops ``_mapdata``).
    """
    if self.options.get('cht', None) == 't':
        self.datasets.append(self.options.pop("_mapdata"))
    # Figure out the chart's data range
    if not self.datarange:
        maxvalue = max(max(d) for d in self.datasets if d)
        minvalue = min(min(d) for d in self.datasets if d)
        self.datarange = (minvalue, maxvalue)
    # Encode data
    if "chds" in self.options or self.options.get('cht', None) == 'gom':
        # text encoding if scaling provided, or for google-o-meter type
        data = "|".join(encode_text(d) for d in self.datasets)
        encoded_data = "t:%s" % data
    else:
        # extended encoding otherwise
        data = extended_separator.join(encode_extended(d, self.datarange) for d in self.datasets)
        encoded_data = "e:%s" % data
    # Update defaults (only for keys the caller did not set)
    for k in self.defaults:
        if k not in self.options:
            self.options[k] = self.defaults[k]
    # Start to calculate the URL
    url = "%s?%s&chd=%s" % (self.BASE, urlencode(self.options), encoded_data)
    # Calculate axis options
    if self.axes:
        axis_options = SortedDict()
        axis_sides = []
        for i, axis in enumerate(self.axes):
            axis_sides.append(axis.side)
            for opt in axis.options:
                # each option value is a template keyed by axis index
                axis_options.setdefault(opt, []).append(axis.options[opt] % i)
        # Turn the option lists into strings
        axis_sides = smart_join(",", *axis_sides)
        for opt in axis_options:
            axis_options[opt] = smart_join("|", *axis_options[opt])
        url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
    return url
0
Example 109
Project: eulxml Source File: xmlobject.py
def formfields_for_xmlobject(model, fields=None, exclude=None, widgets=None, options=None,
                             declared_subforms=None, max_num=None, extra=None):
    """
    Returns three sorted dictionaries (:class:`django.utils.datastructures.SortedDict`)
    plus a label mapping.

     * The first is a dictionary of form fields based on the
       :class:`~eulxml.xmlmap.XmlObject` class fields and their types.
     * The second is a sorted dictionary of subform classes for any fields of type
       :class:`~eulxml.xmlmap.fields.NodeField` on the model.
     * The third is a sorted dictionary of formsets for any fields of type
       :class:`~eulxml.xmlmap.fields.NodeListField` on the model.
     * The fourth is a dictionary of subform/formset labels keyed on field name.

    Default sorting (within each dictionary) is by XmlObject field creation order.

    Used by :class:`XmlObjectFormType` to set up a new :class:`XmlObjectForm`
    class.

    :param fields: optional list of field names; if specified, only the named fields
        will be returned, in the specified order
    :param exclude: optional list of field names that should not be included on
        the form; if a field is listed in both ``fields`` and ``exclude``,
        it will be excluded
    :param widgets: optional dictionary of widget options to be passed to form
        field constructor, keyed on field name
    :param options: optional :class:`~django.forms.models.ModelFormOptions`.
        if specified then fields, exclude, and widgets will default
        to its values.
    :param declared_subforms: optional dictionary of field names and form classes;
        if specified, the specified form class will be used to initialize
        the corresponding subform (for a :class:`~eulxml.xmlmap.fields.NodeField`)
        or a formset (for a :class:`~eulxml.xmlmap.fields.NodeListField`)
    :param max_num: optional value for the maximum number of times a fieldset should repeat.
    :param extra: optional value for the number of extra forms to provide.
    """
    # first collect fields and excludes for the form and all subforms. base
    # these on the specified options object unless overridden in args.
    fieldlist = getattr(options, 'parsed_fields', None)
    if isinstance(fields, ParsedFieldList):
        fieldlist = fields
    elif fields is not None:
        fieldlist = _parse_field_list(fields, include_parents=True)

    excludelist = getattr(options, 'parsed_exclude', None)
    # BUG FIX: this block previously re-tested ``fields`` and re-assigned
    # ``fieldlist`` (a copy/paste of the block above), so a ParsedFieldList
    # passed as ``exclude`` was silently ignored.
    if isinstance(exclude, ParsedFieldList):
        excludelist = exclude
    elif exclude is not None:
        excludelist = _parse_field_list(exclude, include_parents=False)

    if widgets is None and options is not None:
        widgets = options.widgets
    if max_num is None and options is not None:
        max_num = options.max_num

    # collect the fields (unordered for now) that we're going to be returning
    formfields = {}
    subforms = {}
    formsets = {}
    field_order = {}
    subform_labels = {}

    for name, field in six.iteritems(model._fields):
        if fieldlist and not name in fieldlist.fields:
            # if specific fields have been requested and this is not one of them, skip it
            continue
        if excludelist and name in excludelist.fields:
            # if exclude has been specified and this field is listed, skip it
            continue
        if widgets and name in widgets:
            # if a widget has been specified for this field, pass as option to form field init
            kwargs = {'widget': widgets[name]}
        else:
            kwargs = {}
        # get appropriate form widget based on xmlmap field type
        field_type = None

        # if the xmlmap field knows whether or not it is required, use for form
        if field.required is not None:
            kwargs['required'] = field.required
        if field.verbose_name is not None:
            kwargs['label'] = field.verbose_name
        if field.help_text is not None:
            kwargs['help_text'] = field.help_text

        if hasattr(field, 'choices') and field.choices:
            # if a field has choices defined, use a choice field (no matter what base type)
            field_type = ChoiceField
            kwargs['choices'] = [(val, val) for val in field.choices]
            # FIXME: how to properly do non-required choice field?
            # if field is optional, add a blank choice at the beginning of the list
            if field.required == False and '' not in field.choices:
                # TODO: add an empty_label option (like django ModelChoiceField)
                # to xmlobjectform and pass it in to make this easier to customize
                kwargs['choices'].insert(0, ('', ''))
        elif isinstance(field, xmlmap.fields.StringField):
            field_type = CharField
        elif isinstance(field, xmlmap.fields.IntegerField):
            field_type = IntegerField
        elif isinstance(field, xmlmap.fields.DateField):
            field_type = DateField
        elif isinstance(field, xmlmap.fields.SimpleBooleanField):
            # by default, fields are required - for a boolean, required means it must be checked
            # since that seems nonsensical and not useful for a boolean,
            # setting required to False to allow True or False values
            kwargs['required'] = False
            field_type = BooleanField

        # datefield ? - not yet well-supported; leaving out for now
        # ... should probably distinguish between date and datetime field

        elif isinstance(field, xmlmap.fields.NodeField) or \
                isinstance(field, xmlmap.fields.NodeListField):
            form_label = kwargs['label'] if 'label' in kwargs else fieldname_to_label(name)
            # store subform label in case we can't set on subform/formset
            subform_labels[name] = form_label

            # if a subform class was declared, use that class exactly as is
            # (robustness: guard against declared_subforms being None, its default)
            if declared_subforms and name in declared_subforms:
                subform = declared_subforms[name]

            # otherwise, define a new xmlobject form for the nodefield or
            # nodelistfield class, using any options passed in for fields under this one
            else:
                subform_opts = {
                    'fields': fieldlist.subfields[name] if fieldlist and name in fieldlist.subfields else None,
                    'exclude': excludelist.subfields[name] if excludelist and name in excludelist.subfields else None,
                    'widgets': widgets[name] if widgets and name in widgets else None,
                    'label': form_label,
                }
                # create the subform class
                subform = xmlobjectform_factory(field.node_class, **subform_opts)

            # store subform or generate and store formset, depending on field type
            if isinstance(field, xmlmap.fields.NodeField):
                subforms[name] = subform
            elif isinstance(field, xmlmap.fields.NodeListField):
                # formset_factory is from django core and we link into it here.
                formsets[name] = formset_factory(subform, formset=BaseXmlObjectFormSet,
                    max_num=subform._meta.max_num, can_delete=subform._meta.can_delete,
                    extra=subform._meta.extra, can_order=subform._meta.can_order)
                formsets[name].form_label = form_label

        elif isinstance(field, xmlmap.fields.StringListField) or \
                isinstance(field, xmlmap.fields.IntegerListField):
            form_label = kwargs['label'] if 'label' in kwargs else fieldname_to_label(name)

            if isinstance(field, xmlmap.fields.IntegerListField):
                listform = IntegerListFieldForm
            else:
                listform = ListFieldForm

            # generate a listfield formset
            formsets[name] = formset_factory(listform, formset=BaseXmlObjectListFieldFormSet)
            # don't need can_delete: since each form is a single field, empty implies delete
            # todo: extra, max_num ? widget?
            formsets[name].form_label = form_label

        # TODO: other list variants

        else:
            # raise exception for unsupported fields
            # currently doesn't handle list fields
            raise Exception('Error on field "%s": XmlObjectForm does not yet support auto form field generation for %s.' \
                % (name, field.__class__))

        if field_type is not None:
            if 'label' not in kwargs:
                kwargs['label'] = fieldname_to_label(name)
            formfields[name] = field_type(**kwargs)

        # create a dictionary indexed by field creation order, for default field ordering
        field_order[field.creation_counter] = name

    # if fields were explicitly specified, return them in that order
    if fieldlist:
        ordered_fields = SortedDict((name, formfields[name])
                                    for name in fieldlist.fields
                                    if name in formfields)
        ordered_subforms = SortedDict((name, subforms[name])
                                      for name in fieldlist.fields
                                      if name in subforms)
        ordered_formsets = SortedDict((name, formsets[name])
                                      for name in fieldlist.fields
                                      if name in formsets)
    else:
        # sort on field creation counter and generate a django sorted dictionary
        ordered_fields = SortedDict(
            [(field_order[key], formfields[field_order[key]]) for key in sorted(field_order.keys())
             if field_order[key] in formfields]
        )
        ordered_subforms = SortedDict(
            [(field_order[key], subforms[field_order[key]]) for key in sorted(field_order.keys())
             if field_order[key] in subforms]
        )
        ordered_formsets = SortedDict(
            [(field_order[key], formsets[field_order[key]]) for key in sorted(field_order.keys())
             if field_order[key] in formsets]
        )
    return ordered_fields, ordered_subforms, ordered_formsets, subform_labels
0
Example 110
def fields_for_model(instance, fields=None, exclude=None,
                     formfield_callback=None):
    """
    Returns a ``SortedDict`` containing form fields for the given form object.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.
    """
    field_list = []
    for f in instance.ordered_fields:
        if fields and f not in fields:
            continue
        # BUG FIX: this previously read ``if exclude and not f in exclude``,
        # which skipped every field NOT in ``exclude`` -- the inverse of the
        # documented behavior (and of the redundant exclude filter below).
        if exclude and f in exclude:
            continue
        formfield = formfield_for_model_field(instance, f)
        if formfield:
            field_list.append((f, formfield))
    field_dict = SortedDict(field_list)
    if fields:
        # Re-order per the explicit ``fields`` list, still honouring exclude.
        field_dict = SortedDict(
            [(f, field_dict.get(f)) for f in fields
             if ((not exclude) or (exclude and f not in exclude))]
        )
    return field_dict
0
Example 111
def get_declared_fields(bases, attrs, cls_filter,
                        with_base_fields=True,
                        extra_attr_name='base_fields'):
    """
    Create a list of form field instances from the passed in 'attrs', plus any
    similar fields on the base classes (in 'bases'). This is used by both the
    Form and ModelForm metaclasses.

    If 'with_base_fields' is True, all fields from the bases are used.
    Otherwise, only fields in the 'declared_fields' attribute on the bases are
    used. The distinction is useful in ModelForm subclassing.
    Also integrates any additional media definitions.
    """
    # Pull field instances out of attrs (removing them as we go) and keep
    # them in declaration order via their creation counters.
    own_fields = [(attr_name, attrs.pop(attr_name))
                  for attr_name, value in attrs.items()
                  if isinstance(value, cls_filter)]
    own_fields.sort(key=lambda pair: pair[1].creation_counter)

    collected = own_fields
    # Walk the bases in *reverse* so fields from earlier bases end up first,
    # preserving the expected field order. (Python 2: .items() is a list,
    # so list concatenation below is valid.)
    if with_base_fields:
        for base in reversed(bases):
            if hasattr(base, extra_attr_name):
                collected = getattr(base, extra_attr_name).items() + collected
    else:
        for base in reversed(bases):
            if hasattr(base, 'declared_fields'):
                collected = base.declared_fields.items() + collected

    return SortedDict(collected)
0
Example 112
Project: lettuce Source File: dumpdata.py
def handle(self, *app_labels, **options):
    """Serialize the contents of the requested apps/models.

    ``app_labels`` may be app names (``"auth"``) or dotted ``app.Model``
    labels; with no labels, every installed app not in ``exclude`` is
    dumped.  Returns the serialized string from the chosen serializer.

    :raises CommandError: for unknown apps, models, or serialization
        formats, or when serialization fails.
    (Python 2 code: note the ``except Exception, e`` syntax.)
    """
    from django.db.models import get_app, get_apps, get_models, get_model

    format = options.get('format','json')
    indent = options.get('indent',None)
    using = options.get('database', DEFAULT_DB_ALIAS)
    connection = connections[using]
    exclude = options.get('exclude',[])
    show_traceback = options.get('traceback', False)
    use_natural_keys = options.get('use_natural_keys', False)

    excluded_apps = set(get_app(app_label) for app_label in exclude)

    if len(app_labels) == 0:
        # No labels: dump all non-excluded apps. A value of None means
        # "all models of this app".
        app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
    else:
        app_list = SortedDict()
        for label in app_labels:
            try:
                app_label, model_label = label.split('.')
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" % app_label)

                model = get_model(app_label, model_label)
                if model is None:
                    raise CommandError("Unknown model: %s.%s" % (app_label, model_label))

                if app in app_list.keys():
                    if app_list[app] and model not in app_list[app]:
                        app_list[app].append(model)
                else:
                    app_list[app] = [model]
            except ValueError:
                # This is just an app - no model qualifier
                app_label = label
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" % app_label)
                app_list[app] = None

    # Check that the serialization format exists; this is a shortcut to
    # avoid collating all the objects and _then_ failing.
    if format not in serializers.get_public_serializer_formats():
        raise CommandError("Unknown serialization format: %s" % format)

    try:
        serializers.get_serializer(format)
    except KeyError:
        raise CommandError("Unknown serialization format: %s" % format)

    # Now collate the objects to be serialized.
    objects = []
    # sort_dependencies orders models so referenced objects come first.
    for model in sort_dependencies(app_list.items()):
        if not model._meta.proxy and router.allow_syncdb(using, model):
            objects.extend(model._default_manager.using(using).all())

    try:
        return serializers.serialize(format, objects, indent=indent,
                use_natural_keys=use_natural_keys)
    except Exception, e:
        if show_traceback:
            raise
        raise CommandError("Unable to serialize database: %s" % e)
0
Example 113
Project: lettuce Source File: syncdb.py
def handle_noargs(self, **options):
    """Create database tables for all uninstalled models, emit the
    ``post_syncdb`` signal, install custom SQL and indexes for newly
    created models, and finally load ``initial_data`` fixtures.

    (Python 2 management command: uses ``print`` statements and the
    ``except ImportError, exc`` syntax.)
    """
    verbosity = int(options.get('verbosity', 1))
    interactive = options.get('interactive')
    show_traceback = options.get('traceback', False)

    # Stealth option -- 'load_initial_data' is used by the testing setup
    # process to disable initial fixture loading.
    load_initial_data = options.get('load_initial_data', True)

    self.style = no_style()

    # Import the 'management' module within each installed app, to register
    # dispatcher events.
    for app_name in settings.INSTALLED_APPS:
        try:
            import_module('.management', app_name)
        except ImportError, exc:
            # This is slightly hackish. We want to ignore ImportErrors
            # if the "management" module itself is missing -- but we don't
            # want to ignore the exception if the management module exists
            # but raises an ImportError for some reason. The only way we
            # can do this is to check the text of the exception. Note that
            # we're a bit broad in how we check the text, because different
            # Python implementations may not use the same text.
            # CPython uses the text "No module named management"
            # PyPy uses "No module named myproject.myapp.management"
            msg = exc.args[0]
            if not msg.startswith('No module named') or 'management' not in msg:
                raise

    db = options.get('database', DEFAULT_DB_ALIAS)
    connection = connections[db]
    cursor = connection.cursor()

    # Get a list of already installed *models* so that references work right.
    tables = connection.introspection.table_names()
    seen_models = connection.introspection.installed_models(tables)
    created_models = set()
    pending_references = {}

    # Build the manifest of apps and models that are to be synchronized
    all_models = [
        (app.__name__.split('.')[-2],
            [m for m in models.get_models(app, include_auto_created=True)
            if router.allow_syncdb(db, m)])
        for app in models.get_apps()
    ]

    def model_installed(model):
        # A model is considered installed when its table (or the table of
        # the auto-created model standing in for it) already exists.
        opts = model._meta
        converter = connection.introspection.table_name_converter
        return not ((converter(opts.db_table) in tables) or
            (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))

    manifest = SortedDict(
        (app_name, filter(model_installed, model_list))
        for app_name, model_list in all_models
    )

    # Create the tables for each model
    for app_name, model_list in manifest.items():
        for model in model_list:
            # Create the model's database table, if it doesn't already exist.
            if verbosity >= 2:
                print "Processing %s.%s model" % (app_name, model._meta.object_name)
            sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
            seen_models.add(model)
            created_models.add(model)
            # Defer FK constraints to models we have not created yet.
            for refto, refs in references.items():
                pending_references.setdefault(refto, []).extend(refs)
                if refto in seen_models:
                    sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
            sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
            if verbosity >= 1 and sql:
                print "Creating table %s" % model._meta.db_table
            for statement in sql:
                cursor.execute(statement)
            tables.append(connection.introspection.table_name_converter(model._meta.db_table))

    transaction.commit_unless_managed(using=db)

    # Send the post_syncdb signal, so individual apps can do whatever they need
    # to do at this point.
    emit_post_sync_signal(created_models, verbosity, interactive, db)

    # The connection may have been closed by a syncdb handler.
    cursor = connection.cursor()

    # Install custom SQL for the app (but only if this
    # is a model we've just created)
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                custom_sql = custom_sql_for_model(model, self.style, connection)
                if custom_sql:
                    if verbosity >= 1:
                        print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                    try:
                        for sql in custom_sql:
                            cursor.execute(sql)
                    except Exception, e:
                        sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                            (app_name, model._meta.object_name, e))
                        if show_traceback:
                            import traceback
                            traceback.print_exc()
                        transaction.rollback_unless_managed(using=db)
                    else:
                        transaction.commit_unless_managed(using=db)
                else:
                    if verbosity >= 2:
                        print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)

    # Install SQL indices for all newly created models
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                if index_sql:
                    if verbosity >= 1:
                        print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                    try:
                        for sql in index_sql:
                            cursor.execute(sql)
                    except Exception, e:
                        sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                            (app_name, model._meta.object_name, e))
                        transaction.rollback_unless_managed(using=db)
                    else:
                        transaction.commit_unless_managed(using=db)

    # Load initial_data fixtures (unless that has been disabled)
    if load_initial_data:
        from django.core.management import call_command
        call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
0
Example 114
def fields_for_model(model, fields=None, exclude=None, widgets=None, formfield_callback=None):
    """
    Return a ``SortedDict`` mapping field names to form fields for ``model``.

    ``fields`` optionally restricts the result to the named fields (and fixes
    their ordering). ``exclude`` removes the named fields even when they also
    appear in ``fields``. ``widgets`` maps field names to widget overrides,
    and ``formfield_callback``, when given, must be a callable producing the
    form field for each model field.
    """
    opts = model._meta
    collected = []
    skipped = []
    for model_field in opts.fields + opts.many_to_many:
        # Non-editable fields never appear on a form.
        if not model_field.editable:
            continue
        if fields and model_field.name not in fields:
            continue
        if exclude and model_field.name in exclude:
            continue
        if widgets and model_field.name in widgets:
            kwargs = {'widget': widgets[model_field.name]}
        else:
            kwargs = {}
        if formfield_callback is None:
            form_field = model_field.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            form_field = formfield_callback(model_field, **kwargs)
        if form_field:
            collected.append((model_field.name, form_field))
        else:
            # The model field declined to provide a form field (e.g. an
            # auto-created field); remember it so a later ``fields`` pass
            # does not resurrect it as None.
            skipped.append(model_field.name)
    field_dict = SortedDict(collected)
    if fields:
        # Re-order the result to match ``fields``, dropping excluded and
        # skipped names.
        field_dict = SortedDict(
            [(name, field_dict.get(name)) for name in fields
             if ((not exclude) or (exclude and name not in exclude)) and (name not in skipped)]
        )
    return field_dict
0
Example 115
def generate_model(model_description, mapping, db_key=''):
    """Use introspection to generate Django model fields from a database table.

    For every column of the table named by ``model_description.name``, this
    creates/updates a ``Field`` row (and its ``Setting`` rows) describing the
    Django field type and any extra parameters derived from the column.

    ``mapping`` maps column names to their original names, plus the geometry
    column to its OGC geometry type name; ``db_key`` selects the database
    connection to introspect.
    """
    connection = db.connections[db_key]
    cursor = connection.cursor()
    table_name = model_description.name
    try:
        relations = connection.introspection.get_relations(cursor, table_name)
    except NotImplementedError:
        relations = {}
    try:
        indexes = connection.introspection.get_indexes(cursor, table_name)
    except NotImplementedError:
        indexes = {}
    # OGC geometry type name -> GeoDjango field class name.  Hoisted out of
    # the column loop (it was rebuilt on every iteration).
    GEOM_FIELDS = {
        'GEOMETRYCOLLECTION': 'GeometryCollectionField',
        'POINT': 'PointField',
        'MULTIPOINT': 'MultiPointField',
        'LINESTRING': 'LineStringField',
        'MULTILINESTRING': 'MultiLineStringField',
        'POLYGON': 'PolygonField',
        'MULTIPOLYGON': 'MultiPolygonField',
        'GEOMETRY': 'GeometryField',
    }
    # The geometry column's type is the same for every row, so resolve it once.
    geo_column_name = 'the_geom' if has_datastore else 'geom'
    geom_type = mapping[geo_column_name]
    used_column_names = []  # Holds column names used in the table so far
    for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
        # Holds Field notes, to be displayed in a Python comment.
        comment_notes = []
        # Holds Field parameters such as 'db_column'.
        extra_params = SortedDict()
        column_name = row[0]
        is_relation = i in relations
        att_name, params, notes = normalize_col_name(
            column_name,
            used_column_names,
            is_relation
        )
        extra_params.update(params)
        comment_notes.extend(notes)
        used_column_names.append(att_name)
        # Add primary_key and unique, if necessary.
        if column_name in indexes:
            if indexes[column_name]['primary_key']:
                extra_params['primary_key'] = True
            elif indexes[column_name]['unique']:
                extra_params['unique'] = True
        # Ask the backend for the field type string plus any additional
        # parameters and notes derived from the column description.
        field_type, field_params, field_notes = get_field_type(
            connection, table_name, row)
        extra_params.update(field_params)
        comment_notes.extend(field_notes)
        # Use the table's geometry type to specialize a generic GeometryField.
        if field_type == 'GeometryField' and geom_type in GEOM_FIELDS:
            field_type = GEOM_FIELDS[geom_type]
        # Change the type of id to AutoField to get auto generated ids.
        if att_name in ('id', 'fid') and extra_params == {'primary_key': True}:
            field_type = 'AutoField'
        # Add 'null' and 'blank' if the 'null_ok' flag was present in the
        # table description (row[6] is null_ok).
        if row[6]:
            if field_type == 'BooleanField':
                # BooleanField cannot store NULL; use the nullable variant.
                field_type = 'NullBooleanField'
            else:
                extra_params['blank'] = True
                if field_type not in ('TextField', 'CharField'):
                    extra_params['null'] = True
        # was: ``if any(field_type)`` -- any() over a string only tests that
        # it is non-empty; plain truthiness says the same thing directly.
        if field_type and column_name != 'id':
            field, __ = Field.objects.get_or_create(
                model=model_description, name=att_name)
            field.type = field_type
            field.original_name = mapping[column_name]
            field.save()
            for name, value in extra_params.items():
                if name:  # was: ``if any(name)`` -- same truthiness misuse
                    Setting.objects.get_or_create(
                        field=field,
                        name=name,
                        value=value)
0
Example 116
Project: database-as-a-service Source File: main.py
def get_ordering_field_columns(self):
    """
    Return a SortedDict mapping ordering-field column numbers to 'asc'/'desc'.
    """
    # More than one column may share the same underlying sort field, so the
    # result is keyed on column numbers rather than field names.
    ordering = self._get_default_ordering()
    result = SortedDict()
    if ORDER_VAR in self.params:
        # Ordering came from the request: each dot-separated segment is an
        # optionally '-'-prefixed column index.
        for segment in self.params[ORDER_VAR].split('.'):
            _, prefix, index = segment.rpartition('-')
            try:
                index = int(index)
            except ValueError:
                continue  # skip malformed segments
            result[index] = 'desc' if prefix == '-' else 'asc'
    else:
        # Ordering came from ModelAdmin or model Meta; the exact column
        # numbers are ambiguous (several columns may map to one field), so
        # take the first displayed column resolving to each ordering field.
        for field in ordering:
            direction = 'asc'
            if field.startswith('-'):
                field = field[1:]
                direction = 'desc'
            for index, attr in enumerate(self.list_display):
                if self.get_ordering_field(attr) == field:
                    result[index] = direction
                    break
    return result
0
Example 117
Project: golismero Source File: options.py
def __init__(self, meta, app_label=None):
    """Initialize model meta-options to their defaults.

    ``meta`` is the model's inner ``Meta`` class (or None); ``app_label``
    optionally pins the application the model belongs to.
    """
    # Regular fields and many-to-many fields are tracked separately; M2M
    # fields are handled differently from ordinary columns elsewhere.
    self.local_fields, self.local_many_to_many = [], []
    self.virtual_fields = []
    self.module_name, self.verbose_name = None, None
    self.verbose_name_plural = None
    self.db_table = ''
    self.ordering = []
    self.unique_together = []
    self.index_together = []
    self.permissions = []
    self.object_name, self.app_label = None, app_label
    self.get_latest_by = None
    self.order_with_respect_to = None
    self.db_tablespace = settings.DEFAULT_TABLESPACE
    self.admin = None
    # Keep the raw Meta class around; presumably consumed by a later
    # contribute_to_class/meta-processing step -- not visible here.
    self.meta = meta
    self.pk = None
    self.has_auto_field, self.auto_field = False, None
    self.abstract = False
    self.managed = True
    self.proxy = False
    # For any class that is a proxy (including automatically created
    # classes for deferred object loading), proxy_for_model tells us
    # which class this model is proxying. Note that proxy_for_model
    # can create a chain of proxy models. For non-proxy models, the
    # variable is always None.
    self.proxy_for_model = None
    # For any non-abstract class, the concrete class is the model
    # in the end of the proxy_for_model chain. In particular, for
    # concrete models, the concrete_model is always the class itself.
    self.concrete_model = None
    self.swappable = None
    # Maps parent model classes in multi-table inheritance, in order.
    self.parents = SortedDict()
    self.duplicate_targets = {}
    self.auto_created = False
    # To handle various inheritance situations, we need to track where
    # managers came from (concrete or abstract base classes).
    self.abstract_managers = []
    self.concrete_managers = []
    # List of all lookups defined in ForeignKey 'limit_choices_to' options
    # from *other* models. Needed for some admin checks. Internal use only.
    self.related_fkey_lookups = []
0
Example 118
Project: simian Source File: djangoforms.py
def __new__(cls, class_name, bases, attrs):
    """Constructor for a new ModelForm class instance.
    The signature of this method is determined by Python internals.
    All Django Field instances are removed from attrs and added to
    the base_fields attribute instead. Additional Field instances
    are added to this based on the Datastore Model class specified
    by the Meta attribute.
    """
    # Collect the form fields declared directly on the class, ordered by
    # creation_counter so they keep their declaration order.
    fields = sorted(((field_name, attrs.pop(field_name))
                     for field_name, obj in attrs.items()
                     if isinstance(obj, forms.Field)),
                    key=lambda obj: obj[1].creation_counter)
    # Walk base classes in reverse so fields from earlier bases come first,
    # with this class's own declarations appended last (overriding wins).
    for base in bases[::-1]:
        if hasattr(base, 'base_fields'):
            fields = base.base_fields.items() + fields
    declared_fields = django.utils.datastructures.SortedDict()
    for field_name, obj in fields:
        declared_fields[field_name] = obj
    opts = ModelFormOptions(attrs.get('Meta', None))
    attrs['_meta'] = opts
    # A ModelForm may inherit a model from at most one base class.
    base_models = []
    for base in bases:
        base_opts = getattr(base, '_meta', None)
        base_model = getattr(base_opts, 'model', None)
        if base_model is not None:
            base_models.append(base_model)
    if len(base_models) > 1:
        raise django.core.exceptions.ImproperlyConfigured(
            "%s's base classes define more than one model." % class_name)
    if opts.model is not None:
        if base_models and base_models[0] is not opts.model:
            raise django.core.exceptions.ImproperlyConfigured(
                '%s defines a different model than its parent.' % class_name)
        # Derive form fields from the Datastore model's properties, in
        # creation order, honoring Meta.fields / Meta.exclude.
        model_fields = django.utils.datastructures.SortedDict()
        for name, prop in sorted(opts.model.properties().iteritems(),
                                 key=lambda prop: prop[1].creation_counter):
            if opts.fields and name not in opts.fields:
                continue
            if opts.exclude and name in opts.exclude:
                continue
            form_field = prop.get_form_field()
            if form_field is not None:
                model_fields[name] = form_field
        # Explicitly declared fields take precedence over generated ones.
        model_fields.update(declared_fields)
        attrs['base_fields'] = model_fields
        props = opts.model.properties()
        # Wrap each field's clean() so that, after the form-level clean,
        # the value is also validated against the model property.  The
        # keyword defaults (prop=prop, old_clean=field.clean) deliberately
        # bind the per-iteration values into each closure.
        for name, field in model_fields.iteritems():
            prop = props.get(name)
            if prop:
                if hasattr(forms, 'FileField') and isinstance(field, forms.FileField):
                    # FileField.clean takes an extra ``initial`` argument.
                    def clean_for_property_field(value, initial, prop=prop,
                                                 old_clean=field.clean):
                        value = old_clean(value, initial)
                        property_clean(prop, value)
                        return value
                else:
                    def clean_for_property_field(value, prop=prop,
                                                 old_clean=field.clean):
                        value = old_clean(value)
                        property_clean(prop, value)
                        return value
                field.clean = clean_for_property_field
    else:
        # No model configured: only the explicitly declared fields apply.
        attrs['base_fields'] = declared_fields
    return super(ModelFormMetaclass, cls).__new__(cls,
                                                  class_name, bases, attrs)
0
Example 119
Project: hortonworks-sandbox Source File: syncdb.py
def handle_noargs(self, **options):
    """Create tables, custom SQL and indexes for all installed apps (syncdb).

    Python 2 code. Creates missing tables for every model the router allows
    on the selected database, runs each newly created model's custom SQL and
    index SQL inside commit/rollback guards, emits the post_syncdb signal,
    and finally loads the ``initial_data`` fixtures.
    """
    verbosity = int(options.get('verbosity', 1))
    interactive = options.get('interactive')
    show_traceback = options.get('traceback', False)
    self.style = no_style()
    # Import the 'management' module within each installed app, to register
    # dispatcher events.
    for app_name in settings.INSTALLED_APPS:
        try:
            import_module('.management', app_name)
        except ImportError, exc:
            # This is slightly hackish. We want to ignore ImportErrors
            # if the "management" module itself is missing -- but we don't
            # want to ignore the exception if the management module exists
            # but raises an ImportError for some reason. The only way we
            # can do this is to check the text of the exception. Note that
            # we're a bit broad in how we check the text, because different
            # Python implementations may not use the same text.
            # CPython uses the text "No module named management"
            # PyPy uses "No module named myproject.myapp.management"
            msg = exc.args[0]
            if not msg.startswith('No module named') or 'management' not in msg:
                raise
    db = options.get('database', DEFAULT_DB_ALIAS)
    connection = connections[db]
    cursor = connection.cursor()
    # Get a list of already installed *models* so that references work right.
    tables = connection.introspection.table_names()
    seen_models = connection.introspection.installed_models(tables)
    created_models = set()
    pending_references = {}
    # Build the manifest of apps and models that are to be synchronized:
    # (app label, models the router allows onto this database).
    all_models = [
        (app.__name__.split('.')[-2],
        [m for m in models.get_models(app, include_auto_created=True)
        if router.allow_syncdb(db, m)])
        for app in models.get_apps()
    ]
    def model_installed(model):
        # A model counts as installed when its own table -- or, for
        # auto-created through-models, its creator's table -- already exists.
        opts = model._meta
        converter = connection.introspection.table_name_converter
        return not ((converter(opts.db_table) in tables) or
            (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
    manifest = SortedDict(
        (app_name, filter(model_installed, model_list))
        for app_name, model_list in all_models
    )
    # Create the tables for each model
    for app_name, model_list in manifest.items():
        for model in model_list:
            # Create the model's database table, if it doesn't already exist.
            if verbosity >= 2:
                print "Processing %s.%s model" % (app_name, model._meta.object_name)
            sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
            seen_models.add(model)
            created_models.add(model)
            # Defer foreign-key DDL until the referenced table has been seen.
            for refto, refs in references.items():
                pending_references.setdefault(refto, []).extend(refs)
                if refto in seen_models:
                    sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
            sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
            if verbosity >= 1 and sql:
                print "Creating table %s" % model._meta.db_table
            for statement in sql:
                cursor.execute(statement)
            tables.append(connection.introspection.table_name_converter(model._meta.db_table))
    transaction.commit_unless_managed(using=db)
    # Send the post_syncdb signal, so individual apps can do whatever they need
    # to do at this point.
    emit_post_sync_signal(created_models, verbosity, interactive, db)
    # The connection may have been closed by a syncdb handler.
    cursor = connection.cursor()
    # Install custom SQL for the app (but only if this
    # is a model we've just created)
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                custom_sql = custom_sql_for_model(model, self.style, connection)
                if custom_sql:
                    if verbosity >= 1:
                        print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                    try:
                        for sql in custom_sql:
                            cursor.execute(sql)
                    except Exception, e:
                        sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                                (app_name, model._meta.object_name, e))
                        if show_traceback:
                            import traceback
                            traceback.print_exc()
                        # Discard the partially applied custom SQL.
                        transaction.rollback_unless_managed(using=db)
                    else:
                        transaction.commit_unless_managed(using=db)
                else:
                    if verbosity >= 2:
                        print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
    # Install SQL indices for all newly created models
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                if index_sql:
                    if verbosity >= 1:
                        print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                    try:
                        for sql in index_sql:
                            cursor.execute(sql)
                    except Exception, e:
                        sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                                (app_name, model._meta.object_name, e))
                        transaction.rollback_unless_managed(using=db)
                    else:
                        transaction.commit_unless_managed(using=db)
    # Load the initial_data fixtures.
    # NOTE(review): upstream Django guards this with a load_initial_data
    # option; here it runs unconditionally -- confirm that is intended.
    from django.core.management import call_command
    call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
0
Example 120
Project: hunch-gift-app Source File: syncdb.py
def handle_noargs(self, **options):
    """Create tables, custom SQL and indexes for all installed apps (syncdb).

    Python 2 code. Same flow as Django's syncdb command, with per-phase
    progress banners ("Creating tables ...", "Installing custom SQL ...",
    "Installing indexes ...") and per-model messages at higher verbosity.
    """
    verbosity = int(options.get('verbosity', 1))
    interactive = options.get('interactive')
    show_traceback = options.get('traceback', False)
    self.style = no_style()
    # Import the 'management' module within each installed app, to register
    # dispatcher events.
    for app_name in settings.INSTALLED_APPS:
        try:
            import_module('.management', app_name)
        except ImportError, exc:
            # This is slightly hackish. We want to ignore ImportErrors
            # if the "management" module itself is missing -- but we don't
            # want to ignore the exception if the management module exists
            # but raises an ImportError for some reason. The only way we
            # can do this is to check the text of the exception. Note that
            # we're a bit broad in how we check the text, because different
            # Python implementations may not use the same text.
            # CPython uses the text "No module named management"
            # PyPy uses "No module named myproject.myapp.management"
            msg = exc.args[0]
            if not msg.startswith('No module named') or 'management' not in msg:
                raise
    db = options.get('database', DEFAULT_DB_ALIAS)
    connection = connections[db]
    cursor = connection.cursor()
    # Get a list of already installed *models* so that references work right.
    tables = connection.introspection.table_names()
    seen_models = connection.introspection.installed_models(tables)
    created_models = set()
    pending_references = {}
    # Build the manifest of apps and models that are to be synchronized:
    # (app label, models the router allows onto this database).
    all_models = [
        (app.__name__.split('.')[-2],
        [m for m in models.get_models(app, include_auto_created=True)
        if router.allow_syncdb(db, m)])
        for app in models.get_apps()
    ]
    def model_installed(model):
        # A model counts as installed when its own table -- or, for
        # auto-created through-models, its creator's table -- already exists.
        opts = model._meta
        converter = connection.introspection.table_name_converter
        return not ((converter(opts.db_table) in tables) or
            (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
    manifest = SortedDict(
        (app_name, filter(model_installed, model_list))
        for app_name, model_list in all_models
    )
    # Create the tables for each model
    if verbosity >= 1:
        print "Creating tables ..."
    for app_name, model_list in manifest.items():
        for model in model_list:
            # Create the model's database table, if it doesn't already exist.
            if verbosity >= 3:
                print "Processing %s.%s model" % (app_name, model._meta.object_name)
            sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
            seen_models.add(model)
            created_models.add(model)
            # Defer foreign-key DDL until the referenced table has been seen.
            for refto, refs in references.items():
                pending_references.setdefault(refto, []).extend(refs)
                if refto in seen_models:
                    sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
            sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
            if verbosity >= 1 and sql:
                print "Creating table %s" % model._meta.db_table
            for statement in sql:
                cursor.execute(statement)
            tables.append(connection.introspection.table_name_converter(model._meta.db_table))
    transaction.commit_unless_managed(using=db)
    # Send the post_syncdb signal, so individual apps can do whatever they need
    # to do at this point.
    emit_post_sync_signal(created_models, verbosity, interactive, db)
    # The connection may have been closed by a syncdb handler.
    cursor = connection.cursor()
    # Install custom SQL for the app (but only if this
    # is a model we've just created)
    if verbosity >= 1:
        print "Installing custom SQL ..."
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                custom_sql = custom_sql_for_model(model, self.style, connection)
                if custom_sql:
                    if verbosity >= 2:
                        print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                    try:
                        for sql in custom_sql:
                            cursor.execute(sql)
                    except Exception, e:
                        sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                                (app_name, model._meta.object_name, e))
                        if show_traceback:
                            import traceback
                            traceback.print_exc()
                        # Discard the partially applied custom SQL.
                        transaction.rollback_unless_managed(using=db)
                    else:
                        transaction.commit_unless_managed(using=db)
                else:
                    if verbosity >= 3:
                        print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
    if verbosity >= 1:
        print "Installing indexes ..."
    # Install SQL indices for all newly created models
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                if index_sql:
                    if verbosity >= 2:
                        print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                    try:
                        for sql in index_sql:
                            cursor.execute(sql)
                    except Exception, e:
                        sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                                (app_name, model._meta.object_name, e))
                        transaction.rollback_unless_managed(using=db)
                    else:
                        transaction.commit_unless_managed(using=db)
    # Load the initial_data fixtures.
    # NOTE(review): upstream Django guards this with a load_initial_data
    # option; here it runs unconditionally -- confirm that is intended.
    from django.core.management import call_command
    call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
0
Example 121
Project: django-ecstatic Source File: createstaticmanifest.py
def handle_noargs(self, **options):
    """Discover every static file and rebuild the static-files manifest."""
    self.set_options(**options)
    manifest = ConfiguredStaticFilesManifest()
    manifest.clear()
    excludes = getattr(settings, 'ECSTATIC_MANIFEST_EXCLUDES', [])
    # Map each (possibly storage-prefixed) path to the first relative path
    # that produced it; later duplicates are ignored.
    found_files = SortedDict()
    for finder in finders.get_finders():
        for rel_path, storage in finder.list(excludes):
            prefix = getattr(storage, 'prefix', None)
            prefixed_path = os.path.join(prefix, rel_path) if prefix else rel_path
            if prefixed_path not in found_files:
                found_files[prefixed_path] = rel_path
    for rel_path in found_files.values() + settings.ECSTATIC_MANIFEST_EXTRAS:
        try:
            generate_url = self.storage.generate_url
        except AttributeError:
            raise AttributeError('%s doesn\'t define a generate_url method.'
                                 ' Did you remember to extend StaticManifestMixin?' %
                                 self.storage)
        manifest.add(rel_path, generate_url(rel_path))
    manifest.flush()
0
Example 122
Project: django-ecstatic Source File: eccollect.py
def collect(self):
    """
    Perform the bulk of the work of collectstatic.
    Split off from handle_noargs() to facilitate testing.
    """
    if self.symlink:
        # Symlinking needs platform support and a local destination.
        if sys.platform == 'win32':
            raise CommandError("Symlinking is not supported by this "
                               "platform (%s)." % sys.platform)
        if not self.local:
            raise CommandError("Can't symlink to a remote destination.")
    if self.clear:
        self.clear_dir('')
    handler = self._get_handler()
    do_post_process = self.post_process and hasattr(self.storage, 'post_process')
    found_files = SortedDict()
    for finder in finders.get_finders():
        for path, storage in finder.list(self.ignore_patterns):
            # Prefix the relative path when the source storage carries one.
            prefix = getattr(storage, 'prefix', None)
            prefixed_path = os.path.join(prefix, path) if prefix else path
            if prefixed_path in found_files:
                continue  # first finder wins; skip duplicates
            found_files[prefixed_path] = (storage, path)
            handler(path, prefixed_path, storage)
            if self.progressive_post_process and do_post_process:
                # Post-process each file as it is collected.
                try:
                    self._post_process(
                        {prefixed_path: (storage, path)},
                        self.dry_run)
                except ValueError as e:
                    raise ValueError('%s current storage requires all files'
                                     ' to have been collected first. Try '
                                     ' ecstatic.storage.CachedStaticFilesStorage' \
                                     % e)
    if not self.progressive_post_process and do_post_process:
        # Post-process everything in one batch at the end.
        self._post_process(found_files, self.dry_run)
    return {
        'modified': self.copied_files + self.symlinked_files,
        'unmodified': self.unmodified_files,
        'post_processed': self.post_processed_files,
    }
0
Example 123
def url(self):
    """Build the Google Chart API URL for this chart's options and data."""
    # Map charts ('cht=t') carry their data in a stashed "_mapdata" option.
    if self.options.get('cht', None) == 't':
        self.datasets.append(self.options.pop("_mapdata"))
    # Figure out the chart's data range from all (visible + hidden) datasets
    # unless the caller fixed one explicitly.
    if not self.datarange:
        maxvalue = max(max(d) for d in chain(self.datasets, self.hidden_datasets) if d)
        minvalue = min(min(d) for d in chain(self.datasets, self.hidden_datasets) if d)
        self.datarange = (minvalue, maxvalue)
    # Encode data
    if "chds" in self.options or self.options.get('cht', None) == 'gom':
        # text encoding if scaling provided, or for google-o-meter type
        data = "|".join(encode_text(d) for d in chain(self.datasets, self.hidden_datasets))
        # "t<visible dataset count>:<data>"
        encoded_data = "t%d:%s" % (len(self.datasets), data)
    else:
        # extended encoding otherwise
        data = extended_separator.join(encode_extended(d, self.datarange) for d in chain(self.datasets, self.hidden_datasets))
        encoded_data = "e%d:%s" % (len(self.datasets), data)
    # Fill in defaults without overriding caller-supplied options.
    for k in self.defaults:
        if k not in self.options:
            self.options[k] = self.defaults[k]
    # Start to calculate the URL
    url = "%s?%s&chd=%s" % (self.BASE, urlencode(self.options), encoded_data)
    # Calculate axis options
    if self.axes:
        axis_options = SortedDict()
        axis_sides = []
        for i, axis in enumerate(self.axes):
            axis_sides.append(axis.side)
            for opt in axis.options:
                try:
                    # Interpolate the axis index into the option template;
                    # options that aren't %-templates raise TypeError and
                    # are deliberately skipped.
                    axis_options.setdefault(opt, []).append(axis.options[opt] % i)
                except TypeError:
                    pass
        # Turn the option lists into strings
        axis_sides = smart_join(",", *axis_sides)
        for opt in axis_options:
            axis_options[opt] = smart_join("|", *axis_options[opt])
        url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
    return url
0
Example 124
Project: django-googlecharts Source File: charts.py
@option("chart-auto-colors")
def chart_auto_colors(color, item_label_list):
    '''Takes a starting color and a list of labels and creates the correct number of
    colors, storing the correspondance between the labels and colors for later use
    in the context.

    ``color`` is a 6-digit hex string (no leading '#'); one derived color is
    produced per label by scaling the saturation/value of the starting color.
    '''
    # Convert the hex color to RGB values between 0 and 1
    _r = float(int(color[0:2], 16)) / 255
    _g = float(int(color[2:4], 16)) / 255
    _b = float(int(color[4:6], 16)) / 255
    # Switch to HSV color space so saturation/value can be scaled per label
    hsv = colorsys.rgb_to_hsv(_r, _g, _b)
    colors = []
    n = len(item_label_list)
    # For each label, compute a new color.  (The original loop also rebound
    # an unused ``color`` variable -- shadowing the parameter -- via
    # ``enumerate(range(...))``; only the index is needed.)
    for index in range(n):
        if index == 0:
            # this is the first value, make it 100%
            s_value = hsv[1]
            v_value = hsv[2]
        elif index == n - 1:
            # this is the last value, make it 20%
            s_value = hsv[1] * .2
            v_value = hsv[2] * 1.8
        else:
            # otherwise, interpolate between the two extremes
            s_value = hsv[1] * (.8 / (n - 1))
            v_value = hsv[2] * (1 + (.8 / (n - 1)))
        # Clamp back to the starting saturation/value when out of range
        if s_value >= 1:
            s_value = hsv[1]
        if v_value >= 1:
            v_value = hsv[2]
        # Convert back to rgb
        c_list = colorsys.hsv_to_rgb(hsv[0], s_value, v_value)
        # 0..1 floats -> 0..255 ints -> hex digits (dropping the '0x' prefix)
        c_converted = [str(hex(int(c * 255)))[2:] for c in c_list]
        c_final = []
        # Zero-pad purely numeric components; hex letters pass through as-is
        for c in c_converted:
            try:
                c = "%02d" % int(c)
            except ValueError:
                # Ignore, this is a hex letter (e.g. 'ff')
                pass
            c_final.append(c)
        colors.append(''.join(c_final))
    final_color_map = SortedDict()
    # Map our final color values to the label that will be associated with them
    for index, c in enumerate(colors):
        final_color_map[c] = item_label_list[index]
    # Values which begin with an underscore won't be passed on to Google but will
    # end up in the request context.
    return {"chco": ','.join(colors),
            '_final_color_map': final_color_map}
0
Example 125
Project: django-staticfiles Source File: collectstatic.py
def collect(self):
    """
    Perform the bulk of the work of collectstatic.
    Split off from handle_noargs() to facilitate testing.
    """
    if self.symlink:
        # Symlinking needs platform support and a local destination.
        if sys.platform == 'win32':
            raise CommandError("Symlinking is not supported by this "
                               "platform (%s)." % sys.platform)
        if not self.local:
            raise CommandError("Can't symlink to a remote destination.")
    if self.clear:
        self.clear_dir('')
    handler = self.link_file if self.symlink else self.copy_file
    found_files = SortedDict()
    for finder in finders.get_finders():
        for path, storage in finder.list(self.ignore_patterns):
            # Prefix the relative path when the source storage carries one.
            prefix = getattr(storage, 'prefix', None)
            prefixed_path = os.path.join(prefix, path) if prefix else path
            found_files[prefixed_path] = (storage, path)
            handler(path, prefixed_path, storage)
    # Here we check if the storage backend has a post_process
    # method and pass it the list of modified files.
    if self.post_process and hasattr(self.storage, 'post_process'):
        processor = self.storage.post_process(found_files,
                                              dry_run=self.dry_run)
        for original_path, processed_path, processed in processor:
            if processed:
                self.log(u"Post-processed '%s' as '%s" %
                         (original_path, processed_path), level=1)
                self.post_processed_files.append(original_path)
            else:
                self.log(u"Skipped post-processing '%s'" % original_path)
    return {
        'modified': self.copied_files + self.symlinked_files,
        'unmodified': self.unmodified_files,
        'post_processed': self.post_processed_files,
    }
0
Example 126
Project: comics Source File: views.py
@login_required
def status(request, num_days=21):
    """Render a per-comic fetch-status timeline for the last num_days days."""
    today = datetime.date.today()
    window_start = today - datetime.timedelta(days=num_days)
    releases = (Release.objects
                .filter(pub_date__gte=window_start, comic__active=True)
                .select_related()
                .order_by('comic__slug')
                .distinct())
    comics = (Comic.objects.filter(active=True)
              .annotate(last_pub_date=Max('release__pub_date'))
              .order_by('last_pub_date'))
    timeline = SortedDict()
    for comic in comics:
        # 1000 acts as "effectively never" for comics without a release.
        comic.days_since_last_release = (
            (today - comic.last_pub_date).days if comic.last_pub_date else 1000)
        schedule = get_comic_schedule(comic)
        row = []
        for offset in range(num_days + 1):
            day = today - datetime.timedelta(days=offset)
            classes = set()
            if not schedule:
                classes.add('unscheduled')
            elif int(day.strftime('%w')) in schedule:
                classes.add('scheduled')
            # Each cell: [css classes, date, release-or-None]
            row.append([classes, day, None])
        timeline[comic] = row
    # Mark the days on which a release was actually fetched.
    for release in releases:
        offset = (today - release.pub_date).days
        cell = timeline[release.comic][offset]
        cell[0].add('fetched')
        cell[2] = release
    days = [
        today - datetime.timedelta(days=i)
        for i in range(num_days + 1)]
    return render(request, 'status/status.html', {
        'active': {'status': True},
        'days': days,
        'timeline': timeline,
    })
0
Example 127
Project: fosdem-volunteers Source File: views.py
def task_list(request):
    """List the current edition's tasks grouped by preference, day and category.

    Authenticated volunteers can (un)subscribe by POSTing the checked task
    ids; anonymous visitors get a read-only listing.
    """
    # get the signed in volunteer
    if request.user.is_authenticated():
        volunteer = Volunteer.objects.get(user=request.user)
    else:
        volunteer = None
    is_dr_manhattan = False
    current_tasks = Task.objects.filter(edition=Edition.get_current)
    if volunteer:
        # "Dr. Manhattan" task sets are overlapping commitments; those tasks
        # are listed separately and excluded from the normal listing below.
        is_dr_manhattan, dr_manhattan_task_sets = volunteer.detect_dr_manhattan()
        dr_manhattan_task_ids = [x.id for x in set.union(*dr_manhattan_task_sets)] if dr_manhattan_task_sets else []
        ok_tasks = current_tasks.exclude(id__in=dr_manhattan_task_ids)
    else:
        ok_tasks = current_tasks
    days = sorted(list(set([x.date for x in current_tasks])))
    # when the user submitted the form
    if request.method == 'POST' and volunteer:
        # get the checked tasks
        task_ids = request.POST.getlist('task')
        # unchecked boxes, delete him/her from the task
        for task in current_tasks.exclude(id__in=task_ids):
            VolunteerTask.objects.filter(task=task, volunteer=volunteer).delete()
        # checked boxes, add the volunteer to the tasks when he/she is not added
        for task in current_tasks.filter(id__in=task_ids):
            VolunteerTask.objects.get_or_create(task=task, volunteer=volunteer)
        # show success message when enabled
        if userena_settings.USERENA_USE_MESSAGES:
            messages.success(request, _('Your tasks have been updated.'), fail_silently=True)
        # redirect to prevent repost
        return redirect('task_list')
    # get the preferred and other tasks, preserving key order with SortedDict
    # for the view
    context = {
        'tasks': SortedDict({}),
        'checked': {},
        'attending': {},
        'is_dr_manhattan': is_dr_manhattan,
    }
    # get the categories the volunteer is interested in
    if volunteer:
        categories_by_task_pref = {
            'preferred tasks': TaskCategory.objects.filter(volunteer=volunteer, active=True),
            'other tasks': TaskCategory.objects.filter(active=True).exclude(volunteer=volunteer),
        }
        context['volunteer'] = volunteer
        context['dr_manhattan_task_sets'] = dr_manhattan_task_sets
        context['tasks']['preferred tasks'] = SortedDict.fromkeys(days, {})
        context['tasks']['other tasks'] = SortedDict.fromkeys(days, {})
    else:
        categories_by_task_pref = {
            # 'preferred tasks': [],
            'tasks': TaskCategory.objects.filter(active=True),
        }
        context['tasks']['tasks'] = SortedDict.fromkeys(days, {})
        context['user'] = request.user
    # Fill every (group, day, category) cell with its matching tasks.
    # Note: fromkeys above shares a single placeholder value across keys,
    # but each cell is reassigned here before it is ever read.
    for category_group in context['tasks']:
        for day in context['tasks'][category_group]:
            context['tasks'][category_group][day] = SortedDict.fromkeys(categories_by_task_pref[category_group], [])
            for category in context['tasks'][category_group][day]:
                dct = ok_tasks.filter(template__category=category, date=day)
                context['tasks'][category_group][day][category] = dct
    # mark checked, attending tasks
    if volunteer:
        for task in current_tasks:
            context['checked'][task.id] = 'checked' if volunteer in task.volunteers.all() else ''
            context['attending'][task.id] = False
        # take the moderation tasks to talks the volunteer is attending
        for task in current_tasks.filter(talk__volunteers=volunteer):
            context['attending'][task.id] = True
        check_profile_completeness(request, volunteer)
    else:
        for task in current_tasks:
            context['attending'][task.id] = False
    return render(request, 'volunteers/tasks.html', context)
0
Example 128
def __init__(self, cal_items, year=None, month=None, *args, **kwargs):
    """Build a month calendar mapping each displayed date to its items.

    ``cal_items`` is an iterable of dicts or objects carrying a date (or
    datetime) under the key/attribute named by the ``date_field`` kwarg
    (default ``'date'``).  ``year`` and ``month`` default to today's.
    After construction, ``self.month_dict`` maps every date shown for the
    month (including leading/trailing days of adjacent weeks) to a list
    of the items falling on that date, in calendar order.
    """
    today = datetime.date.today()
    # Use ``is None`` for the identity test instead of ``== None``.
    if year is None:
        year = today.year
    self.year = year
    if not month:
        month = today.month
    self.month = month
    # Name of the key/attribute on each item that holds its date.
    self.date_field = kwargs.pop('date_field', 'date')
    super(ListCalendar, self).__init__(*args, **kwargs)
    cal_arr = self.monthdatescalendar(year, month)
    # Pre-seed every visible date with an empty list; SortedDict keeps
    # the insertion (calendar) order.
    month_dict = SortedDict()
    for week in cal_arr:
        for date in week:
            month_dict[date] = []
    for item in cal_items:
        if isinstance(item, dict):
            possible_date = item.get(self.date_field)
        else:
            possible_date = getattr(item, self.date_field)
        # Normalise datetimes down to plain dates so they match the keys
        # seeded above.  isinstance (rather than a type() comparison)
        # also catches datetime subclasses.
        if isinstance(possible_date, datetime.datetime):
            possible_date = possible_date.date()
        # Skip items dated outside the displayed weeks instead of
        # raising KeyError on the unseeded key.
        if possible_date and possible_date in month_dict:
            month_dict[possible_date].append(item)
    self.month_dict = month_dict
0
Example 129
def __init__(self):
    """Create an empty, insertion-ordered rule registry.

    ``_initialized`` remains False until the rules are first loaded.
    """
    self._initialized = False
    self._rules = SortedDict()
0
Example 130
Project: betafarm Source File: syncdb.py
def handle_noargs(self, migrate_all=False, **options):
    """Sync apps without South migrations, then run migrations.

    Temporarily narrows ``settings.INSTALLED_APPS`` and the model cache's
    app store to just the unmigrated apps, delegates to Django's built-in
    syncdb, restores the originals, and finally calls ``migrate`` (unless
    disabled via the ``migrate`` option).  ``migrate_all=True`` treats
    every app as needing a plain sync.
    """
    # Import the 'management' module within each installed app, to register
    # dispatcher events.
    # This is copied from Django, to fix bug #511.
    try:
        from django.utils.importlib import import_module
    except ImportError:
        pass  # TODO: Remove, only for Django1.0
    else:
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError, exc:
                msg = exc.args[0]
                # Only swallow "no management module" errors; re-raise real
                # import failures from inside an existing management module.
                if not msg.startswith('No module named') or 'management' not in msg:
                    raise
    # Work out what uses migrations and so doesn't need syncing
    apps_needing_sync = []
    apps_migrated = []
    for app in models.get_apps():
        app_label = get_app_label(app)
        if migrate_all:
            apps_needing_sync.append(app_label)
        else:
            try:
                migrations = migration.Migrations(app_label)
            except NoMigrations:
                # It needs syncing
                apps_needing_sync.append(app_label)
            else:
                # This is a migrated app, leave it
                apps_migrated.append(app_label)
    verbosity = int(options.get('verbosity', 0))
    # Run syncdb on only the ones needed
    if verbosity:
        print "Syncing..."
    # Swap in a narrowed app list/app store so Django's syncdb only sees
    # the unmigrated apps; both are restored right after the call.
    old_installed, settings.INSTALLED_APPS = settings.INSTALLED_APPS, apps_needing_sync
    old_app_store, cache.app_store = cache.app_store, SortedDict([
        (k, v) for (k, v) in cache.app_store.items()
        if get_app_label(k) in apps_needing_sync
    ])
    # This will allow the setting of the MySQL storage engine, for example.
    for db in dbs.values():
        db.connection_init()
    # OK, run the actual syncdb
    syncdb.Command().execute(**options)
    settings.INSTALLED_APPS = old_installed
    cache.app_store = old_app_store
    # Migrate if needed
    if options.get('migrate', True):
        if verbosity:
            print "Migrating..."
        management.call_command('migrate', **options)
    # Be obvious about what we did
    if verbosity:
        print "\nSynced:\n > %s" % "\n > ".join(apps_needing_sync)
    if options.get('migrate', True):
        if verbosity:
            print "\nMigrated:\n - %s" % "\n - ".join(apps_migrated)
    else:
        if verbosity:
            print "\nNot synced (use migrations):\n - %s" % "\n - ".join(apps_migrated)
            print "(use ./manage.py migrate to migrate these)"
0
Example 131
Project: betafarm Source File: modelsinspector.py
def get_model_fields(model, m2m=False):
    """
    Given a model class, returns a dict of {field_name: field_triple} defs.

    When ``m2m`` is True, local many-to-many fields are included alongside
    the regular local fields.  Each value is either a South "field triple"
    (dotted class path, positional args, keyword args) or ``None`` when
    the field cannot be introspected.
    """
    field_defs = SortedDict()
    inherited_fields = {}
    # Go through all bases (that are themselves models, but not Model)
    for base in model.__bases__:
        if base != models.Model and issubclass(base, models.Model):
            if not base._meta.abstract:
                # Looks like we need their fields, Ma.
                inherited_fields.update(get_model_fields(base))
    # NOTE(review): inherited_fields is collected above but never read
    # below -- confirm whether inherited definitions should be merged in.
    # Now, go through all the fields and try to get their definition
    source = model._meta.local_fields[:]
    if m2m:
        source += model._meta.local_many_to_many
    for field in source:
        # Can we ignore it completely?
        if can_ignore(field):
            continue
        # Does it define a south_field_triple method?
        if hasattr(field, "south_field_triple"):
            if NOISY:
                print " ( Nativing field: %s" % field.name
            field_defs[field.name] = field.south_field_triple()
        # Can we introspect it?
        elif can_introspect(field):
            # Get the full field class path.
            field_class = field.__class__.__module__ + "." + field.__class__.__name__
            # Run this field through the introspector
            args, kwargs = introspector(field)
            # Workaround for Django bug #13987
            if model._meta.pk.column == field.column and 'primary_key' not in kwargs:
                kwargs['primary_key'] = True
            # That's our definition!
            field_defs[field.name] = (field_class, args, kwargs)
        # Shucks, no definition!
        else:
            if NOISY:
                print " ( Nodefing field: %s" % field.name
            field_defs[field.name] = None
    # If they've used the horrific hack that is order_with_respect_to, deal with
    # it.
    if model._meta.order_with_respect_to:
        field_defs['_order'] = ("django.db.models.fields.IntegerField", [], {"default": "0"})
    return field_defs
0
Example 132
Project: kitsune Source File: utils.py
def render_readouts(request, readouts, template, locale=None, extra_data=None, product=None):
    """Render a readouts, possibly with overview page.
    Use the given template, pass the template the given readouts, limit the
    considered data to the given locale, and pass along anything in the
    `extra_data` dict to the template in addition to the standard data.
    """
    current_locale = locale or request.LANGUAGE_CODE
    on_default_locale = request.LANGUAGE_CODE == settings.WIKI_DEFAULT_LANGUAGE
    # Keyword arguments for the notification-event ``is_notifying`` checks
    # below; optionally scoped to a product.
    default_kwargs = {
        'locale': settings.WIKI_DEFAULT_LANGUAGE,
    }
    locale_kwargs = {
        'locale': request.LANGUAGE_CODE,
    }
    ready_kwargs = {}
    if product is not None:
        default_kwargs['product'] = product.slug
        locale_kwargs['product'] = product.slug
        ready_kwargs['product'] = product.slug
    data = {
        # Instantiate each readout class keyed by slug, preserving the
        # given order and skipping readouts hidden from this user.
        'readouts': SortedDict((slug, class_(request, locale=locale,
                                             product=product))
                               for slug, class_ in readouts.iteritems()
                               if class_.should_show_to(request)),
        'default_locale': settings.WIKI_DEFAULT_LANGUAGE,
        'default_locale_name': LOCALES[settings.WIKI_DEFAULT_LANGUAGE].native,
        'current_locale': current_locale,
        'current_locale_name': LOCALES[current_locale].native,
        'request_locale_name': LOCALES[request.LANGUAGE_CODE].native,
        'is_watching_default_approved':
            ApproveRevisionInLocaleEvent.is_notifying(request.user, **default_kwargs),
        # The "other" watch flags are set to None when the request is
        # already on the default locale.
        'is_watching_other_approved': (
            None if on_default_locale
            else ApproveRevisionInLocaleEvent.is_notifying(request.user, **locale_kwargs)),
        'is_watching_default_locale': (
            ReviewableRevisionInLocaleEvent.is_notifying(request.user, **default_kwargs)),
        'is_watching_other_locale': (
            None if on_default_locale
            else ReviewableRevisionInLocaleEvent.is_notifying(request.user, **locale_kwargs)),
        'is_watching_default_ready': ReadyRevisionEvent.is_notifying(request.user, **ready_kwargs),
        'on_default_locale': on_default_locale,
        'announce_form': AnnouncementForm(),
        'announcements': Announcement.get_for_locale_name(current_locale),
        'product': product,
        'products': Product.objects.filter(visible=True),
    }
    if extra_data:
        data.update(extra_data)
    return render(request, 'dashboards/' + template, data)
0
Example 133
def get_model_fields(model, m2m=False):
"""
Given a model class, returns a dict of {field_name: field_triple} defs.
"""
field_defs = SortedDict()
inherited_fields = {}
# Go through all bases (that are themselves models, but not Model)
for base in model.__bases__:
if hasattr(base, '_meta') and issubclass(base, models.Model):
if not base._meta.abstract:
# Looks like we need their fields, Ma.
inherited_fields.update(get_model_fields(base))
# Now, go through all the fields and try to get their definition
source = model._meta.local_fields[:]
if m2m:
source += model._meta.local_many_to_many
for field in source:
# Can we ignore it completely?
if can_ignore(field):
continue
# Does it define a south_field_triple method?
if hasattr(field, "south_field_triple"):
if NOISY:
print " ( Nativing field: %s" % field.name
field_defs[field.name] = field.south_field_triple()
# Can we introspect it?
elif can_introspect(field):
# Get the full field class path.
field_class = field.__class__.__module__ + "." + field.__class__.__name__
# Run this field through the introspector
args, kwargs = introspector(field)
# Workaround for Django bug #13987
if model._meta.pk.column == field.column and 'primary_key' not in kwargs:
kwargs['primary_key'] = True
# That's our definition!
field_defs[field.name] = (field_class, args, kwargs)
# Shucks, no definition!
else:
if NOISY:
print " ( Nodefing field: %s" % field.name
field_defs[field.name] = None
# If they've used the horrific hack that is order_with_respect_to, deal with
# it.
if model._meta.order_with_respect_to:
field_defs['_order'] = ("django.db.models.fields.IntegerField", [], {"default": "0"})
return field_defs
0
Example 134
Project: theyworkforyou Source File: dumpdata.py
def handle(self, *app_labels, **options):
from django.db.models import get_app, get_apps, get_models, get_model
format = options.get('format','json')
indent = options.get('indent',None)
exclude = options.get('exclude',[])
show_traceback = options.get('traceback', False)
excluded_apps = [get_app(app_label) for app_label in exclude]
if len(app_labels) == 0:
app_list = SortedDict([(app, None) for app in get_apps() if app not in excluded_apps])
else:
app_list = SortedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
model = get_model(app_label, model_label)
if model is None:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
if app in app_list.keys():
if app_list[app] and model not in app_list[app]:
app_list[app].append(model)
else:
app_list[app] = [model]
except ValueError:
# This is just an app - no model qualifier
app_label = label
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
app_list[app] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
raise CommandError("Unknown serialization format: %s" % format)
try:
serializers.get_serializer(format)
except KeyError:
raise CommandError("Unknown serialization format: %s" % format)
objects = []
for app, model_list in app_list.items():
if model_list is None:
model_list = get_models(app)
for model in model_list:
if not model._meta.proxy:
objects.extend(model._default_manager.all())
try:
return serializers.serialize(format, objects, indent=indent)
except Exception, e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
0
Example 135
def __new__(cls, class_name, bases, attrs):
    """Constructor for a new ModelForm class instance.
    The signature of this method is determined by Python internals.
    All Django Field instances are removed from attrs and added to
    the base_fields attribute instead. Additional Field instances
    are added to this based on the Datastore Model class specified
    by the Meta attribute.
    """
    # Pull declared form fields out of attrs, ordered by declaration
    # (creation_counter increases with each Field instantiation).
    fields = sorted(((field_name, attrs.pop(field_name))
                     for field_name, obj in attrs.items()
                     if isinstance(obj, forms.Field)),
                    key=lambda obj: obj[1].creation_counter)
    # Walk bases in reverse so fields from earlier (nearer) bases end up
    # later in the list and thus take precedence in declared_fields.
    for base in bases[::-1]:
        if hasattr(base, 'base_fields'):
            fields = base.base_fields.items() + fields
    declared_fields = django.utils.datastructures.SortedDict()
    for field_name, obj in fields:
        declared_fields[field_name] = obj
    opts = ModelFormOptions(attrs.get('Meta', None))
    attrs['_meta'] = opts
    # Collect the model classes declared on the parents' Meta options.
    base_models = []
    for base in bases:
        base_opts = getattr(base, '_meta', None)
        base_model = getattr(base_opts, 'model', None)
        if base_model is not None:
            base_models.append(base_model)
    if len(base_models) > 1:
        raise django.core.exceptions.ImproperlyConfigured(
            "%s's base classes define more than one model." % class_name)
    if opts.model is not None:
        # A subclass may not silently switch to a different model.
        if base_models and base_models[0] is not opts.model:
            raise django.core.exceptions.ImproperlyConfigured(
                '%s defines a different model than its parent.' % class_name)
        # Derive form fields from the model's properties, honouring the
        # Meta ``fields`` whitelist and ``exclude`` blacklist.
        model_fields = django.utils.datastructures.SortedDict()
        for name, prop in sorted(opts.model.properties().iteritems(),
                                 key=lambda prop: prop[1].creation_counter):
            if opts.fields and name not in opts.fields:
                continue
            if opts.exclude and name in opts.exclude:
                continue
            form_field = prop.get_form_field()
            if form_field is not None:
                model_fields[name] = form_field
        # Explicitly declared fields override the model-derived ones.
        model_fields.update(declared_fields)
        attrs['base_fields'] = model_fields
        props = opts.model.properties()
        for name, field in model_fields.iteritems():
            prop = props.get(name)
            if prop:
                # Wrap the field's clean() to also run the property's
                # validation.  prop/old_clean are bound as default args
                # to avoid the late-binding closure pitfall.
                def clean_for_property_field(value, prop=prop, old_clean=field.clean):
                    value = old_clean(value)
                    property_clean(prop, value)
                    return value
                field.clean = clean_for_property_field
    else:
        attrs['base_fields'] = declared_fields
    return super(ModelFormMetaclass, cls).__new__(cls,
                                                  class_name, bases, attrs)
0
Example 136
Project: theyworkforyou Source File: query.py
def _get_sql_clause(self):
    """Assemble the pieces of this QuerySet's SQL statement.

    Returns a ``(select, sql, params)`` triple: the list of SELECT column
    expressions, the statement body from " FROM" onwards, and the
    parameter list for placeholder substitution.
    """
    opts = self.model._meta
    # Construct the fundamental parts of the query: SELECT X FROM Y WHERE Z.
    select = ["%s.%s" % (backend.quote_name(opts.db_table), backend.quote_name(f.column)) for f in opts.fields]
    tables = [quote_only_if_word(t) for t in self._tables]
    # joins maps quoted alias -> (table, join type, condition); SortedDict
    # keeps the joins in the order they were added.
    joins = SortedDict()
    where = self._where[:]
    params = self._params[:]
    # Convert self._filters into SQL.
    joins2, where2, params2 = self._filters.get_sql(opts)
    joins.update(joins2)
    where.extend(where2)
    params.extend(params2)
    # Add additional tables and WHERE clauses based on select_related.
    if self._select_related:
        fill_table_cache(opts, select, tables, where,
                         old_prefix=opts.db_table,
                         cache_tables_seen=[opts.db_table],
                         max_depth=self._max_related_depth)
    # Add any additional SELECTs.
    if self._select:
        select.extend(['(%s) AS %s' % (quote_only_if_word(s[1]), backend.quote_name(s[0])) for s in self._select.items()])
    # Start composing the body of the SQL statement.
    sql = [" FROM", backend.quote_name(opts.db_table)]
    # Compose the join dictionary into SQL describing the joins.
    if joins:
        sql.append(" ".join(["%s %s AS %s ON %s" % (join_type, table, alias, condition)
                             for (alias, (table, join_type, condition)) in joins.items()]))
    # Compose the tables clause into SQL.
    if tables:
        sql.append(", " + ", ".join(tables))
    # Compose the where clause into SQL.
    if where:
        sql.append(where and "WHERE " + " AND ".join(where))
    # ORDER BY clause
    order_by = []
    if self._order_by is not None:
        ordering_to_use = self._order_by
    else:
        ordering_to_use = opts.ordering
    for f in handle_legacy_orderlist(ordering_to_use):
        if f == '?':  # Special case: random ordering.
            order_by.append(backend.get_random_function_sql())
        else:
            # A leading '-' requests descending order.
            if f.startswith('-'):
                col_name = f[1:]
                order = "DESC"
            else:
                col_name = f
                order = "ASC"
            if "." in col_name:
                table_prefix, col_name = col_name.split('.', 1)
                table_prefix = backend.quote_name(table_prefix) + '.'
            else:
                # Use the database table as a column prefix if it wasn't given,
                # and if the requested column isn't a custom SELECT.
                if "." not in col_name and col_name not in (self._select or ()):
                    table_prefix = backend.quote_name(opts.db_table) + '.'
                else:
                    table_prefix = ''
            order_by.append('%s%s %s' % (table_prefix, backend.quote_name(orderfield2column(col_name, opts)), order))
    if order_by:
        sql.append("ORDER BY " + ", ".join(order_by))
    # LIMIT and OFFSET clauses
    if self._limit is not None:
        sql.append("%s " % backend.get_limit_offset_sql(self._limit, self._offset))
    else:
        assert self._offset is None, "'offset' is not allowed without 'limit'"
    return select, " ".join(sql), params
0
Example 137
Project: theyworkforyou Source File: query.py
def lookup_inner(path, lookup_type, value, opts, table, column):
    """Resolve one step of a field-lookup path into SQL join/where parts.

    Consumes the first element of ``path``, matching it against the
    current model's m2m, reverse-m2m, reverse-FK, and regular fields,
    then recurses for any remaining path elements.  Returns a
    ``(joins, where, params)`` triple where ``joins`` is a SortedDict
    mapping quoted alias -> (quoted table, join type, condition).
    Raises TypeError when the name cannot be resolved into a field.
    """
    qn = backend.quote_name
    joins, where, params = SortedDict(), [], []
    current_opts = opts
    current_table = table
    current_column = column
    intermediate_table = None
    join_required = False
    name = path.pop(0)
    # Has the primary key been requested? If so, expand it out
    # to be the name of the current class' primary key
    if name is None or name == 'pk':
        name = current_opts.pk.name
    # Try to find the name in the fields associated with the current class.
    # Each successful match raises FieldFound to short-circuit the
    # remaining checks; falling through to ``else`` means no match.
    try:
        # Does the name belong to a defined many-to-many field?
        field = find_field(name, current_opts.many_to_many, False)
        if field:
            new_table = current_table + '__' + name
            new_opts = field.rel.to._meta
            new_column = new_opts.pk.column
            # Need to create an intermediate table join over the m2m table
            # This process hijacks current_table/column to point to the
            # intermediate table.
            current_table = "m2m_" + new_table
            intermediate_table = field.m2m_db_table()
            join_column = field.m2m_reverse_name()
            intermediate_column = field.m2m_column_name()
            raise FieldFound
        # Does the name belong to a reverse defined many-to-many field?
        field = find_field(name, current_opts.get_all_related_many_to_many_objects(), True)
        if field:
            new_table = current_table + '__' + name
            new_opts = field.opts
            new_column = new_opts.pk.column
            # Need to create an intermediate table join over the m2m table.
            # This process hijacks current_table/column to point to the
            # intermediate table.
            current_table = "m2m_" + new_table
            intermediate_table = field.field.m2m_db_table()
            join_column = field.field.m2m_column_name()
            intermediate_column = field.field.m2m_reverse_name()
            raise FieldFound
        # Does the name belong to a one-to-many field?
        field = find_field(name, current_opts.get_all_related_objects(), True)
        if field:
            new_table = table + '__' + name
            new_opts = field.opts
            new_column = field.field.column
            join_column = opts.pk.column
            # 1-N fields MUST be joined, regardless of any other conditions.
            join_required = True
            raise FieldFound
        # Does the name belong to a one-to-one, many-to-one, or regular field?
        field = find_field(name, current_opts.fields, False)
        if field:
            if field.rel:  # One-to-One/Many-to-one field
                new_table = current_table + '__' + name
                new_opts = field.rel.to._meta
                new_column = new_opts.pk.column
                join_column = field.column
                raise FieldFound
            elif path:
                # For regular fields, if there are still items on the path,
                # an error has been made. We munge "name" so that the error
                # properly identifies the cause of the problem.
                name += LOOKUP_SEPARATOR + path[0]
            else:
                raise FieldFound
    except FieldFound:  # Match found, loop has been shortcut.
        pass
    else:  # No match found.
        raise TypeError, "Cannot resolve keyword '%s' into field" % name
    # Check whether an intermediate join is required between current_table
    # and new_table.
    if intermediate_table:
        joins[qn(current_table)] = (
            qn(intermediate_table), "LEFT OUTER JOIN",
            "%s.%s = %s.%s" % (qn(table), qn(current_opts.pk.column), qn(current_table), qn(intermediate_column))
        )
    if path:
        # There are elements left in the path. More joins are required.
        if len(path) == 1 and path[0] in (new_opts.pk.name, None) \
            and lookup_type in ('exact', 'isnull') and not join_required:
            # If the next and final name query is for a primary key,
            # and the search is for isnull/exact, then the current
            # (for N-1) or intermediate (for N-N) table can be used
            # for the search. No need to join an extra table just
            # to check the primary key.
            new_table = current_table
        else:
            # There are 1 or more name queries pending, and we have ruled out
            # any shortcuts; therefore, a join is required.
            joins[qn(new_table)] = (
                qn(new_opts.db_table), "INNER JOIN",
                "%s.%s = %s.%s" % (qn(current_table), qn(join_column), qn(new_table), qn(new_column))
            )
            # If we have made the join, we don't need to tell subsequent
            # recursive calls about the column name we joined on.
            join_column = None
        # There are name queries remaining. Recurse deeper.
        joins2, where2, params2 = lookup_inner(path, lookup_type, value, new_opts, new_table, join_column)
        joins.update(joins2)
        where.extend(where2)
        params.extend(params2)
    else:
        # No elements left in path. Current element is the element on which
        # the search is being performed.
        if join_required:
            # Last query term is a RelatedObject
            if field.field.rel.multiple:
                # RelatedObject is from a 1-N relation.
                # Join is required; query operates on joined table.
                column = new_opts.pk.name
                joins[qn(new_table)] = (
                    qn(new_opts.db_table), "INNER JOIN",
                    "%s.%s = %s.%s" % (qn(current_table), qn(join_column), qn(new_table), qn(new_column))
                )
                current_table = new_table
            else:
                # RelatedObject is from a 1-1 relation,
                # No need to join; get the pk value from the related object,
                # and compare using that.
                column = current_opts.pk.name
        elif intermediate_table:
            # Last query term is a related object from an N-N relation.
            # Join from intermediate table is sufficient.
            column = join_column
        elif name == current_opts.pk.name and lookup_type in ('exact', 'isnull') and current_column:
            # Last query term is for a primary key. If previous iterations
            # introduced a current/intermediate table that can be used to
            # optimize the query, then use that table and column name.
            column = current_column
        else:
            # Last query term was a normal field.
            column = field.column
        where.append(get_where_clause(lookup_type, current_table + '.', column, value))
        params.extend(field.get_db_prep_lookup(lookup_type, value))
    return joins, where, params
0
Example 138
Project: Open-Knesset Source File: iterdumpdata.py
def handle(self, *app_labels, **options):
    """Stream-serialize the requested apps/models to stdout.

    Like Django's dumpdata, but builds a chain of model iterators and
    serializes lazily instead of collecting all objects in memory.
    ``app_labels`` may name whole apps (``"app"``) or single models
    (``"app.Model"``); with no labels, every installed app not excluded
    via the ``exclude`` option is dumped.
    """
    from django.db.models import get_app, get_apps, get_models, get_model
    format = options.get('format','json')
    indent = options.get('indent',None)
    using = options.get('database', DEFAULT_DB_ALIAS)
    # NOTE(review): 'connection' is bound here but not used below --
    # confirm whether it is needed for its side effects.
    connection = connections[using]
    excludes = options.get('exclude',[])
    show_traceback = options.get('traceback', False)
    use_natural_keys = options.get('use_natural_keys', False)
    use_base_manager = options.get('use_base_manager', False)
    excluded_apps = set()
    excluded_models = set()
    # Excludes may be whole apps ("app") or single models ("app.Model").
    for exclude in excludes:
        if '.' in exclude:
            app_label, model_name = exclude.split('.', 1)
            model_obj = get_model(app_label, model_name)
            if not model_obj:
                raise CommandError('Unknown model in excludes: %s' % exclude)
            excluded_models.add(model_obj)
        else:
            try:
                app_obj = get_app(exclude)
                excluded_apps.add(app_obj)
            except ImproperlyConfigured:
                raise CommandError('Unknown app in excludes: %s' % exclude)
    if len(app_labels) == 0:
        # No labels: dump every non-excluded app (None means "all models").
        app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
    else:
        app_list = SortedDict()
        for label in app_labels:
            try:
                app_label, model_label = label.split('.')
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" % app_label)
                if app in excluded_apps:
                    continue
                model = get_model(app_label, model_label)
                if model is None:
                    raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
                if app in app_list.keys():
                    # Append unless the app is already queued in full (None)
                    # or the model is already listed.
                    if app_list[app] and model not in app_list[app]:
                        app_list[app].append(model)
                else:
                    app_list[app] = [model]
            except ValueError:
                # This is just an app - no model qualifier
                app_label = label
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" % app_label)
                if app in excluded_apps:
                    continue
                app_list[app] = None
    # Check that the serialization format exists; this is a shortcut to
    # avoid collating all the objects and _then_ failing.
    if format not in serializers.get_public_serializer_formats():
        raise CommandError("Unknown serialization format: %s" % format)
    try:
        serializers.get_serializer(format)
    except KeyError:
        raise CommandError("Unknown serialization format: %s" % format)
    # Now collate the objects to be serialized.
    objects_chains = []
    # sort_dependencies orders models so foreign-key targets come first.
    for model in sort_dependencies(app_list.items()):
        if model in excluded_models:
            continue
        if not model._meta.proxy and router.allow_syncdb(using, model):
            if use_base_manager:
                objects_chains.append(model._base_manager.using(using).iterator())
            else:
                objects_chains.append(model._default_manager.using(using).iterator())
    try:
        # chain() keeps serialization lazy -- one iterator at a time.
        return serializers.serialize(format, chain(*objects_chains), indent=indent,
            use_natural_keys=use_natural_keys, stream=sys.stdout)
    except Exception, e:
        if show_traceback:
            raise
        raise CommandError("Unable to serialize database: %s" % e)
0
Example 139
Project: openode Source File: util.py
def get_enabled_major_login_providers():
    """returns a dictionary with data about login providers
    whose icons are to be shown in large format
    disabled providers are excluded
    items of the dictionary are dictionaries with keys:
    * name
    * display_name
    * icon_media_path (relative to /media directory)
    * type (oauth|openid-direct|openid-generic|openid-username|password)
    Fields dependent on type of the login provider type
    ---------------------------------------------------
    Password (type = password) - login provider using login name and password:
    * extra_token_name - a phrase describing what the login name and the
      password are from
    * create_password_prompt - a phrase prompting to create an account
    * change_password_prompt - a phrase prompting to change password
    OpenID (type = openid) - Provider of login using the OpenID protocol
    * openid_endpoint (required for type=openid|openid-username)
      for type openid-username - the string must have %(username)s
      format variable, plain string url otherwise
    * extra_token_name - required for type=openid-username
      describes name of required extra token - e.g. "XYZ user name"
    OAuth2 (type = oauth)
    * request_token_url - url to initiate OAuth2 protocol with the resource
    * access_token_url - url to access users data on the resource via OAuth2
    * authorize_url - url at which user can authorize the app to access a resource
    * authenticate_url - url to authenticate user (lower privilege than authorize)
    * get_user_id_function - a function that returns user id from data dictionary
      containing: response to the access token url & consumer_key
      and consumer secret. The purpose of this function is to hide the differences
      between the ways user id is accessed from the different OAuth providers
    """
    # Providers are inserted in display order; SortedDict preserves it.
    # Keyed providers (facebook/twitter/identi.ca/linkedin) are only added
    # when their API key+secret settings are configured.
    data = SortedDict()
    if use_password_login():
        site_name = openode_settings.APP_SHORT_NAME
        prompt = _('%(site)s user name and password') % {'site': site_name}
        data['local'] = {
            'name': 'local',
            'display_name': site_name,
            'extra_token_name': prompt,
            'type': 'password',
            'create_password_prompt': _('Create a password-protected account'),
            'change_password_prompt': _('Change your password'),
            'icon_media_path': openode_settings.LOCAL_LOGIN_ICON,
            'password_changeable': True
        }
    if openode_settings.FACEBOOK_KEY and openode_settings.FACEBOOK_SECRET:
        data['facebook'] = {
            'name': 'facebook',
            'display_name': 'Facebook',
            'type': 'facebook',
            'icon_media_path': '/jquery-openid/images/facebook.gif',
        }
    if openode_settings.TWITTER_KEY and openode_settings.TWITTER_SECRET:
        data['twitter'] = {
            'name': 'twitter',
            'display_name': 'Twitter',
            'type': 'oauth',
            'request_token_url': 'https://api.twitter.com/oauth/request_token',
            'access_token_url': 'https://api.twitter.com/oauth/access_token',
            'authorize_url': 'https://api.twitter.com/oauth/authorize',
            'authenticate_url': 'https://api.twitter.com/oauth/authenticate',
            'get_user_id_url': 'https://twitter.com/account/verify_credentials.json',
            'icon_media_path': '/jquery-openid/images/twitter.gif',
            'get_user_id_function': lambda data: data['user_id'],
        }
    # Provider-specific user-id fetchers: each makes an authenticated
    # OAuth request to the provider's verify/profile endpoint.
    def get_identica_user_id(data):
        consumer = oauth.Consumer(data['consumer_key'], data['consumer_secret'])
        token = oauth.Token(data['oauth_token'], data['oauth_token_secret'])
        client = oauth.Client(consumer, token=token)
        url = 'https://identi.ca/api/account/verify_credentials.json'
        response, content = client.request(url, 'GET')
        json = simplejson.loads(content)
        return json['id']
    if openode_settings.IDENTICA_KEY and openode_settings.IDENTICA_SECRET:
        data['identi.ca'] = {
            'name': 'identi.ca',
            'display_name': 'identi.ca',
            'type': 'oauth',
            'request_token_url': 'https://identi.ca/api/oauth/request_token',
            'access_token_url': 'https://identi.ca/api/oauth/access_token',
            'authorize_url': 'https://identi.ca/api/oauth/authorize',
            'authenticate_url': 'https://identi.ca/api/oauth/authorize',
            'icon_media_path': '/jquery-openid/images/identica.png',
            'get_user_id_function': get_identica_user_id,
        }
    def get_linked_in_user_id(data):
        consumer = oauth.Consumer(data['consumer_key'], data['consumer_secret'])
        token = oauth.Token(data['oauth_token'], data['oauth_token_secret'])
        client = oauth.Client(consumer, token=token)
        url = 'https://api.linkedin.com/v1/people/~:(first-name,last-name,id)'
        response, content = client.request(url, 'GET')
        # LinkedIn returns XML; pull the <id> element out of the body.
        if response['status'] == '200':
            id_re = re.compile(r'<id>([^<]+)</id>')
            matches = id_re.search(content)
            if matches:
                return matches.group(1)
        raise OAuthError()
    if openode_settings.SIGNIN_WORDPRESS_SITE_ENABLED and openode_settings.WORDPRESS_SITE_URL:
        data['wordpress_site'] = {
            'name': 'wordpress_site',
            'display_name': 'Self hosted wordpress blog',  # need to be added as setting.
            'icon_media_path': openode_settings.WORDPRESS_SITE_ICON,
            'type': 'wordpress_site',
        }
    if openode_settings.LINKEDIN_KEY and openode_settings.LINKEDIN_SECRET:
        data['linkedin'] = {
            'name': 'linkedin',
            'display_name': 'LinkedIn',
            'type': 'oauth',
            'request_token_url': 'https://api.linkedin.com/uas/oauth/requestToken',
            'access_token_url': 'https://api.linkedin.com/uas/oauth/accessToken',
            'authorize_url': 'https://www.linkedin.com/uas/oauth/authorize',
            'authenticate_url': 'https://www.linkedin.com/uas/oauth/authenticate',
            'icon_media_path': '/jquery-openid/images/linkedin.gif',
            'get_user_id_function': get_linked_in_user_id
        }
    # Google (direct OpenID) and generic OpenID are added unconditionally.
    data['google'] = {
        'name': 'google',
        'display_name': 'Google',
        'type': 'openid-direct',
        'icon_media_path': '/jquery-openid/images/google.gif',
        'openid_endpoint': 'https://www.google.com/accounts/o8/id',
    }
    # data['yahoo'] = {
    #     'name': 'yahoo',
    #     'display_name': 'Yahoo',
    #     'type': 'openid-direct',
    #     'icon_media_path': '/jquery-openid/images/yahoo.gif',
    #     'tooltip_text': _('Sign in with Yahoo'),
    #     'openid_endpoint': 'http://yahoo.com',
    # }
    # data['aol'] = {
    #     'name': 'aol',
    #     'display_name': 'AOL',
    #     'type': 'openid-username',
    #     'extra_token_name': _('AOL screen name'),
    #     'icon_media_path': '/jquery-openid/images/aol.gif',
    #     'openid_endpoint': 'http://openid.aol.com/%(username)s'
    # }
    data['openid'] = {
        'name': 'openid',
        'display_name': 'OpenID',
        'type': 'openid-generic',
        'extra_token_name': _('OpenID url'),
        'icon_media_path': '/jquery-openid/images/openid.gif',
        'openid_endpoint': None,
    }
    return filter_enabled_providers(data)
0
Example 140
Project: openode Source File: util.py
def get_enabled_minor_login_providers():
    """Return the enabled small-button login providers.

    Same contract as ``get_enabled_major_login_providers`` -- a mapping of
    provider name to a provider-data dictionary -- but for providers
    rendered with small icons.  Every candidate provider is currently
    disabled, so the starting dictionary is empty; it is still passed
    through ``filter_enabled_providers`` so the return value has the same
    shape as the major-provider list.
    """
    # Historical openid-username providers (myopenid, flickr, technorati,
    # wordpress, blogger, livejournal, claimid, vidoop, verisign) were
    # taken out of rotation.  To reinstate one, add an entry here in the
    # same format as used by get_enabled_major_login_providers.
    providers = SortedDict()
    return filter_enabled_providers(providers)
0
Example 141
Project: openode Source File: send_email_alerts.py
def get_updated_threads_for_user(self, user):
    """
    Retrieve relevant question updates for the user
    according to their subscriptions and recorded question
    views.

    Returns a SortedDict mapping each question post to a meta_data
    dict (counts of new revisions/answers/comments/mentions, plus a
    'skip' flag the caller uses to decide whether the question goes
    into the email report).  Returns a plain empty dict when no feed
    is due for sending.
    """
    # Feeds with frequency 'n' (never) and 'i' (instant) are excluded:
    # this command only builds periodic digests.
    user_feeds = EmailFeedSetting.objects.filter(
        subscriber=user
    ).exclude(
        frequency__in=('n', 'i')
    )
    should_proceed = False
    for feed in user_feeds:
        if feed.should_send_now() == True:
            should_proceed = True
            break
    #shortcircuit - if there is no ripe feed to work on for this user
    if should_proceed == False:
        return {}
    #these are placeholders for separate query sets per question group
    #there are four groups - one for each EmailFeedSetting.feed_type
    #and each group has subtypes A and B
    #that's because of the strange thing commented below
    #see note on Q and F objects marked with todo tag
    q_sel_A = None
    q_sel_B = None
    q_ask_A = None
    q_ask_B = None
    q_ans_A = None
    q_ans_B = None
    q_all_A = None
    q_all_B = None
    #base question query set for this user
    #basic things - not deleted, not closed, not too old
    #not last edited by the same user
    base_qs = Post.objects.get_questions().exclude(
        thread__last_activity_by=user
    ).exclude(
        thread__last_activity_at__lt=user.date_joined  # exclude old stuff
    ).exclude(
        deleted=True
    ).exclude(
        thread__closed=True
    ).order_by('-thread__last_activity_at')
    #todo: for some reason filter on did not work as expected ~Q(viewed__who=user) |
    # Q(viewed__who=user,viewed__when__lt=F('thread__last_activity_at'))
    #returns way more questions than you might think it should
    #so because of that I've created separate query sets Q_set2 and Q_set3
    #plus two separate queries run faster!
    #build two two queries based
    #questions that are not seen by the user at all
    not_seen_qs = base_qs.filter(~Q(thread__viewed__user=user))
    #questions that were seen, but before last modification
    seen_before_last_mod_qs = base_qs.filter(
        Q(
            thread__viewed__user=user,
            thread__viewed__last_visit__lt=F('thread__last_activity_at')
        )
    )
    #shorten variables for convenience
    Q_set_A = not_seen_qs
    Q_set_B = seen_before_last_mod_qs
    for feed in user_feeds:
        if feed.feed_type == 'm_and_c':
            #alerts on mentions and comments are processed separately
            #because comments to questions do not trigger change of last_updated
            #this may be changed in the future though, see
            #http://openode.org/en/question/96/
            continue
        #each group of updates represented by the corresponding
        #query set has it's own cutoff time
        #that cutoff time is computed for each user individually
        #and stored as a parameter "cutoff_time"
        #we won't send email for a given question if an email has been
        #sent after that cutoff_time
        if feed.should_send_now():
            if DEBUG_THIS_COMMAND == False:
                feed.mark_reported_now()
            cutoff_time = feed.get_previous_report_cutoff_time()
            if feed.feed_type == 'q_sel':
                q_sel_A = Q_set_A.filter(thread__followed_by=user)
                q_sel_A.cutoff_time = cutoff_time  # store cutoff time per query set
                q_sel_B = Q_set_B.filter(thread__followed_by=user)
                q_sel_B.cutoff_time = cutoff_time  # store cutoff time per query set
            elif feed.feed_type == 'q_ask':
                q_ask_A = Q_set_A.filter(author=user)
                q_ask_A.cutoff_time = cutoff_time
                q_ask_B = Q_set_B.filter(author=user)
                q_ask_B.cutoff_time = cutoff_time
            elif feed.feed_type == 'q_ans':
                q_ans_A = Q_set_A.filter(thread__posts__author=user, thread__posts__post_type='answer')
                q_ans_A = q_ans_A[:openode_settings.MAX_ALERTS_PER_EMAIL]
                q_ans_A.cutoff_time = cutoff_time
                q_ans_B = Q_set_B.filter(thread__posts__author=user, thread__posts__post_type='answer')
                q_ans_B = q_ans_B[:openode_settings.MAX_ALERTS_PER_EMAIL]
                q_ans_B.cutoff_time = cutoff_time
            elif feed.feed_type == 'q_all':
                q_all_A = user.get_tag_filtered_questions(Q_set_A)
                q_all_B = user.get_tag_filtered_questions(Q_set_B)
                q_all_A = q_all_A[:openode_settings.MAX_ALERTS_PER_EMAIL]
                q_all_B = q_all_B[:openode_settings.MAX_ALERTS_PER_EMAIL]
                q_all_A.cutoff_time = cutoff_time
                q_all_B.cutoff_time = cutoff_time
    #build ordered list questions for the email report
    q_list = SortedDict()
    #todo: refactor q_list into a separate class?
    extend_question_list(q_sel_A, q_list)
    extend_question_list(q_sel_B, q_list)
    #build list of comment and mention responses here
    #it is separate because posts are not marked as changed
    #when people add comments
    #mention responses could be collected in the loop above, but
    #it is inconvenient, because feed_type m_and_c bundles the two
    #also we collect metadata for these here
    try:
        feed = user_feeds.get(feed_type='m_and_c')
        if feed.should_send_now():
            cutoff_time = feed.get_previous_report_cutoff_time()
            # NOTE(review): 'added_at__lt=cutoff_time' keeps only comments
            # *older* than the cutoff — looks inverted for a "new activity"
            # digest; confirm against get_previous_report_cutoff_time()
            # semantics before changing.
            comments = Post.objects.get_comments().filter(
                added_at__lt=cutoff_time,
            ).exclude(
                author=user
            )
            q_commented = list()
            for c in comments:
                post = c.parent
                if post.author != user:
                    continue
                #skip is post was seen by the user after
                #the comment posting time
                q_commented.append(post.get_origin_post())
            extend_question_list(
                q_commented,
                q_list,
                cutoff_time=cutoff_time,
                add_comment=True
            )
            # same '__lt' question applies to mentions below
            mentions = Activity.objects.get_mentions(
                mentioned_at__lt=cutoff_time,
                mentioned_whom=user
            )
            #print 'have %d mentions' % len(mentions)
            #MM = Activity.objects.filter(activity_type = const.TYPE_ACTIVITY_MENTION)
            #print 'have %d total mentions' % len(MM)
            #for m in MM:
            #    print m
            mention_posts = get_all_origin_posts(mentions)
            q_mentions_id = [q.id for q in mention_posts]
            q_mentions_A = Q_set_A.filter(id__in=q_mentions_id)
            q_mentions_A.cutoff_time = cutoff_time
            extend_question_list(q_mentions_A, q_list, add_mention=True)
            q_mentions_B = Q_set_B.filter(id__in=q_mentions_id)
            q_mentions_B.cutoff_time = cutoff_time
            extend_question_list(q_mentions_B, q_list, add_mention=True)
    except EmailFeedSetting.DoesNotExist:
        # user has no 'mentions and comments' feed - nothing to add
        pass
    if user.email_tag_filter_strategy == const.INCLUDE_INTERESTING:
        extend_question_list(q_all_A, q_list)
        extend_question_list(q_all_B, q_list)
    extend_question_list(q_ask_A, q_list, limit=True)
    extend_question_list(q_ask_B, q_list, limit=True)
    extend_question_list(q_ans_A, q_list, limit=True)
    extend_question_list(q_ans_B, q_list, limit=True)
    if user.email_tag_filter_strategy == const.EXCLUDE_IGNORED:
        extend_question_list(q_all_A, q_list, limit=True)
        extend_question_list(q_all_B, q_list, limit=True)
    ctype = ContentType.objects.get_for_model(Post)
    EMAIL_UPDATE_ACTIVITY = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT
    #up to this point we still don't know if emails about
    #collected questions were sent recently
    #the next loop examines activity record and decides
    #for each question, whether it needs to be included or not
    #into the report
    for q, meta_data in q_list.items():
        #this loop edits meta_data for each question
        #so that user will receive counts on new edits new answers, etc
        #and marks questions that need to be skipped
        #because an email about them was sent recently enough
        #also it keeps a record of latest email activity per question per user
        try:
            #todo: is it possible to use content_object here, instead of
            #content type and object_id pair?
            update_info = Activity.objects.get(
                user=user,
                content_type=ctype,
                object_id=q.id,
                activity_type=EMAIL_UPDATE_ACTIVITY
            )
            emailed_at = update_info.active_at
        except Activity.DoesNotExist:
            update_info = Activity(
                user=user,
                content_object=q,
                activity_type=EMAIL_UPDATE_ACTIVITY
            )
            emailed_at = datetime.datetime(1970, 1, 1)  # long time ago
        except Activity.MultipleObjectsReturned:
            raise Exception(
                'server error - multiple question email activities '
                'found per user-question pair'
            )
        cutoff_time = meta_data['cutoff_time']  # cutoff time for the question
        #skip question if we need to wait longer because
        #the delay before the next email has not yet elapsed
        #or if last email was sent after the most recent modification
        if emailed_at > cutoff_time or emailed_at > q.thread.last_activity_at:
            meta_data['skip'] = True
            continue
        #collect info on all sorts of news that happened after
        #the most recent emailing to the user about this question
        q_rev = q.revisions.filter(revised_at__gt=emailed_at)
        q_rev = q_rev.exclude(author=user)
        #now update all sorts of metadata per question
        meta_data['q_rev'] = len(q_rev)
        # a revision stamped with the question's own creation time means
        # the question itself is new, not edited
        if len(q_rev) > 0 and q.added_at == q_rev[0].revised_at:
            meta_data['q_rev'] = 0
            meta_data['new_q'] = True
        else:
            meta_data['new_q'] = False
        new_ans = Post.objects.get_answers(user).filter(
            thread=q.thread,
            added_at__gt=emailed_at,
            deleted=False,
        )
        new_ans = new_ans.exclude(author=user)
        meta_data['new_ans'] = len(new_ans)
        ans_ids = Post.objects.get_answers(user).filter(
            thread=q.thread,
            added_at__gt=emailed_at,
            deleted=False,
        ).values_list(
            'id', flat=True
        )
        ans_rev = PostRevision.objects.filter(post__id__in=ans_ids)
        ans_rev = ans_rev.exclude(author=user).distinct()
        meta_data['ans_rev'] = len(ans_rev)
        comments = meta_data.get('comments', 0)
        mentions = meta_data.get('mentions', 0)
        #print meta_data
        #finally skip question if there are no news indeed
        if len(q_rev) + len(new_ans) + len(ans_rev) + comments + mentions == 0:
            meta_data['skip'] = True
            #print 'skipping'
        else:
            meta_data['skip'] = False
            #print 'not skipping'
            update_info.active_at = datetime.datetime.now()
            if DEBUG_THIS_COMMAND == False:
                update_info.save()  # save question email update activity
    #q_list is actually an ordered dictionary
    #print 'user %s gets %d' % (user.username, len(q_list.keys()))
    #todo: sort question list by update time
    return q_list
Example 142 (0 votes)
Project: openode — Source file: send_email_notifications.py
def get_updated_threads_for_user(self, user):
    """
    Retrieve relevant question updates for the user
    according to their subscriptions and recorded question
    views.

    Notification variant: only the 'q_sel' (followed threads/nodes)
    feed type is active here; the other feed-type branches are kept
    commented out.  Returns a SortedDict mapping each post to a
    meta_data dict, or a plain empty dict when no feed is due.
    """
    # set default language TODO - language per user - add user atribute
    activate(django_settings.LANGUAGE_CODE)
    # feeds with frequency 'n' (never) and 'i' (instant) are excluded:
    # this command only builds periodic digests
    user_feeds = EmailFeedSetting.objects.filter(
        subscriber=user
    ).exclude(
        frequency__in=('n', 'i')
    )
    should_proceed = False
    for feed in user_feeds:
        if feed.should_send_now() == True:
            should_proceed = True
            break
    #shortcircuit - if there is no ripe feed to work on for this user
    if should_proceed == False:
        logging.debug(u'Notification: %s not send - should proceed = False' % user.screen_name)
        return {}
    #these are placeholders for separate query sets per question group
    #there are four groups - one for each EmailFeedSetting.feed_type
    #and each group has subtypes A and B
    #that's because of the strange thing commented below
    #see note on Q and F objects marked with todo tag
    q_sel_A = None
    q_sel_B = None
    # q_ask_A = None
    # q_ask_B = None
    # q_ans_A = None
    # q_ans_B = None
    # q_all_A = None
    # q_all_B = None
    #base question query set for this user
    #basic things - not deleted, not closed, not too old
    #not last edited by the same user
    base_qs = Post.objects.filter(post_type__in=[const.POST_TYPE_DISCUSSION, const.POST_TYPE_QUESTION, const.POST_TYPE_DOCUMENT]).exclude(
        thread__last_activity_by=user
    ).exclude(
        thread__last_activity_at__lt=user.date_joined  # exclude old stuff
    ).exclude(
        deleted=True
    ).exclude(
        thread__closed=True
    ).order_by('-thread__last_activity_at')
    #todo: for some reason filter on did not work as expected ~Q(viewed__who=user) |
    # Q(viewed__who=user,viewed__when__lt=F('thread__last_activity_at'))
    #returns way more questions than you might think it should
    #so because of that I've created separate query sets Q_set2 and Q_set3
    #plus two separate queries run faster!
    #build two two queries based
    #questions that are not seen by the user at all
    not_seen_qs = base_qs.filter(~Q(thread__viewed__user=user))
    #questions that were seen, but before last modification
    seen_before_last_mod_qs = base_qs.filter(
        Q(
            thread__viewed__user=user,
            thread__viewed__last_visit__lt=F('thread__last_activity_at')
        )
    )
    #shorten variables for convenience
    Q_set_A = not_seen_qs
    Q_set_B = seen_before_last_mod_qs
    for feed in user_feeds:
        #each group of updates represented by the corresponding
        #query set has it's own cutoff time
        #that cutoff time is computed for each user individually
        #and stored as a parameter "cutoff_time"
        #we won't send email for a given question if an email has been
        #sent after that cutoff_time
        if feed.should_send_now():
            # NOTE(review): unlike the alerts variant, this marks the feed
            # reported unconditionally (no DEBUG_THIS_COMMAND guard here)
            feed.mark_reported_now()
            cutoff_time = feed.get_previous_report_cutoff_time()
            if feed.feed_type == 'q_sel':
                # the A subset also includes threads whose node is followed;
                # the B subset only checks the thread itself - presumably
                # intentional, but worth confirming
                q_sel_A = Q_set_A.filter(Q(thread__followed_by=user) | Q(thread__node__followed_by=user))
                q_sel_A.cutoff_time = cutoff_time  # store cutoff time per query set
                q_sel_B = Q_set_B.filter(thread__followed_by=user)
                q_sel_B.cutoff_time = cutoff_time  # store cutoff time per query set
                # print q_sel_A, q_sel_B
            # elif feed.feed_type == 'q_ask':
            #     q_ask_A = Q_set_A.filter(author=user)
            #     q_ask_A.cutoff_time = cutoff_time
            #     q_ask_B = Q_set_B.filter(author=user)
            #     q_ask_B.cutoff_time = cutoff_time
            # elif feed.feed_type == 'q_ans':
            #     q_ans_A = Q_set_A.filter(thread__posts__author=user, thread__posts__post_type='answer')
            #     q_ans_A = q_ans_A[:openode_settings.MAX_ALERTS_PER_EMAIL]
            #     q_ans_A.cutoff_time = cutoff_time
            #     q_ans_B = Q_set_B.filter(thread__posts__author=user, thread__posts__post_type='answer')
            #     q_ans_B = q_ans_B[:openode_settings.MAX_ALERTS_PER_EMAIL]
            #     q_ans_B.cutoff_time = cutoff_time
            # elif feed.feed_type == 'q_all':
            #     q_all_A = user.get_tag_filtered_questions(Q_set_A)
            #     q_all_B = user.get_tag_filtered_questions(Q_set_B)
            #     q_all_A = q_all_A[:openode_settings.MAX_ALERTS_PER_EMAIL]
            #     q_all_B = q_all_B[:openode_settings.MAX_ALERTS_PER_EMAIL]
            #     q_all_A.cutoff_time = cutoff_time
            #     q_all_B.cutoff_time = cutoff_time
    #build ordered list questions for the email report
    q_list = SortedDict()
    #todo: refactor q_list into a separate class?
    extend_question_list(q_sel_A, q_list)
    extend_question_list(q_sel_B, q_list)
    #build list of comment responses here
    #it is separate because posts are not marked as changed
    # extend_question_list(q_ask_A, q_list, limit=True)
    # extend_question_list(q_ask_B, q_list, limit=True)
    # extend_question_list(q_ans_A, q_list, limit=True)
    # extend_question_list(q_ans_B, q_list, limit=True)
    # if user.email_tag_filter_strategy == const.EXCLUDE_IGNORED:
    #     extend_question_list(q_all_A, q_list, limit=True)
    #     extend_question_list(q_all_B, q_list, limit=True)
    ctype = ContentType.objects.get_for_model(Post)
    EMAIL_UPDATE_ACTIVITY = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT
    #up to this point we still don't know if emails about
    #collected questions were sent recently
    #the next loop examines activity record and decides
    #for each question, whether it needs to be included or not
    #into the report
    for q, meta_data in q_list.items():
        #this loop edits meta_data for each question
        #so that user will receive counts on new edits new answers, etc
        #and marks questions that need to be skipped
        #because an email about them was sent recently enough
        #also it keeps a record of latest email activity per question per user
        try:
            #todo: is it possible to use content_object here, instead of
            #content type and object_id pair?
            update_info = Activity.objects.get(
                user=user,
                content_type=ctype,
                object_id=q.id,
                activity_type=EMAIL_UPDATE_ACTIVITY
            )
            emailed_at = update_info.active_at
        except Activity.DoesNotExist:
            update_info = Activity(
                user=user,
                content_object=q,
                activity_type=EMAIL_UPDATE_ACTIVITY
            )
            emailed_at = datetime.datetime(1970, 1, 1)  # long time ago
        except Activity.MultipleObjectsReturned:
            raise Exception(
                'server error - multiple question email activities '
                'found per user-question pair'
            )
        cutoff_time = meta_data['cutoff_time']  # cutoff time for the question
        #skip question if we need to wait longer because
        #the delay before the next email has not yet elapsed
        #or if last email was sent after the most recent modification
        if emailed_at > cutoff_time or emailed_at > q.thread.last_activity_at:
            meta_data['skip'] = True
            continue
        #collect info on all sorts of news that happened after
        #the most recent emailing to the user about this question
        q_rev = q.revisions.filter(revised_at__gt=emailed_at).exclude(author=user)
        #now update all sorts of metadata per question
        meta_data['q_rev'] = len(q_rev)
        # a revision stamped with the question's own creation time means
        # the question itself is new, not edited
        if len(q_rev) > 0 and q.added_at == q_rev[0].revised_at:
            meta_data['q_rev'] = 0
            meta_data['new_q'] = True
        else:
            meta_data['new_q'] = False
        new_ans = Post.objects.get_answers(user).filter(
            thread=q.thread,
            added_at__gt=emailed_at,
            deleted=False,
        ).exclude(author=user)
        meta_data['new_ans'] = len(new_ans)
        # revision__gte=2 excludes the initial revision (the post creation
        # itself), counting only genuine edits
        ans_rev = PostRevision.objects.filter(
            post__post_type=const.POST_TYPE_THREAD_POST,
            post__thread=q.thread,
            revised_at__gt=emailed_at,
            post__deleted=False,
            revision__gte=2,
        ).exclude(author=user)
        meta_data['ans_rev'] = len(ans_rev)
        comments = meta_data.get('comments', 0)
        #print meta_data
        #finally skip question if there are no news indeed
        if len(q_rev) + len(new_ans) + len(ans_rev) + comments == 0:
            meta_data['skip'] = True
            #print 'skipping'
        else:
            meta_data['skip'] = False
            #print 'not skipping'
            update_info.active_at = datetime.datetime.now()
            if DEBUG_THIS_COMMAND == False:
                update_info.save()  # save question email update activity
    #q_list is actually an ordered dictionary
    #print 'user %s gets %d' % (user.username, len(q_list.keys()))
    #todo: sort question list by update time
    return q_list
Example 143 (0 votes)
Project: otm-legacy — Source file: dumpdata_iter.py
def handle(self, *app_labels, **options):
    """
    Dump the contents of the selected apps/models as serialized data.

    An iterator-based variant of Django's ``dumpdata`` management
    command: objects are streamed from the database via
    ``.iterator()`` and fed lazily to the serializer instead of being
    collected into a list first.

    ``app_labels`` may be empty (dump everything not excluded),
    ``app_label`` entries, or ``app_label.ModelName`` entries.
    Note: Python 2 / old-style Django app loading (``get_app`` etc.).
    """
    from django.db.models import get_app, get_apps, get_models, get_model
    format = options.get('format', 'json')
    indent = options.get('indent', None)
    using = options.get('database', DEFAULT_DB_ALIAS)
    connection = connections[using]
    excludes = options.get('exclude', [])
    show_traceback = options.get('traceback', False)
    use_natural_keys = options.get('use_natural_keys', False)
    use_base_manager = options.get('use_base_manager', False)
    excluded_apps = set()
    excluded_models = set()
    # an exclude entry with a dot is a model ('app.Model'), otherwise
    # it names a whole app
    for exclude in excludes:
        if '.' in exclude:
            app_label, model_name = exclude.split('.', 1)
            model_obj = get_model(app_label, model_name)
            if not model_obj:
                raise CommandError('Unknown model in excludes: %s' % exclude)
            excluded_models.add(model_obj)
        else:
            try:
                app_obj = get_app(exclude)
                excluded_apps.add(app_obj)
            except ImproperlyConfigured:
                raise CommandError('Unknown app in excludes: %s' % exclude)
    # app_list maps app -> list of models to dump, or None for "all
    # models of the app"; SortedDict preserves the requested order
    if len(app_labels) == 0:
        app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
    else:
        app_list = SortedDict()
        for label in app_labels:
            try:
                app_label, model_label = label.split('.')
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" % app_label)
                if app in excluded_apps:
                    continue
                model = get_model(app_label, model_label)
                if model is None:
                    raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
                if app in app_list.keys():
                    if app_list[app] and model not in app_list[app]:
                        app_list[app].append(model)
                else:
                    app_list[app] = [model]
            except ValueError:
                # This is just an app - no model qualifier
                app_label = label
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" % app_label)
                if app in excluded_apps:
                    continue
                app_list[app] = None
    # Check that the serialization format exists; this is a shortcut to
    # avoid collating all the objects and _then_ failing.
    if format not in serializers.get_public_serializer_formats():
        raise CommandError("Unknown serialization format: %s" % format)
    try:
        serializers.get_serializer(format)
    except KeyError:
        raise CommandError("Unknown serialization format: %s" % format)

    def get_objects():
        # Now collate the objects to be serialized.
        # sort_dependencies orders models so foreign-key targets are
        # dumped before the models that reference them
        for model in sort_dependencies(app_list.items()):
            if model in excluded_models:
                continue
            if not model._meta.proxy and router.allow_syncdb(using, model):
                if use_base_manager:
                    objects = model._base_manager
                else:
                    objects = model._default_manager
                # .iterator() streams rows instead of caching the whole
                # queryset - the point of this command variant
                for obj in objects.using(using).order_by(model._meta.pk.name).iterator():
                    yield obj
    try:
        return serializers.serialize(format, get_objects(), indent=indent,
            use_natural_keys=use_natural_keys, stream=self.stdout)
    except Exception, e:
        if show_traceback:
            raise
        raise CommandError("Unable to serialize database: %s" % e)
Example 144 (0 votes)
Project: nashvegas — Source file: utils.py
def get_sql_for_new_models(apps=None, using=DEFAULT_DB_ALIAS):
    """
    Unashamedly copied and tweaked from django.core.management.commands.syncdb

    Return a list of SQL statements (plus '### New Model: ...' marker
    comments) that would create the tables, pending FK references,
    custom SQL, and indexes for models whose tables do not yet exist
    in the database aliased by ``using``.

    ``apps`` is an optional list of app labels; when omitted, all
    installed apps are considered.  Nothing is executed here - only
    SQL text is collected.  (Python 2 / old-style Django app loading.)
    """
    connection = connections[using]
    # Get a list of already installed *models* so that references work right.
    tables = connection.introspection.table_names()
    seen_models = connection.introspection.installed_models(tables)
    created_models = set()
    pending_references = {}
    if apps:
        apps = [models.get_app(a) for a in apps]
    else:
        apps = models.get_apps()
    # Build the manifest of apps and models that are to be synchronized
    all_models = [
        (app.__name__.split('.')[-2], [
            m
            for m in models.get_models(app, include_auto_created=True)
            if router.allow_syncdb(using, m)
        ])
        for app in apps
    ]

    def model_installed(model):
        # True when the model's table (or its auto-created through
        # table) is NOT yet present in the database
        opts = model._meta
        converter = connection.introspection.table_name_converter
        db_table_in = (converter(opts.db_table) in tables)
        auto_create_in = (
            opts.auto_created and
            converter(opts.auto_created._meta.db_table) in tables
        )
        return not (db_table_in or auto_create_in)
    # Python 2 filter() returns a list here, so each manifest value is
    # iterated multiple times below
    manifest = SortedDict(
        (app_name, filter(model_installed, model_list))
        for app_name, model_list in all_models
    )
    statements = []
    sql = None
    # Pass 1: CREATE TABLE statements and pending FK references
    for app_name, model_list in manifest.items():
        for model in model_list:
            # Create the model's database table, if it doesn't already exist.
            sql, references = connection.creation.sql_create_model(
                model,
                no_style(),
                seen_models
            )
            seen_models.add(model)
            created_models.add(model)
            statements.append("### New Model: %s.%s" % (
                app_name,
                str(model).replace("'>", "").split(".")[-1]
            ))
            # queue up references to other models; emit ALTER statements
            # for any reference whose target table is already known
            for refto, refs in references.items():
                pending_references.setdefault(refto, []).extend(refs)
                if refto in seen_models:
                    sql.extend(
                        connection.creation.sql_for_pending_references(
                            refto,
                            no_style(),
                            pending_references
                        )
                    )
            sql.extend(
                connection.creation.sql_for_pending_references(
                    model,
                    no_style(),
                    pending_references
                )
            )
            statements.extend(sql)
    # Pass 2: per-model custom SQL (initial data files etc.)
    custom_sql = None
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                custom_sql = custom_sql_for_model(
                    model,
                    no_style(),
                    connection
                )
                if custom_sql:
                    statements.extend(custom_sql)
    # Pass 3: index creation statements
    index_sql = None
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                index_sql = connection.creation.sql_indexes_for_model(
                    model,
                    no_style()
                )
                if index_sql:
                    statements.extend(index_sql)
    return statements