Example #1
0
def get_declared_fields(bases, attrs, with_base_fields=True):
    """
    Build an OrderedDict of form field instances declared in 'attrs',
    combined with similar fields found on the base classes in 'bases'.
    Used by both the Form and ModelForm metaclasses.

    If 'with_base_fields' is True, every field from the bases is included;
    otherwise only fields listed in each base's 'declared_fields' attribute
    are used. The distinction matters for ModelForm subclassing.
    Also integrates any additional media definitions.
    """

    warnings.warn(
        "get_declared_fields is deprecated and will be removed in Django 1.9.", RemovedInDjango19Warning, stacklevel=2
    )

    # Pull every Field instance out of 'attrs', keeping declaration order
    # via each field's creation_counter.
    declared = []
    for field_name, obj in list(six.iteritems(attrs)):
        if isinstance(obj, Field):
            declared.append((field_name, attrs.pop(field_name)))
    declared.sort(key=lambda item: item[1].creation_counter)

    # Fields from base classes are prepended, walking the bases in reverse
    # so the final ordering matches declaration order.
    attr_name = 'base_fields' if with_base_fields else 'declared_fields'
    for base in reversed(bases):
        if hasattr(base, attr_name):
            declared = list(six.iteritems(getattr(base, attr_name))) + declared

    return OrderedDict(declared)
Example #2
0
    def __get__(self, instance, owner):
        """Lazily build and cache the combined base_fields mapping."""
        if hasattr(self, '_base_fields'):
            return self._base_fields

        combined = {}
        self._base_fields = combined

        # Construct an empty model to reach the app data container and,
        # through it, the per-app form classes.
        app_container = getattr(owner.ModelForm._meta.model(), owner.app_data_field)

        # Start with every field from the model form itself.
        combined.update(owner.ModelForm.base_fields)

        # Merge in the fields declared by each registered app form.
        for label, opts in six.iteritems(owner.get_app_form_opts()):
            form_class = app_container[label].form_class
            excluded = set(opts.get('exclude', ()))
            wanted = opts.get('fields', None)
            for name, field in six.iteritems(form_class.base_fields):
                # Skip fields not whitelisted or explicitly excluded.
                if wanted is not None and name not in wanted:
                    continue
                if name in excluded:
                    continue
                # Namespace each field under its app label.
                combined['%s.%s' % (label, name)] = field

        return self._base_fields
    def test_bound_forms_with_post_with_bug_tracker_service_and_plan(self):
        """Testing that RepositoryForm binds only the hosting service forms
        matching the posted bug tracker hosting service and plan.
        """
        form = RepositoryForm({
            'name': 'test',
            'bug_tracker_type': 'github',
            'bug_tracker_plan': 'public',
        })

        # Make sure only the relevant forms are bound.
        for hosting_type, bug_forms in six.iteritems(form.bug_tracker_forms):
            for plan_id, bug_form in six.iteritems(bug_forms):
                # Exactly the (github, public) form should be bound.
                self.assertEqual(bug_form.is_bound,
                                 hosting_type == 'github' and
                                 plan_id == 'public')

        # Repository info wasn't set in the form above.
        for hosting_type, repo_forms in six.iteritems(form.repository_forms):
            for plan_id, repo_form in six.iteritems(repo_forms):
                self.assertFalse(repo_form.is_bound)

        # Auth forms are never bound on initialize.
        for hosting_type, auth_form in six.iteritems(form.hosting_auth_forms):
            self.assertFalse(auth_form.is_bound)
    def parse(self, stream, media_type=None, parser_context=None):
        """Convert a JSONAPI payload into plain JSON resource data."""
        content = super(JsonApiParser, self).parse(stream, media_type=media_type, parser_context=parser_context)

        self.view = parser_context.get("view", None)

        if "data" not in content:
            return {}

        serializer = self.view.get_serializer(instance=None)
        fields = serializer.get_fields()

        data = content["data"]
        # Flatten dasherized JSONAPI attributes into underscored keys.
        resource_data = {
            underscore(attr_name): val
            for attr_name, val in six.iteritems(data.pop("attributes", {}))
        }
        relationships = data.pop("relationships", {})
        if data.get("id"):
            resource_data.update(id=data.pop("id"))

        for field_name, field in six.iteritems(fields):
            if dasherize(field_name) not in relationships:
                continue

            related_field = get_serializer(field)

            if isinstance(related_field, (RelatedField, BaseSerializer)):
                # Reduce relationship payloads to their id (or list of ids).
                rel_data = relationships[dasherize(field_name)].get("data")
                if rel_data:
                    if isinstance(rel_data, list):
                        rel_data = [item.get("id") for item in rel_data]
                    else:
                        rel_data = rel_data.get("id")
                resource_data[underscore(field_name)] = rel_data
        return resource_data
Example #5
0
def recursive_dict_filter(d, fields, exclude):
    """Filter the (possibly nested) dict ``d`` by dotted field paths.

    Exactly one of ``fields``/``exclude`` is normally supplied:

    * ``fields``: keep only the listed keys. A dotted path like ``'a.b'``
      keeps key ``'a'`` but recursively filters ``d['a']`` down to ``'b'``.
    * ``exclude``: drop the listed keys; dotted paths recursively drop keys
      inside nested dicts.

    Returns a new dict (``d`` itself when neither argument is given);
    ``d`` is never mutated.
    """
    nested = {}  # top-level key -> list of remaining dotted sub-paths
    if fields:
        keep = []
        for field in fields:
            if '.' in field:
                top, rest = field.split('.', 1)
                if top not in nested:
                    nested[top] = []
                    keep.append(top)
                nested[top].append(rest)
            else:
                keep.append(field)
        ret = {k: v for k, v in d.items() if k in keep}
        for k, subfields in nested.items():
            if k in d:
                # Bug fix: recurse into this function — the original called
                # an undefined name 'recursive_filter'.
                ret[k] = recursive_dict_filter(d[k], subfields, None)
    elif exclude:
        drop = []
        for each in exclude:
            if '.' in each:
                top, rest = each.split('.', 1)
                nested.setdefault(top, []).append(rest)
            else:
                drop.append(each)
        ret = {k: v for k, v in d.items() if k not in drop}
        for k, subexclude in nested.items():
            if k in d:
                ret[k] = recursive_dict_filter(d[k], None, subexclude)
    else:
        ret = d
    return ret
Example #6
0
 def _fetch(self):
     """Load (or lazily create) every registered settings object.

     Caches each settings instance on ``self.__dict__`` keyed by
     ``'<app_label>_<model_name>'`` (and also under the bare app label for
     models named 'settings'), creating unsaved instances for settings
     rows that do not yet exist in the database.
     """
     # Build a select_related() list covering every registered model so the
     # whole settings tree is loaded with a single query.
     select_related = []
     for app_label, classes in iteritems(registered_settings):
         for model_name in classes.keys():
             select_related.append('%s_%s' % (app_label, model_name))
     qs = Root.objects.select_related(*select_related)
     try:
         root = qs[0]
     except IndexError:
         # No Root row yet: create one, then re-run the queryset so the
         # select_related data is populated.
         root, created = qs.get_or_create(defaults={})
         root = qs[0]  # Run select_related again
     for app_label, classes in iteritems(registered_settings):
         for model_name, cls in iteritems(classes):
             f = '%s_%s' % (app_label, model_name)
             try:
                 v = getattr(root, f)
             except cls.DoesNotExist:
                 # Django 1.5+
                 v = None
             if not v:
                 # Doesn't yet exist in the database
                 v = cls(root=root)
             self.__dict__[f] = v
             if model_name == 'settings':
                 self.__dict__[app_label] = v
     self._fetched = True
Example #7
0
        def delete(collector):
            """Delete everything gathered by *collector* one instance at a
            time via ``self._delete()``, sending pre/post_delete signals
            around the whole batch (auto-created models excluded).
            """
            # Fire pre_delete for every collected instance up front.
            for model, obj in collector.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(
                        sender=model, instance=obj, using=using
                    )

            # be compatible with django 1.4.x
            if hasattr(collector, 'fast_deletes'):
                # fast deletes
                for qs in collector.fast_deletes:
                    for instance in qs:
                        self._delete(instance)

            # delete batches
            # be compatible with django>=1.6
            if hasattr(collector, 'batches'):
                for model, batches in six.iteritems(collector.batches):
                    for field, instances in six.iteritems(batches):
                        for instance in instances:
                            self._delete(instance)

            # "delete" instances
            for model, instances in six.iteritems(collector.data):
                for instance in instances:
                    self._delete(instance)

            # send post_delete signals
            for model, obj in collector.instances_with_model():
                if not model._meta.auto_created:
                    signals.post_delete.send(
                        sender=model, instance=obj, using=using
                    )
Example #8
0
    def siteconfig_settings(self, settings):
        """Temporarily set siteconfig settings for a test.

        Subclasses should override this if they want to run a method like
        :py:func:`~djblets.siteconfig.django_settings.apply_django_settings`
        before and after each test.

        Args:
            settings (dict):
                The new siteconfig settings to set.

        Context:
            The current site configuration will contain the new settings for
            this test.
        """
        siteconfig = SiteConfiguration.objects.get_current()

        # Swap in the new values, remembering what each key held before.
        previous_settings = {}
        for key, new_value in six.iteritems(settings):
            previous_settings[key] = siteconfig.get(key)
            siteconfig.set(key, new_value)

        siteconfig.save()

        try:
            yield
        finally:
            # Restore the original values even if the test body failed.
            for key, old_value in six.iteritems(previous_settings):
                siteconfig.set(key, old_value)

            siteconfig.save()
Example #9
0
    def index_model(self, index, fields=None):
        """Create a Django model class for *index*, plus related models for
        its multi-value fields.

        Returns a ``(model, related_models)`` tuple where ``related_models``
        maps each multi-value field name to its generated model.
        """
        model_name = class_name_from_parts(index.name, 'index')

        class Meta:
            apps = self.apps
            app_label = 'indexing'
            db_table = db_table_for_index(index)

        attrs = {'Meta': Meta, '__module__': '__fake__'}

        if fields is None:
            fields = index.fields

        # Single-value fields become ordinary columns on the model itself.
        for field_name, index_field in six.iteritems(fields):
            if not index_field.multi_value:
                attrs[field_name] = db_field_for_index_field(field_name, index_field)

        model = type(model_name, (models.Model, ), attrs)

        # Each multi-value field gets its own related model.
        related_models = {
            field_name: self.index_multi_value_field(index, model, field_name, index_field)
            for field_name, index_field in six.iteritems(fields)
            if index_field.multi_value
        }

        return model, related_models
Example #10
0
def _get_declared_columns(bases, attrs, with_base_columns=True):
    """
    Build a SortedDict of grid column instances declared in 'attrs', plus
    similar columns found on the base classes in 'bases'. Used by the Grid
    metaclass.

    If 'with_base_columns' is True, every column from the bases is included;
    otherwise only columns listed in each base's 'declared_columns' attribute
    are used (useful for ModelForm-style subclassing).
    Also integrates any additional media definitions.
    """
    # Pull every Column instance out of 'attrs', ordered by creation.
    declared = []
    for column_name, obj in list(six.iteritems(attrs)):
        if isinstance(obj, Column):
            declared.append((column_name, attrs.pop(column_name)))
    declared.sort(key=lambda item: item[1]._count)

    # Prepend columns from the bases, walking them in reverse so the final
    # ordering matches declaration order.
    attr_name = 'base_columns' if with_base_columns else 'declared_columns'
    for base in reversed(bases):
        if hasattr(base, attr_name):
            declared = list(six.iteritems(getattr(base, attr_name))) + declared

    return SortedDict(declared)
Example #11
0
File: utils.py Project: 6ft/django
    def prepare_test_settings(self, alias):
        """
        Makes sure the test settings are available in the 'TEST' sub-dictionary.

        Deprecated top-level TEST_* keys on the connection are migrated into
        TEST (with a deprecation warning each), renamed keys are handled via
        TEST_SETTING_RENAMES, and baseline TEST keys are defaulted to None.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)

        test_settings = conn.setdefault('TEST', {})
        # Migrate deprecated top-level TEST_* keys into the TEST dict.
        for key, value in six.iteritems(conn):
            if key.startswith('TEST_'):
                new_key = key[5:]
                new_key = self.TEST_SETTING_RENAMES.get(new_key, new_key)
                if new_key in test_settings:
                    # Both the old and new spellings were supplied: ambiguous.
                    raise ImproperlyConfigured("Connection %s has both %s and TEST[%s] specified." %
                                               (alias, key, new_key))
                warnings.warn("In Django 1.9 the %s connection setting will be moved "
                              "to a %s entry in the TEST setting" % (key, new_key),
                              RemovedInDjango19Warning, stacklevel=2)
                test_settings[new_key] = value
        # Drop the migrated TEST_* keys in a second pass (the dict cannot be
        # mutated while iterating above).
        for key in list(conn.keys()):
            if key.startswith('TEST_'):
                del conn[key]
        # Check that they didn't just use the old name with 'TEST_' removed
        for key, new_key in six.iteritems(self.TEST_SETTING_RENAMES):
            if key in test_settings:
                warnings.warn("Test setting %s was renamed to %s; specified value (%s) ignored" %
                              (key, new_key, test_settings[key]), stacklevel=2)
        # Guarantee the baseline TEST keys always exist.
        for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
            test_settings.setdefault(key, None)
Example #12
0
    def performance_check(self, iterations=100):
        """Tests the network on each pattern and reports activations and winners.

        For every stored pattern: log it, reset all module activations,
        clamp the inputs, run ``iterations`` activate/swap cycles, check
        convergence, then log each module's final activation and winner.
        """
        for pat in self.patterns:
            with printoptions(formatter={"float": "{: 0.2f}".format}, suppress=True):
                for (k, v) in six.iteritems(pat):
                    logger.info("Pattern: {0}: {1}".format(k, v))

            # reset activations
            for mdl in self.modules:
                mdl.reset()

            # set pattern
            for (k, v) in six.iteritems(pat):
                self.inputs[k].r = v

            # activation flow
            for _ in xrange(0, iterations):
                # update activations
                for mdl in self.modules:
                    mdl.activate()
                # presumably double-buffered: swap only after every module
                # has computed its update — TODO confirm.
                for mdl in self.modules:
                    mdl.swap_activations()
            self.check_convergence()

            with printoptions(formatter={"float": "{: 0.2f}".format}, suppress=True):
                for mdl in self.modules:
                    # NOTE(review): "âžž" looks like a UTF-8 arrow ("➞")
                    # mis-decoded as Latin-1 — confirm the intended glyph.
                    logger.info("Module {0}: {1} âžž {2}".format(mdl.name, mdl.r, mdl.winner))
Example #13
0
    def full_clean(self):
        """Clean and validate all form fields.

        Before delegating to the parent implementation, this toggles the
        "required" flag on each cache-location field so that only the field
        matching the selected cache type is validated. The original flags
        are restored afterwards.

        Returns:
            dict:
            The cleaned form data.
        """
        cache_type = (self['cache_type'].data or
                      self.fields['cache_type'].initial)

        # Only the location field for the active cache type is required.
        saved_required = {}
        for candidate_type, field_name in six.iteritems(
                self.CACHE_LOCATION_FIELD_MAP):
            field = self.fields[field_name]
            saved_required[field_name] = field.required
            field.required = (cache_type == candidate_type)

        cleaned_data = super(GeneralSettingsForm, self).full_clean()

        # Put the original "required" flags back.
        for field_name, was_required in six.iteritems(saved_required):
            self.fields[field_name].required = was_required

        return cleaned_data
def delete(self):
    """Collector.delete replacement that maintains time-travel history.

    Before delegating to ``old_delete``, this closes the active history
    records for every row about to be deleted; afterwards it records
    history for rows touched by field updates (presumably SET NULL–style
    on-delete updates — confirm against the caller).
    """
    # Deletes are disallowed while "time travelling".
    if get_tt_ts() is not None:
        raise TimeTravelDBModException()

    ts = get_transaction_start_ts()

    # Close history for rows removed via fast-delete querysets.
    for qs in self.fast_deletes:
        pks = list(qs.values_list('pk', flat=True))
        if hasattr(qs.model, '_tt_model'):
            close_active_records(qs.model, pks, ts)

    # Close history for regularly collected instances.
    for model, objs in six.iteritems(self.data):
        pks = [o.pk for o in objs]
        if hasattr(model, '_tt_model'):
            close_active_records(model, pks, ts)

    old_delete(self)

    # Rows that were updated (not deleted) get a fresh 'U' history record.
    for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
        sets = instances_for_fieldvalues.values()
        objs = set.union(*sets)
        pks = [o.pk for o in objs]
        if hasattr(model, '_tt_model'):
            close_active_records(model, pks, ts)
            history_objs = [create_history_record(model, o, ts, op='U')
                            for o in objs]
            insert_history_records(model, history_objs)
    def __new__(cls, name, bases, attrs):
        """Create the class, merging its Meta options with the parent's
        ``_meta`` dict."""
        meta = attrs.pop('Meta', None)

        new_class = super(DFFMetaclass, cls).__new__(cls, name, bases, attrs)

        inherited = getattr(new_class, '_meta', {})
        if not meta:
            # No Meta on this class: inherit a deep copy of the parent's
            # options untouched.
            setattr(new_class, '_meta', copy.deepcopy(inherited))
            return new_class

        excludes = getattr(meta, '_exclude', ())
        opts = {}
        # Carry over the parent's options, minus anything excluded here.
        # (Leading underscores were already filtered on the super class.)
        for key, value in six.iteritems(inherited):
            if key not in excludes:
                opts[key] = value
        # Layer this Meta's own public attributes on top.
        for key, value in six.iteritems(meta.__dict__):
            if not key.startswith('_') and key not in excludes:
                opts[key] = value
        setattr(new_class, '_meta', opts)
        return new_class
Example #16
0
def set_siteconfig_settings(settings):
    """A context manager to toggle site configuration settings.

    Args:
        settings (dict):
            The new site configuration settings.
    """
    # NOTE(review): this generator is presumably wrapped with
    # contextlib.contextmanager at the decoration/call site — confirm.
    siteconfig = SiteConfiguration.objects.get_current()

    old_settings = {}

    # Apply the new settings, remembering each key's previous value.
    for setting, value in six.iteritems(settings):
        old_settings[setting] = siteconfig.get(setting)
        siteconfig.set(setting, value)

    siteconfig.save()
    load_site_config()

    try:
        yield
    finally:
        # Always restore the original values, even if the body raised.
        for setting, value in six.iteritems(old_settings):
            siteconfig.set(setting, value)

        siteconfig.save()
        load_site_config()
Example #17
0
def get_declared_fields(bases, attrs, with_base_fields=True):
    """
    Create a list of form field instances from the passed in 'attrs', plus any
    similar fields on the base classes (in 'bases'). This is used by both the
    Form and ModelForm metaclasses.

    If 'with_base_fields' is True, all fields from the bases are used.
    Otherwise, only fields in the 'declared_fields' attribute on the bases are
    used. The distinction is useful in ModelForm subclassing.
    Also integrates any additional media definitions.
    """
    # Bug fixes vs. the previous revision: test each attribute value ('obj'),
    # not the 'object' builtin; sort on the tuple element ('x[1]'), not
    # 'BaseException'; branch on the 'with_base_fields' argument (the name
    # 'base_fields' was undefined); look up the 'base_fields' attribute (an
    # identifier cannot contain a hyphen) on each base; and return a
    # populated SortedDict rather than the class object itself.
    fields = [(field_name, attrs.pop(field_name))
              for field_name, obj in list(six.iteritems(attrs))
              if isinstance(obj, Field)]
    fields.sort(key=lambda x: x[1].creation_counter)

    # If this class is subclassing another Form, add that Form's fields.
    # Note that we loop over the bases in *reverse*. This is necessary in
    # order to preserve the correct order of fields.
    if with_base_fields:
        for base in bases[::-1]:
            if hasattr(base, 'base_fields'):
                fields = list(six.iteritems(base.base_fields)) + fields
    else:
        for base in bases[::-1]:
            if hasattr(base, 'declared_fields'):
                fields = list(six.iteritems(base.declared_fields)) + fields

    return SortedDict(fields)
Example #18
0
    def __new__(cls, name, bases, attrs):
        """Collect declared SearchField instances (honouring base classes
        and ``None`` overrides) and attach Meta-derived options."""
        declared = [(attr_name, attrs.pop(attr_name))
                    for attr_name, obj in list(six.iteritems(attrs))
                    if isinstance(obj, SearchField)]
        declared.sort(key=lambda item: item[1].creation_counter)

        # Prepend inherited fields, iterating the bases in reverse to keep
        # declaration order.
        for base in reversed(bases):
            if hasattr(base, 'declared_fields'):
                declared = list(six.iteritems(base.declared_fields)) + declared

        field_dict = dict(declared)

        # Setting an attribute to None shadows (removes) an inherited field.
        for key in list(six.iterkeys(field_dict)):
            if key in attrs and attrs[key] is None:
                del field_dict[key]

        attrs['declared_fields'] = field_dict

        new_class = super(DeclarativeSearchFieldMetaclass, cls).__new__(cls, name, bases, attrs)

        # Build _meta from every Meta class found in the MRO, when an
        # options class is configured.
        OptionsClass = new_class._options_class
        if OptionsClass:
            options_list = [c.Meta for c in new_class.mro() if hasattr(c, 'Meta')]
            new_class._meta = OptionsClass(options_list)

        return new_class
Example #19
0
    def validate_policy(cls, policy):
        """Validate an API policy.

        This will check to ensure that the policy is in a suitable format
        and contains the information required in a format that can be parsed.

        If a failure is found, a ValidationError will be raised describing
        the error and where it was found.
        """
        if not isinstance(policy, dict):
            raise ValidationError(_('The policy must be a JSON object.'))

        if not policy:
            # Empty policies are equivalent to allowing full access. If this
            # is empty, we can stop here.
            return

        if 'resources' not in policy:
            raise ValidationError(
                _('The policy is missing a "resources" section.'))

        resources_section = policy['resources']

        if not isinstance(resources_section, dict):
            raise ValidationError(
                _('The policy\'s "resources" section must be a JSON object.'))

        if not resources_section:
            raise ValidationError(
                _('The policy\'s "resources" section must not be empty.'))

        # A '*' section applies globally; validate it separately.
        if '*' in resources_section:
            cls._validate_policy_section(resources_section, '*',
                                         'resources.*')

        # Every other section is keyed by a resource policy ID.
        resource_policies = [
            (section_name, section)
            for section_name, section in six.iteritems(resources_section)
            if section_name != '*'
        ]

        if resource_policies:
            valid_policy_ids = \
                cls._get_valid_policy_ids(cls.get_root_resource())

            for policy_id, section in resource_policies:
                if policy_id not in valid_policy_ids:
                    raise ValidationError(
                        _('"%s" is not a valid resource policy ID.')
                        % policy_id)

                # Each subsection key must be a string before the section
                # itself can be validated.
                for subsection_name, subsection in six.iteritems(section):
                    if not isinstance(subsection_name, six.text_type):
                        raise ValidationError(
                            _('%s must be a string in "resources.%s"')
                            % (subsection_name, policy_id))

                    cls._validate_policy_section(
                        section, subsection_name,
                        'resources.%s.%s' % (policy_id, subsection_name))
def _save_the_change_save_hook(instance, *args, **kwargs):
	"""
	Restricts :meth:`~django.db.models.Model.save` to only the changed \
	fields via ``update_fields``.
	
	``update_fields`` is only filled in when it is safe to do so: the
	instance must already exist in the database, must not be saved under a
	new primary key or with ``force_insert``, and the caller must not have
	supplied ``update_fields`` themselves.
	
	:return: (continue_saving, args, kwargs)
	:rtype: :class:`tuple`
	
	"""
	
	safe_to_restrict = (
		not instance._state.adding and
		hasattr(instance, '_changed_fields') and
		hasattr(instance, '_mutable_fields') and
		'update_fields' not in kwargs and
		not kwargs.get('force_insert', False) and
		instance._meta.pk.attname not in instance._changed_fields
	)
	if not safe_to_restrict:
		return (True, args, kwargs)
	
	changed = [name for name, value in six.iteritems(instance._changed_fields)]
	mutated = [
		name for name, old_value in six.iteritems(instance._mutable_fields)
		if hasattr(instance, name) and getattr(instance, name) != old_value
	]
	kwargs['update_fields'] = changed + mutated
	
	# Skip the save entirely when nothing at all has changed.
	return (bool(kwargs['update_fields']), args, kwargs)
Example #21
0
    def __init__(cls, name, bases, attrs):
        """Wire up Attribute descriptors, subelements, and cached metadata
        on a newly created element class."""
        if name == "NewBase":
            # do nothing, since this is an object generated by
            # six.with_metaclass()
            return

        # Set up attributes
        def attribute_property(name, obj):
            # Build a property proxying _attribute_data[name]: assignment
            # requires a string and runs obj.validate(); deletion (reset to
            # None) is only permitted for optional attributes.
            def fget(s):
                return s._attribute_data[name]
            def fset(s, v):
                if not isinstance(v, six.string_types):
                    raise ValidationError
                obj.validate(v)
                s._attribute_data[name] = v
            if obj.optional:
                def fdel(s):
                    s._attribute_data[name] = None
            else:
                fdel = None
            return property(fget, fset, fdel, obj.__doc__)
        attributes = dict(a for a in attrs.items() if isinstance(a[1], Attribute))
        for a, o in attributes.items():
            setattr(cls, a, attribute_property(a, o))
        # Merge inherited attributes, bases first so this class wins.
        cls.attributes = {}
        for base in reversed(bases):
            if hasattr(base, "attributes"):
                cls.attributes.update(base.attributes)
        cls.attributes.update(attributes)

        # Set up subelements
        subelements = [s for s in attrs.items() if _is_element(s[1])]
        subelements.sort(key=lambda s: s[1].creation_counter)
        subelements = SortedDict(subelements)
        # Drop the class-level placeholders; access goes through
        # cls.subelements instead.
        for subelement in subelements:
            delattr(cls, subelement)
        cls.subelements = SortedDict()
        for base in reversed(bases):
            if hasattr(base, "subelements"):
                cls.subelements.update(base.subelements)
        cls.subelements.update(subelements)

        # Precalculate a few things
        cls._required_attributes = {
            name_
            for name_, attribute in six.iteritems(cls.attributes)
            if not getattr(attribute, "optional", False)
        }
        cls._required_subelements = {
            name_
            for name_, subelement in six.iteritems(cls.subelements)
            if not getattr(subelement, "optional", False)
        }
        # Map fully-qualified attribute names back to their local names.
        cls._attributes_by_fqn = {
            (attribute.fqn or name_): name_
            for name_, attribute in six.iteritems(cls.attributes)
        }

        super(ElementMetaclass, cls).__init__(name, bases, attrs)
Example #22
0
File: status.py Project: DMOJ/site
def version_matrix(request):
    """Render the judge/runtime version matrix page.

    Judges whose runtime version lists are identical are collapsed into a
    single column named after their common group (the judge-name prefix
    before the last '.').
    """
    matrix = defaultdict(partial(defaultdict, LatestList))
    latest = defaultdict(list)
    groups = defaultdict(list)

    judges = {judge.id: judge.name for judge in Judge.objects.filter(online=True)}
    languages = Language.objects.all()

    # matrix[judge_id][language_id] -> runtimes, in priority order.
    for runtime in RuntimeVersion.objects.filter(judge__online=True).order_by('priority'):
        if runtime.version:
            matrix[runtime.judge_id][runtime.language_id].append(runtime)

    # Group judges by name prefix (text before the last '.'), falling back
    # to the full name when there is no dot.
    for judge, data in six.iteritems(matrix):
        name_tuple = judges[judge].rpartition('.')
        groups[name_tuple[0] or name_tuple[-1]].append((judges[judge], data))

    matrix = {}
    for group, data in six.iteritems(groups):
        if len(data) == 1:
            judge, data = data[0]
            matrix[judge] = data
            continue

        # Union-find-style clustering: ds[j] = i marks judge j's versions
        # as matching judge i's; size[i] counts the members represented.
        ds = list(range(len(data)))
        size = [1] * len(data)
        for i, (p, x) in enumerate(data):
            if ds[i] != i:
                continue
            for j, (q, y) in enumerate(data):
                if i != j and compare_version_list(x, y):
                    ds[j] = i
                    size[i] += 1
                    size[j] = 0

        # The largest cluster is shown under the group name; any judge not
        # in that cluster keeps its own column.
        rep = max(range(len(data)), key=size.__getitem__)
        matrix[group] = data[rep][1]
        for i, (j, x) in enumerate(data):
            if ds[i] != rep:
                matrix[j] = x

    # First pass: compute the newest parsed version list per language...
    for data in six.itervalues(matrix):
        for language, versions in six.iteritems(data):
            versions.versions = [version.parse(runtime.version) for runtime in versions]
            if versions.versions > latest[language]:
                latest[language] = versions.versions

    # ...second pass: flag the cells that match it.
    for data in six.itervalues(matrix):
        for language, versions in six.iteritems(data):
            versions.is_latest = versions.versions == latest[language]

    languages = sorted(languages, key=lambda lang: version.parse(lang.name))
    return render(request, 'status/versions.html', {
        'title': _('Version matrix'),
        'judges': sorted(matrix.keys()),
        'languages': languages,
        'matrix': matrix,
    })
def registered_mails_names():
    """Yield ``(key, class name)`` pairs for every registered mail.

    Prefers the newer ``factory._registry`` mapping when present, falling
    back to the legacy ``factory.mail_map``.
    """
    if hasattr(factory, '_registry'):
        mapping = factory._registry
    else:
        mapping = factory.mail_map
    for key, mail_class in six.iteritems(mapping):
        yield key, mail_class.__name__
Example #24
0
    def iterator(self):
        """
        Override to return the actual objects, not the GM2MObject.

        Fetches the actual objects grouped by content type to optimize
        database access.
        """

        try:
            # Django 1.9
            if self._iterable_class is not query.ModelIterable:
                for v in super(GM2MTgtQuerySet, self).iterator():
                    yield v
                # Bug fix: 'raise StopIteration' inside a generator becomes
                # a RuntimeError under PEP 479 (Python 3.7+); a bare return
                # ends the generator identically on all versions.
                return
        except AttributeError:
            # Django 1.8 (no _iterable_class attribute)
            pass

        # When this attribute is set we are being driven by a related-object
        # prefetch; note it and clear the flag.
        try:
            del self._related_prefetching
            rel_prefetching = True
        except AttributeError:
            rel_prefetching = False

        # ct_attrs[content_type_id][pk] -> list of extra-select value tuples
        ct_attrs = defaultdict(lambda: defaultdict(lambda: []))
        field_names = self.model._meta._field_names

        extra_select = list(self.query.extra_select)

        for vl in self.values_list(field_names['tgt_ct'],
                                   field_names['tgt_fk'],
                                   *extra_select):
            ct = vl[0]
            pk = vl[1]
            ct_attrs[ct][pk].append(vl[2:])

        # Bulk-fetch the target objects per content type.
        for ct, attrs in six.iteritems(ct_attrs):
            for pk, obj in six.iteritems(
                ct_classes.ContentType.objects.get_for_id(ct).model_class()
                                     ._default_manager.in_bulk(attrs.keys())):

                # we store the through model id in case we are in the process
                # of fetching related objects
                for i, k in enumerate(extra_select):
                    e_list = []
                    for e in attrs[str(pk)]:
                        e_list.append(e[i])
                    setattr(obj, k, e_list)

                if rel_prefetching:
                    # when prefetching related objects, one must yield one
                    # object per through model instance
                    for __ in attrs[str(pk)]:
                        yield obj
                    continue

                yield obj
Example #25
0
 def load_weights(self, name):
     """Loads weights from file with given name, located in data directory.

     The JSON file maps destination-module name -> {source-module name ->
     weights}; json_numpy_obj_hook presumably restores encoded numpy
     arrays — confirm against the serializer.
     """
     weight_file = os.path.join(self.data_dir, name)
     with open(weight_file) as f:
         weight_data = json.load(f, object_hook=json_numpy_obj_hook)
     if weight_data:
         # Apply each connection's weights to its destination module.
         for (to_mdl_name, conns) in six.iteritems(weight_data):
             to_mdl = self.module_with_name(to_mdl_name)
             for (from_mdl_name, weights) in six.iteritems(conns):
                 from_mdl = self.module_with_name(from_mdl_name)
                 to_mdl.set_weights(from_mdl, weights)
Example #26
0
 def errors(self):
     """Combine and cache errors from the model form and every app form,
     prefixing app-form field errors with their form label."""
     if hasattr(self, '_errors'):
         return self._errors

     merged = self.model_form.errors.copy()
     self._errors = merged
     for label, form in six.iteritems(self.app_forms):
         for field, messages in six.iteritems(form.errors):
             if field == NON_FIELD_ERRORS:
                 # Non-field errors from all forms are pooled together.
                 merged.setdefault(field, self.model_form.error_class()).extend(messages)
             else:
                 merged['%s.%s' % (label, field)] = messages
     return self._errors
Example #27
0
def save_properties(properties, args, parser):
    """Persist per-user properties through the backend.

    When ``args.overwrite_properties`` is set, each user's whole property set
    is replaced. Otherwise properties are created one by one, and any that
    already exist are reported and left untouched.
    """
    overwrite = args.overwrite_properties
    for user, props in properties.items():
        if overwrite:
            backend.set_properties(user=user, properties=props)
            continue
        for key, value in props.items():
            try:
                backend.create_property(user=user, key=key, value=value)
            except PropertyExists:
                print('%s: Property "%s" already exists.' % (user, key))
Example #28
0
 def test_non_default_encoding(self):
     """#13572 - QueryDict with a non-default encoding"""
     enc = 'iso-8859-15'
     expected_items = [('cur', '€')]
     q = QueryDict(str('cur=%A4'), encoding=enc)
     self.assertEqual(q.encoding, enc)
     self.assertEqual(list(six.iteritems(q)), expected_items)
     self.assertEqual(q.urlencode(), 'cur=%A4')
     # Copies must inherit the source QueryDict's encoding.
     q = q.copy()
     self.assertEqual(q.encoding, enc)
     self.assertEqual(list(six.iteritems(q)), expected_items)
     self.assertEqual(q.urlencode(), 'cur=%A4')
     self.assertEqual(copy.copy(q).encoding, enc)
     self.assertEqual(copy.deepcopy(q).encoding, enc)
Example #29
0
 def test_non_default_encoding(self):
     """#13572 - QueryDict with a non-default encoding"""
     def check(qd):
         # Encoding, decoded items and round-tripped urlencode must all
         # reflect the iso-8859-15 encoding.
         self.assertEqual(qd.encoding, "iso-8859-15")
         self.assertEqual(list(six.iteritems(qd)), [("cur", "€")])
         self.assertEqual(qd.urlencode(), "cur=%A4")

     q = QueryDict(str("cur=%A4"), encoding="iso-8859-15")
     check(q)
     # A mutable copy keeps the encoding.
     q = q.copy()
     check(q)
     self.assertEqual(copy.copy(q).encoding, "iso-8859-15")
     self.assertEqual(copy.deepcopy(q).encoding, "iso-8859-15")
Example #30
0
	def save(self, *args, **kwargs):
		"""
		Builds and passes the ``update_fields`` kwarg to Django so that only
		fields known to have changed are written back.
		
		"""
		
		track_changes = (
			self.pk and
			hasattr(self, '_changed_fields') and
			hasattr(self, '_mutable_fields') and
			'update_fields' not in kwargs and
			not kwargs.get('force_insert', False)
		)
		
		if track_changes:
			# Fields explicitly recorded as changed...
			changed = list(self._changed_fields)
			# ...plus mutable fields whose current value differs from the
			# value captured when the instance was loaded.
			mutated = [
				name for name, original in six.iteritems(self._mutable_fields)
				if hasattr(self, name) and getattr(self, name) != original
			]
			kwargs['update_fields'] = changed + mutated
		
		super(SaveTheChange, self).save(*args, **kwargs)
Example #31
0
    def query_data_post_etag(self):
        """Perform remaining queries for the page.

        This method will populate everything else needed for the display of the
        review request page other than that which was required to compute the
        ETag.
        """
        self.reviews_by_id = self._build_id_map(self.reviews)

        # Maps a parent review ID to the replies made to its body-top /
        # body-bottom fields; filled in while walking self.reviews below.
        self.body_top_replies = defaultdict(list)
        self.body_bottom_replies = defaultdict(list)
        self.latest_timestamps_by_review_id = {}

        # Cross-link each status update and its backing review so that
        # reaching either from the other needs no further query.
        for status_update in self.status_updates:
            if status_update.review_id is not None:
                review = self.reviews_by_id[status_update.review_id]
                review.status_update = status_update
                status_update.review = review

        for r in self.reviews:
            # Pre-seed the reply caches so reviews without replies don't
            # trigger lookups later.
            r._body_top_replies = []
            r._body_bottom_replies = []

            if r.body_top_reply_to_id is not None:
                self.body_top_replies[r.body_top_reply_to_id].append(r)

            if r.body_bottom_reply_to_id is not None:
                self.body_bottom_replies[r.body_bottom_reply_to_id].append(r)

            # Find the latest reply timestamp for each top-level review.
            parent_id = r.base_reply_to_id

            if parent_id is not None:
                # Timestamps are compared as UTC-aware values; assumes the
                # stored timestamps are naive UTC — TODO confirm.
                new_timestamp = r.timestamp.replace(tzinfo=utc)

                if parent_id in self.latest_timestamps_by_review_id:
                    old_timestamp = \
                        self.latest_timestamps_by_review_id[parent_id]

                    if old_timestamp < new_timestamp:
                        self.latest_timestamps_by_review_id[parent_id] = \
                            new_timestamp
                else:
                    self.latest_timestamps_by_review_id[parent_id] = \
                        new_timestamp

            # We've already attached all the status updates above, but
            # any reviews that don't have status updates can still result
            # in a query. We want to null those out.
            if not hasattr(r, '_status_update_cache'):
                r._status_update_cache = None

        # Link up all the review body replies.
        for reply_id, replies in six.iteritems(self.body_top_replies):
            self.reviews_by_id[reply_id]._body_top_replies = reversed(replies)

        for reply_id, replies in six.iteritems(self.body_bottom_replies):
            self.reviews_by_id[reply_id]._body_bottom_replies = \
                reversed(replies)

        # Prefer the draft (when present) for everything that follows.
        self.review_request_details = self.draft or self.review_request

        # Get all the file attachments and screenshots.
        #
        # Note that we fetch both active and inactive file attachments and
        # screenshots. We do this because even though they've been removed,
        # they still will be rendered in change descriptions.
        self.active_file_attachments = \
            list(self.review_request_details.get_file_attachments())
        self.all_file_attachments = (
            self.active_file_attachments +
            list(self.review_request_details.get_inactive_file_attachments()))
        self.file_attachments_by_id = \
            self._build_id_map(self.all_file_attachments)

        for attachment in self.all_file_attachments:
            attachment._comments = []

        self.active_screenshots = \
            list(self.review_request_details.get_screenshots())
        self.all_screenshots = (
            self.active_screenshots +
            list(self.review_request_details.get_inactive_screenshots()))
        self.screenshots_by_id = self._build_id_map(self.all_screenshots)

        for screenshot in self.all_screenshots:
            screenshot._comments = []

        review_ids = self.reviews_by_id.keys()

        self.comments = []
        self.issues = []
        self.issue_counts = {
            'total': 0,
            'open': 0,
            'resolved': 0,
            'dropped': 0,
        }

        # Gather every comment type; only diff comments need an explicit
        # display ordering.
        for model, key, ordering in ((Comment, 'diff_comments',
                                      ('comment__filediff',
                                       'comment__first_line',
                                       'comment__timestamp')),
                                     (ScreenshotComment, 'screenshot_comments',
                                      None), (FileAttachmentComment,
                                              'file_attachment_comments',
                                              None), (GeneralComment,
                                                      'general_comments',
                                                      None)):
            # Due to mistakes in how we initially made the schema, we have a
            # ManyToManyField in between comments and reviews, instead of
            # comments having a ForeignKey to the review. This makes it
            # difficult to easily go from a comment to a review ID.
            #
            # The solution to this is to not query the comment objects, but
            # rather the through table. This will let us grab the review and
            # comment in one go, using select_related.
            related_field = model.review.related.field
            comment_field_name = related_field.m2m_reverse_field_name()
            through = related_field.rel.through
            q = through.objects.filter(review__in=review_ids).select_related()

            if ordering:
                q = q.order_by(*ordering)

            objs = list(q)

            # We do two passes. One to build a mapping, and one to actually
            # process comments.
            comment_map = {}

            for obj in objs:
                comment = getattr(obj, comment_field_name)
                comment._type = key
                comment._replies = []
                comment_map[comment.pk] = comment

            for obj in objs:
                comment = getattr(obj, comment_field_name)

                self.comments.append(comment)

                # Short-circuit some object fetches for the comment by setting
                # some internal state on them.
                assert obj.review_id in self.reviews_by_id
                review = self.reviews_by_id[obj.review_id]
                comment.review_obj = review
                comment._review = review
                comment._review_request = self.review_request

                # If the comment has an associated object (such as a file
                # attachment) that we've already fetched, attach it to prevent
                # future queries.
                if isinstance(comment, FileAttachmentComment):
                    attachment_id = comment.file_attachment_id
                    f = self.file_attachments_by_id[attachment_id]
                    comment.file_attachment = f
                    f._comments.append(comment)

                    diff_against_id = comment.diff_against_file_attachment_id

                    if diff_against_id is not None:
                        f = self.file_attachments_by_id[diff_against_id]
                        comment.diff_against_file_attachment = f
                elif isinstance(comment, ScreenshotComment):
                    screenshot = self.screenshots_by_id[comment.screenshot_id]
                    comment.screenshot = screenshot
                    screenshot._comments.append(comment)

                # We've hit legacy database cases where there were entries that
                # weren't a reply, and were just orphaned. Ignore them.
                if review.is_reply() and comment.is_reply():
                    replied_comment = comment_map[comment.reply_to_id]
                    replied_comment._replies.append(comment)

                # Only comments on public reviews feed the issue lists and
                # per-status counters.
                if review.public and comment.issue_opened:
                    status_key = \
                        comment.issue_status_to_string(comment.issue_status)
                    self.issue_counts[status_key] += 1
                    self.issue_counts['total'] += 1
                    self.issues.append(comment)
Example #32
0
    def _spatial_attribute(self,
                           att,
                           settings,
                           field_name=None,
                           model_att=None):
        """
        DRY routine for calling a spatial stored procedure on a geometry column
        and attaching its output as an attribute of the model.

        Arguments:
         att:
          The name of the spatial attribute that holds the spatial
          SQL function to call.

         settings:
          Dictionary of internal settings to customize for the spatial
          procedure.  NOTE: mutated in place (defaults are filled in and
          'procedure_fmt' / 'select_params' may be rewritten below).

        Public Keyword Arguments:

         field_name:
          The name of the geographic field to call the spatial
          function on.  May also be a lookup to a geometry field
          as part of a foreign key relation.

         model_att:
          The name of the model attribute to attach the output of
          the spatial function to.
        """
        # Default settings.
        settings.setdefault('desc', None)
        settings.setdefault('geom_args', ())
        settings.setdefault('geom_field', None)
        settings.setdefault('procedure_args', {})
        settings.setdefault('procedure_fmt', '%(geo_col)s')
        settings.setdefault('select_params', [])

        connection = connections[self.db]
        backend = connection.ops

        # Performing setup for the spatial column, unless told not to.
        if settings.get('setup', True):
            default_args, geo_field = self._spatial_setup(
                att,
                desc=settings['desc'],
                field_name=field_name,
                geo_field_type=settings.get('geo_field_type', None))
            # Setup-provided defaults must not clobber caller-supplied args.
            for k, v in six.iteritems(default_args):
                settings['procedure_args'].setdefault(k, v)
        else:
            geo_field = settings['geo_field']

        # The attribute to attach to the model.
        if not isinstance(model_att, six.string_types): model_att = att

        # Special handling for any argument that is a geometry.
        for name in settings['geom_args']:
            # Using the field's get_placeholder() routine to get any needed
            # transformation SQL.
            geom = geo_field.get_prep_value(settings['procedure_args'][name])
            params = geo_field.get_db_prep_lookup('contains',
                                                  geom,
                                                  connection=connection)
            geom_placeholder = geo_field.get_placeholder(geom, connection)

            # Replacing the procedure format with that of any needed
            # transformation SQL.
            old_fmt = '%%(%s)s' % name
            new_fmt = geom_placeholder % '%%s'
            settings['procedure_fmt'] = settings['procedure_fmt'].replace(
                old_fmt, new_fmt)
            settings['select_params'].extend(params)

        # Getting the format for the stored procedure.
        fmt = '%%(function)s(%s)' % settings['procedure_fmt']

        # If the result of this function needs to be converted.
        if settings.get('select_field', False):
            sel_fld = settings['select_field']
            if isinstance(sel_fld, GeomField) and backend.select:
                # Geometry output needs the backend's conversion SELECT.
                self.query.custom_select[model_att] = backend.select
            if connection.ops.oracle:
                # NOTE(review): presumably because Oracle treats empty
                # strings as NULL — confirm.
                sel_fld.empty_strings_allowed = False
            self.query.extra_select_fields[model_att] = sel_fld

        # Finally, setting the extra selection attribute with
        # the format string expanded with the stored procedure
        # arguments.
        return self.extra(select={model_att: fmt % settings['procedure_args']},
                          select_params=settings['select_params'])
Example #33
0
def do_block_translate(parser, token):
    """
    This will translate a block of text with parameters.

    Usage::

        {% blocktrans with bar=foo|filter boo=baz|filter %}
        This is {{ bar }} and {{ boo }}.
        {% endblocktrans %}

    Additionally, this supports pluralization::

        {% blocktrans count count=var|length %}
        There is {{ count }} object.
        {% plural %}
        There are {{ count }} objects.
        {% endblocktrans %}

    This is much like ngettext, only in template syntax.

    The "var as value" legacy format is still supported::

        {% blocktrans with foo|filter as bar and baz|filter as boo %}
        {% blocktrans count var|length as count %}

    Contextual translations are supported::

        {% blocktrans with bar=foo|filter context "greeting" %}
            This is {{ bar }}.
        {% endblocktrans %}

    This is equivalent to calling pgettext/npgettext instead of
    (u)gettext/(u)ngettext.
    """
    bits = token.split_contents()

    # Parse the tag's options ('with', 'count', 'context', 'trimmed');
    # each option may appear at most once.
    options = {}
    remaining_bits = bits[1:]
    while remaining_bits:
        option = remaining_bits.pop(0)
        if option in options:
            raise TemplateSyntaxError('The %r option was specified more '
                                      'than once.' % option)
        if option == 'with':
            value = token_kwargs(remaining_bits, parser, support_legacy=True)
            if not value:
                raise TemplateSyntaxError('"with" in %r tag needs at least '
                                          'one keyword argument.' % bits[0])
        elif option == 'count':
            value = token_kwargs(remaining_bits, parser, support_legacy=True)
            if len(value) != 1:
                raise TemplateSyntaxError('"count" in %r tag expected exactly '
                                          'one keyword argument.' % bits[0])
        elif option == "context":
            try:
                value = remaining_bits.pop(0)
                value = parser.compile_filter(value)
            except Exception:
                # Re-raise as a TemplateSyntaxError, keeping the original
                # traceback for debugging.
                msg = ('"context" in %r tag expected '
                       'exactly one argument.') % bits[0]
                six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg),
                            sys.exc_info()[2])
        elif option == "trimmed":
            value = True
        else:
            raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
                                      (bits[0], option))
        options[option] = value

    if 'count' in options:
        # 'count' holds exactly one item: counter variable name -> filter
        # expression (validated above).
        countervar, counter = list(six.iteritems(options['count']))[0]
    else:
        countervar, counter = None, None
    if 'context' in options:
        message_context = options['context']
    else:
        message_context = None
    extra_context = options.get('with', {})

    trimmed = options.get("trimmed", False)

    # Collect the text/variable tokens of the singular form, stopping at
    # the first block tag ({% plural %} or {% endblocktrans %}).
    singular = []
    plural = []
    while parser.tokens:
        token = parser.next_token()
        if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
            singular.append(token)
        else:
            break
    if countervar and counter:
        # Pluralized form: the singular section must be terminated by
        # {% plural %}, then the plural tokens follow.
        if token.contents.strip() != 'plural':
            raise TemplateSyntaxError(
                "'blocktrans' doesn't allow other block tags inside it")
        while parser.tokens:
            token = parser.next_token()
            if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
                plural.append(token)
            else:
                break
    if token.contents.strip() != 'endblocktrans':
        raise TemplateSyntaxError(
            "'blocktrans' doesn't allow other block tags (seen %r) inside it" %
            token.contents)

    return BlockTranslateNode(extra_context,
                              singular,
                              plural,
                              countervar,
                              counter,
                              message_context,
                              trimmed=trimmed)
Example #34
0
    def changes_display_dict(self):
        """
        :return: The changes recorded in this log entry intended for display
                 to users as a dictionary object, keyed by the fields'
                 verbose names.
        """
        # Imported here to avoid a circular dependency with the registry.
        from auditlog.registry import auditlog
        model = self.content_type.model_class()
        model_fields = auditlog.get_model_fields(model._meta.model)
        changes_display_dict = {}
        # Iterate over the recorded per-field value lists.
        for field_name, values in iteritems(self.changes_dict):
            # try to get the field attribute on the model
            try:
                field = model._meta.get_field(field_name)
            except FieldDoesNotExist:
                # The field no longer exists on the model: show raw values.
                changes_display_dict[field_name] = values
                continue
            values_display = []
            # Handle choices fields and Postgres ArrayField to get the
            # human-readable version.  getattr() guards against both a
            # missing attribute and `choices` being None/empty.
            choices_dict = None
            if getattr(field, 'choices', None):
                choices_dict = dict(field.choices)
            base_field = getattr(field, 'base_field', None)
            if base_field is not None and getattr(base_field, 'choices', False):
                choices_dict = dict(base_field.choices)

            if choices_dict:
                for value in values:
                    try:
                        value = ast.literal_eval(value)
                        if isinstance(value, list):
                            # ArrayField: translate each element through the
                            # choices mapping.
                            values_display.append(', '.join(
                                choices_dict.get(val, 'None') for val in value
                            ))
                        else:
                            values_display.append(
                                choices_dict.get(value, 'None'))
                    except Exception:
                        # literal_eval rejects values that are not Python
                        # literals (e.g. plain string choice keys); fall back
                        # to looking the raw value up directly.  Was a bare
                        # `except:` with a duplicated ValueError handler.
                        values_display.append(choices_dict.get(value, 'None'))
            else:
                try:
                    field_type = field.get_internal_type()
                except AttributeError:
                    # if the field is a relationship it has no internal type
                    # and is excluded from the display dict
                    continue
                for value in values:
                    # handle case where field is a datetime, date, or time type
                    if field_type in (
                            "DateTimeField", "DateField", "TimeField"):
                        try:
                            value = parser.parse(value)
                            if field_type == "DateField":
                                value = value.date()
                            elif field_type == "TimeField":
                                value = value.time()
                            elif field_type == "DateTimeField":
                                # Assumes stored timestamps are UTC — TODO
                                # confirm against the logging side.
                                value = value.replace(tzinfo=timezone.utc)
                                value = value.astimezone(
                                    gettz(settings.TIME_ZONE))
                            value = formats.localize(value)
                        except ValueError:
                            # Not parseable as a date/time; keep the raw
                            # string.
                            pass
                    # check if length is longer than 140 and truncate with
                    # an ellipsis
                    if len(value) > 140:
                        value = "{}...".format(value[:140])

                    values_display.append(value)
            verbose_name = model_fields['mapping_fields'].get(
                field.name, getattr(field, 'verbose_name', field.name))
            changes_display_dict[verbose_name] = values_display
        return changes_display_dict
    def extract_relationships(cls, fields, resource, resource_instance):
        """Build the JSON:API ``relationships`` object for one resource.

        Walks the serializer ``fields`` and, for every relational field,
        emits an entry keyed by field name with ``data`` (and, where
        available, ``links``/``meta``).  Returns None when there is no
        ``resource_instance`` to inspect.
        """
        # Avoid circular deps
        from rest_framework_json_api.relations import ResourceRelatedField

        data = OrderedDict()

        # Don't try to extract relationships from a non-existent resource
        if resource_instance is None:
            return

        for field_name, field in six.iteritems(fields):
            # Skip URL field
            if field_name == api_settings.URL_FIELD_NAME:
                continue

            # Skip fields without relations
            if not isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
                continue

            source = field.source
            relation_type = utils.get_related_resource_type(field)

            if isinstance(field, relations.HyperlinkedIdentityField):
                resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
                if not resolved:
                    continue
                # special case for HyperlinkedIdentityField
                relation_data = list()

                # Don't try to query an empty relation
                relation_queryset = relation_instance \
                    if relation_instance is not None else list()

                for related_object in relation_queryset:
                    relation_data.append(
                        OrderedDict([('type', relation_type), ('id', encoding.force_text(related_object.pk))])
                    )

                data.update({field_name: {
                    'links': {
                        "related": resource.get(field_name)},
                    'data': relation_data,
                    'meta': {
                        'count': len(relation_data)
                    }
                }})
                continue

            if isinstance(field, ResourceRelatedField):
                resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
                if not resolved:
                    continue

                # special case for ResourceRelatedField
                relation_data = {
                    'data': resource.get(field_name)
                }

                # Attach links only when the field actually produced some.
                field_links = field.get_links(resource_instance)
                relation_data.update(
                    {'links': field_links}
                    if field_links else dict()
                )
                data.update({field_name: relation_data})
                continue

            if isinstance(field, (relations.PrimaryKeyRelatedField, relations.HyperlinkedRelatedField)):
                # Resolve the raw FK value ('<source>_id') to avoid fetching
                # the related object itself.
                resolved, relation = utils.get_relation_instance(resource_instance, '%s_id' % source, field.parent)
                if not resolved:
                    continue
                relation_id = relation if resource.get(field_name) else None
                relation_data = {
                    'data': (
                        OrderedDict([('type', relation_type), ('id', encoding.force_text(relation_id))])
                        if relation_id is not None else None)
                }

                relation_data.update(
                    {'links': {'related': resource.get(field_name)}}
                    if isinstance(field, relations.HyperlinkedRelatedField) and resource.get(field_name) else dict()
                )
                data.update({field_name: relation_data})
                continue

            if isinstance(field, relations.ManyRelatedField):
                resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
                if not resolved:
                    continue

                if isinstance(field.child_relation, ResourceRelatedField):
                    # special case for ResourceRelatedField
                    relation_data = {
                        'data': resource.get(field_name)
                    }

                    field_links = field.child_relation.get_links(resource_instance)
                    relation_data.update(
                        {'links': field_links}
                        if field_links else dict()
                    )
                    relation_data.update(
                        {
                            'meta': {
                                'count': len(resource.get(field_name))
                            }
                        }
                    )
                    data.update({field_name: relation_data})
                    continue

                # Generic to-many relation: emit one resource identifier per
                # related instance.
                relation_data = list()
                for nested_resource_instance in relation_instance:
                    nested_resource_instance_type = (
                        relation_type or
                        utils.get_resource_type_from_instance(nested_resource_instance)
                    )

                    relation_data.append(OrderedDict([
                        ('type', nested_resource_instance_type),
                        ('id', encoding.force_text(nested_resource_instance.pk))
                    ]))
                data.update({
                    field_name: {
                        'data': relation_data,
                        'meta': {
                            'count': len(relation_data)
                        }
                    }
                })
                continue

            if isinstance(field, ListSerializer):
                resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
                if not resolved:
                    continue

                relation_data = list()

                serializer_data = resource.get(field_name)
                resource_instance_queryset = list(relation_instance)
                if isinstance(serializer_data, list):
                    # Pair each serialized item with its instance positionally.
                    for position in range(len(serializer_data)):
                        nested_resource_instance = resource_instance_queryset[position]
                        nested_resource_instance_type = (
                            relation_type or
                            utils.get_resource_type_from_instance(nested_resource_instance)
                        )

                        relation_data.append(OrderedDict([
                            ('type', nested_resource_instance_type),
                            ('id', encoding.force_text(nested_resource_instance.pk))
                        ]))

                    data.update({field_name: {'data': relation_data}})
                    continue
                # NOTE(review): when serializer_data is not a list, control
                # falls through to the Serializer branch below — confirm
                # this is intended.

            if isinstance(field, Serializer):
                resolved, relation_instance = utils.get_relation_instance(resource_instance, source, field.parent)
                if not resolved:
                    continue

                data.update({
                    field_name: {
                        'data': (
                            OrderedDict([
                                ('type', relation_type),
                                ('id', encoding.force_text(relation_instance.pk))
                            ]) if resource.get(field_name) else None)
                    }
                })
                continue

        return utils.format_keys(data)
Example #36
0
    def extract_included(fields, resource, resource_instance,
                         included_resources):
        """Collect the side-loaded ("included") resources for one resource.

        Recursively serializes every relation named in ``included_resources``
        (dotted paths descend into nested serializers) and returns the
        flattened list of included resource objects, or None when there is
        no ``resource_instance``.
        """
        # this function may be called with an empty record (example: Browsable Interface)
        if not resource_instance:
            return

        included_data = list()
        current_serializer = fields.serializer
        context = current_serializer.context
        included_serializers = utils.get_included_serializers(
            current_serializer)
        # Copy so recursion on siblings doesn't see our removals below.
        included_resources = copy.copy(included_resources)

        for field_name, field in six.iteritems(fields):
            # Skip URL field
            if field_name == api_settings.URL_FIELD_NAME:
                continue

            # Skip fields without relations or serialized data
            if not isinstance(field,
                              (relations.RelatedField,
                               relations.ManyRelatedField, BaseSerializer)):
                continue

            try:
                included_resources.remove(field_name)
            except ValueError:
                # Skip fields not in requested included resources
                # If no child field, directly continue with the next field
                if field_name not in [
                        node.split('.')[0] for node in included_resources
                ]:
                    continue

            try:
                relation_instance_or_manager = getattr(resource_instance,
                                                       field_name)
            except AttributeError:
                try:
                    # For ManyRelatedFields if `related_name` is not set we need to access `foo_set` from `source`
                    relation_instance_or_manager = getattr(
                        resource_instance, field.child_relation.source)
                except AttributeError:
                    # Fall back to a serializer method (SerializerMethod-style
                    # relations); skip the field if none exists.
                    if not hasattr(current_serializer, field.source):
                        continue
                    serializer_method = getattr(current_serializer,
                                                field.source)
                    relation_instance_or_manager = serializer_method(
                        resource_instance)

            # Strip this field's prefix from dotted include paths so the
            # recursive call sees paths relative to the nested serializer.
            new_included_resources = [
                key.replace('%s.' % field_name, '', 1)
                for key in included_resources
                if field_name == key.split('.')[0]
            ]
            serializer_data = resource.get(field_name)

            # NOTE(review): the branches below intentionally rebind `field`
            # to a serializer instance, so a ManyRelatedField is afterwards
            # handled by the ListSerializer branch (and a RelatedField by the
            # ModelSerializer branch) — confirm.
            if isinstance(field, relations.ManyRelatedField):
                serializer_class = included_serializers.get(field_name)
                field = serializer_class(relation_instance_or_manager.all(),
                                         many=True,
                                         context=context)
                serializer_data = field.data

            if isinstance(field, relations.RelatedField):
                serializer_class = included_serializers.get(field_name)
                if relation_instance_or_manager is None:
                    continue
                field = serializer_class(relation_instance_or_manager,
                                         context=context)
                serializer_data = field.data

            if isinstance(field, ListSerializer):
                serializer = field.child
                relation_type = utils.get_resource_type_from_serializer(
                    serializer)
                relation_queryset = list(relation_instance_or_manager.all())

                # Get the serializer fields
                serializer_fields = utils.get_serializer_fields(serializer)
                if serializer_data:
                    # Pair serialized items with their instances positionally,
                    # then recurse for nested includes.
                    for position in range(len(serializer_data)):
                        serializer_resource = serializer_data[position]
                        nested_resource_instance = relation_queryset[position]
                        resource_type = (relation_type or
                                         utils.get_resource_type_from_instance(
                                             nested_resource_instance))
                        included_data.append(
                            JSONRenderer.build_json_resource_obj(
                                serializer_fields, serializer_resource,
                                nested_resource_instance, resource_type))
                        included_data.extend(
                            JSONRenderer.extract_included(
                                serializer_fields, serializer_resource,
                                nested_resource_instance,
                                new_included_resources))

            if isinstance(field, ModelSerializer):

                relation_type = utils.get_resource_type_from_serializer(field)

                # Get the serializer fields
                serializer_fields = utils.get_serializer_fields(field)
                if serializer_data:
                    included_data.append(
                        JSONRenderer.build_json_resource_obj(
                            serializer_fields, serializer_data,
                            relation_instance_or_manager, relation_type))
                    included_data.extend(
                        JSONRenderer.extract_included(
                            serializer_fields, serializer_data,
                            relation_instance_or_manager,
                            new_included_resources))

        return utils.format_keys(included_data)
Example #37
0
 def actions(self):
     """Return an iterable of ``(name, func)`` pairs for the enabled actions."""
     registered = self._actions
     return six.iteritems(registered)
Example #38
0
File: views.py Project: yytsui/nav
def make_report(request,
                report_name,
                export_delimiter,
                query_dict,
                paginate=True):
    """Makes a report

    Builds the named report either as an export (when *export_delimiter*
    is given) or as a template-context dict ready for rendering.

    :param request: HTTP request; the account login becomes part of the
                    report cache key.
    :param report_name: name of the report definition to generate.
    :param export_delimiter: when set, return an export of the report with
                             this field delimiter instead of a context dict.
    :param query_dict: query parameters forwarded to the report generator.
    :param paginate: Introduced to be able to toggle display of the paginate
                     elements. Used in the widget rendering.
    """
    # Initiating variables used when caching
    report = contents = neg = operator = adv = result_time = None

    if not report_name:
        return None

    # Pagination related variables
    page_number = query_dict.get('page_number', 1)
    page_size = get_page_size(request)

    # Rebuild the query string without page_number so pagination links can
    # append their own page parameter.
    query_string = "&".join([
        "%s=%s" % (x, y) for x, y in iteritems(query_dict)
        if x != 'page_number'
    ])

    # The cache key includes the config files' mtimes, so editing a report
    # configuration automatically invalidates previously cached results.
    @report_cache((request.account.login, report_name,
                   os.stat(CONFIG_FILE_PACKAGE).st_mtime,
                   os.stat(CONFIG_FILE_LOCAL).st_mtime), query_dict)
    def _fetch_data_from_db():
        # NOTE: `gen` is resolved from the enclosing scope; it is assigned
        # below, before this closure is actually invoked.
        (report, contents, neg, operator, adv, config,
         dbresult) = (gen.make_report(report_name, CONFIG_FILE_PACKAGE,
                                      CONFIG_FILE_LOCAL, query_dict, None,
                                      None))
        if not report:
            raise Http404
        result_time = strftime("%H:%M:%S", localtime())
        return report, contents, neg, operator, adv, result_time

    gen = Generator()

    report, contents, neg, operator, adv, result_time = _fetch_data_from_db()

    if export_delimiter:
        return generate_export(report, report_name, export_delimiter)
    else:

        paginator = Paginator(report.table.rows, page_size)
        try:
            page = paginator.page(page_number)
        except InvalidPage:
            # Fall back to the first page on invalid/out-of-range numbers.
            page_number = 1
            page = paginator.page(page_number)

        context = {
            'heading':
            'Report',
            'result_time':
            result_time,
            'report':
            report,
            'paginate':
            paginate,
            'page':
            page,
            'current_page_range':
            find_page_range(page_number, paginator.page_range),
            'query_string':
            query_string,
            'contents':
            contents,
            'operator':
            operator,
            'neg':
            neg,
        }

        if report:
            # A maintainable list of variables sent to the template

            # Operator symbols shown in the filter UI (HTML-escaped).
            context['operators'] = {
                'eq': '=',
                'like': '~',
                'gt': '&gt;',
                'lt': '&lt;',
                'geq': '&gt;=',
                'leq': '&lt;=',
                'between': '[:]',
                'in': '(,,)',
            }

            context['operatorlist'] = [
                'eq', 'like', 'gt', 'lt', 'geq', 'leq', 'between', 'in'
            ]

            # Human-readable descriptions matching the operators above.
            context['descriptions'] = {
                'eq': 'equals',
                'like': 'contains substring (case-insensitive)',
                'gt': 'greater than',
                'lt': 'less than',
                'geq': 'greater than or equals',
                'leq': 'less than or equals',
                'between': 'between (colon-separated)',
                'in': 'is one of (comma separated)',
            }

            context['delimiters'] = (',', ';', ':', '|')

            page_name = report.title or report_name
            page_link = '/report/{0}'.format(report_name)
        else:
            page_name = "Error"
            page_link = False

        navpath = [('Home', '/'), ('Report', '/report/'),
                   (page_name, page_link)]
        adv_block = bool(adv)

        context.update({
            'title': 'Report - {0}'.format(page_name),
            'navpath': navpath,
            'adv_block': adv_block,
        })

        return context
Example #39
0
    def update(self,
               request,
               always_save=False,
               local_site_name=None,
               update_from_commit_id=False,
               trivial=None,
               extra_fields=None,
               *args,
               **kwargs):
        """Updates a draft of a review request.

        This will update the draft with the newly provided data.

        Most of the fields correspond to fields in the review request, but
        there is one special one, ``public``. When ``public`` is set to true,
        the draft will be published, moving the new content to the
        review request itself, making it public, and sending out a notification
        (such as an e-mail) if configured on the server. The current draft will
        then be deleted.

        Extra data can be stored on the review request for later lookup by
        passing ``extra_data.key_name=value``. The ``key_name`` and ``value``
        can be any valid strings. Passing a blank ``value`` will remove the
        key. The ``extra_data.`` prefix is required.
        """
        # Avoid the shared-mutable-default pitfall; an omitted extra_fields
        # behaves exactly as the previous `extra_fields={}` default did.
        if extra_fields is None:
            extra_fields = {}

        try:
            review_request = resources.review_request.get_object(
                request, local_site_name=local_site_name, *args, **kwargs)
        except ReviewRequest.DoesNotExist:
            return DOES_NOT_EXIST

        # An explicitly blank commit ID means "clear the commit ID".
        if kwargs.get('commit_id') == '':
            kwargs['commit_id'] = None

        commit_id = kwargs.get('commit_id', None)

        try:
            draft = self.prepare_draft(request, review_request)
        except PermissionDenied:
            return self.get_no_access_error(request)

        if (commit_id and commit_id != review_request.commit_id
                and commit_id != draft.commit_id):
            # Check to make sure the new commit ID isn't being used already
            # in another review request or draft.
            repository = review_request.repository

            existing_review_request = ReviewRequest.objects.filter(
                commit_id=commit_id, repository=repository)

            # NOTE(review): this compares a QuerySet to a model instance,
            # which is never equal, so the condition reduces to a non-empty
            # check on the queryset — confirm whether excluding
            # `review_request` itself was intended.
            if (existing_review_request
                    and existing_review_request != review_request):
                return COMMIT_ID_ALREADY_EXISTS

            existing_draft = ReviewRequestDraft.objects.filter(
                commit_id=commit_id, review_request__repository=repository)

            if existing_draft and existing_draft != draft:
                return COMMIT_ID_ALREADY_EXISTS

        modified_objects = []
        invalid_fields = {}

        # Apply every mutable field that the caller supplied, collecting
        # validation errors and any side-modified objects to save later.
        for field_name, field_info in six.iteritems(self.fields):
            if (field_info.get('mutable', True)
                    and kwargs.get(field_name, None) is not None):
                field_result, field_modified_objects, invalid = \
                    self._set_draft_field_data(draft, field_name,
                                               kwargs[field_name],
                                               local_site_name, request)

                if invalid:
                    invalid_fields[field_name] = invalid
                elif field_modified_objects:
                    modified_objects += field_modified_objects

        if commit_id and update_from_commit_id:
            try:
                draft.update_from_commit_id(commit_id)
            except InvalidChangeNumberError:
                return INVALID_CHANGE_NUMBER

        if draft.changedesc_id:
            changedesc = draft.changedesc
            modified_objects.append(draft.changedesc)

            self.set_text_fields(changedesc,
                                 'changedescription',
                                 text_model_field='text',
                                 rich_text_field_name='rich_text',
                                 **kwargs)

        self.set_text_fields(draft, 'description', **kwargs)
        self.set_text_fields(draft, 'testing_done', **kwargs)

        # Custom (non-builtin) fields with Markdown enabled get their text
        # variants processed as well.
        for field_cls in get_review_request_fields():
            if (not issubclass(field_cls, BuiltinFieldMixin)
                    and getattr(field_cls, 'enable_markdown', False)):
                self.set_extra_data_text_fields(draft, field_cls.field_id,
                                                extra_fields, **kwargs)

        self.import_extra_data(draft, draft.extra_data, extra_fields)

        if always_save or not invalid_fields:
            # De-duplicate before saving; the same object may have been
            # modified by several fields.
            for obj in set(modified_objects):
                obj.save()

            draft.save()

        if invalid_fields:
            return INVALID_FORM_DATA, {
                'fields': invalid_fields,
                self.item_result_key: draft,
            }

        if request.POST.get('public', False):
            try:
                review_request.publish(user=request.user, trivial=trivial)
            except NotModifiedError:
                return NOTHING_TO_PUBLISH
            except PublishError as e:
                return PUBLISH_ERROR.with_message(six.text_type(e))

        return 200, {
            self.item_result_key: draft,
        }
Example #40
0
 def instances_with_model(self):
     """Yield (model, instance) pairs for every instance in self.data."""
     for model, instance_list in six.iteritems(self.data):
         for instance in instance_list:
             yield model, instance
Example #41
0
            cenvvar.split(SETTINGS_ENV_PREFIX)[1:])
        DJANGO_ENV_VARS[setting] = value
    #
    # Look also at the environ Root for explicit env vars
    #  Please note that prefixed value will always have
    #  the higher priority (DJANGO__FOO vs FOO)
    for setting in ENV_VARS:
        if setting not in DJANGO_ENV_VARS:
            try:
                DJANGO_ENV_VARS[setting] = os.environ[setting]
            except KeyError:
                pass

# export back DJANGO_ENV_VARS dict as django settings
# Each collected environment value becomes a module-level name in this
# settings module, so Django picks it up like any ordinary setting.
globs = globals()
for setting, value in six.iteritems(DJANGO_ENV_VARS):
    globs[setting] = value


def as_col(value, separators=None, final_type=None, **kw):
    if final_type is None:
        final_type = list
    if separators is None:
        separators = ['-|_', '_|-', '___', ',', ';', '|']
    if isinstance(value, six.string_types):
        assert(len(separators))
        while separators:
            try:
                separator = separators.pop(0)
            except IndexError:
                break
Example #42
0
    def api_call(self,
                 client_http_method,
                 path,
                 data=None,
                 follow_redirects=False,
                 expected_status=200,
                 expected_redirects=None,
                 expected_headers=None,
                 expected_mimetype=None,
                 expected_num_queries=None,
                 expected_json=True,
                 return_http_response=True,
                 **extra):
        """Perform an API call using a client API method.

        This will invoke an API function with all the necessary parameters,
        and check the results for the expected values.

        Args:
            api_func (callable):
                The API function to call.

            path (unicode):
                The path to the resource to request.

            data (dict, optional):
                The data to pass in the request. For an HTTP GET, this will
                be used as a query string. For other requests, this will be
                the request body.

            follow_redirects (bool, optional):
                Whether to expect and follow redirects to another URL.

            expected_status (int, optional):
                The expected HTTP status.

            expected_redirects (list of unicode, optional):
                The list of expected redirects performed by the resource(s),
                in order.

            expected_headers (dict, optional):
                Expected HTTP headers and their values from the
                response.

            expected_num_queries (int, optional):
                The number of queries this API call is expected to make.

            expected_mimetype (unicode, optional):
                The expected mimetype for the response payload.

            expected_json (bool):
                Whether JSON-parsable content is expected in the response.

            return_http_response (bool, optional):
                Whether to return the :py:class:`~django.http.HttpResponse`
                as part of the result.

            **extra (dict):
                Extra data to pass to the client HTTP method.

        Returns:
            object:
            By default, this returns the payload content, which may be a
            raw byte string result or a deserialized JSON body (depending on
            ``expected_json``).

            If passing ``return_http_response=True``, this will return a
            tuple in the form of ``(payload_content, http_response)``,
            where ``http_response`` is the
            :py:class:`~django.http.HttpResponse` object.
        """
        # Avoid shared mutable default arguments; None means "no
        # expectations", matching the previous []/{} defaults.
        if expected_redirects is None:
            expected_redirects = []

        if expected_headers is None:
            expected_headers = {}

        def _call_api():
            return client_http_method(path=path,
                                      data=data,
                                      follow=follow_redirects,
                                      HTTP_X_REQUESTED_WITH='XMLHttpRequest',
                                      **extra)

        # Normalize the API path so that the base URL containing the hostname
        # is stripped.
        if path.startswith(self.base_url):
            path = path[len(self.base_url):]

        # If the caller is explicitly requested multipart content, ensure we've
        # encoded the data.
        if extra.get('content_type') == MULTIPART_CONTENT:
            data = encode_multipart(BOUNDARY, data)

        # Log some details about the API request that's about to be performed.
        print('Performing HTTP %s for API %s' %
              (client_http_method.__name__.upper(), path))

        if data is not None:
            print('Request data = %r' % data)

        if expected_num_queries is None:
            response = _call_api()
        else:
            with self.assertNumQueries(expected_num_queries):
                response = _call_api()

        print('Raw API response: %r' % response.content)

        self.assertEqual(response.status_code, expected_status)

        if expected_status in (204, 405):
            # These statuses must not carry a body.
            self.assertEqual(response.content, b'')
            rsp = None
        else:
            if expected_status != 302 and expected_json:
                rsp = json.loads(force_text(response.content))
            else:
                rsp = response.content

            print('Parsed API response:')
            pprint.pprint(rsp)

            if expected_status >= 400:
                # Error responses should be using the test's error mimetype
                # and not some valid response mimetype.
                self.assertIsNone(expected_mimetype)

                if expected_status != 405:
                    self.assertEqual(response['Content-Type'],
                                     self.error_mimetype)
            elif expected_status != 302:
                # All status codes other than the few above should have a
                # response payload matching the expected mimetype.
                self.assertIsNotNone(expected_mimetype)
                self.assertEqual(response['Content-Type'], expected_mimetype)

        # Check if the response redirected the way the caller expected.
        if expected_redirects:
            self.assertEqual(len(response.redirect_chain),
                             len(expected_redirects))

            # Compare each hop of the redirect chain in order. The previous
            # version compared index 0 on every iteration, so only the first
            # redirect was actually verified.
            for i, redirect in enumerate(expected_redirects):
                self.assertEqual(response.redirect_chain[i][0],
                                 self.base_url + redirect)

        # Check that all the expected headers are present in the response.
        for header, value in six.iteritems(expected_headers):
            self.assertIn(header, response)
            self.assertEqual(response[header], value)

        if return_http_response:
            return rsp, response
        else:
            return rsp
Example #43
0
def populate_rollup(rollup_tree, normalize_to_one, force_score=False):
    """
    Recursively populate the tree *rollup_tree* with aggregated scores
    for assessment.

    A rollup_tree is recursively defined as a tuple of two dictionnaries.
    The first dictionnary stores the fields on the node while the second
    dictionnary contains the leafs keyed by path.

    :param rollup_tree: (values, children) tuple to populate in place.
    :param normalize_to_one: whether aggregated scores are normalized.
    :param force_score: forwarded to ``_normalize`` for each account.

    Example:

        [{
            "slug": "totals",
            "title": "Total Score",
            "tag": ["scorecard"]
         },
         {
            "/boxes-and-enclosures": [{
                "path": "/boxes-and-enclosures",
                "slug": "boxes-and-enclosures",
                "title": "Boxes & enclosures",
                "tag": "{\"tags\":[\"sustainability\"]}",
                "score_weight": 1.0,
                "transparent_to_rollover": false
            },
            {
                "/boxes-and-enclosures/management-basics": [{
                    "path": "/boxes-and-enclosures/management-basics",
                    "slug": "management-basics",
                    "title": "Management",
                    "tag": "{\"tags\":[\"management\",\"scorecard\"]}",
                    "score_weight": 1.0,
                    "transparent_to_rollover": false,
                    "accounts": {
                        "6": {
                            "nb_answers": 0,
                            "nb_questions": 2,
                            "created_at": null
                        },
                        "7": {
                            "nb_answers": 2,
                            "nb_questions": 2,
                            "created_at": "2017-12-20 18:48:40.666239",
                            "numerator": 4.0,
                            "denominator": 10.5,
                            "improvement_numerator": 1.5,
                            "improvement_denominator": 4.5
                        },
                    }
                },
                {}
                ],
                "/boxes-and-enclosures/design": [{
                    "path": "/boxes-and-enclosures/design",
                    "slug": "design",
                    "title": "Design",
                    "tag": "{\"tags\":[\"scorecard\"]}",
                    "score_weight": 1.0,
                    "transparent_to_rollover": false,
                    "accounts": {
                        "6": {
                            "nb_answers": 0,
                            "nb_questions": 2,
                            "created_at": null
                        },
                        "7": {
                            "nb_answers": 0,
                            "nb_questions": 2,
                            "created_at": null,
                            "improvement_numerator": 0.0,
                            "improvement_denominator": 0.0
                        },
                    }
                },
                {}
                ],
                "/boxes-and-enclosures/production": [{
                    "path": "/boxes-and-enclosures/production",
                    "slug": "production",
                    "title": "Production",
                    "tag": "{\"tags\":[\"scorecard\"]}",
                    "score_weight": 1.0,
                    "transparent_to_rollover": false,
                    "text": "/envconnect/static/img/production.png"
                },
                {
                    "/boxes-and-enclosures/production/energy-efficiency": [{
              "path": "/boxes-and-enclosures/production/energy-efficiency",
                        "slug": "energy-efficiency",
                        "title": "Energy Efficiency",
                        "tag": "{\"tags\":[\"pagebreak\",\"scorecard\"]}",
                        "score_weight": 1.0,
                        "transparent_to_rollover": false,
                        "accounts": {
                            "6": {
                                "nb_answers": 1,
                                "nb_questions": 4,
                                "created_at": "2016-05-01 00:36:19.448000"
                            },
                            "7": {
                                "nb_answers": 0,
                                "nb_questions": 4,
                                "created_at": null,
                                "improvement_numerator": 0.0,
                                "improvement_denominator": 0.0
                            },
                        }
                    },
                    {}
                    ]
                }
            }]
         }]
    """
    #pylint:disable=too-many-locals
    values = rollup_tree[0]
    slug = values.get('slug', None)
    total_score_weight = 0
    if len(rollup_tree[1]) > 1:
        for node in six.itervalues(rollup_tree[1]):
            score_weight = node[0].get('score_weight', 1.0)
            total_score_weight += score_weight
        # Only normalize when the children's weights already sum to ~1.0.
        normalize_children = ((1.0 - 0.01) < total_score_weight < (1.0 + 0.01))
    else:
        # With only one children the weight will always be 1 yet we don't
        # want to normalize here.
        normalize_children = False

    if 'accounts' not in values:
        values['accounts'] = {}
    accounts = values['accounts']
    for node in six.itervalues(rollup_tree[1]):
        populate_rollup(  # recursive call
            node,
            normalize_children,
            force_score=force_score)
        score_weight = node[0].get('score_weight', 1.0)
        for account_id, scores in six.iteritems(node[0].get('accounts', {})):
            if account_id not in accounts:
                accounts[account_id] = {}
            agg_scores = accounts[account_id]
            if 'sample' in scores:
                agg_scores['sample'] = scores['sample']
            if 'created_at' in scores:
                # Keep the most recent datetime; a non-datetime placeholder
                # is overwritten by the first real value seen.
                if not ('created_at' in agg_scores and isinstance(
                        agg_scores['created_at'], datetime.datetime)):
                    agg_scores['created_at'] = scores['created_at']
                elif (isinstance(agg_scores['created_at'], datetime.datetime)
                      and isinstance(scores['created_at'], datetime.datetime)):
                    agg_scores['created_at'] = max(agg_scores['created_at'],
                                                   scores['created_at'])
            nb_questions = scores.get('nb_questions')
            if nb_questions is not None:
                agg_scores['nb_questions'] = (
                    agg_scores.get('nb_questions', 0) + nb_questions)
            nb_answers = scores.get('nb_answers')
            if slug != 'totals' or nb_answers:
                # Aggregation of total scores is different. We only want to
                # count scores for assessment that matter for an organization's
                # segment.
                for key in ('nb_answers', 'nb_na_answers',
                            'nb_planned_improvements'):
                    value = scores.get(key)
                    if value is not None:
                        agg_scores[key] = agg_scores.get(key, 0) + value
                for key in [
                        'numerator', 'denominator', 'improvement_numerator'
                ]:
                    value = scores.get(key)
                    if value is not None:
                        agg_scores[key] = agg_scores.get(
                            key, 0) + (value * score_weight)

    for account_id, scores in six.iteritems(accounts):
        _normalize(scores,
                   normalize_to_one=normalize_to_one,
                   force_score=force_score)
Example #44
0
def version_matrix(request):
    """Render the judge/language runtime-version matrix view.

    Collapses judges whose runtime versions are identical into named
    groups, then flags which versions are the latest per language.
    """
    matrix = defaultdict(partial(defaultdict, LatestList))
    latest = defaultdict(list)
    groups = defaultdict(list)

    judges = {
        judge.id: judge.name
        for judge in Judge.objects.filter(online=True)
    }
    languages = Language.objects.all()

    for runtime in RuntimeVersion.objects.filter(
            judge__online=True).order_by('priority'):
        if runtime.version:
            matrix[runtime.judge_id][runtime.language_id].append(runtime)

    # Group judges by name prefix (before the last '.').
    for judge, data in six.iteritems(matrix):
        name_tuple = judges[judge].rpartition('.')
        groups[name_tuple[0] or name_tuple[-1]].append((judges[judge], data))

    matrix = {}
    for group, data in six.iteritems(groups):
        if len(data) == 1:
            judge, data = data[0]
            matrix[judge] = data
            continue

        # Union-find-style clustering of judges with identical version
        # lists. Use a real list: on Python 3, range() is immutable and
        # the original `ds = range(len(data))` would crash on `ds[j] = i`.
        ds = list(range(len(data)))
        size = [1] * len(data)
        for i, (p, x) in enumerate(data):
            if ds[i] != i:
                continue
            for j, (q, y) in enumerate(data):
                if i != j and compare_version_list(x, y):
                    ds[j] = i
                    size[i] += 1
                    size[j] = 0

        # `range` works on both Python 2 and 3; `xrange` is Python 2 only.
        rep = max(range(len(data)), key=size.__getitem__)
        matrix[group] = data[rep][1]
        for i, (j, x) in enumerate(data):
            if ds[i] != rep:
                matrix[j] = x

    # Record the highest version list seen per language.
    for data in six.itervalues(matrix):
        for language, versions in six.iteritems(data):
            versions.versions = [
                LooseVersion(runtime.version) for runtime in versions
            ]
            if versions.versions > latest[language]:
                latest[language] = versions.versions

    # Mark entries that match the latest version per language.
    for data in six.itervalues(matrix):
        for language, versions in six.iteritems(data):
            versions.is_latest = versions.versions == latest[language]

    languages = sorted(languages, key=lambda lang: LooseVersion(lang.name))
    return render(
        request, 'status/versions.html', {
            'title': _('Version matrix'),
            'judges': sorted(matrix.keys()),
            'languages': languages,
            'matrix': matrix,
        })
Example #45
0
    def extract_relationships(fields, resource, resource_instance):
        """Build the JSON:API ``relationships`` object for *resource*.

        Walks the serializer *fields*, and for every relational field
        emits an entry of resource linkage data (``{'type': ..., 'id': ...}``)
        plus links/meta where applicable. Returns None when
        *resource_instance* is None; otherwise a formatted OrderedDict.
        """
        # Avoid circular deps
        from rest_framework_json_api.relations import ResourceRelatedField

        data = OrderedDict()

        # Don't try to extract relationships from a non-existent resource
        if resource_instance is None:
            return

        for field_name, field in six.iteritems(fields):
            # Skip URL field
            if field_name == api_settings.URL_FIELD_NAME:
                continue

            # Skip fields without relations
            if not isinstance(field,
                              (relations.RelatedField,
                               relations.ManyRelatedField, BaseSerializer)):
                continue

            source = field.source
            try:
                relation_instance_or_manager = getattr(resource_instance,
                                                       source)
            except AttributeError:
                # if the field is not defined on the model then we check the serializer
                # and if no value is there we skip over the field completely
                serializer_method = getattr(field.parent, source, None)
                if serializer_method and hasattr(serializer_method,
                                                 '__call__'):
                    relation_instance_or_manager = serializer_method(
                        resource_instance)
                else:
                    continue

            relation_type = utils.get_related_resource_type(field)

            if isinstance(field, relations.HyperlinkedIdentityField):
                # special case for HyperlinkedIdentityField
                relation_data = list()

                # Don't try to query an empty relation
                relation_queryset = relation_instance_or_manager.all() \
                    if relation_instance_or_manager is not None else list()

                for related_object in relation_queryset:
                    relation_data.append(
                        OrderedDict([
                            ('type', relation_type),
                            ('id', encoding.force_text(related_object.pk))
                        ]))

                data.update({
                    field_name: {
                        'links': {
                            "related": resource.get(field_name)
                        },
                        'data': relation_data,
                        'meta': {
                            'count': len(relation_data)
                        }
                    }
                })
                continue

            if isinstance(field, ResourceRelatedField):
                # special case for ResourceRelatedField
                relation_data = {'data': resource.get(field_name)}

                field_links = field.get_links(resource_instance)
                relation_data.update(
                    {'links': field_links} if field_links else dict())
                data.update({field_name: relation_data})
                continue

            if isinstance(field, (relations.PrimaryKeyRelatedField,
                                  relations.HyperlinkedRelatedField)):
                # To-one relation: linkage is a single {'type', 'id'}
                # object, or None when the resource has no value for it.
                relation_id = relation_instance_or_manager.pk if resource.get(
                    field_name) else None

                relation_data = {
                    'data':
                    (OrderedDict([('type', relation_type),
                                  ('id', encoding.force_text(relation_id))])
                     if relation_id is not None else None)
                }

                relation_data.update(
                    {'links': {
                        'related': resource.get(field_name)
                    }} if isinstance(field, relations.HyperlinkedRelatedField)
                    and resource.get(field_name) else dict())
                data.update({field_name: relation_data})
                continue

            if isinstance(field, relations.ManyRelatedField):

                if isinstance(field.child_relation, ResourceRelatedField):
                    # special case for ResourceRelatedField
                    relation_data = {'data': resource.get(field_name)}

                    field_links = field.child_relation.get_links(
                        resource_instance)
                    relation_data.update(
                        {'links': field_links} if field_links else dict())
                    relation_data.update(
                        {'meta': {
                            'count': len(resource.get(field_name))
                        }})
                    data.update({field_name: relation_data})
                    continue

                # Generic to-many relation: one linkage object per related
                # instance, each typed individually.
                relation_data = list()
                for related_object in relation_instance_or_manager.all():
                    related_object_type = utils.get_instance_or_manager_resource_type(
                        related_object)
                    relation_data.append(
                        OrderedDict([
                            ('type', related_object_type),
                            ('id', encoding.force_text(related_object.pk))
                        ]))
                data.update({
                    field_name: {
                        'data': relation_data,
                        'meta': {
                            'count': len(relation_data)
                        }
                    }
                })
                continue

            if isinstance(field, ListSerializer):
                # Nested list serializer: pair each serialized item with
                # its instance by position in the queryset.
                relation_data = list()

                serializer_data = resource.get(field_name)
                resource_instance_queryset = list(
                    relation_instance_or_manager.all())
                if isinstance(serializer_data, list):
                    for position in range(len(serializer_data)):
                        nested_resource_instance = resource_instance_queryset[
                            position]
                        nested_resource_instance_type = utils.get_resource_type_from_instance(
                            nested_resource_instance)
                        relation_data.append(
                            OrderedDict([('type',
                                          nested_resource_instance_type),
                                         ('id',
                                          encoding.force_text(
                                              nested_resource_instance.pk))]))

                    data.update({field_name: {'data': relation_data}})
                    continue

            if isinstance(field, ModelSerializer):
                # Nested single serializer: type is derived from the
                # serializer's model class name.
                relation_model = field.Meta.model
                relation_type = utils.format_type(relation_model.__name__)

                data.update({
                    field_name: {
                        'data':
                        (OrderedDict([('type', relation_type),
                                      ('id',
                                       encoding.force_text(
                                           relation_instance_or_manager.pk))])
                         if resource.get(field_name) else None)
                    }
                })
                continue

        return utils.format_keys(data)
Example #46
0
def generate_wolk_changes(giedo):
    """Compute the changes needed to synchronise the ownCloud ("wolk")
    database with the current set of members and groups.

    Returns a dict with the keys 'addUser', 'addGroup', 'addUserToGroup'
    and 'removeUserFromGroup' describing the required mutations, or None
    when no database credentials are configured.  The ``giedo`` argument
    is accepted for interface compatibility but not used here.
    """
    creds = settings.WOLK_MYSQL_SECRET
    if not creds:
        logging.warning('wolk: no credentials available, skipping')
        return None

    todo = {
        'addUser': [],
        'addGroup': [],
        'addUserToGroup': [],
        'removeUserFromGroup': []
    }
    dt_now = now()
    # First, lets see which users and groups to create
    users = dict()  # unix user name -> member entity
    groups = dict()  # unix group name -> set of member user names
    ulut = dict()  # member _id -> member entity
    for m in Es.by_name('leden').get_members():
        if not m.got_unix_user:
            continue
        ulut[m._id] = m
        users[str(m.name)] = m
    # Get all groups and create a look-up-table for group membership
    gs = tuple(Es.groups())
    mrels = Es.query_relations(how=None, _with=gs, _from=dt_now, until=dt_now)
    mlut = dict()
    for g in gs:
        mlut[g._id] = []
    for mrel in mrels:
        mlut[mrel['with']].append(mrel['who'])
    # Flatten out group membership.  For instance: if Giedo is in Kasco
    # and Kasco is in Boekenlezers, then Giedo is also in the Boekenlezers
    # unix group.
    # But first split the mlut graph into a group and a non-group subgraph.
    mlut_g = {}  # { <group> : <members that are groups> }
    mlut_ng = {}  # { <group> : <members that are not groups> }
    for g_id in mlut:
        mlut_g[g_id] = [c for c in mlut[g_id] if c in mlut]
        mlut_ng[g_id] = [c for c in mlut[g_id] if c not in mlut]
    mlut_g_tc = tc(mlut_g)  # transitive closure
    # Generate the { <group> : <indirect non-group members> } graph
    memb_graph = {}
    for g in gs:
        if not g.got_unix_group:
            continue
        memb_graph[g._id] = set(mlut_ng[g._id])
        for h_id in mlut_g_tc[g._id]:
            memb_graph[g._id].update(mlut_ng[h_id])
    # Fill the groups variable
    for g in gs:
        if not g.got_unix_group:
            continue
        groups[str(g.name)] = set(
            [str(ulut[c].name) for c in memb_graph[g._id] if c in ulut])

    # Now, check which users and groups actually exist in owncloud
    missing_users = set(users.keys())
    missing_groups = set(groups.keys())
    dc = pymysql.connect(host=creds[0],
                         user=creds[1],
                         password=creds[2],
                         db=creds[3],
                         charset='utf8')
    try:
        with dc.cursor() as c:
            c.execute("SELECT gid, uid FROM oc_group_user")
            for group, user in c.fetchall():
                if group not in groups:
                    continue
                if user not in users or user not in groups[group]:
                    todo['removeUserFromGroup'].append((user, group))
                    continue
                # Membership already present in ownCloud: drop it from the
                # "to add" set.  (The guard above guarantees membership.)
                groups[group].remove(user)
            c.execute("SELECT uid FROM oc_users")
            for user, in c.fetchall():
                if user not in users:
                    logging.info("wolk: stray user %s", user)
                    continue
                missing_users.remove(user)
            c.execute("SELECT gid FROM oc_groups")
            for group, in c.fetchall():
                if group not in groups:
                    # BUG FIX: previously logged the stale `user` variable
                    # from the loop above instead of the stray group name.
                    logging.info("wolk: stray group %s", group)
                    continue
                missing_groups.remove(group)
            for user in missing_users:
                todo['addUser'].append(
                    (user, six.text_type(users[user].humanName)))
            todo['addGroup'] = list(missing_groups)
            for group, missing_members in six.iteritems(groups):
                for user in missing_members:
                    todo['addUserToGroup'].append((user, group))
    finally:
        dc.close()
    return todo
Example #47
0
    def test_metadata_from_package(self):
        """Testing ExtensionInfo metadata from package"""
        module_name = 'testextension.dummy.dummy'
        extension_id = '%s:DummyExtension' % module_name
        module_to_app = 'testextension.dummy'
        project_name = 'TestProjectName'
        version = '1.0'

        # Metadata as it would appear in the package's PKG-INFO lines.
        metadata = {
            'Name': project_name,
            'Version': version,
            'Summary': 'Test summary lorem ipsum',
            'Description': 'Test description lorem ipsum',
            'Author': 'Test author lorem ipsum',
            'Author-email': 'Test [email protected]',
            'License': 'Test License MIT GPL Apache Drivers',
            'Home-page': 'http://www.example.com',
        }

        htdocs_path = os.path.join(settings.MEDIA_ROOT, 'ext', project_name)
        static_path = os.path.join(settings.STATIC_ROOT, 'ext', extension_id)

        # Fake distribution/entry point exposing the metadata above.
        entrypoint = Mock()
        entrypoint.dist = Mock()
        entrypoint.dist.get_metadata_lines = Mock(return_value=[
            '%s: %s' % (key, value)
            for key, value in six.iteritems(metadata)
        ])
        entrypoint.dist.project_name = project_name
        entrypoint.dist.version = version

        ext_class = Mock()
        ext_class.__module__ = module_name
        ext_class.id = extension_id
        ext_class.metadata = None

        extension_info = ExtensionInfo(entrypoint, ext_class)

        self.assertEqual(extension_info.app_name, module_to_app)
        self.assertEqual(extension_info.author, metadata['Author'])
        self.assertEqual(extension_info.author_email, metadata['Author-email'])
        self.assertEqual(extension_info.description, metadata['Description'])
        self.assertFalse(extension_info.enabled)
        self.assertEqual(extension_info.installed_htdocs_path, htdocs_path)
        self.assertEqual(extension_info.installed_static_path, static_path)
        self.assertFalse(extension_info.installed)
        self.assertEqual(extension_info.license, metadata['License'])
        self.assertEqual(extension_info.metadata, metadata)
        self.assertEqual(extension_info.name, project_name)
        self.assertEqual(extension_info.summary, metadata['Summary'])
        self.assertEqual(extension_info.url, metadata['Home-page'])
        self.assertEqual(extension_info.version, version)
Example #48
0
def write_csv(queryset, file_obj, **kwargs):
    """
    The main worker function. Writes CSV data to a file object based on the
    contents of the queryset.

    Keyword arguments recognized here (``field_header_map``,
    ``field_serializer_map``, ``use_verbose_names``, ``field_order``) are
    consumed; everything else is forwarded to ``csv.DictWriter``.
    """
    # Split our own options off from the csv.DictWriter pass-through kwargs.
    field_header_map = kwargs.get('field_header_map', {})
    field_serializer_map = kwargs.get('field_serializer_map', {})
    use_verbose_names = kwargs.get('use_verbose_names', True)
    field_order = kwargs.get('field_order', None)

    csv_kwargs = {key: val for key, val in six.iteritems(kwargs)
                  if key not in DJQSCSV_KWARGS}

    # add BOM to support CSVs in MS Excel (for Windows only)
    file_obj.write(_safe_utf8_stringify(u'\ufeff'))

    # the CSV must always be built from a values queryset
    # in order to introspect the necessary fields.
    if isinstance(queryset, ValuesQuerySet):
        values_qs = queryset
    else:
        values_qs = queryset.values()

    try:
        field_names = values_qs.field_names
    except AttributeError:
        # in django1.5, empty querysets trigger
        # this exception, but not django 1.6
        raise CSVException("Empty queryset provided to exporter.")

    extra_columns = list(values_qs.query.extra_select)
    if extra_columns:
        field_names += extra_columns

    aggregate_columns = list(values_qs.query.aggregate_select)
    if aggregate_columns:
        field_names += aggregate_columns

    if field_order:
        # fields named in the ordering list come first, the rest keep
        # their original relative order
        ordered = [field for field in field_order if field in field_names]
        remaining = [field for field in field_names
                     if field not in field_order]
        field_names = ordered + remaining

    writer = csv.DictWriter(file_obj, field_names, **csv_kwargs)

    # verbose_name defaults to the raw field name, so in either case
    # this will produce a complete mapping of field names to column names
    name_map = {field: field for field in field_names}
    if use_verbose_names:
        name_map.update({field.name: field.verbose_name
                         for field in queryset.model._meta.fields
                         if field.name in field_names})

    # merge the custom field headers into the verbose/raw defaults, if provided
    merged_header_map = name_map.copy()
    merged_header_map.update(field_header_map)
    if extra_columns:
        merged_header_map.update({col: col for col in extra_columns})

    merged_header_map = {k: _safe_utf8_stringify(v)
                         for (k, v) in merged_header_map.items()}
    writer.writerow(merged_header_map)

    for record in values_qs:
        record = _sanitize_unicode_record(field_serializer_map, record)
        writer.writerow(record)
Example #49
0
    def test_create_instance(self):
        """A SearchFormOptions built from a Meta class should expose every
        declared option unchanged, plus derived lookup/alias maps."""
        class Meta:
            model = Article
            fields = ('author__team__name', 'author__name', 'title')
            order_fields = ('author__team', 'author__name', 'title')
            order_field_name = 'sort_condition'
            keyword_fields = ('author__team__name', 'author__name',
                              'title', 'body')
            keyword_field_name = 'keyword_text'
            widgets = {
                'author__team__name': forms.Textarea,
                'author__name': forms.Textarea,
                'title': forms.Textarea,
            }
            labels = {
                'author__team__name': 'team name',
                'author__name': 'author name',
                'title': 'article title',
            }
            help_texts = {
                'author__team__name': 'team name help text',
                'author__name': 'author name help text',
                'title': 'article title help text',
            }
            error_messages = {
                NON_FIELD_ERRORS: {
                    'unique_together':
                    "%(model_name)s's %(field_labels)s are not unique.",
                }
            }

        options = SearchFormOptions(Meta)
        assert options
        assert isinstance(options.fields, OrderedDict)
        assert isinstance(options.lookups, dict)
        assert isinstance(options.aliases, dict)
        # Every declared Meta option must be carried over verbatim.
        for attr in ('order_fields', 'order_field_name', 'keyword_fields',
                     'keyword_field_name', 'widgets', 'labels',
                     'help_texts', 'error_messages'):
            assert getattr(options, attr) == getattr(Meta, attr)

        # Per-field option dicts default to no lookup and no alias, and the
        # flat lookup/alias maps mirror them.
        for name, conf in six.iteritems(options.fields):
            assert isinstance(name, six.string_types)
            assert isinstance(conf, dict)
            assert conf['lookup'] is None
            assert conf['as'] is None
            assert options.lookups[name] == conf['lookup']
            assert options.aliases[name] == conf['as']
Example #50
0
    def _get_requested_filters(self, **kwargs):
        """
        Convert 'filters' query params into a dict that can be passed
        to Q. Returns a TreeMap with two top-level fields, '_include' and
        '_exclude', which can be used like:

            result = self._get_requested_filters()
            q = Q(**result['_include']) & ~Q(**result['_exclude'])

        Relational filters ("rel|field") are nested under their relation
        path in the returned tree instead of the top level.
        """

        # filters_map maps raw filter keys to lists of raw values;
        # presumably produced by view.get_request_feature — TODO confirm.
        filters_map = kwargs.get('filters_map')

        view = getattr(self, 'view', None)
        if view:
            serializer_class = view.get_serializer_class()
            serializer = serializer_class()
            if not filters_map:
                filters_map = view.get_request_feature(view.FILTER)
        else:
            # No view available: skip serializer-based field resolution.
            serializer = None

        out = TreeMap()

        for key, value in six.iteritems(filters_map):

            # Inclusion or exclusion?  A leading '-' marks exclusion.
            if key[0] == '-':
                key = key[1:]
                category = '_exclude'
            else:
                category = '_include'

            # for relational filters, separate out relation path part
            if '|' in key:
                rel, key = key.split('|')
                rel = rel.split('.')
            else:
                rel = None

            terms = key.split('.')
            # Last part could be operator, e.g. "events.capacity.gte"
            if len(terms) > 1 and terms[-1] in self.VALID_FILTER_OPERATORS:
                operator = terms.pop()
            else:
                operator = None

            # All operators except 'range' and 'in' should have one value
            if operator == 'range':
                # An empty bound degrades 'range' to a one-sided lte/gte.
                value = value[:2]
                if value[0] == '':
                    operator = 'lte'
                    value = value[1]
                elif value[1] == '':
                    operator = 'gte'
                    value = value[0]
            elif operator == 'in':
                # no-op: i.e. accept `value` as an arbitrarily long list
                pass
            elif operator in self.VALID_FILTER_OPERATORS:
                value = value[0]
                if (operator == 'isnull'
                        and isinstance(value, six.string_types)):
                    value = is_truthy(value)
                elif operator == 'eq':
                    # 'eq' is the implicit default; drop the suffix.
                    operator = None

            if serializer:
                s = serializer

                if rel:
                    # get related serializer
                    model_fields, serializer_fields = serializer.resolve(rel)
                    s = serializer_fields[-1]
                    s = getattr(s, 'serializer', s)
                    rel = [Meta.get_query_name(f) for f in model_fields]

                # perform model-field resolution
                model_fields, serializer_fields = s.resolve(terms)
                field = serializer_fields[-1] if serializer_fields else None
                # if the field is a boolean,
                # coerce the value
                if field and isinstance(
                        field,
                    (serializers.BooleanField, serializers.NullBooleanField)):
                    value = is_truthy(value)
                # Rebuild the key in Django ORM "a__b__c" form.
                key = '__'.join([Meta.get_query_name(f) for f in model_fields])

            else:
                key = '__'.join(terms)

            if operator:
                key += '__%s' % operator

            # insert into output tree
            path = rel if rel else []
            path += [category, key]
            out.insert(path, value)
        return out
Example #51
0
def get_backup_strategies():
    """Load every backup strategy registered against the 'nodeconductor'
    distribution's 'backup_strategies' entry point group.

    Returns a dict keyed by the upper-cased entry point name, with the
    loaded entry point object as the value.
    """
    strategies = {}
    group = pkg_resources.get_entry_map('nodeconductor').get(
        'backup_strategies', {})
    for name, entry_point in group.items():
        strategies[name.upper()] = entry_point.load()
    return strategies
Example #52
0
 def assertObjectAttrs(self, obj, **kwargs):
     """Assert that each keyword names an attribute of *obj* whose value
     equals the keyword's value."""
     for name, expected in kwargs.items():
         self.assertEqual(getattr(obj, name), expected)
Example #53
0
    def get_columns(self, with_aliases=False):
        """
        Return the list of columns to use in the select statement. If no
        columns have been specified, returns all columns relating to fields in
        the model.

        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.

        This routine is overridden from Query to handle customized selection of
        geometry columns.

        Returns a two-tuple ``(result, params)`` where ``result`` is the list
        of SQL column expressions and ``params`` their placeholder values.
        """
        # The compiler itself acts as the quoting callable for aggregates.
        qn = self
        qn2 = self.connection.ops.quote_name
        # Extra-select columns: "(sql) AS alias", with the sql optionally
        # wrapped by a geometry-aware format string.
        result = [
            '(%s) AS %s' %
            (self.get_extra_select_format(alias) % col[0], qn2(alias))
            for alias, col in six.iteritems(self.query.extra_select)
        ]
        params = []
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            # Track short column names separately to detect duplicates.
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            # This loop customized for GeoQuery.
            for col, field in self.query.select:
                if isinstance(col, (list, tuple)):
                    alias, column = col
                    table = self.query.alias_map[alias].table_name
                    # Skip columns deferred for this table.
                    if table in only_load and column not in only_load[table]:
                        continue
                    # get_field_select applies geometry-specific selection.
                    r = self.get_field_select(field, alias, column)
                    if with_aliases:
                        if col[1] in col_aliases:
                            # Duplicate short name: invent a unique alias.
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    # Non-tuple select entries render their own SQL.
                    col_sql, col_params = col.as_sql(qn, self.connection)
                    result.append(col_sql)
                    params.extend(col_params)

                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)

        elif self.query.default_cols:
            # No explicit select: fall back to the model's default columns.
            cols, new_aliases = self.get_default_columns(
                with_aliases, col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)

        max_name_length = self.connection.ops.max_name_length()
        for alias, aggregate in self.query.aggregate_select.items():
            agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
            if alias is None:
                result.append(agg_sql)
            else:
                # Truncate the alias to the backend's identifier limit.
                result.append(
                    '%s AS %s' %
                    (agg_sql, qn(truncate_name(alias, max_name_length))))
            params.extend(agg_params)

        # This loop customized for GeoQuery.
        for (table, col), field in self.query.related_select_cols:
            r = self.get_field_select(field, table, col)
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)

        # Remember the aliases for use by later compilation stages.
        self._select_aliases = aliases
        return result, params
Example #54
0
    def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                    export_ordered_set=False):
        # This helper function is used to allow recursion in ``get_fields()``
        # implementation and to provide a fast way for Django's internals to
        # access specific subsets of fields.
        #
        # When ``export_ordered_set`` is True the raw OrderedDict (field ->
        # True) is returned, which recursive calls below rely on; otherwise
        # an immutable list of field instances is returned.

        # Creates a cache key composed of all arguments
        cache_key = (forward, reverse, include_parents, include_hidden, export_ordered_set)
        try:
            # In order to avoid list manipulation. Always return a shallow copy
            # of the results.
            return self._get_fields_cache[cache_key]
        except KeyError:
            pass

        # Using an OrderedDict preserves the order of insertion. This is
        # important when displaying a ModelForm or the contrib.admin panel
        # and no specific ordering is provided.
        fields = OrderedDict()
        options = {
            'include_parents': include_parents,
            'include_hidden': include_hidden,
            'export_ordered_set': True,
        }

        # Abstract models cannot hold reverse fields.
        if reverse and not self.abstract:
            if include_parents:
                parent_list = self.get_parent_list()
                # Recursively call _get_fields() on each parent, with the same
                # options provided in this call.
                for parent in self.parents:
                    for obj, _ in six.iteritems(parent._meta._get_fields(forward=False, **options)):
                        if obj.many_to_many:
                            # In order for a reverse ManyToManyRel object to be
                            # valid, its creation counter must be > 0 and must
                            # be in the parent list.
                            if not (obj.field.creation_counter < 0 and obj.related_model not in parent_list):
                                fields[obj] = True

                        # Non-M2M reverse relations are excluded when they are
                        # auto-created parent links pointing outside the
                        # parent list.
                        elif not ((obj.field.creation_counter < 0 or obj.field.rel.parent_link)
                                  and obj.related_model not in parent_list):
                            fields[obj] = True

            # Tree is computed once and cached until the app cache is expired.
            # It is composed of a list of fields pointing to the current model
            # from other models. If the model is a proxy model, then we also
            # add the concrete model.
            all_fields = (
                self._relation_tree if not self.proxy else
                chain(self._relation_tree, self.concrete_model._meta._relation_tree)
            )

            # Pull out all related objects from forward fields
            for field in (f.rel for f in all_fields):
                # If hidden fields should be included or the relation is not
                # intentionally hidden, add to the fields dict.
                if include_hidden or not field.hidden:
                    fields[field] = True
        if forward:
            if include_parents:
                for parent in self.parents:
                    # Add the forward fields of each parent.
                    fields.update(parent._meta._get_fields(reverse=False, **options))
            # Then the model's own local fields and many-to-many fields.
            fields.update(
                (field, True,)
                for field in chain(self.local_fields, self.local_many_to_many)
            )

        if not export_ordered_set:
            # By default, fields contains field instances as keys and all
            # possible names if the field instance as values. When
            # _get_fields() is called, we only want to return field instances,
            # so we just preserve the keys.
            fields = list(fields.keys())

            # Virtual fields are not inheritable, therefore they are inserted
            # only when the recursive _get_fields() call comes to an end.
            if forward:
                fields.extend(self.virtual_fields)
            fields = make_immutable_fields_list("get_fields()", fields)

        # Store result into cache for later access
        self._get_fields_cache[cache_key] = fields

        # In order to avoid list manipulation. Always
        # return a shallow copy of the results
        return fields