Example #1
    def assertRedirectsNoFollow(self, response, expected_url, use_params=True,
                                status_code=302):
        """Checks response redirect without loading the destination page.

        Django's assertRedirects method loads the destination page, which
        requires that the page be renderable in the current test context
        (possibly requiring additional, unrelated setup).
        """
        # Assert that the response has the correct redirect code.
        self.assertEqual(
            response.status_code, status_code,
            "Response didn't redirect as expected: Response code was {0} "
            "(expected {1})".format(response.status_code, status_code))

        # Assert that the response redirects to the correct base URL.
        # Use force_text to force evaluation of anything created by
        # reverse_lazy.
        response_url = force_text(response['location'])
        expected_url = force_text(expected_url)
        parsed1 = urlparse(response_url)
        parsed2 = urlparse(expected_url)
        self.assertEqual(
            parsed1.path, parsed2.path,
            "Response did not redirect to the expected URL: Redirect "
            "location was {0} (expected {1})".format(parsed1.path, parsed2.path))

        # Optionally assert that the response redirect URL has the correct
        # GET parameters.
        if use_params:
            self.assertDictEqual(
                parse_qs(parsed1.query), parse_qs(parsed2.query),
                "Response did not have the GET parameters expected: GET "
                "parameters were {0} (expected "
                "{1})".format(parsed1.query or {}, parsed2.query or {}))
Example #2
    def get_context(self):
        """
        Prepare the context for templates.
        """
        self.title = _('%s List') % force_text(self.opts.verbose_name)
        model_fields = [(f, f.name in self.list_display, self.get_check_field_url(f))
                        for f in (list(self.opts.fields) + self.get_model_method_fields()) if f.name not in self.list_exclude]

        new_context = {
            'model_name': force_text(self.opts.verbose_name_plural),
            'title': self.title,
            'cl': self,
            'model_fields': model_fields,
            'clean_select_field_url': self.get_query_string(remove=[COL_LIST_VAR]),
            'has_add_permission': self.has_add_permission(),
            'app_label': self.app_label,
            'brand_name': self.opts.verbose_name_plural,
            'brand_icon': self.get_model_icon(self.model),
            'add_url': self.model_admin_url('add'),
            'result_headers': self.result_headers(),
            'results': self.results()
        }
        context = super(ListAdminView, self).get_context()
        context.update(new_context)
        return context
Example #3
    def test_files_content(self):
        self.assertTableNotExists("migrations_unicodemodel")
        cache.register_models('migrations', UnicodeModel)
        call_command("makemigrations", "migrations", verbosity=0)

        init_file = os.path.join(self.migration_dir, "__init__.py")

        # Check for existing __init__.py file in migrations folder
        self.assertTrue(os.path.exists(init_file))

        with open(init_file, 'r') as fp:
            content = force_text(fp.read())
            self.assertEqual(content, '')

        initial_file = os.path.join(self.migration_dir, "0001_initial.py")

        # Check for existing 0001_initial.py file in migration folder
        self.assertTrue(os.path.exists(initial_file))

        with open(initial_file, 'r') as fp:
            content = force_text(fp.read())
            self.assertTrue('# encoding: utf8' in content)
            self.assertTrue('migrations.CreateModel' in content)

            if six.PY3:
                self.assertTrue('úñí©óðé µóðéø' in content)  # Meta.verbose_name
                self.assertTrue('úñí©óðé µóðéøß' in content)  # Meta.verbose_name_plural
                self.assertTrue('ÚÑÍ¢ÓÐÉ' in content)  # title.verbose_name
                self.assertTrue('“Ðjáñgó”' in content)  # title.default
            else:
                self.assertTrue('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8' in content)  # Meta.verbose_name
                self.assertTrue('\\xfa\\xf1\\xed\\xa9\\xf3\\xf0\\xe9 \\xb5\\xf3\\xf0\\xe9\\xf8\\xdf' in content)  # Meta.verbose_name_plural
                self.assertTrue('\\xda\\xd1\\xcd\\xa2\\xd3\\xd0\\xc9' in content)  # title.verbose_name
                self.assertTrue('\\u201c\\xd0j\\xe1\\xf1g\\xf3\\u201d' in content)  # title.default
Example #4
    def handle_upload(self):
        request = self.request

        try:
            folder_id = int(request.REQUEST.get("folder_id", 0))
            if folder_id != 0:
                folder = Folder.objects.get(pk=folder_id)
            else:
                folder = None  # Root folder upload. How bold!
        except Exception as exc:
            return JsonResponse({"error": "Invalid folder: %s" % force_text(exc)})

        try:
            upload_file = request.FILES["file"]

            if upload_file.content_type.startswith("image/"):
                filer_file = filer_image_from_upload(request, path=folder, upload_data=upload_file)
            else:
                filer_file = filer_file_from_upload(request, path=folder, upload_data=upload_file)
        except Exception as exc:
            return JsonResponse({"error": force_text(exc)})

        return JsonResponse({
            "file": _filer_file_to_json_dict(filer_file),
            "message": _("%(file)s uploaded to %(folder)s") % {
                "file": filer_file.label,
                "folder": get_folder_name(folder)
            }
        })
Example #5
 def as_text(self):
     """
     Dialog Forms rendered as summary just display their values instead of input fields.
     This is useful to render a summary of a previously filled out form.
     """
     try:
         return mark_safe(self.instance.as_text())
     except (AttributeError, TypeError):
         output = []
         for name in self.fields.keys():
             bound_field = self[name]
             value = bound_field.value()
             if bound_field.is_hidden:
                 continue
             if isinstance(value, (list, tuple)):
                 line = []
                 cast_to = type(tuple(bound_field.field.choices)[0][0])
                 for v in value:
                     try:
                         line.append(dict(bound_field.field.choices)[cast_to(v)])
                     except (AttributeError, KeyError):
                         pass
                 output.append(force_text(', '.join(line)))
             elif value:
                 try:
                     value = dict(bound_field.field.choices)[value]
                 except (AttributeError, KeyError):
                     pass
                 output.append(force_text(value))
         return mark_safe('\n'.join(output))
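
A small illustration of what the fallback branch produces, assuming a hypothetical form with a ChoiceField and a CharField; hidden fields are skipped and multi-value fields are joined with ', '.

# Hypothetical form:
#   color = forms.ChoiceField(choices=[('r', 'Red'), ('g', 'Green')])  # bound value 'r'
#   name  = forms.CharField()                                          # bound value 'Alice'
#
# as_text() would return the newline-joined, mark_safe'd string:
#   Red
#   Alice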
Example #6
    def test_relatedfieldlistfilter_foreignkey(self):
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure that all users are present in the author's list filter
        filterspec = changelist.get_filters(request)[0][1]
        expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

        request = self.request_factory.get('/', {'author__isnull': 'True'})
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.gipsy_book])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'Verbose Author')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')

        request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(force_text(filterspec.title), 'Verbose Author')
        # order of choices depends on User model, which has no order
        choice = select_by(filterspec.choices(changelist), "display", "alfred")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
Example #7
    def test_relatedfieldlistfilter_manytomany(self):
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure that all users are present in the contrib's list filter
        filterspec = changelist.get_filters(request)[0][2]
        expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
        self.assertEqual(filterspec.lookup_choices, expected)

        request = self.request_factory.get('/', {'contributors__isnull': 'True'})
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')

        request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][2]
        self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
        choice = select_by(filterspec.choices(changelist), "display", "bob")
        self.assertEqual(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
Example #8
def safe_join(base, *paths):
    """
    A version of django.utils._os.safe_join for S3 paths.

    Joins one or more path components to the base path component
    intelligently. Returns a normalized version of the final path.

    The final path must be located inside of the base path component
    (otherwise a ValueError is raised).

    Paths outside the base path indicate a possible security
    sensitive operation.
    """
    base_path = force_text(base)
    base_path = base_path.rstrip("/")
    paths = [force_text(p) for p in paths]

    final_path = base_path
    for path in paths:
        final_path = urlparse.urljoin(final_path.rstrip("/") + "/", path)

    # Ensure final_path starts with base_path and that the next character after
    # the final path is '/' (or nothing, in which case final_path must be
    # equal to base_path).
    base_path_len = len(base_path)
    if not final_path.startswith(base_path) or final_path[base_path_len : base_path_len + 1] not in ("", "/"):
        raise ValueError("the joined path is located outside of the base path" " component")

    return final_path.lstrip("/")
Example #9
    def test_allvaluesfieldlistfilter(self):
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/', {'year__isnull': 'True'})
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])

        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'year')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')

        request = self.request_factory.get('/', {'year': '2002'})
        changelist = self.get_changelist(request, Book, modeladmin)

        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(force_text(filterspec.title), 'year')
        choices = list(filterspec.choices(changelist))
        self.assertEqual(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?year=2002')
Example #10
    def get_field_info(self, field):
        """
        Given an instance of a serializer field, return a dictionary
        of metadata about it.
        """
        field_info = OrderedDict()
        field_info['type'] = self.label_lookup[field]
        field_info['required'] = getattr(field, 'required', False)

        attrs = [
            'read_only', 'label', 'help_text',
            'min_length', 'max_length',
            'min_value', 'max_value'
        ]

        for attr in attrs:
            value = getattr(field, attr, None)
            if value is not None and value != '':
                field_info[attr] = force_text(value, strings_only=True)

        if getattr(field, 'child', None):
            field_info['child'] = self.get_field_info(field.child)
        elif getattr(field, 'fields', None):
            field_info['children'] = self.get_serializer_info(field)

        if not field_info.get('read_only') and hasattr(field, 'choices'):
            field_info['choices'] = [
                {
                    'value': choice_value,
                    'display_name': force_text(choice_name, strings_only=True)
                }
                for choice_value, choice_name in field.choices.items()
            ]

        return field_info
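
For orientation, a rough sketch of the metadata this would produce, assuming a DRF serializers.ChoiceField; the exact keys depend on the field's attributes.

# Illustrative only, for an assumed
#   serializers.ChoiceField(choices=[('s', 'Small'), ('l', 'Large')], label='Size')
#
# {
#     'type': 'choice',
#     'required': True,
#     'read_only': False,
#     'label': 'Size',
#     'choices': [
#         {'value': 's', 'display_name': 'Small'},
#         {'value': 'l', 'display_name': 'Large'},
#     ],
# }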
Example #11
    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info is displayed in the email report.
        """
        with self.settings(ADMINS=(('Admin', '*****@*****.**'),)):
            mail.outbox = [] # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            response = view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]

            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)

            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)

            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)
Example #12
def get_toolbar_plugin_struct(plugins_list, slot, page, parent=None):
    """
    Return the list of plugins to render in the toolbar.
    The dictionary contains the label, the classname and the module for the
    plugin.
    Names and modules can be defined on a per-placeholder basis using
    'plugin_modules' and 'plugin_labels' attributes in CMS_PLACEHOLDER_CONF

    :param plugins_list: list of plugins valid for the placeholder
    :param slot: placeholder slot name
    :param page: the page
    :param parent: parent plugin class, if any
    :return: list of dictionaries
    """
    template = None
    if page:
        template = page.template
    main_list = []
    for plugin in plugins_list:
        allowed_parents = plugin().get_parent_classes(slot, page)
        if parent:
            ## skip to the next if this plugin is not allowed to be a child
            ## of the parent
            if allowed_parents and parent.__name__ not in allowed_parents:
                continue
        else:
            if allowed_parents:
                continue
        modules = get_placeholder_conf("plugin_modules", slot, template, default={})
        names = get_placeholder_conf("plugin_labels", slot, template, default={})
        main_list.append({'value': plugin.value,
                          'name': force_text(names.get(plugin.value, plugin.name)),
                          'module': force_text(modules.get(plugin.value, plugin.module))})
    return sorted(main_list, key=operator.itemgetter("module"))
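
For orientation, the returned structure might look roughly like this (plugin and module names are made up):

# Illustrative return value (entries sorted by 'module'):
# [
#     {'value': 'LinkPlugin', 'name': 'Link', 'module': 'Generic'},
#     {'value': 'TextPlugin', 'name': 'Text', 'module': 'Generic'},
# ]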
Example #13
def safe_join(base, *paths):
    """
    A version of django.utils._os.safe_join for S3 paths.

    Joins one or more path components to the base path component
    intelligently. Returns a normalized version of the final path.

    The final path must be located inside of the base path component
    (otherwise a ValueError is raised).

    Paths outside the base path indicate a possible security
    sensitive operation.
    """
    base_path = force_text(base)
    base_path = base_path.rstrip('/')
    paths = [force_text(p) for p in paths]

    final_path = base_path + '/'
    for path in paths:
        _final_path = posixpath.normpath(posixpath.join(final_path, path))
        # posixpath.normpath() strips the trailing /. Add it back.
        if path.endswith('/') or _final_path + '/' == final_path:
            _final_path += '/'
        final_path = _final_path
    if final_path == base_path:
        final_path += '/'

    # Ensure final_path starts with base_path and that the next character after
    # the base path is /.
    base_path_len = len(base_path)
    if (not final_path.startswith(base_path) or final_path[base_path_len] != '/'):
        raise ValueError('the joined path is located outside of the base path'
                         ' component')

    return final_path.lstrip('/')
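
A few hedged examples of the intended behaviour of this variant (assuming force_text and posixpath are imported as in the snippet above):

# Illustrative calls, with the safe_join defined above in scope:
#
#   safe_join("media", "photos", "cat.jpg")
#   -> 'media/photos/cat.jpg'
#
#   safe_join("media", "photos/")            # trailing slash is preserved
#   -> 'media/photos/'
#
#   safe_join("media", "../../etc/passwd")   # escapes the base path
#   -> raises ValueError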
Example #14
 def render(self, name, value, attrs=None):
     final_attrs = self.build_attrs(attrs)
     if value is None:
         value = ''
     final_attrs['value'] = force_text(self._format_value(value))
     return format_html("""<select class="form-control bfh-states" data-name="{0}" data-state="{1}"\
     {2}></select>""", force_text(name), force_text(value), flatatt(final_attrs))
Example #15
 def ensure_acceptable_value(self, value):
     """
     if schema_mode disabled (default behaviour):
         - ensure booleans, integers, floats, Decimals, lists and dicts are
           converted to string
         - convert True and False objects to "true" and "false" so they can be
           decoded back with the json library if needed
         - convert lists and dictionaries to json formatted strings
         - leave alone all other objects because they might be representations of django models
     else:
         - encode utf8 strings in python2
         - convert to string
     """
     if not self.schema_mode:
         if isinstance(value, bool):
             return force_text(value).lower()
         elif isinstance(value, six.integer_types + (float, Decimal)):
             return force_text(value)
         elif isinstance(value, (list, dict)):
             return force_text(json.dumps(value))
         else:
             return value
     else:
         # perform string conversion unless is None
         if value is not None:
             value = force_str(value)
         return value
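
As a quick orientation, the conversions performed when schema_mode is disabled might look like this (illustrative values; self. omitted for brevity):

# Illustrative outcomes with schema_mode disabled:
#
#   ensure_acceptable_value(True)             -> 'true'
#   ensure_acceptable_value(False)            -> 'false'
#   ensure_acceptable_value(42)               -> '42'
#   ensure_acceptable_value(Decimal('9.99'))  -> '9.99'
#   ensure_acceptable_value({'a': 1})         -> '{"a": 1}'
#   ensure_acceptable_value(model_instance)   -> model_instance  (left untouched)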
Example #16
 def render(self, name, value, attrs=None):
     if value is None:
         value = ''
     input_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
     if value != '':
         # Only add the 'value' attribute if a value is non-empty.
         input_attrs['value'] = force_text(self._format_value(value))
     input_attrs = dict([(key, conditional_escape(val)) for key, val in input_attrs.items()])  # python2.6 compatible
     if not self.picker_id:
          self.picker_id = (input_attrs.get('id', '') +
                            '_pickers').replace(' ', '_')
     self.div_attrs['id'] = self.picker_id
     picker_id = conditional_escape(self.picker_id)
     div_attrs = dict(
         [(key, conditional_escape(val)) for key, val in self.div_attrs.items()])  # python2.6 compatible
     icon_attrs = dict([(key, conditional_escape(val)) for key, val in self.icon_attrs.items()])
     html = self.html_template % dict(div_attrs=flatatt(div_attrs),
                                      input_attrs=flatatt(input_attrs),
                                      icon_attrs=flatatt(icon_attrs))
     if self.options:
         self.options['language'] = translation.get_language()
         js = self.js_template % dict(picker_id=picker_id,
                                      options=json.dumps(self.options or {}))
     else:
         js = ''
     return mark_safe(force_text(html + js))
Example #17
def celery_task_command(request):

    command = request.POST.get('celery-command')
    job_id = request.POST.get('job-id')
    job = WooeyJob.objects.get(pk=job_id)
    response = {'valid': False, }
    valid = valid_user(job.script_version.script, request.user)
    if valid.get('valid') is True:
        user = request.user if request.user.is_authenticated() else None
        if user == job.user or job.user is None:
            if command == 'resubmit':
                new_job = job.submit_to_celery(resubmit=True, user=request.user)
                response.update({'valid': True, 'extra': {'job_url': reverse('wooey:celery_results', kwargs={'job_id': new_job.pk})}})
            elif command == 'rerun':
                job.submit_to_celery(user=request.user, rerun=True)
                response.update({'valid': True, 'redirect': reverse('wooey:celery_results', kwargs={'job_id': job_id})})
            elif command == 'clone':
                response.update({'valid': True, 'redirect': reverse('wooey:wooey_script_clone', kwargs={'slug': job.script_version.script.slug, 'job_id': job_id})})
            elif command == 'delete':
                job.status = WooeyJob.DELETED
                job.save()
                response.update({'valid': True, 'redirect': reverse('wooey:wooey_home')})
            elif command == 'stop':
                celery_app.control.revoke(job.celery_id, signal='SIGKILL', terminate=True)
                job.status = states.REVOKED
                job.save()
                response.update({'valid': True, 'redirect': reverse('wooey:celery_results', kwargs={'job_id': job_id})})
            else:
                response.update({'errors': {'__all__': [force_text(_("Unknown Command"))]}})
    else:
        response.update({'errors': {'__all__': [force_text(valid.get('error'))]}})
    return JsonResponse(response)
Example #18
 def __init__(self, alias, data):
     self.alias = alias
     ctx_data = {}
     for key, value in trim(data).iteritems():
         if value not in EMPTY_VALUES:
             ctx_data[force_text(key)] = force_text(value)
     self.data = ctx_data
Example #19
    def _repr_query_fragment_callback(self, field, filter_type, value):
        if six.PY3:
            value = force_text(value)
        else:
            value = force_text(value).encode('utf8')

        return "%s%s%s=%s" % (field, FILTER_SEPARATOR, filter_type, value)
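
A hedged illustration, assuming FILTER_SEPARATOR is the usual '__' lookup separator:

# Assuming FILTER_SEPARATOR == '__':
#   self._repr_query_fragment_callback('title', 'icontains', 'café')
#   -> 'title__icontains=café' on Python 3 (UTF-8 encoded bytes on Python 2)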
Example #20
File: tests.py Project: borls/blog
    def test_article_comments_rss(self):
        article = Article.objects.create(
            title=rs(20), content=rt(4), created=tz.now(), slug=rcs(10),
            is_published=True
        )

        response = self.client.get(
            reverse('lbe:article_comments_rss', args=[article.slug])
        )
        self.assertEqual(response.status_code, 200)

        comment_approved = Comment.objects.create(
            article=article, user_name=rs(10), content=rt(1), created=tz.now(),
            is_approved=True
        )
        comment_not_approved = Comment.objects.create(
            article=article, user_name=rs(10), content=rt(1), created=tz.now(),
            is_approved=False
        )

        response = self.client.get(
            reverse('lbe:article_comments_rss', args=[article.slug])
        )
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            escape(comment_approved.get_content()), force_text(response)
        )
        self.assertNotIn(
            escape(comment_not_approved.get_content()), force_text(response)
        )
Example #21
File: tests.py Project: borls/blog
    def test_article_list_and_feed(self):
        response = self.client.get(reverse('lbe:article_list'))
        self.assertEqual(response.status_code, 200)

        article_pub = Article.objects.create(
            title=rs(20), content=rt(4), created=tz.now(), slug=rcs(10),
            is_published=True
        )
        article_not_pub = Article.objects.create(
            title=rs(20), content=rt(4), created=tz.now(), slug=rcs(10)
        )
        page_pub = Article.objects.create(
            title=rs(20), content=rt(4), created=tz.now(), slug=rcs(10),
            is_standalone=True, is_published=True
        )
        page_not_pub = Article.objects.create(
            title=rs(20), content=rt(4), created=tz.now(), slug=rcs(10),
            is_standalone=True
        )

        response = self.client.get(reverse('lbe:article_list'))
        self.assertEqual(response.status_code, 200)
        self.assertIn(article_pub, response.context['object_list'])
        for a in (article_not_pub, page_pub, page_not_pub):
            self.assertNotIn(a, response.context['object_list'])

        response = self.client.get(reverse('lbe:rss'))
        self.assertEqual(response.status_code, 200)
        self.assertIn(escape(article_pub.get_content()), force_text(response))
        for a in (article_not_pub, page_pub, page_not_pub):
            self.assertNotIn(escape(a.get_content()), force_text(response))
Example #22
    def handle(self, *args, **options):

        username = force_text(options['username'])
        users = list(User.objects.filter(name=username)[0:1])
        if not users:
            user = User(name=username, uid_number=options['uid'], gid_number=options['gid'])
        else:
            user = users[0]
        if options['display_name']:
            user.display_name = options['display_name']
        if options['phone']:
            user.phone = options['phone']
        if options['gid']:
            user.gid = options['gid']
        if options['password']:
            user.set_password(options['password'])
        # Persist the user whether or not a password was supplied.
        user.save()
        for group_name in options['group']:
            group_name = force_text(group_name)
            groups = list(Group.objects.filter(name=group_name)[0:1])
            if groups:
                group = groups[0]
            else:
                group = Group(name=group_name)
            if username not in group.members:
                group.members.append(username)
            group.save()
Example #23
 def test_all_files_more_verbose(self):
     """
     Test that findstatic returns all candidate files if run without --first and -v2.
     Also, test that findstatic returns the searched locations with -v2.
     """
     out = six.StringIO()
     call_command('findstatic', 'test/file.txt', verbosity=2, stdout=out)
     out.seek(0)
     lines = [l.strip() for l in out.readlines()]
     self.assertIn('project', force_text(lines[1]))
     self.assertIn('apps', force_text(lines[2]))
     self.assertIn("Looking in the following locations:", force_text(lines[3]))
     searched_locations = ', '.join(lines[4:])
     # AppDirectoriesFinder searched locations
     self.assertIn(os.path.join('staticfiles_tests', 'apps', 'test', 'static'),
                   searched_locations)
     self.assertIn(os.path.join('staticfiles_tests', 'apps', 'no_label', 'static'),
                   searched_locations)
     self.assertIn(os.path.join('django', 'contrib', 'admin', 'static'),
                   searched_locations)
     self.assertIn(os.path.join('tests', 'servers', 'another_app', 'static'),
                   searched_locations)
     # FileSystemFinder searched locations
     self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][1][1], searched_locations)
     self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][0], searched_locations)
     # DefaultStorageFinder searched locations
     self.assertIn(os.path.join('staticfiles_tests', 'project', 'site_media', 'media'),
                   searched_locations)
Example #24
 def render(self):
     return format_html('<div class="form-row">{}</div>',
         format_html_join('\n', '<div class="field-box">'
                          '<span class="btn {1}">{2}</span>'
                          '<div class="label">{0}</div></div>',
             ((force_text(w), w.choice_value, force_text(self.BUTTON_TYPES[w.choice_value])) for w in self)
         ))
Example #25
 def test_toolbar_with_items(self):
     """
     Test that PageMeta/TitleMeta items are present for superuser if PageMeta/TitleMeta exists for current page
     """
     from cms.toolbar.toolbar import CMSToolbar
     page1, page2 = self.get_pages()
     page_ext = PageMeta.objects.create(extended_object=page1)
     request = self.get_page_request(page1, self.user, '/', edit=True)
     toolbar = CMSToolbar(request)
     toolbar.get_left_items()
     page_menu = toolbar.menus['page']
     meta_menu = page_menu.find_items(SubMenu, name=force_text(PAGE_META_MENU_TITLE))[0].item
     try:
         pagemeta_menu = meta_menu.find_items(ModalItem, name="{0}...".format(force_text(PAGE_META_ITEM_TITLE)))
         self.assertEqual(len(pagemeta_menu), 1)
     except AssertionError:
         pagemeta_menu = meta_menu.find_items(ModalItem, name="{0} ...".format(force_text(PAGE_META_ITEM_TITLE)))
         self.assertEqual(len(pagemeta_menu), 1)
     self.assertTrue(pagemeta_menu[0].item.url.startswith(reverse('admin:djangocms_page_meta_pagemeta_change', args=(page_ext.pk,))))
     for title in page1.title_set.all():
         language = get_language_object(title.language)
         try:
             titlemeta_menu = meta_menu.find_items(ModalItem, name='{0}...'.format(language['name']))
             self.assertEqual(len(titlemeta_menu), 1)
         except AssertionError:
             titlemeta_menu = meta_menu.find_items(ModalItem, name='{0} ...'.format(language['name']))
             self.assertEqual(len(titlemeta_menu), 1)
         try:
             title_ext = TitleMeta.objects.get(extended_object_id=title.pk)
             self.assertTrue(titlemeta_menu[0].item.url.startswith(reverse('admin:djangocms_page_meta_titlemeta_change', args=(title_ext.pk,))))
         except TitleMeta.DoesNotExist:
             self.assertTrue(titlemeta_menu[0].item.url.startswith(reverse('admin:djangocms_page_meta_titlemeta_add')))
Example #26
    def handle_related_field(self, resource, field, field_name, request):
        links = self.dict_class()
        linked_ids = self.dict_class()

        related_field = get_related_field(field)

        model = self.model_from_obj(related_field)
        resource_type = self.model_to_resource_type(model)

        if field_name in resource:
            links[field_name] = {
                "type": resource_type,
            }

            if is_related_many(field):
                link_data = [
                    encoding.force_text(pk) for pk in resource[field_name]]
            elif resource[field_name]:
                link_data = encoding.force_text(resource[field_name])
            else:
                link_data = None

            linked_ids[field_name] = link_data

        return {"linked_ids": linked_ids, "links": links}
Example #27
def generate_unique_username(txt):
    from .account.app_settings import USER_MODEL_USERNAME_FIELD

    username = unicodedata.normalize('NFKD', force_text(txt))
    username = username.encode('ascii', 'ignore').decode('ascii')
    username = force_text(re.sub(r'[^\w\s@+.-]', '', username).lower())
    # Django allows for '@' in usernames in order to accommodate
    # projects wanting to use e-mail for username. In allauth we don't
    # use this, we already have a proper place for putting e-mail
    # addresses (EmailAddress), so let's not use the full e-mail
    # address and only take the part leading up to the '@'.
    username = username.split('@')[0]
    username = username.strip() or 'user'

    User = get_user_model()
    try:
        max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length
    except FieldDoesNotExist:
        raise ImproperlyConfigured(
            "USER_MODEL_USERNAME_FIELD does not exist in user-model"
        )
    i = 0
    while True:
        try:
            if i:
                pfx = str(i + 1)
            else:
                pfx = ''
            ret = username[0:max_length - len(pfx)] + pfx
            User.objects.get(**{USER_MODEL_USERNAME_FIELD: ret})
            i += 1
        except User.DoesNotExist:
            return ret
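
Roughly how this behaves (illustrative only; actual results depend on the configured user model, its username max_length, and which usernames already exist):

# Illustrative only:
#   generate_unique_username('José Pérez')       -> 'jose perez'  (ASCII-folded, lowercased)
#   generate_unique_username('bob@example.org')  -> 'bob'         (part before the '@')
#   generate_unique_username('!!!')              -> 'user'        (fallback when nothing is left)
#
# If the candidate is already taken, a numeric suffix is appended
# ('bob2', 'bob3', ...) until an unused username is found.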
Example #28
 def __init__(self, name, value, attrs, choice, index):
     self.name = name
     self.value = value
     self.attrs = attrs
     self.choice_value = force_text(choice[0])
     self.choice_label = force_text(choice[1])
     self.index = index
Example #29
def verbose_field_name(model, field_name):
    """
    Get the verbose name for a given ``field_name``. The ``field_name``
    will be traversed across relationships. Returns '[invalid name]' for
    any field name that cannot be traversed.

    ex::

        >>> verbose_field_name(Article, 'author__name')
        'author name'

    """
    if field_name is None:
        return '[invalid name]'

    parts = get_field_parts(model, field_name)
    if not parts:
        return '[invalid name]'

    names = []
    for part in parts:
        if isinstance(part, ForeignObjectRel):
            names.append(force_text(part.related_name))
        else:
            names.append(force_text(part.verbose_name))

    return ' '.join(names)
Example #30
    def generic_lookup(self, request):
        if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
        
        if 'content_type' in request.GET and 'object_id' in request.GET:
            content_type_id = request.GET['content_type']
            object_id = request.GET['object_id']
            
            obj_dict = {
                'content_type_id': content_type_id,
                'object_id': object_id,
            }

            content_type = ContentType.objects.get(pk=content_type_id)
            obj_dict["content_type_text"] = capfirst(force_text(content_type))

            try:
                obj = content_type.get_object_for_this_type(pk=object_id)
                obj_dict["object_text"] = capfirst(force_text(obj))
            except ObjectDoesNotExist:
                raise Http404
            
            resp = json.dumps(obj_dict, ensure_ascii=False)
        else:
            resp = ''
        return HttpResponse(resp, content_type='application/json')
Example #31
 def convert_textfield_value(self, value, expression, connection, context):
     if isinstance(value, Database.LOB):
         value = force_text(value.read())
     return value
Example #32
    def get_success_url(self, user=None):
        """
        Return the URL to redirect to on success.

        """
        return force_text(self.success_url)
Example #33
 def render(self):
     return mark_safe(u'\n'.join([force_text(w) for w in self]))
Example #34
def templatize(src, origin=None):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    from django.conf import settings
    from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
                                 TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
    src = force_text(src, settings.FILE_CHARSET)
    out = StringIO()
    message_context = None
    intrans = False
    inplural = False
    singular = []
    plural = []
    incomment = False
    comment = []
    for t in Lexer(src, origin).tokenize():
        if incomment:
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        if message_context:
                            out.write(' npgettext(%r, %r, %r,count) ' %
                                      (message_context, ''.join(singular),
                                       ''.join(plural)))
                        else:
                            out.write(' ngettext(%r, %r, count) ' %
                                      (''.join(singular), ''.join(plural)))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext(%r, %r) ' %
                                      (message_context, ''.join(singular)))
                        else:
                            out.write(' gettext(%r) ' % ''.join(singular))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: %s (%sline %d)"
                        % (t.contents, filemsg, t.lineno))
            elif t.token_type == TOKEN_VAR:
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = one_percent_re.sub('%%', t.contents)
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = one_percent_re.sub('%%', g)
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext(%r, %r) ' % (message_context, g))
                        message_context = None
                    else:
                        out.write(' gettext(%r) ' % g)
                elif bmatch:
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':', 1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                out.write(' # %s' % t.contents)
            else:
                out.write(blankout(t.contents, 'X'))
    return force_str(out.getvalue())
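
As a rough illustration of the transformation (output is approximate; exact spacing and quoting follow the %r formatting above):

# Illustrative input/output:
#
#   template source:
#       Hi {% trans "Hello" %} everyone
#
#   approximate templatize() output:
#       XX  gettext('Hello')  XXXXXXXX
#
# Translation tags become gettext()/ngettext()/pgettext() calls that
# xgettext can parse, other template content is blanked out with
# placeholder characters, and translator comments inside
# {% comment %} blocks are emitted as '#' comments.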
Example #35
    def escribir_archivo_tiempos_csv(self, estadisticas):

        with open(self.ruta, 'wb') as csvfile:
            # Build the header row
            encabezado = []

            encabezado.append(_("Agente"))
            encabezado.append(_("Tiempo de sesion"))
            encabezado.append(_("Tiempo de pausa"))
            encabezado.append(_("Tiempos en llamada"))
            encabezado.append(_("Porcentaje en llamada"))
            encabezado.append(_("Porcentaje en pausa"))
            encabezado.append(_("Porcentaje en espera"))
            encabezado.append(_("Cantidad de llamadas procesadas"))
            encabezado.append(_("Tiempo promedio de llamadas"))
            encabezado.append(_("Cantidad de intentos fallidos"))

            # Create the csv writer
            csv_writer = csv.writer(csvfile)

            # Write the header row (utf-8 encoded for Python 2's csv module)
            lista_encabezados_utf8 = [
                force_text(item).encode('utf-8') for item in encabezado
            ]
            csv_writer.writerow(lista_encabezados_utf8)

            # Iterate over the per-agent time statistics
            for agente in estadisticas["agentes_tiempos"]:
                lista_opciones = []

                # --- Collect the data

                lista_opciones.append(agente.get_nombre_agente())
                tiempo_sesion = "0hs"
                if agente.get_string_tiempo_sesion():
                    tiempo_sesion = agente.get_string_tiempo_sesion() + "hs"
                lista_opciones.append(tiempo_sesion)
                tiempo_pausa = "0hs"
                if agente.get_string_tiempo_pausa():
                    tiempo_pausa = agente.get_string_tiempo_pausa() + "hs"
                lista_opciones.append(tiempo_pausa)
                tiempo_llamada = "0hs"
                if agente.get_string_tiempo_llamada():
                    tiempo_llamada = str(
                        agente.get_string_tiempo_llamada()) + "hs"
                lista_opciones.append(tiempo_llamada)
                porcentaje_llamada = "0%"
                if agente.tiempo_porcentaje_llamada:
                    porcentaje_llamada = str(
                        agente.tiempo_porcentaje_llamada) + "%"
                lista_opciones.append(porcentaje_llamada)
                porcentaje_pausa = "0%"
                if agente.tiempo_porcentaje_pausa:
                    porcentaje_pausa = str(
                        agente.tiempo_porcentaje_pausa) + "%"
                lista_opciones.append(porcentaje_pausa)
                porcentaje_wait = "0%"
                if agente.tiempo_porcentaje_wait:
                    porcentaje_wait = str(agente.tiempo_porcentaje_wait) + "%"
                lista_opciones.append(porcentaje_wait)
                lista_opciones.append(agente.cantidad_llamadas_procesadas)
                lista_opciones.append(
                    str(agente.get_promedio_llamadas()) + "s")
                lista_opciones.append(agente.cantidad_intentos_fallidos)

                # --- Finally, write the row
                lista_opciones_utf8 = [
                    force_text(item).encode('utf-8') for item in lista_opciones
                ]
                csv_writer.writerow(lista_opciones_utf8)
Example #36
 def __init__(self, message):
     """Initialize with status code 500."""
     self.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
     self.detail = {"detail": force_text(message)}
Example #37
 def process_clob(self, value):
     if value is None:
         return ''
     return force_text(value.read())
Example #38
 def get_culprit(self):
     """Helper to calculate the default culprit"""
     return force_text(
         self._data.get('culprit') or self._data.get('transaction')
         or generate_culprit(self._data) or '')
Example #39
 def image_filename(self):
     return os.path.basename(force_text(self.image.name))
Example #40
    def save(self, project_id, raw=False, assume_normalized=False):
        # Normalize if needed
        if not self._normalized:
            if not assume_normalized:
                self.normalize()
            self._normalized = True

        data = self._data

        project = Project.objects.get_from_cache(id=project_id)
        project._organization_cache = Organization.objects.get_from_cache(
            id=project.organization_id)

        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(
                project_id=project.id,
                event_id=data['event_id'],
            )
        except Event.DoesNotExist:
            pass
        else:
            # Make sure we cache on the project before returning
            event._project_cache = project
            logger.info('duplicate.found',
                        exc_info=True,
                        extra={
                            'event_uuid': data['event_id'],
                            'project_id': project.id,
                            'model': Event.__name__,
                        })
            return event

        # Pull out the culprit
        culprit = self.get_culprit()

        # Pull the toplevel data we're interested in
        level = data.get('level')

        # TODO(mitsuhiko): this code path should be gone by July 2018.
        # This is going to be fine because no code actually still depends
        # on integers here.  When we need an integer it will be converted
        # into one later.  Old workers used to send integers here.
        if level is not None and isinstance(level, six.integer_types):
            level = LOG_LEVELS[level]

        transaction_name = data.get('transaction')
        logger_name = data.get('logger')
        release = data.get('release')
        dist = data.get('dist')
        environment = data.get('environment')
        recorded_timestamp = data.get('timestamp')

        # We need to swap out the data with the one internal to the newly
        # created event object
        event = self._get_event_instance(project_id=project_id)
        self._data = data = event.data.data

        event._project_cache = project

        date = event.datetime
        platform = event.platform
        event_id = event.event_id

        if transaction_name:
            transaction_name = force_text(transaction_name)

        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction).  These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        setdefault_path(data, 'tags', value=[])
        set_tag(data, 'level', level)
        if logger_name:
            set_tag(data, 'logger', logger_name)
        if environment:
            set_tag(data, 'environment', environment)
        if transaction_name:
            set_tag(data, 'transaction', transaction_name)

        if release:
            # don't allow a conflicting 'release' tag
            pop_tag(data, 'release')
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )
            set_tag(data, 'sentry:release', release.version)

        if dist and release:
            dist = release.add_dist(dist, date)
            # don't allow a conflicting 'dist' tag
            pop_tag(data, 'dist')
            set_tag(data, 'sentry:dist', dist.name)
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            pop_tag(data, 'user')
            set_tag(data, 'sentry:user', event_user.tag_value)

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        grouping_config = load_grouping_config(
            get_grouping_config_dict_for_event_data(data, project))
        normalize_stacktraces_for_grouping(data, grouping_config)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    if get_tag(data, key) is None:
                        set_tag(data, key, value)

        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                set_tag(data, k, v)
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.path, None)

        # The active grouping config was put into the event in the
        # normalize step before.  We now also make sure that the
        # fingerprint was set to `'{{ default }}'` just in case someone
        # removed it from the payload.  The call to get_hashes will then
        # look at `grouping_config` to pick the right parameters.
        data['fingerprint'] = data.get('fingerprint') or ['{{ default }}']
        apply_server_fingerprinting(
            data, get_fingerprinting_config_for_project(project))
        hashes = event.get_hashes()
        data['hashes'] = hashes

        # we want to freeze not just the metadata and type in but also the
        # derived attributes.  The reason for this is that we push this
        # data into kafka for snuba processing and our postprocessing
        # picks up the data right from the snuba topic.  For most usage
        # however the data is dynamically overridden by Event.title and
        # Event.location (See Event.as_dict)
        materialized_metadata = self.materialize_metadata()
        event_metadata = materialized_metadata['metadata']
        data.update(materialized_metadata)
        data['culprit'] = culprit

        # index components into ``Event.message``
        # See GH-3248
        event.message = self.get_search_message(event_metadata, culprit)
        received_timestamp = event.data.get('received') or float(
            event.datetime.strftime('%s'))

        # The group gets the same metadata as the event when it's flushed but
        # additionally the `last_received` key is set.  This key is used by
        # _save_aggregate.
        group_metadata = dict(materialized_metadata)
        group_metadata['last_received'] = received_timestamp
        kwargs = {
            'platform': platform,
            'message': event.message,
            'culprit': culprit,
            'logger': logger_name,
            'level': LOG_LEVELS_MAP.get(level),
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': group_metadata,
        }

        if release:
            kwargs['first_release'] = release

        try:
            group, is_new, is_regression, is_sample = self._save_aggregate(
                event=event, hashes=hashes, release=release, **kwargs)
        except HashDiscarded:
            event_discarded.send_robust(
                project=project,
                sender=EventManager,
            )

            metrics.incr(
                'events.discarded',
                skip_internal=True,
                tags={
                    'organization_id': project.organization_id,
                    'platform': platform,
                },
            )
            raise
        else:
            event_saved.send_robust(
                project=project,
                event_size=event.size,
                sender=EventManager,
            )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        # When an event was sampled, the canonical source of truth
        # is the EventMapping table since we aren't going to be writing out an actual
        # Event row. Otherwise, if the Event isn't being sampled, we can safely
        # rely on the Event table itself as the source of truth and ignore
        # EventMapping since it's redundant information.
        if is_sample:
            try:
                with transaction.atomic(
                        using=router.db_for_write(EventMapping)):
                    EventMapping.objects.create(project=project,
                                                group=group,
                                                event_id=event_id)
            except IntegrityError:
                logger.info('duplicate.found',
                            exc_info=True,
                            extra={
                                'event_uuid': event_id,
                                'project_id': project.id,
                                'group_id': group.id,
                                'model': EventMapping.__name__,
                            })
                return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
            group_id=group.id,
            environment_id=environment.id,
            defaults={
                'first_release': release if release else None,
            },
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            ReleaseProjectEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters,
                        timestamp=event.datetime,
                        environment_id=environment.id)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append((tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            }))

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(
            group=group,
            environment=environment,
        )

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                logger.info('duplicate.found',
                            exc_info=True,
                            extra={
                                'event_uuid': event_id,
                                'project_id': project.id,
                                'group_id': group.id,
                                'model': Event.__name__,
                            })
                return event

            tagstore.delay_index_event_tags(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=event.tags,
                date_added=event.datetime,
            )

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id,
                     (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id,
                     (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
                environment_id=environment.id,
            )
        if release:
            if is_new:
                buffer.incr(ReleaseProject, {'new_groups': 1}, {
                    'release_id': release.id,
                    'project_id': project.id,
                })
            if is_new_group_environment:
                buffer.incr(ReleaseProjectEnvironment, {'new_issues_count': 1},
                            {
                                'project_id': project.id,
                                'release_id': release.id,
                                'environment_id': environment.id,
                            })

        safe_execute(Group.objects.add_tags,
                     group,
                     environment,
                     event.get_tags(),
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project,
                                                 group=group,
                                                 sender=Project)

        eventstream.insert(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )

        metrics.timing(
            'events.latency',
            received_timestamp - recorded_timestamp,
            tags={
                'project_id': project.id,
            },
        )

        metrics.timing('events.size.data.post_save',
                       event.size,
                       tags={'project_id': project.id})

        return event
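
The final 'events.latency' timing is just the gap between when the event was received and when it was recorded, with the received timestamp falling back to the event's own datetime in epoch seconds. A minimal standalone sketch of that fallback (hypothetical helper, not part of the code above; .timestamp() stands in for the non-portable strftime('%s')):

from datetime import datetime, timezone

def compute_latency(received_ts, event_dt, recorded_ts):
    # Prefer the explicit 'received' value from event.data; otherwise fall
    # back to the event's datetime expressed as epoch seconds.
    received = received_ts or event_dt.replace(tzinfo=timezone.utc).timestamp()
    return received - recorded_ts

# An event recorded at 12:00:00 UTC and received at 12:00:02 UTC -> ~2.0s
compute_latency(None,
                datetime(2018, 1, 1, 12, 0, 2),
                datetime(2018, 1, 1, 12, 0, 0, tzinfo=timezone.utc).timestamp())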
Example No. 41
 def test_item_transactional_db(self, item_transactional_db, live_server):
     response_data = urlopen(live_server + '/item_count/').read()
     assert force_text(response_data) == 'Item count: 1'
Example No. 42
 def get_storage_path(instance, filename):
     fn = unicodedata.normalize('NFKD', force_text(filename)).encode(
         'ascii', 'ignore').decode('ascii')
     return os.path.join(PHOTOLOGUE_DIR, 'photos', fn)
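
The NFKD-normalize / ascii-ignore round trip above strips accents and silently drops anything without an ASCII equivalent, which keeps photo filenames filesystem-safe. A quick sketch of just that step:

import unicodedata

# 'café.jpg' -> 'cafe.jpg': the accent decomposes into 'e' plus a combining
# mark, and encode('ascii', 'ignore') discards the combining mark.
fn = unicodedata.normalize('NFKD', 'café.jpg').encode('ascii', 'ignore').decode('ascii')
assert fn == 'cafe.jpg'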
Example No. 43
 def test_db_changes_visibility(self, live_server):
     response_data = urlopen(live_server + '/item_count/').read()
     assert force_text(response_data) == 'Item count: 0'
     Item.objects.create(name='foo')
     response_data = urlopen(live_server + '/item_count/').read()
     assert force_text(response_data) == 'Item count: 1'
Example No. 44
def test_admin_client_no_db_marker(admin_client):
    assert isinstance(admin_client, Client)
    resp = admin_client.get('/admin-required/')
    assert force_text(resp.content) == 'You are an admin'
Example No. 45
 def __init__(self, message):
     """Set custom error message for ProviderManager errors."""
     self.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
     self.detail = {"detail": force_text(message)}
Example No. 46
 def test_fixture_transactional_db(self, transactional_db, live_server):
     Item.objects.create(name='foo')
     response_data = urlopen(live_server + '/item_count/').read()
     assert force_text(response_data) == 'Item count: 1'
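
Examples 41, 43, and 46 all hit an '/item_count/' URL against pytest-django's live_server fixture. The view itself is not shown; a minimal hypothetical version consistent with those assertions might look like this (assuming the same Item model the tests import):

from django.http import HttpResponse

def item_count(request):
    # Hypothetical view backing '/item_count/' in the tests above.
    return HttpResponse('Item count: %d' % Item.objects.count())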
Example No. 47
 def __init__(self):
     """Initialize with status code 403."""
     self.status_code = status.HTTP_403_FORBIDDEN
     self.detail = {"detail": force_text(self.default_detail)}
Example No. 48
 def test_url(self, live_server):
     assert live_server.url == force_text(live_server)
Example No. 49
 def __str__(self):
     return force_text(self.object_repr)
Example No. 50
 def __init__(self, message):
     """Initialize with status code 400."""
     self.status_code = status.HTTP_400_BAD_REQUEST
     self.detail = {"detail": force_text(message)}
Example No. 51
 def __str__(self):
     return force_text(self._meta.verbose_name)
Example No. 52
 def list_units(self):
     value = [force_text(i) for i in self.units.all()]
     if len(value) > 1:
         value = [value[0], value[-1]]
     units = "-".join(value)
     return units
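
When more than one unit is present, only the first and last survive the truncation above, so the result reads like a range. A hypothetical standalone version of the same logic:

def join_units(names):
    # Keep only the first and last names before joining them with a dash.
    value = list(names)
    if len(value) > 1:
        value = [value[0], value[-1]]
    return "-".join(value)

assert join_units(['101', '102', '103']) == '101-103'
assert join_units(['101']) == '101'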
Example No. 53
 def _convert_datetime(self, date):
     if hasattr(date, 'hour'):
         return force_text(date.strftime('%Y%m%d%H%M%S'))
     else:
         return force_text(date.strftime('%Y%m%d000000'))
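
The branch above distinguishes datetimes from plain dates by the presence of an hour attribute, padding dates out to midnight. A standalone sketch of the same idea (hypothetical function, not the method itself):

from datetime import date, datetime

def convert_datetime(value):
    # datetime -> full timestamp; plain date -> padded to midnight
    if hasattr(value, 'hour'):
        return value.strftime('%Y%m%d%H%M%S')
    return value.strftime('%Y%m%d000000')

assert convert_datetime(datetime(2020, 1, 2, 3, 4, 5)) == '20200102030405'
assert convert_datetime(date(2020, 1, 2)) == '20200102000000'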
Example No. 54
 def __str__(self):
     return force_text(self.goods)
Example No. 55
    def search(self,
               query_string,
               sort_by=None,
               start_offset=0,
               end_offset=None,
               fields='',
               highlight=False,
               facets=None,
               date_facets=None,
               query_facets=None,
               narrow_queries=None,
               spelling_query=None,
               within=None,
               dwithin=None,
               distance_point=None,
               models=None,
               limit_to_registered_models=None,
               result_class=None,
               **kwargs):
        if not self.setup_complete:
            self.setup()

        # A zero length query should return no results.
        if len(query_string) == 0:
            return {
                'results': [],
                'hits': 0,
            }

        query_string = force_text(query_string)

        # A one-character query (non-wildcard) gets nabbed by a stopwords
        # filter and should yield zero results.
        if len(query_string) <= 1 and query_string != u'*':
            return {
                'results': [],
                'hits': 0,
            }

        reverse = False

        if sort_by is not None:
            # Determine if we need to reverse the results and if Whoosh can
            # handle what it's being asked to sort by. Reversing is an
            # all-or-nothing action, unfortunately.
            sort_by_list = []
            reverse_counter = 0

            for order_by in sort_by:
                if order_by.startswith('-'):
                    reverse_counter += 1

            if reverse_counter and reverse_counter != len(sort_by):
                raise SearchBackendError("Whoosh requires all order_by fields"
                                         " to use the same sort direction")

            for order_by in sort_by:
                if order_by.startswith('-'):
                    sort_by_list.append(order_by[1:])

                    if len(sort_by_list) == 1:
                        reverse = True
                else:
                    sort_by_list.append(order_by)

                    if len(sort_by_list) == 1:
                        reverse = False

            sort_by = sort_by_list

        if facets is not None:
            warnings.warn("Whoosh does not handle faceting.",
                          Warning,
                          stacklevel=2)

        if date_facets is not None:
            warnings.warn("Whoosh does not handle date faceting.",
                          Warning,
                          stacklevel=2)

        if query_facets is not None:
            warnings.warn("Whoosh does not handle query faceting.",
                          Warning,
                          stacklevel=2)

        narrowed_results = None
        self.index = self.index.refresh()

        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(
                settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)

        if models and len(models):
            model_choices = sorted(get_model_ct(model) for model in models)
        elif limit_to_registered_models:
            # Using narrow queries, limit the results to only models handled
            # with the current routers.
            model_choices = self.build_models_list()
        else:
            model_choices = []

        if len(model_choices) > 0:
            if narrow_queries is None:
                narrow_queries = set()

            narrow_queries.add(' OR '.join(
                ['%s:%s' % (DJANGO_CT, rm) for rm in model_choices]))

        narrow_searcher = None

        if narrow_queries is not None:
            # Potentially expensive? I don't see another way to do it in Whoosh...
            narrow_searcher = self.index.searcher()

            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(
                    self.parser.parse(force_text(nq)), limit=None)

                if len(recent_narrowed_results) <= 0:
                    return {
                        'results': [],
                        'hits': 0,
                    }

                if narrowed_results:
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results

        self.index = self.index.refresh()

        if self.index.doc_count():
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query_string)

            # In the event of an invalid/stopworded query, recover gracefully.
            if parsed_query is None:
                return {
                    'results': [],
                    'hits': 0,
                }

            page_num, page_length = self.calculate_page(
                start_offset, end_offset)

            search_kwargs = {
                'pagelen': page_length,
                'sortedby': sort_by,
                'reverse': reverse,
            }

            # Handle the case where the results have been narrowed.
            if narrowed_results is not None:
                search_kwargs['filter'] = narrowed_results

            try:
                raw_page = searcher.search_page(parsed_query, page_num,
                                                **search_kwargs)
            except ValueError:
                if not self.silently_fail:
                    raise

                return {
                    'results': [],
                    'hits': 0,
                    'spelling_suggestion': None,
                }

            # As of Whoosh 2.5.1, it will return the wrong page of results
            # if you request a page number that is too high. :(
            if raw_page.pagenum < page_num:
                return {
                    'results': [],
                    'hits': 0,
                    'spelling_suggestion': None,
                }

            results = self._process_results(raw_page,
                                            highlight=highlight,
                                            query_string=query_string,
                                            spelling_query=spelling_query,
                                            result_class=result_class)
            searcher.close()

            if hasattr(narrow_searcher, 'close'):
                narrow_searcher.close()

            return results
        else:
            if self.include_spelling:
                if spelling_query:
                    spelling_suggestion = self.create_spelling_suggestion(
                        spelling_query)
                else:
                    spelling_suggestion = self.create_spelling_suggestion(
                        query_string)
            else:
                spelling_suggestion = None

            return {
                'results': [],
                'hits': 0,
                'spelling_suggestion': spelling_suggestion,
            }
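
A hypothetical call against this backend (assuming backend is a configured instance of the Whoosh backend above; the 'pub_date' field name is illustrative):

# Sort descending by 'pub_date' and take the first 20 hits; the return
# value is a dict with 'results' and 'hits' keys as built above.
results = backend.search('force_text', sort_by=['-pub_date'],
                         start_offset=0, end_offset=20)
print(results['hits'])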
Example No. 56
 def __str__(self):
     return force_text(repr(self))
Example No. 57
def adapt(text):
    # make sure connection is open
    connection.cursor()
    a = psycopg2.extensions.adapt(force_text(text))
    a.prepare(connection.connection)
    return a
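
psycopg2's adapt() wraps the text in a quoting adapter; after prepare() it can render an escaped SQL literal. A hedged usage sketch of the helper above:

# Embedded apostrophes are doubled for safe use in raw SQL,
# e.g. getquoted() yields something like b"'O''Reilly'".
quoted = adapt("O'Reilly")
literal = quoted.getquoted()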
Example No. 58
    def more_like_this(self,
                       model_instance,
                       additional_query_string=None,
                       start_offset=0,
                       end_offset=None,
                       models=None,
                       limit_to_registered_models=None,
                       result_class=None,
                       **kwargs):
        if not self.setup_complete:
            self.setup()

        field_name = self.content_field_name
        narrow_queries = set()
        narrowed_results = None
        self.index = self.index.refresh()

        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(
                settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)

        if models and len(models):
            model_choices = sorted(get_model_ct(model) for model in models)
        elif limit_to_registered_models:
            # Using narrow queries, limit the results to only models handled
            # with the current routers.
            model_choices = self.build_models_list()
        else:
            model_choices = []

        if len(model_choices) > 0:
            if narrow_queries is None:
                narrow_queries = set()

            narrow_queries.add(' OR '.join(
                ['%s:%s' % (DJANGO_CT, rm) for rm in model_choices]))

        if additional_query_string and additional_query_string != '*':
            narrow_queries.add(additional_query_string)

        narrow_searcher = None

        if narrow_queries is not None:
            # Potentially expensive? I don't see another way to do it in Whoosh...
            narrow_searcher = self.index.searcher()

            for nq in narrow_queries:
                recent_narrowed_results = narrow_searcher.search(
                    self.parser.parse(force_text(nq)), limit=None)

                if len(recent_narrowed_results) <= 0:
                    return {
                        'results': [],
                        'hits': 0,
                    }

                if narrowed_results:
                    narrowed_results.filter(recent_narrowed_results)
                else:
                    narrowed_results = recent_narrowed_results

        page_num, page_length = self.calculate_page(start_offset, end_offset)

        self.index = self.index.refresh()
        raw_results = EmptyResults()

        searcher = None
        if self.index.doc_count():
            query = "%s:%s" % (ID, get_identifier(model_instance))
            searcher = self.index.searcher()
            parsed_query = self.parser.parse(query)
            results = searcher.search(parsed_query)

            if len(results):
                raw_results = results[0].more_like_this(field_name,
                                                        top=end_offset)

            # Handle the case where the results have been narrowed.
            if narrowed_results is not None and hasattr(raw_results, 'filter'):
                raw_results.filter(narrowed_results)

        try:
            raw_page = ResultsPage(raw_results, page_num, page_length)
        except ValueError:
            if not self.silently_fail:
                raise

            return {
                'results': [],
                'hits': 0,
                'spelling_suggestion': None,
            }

        # As of Whoosh 2.5.1, it will return the wrong page of results
        # if you request a page number that is too high. :(
        if raw_page.pagenum < page_num:
            return {
                'results': [],
                'hits': 0,
                'spelling_suggestion': None,
            }

        results = self._process_results(raw_page, result_class=result_class)

        if searcher:
            searcher.close()

        if hasattr(narrow_searcher, 'close'):
            narrow_searcher.close()

        return results
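
As with search() above, a hypothetical call (assuming backend is the same Whoosh backend instance and entry is an indexed model instance):

similar = backend.more_like_this(entry, start_offset=0, end_offset=10)
# Returns a dict with 'results' and 'hits'; the empty and error paths
# above also include a 'spelling_suggestion' key.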
Example No. 59
def default_user_display(user):
    if app_settings.USER_MODEL_USERNAME_FIELD:
        return getattr(user, app_settings.USER_MODEL_USERNAME_FIELD)
    else:
        return force_text(user)
Example No. 60
 def reduce(self, value, **kwargs):
     return force_text(value)