def test_no_dereference_context_manager_object_id(self):
        """Ensure that DBRef items in ListFields aren't dereferenced.
        """
        connect('mongoenginetest')

        class User(Document):
            name = StringField()

        class Group(Document):
            ref = ReferenceField(User, dbref=False)
            generic = GenericReferenceField()
            members = ListField(ReferenceField(User, dbref=False))

        User.drop_collection()
        Group.drop_collection()

        for i in range(1, 51):
            User(name='user %s' % i).save()

        user = User.objects.first()
        Group(ref=user, members=User.objects, generic=user).save()

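        # no_dereference yields a class whose fields have auto-dereferencing
        # disabled; the original Group class is unaffected, as asserted below.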
        with no_dereference(Group) as NoDeRefGroup:
            self.assertTrue(Group._fields['members']._auto_dereference)
            self.assertFalse(NoDeRefGroup._fields['members']._auto_dereference)

        with no_dereference(Group) as Group:
            group = Group.objects.first()
            self.assertTrue(all([not isinstance(m, User)
                                for m in group.members]))
            self.assertFalse(isinstance(group.ref, User))
            self.assertFalse(isinstance(group.generic, User))

        self.assertTrue(all([isinstance(m, User)
                             for m in group.members]))
        self.assertTrue(isinstance(group.ref, User))
        self.assertTrue(isinstance(group.generic, User))
Example #2
    def test_no_dereference_context_manager_dbref(self):
        """Ensure that DBRef items in ListFields aren't dereferenced.
        """
        connect('mongoenginetest')

        class User(Document):
            name = StringField()

        class Group(Document):
            ref = ReferenceField(User, dbref=True)
            generic = GenericReferenceField()
            members = ListField(ReferenceField(User, dbref=True))

        User.drop_collection()
        Group.drop_collection()

        for i in range(1, 51):
            User(name='user %s' % i).save()

        user = User.objects.first()
        Group(ref=user, members=User.objects, generic=user).save()

        with no_dereference(Group) as NoDeRefGroup:
            self.assertTrue(Group._fields['members']._auto_dereference)
            self.assertFalse(NoDeRefGroup._fields['members']._auto_dereference)

        with no_dereference(Group) as Group:
            group = Group.objects.first()
            self.assertTrue(
                all([not isinstance(m, User) for m in group.members]))
            self.assertFalse(isinstance(group.ref, User))
            self.assertFalse(isinstance(group.generic, User))

        self.assertTrue(all([isinstance(m, User) for m in group.members]))
        self.assertTrue(isinstance(group.ref, User))
        self.assertTrue(isinstance(group.generic, User))
Example #3
    def test_no_dereference_context_manager_object_id(self):
        """Ensure that DBRef items in ListFields aren't dereferenced."""
        connect("mongoenginetest")

        class User(Document):
            name = StringField()

        class Group(Document):
            ref = ReferenceField(User, dbref=False)
            generic = GenericReferenceField()
            members = ListField(ReferenceField(User, dbref=False))

        User.drop_collection()
        Group.drop_collection()

        for i in range(1, 51):
            User(name="user %s" % i).save()

        user = User.objects.first()
        Group(ref=user, members=User.objects, generic=user).save()

        with no_dereference(Group) as NoDeRefGroup:
            assert Group._fields["members"]._auto_dereference
            assert not NoDeRefGroup._fields["members"]._auto_dereference

        with no_dereference(Group) as Group:
            group = Group.objects.first()
            for m in group.members:
                assert not isinstance(m, User)
            assert not isinstance(group.ref, User)
            assert not isinstance(group.generic, User)

        for m in group.members:
            assert isinstance(m, User)
        assert isinstance(group.ref, User)
        assert isinstance(group.generic, User)
Example #4
def ticket_view(request, ombudsman_slug, ticket_slug):
    if request.method == 'POST' and request.user.is_authenticated():
        p = request.POST
        comment = Comment()
        comment.text = p['comment'].replace('\r\n', '<br>')
        comment.created_by = request.user.to_dbref()
        comment.created_in = datetime.now()
    
        Ticket.objects(slug=ticket_slug).update_one(push__comments=comment)
        
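    # Fetch the ticket with dereferencing disabled so reference fields
    # (e.g. created_by) stay as raw ids instead of full documents.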
    with no_dereference(Ticket) as Ticket_all:
        obj = get_document_or_404(Ticket_all, slug=ticket_slug)
    obj.description = obj.description.replace('\r\n', '<br>')
    
    context = Context({'page_title': obj.title, 'current_page': 'ombudsman', 'obj': obj})
    return render(request, 'ticket_view.html', context)
Example #5
def analytics():
    sort = request.args.get('s', 'r')
    date_from = request.args.get('f', False)
    date_to = request.args.get('t', False)

    t = timezone(g.current_account.timezone)

    bulletins = g.current_account.bulletins
    stories = []

    for bulletin in bulletins:
        for content in bulletin.content:
            if date_from and bulletin.publish_at < datetime.strptime(
                    date_from, '%m/%d/%Y'):
                continue
            if date_to and bulletin.publish_at > datetime.strptime(
                    date_to, '%m/%d/%Y'):
                continue

            s = {}
            s['id'] = content.id
            s['title'] = content.title
            s['publish_at'] = utc.localize(bulletin.publish_at).astimezone(t)
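            # Count readers without dereferencing each entry into a full
            # document; only the length of the list is needed here.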
            with no_dereference(Story):
                s['readers'] = len(content.readers)
            s['readers_by_fragment'] = []
            stories.append(s)

    if sort == 'r':
        stories.sort(key=lambda x: x['readers'], reverse=True)
    else:
        stories.sort(key=lambda x: x['publish_at'], reverse=True)

    return render_template('analytics/index.html',
                           stories=stories,
                           sort=sort,
                           date_from=date_from,
                           date_to=date_to)
Example #6
def ombudsman_view(request, slug):
    if request.method == 'POST' and request.user.is_authenticated() and request.user.group.name == 'users':
        p = request.POST
        ombudsman = OmbudsmanOffice.objects(slug=slug).fields('users').limit(1)[0]
        if 'action' in p and p['action'] == 'join':
            SubscribeUserInOmbudsmanOffice(slug, request.user.to_dbref())
        elif 'action' in p and p['action'] == 'unjoin':
            UnsubscribeUserInOmbudsmanOffice(slug, request.user.to_dbref())
        elif {'title', 'description', 'category'}.issubset(p):
            ticket = Ticket()
            ticket.title = p['title']
            ticket.slug = ''
            ticket.description = p['description']
            ticket.category = p['category']
            ticket.belongs_to = ombudsman.to_dbref()
            ticket.created_by = request.user.to_dbref()
            ticket.comments = []
            ticket.log = Log()
            ticket.save()
            
            ticket_slug = slugify('{0}-{1}'.format(ticket.title, ticket.obj_id))
        
            ticket.update(set__slug=ticket_slug)
            
            return HttpResponseRedirect('/ombudsman/{0}/ticket/{1}'.format(ombudsman.slug, ticket_slug)) 
            
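    # Load the office without dereferencing its user references; only the
    # length of obj.users and scalar fields are used below.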
    with no_dereference(OmbudsmanOffice) as OmbudsmanOffice_all:
        obj = get_document_or_404(OmbudsmanOffice_all, slug=slug)
    obj.total_tickets = Ticket.objects(belongs_to=obj.id).count()
    obj.total_users = len(obj.users)
    if obj.about:
        obj.about = obj.about.replace('\r\n', '<br>')
    context = Context({'page_title': obj.name, 'current_page': 'ombudsman', 'obj': obj})
    return render(request, 'ombudsman_view.html', context)
Example #7
    def get(self, project):
        """Retrieve overview graph for a project.
        ---
        operationId: get_graph
        parameters:
            - name: project
              in: path
              type: string
              pattern: '^[a-zA-Z0-9_]{3,30}$'
              required: true
              description: project name/slug
            - name: columns
              in: query
              type: array
              items:
                  type: string
              required: true
              description: comma-separated list of column names to plot (in MongoDB dot notation)
            - name: filters
              in: query
              type: array
              items:
                  type: string
              description: list of `column__operator:value` filters \
                      with `column` in dot notation and `operator` in mongoengine format \
                      (http://docs.mongoengine.org/guide/querying.html#query-operators). \
                      `column` needs to be a valid field in `content.data`.
            - name: page
              in: query
              type: integer
              default: 1
              description: page to retrieve (in batches of `per_page`)
            - name: per_page
              in: query
              type: integer
              default: 200
              minimum: 2
              maximum: 200
              description: number of results to return per page
        responses:
            200:
                description: x-y-data in plotly format
                schema:
                    type: array
                    items:
                        type: object
                        properties:
                            x:
                                type: array
                                items:
                                    type: number
                            y:
                                type: array
                                items:
                                    type: number
        """
        mask = ['content.data', 'identifier']
        columns = request.args.get('columns').split(',')
        filters = request.args.get('filters', '').split(',')
        page = int(request.args.get('page', 1))
        PER_PAGE_MAX = 200
        per_page = min(int(request.args.get('per_page', PER_PAGE_MAX)), PER_PAGE_MAX)

        with no_dereference(Contributions) as ContributionsDeref:
            objects = ContributionsDeref.objects(project=project).only(*mask)
            data = [{'x': [], 'y': [], 'text': []} for _ in columns]
            # C__gte:0.42,C__lte:2.10,ΔE-QP.direct__lte:11.3 -> content__data__C__value__lte
            if filters:
                query = {}
                for f in filters:
                    if '__' in f and ':' in f:
                        k, v = f.split(':')
                        col, op = k.rsplit('__', 1)
                        col = col.replace(".", "__")
                        key = f'content__data__{col}__value__{op}'
                        query[key] = float(v)
                objects = objects(**query)
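            # Worked example (hypothetical request, per the comment above):
            # filters = ['C__gte:0.42', 'C__lte:2.10', 'ΔE-QP.direct__lte:11.3']
            # builds query = {
            #     'content__data__C__value__gte': 0.42,
            #     'content__data__C__value__lte': 2.10,
            #     'content__data__ΔE-QP__direct__value__lte': 11.3,
            # }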

            for obj in objects.paginate(page=page, per_page=per_page).items:
                d = nested_to_record(obj['content']['data'], sep='.')
                if all(f'{c}.display' in d.keys() for c in columns):
                    for idx, col in enumerate(columns):
                        val = d.get(f'{col}.display')
                        if val:
                            data[idx]['x'].append(obj.identifier)
                            data[idx]['y'].append(val.split(' ')[0])
                            data[idx]['text'].append(str(obj.id))
            return data
Example #8
def build():
    with no_dereference(Contributions) as Contribs:
        # TODO get a random max_docs slice to avoid collisions in parallel Fargate tasks

        # remove dangling and unset missing notebooks
        nbs_total, nbs_count = -1, -1
        ctrbs_cnt = Contribs.objects._cursor.collection.estimated_document_count()
        nbs_cnt = Notebooks.objects._cursor.collection.estimated_document_count()

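        # A count mismatch means either dangling notebooks (nothing references
        # them) or contributions pointing at notebooks that no longer exist.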
        if ctrbs_cnt != nbs_cnt:
            contribs = Contribs.objects(notebook__exists=True).only("notebook")
            nids = [contrib.notebook.id for contrib in contribs]
            if len(nids) < nbs_cnt:
                nbs = Notebooks.objects(id__nin=nids).only("id")
                nbs_total = nbs.count()
                max_docs = 2500
                nbs[:max_docs].delete()
                nbs_count = nbs_total if nbs_total < max_docs else max_docs
            else:
                missing_nids = set(nids) - set(
                    Notebooks.objects.distinct("id"))
                if missing_nids:
                    upd_contribs = Contribs.objects(
                        notebook__in=list(missing_nids))
                    nupd_total = upd_contribs.count()
                    nupd = upd_contribs.update(unset__notebook="")
                    print(
                        f"unset notebooks for {nupd}/{nupd_total} contributions"
                    )

        # build missing notebooks
        max_docs = NotebooksResource.max_limit
        cids = request.args.get("cids", "").split(",")[:max_docs]

        if cids[0]:
            documents = Contribs.objects(id__in=cids)
        else:
            documents = Contribs.objects(notebook__exists=False)[:max_docs]

        total = documents.count()
        count = 0

        for document in documents:
            if document.notebook is not None:
                # NOTE document.notebook.delete() doesn't trigger pre_delete signal?
                nb = Notebooks.objects.get(id=document.notebook.id)
                nb.delete()

            cells = [
                # define client only once in kernel
                # avoids API calls for regex expansion for query parameters
                nbf.new_code_cell("\n".join([
                    "if 'client' not in locals():",
                    "\tclient = Client(",
                    '\t\theaders={"X-Authenticated-Groups": "admin"},',
                    f'\t\thost="{MPCONTRIBS_API_HOST}"',
                    "\t)",
                ])),
                nbf.new_code_cell(
                    f'client.get_contribution("{document.id}").pretty()'),
            ]

            if document.tables:
                cells.append(nbf.new_markdown_cell("## Tables"))
                for table in document.tables:
                    cells.append(
                        nbf.new_code_cell("\n".join([
                            f'df = client.get_table("{table.id}")',
                            "df.plot(**df.attrs)",
                        ])))

            if document.structures:
                cells.append(nbf.new_markdown_cell("## Structures"))
                for structure in document.structures:
                    cells.append(
                        nbf.new_code_cell(
                            f'client.get_structure("{structure.id}")'))

            cid = str(document.id)
            outputs = execute_cells(cid, cells)
            if not outputs:
                raise ValueError(f"notebook generation for {cid} failed!")

            for idx, output in outputs.items():
                cells[idx]["outputs"] = output

            doc = deepcopy(seed_nb)
            doc["cells"] += cells[1:]  # skip localhost Client

            document.notebook = Notebooks(**doc).save()
            document.save(signal_kwargs={"skip": True})
            count += 1

        return f"{count}/{total} notebooks built & {nbs_count}/{nbs_total} notebooks deleted"
Example #9
def archive_articles(limit=None):
    """ Archive articles that pollute the production database. """

    raise NotImplementedError('REVIEW for RELDB.')

    # cf. https://docs.djangoproject.com/en/dev/topics/db/multi-db/#selecting-a-database-to-delete-from  # NOQA

    counts = {
        'duplicates': 0,
        'orphaned': 0,
        'bad_articles': 0,
        'archived_dupes': 0,
    }

    if limit is None:
        limit = config.ARTICLE_ARCHIVE_BATCH_SIZE

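    # Build the candidate querysets against a no-dereference view of Article
    # so reference fields are not fetched while selecting articles.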
    with no_dereference(Article) as ArticleOnly:
        if config.ARTICLE_ARCHIVE_OLDER_THAN > 0:
            older_than = now() - timedelta(
                days=config.ARTICLE_ARCHIVE_OLDER_THAN)

            duplicates = ArticleOnly.objects(
                duplicate_of__ne=None,
                date_published__lt=older_than).limit(limit)
            orphaned = ArticleOnly.objects(
                orphaned=True, date_published__lt=older_than).limit(limit)

        else:
            duplicates = ArticleOnly.objects(
                duplicate_of__ne=None).limit(limit)
            orphaned = ArticleOnly.objects(orphaned=True).limit(limit)

    duplicates.no_cache()
    orphaned.no_cache()

    counts['duplicates'] = duplicates.count()
    counts['orphaned'] = orphaned.count()

    if counts['duplicates']:
        current = 0
        LOGGER.info(u'Archiving of %s duplicate article(s) started.',
                    counts['duplicates'])

        with benchmark('Archiving of %s duplicate article(s)' %
                       counts['duplicates']):
            for article in duplicates:
                archive_article_one_internal(article, counts)
                current += 1
                if current % 50 == 0:
                    LOGGER.info(u'Archived %s/%s duplicate articles so far.',
                                current, counts['duplicates'])

    if counts['orphaned']:
        current = 0
        LOGGER.info(u'Archiving of %s orphaned article(s) started.',
                    counts['orphaned'])

        with benchmark('Archiving of %s orphaned article(s)' %
                       counts['orphaned']):
            for article in orphaned:
                archive_article_one_internal(article, counts)
                current += 1
                if current % 50 == 0:
                    LOGGER.info(u'Archived %s/%s orphaned articles so far.',
                                current, counts['orphaned'])

    if counts['duplicates'] or counts['orphaned']:
        synchronize_statsd_articles_gauges(full=True)

        LOGGER.info(
            '%s already archived and %s bad articles were found '
            u'during the operation.', counts['archived_dupes'],
            counts['bad_articles'])

    else:
        LOGGER.info(u'No article to archive.')
Example #10
def archive_articles(limit=None):
    """ Archive articles that pollute the production database. """

    raise NotImplementedError('REVIEW for RELDB.')

    # cf. https://docs.djangoproject.com/en/dev/topics/db/multi-db/#selecting-a-database-to-delete-from  # NOQA

    counts = {
        'duplicates': 0,
        'orphaned': 0,
        'bad_articles': 0,
        'archived_dupes': 0,
    }

    if limit is None:
        limit = config.ARTICLE_ARCHIVE_BATCH_SIZE

    with no_dereference(Article) as ArticleOnly:
        if config.ARTICLE_ARCHIVE_OLDER_THAN > 0:
            older_than = now() - timedelta(
                days=config.ARTICLE_ARCHIVE_OLDER_THAN)

            duplicates = ArticleOnly.objects(
                duplicate_of__ne=None,
                date_published__lt=older_than).limit(limit)
            orphaned   = ArticleOnly.objects(
                orphaned=True,
                date_published__lt=older_than).limit(limit)

        else:
            duplicates = ArticleOnly.objects(duplicate_of__ne=None
                                             ).limit(limit)
            orphaned   = ArticleOnly.objects(orphaned=True).limit(limit)

    duplicates.no_cache()
    orphaned.no_cache()

    counts['duplicates'] = duplicates.count()
    counts['orphaned']   = orphaned.count()

    if counts['duplicates']:
        current = 0
        LOGGER.info(u'Archiving of %s duplicate article(s) started.',
                    counts['duplicates'])

        with benchmark('Archiving of %s duplicate article(s)'
                       % counts['duplicates']):
            for article in duplicates:
                archive_article_one_internal(article, counts)
                current += 1
                if current % 50 == 0:
                    LOGGER.info(u'Archived %s/%s duplicate articles so far.',
                                current, counts['duplicates'])

    if counts['orphaned']:
        current = 0
        LOGGER.info(u'Archiving of %s orphaned article(s) started.',
                    counts['orphaned'])

        with benchmark('Archiving of %s orphaned article(s)'
                       % counts['orphaned']):
            for article in orphaned:
                archive_article_one_internal(article, counts)
                current += 1
                if current % 50 == 0:
                    LOGGER.info(u'Archived %s/%s orphaned articles so far.',
                                current, counts['orphaned'])

    if counts['duplicates'] or counts['orphaned']:
        synchronize_statsd_articles_gauges(full=True)

        LOGGER.info('%s already archived and %s bad articles were found '
                    u'during the operation.', counts['archived_dupes'],
                    counts['bad_articles'])

    else:
        LOGGER.info(u'No article to archive.')
Example #11
def archive_articles(limit=None):

    counts = {
        'duplicates': 0,
        'orphaned': 0,
        'bad_articles': 0,
        'archived_dupes': 0,
    }

    if limit is None:
        limit = config.ARTICLE_ARCHIVE_BATCH_SIZE

    with no_dereference(Article) as ArticleOnly:
        if config.ARTICLE_ARCHIVE_OLDER_THAN > 0:
            older_than = now() - timedelta(
                            days=config.ARTICLE_ARCHIVE_OLDER_THAN)

            duplicates = ArticleOnly.objects(
                            duplicate_of__ne=None,
                            date_published__lt=older_than).limit(limit)
            orphaned   = ArticleOnly.objects(
                            orphaned=True,
                            date_published__lt=older_than).limit(limit)

        else:
            duplicates = ArticleOnly.objects(duplicate_of__ne=None
                                             ).limit(limit)
            orphaned   = ArticleOnly.objects(orphaned=True).limit(limit)

    duplicates.no_cache()
    orphaned.no_cache()

    counts['duplicates'] = duplicates.count()
    counts['orphaned']   = orphaned.count()

    if counts['duplicates']:
        current = 0
        LOGGER.info(u'Archiving of %s duplicate article(s) started.',
                    counts['duplicates'])

        with benchmark('Archiving of %s duplicate article(s)'
                       % counts['duplicates']):
            for article in duplicates:
                archive_article_one_internal(article, counts)
                current += 1
                if current % 50 == 0:
                    LOGGER.info(u'Archived %s/%s duplicate articles so far.',
                                current, counts['duplicates'])

    if counts['orphaned']:
        current = 0
        LOGGER.info(u'Archiving of %s orphaned article(s) started.',
                    counts['orphaned'])

        with benchmark('Archiving of %s orphaned article(s)'
                       % counts['orphaned']):
            for article in orphaned:
                archive_article_one_internal(article, counts)
                current += 1
                if current % 50 == 0:
                    LOGGER.info(u'Archived %s/%s orphaned articles so far.',
                                current, counts['orphaned'])

    if counts['duplicates'] or counts['orphaned']:
        synchronize_statsd_articles_gauges(full=True)

        LOGGER.info('%s already archived and %s bad articles were found '
                    u'during the operation.', counts['archived_dupes'],
                    counts['bad_articles'])

    else:
        LOGGER.info(u'No article to archive.')