Example #1
    def get_list(self, term, offset=0, limit=DEFAULT_PAGE_SIZE):
        query = self.model.objects

        criteria = None

        # HACK: True/False checking is not possible
        options_processed = self.options

        if options_processed:
            query = query.filter(**options_processed)

        for field in self._cached_fields:
            flt = {u'%s__icontains' % field.name: term}

            if not criteria:
                criteria = mongoengine.Q(**flt)
            else:
                criteria |= mongoengine.Q(**flt)

        query = query.filter(criteria)

        if offset:
            query = query.skip(offset)

        return query.limit(limit).all()
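A minimal, self-contained sketch of the OR-combination pattern used above, with a hypothetical Article document (the model, field names, and database name are assumptions, not part of the original):

import mongoengine

mongoengine.connect('example_db')  # assumes a local MongoDB instance


class Article(mongoengine.Document):
    title = mongoengine.StringField()
    body = mongoengine.StringField()


def search_articles(term, fields=('title', 'body'), limit=20):
    criteria = None
    for name in fields:
        q = mongoengine.Q(**{'%s__icontains' % name: term})
        # Q objects combine with | (OR) and & (AND)
        criteria = q if criteria is None else criteria | q
    return Article.objects.filter(criteria).limit(limit)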
Example #2
 def check_gphotos_membership(self):
     self.status("Checking for photos not in Gphotos")
     for photo in Queue.objects(me.Q(md5sum__ne=None) & me.Q(in_gphotos__ne=True)):
         match = Gphoto.objects(md5Checksum=photo.md5sum).first()
         if match:
             photo.gphotos_path = match.path
             photo.gid = match.gid
             photo.in_gphotos = True
             photo.original_filename = match.originalFilename
             logger.info(f"In Gphotos: {photo.src_path}")
         else:
             if photo.in_process is False:
                 logger.info(f"Not in Gphotos: {photo.src_path}")
             photo.in_gphotos = False
         photo.in_process = True
         photo.save()
         try:
             sources = SourceArchive.objects(md5sum=photo.md5sum).get()
         except me.DoesNotExist:
             SourceArchive(md5sum=photo.md5sum, paths=[photo.src_path]).save()
         else:
             sources.update(add_to_set__paths=[photo.src_path])
     logger.info(
         f"In gphotos: {Queue.objects(in_gphotos=True).count()}, Not in gphotos: {Queue.objects(in_gphotos=False).count()}"
     )
Example #3
def copy_rpms_by_name(names, source_repo, dest_repo, import_conduit,
                      copy_deps):
    """
    Copy RPMs from source repo to destination repo by name

    :param names:           iterable of RPM names
    :type  names:           iterable of basestring
    :param source_repo: The repository we are copying units from.
    :type source_repo: pulp.server.db.model.Repository
    :param dest_repo: The repository we are copying units to
    :type dest_repo: pulp.server.db.model.Repository
    :param import_conduit:  import conduit passed to the Importer
    :type  import_conduit:  pulp.plugins.conduits.unit_import.ImportUnitConduit
    :param copy_deps:       whether to also copy the named RPMs' dependencies
    :type  copy_deps:       bool

    :return:    set of pulp.plugins.model.Unit that were copied
    :rtype:     set
    """
    name_q = mongoengine.Q(name__in=names)
    type_q = mongoengine.Q(unit_type_id=ids.TYPE_ID_RPM)
    units = repo_controller.find_repo_content_units(
        source_repo,
        units_q=name_q,
        repo_content_unit_q=type_q,
        unit_fields=models.RPM.unit_key_fields,
        yield_content_unit=True)

    return copy_rpms(units, source_repo, dest_repo, import_conduit, copy_deps)
Example #4
    def delete_old_autoprocessed(self, paper):
        pq = db.Q(paper=paper)
        ap = db.Q(auto_generated=True)

        acts = Activity.objects(pq & ap)
        for act in acts:
            act.delete()
Example #5
def create():
    schema = schemas.UserSchema()
    try:
        user_data = schema.load(request.get_json())
    except Exception as e:
        print(e)
        response_dict = request.get_json()
        response_dict.update(e.messages)
        response = render_json(response_dict)
        response.status_code = 400
        abort(response)

    user = models.User.objects(
        me.Q(username=user_data.data['username'])
        | me.Q(email=user_data.data['email'])).first()
    if user is None:
        user = models.User(**user_data.data)
        user.set_password(user_data.data['password'],
                          salt=current_app.secret_key)
        user.status = 'active'
        user.save()
        return render_json(schema.dump(user).data)

    errors = [{
        'status': '400',
        'title': 'Found User in System',
        'detail': 'User name or email found in system'
    }]

    response_dict = schema.dump(user_data.data).data
    response_dict['errors'] = errors

    response = render_json(response_dict)
    response.status_code = 400
    abort(response)
Example #6
def index_user():

    user = current_user
    now = datetime.datetime.now()

    available_classes = models.Class.objects(
        (me.Q(limited_enrollment__grantees=user.email)
         | me.Q(limited_enrollment__grantees=user.username))
        & (me.Q(started_date__lte=now)
           & me.Q(ended_date__gte=now))).order_by('ended_date')

    ass_schedule = []
    for class_ in available_classes:
        if not class_.is_enrolled(user.id):
            continue

        for ass_t in class_.assignment_schedule:
            if ass_t.started_date <= now and now < ass_t.ended_date:
                ass_schedule.append(
                    dict(assignment_schedule=ass_t, class_=class_))

    def order_by_ended_date(e):
        return e['assignment_schedule'].ended_date

    ass_schedule.sort(key=order_by_ended_date)

    return render_template(
        '/dashboard/index.html',
        available_classes=available_classes,
        assignment_schedule=ass_schedule,
        now=datetime.datetime.now(),
    )
Example #7
def get_past_assignment_schedule(user):
    now = datetime.datetime.now()

    available_classes = Class.objects(
            (me.Q(started_date__lte=now) &
                me.Q(ended_date__gte=now))
            ).order_by('ended_date')


    ass_schedule = []
    for class_ in available_classes:
        if not class_.is_enrolled(user.id):
            continue

        for ass_t in class_.assignment_schedule:
            if now > ass_t.ended_date:
                ass_schedule.append(
                        dict(assignment_schedule=ass_t,
                             class_=class_))

    def order_by_ended_date(e):
        return e['assignment_schedule'].ended_date

    ass_schedule.sort(key=order_by_ended_date)
    return ass_schedule
Example #8
    def get_list(self, term, offset=0, limit=DEFAULT_PAGE_SIZE, **kwargs):
        query = kwargs.get('query', self.model.objects)
        filters = (self.options and self.options.get('filters')) or {}
        if kwargs.get('filters'):
            filters.update(kwargs['filters'])
        if filters:
            query = query.filter(**filters)

        criteria = None
        if term:
            for field in self._cached_fields:
                flt = {u'%s__icontains' % field.name: term}

                if not criteria:
                    criteria = mongoengine.Q(**flt)
                else:
                    criteria |= mongoengine.Q(**flt)

            query = query.filter(criteria)

        order_by = kwargs.get('order_by', (self.options or {}).get('order_by'))
        if order_by:
            query = query.order_by(order_by)

        if offset:
            query = query.skip(offset)

        return query.limit(limit).all()
Example #9
def edit(class_id):
    user = current_user._get_current_object()
    courses = models.Course.objects(
        me.Q(active=True) & (me.Q(contributors=user) | me.Q(owner=user)))

    print("courses", courses)

    class_ = models.Class.objects.get(id=class_id)
    form = forms.classes.ClassForm(obj=class_)
    # le_form = forms.classes.LimitedEnrollmentForm(
    #         obj=class_.limited_enrollment)

    # form.limited_enrollment = le_form
    course_choices = [(str(c.id), c.name) for c in courses]
    form.course.choices = course_choices
    if request.method == "GET":
        form.course.data = str(class_.course.id)
    method_choices = [("email", "Email"), ("student_id", "Student ID")]
    form.limited_enrollment.method.choices = method_choices

    if not form.validate_on_submit():
        return render_template("/administration/classes/create-edit.html",
                               form=form)
    data = form.data.copy()
    data.pop("csrf_token")

    form.populate_obj(class_)
    course = models.Course.objects.get(id=form.course.data)
    class_.course = course
    class_.save()
    return redirect(url_for("administration.classes.index"))
Example #10
def find_units(units, pagination_size=50):
    """
    Query for units matching the unit key fields of an iterable of ContentUnit objects.

    This requires that all the ContentUnit objects are of the same content type.

    :param units: Iterable of content units with the unit key fields specified.
    :type units: iterable of pulp.server.db.model.ContentUnit
    :param pagination_size: How large a page size to use when querying units.
    :type pagination_size: int (default 50)

    :returns: unit models that pulp already knows about.
    :rtype: Generator of pulp.server.db.model.ContentUnit
    """
    # get the class from the first unit
    model_class = None

    for units_group in misc.paginate(units, pagination_size):
        q_object = mongoengine.Q()
        # Build a query for the units in this group
        for unit in units_group:
            if model_class is None:
                model_class = unit.__class__

            # Build the query for all the units, the | operator here
            # creates the equivalent of a mongo $or of all the unit keys
            unit_q_obj = mongoengine.Q(**unit.unit_key)
            q_object = q_object | unit_q_obj

        # Get this group of units
        query = model_class.objects(q_object)

        for found_unit in query:
            yield found_unit
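The empty mongoengine.Q() used to seed the accumulator above is effectively dropped when combined, so the first | behaves as expected. A small sketch of the same paginated OR lookup, assuming a hypothetical Package document and an existing database connection:

import mongoengine


class Package(mongoengine.Document):
    name = mongoengine.StringField()
    version = mongoengine.StringField()


def find_existing(unit_keys, page_size=50):
    """Yield stored packages matching any of the given unit-key dicts."""
    for start in range(0, len(unit_keys), page_size):
        q = mongoengine.Q()
        for key in unit_keys[start:start + page_size]:
            # OR together one page of unit keys, e.g. {'name': ..., 'version': ...}
            q |= mongoengine.Q(**key)
        for pkg in Package.objects(q):
            yield pkg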
Example #11
def save_my_molecule():
    user = user_datastore.get_user(current_user.id)
    smiles = request.form['smiles']
    name = request.form['name']

    if rdkit_smile(smiles) is None:
        result = {'status': 'danger',
                  'msg': 'Please enter a valid SMILES',
                  'issues': []}
        return jsonify(result=result)

    mol_query = MyMolecule.objects(db.Q(owner=user) & db.Q(smiles=smiles))

    if len(mol_query) != 0:
        my_mol = mol_query[0]
    else:
        my_mol = MyMolecule(owner=user,
                            smiles=smiles)

        try:
            mol = Chem.MolFromSmiles(smiles)
            my_mol.svg = get_images.moltosvg(mol, molSize=(100, 100), kekulize=True)
        except Exception:
            my_mol.svg = ''

    my_mol.name = name
    my_mol.save()

    result = {'status': 'success',
              'msg': 'Molecule saved',
              'issues': []}
    return jsonify(result=result)
Example #12
def get_enzyme_paper_progress(enzyme_type_obj):
    enz_type = enzyme_type_obj.enzyme_type
    num_papers = len(Paper.objects(tags=enz_type))
    num_complete_papers = len(
        Paper.objects(
            db.Q(tags=enz_type)
            & (db.Q(status='Complete')
               | db.Q(status='Complete - Awaiting review'))))

    if num_papers == 0:
        progress = [
            "0%", f"{num_complete_papers} out of {num_papers}", 'bg-danger',
            enzyme_type_obj.full_name
        ]
    elif num_complete_papers == 0:
        progress = [
            "0%", f"{num_complete_papers} out of {num_papers}", 'bg-danger',
            enzyme_type_obj.full_name
        ]
    else:
        pc_complete = round((num_complete_papers / num_papers) * 100, 1)

        if pc_complete > 80:
            colour = 'bg-success'
        elif pc_complete > 40:
            colour = 'bg-warning'
        else:
            colour = 'bg-danger'
        progress = [
            f"{pc_complete}%", f"{num_complete_papers} out of {num_papers}",
            colour, enzyme_type_obj.full_name
        ]

    return progress
Example #13
def index(request):
    recent_topics = models.Topic.objects(
        status='publish',
        type='topic').order_by('-published_date').limit(10).all()
    last_comments_topics_ = models.Topic.objects(
        status='publish',
        type='topic').order_by('-comments__published_date').limit(10).all()
    last_comments_topics = []
    for topic in last_comments_topics_:
        if len(topic.comments) > 0:
            last_comments_topics.append(topic)

    photo_albums_ = models.PhotoAlbum.objects(
        status='publish').order_by('-published_date').limit(10).all()
    photo_albums = []
    for photo_album in photo_albums_:
        if len(photo_album.photos) > 0:
            photo_albums.append(photo_album)

    events = models.Event.objects((me.Q(status='publish') &
                                  (me.Q(started_date__gt=datetime.datetime.now().date()) |
                                  me.Q(ended_date__gt=datetime.datetime.now())) &
                                  me.Q(event_type__in=['undergraduate', 'graduate', 'department'])))\
                        .order_by('+started_date')\
                        .limit(5).all()

    return dict(recent_topics=recent_topics,
                last_comments_topics=last_comments_topics,
                photo_albums=photo_albums,
                events=events)
Example #14
    def insert_player(self, player):
        realm = mm.Realm.objects(name=player['realm']).first()
        query = me.Q(name=player['name']) & me.Q(realm=realm)

        player = mm.Player.from_api_json(mm.Player(), player)
        player.player_spec.save()

        talents = []
        for spec in player.talents:
            if mm.ConcreteTalentTree.objects(uid=spec.uid).first() is not None:
                talents.append(mm.ConcreteTalentTree.objects(uid=spec.uid).first())
                continue
            spec.save()
            talents.append(spec)
        for talent in talents:
            talent.save()
        player.talents = talents

        items = []
        for item in player.items:
            q = me.Q(i_id=item.i_id) & me.Q(item_level=item.item_level)
            existing = mm.Item.objects(q).first()
            if existing is not None:
                items.append(mm.Item.objects(q).first())
                continue
            item.save()
            items.append(item)
        player.items = items
        player.realm = realm
        if self.get_player(player.name, player.realm.name) is not None:
            self.update_player(mm.Player.objects(query).first().to_mongo()['_id'], player)
            return
        player.save()
Example #15
 def get_player(self, player_name, realm_name, resolve_refs=False):
     realm = mm.Realm.objects(name=realm_name).first()
     query = me.Q(name=player_name) & me.Q(realm=realm)
     if resolve_refs:
         player = mm.Player.objects(query).first()
         return self.resolve_all(player)
     return mm.Player.objects(query).first()
Example #16
def search_relative_field(model_class, fields, term):
    """Searches a ReferenceField's fields, returning ID's to be used in __in

    There is no JOIN, so no Assignment.objects.filter(course__title='My Course'). To
    get around this, we return a list of ID's.

    Since this is an internal tool, we allow multiple fields to AND/OR group.
    """
    offset = 0
    limit = 500
    query = model_class.objects

    criteria = None

    # If an ObjectId pattern, see if we can get an instant lookup.
    if re.match(RE_OBJECTID, term):
        q = query.filter(id=bson.ObjectId(term)).only('id')
        if q.count() == 1:  # Note: .get doesn't work, they need a QuerySet
            return q

    for field in fields:
        flt = {u'%s__icontains' % field: term}

        if not criteria:
            criteria = mongoengine.Q(**flt)
        else:
            criteria |= mongoengine.Q(**flt)

    query = query.filter(criteria)

    if offset:
        query = query.skip(offset)

    return query.limit(limit).only('id').all()
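As the docstring notes, MongoDB has no JOIN, so reference fields are resolved in two steps: search the referenced collection for ids, then filter with __in. A small sketch of that pattern with hypothetical Course and Assignment documents (a database connection is assumed):

import mongoengine


class Course(mongoengine.Document):
    title = mongoengine.StringField()


class Assignment(mongoengine.Document):
    course = mongoengine.ReferenceField(Course)
    name = mongoengine.StringField()


def assignments_for_course_title(term):
    # Step 1: search the referenced collection and collect matching ids.
    course_ids = list(Course.objects(title__icontains=term).scalar('id'))
    # Step 2: filter the referencing collection with __in on those ids.
    return Assignment.objects(course__in=course_ids)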
Example #17
 def get_user_projects(cls, user=None):
     projects = Project.objects.filter((me.Q(admin=user)
                                        | me.Q(members__in=user))
                                       & me.Q(is_active=True)).exclude(
                                           'start_date', 'end_date',
                                           'sprints')
     return projects
Example #18
def index_admin():
    user = current_user._get_current_object()
    now = datetime.datetime.now()

    available_classes = models.Class.objects(
        me.Q(owner=user)
        & (me.Q(started_date__lte=now)
           & me.Q(ended_date__gte=now))).order_by('ended_date')

    ass_schedule = []
    for class_ in available_classes:

        for ass_t in class_.assignment_schedule:
            if ass_t.started_date <= now and now < ass_t.ended_date:
                ass_schedule.append(
                    dict(assignment_schedule=ass_t, class_=class_))

    def order_by_ended_date(e):
        return e['assignment_schedule'].ended_date

    ass_schedule.sort(key=order_by_ended_date)

    return render_template(
        '/dashboard/index-admin.html',
        available_classes=available_classes,
        assignment_schedule=ass_schedule,
        now=datetime.datetime.now(),
    )
Example #19
def high_importance_papers():
    hi_papers = Paper.objects(high_importance=True).select_related()
    enzyme_types = EnzymeType.objects()

    tags = []
    for paper in hi_papers:
        for tag in paper.tags:
            if tag not in tags:
                tags.append(str(tag))
    tags = sorted(tags)

    data_by_tag = {}
    for tag in tags:
        hi_q = db.Q(high_importance=True)
        tag_q = db.Q(tags=tag)

        papers_data = list(
            Paper.objects(hi_q & tag_q).only(
                *papers_table.PAPERS_TABLE_FIELDS).order_by(
                    '-status').as_pymongo())
        papers_data = papers_table.process_papers_dict(papers_data,
                                                       show_owner=False)
        data_by_tag[tag] = papers_data

    enzyme_full_names = {}
    for enz_type in enzyme_types:
        enzyme_full_names[enz_type.enzyme_type] = enz_type.full_name

    return render_template('edit_tables/high_importance_papers.html',
                           data_by_tag=data_by_tag,
                           tags=tags,
                           enzyme_full_names=enzyme_full_names)
Example #20
def papers_that_need_data():
    user = user_datastore.get_user(current_user.id)
    title = "Papers that require curating"

    args = request.args.to_dict()
    q_no_user = db.Q(owner=None)
    q_no_data = db.Q(status__nin=['Complete - Awaiting review', 'Complete'])
    if 'enzyme_type' in args:
        q_e_type = db.Q(tags=args['enzyme_type'])
        title += f" - {args['enzyme_type']}"
    else:
        q_e_type = db.Q()

    papers_data = list(
        Paper.objects(q_no_user & q_no_data & q_e_type).only(
            *papers_table.PAPERS_TABLE_FIELDS).order_by(
                '-status').as_pymongo())
    papers_data = papers_table.process_papers_dict(papers_data,
                                                   show_owner=False)

    return render_template('edit_tables/edit_papers.html',
                           papers_data=papers_data,
                           papers_table_height='80vh',
                           papers_button_columns=['self_assign'],
                           show_owner=False,
                           title=title,
                           row_click_modal=False)
Example #21
    def _search(self, query, search_term):
        op, term = parse_like_term(search_term)

        criteria = None

        for field in self._search_fields:
            if field.name == 'bot':
                # Reference fields cannot be text-searched directly, so look up
                # matching bots and filter on their ids instead.
                for bot in BotProfile.objects(name__contains=term):
                    flt = {'%s__exact' % field.name: bot.id}
                    q = mongoengine.Q(**flt)
                    criteria = q if criteria is None else criteria | q
                continue

            flt = {'%s__%s' % (field.name, op): term}
            q = mongoengine.Q(**flt)
            criteria = q if criteria is None else criteria | q

        return query.filter(criteria)
Example #22
def handle_authorize_google(remote, token, user_info):

    if not user_info:
        return redirect(url_for('accounts.login'))

    user = models.User.objects(
        me.Q(username=user_info.get('name'))
        | me.Q(email=user_info.get('email'))).first()
    if not user:
        user = models.User(username=user_info.get('name'),
                           email=user_info.get('email'),
                           first_name=user_info.get('given_name'),
                           last_name=user_info.get('family_name'),
                           status='active')
        user.resources[remote.name] = user_info
        email = user_info.get('email')
        if email[:email.find('@')].isdigit():
            user.roles.append('student')
        user.save()

    login_user(user)

    if token:
        oauth2token = models.OAuth2Token(
            name=remote.name,
            user=user,
            access_token=token.get('access_token'),
            token_type=token.get('token_type'),
            refresh_token=token.get('refresh_token', None),
            expires=datetime.datetime.utcfromtimestamp(
                token.get('expires_in')))
        oauth2token.save()

    return redirect(url_for('dashboard.index'))
Example #23
    def nodes_not_present(self, only_biocatdb=False, max_num=None):
        """ Return a list of enzymes which are not in the ssn """

        # Get a list of all sequence objects of enzyme type
        t0 = time.time()
        sequences = Sequence.objects(
            db.Q(enzyme_type=self.enzyme_type) & db.Q(sequence__ne="")
            & db.Q(sequence__ne=None) & db.Q(sequence_unavailable__ne=True))
        if only_biocatdb is True:
            seq_objects = list(sequences)
        else:
            unirefs = UniRef50.objects(enzyme_type=self.enzyme_type_obj)
            seq_objects = list(sequences) + list(unirefs)

        # Get sequences not in nodes
        not_in_nodes = []
        for seq_obj in seq_objects:
            if (seq_obj.enzyme_name not in list(self.graph.nodes)
                    and seq_obj.sequence is not None
                    and len(seq_obj.sequence) > 12):
                not_in_nodes.append(seq_obj)

        # Return only up to the maximum number of sequences
        if max_num is not None and len(not_in_nodes) > max_num:
            not_in_nodes = not_in_nodes[:max_num]

        t1 = time.time()
        self.log(
            f"Identified {len(not_in_nodes)} {self.enzyme_type} proteins which need adding, in {round(t1 - t0, 1)} seconds"
        )
        return not_in_nodes
Example #24
    def create_date_range_filter(self, start_date=None, end_date=None):
        """
        Create a date filter based on start and end issue dates specified in the repo config.

        :param start_date: start time for the filter
        :type  start_date: datetime.datetime
        :param end_date: end time for the filter
        :type  end_date: datetime.datetime

        :return: Q object with start and/or end dates, or None if start and end dates are not
                 provided
        :rtype:  mongoengine.Q or types.NoneType
        """
        if start_date:
            start_date = dateutils.format_iso8601_datetime(start_date)
        if end_date:
            end_date = dateutils.format_iso8601_datetime(end_date)

        if start_date and end_date:
            return mongoengine.Q(created__gte=start_date,
                                 created__lte=end_date)
        elif start_date:
            return mongoengine.Q(created__gte=start_date)
        elif end_date:
            return mongoengine.Q(created__lte=end_date)
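A hedged usage sketch for a date-range filter like the one above, assuming a hypothetical Issue document with a created field (connection assumed):

import datetime

import mongoengine


class Issue(mongoengine.Document):
    created = mongoengine.DateTimeField()


def issues_in_range(start=None, end=None):
    q = mongoengine.Q()
    if start:
        q &= mongoengine.Q(created__gte=start)
    if end:
        q &= mongoengine.Q(created__lte=end)
    return Issue.objects(q)


# e.g. issues_in_range(start=datetime.datetime(2023, 1, 1))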
Example #25
def get_existing_units(search_dicts, unit_class, repo):
    """
    Get units from the given repository that match the search terms. The unit instances will only
    have their unit key fields populated.

    :param search_dicts:    iterable of dictionaries that should be used to search units
    :type  search_dicts:    iterable
    :param unit_class:      subclass representing the type of unit to search for
    :type  unit_class:      pulp_rpm.plugins.db.models.Package
    :param repo:            repository to search in
    :type  repo:            pulp.server.db.model.Repository

    :return:    generator of unit_class instances with only their unit key fields populated
    :rtype:     generator
    """
    unit_fields = unit_class.unit_key_fields
    for segment in paginate(search_dicts):
        unit_filters = {'$or': list(segment)}
        units_q = mongoengine.Q(__raw__=unit_filters)
        association_q = mongoengine.Q(unit_type_id=unit_class._content_type_id.default)

        for result in repo_controller.find_repo_content_units(repo, units_q=units_q,
                                                              repo_content_unit_q=association_q,
                                                              unit_fields=unit_fields,
                                                              yield_content_unit=True):
            yield result
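Example #25 passes a raw MongoDB $or through mongoengine with Q(__raw__=...). A minimal sketch of that escape hatch, assuming a hypothetical Package document and an existing connection:

import mongoengine


class Package(mongoengine.Document):
    name = mongoengine.StringField()
    version = mongoengine.StringField()


def find_any(key_dicts):
    # __raw__ hands a literal MongoDB query document straight to the driver
    raw_or = {'$or': list(key_dicts)}
    return Package.objects(mongoengine.Q(__raw__=raw_or)).only('name', 'version')


# e.g. find_any([{'name': 'bash', 'version': '5.1'}, {'name': 'vim', 'version': '9.0'}])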
Example #26
def task_check_ssn_status():
    for enzyme_type in EnzymeType.objects():
        ssn_query = list(SSN_record.objects(enzyme_type=enzyme_type))
        if len(ssn_query) > 1:
            print(
                f'Warning - multiple ssn records for {enzyme_type} - deleting extras'
            )
            for i in range(1, len(ssn_query)):
                ssn_query[i].delete()

    if len(current_app.blast_queue.jobs) + len(
            current_app.process_blasts_queue.jobs) + len(
                current_app.alignment_queue.jobs) == 0:
        print('Checking ssn status')
        ssn_records = SSN_record.objects().select_related()

        for ssn_r in ssn_records:
            if ssn_r.status != 'Complete' and ssn_r.enzyme_type.bioinformatics_status == 'Complete':
                if len(UniRef50.objects(enzyme_type=ssn_r.enzyme_type)) != 0:
                    enzyme_type = ssn_r.enzyme_type.enzyme_type
                    job_name = f"{enzyme_type}_expand_ssn"
                    current_app.alignment_queue.enqueue(
                        ssn_tasks.task_expand_ssn,
                        enzyme_type,
                        job_id=job_name)
                    print(f'Queued SSN job for {enzyme_type}')

        for enz_type_obj in EnzymeType.objects():
            if enz_type_obj.bioinformatics_status == 'Complete':
                if enz_type_obj not in SSN_record.objects().distinct(
                        'enzyme_type'):
                    unirefs = UniRef50.objects(enzyme_type=enz_type_obj)
                    biocatdb_seqs = list(
                        Sequence.objects(
                            db.Q(enzyme_type=enz_type_obj.enzyme_type)
                            & db.Q(bioinformatics_ignore__ne=True)))
                    biocatdb_seqs = [
                        seq for seq in biocatdb_seqs
                        if seq.sequence != '' and seq.sequence is not None
                    ]

                    if len(unirefs) + len(biocatdb_seqs) != 0:
                        print(
                            f"No SSN for {enz_type_obj.enzyme_type}, but blasts are complete and sequences present..  creating SSN."
                        )
                        job_name = f"{enz_type_obj.enzyme_type}_expand_ssn"
                        current_app.alignment_queue.enqueue(
                            ssn_tasks.task_expand_ssn,
                            enz_type_obj.enzyme_type,
                            job_id=job_name)

    else:
        print(f"Length blast queue = {len(current_app.blast_queue.jobs)}")
        print(
            f"Length process blast queue = {len(current_app.process_blasts_queue.jobs)}"
        )
        print(
            f"Length alignment queue = {len(current_app.alignment_queue.jobs)}"
        )
Example #27
def index():
    classes = models.Class.objects(
        me.Q(limited_enrollment__grantees=current_user.username)
        | me.Q(limited_enrollment__grantees=current_user.email))

    courses = [class_.course for class_ in classes]

    return render_template('/courses/index.html', courses=courses)
Example #28
 def get_resources(self):
     query = self.owner_query()
     for selector in self.selectors:
         query &= selector.q
     if 'deleted' in self.selector_resource_cls._fields:
         query &= me.Q(deleted=None)
     if 'missing_since' in self.selector_resource_cls._fields:
         query &= me.Q(missing_since=None)
     return self.selector_resource_cls.objects(query)
Example #29
 def get_resources(self):
     query = self.owner_query()
     for condition in self.conditions:
         query &= condition.q
     if 'deleted' in self.condition_resource_cls._fields:
         query &= me.Q(deleted=None)
     if 'missing_since' in self.condition_resource_cls._fields:
         query &= me.Q(missing_since=None)
     return self.condition_resource_cls.objects(query)
Example #30
    def _repo_unit_nevra(self, erratum_unit, repo_id):
        """
        Return a list of NEVRA dicts for units in a single repo referenced by the given errata.

        Pulp errata units combine the known packages from all synced repos. Given an errata unit
        and a repo, return a list of NEVRA dicts that can be used to filter out packages not
        linked to that repo when generating a repo's updateinfo XML file. While returning that
        list of NEVRA dicts is the main goal, doing so quickly and without running out of memory
        is what makes this a little bit tricky.

        Build up a super-fancy query to get the unit ids for all NEVRA seen in this erratum, then
        check repo/unit associations for this erratum to limit the packages in the published
        updateinfo to the units in the repo currently being published.

        :param erratum_unit: The erratum unit that should be written to updateinfo.xml.
        :type erratum_unit: pulp_rpm.plugins.db.models.Errata
        :param repo_id: The repo_id of a pulp repository in which to find units
        :type repo_id: str
        :return: a list of NEVRA dicts for units in a single repo referenced by the given errata
        :rtype: list
        """
        nevra_fields = ('name', 'epoch', 'version', 'release', 'arch')
        nevra_q = mongoengine.Q()
        for pkglist in erratum_unit.pkglist:
            for pkg in pkglist['packages']:
                pkg_nevra = dict((field, pkg[field]) for field in nevra_fields)
                nevra_q |= mongoengine.Q(**pkg_nevra)

        # Aim the super-fancy query at mongo to get the units that this errata refers to
        # The scalar method on the end returns a list of tuples to try to save some memory
        # and also cut down on mongoengine model instance hydration costs.
        nevra_units = models.RPM.objects.filter(nevra_q).scalar(
            'id', *nevra_fields)

        # Split up the nevra unit entries into a mapping of the unit id to its nevra fields
        nevra_unit_map = dict(
            (nevra_unit[0], nevra_unit[1:]) for nevra_unit in nevra_units)

        # Get all of the unit ids from this errata that are associated with the current repo.
        # Cast this as a set for speedier lookups when iterating over the nevra unit map.
        repo_unit_ids = set(
            RepositoryContentUnit.objects.filter(
                unit_id__in=nevra_unit_map.keys(),
                repo_id=repo_id).scalar('unit_id'))

        # Finally(!), intersect the repo unit ids with the unit nevra ids to
        # create a list of nevra dicts that can be easily compared to the
        # errata package nevra and exclude unrelated packages
        repo_unit_nevra = []
        for nevra_unit_id, nevra_field_values in nevra_unit_map.items():
            # based on the args to scalar when nevra_units was created:
            if nevra_unit_id in repo_unit_ids:
                repo_unit_nevra.append(
                    dict(zip(nevra_fields, nevra_field_values)))

        return repo_unit_nevra
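The scalar() call above avoids hydrating full documents. A small sketch of the same memory-saving trick on a hypothetical Rpm document (connection assumed):

import mongoengine


class Rpm(mongoengine.Document):
    name = mongoengine.StringField()
    version = mongoengine.StringField()
    arch = mongoengine.StringField()


def nevra_map(nevras):
    """Map unit id -> (name, version, arch) for stored RPMs matching any NEVRA dict."""
    q = mongoengine.Q()
    for nevra in nevras:
        q |= mongoengine.Q(**nevra)
    # scalar() yields plain tuples instead of full Document instances
    rows = Rpm.objects.filter(q).scalar('id', 'name', 'version', 'arch')
    return {row[0]: row[1:] for row in rows}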