示例#1
0
def fix_bug_titles():
    """
    Retry fetching details for bugs stored with placeholder titles.

    When bugzilla is unreachable, bodhi records a placeholder title
    ('Unable to fetch bug title' or 'Invalid bug number').  This job
    periodically re-fetches those bugs so real titles can replace the
    placeholders.
    """
    from bodhi.model import Bugzilla
    from sqlobject.sqlbuilder import OR

    log.debug("Running fix_bug_titles job")
    broken = Bugzilla.select(
        OR(Bugzilla.q.title == 'Invalid bug number',
           Bugzilla.q.title == 'Unable to fetch bug title'))
    for broken_bug in broken:
        broken_bug.fetch_details()
示例#2
0
    def _construct_state_query(self, table, state, columns):
        """Build the WHERE (and possibly HAVING) expression for *state*.

        For each column name, parse *state* into a clause against the
        corresponding field on *table*.  Clauses on aggregate fields
        (sum, avg, ...) must live in the HAVING part of the query, so
        they are collected separately and registered via _add_having;
        the remaining clauses are OR-ed together and returned (None when
        no plain clause was produced).
        """
        where_parts = []
        having_parts = []

        for name in columns:
            field = getattr(table.q, name)

            # Fields wrapped in an aggregate SQL call (sum, avg, etc.)
            # cannot be filtered in WHERE; they belong in HAVING.
            is_aggregate = field.hasSQLCall()

            if isinstance(state, NumberQueryState):
                clause = self._parse_number_state(state, field)
            elif isinstance(state, NumberIntervalQueryState):
                clause = self._parse_number_interval_state(state, field)
            elif isinstance(state, StringQueryState):
                clause = self._parse_string_state(state, field)
            elif isinstance(state, DateQueryState):
                clause = self._parse_date_state(state, field)
            elif isinstance(state, DateIntervalQueryState):
                clause = self._parse_date_interval_state(state, field)
            else:
                raise NotImplementedError(state.__class__.__name__)

            if clause:
                if is_aggregate:
                    having_parts.append(clause)
                else:
                    where_parts.append(clause)

        if having_parts:
            self._add_having(OR(*having_parts))

        if where_parts:
            return OR(*where_parts)
示例#3
0
文件: rss.py 项目: tyll/bodhi
    def get_critpath_updates(self, release=None, unapproved=None):
        """Return a feed dict of the latest critical path updates.

        :param release: optional release name; restricts results to that
            release, or returns an error-title dict if the name is unknown.
        :param unapproved: when true, additionally exclude stable updates
            and skip updates that are already critpath-approved.
        :returns: dict with 'title', 'subtitle', 'link' and 'entries'
            keys suitable for rendering a feed.
        """
        # Fixes over the previous version: removed the counter `i`, which
        # was incremented but never read; dropped the redundant truthiness
        # test on `delta` (a zero timedelta has days == 0 anyway); and
        # renamed the comprehension variable so it no longer shadows the
        # outer `release`.
        entries = []
        base = config.get('base_address')
        title = 'Latest Critical Path Updates'
        query = [PackageUpdate.q.status != 'obsolete']
        if release:
            try:
                release = Release.byName(release)
            except SQLObjectNotFound:
                return dict(title='%s release not found' % release, entries=[])
            releases = [release]
            title = title + ' for %s' % release.long_name
        else:
            releases = Release.select()
        if unapproved:
            query.append(PackageUpdate.q.status != 'stable')
        for update in PackageUpdate.select(
                AND(
                    OR(*[
                        PackageUpdate.q.releaseID == rel.id
                        for rel in releases
                    ]), *query),
                orderBy=PackageUpdate.q.date_submitted).reversed():

            # Once the feed already holds the maximum number of entries,
            # stop as soon as we reach updates older than the configured
            # window of days.
            delta = datetime.utcnow() - update.date_submitted
            if delta.days > config.get('feeds.num_days_to_show'):
                if len(entries) >= config.get('feeds.max_entries'):
                    break

            if update.critpath:
                # In unapproved mode, skip updates already approved.
                if unapproved and update.critpath_approved:
                    continue
                entries.append({
                    'id': base + url(update.get_url()),
                    'summary': update.notes,
                    'link': base + url(update.get_url()),
                    'published': update.date_submitted,
                    'updated': update.date_submitted,
                    'title': update.title,
                })
        return dict(title=title,
                    subtitle="",
                    link=config.get('base_address') + url('/'),
                    entries=entries)
示例#4
0
def orCondition():
    """
    Build the time-based OR filter that selects job records due to run.

    A job record should run at least once per day and may run several
    times a day, provided at least N hours (the configured interval)
    pass between runs.  A cron manager can therefore start job sessions
    hourly yet skip records whose data is recent enough.

    This is useful for fetching data e.g. 4 times a day at 6-hour
    intervals.  It also means that if a job session crashes and is
    restarted, only the jobs not completed in that session are resumed.

    A record is due when ANY of the following hold:
      - it has never completed, or
      - it has not completed today (we need values for today and may not
        get another chance to run the job session today), or
      - it last completed today but more than N hours ago, so fetching
        fresher records for today is worthwhile (e.g. to compare changes
        over the day).
    """
    now = datetime.datetime.now()

    # Midnight today, built as a datetime so it compares with the
    # lastCompleted column.
    startOfToday = datetime.datetime(now.year, now.month, now.day)

    # Anything completed after this moment counts as "run recently",
    # based on the configured minimum hours between job sessions.
    intervalHours = conf.getint("TrendCron", "interval")
    recentThreshold = now - datetime.timedelta(hours=intervalHours)

    # "Not run today OR not run recently" reduces to comparing against
    # whichever threshold is later, so use the max of the two.
    cutoff = max(startOfToday, recentThreshold)

    # The ORM needs `==` rather than `is` to build the query correctly.
    return OR(
        db.PlaceJob.q.lastCompleted == None,  # noqa: E711
        db.PlaceJob.q.lastCompleted < cutoff,
    )
示例#5
0
def search_books(search_type, case_sensitive, values, join_expressions=None,
                 orderBy=None, use_filters=False):
    """Run a Book search, optionally applying the configured filters.

    When use_filters is set, the configured language filter adds a join
    against Language (OR-ing one condition set per language), and a falsy
    'deleted' filter excludes deleted books.
    """
    deleted_filter = None
    if use_filters:
        config = get_config()
        lang_filter = config.getlist('filters', 'lang')
        deleted_filter = config.getint('filters', 'deleted')
        if lang_filter:
            if join_expressions is None:
                join_expressions = []
            # One condition set per configured language, OR-ed together.
            per_lang = [
                mk_search_conditions(
                    Language, search_type, case_sensitive, {'name': lang})
                for lang in lang_filter
            ]
            join_expressions.append(Book.j.language)
            join_expressions.append(OR(*per_lang))
    conditions = mk_search_conditions(
        Book, search_type, case_sensitive, values,
        join_expressions=join_expressions)
    if use_filters and not deleted_filter:
        conditions.append(Book.q.deleted == False)  # noqa: E712
    return Book.select(AND(*conditions), orderBy=orderBy)
def searchInventory(sortby="booktitle", out_of_stock=False, **kwargs):
    """Search the title/book inventory using the filter values in kwargs.

    Builds a WHERE clause from the recognized kwargs (title, author,
    isbn, date ranges, ...), runs a joined Title.select(), then applies
    the stock/sold count filters in Python.

    Bug fixes over the previous version:
    - "the_location" branch referenced an undefined name `the_location`
      (NameError); it now reads kwargs["the_location"].
    - the bytes-normalization loop tested whether the KEY was bytes but
      decoded the VALUE; it now decodes bytes values.
    - kwargs.setdefault("out_of_stock", False) inserted the key into
      kwargs, so the out-of-stock filter below ALWAYS ran; replaced with
      a non-mutating kwargs.get() and a truthiness check.
    - "sold_less_than" compared with >= instead of <=.

    :param sortby: column name passed to orderBy.
    :param out_of_stock: NOTE(review): this parameter is unused; callers
        appear to pass the flag via kwargs['out_of_stock'] instead —
        confirm before removing it from the signature.
    :returns: the Title select results, possibly filtered into a list.
    """
    # start building the filter list
    where_clause_list = []
    print("kwargs are ", kwargs, file=sys.stderr)
    # Normalize any bytes values to str so the string filters below work.
    for k in kwargs:
        if isinstance(kwargs[k], bytes):
            kwargs[k] = kwargs[k].decode("utf-8")
    # Drop empty filter values entirely.
    to_delete = [k for k in kwargs if kwargs[k] == ""]
    for td in to_delete:
        del kwargs[td]
    print(len(kwargs), file=sys.stderr)

    clause_tables = ["book", "author", "author_title", "location"]
    join_list = [
        LEFTJOINOn("title", "book", "book.title_id=title.id"),
        LEFTJOINOn(None, "author_title", "title.id=author_title.title_id"),
        LEFTJOINOn(None, "author", "author.id=author_title.author_id"),
        LEFTJOINOn(None, Location, Location.q.id == Book.q.locationID),
    ]
    if "the_kind" in kwargs:
        where_clause_list.append(Title.q.kindID == kwargs["the_kind"])
    if "the_location" in kwargs and len(kwargs["the_location"]) > 1:
        where_clause_list.append(Book.q.locationID == kwargs["the_location"])
    if "title" in kwargs:
        where_clause_list.append(RLIKE(Title.q.booktitle, kwargs["title"].strip()))
    if "publisher" in kwargs:
        where_clause_list.append(RLIKE(Title.q.publisher, kwargs["publisher"].strip()))
    if "tag" in kwargs:
        where_clause_list.append(RLIKE(Title.q.tag, kwargs["tag"].strip()))
    if "isbn" in kwargs:
        # _process_isbn also yields a price, which this search ignores.
        isbn, _price = _process_isbn(kwargs["isbn"])
        where_clause_list.append(Title.q.isbn == isbn)
    if "formatType" in kwargs:
        where_clause_list.append(Title.q.type == kwargs["formatType"].strip())
    if "owner" in kwargs:
        where_clause_list.append(RLIKE(Book.q.owner, kwargs["owner"].strip()))
    if "distributor" in kwargs:
        where_clause_list.append(
            RLIKE(Book.q.distributor, kwargs["distributor"].strip())
        )
    if "inv_begin_date" in kwargs:
        where_clause_list.append(Book.q.inventoried_when >= kwargs["inv_begin_date"])
    if "inv_end_date" in kwargs:
        where_clause_list.append(Book.q.inventoried_when < kwargs["inv_end_date"])
    if "sold_begin_date" in kwargs:
        where_clause_list.append(Book.q.sold_when >= kwargs["sold_begin_date"])
    if "sold_end_date" in kwargs:
        where_clause_list.append(Book.q.sold_when < kwargs["sold_end_date"])
    if "author" in kwargs:
        where_clause_list.append(RLIKE(Author.q.authorName, kwargs["author"].strip()))
    if "category" in kwargs:
        # NOTE(review): Category is not in clause_tables or join_list, so
        # this clause likely fails at the SQL level — confirm with callers.
        where_clause_list.append(
            RLIKE(Category.q.categoryName, kwargs["category"].strip())
        )
    if "status" in kwargs:
        where_clause_list.append(Book.q.status == kwargs["status"].strip())
    if "id" in kwargs:
        where_clause_list.append(Title.q.id == kwargs["id"])
    if "authorOrTitle" in kwargs:
        where_clause_list.append(
            OR(
                RLIKE(Author.q.authorName, kwargs["authorOrTitle"].strip()),
                RLIKE(Title.q.booktitle, kwargs["authorOrTitle"].strip()),
            )
        )

    where_clause = AND(*where_clause_list)

    # do search first. Note it currently doesnt let you search for every book
    # in database, unless you use some sort of trick like '1=1' for the where
    # clause string, as the where clause string may not be blank
    titles = []
    if len(kwargs) > 1 or kwargs.get("out_of_stock", False):
        titles = Title.select(
            where_clause,
            join=join_list,
            orderBy=sortby,
            clauseTables=clause_tables,
            distinct=True,
        )
    # filter for stock status
    # GROUPBY in sqlobject is complicated. We could do it but it's not worth it
    if kwargs.get("out_of_stock"):
        titles = [t for t in titles if t.copies_in_status("STOCK") == 0]
    # filter on specific numbers in stock
    if "stock_less_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("STOCK") <= int(kwargs["stock_less_than"])
        ]
    if "stock_more_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("STOCK") >= int(kwargs["stock_more_than"])
        ]
    # filter by items sold
    if "sold_more_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("SOLD") >= int(kwargs["sold_more_than"])
        ]
    if "sold_less_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("SOLD") <= int(kwargs["sold_less_than"])
        ]
    print(titles, file=sys.stderr)
    return titles