Example #1
def test_empty_AND():
    assert AND() is None
    assert AND(True) is True

    # sqlrepr() is needed because AND() returns an SQLExpression that overrides
    # comparison. The following
    #     AND('x', 'y') == "foo bar"
    # is True! (-: Eeek!
    assert sqlrepr(AND(1, 2)) == sqlrepr(SQLOp("AND", 1, 2)) == "((1) AND (2))"
    assert sqlrepr(AND(1, 2, '3'), "sqlite") == \
        sqlrepr(SQLOp("AND", 1, SQLOp("AND", 2, '3')), "sqlite") == \
        "((1) AND ((2) AND ('3')))"
Example #2
    def search(self, states):
        """
        Execute a search.
        @param states:
        """
        if self.table is None:
            raise ValueError("table cannot be None")
        table = self.table
        queries = []
        self._having = []
        for state in states:
            search_filter = state.filter
            assert state.filter

            # Column query
            if search_filter in self._columns:
                query = self._construct_state_query(
                    table, state, self._columns[search_filter])
                if query:
                    queries.append(query)
            # Custom per filter/state query.
            elif search_filter in self._filter_query_callbacks:
                for callback in self._filter_query_callbacks[search_filter]:
                    query = callback(state)
                    if query:
                        queries.append(query)
            else:
                if (self._query == self._default_query
                        and not self._query_callbacks):
                    raise ValueError(
                        "You need to add a search column or a query callback "
                        "for filter %s" % (search_filter))

        for callback in self._query_callbacks:
            query = callback(states)
            if query:
                queries.append(query)

        if queries:
            query = AND(*queries)
        else:
            query = None

        having = None
        if self._having:
            having = AND(self._having)

        result = self._query(query, having, self.conn)
        return result.limit(self.get_limit())
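
A side note on the final `if queries:` guard: given Example #1's `AND() is None` behavior, unpacking an empty list yields None as well, so the explicit branch mostly documents intent. A two-line check:

from sqlobject.sqlbuilder import AND

queries = []
assert AND(*queries) is None  # AND(*[]) is AND(), which returns None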
Example #3
    def relation_values(self, object_name, rows):
        joins = {}
        ids = rows.keys()
        if not ids:
            return joins

        obj = self.load_object(object_name)
        conn = obj._connection
        for column in obj.sqlmeta.joins:
            query = None
            coltype = self.get_column_type(column)
            if coltype in ('SOMultipleJoin', 'SOSQLMultipleJoin'):
                query = conn.sqlrepr(
                    Select(
                        [column.soClass.q.id,
                         func.Count(column.otherClass.q.id)],
                        where=AND(
                            column.soClass.q.id == self.join_foreign_key(column),
                            IN(column.soClass.q.id, ids)),
                        groupBy=column.soClass.q.id))

            elif coltype in ('SORelatedJoin', 'SOSQLRelatedJoin'):
                d = (column.intermediateTable, column.joinColumn,
                     column.intermediateTable, column.otherColumn,
                     column.intermediateTable, column.intermediateTable,
                     column.joinColumn, ','.join(['%s' % x for x in ids]),
                     column.intermediateTable, column.joinColumn)

                query = ("SELECT %s.%s, Count(%s.%s) FROM %s"
                         " WHERE %s.%s IN(%s) GROUP BY %s.%s" % d)

            elif coltype == 'SOSingleJoin':
                alias = self.load_label_column_for_object(
                    column.otherClassName)
                query = conn.sqlrepr(
                    Select(
                        [column.soClass.q.id,
                         getattr(column.otherClass.q, alias)],
                        where=AND(
                            column.soClass.q.id == self.join_foreign_key(column),
                            IN(column.soClass.q.id, ids))))

            if not query:
                continue
            joins[column.joinMethodName] = conn.queryAll(query)
        return joins
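
The raw-SQL branch for related joins is easiest to follow by rendering the template once by hand. The table and column names below are invented for illustration; only the format string comes from the code above:

# Hypothetical related join through an intermediate table person_group
# (joinColumn person_id, otherColumn group_id) with ids = [1, 2, 3].
d = ('person_group', 'person_id', 'person_group', 'group_id',
     'person_group', 'person_group', 'person_id', '1,2,3',
     'person_group', 'person_id')
query = ("SELECT %s.%s, Count(%s.%s) FROM %s"
         " WHERE %s.%s IN(%s) GROUP BY %s.%s" % d)
assert query == ("SELECT person_group.person_id, Count(person_group.group_id)"
                 " FROM person_group WHERE person_group.person_id IN(1,2,3)"
                 " GROUP BY person_group.person_id")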
Example #4
def approve_testing_updates():
    """
    Scan all testing updates and approve ones that have met the per-release
    testing requirements.

    https://fedoraproject.org/wiki/Package_update_acceptance_criteria
    """
    log.info('Running approve_testing_updates job...')
    for update in PackageUpdate.select(
            AND(PackageUpdate.q.status == 'testing',
                PackageUpdate.q.request == None)):
        # If this release does not have any testing requirements, skip it
        if not update.release.mandatory_days_in_testing:
            continue
        # If this has already met testing requirements, skip it
        if update.met_testing_requirements:
            continue
        # If this is a critpath update, skip it, since they have their own
        # testing requirements, aside from spending time in testing.
        if update.critpath:
            continue
        if update.meets_testing_requirements:
            log.info('%s now meets testing requirements' % update.title)
            update.comment(config.get('testing_approval_msg') %
                           update.days_in_testing,
                           author='bodhi')
    log.info('approve_testing_updates job complete.')
Example #5
def searchMessages(searchText=None, fromDate=None, toDate=None):
    """
    Given an input phrase, search for Tweets in the local db and return Tweets
    which contain the phrase in their text and are within the date range.

    :param searchText: String as text to search in the Tweet messages.
        Defaults to None to get all Tweets.
    :param fromDate: optional datetime.date object. If included, filter Tweets
        which were on or after this date. Must be used with `toDate`.
    :param toDate: optional datetime.date object. If included, filter Tweets
        which were before or on this date. Must be used with `fromDate`.

    :return query: select results object for the Tweets matching the filter
        parameters, ordered by the Tweet default sorting.
    """
    if searchText:
        query = db.Tweet.select(db.Tweet.q.message.contains(searchText))
    else:
        query = db.Tweet.select()

    if fromDate and toDate:
        # SQL sees a given date as a timestamp at midnight, therefore move
        # it ahead 24 hours and check up to that date, excluding the new day.
        toDate = toDate + datetime.timedelta(days=1)
        query = query.filter(
            AND(db.Tweet.q.createdAt >= fromDate, db.Tweet.q.createdAt < toDate)
        )

    return query
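
The midnight-timestamp comment is the crux of the date handling: shifting the end date forward one day and using a strict `<` turns an inclusive date range into a safe half-open window. A small, dependency-free illustration with invented dates:

import datetime

toDate = datetime.date(2020, 5, 10)

# SQL compares a bare date as midnight, so `createdAt <= toDate` would
# drop anything later on the final day. Shift one day and use `<`.
upperBound = datetime.datetime.combine(
    toDate + datetime.timedelta(days=1), datetime.time.min)

createdAt = datetime.datetime(2020, 5, 10, 23, 59)
assert createdAt < upperBound  # still inside the window, as intended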
Example #6
    def GET(self, name=None, version=None):
        jobs = []

        if version:
            title = "%s %s" % (name, version)
            package = "%s/%s" % (name, version)
            query = Job.select(
                IN(
                    Job.q.package,
                    Select(
                        Package.q.id,
                        AND(Package.q.name == name,
                            Package.q.version == version))))
        else:
            title = package = name
            query = Job.select(
                IN(Job.q.package, Select(Package.q.id,
                                         Package.q.name == name)))

        result, page, nb_pages = _paginate_query(
            query.orderBy(DESC(Job.q.package)))

        jobs.extend(result)
        return render.base(
            page=render.tab(jobs=jobs, page=page, nb_pages=nb_pages),
            hostname=socket.gethostname(),
            title=title,
            package=package,
            archs=RebuilddConfig().arch,
            dists=RebuilddConfig().get('build', 'dists').split(' '))
Example #7
    def _parse_date_interval_state(self, state, table_field):
        queries = []
        if state.start:
            queries.append(table_field >= state.start)
        if state.end:
            queries.append(func.DATE(table_field) <= state.end)
        if queries:
            return AND(*queries)
Example #8
    def _parse_number_interval_state(self, state, table_field):
        queries = []
        if state.start is not None:
            queries.append(table_field >= state.start)
        if state.end is not None:
            queries.append(table_field <= state.end)
        if queries:
            return AND(*queries)
Example #9
    def hideCreditCardReports():
        fList = FileEntry.select(
            AND(FileEntry.q.userId == session.getUserId(),
                FileEntry.q.total != None,  # renders as IS NOT NULL
                FileEntry.q.flagged == False))
        # Bank entries are flagged on creation so they will not appear here
        for f in fList:
            if BankEntriesPostProcessor.hideCreditCardReport(
                    f.reportDate, f.total):
                f.flagged = True
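
The `FileEntry.q.total != None` comparison works because SQLObject overloads ==/!= on q-columns: compared against None they render as IS NULL / IS NOT NULL, whereas Python's `is`/`is not` would evaluate immediately to a plain bool and never reach the database. A minimal sketch with an invented Item class:

from sqlobject import SQLObject, IntCol
from sqlobject.sqlbuilder import sqlrepr

class Item(SQLObject):
    total = IntCol(default=None)

# != None builds an IS NOT NULL clause; `is not None` would not.
print(sqlrepr(Item.q.total != None, 'sqlite'))  # noqa: E711
# -> rendered along the lines of: ((item.total) IS NOT (NULL))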
Example #10
    def filtered_query(self, obj, filters):
        if ':' not in filters:  # there should be at least one colon
            msg = ('filter_format_error.'
                   ' The format is column_name:value, not %s' % filters)
            raise cherrypy.HTTPRedirect(turbogears.url('/error', msg=msg))

        filters = [filter.split(':') for filter in filters.split(',')]
        conditions = tuple(
            [self.prepare_filter(obj, filter) for filter in filters])
        return obj.select(AND(*conditions))
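
The parsing step is compact enough to misread: it turns the comma/colon syntax into pairs before prepare_filter maps each pair onto a column condition. In isolation, with an invented filter string:

filters = "status:open,owner:alice"
pairs = [f.split(':') for f in filters.split(',')]
assert pairs == [['status', 'open'], ['owner', 'alice']]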
Example #11
    def GET(self, dist, arch=None):
        jobs = []

        result, page, nb_pages = _paginate_query(
            Job.select(AND(Job.q.arch == arch, Job.q.dist == dist),
                       orderBy=DESC(Job.q.creation_date)))

        jobs.extend(result)
        return render.base(
            page=render.tab(jobs=jobs, page=page, nb_pages=nb_pages),
            arch=arch,
            dist=dist,
            title="%s/%s" % (dist, arch),
            hostname=socket.gethostname(),
            archs=RebuilddConfig().arch,
            dists=RebuilddConfig().get('build', 'dists').split(' '))
Example #12
    def hideCreditCardReport(inputDate, inputTotal):
        reportDate = datetime.strptime(inputDate, '%Y-%m-%d').date()
        start = datetime(reportDate.year, reportDate.month, 1).date()
        end = datetime(reportDate.year, reportDate.month, 28).date()
        # delta = timedelta(days=3)
        # start = reportDate - delta
        # end = reportDate + delta
        bList = BankEntry.select(
            AND(BankEntry.q.userId == session.getUserId(),
                BankEntry.q.date >= start, BankEntry.q.date <= end,
                BankEntry.q.debit == inputTotal))
        found = False
        for b in bList:
            b.hide = 1
            found = True
        return found
Example #13
File: rss.py Project: tyll/bodhi
    def get_critpath_updates(self, release=None, unapproved=None):
        i = 0
        entries = []
        base = config.get('base_address')
        title = 'Latest Critical Path Updates'
        query = [PackageUpdate.q.status != 'obsolete']
        if release:
            try:
                release = Release.byName(release)
            except SQLObjectNotFound:
                return dict(title='%s release not found' % release, entries=[])
            releases = [release]
            title = title + ' for %s' % release.long_name
        else:
            releases = Release.select()
        if unapproved:
            query.append(PackageUpdate.q.status != 'stable')
        for update in PackageUpdate.select(
                AND(
                    OR(*[
                        PackageUpdate.q.releaseID == release.id
                        for release in releases
                    ]), *query),
                orderBy=PackageUpdate.q.date_submitted).reversed():

            delta = datetime.utcnow() - update.date_submitted
            if delta and delta.days > config.get('feeds.num_days_to_show'):
                if len(entries) >= config.get('feeds.max_entries'):
                    break

            if update.critpath:
                if unapproved:
                    if update.critpath_approved:
                        continue
                entries.append({
                    'id': base + url(update.get_url()),
                    'summary': update.notes,
                    'link': base + url(update.get_url()),
                    'published': update.date_submitted,
                    'updated': update.date_submitted,
                    'title': update.title,
                })
                i += 1
        return dict(title=title,
                    subtitle="",
                    link=config.get('base_address') + url('/'),
                    entries=entries)
Example #14
def route(path):
    """

    @param path
    @return Response
    """

    urls = [path]
    tmp_str = str(path)
    while tmp_str != '':
        tmp_str = '/'.join(tmp_str.split('/')[0:-1])
        if tmp_str:
            urls.append(tmp_str)

    log.debug('Attempting to get urls %s', urls)
    record = Document.select(
        AND(IN(Document.q.url, urls),
            Document.q.archived == False,
            Document.q.published == True),
        orderBy=(DESC(Document.q.url), DESC(Document.q.created)),
        limit=1).getOne(None)

    if not record and 'index' in urls and 'auth_user' not in session:
        return redirect('/login')

    if not record:
        registry_type = REGISTRY['Error']
        public = registry_type['public']

        document = {'status': 404}
        controller = common.load_class(public['document_module'],
                                       public['document_class'], document,
                                       registry_type)
        return controller.get()

    document = Document.get_document(record)

    registry_type = REGISTRY[record.type]
    page = registry_type['public']
    controller = getattr(
        __import__(page['document_module'], fromlist=page['document_class']),
        page['document_class'])

    record_controller = controller(document, registry_type)
    return getattr(record_controller, str(request.method).lower())()
Example #15
    def database_gen(authorOrTitle=""):
        titles = []

        # start out with the join clauses in the where clause list
        where_clause_list = []
        clause_tables = ["book", "author", "author_title"]
        join_list = [
            LEFTJOINOn("title", "book", "book.title_id=title.id"),
            LEFTJOINOn(None, "author_title", "title.id=author_title.title_id"),
            LEFTJOINOn(None, "author", "author.id=author_title.author_id"),
        ]

        # add the author/title filter clause
        where_clause_list.append(
            "(author.author_name RLIKE '%s' OR title.booktitle RLIKE '%s')"
            % (authorOrTitle.strip(), authorOrTitle.strip())
        )
        # AND all where clauses together
        where_clause = AND(*where_clause_list)

        # do search.
        titles = Title.select(
            where_clause, join=join_list, clauseTables=clause_tables, distinct=True
        )
        for t1 in titles:
            yield {
                "title": t1.booktitle,
                "authors": t1.author,
                "authors_as_string": t1.authors_as_string(),
                "categories_as_string": t1.categories_as_string(),
                "list_price": t1.highest_price_book().ourprice,
                "publisher": t1.publisher,
                "isbn": t1.isbn,
                "format": t1.type,
                "kind": t1.kind.kindName,
                "known_title": t1,
            }
Example #16
def search_books(search_type, case_sensitive, values, join_expressions=None,
                 orderBy=None, use_filters=False):
    if use_filters:
        config = get_config()
        lang_filter = config.getlist('filters', 'lang')
        deleted_filter = config.getint('filters', 'deleted')
        if lang_filter:
            if join_expressions is None:
                join_expressions = []
            lang_conditions = []
            for lang in lang_filter:
                lvalues = {'name': lang}
                conditions = mk_search_conditions(
                    Language, search_type, case_sensitive, lvalues)
                lang_conditions.append(conditions)
            join_expressions.append(Book.j.language)
            join_expressions.append(OR(*lang_conditions))
    conditions = mk_search_conditions(
        Book, search_type, case_sensitive, values,
        join_expressions=join_expressions)
    if use_filters and not deleted_filter:
        conditions.append(Book.q.deleted == False)  # noqa: E712
    return Book.select(AND(*conditions), orderBy=orderBy)
Example #17
def testMiscOps():
    setupClass(SBButton)
    checkCount(AND(makeClause(), makeClause()), 2)
    checkCount(AND(makeClause(), EXISTS(makeSelect())), 2)
Example #18
def _search(table, search_type, case_sensitive, values,
            expressions=None, join_expressions=None, orderBy=None):
    conditions = mk_search_conditions(
        table, search_type, case_sensitive, values, expressions=expressions,
        join_expressions=join_expressions)
    return table.select(AND(*conditions), orderBy=orderBy)
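
A hypothetical call, borrowing the Book table and the mk_search_conditions helper from Example #16; the search type, column name, and value are assumptions made for illustration:

results = _search(Book, 'full', False, {'title': 'Dune'},
                  orderBy=Book.q.title)
for book in results:
    print(book.title)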
Example #19
    def do_work(self, message=None):
        """

        {
            "documents": [{
                "parent_id": "uuid"
            }],
            "all_documents": false
        }

        {
            "documents": [],
            "all_documents": true
        }

        uuid as filename
        {
            "document": {},
            "file": {},
        }

        full key name for file

        Manifest file structure
        {
            'documents': [
                {
                    'uuid': 'some-uuid',
                    'url': 'some-url',
                    'parent_url': 'some-parent-url',
                    'parent_uuid': 'some-parent-uuid'
                },
                ...
            ],
            'full': bool
        }

        :type message: boto.sqs.message.Message | None
        :param message:
        :return:
        """

        if not message:
            return

        contents = json.loads(message.get_body())

        job_id = str(contents['Message'])
        job = JobDB.selectBy(uuid=job_id).getOne(None)
        if not job:
            log.error('Cannot find job %s', job_id)
            raise InvalidJobError('Invalid Job ID: {0}'.format(job_id))

        job.set(status='running')

        and_ops = [Document.q.archived == False, Document.q.published == True]
        if not job.message.get('all_documents'):
            and_ops.append(self._get_document_query(job.message.get('document')))

        manifest = {
            'documents': [],
            'full': job.message.get('all_documents', False)
        }

        zip_contents = StringIO()
        zip_handle = zipfile.ZipFile(zip_contents, 'w', compression=zipfile.ZIP_DEFLATED)
        for document in Document.query(Document.all(), where=AND(*and_ops)):
            parent_document = self._get_document_parent_url(document.parent)
            manifest['documents'].append({
                'uuid': document.uuid,
                'url': document.url,
                'parent_url': None if not parent_document else parent_document.url,
                'parent_uuid': None if not parent_document else parent_document.uuid
            })

            self._handle_document(document, zip_handle)
            log.info('Adding document uuid=%s to zip archive', str(document.uuid))

        zip_handle.writestr('manifest', json.dumps(manifest))
        zip_handle.close()

        zip_key = Key(self.bucket, job_id)
        zip_key.content_type = 'application/zip'
        zip_key.set_contents_from_string(zip_contents.getvalue())
        log.info("Created ZIP for Job '%s'", str(job_id))

        message = job.message
        message['download'] = {
            'bucket': self.bucket.name,
            'key': job_id
        }
        job.set(status='complete', message=message)
        log.info('Setting job=%s to complete', job_id)
Example #20
    # get mirrors from database
    #
    mirrors = []
    if args:
        # select all mirrors matching the given identifiers
        result = conn.Server.select()
        for i in result:
            if i.identifier in args:
                mirrors.append(i)
    else:
        # select all enabled mirrors
        #
        # ignore wildcard mirrors, assuming that they can't be checked by
        # normal means (the baseurl itself may not return a 200; perhaps
        # only some files are served)
        result = conn.Server.select(
            AND(conn.Server.q.enabled, conn.Server.q.country != '**'))
        for i in result:
            mirrors.append(i)

    if not mirrors:
        sys.exit('no mirrors found')

    #
    # start work
    #
    logging.info('----- %s mirrors to check' % len(mirrors))

Example #21
def getInventory(queryTerms):
    print(queryTerms, file=sys.stderr)
    keys = list(queryTerms)
    print("keys are ", keys)
    for k in keys:
        if type(queryTerms[k]) == bytes:
            queryTerms[k] = queryTerms[k].decode("utf-8")

    isbnSelect = ""
    kindSelect = ""
    statusSelect = ""
    titleSelect = ""
    authorSelect = ""
    categorySelect = ""
    clauseTables = []

    if "kind" in keys:  # joins suck, avoid if possible
        kind_map = {}
        for k in [(x.kindName, x.id) for x in list(Kind.select())]:
            kind_map[k[0]] = k[1]
        try:
            kind_id = kind_map[queryTerms["kind"]]
            kindSelect = Book.sqlrepr(
                AND(
                    Field("book", "title_id") == Field("title", "id"),
                    Field("title", "kind_id") == kind_id,
                )
            )
        except KeyError:  # unknown kind name: skip the kind filter
            pass

    if "status" in keys:
        statusSelect = Book.sqlrepr(Field("book", "status") == queryTerms["status"])

    if (
        ("title" in keys)
        or ("authorName" in keys)
        or ("kind" in keys)
        or ("categoryName" in keys)
        or ("isbn" in keys)
    ):
        clauseTables.append("title")
        # we are going to need to do a join

        if "title" in keys:
            titleSelect = Book.sqlrepr(
                AND(
                    Field("book", "title_id") == Field("title", "id"),
                    RLIKE(Field("title", "booktitle"), queryTerms["title"]),
                )
            )

        if "isbn" in keys:
            isbn, price = _process_isbn(queryTerms["isbn"])
            print("isbn and price are ", isbn, price)
            titleSelect = Book.sqlrepr(
                AND(
                    Field("book", "title_id") == Field("title", "id"),
                    Field("title", "isbn") == isbn,
                )
            )

        if "authorName" in keys:
            # authorSelect="""book.title_id = title.id AND author.title_id=title.id AND author.author_name RLIKE %s""" % (Book.sqlrepr(queryTerms["authorName"]))
            authorSelect = Book.sqlrepr(
                AND(
                    Field("book", "title_id") == Field("title", "id"),
                    Field("author", "id") == Field("author_title", "author_id"),
                    Field("title", "id") == Field("author_title", "title_id"),
                    RLIKE(Field("author", "author_name"), queryTerms["authorName"]),
                )
            )
            clauseTables.append("author")
            clauseTables.append("author_title")

        if "categoryName" in keys:
            categorySelect = (
                """book.title_id = title.id AND category.title_id=title.id AND category.category_name RLIKE %s"""
                % (Book.sqlrepr(queryTerms["categoryName"]))
            )
            clauseTables.append("category")
    try:
        books = Book.select(
            " AND ".join(
                [
                    term
                    for term in [
                        statusSelect,
                        titleSelect,
                        authorSelect,
                        kindSelect,
                        categorySelect,
                    ]
                    if term != ""
                ]
            ),
            clauseTables=clauseTables,
            distinct=True,
        )
    except TypeError:
        books = Book.select(
            " AND ".join(
                [
                    term
                    for term in [
                        statusSelect,
                        titleSelect,
                        authorSelect,
                        kindSelect,
                        categorySelect,
                    ]
                    if term != ""
                ]
            ),
            clauseTables=clauseTables,
        )

    results = {}
    i = 1
    for book_for_info in books:
        theTitle = book_for_info.title.booktitle
        authorString = ", ".join([a.authorName for a in book_for_info.title.author])
        categoryString = ", ".join(
            [c.categoryName for c in book_for_info.title.categorys]
        )
        results[i] = (
            theTitle.capitalize(),
            authorString,
            book_for_info.listprice if book_for_info.listprice is not None else "",
            book_for_info.title.publisher
            if book_for_info.title.publisher is not None
            else "",
            book_for_info.status if book_for_info.status is not None else "",
            book_for_info.title.isbn,
            book_for_info.distributor if book_for_info.distributor is not None else "",
            book_for_info.location.locationName
            if book_for_info.location is not None
            else "",
            book_for_info.notes if book_for_info.notes is not None else "",
            book_for_info.id,
            book_for_info.title.kind and book_for_info.title.kind.kindName
            if book_for_info.title.kind is not None
            else "",
            categoryString,
            book_for_info.title.type if book_for_info.title.type is not None else "",
        )
    return results
Example #22
File: rss.py Project: tyll/bodhi
    def get_feed_data(self,
                      release=None,
                      type=None,
                      status=None,
                      comments=False,
                      submitter=None,
                      builds=None,
                      user=None,
                      package=None,
                      critpath=False,
                      unapproved=None,
                      *args,
                      **kw):
        query = []
        entries = []
        date = lambda update: update.date_pushed
        order = PackageUpdate.q.date_pushed
        title = []
        critpath = critpath in (True, 'True', 'true')
        unapproved = unapproved in (True, 'True', 'true')

        if critpath:
            return self.get_critpath_updates(release=release,
                                             unapproved=unapproved)
        if comments:
            return self.get_latest_comments(user=user)
        if package:
            return self.get_package_updates(package, release)
        if release:
            try:
                rel = Release.byName(release.upper())
            except SQLObjectNotFound:
                return dict(title='%s not found' % release, entries=[])
            query.append(PackageUpdate.q.releaseID == rel.id)
            title.append(rel.long_name)
        if type:
            query.append(PackageUpdate.q.type == type)
            title.append(type.title())
        if status:
            query.append(PackageUpdate.q.status == status)
            if status == 'pending':
                date = lambda update: update.date_submitted
                order = PackageUpdate.q.date_submitted
            else:
                # Let's only show pushed testing/stable updates
                query.append(PackageUpdate.q.pushed == True)
            title.append(status.title())
        else:
            query.append(PackageUpdate.q.pushed == True)

        if submitter:
            query.append(PackageUpdate.q.submitter == submitter)
            title.append("submitted by %s" % submitter)

        if builds:
            query.append(PackageUpdate.q.builds == builds)
            title.append("for %s" % builds)

        updates = PackageUpdate.select(AND(*query), orderBy=order).reversed()

        for update in updates:
            delta = datetime.utcnow() - update.date_submitted
            if delta and delta.days > config.get('feeds.num_days_to_show'):
                if len(entries) >= config.get('feeds.max_entries'):
                    break
            entries.append({
                'id':
                config.get('base_address') + url(update.get_url()),
                'summary':
                update.notes,
                'published':
                date(update),
                'link':
                config.get('base_address') + url(update.get_url()),
                'title':
                "%s %sUpdate: %s" %
                (update.release.long_name,
                 update.type == 'security' and 'Security ' or '', update.title)
            })
            if len(update.bugs):
                bugs = "<b>Resolved Bugs</b><br/>"
                for bug in update.bugs:
                    bugs += "<a href=%s>%d</a> - %s<br/>" % (
                        bug.get_url(), bug.bz_id, bug.title)
                entries[-1]['summary'] = "%s<br/>%s" % (bugs[:-2],
                                                        entries[-1]['summary'])

        title.append('Updates')

        return dict(title=' '.join(title),
                    subtitle="",
                    link=config.get('base_address') + url('/'),
                    entries=entries)
Example #23
def main():
    """
    Command-line interface to fetch Tweet data for Profile Categories.
    """
    parser = argparse.ArgumentParser(
        description="""Fetch Tweets for Profiles utility. Filter Profiles
            in the DB using a Category input, update them with new data
            and insert or update the most recent Tweets for each.
            Tweets are assigned to the '{0}' Campaign.""".format(
            UTILITY_CAMPAIGN
        )
    )

    viewGrp = parser.add_argument_group("View", "Print data to stdout")
    viewGrp.add_argument(
        "-a",
        "--available",
        action="store_true",
        help="Output available Categories in db, with Profile counts for each.",
    )

    updateGrp = parser.add_argument_group(
        "Update", "Create or update Tweet records."
    )
    updateGrp.add_argument(
        "-c",
        "--categories",
        metavar="CATEGORY",
        nargs="+",
        help="""List of one or more existing Categories names in the db.
            Profiles are filtered to only those which have been assigned to at
            least one of the supplied CATEGORY values, then their Tweets
            are fetched and stored. Values must be separated by a space and any
            multi-word values or values containing a hash symbol must be
            enclosed in single quotes.
            e.g. -c 'first cat' second 'third cat' '#fourth'
        """,
    )
    updateGrp.add_argument(
        "-t",
        "--tweets-per-profile",
        type=int,
        metavar="N",
        default=200,
        help="""Default 200. Count of Tweets to fetch and store for each
            profile. A value up to 200 takes a fixed time to query one
            page of Tweets for a Profile, while higher values require
            querying more pages and therefore will take longer per
            Profile and lead to a higher chance of hitting rate limits.
            A higher value also requires additional time to create or update
            records.
        """,
    )
    updateGrp.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="""If supplied, pretty print Tweet data fetched from the
            Twitter API. Otherwise only a count of Tweets is printed
            upon completion.
        """,
    )
    updateGrp.add_argument(
        "-n",
        "--no-write",
        action="store_true",
        help="If supplied, do not write data to the db.",
    )
    updateGrp.add_argument(
        "-u",
        "--update-all-fields",
        action="store_true",
        help="""If supplied, update all fields when updating an existing
            local Tweet record. Otherwise, the default behavior is to
            only update the favorite and retweet counts of the record.
        """,
    )

    args = parser.parse_args()

    if args.available:
        printAvailableCategories()
    elif args.categories:
        inputCategories = args.categories

        categoryResult = db.Category.select(IN(db.Category.q.name, inputCategories))
        dbCategoryNames = [c.name for c in list(categoryResult)]
        missing = set(inputCategories) - set(dbCategoryNames)
        assert not missing, "Input categories not found in db: \n- {0}".format(
            "\n- ".join(missing)
        )

        # Here the AND is required to include SQLObject j-magic, so that
        # Profiles are filtered by Category.
        profResults = db.Profile.select(
            AND(db.Profile.j.categories, IN(db.Category.q.name, inputCategories))
        )
        profCount = profResults.count()
        print("Fetching Tweets for {0:,d} Profiles".format(profCount))

        try:
            campaignRec = db.Campaign.byName(UTILITY_CAMPAIGN)
        except SQLObjectNotFound:
            campaignRec = db.Campaign(name=UTILITY_CAMPAIGN, searchQuery=None)

        if profCount:
            insertOrUpdateTweetBatch(
                profResults,
                args.tweets_per_profile,
                verbose=args.verbose,
                writeToDB=not (args.no_write),
                campaignRec=campaignRec,
                onlyUpdateEngagements=not (args.update_all_fields),
            )
        else:
            raise ValueError(
                "No profiles were found in the categories"
                " provided: {}".format(inputCategories)
            )
Example #24
    def getInventory(self, queryTerms):
        keys = queryTerms.keys()

        isbnSelect = ""
        kindSelect = ""
        statusSelect = ""
        titleSelect = ""
        authorSelect = ""
        categorySelect = ""
        clauseTables = []

        if "kind" in keys:  # joins suck, avoid if possible
            kind_map = {}
            for k in [(x.kindName, x.id) for x in list(Kind.select())]:
                kind_map[k[0]] = k[1]
            try:
                kind_id = kind_map[queryTerms['kind']]
                kindSelect = Book.sqlrepr(
                    AND(
                        Field("book", "title_id") == Field("title", "id"),
                        Field("title", "kind_id") == kind_id))
            except KeyError:  # unknown kind name: skip the kind filter
                pass

        if 'status' in keys:
            statusSelect = Book.sqlrepr(
                Field("book", "status") == queryTerms["status"])

        if ('title' in keys) or ('authorName' in keys) or ('kind' in keys) or (
                'categoryName' in keys) or ('isbn' in keys):
            clauseTables.append('title')
            # we are going to need to do a join

            if 'title' in keys:
                titleSelect = Book.sqlrepr(
                    AND(
                        Field("book", "title_id") == Field("title", "id"),
                        RLIKE(Field("title", "booktitle"),
                              queryTerms["title"])))

            if 'isbn' in keys:
                titleSelect = Book.sqlrepr(
                    AND(
                        Field("book", "title_id") == Field("title", "id"),
                        Field("title", "isbn") == queryTerms["isbn"]))

            if 'authorName' in keys:
                #~ authorSelect="""book.title_id = title.id AND author.title_id=title.id AND author.author_name RLIKE %s""" % (Book.sqlrepr(queryTerms["authorName"]))
                authorSelect = Book.sqlrepr(
                    AND(
                        Field("book", "title_id") == Field("title", "id"),
                        Field("author", "id") == Field("author_title",
                                                       "author_id"),
                        Field("title", "id") == Field("author_title",
                                                      "title_id"),
                        RLIKE(Field("author", "author_name"),
                              queryTerms["authorName"])))
                clauseTables.append('author')
                clauseTables.append('author_title')

            if 'categoryName' in keys:
                #~ categorySelect="""book.title_id = title.id AND category.title_id=title.id AND category.category_name RLIKE %s""" % (Book.sqlrepr(queryTerms["categoryName"]))
                categorySelect = Book.sqlrepr(
                    AND(
                        Field("book", "title_id") == Field("title", "id"),
                        Field("category", "title_id") == Field("title", "id"),
                        RLIKE(Field("category", "category_name"),
                              queryTerms["categoryName"])))
                clauseTables.append('category')

        # At this time, Ubuntu installs SQLObject 0.6.1 when you apt-get
        # install python2.4-sqlobject, which makes the search crash, since
        # the distinct attribute was only added after 0.6.1
        try:
            books = Book.select(string.join([
                term for term in [
                    statusSelect, titleSelect, authorSelect, kindSelect,
                    categorySelect
                ] if term != ""
            ], " AND "),
                                clauseTables=clauseTables,
                                distinct=True)
        except TypeError:
            books = Book.select(string.join([
                term for term in [
                    statusSelect, titleSelect, authorSelect, kindSelect,
                    categorySelect
                ] if term != ""
            ], " AND "),
                                clauseTables=clauseTables)

        results = {}
        i = 1
        for b in books:
            theTitle = b.title.booktitle.decode("unicode_escape")
            if b.notes is None:
                b.notes = ""
            authorString = string.join([
                a.author_name.decode("unicode_escape") for a in b.title.author
            ], ",")
            results[i] = (string.capitalize(theTitle), authorString,
                          b.listprice,
                          b.title.publisher.decode("unicode_escape"),
                          b.status.decode("unicode_escape"), b.title.isbn,
                          b.distributor.decode("unicode_escape"),
                          b.notes.decode("unicode_escape"), b.id,
                          b.title.kind and b.title.kind.kindName or '')
            i = i + 1

        return results
Example #25
def main():

    #
    # read config file
    #
    brain_instance = None
    if '-b' in sys.argv:
        brain_instance = sys.argv[sys.argv.index('-b') + 1]

    import mb.conf
    config = mb.conf.Config(instance = brain_instance)


    LOGLEVEL = config.mirrorprobe.get('loglevel', 'INFO')
    LOGFILE = config.mirrorprobe.get('logfile', '/var/log/mirrorbrain/mirrorprobe.log')
    MAILTO = config.mirrorprobe.get('mailto', 'root@localhost')

    #
    # parse commandline
    #
    parser = OptionParser(usage="%prog [options] [<mirror identifier>+]", version="%prog 1.0")

    parser.add_option("-b", "--brain-instance",
                      dest="brain_instance",
                      default=None,
                      help="name of the MirrorBrain instance to be used",
                      metavar="NAME")

    parser.add_option("-l", "--log",
                      dest="logfile",
                      default=LOGFILE,
                      help="path to logfile",
                      metavar="LOGFILE")

    parser.add_option("-L", "--loglevel",
                      dest="loglevel",
                      default=LOGLEVEL,
                      help="Loglevel (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
                      metavar="LOGLEVEL")

    parser.add_option("-T", "--mailto",
                      dest="mailto",
                      default=MAILTO,
                      help="email adress to mail warnings to",
                      metavar="EMAIL")

    parser.add_option("-t", "--timeout",
                      dest="timeout",
                      default=20,
                      help="Timeout in seconds",
                      metavar="TIMEOUT")

    parser.add_option("-n", "--no-run",
                      dest="no_run", 
                      default=False,
                      action="store_true", 
                      help="don't update the database. Only look")

    parser.add_option("-e", "--enable-revived",
                      dest="enable_revived", 
                      default=False,
                      action="store_true", 
                      help="enable revived servers")

    (options, args) = parser.parse_args()

    socket.setdefaulttimeout(int(options.timeout))


    # an "are you alive check" is relatively useless if it is answered by an intermediate cache
    for i in ['http_proxy', 'HTTP_PROXY', 'ftp_proxy', 'FTP_PROXY']:
        if i in os.environ:
            del os.environ[i]

    LOGFORMAT = '%(asctime)s ' + config.instance + ' %(levelname)-8s %(message)s'
    DATEFORMAT = '%b %d %H:%M:%S'

    #
    # set up logging
    #
    # to file
    logging.basicConfig(level=logging.getLevelName(options.loglevel),
                        format=LOGFORMAT,
                        datefmt=DATEFORMAT,
                        filename=options.logfile,
                        filemode='a')
    ## to console
    #console = logging.StreamHandler()
    #console.setLevel(logging.getLevelName(options.loglevel))
    #formatter = logging.Formatter(LOGFORMAT, DATEFORMAT)
    #console.setFormatter(formatter)
    #logging.getLogger('').addHandler(console)

    # warnings will be mailed
    try:
        fromdomain = socket.gethostbyaddr(socket.gethostname())[0] 
    except Exception:
        fromdomain = ''
    toaddrs = [ i.strip() for i in options.mailto.split(',') ]
    mail = logging.handlers.SMTPHandler('localhost', 
                                        'root@' + fromdomain,
                                        toaddrs,
                                        'no_subject')
    mail.setLevel(logging.WARNING)
    mailformatter = logging.Formatter(LOGFORMAT, DATEFORMAT)
    mail.setFormatter(mailformatter)
    logging.getLogger('').addHandler(mail)


    #
    # setup database connection
    #
    import mb.conn
    conn = mb.conn.Conn(config.dbconfig, 
                        debug = (options.loglevel == 'DEBUG'))


    #
    # get mirrors from database
    #
    mirrors = []
    if args:
        # select all mirrors matching the given identifiers
        result = conn.Server.select()
        for i in result:
            if i.identifier in args:
                mirrors.append(i)
    else:
        # select all enabled mirrors
        #
        # ignore wildcard mirrors, assuming that they can't be checked by
        # normal means (the baseurl itself may not return a 200; perhaps
        # only some files are served)
        result = conn.Server.select(
            AND(conn.Server.q.enabled, conn.Server.q.country != '**'))
        for i in result:
            mirrors.append(i)

    if not mirrors:
        sys.exit('no mirrors found')

    #
    # start work
    #
    logging.info('----- %s mirrors to check' % len(mirrors))

    for i, mirror in enumerate(mirrors):
        #mirror.status_baseurl_new = False
        #mirror.timed_out = True

        t = threading.Thread(target=probe_http, 
                             args=[mirrors[i]], 
                             name="probeThread-%s (%s)" % (mirror.id, mirror.identifier))
        # thread will keep the program from terminating.
        t.setDaemon(0)
        t.start()

    while threading.activeCount() > 1:
        logging.debug('waiting for %s threads to exit' % (threading.activeCount() - 1))
        time.sleep(1)


    for mirror in mirrors:

        logging.debug('%s: baseurl %s / baseurl_new %s' % (mirror.identifier, repr(mirror.statusBaseurl), repr(mirror.status_baseurl_new)))
        # old failure
        if not mirror.statusBaseurl and not mirror.status_baseurl_new:

            if mirror.response_code and (mirror.response_code != 200):

                mail.getSubject = lambda x: '[%s] mirrorprobe warning: %s replied with %s' \
                                    % (config.instance, mirror.identifier, mirror.response_code)

                logging.debug("""%s: (%s): response code not 200: %s: 
I am not disabling this host, and continue to watch it...
""" % (mirror.identifier, mirror.baseurl, mirror.response_code))

                # reset the getSubject method...
                mail.getSubject = lambda x: 'no subject set'

                #comment = mirror.comment or ''
                #comment += ('\n*** mirrorprobe, %s: got status code %s' % (time.ctime(), mirror.response_code))
                logging.debug('setting statusBaseurl=0 for %s' % (mirror.identifier))
                if not options.no_run:
                    # mirror.enabled = 0
                    mirror.statusBaseurl = 0
                    #mirror.comment = comment

            logging.debug('still dead: %s (%s): %s: %s' % (mirror.identifier, mirror.baseurl, mirror.response_code, mirror.response))

        # alive
        elif mirror.statusBaseurl and mirror.status_baseurl_new:
            logging.debug('alive: %s: %s' % (mirror.identifier, mirror.response))
            if mirror.enabled == 0 and options.enable_revived:
                logging.info('re-enabling %s' % mirror.identifier)
                if not options.no_run:
                    reenable(mirror)

        # new failure
        elif not mirror.status_baseurl_new and mirror.statusBaseurl:
            logging.info('FAIL: %s (%s): %s' % (mirror.identifier, mirror.baseurl, mirror.response))
            logging.debug('setting status_baseurl=0 for %s (id=%s)' % (mirror.identifier, mirror.id))
            if not options.no_run:
                mirror.statusBaseurl = 0

        # revived
        elif not mirror.statusBaseurl and mirror.status_baseurl_new == 1:
            logging.info('REVIVED: %s' % mirror.identifier)
            logging.debug('setting status_baseurl=1 for %s (id=%s)' % (mirror.identifier, mirror.id))
            if not options.no_run:
                mirror.statusBaseurl = 1
            if options.enable_revived:
                logging.info('re-enabling %s' % mirror.identifier)
                if not options.no_run:
                    reenable(mirror)
Example #26
def searchInventory(sortby="booktitle", out_of_stock=False, **kwargs):
    # start building the filter list
    where_clause_list = []
    print("kwargs are ", kwargs, file=sys.stderr)
    for k in kwargs:
        if type(kwargs[k]) == bytes:
            kwargs[k] = kwargs[k].decode("utf-8")
    to_delete = [k for k in kwargs if kwargs[k] == ""]
    for td in to_delete:
        del kwargs[td]
    print(len(kwargs), file=sys.stderr)

    # clause_tables=['book', 'author', 'author_title', 'category', 'location']
    clause_tables = ["book", "author", "author_title", "location"]
    join_list = [
        LEFTJOINOn("title", "book", "book.title_id=title.id"),
        LEFTJOINOn(None, "author_title", "title.id=author_title.title_id"),
        LEFTJOINOn(None, "author", "author.id=author_title.author_id"),
        LEFTJOINOn(None, Location, Location.q.id == Book.q.locationID),
    ]
    # join_list=[LEFTJOINOn('title', 'book', 'book.title_id=title.id'), LEFTJOINOn(None, 'author_title', 'title.id=author_title.title_id'), LEFTJOINOn(None, 'author', 'author.id=author_title.author_id'), LEFTJOINOn(None, Category, Category.q.titleID==Title.q.id), LEFTJOINOn(None, Location, Location.q.id==Book.q.locationID)]
    if "the_kind" in kwargs:
        where_clause_list.append(Title.q.kindID == kwargs["the_kind"])
    if "the_location" in kwargs and len(the_location) > 1:
        where_clause_list.append(Book.q.locationID == kwargs["the_location"])
    if "title" in kwargs:
        where_clause_list.append(RLIKE(Title.q.booktitle, kwargs["title"].strip()))
    if "publisher" in kwargs:
        where_clause_list.append(RLIKE(Title.q.publisher, kwargs["publisher"].strip()))
    if "tag" in kwargs:
        where_clause_list.append(RLIKE(Title.q.tag, kwargs["tag"].strip()))
    if "isbn" in kwargs:
        isbn, price = _process_isbn(kwargs["isbn"])
        where_clause_list.append(Title.q.isbn == isbn)
    if "formatType" in kwargs:
        where_clause_list.append(Title.q.type == kwargs["formatType"].strip())
    if "owner" in kwargs:
        where_clause_list.append(RLIKE(Book.q.owner, kwargs["owner"].strip()))
    if "distributor" in kwargs:
        where_clause_list.append(
            RLIKE(Book.q.distributor, kwargs["distributor"].strip())
        )
    if "inv_begin_date" in kwargs:
        where_clause_list.append(Book.q.inventoried_when >= kwargs["inv_begin_date"])
    if "inv_end_date" in kwargs:
        where_clause_list.append(Book.q.inventoried_when < kwargs["inv_end_date"])
    if "sold_begin_date" in kwargs:
        where_clause_list.append(Book.q.sold_when >= kwargs["sold_begin_date"])
    if "sold_end_date" in kwargs:
        where_clause_list.append(Book.q.sold_when < kwargs["sold_end_date"])
    if "author" in kwargs:
        where_clause_list.append(RLIKE(Author.q.authorName, kwargs["author"].strip()))
    if "category" in kwargs:
        where_clause_list.append(
            RLIKE(Category.q.categoryName, kwargs["category"].strip())
        )
    if "status" in kwargs:
        where_clause_list.append(Book.q.status == kwargs["status"].strip())
    if "id" in kwargs:
        where_clause_list.append(Title.q.id == kwargs["id"])
    if "authorOrTitle" in kwargs:
        where_clause_list.append(
            OR(
                RLIKE(Author.q.authorName, kwargs["authorOrTitle"].strip()),
                RLIKE(Title.q.booktitle, kwargs["authorOrTitle"].strip()),
            )
        )

    where_clause = AND(*where_clause_list)

    # do the search first. Note that it currently doesn't let you search for
    # every book in the database unless you use some sort of trick like '1=1'
    # for the where clause string, as the where clause may not be blank
    titles = []
    if len(kwargs) > 1 or out_of_stock:
        titles = Title.select(
            where_clause,
            join=join_list,
            orderBy=sortby,
            clauseTables=clause_tables,
            distinct=True,
        )
    # filter for stock status
    # GROUPBY in sqlobject is complicated. We could do it but it's not worth it
    if "out_of_stock" in kwargs:
        titles = [t for t in titles if t.copies_in_status("STOCK") == 0]
    # filter on specific numbers in stock
    if "stock_less_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("STOCK") <= int(kwargs["stock_less_than"])
        ]
    if "stock_more_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("STOCK") >= int(kwargs["stock_more_than"])
        ]
    # filter by items sold
    if "sold_more_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("SOLD") >= int(kwargs["sold_more_than"])
        ]
    if "sold_less_than" in kwargs:
        titles = [
            t
            for t in titles
            if t.copies_in_status("SOLD") >= int(kwargs["sold_less_than"])
        ]
    print(titles, file=sys.stderr)
    return titles
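
The '1=1' trick the comment alludes to relies on SQLObject's select() accepting a raw SQL string as a where clause, the same mechanism Examples #21 and #24 use. A sketch of what an explicit fallback could look like:

if where_clause_list:
    where_clause = AND(*where_clause_list)
else:
    where_clause = '1=1'  # raw-SQL always-true clause: match every row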
Example #27
    def run(self):
        serie = Serie.getSeries()[self.serieLocalID]
        self.serieUpdateStatus.emit(self.serieLocalID, 0,
                                    {'title': serie.title})

        try:
            tvDb = TheTVDBSerie(serie.tvdbID, serie.lang)
        except Exception as e:
            qDebug("Error download" + str(e))
            return False

        # Info serie
        serieInfos = tvDb.infos_serie()
        bannerPath = '%s%s.jpg' % (SERIES_BANNERS, serie.uuid)
        tvDb.download_banner(bannerPath)

        if serieInfos is None:
            return

        serie.description = serieInfos['description']
        serie.firstAired = datetime.strptime(serieInfos['firstAired'],
                                             '%Y-%m-%d')
        serie.lastUpdated = int(serieInfos['lastUpdated'])
        self.serieUpdateStatus.emit(self.serieLocalID, 1,
                                    {'title': serie.title})

        # Info episode
        episodesDb = {(e.season, e.episode) for e in serie.episodes}
        episodeList = set()
        for e in tvDb.episodes():
            number = (e['season'], e['episode'])
            episodeList.add(number)
            if e['firstAired']:
                firstAired = datetime.strptime(e['firstAired'], '%Y-%m-%d')
            else:
                firstAired = None
            if number in episodesDb:
                episode = list(Episode.select(
                    AND(Episode.q.season == int(e['season']),
                        Episode.q.episode == int(e['episode']),
                        Episode.q.serie == serie)))[0]
                episode.firstAired = firstAired
                episode.title = unicode(e['title'])
                episode.description = unicode(e['description'])
            else:
                Episode(
                    title = unicode(e['title']),
                    description = unicode(e['description']),
                    season = int(e['season']),
                    episode = int(e['episode']),
                    firstAired = firstAired,
                    serie = serie
                )

        toDelete = episodesDb - episodeList
        for season, episode in toDelete:
            Episode.deleteBy(serie=serie, season=season, episode=episode)

        # Create image path
        imgDir = SERIES_IMG + serie.uuid
        if not os.path.isdir(imgDir):
            os.mkdir(imgDir)

        # Miniature DL
        self.serieUpdateStatus.emit(
            self.serieLocalID, 2, {'title': serie.title})

        for i, nbImages in tvDb.download_miniatures(imgDir):
            self.serieUpdateStatus.emit(
                self.serieLocalID, 3,
                {'title': serie.title, 'nb': i, 'nbImages': nbImages})

        self.serieUpdated.emit(self.serieLocalID)
        serie.lastUpdate = datetime.now()
        serie.setLoaded(True)
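
The `list(Episode.select(...))[0]` lookup could also be written with selectBy and getOne, the pattern Examples #14 and #19 use; a sketch with the same column names as above:

episode = Episode.selectBy(
    season=int(e['season']),
    episode=int(e['episode']),
    serie=serie).getOne(None)
if episode is not None:
    episode.firstAired = firstAired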