Example #1
File: stats.py Project: jrburke/f1
 def history(self):
     start = request.params.get('start', None)
     end = request.params.get('end', None)
     limit = int(request.params.get('days', '0'))
     opts = request.params.get('opts', '').split(',')
     whereclause = []
     vars = {}
     groupby = []
     if limit and not start and not end:
         whereclause.append(History.published >= UTCDateTime.now() - timedelta(days=limit))
     if start:
         whereclause.append(History.published >= UTCDateTime.from_string(start))
     if end:
         whereclause.append(History.published < UTCDateTime.from_string(end))
     if 'perday' in opts:
         if 'domain' in opts:
             s = select([func.date_format(History.published, "%Y-%m-%d"), History.domain, func.count(History.domain)])
             groupby.append(func.to_days(History.published))
             groupby.append(History.domain)
         else:
             s = select([func.date_format(History.published, "%Y-%m-%d"), func.count(History.id)])
             groupby.append(func.to_days(History.published))
     else:
         s = select([func.count(History.domain), History.domain])
         groupby.append(History.domain)
     
     if whereclause:
         s = s.where(and_(*whereclause))
     if groupby:
         s = s.group_by(*groupby)
     return [list(a) for a in Session.execute(s).fetchall()]
Example #2
File: stats.py Project: jrburke/f1
    def accounts(self):
        start = request.params.get('start', None)
        end = request.params.get('end', None)
        limit = int(request.params.get('days', '0'))
        opts = request.params.get('opts', '').split(',')
        groupby = []
        whereclause = []
        if limit and not start and not end:
            whereclause.append(Account.created >= UTCDateTime.now() - timedelta(days=limit))
        if start:
            whereclause.append(Account.created >= UTCDateTime.from_string(start))
        if end:
            whereclause.append(Account.created < UTCDateTime.from_string(end))
        if 'perday' in opts:
            if 'domain' in opts:
                s = select([func.date(Account.created), Account.domain, func.count(Account.id)])
                groupby.append(func.to_days(Account.created))
                groupby.append(Account.domain)
            else:
                s = select([func.date(Account.created), func.count(Account.id)])
                groupby.append(func.to_days(Account.created))
        else:
            s = select([func.count(Account.domain), Account.domain])
            groupby.append(Account.domain)

        if whereclause:
            s = s.where(and_(*whereclause))
        if groupby:
            s = s.group_by(*groupby)
        return [list(a) for a in Session.execute(s).fetchall()]
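
Examples #1 and #2 are near-duplicates that differ only in the model and datetime column being filtered. As a hedged sketch (the helper name and signature are illustrative, not part of the jrburke/f1 project), the shared date-window logic could be factored out:

def date_window_clauses(column, start, end, days):
    """Build the WHERE clauses shared by history() and accounts() above.

    `column` is the datetime column to constrain (History.published or
    Account.created), `start`/`end` are optional date strings, and `days`
    is an optional fallback window in days.
    """
    clauses = []
    if days and not start and not end:
        clauses.append(column >= UTCDateTime.now() - timedelta(days=days))
    if start:
        clauses.append(column >= UTCDateTime.from_string(start))
    if end:
        clauses.append(column < UTCDateTime.from_string(end))
    return clauses

Each method would then reduce to its select/group-by branches plus a single call such as whereclause = date_window_clauses(History.published, start, end, limit).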
Example #3
    def list_episode(self, page, count, sort_field, sort_order, status):
        try:

            session = SessionManager.Session()
            query_object = session.query(Episode).\
                filter(Episode.delete_mark == None)

            if status is not None:
                query_object = query_object.filter(Episode.status == status)
                # count total rows, also excluding soft-deleted episodes so the
                # total matches the filtered listing below
                total = session.query(func.count(Episode.id)).\
                    filter(Episode.delete_mark == None).\
                    filter(Episode.status == status).scalar()
            else:
                total = session.query(func.count(Episode.id)).\
                    filter(Episode.delete_mark == None).scalar()

            offset = (page - 1) * count

            if sort_order == 'desc':
                episode_list = query_object.\
                    order_by(desc(getattr(Episode, sort_field))).\
                    offset(offset).\
                    limit(count).\
                    all()
            else:
                episode_list = query_object.\
                    order_by(asc(getattr(Episode, sort_field))).\
                    offset(offset).limit(count).\
                    all()

            episode_dict_list = [row2dict(episode, Episode) for episode in episode_list]

            return json_resp({'data': episode_dict_list, 'total': total})
        finally:
            SessionManager.Session.remove()
Example #4
    def count(self, group_by=None, search=None, vuln_filter=None):
        vuln_filter = vuln_filter or {}  # avoid sharing a mutable default argument
        query = self._session.query(Vulnerability.vuln_type, func.count())\
                             .group_by(Vulnerability.vuln_type)
        query = apply_search_filter(query, self.COLUMNS_MAP, search, vuln_filter)
        total_count = dict(query.all())

        # Return the total number of vulnerabilities if no group-by field was provided
        result_count = { 'total_count':    sum(total_count.values()),
                         'web_vuln_count': total_count.get('VulnerabilityWeb', 0),
                         'vuln_count':     total_count.get('Vulnerability', 0), }

        if group_by is None:
            return result_count

        # Otherwise return the counts grouped by the specified field.
        # Skip group-by counting on fields that don't map to exactly one column.
        if group_by not in VulnerabilityDAO.COLUMNS_MAP or\
           len(VulnerabilityDAO.COLUMNS_MAP.get(group_by)) != 1:
            return None

        col = VulnerabilityDAO.COLUMNS_MAP.get(group_by)[0]
        vuln_bundle = Bundle('vuln', Vulnerability.id, col)
        query = self._session.query(vuln_bundle, func.count())\
                             .group_by(col)\
                             .outerjoin(EntityMetadata, EntityMetadata.id == Vulnerability.entity_metadata_id)

        query = apply_search_filter(query, self.COLUMNS_MAP, search, vuln_filter, self.STRICT_FILTERING)
        result = query.all()

        result_count['groups'] = [ { group_by: value[1], 'count': count } for value, count in result ]

        return result_count
Example #5
def organizations_and_counters():
    '''Query organizations with their counters'''
    query = DB.query(Group,
        func.count(distinct(Package.id)).label('nb_datasets'),
        func.count(distinct(Member.id)).label('nb_members')
    )
    query = query.outerjoin(CertifiedPublicService)
    query = query.outerjoin(Package, and_(
        Group.id == Package.owner_org,
        ~Package.private,
        Package.state == 'active',
    ))
    query = query.outerjoin(Member, and_(
        Member.group_id == Group.id,
        Member.state == 'active',
        Member.table_name == 'user'
    ))
    query = query.filter(Group.state == 'active')
    query = query.filter(Group.approval_status == 'approved')
    query = query.filter(Group.is_organization == True)
    query = query.group_by(Group.id, CertifiedPublicService.organization_id)
    query = query.order_by(
        CertifiedPublicService.organization_id == null(),
        desc('nb_datasets'),
        desc('nb_members'),
        Group.title
    )
    query = query.options(orm.joinedload(Group.certified_public_service))
    return query
Example #6
    def _analyzes_installed_packages_most_installed_world(self, post_dot_digits):
        pool_join = _gentoo_installed_packages_table.\
                join(_gentoo_installed_package_props_table).\
                join(_gentoo_package_pool_table)
        query = select([
                    GentooInstalledPackagesRel.package_id,
                    GentooPackageString.name,
                    func.count(GentooInstalledPackagesRel.machine_id.distinct())],
                from_obj=[pool_join]).\
                where(
                    GentooInstalledPackagePropertiesRel.world == 1).\
                group_by(
                    GentooInstalledPackagesRel.package_id).\
                order_by(
                    func.count(GentooInstalledPackagesRel.machine_id.distinct()).desc(),
                    GentooPackageString.name).\
                limit(_MAX_INSTALLED_PACKAGES)

        package_id_order = []
        package_dict = {}
        for i in query.execute().fetchall():
            package_id, package_name, machine_count = i

            package_id_order.append(package_id)
            package_dict[package_id] = {
                'name': package_name,
                'absolute_total': machine_count,
                'relative_total': None,
                'slots': {}
            }

        for package_id, package_data in package_dict.items():
            package_data['relative_total'] = self._relative(package_data['absolute_total'], self.gentoo_machines, post_dot_digits)

        # NOTE: under Python 3, map() returns an iterator; wrap in list() if a list is needed.
        return map(package_dict.get, package_id_order)
Example #7
    def get(self):
        args = self.reqparse.parse_args()
        get_current = args.current or 1
        get_limit = args.limit or 10
        get_skip = args.skip or 0
        sort = args.sort or 'name'
        # Build a real ORDER BY clause; concatenating "-" onto the column name
        # does not produce a descending sort in SQLAlchemy.
        order_clause = desc(sort) if args.order == 'desc' else asc(sort)
        if get_current:
            get_skip = (get_current * get_limit) - get_limit
        if args.filter:
            total_records = self.dbSession.query(func.count(Genre.id)).\
                filter(Genre.name.like("%" + args.filter + "%")).scalar()
            # slice() takes (start, stop), not (start, count)
            genres = self.dbSession.query(Genre).filter(Genre.name.like("%" + args.filter + "%")) \
                .order_by(order_clause).slice(get_skip, get_skip + get_limit)
        else:
            total_records = self.dbSession.query(func.count(Genre.id)).scalar()
            genres = self.dbSession.query(Genre).order_by(order_clause).offset(get_skip).limit(get_limit)

        rows = []
        if genres:
            for genre in genres:
                rows.append({
                    "genreId": genre.roadieId,
                    "name": genre.name,
                    "createdDate": genre.createdDate.isoformat(),
                    "lastUpdated": "" if not genre.lastUpdated else genre.lastUpdated.isoformat()
                })

        return jsonify(rows=rows, current=args.current or 1, rowCount=len(rows), total=total_records, message="OK")
Example #8
    def index(self):
        c.name = config['app_conf']['site_name']
        c.title = 'Welcome'
        c.messages = []

        c.session = Session()

        file_q = c.session.query(File)

        first_file = file_q.first()

        if first_file is None:
            return render('/derived/show/please_scan.html')

        c.repo_url = first_file.root
        c.total_size = c.session.query(func.sum(File.size)).one()[0]
        c.file_count = file_q.count()
        c.avg_size = c.session.query(func.avg(File.size)).one()[0]

        res = c.session.execute(select([File.ext,
            func.count(File.ext)]).group_by(File.ext).order_by(func.count(File.ext).desc())).fetchone()

        c.ext_string = res[0]
        c.ext_count = res[1]

        c.revision = c.session.query(func.max(Revision.id)).one()[0]

        c.asset_count = c.session.query(Asset).count()
        c.collection_count = c.session.query(Collection).count()

        return render('/derived/show/index.html')
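
A small aside on the pattern above: each `.one()[0]` aggregate read can be written with SQLAlchemy's `Query.scalar()`, which returns the first column of the first row directly. Applied to the same session and models from this example:

        c.total_size = c.session.query(func.sum(File.size)).scalar()
        c.avg_size = c.session.query(func.avg(File.size)).scalar()
        c.revision = c.session.query(func.max(Revision.id)).scalar()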
Example #9
def mention_stats():
    check_valid_arguments(request.args, ["to", "from", "count"])
    time_from, time_to = validate_and_set_interval(request.args)

    if 'count' in request.args:
        validate_count(request.args['count'])
        mentions = User\
            .query\
            .join(tweet_mentions_user)\
            .join(TweetWish)\
            .filter(func.unix_timestamp(TweetWish.created_at) < time_to)\
            .filter(func.unix_timestamp(TweetWish.created_at) >= time_from)\
            .add_columns(func.count(User.id))\
            .group_by(User.id)\
            .order_by(desc(func.count(User.id)))\
            .limit(request.args['count'])\
            .all()
    else:
        mentions = User\
            .query\
            .join(tweet_mentions_user)\
            .join(TweetWish)\
            .filter(func.unix_timestamp(TweetWish.created_at) < time_to)\
            .filter(func.unix_timestamp(TweetWish.created_at) >= time_from)\
            .add_columns(func.count(User.id))\
            .group_by(User.id)\
            .order_by(desc(func.count(User.id)))\
            .all()

    serialized = []
    for result in mentions:
        serialized.append({'user': result[0].json_dump(),
                           'mention_count': result[1]})
    return jsonify(popular_users=serialized)
Example #10
    def _get_result_counts(self, cutoff):
        build_stats = dict(
            db.session.query(Build.result, func.count())
            .filter(Build.date_created >= cutoff, Build.status == Status.finished, Build.result != Result.unknown)
            .group_by(Build.result)
        )

        job_stats = dict(
            db.session.query(Job.result, func.count())
            .filter(Job.date_created >= cutoff, Job.status == Status.finished, Job.result != Result.unknown)
            .group_by(Job.result)
        )

        jobstep_stats = dict(
            db.session.query(JobStep.result, func.count())
            .filter(JobStep.date_created >= cutoff, JobStep.status == Status.finished, JobStep.result != Result.unknown)
            .group_by(JobStep.result)
        )

        context = []
        for result in Result.__members__.values():
            if result in (Result.unknown, Result.skipped):
                continue

            context.append(
                {
                    "name": unicode(result),
                    "numBuilds": build_stats.get(result, 0),
                    "numJobs": job_stats.get(result, 0),
                    "numJobSteps": jobstep_stats.get(result, 0),
                }
            )

        return context
Example #11
def hashtag_stats():
    check_valid_arguments(request.args, ["to", "from", "count"])
    time_from, time_to = validate_and_set_interval(request.args)

    if 'count' in request.args:
        validate_count(request.args['count'])
        hashtags = TweetWish\
            .query\
            .filter(func.unix_timestamp(TweetWish.created_at) < time_to)\
            .filter(func.unix_timestamp(TweetWish.created_at) >= time_from)\
            .join(tweet_contains_hashtag)\
            .join(Hashtag)\
            .with_entities(Hashtag.hashtag)\
            .add_columns(func.count(Hashtag.hashtag))\
            .group_by(Hashtag.hashtag)\
            .order_by(desc(func.count(Hashtag.hashtag)))\
            .limit(request.args['count'])\
            .all()
    else:
        hashtags = TweetWish\
            .query\
            .filter(func.unix_timestamp(TweetWish.created_at) < time_to)\
            .filter(func.unix_timestamp(TweetWish.created_at) >= time_from)\
            .join(tweet_contains_hashtag)\
            .join(Hashtag)\
            .with_entities(Hashtag.hashtag)\
            .add_columns(func.count(Hashtag.hashtag))\
            .group_by(Hashtag.hashtag)\
            .order_by(desc(func.count(Hashtag.hashtag)))\
            .all()

    return jsonify(
        popular_hashtags=[{'hashtag': key, 'count': value} for key, value in hashtags])
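
Examples #9 and #11 duplicate the whole query only to append `.limit()` in one branch. Because SQLAlchemy queries compose lazily, the limit can be applied conditionally instead, much as Example #12 below does. A hedged sketch of hashtag_stats() reworked that way, using the same models and helpers:

    hashtags = TweetWish\
        .query\
        .filter(func.unix_timestamp(TweetWish.created_at) < time_to)\
        .filter(func.unix_timestamp(TweetWish.created_at) >= time_from)\
        .join(tweet_contains_hashtag)\
        .join(Hashtag)\
        .with_entities(Hashtag.hashtag)\
        .add_columns(func.count(Hashtag.hashtag))\
        .group_by(Hashtag.hashtag)\
        .order_by(desc(func.count(Hashtag.hashtag)))
    if 'count' in request.args:
        validate_count(request.args['count'])
        hashtags = hashtags.limit(request.args['count'])
    hashtags = hashtags.all()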
Example #12
    def get_top_participants(self, list_name, start, end, limit=None):
        """ Return all the participants between two given dates.

        :param list_name: The name of the mailing list in which this email
            should be searched.
        :param start: A datetime object representing the starting date of
            the interval to query.
        :param end: A datetime object representing the ending date of
            the interval to query.
        :param limit: Limit the number of participants to return. If None or
            not supplied, return them all.
        :returns: The list of (sender name, sender email, message count)
            tuples, busiest senders first.
        """
        part = self.db.query(Sender.name, Email.sender_email,
                             func.count(Email.sender_email)
                ).join(Email
                ).filter(and_(
                    Email.list_name == list_name,
                    Email.date >= start,
                    Email.date < end,
                )).group_by(Email.sender_email, Sender.name
                ).order_by(desc(func.count(Email.sender_email)))
        if limit is not None:
            part = part.limit(limit)
        return part.all()
Example #13
    def _columns(self, total_row=False):
        columns = (
            self._column_helper("rations", "male", "st"),
            self._column_helper("rations", "female", "st"),
            self._column_helper("rations", "male", "sc"),
            self._column_helper("rations", "female", "sc"),
            self._column_helper("rations", "male", "others"),
            self._column_helper("rations", "female", "others"),
            self._column_helper("rations", "male", "disabled"),
            self._column_helper("rations", "female", "disabled"),
            self._column_helper("rations", "male", "minority"),
            self._column_helper("rations", "female", "minority"),
            self._column_helper("absent", "male"),
            self._column_helper("absent", "female"),
            self._column_helper("partial", "male"),
            self._column_helper("partial", "female"),
            self._column_helper("rations", "migrant", "male"),
            self._column_helper("rations", "migrant", "female"),
            func.count(self.table.c.case_id).filter(
                self.table.c.sex == 'M').label("child_count_male"),
            func.count(self.table.c.case_id).filter(
                self.table.c.sex == 'F').label("child_count_female"),
            func.sum(self.table.c.num_rations_distributed).filter(
                self.table.c.sex == 'M').label("thr_total_rations_male"),
            func.sum(self.table.c.num_rations_distributed).filter(
                self.table.c.sex == 'F').label("thr_total_rations_female"),
        )

        if not total_row:
            return (self.table.c.awc_id.label("owner_id"),) + columns

        return columns
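
Examples #13 and #30 lean on SQLAlchemy's aggregate FILTER support: `func.count(...).filter(...)` renders as `COUNT(...) FILTER (WHERE ...)`. A minimal self-contained sketch of the construct, assuming SQLAlchemy 1.3-style APIs and an SQLite build recent enough (3.30+) to accept FILTER; the table and data here are illustrative:

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, func, select)

engine = create_engine("sqlite://")
metadata = MetaData()
cases = Table("cases", metadata,
              Column("case_id", Integer, primary_key=True),
              Column("sex", String))
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(cases.insert(), [{"sex": "M"}, {"sex": "F"}, {"sex": "M"}])
    stmt = select([
        func.count(cases.c.case_id).filter(cases.c.sex == 'M').label("male"),
        func.count(cases.c.case_id).filter(cases.c.sex == 'F').label("female"),
    ])
    print(conn.execute(stmt).fetchone())  # -> (2, 1)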
Example #14
    def principles_worksheet(self, wb):
        from dexter.models.views import DocumentsView, DocumentPrinciplesView

        ws = wb.add_worksheet("principles")

        # supported
        rows = self.filter(
            db.session.query(DocumentPrinciplesView.c.principle_supported, func.count(1).label("count"))
            .join(Document)
            .filter(DocumentPrinciplesView.c.principle_supported != None)  # noqa
            .group_by("principle_supported")
        ).all()
        rownum = 3 + self.write_table(ws, "PrincipleSupported", rows)

        # violated
        rows = self.filter(
            db.session.query(DocumentPrinciplesView.c.principle_violated, func.count(1).label("count"))
            .join(Document)
            .filter(DocumentPrinciplesView.c.principle_violated != None)  # noqa
            .group_by("principle_violated")
        ).all()
        self.write_table(ws, "PrincipleViolated", rows, rownum=rownum)

        # raw data
        ws = wb.add_worksheet("raw_principles")

        tables = OrderedDict()
        tables["doc"] = DocumentsView
        tables["principles"] = DocumentPrinciplesView

        rows = self.filter(
            db.session.query(*self.merge_views(tables, ["document_id"])).join(Document).join(DocumentPrinciplesView)
        ).all()
        self.write_table(ws, "Principles", rows)
Example #15
def get_line_sum():
	line = int(request.form['line'])
	chosen_direction = request.form['direction']
	# Restrict by direction only when a valid one was chosen; otherwise sum over both.
	filters = {'LINE_NUMBER': line}
	if chosen_direction in ['Inbound', 'Outbound']:
		filters['DIRECTION'] = chosen_direction
	for result in db.session.query(func.count(Output.ZONE_NUMBER.distinct())).filter_by(**filters):
		length = result[0]
		zones, fare, cost = initialize_variables(length)
	for i in range(length):
		zones[i] = db.session.query(Output.ZONE_NUMBER.distinct()).filter_by(**filters)[i][0]
		for fare_sum in db.session.query(func.sum(Output.FARE_COLLECTED)).filter_by(ZONE_NUMBER=zones[i], **filters):
			fare[i] = fare_sum[0]
		for cost_sum in db.session.query(func.sum(Output.TOTAL_OPERATING_COST)).filter_by(ZONE_NUMBER=zones[i], **filters):
			cost[i] = cost_sum[0]
	return zones, fare, cost
Example #16
    def _load_records(session):
        results = []

        # Hashes that occur more than once, most frequent first.
        for filehash, count in session.query(models.ImageFile.filehash,
                func.count('*').label('hash_count'))\
                .group_by(models.ImageFile.filehash)\
                .having(func.count('*') > 1)\
                .order_by(desc('hash_count')):
            qry = session.query(models.ImageFile.id, models.ImageFile.name,
                models.ImageFile.fullpath,
                func.char_length(models.ImageFile.name).label('namelen'))\
                .filter(models.ImageFile.filehash == filehash)
            assert qry.count() == count

            files = []

            for result in qry:
                files.append(dict(name=result.name, fullpath=result.fullpath,
                    id=result.id))
            results.append(dict(hash=filehash, count=count, files=files))

        return results
Example #17
    def list(self, service_filter=None):
        service_filter = service_filter or {}  # avoid sharing a mutable default argument
        service_bundle = Bundle('service',
                Service.id, Service.name, Service.description, Service.protocol,
                Service.status, Service.ports, Service.version, Service.owned,
                Service.interface_id,
                func.count(distinct(Vulnerability.id)).label('vuln_count'), EntityMetadata.couchdb_id,
                EntityMetadata.revision, EntityMetadata.update_time, EntityMetadata.update_user,
                EntityMetadata.update_action, EntityMetadata.creator, EntityMetadata.create_time,
                EntityMetadata.update_controller_action, EntityMetadata.owner, EntityMetadata.command_id,
                func.count(distinct(Credential.id)).label("credentials_count"))

        # and_() builds the SQL conjunction; Python's `and` would evaluate to
        # just one of the operands and silently drop the other condition.
        query = self._session.query(service_bundle).\
                group_by(Service.id).\
                outerjoin(EntityMetadata, EntityMetadata.id == Service.entity_metadata_id).\
                outerjoin(Vulnerability, Service.id == Vulnerability.service_id).\
                outerjoin(Interface, Interface.id == Service.interface_id).\
                outerjoin(Credential, and_(Credential.service_id == Service.id, Credential.host_id == None)).\
                outerjoin(Host, Host.id == Interface.host_id)

        query = apply_search_filter(query, self.COLUMNS_MAP, None, service_filter, self.STRICT_FILTERING)

        # 'LIKE' for search services started by hostId.%.%
        if service_filter.get('hostIdCouchdb') is not None:
            query = query.filter(
                EntityMetadata.couchdb_id.like(service_filter.get('hostIdCouchdb') + ".%.%"))

        raw_services = query.all()
        services = [self.__get_service_data(r.service) for r in raw_services]
        result = {'services': services}
        return result
Example #18
    def totals(self, row):
        """ Counts of articles and sources """
        self.scores_ws.write(row, 0, 'Articles')
        rows = self.filter(
            db.session.query(
                Medium.name,
                func.count(1).label('freq'))
            .join(Document)
            .group_by(Medium.name)
        ).all()
        self.write_simple_score_row('Total articles', rows, row)

        row += 2

        self.scores_ws.write(row, 0, 'Sources')
        rows = self.filter(
            db.session.query(
                Medium.name,
                func.count(1).label('freq'))
            .join(Document)
            .join(DocumentSource)
            .group_by(Medium.name)
        ).all()
        self.write_simple_score_row('Total sources', rows, row)

        return row
Example #19
    def _get_tag_suggestions(self, _tag_list):
        # pylint: disable=singleton-comparison
        # Cannot use `is` in SQLAlchemy filters

        include = [
            'exhibitor',
            'delegate',
            'market',
            'activity',
        ]

        results = []
        for path_short in include:
            q = self.orm.query(
                Orgtag.base_short,
                func.count(Org.org_id).label("freq")
            ) \
                .join(org_orgtag, org_orgtag.c.orgtag_id == Orgtag.orgtag_id) \
                .join(Org, Org.org_id == org_orgtag.c.org_id) \
                .filter(
                    Orgtag.public == True,
                    Orgtag.is_virtual == None,
                    Orgtag.path_short == path_short,
                    Org.public == True
                ) \
                .group_by(Orgtag.orgtag_id) \
                .order_by(func.count(Org.org_id).desc()) \
                .limit(10)

            results += list(q.all())

        return self._get_random_suggestions(results, 2)
Example #20
    def stats(self):
        query_directors = select(['persons.id', func.count('roles.person_id').label('count')],
                       from_obj=['persons', 'roles'],
                       whereclause="roles.person_id = persons.id AND roles.role_type = 'director'",
                       group_by=['persons.id'], order_by='count desc', limit=10)
        query_actors = select(['persons.id', func.count('roles.person_id').label('count')],
                       from_obj=['persons', 'roles'],
                       whereclause="roles.person_id = persons.id AND roles.role_type = 'cast'",
                       group_by=['persons.id'], order_by='count desc', limit=10)                       
        
        top_directors = DBSession.query(Person, 'count').from_statement(query_directors).all()
        top_actors = DBSession.query(Person, 'count').from_statement(query_actors).all()        
    
        ia = IMDb()

        top250_ids = [x.movieID for x in ia.get_top250_movies()]
        bottom100_ids = [x.movieID for x in ia.get_bottom100_movies()]
        
        top250_count = DBSession.query(Movie).filter(Movie.id.in_(top250_ids)).count()
        bottom100_count = DBSession.query(Movie).filter(Movie.id.in_(bottom100_ids)).count()
        total_count = DBSession.query(Movie).count()
        
        total_runtime = 1  # placeholder; total runtime is not actually computed
        
        return {'top250_count': top250_count,
                'bottom100_count': bottom100_count,
                'total_count': total_count,
                'total_runtime' : total_runtime,
                'top_directors': top_directors,
                'top_actors': top_actors}
Example #21
    def _get_name_suggestion(self, has_name):
        # pylint: disable=singleton-comparison
        # Cannot use `is` in SQLAlchemy filters

        suggestions = []
        if has_name:
            return suggestions

        q = self.orm.query(
            Org.name,
            func.count(Orgtag.orgtag_id).label("freq")
        ) \
            .join(org_orgtag, org_orgtag.c.org_id == Org.org_id) \
            .join(Orgtag, Orgtag.orgtag_id == org_orgtag.c.orgtag_id) \
            .filter(
                Org.public == True,
                Orgtag.public == True,
                Orgtag.is_virtual == None,
            ) \
            .group_by(Org.org_id) \
            .order_by(func.count(Orgtag.orgtag_id).desc()) \
            .limit(30)

        results = q.all()

        return self._get_random_suggestions(results, 2)
Example #22
    def db_calculate_filedistribution(resource_id):
        log.msg("[%s] Calculating file distributions" % resource_id)

        file_distribution = {}

        query = (select([func.count()]).select_from(Files).where(Files.c.resource_id == resource_id))
        total_file_count = yield tx_pool.runQuery(query)
        total_file_count = int(total_file_count[0].count_1)

        for k, v in FileCategories().data.iteritems():
            query = (select([func.count()]).select_from(Files).where(Files.c.file_format == v).where(Files.c.resource_id == resource_id))
            count = yield tx_pool.runQuery(query)

            if count:
                count = int(count[0].count_1)

                # Guard against resources with no files at all.
                pct = 100 * float(count) / float(total_file_count) if total_file_count else 0.0
                file_distribution[k] = "%.1f" % pct
            else:
                file_distribution[k] = "0.0"

        query = (ResourceMeta.update().where(ResourceMeta.c.id == resource_id).values(file_distribution=json.dumps(file_distribution)))
        yield tx_pool.runOperation(query)

        log.msg("[%s] Calculating file distributions DONE" % resource_id)
Example #23
def organizations_and_counters():
    '''Query organizations with their counters'''
    memberships = aliased(model.Member)

    query = DB.query(model.Group,
        func.count(distinct(model.Package.id)).label('nb_datasets'),
        func.count(distinct(memberships.id)).label('nb_members')
    )
    query = query.outerjoin(CertifiedPublicService)
    query = query.outerjoin(model.Package, and_(
        model.Group.id == model.Package.owner_org,
        ~model.Package.private,
        model.Package.state == 'active',
    ))
    query = query.outerjoin(memberships, and_(
        memberships.group_id == model.Group.id,
        memberships.state == 'active',
        memberships.table_name == 'user'
    ))
    query = query.filter(model.Group.state == 'active')
    query = query.filter(model.Group.approval_status == 'approved')
    query = query.filter(model.Group.is_organization == True)
    query = query.group_by(model.Group.id, CertifiedPublicService.organization_id)
    query = query.order_by(
        CertifiedPublicService.organization_id == null(),
        desc('nb_datasets'),
        desc('nb_members'),
        model.Group.title
    )
    return query
Example #24
    def get_all_users(days):
        final_users = []

        start_date = datetime.now() - timedelta(days=days)
        end_date = datetime.now()
        for single_date in daterange(start_date, end_date):
            all_users = User.query.filter(single_date >= User.registered_on).all()

            active_users = User.query.filter(
                single_date >= User.registered_on,
                single_date <= User.last_login + timedelta(days=4)
            ).all()

            created_adventure = db.session.query(
                func.count(Adventure.creator_id).label('creator_id')
            ).filter(single_date >= Adventure.created_on).group_by(Adventure.creator_id).all()

            joined_to_adventure = db.session.query(
                func.count(AdventureParticipant.user_id).label('user_id')
            ).join(Adventure).filter(
                single_date >= AdventureParticipant.joined_on,
                AdventureParticipant.user_id != Adventure.creator_id
            ).group_by(AdventureParticipant.user_id).all()

            final_users.append({
                'date': single_date.isoformat(),  # datetime objects are not JSON-serialisable
                'all': len(all_users),
                'active': len(active_users),
                'created_adventure': len(created_adventure),
                'joined_to_adventure': len(joined_to_adventure)
            })

        final_users = json.dumps(final_users)
        return final_users
Example #25
def most_interacting_domains_from_3DID(session_3DID):
    """SQLAlchemy query returns an ordered list of all interacting pairs of domains where minimum number of interactions is 100.
    
    * **session_3DID** is SQLAlchemy session that this function should use.
    
    SQL equivalent:

    .. code-block:: sql

        SELECT p1.domain_id, p2.domain_id, COUNT(p1.domain_id) AS d1, COUNT(p2.domain_id) AS d2
        FROM PDB AS p1, Interacting_PDBs AS i1, PDB AS p2, Interacting_PDBs AS i2
        WHERE p1.id = i1.PDB_first_id
        AND p2.id = i2.PDB_second_id
        AND i1.id = i2.id
        GROUP BY p1.domain_id, p2.domain_id
        HAVING d1 > 100 AND d2 > 100
        ORDER BY d1, d2;
    """
    p1 = aliased(PDB, name='p1')
    p2 = aliased(PDB, name='p2')
    i1 = aliased(Interacting_PDBs, name='i1')
    i2 = aliased(Interacting_PDBs, name='i2')
    d1 = func.count(p1.domain_id).label('d1')
    d2 = func.count(p2.domain_id).label('d2')

    most_interacting = session_3DID.query(p1.domain_id, p2.domain_id, d1, d2)\
        .filter(p1.id == i1.PDB_first_id)\
        .filter(p2.id == i2.PDB_second_id)\
        .filter(i1.id == i2.id)\
        .group_by(p1.domain_id, p2.domain_id)\
        .having(d1 > 100)\
        .having(d2 > 100)\
        .order_by(d1, d2)\
        .all()
    
    return most_interacting
Example #26
    def statistics(self):
        c.locations = meta.Session.query(Region, func.count(User.id))\
            .filter(LocationTag.region_id == Region.id)\
            .filter(User.location_id == LocationTag.id)\
            .group_by(Region).all()

        c.geo_locations = meta.Session.query(User.location_city, func.count(User.id))\
            .group_by(User.location_city)\
            .order_by(desc(func.count(User.id))).all()

        # Getting last week date range
        locale = c.locale
        from_time_str = format_date(date.today() - timedelta(7),
                                    format="short",
                                    locale=locale)
        to_time_str = format_date(date.today() + timedelta(1),
                                    format="short",
                                    locale=locale)
        from_time = parse_date(from_time_str, locale=locale)
        to_time = parse_date(to_time_str, locale=locale)

        uploads_stmt = meta.Session.query(
            Event.author_id,
            func.count(Event.created).label('uploads_count'))\
            .filter(Event.event_type == 'file_uploaded')\
            .filter(Event.created < to_time)\
            .filter(Event.created >= from_time)\
            .group_by(Event.author_id).order_by(desc('uploads_count')).limit(10).subquery()
        c.active_users = meta.Session.query(User,
                                            uploads_stmt.c.uploads_count.label('uploads'))\
                                            .join((uploads_stmt, uploads_stmt.c.author_id == User.id)).all()

        return render('/statistics.mako')
Example #27
    def __query_database(self, search=None, page=0, page_size=0, order_by=None, order_dir=None, host_filter=None):
        host_filter = host_filter or {}  # avoid sharing a mutable default argument
        host_bundle = Bundle('host', Host.id, Host.name, Host.os, Host.description, Host.owned,\
            Host.default_gateway_ip, Host.default_gateway_mac, EntityMetadata.couchdb_id,\
            EntityMetadata.revision, EntityMetadata.update_time, EntityMetadata.update_user,\
            EntityMetadata.update_action, EntityMetadata.creator, EntityMetadata.create_time,\
            EntityMetadata.update_controller_action, EntityMetadata.owner, EntityMetadata.command_id,\
            func.group_concat(distinct(Interface.id)).label('interfaces'),\
            func.count(distinct(Vulnerability.id)).label('vuln_count'),\
            func.count(distinct(Service.id)).label('open_services_count'))

        query = self._session.query(host_bundle)\
                             .outerjoin(EntityMetadata, EntityMetadata.id == Host.entity_metadata_id)\
                             .outerjoin(Interface, Host.id == Interface.host_id)\
                             .outerjoin(Vulnerability, Host.id == Vulnerability.host_id)\
                             .outerjoin(Service, (Host.id == Service.host_id) & (Service.status.in_(('open', 'running', 'opened'))))\
                             .group_by(Host.id)

        # Apply pagination, sorting and filtering options to the query
        query = sort_results(query, self.COLUMNS_MAP, order_by, order_dir, default=Host.id)
        query = apply_search_filter(query, self.COLUMNS_MAP, search, host_filter, self.STRICT_FILTERING)
        count = get_count(query, count_col=Host.id)

        if page_size:
            query = paginate(query, page, page_size)

        results = query.all()

        return results, count
Example #28
def author(mit_id, conn):
    """
    Returns an author object for insertion into mongo summary collection.

    The format is as follows:
        {"_id": {"name": <name>, "mitid": <mitid>},
         "type": "author",
         "size": <num docs>,
         "downloads": <num downloads>,
         "countries": [
            {"country": <3 ltr code>, "downloads": <num downloads>},...
         ]
         "dates": [
            {"date": <YYYY-MM-DD>, "downloads": <num>},...
         ]}
    """

    requests_to_authors = requests.join(documents)\
                                  .join(documents_authors)\
                                  .join(authors)

    totals = select([
                authors.c.mit_id,
                authors.c.name,
                select([func.count()])
                    .select_from(documents_authors.join(authors))
                    .where(authors.c.mit_id==bindparam('mit_id'))
                    .label('size'),
                select([func.count()])
                    .select_from(requests_to_authors)
                    .where(authors.c.mit_id==bindparam('mit_id'))
                    .label('downloads')
                ])\
             .where(authors.c.mit_id==bindparam('mit_id'))
    countries = select([requests.c.country, func.count().label('downloads')])\
                .select_from(requests_to_authors)\
                .where(authors.c.mit_id==bindparam('mit_id'))\
                .group_by(requests.c.country)
    dates = select([
                func.date_trunc('day', requests.c.datetime).label('date'),
                func.count().label('downloads')])\
            .select_from(requests_to_authors)\
            .where(authors.c.mit_id==bindparam('mit_id'))\
            .group_by(func.date_trunc('day', requests.c.datetime))

    author_obj = {'type': 'author'}
    res = conn.execute(totals, mit_id=mit_id).first()
    author_obj['_id'] = {'name': res['name'], 'mitid': res['mit_id']}
    author_obj['size'] = res['size']
    author_obj['downloads'] = res['downloads']
    res = conn.execute(countries, mit_id=mit_id)
    for row in res:
        author_obj.setdefault('countries', [])\
            .append({'country': row['country'], 'downloads': row['downloads']})
    res = conn.execute(dates, mit_id=mit_id)
    for row in res:
        author_obj.setdefault('dates', [])\
            .append({'date': row['date'].strftime('%Y-%m-%d'),
                     'downloads': row['downloads']})
    return author_obj
Example #29
def test_default_forms(session):
    """Check that each pokemon has one default form and each species has one
    default pokemon."""

    q = session.query(tables.Pokemon)
    q = q.join(tables.PokemonForm)
    q = q.filter(tables.PokemonForm.is_default==True)
    q = q.options(lazyload('*'))
    q = q.group_by(tables.Pokemon)
    q = q.add_columns(func.count(tables.PokemonForm.id))

    for pokemon, num_default_forms in q:
        if num_default_forms == 0:
            pytest.fail("pokemon %s has no default forms" % pokemon.name)
        elif num_default_forms > 1:
            pytest.fail("pokemon %s has %d default forms" % (pokemon.name, num_default_forms))

    q = session.query(tables.PokemonSpecies)
    q = q.join(tables.Pokemon)
    q = q.filter(tables.Pokemon.is_default==True)
    q = q.options(lazyload('*'))
    q = q.group_by(tables.PokemonSpecies)
    q = q.add_columns(func.count(tables.Pokemon.id))

    for species, num_default_pokemon in q:
        if num_default_pokemon == 0:
            pytest.fail("species %s has no default pokemon" % species.name)
        elif num_default_pokemon > 1:
            pytest.fail("species %s has %d default pokemon" % (species.name, num_default_pokemon))
Example #30
    def _column_helper(self, thr, gender_or_migrant, other=None):
        columns = self.table.c
        column = (columns.thr_eligible == 1)
        column &= {
            'rations': columns.num_rations_distributed >= 21,
            'absent': columns.num_rations_distributed.in_((0, None)),
            'partial': columns.num_rations_distributed.between(1, 20)
        }[thr]
        column &= {
            'male': columns.sex == 'M',
            'female': columns.sex == 'F',
            'migrant': columns.resident == 'no',
        }[gender_or_migrant]
        if other is None:
            return func.count(self.table.c.case_id).filter(column).label(
                "thr_rations_{}_{}".format(thr, gender_or_migrant))

        column &= {
            'st': columns.caste == 'st',
            'sc': columns.caste == 'sc',
            'others': columns.caste.notin_(('st', 'sc')),
            'disabled': columns.disabled == '1',
            'minority': columns.minority == '1',
            'male': columns.sex == 'M',
            'female': columns.sex == 'F',
        }[other]
        return func.count(self.table.c.case_id).filter(column).label(
            "thr_rations_{}_{}".format(gender_or_migrant, other))
Example #31
import csv

from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker

# Comment and Post are this project's declarative models (not shown in this snippet).
"""
Generate a CSV file with a line for each post that includes:
 1) the number of comments the post has,
 2) the number of comments with positive sentiment,
 3) the number of comments with negative sentiment.
"""

db_url = 'postgres://*****:*****@3.220.167.6:80/forum'
engine = create_engine(db_url)
sm = sessionmaker(bind=engine)
session = sm()

comments = (session.query(Comment.post_id,
                          func.count("*").label("comments")).group_by(
                              Comment.post_id).subquery())

positive_comments = (session.query(
    Comment.post_id,
    func.count("*").label("positive_comments")).filter(
        Comment.sentiment == 'positive').group_by(Comment.post_id).subquery())
negative_comments = (session.query(
    Comment.post_id,
    func.count("*").label("negative_comments")).filter(
        Comment.sentiment == 'negative').group_by(Comment.post_id).subquery())

final_query = (session.query(
    Post,
    comments.c.comments,
    positive_comments.c.positive_comments,
    negative_comments.c.negative_comments)
    # The original snippet is truncated here; these outer joins are a plausible
    # completion (they keep posts that lack comments of a given sentiment).
    .outerjoin(comments, comments.c.post_id == Post.id)
    .outerjoin(positive_comments, positive_comments.c.post_id == Post.id)
    .outerjoin(negative_comments, negative_comments.c.post_id == Post.id))
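
Since the snippet imports csv but is cut off before writing the file, here is a hedged sketch of the final step, assuming final_query yields (post, comments, positive, negative) rows and that Post has an id column (the output filename is illustrative):

with open("post_sentiment_counts.csv", "w") as fh:
    writer = csv.writer(fh)
    writer.writerow(["post_id", "comments", "positive_comments", "negative_comments"])
    for post, n_comments, n_positive, n_negative in final_query:
        # The outer joins yield None where a post has no comments of that kind.
        writer.writerow([post.id, n_comments or 0, n_positive or 0, n_negative or 0])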
Example #32
def main():
    # insert User
    print 'Insert Example\n'
    user1 = User('junki', 'Junki Kim', '12345')
    user2 = User('aaa', 'aaaa bb', '654321')
    query.insertUser(user1)
    query.insertUser(user2)

    # show table
    print "\n\n Show tables\n"
    query.show_tables()

    # select user
    print 'Select Example\n'
    query.selectUser(user1)

    # delete user
    print 'Delete Example\n'
    query.deleteUser(user2)
    print "\n\n Show tabales\n"
    query.show_tables()

    # update user
    print 'Update Example\n'
    updateuser = User('junki', 'Junki Kim', '654321')
    query.updateUser(user1, updateuser)
    query.show_tables()

    # Relationship
    print 'Relationship Example\n'
    print "Create tmpUser('aaa', 'bbb aaa', '12345678')\n"
    tmpUser = User('aaa', 'bbb aaa', '12345678')
    print "tmpUser's email address\n"
    tmpUser.addresses

    print 'Add email address\n'
    tmpUser.addresses = [
        Address(email_address='*****@*****.**'),
        Address(email_address='*****@*****.**')
    ]
    tmpUser.addresses

    print 'Insert tmpUser'
    query.insertUser(tmpUser)

    print 'Select tmpUser'
    query.selectUser(tmpUser)

    # Join
    joinUser1 = db_session.query(
        User, Address).filter(User.id == Address.user_id).filter(
            Address.email_address == '*****@*****.**').all()
    print joinUser1

    print 'SQL JOIN'
    joinUser2 = db_session.query(User).join(Address).filter(
        Address.email_address == '*****@*****.**').all()
    print joinUser2

    # Sub Query
    stmt = db_session.query(Address.user_id,
                            func.count('*').label('address_count')).group_by(
                                Address.user_id).subquery()

    for u, count in db_session.query(User, stmt.c.address_count).outerjoin(
            stmt, User.id == stmt.c.user_id).order_by(User.id):
        print u, count

    # Session Close
    db_session.close()
Example #33
def get_nominees(request):
    """
    Returns
    -------
    JSON-serialisable payload with the filtered list of nominees that
    `request.user` can view for the current period. Each nominee has labelled
    data to help with categorising client-side.
    """
    location = get_config_value(request.registry.settings,
                                constants.HOMEBASE_LOCATION_KEY)
    current_period = Period.get_current_period(request.dbsession)
    if not current_period:
        return interpolate_template(FEEDBACK_ENDED_TEMPLATE)

    if current_period.subperiod(location) == Period.ENROLLMENT_SUBPERIOD:
        return interpolate_template(ENTRY_PENDING_TEMPLATE,
                                    period_name=current_period.name)

    if current_period.subperiod(location) != Period.ENTRY_SUBPERIOD:
        return interpolate_template(ENTRY_ENDED_TEMPLATE,
                                    period_name=current_period.name)

    own_username = request.user.username

    query = request.dbsession.query(User, func.count(FeedbackForm.id)).join(
        Nominee, User.username == Nominee.username)
    base = (
        query.outerjoin(
            FeedbackForm,
            and_(
                User.username == FeedbackForm.to_username,
                FeedbackForm.from_username == own_username,
                FeedbackForm.is_summary == False,  # noqa
                FeedbackForm.period_id == Nominee.period_id,
            ),
        ).filter(Nominee.username != own_username).filter(
            Nominee.period_id == current_period.id))

    # restrict users outside configured business unit to see only those
    # employees that invited them
    if EXTERNAL_BUSINESS_UNIT_ROLE in request.effective_principals:
        base = base.join(
            ExternalInvite,
            and_(
                ExternalInvite.to_username == own_username,
                ExternalInvite.period_id == current_period.id,
                User.username == ExternalInvite.from_username,
            ),
        )

    joined = base.group_by(User).order_by(asc(User.first_name)).all()

    payload = []
    for nominated_user, form in joined:
        if not nominated_user:
            continue
        manager = nominated_user.manager
        if manager:
            manager_display_name = " ".join(
                [manager.first_name, manager.last_name])
        else:
            manager_display_name = "-"
        payload.append({
            "username": nominated_user.username,
            "displayName": nominated_user.display_name,
            "department": nominated_user.department,
            "managerDisplayName": manager_display_name,
            "position": nominated_user.position,
            "hasExistingFeedback": True if form else False,
        })
    request.response.status_int = 200
    return {"period": current_period.name, "nominees": payload}
Example #34
    def make_admin_trash_panel(self):
        board = self.board
        table = model.backup
        session = model.Session()
        template_kwargs = {}

        # List of current threads *and* orphaned posts.
        threads = []

        if str(self.page).startswith('t'):
            self.page = self.page[1:]
            sql = table.select().where(and_(or_(table.c.postnum == self.page,
                                                table.c.parent == self.page),
                                            table.c.board_name == board.name))\
                                .order_by(table.c.timestampofarchival.desc(),
                                          table.c.postnum.asc())
            thread = [dict(x.items()) for x in session.execute(sql).fetchall()]

            if not thread:
                raise WakaError('Thread not found.')

            threads = [{'posts': thread}]

            template_kwargs = {
                'postform':
                board.options['ALLOW_TEXTONLY']
                or board.options['ALLOW_IMAGES'],
                'image_inp':
                board.options['ALLOW_IMAGES'],
                'textonly_inp':
                0,
                'threads':
                threads,
                'thread':
                self.page,
                'parent':
                self.page
            }

        elif config.POST_BACKUP:
            max_res = board.options['IMAGES_PER_PAGE']

            sqlcond = and_(
                or_(
                    table.c.parent == 0,
                    and_(
                        table.c.parent > 0,
                        not_(
                            exists([table.c.num],
                                   table.c.parent == table.c.postnum)))),
                table.c.board_name == board.name)

            # Acquire the number of full threads *and* orphaned posts.
            sql = select([func.count()], sqlcond, table)\
                  .order_by(table.c.timestampofarchival.desc(),
                              table.c.postnum.asc())

            thread_ct = session.execute(sql).fetchone()[0]

            total = (thread_ct + max_res - 1) // max_res  # ceiling division
            offset = self.page * max_res

            (pages, prevpage, nextpage) \
                = board.get_board_page_data(self.page, total,
                                            admin_page='postbackups')

            last_page = len(pages) - 1
            if self.page > last_page and last_page > 0:
                self.page = last_page

            sql = table.select().where(sqlcond)\
                  .order_by(table.c.timestampofarchival.desc(),
                              table.c.num.asc())\
                  .limit(board.options['IMAGES_PER_PAGE'])\
                  .offset(offset)
            threads = [{'posts': [dict(x.items())]}
                       for x in session.execute(sql)]

            # Loop through 'posts' key in each dictionary in the threads
            # list.
            for item in threads:
                thread = item['posts']
                threadnum = thread[0]['postnum']
                postcount = imgcount = shownimages = 0

                # Orphaned?
                item['standalone'] = 0

                if not thread[0]['parent']:
                    sql = select(
                        [func.count(), func.count(table.c.image)],
                        table.c.parent == threadnum, table)

                    (postcount, imgcount) = session.execute(sql).fetchone()

                    max_res = board.options['REPLIES_PER_THREAD']
                    offset = postcount - imgcount if postcount > max_res \
                                                  else 0

                    sql = table.select().where(table.c.parent == threadnum)\
                            .order_by(table.c.timestampofarchival.desc(),
                                      table.c.postnum.asc())\
                            .limit(max_res)\
                            .offset(offset)
                    thread.extend([dict(x.items()) \
                                       for x in session.execute(sql)])

                else:
                    item['standalone'] = 1

                for post in thread:
                    image_dir \
                        = os.path.join(board.path, board.options['IMG_DIR'])

                    thumb_dir \
                        = os.path.join(board.path, board.options['THUMB_DIR'])

                    base_thumb = os.path.basename(post['thumbnail'] or '')
                    base_image = os.path.basename(post['image'] or '')

                    base_filename = (post['image'] or '')\
                        .replace(image_dir, '').lstrip('/')

                    backup_dir = os.path.join(board.url,
                                              board.options['ARCHIVE_DIR'],
                                              board.options['BACKUP_DIR'])

                    if post['image']:
                        post['image'] = os.path.join(backup_dir, base_image)
                        shownimages += 1

                    if re.match(board.options['THUMB_DIR'], post['thumbnail']
                                or ''):
                        post['thumbnail'] \
                            = os.path.join(backup_dir, base_thumb)

                item['omit'] = postcount - max_res if postcount > max_res\
                                                   else 0

                item['omitimages'] = imgcount - shownimages \
                                     if imgcount > shownimages else 0

                template_kwargs = {'postform': board.options['ALLOW_TEXTONLY'] or
                                               board.options['ALLOW_IMAGES'],
                                   'image_inp': board.options['ALLOW_IMAGES'],
                                   'textonly_inp': board.options['ALLOW_IMAGES'] and
                                                   board.options['ALLOW_TEXTONLY'],
                                   'nextpage': nextpage,
                                   'prevpage': prevpage,
                                   'threads': threads,
                                   'pages': pages}

        Template.__init__(self, 'backup_panel_template', **template_kwargs)
Example #35
def get_surrounding_count_species_by_group2inpn(id_area, buffer=10000):
    """Count threatened and not-threatened taxa per INPN group2, both within
    the given area and within a buffer around it.

    :param id_area: id of the area to query
    :param buffer: buffer distance around the area geometry (default 10000)
    :return: JSON response with per-group labels and counts
    """
    query_surrounding_territory = (
        DB.session.query(
            Taxref.group2_inpn,
            funcfilter(
                func.count(distinct(Taxref.cd_ref)),
                TMaxThreatenedStatus.threatened == True,
            ).label("threatened"),
            funcfilter(
                func.count(distinct(Taxref.cd_ref)),
                TMaxThreatenedStatus.threatened == False,
            ).label("not_threatened"),
        )
        .distinct()
        .filter(LAreas.id_area == id_area)
        .filter(Synthese.cd_nom == Taxref.cd_nom)
        .filter(Synthese.the_geom_local.ST_DWithin(LAreas.geom, buffer))
        .outerjoin(TMaxThreatenedStatus, TMaxThreatenedStatus.cd_nom == Taxref.cd_ref)
        .group_by(Taxref.group2_inpn)
        .order_by(Taxref.group2_inpn)
    )
    surrounding_territory_data = query_surrounding_territory.all()

    query_territory = (
        DB.session.query(
            Taxref.group2_inpn,
            funcfilter(
                func.count(distinct(Taxref.cd_ref)),
                TMaxThreatenedStatus.threatened == True,
            ).label("threatened"),
            funcfilter(
                func.count(distinct(Taxref.cd_ref)),
                TMaxThreatenedStatus.threatened == False,
            ).label("not_threatened"),
        )
        .distinct()
        .filter(LAreas.id_area == id_area)
        .filter(Synthese.cd_nom == Taxref.cd_nom)
        .filter(CorAreaSynthese.id_synthese == Synthese.id_synthese)
        .filter(CorAreaSynthese.id_area == LAreas.id_area)
        .outerjoin(TMaxThreatenedStatus, TMaxThreatenedStatus.cd_nom == Taxref.cd_ref)
        .group_by(Taxref.group2_inpn)
        .order_by(Taxref.group2_inpn)
    )

    territory_data = query_territory.all()

    print(surrounding_territory_data)
    print(territory_data)

    taxo_groups = list(set(g.group2_inpn for g in surrounding_territory_data))
    taxo_groups.sort()
    print(taxo_groups)
    # result["territory"] = []

    response = {}
    response["labels"] = taxo_groups
    response["surrounding"] = {
        "not_threatened": [],
        "threatened": [],
    }
    response["territory"] = {
        "not_threatened": [],
        "threatened": [],
    }
    for t in taxo_groups:
        for r in surrounding_territory_data:
            if r.group2_inpn == t:
                response["surrounding"]["threatened"].append(r.threatened)
                response["surrounding"]["not_threatened"].append(r.not_threatened)
        for r in territory_data:
            if r.group2_inpn == t:
                response["territory"]["threatened"].append(r.threatened)
                response["territory"]["not_threatened"].append(r.not_threatened)

    return (
        jsonify(response),
        200,
    )
Example #36
def home_stats():
    """Compute overall counts (taxa, occurrences, datasets, observers) for the
    home page.

    :return: JSON response with the counts
    """
    # NOTE: this list is not referenced below; the counts are computed individually.
    data_to_count = [
        {"label": "count_dataset", "column": Synthese.id_dataset},
        {"label": "count_occtax", "column": Synthese.id_synthese},
        {"label": "count_taxa", "column": Synthese.cd_nom},
        {"label": "count_observers", "column": Synthese.observers},
    ]
    result = {}
    try:
        taxa = aliased(
            (
                DB.session.query(Synthese.cd_nom)
                .distinct()
                .cte(name="taxa", recursive=False)
            ),
            name="taxa_cte",
        )
        # taxa = DB.session.query(Synthese.cd_nom).distinct()
        # occtax = aliased(
        #     (
        #         DB.session.query(Synthese.id_synthese)
        #         .distinct()
        #         .cte(name="occtax", recursive=False)
        #     ),
        #     name="occtax_cte",
        # )
        observers = aliased(
            (
                DB.session.query(Synthese.observers)
                .distinct()
                .cte(name="observers", recursive=False)
            ),
            name="observers_cte",
        )
        dataset = aliased(
            (
                DB.session.query(Synthese.id_dataset)
                .distinct()
                .cte(name="dataset", recursive=False)
            ),
            name="dataset_cte",
        )
        result["count_taxa"] = DB.session.query(func.count(taxa.c.cd_nom)).one()[0]
        current_app.logger.info(
            "<homestats query> {}".format(DB.session.query(func.count(taxa.c.cd_nom)))
        )
        result["count_occtax"] = DB.session.query(
            func.count(Synthese.id_synthese)
        ).one()[0]
        result["count_dataset"] = DB.session.query(
            func.count(dataset.c.id_dataset)
        ).one()[0]
        result["count_observers"] = DB.session.query(
            func.count(observers.c.observers)
        ).one()[0]

        # query = DB.session.query(
        #     func.count(taxa.c.cd_nom).label("count_taxa"),
        #     func.count(occtax.c.id_synthese).label("count_occtax"),
        #     func.count(dataset.c.id_dataset).label("count_dataset"),
        #     func.count(observers.c.observers).label("count_observers"),
        # )
        # current_app.logger.info("<homestat query>: {}".format(query))
        # result = query.one()
        # return jsonify(result._asdict())
        return jsonify(result)

    except Exception as e:
        current_app.logger.error("<main_area_info> ERROR: {}".format(e))
        return {"Error": str(e)}, 400
Example #37
0
def delete_galaxy(connection, galaxy_ids):
    for galaxy_id in galaxy_ids:
        transaction = connection.begin()
        galaxy = connection.execute(
            select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id)).first()
        if galaxy is None:
            LOG.info('Error: Galaxy with galaxy_id of %d was not found',
                     galaxy_id)
        else:
            LOG.info('Deleting Galaxy with galaxy_id of %d - %s', galaxy_id,
                     galaxy[GALAXY.c.name])
            area_count = connection.execute(
                select([func.count(AREA.c.area_id)]).where(
                    AREA.c.galaxy_id == galaxy[GALAXY.c.galaxy_id])).first()[0]
            counter = 1

            for area_id1 in connection.execute(
                    select([AREA.c.area_id]).where(AREA.c.galaxy_id == galaxy[
                        GALAXY.c.galaxy_id]).order_by(AREA.c.area_id)):
                LOG.info("Deleting galaxy {0} area {1}. {2} of {3}".format(
                    galaxy_id, area_id1[0], counter, area_count))
                connection.execute(PIXEL_RESULT.delete().where(
                    PIXEL_RESULT.c.area_id == area_id1[0]))

                # Give the rest of the world a chance to access the database
                time.sleep(0.1)
                counter += 1

                if shutdown() is True:
                    transaction.rollback()
                    raise SystemExit

            LOG.info("Deleting FITS headers for galaxy {0}".format(galaxy_id))
            connection.execute(FITS_HEADER.delete().where(
                FITS_HEADER.c.galaxy_id == galaxy[GALAXY.c.galaxy_id]))

            # Now empty the bucket of the sed files
            s3helper = S3Helper()
            bucket = s3helper.get_bucket(get_sed_files_bucket())
            galaxy_file_name = get_galaxy_file_name(galaxy[GALAXY.c.name],
                                                    galaxy[GALAXY.c.run_id],
                                                    galaxy[GALAXY.c.galaxy_id])
            for key in bucket.list(prefix='{0}/'.format(galaxy_file_name)):
                # Skip folder placeholder keys
                if key.key.endswith('/'):
                    continue

                bucket.delete_key(key)

                if shutdown() is True:
                    transaction.rollback()
                    raise SystemExit

            # Now the folder
            key = Key(bucket)
            key.key = '{0}/'.format(galaxy_file_name)
            bucket.delete_key(key)

        LOG.info('Galaxy with galaxy_id of %d was deleted', galaxy_id)
        connection.execute(
            GALAXY.update().where(GALAXY.c.galaxy_id == galaxy_id).values(
                status_id=DELETED, status_time=datetime.datetime.now()))

        if shutdown() is True:
            transaction.rollback()
            raise SystemExit

        transaction.commit()
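A side note on the count queries above: in Core, a single-aggregate select can be read back with .scalar() instead of .first()[0]. A sketch of the area_count query in that style:

area_count = connection.execute(
    select([func.count(AREA.c.area_id)]).where(
        AREA.c.galaxy_id == galaxy[GALAXY.c.galaxy_id])).scalar()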
Example #38
0
 def get_comment_count(self, session=None):
     if session is None:
         session = self.get_session()
     sub = self.get_reddit_object_sub(session)
     return session.query(func.count(Comment.id)).join(Post)\
         .filter(Post.significant_reddit_object_id.in_(sub)).scalar()
Example #39
0
inventory_list = [
    {
        'cookie_name': 'peanut butter',
        'cookie_recipe_url': 'http://some.aweso.me/cookie/peanut.html',
        'cookie_sku': 'PB01',
        'quantity': '24',
        'unit_cost': '0.25'
    },
    {
        'cookie_name': 'oatmeal raisin',
        'cookie_recipe_url': 'http://some.okay.me/cookie/raisin.html',
        'cookie_sku': 'EWW01',
        'quantity': '100',
        'unit_cost': '1.00'
    }
]
result = connection.execute(ins, inventory_list)

s = select([func.sum(cookies.c.quantity)])
rp = connection.execute(s)
# print([record[record.keys()[0]] for record in rp])
print('Quantity count scalar: ', rp.scalar())

s = select([func.count(cookies.c.cookie_name).label('inventory_count')])
rp = connection.execute(s)
record = rp.first()
print('Keys: ', record.keys())
print('Count, (inventory_count): ', record.inventory_count)
# page 28
Example #40
0
 def get_count_prescrib(self):
     return db.session.query(func.count(PrescribingData.id)).scalar()
Example #41
0
 def indicator_count(cls):
     return (select([
         func.count(Indicator.id)
     ]).where(Indicator.event_id == cls.id).label("indicator_count"))
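The labeled select above is a correlated scalar subquery; one common way to expose such a count on the mapped class is column_property. A hedged sketch of that wiring (assumes the declarative Base and Indicator model implied by the snippet):

from sqlalchemy import Column, Integer, func, select
from sqlalchemy.orm import column_property


class Event(Base):  # Base and Indicator assumed from the snippet's project
    __tablename__ = 'event'
    id = Column(Integer, primary_key=True)
    # Read-only attribute: count of indicators correlated to this event.
    indicator_count = column_property(
        select([func.count(Indicator.id)])
        .where(Indicator.event_id == id)
        .correlate_except(Indicator)
        .label('indicator_count')
    )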
Example #42
0
 def _get_instance_counts(self):
     return (db.query(func.count(Resource.resource_id)).join(
         Account, Resource.account_id == Account.account_id).filter(
             Account.account_id.in_(session['accounts']),
             Account.enabled == 1,
             Resource.resource_type_id == ec2_type_id).first()[0])
Example #43
0
def main():
    # 'capture' flag
    # True:  the nav bar will be hidden
    # False: it stays visible
    capture = request.args.get('capture')
    month = request.args.get('month')
    # time type
    date = datetime.datetime.now()
    c_month = int(date.strftime('%m'))
    c_day = int(date.strftime('%d'))
    c_year = int(date.strftime('%Y'))
    c_quarter = 0
    day_first = ''
    day_last = ''
    c_quarter_first = 0
    c_quarter_last = 0
    if c_month in [1, 2, 3]:
        c_quarter = 1
        c_quarter_first = 1
        c_quarter_last = 3
    elif c_month in [4, 5, 6]:
        c_quarter = 2
        c_quarter_first = 4
        c_quarter_last = 6
    elif c_month in [7, 8, 9]:
        c_quarter = 3
        c_quarter_first = 7
        c_quarter_last = 9
    elif c_month in [10, 11, 12]:
        c_quarter = 4
        c_quarter_first = 10
        c_quarter_last = 12

    # time type
    time_type = request.args.get('tt')
    if time_type not in ['w', 'm', 'q', 'a']:
        # default tt
        time_type = 'w'

    # calculate first day/last day and VT's x axis data
    c_mark = '#'
    trend_scan = {'file': [], 'line': [], 'project': [], 'task': []}
    amount_vulnerability = {
        'new': {
            'total': 0,
            'time_type': 0
        },
        'fixed': {
            'total': 0,
            'time_type': 0
        }
    }
    # Vulnerability Trend (VT)
    vt_x = []
    if time_type == 'm':
        p_month = 0
        if month is None:
            p_month = int(time.strftime('%m', time.localtime()))
        elif int(month) <= 12:
            p_month = int(month)

        current_time = time.strftime('%Y-{month}-{day}', time.localtime())
        day_first = current_time.format(month=p_month, day=1)
        day_last = current_time.format(month=p_month, day=31)

        for month in range(1, 13):
            x_time = '{month}月'.format(month=month)  # '月' = month, e.g. '3月'
            c_year = int(time.strftime('%Y', time.localtime()))
            start = '{year}-{month}-{day}'.format(year=c_year,
                                                  month=month,
                                                  day=1)
            next_month = datetime.date(
                c_year, month, 1).replace(day=28) + datetime.timedelta(days=4)
            end = next_month - datetime.timedelta(days=next_month.day)
            x_data = CobraResults.count_by_time(start, end)
            x_data['t'] = x_data[0] + x_data[1] + x_data[2]
            if month == p_month:
                amount_vulnerability['new']['time_type'] += x_data['t']
                amount_vulnerability['fixed']['time_type'] += x_data[2]
            vt_x.append({'time': x_time, 'data': x_data})

    elif time_type == 'q':
        for q in range(1, 5):
            x_time = 'Q{quarter}'.format(quarter=q)
            s_month = 0
            e_month = 0
            if q == 1:
                s_month = 1
                e_month = 3
            elif q == 2:
                s_month = 4
                e_month = 6
            elif q == 3:
                s_month = 7
                e_month = 9
            elif q == 4:
                s_month = 10
                e_month = 12
            cm, last_day = calendar.monthrange(c_year, e_month)
            start = '{year}-{month}-{day}'.format(year=c_year,
                                                  month=s_month,
                                                  day=1)
            end = '{year}-{month}-{day}'.format(year=c_year,
                                                month=e_month,
                                                day=last_day)
            x_data = CobraResults.count_by_time(start, end)
            x_data['t'] = x_data[0] + x_data[1] + x_data[2]
            if q == c_quarter:
                amount_vulnerability['new']['time_type'] += x_data['t']
                amount_vulnerability['fixed']['time_type'] += x_data[2]
            vt_x.append({'time': x_time, 'data': x_data})
        cm, last_day = calendar.monthrange(c_year, c_quarter_last)
        day_first = '{0}-{1}-{2}'.format(c_year, c_quarter_first, 1)
        day_last = '{0}-{1}-{2}'.format(c_year, c_quarter_last, last_day)
    else:
        # default TT(time type): w(weekly)
        # weekday names in Chinese: Sunday (日) through Saturday (六)
        week_desc = {0: '日', 1: '一', 2: '二', 3: '三', 4: '四', 5: '五', 6: '六'}
        for d in range(-7, 1):
            t = time.localtime(time.time() + (d * 86400))
            if d == -7:
                day_first = time.strftime('%Y-%m-%d', t)
            if d == 0:
                day_last = time.strftime('%Y-%m-%d', t)
            week = int(time.strftime('%w', t))
            week_d = week_desc[week]
            month = int(time.strftime('%m', t))
            day = int(time.strftime('%d', t))
            if day == c_day:
                x_time = '{0}{1}/{2}({3})'.format(c_mark, month, day, week_d)
            else:
                x_time = '{0}/{1}({2})'.format(month, day, week_d)
            # VT x data
            localtime = time.localtime(time.time() + (d * 86400))
            start_end = time.strftime('%Y-%m-%d', localtime)
            x_data = CobraResults.count_by_time(start_end, start_end)
            x_data['t'] = x_data[0] + x_data[1] + x_data[2]
            amount_vulnerability['new']['time_type'] += x_data['t']
            amount_vulnerability['fixed']['time_type'] += x_data[2]
            vt_x.append({'time': x_time, 'data': x_data})
            # scan trend data
            for k in trend_scan:
                start = time.strftime(
                    '%Y-%m-%d', time.localtime(time.time() + (d * 86400)))
                ct_count = CobraTaskInfo.count_by_time(start, start, k)
                if ct_count is None:
                    ct_count = 0
                trend_scan[k].append(ct_count)
        if time_type == 'a':
            day_first = '1997-10-10'
            day_last = time.strftime('%Y-%m-%d', time.localtime())

    # Vulnerability Data (VD)
    fixed_amount = db.session.query(
        func.count(CobraResults.id).label('count')).filter(
            CobraResults.status == 2,
            # Active project
            CobraProjects.status > 0,
            CobraResults.project_id == CobraProjects.id).group_by(
                CobraResults.status).first()
    fixed_amount = fixed_amount[0] if fixed_amount else 0

    not_fixed_amount = db.session.query(
        func.count(CobraResults.id).label('count')).filter(
            CobraResults.status < 2,
            # Active project
            CobraProjects.status > 0,
            CobraResults.project_id == CobraProjects.id).group_by(
                CobraResults.status).first()
    not_fixed_amount = not_fixed_amount[0] if not_fixed_amount else 0

    # Scan Data (SD)
    project_total = CobraProjects.query.filter(
        CobraProjects.status > 0).count()

    task_total = db.session.query(func.count(
        CobraTaskInfo.id).label('count')).filter(
            CobraProjects.status > 0,
            CobraProjects.repository == CobraTaskInfo.target).first()[0]

    file_total = db.session.query(
        func.sum(CobraTaskInfo.file_count).label('files')).filter(
            CobraProjects.status > 0,
            CobraProjects.repository == CobraTaskInfo.target).first()[0]

    line_total = db.session.query(
        func.sum(CobraTaskInfo.code_number).label('codes')).filter(
            CobraProjects.status > 0,
            CobraProjects.repository == CobraTaskInfo.target).first()[0]

    amount_scan = {
        'projects': {
            'total':
            convert_number(project_total),
            'time_type':
            convert_number(
                CobraTaskInfo.count_by_time(day_first, day_last, 'project'))
        },
        'tasks': {
            'total':
            convert_number(task_total),
            'time_type':
            convert_number(
                CobraTaskInfo.count_by_time(day_first, day_last, 'task'))
        },
        'files': {
            'total':
            convert_number(file_total),
            'time_type':
            convert_number(
                CobraTaskInfo.count_by_time(day_first, day_last, 'file'))
        },
        'lines': {
            'total':
            convert_number(line_total),
            'time_type':
            convert_number(
                CobraTaskInfo.count_by_time(day_first, day_last, 'line'))
        }
    }

    # Rule Data (RD)
    rule_amount_status = CobraRules.count_by_time(day_first, day_last)
    amount_rule = {
        'on': {
            'total': CobraRules.query.filter(CobraRules.status == 1).count(),
            'time_type': rule_amount_status[1]
        },
        'off': {
            'total': CobraRules.query.filter(CobraRules.status == 0).count(),
            'time_type': rule_amount_status[0]
        },
        'total': {
            'total': 0,
            'time_type': 0
        }
    }
    amount_rule['total']['total'] = convert_number(amount_rule['on']['total'] +
                                                   amount_rule['off']['total'])
    amount_rule['total']['time_type'] = convert_number(
        amount_rule['on']['time_type'] + amount_rule['off']['time_type'])
    amount_rule['on']['total'] = convert_number(amount_rule['on']['total'])
    amount_rule['on']['time_type'] = convert_number(
        amount_rule['on']['time_type'])
    amount_rule['off']['total'] = convert_number(amount_rule['off']['total'])
    amount_rule['off']['time_type'] = convert_number(
        amount_rule['off']['time_type'])

    # Rule Hits Rank (RHR)
    hit_rules = db.session.query(
        func.count(CobraResults.rule_id).label("cnt"),
        CobraRules.author,
        CobraRules.description,
        CobraRules.id,
    ).filter(
        CobraResults.created_at >= '{start} 00:00:00'.format(start=day_first),
        CobraResults.created_at <= '{end} 23:59:59'.format(end=day_last),
        CobraProjects.status > 0, CobraResults.project_id == CobraProjects.id,
        CobraResults.rule_id == CobraRules.id).group_by(
            CobraResults.rule_id).all()
    ranks = dict()
    hits = dict()
    hits_tmp = []
    for res in hit_rules:
        if len(ranks) < 7:
            # ranks
            if res[1] in ranks:
                rank = ranks[res[1]] + res[0]
            else:
                rank = res[0]
            ranks[res[1]] = rank
            # hits (keyed by rule id, so accumulate from hits, not ranks)
            if res[3] in hits:
                rank = hits[res[3]]['rank'] + res[0]
            else:
                rank = res[0]
            hits[res[3]] = {'name': res[2], 'author': res[1], 'rank': rank}
    for h in hits.values():
        hits_tmp.append(h)
    ranks = sorted(ranks.items(), key=operator.itemgetter(1), reverse=True)
    hits = sorted(hits_tmp, key=lambda x: x['rank'], reverse=True)

    # new & improves rule (NIR)
    filter_group = (CobraRules.id > 0, )
    filter_group += (
        CobraRules.updated_at >= '{start} 00:00:00'.format(start=day_first),
        CobraRules.updated_at <= '{end} 23:59:59'.format(end=day_last),
    )
    new_rules = db.session.query(
        CobraRules.author, CobraRules.description).filter(*filter_group).all()
    if len(new_rules) == 0:
        new_rules = [{'author': 'Unknown', 'description': 'Nothing'}]

    rule_amount = db.session.query(CobraRules.author,
                                   func.count("*").label('counts')).group_by(
                                       CobraRules.author).all()
    rule_amount = sorted(rule_amount, key=operator.itemgetter(1), reverse=True)
    rule_amount_rank = []
    for ra in rule_amount:
        count = CobraRules.query.with_entities(CobraRules.id).filter(
            CobraRules.author == ra[0], CobraRules.status == 0).count()
        rule_amount_rank.append({
            'author': ra[0],
            'active': ra[1] - count,
            'not_active': count,
            'total': ra[1]
        })

    # vulnerabilities types
    cobra_rules = db.session.query(CobraRules.id, CobraRules.vul_id).all()
    cobra_vulnerabilities = db.session.query(CobraVuls.id,
                                             CobraVuls.name).all()

    all_rules = {}
    for x in cobra_rules:
        all_rules[x.id] = x.vul_id  # rule_id -> vul_id
    all_cobra_vulnerabilities = {}
    for x in cobra_vulnerabilities:
        all_cobra_vulnerabilities[x.id] = x.name  # vul_id -> vul_name
    # VTD
    # show all vulnerabilities
    all_vulnerabilities = db.session.query(
        CobraResults.rule_id,
        func.count("*").label('counts')).filter(
            CobraResults.updated_at >=
            '{start} 00:00:00'.format(start=day_first),
            CobraResults.updated_at <= '{end} 23:59:59'.format(end=day_last),
            # Active project
            CobraProjects.status > 0,
            CobraResults.project_id == CobraProjects.id).group_by(
                CobraResults.rule_id).all()

    vulnerabilities_types = []
    for x in all_vulnerabilities:  # results group by rule_id and count(*)
        t = {}
        # get vul name
        if x.rule_id not in all_rules:
            continue
        te = all_cobra_vulnerabilities[all_rules[x.rule_id]]
        # check if there is already a same vul name in different language
        flag = False
        for tv in vulnerabilities_types:
            if te == tv['name']:
                tv['amount'] += x.counts
                flag = True
                break
        if not flag:
            t['name'] = all_cobra_vulnerabilities[all_rules[x.rule_id]]
            t['amount'] = x.counts
        if t:
            vulnerabilities_types.append(t)
    vulnerabilities_types = sorted(vulnerabilities_types,
                                   key=lambda x: x['amount'],
                                   reverse=True)

    # time-type labels: week (周) / month (月) / quarter (季度) / all (全部)
    time_type_desc = {'w': '周', 'm': '月', 'q': '季度', 'a': '全部'}
    ttd = time_type_desc[time_type]
    # "Scan metrics are stable this {period}, no significant fluctuations!"
    comment_scan = '本{ttd}扫描数据各指标都比较平稳,无明显波动!'.format(ttd=ttd)
    if amount_rule['total']['time_type'] == 0:
        # "No new rules this {period}"
        comment_rule = '本{ttd}没有新增规则'.format(ttd=ttd)
    else:
        # "{count} new rules this {period}"
        comment_rule = '本{ttd}新增{count}条规则'.format(
            ttd=ttd, count=amount_rule['total']['time_type'])

    # "{new} new vulnerabilities found and {fixed} fixed this {period};
    #  the remainder enter the fix follow-up process."
    comment_vulnerability = '本{ttd}扫出{new}个新漏洞, 修复了{fixed}个漏洞,待修复漏洞进入漏洞修复跟进流程。'.format(
        ttd=ttd,
        new=amount_vulnerability['new']['time_type'],
        fixed=amount_vulnerability['fixed']['time_type'])

    data = {
        'amount': {
            'vulnerabilities_fixed':
            convert_number(fixed_amount),
            'vulnerabilities_not_fixed':
            convert_number(not_fixed_amount),
            'vulnerabilities_total':
            convert_number(fixed_amount + not_fixed_amount),
            'scan':
            amount_scan,
            'rule':
            amount_rule,
            'rar':
            rule_amount_rank
        },
        'trend': {
            'scan': trend_scan
        },
        'comment': {
            'scan': comment_scan,
            'rule': comment_rule,
            'vulnerability': comment_vulnerability
        },
        'ranks': ranks,
        'hits': hits,
        'rules': new_rules,
        'vulnerabilities_types': vulnerabilities_types,
        'time_type': time_type,
        'vt_x': vt_x,
        'day_first': day_first,
        'day_last': day_last,
        'capture': capture
    }
    return render_template("backend/index/overview.html", data=data)
Example #44
0
 def _get_count_by_uuids_and_user_in_db(context, uuids, user_id):
     query = (context.session.query(
         func.count(api_models.InstanceMapping.id)).filter(
             api_models.InstanceMapping.instance_uuid.in_(uuids)).filter_by(
                 queued_for_delete=False).filter_by(user_id=user_id))
     return query.scalar()
Example #45
0
def main():
    parser = argparse.ArgumentParser(
        description='Cinema booking data extract program.')
    parser.add_argument('--file',
                        type=str,
                        required=False,
                        default="result.txt",
                        help='result file name')
    parser.add_argument('--cinema',
                        type=str,
                        required=False,
                        help='target cinema')
    parser.add_argument('--merge',
                        type=bool,
                        required=False,
                        default=False,
                        help='merge different version of same movie')
    args = parser.parse_args()
    # figure out movie booking status
    engine = models.db_connect()
    session = sessionmaker(bind=engine)()
    book_count_label = func.sum(
        models.ShowingBooking.book_seat_count).label("book_count")
    query = session.query(
        models.Showing.title, book_count_label,
        func.sum(models.Showing.total_seat_count),
        func.count(models.Showing.id)).filter(
            models.ShowingBooking.showing_id == models.Showing.id).group_by(
                models.Showing.title).order_by(book_count_label.desc())
    if args.cinema is not None:
        query = query.filter(models.Showing.cinema_name == args.cinema)
    result = {}
    for (title, book_seat_count, total_seat_count, count) in query.all():
        title = unicodedata.normalize('NFKC', title)
        cinema = args.cinema if args.cinema is not None else 'total'
        book_seat_count = 0 if book_seat_count is None else book_seat_count
        total_seat_count = 0 if total_seat_count is None else total_seat_count
        if args.merge:
            # remove movie version in title
            title = re.sub(r"^(.+)\((.+)\)$", r"\1", title)
            title = title.strip()
        if title not in result:
            result[title] = {}
            result[title]['cinema'] = cinema
            result[title]['book_seat_count'] = book_seat_count
            result[title]['total_seat_count'] = total_seat_count
            result[title]['count'] = count
        else:
            result[title]['book_seat_count'] += book_seat_count
            result[title]['total_seat_count'] += total_seat_count
            result[title]['count'] += count
    with open(args.file, 'w') as result_file:
        for title in result:
            cinema = result[title]['cinema']
            book_seat_count = result[title]['book_seat_count']
            total_seat_count = result[title]['total_seat_count']
            count = result[title]['count']
            percent = "{:.2%}".format(
                book_seat_count /
                (1 if not total_seat_count else total_seat_count))
            result_str = "{0} {1}: {2}/{3} {4} {5} times".format(
                title, cinema, book_seat_count, total_seat_count, percent,
                count)
            print(result_str)
            result_str += "\n"
            result_file.write(result_str)
    session.close()
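The per-row None guards on book_seat_count/total_seat_count above can be pushed into SQL with func.coalesce, so the aggregates never come back as None. A sketch against the same models:

book_count_label = func.coalesce(
    func.sum(models.ShowingBooking.book_seat_count), 0).label("book_count")
total_count_label = func.coalesce(
    func.sum(models.Showing.total_seat_count), 0).label("total_count")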
Example #46
0
    def find_next_jobsteps(self, limit=10):
        # find projects with pending allocations
        project_list = [
            p for p, in db.session.query(JobStep.project_id, ).filter(
                JobStep.status == Status.pending_allocation, ).group_by(
                    JobStep.project_id)
        ]
        if not project_list:
            return []

        # TODO(dcramer): this should be configurable and handle more cases
        # than just 'active job' as that can be 1 step or 100 steps
        # find the total number of job steps in progress per project
        # hard limit of 10 active jobs per project
        unavail_projects = [
            p for p, c in db.session.query(
                Job.project_id,
                func.count(Job.project_id),
            ).filter(
                Job.status.in_([Status.allocated, Status.in_progress]),
                Job.project_id.in_(project_list),
            ).group_by(Job.project_id, ) if c >= 10
        ]

        filters = [
            JobStep.status == Status.pending_allocation,
        ]
        if unavail_projects:
            filters.append(~JobStep.project_id.in_(unavail_projects))

        base_queryset = JobStep.query.join(
            Job,
            JobStep.job_id == Job.id,
        ).join(
            Build,
            Job.build_id == Build.id,
        ).order_by(Build.priority.desc(), JobStep.date_created.asc())

        # prioritize jobs that have already started
        queryset = list(
            base_queryset.filter(
                Job.status.in_([Status.allocated, Status.in_progress]),
                *filters)[:limit])
        if len(queryset) == limit:
            return queryset

        results = queryset

        # now allow any prioritized project, based on order
        queryset = base_queryset.filter(*filters)
        if results:
            queryset = queryset.filter(~JobStep.id.in_(q.id for q in results))
        results.extend(queryset[:limit - len(results)])
        if len(results) >= limit:
            return results[:limit]

        # TODO(dcramer): we want to burst but not go too far. For now just
        # let burst
        queryset = base_queryset.filter(
            JobStep.status == Status.pending_allocation, )
        if results:
            queryset = queryset.filter(~JobStep.id.in_(q.id for q in results))
        results.extend(queryset[:limit - len(results)])
        return results[:limit]
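The `if c >= 10` in the unavail_projects comprehension above filters groups in Python; the same cutoff can be expressed in SQL with HAVING. A sketch with the same models:

unavail_projects = [
    p for p, in db.session.query(Job.project_id).filter(
        Job.status.in_([Status.allocated, Status.in_progress]),
        Job.project_id.in_(project_list),
    ).group_by(Job.project_id).having(func.count(Job.project_id) >= 10)
]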
Example #47
0
def msg_statistics():
    try:
        current_app.logger.info("request_json: {}".format(request.get_json()))
        req_dict = request.get_json()
    except Exception as e:
        current_app.logger.info(e)
        # errmsg: "parameters are not in JSON format"
        return jsonify(errno=RET.NOTJSON, errmsg="参数非Json格式")

    try:
        res = db.session.query(MessageLog.org_code, MessageLog.org_name,
                               MessageLog.send_class, MessageLog.msg_status,
                               func.count(MessageLog.id))
        if req_dict.get('start_date') != '':
            res = res.filter(MessageLog.created_at >= "{} 00:00:00".format(
                req_dict.get('start_date')))
        if req_dict.get('end_date') != '':
            res = res.filter(MessageLog.created_at <= "{} 23:59:59".format(
                req_dict.get('end_date')))
        if req_dict.get('msg_org') != '':
            res = res.filter(MessageLog.org_code == req_dict.get('msg_org'))
        if req_dict.get('msg_class') != '':
            res = res.filter(
                MessageLog.send_class == req_dict.get('msg_class'))
        if req_dict.get('msg_status') != '':
            res = res.filter(
                MessageLog.msg_status == req_dict.get('msg_status'))
        res = res.group_by(MessageLog.org_code, MessageLog.org_name,
                           MessageLog.send_class, MessageLog.msg_status)
        data = [{
            'org_code': r[0],
            'org_name': r[1],
            'send_class': r[2],
            'msg_status': r[3],
            'count': r[4],
            'start_date': req_dict.get('start_date'),
            'end_date': req_dict.get('end_date')
        } for r in res]
        current_app.logger.debug(data)
        if req_dict['action'] == 'search':  # search
            return jsonify(errno="0", data=data, query=[req_dict])
        else:  # export
            file_name = 'statistics{}.xlsx'.format(
                str(int(round(time.time() * 1000))))
            file_path = 'main/static/excels/{}'.format(file_name)
            # column headers: hospital code, hospital name, SMS category,
            # SMS status, count
            column_names = ['医院代码', '医院名称', '短信类别', '短信状态', '数量']

            book = Excel(file_path)
            book.write_colume_name(column_names)
            i = 1
            for row in res:
                book.write_content(i, row)
                i += 1
            book.close()
            return jsonify(errno="0", data=[{'filename': file_name}])
    except Exception as e:
        current_app.logger.error(e)
        if req_dict['action'] == 'search':
            errmsg = '数据查询错误'  # "data query error"
        else:
            errmsg = '文件生成错误'  # "file generation error"
        return jsonify(errno="1", errmsg=errmsg)
Example #48
0
 def get_count(cls, days=90):
     with session_scope() as session:
         # .scalar() executes the count here; returning the bare Query
         # would leak it past the session_scope() block.
         return session.query(func.count(cls.device_id)).scalar()
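The days parameter above is accepted but never applied; a hedged sketch of what a date-bounded version might look like (created_at is an assumed column name, not confirmed by the snippet):

 from datetime import datetime, timedelta

 @classmethod
 def get_count(cls, days=90):
     cutoff = datetime.utcnow() - timedelta(days=days)
     with session_scope() as session:
         return session.query(func.count(cls.device_id)).filter(
             cls.created_at >= cutoff).scalar()  # created_at: assumed column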
Example #49
0
def event_to_pb(session, occurrence: EventOccurrence, context):
    event = occurrence.event

    next_occurrence = (event.occurrences.where(
        EventOccurrence.end_time >= now()).order_by(
            EventOccurrence.end_time.asc()).first())

    owner_community_id = None
    owner_group_id = None
    if event.owner_cluster:
        if event.owner_cluster.is_official_cluster:
            owner_community_id = event.owner_cluster.parent_node_id
        else:
            owner_group_id = event.owner_cluster.id

    attendance = occurrence.attendees.where(
        EventOccurrenceAttendee.user_id == context.user_id).one_or_none()
    attendance_state = attendance.attendee_status if attendance else None

    can_moderate = _can_moderate_event(session, event, context.user_id)

    going_count = session.execute(
        select(func.count()).
        select_from(EventOccurrenceAttendee).where_users_column_visible(
            context, EventOccurrenceAttendee.user_id).where(
                EventOccurrenceAttendee.occurrence_id == occurrence.id).where(
                    EventOccurrenceAttendee.attendee_status ==
                    AttendeeStatus.going)).scalar_one()
    maybe_count = session.execute(
        select(func.count()).
        select_from(EventOccurrenceAttendee).where_users_column_visible(
            context, EventOccurrenceAttendee.user_id).where(
                EventOccurrenceAttendee.occurrence_id == occurrence.id).where(
                    EventOccurrenceAttendee.attendee_status ==
                    AttendeeStatus.maybe)).scalar_one()

    organizer_count = session.execute(
        select(func.count()).select_from(
            EventOrganizer).where_users_column_visible(
                context, EventOrganizer.user_id).where(
                    EventOrganizer.event_id == event.id)).scalar_one()
    subscriber_count = session.execute(
        select(func.count()).select_from(
            EventSubscription).where_users_column_visible(
                context, EventSubscription.user_id).where(
                    EventSubscription.event_id == event.id)).scalar_one()

    return events_pb2.Event(
        event_id=occurrence.id,
        is_next=(next_occurrence is not None
                 and occurrence.id == next_occurrence.id),
        title=event.title,
        slug=event.slug,
        content=occurrence.content,
        photo_url=occurrence.photo.full_url if occurrence.photo else None,
        online_information=events_pb2.OnlineEventInformation(
            link=occurrence.link, ) if occurrence.link else None,
        offline_information=events_pb2.OfflineEventInformation(
            lat=occurrence.coordinates[0],
            lng=occurrence.coordinates[1],
            address=occurrence.address,
        ) if occurrence.geom else None,
        created=Timestamp_from_datetime(occurrence.created),
        last_edited=Timestamp_from_datetime(occurrence.last_edited),
        creator_user_id=occurrence.creator_user_id,
        start_time=Timestamp_from_datetime(occurrence.start_time),
        end_time=Timestamp_from_datetime(occurrence.end_time),
        timezone=occurrence.timezone,
        start_time_display=str(occurrence.start_time),
        end_time_display=str(occurrence.end_time),
        attendance_state=attendancestate2api[attendance_state],
        organizer=event.organizers.where(
            EventOrganizer.user_id == context.user_id).one_or_none()
        is not None,
        subscriber=event.subscribers.where(
            EventSubscription.user_id == context.user_id).one_or_none()
        is not None,
        going_count=going_count,
        maybe_count=maybe_count,
        organizer_count=organizer_count,
        subscriber_count=subscriber_count,
        owner_user_id=event.owner_user_id,
        owner_community_id=owner_community_id,
        owner_group_id=owner_group_id,
        thread=thread_to_pb(event.thread_id),
        can_edit=_is_event_owner(event, context.user_id),
        can_moderate=can_moderate,
    )
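All four counts above share the same 1.4-style shape: select(func.count()).select_from(...).where(...) executed with scalar_one(). Stripped of the project-specific where_users_column_visible helper, the core pattern is just:

going_count = session.execute(
    select(func.count())
    .select_from(EventOccurrenceAttendee)
    .where(EventOccurrenceAttendee.occurrence_id == occurrence.id)
    .where(EventOccurrenceAttendee.attendee_status == AttendeeStatus.going)
).scalar_one()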
Example #50
0
line_items = Table('line_items', metadata,
                   Column('line_items_id', Integer(), primary_key=True),
                   Column('order_id', ForeignKey('orders.order_id')),
                   Column('cookie_id', ForeignKey('cookies.cookie_id')),
                   Column('quantity', Integer()),
                   Column('extended_cost', Numeric(12, 2)))

CheckConstraint('unit_cost >= 0.00', name='unit_cost_positive')
Index('ix_cookies_cookie_name', 'cookie_name')

metadata.create_all(engine)

columns = [
    orders.c.order_id, users.c.username, users.c.phone, cookies.c.cookie_name,
    line_items.c.quantity, line_items.c.extended_cost
]
cookiemon_orders = select(columns)
cookiemon_orders = cookiemon_orders.select_from(
    orders.join(users).join(line_items).join(cookies)).where(
        users.c.username == 'cookiemon')
result = connection.execute(cookiemon_orders).fetchall()
for row in result:
    print(row)

columns = [users.c.username, func.count(orders.c.order_id)]
all_orders = select(columns)
all_orders = all_orders.select_from(users.outerjoin(orders))
all_orders = all_orders.group_by(users.c.username)
result = connection.execute(all_orders).fetchall()
for row in result:
    print(row)
Example #51
0
def get_taxa_list(id_area):
    """List the taxa observed in an area, with per-taxon occurrence
    statistics and red-list status.

    :param id_area: identifier of the reference area (LAreas.id_area)
    :return: JSON response with a count and the sorted per-taxon records
    """
    try:
        reproduction_id = (
            (
                DB.session.query(TNomenclatures.id_nomenclature)
                .join(
                    BibNomenclaturesTypes,
                    TNomenclatures.id_type == BibNomenclaturesTypes.id_type,
                )
                .filter(
                    and_(
                        BibNomenclaturesTypes.mnemonique.like("STATUT_BIO"),
                        TNomenclatures.cd_nomenclature.like("3"),
                    )
                )
            )
            .first()
            .id_nomenclature
        )

        print("reproduction_id", reproduction_id)
        query_territory = (
            DB.session.query(
                Taxref.cd_ref.label("id"),
                LAreas.id_area,
                LAreas.area_code,
                Taxref.cd_ref,
                func.split_part(Taxref.nom_vern, ",", 1).label("nom_vern"),
                Taxref.nom_valide,
                Taxref.group1_inpn,
                Taxref.group2_inpn,
                func.count(distinct(Synthese.id_synthese)).label("count_occtax"),
                func.count(distinct(Synthese.observers)).label("count_observer"),
                func.count(distinct(Synthese.date_min)).label("count_date"),
                func.count(distinct(Synthese.id_dataset)).label("count_dataset"),
                func.max(distinct(func.extract("year", Synthese.date_min))).label(
                    "last_year"
                ),
                func.array_agg(
                    aggregate_order_by(
                        distinct(func.extract("year", Synthese.date_min)),
                        func.extract("year", Synthese.date_min).desc(),
                    )
                ).label("list_years"),
                func.array_agg(
                    aggregate_order_by(
                        distinct(func.extract("month", Synthese.date_min)),
                        func.extract("month", Synthese.date_min).asc(),
                    )
                ).label("list_months"),
                func.bool_or(
                    Synthese.id_nomenclature_bio_status == reproduction_id
                ).label("reproduction"),
                func.max(distinct(func.extract("year", Synthese.date_min)))
                .filter(Synthese.id_nomenclature_bio_status == reproduction_id)
                .label("last_year_reproduction"),
                func.array_agg(distinct(Synthese.id_nomenclature_bio_status)).label(
                    "bio_status_id"
                ),
                case(
                    [(func.count(TaxrefProtectionEspeces.cd_nom) > 0, True)],
                    else_=False,
                ).label("protection"),
            )
            .select_from(CorAreaSynthese)
            .join(Synthese, Synthese.id_synthese == CorAreaSynthese.id_synthese)
            .join(Taxref, Synthese.cd_nom == Taxref.cd_nom)
            .join(LAreas, LAreas.id_area == CorAreaSynthese.id_area)
            .outerjoin(TaxrefLR, TaxrefLR.cd_nom == Taxref.cd_ref)
            .outerjoin(
                TaxrefProtectionEspeces, TaxrefProtectionEspeces.cd_nom == Taxref.cd_nom
            )
            .filter(LAreas.id_area == id_area)
            .group_by(
                LAreas.id_area,
                LAreas.area_code,
                Taxref.cd_ref,
                Taxref.nom_vern,
                Taxref.nom_valide,
                Taxref.group1_inpn,
                Taxref.group2_inpn,
            )
            .order_by(
                func.count(distinct(Synthese.id_synthese)).desc(),
                Taxref.group1_inpn,
                Taxref.group2_inpn,
                Taxref.nom_valide,
            )
        )
        print("query_territory", query_territory)
        result = query_territory.all()
        count = len(result)
        data = []
        for r in result:
            row = r._asdict()  # avoid shadowing the dict builtin
            row["bio_status"] = [get_nomenclature(s) for s in r.bio_status_id]
            row["redlist"] = get_redlist_status(r.cd_ref)
            data.append(row)

        redlistless_data = list(filter(redlist_list_is_null, data))
        current_app.logger.debug(
            "redlistless_data: {}".format(len(redlistless_data)))
        redlist_data = list(filter(redlist_is_not_null, data))
        current_app.logger.debug("redlist_data: {}".format(len(redlist_data)))
        redlist_sorted_data = sorted(
            redlist_data,
            key=lambda k: (
                k["redlist"][0]["priority_order"],
                k["redlist"][0]["threatened"],
            ),
        )
        sorted_data = redlist_sorted_data + list(redlistless_data)
        return jsonify({"count": count, "data": sorted_data}), 200

    except Exception as e:
        error = "<get_taxa_list> ERROR: {}".format(e)
        current_app.logger.error(error)
        return {"Error": error}, 400
Example #52
0
    def _load_tree_rows(self, conn, where, order_by, parent_id):
        """Load rows as a tree."""
        if where is not None:
            # FIXME: If we have a where clause, we can't load the results
            # lazily because we don't know whether a row's
            # children/grandchildren/etc. will match. Is this optimization
            # (loading the leaves and the necessary parents up to the root)
            # good enough?
            children = {}
            node_mapper = {}

            def load_rows(where_):
                query = self.select(conn,
                                    self.table,
                                    columns=self.table.columns,
                                    where=where_,
                                    order_by=order_by)
                for row in query:
                    row_id = row[self.id_column_idx]
                    if row_id in node_mapper:
                        continue

                    c_list = children.setdefault(row[self.parent_column_idx],
                                                 [])
                    node = Node(data=row)
                    c_list.append(node)
                    node_mapper[row_id] = node

            load_rows(where)
            if not children:
                return

            # Load parents incrementally until we are left with the root
            while set(children) != {None}:
                parents_to_load = []
                # iterate over a copy: keys are deleted inside the loop
                for parent, c_list in list(children.items()):
                    if parent is None:
                        continue

                    node = node_mapper.get(parent, None)
                    if node is None:
                        parents_to_load.append(parent)
                        continue

                    node.extend(c_list)
                    node.children_len = len(node)
                    del children[parent]

                if parents_to_load:
                    where = self.table.columns[self.ID_COLUMN].in_(
                        parents_to_load)
                    load_rows(where)

            for node in children[None]:
                yield node
        else:
            # If there's no where clause, we can load the results lazily
            where = self.table.columns[self.PARENT_ID_COLUMN] == parent_id

            if self.CHILDREN_LEN_COLUMN is None:
                count_table = alias(self.table, '__count')
                # We could use the comparison between the columns, but that would
                # make sqlalchemy add self.table in the FROM clause, which
                # would produce wrong results.
                count_where = '%s.%s = %s.%s' % (
                    count_table.name, self.PARENT_ID_COLUMN, self.table.name,
                    self.ID_COLUMN)
                count_select = select([func.count(1)],
                                      whereclause=count_where,
                                      from_obj=[count_table])

                columns = self.table.columns.values()
                # We have to compile this here or else sqlalchemy would put
                # this inside the FROM part.
                columns.append('(%s)' % (_compile(count_select), ))
                extra_count_col = True
            else:
                columns = self.table.columns
                extra_count_col = False

            query = self.select(conn,
                                self.table,
                                columns=columns,
                                where=where,
                                order_by=order_by)

            for row in query:
                if extra_count_col:
                    children_len = row.pop(-1)
                else:
                    children_len = row[self.children_len_column_idx]

                yield Node(data=row, children_len=children_len)
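The hand-compiled count_select above is a correlated scalar subquery; the string compilation can usually be avoided by correlating explicitly, which keeps self.table out of the subquery's FROM clause. A sketch (as_scalar() is the pre-1.4 spelling of scalar_subquery()):

from sqlalchemy import alias, func, select

count_table = alias(self.table, '__count')
children_len_col = (
    select([func.count(1)])
    .where(count_table.c[self.PARENT_ID_COLUMN] ==
           self.table.c[self.ID_COLUMN])
    .correlate(self.table)  # keep self.table out of the subquery's FROM
    .as_scalar()            # .scalar_subquery() in SQLAlchemy >= 1.4
    .label('children_len')
)
columns = list(self.table.columns) + [children_len_col]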
Example #53
0
def obshist(plateid, session):

    # Set initial variables
    sn2, vdone = 0.0, 0

    # Get global S/N and visit information

    try:
        #find all the APOGEE plates with the same location_id and version as the current plate
        locver_plates = getLocVerPlates(session,plateid)

        sndata = session.query(func.floor(Exposure.start_time/86400.0+0.3),\
                               func.sum(Quickred.snr_standard * Quickred.snr_standard),\
                               func.count(Quickred.snr_standard),\
                               func.sum(Reduction.snr * Reduction.snr),\
                               func.count(Reduction.snr)).\
                               join(ExposureFlavor, Observation, PlatePointing, Plate).\
                               outerjoin(Quickred, Reduction).\
                               filter(Plate.plate_id.in_(locver_plates)).\
                               filter(ExposureFlavor.label=='Object').\
                               filter(or_(Quickred.snr_standard >= 10.0, Reduction.snr >= 10.0)).\
                               group_by(func.floor(Exposure.start_time/86400.0+0.3)).distinct()

        if sndata.count() > 0: good_days = [x[0] for x in sndata]
        else: good_days = []

        for i in range(sndata.count()):
            # Determine which S/N to use, QL or reduction
            if sndata[i][3] is not None:
                sn = float(sndata[i][3])
                sncnt = float(sndata[i][4])
            else:
                sn = float(sndata[i][1])
                sncnt = float(sndata[i][2])
            # If 2+ visits with S/N > 10, add visit to total
            if sncnt >= 2:
                vdone += 1
            # Always add S/N^2 to total
            sn2 += sn

        # Get individual exposure S/N information
        expdata = session.query(Exposure.pk,\
                                (Quickred.snr_standard * Quickred.snr_standard),\
                                (Reduction.snr * Reduction.snr),\
                                func.floor(Exposure.start_time/86400.0+0.3)).\
                                join(ExposureFlavor, Observation, PlatePointing, Plate).\
                                outerjoin(Quickred, Reduction).\
                                filter(Plate.plate_id.in_(locver_plates)).\
                                filter(ExposureFlavor.label=='Object').\
                                filter(or_(Quickred.snr_standard >= 10.0, Reduction.snr >= 10.0)).\
                                order_by(Exposure.pk).distinct()

        # Mark whether exposures are good or bad
        exposures = []
        for e in expdata:
            if int(e[3]) in good_days: this_good = True
            else: this_good = False
            exposures.append([e[0], e[1], e[2], this_good])

        # Get vplan information from the design value table
        dvdata = session.query(DesignValue.value).\
                              join(DesignField, Design, Plate).\
                              filter(Plate.plate_id==plateid).distinct()
    except Exception:
        raise RuntimeError("ERROR: unable to find observation history in database for plate: %s\n%s" %\
            (plateid, sys.exc_info()))
        
    try:
        vplan = int(dvdata.filter(DesignField.label=='apogee_n_design_visits').first()[0])
    except Exception:
        vplan = 0
    try:
        cadence_flag = dvdata.filter(DesignField.label=='apogee_design_type').first()[0]
    except Exception:
        cadence_flag = ''

    return sn2, vdone, vplan, cadence_flag, exposures
Example #54
0
rs.fetchall()

s = select([
    items.c.name, items.c.quantity, (items.c.selling_price * 5).label('price')
]).where(items.c.quantity == 50)

print(s)
rs = conn.execute(s)
rs.keys()
rs.fetchall()

# Grouping results

from sqlalchemy.sql import func

c = [func.count("*").label('count'), customers.c.town]

s = select(c).group_by(customers.c.town)

print(s)
conn.execute(s).fetchall()

from sqlalchemy.sql import func

c = [func.count("*").label('count'), customers.c.town]

s = select(c).group_by(customers.c.town).having(func.count("*") > 2)

print(s)
rs = conn.execute(s)
rs.keys()
Example #55
0
    def load(self, params=None):
        """Execute SQL ``SELECT`` and populate ``rows`` attribute.

        Loads a maximum of ``MAX_RECS`` records at a time.

        ``params`` dict example::

            {
                'desc': False,
                'order_by': 'title',
                'where': {
                    'date': {
                        'operator': 'range',
                        'param': (0, 1403845140)
                    },
                    'search': {
                        'operator': '=',
                        'param': 'Google'
                    }
                }
            }

        :param dict params: dict of various parameters from which to construct
            additional SQL clauses eg. ``WHERE``, ``ORDER BY``, etc.
        """
        rows = Node()
        # FIXME: Maybe we should use kwargs instead of params?
        params = params or {}

        # WHERE
        where = params.get('where', None)
        if where is not None:
            where = self._get_where_clause(where)

        # ORDER BY
        order_by = params.get('order_by', None)
        # Do a numeric ordering first, as suggested here
        # (http://stackoverflow.com/a/4204641), and then a case-insensitive one
        order_by = (order_by and [
            self.table.columns[order_by] + 0,
            collate(self.table.columns[order_by], 'NOCASE')
        ])
        if order_by is not None and params.get('desc', False):
            order_by = [desc(col) for col in order_by]

        # OFFSET
        page = params.get('page', 0)
        offset = page * self.MAX_RECS
        # A little optimization to avoid doing more queries when we
        # already loaded everything
        if page > 0 and offset >= self.total_recs:
            return rows

        # Flat
        flat = params.get('flat', False)
        if flat:
            flat_where = operator.ne(self.table.columns[self.FLAT_COLUMN],
                                     None)
            where = and_(
                where, flat_where) if where is not None else flat_where  # noqa

        with closing(sqlite3.connect(self.db_file)) as conn:
            conn.row_factory = lambda cursor, row: list(row)
            # ^^ make result lists mutable so we can change values in
            # the GTK TreeModel that uses this datasource.
            conn.create_function('rank', 1, rank)
            # TODO: ^^ only if search term in params
            with closing(conn.cursor()) as cursor:
                self._ensure_temp_view(cursor)

            if page == 0:
                # set the total record count only the first time the
                # record set is requested
                res = self.select(conn,
                                  self.table, [func.count(1)],
                                  where=where)
                self.total_recs = int(list(res)[0][0])

            if self.PARENT_ID_COLUMN and not flat:
                rows.extend(
                    self._load_tree_rows(conn, where, order_by,
                                         params.get('parent_id', None)))
            else:
                query = self.select(conn,
                                    self.table,
                                    self.table.columns,
                                    where=where,
                                    limit=self.MAX_RECS,
                                    offset=offset,
                                    order_by=order_by)
                for row in query:
                    rows.append(Node(data=row))

        rows.children_len = len(rows)
        return rows
Example #56
0
# Connect to the database - the login string is set in the database package
ENGINE = create_engine(DB_LOGIN)
connection = ENGINE.connect()

# Check things exist
errors = []

if os.path.isdir(INPUT_DIR):
    # Is the filters file there
    if not os.path.isfile('{0}/filters.dat'.format(INPUT_DIR)):
        errors.append(
            'The file {0}/filters.dat does not exist'.format(INPUT_DIR))

    count = connection.execute(
        select([func.count(RUN.c.run_id)
                ]).where(RUN.c.run_id == RUN_ID)).first()
    if count[0] > 0:
        errors.append('The run id {0} already exists'.format(RUN_ID))

else:
    errors.append('The directory {0} does not exist.'.format(INPUT_DIR))

# we have errors
if len(errors) > 0:
    for error in errors:
        LOG.error(error)

else:
    # Now we build everything
    transaction = connection.begin()
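Counting rows just to test existence (the RUN_ID check above) can also be phrased with EXISTS, which lets the database stop at the first match; a sketch in the same Core style:

from sqlalchemy import exists, select

run_exists = connection.execute(
    select([exists().where(RUN.c.run_id == RUN_ID)])).scalar()
if run_exists:
    errors.append('The run id {0} already exists'.format(RUN_ID))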
Example #57
0
 def on_read_part_DB():
     a = P4Rm()
     read_name = a.DBDict['session'].query(func.count(RadMaxData.exp_name),
                                           RadMaxData.exp_name).group_by(
                                               RadMaxData.exp_name).all()
     P4Rm.DBDict['name'] = [name for (num, name) in read_name]
Example #58
0
def index():
    board_id = request.args.get('bd', type=int, default=None)
    sort = request.args.get('st', type=int, default=1)
    page = request.args.get(get_page_parameter(), type=int, default=1)
    boards = BoardModel.query.all()
    start = (page-1)*config.PER_PAGE
    end = start+config.PER_PAGE
    posts = None
    total = 0

    query_obj = None
    if sort == 1:
        query_obj = PostModel.query.order_by(PostModel.create_time.desc())
    elif sort == 2:
        # highlighted posts first, by highlight time descending
        query_obj = db.session.query(PostModel).outerjoin(HighlightPostModel).order_by(HighlightPostModel.create_time.desc(), PostModel.create_time.desc())
    elif sort == 3:
        # by number of likes (likes are not implemented yet)
        query_obj = PostModel.query.order_by(PostModel.create_time.desc())
    elif sort == 4:
        # by comment count
        query_obj = db.session.query(PostModel).outerjoin(CommentModel).group_by(PostModel.id).order_by(func.count(CommentModel.id).desc(), PostModel.create_time.desc())
    if board_id:
        posts = query_obj.filter(PostModel.board_id == board_id).slice(start, end)
        total = query_obj.filter(PostModel.board_id==board_id).count()
    else:
        posts = query_obj.slice(start, end)
        total = query_obj.count()
    pagination = Pagination(bs_version=3, page=page, total=total)
    context = {
        'boards': boards,
        'posts': posts,
        'pagination': pagination,
        'current_board': board_id,
        'current_sort': sort
    }
    return render_template('front/front_index.html', **context)
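A note on the totals above: Query.count() wraps the whole statement in SELECT count(*) FROM (subquery). When only the number of posts matters, counting the primary key directly avoids the wrapper; a sketch (the group_by variant for sort == 4 would still need its own handling):

total = db.session.query(func.count(PostModel.id)).filter(
    PostModel.board_id == board_id).scalar()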
Example #59
0
    def test_executing(self):
        # re-create a new INSERT object
        self.ins = self.users.insert()

        # execute the insert statement
        res = self.conn.execute(self.ins, uid=1, name='jack', fullname='Jack Jones')
        assert(res.inserted_primary_key == [1])
        res = self.conn.execute(self.ins, uid=2, name='wendy', fullname='Wendy Williams')
        assert(res.inserted_primary_key == [2])

        # the res variable is a ResultProxy object, analogous to a DBAPI cursor

        # issue many inserts, the same is possible for update and delete
        self.conn.execute(self.addresses.insert(), [
             {'id': 1, 'user_id': 1, 'email_address': '*****@*****.**'},
             {'id': 2, 'user_id': 1, 'email_address': '*****@*****.**'},
             {'id': 3, 'user_id': 2, 'email_address': '*****@*****.**'},
             {'id': 4, 'user_id': 2, 'email_address': '*****@*****.**'}
         ])

        # test selects on the inserted values
        from sqlalchemy.sql import select

        s = select([self.users])
        res = self.conn.execute(s)
        u1 = res.fetchone()
        u2 = res.fetchone()

        # accessing rows
        assert(u1['name'] == u'jack')
        assert(u1['fullname'] == u'Jack Jones')

        assert(u2['name'] == u'wendy')
        assert(u2['fullname'] == u'Wendy Williams')

        assert(u1[1] == u1['name'])
        assert(u1[2] == u1['fullname'])

        assert(u2[1] == u2['name'])
        assert(u2[2] == u2['fullname'])

        # be sure to close the result set
        res.close()

        # use cols to access rows
        res = self.conn.execute(s)
        u3 = res.fetchone()
        u4 = res.fetchone()

        assert(u3[self.users.c.name] == u1['name'])
        assert(u3[self.users.c.fullname] == u1['fullname'])

        assert(u4[self.users.c.name] == u2['name'])
        assert(u4[self.users.c.fullname] == u2['fullname'])

        # reference individual columns in select clause
        s = select([self.users.c.name, self.users.c.fullname])
        res = self.conn.execute(s)
        u3 = res.fetchone()
        u4 = res.fetchone()

        assert(u3[self.users.c.name] == u1['name'])
        assert(u3[self.users.c.fullname] == u1['fullname'])

        assert(u4[self.users.c.name] == u2['name'])
        assert(u4[self.users.c.fullname] == u2['fullname'])

        # test joins
        # cartesian product
        usrs = [row for row in self.conn.execute(select([self.users]))]
        addrs = [row for row in self.conn.execute(select([self.addresses]))]
        prod = [row for row in self.conn.execute(select([self.users, self.addresses]))]
        assert(len(prod) == len(usrs) * len(addrs))

        # inner join on id
        s = select([self.users, self.addresses]).where(self.users.c.uid == self.addresses.c.user_id)
        inner = [row for row in self.conn.execute(s)]
        assert(len(inner) == 4)

        # operators between column objects and other column objects/literals
        expr = self.users.c.uid == self.addresses.c.user_id
        assert('my_users.uid = addresses.user_id' == str(expr))
        # see how Teradata concatenates two strings
        assert(str((self.users.c.name + self.users.c.fullname).compile(bind=self.engine)) ==
               'my_users.name || my_users.fullname')

        # built-in conjunctions
        from sqlalchemy.sql import and_, or_

        s = select([(self.users.c.fullname +
                     ", " +
                     self.addresses.c.email_address).label('titles')]).where(
                         and_(
                             self.users.c.uid == self.addresses.c.user_id,
                             self.users.c.name.between('m', 'z'),
                             or_(
                                 self.addresses.c.email_address.like('*****@*****.**'),
                                 self.addresses.c.email_address.like('*****@*****.**')
                             )
                         )
                     )
        # print(s)
        res = self.conn.execute(s)
        for row in res:
            assert(str(row[0]) == u'Wendy Williams, [email protected]')

        # more joins
        # the ON condition is auto-generated from the ForeignKey
        assert(str(self.users.join(self.addresses)) ==
               'my_users JOIN addresses ON my_users.uid = addresses.user_id')

        # specify the join ON condition
        self.users.join(self.addresses,
                        self.addresses.c.email_address.like(self.users.c.name + '%'))

        # use select_from to specify the tables and the ON condition
        s = select([self.users.c.fullname]).select_from(
            self.users.join(self.addresses, self.addresses.c.email_address.like(self.users.c.name + '%')))
        res = self.conn.execute(s)
        assert(len(res.fetchall()) == 3)

        # left outer joins
        s = select([self.users.c.fullname]).select_from(self.users.outerjoin(self.addresses))
        # outer join works with teradata dialect (unlike oracle dialect < version9)

        assert(str(s) == str(s.compile(dialect=self.dialect)))

        # test bind params (positional)

        from sqlalchemy import text, bindparam, String
        s = self.users.select(self.users.c.name.like(
                                bindparam('username', type_=String)+text("'%'")))
        res = self.conn.execute(s, username='******').fetchall()
        assert(len(res) == 1)

        # functions
        from sqlalchemy.sql import func, column

        # certain function names are known by sqlalchemy
        assert(str(func.current_timestamp()) == 'CURRENT_TIMESTAMP')

        # functions can be used in the select
        res = self.conn.execute(select(
            [func.max(self.addresses.c.email_address, type_=String).label(
                'max_email')])).scalar()
        assert(res == '*****@*****.**')

        # table functions: a function taking params x, y and returning columns q, z, r
        # useful for nested queries and subqueries with dynamic params
        calculate = select([column('q'), column('z'), column('r')]).\
            select_from(
                         func.calculate(
                           bindparam('x'),
                           bindparam('y')
                         )
                       )
        calc = calculate.alias()
        s = select([self.users]).where(self.users.c.uid > calc.c.z)
        # expected SQL:
        # SELECT my_users.uid, my_users.name, my_users.fullname
        # FROM my_users, (SELECT q, z, r
        #                 FROM calculate(:x, :y)) AS anon_1
        # WHERE my_users.uid > anon_1.z
        # instantiate the func
        calc1 = calculate.alias('c1').unique_params(x=17, y=45)
        calc2 = calculate.alias('c2').unique_params(x=5, y=12)

        s = select([self.users]).where(self.users.c.uid.between(calc1.c.z, calc2.c.z))
        parms = s.compile().params

        assert('x_1' in parms and 'x_2' in parms)
        assert('y_1' in parms and 'y_2' in parms)
        assert(parms['x_1'] == 17 and parms['y_1'] == 45)
        assert(parms['x_2'] == 5 and parms['y_2'] == 12)

        # order by asc
        stmt = select([self.users.c.name]).order_by(self.users.c.name)
        res = self.conn.execute(stmt).fetchall()

        assert('jack' == res[0][0])
        assert('wendy' == res[1][0])

        # order by desc
        stmt = select([self.users.c.name]).order_by(self.users.c.name.desc())
        res = self.conn.execute(stmt).fetchall()

        assert('wendy' == res[0][0])
        assert('jack' == res[1][0])

        # group by
        stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\
            select_from(self.users.join(self.addresses)).\
            group_by(self.users.c.name)

        res = self.conn.execute(stmt).fetchall()

        assert(res[1][0] == 'jack')
        assert(res[0][0] == 'wendy')
        assert(res[0][1] == res[1][1])

        # group by having
        stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\
            select_from(self.users.join(self.addresses)).\
            group_by(self.users.c.name).\
            having(func.length(self.users.c.name) > 4)

        res = self.conn.execute(stmt).fetchall()

        assert(res[0] == ('wendy', 2))

        # distinct
        stmt = select([self.users.c.name]).\
            where(self.addresses.c.email_address.contains(self.users.c.name)).distinct()

        res = self.conn.execute(stmt).fetchall()

        assert(len(res) == 2)
        assert(res[0][0] != res[1][0])

        # limit
        stmt = select([self.users.c.name, self.addresses.c.email_address]).\
            select_from(self.users.join(self.addresses)).\
            limit(1)

        res = self.conn.execute(stmt).fetchall()

        assert(len(res) == 1)

        # offset

        # test union and except
        from sqlalchemy.sql import except_, union

        u = union(
            self.addresses.select().where(self.addresses.c.email_address == '*****@*****.**'),
            self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')),)  # .order_by(self.addresses.c.email_address)
        # print(u)
        # res = self.conn.execute(u)  # fails: the ORDER BY on a union expects a positional integer here
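        # a possible workaround (untested sketch): order the union by column
        # position, passing the positional integer as literal SQL via text()
        # res = self.conn.execute(u.order_by(text('1')))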

        u = except_(
              self.addresses.select().where(self.addresses.c.email_address.like('%@%.com')),
              self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')))
        res = self.conn.execute(u).fetchall()
        assert(len(res) == 1)

        u = except_(
               union(
                  self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')),
                  self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**'))
               ).alias().select(), self.addresses.select(self.addresses.c.email_address.like('*****@*****.**'))
        )

        res = self.conn.execute(u).fetchall()
        assert(len(res) == 1)

        # scalar subqueries
        stmt = select([func.count(self.addresses.c.id)]).where(self.users.c.uid == self.addresses.c.user_id).as_scalar()

        # we can place stmt as any other column within another select
        res = self.conn.execute(select([self.users.c.name, stmt])).fetchall()

        # res is a list of tuples, one per user
        assert(len(res) == 2)

        u1 = res[0]
        u2 = res[1]

        assert(len(u1) == len(u2))
        assert(u1[0] == u'jack')
        assert(u1[1] == u2[1])
        assert(u2[0] == u'wendy')

        # we can label the inner query
        stmt = select([func.count(self.addresses.c.id)]).\
            where(self.users.c.uid == self.addresses.c.user_id).\
            label("address_count")

        res = self.conn.execute(select([self.users.c.name, stmt])).fetchall()
        assert(len(res) == 2)

        u1 = res[0]
        u2 = res[1]

        assert(len(u1) == 2)
        assert(len(u2) == 2)

        # inserts, updates, deletes
        stmt = self.users.update().values(fullname="Fullname: " + self.users.c.name)
        res = self.conn.execute(stmt)

        assert('name_1' in res.last_updated_params())
        assert(res.last_updated_params()['name_1'] == 'Fullname: ')

        stmt = self.users.insert().values(name=bindparam('_name') + " .. name")
        res = self.conn.execute(stmt, [{'uid': 4, '_name': 'name1'}, {'uid': 5, '_name': 'name2'}, {'uid': 6, '_name': 'name3'}, ])

        # updates
        stmt = self.users.update().where(self.users.c.name == 'jack').values(name='ed')
        res = self.conn.execute(stmt)

        assert(res.rowcount == 1)
        assert(res.returns_rows is False)

        # update many with bound params
        stmt = self.users.update().where(self.users.c.name == bindparam('oldname')).\
            values(name=bindparam('newname'))
        res = self.conn.execute(stmt, [
                   {'oldname': 'jack', 'newname': 'ed'},
                   {'oldname': 'wendy', 'newname': 'mary'},
        ])

        assert(res.returns_rows is False)
        assert(res.rowcount == 1)

        res = self.conn.execute(select([self.users]).where(self.users.c.name == 'ed'))
        r = res.fetchone()
        assert(r['name'] == 'ed')

        # correlated updates
        stmt = select([self.addresses.c.email_address]).\
            where(self.addresses.c.user_id == self.users.c.uid).\
            limit(1)
        # this fails with a syntax error because of LIMIT; Teradata needs TOP/SAMPLE instead
        # Note: TOP can't appear in a subquery
        # res = self.conn.execute(self.users.update().values(fullname=stmt))
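        # a hedged alternative (sketch, not executed here): avoid LIMIT in the
        # subquery by collapsing it to a single row with an aggregate
        # stmt = select([func.min(self.addresses.c.email_address)]).\
        #     where(self.addresses.c.user_id == self.users.c.uid)
        # res = self.conn.execute(self.users.update().values(fullname=stmt))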

        # multiple table updates
        stmt = self.users.update().\
            values(name='ed wood').\
            where(self.users.c.uid == self.addresses.c.id).\
            where(self.addresses.c.email_address.startswith('ed%'))

        # this fails: Teradata expects UPDATE ... FROM ... SET ... WHERE, not UPDATE ... SET ... FROM ... WHERE
        # res = self.conn.execute(stmt)

        stmt = self.users.update().\
            values({
               self.users.c.name: 'ed wood',
               self.addresses.c.email_address: '*****@*****.**'
            }).\
            where(self.users.c.uid == self.addresses.c.id).\
            where(self.addresses.c.email_address.startswith('ed%'))

        # fails here but works on MySQL; should the Teradata dialect support this?
        # res = self.conn.execute(stmt)
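        # a hedged alternative (sketch, not executed here): split the multi-table
        # update into single-table updates driven by subqueries
        # self.conn.execute(self.users.update().
        #     values(name='ed wood').
        #     where(self.users.c.uid.in_(
        #         select([self.addresses.c.id]).
        #         where(self.addresses.c.email_address.startswith('ed')))))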

        # deletes
        self.conn.execute(self.addresses.delete())
        self.conn.execute(self.users.delete().where(self.users.c.name > 'm'))

        # matched row counts
        # updates and deletes report the number of rows matched by the WHERE clause
        res = self.conn.execute(self.users.delete())
        assert(res.rowcount == 1)
Example #60
0
def get_users_today(date):
    # return the number of rows in USERS_TABLE whose date column equals the given date
    with db.connect() as conn:
        result = conn.execute(
            select([func.count(USERS_TABLE.c.id)
                    ]).where(USERS_TABLE.c.date == date))
        return result.fetchone()[0]
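
A minimal usage sketch, assuming db is a SQLAlchemy engine and USERS_TABLE.c.date holds DATE values:

    import datetime

    # hypothetical call site; pass a value matching the column's type
    signups = get_users_today(datetime.date.today())
    print('users today:', signups)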