Example #1
    def stats(self, survey_id):
        """Get stats for a survey."""
        result = (
            self.session
            .query(
                func.max(Survey.created_on),
                func.min(Submission.save_time),
                func.max(Submission.save_time),
                func.count(Submission.id),
            )
            .select_from(Submission)
            .join(Survey)
            # TODO: ask @jmwohl what this line is supposed to do
            # .filter(User.id == self.current_user_model.id)
            .filter(Submission.survey_id == survey_id)
            .one()
        )

        response = {
            "created_on": result[0],
            "earliest_submission_time": result[1],
            "latest_submission_time": result[2],
            "num_submissions": result[3]
        }
        return response
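
A variant of Example #1's query with labeled aggregates, so the response can be built by name instead of positional result[0]..result[3] indexing; a minimal sketch assuming the same Survey and Submission models:

    def stats(self, survey_id):
        """Get stats for a survey, reading aggregates by label."""
        # Label each aggregate so the returned row can be read by name.
        result = (
            self.session
            .query(
                func.max(Survey.created_on).label("created_on"),
                func.min(Submission.save_time).label("earliest_submission_time"),
                func.max(Submission.save_time).label("latest_submission_time"),
                func.count(Submission.id).label("num_submissions"),
            )
            .select_from(Submission)
            .join(Survey)
            .filter(Submission.survey_id == survey_id)
            .one()
        )
        return result._asdict()  # keyed row -> plain dict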
Example #2
    def annotation_objects_in_frame(self, frame):
        """
        Returns annotation objects related to this video that are visible at the given frame.
        AnnotationValues are lazily loaded, unlike in annotation_objects_in_frame_intervals().

        SQL:
            SELECT annotation_objects....., min(annotation_values.frame_from) AS min_1, max(annotation_values.frame_from) AS max_1
            FROM annotation_objects
            INNER JOIN annotation_values ON annotation_objects.id = annotation_values.annotation_object_id
            WHERE annotation_objects.video_id = %s
            GROUP BY annotation_objects.id
            HAVING min(annotation_values.frame_from) <= %s AND max(annotation_values.frame_from) >= %s
            ORDER BY min(annotation_values.frame_from), max(annotation_values.frame_from), annotation_objects.id

        :rtype: list of (AnnotationObject, int, int)
        """

        q = database.db.session.query(AnnotationObject, func.min(AnnotationValue.frame_from), func.max(AnnotationValue.frame_from))
        q = q.filter_by(video_id=self.id)
        q = q.join(AnnotationObject.annotation_values)
        q = q.group_by(AnnotationObject.id)
        q = q.having((func.min(AnnotationValue.frame_from) <= frame) & (func.max(AnnotationValue.frame_from) >= frame))
        q = q.order_by(func.min(AnnotationValue.frame_from), func.max(AnnotationValue.frame_from), AnnotationObject.id)
        q = q.all()

        return q
Example #3
    def summarise(cls, start_ts=0, end_ts=0):
        """"Gets usage from their snapshots between start_ts and end_ts.

        Maximal usage of the period is returned.
        Grouped by filesystem.
        """
        id_query = Snapshot.id_between(start_ts, end_ts)

        # 1. Soft and hard quotas are not changed very often.
        # 2. There are relatively few Host, Filesystem and Owner records,
        #    so link them in code instead of joining them into the usage rows,
        #    whose count is very high.
        query = Usage.query.filter(Usage.snapshot_id.in_(id_query)).\
            group_by(Usage.filesystem_id).\
            with_entities(Usage.filesystem_id,
                          func.max(Usage.soft).label('soft'),
                          func.max(Usage.hard).label('hard'),
                          func.max(Usage.usage).label('usage'))

        fq = Filesystem.query.join(Host).\
            with_entities(Filesystem.id, Host.name, Filesystem.name).all()
        file_systems = {}
        for fs in fq:
            file_systems[fs[0]] = fs[1:]

        fields = ['host', 'filesystem', 'soft', 'hard', 'usage']
        rslt = []

        for q in query.all():
            hn, fn = file_systems[q[0]]
            mappings = (hn, fn, q[1], q[2], q[3])
            rslt.append(dict(zip(fields, mappings)))
        return rslt
Example #4
    def summarise(self, start_ts=0, end_ts=0):
        """"Gets usage of a virtual volume between start_ts and end_ts.

        Maximal usage of the period is returned.
        """
        snapshot_ids = Snapshot.id_between(start_ts, end_ts)
        id_query = VirtualVolumeUsage.query.\
            filter(VirtualVolumeUsage.virtual_volume_id == self.id).\
            filter(VirtualVolumeUsage.snapshot_id.in_(snapshot_ids)).\
            with_entities(VirtualVolumeUsage.id)

        query = VirtualVolumeUsage.query.\
            filter(VirtualVolumeUsage.id.in_(id_query)).\
            group_by(VirtualVolumeUsage.owner_id).\
            with_entities(VirtualVolumeUsage.owner_id,
                          func.max(VirtualVolumeUsage.quota),
                          func.max(VirtualVolumeUsage.files),
                          func.max(VirtualVolumeUsage.usage))

        fields = ['owner', 'quota', 'files', 'usage']
        rslt = []

        for q in query.all():
            values = list(q)
            # Almost all usages have no owner; query the owner directly only when needed.
            if values[0]:
                values[0] = Owner.query.get(q[0]).name
            rslt.append(dict(zip(fields, values)))
        return rslt
Example #5
    def summarise(cls, start_ts=0, end_ts=0):
        """"Gets usage from their snapshots between start_ts and end_ts.

        Maximal usage of the period is returned.
        """
        id_query = Snapshot.id_between(start_ts, end_ts)

        query = cls.query.filter(cls.snapshot_id.in_(id_query)).\
            group_by(cls.virtual_volume_id, cls.owner_id).\
            with_entities(cls.virtual_volume_id, cls.owner_id,
                          func.max(cls.quota).label('quota'),
                          func.max(cls.files).label('files'),
                          func.max(cls.usage).label('usage'))

        fq = VirtualVolume.query.join(Filesystem).\
            with_entities(VirtualVolume.id, Filesystem.name, VirtualVolume.name).all()
        file_systems = {}
        for fs in fq:
            file_systems[fs[0]] = fs[1:]

        # Not all virtual volumes have an owner.
        owners = dict(Owner.query.with_entities(Owner.id, Owner.name).all())

        fields = ['filesystem', 'virtual_volume', 'owner', 'quota', 'files', 'usage']
        rslt = []

        for q in query.all():
            fn, vn = file_systems[q[0]]
            owner = owners[q[1]] if q[1] else ''
            mappings = (fn, vn, owner, q[2], q[3], q[4])
            rslt.append(dict(zip(fields, mappings)))
        return rslt
Example #6
    def __get_subquery(self, *args, ord_by=None):
        def add_joined_search(field_name):
            joined = db(Search.index, func.min(Search.text).label('text'),
                        func.min(Search.table_name).label('table_name'),
                        index=subquery_search.subquery().c.index).filter(
                    Search.kind.in_(tuple(field_name))).group_by(Search.index)
            return joined

        subquery_search = db(Search.index.label('index'),
                             func.sum(Search.relevance).label('relevance'),
                             func.min(Search.table_name).label('table_name'),
                             func.min(Search.md_tm).label('md_tm'),
                             func.max(Search.position).label('position'),
                             func.max(Search.text).label('text')).filter(
                or_(*self.__get_search_params(*args))).group_by('index')
        if type(ord_by) in (str, list, tuple):
            order = self.__get_order('text', 'text')
            subquery_search = add_joined_search(ord_by)
        elif type(ord_by) == int:
            ord_to_str = self.__order_by_to_str[ord_by]
            order = self.__get_order(ord_to_str, ord_to_str)
        else:
            order = self.__get_order('relevance', 'relevance')
        if 'md_tm' in str(order):
            subquery_search = subquery_search.order_by(order)
        else:
            subquery_search = subquery_search.order_by(order).order_by(
                    self.__get_order('md_tm', 'md_tm'))
        return subquery_search
Example #7
    def __init__(self, engine=None, start_date='1925-12-31', end_date='',
                limit=None, all_vars=None, **kwargs):
        super(CCMNamesQuery, self).__init__(engine, limit)
        logging.info("---- Creating a CCM-MSENAMES query session. ----")

        msenames = self.tables['msenames']
        ccmxpf_linktable = self.tables['ccmxpf_linktable']

        id_vars = [msenames.c.permno, msenames.c.permco,
                     ccmxpf_linktable.c.gvkey, msenames.c.comnam]

        query = sa.select(id_vars+\
                        [func.min(msenames.c.namedt).label('sdate'),
                        func.max(msenames.c.nameendt).label('edate')],
                    group_by = id_vars,
                    order_by = id_vars,
                    limit= self.limit).\
            where(ccmxpf_linktable.c.linktype.startswith('L')).\
            where(ccmxpf_linktable.c.linkprim.in_(['P','C'])).\
            where(ccmxpf_linktable.c.usedflag==1).\
            where((ccmxpf_linktable.c.linkdt <= msenames.c.namedt) |
                  (ccmxpf_linktable.c.linkdt == None)).\
            where((msenames.c.nameendt <= ccmxpf_linktable.c.linkenddt) |
                  (ccmxpf_linktable.c.linkenddt == None)).\
            where(msenames.c.permno == ccmxpf_linktable.c.lpermno).\
            where(msenames.c.permco == ccmxpf_linktable.c.lpermco)

        if start_date:
            query = query.having(func.min(msenames.c.namedt) >= start_date)

        if end_date:
            query = query.having(func.max(msenames.c.nameendt) <= end_date)

        logging.debug(query)
        self.query = query
Example #8
    def mls_draft_format(self, dtype):
        """
        Return number of rounds in draft, given draft type.

        :param dtype: Draft type (AcquisitionType)
        :return:
        """
        return self.session.query(func.max(PlayerDrafts.round), func.max(PlayerDrafts.selection)).filter(
            PlayerDrafts.path == dtype, PlayerDrafts.year == self.year).one()
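
Hypothetical usage: .one() returns a single keyed row, so the two aggregates unpack directly (the store object and dtype value here are assumptions):

    max_round, max_selection = store.mls_draft_format(dtype)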
Example #9
def backup_duration(bddate):
  s = select([pool.c.name, func.min(job.c.starttime), func.max(job.c.endtime)],
             use_labels=True).where(
                 and_(job.c.poolid == pool.c.poolid,
                      cast(job.c.schedtime, Date) <= datetime.fromtimestamp(float(bddate)),
                      cast(job.c.schedtime, Date) >= datetime.fromtimestamp(float(bddate)) - timedelta(days=1))
             ).group_by(pool.c.name, job.c.schedtime)
  bd = db.execute(s).fetchall()
  bd_result = {}
  for bpool in bd:
    bd_result.update({ bpool[0]: { 'start': bpool[1], 'end': bpool[2] } })
  s = select([func.min(job.c.starttime), func.max(job.c.endtime)],
             use_labels=True).where(job.c.poolid == pool.c.poolid)
  _min_date, _max_date = db.execute(s).fetchone()
  min_date = int(mktime((strptime(str(_min_date), "%Y-%m-%d %H:%M:%S"))))
  max_date = int(mktime((strptime(str(_max_date), "%Y-%m-%d %H:%M:%S"))))
  return render_template('backup_duration.html', title="Backup duration time",
                         bd_result=bd_result, bddate=int(bddate),
                         min_date=min_date, max_date=max_date)
Example #10
    def annotation_object_next(self, current_frame, current_annotation_object=None, to_future=True):
        """
        When current_annotation_object is None, the nearest AnnotationObject in the future (with respect to current_frame) is returned.
        With current_annotation_object given, "next" AnnotationObject is returned, i.e. an object with higher ID in the current frame
        or the nearest in the future.

        If to_future is false, the search is done in the past instead of the future.

        Returned tuple contains the next object and its starting and ending frame.

        :rtype: (AnnotationObject, int, int)
        """

        q = database.db.session.query(AnnotationObject, func.min(AnnotationValue.frame_from), func.max(AnnotationValue.frame_from))
        q = q.filter_by(video_id=self.id)
        q = q.join(AnnotationObject.annotation_values)
        q = q.group_by(AnnotationObject.id)

        if to_future:
            if current_annotation_object is None:
                # nearest AnnotationObject in future
                q = q.having(func.min(AnnotationValue.frame_from) > current_frame)
                q = q.order_by(func.min(AnnotationValue.frame_from), AnnotationObject.id)
            else:
                # nearest AnnotationObject in future, or in the same frame with bigger ID
                current_id = current_annotation_object.id
                q = q.having( (func.min(AnnotationValue.frame_from) > current_frame) |
                              ((AnnotationObject.id > current_id) & (func.min(AnnotationValue.frame_from) <= current_frame) & (func.max(AnnotationValue.frame_from) >= current_frame))
                            )
                q = q.order_by(func.min(AnnotationValue.frame_from), AnnotationObject.id)
        else:
            if current_annotation_object is None:
                # nearest AnnotationObject in past
                q = q.having(func.max(AnnotationValue.frame_from) < current_frame)
                q = q.order_by(desc(func.max(AnnotationValue.frame_from)), desc(AnnotationObject.id))
            else:
                # nearest AnnotationObject in past, or in the same frame with lower ID
                current_id = current_annotation_object.id
                q = q.having( (func.max(AnnotationValue.frame_from) < current_frame) |
                              ((AnnotationObject.id < current_id) & (func.min(AnnotationValue.frame_from) <= current_frame) & (func.max(AnnotationValue.frame_from) >= current_frame))
                            )
                q = q.order_by(desc(func.max(AnnotationValue.frame_from)), desc(AnnotationObject.id))

        q = q.limit(1)
        q = q.all()

        if len(q) < 1:
            return None

        if len(q) > 1:
            raise Exception('Returned collection cannot contain more than 1 item.')

        return q[0]
Example #11
 def get_order(order_name, desc_asc, field):
     order_name += "+" if desc_asc == "desc" else "-"
     result = {
         "text+": lambda field_name: desc(func.max(getattr(Search, field_name, Search.text))),
         "text-": lambda field_name: asc(func.max(getattr(Search, field_name, Search.text))),
         "md_tm+": lambda field_name: desc(func.min(getattr(Search, field_name, Search.md_tm))),
         "md_tm-": lambda field_name: asc(func.min(getattr(Search, field_name, Search.md_tm))),
         "relevance+": lambda field_name: desc(func.sum(getattr(Search, field_name, Search.relevance))),
         "relevance-": lambda field_name: asc(func.sum(getattr(Search, field_name, Search.relevance))),
         "position+": lambda field_name: desc(func.max(getattr(Search, field_name, Search.position))),
         "position-": lambda field_name: asc(func.max(getattr(Search, field_name, Search.position))),
     }[order_name](field)
     return result
Example #12
    def lastStatus(self, incoming_params):
        response = {}
        try:
            default_params = {'delta': 1, 'instance': 'prod'}
            params = self._create_params(default_params, incoming_params)
            result = []
            max_time = self.db_handler.get_session().query(func.max(self.tables['main'].c.time).label("max_time")).one()
            if max_time[0]:
                max_time = max_time[0]
                query = self.tables['main'].select((self.tables['main'].c.time > max_time - timedelta(hours=params['delta']))&(self.tables['main'].c.instance == params['instance']))
                cursor = query.execute()
                resultProxy = cursor.fetchall()
                for row in resultProxy:
                    result.append(dict(row.items()))
            response = {'data': result,
                        'applied_params': params,
                        'success': True}
        except Exception as e:
            response = {'data': result,
                        'incoming_params': incoming_params,
                        'default_params': [[key, default_params[key], type(default_params[key])] for key in default_params],
                        'success': False,
                        'error': type(e).__name__ + ': ' + str(e)}

        return response
Example #13
    def get_biggest_donations(cls, limit=None, offset=None):
        """Getter for biggest donations.

        Donations from the same person are grouped.

        Args:
            limit: Maximum number of donations to be returned.
            offset: Offset of the result.

        Returns:
            Tuple with two items. First is the total number of donations. Second
            is a list of donations sorted by amount with a specified offset.
        """
        query = db.session.query(
            cls.first_name.label("first_name"),
            cls.last_name.label("last_name"),
            cls.editor_name.label("editor_name"),
            func.max(cls.payment_date).label("payment_date"),
            func.sum(cls.amount).label("amount"),
            func.sum(cls.fee).label("fee"),
        )
        query = query.filter(cls.anonymous == False)
        query = query.group_by(cls.first_name, cls.last_name, cls.editor_name)
        query = query.order_by(desc("amount"))
        count = query.count()  # Total count should be calculated before limits
        if limit is not None:
            query = query.limit(limit)
        if offset is not None:
            query = query.offset(offset)
        return count, query.all()
Example #14
    def test_restart_simple_run(self):
        _do_run_simple_test_run(self, self.temp_dir, self.config, end_year=1983)
        runs_manager = RunManager(self.config)
        config = _get_run_config(temp_dir=self.temp_dir)
        runs_manager.update_environment_variables(run_resources=config)

        run_activity = runs_manager.services_db.get_table("run_activity")
        s = select([func.max(run_activity.c.run_id)])
        run_id = runs_manager.services_db.execute(s).fetchone()[0]

        s = select([run_activity.c.status], whereclause=run_activity.c.run_id == run_id)
        status = runs_manager.services_db.execute(s).fetchone()[0]

        expected = "done"
        self.assertEqual(status, expected)

        runs_manager.restart_run(run_id, restart_year=1981, project_name="eugene_gridcell", skip_urbansim=False)

        s = select([run_activity.c.status], whereclause=run_activity.c.run_id == run_id)
        status = runs_manager.services_db.execute(s).fetchone()[0]

        expected = "done"
        self.assertEqual(status, expected)

        # Restarting without running urbansim should not re-run that year.
        # TODO: test that no models are run this time.
        runs_manager.restart_run(run_id, restart_year=1982, project_name="eugene_gridcell", skip_urbansim=True)
        s = select([run_activity.c.status], whereclause=run_activity.c.run_id == run_id)
        status = runs_manager.services_db.execute(s).fetchone()[0]

        expected = "done"
        self.assertEqual(status, expected)

        self.cleanup_test_run()
Example #15
 def set_order_to_last(self, context):
     """Set order of the line to maximum rank + 1."""
     self._set_order(
         self.query.session.query(
             func.coalesce(func.max(self._order_column), 0))
         .filter(context)
         .as_scalar() + 1)
Example #16
def updateDriver(opener, driver_id):
    vehiculo = driver_id
    ultima_fecha = session.query(func.max(Data.date)).filter_by(vehicle=vehiculo).first()
    print(vehiculo, ultima_fecha)
    if ultima_fecha[0] is not None:
        fecha_desde = datetime.strptime(ultima_fecha[0], '%Y-%m-%d %H:%M:%S')
        fecha_desde = fecha_desde - timedelta(hours=3)
        fecha_desde = fecha_desde + timedelta(seconds=1)  # add one second so the search starts one second after the last record
        fecha_hasta = fecha_desde + timedelta(hours=5)
        fecha_desde_unix = mktime(fecha_desde.timetuple())
        fecha_hasta_unix = mktime(fecha_hasta.timetuple())
        try:
            xlsFileObject = downloadXls(opener, fecha_desde_unix, fecha_hasta_unix, vehiculo)
            rows = parseXls(xlsFileObject)
            insertRows(rows, vehiculo)
        except Exception:
            print("No new data was recorded")
    else:
        print("Database is empty")
        # With nothing in the DB, search online from today at 0:00 up to the current hour
        fechas = FechaUpdate()

        fecha_desde_unix = fechas[0]
        fecha_hasta_unix = fechas[1]
        try:
            xlsFileObject = downloadXls(opener, fecha_desde_unix, fecha_hasta_unix, vehiculo)
            rows = parseXls(xlsFileObject)
            insertRows(rows, vehiculo)
        except Exception:
            print("No information was found")
Example #17
    def __init__(self, sess, unfiltered, filt_crit, tt, window_size=WINDOW_SIZE):
        self.sess = sess
        self.unfiltered = unfiltered
        self.filt_crit = filt_crit
        self.tt = tt
        self.window_size = window_size

        self.skipped = []

        # select-only, can't be used for updates
        self.filtered_s = filtered = select(unfiltered.c).where(filt_crit).alias("filtered")

        self.selectable = (
            select(
                [
                    filtered.c.size,
                    func.count().label("inode_count"),
                    func.max(filtered.c.has_updates).label("has_updates"),
                ]
            )
            .group_by(filtered.c.size)
            .having(and_(literal_column("inode_count") > 1, literal_column("has_updates") > 0))
        )

        # This is higher than selectable.first().size, in order to also clear
        # updates without commonality.
        self.upper_bound = self.sess.query(self.unfiltered.c.size).order_by(-self.unfiltered.c.size).limit(1).scalar()
Example #18
def upload_media():
    uploaded_file = request.files.getlist('file')[0]
    filename = secure_filename(uploaded_file.filename)
    track_id = request.form['track_id']
    upload_directory = "{}/{}".format("media", str(request.form['track_id']))

    track = ContentTrack.query.filter_by(id=track_id).first_or_404()

    max_order = db.session.query(
        func.max(ContentUploads.order).label("max_order")
    ).filter(ContentUploads.track_id == track.id).one().max_order

    file_data = {}
    file_data['uploaded_by'] = current_user.id
    file_data['name'] = filename
    file_data['type_id'] = track.type_id
    file_data['track_id'] = track_id

    if max_order:
        file_data['order'] = max_order + 1
    else:
        file_data['order'] = 1

    file_data['uri'] = save_uploaded_file(uploaded_file, upload_directory, filename)

    content_media = ContentUploads(**file_data)  # create new object from data

    db.session.add(content_media)
    db.session.commit()

    return json.dumps(file_data)
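
The None check on max_order can be folded into the query with func.coalesce, the same pattern as Example #15; a sketch assuming the same ContentUploads model:

    file_data['order'] = db.session.query(
        func.coalesce(func.max(ContentUploads.order), 0)
    ).filter(ContentUploads.track_id == track.id).scalar() + 1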
Example #19
def show_stats():
    joint = query.for_field(Transaction.account).filter_by(person='')

    timespan = query.all() \
        .add_columns(func.max(Transaction.date).label('maxDate')) \
        .add_columns(func.min(Transaction.date).label('minDate'))
    datespan = timespan[0][0] - timespan[0][1]

    merchants = query.group_format(Transaction.merchant)
    top_merchants = OrderedDict()
    for key in list(merchants.keys())[0:20]:
        top_merchants[key] = merchants[key]

    amount_data = [
        make_template_data(query.group_format(Transaction.person), "per person"),
        make_template_data(query.group_format(Transaction.account, joint), "joint transactions by account"),
        make_template_data(query.group_format(Transaction.category), "all transactions by category"),
        make_template_data(top_merchants, "top 20 merchants")
    ]

    return flask.render_template('stats.html',
                                 datespan=datespan,
                                 number_of_days=datespan.total_seconds() / (60*60*24),
                                 number_of_months=datespan.total_seconds() / (60*60*24*30),
                                 amount_data=amount_data)
Example #20
def matching_from_cliche_tvtropes_edges():
    # assume there are a few cliche-tvtropes edges

    with app.app_context():
        with session.begin():
            session.query(ClicheTvtropesEdge).update({'available': True})

            while True:
                max_conf = session \
                    .query(func.max(ClicheTvtropesEdge.confidence)) \
                    .filter_by(available=True) \
                    .scalar()
                if not max_conf:
                    break

                matching = session \
                    .query(ClicheTvtropesEdge) \
                    .filter_by(confidence=max_conf, available=True) \
                    .first()

                cliche_work = matching.cliche_work
                tvtropes_entity = matching.tvtropes_entity

                session.query(ClicheTvtropesEdge) \
                       .filter_by(cliche_work=cliche_work,
                                  available=True) \
                       .update({'available': False})

                session.query(ClicheTvtropesEdge) \
                       .filter_by(tvtropes_entity=tvtropes_entity,
                                  available=True) \
                       .update({'available': False})

                yield matching
Example #21
    def statArtistReleaseCount(self):
        artistAverageCount = self.dbSession.query(func.avg(Artist.releases)).scalar()
        artistMinimumCount = self.dbSession.query(func.min(Artist.releases)).scalar()
        artistMaximumCount = self.dbSession.query(func.max(Artist.releases)).scalar()
        artistMaximumId = ''
        artistMaxName = ''

        return {
            'title': 'Artist Release Count',
            'class': 'fa-user',
            'average': {
                'type': 'string',
                'value': artistAverageCount,
                'detail': {
                    'text': ''
                }
            },
            'minimum': {
                'type': 'string',
                'value': artistMinimumCount,
                'detail': {
                    'text': 'Many'
                }
            },
            'maximum': {
                'type': 'artist',
                'value': artistMaximumCount,
                'detail': {
                    'id': artistMaximumId,
                    'thumbnailUrl': '/images/artist/thumbnail/' + artistMaximumId,
                    'detailUrl': '/artist/' + artistMaximumId,
                    'text': artistMaxName
                }
            }
        }
Example #22
	def extractData(self):
		self.logger.info('Connecting to database')
		engine = create_engine(self.config['database'])
		meta = MetaData()
		meta.bind = engine
		self.job = Table('job', meta, autoload=True)
		self.job_status = Table('job_status', meta, autoload=True)
		self.job_status_type_description = Table('job_status_type_description', meta, autoload=True)

		job=self.job
		job_status=self.job_status

		s=self.job_status_type_description.select()
		self.type_db_values = s.execute().fetchall()

		self.queue_db_values = self.getJobSummary(job.c.queue)
		self.user_db_values = self.getJobSummary(job.c.userId, job.c.localUser)
		self.node_db_values = self.getJobSummary(job.c.workerNode)

		self.logger.info('Generating job list')
		node_job_status = self.config['node_job_status'].split(',')

		maximum = func.max(self.job_status.c.id).label('m')
		s1 = select([maximum]).group_by(job_status.c.jobId).alias('a')
		s2 = select([job.c.id, job.c.lrmsAbsLayerJobId, job.c.workerNode, job_status.c.type,
			job_status.c.time_stamp]).select_from(job.join(job_status).join(s1,job_status.c.id==text('m'))). \
			where(and_(job_status.c.type.in_(node_job_status)))

		self.job_db_values = s2.execute().fetchall()

		return {}
Example #23
    def index(self):
        c.name = config['app_conf']['site_name']
        c.title = 'Welcome'
        c.messages = []

        c.session = Session()

        file_q = c.session.query(File)

        first_file = file_q.first()

        if first_file is None:
            return render('/derived/show/please_scan.html')

        c.repo_url = first_file.root
        c.total_size = c.session.query(func.sum(File.size)).one()[0]
        c.file_count = file_q.count()
        c.avg_size = c.session.query(func.avg(File.size)).one()[0]

        res = c.session.execute(select([File.ext,
            func.count(File.ext)]).group_by(File.ext).order_by(func.count(File.ext).desc())).fetchone()

        c.ext_string = res[0]
        c.ext_count = res[1]

        c.revision = c.session.query(func.max(Revision.id)).one()[0]

        c.asset_count = c.session.query(Asset).count()
        c.collection_count = c.session.query(Collection).count()

        return render('/derived/show/index.html')
Example #24
    def end(self):
        today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
        # Adjust idColumn
        maxId = db.getSession().query(func.max(Task.id)).one()[0]
        self.idColumn.width = max(2, len(str(maxId)))

        # Adjust titleColumn
        self.titleColumn.width = self.maxTitleWidth
        totalWidth = sum([x.width for x in self.columns]) + len(self.columns)
        if totalWidth >= self.termWidth:
            self.titleColumn.width -= (totalWidth - self.termWidth) + len(self.columns)
        self.titleColumn.formater = TitleFormater(self.titleColumn.width, self.cryptoMgr)

        # Print table
        for sectionName, taskList in self.taskLists:
            dateSplitters = [(1, "day"), (7, "week"), (30, "month"), (30 * 4, "quarter"), (365, "year")]
            splitterRange, splitterName = dateSplitters.pop()
            splitterText = None
            self._renderTaskListHeader(sectionName)
            for task in taskList:
                while self.splitOnDate and task.creationDate > today - timedelta(splitterRange):
                    splitterText = "Last %s" % splitterName
                    if len(dateSplitters) > 0:
                        splitterRange, splitterName = dateSplitters.pop()
                    else:
                        self.splitOnDate = False

                if splitterText:
                    print(C.GREEN + splitterText.center(totalWidth) + C.RESET, file=self.out)
                    splitterText = None

                self._renderTaskListRow(task)
Example #25
    def last_purchase(self):

        last = db.session.query( func.max(Orders.created) ).filter( Orders.user_id == self.id ).scalar()
        if not last:
            return '-'

        return last
Example #26
def _last_assoc_timestamps(session, dataset):
    """
    Get the timestamps of the latest assocxtrsource per runningcatalog and band.

    We can't get the assoc IDs directly, because they are unique and can't
    be put in the group by. You can get the eventual assoc IDs by joining
    this query again with the assoc table (see the last_assoc_per_band func).

    args:
        session (session): A SQLAlchemy session
        dataset (Dataset): A SQLALchemy dataset model

    returns: a SQLAlchemy subquery containing runcat id, timestamp and band id
    """
    a = aliased(Assocxtrsource, name='a_timestamps')
    e = aliased(Extractedsource, name='e_timestamps')
    r = aliased(Runningcatalog, name='r_timestamps')
    i = aliased(Image, name='i_timestamps')
    return session.query(r.id.label('runcat'),
                         func.max(i.taustart_ts).label('max_time'),
                         i.band_id.label('band')
                         ). \
        select_from(r). \
        join(a, r.id == a.runcat_id). \
        join(e, a.xtrsrc_id == e.id). \
        join(i, i.id == e.image_id). \
        group_by(r.id, i.band_id). \
        filter(i.dataset == dataset). \
        subquery(name='last_assoc_timestamps')
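
The join-back mentioned in the docstring might look like the sketch below (hypothetical; the real last_assoc_per_band is not shown here). It joins the subquery against the assoc, extractedsource and image tables again, matching on runcat, band and the max timestamp to recover the unique assoc IDs; and_ is assumed to be imported from sqlalchemy:

def last_assoc_per_band(session, dataset):
    last = _last_assoc_timestamps(session, dataset)
    a = aliased(Assocxtrsource, name='a_last')
    e = aliased(Extractedsource, name='e_last')
    i = aliased(Image, name='i_last')
    # An assoc is "latest" when its image timestamp equals the per-band maximum.
    return session.query(a.id.label('assoc'), last.c.runcat, last.c.band). \
        select_from(a). \
        join(e, a.xtrsrc_id == e.id). \
        join(i, i.id == e.image_id). \
        join(last, and_(last.c.runcat == a.runcat_id,
                        last.c.band == i.band_id,
                        last.c.max_time == i.taustart_ts))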
Example #27
def matching_from_cliche_wikipedia_edges():
    # assume there are a few cliche-wikipedia edges

    with app.app_context():
        with session.begin():
            session.query(ClicheWikipediaEdge).update({'available': True})

            while True:
                max_conf = session \
                    .query(func.max(ClicheWikipediaEdge.confidence)) \
                    .filter_by(available=True) \
                    .scalar()
                if not max_conf:
                    break

                matching = session \
                    .query(ClicheWikipediaEdge) \
                    .filter_by(confidence=max_conf, available=True) \
                    .first()

                cliche_work = matching.cliche_work
                wikipedia_work = matching.wikipedia_work

                session.query(ClicheWikipediaEdge) \
                       .filter_by(cliche_work=cliche_work,
                                  available=True) \
                       .update({'available': False})

                session.query(ClicheWikipediaEdge) \
                       .filter_by(wikipedia_work=wikipedia_work,
                                  available=True) \
                       .update({'available': False})

                yield matching
Example #28
def get_random(session, model, field="id"):
    """
    Returns a random object from a model based on the given field.
    The field must have positive integral values (id is the usual culprit here).
    May take longer if rows have been deleted (it will try to load non-existent rows).
    """
    bad_vals = []
    max_val = session.query(func.max(getattr(model, field))).scalar()

    def values():
        if len(bad_vals) == max_val:
            yield None
        while True:
            val = random.randint(1, max_val)
            if val not in bad_vals:
                # Can only be valid once, append
                bad_vals.append(val)
                yield val

    for value in values():
        if value is None:
            return None
        obj = has_model(session, model, **{field: value})
        if obj is not None:
            return obj
Example #29
    def version_lookup(self, node, before=inf, cluster=0, all_props=True):
        """Lookup the current version of the given node.
           Return a list with its properties:
           (serial, node, hash, size, type, source, mtime,
            muser, uuid, checksum, cluster)
           or None if the current version is not found in the given cluster.
        """

        v = self.versions.alias('v')
        if not all_props:
            s = select([v.c.serial])
        else:
            s = select([v.c.serial, v.c.node, v.c.hash,
                        v.c.size, v.c.type, v.c.source,
                        v.c.mtime, v.c.muser, v.c.uuid,
                        v.c.checksum, v.c.cluster])
        if before != inf:
            c = select([func.max(self.versions.c.serial)],
                       self.versions.c.node == node)
            c = c.where(self.versions.c.mtime < before)
        else:
            c = select([self.nodes.c.latest_version],
                       self.nodes.c.node == node)
        s = s.where(and_(v.c.serial == c,
                         v.c.cluster == cluster))
        r = self.conn.execute(s)
        props = r.fetchone()
        r.close()
        if props:
            return props
        return None
Example #30
    def summary(self):
        subquery = (
            self.db.query(
                SQLPackage.name,
                func.max(SQLPackage.last_modified).label("last_modified"),
            )
            .group_by(SQLPackage.name)
            .subquery()
        )
        rows = self.db.query(
            SQLPackage.name, SQLPackage.last_modified, SQLPackage.summary
        ).filter(
            (SQLPackage.name == subquery.c.name)
            & (SQLPackage.last_modified == subquery.c.last_modified)
        )

        # Dedupe because two packages may share the same last_modified
        seen_packages = set()
        packages = []
        for row in rows:
            if row[0] in seen_packages:
                continue
            seen_packages.add(row[0])
            packages.append(
                {"name": row[0], "last_modified": row[1], "summary": row[2]}
            )
        return packages
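
On backends with window functions, the same latest-row-per-name selection can be written without the self-join or the Python-side dedupe; a sketch assuming the same SQLPackage model:

    def summary(self):
        # Rank rows within each package name, newest first; rank 1 is the latest.
        rn = func.row_number().over(
            partition_by=SQLPackage.name,
            order_by=SQLPackage.last_modified.desc(),
        ).label("rn")
        sub = self.db.query(
            SQLPackage.name, SQLPackage.last_modified, SQLPackage.summary, rn
        ).subquery()
        rows = self.db.query(
            sub.c.name, sub.c.last_modified, sub.c.summary
        ).filter(sub.c.rn == 1)
        return [
            {"name": name, "last_modified": last_modified, "summary": summary}
            for name, last_modified, summary in rows
        ]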
Example #31
    def testNextTraceFrames(self) -> None:
        run = self.fakes.run()
        frames = self._basic_trace_frames()
        sink = self.fakes.sink("sink1")
        self.fakes.saver.add(
            TraceFrameLeafAssoc.Record(trace_frame_id=frames[1].id,
                                       leaf_id=sink.id,
                                       trace_length=1))
        self.fakes.save_all(self.db)

        with self.db.make_session() as session:
            session.add(run)
            session.commit()
            leaf_dicts = (
                # pyre-fixme[6]: Expected `SharedTextKind` but got
                # `typing.Callable(SharedTextKind.SOURCE)` (same for SINK and FEATURE).
                self._all_leaves_by_kind(session, SharedTextKind.SOURCE),
                self._all_leaves_by_kind(session, SharedTextKind.SINK),
                self._all_leaves_by_kind(session, SharedTextKind.FEATURE),
            )

            latest_run_id = (session.query(func.max(
                Run.id)).filter(Run.status == RunStatus.FINISHED).scalar())

            next_frames = TraceOperator.next_forward_trace_frames(
                leaf_dicts, session, latest_run_id, set(), {"sink1"},
                frames[0], set())
            self.assertEqual(len(next_frames), 1)
            self.assertEqual(int(next_frames[0].id), int(frames[1].id))
Example #32
    def get_max_count(self, session, numerator_table, cur_year=None):
        """
        Определить максимальное значение счетчика нумератора.
        Максимальное значение счетчика необходимо производить
        с учетом текущего программного года.
        @param session: Сессия обработки SQLAlchemy.
        @param numerator_table: Таблица нумератора.
        @param cur_year: Текущий год.
            Если не определяется, то берется системный год.
        @return: Максимальное значение счетчика нумератора.
        """
        if cur_year is None:
            cur_year = self.get_actual_year()
        min_date = datetime.date(cur_year, 1, 1)
        max_date = datetime.date(cur_year, 12, 31)

        session_query = scoping.scoped_session(session)
        max_count = session_query.query(
            func.max(numerator_table.c.cur_count).label('max_count')).filter(
                sqlalchemy.between(numerator_table.c.dt_num, min_date,
                                   max_date)).one()[0]
        return max_count
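
func.max returns None when the year has no rows yet, so a caller would typically derive the next counter value like this (hypothetical usage):

    max_count = numerator.get_max_count(session, numerator_table)
    next_count = (max_count or 0) + 1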
Example #33
def w_success():

    board = Board()

    if request.method == "POST":
        title = request.form['title']
        text = request.form['textarea']

        board.title = title
        board.text = text
        board.writer = username()

        board.num = db.session.query(func.max(Board.num)).one()[0]
        if board.num is None:
            board.num = 1
        else:
            board.num += 1

        db.session.add(board)
        db.session.commit()

    return redirect(url_for("list"))
Example #34
    def latest_uuid(self, uuid, cluster):
        """Return the latest version of the given uuid and cluster.

        Return a (path, serial) tuple.
        If cluster is None, all clusters are considered.

        """

        v = self.versions.alias('v')
        n = self.nodes.alias('n')
        s = select([n.c.path, v.c.serial])
        filtered = select([func.max(self.versions.c.serial)])
        filtered = filtered.where(self.versions.c.uuid == uuid)
        if cluster is not None:
            filtered = filtered.where(self.versions.c.cluster == cluster)
        s = s.where(v.c.serial == filtered)
        s = s.where(n.c.node == v.c.node)

        r = self.conn.execute(s)
        row = r.fetchone()
        r.close()
        return row
Example #35
    def lastStatus(self, incoming_params):
        response = {}
        try:
            default_params = {'delta': 8}
            params = self._create_params(default_params, incoming_params)
            result = []
            max_time = self.db_handler.get_session().query(
                func.max(
                    self.tables['main'].c.time_done).label("max_time")).one()
            if max_time[0]:
                max_time = max_time[0]
                query = self.tables['main'].select(
                    self.tables['main'].c.time_done > max_time -
                    timedelta(hours=params['delta']))
                cursor = query.execute()
                resultProxy = cursor.fetchall()
                for row in resultProxy:
                    result.append(dict(row.items()))
            response = {
                'data': result,
                'applied_params': params,
                'success': True
            }
        except Exception as e:
            response = {
                'data':
                result,
                'incoming_params':
                incoming_params,
                'default_params':
                [[key, default_params[key],
                  type(default_params[key])] for key in default_params],
                'success':
                False,
                'error':
                type(e).__name__ + ': ' + str(e)
            }

        return response
Example #36
def visitor_count():
    local_zone = tz.gettz('Asia/Kuala_Lumpur')
    my_zone = tz.gettz()
    user_data = Users.query.filter_by(name=flask_login.current_user.id).first()
    owned_device = db.session.query(Device.id).filter_by(owner=user_data.id)
    date_to_show = db.session.query(func.max(Record.timestamp)).filter(Record.device_id.in_(owned_device)).scalar()
    label = []
    count = []
    if date_to_show:
        data_user = db.session.query(Record, Qr, Device).join(Qr).join(Device).filter_by(owner=user_data.id)
        latest_day = datetime(date_to_show.year, date_to_show.month, date_to_show.day, tzinfo=tz.tzutc()).astimezone(my_zone)
        latest_day = latest_day.replace(tzinfo=local_zone)
        for day in range(7):  # start counting from the earliest day of the week so we get 7 daily counts
            start = latest_day - timedelta(6-day)
            start = start.replace(tzinfo=local_zone)
            end = start + timedelta(1)
            end = end.replace(tzinfo=local_zone)
            record_date = start.strftime('%d/%m/%Y')
            label.append(record_date)
            record_count = data_user.filter(and_(Record.timestamp >= start.astimezone(), Record.timestamp <= end.astimezone())).count()
            count.append(record_count)
    return jsonify({'label': label, 'count': count}), 200
Example #37
    def on_get(self, req, resp):
        session = req.context['session']


        latest_version = session.\
            query(
                label('id', Protocol.id),
                label('max_version', func.max(Protocol.version))).\
            group_by(Protocol.id).\
            subquery()
        protocols = session.query(Protocol).\
            options(joinedload(Protocol.user)).\
            filter(
                Protocol.user_id==req.context['user'].id)
        most_recent_protocols = protocols.\
            join(latest_version, Protocol.id==latest_version.c.id).\
            filter(Protocol.version==latest_version.c.max_version).\
            all()

        protocol_schema = ProtocolSchema(many=True)
        result = protocol_schema.dump(most_recent_protocols)
        resp.context['result'] = result.data
Example #38
def _search_pages(session, search_statement, title_only, next_rank, page_size,
                  context, include_places, include_guides):
    rank, snippet, execute_search_statement = _gen_search_elements(
        search_statement,
        title_only,
        next_rank,
        page_size,
        [PageVersion.title],
        [PageVersion.address],
        [],
        [PageVersion.content],
    )
    if not include_places and not include_guides:
        return []

    latest_pages = (select(func.max(PageVersion.id).label("id")).join(
        Page, Page.id == PageVersion.page_id).where(
            or_(
                (Page.type == PageType.place) if include_places else False,
                (Page.type == PageType.guide) if include_guides else False,
            )).group_by(PageVersion.page_id).subquery())

    pages = execute_search_statement(
        session,
        select(Page, rank,
               snippet).join(PageVersion, PageVersion.page_id == Page.id).join(
                   latest_pages, latest_pages.c.id == PageVersion.id),
    )

    return [
        search_pb2.Result(
            rank=rank,
            place=page_to_pb(page, context)
            if page.type == PageType.place else None,
            guide=page_to_pb(page, context)
            if page.type == PageType.guide else None,
            snippet=snippet,
        ) for page, rank, snippet in pages
    ]
Example #39
    def setup(self) -> Dict[str, Callable]:
        with self.db.make_session() as session:
            latest_run_id = (
                session.query(func.max(Run.id))
                .filter(Run.status == RunStatus.FINISHED)
                .scalar()
            )

            self.sources_dict = self._all_leaves_by_kind(session, SharedTextKind.SOURCE)
            self.sinks_dict = self._all_leaves_by_kind(session, SharedTextKind.SINK)

        if latest_run_id.resolved() is None:
            self.warning(
                "No runs found. "
                f"Try running '{os.path.basename(sys.argv[0])} analyze' first."
            )

        self.current_run_id = latest_run_id
        print("=" * len(self.welcome_message))
        print(self.welcome_message)
        print("=" * len(self.welcome_message))
        return self.scope_vars
Example #40
def create_relation_stats(session, date, logger=None):
    """Add/update relation stats."""

    if logger is None:
        logger = current_app.logger

    (start, end) = date_to_timestamps(date)

    # First kill the stats for the selected date
    deleted_counter = session.query(RelationStats).filter(
        RelationStats.date == date).delete()

    # Calculate stats for selected day
    relation_stats = (session.query(
        literal(date), AircraftBeacon.device_id, AircraftBeacon.receiver_id,
        func.max(AircraftBeacon.quality),
        func.count(AircraftBeacon.timestamp)).filter(
            and_(
                between(AircraftBeacon.timestamp, start, end),
                AircraftBeacon.distance > 1000,
                AircraftBeacon.error_count == 0,
                AircraftBeacon.quality <= MAX_PLAUSIBLE_QUALITY,
                AircraftBeacon.ground_speed > 10,
            )).group_by(literal(date), AircraftBeacon.device_id,
                        AircraftBeacon.receiver_id).subquery())

    # And insert them
    ins = insert(RelationStats).from_select([
        RelationStats.date, RelationStats.device_id, RelationStats.receiver_id,
        RelationStats.quality, RelationStats.beacon_count
    ], relation_stats)
    res = session.execute(ins)
    insert_counter = res.rowcount
    session.commit()
    logger.warning("RelationStats for {}: {} deleted, {} inserted".format(
        date, deleted_counter, insert_counter))

    return "RelationStats for {}: {} deleted, {} inserted".format(
        date, deleted_counter, insert_counter)
Example #41
    def _find_route_stop_effective_dates(cls, session, route_id):
        """
        find effective start date and end date for all stops of the input route, when
        queried against the trip and stop time tables.  Below are a couple of pure SQL queries that
        perform what I'm doing to get said start and end dates:

        # query all route stops with start & end dates
        SELECT t.route_id, st.stop_id, min(date), max(date)
        FROM ott.universal_calendar u, ott.trips t, ott.stop_times st
        WHERE t.service_id = u.service_id
          AND t.trip_id    = st.trip_id
        GROUP BY t.route_id, st.stop_id

        # query all stops start & end dates for a given route (used below in SQLAlchemy)
        SELECT st.stop_id, min(date), max(date)
        FROM ott.universal_calendar u, ott.trips t, ott.stop_times st
        WHERE t.service_id = u.service_id
          AND t.trip_id    = st.trip_id
          AND st.stop_id   = '1'
        GROUP BY st.stop_id

        :return hash table with stop_id as key, and tuple of (stop_id, start_date, end_date) for all route stops
        """
        ret_val = {}

        # step 1: query the route/stop start and end dates, based on stop time table
        from gtfsdb import UniversalCalendar, StopTime, Trip
        q = session.query(StopTime.stop_id, func.min(UniversalCalendar.date), func.max(UniversalCalendar.date))
        q = q.filter(UniversalCalendar.service_id == Trip.service_id)
        q = q.filter(Trip.trip_id  == StopTime.trip_id)
        q = q.filter(Trip.route_id == route_id)
        q = q.group_by(StopTime.stop_id)
        stop_dates = q.all()

        # step 2: make a hash of these dates with the stop id as the key
        for d in stop_dates:
            ret_val[d[0]] = d

        return ret_val
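
For reference, the first SQL query in the docstring (start and end dates for the stops of every route) maps to a very similar SQLAlchemy query; a sketch under the same gtfsdb models:

        q = session.query(Trip.route_id, StopTime.stop_id,
                          func.min(UniversalCalendar.date), func.max(UniversalCalendar.date))
        q = q.filter(UniversalCalendar.service_id == Trip.service_id)
        q = q.filter(Trip.trip_id == StopTime.trip_id)
        q = q.group_by(Trip.route_id, StopTime.stop_id)
        route_stop_dates = q.all()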
Example #42
    def __load_feeds(self):
        """Load Feeds."""

        for feed in Session().query(TblFeed).all():

            RUNTIME.feeds[feed.feed_id] = Feed(feed.feed_id)
            RUNTIME.feeds[feed.feed_id].created = feed.created
            RUNTIME.feeds[feed.feed_id].updated = feed.updated

            if feed.addr:

                pnfdev = Session().query(TblPNFDev) \
                    .filter(TblPNFDev.addr == EtherAddress(feed.addr)) \
                    .first()

                if pnfdev:

                    pnfdevs = getattr(RUNTIME, pnfdev.tbl_type)

                    RUNTIME.feeds[feed.feed_id].pnfdev = \
                        pnfdevs[feed.addr]
                    pnfdev = pnfdevs[feed.addr]
                    pnfdev.feed = RUNTIME.feeds[feed.feed_id]

                else:

                    session = Session()
                    delete = Session().query(TblFeed) \
                        .filter(TblFeed.feed_id == feed.feed_id) \
                        .first()
                    delete.pnfdev = None
                    session.commit()

        query = Session().query(TblFeed,
                                func.max(TblFeed.feed_id).label("max_id"))

        if query.one().max_id:
            self.feed_id = int(query.one().max_id)
Example #43
    def testNextTraceFramesMultipleRuns(self) -> None:
        run1 = self.fakes.run()
        frames = self._basic_trace_frames()
        self.fakes.save_all(self.db)

        run2 = self.fakes.run()
        frames.extend(self._basic_trace_frames())

        sink = self.fakes.sink("sink1")
        self.fakes.saver.add_all([
            TraceFrameLeafAssoc.Record(trace_frame_id=frames[1].id,
                                       leaf_id=sink.id,
                                       trace_length=0),
            TraceFrameLeafAssoc.Record(trace_frame_id=frames[3].id,
                                       leaf_id=sink.id,
                                       trace_length=0),
        ])
        self.fakes.save_all(self.db)

        with self.db.make_session() as session:
            session.add(run1)
            session.add(run2)
            session.commit()

            leaf_dicts = (
                self._all_leaves_by_kind(session, SharedTextKind.SOURCE),
                self._all_leaves_by_kind(session, SharedTextKind.SINK),
                self._all_leaves_by_kind(session, SharedTextKind.FEATURE),
            )

            latest_run_id = (session.query(func.max(
                Run.id)).filter(Run.status == RunStatus.FINISHED).scalar())

            next_frames = TraceOperator.next_trace_frames(
                leaf_dicts, session, latest_run_id, {"sink1"}, frames[2],
                set())
            self.assertEqual(len(next_frames), 1)
            self.assertEqual(int(next_frames[0].id), int(frames[3].id))
Example #44
  def _fetch_project_info_from_db(self):
    '''
    An internal method for fetching data from db

    :returns: A dataframe containing following columns
    
              project_igf_id,
              sample_igf_id,
              expected_read,
              total_read
    '''
    try:
      check_file_path(self.dbconfig_file)
      dbconf = read_dbconf_json(self.dbconfig_file)
      sa = SampleAdaptor(**dbconf)
      sa.start_session()
      query = sa.session.\
              query(Project.project_igf_id,
                    Sample.sample_igf_id,
                    func.max(Sample_attribute.attribute_value).label(self.expected_read_tag),
                    func.sum(Run_attribute.attribute_value).label(self.total_read_tag)
                   ).\
              outerjoin(Sample,Project.project_id==Sample.project_id).\
              outerjoin(Sample_attribute, Sample.sample_id==Sample_attribute.sample_id).\
              outerjoin(Experiment, Sample.sample_id==Experiment.sample_id).\
              outerjoin(Run,Experiment.experiment_id==Run.experiment_id).\
              outerjoin(Run_attribute,Run.run_id==Run_attribute.run_id).\
              filter((Experiment.platform_name.in_(self.platform_list))|(Experiment.platform_name.is_(None))).\
              filter(Sample_attribute.attribute_name==self.expected_read_tag).\
              filter((Run_attribute.attribute_name==self.r1_read_tag)|(Run_attribute.attribute_name.is_(None))).\
              group_by(Sample.sample_igf_id)
      records = sa.fetch_records(query=query,
                                 output_mode='dataframe')
      sa.close_session()
      records[self.total_read_tag] = records[self.total_read_tag].fillna(0).astype(int)
      return records
    except:
      raise
Example #45
def verify_record():
    from sqlalchemy.sql import func
    latest = session.query(func.max(PcrRecord.id)).scalar()
    print "[INFO] The latest record Id:", latest

    if latest > 1:
        current, last = [], []
        curr_event, last_event = [], []
        for instance in session.query(PcrRecord).filter(
                PcrRecord.id == latest).all():
            for index in range(7):
                # getattr is the safe, idiomatic way to read pcr0..pcr6
                current.append(getattr(instance, "pcr" + str(index)))
        for event in instance.event:
            curr_event.append([
                event.number, event.eventType, event.eventDetails,
                event.eventSize
            ])

        for instance in session.query(PcrRecord).filter(
                PcrRecord.id == latest - 1).all():
            for index in range(7):
                last.append(getattr(instance, "pcr" + str(index)))
        for event in instance.event:
            last_event.append([
                event.number, event.eventType, event.eventDetails,
                event.eventSize
            ])

        if current == last:
            print("[INFO] Status Safe")
            return "Safe"
        else:
            print("[INFO] Status UnSafe")
            return "Unsafe"

    else:
        print "[INFO] First time startup. Set as Safe defaultly\n"
        return "Safe"
Example #46
def update_a_index_eod_prices():
    sql = select([AIndexDescription.sec_code])
    df = pd.read_sql(sql, engine)
    sec_codes = df['sec_code'].tolist()
    session = DBSession()

    for sec_code in sec_codes:
        # Query the latest trade date T saved in the database
        last_day_in_db = \
            session.query(func.max(AIndexEodPrice.trade_date)).filter(AIndexEodPrice.sec_code == sec_code).one()[0]
        if last_day_in_db is not None:
            next_trade_date = get_trade_date(last_day_in_db, 1)
        else:
            next_trade_date = get_trade_date(today_yyyymmdd, -750)

        # Only update if the next date to fetch is not later than the latest date available in Wind (i.e. there is data to update)
        if next_trade_date <= last_date_in_wind:
            # Pull data from Wind: multiple fields, one instrument, many days
            temp = w.wsd(sec_code, "close,pct_chg", next_trade_date,
                         last_date_in_wind, "")
            df = pd.DataFrame(temp.Data).T
            df.index = temp.Times
            df.columns = temp.Fields
            df.reset_index(inplace=True)
            df.rename({'index': 'trade_date'}, axis=1, inplace=True)
            df['sec_code'] = sec_code

            # engine.execute(delete(AIndexEodPrice).where(
            #     AIndexEodPrice.trade_date.between(next_trade_date, last_date_in_wind)))  # delete first to avoid duplicates
            df.to_sql(AIndexEodPrice.__tablename__,
                      engine,
                      index=False,
                      if_exists='append')  # write to the table
        else:
            print('update_a_index_eod_prices: no new data for {sec_code}'.format(
                sec_code=sec_code))

    session.close()
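The commented-out delete above is the key to making this loader idempotent. A minimal sketch of the same guard through the ORM session instead of engine.execute, assuming the AIndexEodPrice model and the next_trade_date/last_date_in_wind values from the snippet:

    # Remove any rows already in the refresh window so that re-running
    # the loader does not append duplicates.
    session.query(AIndexEodPrice).\
        filter(AIndexEodPrice.sec_code == sec_code,
               AIndexEodPrice.trade_date.between(next_trade_date, last_date_in_wind)).\
        delete(synchronize_session=False)
    session.commit()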
Example #47
0
    def _populate_pod_summary(self):
        """Helper to generate pod summary data."""
        self.tearDown()
        report_table_name = OCP_REPORT_TABLE_MAP['report']
        summary_table_name = OCP_REPORT_TABLE_MAP['line_item_daily_summary']

        report_table = getattr(self.accessor.report_schema, report_table_name)
        summary_table = getattr(self.accessor.report_schema,
                                summary_table_name)

        start_date = DateAccessor().today_with_timezone('UTC')

        cluster_id = 'testcluster'
        period = self.creator.create_ocp_report_period(
            start_date,
            provider_id=self.ocp_provider_id,
            cluster_id=cluster_id)
        report = self.creator.create_ocp_report(period, start_date)
        for _ in range(25):
            self.creator.create_ocp_usage_line_item(period, report)

        start_date, end_date = self.accessor._session.query(
            func.min(report_table.interval_start),
            func.max(report_table.interval_start)).first()

        start_date = start_date.replace(hour=0,
                                        minute=0,
                                        second=0,
                                        microsecond=0)
        end_date = end_date.replace(hour=0, minute=0, second=0, microsecond=0)

        query = self.accessor._get_db_obj_query(summary_table_name)
        initial_count = query.count()

        self.accessor.populate_line_item_daily_table(start_date, end_date,
                                                     cluster_id)
        self.accessor.populate_line_item_daily_summary_table(
            start_date, end_date, cluster_id)
Example #48
0
    def collect(self):
        if self._db_session is None:
            raise Exception("not found mysql conn for reporting data")
        latest_create_time = self._db_session.query(
            func.max(VMReport.create_time)).scalar()
        # Discard stale data older than the validity period
        if (datetime.now() - latest_create_time
            ).total_seconds() > self.valid_period_days * (60 * 60 * 24):
            logging.warning(
                f"the latest create time is more than {self.valid_period_days} days old"
            )
            self.set_data(None)
            return  # stale data: do not fall through to the query below
        rows = self._db_session.query(VMReport).filter(
            VMReport.create_time == latest_create_time).filter(
                VMReport.vm_powerstate == "poweredOn").all()
        logging.info(f"collect {len(rows)} vms info from the database")

        res = []
        for row in rows:
            res_cooked = self._cook(row)
            if res_cooked:
                res.append(res_cooked)
        self.set_data(res)
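One edge case the collector above does not guard: func.max() yields None on an empty table, which would make the datetime subtraction raise a TypeError. A sketch of the guard, assuming the same VMReport model:

    latest_create_time = self._db_session.query(
        func.max(VMReport.create_time)).scalar()
    if latest_create_time is None:
        logging.warning("no VM reports found in the database")
        self.set_data(None)
        return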
Example #49
0
    def get_max_tracking_record_id(self,
                                   upstream_application_name: str) -> int:
        assert self.tracking_record_class is not None
        application_name_field = (
            self.tracking_record_class.application_name  # type: ignore
        )
        upstream_app_name_field = (
            self.tracking_record_class.
            upstream_application_name  # type: ignore
        )
        pipeline_id_field = (
            self.tracking_record_class.pipeline_id  # type: ignore
        )
        notification_id_field = (
            self.tracking_record_class.notification_id  # type: ignore
        )
        query = self.session.query(func.max(notification_id_field))
        query = query.filter(application_name_field == self.application_name)
        query = query.filter(
            upstream_app_name_field == upstream_application_name)
        query = query.filter(pipeline_id_field == self.pipeline_id)
        value = query.scalar() or 0
        return value
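The `or 0` default can also be computed inside the database with func.coalesce, so the query itself never returns NULL; a sketch against the same tracking-record query:

    from sqlalchemy import func

    query = self.session.query(
        func.coalesce(func.max(notification_id_field), 0))
    # ... same three filters as above ...
    value = query.scalar()  # already 0 when no rows match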
Example #50
0
def show_author():
    if request.method == 'POST':
        author_id = request.form['author_id']
        qry = db.session.query(
            func.max(Author.id).label("max_score"),
            func.min(Author.id).label("min_score"),
        )
        res = qry.one()
        try:
            author_id = int(author_id)
        except ValueError:
            author_id = -1
        max_id = int(res.max_score)
        min_id = int(res.min_score)
        if author_id > max_id or author_id < min_id:
            flash('Author ID does not exist')
            return redirect(url_for('index_2'))
        else:
            return redirect(url_for('show_book', author_id=author_id))
    else:
        authors = Author.query.all()
        books = Book.query.all()
        return render_template('index_2.html', authors=authors, books=books)
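The min/max bounds check above still accepts ids that sit inside the range but have been deleted. A more direct sketch is a primary-key lookup (standard Flask-SQLAlchemy Query.get, which returns None when no row exists):

    author = Author.query.get(author_id)
    if author is None:
        flash('Author ID does not exist')
        return redirect(url_for('index_2'))
    return redirect(url_for('show_book', author_id=author_id))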
Example #51
0
    def on_get(self, req, resp):
        session = req.context['session']
        query = req.params.get('query', '')
        protocol_schema = ProtocolSchema(many=True)

        latest_version = session.\
            query(
                label('id', Protocol.id),
                label('max_version', func.max(Protocol.version))).\
            group_by(Protocol.id).\
            subquery()
        protocols = session.query(Protocol).\
            filter(
                Protocol.public == True,
                Protocol.title.ilike('%' + query + '%'),
            )
        most_recent_protocols = protocols.\
            join(latest_version, Protocol.id==latest_version.c.id).\
            filter(Protocol.version==latest_version.c.max_version).\
            all()

        result = protocol_schema.dump(most_recent_protocols)
        resp.context['result'] = result.data
Example #52
0
    def test_add_to_favourite(self, test_client, user, logged_in_user, db):
        """
        GIVEN a Flask application and user is logged in
        WHEN user adds a (random) recipe to favourites
        THEN check the response is valid and the favourited recipe has been added to the model/table
        """
        from app.models import UserFavouriteRecipes, Recipes

        # The highest recipe_id indicates how many recipes there are. This assumes
        # there are no gaps in recipe_ids up to the highest id; if users can ever
        # upload and delete their own recipes, this will need to change.
        number_of_recipes, = db.session.query(func.max(Recipes.recipe_id)).first()

        rand_favourite = random.randint(1, number_of_recipes)  # generate a random favourite recipe_id to test
        response = add_to_favourites(test_client, rand_favourite)  # add random favourite recipe to favourites
        assert response.status_code == 200
        assert b'failure' not in response.data

        favourited = db.session.query(UserFavouriteRecipes) \
            .filter(UserFavouriteRecipes.user_id == user.id) \
            .filter(UserFavouriteRecipes.recipe_id == rand_favourite) \
            .first()
        assert favourited  # recipe is favourited if this is not None
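If gaps in recipe_ids ever become possible, the test could sample an existing row instead of guessing an integer. A sketch, assuming a backend where the SQL function is random() (SQLite, PostgreSQL; MySQL spells it rand()):

    from sqlalchemy.sql import func

    # Pick a real recipe at random rather than a random id in a range.
    rand_favourite, = db.session.query(Recipes.recipe_id).\
        order_by(func.random()).first()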
Example #53
0
def tbl_years():
    years = {}
    for tbl in registered_models:
        tbl_name = table_name(tbl)
        if hasattr(tbl, "year"):
            if hasattr(tbl, "CACHED_YEARS"):
                years[tbl_name] = {
                    consts.OLDEST: min(tbl.CACHED_YEARS),
                    consts.LATEST: max(tbl.CACHED_YEARS)
                }
            else:
                qry = tbl.query.with_entities(
                    func.max(tbl.year).label("max_year"),
                    func.min(tbl.year).label("min_year"),
                )
                res = qry.one()
                years[tbl_name] = {
                    consts.LATEST: res.max_year,
                    consts.OLDEST: res.min_year
                }
        else:
            years[tbl_name] = None
    return years
Example #54
0
    def test_delete_from_favourites_success(self, test_client, user, logged_in_user, db):
        """
        GIVEN a Flask application and user is logged in
        WHEN user deletes a (random) recipe from favourites
        THEN check the response is valid and the favourited recipe has been removed from the model/table
        """
        from app.models import UserFavouriteRecipes, Recipes
        number_of_recipes, = db.session.query(func.max(Recipes.recipe_id)).first()
        rand_favourite = random.randint(1, number_of_recipes)

        add_to_favourites(test_client, rand_favourite)
        favourited = db.session.query(UserFavouriteRecipes) \
            .filter(UserFavouriteRecipes.user_id == user.id) \
            .filter(UserFavouriteRecipes.recipe_id == rand_favourite) \
            .first()
        assert favourited  # Recipe is added to favourites

        remove_from_favourites(test_client, rand_favourite)
        favourited = db.session.query(UserFavouriteRecipes) \
            .filter(UserFavouriteRecipes.user_id == user.id) \
            .filter(UserFavouriteRecipes.recipe_id == rand_favourite) \
            .first()
        assert not favourited  # Recipe is not added to favourites (removed from favourites)
Example #55
0
    def query_find_by_name(cls, name, tags, session):
        last_ids = session.query(cls.id_file,
                                 cls.name,
                                 func.max(cls.id).label('last_id')) \
            .group_by(cls.id_file, cls.name).subquery()

        query = session.query(cls) \
            .join((last_ids, and_(cls.id_file == last_ids.c.id_file,
                                  cls.name == last_ids.c.name,
                                  cls.id == last_ids.c.last_id))) \
            .join(File, File.id == cls.id_file) \
            .filter(cls.name.like("%{0}%".format(name))) \
            .order_by(cls.name)

        # Update the query with tags if user asked for it
        if tags is not None:
            query = query.join(File.tags)
            for tagid in tags:
                # check if tag exists
                session.query(Tag).filter_by(id=tagid).one()
                query = query.filter(File.tags.any(Tag.id == tagid))

        return query
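On databases with window functions, the same "latest row per (id_file, name)" pattern can skip the self-join. A sketch with row_number(), assuming the same model:

    from sqlalchemy import func
    from sqlalchemy.orm import aliased

    row_num = func.row_number().over(
        partition_by=(cls.id_file, cls.name),
        order_by=cls.id.desc()).label('rn')
    ranked = session.query(cls, row_num).subquery()
    latest = aliased(cls, ranked)
    # Rows with rn == 1 are the newest per (id_file, name).
    query = session.query(latest).filter(ranked.c.rn == 1)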
Example #56
0
    def _calculate_date_range(self):
        """
        The date range is the range of publication dates for the given
        documents.
        """
        if not self.start_date or not self.end_date:
            if self.doc_ids is None:
                raise ValueError(
                    "Need either doc_ids, or both start_date and end_date")

            row = db.session.query(
                func.min(Document.published_at),
                func.max(Document.published_at))\
                .filter(Document.id.in_(self.doc_ids))\
                .first()

            if row and row[0]:
                self.start_date = row[0].date()
                self.end_date = row[1].date()
            else:
                self.start_date = self.end_date = datetime.utcnow()

        self.days = max((self.end_date - self.start_date).days, 1)
Example #57
0
def createContest():
    print("create contest page.")
    if request.method == "GET":
        return render_template("create.html")
    else:
        contest_name = request.form["contest_name"]
        start_date = request.form["start_date"]
        start_time = request.form["start_time"]
        end_date = request.form["end_date"]
        end_time = request.form["end_time"]
        problems_url = request.form.getlist("problem_url")

        res_mx = session.query(
            func.max(pm.Problem.contestID).label("max_contestid")).one()
        max_contestID = res_mx.max_contestid or 0  # None when the table is empty
        print(max_contestID)
        max_contestID += 1

        for url in problems_url:
            pr = pm.Problem()
            pr.problemURL = url
            pr.problem = url.split('/')[5] + url.split('/')[6]
            pr.contest = contest_name
            pr.contestID = max_contestID
            pr.participant = "springroll"
            pr.start_time = start_date + " " + start_time + ":00"
            pr.end_time = end_date + " " + end_time + ":00"
            pr.penalty = 0
            pr.last_updated = start_date + " " + start_time + ":00"

            session.add(pr)
            session.commit()

            print(pr.contestID)

        return redirect(url_for('contest', contestID=max_contestID))
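A sketch of the same id computation with the empty-table default pushed into SQL via func.coalesce, plus a single commit for the whole batch (the read-then-increment race in the original remains):

    max_contestID = session.query(
        func.coalesce(func.max(pm.Problem.contestID), 0)).scalar() + 1
    for url in problems_url:
        ...  # build each Problem as above
        session.add(pr)
    session.commit()  # one commit for the whole contest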
Example #58
0
def other_operate():
    ret = session.query(Users).filter_by(name='alex').all()
    ret = session.query(Users).filter(Users.id > 1, Users.name == 'eric').all()
    ret = session.query(Users).filter(Users.id.between(1, 3),
                                      Users.name == 'eric').all()
    ret = session.query(Users).filter(Users.id.in_([1, 3, 4])).all()
    ret = session.query(Users).filter(~Users.id.in_([1, 3, 4])).all()
    ret = session.query(Users).filter(
        Users.id.in_(session.query(Users.id).filter_by(name='eric'))).all()
    ret = session.query(Users).filter(and_(Users.id > 3,
                                           Users.name == 'eric')).all()
    ret = session.query(Users).filter(or_(Users.id < 2,
                                          Users.name == 'eric')).all()
    ret = session.query(Users).filter(
        or_(Users.id < 2, and_(Users.name == 'eric', Users.id > 3),
            Users.extra != "")).all()
    print('and_ and or_:', ret)

    ret = session.query(Users).filter(Users.name.like('e%')).all()
    print('%:', ret)

    ret = session.query(Users).order_by(Users.name.desc(),
                                        Users.id.asc()).all()
    print('order:', ret)

    ret = session.query(func.max(Users.id), func.sum(Users.id),
                        func.min(Users.id)).group_by(
                            Users.name).having(func.min(Users.id) > 2).all()
    print('group:', ret)

    ret = session.query(Person).join(Favor, isouter=True).all()
    print('join:', ret)

    q1 = session.query(Users.name).filter(Users.id > 2)
    q2 = session.query(Favor.caption).filter(Favor.nid < 2)
    ret = q1.union_all(q2).all()
    print('union:', ret)
Example #59
0
    def get_system_user(self):
        """
        Get the system user; if more than one exists, return only the one with the highest priority.
        :return:
        """
        system_user_list = []
        with DBContext('r') as session:
            system_user_cont = session.query(SystemUser).count()
            if system_user_cont == 1:
                # Only one user: return it directly and use it as the jump-host user
                system_user_data = session.query(SystemUser).all()
                for data in system_user_data:
                    data_dict = model_to_dict(data)
                    data_dict['create_time'] = str(data_dict['create_time'])
                    data_dict['update_time'] = str(data_dict['update_time'])
                    if data_dict['platform_users']:
                        data_dict['platform_users'] = data_dict.get(
                            'platform_users', '').split(',')
                    system_user_list.append(data_dict)

            else:
                priority_max_info = session.query(
                    func.max(SystemUser.priority)).all()
                # the query result is a list of tuples
                priority_max_num = priority_max_info[0][0]
                system_user_data = session.query(SystemUser).filter(
                    SystemUser.priority == priority_max_num).first()
                data_dict = model_to_dict(system_user_data)
                data_dict['create_time'] = str(data_dict['create_time'])
                data_dict['update_time'] = str(data_dict['update_time'])
                if data_dict['platform_users']:
                    data_dict['platform_users'] = data_dict.get(
                        'platform_users', '').split(',')
                system_user_list.append(data_dict)

        # print(system_user_list)
        return system_user_list
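The two-step max-then-filter lookup in the else branch can be collapsed into a single query; a sketch against the same model:

    # Highest priority wins; equivalent to filtering on func.max(priority).
    system_user_data = session.query(SystemUser).\
        order_by(SystemUser.priority.desc()).first()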
Example #60
0
def assign_order():
    if request.method == "GET":
        techs = getTech()
        return render_template("order/assign_order.html", techs = techs)
    else:
        serial = request.form.get("serial")
        place = request.form.get("place")
        tech_code = request.form.get("tech")
        
        select_statement = device_essentials.select().where(
            device_essentials.c.serial == serial
        ).where(device_essentials.c.status == "operational").limit(1)
        result = db.execute(select_statement)
        device = result.fetchone()
        
        if device is None:
            return apology("Device serial is wrong or device is scraped", 400)
        
        table = ppm_map[device[2]]
        
        insert1 = order_essentials.insert().values(serial = serial, place = place,
                type = device[2], department = session.get("department"),
                tech_code = tech_code, date_issued = func.cast(func.now(), DATE))
        try:
            db.execute(insert1)
        except sqlalchemy.exc.IntegrityError:
            print("Error duplicate enteries")
            return apology("Can't enter duplicate enteries", 403)
        
        sel_command = order_essentials.select().with_only_columns([func.max(order_essentials.c.code)]
          ).where(order_essentials.c.serial == serial).limit(1)
        sel_command = db.execute(sel_command)
        r_code = sel_command.fetchone()[0]
        
        insert2 = table.insert().values(r_code = r_code)
        db.execute(insert2)
        
        return render_template("control/main.html", message = "Order assigned successfully")