Code example #1
    def get_meter_data(self, ct_sensors: list) -> Dict[str, float]:
        """Get Meter Data with Specific CT Sensors

        Args:
            ct_sensors (list): CT sensors

        Returns:
            Dict[str, float]: historical power consumption data, keyed by ISO-formatted timestamps
        """
        if self.args["interval"] == "hour":
            power = func.round(cast(func.avg(MeterData.current) * VOLTAGE, DECIMAL), 2).label("power")
            time_format = "YYYY-MM-DD HH24:00:00"
        elif self.args["interval"] == "day":
            power = func.round(cast(func.avg(MeterData.current) * VOLTAGE * 24, DECIMAL), 2).label("power")
            time_format = "YYYY-MM-DD 00:00:00"
        else:
            raise Exception(f"Invalid interval: {self.args['interval']}")
        date = self.simplify_date(MeterData.created, time_format)
        criteria = [
            MeterData.created.between(self.args["start_time"], self.args["end_time"]),
            MeterData.sensor.in_(ct_sensors),
        ]
        # CT Data
        meter_ct = (
            SESSION.query(MeterData.sensor, date, power).filter(*criteria).group_by(date, MeterData.sensor).subquery()
        )
        # Group Data By Date
        meter_sum = (
            SESSION.query(func.sum(meter_ct.c.power).label("power"), meter_ct.c.datetime.label("datetime"))
            .group_by(meter_ct.c.datetime)
            .order_by(meter_ct.c.datetime)
            .all()
        )
        meter_data = {meter.datetime.isoformat(): float(meter.power) for meter in meter_sum}
        return meter_data
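
Note: simplify_date() is not shown in this snippet. A minimal sketch of one plausible implementation, assuming PostgreSQL (the "HH24" patterns above are to_char-style) and bearing in mind that the callers invoke .isoformat() on the result, so it has to keep returning datetimes rather than formatted strings:

    def simplify_date(self, column, time_format):
        # date_trunc buckets to the hour/day like the to_char-style format
        # strings above, but still yields datetime objects
        unit = "hour" if "HH24" in time_format else "day"
        return func.date_trunc(unit, column).label("datetime")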
Code example #2
def temperature():
    param = request.args.to_dict()
    # Verify that there are parameters
    if len(param) < 1:
        return jsonify({"error": "Data not found. Parameters 'start' and/or 'end' date must be provided."}), 404

    # Create database session
    session = Session(engine)

    if param.get('start') and param.get('end'):
        data = session.query(measurement.date, func.min(measurement.tobs),
                        func.round(func.avg(measurement.tobs), 1),
                        func.max(measurement.tobs)).\
                        filter(measurement.date >= param['start']).\
                        filter(measurement.date <= param['end']).\
                        group_by(measurement.date).all()
    elif param.get('start'):
        data = session.query(measurement.date, func.min(measurement.tobs),
                        func.round(func.avg(measurement.tobs), 1),
                        func.max(measurement.tobs)).\
                        filter(measurement.date >= param['start']).\
                        group_by(measurement.date).all()
    else:
        # an 'end' with no 'start' would otherwise leave `data` undefined below
        session.close()
        return jsonify({"error": "Parameter 'start' must be provided."}), 404

    # Close the session
    session.close()

    # Put data into a list of dicts
    temperature_list= [{"date": row[0], "min": row[1], "max": row[3], "average": row[2]} for row in data]

    if len(temperature_list) < 1:
        return jsonify({"error": "Data not found"}), 404
    
    return jsonify(temperature_list), 200
Code example #3
def main(argv):

    print(Supportparams.exists('/home/clinical/DEMUX//150703_D00134_0206_AH5HGFBCXX/Unaligned4/support.txt')) # 515
    print(Datasource.exists('/home/clinical/DEMUX//150703_D00134_0206_AH5HGFBCXX/Unaligned4/Basecall_Stats_H5HGFBCXX/Demultiplex_Stats.htm')) #515
    print(Flowcell.exists('H5HGFBCXX')) # 512
    print(Demux.exists(512, 'Y101,I8,I8,Y101')) # 474
    print(Project.exists('240540')) #552
    print(Sample.exists('ADM1136A1_XTA08', 'CAGCGTTA')) #6651
    print(Unaligned.exists(18, 487, 1)) #13902

    print(xstats.parse('/mnt/hds/proj/bioinfo/DEMUX/151009_ST-E00198_0059_BH2V2YCCXX'))

    print(Backup.exists('151117_D00410_0187_AHWYGMADXX'))
    print(Backup.exists('141212_D00134_0166_AHB058ADXX'))
    print(Backup.exists('131219_D00134_0057_BH829YADXX'))
    print(Backup.exists('131219_D00134_0057_BH829YADXX', 'tape005_006'))
    print(Backup.exists('131219_D00134_0057_BH829YADXX', 'tape007_005'))

    print(Backuptape.exists('tape005_006'))

    rs = SQL.query(
        func.year(Datasource.rundate).label('year'),\
        func.month(Datasource.rundate).label('month'),\
        func.count(Datasource.datasource_id.distinct()).label('runs'),\
        func.round(func.sum(Unaligned.readcounts / 2000000), 2).label('mil reads'),\
        func.round(func.sum(Unaligned.readcounts) / (func.count(Datasource.datasource_id.distinct())*2000000), 1).label('mil reads fc lane')
    ).\
    outerjoin(Demux).\
    outerjoin(Flowcell).\
    outerjoin(Unaligned).\
    group_by(func.year(Datasource.rundate), func.month(Datasource.rundate)).\
    order_by(func.year(Datasource.rundate).desc(), func.month(Datasource.rundate).desc(), func.day(Datasource.rundate).desc()).\
    all()

    print(rs)
Code example #4
async def get_campaign_reports(db: Session, zone: GeoZone, since: datetime,
                               role: str):

    query = db.query(
            MailChimpCampaign.id.label('id'), \
            AdherentMessages.sent_at.label('date'), \
            (Adherents.first_name + ' ' + Adherents.last_name).label('auteur'), \
            AdherentMessages.subject.label('titre'), \
            MailChimpCampaignReport.email_sent.label('nbEmails'), \
            MailChimpCampaignReport.open_unique.label('nbOuvertures'), \
            func.round(MailChimpCampaignReport.open_unique / MailChimpCampaignReport.email_sent, 4).label('txOuverture'), \
            MailChimpCampaignReport.click_unique.label('nbCliques'), \
            func.round(MailChimpCampaignReport.click_unique / MailChimpCampaignReport.email_sent, 4).label('txClique'), \
            MailChimpCampaignReport.unsubscribed.label('nbDesabonnements'), \
            func.round(MailChimpCampaignReport.unsubscribed / MailChimpCampaignReport.email_sent, 4).label('txDesabonnement')) \
        .join(MailChimpCampaign.message) \
        .filter(AdherentMessages.status == 'sent') \
        .filter(AdherentMessages.type == role) \
        .filter(AdherentMessages.sent_at >= since) \
        .join(MailChimpCampaign.report) \
        .join(AdherentMessages.author)

    query = filter_role(db, query, [zone], role)

    return {
        'zone': zone.name,
        'depuis': since,
        'campagnes': query.order_by(AdherentMessages.sent_at.desc()).all()
    }
Code example #5
File: routes.py Project: PrudyvusP/fleadert
def render_user(user_id):
    user = db.session.query(User).get_or_404(user_id)
    if not user.is_boss:
        non_executed_tasks = user.non_executed_tasks
        if 'mysql' in current_app.config['SQLALCHEMY_DATABASE_URI']:
            avg_execute_time = db.session.query(func.round(func.avg(
                func.datediff(Task.completed_on, Task.created_on)), 2)) \
                .filter(Task.executor_id == user_id) \
                .scalar()
            ratio = db.session.query(func.round(func.avg(
                (func.datediff(Task.deadline, Task.created_on) - func.datediff(Request.executed_on, Task.created_on)) /
                func.datediff(Task.deadline, Task.created_on)), 2)) \
                .join(Request, Task.requests) \
                .filter(Request.denied_on.is_(None), Task.executor_id == user_id) \
                .scalar()
        else:
            query = text(
                'SELECT ROUND(AVG(CAST((JULIANDAY(t.completed_on) - JULIANDAY(t.created_on)) AS Integer)), 2) '
                'FROM tasks t WHERE t.executor_id = :user_id')
            avg_execute_time = db.engine.execute(query, {
                'user_id': user_id
            }).scalar()
            ratio_query = text(
                'SELECT ROUND(AVG((CAST(JULIANDAY(t.deadline) - JULIANDAY(t.created_on) AS REAL) - '
                'CAST(JULIANDAY(r.executed_on) - JULIANDAY(t.created_on) AS REAL)) / '
                'CAST(JULIANDAY(t.deadline) - JULIANDAY(t.created_on) AS REAL)), 2) '
                'FROM tasks t '
                'JOIN requests r ON r.task_id = t.id '
                'WHERE r.denied_on IS NULL AND t.executor_id = :user_id')
            ratio = db.engine.execute(ratio_query, {
                'user_id': user_id
            }).scalar()
        try:
            user_contribution = round(
                len(user.executionship) / db.session.query(func.count(
                    Task.id)).filter(~Task.completed_on.is_(None)).scalar() *
                100, 2)
        except ZeroDivisionError:
            user_contribution = None
        return render_template('user.html',
                               title="Личный кабинет",
                               user=user,
                               avg_execute_time=avg_execute_time,
                               non_executed_tasks=non_executed_tasks[0:5],
                               user_contribution=user_contribution,
                               ratio=ratio)
    else:
        all_tasks = db.session.query(func.count(
            Task.id)).filter(Task.status != 'выполнена').scalar()
        authorship_tasks = user.authorship
        closership_tasks = user.closership
        tasks_on_consider = user.get_tasks_on_consider()
        return render_template('user.html',
                               title="Личный кабинет",
                               user=user,
                               authorship_tasks=authorship_tasks,
                               closership_tasks=closership_tasks,
                               top_urgent_tasks=tasks_on_consider[0:5],
                               all_tasks=all_tasks,
                               count_tasks_on_consider=len(tasks_on_consider))
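
Note: the raw-SQL SQLite branch above can also be written with SQLAlchemy expressions. A sketch under the assumption that the same Task model maps the tasks table; func.julianday renders SQLite's julianday(), playing the role func.datediff plays on MySQL:

    from sqlalchemy import Integer, cast, func

    avg_execute_time = db.session.query(
        func.round(func.avg(cast(
            func.julianday(Task.completed_on) - func.julianday(Task.created_on),
            Integer)), 2)
    ).filter(Task.executor_id == user_id).scalar()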
Code example #6
File: app.py Project: vpthomp/dc-crime-visualized
def aggregate():
    sel = [
        Crime.DATE,
        func.count(Crime.DATE),
        func.round(Crime.AVG, 2),
        func.round(Crime.MAX, 2),
        func.round(Crime.MIN, 2), Crime.PctIllum
    ]

    results = db.session.query(*sel).group_by(Crime.DATE).order_by(
        Crime.DATE).all()

    agg = {
        "date": [],
        "crime_count": [],
        "temp_avg": [],
        "temp_max": [],
        "temp_min": [],
        "lunar_illum": []
    }

    for result in results:
        agg["date"].append(result[0])
        agg["crime_count"].append(result[1])
        agg["temp_avg"].append(result[2])
        agg["temp_max"].append(result[3])
        agg["temp_min"].append(result[4])
        agg["lunar_illum"].append(result[5])

    # print(agg)

    return jsonify(agg)
Code example #7
 def fetchtrack(self, session,
                tracker_id, start, end,
                ):
     """Fetch track data from db"""
     tid = Speed.device_info_serial
     dt = Speed.date_time
     # terrain height is tracker altitude minus tracker altitude above ground level
     elev = Speed.altitude - Speed.altitude_agl
     q = session.query(tid, dt,
                       Speed.longitude,
                       Speed.latitude,
                       Speed.altitude,
                       func.round(cast(Speed.speed_2d, Numeric), 2),
                       func.round(Speed.trajectSpeed, 2),
                       func.round(Speed.direction, 2),
                       func.round(Speed.trajectDirection, 2),
                       elev,
                       )
     q = q.filter(tid == tracker_id)
     q = q.filter(dt.between(start.isoformat(), end.isoformat()))
     q = q.filter(Speed.longitude.isnot(None))
     q = q.filter(Speed.latitude.isnot(None))
     q = q.filter(Speed.userflag == 0)
     q = q.order_by(dt)
     return q
Code example #8
File: windcharts.py Project: rhemz/falconweather
def query_groups(session, cutoff):
    q = session.query(
        func.round(WindMeasurement.avg_mph).label('avg'),
        func.count(WindMeasurement.avg_mph).label('count')).filter(
            WindMeasurement.epoch >= func.unix_timestamp(func.now()) -
            cutoff).group_by(func.round(WindMeasurement.avg_mph))

    averages = [(int(row.avg), int(row.count)) for row in q.all()]
    return averages
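
Note: func.unix_timestamp and func.now lean on MySQL, so this query is not portable. A standard way to eyeball the SQL it emits before committing to a dialect (literal_binds inlines the parameters):

    print(q.statement.compile(compile_kwargs={"literal_binds": True}))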
Code example #9
 def ItemStock(cls,
               costCenterCode,
               date=None,
               itemcodes=None):
     # resolve defaults at call time: a `date=datetime.date.today()` default
     # is frozen at import time, and a `[]` default is shared between calls
     date = date or datetime.date.today()
     itemcodes = itemcodes or []
     return pd.read_sql(
         cls.query.filter(*cls.__FiltersOfStock(
             costCenterCode, date, itemcodes)).with_entities(
                 cls.ItemCode,
                 func.round(
                     func.sum(func.coalesce(cls.Qty, 0)),
                     6).label('stockQty')).group_by(cls.ItemCode).having(
                         func.round(func.sum(func.coalesce(cls.Qty, 0)), 6)
                         > 1 / 1000000).statement, cls.getBind())
Code example #10
    def _calculate_clone_stats(self, sample_id, min_cdr3, max_cdr3,
                               include_outliers, only_full_reads):
        clone_statistics = {}
        for name, stat in _clone_contexts.items():
            clone_statistics[name] = CloneContextStats(seqs=None, **stat)

        # TODO: This should be automatically generated from _dist_fields
        query = self._session.query(
            Sequence.clone_id,
            func.round(func.avg(Sequence.v_match)).label('v_match'),
            func.round(func.avg(Sequence.j_match)).label('j_match'),
            func.round(func.avg(Sequence.j_length)).label('j_length'),
            Sequence.v_gene,
            Sequence.j_gene,
            func.count(Sequence.seq_id).label('copy_number'),
            func.round(
                func.avg(Sequence.v_length + Sequence.num_gaps)
            ).label('v_length'),
            func.round(
                func.avg(100 * Sequence.v_match / Sequence.v_length)
            ).label('v_identity'),
            Sequence.cdr3_num_nts.label('cdr3_length'),
            SelectionPressure.sigma_fwr.label('sp_fwr'),
            SelectionPressure.sigma_cdr.label('sp_cdr'),
        ).join(
            SelectionPressure,
            and_(
                SelectionPressure.clone_id == Sequence.clone_id,
                SelectionPressure.sample_id == Sequence.sample_id
            ),
            isouter=True
        ).filter(
            Sequence.sample_id == sample_id,
            ~Sequence.clone_id.is_(None)
        )

        if only_full_reads:
            query = query.filter(Sequence.partial == 0)
        query = query.group_by(Sequence.clone_id)

        for clone in query:
            clone_info = self._session.query(Clone.cdr3_nt).filter(
                Clone.id == clone.clone_id).first()
            in_frame = len(clone_info.cdr3_nt) % 3 == 0
            stop = '*' in lookups.aas_from_nts(clone_info.cdr3_nt)
            functional = in_frame and not stop
            for name, stat in clone_statistics.items():
                stat.add_if_match(clone, in_frame, stop, functional)

        self._add_stat(clone_statistics, sample_id, include_outliers,
                       only_full_reads)
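
Note: a minimal sketch of the TODO above, deriving the rounded-average columns from a list of field names instead of spelling each one out; the field list here is a stand-in, not the project's actual _dist_fields:

    avg_fields = ["v_match", "j_match", "j_length"]  # stand-in for _dist_fields
    avg_columns = [
        func.round(func.avg(getattr(Sequence, name))).label(name)
        for name in avg_fields
    ]
    # then: self._session.query(Sequence.clone_id, *avg_columns, ...)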
Code example #12
File: gbkmut.py Project: SteveWENG/FLASKAPI
    def ClosingStock(cls, dbsites):
        try:
            li = pd.DataFrame([])
            if not dbsites:
                return li

            for l in dbsites:
                warehouses = [x.get('warehouse') for x in l.get('sites', [])]
                db = l.get('db', '')
                if len(warehouses) == 0 or db == '':
                    continue

                tmpsql = cls.query.join(grtbk, cls.COACode == grtbk.COACode) \
                    .filter(cls.Warehouse.in_(warehouses),
                            cls.TransType.in_(('N', 'C', 'P', 'X')),
                            grtbk.omzrek == 'G',
                            func.abs(func.coalesce(gbkmut.Qty, 0)) > 1 / 1000000) \
                    .with_entities(cls.Warehouse, cls.ItemCode.label('ItemCode'),
                                   func.round(func.sum(func.coalesce(cls.Qty, 0)), 6).label('Qty'),
                                   func.round(func.sum(func.coalesce(cls.Amt, 0)), 6).label('Amt')) \
                    .group_by(cls.Warehouse, cls.ItemCode) \
                    .having(func.round(func.sum(func.coalesce(cls.Qty, 0)), 6) > 1 / 1000000)

                dbconfig = cls.dbconnect
                dbconfig['database'] = str(db)
                with pymssql.connect(
                        **dbconfig
                ) as conn:  #host='192.168.0.98:1433', user='******', password='******',database='120') as conn:
                    tmp = pd.read_sql(
                        str(
                            tmpsql.statement.compile(
                                compile_kwargs={'literal_binds': True})), conn)

                if tmp.empty:
                    continue

                tmp1 = pd.DataFrame(l.get('sites'))
                tmp = merge(tmp,
                            tmp1,
                            left_on='Warehouse',
                            right_on='warehouse')
                if tmp.empty: continue

                li = li.append(tmp)

            if li.empty:
                return li
            li['ItemCost'] = li['Amt'] / li['Qty']
            li.drop(['Amt', 'Warehouse', 'warehouse'], axis=1, inplace=True)
            return li
        except Exception as e:
            raise e
Code example #13
File: app.py Project: dcurrigan/sqlalchemy-challenge
def temp_by_date(start_date, end_date=None):
    # Create session from Python to the DB
    session = Session(engine)

    if end_date == None:
        # Query the tobs data
        result = session.query(
            Measurement.date, func.min(Measurement.tobs),
            func.max(Measurement.tobs),
            func.round(func.avg(Measurement.tobs),
                       2)).filter(Measurement.date >= start_date).group_by(
                           Measurement.date).all()
        session.close()

        # Convert the results to a dictionary
        date_temp_data = []
        for date, tmin, tmax, tavg in result:
            temp_dict = {}
            temp_dict['Date'] = date
            temp_dict['Min'] = tmin
            temp_dict['Max'] = tmax
            temp_dict['Avg'] = tavg
            date_temp_data.append(temp_dict)

        return jsonify(date_temp_data)

    else:
        # Query the tobs data
        result = session.query(
            Measurement.date, func.min(Measurement.tobs),
            func.max(Measurement.tobs),
            func.round(func.avg(Measurement.tobs),
                       2)).filter((Measurement.date >= start_date)
                                  & (Measurement.date <= end_date)).group_by(
                                      Measurement.date).all()
        session.close()

        # Convert the results to a dictionary
        date_temp_data = []
        for date, tmin, tmax, tavg in result:
            temp_dict = {}
            temp_dict['Date'] = date
            temp_dict['Min'] = tmin
            temp_dict['Max'] = tmax
            temp_dict['Avg'] = tavg
            date_temp_data.append(temp_dict)

        return jsonify(date_temp_data)
Code example #14
def precipitation():
    # Create our session (link) from Python to the DB
    session = Session(engine)
    """query precipitation for last year"""
    #
    ld = engine.execute(
        "SELECT strftime('%Y-%m-%d', max(Measurement.date), '-1 years') from Measurement"
    ).fetchall()
    last_year = ld[0][0]

    # Design a query to retrieve the last 12 months of precipitation data and plot the results
    # Perform a query to retrieve the data and precipitation scores
    results = session.query(Measurement.date, func.round(func.avg(Measurement.prcp),2)).filter(Measurement.date >= \
                                last_year).group_by(Measurement.date).order_by(Measurement.date).all()
    session.close()

    # Create a dictionary from the row data and append to a list of all_passengers
    prcp_list = []
    for date, prcp in results:
        prcp_dict = {}
        prcp_dict["date"] = date
        prcp_dict["prcp"] = prcp
        prcp_list.append(prcp_dict)

    return jsonify(prcp_list)
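
Note: the engine.execute() string above can be expressed through the ORM as well; a sketch assuming the same SQLite strftime() date-modifier behaviour:

    last_year = session.query(
        func.strftime('%Y-%m-%d', func.max(Measurement.date), '-1 years')
    ).scalar()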
Code example #15
def start_end_date(start,end):
    session=Session(engine)
    # Return a JSON list of the minimum temperature, the average temperature, and the max temperature 
    # for a given start or start-end range.
 
   
    # start_dt = dt.datetime.strptime(start, '%Y-%m-%d')
    # end_dt = dt.datetime.strptime(end, "%Y-%m-%d")

    result= session.query(func.min(Measurement.tobs),func.max(Measurement.tobs),func.round(func.avg(Measurement.tobs),2)).\
        filter(Measurement.date>=start).filter(Measurement.date<=end).all()
    session.close()

    json_list=[]

    for min,max,avg in result:
        json_dict={}
        json_dict['Start Date']=start
        json_dict['End Date']=end
        json_dict['TMIN']=min
        json_dict['TMAX']=max
        json_dict['TAVG']=avg
        json_list.append(json_dict)

    return jsonify(json_list)
Code example #16
    def get_stats_grouped_by_time(self,
                                  channel_id: int,
                                  filter_ts=0) -> Dict[str, StatItem]:
        """
        Groups stats by weekly intervals.

        @param channel_id: The Discord channel id number.
        @param filter_ts: Timestamp to use for filtering.
        @return: Dictionary of {str: StatItem}
        """
        out_dict: Dict[str, StatItem] = defaultdict(lambda: StatItem())
        sec_in_week = 60 * 60 * 24 * 7
        with DB.get_instance().make_session() as db_session:
            query = (
                db_session.query(
                    User.name,
                    # hack that compensates for SQLite not
                    # having FLOOR function
                    func.round((Message.timestamp / sec_in_week) -
                               0.5).label("day_time"),
                    func.sum(Message.char_count)).join(
                        Message, Message.user_id == User.id).filter(
                            Message.channel_id == channel_id).filter(
                                Message.timestamp > filter_ts).group_by(
                                    User.name,
                                    "day_time").order_by(asc("day_time")))

            # iterate inside the `with` block so the session is still open
            # when the query lazily executes
            for name, timestamps, character_count in query:
                if "bot" in name:
                    continue
                out_dict[name].timestamps.append(np.multiply(timestamps, 7))
                out_dict[name].values.append(character_count)

        return out_dict
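
Note: StatItem is not defined in this snippet; judging by how the loop uses it, two growing lists suffice. A plausible stand-in (names inferred from the usage, not taken from the source project):

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class StatItem:
        timestamps: List[float] = field(default_factory=list)  # week index scaled to days
        values: List[int] = field(default_factory=list)        # summed character counts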
Code example #17
File: app.py Project: anthonyanader/flavorHubServer
def restaurant_on_scroll(loggedInUser):
    pageSize = 9
    page = int(request.args.get('page'))
    print(page * pageSize, db.session.query(Restaurant).count())
    hasMore = page * pageSize < db.session.query(Restaurant).count()
    restaurantNoSignIn = Restaurant.query.order_by(asc(
        Restaurant.name)).offset(page * pageSize).limit(pageSize).all()
    restaurants = []
    for restaurant in restaurantNoSignIn:
        locations = Location.query.filter_by(restaurantId=restaurant.id).all()
        cuisineObjects = CuisineType.query.filter_by(
            restaurantId=restaurant.id).all()
        averagePrices = db.session.query(
            MenuItem.category,
            func.round(func.avg(MenuItem.price), 2)).group_by(
                MenuItem.category).filter_by(restaurantId=restaurant.id).all()
        averageObject = {}
        for i in range(len(averagePrices)):
            averageObject[averagePrices[i].category] = str(averagePrices[i][1])
        cuisines = []
        for cuisine in cuisineObjects:
            cuisines.append(cuisine.name.lower().strip())
        restaurants.append({
            'locationCount': len(locations),
            'cuisines': cuisines,
            'imagePath': restaurant.img_pathfile,
            'name': restaurant.name,
            'averagePrices': averageObject
            #add rating average method, make sure its a raw query
        })
    responseData = {'restaurants': restaurants, 'hasMore': hasMore}
    return jsonify(responseData)
Code example #18
File: db_funcs.py Project: gitter-badger/hitorimeshi
def full_info_of_near_rests(lat=34.985458, lng=135.757755, zoom=1, limit=None):
    '''Fetch the restaurants that lie inside a square of side margin*2
    centred on the given lat (latitude) and lng (longitude) from the
    Tabelog table. The defaults are the coordinates of Kyoto Station.
    '''
    margin = _get_margin(zoom)
    left, right = lng - margin, lng + margin
    bottom, top = lat - margin, lat + margin
    box = 'POLYGON((%f %f, %f %f, %f %f, %f %f, %f %f))' \
            % (left, bottom, right, bottom, right, top, left, top, left, bottom)
            # % (x+m, y-m, x-m, y-m, x-m, y+m, x+m, y+m, x+m, y-m)
    session = Session()
    # s = session.query(Tabelog.RestaurantName, gf.wkt(Tabelog.LatLng))\
    s = session.query(
            Tabelog.id, Tabelog.Rcd, Tabelog.RestaurantName,
            Tabelog.TabelogMobileUrl, Tabelog.TotalScore, Tabelog.Situation,
            Tabelog.DinnerPrice, Tabelog.LunchPrice, Tabelog.Category,
            Tabelog.Station, Tabelog.Address, Tabelog.Tel,
            Tabelog.BusinessHours, Tabelog.Holiday,
            func.round(func.avg(UserPost.difficulty)).label('difficulty'),\
            gf.wkt(Tabelog.LatLng).label('Point'))\
            .filter(Tabelog.Rcd == UserPost.rst_id)\
            .filter(Tabelog.LatLng.within(box))\
            .group_by(UserPost.id)\
            .limit(limit)
    session.commit()
    session.close()
    return s
Code example #19
def startend(start, end):

    start_date = dt.datetime.strptime(start, '%Y-%m-%d').date()

    end_date = dt.datetime.strptime(end, '%Y-%m-%d').date()

    result1 = []
    Amin1 = {}

    minimum = session.query(func.min(Measurement.tobs)).filter(
        Measurement.date.between(start_date, end_date)).all()
    minimum = list(np.ravel(minimum))
    Amin1['min'] = minimum[0]

    average = session.query(func.round(func.avg(Measurement.tobs))).filter(
        Measurement.date.between(start_date, end_date)).all()
    average = list(np.ravel(average))
    Amin1['avg'] = average[0]

    maximum = session.query(func.max(Measurement.tobs)).filter(
        Measurement.date.between(start_date, end_date)).all()
    maximum = list(np.ravel(maximum))
    Amin1['max'] = maximum[0]

    result1.append(Amin1)

    return jsonify(result1)
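
Note: the three round-trips above can be collapsed into a single query; a sketch against the same Measurement model (only difference: func.round is applied to the average alone, as before):

    tmin, tavg, tmax = session.query(
        func.min(Measurement.tobs),
        func.round(func.avg(Measurement.tobs)),
        func.max(Measurement.tobs),
    ).filter(Measurement.date.between(start_date, end_date)).one()
    result1 = [{'min': tmin, 'avg': tavg, 'max': tmax}]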
Code example #20
def search_type():

    request_start = request.args.get("start")
    request_end = request.args.get("end")

    try:
        base_cmd = session.query(
            Measurement.date,
            func.min(Measurement.tobs),
            func.round(func.avg(Measurement.tobs), 2),
            func.max(Measurement.tobs),
        )

        if request_start:
            start = datetime.datetime.strptime(request_start,
                                               "%Y-%m-%d").date()
            base_cmd = base_cmd.filter(Measurement.date >= start)

        if request_end:
            end = datetime.datetime.strptime(request_end, "%Y-%m-%d").date()
            base_cmd = base_cmd.filter(Measurement.date <= end)

        data = base_cmd.group_by(Measurement.date).all()

        return jsonify(data)

    except Exception as e:
        return jsonify({"status": "failure", "error": str(e)})
Code example #21
def daterangestartend(start=None, end=None):
    print(str(currentDT) + " - Log: date range request")

    # check that date is properly formatted
    try:
        start = datetime.datetime.strptime(start, '%Y-%m-%d')
    except:
        return ("Incorrect start date, format should be YYYY-MM-DD")

    try:
        end = datetime.datetime.strptime(end, '%Y-%m-%d')
    except:
        return ("Incorrect start date, format should be YYYY-MM-DD")

    # open session get results from start to last date (last_day)
    session = Session(engine)
    results = session.query(func.min(Measurement.tobs),
                            func.round(func.avg(Measurement.tobs), 1),
                            func.max(Measurement.tobs)).filter(
                                Measurement.date.between(
                                    start - relativedelta(days=1),
                                    end + relativedelta(days=1)))
    session.close()

    filtered_tob = []
    for mint, avgt, maxt in results:
        tob_dict = {}
        tob_dict["Max Temp"] = maxt
        tob_dict["Avg Temp"] = avgt
        tob_dict["Min Temp"] = mint
        filtered_tob.append(tob_dict)
    return jsonify(filtered_tob)
Code example #22
def start(start):

    start_date = dt.datetime.strptime(start, '%Y-%m-%d').date()

    result = []
    Amin = {}

    minimum = session.query(func.min(
        Measurement.tobs)).filter(Measurement.date >= start_date).all()
    minimum = list(np.ravel(minimum))
    Amin['min'] = minimum[0]

    average = session.query(func.round(func.avg(
        Measurement.tobs))).filter(Measurement.date >= start_date).all()
    average = list(np.ravel(average))
    Amin['avg'] = average[0]

    maximum = session.query(func.max(
        Measurement.tobs)).filter(Measurement.date >= start_date).all()
    maximum = list(np.ravel(maximum))
    Amin['max'] = maximum[0]

    result.append(Amin)

    return jsonify(result)
Code example #23
File: app.py Project: abaker178/sqlalchemy-challenge
def precip():
    session = Session(engine)
    # Get all precipitation data averaged per day
    precip = session.query(Measurement.date, func.round(func.avg(Measurement.prcp),2)).\
        group_by(Measurement.date).all()
    session.close()
    return jsonify(dict(precip))
Code example #24
    def get_temp_data(self, temp_sensors: list) -> Dict[str, float]:
        """Get Temperature Data with Specific Thermo Sensors

        Args:
            temp_sensors (list): Thermo Sensors

        Returns:
            Dict[str, float]: historical temperature data, keyed by ISO-formatted timestamps
        """
        if self.args["interval"] == "hour":
            time_format = "YYYY-MM-DD HH24:00:00"
        elif self.args["interval"] == "day":
            time_format = "YYYY-MM-DD 00:00:00"
        else:
            raise Exception(f"Invalid interval: {self.args['interval']}")
        date = self.simplify_date(SensorData.created, time_format)
        criteria = [
            SensorData.created.between(self.args["start_time"], self.args["end_time"]),
            SensorData.sensor.in_(temp_sensors),
        ]
        # Thermo Sensor Data
        sensor_thermo = (
            # fmt: off
            SESSION.query(func.round(
                cast(func.avg(SensorData.temperature), DECIMAL), 2
            ).label("temperature"), date)
            .filter(*criteria)
            .group_by(date)
            .all()
            # fmt: on
        )
        thermo_data = {thermo.datetime.isoformat(): float(thermo.temperature) for thermo in sensor_thermo}
        return thermo_data
Code example #25
def start_end(start=None, end=None):

    try:
        start = datetime.datetime.strptime(start, "%Y-%m-%d").date()
        end = datetime.datetime.strptime(end, "%Y-%m-%d").date()

        temps_data = (session.query(
            Measurement.date,
            func.min(Measurement.tobs),
            func.round(func.avg(Measurement.tobs), 2),
            func.max(Measurement.tobs),
        ).filter(Measurement.date >= start).filter(
            Measurement.date <= end).group_by(Measurement.date).all())

        temps_list = []
        for temp in temps_data:
            temp_record = {}
            temp_record["date"] = temp[0]
            temp_record[f"min temp {degree_sign}F"] = temp[1]
            temp_record[f"avg temp {degree_sign}F"] = temp[2]
            temp_record[f"max temp {degree_sign}F"] = temp[3]
            temps_list.append(temp_record)

        return jsonify(temps_list)

    except Exception as e:
        return jsonify({"status": "failure", "error": str(e)})
Code example #26
File: db_funcs.py Project: petitviolet/hitorimeshi
def read_rst(rst_id):
    # fetch detailed restaurant info
    session = Session()
    s = (
        session.query(
            Tabelog.Rcd.label("rst_id"),
            Tabelog.RestaurantName,
            Tabelog.TabelogMobileUrl,
            Tabelog.TotalScore,
            Tabelog.Situation,
            Tabelog.DinnerPrice,
            Tabelog.LunchPrice,
            Tabelog.Category,
            Tabelog.Station,
            Tabelog.Address,
            Tabelog.Tel,
            Tabelog.BusinessHours,
            Tabelog.Holiday,
            gf.x(Tabelog.LatLng).label("lat"),
            gf.y(Tabelog.LatLng).label("lng"),
            func.round(func.avg(UserPost.difficulty)).label("difficulty"),
            func.avg(UserPost.difficulty).label("raw_difficulty"),
        )
        .filter(Tabelog.Rcd == rst_id)
        .filter(UserPost.rst_id == Tabelog.Rcd)
        .group_by(UserPost.id)
        .first()
    )
    # gf.wkt(Tabelog.LatLng).label('Point'))\
    #         .filter('Rcd = :rcd').params(rcd=rst_id).first()
    session.commit()
    session.close()
    return s
Code example #27
    def get_trilateration_points(self):
        points = []
        cells = {}
        enbys = Tower.query.group_by(func.concat(Tower.mcc, Tower.mnc, Tower.cid))
        for enb in enbys:
            enbid = enb.enodeb_id
            if enbid not in cells:
                cells[enbid] = []
            towers = Tower.query.filter(Tower.mnc == enb.mnc).filter(Tower.mcc == enb.mcc).filter(Tower.cid == enb.cid)
            towers = towers.group_by(func.concat(func.round(Tower.lat,3), Tower.lon))
            if towers.count() > 3:
                res = self.trilaterate_enodeb_location(towers)
                cells[enbid].append(SimpleNamespace(lat=res[0], lon=res[1], est_dist=50, sus_pct=self.get_suspicious_percentage_by_enodeb(towers[0])))

        for i in cells:
            if len(cells[i]) > 0:
                res = self.trilaterate_enodeb_location(cells[i], False)
                points.append({
                    'trilat': (res[0], res[1]),
                    'enodeb_id': i,
                    'max_suspiciousness': cells[i][0].sus_pct,
                    "closest_tower": self.closest_known_tower(res[0], res[1]),
                    "unique_cells": "NA", #self.get_cells_count_for_enodebid(cells[i]),
                    "sightings": "NA", #self.get_sightings_for_enodeb(cells[i]).count(),
                    "first_seen": "NA", #str(self.get_min_column_by_enodeb(cells[i], 'timestamp')),
                    "last_seen": "NA" #str(self.get_max_column_by_enodeb(cells[i], 'timestamp'))
                    })

        return points
Code example #28
def courseScoreRead():

    relationships = Rs_lesson_quiz_contain.query.all()
    quiz_list = [item.quiz_id for item in relationships]
    quiz_list = list(set(quiz_list))

    query_results = \
        db.session.query(
            Rs_student_course_enrol.course_index.label('course_index'),
            QuizAttempt.student_id.label('student_id'),
            QuizAttempt.quiz_id.label('quiz_id'),
            func.round(
                cast(100 * cast(QuizAttempt.score, Float) / cast(count(Question.id), Float), Numeric),
                0
            ).label('quiz_score_percentage')
        )\
        .select_from(Rs_student_course_enrol)\
        .outerjoin(Rs_quiz_course_assign, Rs_student_course_enrol.course_index == Rs_quiz_course_assign.course_index)\
        .outerjoin(QuizAttempt, and_(Rs_student_course_enrol.student_id == QuizAttempt.student_id,Rs_quiz_course_assign.quiz_id == QuizAttempt.quiz_id))\
        .outerjoin(Rs_quiz_question_contain, QuizAttempt.quiz_id == Rs_quiz_question_contain.quiz_id)\
        .outerjoin(Question, Rs_quiz_question_contain.question_id == Question.id)\
        .filter(QuizAttempt.quiz_id.in_(quiz_list))\
        .group_by(
            Rs_student_course_enrol.course_index,
            QuizAttempt.student_id,
            QuizAttempt.quiz_id,
            QuizAttempt.score
        )\
        .order_by(
            asc(Rs_student_course_enrol.course_index),
            asc(QuizAttempt.student_id)
        )

    return query_results.all()
Code example #29
def get_player_graph_data(server,
                          granularity=15,
                          start_date=None,
                          end_date=None):
    end_date = end_date or datetime.utcnow()
    start_date = start_date or end_date - timedelta(days=7)

    result = db.session.query(
        label(
            'timestamp_group',
            func.round(
                (func.unix_timestamp(ServerStatus.timestamp) - time.timezone) /
                (granularity * 60)),
        ), func.avg(ServerStatus.player_count)).filter(
            ServerStatus.server == server,
            ServerStatus.timestamp >= start_date, ServerStatus.timestamp <=
            end_date).group_by('timestamp_group').order_by(
                ServerStatus.timestamp).all()

    points = []
    for chunk, count in result:
        points.append({
            'time': int(chunk * granularity * 60 * 1000),
            'player_count': int(count)
        })

    return {
        'start_time': int(calendar.timegm(start_date.timetuple()) * 1000),
        'end_time': int(calendar.timegm(end_date.timetuple()) * 1000),
        'points': points
    }
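
Note: a worked pass through the bucket arithmetic above, taking granularity=15 and time.timezone as 0 for simplicity:

    ts = 1_700_000_450                # sample unix timestamp
    bucket = round(ts / (15 * 60))    # 1888889; rows in the same 15-minute window share it
    millis = bucket * 15 * 60 * 1000  # 1700000100000, emitted as 'time' in JS epoch-millis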
Code example #30
 def get_sales_data(cls, from_date, to_date):
     return Base.session.query(func.sum(
         cls.quantity), func.round(func.sum(cls.amount), 2)).filter(
             and_(
                 func.strftime('%Y-%m-%d', Sales.sales_date) >= from_date,
                 func.strftime('%Y-%m-%d', Sales.sales_date) <=
                 to_date)).first()
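
Note: a hypothetical call, assuming the method is decorated as a classmethod (the decorator is cropped out of the snippet) and that the dates arrive as 'YYYY-MM-DD' strings matching the strftime() comparison:

    total_quantity, total_amount = Sales.get_sales_data('2024-01-01', '2024-01-31')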
Code example #31
File: user_info.py Project: a289237642/zhijiantv
def article_user_query():
    """
    session.query(User.name, func.sum(User.id).label("user_id_sum")).filter(func.to_days(User.create_date)==func.to_days(func.now())).group_by(User.name).all()
    """
    try:
        res = request.get_json()
        page = res.get('page', 1)
        pagesize = res.get('pagesize', 10)
        results = db.session.query(UserReadArticle.user_id, UserReadArticle.article_id, UserReadArticle.create_time,
                                   func.round(func.sum(UserReadArticle.random_coins), 2).label(
                                       "user_coins_sum")).order_by(
            func.sum(UserReadArticle.random_coins).desc()).group_by(UserReadArticle.user_id).all()

        data = query_to_dict(results)
        # keys = ['user_id', 'total_coin']
        # for result in results:
        #     print result[0].user_id
        #     print result[1]
        # #     data = zip(keys, result)
        # #     print data

        return jsonify(errno=0, errmsg="OK", data=data)
    except Exception as e:
        Logging.logger.error('errmsg:{0}'.format(e))
        db.session.rollback()
        return jsonify(errno="-1", errmsg='解密失败')
Code example #32
def stations():
    # Create our session (link) from Python to the DB
    session = Session(engine)
    """Return a JSON list of stations from the dataset"""
    # Query all stations

    stations = session.query(Station.station, Station.name, Station.latitude,
                             func.round(Station.longitude, 5),
                             Station.elevation).all()

    session.close()

    # Create list of dictionaries
    all_stations = []

    for station, name, lat, lng, elevation in stations:
        # create two dictionaries
        st_dict = {}
        stinfo_dict = {}
        st_dict['station ID'] = station
        stinfo_dict['name'] = name
        stinfo_dict['latitude'] = lat
        stinfo_dict['longitude'] = lng
        stinfo_dict['elevation'] = elevation

        st_dict['station details'] = stinfo_dict
        all_stations.append(st_dict)

    return jsonify(all_stations)
Code example #34
    def visit_math(self, expr):
        try:
            op = getattr(func, MATH_COMPILE_DIC[expr.node_name])
            sa_expr = op(self._expr_to_sqlalchemy[expr._input])
        except KeyError:
            if expr.node_name == 'Log':
                if expr._base is not None:
                    sa_expr = SALog('log', self._expr_to_sqlalchemy[expr._base],
                                    self._expr_to_sqlalchemy[expr._base],
                                    self._expr_to_sqlalchemy[expr._input])
                else:
                    sa_expr = SALog('log', None, self._expr_to_sqlalchemy[expr._input])
            elif expr.node_name == 'Log2':
                sa_expr = SALog('log', 2, 2, self._expr_to_sqlalchemy[expr._input])
                sa_expr = sa_expr.cast(types.df_type_to_sqlalchemy_type(expr.dtype))
            elif expr.node_name == 'Log10':
                sa_expr = SALog('log', 10, 10, self._expr_to_sqlalchemy[expr._input])
                sa_expr = sa_expr.cast(types.df_type_to_sqlalchemy_type(expr.dtype))
            elif expr.node_name == 'Trunc':
                trunc_input = self._expr_to_sqlalchemy[expr._input]
                decimals = 0 if expr._decimals is None else self._expr_to_sqlalchemy[expr._decimals]
                sa_expr = SATruncate('trunc', trunc_input, decimals)
            elif expr.node_name == 'Round':
                decimals = 0 if expr._decimals is None else self._expr_to_sqlalchemy[expr._decimals]
                sa_expr = func.round(self._expr_to_sqlalchemy[expr._input], decimals)
            else:
                raise NotImplementedError

        self._add(expr, sa_expr)
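
Note: of the branches above, only Round maps to a plain generic function; a standalone check of what func.round renders, independent of dialect:

    from sqlalchemy import column, func

    expr = func.round(column('x'), 2)
    print(expr)  # round(x, :round_1) -- the bind parameter resolves to 2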
Code example #35
def rankTieProcess(ranks):
    # High solvedRate
    best_rate = dao.query(func.max(ranks.c.solvedRate).label('solvedRate')).subquery()
    ranks = dao.query(ranks).\
                filter(func.round(ranks.c.solvedRate, 4) == func.round(best_rate.c.solvedRate, 4)).subquery()

    # High sumOfSolvedProblemCount
    most_solved = dao.query(func.max(ranks.c.sumOfSolvedProblemCount).label('sumOfSolvedProblemCount')).subquery()
    ranks = dao.query(ranks).\
                filter(ranks.c.sumOfSolvedProblemCount == most_solved.c.sumOfSolvedProblemCount).subquery()

    # Low solutionCheckCount (ties broken toward the fewest checks)
    fewest_checks = dao.query(func.min(ranks.c.solutionCheckCount).label('solutionCheckCount')).subquery()
    ranks = dao.query(ranks).\
                filter(ranks.c.solutionCheckCount == fewest_checks.c.solutionCheckCount)

    return ranks
Code example #36
    def reddit_score(self):
        s = self.upvotes - self.downvotes
        order = func.log(10, func.greatest(func.abs(s), 1))
        sign = func.sign(s)
        seconds = func.date_part('epoch', self.timestamp) - 1134028003

        return func.round(cast(sign * order + seconds / 45000, Numeric), 7)
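
Note: the same score in plain Python, handy for sanity-checking the SQL expression; 1134028003 is the reference epoch already used above, and 45000 seconds (12.5 h) is one log10-unit of votes:

    import math

    def reddit_score(upvotes, downvotes, epoch_seconds):
        s = upvotes - downvotes
        order = math.log10(max(abs(s), 1))
        sign = (s > 0) - (s < 0)
        return round(sign * order + (epoch_seconds - 1134028003) / 45000, 7)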
Code example #37
def StartDate(start):
    start_date = dt.datetime.strptime(start, "%Y-%m-%d")
    summary_stats = session.query(
        func.min(Measurement.tobs), func.max(Measurement.tobs),
        func.round(func.avg(
            Measurement.tobs))).filter(Measurement.date >= start_date)
    summary = list(np.ravel(summary_stats))
    return jsonify(summary)
Code example #38
File: query.py Project: baruwaproject/baruwa2
def sa_scores(dbsession, user):
    "SA scores query"
    query = dbsession.query(func.round(Message.sascore).label('score'),
            func.count('score').label('count'))\
            .filter(Message.scaned == 1)\
            .filter(Message.whitelisted != 1)\
            .group_by('score').order_by('score')

    uquery = UserFilter(dbsession, user, query)
    query = uquery.filter()
    return query
Code example #39
File: query.py Project: TetraAsh/baruwa2
def sa_scores(dbsession, user):
    "SA scores query"
    query = dbsession.query(func.round(Message.sascore).label('score'),
            func.count('score').label('count'))\
            .filter(Message.scaned == 1)\
            .filter(Message.whitelisted != 1)\
            .group_by('score').order_by('score')

    #cachekey = u'sascores-%s' % user.username
    uquery = UserFilter(dbsession, user, query)
    query = uquery.filter()
    #query = query.options(FromCache('sql_cache_short', cachekey))
    return query
Code example #40
File: console.py Project: mizjogit/sakemon
def sensord(label, start, end, field_name, functions):
    seconds_per_sample_wanted, table, is_base_table = mtable.optimal(label, start, end)
    fields = list()
    for agg_func in functions:
        agg_func_name = str(agg_func()).replace('()', '')
        agg_field_name = field_name if is_base_table else '%s_%s' % (field_name, agg_func_name)
        fields.append(agg_func(table.c[agg_field_name]).label(agg_func_name))
    qry = session.query(table.c.timestamp, *fields) \
                 .group_by(func.round(func.unix_timestamp(table.c.timestamp).op('DIV')(seconds_per_sample_wanted))) \
                 .filter(table.c.timestamp >= start, table.c.timestamp <= end) \
                 .order_by(table.c.timestamp)
    if label:
        qry = qry.filter(table.c.probe_label == label)
    return qry
Code example #41
File: invoices.py Project: alishir/tcr
 def __init__(self, context):
     super(InvoicesQueryBuilder, self).__init__(context)
     self._sum_invoice = (
         DBSession.query(
             func.sum(InvoiceItem.final_price).label('final_price'), 
             InvoiceItem.invoice_id.label('invoice_id')
         )
         .group_by(InvoiceItem.invoice_id)
         .subquery()
     )
     self._sum_payments = (
         DBSession.query(
             func.sum(Income.sum).label('payments'),
             Income.invoice_id.label('invoice_id')
         )
         .group_by(Income.invoice_id)
         .subquery()
     )
     self._fields = {
         'id': Invoice.id,
         '_id': Invoice.id,
         'date': Invoice.date,
         'active_until': Invoice.active_until,
         'account': Account.name,
         'account_type': Account.account_type,
         'final_price': self._sum_invoice.c.final_price,
         'payments': self._sum_payments.c.payments,
         'debt': (
             self._sum_invoice.c.final_price - self._sum_payments.c.payments
         ),
         'payments_percent': func.coalesce(
             func.round(
                 (self._sum_payments.c.payments * 100) 
                 / self._sum_invoice.c.final_price, 2
             ),
             0
         ),
         'customer': Person.name,
         'currency': Currency.iso_code,
     }
     self._simple_search_fields = [
         Account.name,
         Person.name,
     ]
     self.build_query()
Code example #42
def update_challenge_points(connection, update_team_count=True):
    """
    Update the points on each challenge to reflect their current worth.
    """
    if update_team_count:
        update_playing_teams(connection)
    solved_count = (select([cast(func.count('*'), Numeric)]).
                    select_from(Submission.__table__).
                    where(Challenge.id == Submission.challenge_id).
                    correlate(Challenge))
    team_count = select([Settings.playing_teams]).as_scalar()
    team_ratio = 1 - solved_count / team_count
    bonus = case([(team_count != 0, func.round(team_ratio, 1))], else_=1) * 100
    source = select([Challenge.base_points + bonus]).correlate(Challenge)
    query = (Challenge.__table__.update().
             where(~Challenge.manual).
             where(~Challenge.dynamic).
             values(points=source))
    connection.execute(query)
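
Note: concretely, with 10 playing teams of which 4 have solved a challenge, team_ratio = 1 - 4/10 = 0.6 and bonus = round(0.6, 1) * 100 = 60, so the challenge settles at base_points + 60. A quick check in plain Python:

    solved, teams, base_points = 4, 10, 200
    bonus = round(1 - solved / teams, 1) * 100  # 60.0
    points = base_points + bonus                # 260.0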
Code example #43
File: sakidb.py Project: mizjogit/rp
    def update(self, session, last_data_time):
        for period, agg_table in self.agg_map.iteritems():
            last = session.query(func.max(agg_table.c.timestamp)).scalar()
            if not last:
                last = session.query(func.min(self.base_table.timestamp).label('timestamp')).scalar()
            if (last_data_time - last).total_seconds() < period:
#                print "Not data for tailed agg at", period, \
#                      "last", last, \
#                      "last_data_time", last_data_time, \
#                      "seconds", (last_data_time - last).total_seconds(), \
#                      "days", (last_data_time - last).days
                continue
            last += datetime.timedelta(seconds=period)
            funs = list()
            insp = inspect(self.base_table)
            for field, pvt_funs in self.pvt.iteritems():
                funs.extend([fun(insp.columns[field]) for fun in pvt_funs])
            qry = session.query(self.base_table.timestamp, self.base_table.probe_label, *funs) \
                         .group_by(func.round(func.unix_timestamp(self.base_table.timestamp).op('DIV')(period)), self.base_table.probe_label) \
                         .filter(self.base_table.timestamp > last)
            session.execute(insert(agg_table).from_select(['timestamp', 'probe_label'] + self.pvt_fields, qry))
Code example #44
File: views.py Project: mianos/etemp
def sensord(id, start, end, aft_name):
    functions = aft[aft_name]
    table = inspect(DataTable).mapped_table

    fields = list()
    for agg_func in functions:
        agg_func_name = str(agg_func()).replace('()', '')
        fields.append(func.cast(agg_func(DataTable.value), types.Integer).label(agg_func_name))

    per_seconds = (end - start).total_seconds() / 100
    ots = func.to_timestamp(func.round(func.extract('epoch', DataTable.timestamp) / per_seconds) * per_seconds).label('timestamp')

    if id == 0:
        qry = g.session.query(ots, *fields) \
                       .filter(DataTable.probe == 1)    # TODO: get probe 1
    else:
        qry = g.session.query(ots, *fields) \
                       .filter(DataTable.probe == id)

    qry = qry.filter(table.c.timestamp >= start, table.c.timestamp <= end) \
             .group_by(ots) \
             .order_by(ots)
    return qry
Code example #45
 def trajectDirection(self):
     """traject direction of previous location and current location"""
     azimuth = func.ST_Azimuth(func.lag(self.rawlocation).over(order_by=(self.device_info_serial, self.date_time,)), self.rawlocation)
     return func.round(cast(func.degrees(azimuth), Numeric()), 2).label('tdirection')
Code example #46
File: sql_con.py Project: daliagachc/ceil_bokeh
def round_time(time_interval, time_column):
    t_i = float(time_interval)
    return (func.round(time_column / t_i) * t_i).label('round_time' + time_column.key)
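
Note: a hypothetical use, bucketing into 300-second bins; DataTable here is an assumed model whose timestamp column holds numeric epoch seconds (the division requires a number, not a datetime):

    bucket = round_time(300, DataTable.timestamp)
    q = (session.query(bucket, func.avg(DataTable.value))
                .group_by(bucket)
                .order_by(bucket))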
Code example #47
    def trajectSpeed(self):
        """traject speed by distance between previous and current location divided by current date_time - previous date_time

        round(CAST
        (
            ST_Length_Spheroid(
                ST_MakeLine(location, lag(location) over (order by device_info_serial, date_time)),
                'SPHEROID[\"WGS 84\",6378137,298.257223563]'
            )
        /
        EXTRACT(
            EPOCH FROM (date_time - lag(date_time) over (order by device_info_serial, date_time))
        )
        ) AS NUMERIC, 4)
        """
        order_by = (self.device_info_serial, self.date_time,)
        spheroid = 'SPHEROID["WGS 84",6378137,298.257223563]'
        line = func.ST_MakeLine(self.rawlocation, func.lag(self.rawlocation).over(order_by=order_by))
        distance = func.ST_Length_Spheroid(line, spheroid)
        duration = func.extract('epoch', self.date_time - func.lag(self.date_time).over(order_by=order_by))
        return func.round(cast(distance / duration, Numeric), 4).label('tspeed')