Example #1
File: jobs.py  Project: zomGreg/nao_import
    def get(self):
        # Timestamps are stored in milliseconds; FROM_UNIXTIME expects
        # seconds, hence the division by 1000.
        q = run.session.query(
            run.enstratus_job.c.job_id,
            run.enstratus_job.c.description,
            ((run.enstratus_job.c.end_timestamp -
              run.enstratus_job.c.start_timestamp) / 1000).label('seconds_to_complete'),
            run.enstratus_job.c.job_status,
            func.from_unixtime(run.enstratus_job.c.start_timestamp / 1000,
                               '%Y-%m-%d %H:%i:%s').label('start_date'),
            func.from_unixtime(run.enstratus_job.c.end_timestamp / 1000,
                               '%Y-%m-%d %H:%i:%s').label('end_date'))

        if 'limit' in request.args:
            limit = int(request.args['limit'])
            results = q.order_by(
                run.enstratus_job.c.start_timestamp).limit(limit).all()
            run.session.close()
            return jsonify(results=results)
        else:
            page = int(request.args.get('page', 1))
            per_page = 50
            total_count = q.count()
            total_pages = int(ceil(total_count / float(per_page)))
            # Offset must skip the pages before the requested one.
            offset = per_page * (page - 1)
            results = q.order_by(run.enstratus_job.c.start_timestamp).limit(
                per_page).offset(offset).all()
            run.session.close()
            return jsonify(page=page,
                           per_page=per_page,
                           total_pages=total_pages,
                           results=results)
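MySQL's FROM_UNIXTIME expects an epoch in seconds, which is why the millisecond columns above are divided by 1000 before formatting; a quick illustration with a made-up value:

# 1700000000000 ms / 1000 -> 1700000000 s, which FROM_UNIXTIME renders as
# '2023-11-14 22:13:20' (exact output depends on the session time zone).
expr = func.from_unixtime(1700000000000 / 1000, '%Y-%m-%d %H:%i:%s')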
Example #2
def chartlist():
    """ Sales trend chart """

    resjson.action_code = 11

    # Get the query date range
    search_time_daterange = request.args.get(
        'search_time_daterange', '').strip()

    q = db.session.query(
        func.count(Order.order_id).label('order_num'),
        func.sum(Order.pay_amount).label('pay_amount'),
        func.from_unixtime(Order.paid_time, "%Y-%m-%d").label('paid_date')).\
        filter(Order.order_type == 1).\
        filter(Order.pay_status == 2).\
        filter(Order.order_status != 3)

    # Resolve the query date range; a range is required
    if search_time_daterange:
        temp_start, temp_end = date_range(search_time_daterange)
        _cover_day, date_norms = date_cover_day(temp_start, temp_end)
        start = temp_start
        end = temp_end
    else:
        return resjson.print_json(resjson.PARAM_ERROR)

    datas = q.filter(Order.paid_time >= start).filter(
        Order.paid_time < end).group_by('paid_date').all()

    # Reshape the result rows into a dict keyed by paid_date
    format_result_datas = format_array_data_to_dict(datas, 'paid_date')
    return resjson.print_json(0, u'ok', {
        'data': format_result_datas,
        'date_norms': date_norms})
Example #3
def upgrade():
    bind = op.get_bind()
    if bind and bind.engine.name == "mysql":
        # NOTE(jd) That crappy engine that is MySQL does not have
        # "ALTER TABLE … USING …", so we need to copy everything and convert…
        for table_name, column_name in (("alarm", "timestamp"),
                                        ("alarm", "state_timestamp"),
                                        ("alarm_history", "timestamp")):
            existing_type = sa.types.DECIMAL(
                precision=20, scale=6, asdecimal=True)
            existing_col = sa.Column(
                column_name,
                existing_type,
                nullable=True)
            temp_col = sa.Column(
                column_name + "_ts",
                models.TimestampUTC(),
                nullable=True)
            op.add_column(table_name, temp_col)
            t = sa.sql.table(table_name, existing_col, temp_col)
            op.execute(t.update().values(
                **{column_name + "_ts": func.from_unixtime(existing_col)}))
            op.drop_column(table_name, column_name)
            op.alter_column(table_name,
                            column_name + "_ts",
                            nullable=True,
                            type_=models.TimestampUTC(),
                            existing_nullable=True,
                            existing_type=existing_type,
                            new_column_name=column_name)
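On engines that do support ALTER TABLE … USING (PostgreSQL, for instance), the same conversion can be done in place without a temporary column; a minimal sketch under that assumption:

def upgrade():
    bind = op.get_bind()
    if bind and bind.engine.name == "postgresql":
        for table_name, column_name in (("alarm", "timestamp"),
                                        ("alarm", "state_timestamp"),
                                        ("alarm_history", "timestamp")):
            # to_timestamp() converts the stored epoch seconds in place.
            op.alter_column(table_name, column_name,
                            type_=models.TimestampUTC(),
                            postgresql_using="to_timestamp(%s)" % column_name)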
Example #4
def upgrade():
    bind = op.get_bind()
    if bind and bind.engine.name == "mysql":
        # NOTE(jd) That crappy engine that is MySQL does not have
        # "ALTER TABLE … USING …", so we need to copy everything and convert…
        for table_name, column_name in (("alarm", "timestamp"),
                                        ("alarm", "state_timestamp"),
                                        ("alarm_change", "timestamp")):
            existing_type = sa.types.DECIMAL(
                precision=20, scale=6, asdecimal=True)
            existing_col = sa.Column(
                column_name,
                existing_type,
                nullable=True)
            temp_col = sa.Column(
                column_name + "_ts",
                models.TimestampUTC(),
                nullable=True)
            op.add_column(table_name, temp_col)
            t = sa.sql.table(table_name, existing_col, temp_col)
            op.execute(t.update().values(
                **{column_name + "_ts": func.from_unixtime(existing_col)}))
            op.drop_column(table_name, column_name)
            op.alter_column(table_name,
                            column_name + "_ts",
                            nullable=True,
                            type_=models.TimestampUTC(),
                            existing_nullable=True,
                            existing_type=existing_type,
                            new_column_name=column_name)
Example #5
File: queryfun.py  Project: zrong/pyape
def sql_from_unixtime(dtfield, dtformat='%Y-%m-%d'):
    """
    返回一个 FROM_UINXTIME 的查询对象
    :param dtfield: 用于转换的记录名称,必须是 datetime 或者 timestamp 类型
    :param dtformat: 对应与 FROM_UNIXTIME 的第二个参数
    :return:
    """
    return func.from_unixtime(func.unix_timestamp(dtfield), dtformat)
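A minimal usage sketch, assuming a SQLAlchemy session and a hypothetical Post model with a created_at datetime column; grouping on the formatted value yields per-day counts:

# Post and session are hypothetical; only sql_from_unixtime comes from above.
day = sql_from_unixtime(Post.created_at).label('day')
q = (session.query(day, func.count(Post.id))
     .group_by(day)
     .order_by(day))
for day_str, post_count in q.all():
    print(day_str, post_count)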
Example #6
class Block(Model):
    __tablename__ = 'block'

    number = Column(db.Integer, primary_key=True)
    witness_address = Column(db.String(255))
    timestamp = Column(db.BigInteger)
    blockid = Column(db.String(255))
    parenthash = Column(db.String(255))
    version = Column(db.Integer)

    # Populated at insert time from the epoch column above.
    utc0time = db.Column(db.DateTime, default=func.from_unixtime(timestamp))
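The default above renders a SQL expression that references the timestamp column at insert time, which is backend-dependent. A sketch of an alternative that computes the value at query time instead, using SQLAlchemy's column_property (same columns assumed):

from sqlalchemy.orm import column_property

class Block(Model):
    __tablename__ = 'block'

    number = Column(db.Integer, primary_key=True)
    timestamp = Column(db.BigInteger)
    # Evaluated in SELECT statements from the stored epoch; nothing extra is
    # persisted in the table.
    utc0time = column_property(func.from_unixtime(timestamp))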
Example #7
def query_by_grouped_time(session,
                          cutoff,
                          column,
                          group_func=func.date,
                          sql_func=func.avg):
    # Aggregate `column` (avg by default) into time buckets (calendar days by
    # default) covering the last `cutoff` seconds.
    q = session.query(
        sql_func(column).label('value'),
        group_func(func.from_unixtime(
            WindMeasurement.epoch)).label('date')).filter(
                WindMeasurement.epoch >=
                func.unix_timestamp(func.now()) - cutoff,
                column >= 1).group_by('date').order_by('date')

    grouped_values = [(float(row.value), row.date) for row in q.all()]
    return grouped_values
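A minimal usage sketch, assuming a session is available and that WindMeasurement has a speed column (a hypothetical name); this averages the speed per calendar day over the last week:

# 7 days expressed in seconds; WindMeasurement.speed is assumed here.
weekly_avg = query_by_grouped_time(session,
                                   cutoff=7 * 24 * 3600,
                                   column=WindMeasurement.speed)
for value, day in weekly_avg:
    print(day, value)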
Example #8
    def get_my_delegates(user_id):

        session = Session()

        # Today's window: [midnight today, midnight tomorrow)
        day = date.today()
        end_day = day + timedelta(days=1)

        from_timestamp = time.mktime(day.timetuple())
        end_timestamp = time.mktime(end_day.timetuple())

        results = (session.query(Order.id, Order.direction,
                                 func.from_unixtime(Order.created_at, '%H:%i:%s'),
                                 Product.name, Order.p_no, Order.price,
                                 Order.volume, Order.deal_volume, Order.status)
                   .join(Product, Product.p_no == Order.p_no)
                   .filter(Order.deleted == 0,
                           Order.user_id == user_id,
                           Order.created_at.between(from_timestamp, end_timestamp))
                   .order_by(Order.p_no.desc(), Order.created_at.desc())
                   .all())

        session.close()
        return results
Example #9
    def set_deal_data_to_redis(redis, p_no=None):

        session = Session()
        day = date.today()
        end_day = day + timedelta(days=1)

        from_timestamp = time.mktime(day.timetuple())
        end_timestamp = time.mktime(end_day.timetuple())

        results = session.query(OrderDeal.id, func.from_unixtime(OrderDeal.deal_time, '%H:%i'), OrderDeal.price,
                                OrderDeal.volume) \
            .filter(OrderDeal.p_no == p_no, OrderDeal.deleted == 0,
                    OrderDeal.deal_time.between(from_timestamp, end_timestamp)) \
            .order_by(OrderDeal.id.asc()).all()

        # Per-minute buckets keyed by 'HH:MM'.
        dr = {}

        t = 0  # trades in the current bucket
        p = 0  # summed price
        v = 0  # summed volume

        for _id, dt_m, price, volume in results:
            if dt_m not in dr:
                dr[dt_m] = {}

                t = 0
                p = 0
                v = 0

            dr[dt_m]['price'] = price

            t = t + 1
            p = p + price
            v = v + volume
            dr[dt_m]['times'] = t
            dr[dt_m]['avg_price'] = round(p / t, 2)
            dr[dt_m]['volume'] = v

        ret = []

        prev_price = Common.get_last_price(session, p_no)
        prev_avg_price = prev_price
        x_times = ProductHandler.get_transaction_times(p_no)
        now = datetime.datetime.now().strftime('%H:%M')
        for t in x_times:
            if t not in dr:
                # No trades in this minute: stop once we pass the current
                # time, otherwise carry the previous prices forward.
                if t > now:
                    break
                ret.append((t, prev_price, prev_avg_price, 0))
            else:
                ret.append((t, dr[t]['price'], dr[t]['avg_price'],
                            dr[t]['volume']))
                prev_price = dr[t]['price']
                prev_avg_price = dr[t]['avg_price']

        data = ret
        key = "{}{}".format('REDIS_KEY_FOR_CHART_TIME_DATA_', p_no)
        redis.set(key, json.dumps(data))

        session.close()
Example #10
async def get_statistic(conn, number, date):
    # Match rows whose epoch timestamp falls on the given 'YYYY-MM-DD' date.
    result = await conn.execute(
        avtomat_log_table.select()
        .where(avtomat_log_table.c.number == number)
        .where(func.from_unixtime(avtomat_log_table.c.timestamp,
                                  '%Y-%m-%d') == date))
    statistic = await result.fetchall()
    return statistic
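Formatting every stored epoch defeats any index on the timestamp column; a hedged sketch of an index-friendly variant (get_statistic_ranged is hypothetical, and date is assumed to be a datetime.date):

import time
from datetime import timedelta

async def get_statistic_ranged(conn, number, date):
    # Compare the raw epoch column against [midnight, next midnight) so an
    # index on timestamp can be used instead of formatting every row.
    start = time.mktime(date.timetuple())
    end = time.mktime((date + timedelta(days=1)).timetuple())
    result = await conn.execute(
        avtomat_log_table.select()
        .where(avtomat_log_table.c.number == number)
        .where(avtomat_log_table.c.timestamp >= start)
        .where(avtomat_log_table.c.timestamp < end))
    return await result.fetchall()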
Example #11
def lists(id):

    field = request.args.get("field")
    keywords = request.args.get("keywords")

    options = {}
    options["category"] = Category.query.filter(Category.id==id).first()
    options["field"]    = field
    options["keywords"] = keywords if keywords is not None else ""

    condition = []
    condition.append(Article.category==id)

    if keywords is None or len(keywords.strip()) <= 0:
        keywords = ""
    else:
        if field == "title":
            condition.append(Article.title.like("%" + keywords + "%"))
        elif field == "subtitle":
            condition.append(Article.subtitle.like("%" + keywords + "%"))
        else:
            condition.append(Article.content.like("%" + keywords + "%"))

    page = int(request.args.get('page', 1))
    per_page = int(request.args.get('per_page', 10))
    paginate = db.session.query(
        Article.id, Article.title, Article.source, Article.isexamine,
        Category.category,
        func.from_unixtime(Article.addtime, "%Y-%m-%d %H:%i:%s")).\
        filter(*condition).\
        filter(Article.category == Category.id).\
        order_by(db.desc(Article.id)).\
        paginate(page, per_page, error_out=False)
    articlelist = paginate.items

    return render_template("admin/view/article/lists.html",
                           admin=g.admin,
                           menu=g.menu,
                           options=options,
                           paginate=paginate,
                           articlelist=articlelist)
Example #12
    def get_deal_data(p_no=None):

        if not p_no:
            raise ValueError

        session = Session()

        day = date.today()
        end_day = day + timedelta(days=1)

        from_timestamp = time.mktime(day.timetuple())
        end_timestamp = time.mktime(end_day.timetuple())

        results = (session.query(OrderDeal.id,
                                 func.from_unixtime(OrderDeal.deal_time, '%H:%i'),
                                 OrderDeal.price, OrderDeal.volume)
                   .filter(OrderDeal.p_no == p_no,
                           OrderDeal.deleted == 0,
                           OrderDeal.deal_time.between(from_timestamp, end_timestamp))
                   .order_by(OrderDeal.id.asc())
                   .all())

        # Per-minute buckets keyed by 'HH:MM'.
        dr = {}

        t = 0  # trades in the current bucket
        p = 0  # summed price
        v = 0  # summed volume

        for _id, dt_m, price, volume in results:
            if dt_m not in dr:
                dr[dt_m] = {}

                t = 0
                p = 0
                v = 0

            dr[dt_m]['price'] = price

            t = t + 1
            p = p + price
            v = v + volume
            dr[dt_m]['times'] = t
            dr[dt_m]['avg_price'] = round(p / t, 2)
            dr[dt_m]['volume'] = v

        ret = []

        prev_price = Common.get_last_price(session, p_no)
        prev_avg_price = prev_price
        x_times = ProductHandler.get_transaction_times(p_no)
        now = datetime.now().strftime('%H:%M')
        for t in x_times:
            if t not in dr:
                # No trades in this minute: stop once we pass the current
                # time, otherwise carry the previous prices forward.
                if t > now:
                    break
                ret.append((t, prev_price, prev_avg_price, 0))
            else:
                ret.append((t, dr[t]['price'], dr[t]['avg_price'],
                            dr[t]['volume']))
                prev_price = dr[t]['price']
                prev_avg_price = dr[t]['avg_price']

        session.close()
        return ret
Example #13
def _archive_access_log(engine, session, default_domain_name):
    try:
        Temp = declarative_base()

        class AccessLogToArchive(Temp):
            __tablename__ = 'accessLogToArchive'
            __table_args__ = {'prefixes': ['TEMPORARY']}
            id = Column(Integer, primary_key=True)
            time_since_epoch = Column(Numeric(15, 3))
            ip_client = Column(CHAR(15))
            http_status_code = Column(String(10))
            http_reply_size = Column(Integer)
            http_url = Column(Text)
            http_username = Column(String(100))
            userId = Column(Integer)
            archived = Column(Boolean)

        Temp.metadata.create_all(bind=engine)

        # Fill temporary table with unarchived chunk of data
        access_log_subquery = session.query(
            AccessLog.id,
            AccessLog.time_since_epoch,
            AccessLog.ip_client,
            AccessLog.http_status_code,
            AccessLog.http_reply_size,
            AccessLog.http_url,
            case([(
                sqlalchemy.or_(
                    AccessLog.http_username.contains('@'),
                    AccessLog.http_username.contains('\\'),
                    AccessLog.http_username == '-'),
                AccessLog.http_username)],
                else_=AccessLog.http_username + '@' + default_domain_name).label('http_username'),
            AccessLog.archived).filter(is_(AccessLog.archived, None)).limit(1000000)  # limit to prevent overload on a huge amount of data

        ins = insert(AccessLogToArchive).from_select([
            'id', 'time_since_epoch', 'ip_client', 'http_status_code', 'http_reply_size',
            'http_url', 'http_username', 'archived'], access_log_subquery)

        session.execute(ins)

        # Set user ID field
        session.query(AccessLogToArchive).filter(
            or_(
                and_(User.authMethod == 0, User.userPrincipalName == AccessLogToArchive.http_username),
                and_(User.authMethod == 1, User.ip == AccessLogToArchive.ip_client))).update(
            {AccessLogToArchive.userId: User.id}, synchronize_session=False)

        session.query(AccessLog).filter(AccessLog.id == AccessLogToArchive.id).update(
            {AccessLog.userId: AccessLogToArchive.userId}, synchronize_session=False)

        # Get host from URL: strip protocol
        session.query(AccessLogToArchive).filter(func.locate('://', AccessLogToArchive.http_url) > 0).update(
            {AccessLogToArchive.http_url:
                func.substring(AccessLogToArchive.http_url, func.locate('://', AccessLogToArchive.http_url) + 3)},
            synchronize_session=False)

        # Get host from URL: strip port and the rest
        session.query(AccessLogToArchive).filter(func.locate(':', AccessLogToArchive.http_url) > 0).update(
            {AccessLogToArchive.http_url:
                func.left(AccessLogToArchive.http_url, func.locate(':', AccessLogToArchive.http_url) - 1)},
            synchronize_session=False)

        # Get host from URL: strip everything after the first slash
        session.query(AccessLogToArchive).filter(func.locate('/', AccessLogToArchive.http_url) > 0).update(
            {AccessLogToArchive.http_url:
                func.left(AccessLogToArchive.http_url, func.locate('/', AccessLogToArchive.http_url) - 1)},
            synchronize_session=False)
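        # Taken together, the three updates above reduce a full URL to its
        # bare host, e.g. (illustrative value only):
        #   'https://example.com:8080/index.html'
        #     -> strip '://' prefix -> 'example.com:8080/index.html'
        #     -> cut at first ':'   -> 'example.com'
        #     -> cut at first '/'   -> no-op here; it handles port-less URLs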

        # Make summary traffic table
        subquery = session.query(
            func.date(func.from_unixtime(AccessLogToArchive.time_since_epoch)).label('date'),
            AccessLogToArchive.userId,
            AccessLogToArchive.http_url.label('host'),
            func.sum(AccessLogToArchive.http_reply_size).label('traffic')).\
            filter(AccessLogToArchive.http_status_code.like('2%')).\
            group_by(
                func.date(func.from_unixtime(AccessLogToArchive.time_since_epoch)),
                AccessLogToArchive.userId,
                AccessLogToArchive.http_url).\
            having(func.sum(AccessLogToArchive.http_reply_size) > 0).subquery()

        # Update existing rows
        session.query(AccessLogArchive).filter(
            AccessLogArchive.date == subquery.c.date,
            AccessLogArchive.userId == subquery.c.userId,
            AccessLogArchive.host == subquery.c.host).\
            update({AccessLogArchive.traffic: AccessLogArchive.traffic + subquery.c.traffic}, synchronize_session=False)

        # Insert new rows
        access_log_subquery = session.query(subquery).outerjoin(
            AccessLogArchive, and_(
                AccessLogArchive.date == subquery.c.date,
                AccessLogArchive.userId == subquery.c.userId,
                AccessLogArchive.host == subquery.c.host)).\
            filter(AccessLogArchive.id.is_(None), subquery.c.userId.isnot(None))

        ins = insert(AccessLogArchive).from_select(['date', 'userId', 'host', 'traffic'], access_log_subquery)

        session.execute(ins)

        # Mark access log chunk as archived
        session.query(AccessLog).filter(
            AccessLog.id == AccessLogToArchive.id).\
            update({AccessLog.archived: 1}, synchronize_session=False)

        session.commit()

        '''
        # Drop temp table
        AccessLogToArchive.__table__.drop(engine)
        '''
    except Exception:
        session.rollback()
        raise