def get_progression_stats(self, session, id_):
        def float_maybe(f):
            if f is None:
                return None
            else:
                return float(f)

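        # Bucket games by month via to_char(match_date, 'YY-MM'); the two queries
        # fill the 'average' and 'std_dev' entries of each monthly result below.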
        mean_query = session.query(
            func.to_char(Game.match_date,
                         'YY-MM').label('date'), *self.stats_query
        ).join(PlayerGame).filter(PlayerGame.time_in_game > 0).filter(
            PlayerGame.player == id_).group_by('date').order_by('date').all()
        std_query = session.query(
            func.to_char(Game.match_date,
                         'YY-MM').label('date'), *self.std_query
        ).join(PlayerGame).filter(PlayerGame.time_in_game > 0).filter(
            PlayerGame.player == id_).group_by('date').order_by('date').all()
        mean_query = [list(q) for q in mean_query]
        std_query = [list(q) for q in std_query]
        results = []
        for q, s in zip(mean_query, std_query):
            result = {
                'name':
                datetime.datetime.strptime(q[0], '%y-%m').isoformat(),
                'average':
                self.get_wrapped_stats([float_maybe(qn) for qn in q[1:]]),
                'std_dev':
                self.get_wrapped_stats([float_maybe(qn) for qn in s[1:]])
            }
            results.append(result)
        return results
Example #2
def overview_devices_count():
    time_unit_models = {
        'hour': DeviceCountHour,
        'day': DeviceCountDay,
        'month': DeviceCountMonth
    }
    time_unit, time_format = _validate_time_unit()
    model = time_unit_models[time_unit]
    charts_config = get_charts_config(time_unit=time_unit)
    start_time = charts_config['start_time']
    x_data = charts_config['x_data']

    # Query different models according to time unit
    time_devices_count = db.session \
        .query(func.to_char(model.countTime, time_format),
               func.sum(model.deviceCount)) \
        .filter(model.countTime > start_time, model.tenantID == g.tenant_uid) \
        .group_by(func.to_char(model.countTime, time_format)) \
        .order_by(func.to_char(model.countTime, time_format)).all()
    devices_count_dict = dict(time_devices_count)
    records = {
        'time': x_data,
        'value': [devices_count_dict.get(date, 0) for date in x_data]
    }
    return jsonify(records)
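
The devices_count_dict.get(date, 0) lookup only fills missing buckets with zeroes if get_charts_config returns x_data labels in exactly the format that func.to_char(model.countTime, time_format) produces. A minimal sketch of such a helper for the hourly case, assuming a 'YYYY-MM-DD HH24:00' / '%Y-%m-%d %H:00' pairing (the names and formats here are illustrative, not the project's actual implementation):

from datetime import datetime, timedelta

def get_charts_config(time_unit='hour', points=24):
    """Hypothetical sketch: a start time plus x-axis labels matching the to_char output."""
    now = datetime.now().replace(minute=0, second=0, microsecond=0)
    start_time = now - timedelta(hours=points)
    x_data = [(start_time + timedelta(hours=i + 1)).strftime('%Y-%m-%d %H:00')
              for i in range(points)]
    return {'start_time': start_time, 'x_data': x_data}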
Example #3
def usr_group_act(request):
    ses = request.session
    req = request
    params = req.params
    url_dict = req.matchdict
    
    if url_dict['act']=='grid':
        gid = 'gid' in req.GET and req.GET['gid'] or 0
        columns = [
        ColumnDT(User.id, mData='id'),
        ColumnDT(User.email, mData='email'),
        ColumnDT(User.user_name, mData='name'),
        ColumnDT(User.status, mData='status'), #, filter=_DTstatus
        ColumnDT(func.to_char(User.last_login_date, 'DD-MM-YYYY'), mData='last_login'),
        ColumnDT(func.to_char(User.registered_date, 'DD-MM-YYYY'), mData='registered'),
        ]
        query = DBSession.query().select_from(User).join(UserGroup).\
                          filter(User.id==UserGroup.user_id,
                                 UserGroup.group_id==gid)
        rowTable = DataTables(req.GET, query, columns)
        return rowTable.output_result()
        
    elif url_dict['act']=='member':
        columns = []
        gid = 'gid' in params and params['gid'] or 0
        columns.append(ColumnDT('id'))
        columns.append(ColumnDT('email'))
        columns.append(ColumnDT('user_name'))
        columns.append(ColumnDT('status'))
        query = DBSession.query(User.id, User.user_name, User.email, User.status,
                                User.last_login_date, User.registered_date).\
                  join(UserGroup).filter(UserGroup.group_id==gid)
        
        rowTable = DataTables(req, User, query, columns)
        return rowTable.output_result()
Example #4
def get_columns():
    columns = [
        ColumnDT(func.concat(PosSppt.kd_propinsi,
                                              PosSppt.kd_dati2,
                                              PosSppt.kd_kecamatan,
                                              PosSppt.kd_kelurahan,
                                              PosSppt.kd_blok,
                                              PosSppt.no_urut, 
                                              PosSppt.kd_jns_op, 
                                              PosSppt.thn_pajak_sppt), mData='id'),
        ColumnDT(func.concat(PosSppt.kd_propinsi,
                                              PosSppt.kd_dati2,
                                              PosSppt.kd_kecamatan,
                                              PosSppt.kd_kelurahan,
                                              PosSppt.kd_blok,
                                              PosSppt.no_urut, 
                                              PosSppt.kd_jns_op), mData='nop'),
        ColumnDT(PosSppt.thn_pajak_sppt,           mData='thn_pajak_sppt'),
        ColumnDT(PosSppt.pbb_yg_harus_dibayar_sppt, mData='pbb_yg_harus_dibayar_sppt'),
        ColumnDT(func.to_char(PosSppt.tgl_terbit_sppt,'DD-MM-YYYY'), mData='tgl_terbit_sppt'),
        ColumnDT(func.to_char(PosSppt.tgl_cetak_sppt,'DD-MM-YYYY'),  mData='tgl_cetak_sppt'),
        ColumnDT(PosSppt.status_pembayaran_sppt,   mData='status_pembayaran_sppt'),
    ]
        
    query = PosPbbDBSession.query().select_from(PosSppt)
    return columns, query
Example #5
def query_rpt():
    return DBSession.query(
        SimralKetetapan.id_trx, SimralKetetapan.no_trx,
        func.to_char(SimralKetetapan.tgl_pembukuan,
                     'DD-MM-YYYY').label('tgl_pembukuan'),
        SimralKetetapan.jns_trx, SimralKetetapan.no_bukti_trx,
        func.to_char(SimralKetetapan.tgl_bukti_trx,
                     'DD-MM-YYYY').label('tgl_bukti_trx'),
        SimralKetetapan.nm_penyetor, SimralKetetapan.kd_rekening,
        SimralKetetapan.jumlah, SimralKetetapan.kd_denda,
        SimralKetetapan.jumlah_denda, SimralKetetapan.source)
Example #6
def query_rpt():
    return DBSession.query(SimralSts.id_trx, SimralSts.no_trx, SimralSts.no_sts,
                    func.to_char(SimralSts.tgl_pembukuan,'DD-MM-YYYY').label('tgl_pembukuan'),
                    SimralSts.jns_trx, SimralSts.uraian_trx, SimralSts.no_bukti_trx,
                    func.to_char(SimralSts.tgl_bukti_trx,'DD-MM-YYYY').label('tgl_bukti_trx'),
                    SimralSts.cara_penyetoran,
                    func.sum(SimralStsDetail.jumlah).label('jumlah')).\
                group_by(SimralSts.id_trx, SimralSts.no_trx, SimralSts.no_sts,
                            SimralSts.tgl_pembukuan, SimralSts.jns_trx,
                            SimralSts.uraian_trx, SimralSts.no_bukti_trx,
                            SimralSts.tgl_bukti_trx, SimralSts.cara_penyetoran)
Example #7
def initial_query(session):
    return session.query(
        file_model.action_type,
        func.to_char(
            cast(file_model.action_date, Date),
            'YYYYMMDD'), file_model.assistance_type, file_model.record_type,
        file_model.fain, file_model.award_modification_amendme, file_model.uri,
        file_model.correction_late_delete_ind,
        file_model.fiscal_year_and_quarter_co, file_model.sai_number,
        file_model.awardee_or_recipient_legal,
        file_model.awardee_or_recipient_uniqu,
        file_model.legal_entity_address_line1,
        file_model.legal_entity_address_line2,
        file_model.legal_entity_address_line3,
        file_model.legal_entity_city_name, file_model.legal_entity_city_code,
        file_model.legal_entity_county_name,
        file_model.legal_entity_county_code,
        file_model.legal_entity_country_name,
        file_model.legal_entity_state_name, file_model.legal_entity_state_code,
        file_model.legal_entity_zip5, file_model.legal_entity_zip_last4,
        file_model.legal_entity_country_code,
        file_model.legal_entity_foreign_city,
        file_model.legal_entity_foreign_provi,
        file_model.legal_entity_foreign_posta,
        file_model.legal_entity_congressional, file_model.business_types,
        file_model.funding_agency_name, file_model.funding_agency_code,
        file_model.funding_sub_tier_agency_na,
        file_model.funding_sub_tier_agency_co, file_model.funding_office_code,
        file_model.awarding_agency_name, file_model.awarding_agency_code,
        file_model.awarding_sub_tier_agency_n,
        file_model.awarding_sub_tier_agency_c, file_model.awarding_office_name,
        file_model.awarding_office_code, file_model.cfda_number,
        file_model.cfda_title, file_model.place_of_performance_code,
        file_model.place_of_perform_country_c,
        file_model.place_of_perform_country_n,
        file_model.place_of_perform_county_co,
        file_model.place_of_perform_state_nam,
        file_model.place_of_perform_county_na,
        file_model.place_of_performance_city,
        file_model.place_of_performance_zip4a,
        file_model.place_of_performance_forei,
        file_model.place_of_performance_congr, file_model.award_description,
        func.to_char(cast(file_model.period_of_performance_star, Date),
                     'YYYYMMDD'),
        func.to_char(cast(file_model.period_of_performance_curr, Date),
                     'YYYYMMDD'), file_model.federal_action_obligation,
        file_model.non_federal_funding_amount, file_model.total_funding_amount,
        file_model.face_value_loan_guarantee,
        file_model.original_loan_subsidy_cost,
        file_model.business_funds_indicator, file_model.funding_office_name,
        func.to_char(cast(file_model.modified_at, Date), 'YYYYMMDD'))
Example #8
def mapper_details(concept_id):
    # subquery for searching for mapped concepts and returning concept details
    concept_info = db.session.query(VConcepts)\
     .filter(VConcepts.concept_id == concept_id)\
     .subquery()

    # mapping history
    hist = db.session.query(VMapping)\
     .filter((VMapping.source_concept_id == concept_id)
      & (VMapping.valid == True))\
     .subquery()

    concept_history = db.session.query(hist)\
     .with_entities(hist.c.valid,
      func.concat(hist.c.last_name, ', ', func.substr(hist.c.first_name,1,1), ' (', hist.c.organisation_name, ')').label('mapper'),
      hist.c.destination_code_text,
      hist.c.destination_term_text,
      hist.c.event_type_name,
      func.coalesce(hist.c.comment, '').label('comment'),
      func.to_char(hist.c.insert_ts, 'YYYY-mm-dd HH24:MI').label('insert_ts'))\
     .order_by(hist.c.insert_ts.desc())\
     .all()

    # other source concepts to same destination concept
    other_concepts = db.session.query(VMapping)\
     .filter((VMapping.destination_concept_id == hist.c.destination_concept_id)
      & (VMapping.valid == True))\
     .with_entities(VMapping.valid,
      func.concat(VMapping.last_name, ', ', func.substr(VMapping.first_name,1,1), ' (', VMapping.organisation_name, ')').label('mapper'),
      VMapping.source_code_text,
      VMapping.source_term_text,
      VMapping.event_type_name,
      func.coalesce(VMapping.comment, '').label('comment'),
      func.to_char(VMapping.insert_ts, 'YYYY-mm-dd HH24:MI').label('insert_ts'))\
     .order_by(VMapping.insert_ts.desc())\
     .all()

    # concept details to front-end (details)
    concept_decoded = db.session.query(concept_info)\
     .filter(VConcepts.concept_id == concept_id)\
     .with_entities(VConcepts.code_text,
      VConcepts.term_text,
      VConcepts.obs_number)\
     .all()

    return render_template('home/details.html',
                           history=concept_history,
                           samedest=other_concepts,
                           info=concept_decoded,
                           target=session['target_system'],
                           user_org_name=session['user_organisation_name'])
Example #9
def get_matching_items(search_column, search_item):
	if ('DROP TABLE' in search_item):
		return {}
	if search_column == 'id':
		if search_item.isdigit():
			return Grocery.query.filter(Grocery.id == int(search_item))
		else:
			return {}
	elif search_column == 'x_for':
		if search_item.isdigit():
			return Grocery.query.filter(Grocery.x_for == int(search_item))
		else:
			return {}
	elif '*' in search_item or '_' in search_item: 
		search_term = search_item.replace('_', '__')\
							.replace('*', '%')\
							.replace('?', '_')
	elif search_item[-1] == 's':
		search_term = search_item[0:len(search_item) - 1]
		search_term = '%{0}%'.format(search_term)
	else:
		search_term = '%{0}%'.format(search_item)
	if search_column == 'last_sold':
		return Grocery.query.filter(func.to_char(Grocery.last_sold, '%YYYY-MM-DD%').ilike(search_term,)).order_by(Grocery.id)
	return Grocery.query.filter(getattr(Grocery, search_column).ilike(search_term,)).order_by(Grocery.id)
Example #10
def get_columns():
    columns = [ColumnDT(func.concat(PembayaranSppt.kd_propinsi,
                         func.concat(PembayaranSppt.kd_dati2,
                         func.concat(PembayaranSppt.kd_kecamatan,
                         func.concat(PembayaranSppt.kd_kelurahan,
                         func.concat(PembayaranSppt.kd_blok,
                         func.concat(PembayaranSppt.no_urut, 
                         func.concat(PembayaranSppt.kd_jns_op, 
                         func.concat(PembayaranSppt.thn_pajak_sppt, 
                         func.concat(PembayaranSppt.pembayaran_sppt_ke,
                         func.concat(PembayaranSppt.kd_kanwil, 
                         func.concat(PembayaranSppt.kd_kantor, 
                                PembayaranSppt.kd_tp, 
                                ))))))))))), mData='id'),
        ColumnDT(func.concat(PembayaranSppt.kd_propinsi,
                         func.concat(PembayaranSppt.kd_dati2,
                         func.concat(PembayaranSppt.kd_kecamatan,
                         func.concat(PembayaranSppt.kd_kelurahan,
                         func.concat(PembayaranSppt.kd_blok,
                         func.concat(PembayaranSppt.no_urut, PembayaranSppt.kd_jns_op)))))), mData='nop'),
        ColumnDT(PembayaranSppt.thn_pajak_sppt, mData='thn_pajak_sppt'),
        ColumnDT(PembayaranSppt.pembayaran_sppt_ke, mData='pembayaran_sppt_ke'),
        ColumnDT(PembayaranSppt.denda_sppt, mData='denda_sppt'),
        ColumnDT(PembayaranSppt.jml_sppt_yg_dibayar, mData='jml_sppt_yg_dibayar'),
        ColumnDT(func.to_char(PembayaranSppt.tgl_pembayaran_sppt,'DD-MM-YYYY'), mData='tgl_pembayaran_sppt')
        ]

    query = PbbDBSession.query().select_from(PembayaranSppt)
    return columns, query
Example #11
def analyze_trips_by_moments(weekday, intervalo, years):
    """ Numero de viajes en determinados lapsos del día por un año """
    def generate_time_lapse(intervalo):
        """ Regresa un array de strings en formato %H:%M """
        delta = dt.timedelta(minutes=intervalo)  # 1440 minutos en un día
        start = dt.datetime.now().replace(hour=0, minute=0)
        tiempos = [(start + i * delta).strftime('%H:%M') for i in range(int(1440 / intervalo))]
        tiempos.append('23:59:59')  # Último momento del día
        return tiempos

    tiempos, result = generate_time_lapse(intervalo), {}
    for year in years:
        # Read all the trips made in a given year
        print('Reading data for year {}'.format(year))
        trips_per_year = m.s.query(m.Trip).filter(and_(
            m.Trip.departure_time >= '{}-01-01 00:00:00'.format(year),
            m.Trip.departure_time <= '{}-12-31 23:59:59'.format(year),
            func.to_char(m.Trip.departure_time, 'ID') == str(weekday+1)  # In SQL these run from 1 to 7
        ))
        df_year = pd.read_sql(trips_per_year.statement, trips_per_year.session.bind)

        # Create a column representing the day of the week the trip was made on
        # 0 is Monday and 6 is Sunday
        df_year['weekday'] = df_year['departure_time'].apply(lambda x: x.weekday())
        # Set DatetimeIndex
        df_year.set_index('departure_time', inplace=True)

        print('Analyzing data for year {}'.format(year))
        result[year] = {tiempos[t]: len(df_year[df_year.weekday == weekday].between_time(
                        tiempos[t],
                        tiempos[t+1]).index) for t in range(len(tiempos)-1)}

    with open('data/trips_during_weekday_{}.json'.format(weekday_name(weekday)), 'w') as file:
        json.dump(result, file)
Example #12
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.filter_by_year.choices = [
            item.start_date.strftime('%Y')
            for item in db.session.query(ForecastItem).distinct(
                extract('year', ForecastItem.start_date))
        ]

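        # 'FMMonth' renders the full month name without Postgres's blank padding;
        # DISTINCT on it keeps one row per month name.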
        self.filter_by_month.choices = [
            item.start_date.strftime('%B')
            for item in db.session.query(ForecastItem).distinct(
                func.to_char(ForecastItem.start_date, "FMMonth"))
        ]

        self.filter_by_employee.choices = [
            f'{item.employee_item.first_name} ' \
            f'{item.employee_item.last_name}' \
            f'{item.employee_item.department}' for item in
            db.session.query(ForecastItem)
                .join(Employee)
                .join(Department)
                .distinct(Employee.first_name)
                .order_by(Employee.first_name)
        ]

        self.filter_by_project_number.choices = [
            item.project_item.project_number
            for item in db.session.query(ForecastItem).join(Project).distinct(
                Project.project_number).order_by(Project.project_number)
        ]
Example #13
def home():
    curr_user = current_user.id
    form = FilterForm()
    newTask = ForecastItemsForm()
    cal = calendar.TextCalendar(calendar.SUNDAY)
    days = []
    selected_year = datetime.today().year
    selected_month = datetime.today().month
    timeObject = cal.monthdatescalendar(selected_year, selected_month)

    for _time in timeObject:
        for _date in _time:
            if _date.month == selected_month:
                days.append(_date.strftime('%m/%d/%Y'))

    if current_user.role == 'Admin':
        data = db.session.query(ForecastItem) \
            .filter(extract('month', ForecastItem.start_date) == selected_month).all()

    elif current_user.role == 'Editor':
        data = db.session.query(ForecastItem) \
            .filter(and_(extract('month', ForecastItem.start_date) == selected_month),
                    ForecastItem.user_id == curr_user).all()
    else:
        data = db.session.query(ForecastItem) \
            .filter(and_(extract('month', ForecastItem.start_date) == selected_month),
                    ForecastItem.employee_id == current_user.employee_id).all()

    if request.method == "POST" and form.validate_on_submit():
        new_year = request.form["filter_by_year"]
        new_month = request.form["filter_by_month"]
        days = []
        selected_year = int(new_year)
        selected_month = datetime.strptime(new_month, '%B').month
        timeObject = cal.monthdatescalendar(selected_year, selected_month)

        for _time in timeObject:
            for _date in _time:
                if _date.month == selected_month:
                    days.append(_date.strftime('%m/%d/%Y'))
        form.filter_by_year.data = new_year

        form.filter_by_month.choices = [
            item.start_date.strftime('%B') for item in
            db.session.query(ForecastItem)
                .distinct(func.to_char(ForecastItem.start_date, "FMMonth"))
                .filter(extract('year', ForecastItem.start_date) == f'{new_year}')
        ]

        data = db.session.query(ForecastItem).filter(
            and_(extract('year', ForecastItem.start_date) == new_year,
                 extract('month', ForecastItem.start_date) == selected_month,
                 ForecastItem.user_id == curr_user)).all()

    return render_template('home.html',
                           days=days,
                           data=data,
                           form=form,
                           newTask=newTask)
Example #14
 def get_reports_download(self, start_date: datetime, end_date: datetime):
     df = []
     mapped_response = {}
     # {
     #     "data": [
     #         {
     #             "STONE": [{
     #                 "day": "01", 
     #                 "MerchantNotFound": 0, 
     #                 "Success": 0, 
     #                 "InternalServeErrors": 0
     #             }]
     #         }
     #     ],
     #     "mapKeys": ["MerchantNotFound", "Success", "InternalServeErrors"]
     # }
     try:
         queue_files = self.session.query(func.to_char(QueueDetail.createdAt, 'YYYY-MM-DD').label("day"), 
                                          func.count(QueueDetail.detailId).label("quantity"),
                                          QueueDetail.statusCode,
                                          QueueProcesses.acquirer)\
                                   .join(QueueProcesses, QueueDetail.processId==QueueProcesses.process_id)\
                                   .group_by("day", QueueDetail.statusCode, QueueProcesses.acquirer)\
                                   .filter(between(QueueDetail.createdAt, start_date, end_date))
         df = pd.read_sql(queue_files.statement, self.session.bind)
         # pivot table
         df['quantity'] = df['quantity'].astype(int)
         # print(df)
         df_pivot = df.pivot_table(values=['quantity'], index=df.index, columns=['status_code']).fillna('0').astype(int)
         df = df.merge(df_pivot, left_index=True, right_index=True, how='outer').sort_values('day')
         df = df.groupby(by=['acquirer', 'day']).sum().reset_index().drop(['quantity'], axis=1)
         responses = {
             '200': "BAIXADOS",
             '201': "BAIXADOS",
             '400': "BADREQUEST",
             '500': "ERRO INTERNO DA ADQUIRENTE",
             '401': "SEM CONCESSÃO",
             '403': "NÃO AUTORIZADO",
             '503': "TIMEOUT",
             '0': 'SEM CAD CONCIL',
             'REPC': 'AGUAR.REPROCESSAMENTO',
             'CANC': 'CANCELADO',
         }
         df.rename(columns={(name, status): responses[status] if status in responses else name for name, status  in df.columns[2:]}, inplace=True)     
         acquirers = df['acquirer'].unique().tolist()
         mapped_response = {
             acquirer: df.loc[df['acquirer']==acquirer].to_dict(orient='records') for acquirer in acquirers
         }
         mapkeys = {}
         for acquirer in acquirers:
             mapkeys[acquirer] = []
             for data in mapped_response[acquirer]:
                 mapkeys[acquirer].extend([key for key, value in data.items() if value and key not in ('day', 'acquirer')])
                 mapkeys[acquirer] = list(set(mapkeys[acquirer]))
     except Exception as error:
         print(str(error))
     finally:
         self.session.close()
         return mapped_response, mapkeys  
Example #15
def sqltracks(user):
    ''' Query to get track data from a given user '''
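    # Label every track with its hour and date ('HH24 DD-MM-YYYY'), then count tracks per label.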
    ast = session.query(func.to_char(
        func.to_timestamp(Tracks.timestamp), 'HH24 DD-MM-YYYY').label('t')
    ).filter(Tracks.username==user).subquery('ast')
    query = session.query(ast.c.t, func.count(ast.c.t).label('count')
    ).group_by(ast.c.t).order_by(ast.c.t)
    return query
Example #16
File: main.py Project: cfh294/philly-crime
def models():
    model_info = db.session.query(
        CrimeModel.classifier,
        CrimeModel.area_type,
        CrimeModel.accuracy, 
        func.to_char(CrimeModel.last_run, "mm/dd/yyyy").label("last_run")
    ).order_by(CrimeModel.classifier).all()
    return render_template("models.html", model_info=model_info)
Example #17
def trips_by_hour(hour, year, exclude_days=['6', '7']):
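    # to_char(..., 'ID') gives the ISO day of week (1 = Monday .. 7 = Sunday);
    # excluding '6' and '7' drops weekends.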
    trips = m.s.query(m.Trip).filter(and_(
        hour == extract('hour', m.Trip.departure_time),
        m.Trip.departure_time >= '{}-01-01 00:00:00'.format(year),
        m.Trip.departure_time <= '{}-12-31 23:59:59'.format(year),
        ~func.to_char(m.Trip.departure_time, 'ID').in_(exclude_days)
    ))
    return pd.read_sql(trips.statement, trips.session.bind)
Example #18
    def view_act(self):
        req = self.req
        ses = req.session
        params = req.params
        url_dict = req.matchdict
        awal = self.dt_awal
        akhir = self.dt_akhir
        if url_dict['id'] == 'grid':
            if url_dict['id'] == 'grid':
                columns = [
                    ColumnDT(func.concat(
                        PembayaranSppt.kd_propinsi,
                        func.concat(
                            ".",
                            func.concat(
                                PembayaranSppt.kd_dati2,
                                func.concat(
                                    "-",
                                    func.concat(
                                        PembayaranSppt.kd_kecamatan,
                                        func.concat(
                                            ".",
                                            func.concat(
                                                PembayaranSppt.kd_kelurahan,
                                                func.concat(
                                                    "-",
                                                    func.concat(
                                                        PembayaranSppt.kd_blok,
                                                        func.concat(
                                                            ".",
                                                            func.concat(
                                                                PembayaranSppt.
                                                                no_urut,
                                                                func.concat(
                                                                    ".",
                                                                    PembayaranSppt
                                                                    .kd_jns_op)
                                                            ))))))))))),
                             mData='nop'),
                    ColumnDT(PembayaranSppt.thn_pajak_sppt, mData='tahun'),
                    ColumnDT(PembayaranSppt.pembayaran_sppt_ke, mData='ke'),
                    ColumnDT(func.to_char(PembayaranSppt.tgl_pembayaran_sppt,
                                          'DD-MM-YYYY'),
                             mData='tanggal'),
                    ColumnDT(PembayaranSppt.jml_sppt_yg_dibayar -
                             PembayaranSppt.denda_sppt,
                             mData='pokok'),
                    ColumnDT(PembayaranSppt.denda_sppt, mData='denda'),
                    ColumnDT(PembayaranSppt.jml_sppt_yg_dibayar,
                             mData='bayar'),
                    ColumnDT(PembayaranSppt.posted, mData='posted')
                ]

                query = pbbDBSession.query().select_from(PembayaranSppt).\
                                     filter(PembayaranSppt.tgl_pembayaran_sppt.between(awal,akhir)).\
                                     filter(PembayaranSppt.posted == self.posted)
                rowTable = DataTables(req.GET, query, columns)
                return rowTable.output_result()
Example #19
    def view_csv(self):
        url_dict = self.req.matchdict
        query = pbbDBSession.query(func.concat(PembayaranSppt.kd_propinsi,
                               func.concat(".",
                               func.concat(PembayaranSppt.kd_dati2,
                               func.concat("-",
                               func.concat(PembayaranSppt.kd_kecamatan,
                               func.concat(".",
                               func.concat(PembayaranSppt.kd_kelurahan,
                               func.concat("-",
                               func.concat(PembayaranSppt.kd_blok,
                               func.concat(".",
                               func.concat(PembayaranSppt.no_urut,
                               func.concat(".", PembayaranSppt.kd_jns_op)))))))))))).label('nop'),
                               PembayaranSppt.thn_pajak_sppt,
                               PembayaranSppt.pembayaran_sppt_ke,
                               func.to_char(PembayaranSppt.tgl_pembayaran_sppt,'DD-MM-YYYY').label('tanggal'),
                               (PembayaranSppt.jml_sppt_yg_dibayar-PembayaranSppt.denda_sppt).label('pokok'),
                               PembayaranSppt.denda_sppt.label('denda'),
                               PembayaranSppt.jml_sppt_yg_dibayar.label('bayar'),
                               PembayaranSppt.posted,).\
                          filter(PembayaranSppt.tgl_pembayaran_sppt.between(self.dt_awal,self.dt_akhir))

        if url_dict['rpt'] == 'csv':
            filename = 'pbb-realisasi.csv'
            return csv_response(self.req, csv_rows(query), filename)

        if url_dict['rpt'] == 'pdf':
            _here = os.path.dirname(__file__)
            path = os.path.join(os.path.dirname(_here), 'static')
            print "XXXXXXXXXXXXXXXXXXX", os.path

            logo = os.path.abspath("pajak/static/img/logo.png")
            line = os.path.abspath("pajak/static/img/line.png")

            path = os.path.join(os.path.dirname(_here), 'reports')
            rml_row = open_rml_row(path + '/pbb_realisasi.row.rml')

            rows = []
            for r in query.all():
                s = rml_row.format(nop=r.nop,
                                   thn_pajak_sppt=r.thn_pajak_sppt,
                                   pembayaran_sppt_ke=r.pembayaran_sppt_ke,
                                   tanggal=r.tanggal,
                                   pokok=r.pokok,
                                   denda=r.denda,
                                   bayar=r.bayar,
                                   posted=r.posted)
                rows.append(s)

            pdf, filename = open_rml_pdf(path + '/pbb_realisasi.rml',
                                         rows=rows,
                                         company=self.req.company,
                                         departement=self.req.departement,
                                         logo=logo,
                                         line=line,
                                         address=self.req.address)
            return pdf_response(self.req, pdf, filename)
Example #20
    def view_act(self):
        url_dict = self.req.matchdict
        if url_dict['id']=='grid':
            if url_dict['id']=='grid':
                columns = [
                    ColumnDT(func.concat(SpptAkrual.kd_propinsi,
                             func.concat(SpptAkrual.kd_dati2, 
                             func.concat(SpptAkrual.kd_kecamatan,
                             func.concat(SpptAkrual.kd_kelurahan,
                             func.concat(SpptAkrual.kd_blok,
                             func.concat(SpptAkrual.no_urut,
                             func.concat(SpptAkrual.kd_jns_op,
                             func.concat(SpptAkrual.thn_pajak_sppt,
                             SpptAkrual.siklus_sppt)))))))) ,
                             mData='id', global_search=True),
                    ColumnDT(func.concat(SpptAkrual.kd_propinsi,
                             func.concat(".", 
                             func.concat(SpptAkrual.kd_dati2, 
                             func.concat("-", 
                             func.concat(SpptAkrual.kd_kecamatan,
                             func.concat(".", 
                             func.concat(SpptAkrual.kd_kelurahan,
                             func.concat("-", 
                             func.concat(SpptAkrual.kd_blok,
                             func.concat(".", 
                             func.concat(SpptAkrual.no_urut,
                             func.concat(".", SpptAkrual.kd_jns_op)))))))))))) ,
                             mData='nop', global_search=True),
                    ColumnDT(SpptAkrual.thn_pajak_sppt, mData='tahun', global_search=True),
                    ColumnDT(SpptAkrual.siklus_sppt, mData='siklus', global_search=True),
                    ColumnDT(SpptAkrual.nm_wp_sppt, mData='nama_wp', global_search=True),
                    ColumnDT(SpptAkrual.pbb_yg_harus_dibayar_sppt, mData='nilai', global_search=False),
                    ColumnDT(func.to_char(SpptAkrual.tgl_terbit_sppt,'DD-MM-YYYY'), mData='tgl_terbit', global_search=True),
                    ColumnDT(func.to_char(SpptAkrual.tgl_cetak_sppt,'DD-MM-YYYY'), mData='tgl_cetak', global_search=True),
                    ColumnDT(func.to_char(SpptAkrual.create_date,'DD-MM-YYYY'), mData='tgl_proses', global_search=True),
                    ColumnDT(SpptAkrual.posted, mData='posted', global_search=True)
                ]

                query = pbbDBSession.query().select_from(SpptAkrual).\
                            filter(SpptAkrual.create_date.between(self.dt_awal, 
                                              self.dt_akhir+timedelta(days=1),)).\
                            filter(SpptAkrual.posted == self.posted)
                                     
                rowTable = DataTables(self.req.GET, query, columns)
                return rowTable.output_result()
Example #21
async def popular_encounters_days(pool: Pool,
                                  column: sa.Column) -> List[Record]:
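    # Count encounters per weekday name from to_char(column, 'Day'), most frequent first.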
    query = (encounters_table.select().with_only_columns([
        func.to_char(column, 'Day').label('weekday'),
        func.count(encounters_table.c.id).label('count'),
    ]).group_by(text('weekday')).order_by(text('count DESC')))

    async with pool.acquire() as conn:
        return await conn.fetch(query)
Example #22
        def _datetime_query(default, sqlite):
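            # Dialect-aware date bucket: to_char() on the default backend, strftime() on SQLite.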
            dt_column = if_dialect(
                default=func.to_char(CoinValue.datetime, default),
                sqlite=func.strftime(sqlite, CoinValue.datetime),
            )

            return select(CoinValue, func.max(CoinValue.datetime),
                          dt_column).group_by(CoinValue.coin_id, CoinValue,
                                              dt_column)
Example #23
class birdsActivity(db.Model):
    __tablename__ = "birds_activity"
    id = Column(DATETIME,
                primary_key=True,
                index=False,
                unique=True,
                nullable=False,
                default=func.to_char())
    sensor = Column(String(64), index=False, unique=False, nullable=False)
Example #24
 def get_average(self, interval: int, acquirer: str):
     # nested queries
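     # Count distinct details with statusCode 200 per day for this acquirer;
     # the outer query then averages those daily totals.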
     sums = self.session.query(func.count(func.distinct(QueueDetail.detailId)).label("baixado"))\
                                   .join(QueueProcesses, QueueDetail.processId==QueueProcesses.process_id)\
                                   .group_by(func.to_char(QueueDetail.createdAt, 'YYYY-MM-DD'))\
                                   .filter(QueueDetail.statusCode == '200', QueueProcesses.acquirer==acquirer)\
                                   .subquery()
     average = self.session.query(func.avg(sums.c.baixado)).scalar() or 0
     return int(average * interval)
Example #25
def overview_messages_count():
    time_unit_models = {
        'hour': EmqxBillHour, 'day': EmqxBillDay, 'month': EmqxBillMonth
    }
    time_unit, time_format = _validate_time_unit()
    model = time_unit_models[time_unit]
    charts_config = get_charts_config(time_unit=time_unit)
    start_time = charts_config['start_time']
    x_data = charts_config['x_data']
    # Query different models according to time unit
    time_messages_count = db.session \
        .query(func.to_char(model.countTime, time_format).label('msgTime'),
               model.msgType, func.sum(model.msgCount)) \
        .filter_tenant(tenant_uid=g.tenant_uid) \
        .filter(model.countTime > start_time) \
        .group_by(func.to_char(model.countTime, time_format), model.msgType) \
        .order_by(func.to_char(model.countTime, time_format)).all()
    records = _convert_query_message(time_messages_count, x_data)
    return jsonify(records)
Example #26
def shows():
    # displays list of shows at /shows
    shows = Show.query.with_entities(
        Show.venue_id.label("venue_id"), Venue.name.label("venue_name"),
        Show.artist_id.label("artist_id"), Artist.name.label("artist_name"),
        Artist.image_link.label("artist_image_link"),
        func.to_char(Show.start_time, 'YYYY-MM-DD HH24:MI:SS').label(
            "start_time")).join(Venue).join(Artist).all()

    return render_template('pages/shows.html', shows=shows)
Example #27
def sqltracks(user):
    ''' Query to get track data from a given user '''
    ast = session.query(
        func.to_char(func.to_timestamp(Tracks.timestamp),
                     'HH24 DD-MM-YYYY').label('t')).filter(
                         Tracks.username == user).subquery('ast')
    query = session.query(ast.c.t,
                          func.count(ast.c.t).label('count')).group_by(
                              ast.c.t).order_by(ast.c.t)
    return query
Example #28
        def _datetime_id_query(default, sqlite):
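            # Same dialect-aware date bucket as _datetime_query; only the id column of the grouped rows is selected.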
            dt_column = if_dialect(
                default=func.to_char(CoinValue.datetime, default),
                sqlite=func.strftime(sqlite, CoinValue.datetime),
            )

            grouped = select(CoinValue, func.max(CoinValue.datetime),
                             dt_column).group_by(CoinValue.coin_id, CoinValue,
                                                 dt_column)

            return select(grouped.c.id.label("id")).select_from(grouped)
Example #29
    def get_columns(self):
        _columns = list()
        _columns.append(Orders.id)
        _columns.append(Orders.order_name)
        _columns.append(Orders.customer_id)
        _columns.append(func.string_agg(OrderItems.product, ',').label('product_names'))
        _columns.append(func.to_char(Orders.created_at, 'Mon DDth, YYYY HH24:MI AM').label('order_date'))

        # Placeholder for missing columns for now
        _columns.append(Orders.id.label('customer_name'))
        _columns.append(Orders.id.label('customer_company'))
        return _columns
Example #30
def heat_change_avg():
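    # Average Fact.actual per weekday name; to_char(..., 'Day') returns a blank-padded name, stripped below.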
    query_heatbox = db.session.query(
        func.to_char(Fact.lunch_date, 'Day').distinct().label('Weekday'),
        func.avg(Fact.actual).label('avg')).group_by('Weekday')

    json_resp = []

    for item in query_heatbox:
        temp_dict = dict(weekday=item.Weekday.strip(), vsum=item.avg)
        json_resp.append(temp_dict)

    return jsonify(json_resp)
Example #31
def fetch_prices_of_a_day(session: Session, ric: str,
                          jst: datetime) -> List[Tuple[datetime, Decimal]]:
    results = session \
        .query(func.to_char(in_utc(Price.t), 'YYYY-MM-DD HH24:MI:SS').label('t'),
               Price.val) \
        .filter(cast(in_jst(Price.t), Date) == jst.date(), Price.ric == ric) \
        .order_by(Price.t) \
        .all()

    return [(datetime.strptime(r.t,
                               '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC), r.val)
            for r in results]
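
in_utc and in_jst are helpers from the snippet's own project. One plausible sketch, assuming they simply wrap Postgres's timezone() function (an assumption, not the original code):

from sqlalchemy import func

def in_utc(col):
    # Hypothetical helper: render a timestamp column in UTC (AT TIME ZONE 'UTC').
    return func.timezone('UTC', col)

def in_jst(col):
    # Hypothetical helper: render a timestamp column in Japan Standard Time.
    return func.timezone('Asia/Tokyo', col)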
Example #32
def sitemap_xml(request):

    request.response.content_type = "text/xml"

    host = request.environ['HTTP_HOST']
    scheme = request.environ['wsgi.url_scheme']

    urls = []
    for activity, lastmod in Session.query(Activity, func.to_char(Activity.timestamp_entry,'YYYY-MM-DD')).\
        join(Status).filter(Status.name == "active").limit(25000).all():
        loc = request.route_url("activities_read_one", output="html", uid=activity.activity_identifier)
        urls.append({"loc": loc, "lastmod": lastmod})

    for stakeholder, lastmod in Session.query(Stakeholder, func.to_char(Stakeholder.timestamp_entry,'YYYY-MM-DD')).\
        join(Status).filter(Status.name == "active").limit(25000).all():
        loc = request.route_url("stakeholders_read_one", output="html", uid=stakeholder.stakeholder_identifier)
        urls.append({"loc": loc, "lastmod": lastmod})
    

    return {"urls": urls}

    
Example #33
File: stats.py Project: sysnux/astportal
   def fetch_hourly(self, page, rows, sidx, sord='asc', _search='false',
          searchOper=None, searchField=None, searchString=None, **kw):
      ''' Function called on AJAX request made by FlexGrid
      Fetch data from DB, return the list of rows + total + current page
      '''
      if not in_any_group('admin','STATS'):
         return dict(page=0, total=0, rows=[])
 
      try:
         page = int(page)
         rows = int(rows)
         offset = (page-1) * rows
      except:
         page = 1
         rows = 24
         offset = 0

      log.info('fetch_hourly : page=%d, rows=%d, offset=%d, sidx=%s, sord=%s' % (
         page, rows, offset, sidx, sord))

      # Initialize data, in case no data is available for that time slice
      data = [{'id': x, 'cell': ['%d h 00 < %d h 00' % (x, x+1), 0, None]}
         for x in range(24)]

      # Count calls by hour
      if db_engine=='oracle':
         req = func.to_char(CDR.calldate, 'HH24')
      else: # PostgreSql
         req = func.date_trunc('hour', cast(CDR.calldate, TIME))
      cdrs = DBSession.query(req, func.count(req), func.sum(CDR.billsec))
      if self.stats_type:
         # Monthly stats
         d = datetime.datetime.strptime(self.stats_type, '%m/%d/%Y')
         if db_engine=='oracle':
            cdrs = cdrs.filter(func.trunc(CDR.calldate, 'month') == \
               func.trunc(d, 'month'))
         else: # PostgreSql
            cdrs = cdrs.filter(func.date_trunc('month', CDR.calldate) == \
               func.date_trunc('month', d))
      cdrs = cdrs.group_by(req)
#      cdrs = cdrs.order_by(func.sum(CDR.billsec))

      for i, c in enumerate(cdrs):
         if db_engine=='oracle':
            j = int(c[0])
         else: # PostgreSql
            j = c[0].seconds / 3600
         data[j] =  {'id': j, 'cell': ['%d h 00 < %d h 00' % (j,j+1), c[1], hms(c[2])]}

      return dict(page=page, total=24, rows=data[offset:offset+page*rows])
Example #34
def stat_daily(page, rows, offset, sidx, sord, date_filter, queues_filter):
   # Day of week distribution
   if db_engine=='oracle':
      xd = func.to_char(Queue_log.timestamp, 'D').label('dow')
      dow = [ '', u'dimanche', u'lundi', u'mardi', u'mercredi', 
         u'jeudi', u'vendredi', u'samedi']
   else: # PostgreSql
      xd = (extract('dow', Queue_log.timestamp)).label('dow')
      dow = [ u'dimanche', u'lundi', u'mardi', u'mercredi', 
         u'jeudi', u'vendredi', u'samedi']
   q = DBSession.query(xd, (func.count('*')).label('count')).\
      filter(Queue_log.queue_event_id==Queue_event.qe_id).\
      filter(Queue_event.event=='CONNECT').\
      filter(queues_filter).\
      group_by(xd)

   if date_filter is not None:
      q = q.filter(date_filter)
   
   if sidx=='count':
      q = q.order_by(func.count('*')) if sord=='asc' \
            else q.order_by(desc(func.count('*')))
   else:
      q = q.order_by(xd) if sord=='asc' \
            else q.order_by(desc(xd))

   q = q.offset(offset).limit(rows)
   total = q.count()/rows + 1

   data = []
   total_connect = 0
   for i, r in enumerate(q.all()):
      total_connect += r.count
      data.append({ 'id'  : i, 'cell': [dow[int(r.dow)], r.count, 0]
      })

   for x in data:
      pc = 100.0 * x['cell'][1] / total_connect
      x['cell'][2] = '%.1f %%' % pc

   return dict(page=page, total=total, rows=data)
Example #35
def initial_query(session):
    return session.query(*[
        file_model.detached_award_proc_unique,
        file_model.piid,
        file_model.award_modification_amendme,
        file_model.transaction_number,
        file_model.referenced_idv_agency_iden,
        file_model.referenced_idv_agency_desc,
        file_model.parent_award_id,
        file_model.referenced_idv_modificatio,
        file_model.federal_action_obligation,
        file_model.total_obligated_amount,
        file_model.base_exercised_options_val,
        file_model.current_total_value_award,
        file_model.base_and_all_options_value,
        file_model.potential_total_value_awar,
        func.to_char(cast(file_model.action_date, Date), 'YYYYMMDD'),
        func.to_char(cast(file_model.period_of_performance_star, Date), 'YYYYMMDD'),
        func.to_char(cast(file_model.period_of_performance_curr, Date), 'YYYYMMDD'),
        func.to_char(cast(file_model.period_of_perf_potential_e, Date), 'YYYYMMDD'),
        func.to_char(cast(file_model.ordering_period_end_date, Date), 'YYYYMMDD'),
        func.to_char(cast(file_model.solicitation_date, Date), 'YYYYMMDD'),
        file_model.awarding_agency_code,
        file_model.awarding_agency_name,
        file_model.awarding_sub_tier_agency_c,
        file_model.awarding_sub_tier_agency_n,
        file_model.awarding_office_code,
        file_model.awarding_office_name,
        file_model.funding_agency_code,
        file_model.funding_agency_name,
        file_model.funding_sub_tier_agency_co,
        file_model.funding_sub_tier_agency_na,
        file_model.funding_office_code,
        file_model.funding_office_name,
        file_model.foreign_funding,
        file_model.foreign_funding_desc,
        file_model.sam_exception,
        file_model.sam_exception_description,
        file_model.awardee_or_recipient_uniqu,
        file_model.awardee_or_recipient_legal,
        file_model.vendor_doing_as_business_n,
        file_model.cage_code,
        file_model.ultimate_parent_unique_ide,
        file_model.ultimate_parent_legal_enti,
        file_model.legal_entity_country_code,
        file_model.legal_entity_country_name,
        file_model.legal_entity_address_line1,
        file_model.legal_entity_address_line2,
        file_model.legal_entity_city_name,
        file_model.legal_entity_state_code,
        file_model.legal_entity_state_descrip,
        file_model.legal_entity_zip4,
        file_model.legal_entity_congressional,
        file_model.vendor_phone_number,
        file_model.vendor_fax_number,
        file_model.place_of_perform_city_name,
        file_model.place_of_perform_county_na,
        file_model.place_of_performance_state,
        file_model.place_of_perfor_state_desc,
        file_model.place_of_performance_zip4a,
        file_model.place_of_performance_congr,
        file_model.place_of_perform_country_c,
        file_model.place_of_perf_country_desc,
        file_model.pulled_from,
        file_model.contract_award_type,
        file_model.contract_award_type_desc,
        file_model.idv_type,
        file_model.idv_type_description,
        file_model.multiple_or_single_award_i,
        file_model.multiple_or_single_aw_desc,
        file_model.type_of_idc,
        file_model.type_of_idc_description,
        file_model.type_of_contract_pricing,
        file_model.type_of_contract_pric_desc,
        file_model.award_description,
        file_model.action_type,
        file_model.action_type_description,
        file_model.solicitation_identifier,
        file_model.number_of_actions,
        file_model.inherently_government_func,
        file_model.inherently_government_desc,
        file_model.product_or_service_code,
        file_model.product_or_service_co_desc,
        file_model.contract_bundling,
        file_model.contract_bundling_descrip,
        file_model.dod_claimant_program_code,
        file_model.dod_claimant_prog_cod_desc,
        file_model.naics,
        file_model.naics_description,
        file_model.recovered_materials_sustai,
        file_model.recovered_materials_s_desc,
        file_model.domestic_or_foreign_entity,
        file_model.domestic_or_foreign_e_desc,
        file_model.program_system_or_equipmen,
        file_model.program_system_or_equ_desc,
        file_model.information_technology_com,
        file_model.information_technolog_desc,
        file_model.epa_designated_product,
        file_model.epa_designated_produc_desc,
        file_model.country_of_product_or_serv,
        file_model.country_of_product_or_desc,
        file_model.place_of_manufacture,
        file_model.place_of_manufacture_desc,
        file_model.subcontracting_plan,
        file_model.subcontracting_plan_desc,
        file_model.extent_competed,
        file_model.extent_compete_description,
        file_model.solicitation_procedures,
        file_model.solicitation_procedur_desc,
        file_model.type_set_aside,
        file_model.type_set_aside_description,
        file_model.evaluated_preference,
        file_model.evaluated_preference_desc,
        file_model.research,
        file_model.research_description,
        file_model.fair_opportunity_limited_s,
        file_model.fair_opportunity_limi_desc,
        file_model.other_than_full_and_open_c,
        file_model.other_than_full_and_o_desc,
        file_model.number_of_offers_received,
        file_model.commercial_item_acquisitio,
        file_model.commercial_item_acqui_desc,
        file_model.small_business_competitive,
        file_model.commercial_item_test_progr,
        file_model.commercial_item_test_desc,
        file_model.a_76_fair_act_action,
        file_model.a_76_fair_act_action_desc,
        file_model.fed_biz_opps,
        file_model.fed_biz_opps_description,
        file_model.local_area_set_aside,
        file_model.local_area_set_aside_desc,
        file_model.price_evaluation_adjustmen,
        file_model.clinger_cohen_act_planning,
        file_model.clinger_cohen_act_pla_desc,
        file_model.materials_supplies_article,
        file_model.materials_supplies_descrip,
        file_model.labor_standards,
        file_model.labor_standards_descrip,
        file_model.construction_wage_rate_req,
        file_model.construction_wage_rat_desc,
        file_model.interagency_contracting_au,
        file_model.interagency_contract_desc,
        file_model.other_statutory_authority,
        file_model.program_acronym,
        file_model.referenced_idv_type,
        file_model.referenced_idv_type_desc,
        file_model.referenced_mult_or_single,
        file_model.referenced_mult_or_si_desc,
        file_model.major_program,
        file_model.national_interest_action,
        file_model.national_interest_desc,
        file_model.cost_or_pricing_data,
        file_model.cost_or_pricing_data_desc,
        file_model.cost_accounting_standards,
        file_model.cost_accounting_stand_desc,
        file_model.government_furnished_prope,
        file_model.government_furnished_desc,
        file_model.sea_transportation,
        file_model.sea_transportation_desc,
        file_model.undefinitized_action,
        file_model.undefinitized_action_desc,
        file_model.consolidated_contract,
        file_model.consolidated_contract_desc,
        file_model.performance_based_service,
        file_model.performance_based_se_desc,
        file_model.multi_year_contract,
        file_model.multi_year_contract_desc,
        file_model.contract_financing,
        file_model.contract_financing_descrip,
        file_model.purchase_card_as_payment_m,
        file_model.purchase_card_as_paym_desc,
        file_model.contingency_humanitarian_o,
        file_model.contingency_humanitar_desc,
        file_model.alaskan_native_owned_corpo,
        file_model.american_indian_owned_busi,
        file_model.indian_tribe_federally_rec,
        file_model.native_hawaiian_owned_busi,
        file_model.tribally_owned_business,
        file_model.veteran_owned_business,
        file_model.service_disabled_veteran_o,
        file_model.woman_owned_business,
        file_model.women_owned_small_business,
        file_model.economically_disadvantaged,
        file_model.joint_venture_women_owned,
        file_model.joint_venture_economically,
        file_model.minority_owned_business,
        file_model.subcontinent_asian_asian_i,
        file_model.asian_pacific_american_own,
        file_model.black_american_owned_busin,
        file_model.hispanic_american_owned_bu,
        file_model.native_american_owned_busi,
        file_model.other_minority_owned_busin,
        file_model.contracting_officers_deter,
        file_model.contracting_officers_desc,
        file_model.emerging_small_business,
        file_model.community_developed_corpor,
        file_model.labor_surplus_area_firm,
        file_model.us_federal_government,
        file_model.federally_funded_research,
        file_model.federal_agency,
        file_model.us_state_government,
        file_model.us_local_government,
        file_model.city_local_government,
        file_model.county_local_government,
        file_model.inter_municipal_local_gove,
        file_model.local_government_owned,
        file_model.municipality_local_governm,
        file_model.school_district_local_gove,
        file_model.township_local_government,
        file_model.us_tribal_government,
        file_model.foreign_government,
        file_model.organizational_type,
        file_model.corporate_entity_not_tax_e,
        file_model.corporate_entity_tax_exemp,
        file_model.partnership_or_limited_lia,
        file_model.sole_proprietorship,
        file_model.small_agricultural_coopera,
        file_model.international_organization,
        file_model.us_government_entity,
        file_model.community_development_corp,
        file_model.domestic_shelter,
        file_model.educational_institution,
        file_model.foundation,
        file_model.hospital_flag,
        file_model.manufacturer_of_goods,
        file_model.veterinary_hospital,
        file_model.hispanic_servicing_institu,
        file_model.contracts,
        file_model.grants,
        file_model.receives_contracts_and_gra,
        file_model.airport_authority,
        file_model.council_of_governments,
        file_model.housing_authorities_public,
        file_model.interstate_entity,
        file_model.planning_commission,
        file_model.port_authority,
        file_model.transit_authority,
        file_model.subchapter_s_corporation,
        file_model.limited_liability_corporat,
        file_model.foreign_owned_and_located,
        file_model.for_profit_organization,
        file_model.nonprofit_organization,
        file_model.other_not_for_profit_organ,
        file_model.the_ability_one_program,
        file_model.private_university_or_coll,
        file_model.state_controlled_instituti,
        file_model.c1862_land_grant_college,
        file_model.c1890_land_grant_college,
        file_model.c1994_land_grant_college,
        file_model.minority_institution,
        file_model.historically_black_college,
        file_model.tribal_college,
        file_model.alaskan_native_servicing_i,
        file_model.native_hawaiian_servicing,
        file_model.school_of_forestry,
        file_model.veterinary_college,
        file_model.dot_certified_disadvantage,
        file_model.self_certified_small_disad,
        file_model.small_disadvantaged_busine,
        file_model.c8a_program_participant,
        file_model.historically_underutilized,
        file_model.sba_certified_8_a_joint_ve,
        func.to_char(cast(file_model.last_modified, Date), 'YYYYMMDD')])
Example #36
    # Current position query
    pos_cols = ['id', 'symbol', 'description', 'qty', 'price', 'total_value']
    pos_cols = [c for c in positions.columns if c.name in pos_cols]
    distinct_cols = ['id', 'symbol', 'description']
    distinct_cols = [c for c in positions.columns if c.name in distinct_cols]
    date_col = positions.columns.timestamp
    id_col = positions.columns.id
    max_timestamp = func.max(date_col).label('timestamp')
    order = [positions.columns.id, positions.columns.symbol]

    if args.date:
        date = args.date
        current_pos = session.query(max_timestamp, *pos_cols)\
                             .distinct(*distinct_cols)\
                             .group_by(*pos_cols)\
                             .filter(func.to_char(date_col, 'YYYY-MM-DD')==date)\
                             .subquery()
    else:
        date_ids = session.query(max_timestamp, id_col)\
                          .group_by(id_col)\
                          .subquery()
        acc_id, ts = [date_ids.columns.id, date_ids.columns.timestamp]
        date = session.query(func.max(ts)).all()[0][0].date().isoformat()
        current_pos = session.query(positions)\
                             .filter_by(id = acc_id, timestamp = ts)\
                             .subquery()

    timestamps = session.query(current_pos.columns.timestamp)\
                        .order_by(current_pos.columns.timestamp)\
                        .all()
    latest_timestamp = timestamps[-1][0]
Example #37
	def date_output(self, t):
		if self.engine == "mysql":
			return func.date_format(t, '%m/%d')
		if self.engine == "postgresql":
			return func.to_char(t, 'MM/DD')
예제 #38
0
	def date_compare(self, t):
		if self.engine == "mysql":
			return func.date_format(t, '%Y-%m-%d')
		if self.engine == "postgresql":
			return func.to_timestamp(func.to_char(t, 'YYYY-MM-DD'), 'YYYY-MM-DD')
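A hedged usage sketch of the same dialect switch the two helpers above perform; the function name and the Event model mentioned in the usage comment are assumptions, not part of the original class.

# Sketch: a standalone dialect-aware formatter usable with any SQLAlchemy
# column expression.
from sqlalchemy import func

def month_day_expr(engine_name, col):
    """Return an expression formatting col as 'MM/DD' for the given dialect."""
    if engine_name == "mysql":
        return func.date_format(col, '%m/%d')  # MySQL DATE_FORMAT
    return func.to_char(col, 'MM/DD')          # PostgreSQL TO_CHAR

# usage (hypothetical Event model with a DateTime column created_at):
#   session.query(month_day_expr(session.bind.dialect.name, Event.created_at))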
예제 #39
0
def stat_hourly(page, rows, offset, sidx, sord, date_filter, queues_filter):
   # Hourly distribution (30 min sections)
   if db_engine=='oracle':
      xh = func.floor((sql.cast(func.to_char(Queue_log.timestamp, 'HH24'), types.INT) *60 + \
         sql.cast(func.to_char(Queue_log.timestamp, 'MI'), types.INT) ) / 30)
   else: # PostgreSql
      xh = func.floor((extract('hour', Queue_log.timestamp) * 60 + \
         extract('min', Queue_log.timestamp) ) / 30)
   xh = xh.label('xhour')

#   h_incoming = DBSession.query(xh, func.count('*').label('incoming')).\
#      filter(Queue_log.queue_event_id==Queue_event.qe_id).\
#      filter(Queue_event.event=='ENTERQUEUE').filter(queues_filter)
#   if date_filter is not None:
#      h_incoming = h_incoming.filter(date_filter)
#   h_incoming = h_incoming.group_by(xh).order_by(xh).subquery()

   h_connect = DBSession.query(
         xh, func.count('*').label('count')).\
      filter(queues_filter). \
      filter(Queue_log.queue_event_id==Queue_event.qe_id).\
      filter(Queue_event.event=='CONNECT')
   if date_filter is not None:
      h_connect = h_connect.filter(date_filter)
   h_connect = h_connect.group_by(xh).subquery()

   h_abandon = DBSession.query(
         xh, func.count('*').label('count')).\
      filter(queues_filter). \
      filter(Queue_log.queue_event_id==Queue_event.qe_id).\
      filter(Queue_event.event=='ABANDON')
   if date_filter is not None:
      h_abandon = h_abandon.filter(date_filter)
   h_abandon = h_abandon.group_by(xh).subquery()

   h_closed = DBSession.query(
         xh, func.count('*').label('count')).\
      filter(queues_filter). \
      filter(Queue_log.queue_event_id==Queue_event.qe_id).\
      filter(Queue_event.event=='CLOSED')
   if date_filter is not None:
      h_closed = h_closed.filter(date_filter)
   h_closed = h_closed.group_by(xh).subquery()

   h_dissuasion = DBSession.query(
         xh, func.count('*').label('count')).\
      filter(queues_filter). \
      filter(Queue_log.queue_event_id==Queue_event.qe_id).\
      filter(Queue_event.event=='DISSUASION')
   if date_filter is not None:
      h_dissuasion = h_dissuasion.filter(date_filter)
   h_dissuasion = h_dissuasion.group_by(xh).subquery()

   q = DBSession.query(xh, func.count('*').label('incoming'),
            h_abandon.c.count.label('abandon'),
            h_connect.c.count.label('connect'), 
            h_dissuasion.c.count.label('dissuasion'), 
            h_closed.c.count.label('closed')).\
      filter(Queue_log.queue_event_id==Queue_event.qe_id). \
      filter(Queue_event.event=='ENTERQUEUE').filter(queues_filter). \
      outerjoin((h_connect, xh==h_connect.c.xhour)). \
      outerjoin((h_abandon, xh==h_abandon.c.xhour)). \
      outerjoin((h_closed, xh==h_closed.c.xhour)). \
      outerjoin((h_dissuasion, xh==h_dissuasion.c.xhour)). \
      group_by(xh,h_abandon.c.count, h_connect.c.count, 
            h_dissuasion.c.count, h_closed.c.count)
   
   if date_filter is not None:
      q = q.filter(date_filter)

   if sidx=='incoming':
      q = q.order_by(desc(func.count('*'))) if sord=='desc' \
            else q.order_by(func.count('*'))

   elif sidx=='connect':
      q = q.order_by(desc(h_connect.c.count)) if sord=='desc' \
            else q.order_by(h_connect.c.count)

   elif sidx=='abandon':
      q = q.order_by(desc(h_abandon.c.count)) if sord=='desc' \
            else q.order_by(h_abandon.c.count)

   elif sidx=='dissuasion':
      q = q.order_by(desc(h_dissuasion.c.count)) if sord=='desc' \
            else q.order_by(h_dissuasion.c.count)

   elif sidx=='closed':
      q = q.order_by(desc(h_closed.c.count)) if sord=='desc' \
            else q.order_by(h_closed.c.count)

   else:
      q = q.order_by(desc(xh)) if sord=='desc' \
            else q.order_by(xh)

   total = q.count() // rows + 1
   q = q.offset(offset).limit(rows)
   data = []
   total_in = 0
   for i, r in enumerate(q.all()):
      total_in += r.incoming
      data.append({ 'id'  : i, 'cell': [
         u'%dh30' % (r.xhour/2) if r.xhour % 2 \
            else u'%dh' % (r.xhour/2),
         r.incoming, 0, r.closed, 0, r.dissuasion, 0,
         r.abandon, 0, r.connect, 0]
      })

   for x in data:
      x['cell'][2] = '%.1f %%' % (100.0 * x['cell'][1] / total_in) \
            if x['cell'][1] else ''
      x['cell'][4] = '%.1f %%' % (100.0 * x['cell'][3] / total_in) \
            if x['cell'][3] else ''
      x['cell'][6] = '%.1f %%' % (100.0 * x['cell'][5] / total_in) \
            if x['cell'][5] else ''
      x['cell'][8] = '%.1f %%' % (100.0 * x['cell'][7] / total_in) \
            if x['cell'][7] else ''
      x['cell'][10] = '%.1f %%' % (100.0 * x['cell'][9] / total_in) \
            if x['cell'][9] else ''
   log.debug(data)
   return dict(page=page, total=total, rows=data)
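A small, standalone sketch (pure Python, not from the original) of the arithmetic behind xhour and the row labels built above: floor((hour*60 + minute)/30) yields an even bucket for the ':00' half of each hour and an odd bucket for the ':30' half.

# Sketch: reproduce the 30-minute bucket logic of stat_hourly in plain Python.
def bucket_label(hour, minute):
    xhour = (hour * 60 + minute) // 30          # same as the SQL floor(...)
    half = xhour % 2                            # 0 -> ':00' slice, 1 -> ':30' slice
    return '%dh30' % (xhour // 2) if half else '%dh' % (xhour // 2)

assert bucket_label(9, 15) == '9h'              # 09:00-09:29 -> bucket 18
assert bucket_label(9, 45) == '9h30'            # 09:30-09:59 -> bucket 19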
def initial_query(session):
    """ Creates the initial query for D2 files.

        Args:
            session: The current DB session

        Returns:
            The base query (a select from the PublishedAwardFinancialAssistance table with the specified columns).
    """
    return session.query(
        file_model.afa_generated_unique,
        file_model.fain,
        file_model.award_modification_amendme,
        file_model.uri,
        file_model.sai_number,
        file_model.total_funding_amount,
        file_model.federal_action_obligation,
        file_model.non_federal_funding_amount,
        file_model.face_value_loan_guarantee,
        file_model.original_loan_subsidy_cost,
        func.to_char(cast(file_model.action_date, Date), 'YYYYMMDD'),
        func.to_char(cast(file_model.period_of_performance_star, Date), 'YYYYMMDD'),
        func.to_char(cast(file_model.period_of_performance_curr, Date), 'YYYYMMDD'),
        file_model.awarding_agency_code,
        file_model.awarding_agency_name,
        file_model.awarding_sub_tier_agency_c,
        file_model.awarding_sub_tier_agency_n,
        file_model.awarding_office_code,
        file_model.awarding_office_name,
        file_model.funding_agency_code,
        file_model.funding_agency_name,
        file_model.funding_sub_tier_agency_co,
        file_model.funding_sub_tier_agency_na,
        file_model.funding_office_code,
        file_model.funding_office_name,
        file_model.awardee_or_recipient_uniqu,
        file_model.awardee_or_recipient_legal,
        file_model.ultimate_parent_unique_ide,
        file_model.ultimate_parent_legal_enti,
        file_model.legal_entity_country_code,
        file_model.legal_entity_country_name,
        file_model.legal_entity_address_line1,
        file_model.legal_entity_address_line2,
        file_model.legal_entity_city_code,
        file_model.legal_entity_city_name,
        file_model.legal_entity_state_code,
        file_model.legal_entity_state_name,
        file_model.legal_entity_zip5,
        file_model.legal_entity_zip_last4,
        file_model.legal_entity_county_code,
        file_model.legal_entity_county_name,
        file_model.legal_entity_congressional,
        file_model.legal_entity_foreign_city,
        file_model.legal_entity_foreign_provi,
        file_model.legal_entity_foreign_posta,
        file_model.place_of_performance_code,
        file_model.place_of_performance_city,
        file_model.place_of_perform_county_co,
        file_model.place_of_perform_county_na,
        file_model.place_of_perform_state_nam,
        file_model.place_of_performance_zip4a,
        file_model.place_of_performance_congr,
        file_model.place_of_perform_country_c,
        file_model.place_of_perform_country_n,
        file_model.place_of_performance_forei,
        file_model.cfda_number,
        file_model.cfda_title,
        file_model.assistance_type,
        file_model.assistance_type_desc,
        file_model.award_description,
        file_model.business_funds_indicator,
        file_model.business_funds_ind_desc,
        file_model.business_types,
        file_model.business_types_desc,
        file_model.correction_delete_indicatr,
        file_model.correction_delete_ind_desc,
        file_model.action_type,
        file_model.action_type_description,
        file_model.record_type,
        file_model.record_type_description,
        func.to_char(cast(file_model.modified_at, Date), 'YYYYMMDD'))
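The query above relies throughout on the to_char(cast(col, Date), 'YYYYMMDD') idiom to emit dates as compact strings; a minimal sketch of that expression in isolation (the usage comment is hypothetical).

# Sketch: format a timestamp column as 'YYYYMMDD' on PostgreSQL by casting
# to Date first, as the D2 query columns above do.
from sqlalchemy import Date, cast, func

def yyyymmdd(column):
    # to_char(cast(column AS date), 'YYYYMMDD') -> e.g. '20240131'
    return func.to_char(cast(column, Date), 'YYYYMMDD')

# usage (hypothetical):
#   session.query(yyyymmdd(file_model.action_date)).all()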
예제 #41
0
   def __call__(self, cdr):
      ''' Compute the cost of a call represented by a CDR.
      Parameter: CDR object
      Returns: (None, None) on error, (TTC, HT) otherwise
      '''

      if self.month is None or self.month != cdr.calldate.strftime('%Y%m'):
         # First call, or change of month: look up the total already
         # billed for this month
         print u'Channel %s, new month: %s -> %s' % (
            self.channel, self.month, cdr.calldate.strftime('%Y%m'))
         self.month = cdr.calldate.strftime('%Y%m')
         tot, = DBSession.query(func.sum(CDR.billsec)). \
            filter(func.to_char(CDR.calldate, 'YYYYMM')==self.month). \
            filter(CDR.calldate<cdr.calldate). \
            filter(CDR.dstchannel==self.channel). \
            filter(CDR.ht!=None).one()
         self.tot = tot if tot is not None else 0

      print u'Channel %s, month %s, total %d' % (self.channel, self.month, self.tot)

      for z in zones:
         if (cdr.dst[2:].startswith(z)):
            break
      else:
         print '*' * 20, cdr, u'Zone not found !!!'
         return None, None

      if zones_data[z]['zone_tarifaire'] == 'Internationale':
         tarif = self.tarif[zones_data[z]['zaa']]

      elif zones_data[z]['zone_tarifaire'] == 'Nationale':
         if zones_data[z]['ile_ou_pays'] == 'TAHITI':
            tarif = self.tarif['local_intra']
         else:
            tarif = self.tarif['local_inter']

      elif zones_data[z]['zone_tarifaire'] == 'Interdit':
         print '*' * 20, cdr, u'forbidden zone !!!'
         return None, None

      elif zones_data[z]['zone_tarifaire'] == 'Audiotel_3665':
         print '*' * 20, cdr, u'Audiotel_3665 !!!'
         return None, None

      elif zones_data[z]['zone_tarifaire'] == 'GSM':
         tarif = self.tarif['GSM']

      else: # some other zone?
         print '*' * 20, cdr, u'Unknown zone !!!'
         return None, None

#      print u'%s: prefix %s, zone %s, destination %s, tariffs (%s) %s' % (
#         cdr.dst[2:], z, zones_data[z]['zaa'], zones_data[z]['ile_ou_pays'],
#         self.typ, tarif)

      forfait_min = '?'

      if self.tot > self.forfait:
         # Over the plan allowance
         if cdr.billsec > 60:
            # Billed per second
            ttc = int(ceil(cdr.billsec * tarif['hors_forfait'] / Decimal(60.0)))
            forfait_min = 'HORS sec'
         else:
            # First minute is indivisible
            ttc = tarif['hors_forfait']
            forfait_min = 'HORS 1 min'
      else:
         # Plan allowance not yet used up
         if cdr.billsec > 60:
            # Billed per second
            ttc = int(ceil(cdr.billsec * tarif['dans_forfait'] / Decimal(60.0)))
            forfait_min = 'FORFAIT sec'
         else:
            # First minute is indivisible
            ttc = tarif['dans_forfait']
            forfait_min = 'FORFAIT 1 min'

      self.tot += ttc

      if verbose:
         print '%s : %s -> %s %d sec -> %d F.TTC (Optimum_%s forfait=%s, hors=%s, %s)' % (
            cdr.calldate, cdr.src, cdr.dst[2:], cdr.billsec, ttc, self.typ,
            tarif['dans_forfait'], tarif['hors_forfait'], forfait_min)

      return  ttc, int(round(ttc / tva))
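A worked sketch of the billing rule implemented above: the first minute is indivisible, and anything longer is billed per second at the applicable per-minute rate (the rates below are invented for illustration, not real tariffs).

# Sketch only, mirroring the branches above; not the original tariff data.
from decimal import Decimal
from math import ceil

def call_cost(billsec, rate_per_minute):
    if billsec <= 60:
        return rate_per_minute                                  # first minute due in full
    return int(ceil(billsec * rate_per_minute / Decimal(60)))   # per-second beyond 60 s

assert call_cost(30, Decimal(24)) == 24     # 30 s -> charged one full minute
assert call_cost(90, Decimal(24)) == 36     # 90 s -> 90 * 24 / 60 = 36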
def get_permanent_max_stops():
    """"""

    oracle_url = 'oracle://{user}:{password}@{db}'.format(
        user=gv.user, password=gv.password, db=gv.dbname)
    engine = create_engine(oracle_url)
    sess_maker = sessionmaker(bind=engine)
    session = sess_maker()

    # these are aliased to abbreviations as they're used repeatedly
    loc = aliased(Location)
    rd = aliased(RouteDef)
    rsd = aliased(RouteStopDef)
    lm = aliased(Landmark)
    lml = aliased(LandmarkLocation)
    lmt = aliased(LandmarkType)

    today = date.today()
    date_format = 'DD-MON-YY'

    # the following form a nested 'where exists' subquery that ensures
    # that a location exists as a platform (landmark_type=7)
    sub1 = (
        session.query(lmt).
        filter(lmt.landmark_type == 7,
               lmt.landmark_id == lm.landmark_id)
    )
    sub2 = (
        session.query(lm).
        filter(lm.landmark_id == lml.landmark_id,
               sub1.exists())
    )
    sub3 = (
        session.query(lml).
        filter(lml.location_id == loc.location_id,
               sub2.exists())
    )

    # this query contains checks to ensure all permanent max stops are
    # grabbed, as sometimes they're shut down temporarily
    query_stops = (
        session.query(
            loc.location_id.label(ID_FIELD),
            loc.public_location_description.label(STOP_FIELD),
            func.collect(rd.route_number.distinct()).label(ROUTES_FIELD),
            func.collect(
                rd.public_route_description.distinct()).label(DESC_FIELD),
            func.to_char(
                func.min(rsd.route_stop_begin_date),
                date_format).label('begin_date'),
            func.to_char(
                func.max(rsd.route_stop_end_date),
                date_format).label('end_date'),
            loc.x_coordinate.label(X_FIELD),
            loc.y_coordinate.label(Y_FIELD)).
        filter(
            loc.location_id == rsd.location_id,
            rd.route_number == rsd.route_number,
            rd.route_begin_date == rsd.route_begin_date,
            rd.route_end_date > today,
            rd.is_light_rail,
            rd.is_revenue,
            rsd.route_stop_end_date > today,
            or_(sub3.exists(),
                loc.passenger_access_code != 'N'),
            # Some stops that may or may not ever go into service are
            # added to the system as placeholders and given
            # coordinates of 0, 0
            loc.x_coordinate != 0,
            loc.y_coordinate != 0).
        group_by(
            loc.location_id,
            loc.public_location_description,
            loc.x_coordinate,
            loc.y_coordinate).
        all()
    )

    return query_stops
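A hedged sketch of consuming the rows returned by get_permanent_max_stops(); it assumes the field constants (ID_FIELD, STOP_FIELD, ...) are plain strings and that the SQLAlchemy version in use exposes _asdict() on result rows.

# Sketch: turn the labeled result rows into plain dicts keyed by the
# .label() names used in the query above.
def stops_as_dicts(rows):
    return [dict(row._asdict()) for row in rows]

# usage:
#   stops = stops_as_dicts(get_permanent_max_stops())
#   print(stops[0]['begin_date'], stops[0]['end_date'])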