Beispiel #1
0
    def do_job(self):
        """
        SELECT *, coalesce(end_time,now)-start_time
        FROM task t, subject s
        WHERE t.subject_id=s.id AND start_time > GetDate() - @days
        ORDER BY start_time
        :return:
        """
        session = DBSession()
        now = datetime.now()
        time_worked = (func.julianday(func.coalesce(Task.end_time,now)) - func.julianday(Task.start_time)) * 86400

        query = session.query(Task.start_time,
                              func.coalesce(Task.end_time, now),
                              time_worked,
                              Subject.title,
                              Task.title) \
            .filter(Subject.id==Task.subject_id) \
            .filter(func.date(Task.start_time) > func.date('now', '-%s day' % self.args.days)) \
            .order_by(Task.start_time)

        print '\n'

        table = PrettyTable(['Start', 'End', 'Time', 'Subject', 'Title'])
        table.align["Title"] = "l"

        total_time = 0
        day_total = 0
        last_date = None

        for row in query:
            if last_date == None:
                last_date = row[0].date()

            if row[0].date() != last_date:
                table.add_row([
                    '', '', timedelta(seconds=round(day_total)), '', ''
                ])
                last_date = row[0].date()
                day_total = 0

            day_total += row[2]
            total_time += row[2]

            table.add_row([
                row[0],
                row[1],
                timedelta(seconds=round(row[2])),
                row[3],
                row[4],
            ])

        if day_total > 0:
            table.add_row([
                '', '', timedelta(seconds=round(day_total)), '', ''
            ])

        print table
        print 'Total Work time: %s' % timedelta(seconds=total_time)
        print
    def _get_dep_statuses(self, ti, session, dep_context):
        """Yield dependency statuses for task instance *ti*, based on its
        trigger rule and the aggregated states of its upstream task
        instances for the same execution date.
        """
        TI = airflow.models.TaskInstance
        TR = airflow.models.TriggerRule

        # Checking that all upstream dependencies have succeeded
        if not ti.task.upstream_list:
            yield self._passing_status(
                reason="The task instance did not have any upstream tasks.")
            # BUGFIX: `raise StopIteration` inside a generator becomes a
            # RuntimeError on Python 3.7+ (PEP 479); `return` is the
            # correct way to end a generator early.
            return

        if ti.task.trigger_rule == TR.DUMMY:
            yield self._passing_status(reason="The task had a dummy trigger rule set.")
            return

        # TODO(unknown): this query becomes quite expensive with dags that have many
        # tasks. It should be refactored to let the task report to the dag run and get the
        # aggregates from there.
        qry = (
            session
            .query(
                # One coalesced conditional count per terminal state; the
                # coalesce(…, 0) keeps NULL-free results when no rows match.
                func.coalesce(func.sum(
                    case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.EXCLUDED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.FAILED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
                func.count(TI.task_id),
            )
            .filter(
                TI.dag_id == ti.dag_id,
                TI.task_id.in_(ti.task.upstream_task_ids),
                TI.execution_date == ti.execution_date,
                TI.state.in_([
                    State.SUCCESS, State.FAILED, State.EXCLUDED,
                    State.UPSTREAM_FAILED, State.SKIPPED]),
            )
        )

        successes, excluded, skipped, failed, upstream_failed, done = qry.first()

        # Add excluded tasks into successful tasks as they are equivalent for
        # dependency purposes. This is done in this way, not using the
        # state_for_dependents function, due to the constraints of SQLAlchemy
        # queries.
        successes = successes + excluded

        for dep_status in self._evaluate_trigger_rule(
                ti=ti,
                successes=successes,
                skipped=skipped,
                failed=failed,
                upstream_failed=upstream_failed,
                done=done,
                flag_upstream_failed=dep_context.flag_upstream_failed,
                session=session):
            yield dep_status
Beispiel #3
0
    def _total(self, balance_uid):
        """Return the net total (incomes minus expenses) of all balance
        changes attached to *balance_uid*; 0 when the subtraction fails.
        """
        model = request.environ["sqlalchemy.model"]
        db = request.environ["sqlalchemy.session"]

        # Sum of all income rows for this balance (0 when there are none).
        incomes = db.execute(
            select(
                [func.coalesce(func.sum(model.BalanceChange.c.amount), 0)],
                and_(model.BalanceChange.c.balance_uid == balance_uid, model.BalanceChange.c.is_income == True),
                from_obj=[model.balance_changes_table],
            )
        ).fetchone()[0]

        # Sum of all expense rows for this balance (0 when there are none).
        expenses = db.execute(
            select(
                [func.coalesce(func.sum(model.BalanceChange.c.amount), 0)],
                and_(model.BalanceChange.c.balance_uid == balance_uid, model.BalanceChange.c.is_income == False),
                from_obj=[model.balance_changes_table],
            )
        ).fetchone()[0]

        try:
            return incomes - expenses
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Keep the best-effort 0 fallback, but only for
        # ordinary exceptions.
        except Exception:
            log.error("", exc_info=1)
            return 0
Beispiel #4
0
 def get_info_op_bphtb(cls, p_kode, p_tahun):
     """Return a query with SPPT and tax-object data for one parcel (NOP)
     and tax year, for BPHTB purposes.  Shared-object (op bersama) figures
     are outer-joined and default to 0 when absent.
     """
     nop = FixLength(NOP)
     nop.set_raw(p_kode)
     query = pbb_DBSession.query(
         cls.luas_bumi_sppt, cls.luas_bng_sppt,
         cls.njop_bumi_sppt, cls.njop_bng_sppt, DatObjekPajak.jalan_op,
         DatObjekPajak.blok_kav_no_op, DatObjekPajak.rt_op, DatObjekPajak.rw_op,
         cls.nm_wp_sppt.label('nm_wp'),
         func.coalesce(SpptOpBersama.luas_bumi_beban_sppt, 0).label('luas_bumi_beban'),
         func.coalesce(SpptOpBersama.luas_bng_beban_sppt, 0).label('luas_bng_beban'),
         func.coalesce(SpptOpBersama.njop_bumi_beban_sppt, 0).label('njop_bumi_beban'),
         func.coalesce(SpptOpBersama.njop_bng_beban_sppt, 0).label('njop_bng_beban'))
     # Pair each SPPT row with its object data on the full composite key.
     query = query.filter(
         cls.kd_propinsi == DatObjekPajak.kd_propinsi,
         cls.kd_dati2 == DatObjekPajak.kd_dati2,
         cls.kd_kecamatan == DatObjekPajak.kd_kecamatan,
         cls.kd_kelurahan == DatObjekPajak.kd_kelurahan,
         cls.kd_blok == DatObjekPajak.kd_blok,
         cls.no_urut == DatObjekPajak.no_urut,
         cls.kd_jns_op == DatObjekPajak.kd_jns_op)
     query = query.outerjoin(SpptOpBersama)
     # Restrict to the requested parcel key and tax year.
     return query.filter(
         cls.kd_propinsi == nop['kd_propinsi'],
         cls.kd_dati2 == nop['kd_dati2'],
         cls.kd_kecamatan == nop['kd_kecamatan'],
         cls.kd_kelurahan == nop['kd_kelurahan'],
         cls.kd_blok == nop['kd_blok'],
         cls.no_urut == nop['no_urut'],
         cls.kd_jns_op == nop['kd_jns_op'],
         cls.thn_pajak_sppt == p_tahun)
    def _ops_for_date_range(self, balance_uids, start_date, end_date, tags=None, change_categories=None):
        """Return ``{"expenses": ..., "incomes": ...}`` summed over all
        balance changes of *balance_uids*, optionally restricted by date
        range, tags and change categories.  Returns 0 on query failure
        (kept from the original best-effort behaviour).
        """
        # BUGFIX: the defaults were mutable lists ([]), which are shared
        # across calls in Python; use None sentinels instead.
        tags = [] if tags is None else tags
        change_categories = [] if change_categories is None else change_categories

        model = self.get_sa_model()
        db = self.get_sa_session()

        conditions = [or_(*[model.BalanceChange.balance_uid == balance_uid for balance_uid in balance_uids])]
        if start_date:
            conditions.append(model.BalanceChange.occurred_on >= start_date)

        if end_date:
            conditions.append(model.BalanceChange.occurred_on <= end_date)

        # Blank/None tags and categories are silently skipped.
        if isinstance(tags, list) and len(tags) > 0:
            conditions.extend([model.BalanceChange.tags.any(tag=tag.strip().lower()) for tag in tags if tag is not None and tag.strip() != ''])

        if isinstance(change_categories, list) and len(change_categories) > 0:
            conditions.extend([model.BalanceChange.change_category_uid == value.strip() for value in change_categories if value is not None and value.strip() != ''])

        try:
            # NOTE(review): the .label() calls are applied to the inner case
            # expression, not the sum — looks misplaced but is preserved as-is
            # since the result is read positionally below.
            summary = db.execute(select([
                    func.coalesce(func.sum(
                        case([[model.BalanceChange.amount<0, model.BalanceChange.amount]], else_=0).label("expenses"))), 
                    func.coalesce(func.sum(
                        case([[model.BalanceChange.amount>0, model.BalanceChange.amount]], else_=0).label("incomes")))
                ], 
                and_(*conditions),
                from_obj=[model.balance_changes_table])).fetchone()

            return {
                "expenses": summary[0],
                "incomes": summary[1],
            }
        # BUGFIX: was a bare `except:`; catch only ordinary exceptions.
        except Exception:
            log.error(_("Can't get summary"), exc_info=1)
            return 0
Beispiel #6
0
def packages_all_licenses_in_files(package_id):
    """Build a select counting, per license short name, the licenses found
    in the files of one package.  The concluded license (per file entry)
    wins over the scanner-found license when both exist.
    """
    packages = db.packages.alias()
    package_files = db.packages_files.alias()
    files = db.files.alias()
    file_licenses = db.files_licenses.alias()
    concluded = license_approved_names().alias()
    found = license_approved_names().alias()

    # Prefer the concluded license's name, falling back to the found one.
    license_name = func.coalesce(concluded.c.short_name, found.c.short_name)

    source = (
        packages
        .join(package_files, packages.c.package_id == package_files.c.package_id)
        .join(files, package_files.c.file_id == files.c.file_id)
        .join(concluded, package_files.c.concluded_license_id == concluded.c.license_id, isouter=True)
        .join(file_licenses, files.c.file_id == file_licenses.c.file_id, isouter=True)
        .join(found, file_licenses.c.license_id == found.c.license_id)
    )

    return (
        select([
            packages.c.package_id,
            packages.c.name.label('package_name'),
            license_name.label('license_short_name'),
            func.count().label('license_found_count'),
        ])
        .select_from(source)
        .where(packages.c.package_id == package_id)
        .group_by(packages.c.package_id, packages.c.name, license_name)
    )
Beispiel #7
0
    def results_by_district(self):
        """Return the ballot results aggregated per district."""
        # A district counts as counted only if every result row is counted.
        counted_col = func.coalesce(func.bool_and(BallotResult.counted), False)
        total_yeas = func.sum(BallotResult.yeas)
        total_nays = func.sum(BallotResult.nays)
        # nullif/coalesce guard against a division by zero for empty tallies.
        yeas_share = 100 * total_yeas / (
            cast(func.coalesce(func.nullif(total_yeas + total_nays, 0), 1), Float)
        )
        nays_share = 100 - yeas_share
        # `accepted` is only defined once the district is fully counted.
        accepted_col = case({True: total_yeas > total_nays}, counted_col)

        aggregated = (
            self.results.with_entities(
                BallotResult.district.label('name'),
                counted_col.label('counted'),
                accepted_col.label('accepted'),
                total_yeas.label('yeas'),
                total_nays.label('nays'),
                yeas_share.label('yeas_percentage'),
                nays_share.label('nays_percentage'),
                func.sum(BallotResult.empty).label('empty'),
                func.sum(BallotResult.invalid).label('invalid'),
                func.sum(BallotResult.eligible_voters).label('eligible_voters'),
                func.array_agg(BallotResult.entity_id).label('entity_ids'),
            )
            .group_by(BallotResult.district)
            .order_by(None)
            .order_by(BallotResult.district)
        )
        return aggregated
Beispiel #8
0
 def advanced_search(self, **kwargs):
     """Apply the base advanced-search filters, then outer-join the
     per-account cashflow subqueries and expose expenses / revenue /
     balance as result fields."""
     super(TurnoversQueryBuilder, self).advanced_search(**kwargs)
     self._filter_account(kwargs.get('account_id'))
     if 'date_from' in kwargs or 'date_to' in kwargs:
         self._filter_date(kwargs.get('date_from'), kwargs.get('date_to'))

     # Materialise the cashflow aggregates, then outer-join both sides so
     # account items without any movements are still returned.
     self._cashflows_from_sub = self._cashflows_from_sub.subquery()
     self._cashflows_to_sub = self._cashflows_to_sub.subquery()
     joined = self.query.outerjoin(
         self._cashflows_from_sub,
         self._cashflows_from_sub.c.account_item_id == AccountItem.id)
     joined = joined.outerjoin(
         self._cashflows_to_sub,
         self._cashflows_to_sub.c.account_item_id == AccountItem.id)
     self.query = joined

     # Balance is NULL unless at least one side has data.
     has_movement = or_(
         self._cashflows_to_sub.c.revenue != None,
         self._cashflows_from_sub.c.expenses != None)
     net_amount = (
         func.coalesce(self._cashflows_to_sub.c.revenue, 0)
         - func.coalesce(self._cashflows_from_sub.c.expenses, 0))
     self.update_fields({
         'expenses': self._cashflows_from_sub.c.expenses,
         'revenue': self._cashflows_to_sub.c.revenue,
         'balance': case([(has_movement, net_amount)], else_=None),
     })
     ResourcesQueryBuilder.build_query(self)
Beispiel #9
0
    def test_functional_ix_two(self):
        """An identical functional (expression) index on both sides must
        yield no autogenerate diffs, only the documented warnings."""
        def build_schema():
            # Both sides are built from the same recipe so any diff would
            # come from the functional-index comparison itself.
            metadata = MetaData()
            table = Table(
                'foo', metadata,
                Column('id', Integer, primary_key=True),
                Column('email', String(50)),
                Column('name', String(50)),
            )
            Index(
                "email_idx",
                func.coalesce(table.c.email, table.c.name).desc(),
                unique=True,
            )
            return metadata

        m1 = build_schema()
        m2 = build_schema()

        with assertions.expect_warnings(
                "Skipped unsupported reflection",
                "autogenerate skipping functional index"):
            diffs = self._fixture(m1, m2)
        eq_(diffs, [])
Beispiel #10
0
 def _filter_price(self, price_from, price_to):
     """Constrain self.query by the lead item's price bounds; items with
     a missing price are treated as costing 0."""
     bounds = (
         (price_from, func.coalesce(LeadItem.price_from, 0) >= price_from),
         (price_to, func.coalesce(LeadItem.price_to, 0) <= price_to),
     )
     for bound, criterion in bounds:
         # Falsy bounds (None, 0, '') leave the query untouched.
         if bound:
             self.query = self.query.filter(criterion)
Beispiel #11
0
 def _filter_payment(self, payment_from, payment_to):
     """Constrain self.query by total payments; rows without payments are
     treated as 0."""
     total = func.coalesce(self._sum_payments.c.payments, 0)
     # Falsy bounds (None, 0) are skipped, matching the original checks.
     if payment_from:
         self.query = self.query.filter(total >= payment_from)
     if payment_to:
         self.query = self.query.filter(total <= payment_to)
Beispiel #12
0
    def percentage_by_district(self):
        """ Returns the percentage of votes aggregated by the district. Includes
        uncounted districts and districts with no results available.

        """

        # First pass: districts where this candidate actually has results.
        results = self.election.results
        results = results.join(ElectionResult.candidate_results)
        results = results.filter(CandidateResult.candidate_id == self.id)
        results = results.with_entities(
            ElectionResult.district.label('name'),
            func.array_agg(ElectionResult.entity_id).label('entities'),
            # A district counts as counted only if all its entities are;
            # coalesce covers districts with no rows at all.
            func.coalesce(
                func.bool_and(ElectionResult.counted), False
            ).label('counted'),
            func.sum(ElectionResult.accounted_ballots).label('total'),
            func.sum(CandidateResult.votes).label('votes'),
        )
        results = results.group_by(ElectionResult.district)
        results = results.order_by(None)
        results = results.all()
        percentage = {
            r.name: {
                'counted': r.counted,
                'entities': r.entities,
                # Guard against division by zero when no ballots were
                # accounted for.
                'percentage': 100 * (r.votes / r.total) if r.total else 0.0
            } for r in results
        }

        # Second pass over ALL districts (no candidate filter) to fill in
        # districts without candidate results and to correct entries whose
        # first-pass entity set was incomplete.
        empty = self.election.results
        empty = empty.with_entities(
            ElectionResult.district.label('name'),
            func.array_agg(ElectionResult.entity_id).label('entities'),
            func.coalesce(
                func.bool_and(ElectionResult.counted), False
            ).label('counted')
        )
        empty = empty.group_by(ElectionResult.district)
        empty = empty.order_by(None)
        for result in empty:
            # Overwrite when the district is missing entirely, or when the
            # first pass only covered a subset of its entities.
            update = (
                result.name not in percentage
                or (
                    set(percentage[result.name]['entities'])
                    != set(result.entities)
                )
            )
            if update:
                percentage[result.name] = {
                    'counted': result.counted,
                    'entities': result.entities,
                    'percentage': 0.0
                }

        return percentage
Beispiel #13
0
 def index(self, format='html'):
     """GET /rdfdocs: All items in the collection.

     Builds a filtered/sorted, paginated listing of active RDF documents;
     returns JSON (names only) when requested, HTML otherwise.
     """
     # url('rdfdocs')
     # BUGFIX: dict.has_key() was removed in Python 3; the `in` operator is
     # equivalent and also works on Python 2.
     rdfdocs = Session.query(model.RDFDoc).filter(model.RDFDoc.active==True).join(model.RDFDoc.current_stats)
     c.query_string = '?'
     c.search = ''
     if 'search' in request.GET:
         rdfdocs = rdfdocs.filter(model.RDFDoc.name.ilike("%%%s%%" % request.GET['search']))
         c.query_string += 'search=%s&' % request.GET['search']
         c.search = request.GET['search']
     if 'errors' in request.GET:
         rdfdocs = rdfdocs.filter(model.StatResult.errors!=None)
         c.query_string += 'errors=1&'
     if 'valid' in request.GET:
         rdfdocs = rdfdocs.filter(model.StatResult.errors==None)
         c.query_string += 'valid=1&'
     if 'sparql' in request.GET:
         rdfdocs = rdfdocs.filter(model.RDFDoc.format=='sparql')
         c.query_string += 'sparql=1&'
     if 'dumps' in request.GET:
         rdfdocs = rdfdocs.filter(model.RDFDoc.format!='sparql')
         c.query_string += 'dumps=1&'
     c.sort_order = request.GET.get('sort')
     if 'sort' in request.GET:
         if request.GET['sort'] == 'triples':
             c.rdfdocs = rdfdocs.order_by(desc(func.coalesce(model.StatResult.triples, '0')))
         elif request.GET['sort'] == 'warnings':
             c.rdfdocs = rdfdocs.order_by(desc(func.coalesce(model.StatResult.warnings, '0')))
         elif request.GET['sort'] == 'format':
             c.rdfdocs = rdfdocs.order_by(func.coalesce(model.RDFDoc.format, '0'))
         elif request.GET['sort'] == 'issue':
             c.rdfdocs = rdfdocs.order_by(model.StatResult.errors)
         elif request.GET['sort'] == 'update':
             c.rdfdocs = rdfdocs.order_by(model.RDFDoc.last_updated.desc())
         else:
             c.rdfdocs = rdfdocs.order_by(model.RDFDoc.worked_on.desc(), model.RDFDoc.name, model.RDFDoc.last_updated.desc())
     else:
         c.rdfdocs = rdfdocs.order_by(model.RDFDoc.worked_on.desc(), model.RDFDoc.name, model.RDFDoc.last_updated.desc())
     if 'page' in request.GET:
         page = request.GET['page']
     else:
         page = 1
     page_url = PageURL_WebOb(request)
     c.rdfdocs_page = Page(c.rdfdocs, page=page, items_per_page=50, url=page_url)
     c.rdfdoc_count = c.rdfdocs.count()
     c.workers = Session.query(model.WorkerProc).order_by(model.WorkerProc.started.desc()).all()
     if format=='json' or 'application/json' in request.headers.get('accept', ''):
         response.content_type = 'application/json'
         json_rdfdocs = []
         for r in rdfdocs:
             json_rdfdocs.append(r.name)
         return json.dumps(json_rdfdocs)
     return render('/rdfdoc/index.html')
Beispiel #14
0
    def get_bayar(cls, p_kode):
        """Return a query of SPPT bills for one parcel (NOP) across all tax
        years, with payment aggregates (last payment date, amount paid,
        penalty) outer-joined in."""
        pkey = FixLength(NOP)
        pkey.set_raw(p_kode)
        # Human-readable NOP: propinsi.dati2-kecamatan.kelurahan-blok.urut-jns
        nop = func.concat(cls.kd_propinsi, '.').concat(cls.kd_dati2).concat('-').\
            concat(cls.kd_kecamatan).concat('.').concat(cls.kd_kelurahan).concat('-').\
            concat(cls.kd_blok).concat('.').concat(cls.no_urut).concat('-').\
            concat(cls.kd_jns_op).label('nop')
        query = pbb_DBSession.query(
            nop, cls.thn_pajak_sppt,
            cls.nm_wp_sppt, cls.jln_wp_sppt, cls.blok_kav_no_wp_sppt,
            cls.rw_wp_sppt, cls.rt_wp_sppt, cls.kelurahan_wp_sppt,
            cls.kota_wp_sppt, cls.kd_pos_wp_sppt, cls.npwp_sppt,
            cls.kd_kls_tanah, cls.kd_kls_bng,
            cls.luas_bumi_sppt, cls.luas_bng_sppt,
            cls.njop_bumi_sppt, cls.njop_bng_sppt, cls.njop_sppt,
            cls.njoptkp_sppt, cls.pbb_terhutang_sppt, cls.faktor_pengurang_sppt,
            cls.status_pembayaran_sppt,
            cls.tgl_jatuh_tempo_sppt,
            cls.pbb_yg_harus_dibayar_sppt.label('pokok'),
            func.max(PembayaranSppt.tgl_pembayaran_sppt).label('tgl_pembayaran_sppt'),
            func.sum(func.coalesce(PembayaranSppt.jml_sppt_yg_dibayar, 0)).label('bayar'),
            func.sum(func.coalesce(PembayaranSppt.denda_sppt, 0)).label('denda_sppt'))
        # Outer join so bills without payments still appear (sums become 0).
        query = query.outerjoin(PembayaranSppt, and_(
            cls.kd_propinsi == PembayaranSppt.kd_propinsi,
            cls.kd_dati2 == PembayaranSppt.kd_dati2,
            cls.kd_kecamatan == PembayaranSppt.kd_kecamatan,
            cls.kd_kelurahan == PembayaranSppt.kd_kelurahan,
            cls.kd_blok == PembayaranSppt.kd_blok,
            cls.no_urut == PembayaranSppt.no_urut,
            cls.kd_jns_op == PembayaranSppt.kd_jns_op,
            cls.thn_pajak_sppt == PembayaranSppt.thn_pajak_sppt))
        # Group on every non-aggregated selected column.
        query = query.group_by(
            cls.kd_propinsi, cls.kd_dati2, cls.kd_kecamatan, cls.kd_kelurahan,
            cls.kd_blok, cls.no_urut, cls.kd_jns_op, cls.thn_pajak_sppt,
            cls.nm_wp_sppt, cls.jln_wp_sppt, cls.blok_kav_no_wp_sppt,
            cls.rw_wp_sppt, cls.rt_wp_sppt, cls.kelurahan_wp_sppt,
            cls.kota_wp_sppt, cls.kd_pos_wp_sppt, cls.npwp_sppt,
            cls.kd_kls_tanah, cls.kd_kls_bng,
            cls.luas_bumi_sppt, cls.luas_bng_sppt,
            cls.njop_bumi_sppt, cls.njop_bng_sppt, cls.njop_sppt,
            cls.njoptkp_sppt, cls.pbb_terhutang_sppt, cls.faktor_pengurang_sppt,
            cls.status_pembayaran_sppt,
            cls.tgl_jatuh_tempo_sppt,
            cls.pbb_yg_harus_dibayar_sppt.label('pokok'))
        return query.filter(
            cls.kd_propinsi == pkey['kd_propinsi'],
            cls.kd_dati2 == pkey['kd_dati2'],
            cls.kd_kecamatan == pkey['kd_kecamatan'],
            cls.kd_kelurahan == pkey['kd_kelurahan'],
            cls.kd_blok == pkey['kd_blok'],
            cls.no_urut == pkey['no_urut'],
            cls.kd_jns_op == pkey['kd_jns_op'],)
    def _ops_for_date_range(self, balance_uids, start_date, end_date, tags=None, incomes=True, expenses=False):
        """
        Will be removed in next release.

        Return the summed amount of matching balance changes for
        *balance_uids*, optionally restricted by sign, date range and tags.
        Returns 0 when the query fails (kept best-effort behaviour).
        """
        # BUGFIX: `tags=[]` was a shared mutable default argument; use a
        # None sentinel instead (callers see identical behaviour).
        tags = [] if tags is None else tags

        model = self.get_sa_model()
        db = self.get_sa_session()

        conditions = [or_(*[model.BalanceChange.balance_uid == balance_uid for balance_uid in balance_uids])]
        if incomes:
            conditions.append(model.BalanceChange.amount >= 0)
        if expenses:
            conditions.append(model.BalanceChange.amount < 0)

        if start_date:
            conditions.append(model.BalanceChange.occurred_on >= start_date)

        if end_date:
            conditions.append(model.BalanceChange.occurred_on <= end_date)

        # Blank/None tags are silently skipped.
        if isinstance(tags, list) and len(tags) > 0:
            conditions.extend([model.BalanceChange.tags.any(tag=tag.strip().lower()) for tag in tags if tag is not None and tag.strip() != ''])

        try:
            return db.execute(select([func.coalesce(func.sum(model.BalanceChange.amount), 0).label("summary")], 
                and_(*conditions),
                from_obj=[model.balance_changes_table])).fetchone()[0]
        # BUGFIX: was a bare `except:`; catch only ordinary exceptions.
        except Exception:
            log.error("", exc_info=1)
            return 0
Beispiel #16
0
    def build_query_to_populate(self, query, full_table, aggregate_table):
        """Extend *query* to group rows into geometry bins and return
        ``(insert_columns, select_query)`` used to populate the aggregate
        table.

        NOTE(review): assumes `full_table` carries client_ip/server_ip/time
        columns and a `location` geometry column — confirm against schema.
        """
        insert_columns = [aggregate_table.c.join_key]
        fk = Column(self.key, Integer)
        geom = Column(self.geometry_column, Geometry())
        bins_table = Table(self.table, full_table.metadata, fk, geom)

        if self.join_custom_data: 
            # keep_existing avoids redefining the table if it is already
            # registered in this MetaData.
            extra_data = Table("extra_data", full_table.metadata, 
                    Column("verified", Boolean),
                    Column("timestamp", DateTime),
                    Column("client_ip", Integer),
                    Column("server_ip", Integer),
                    Column("location", Geometry("Point", srid=4326)),
                    keep_existing = True)

            # Outer join so rows without matching extra data are kept.
            joining = join(full_table, extra_data,
                    and_(extra_data.c.client_ip == full_table.c.client_ip,
                        extra_data.c.server_ip == full_table.c.server_ip,
                        extra_data.c.timestamp == full_table.c.time),
                    isouter = True)
            query = query.select_from(joining)
            # Prefer the verified extra-data location; fall back to the
            # original row's location otherwise.
            location = case([(extra_data.c.verified, func.coalesce(extra_data.c.location, full_table.c.location))], else_ = full_table.c.location)
        else:
            location = full_table.c.location

        # Bin each row by the geometry it intersects and group per bin key.
        select_query = (query.select_from(bins_table)
             .where(ST_Intersects(location, geom))
             .column(fk)
             .group_by(fk))
        return insert_columns, select_query
Beispiel #17
0
def find_matches(dataset, text, filter=None, exclude=None):
    """Return fuzzy matches of *text* against the entities of *dataset*,
    scored by Levenshtein similarity and grouped by canonical id.
    """
    entities = Entity.__table__
    match_text = normalize(text, dataset)[:254]

    # Pick the column to match on per the dataset's settings and truncate
    # to 254 characters to mirror `match_text`.
    text_field = entities.c.normalized if dataset.normalize_text else entities.c.name
    if dataset.ignore_case:
        text_field = func.lower(text_field)
    text_field = func.left(text_field, 254)

    # Similarity in percent: 100 * (length - distance) / length, clamped
    # at 0 and with a divisor of at least 1.
    denom = func.greatest(1.0, func.least(len(match_text), func.length(text_field)))
    score = func.greatest(0.0, ((denom - func.levenshtein(text_field, match_text)) / denom) * 100.0)
    score = func.max(score).label('score')

    # Collapse aliases onto their canonical entity id.
    id_ = func.coalesce(entities.c.canonical_id, entities.c.id).label('id')

    # NOTE: `== False` / `== None` are intentional SQLAlchemy column
    # comparisons (they compile to SQL), not Python identity checks.
    filters = [entities.c.dataset_id == dataset.id,
               entities.c.invalid == False]
    if not dataset.match_aliases:
        filters.append(entities.c.canonical_id == None)
    if exclude is not None:
        filters.append(entities.c.id != exclude)
    if filter is not None:
        filters.append(text_field.ilike('%%%s%%' % filter))

    stmt = select([id_, score], and_(*filters), [entities],
                  group_by=[id_], order_by=[score.desc()])
    return Matches(stmt)
Beispiel #18
0
    def build_query_to_populate(self, query, full_table, aggregate_table):
        """Extend *query* to group rows into geometry bins and return
        ``(insert_columns, select_query)`` used to populate the aggregate
        table.

        NOTE(review): assumes `full_table` has a `bigquery_key` column and
        a `location` geometry column — confirm against the schema.
        """
        insert_columns = [aggregate_table.c.join_key]
        fk = Column(self.key, Integer)
        geom = Column(self.geometry_column, Geometry())
        bins_table = Table(self.table, full_table.metadata, fk, geom)

        if self.join_custom_data: 
            # keep_existing avoids redefining the table if it is already
            # registered in this MetaData.
            extra_data = Table("extra_data", full_table.metadata, 
                    Column("timestamp", DateTime),
                    Column("verified", Boolean),
                    Column("bigquery_key", String),
                    Column("connection_type", String),
                    Column("advertised_download", Integer),
                    Column("actual_download", Float),
                    Column("advertised_upload", Integer),
                    Column("actual_upload", Float),
                    Column("min_rtt", Integer),
                    Column("location_type", String),
                    Column("cost_of_service", Integer),
                    Column("location", Geometry("Point", srid=4326)),
                    keep_existing = True)

            # Outer join on the shared bigquery key; rows without matching
            # extra data are kept.
            joining = join(full_table, extra_data,
                    and_(extra_data.c.bigquery_key == full_table.c.bigquery_key),
                    isouter = True)
            query = query.select_from(joining)
            # Prefer the verified extra-data location; fall back to the
            # original row's location otherwise.
            location = case([(extra_data.c.verified, func.coalesce(extra_data.c.location, full_table.c.location))], else_ = full_table.c.location)
        else:
            location = full_table.c.location

        # Bin each row by the geometry it intersects and group per bin key.
        select_query = (query.select_from(bins_table)
             .where(ST_Intersects(location, geom))
             .column(fk)
             .group_by(fk))
        return insert_columns, select_query
Beispiel #19
0
    def export_csv(self):
        """Export the selected SP2D advice rows as CSV data.

        :return: dict with ``header`` (column names) and ``rows`` (lists of
            values); the response is marked as a timestamped CSV attachment.
        """
        request = self.request
        query = SipkdDBSession.query(Sp2dAdviceDet.advno,
                                     # Rebuild the display number from its parts:
                                     # last-5-digits + infix + '/' + type + '/' + year.
                                     # BUGFIX: coalesce had a duplicated first
                                     # argument (infix, infix, '') — equivalent,
                                     # simplified form kept.
                                     (func.right(Sp2d.sp2dno, 5) +
                                      func.coalesce(Sp2d.infix, '') + '/' +
                                      Sp2d.sp2dtype + '/' +
                                      func.left(Sp2d.sp2dno, 4)).label('sp2dno'),
                                     Sp2d.sp2ddate, Sp2d.paymentfor, Sp2d.sp2damount,
                                     Sp2d.ppnamount, Sp2d.pphamount,
                                     (Sp2d.pot1num + Sp2d.pot2num + Sp2d.pot3num +
                                      Sp2d.pot4num + Sp2d.pot5num).label("potongan"),
                                     Sp2d.sp2dnetto, Sp2d.bknama, Sp2d.bankposnm,
                                     Sp2d.bankaccount, Sp2d.npwp).\
                               join(Sp2d).\
                               filter(Sp2d.sp2dno == Sp2dAdviceDet.sp2dno,
                                      Sp2d.sp2dno.in_(request.params['data'].split(',')))

        first = query.first()
        # BUGFIX: `first` is None when the selection matches no rows; the
        # old code crashed on `.keys()` — return an empty export instead.
        header = list(first.keys()) if first is not None else []
        rows = [list(item) for item in query.all()]

        # override attributes of response
        filename = 'sp2d%s.csv' % datetime.now().strftime('%Y%m%d%H%M%S')

        self.request.response.content_disposition = 'attachment;filename=' + filename

        return {
          'header': header,
          'rows': rows,
        }
Beispiel #20
0
def get_device_tokens(login_id):
    '''
    Get device tokens for a given login. Removes duplicates per provider.
    '''
    with session_scope() as session:
        result = session.query(
            model.Device.platform_id,
            func.coalesce(model.Device.device_token_new,
                          model.Device.device_token).label('device_token')
        ).filter(
            model.Device.login_id == login_id
        ).filter(
            model.Device.unregistered_ts.is_(None)
        ).all()

    # Keep one device per (provider, token) pair to avoid pushing the same
    # token twice through the same provider (gcm, apn).
    unique_devices = set()
    seen_provider_tokens = set()
    for entry in sorted(result):  # sorting to make unit tests easier
        platform, token = entry
        # NOTE: Use unique tokens per *provider* only for known providers,
        #       and unique tokens per *platform* in other cases, since
        #       it is hard to verify providers for custom senders
        provider = constants.PLATFORM_BY_PROVIDER.get(platform, 0) or platform
        key = (provider, token)
        if key in seen_provider_tokens:
            continue
        seen_provider_tokens.add(key)
        unique_devices.add(entry)
    return list(unique_devices)
 def _set_orderby_desc(self, query, model, limit,
                       last_id, offset, descending, orderby):
     """Return an updated query with the proper orderby and desc."""
     if orderby == 'fav_user_ids':
         # Favourite count is derived from the array length; expose it as
         # a labelled column that the ORDER BY can reference by name.
         n_favs = func.coalesce(
             func.array_length(model.fav_user_ids, 1), 0).label('n_favs')
         query = query.add_column(n_favs)

     if orderby in ('created', 'updated', 'finish_time'):
         # Timestamp-ish columns are compared as TIMESTAMP values.
         sort_expr = cast(getattr(model, orderby), TIMESTAMP)
     elif orderby == 'fav_user_ids':
         # Sort by the labelled column added above.
         sort_expr = "n_favs"
     else:
         sort_expr = getattr(model, orderby)

     query = query.order_by(desc(sort_expr) if descending else sort_expr)

     # Keyset pagination (last_id given) needs only a limit; classic
     # pagination also applies the offset.
     if last_id:
         query = query.limit(limit)
     else:
         query = query.limit(limit).offset(offset)
     return query
Beispiel #22
0
 def issue_new(self):
     """Create a new issue for the current comic.

     The next issue number is max(existing) + 1 (COALESCE makes an empty
     comic start at 1).  On successful validation the issue is persisted,
     a flash message is queued, and the user is redirected to the issue
     list (or the blog index for the special 'blog' comic); otherwise the
     pre-populated creation form is returned for re-rendering.
     """
     # Next free issue number for this comic.
     new_number = DBSession.query(
             func.coalesce(func.max(Issue.issue_number), 0) + 1
         ).filter(
             (Issue.comic_id==self.context.comic.comic_id)
         ).scalar()
     form = Form(
             self.request,
             defaults={
                 'comic_id': self.context.comic.comic_id,
                 'issue_number': new_number,
                 'created': self.utcnow,
                 },
             schema=IssueSchema,
             variable_decode=True)
     if form.validate():
         issue = form.bind(Issue())
         DBSession.add(issue)
         self.request.session.flash(
                 'Created issue #%d of %s' % (
                     issue.issue_number,
                     self.context.comic.title,
                     ))
         # Flush so issue_number/ids are assigned before redirecting.
         DBSession.flush()
         return HTTPFound(location=
                 self.request.route_url('issues', comic=self.context.comic.comic_id)
                 if self.context.comic.comic_id != 'blog' else
                 self.request.route_url('blog_index', comic='blog')
                 )
     # Validation failed (or initial GET): render the creation form.
     return dict(
             create=True,
             form=FormRendererFoundation(form),
             )
    def _ops_for_date_range(self, balance_uids, start_date, end_date, incomes = True, expenses = False):
        """
        Sum the amounts of balance changes matching the given filters.

        Will be removed in next release.

        :param balance_uids: balances whose changes are included
        :param start_date: inclusive lower bound on occurred_on (optional)
        :param end_date: inclusive upper bound on occurred_on (optional)
        :param incomes: include non-negative amounts
        :param expenses: include negative amounts
        :return: summed amount (0 instead of NULL), or 0 on a DB error
        """
        model = request.environ["sqlalchemy.model"]
        db = request.environ["sqlalchemy.session"]

        conditions = [or_(*[model.BalanceChange.balance_uid == balance_uid for balance_uid in balance_uids])]
        if incomes:
            conditions.append(model.BalanceChange.amount >= 0)
        if expenses:
            conditions.append(model.BalanceChange.amount < 0)

        if start_date:
            conditions.append(model.BalanceChange.occurred_on >= start_date)

        if end_date:
            conditions.append(model.BalanceChange.occurred_on <= end_date)

        try:
            return db.execute(select([func.coalesce(func.sum(model.BalanceChange.amount), 0).label("summary")], 
                and_(*conditions),
                from_obj=[model.balance_changes_table])).fetchone()[0]
        except Exception:
            # Bug fix: narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; the best-effort
            # "log and return 0" behaviour is kept for real DB errors.
            log.error("", exc_info=1)
            return 0
Beispiel #24
0
 def get_colour_map(cls):
     """Map each category id to its colour, falling back to the parent
     category's colour when the category itself has none."""
     effective_colour = func.coalesce(Category.colour, ParentCategory.colour)
     joined = Category.query.outerjoin(
         ParentCategory, ParentCategory.id == Category.parent)
     with_colour = joined.filter(effective_colour.isnot(None))
     return dict(with_colour.values(Category.id, effective_colour))
Beispiel #25
0
def update_canonicals(canonicals):
    '''
    Update canonical data for android devices.

    ``canonicals`` is a list of dicts with ``login_id``, ``old_token``
    and ``new_token`` keys.  Matching device rows get device_token_new
    replaced in one bulk statement; afterwards the per-device user cap is
    re-applied for each new token on both android platforms.
    '''
    # Idiom fix: the original declared ``global ENGINE`` although the
    # engine is only read, never rebound — the statement was dropped.
    binding = [{"p_{}".format(k): v for k, v in canonical.items()} for canonical in canonicals]
    device_table = model.metadata.tables['device']
    stmt = update(device_table).\
        values(device_token_new=bindparam('p_new_token')).\
        where(and_(device_table.c.login_id == bindparam('p_login_id'),
                   func.coalesce(device_table.c.device_token_new, device_table.c.device_token) == bindparam('p_old_token')))
    ENGINE.execute(stmt, binding)

    with session_scope() as session:
        query = text('SELECT keep_max_users_per_device( \
                     (:platform_id)::int2, :device_token, (:max_users_per_device)::int2)')
        # Enforce the user-per-device cap for both android form factors;
        # the two previously duplicated execute calls are now one loop.
        for canonical in canonicals:
            for platform_id in (constants.PLATFORM_ANDROID,
                                constants.PLATFORM_ANDROID_TABLET):
                session.execute(query,
                                {'platform_id': platform_id,
                                 'device_token': canonical['new_token'],
                                 'max_users_per_device': config.max_users_per_device
                                })
        session.commit()
def get_reposters_for_track(args):
    """Return users who reposted a track, ordered by follower count desc.

    ``args`` keys: current_user_id, repost_track_id, limit, offset.
    Raises NotFoundError when no current track exists for the id.  Each
    returned user dict carries a follower_count field plus the peripheral
    metadata added by populate_user_metadata.
    """
    user_results = []
    current_user_id = args.get('current_user_id')
    repost_track_id = args.get('repost_track_id')
    limit = args.get('limit')
    offset = args.get('offset')

    db = get_db_read_replica()
    with db.scoped_session() as session:
        # Ensure Track exists for provided repost_track_id.
        track_entry = session.query(Track).filter(
            Track.track_id == repost_track_id,
            Track.is_current == True).first()
        if track_entry is None:
            raise exceptions.NotFoundError(
                'Resource not found for provided track id')

        # Subquery to get all (user_id, follower_count) entries from Follows table.
        follower_count_subquery = (session.query(
            Follow.followee_user_id,
            func.count(Follow.followee_user_id).label(
                response_name_constants.follower_count)).filter(
                    Follow.is_current == True,
                    Follow.is_delete == False).group_by(
                        Follow.followee_user_id).subquery())

        # Get all Users that reposted track, ordered by follower_count desc & paginated.
        query = (
            session.query(
                User,
                # Replace null values from left outer join with 0 to ensure sort works correctly.
                (func.coalesce(follower_count_subquery.c.follower_count, 0)
                 ).label(response_name_constants.follower_count))
            # Left outer join to associate users with their follower count.
            .outerjoin(
                follower_count_subquery,
                follower_count_subquery.c.followee_user_id ==
                User.user_id).filter(
                    User.is_current == True,
                    # Only select users that reposted given track.
                    User.user_id.in_(
                        session.query(Repost.user_id).filter(
                            Repost.repost_item_id == repost_track_id,
                            Repost.repost_type == RepostType.track,
                            Repost.is_current == True,
                            Repost.is_delete == False)
                    )).order_by(desc(response_name_constants.follower_count)))
        user_results = add_query_pagination(query, limit, offset).all()

        # Fix format to return only Users objects with follower_count field.
        if user_results:
            users, _ = zip(*user_results)
            user_results = helpers.query_result_to_list(users)
            # bundle peripheral info into user results
            user_ids = [user['user_id'] for user in user_results]
            user_results = populate_user_metadata(session, user_ids,
                                                  user_results,
                                                  current_user_id)
    return user_results
Beispiel #27
0
def get_device_tokens(login_id):
    '''
    Get device tokens for a given login.
    '''
    session = get_session()
    # Prefer the canonicalised token when one has been recorded.
    token = func.coalesce(model.Device.device_token_new,
                          model.Device.device_token).label('device_token')
    query = session.query(model.Device.platform_id, token)
    return query.filter(model.Device.login_id == login_id)
    def __enter__(self):
        """Record the current capture high-water mark and attach the SQL
        execution listeners for the duration of the block."""
        highest_capture = func.coalesce(func.max(UndoAction.capture_id), 0)
        self.last_capture = (self.session.query(UndoAction)
                             .filter_by(object_type=self.object_type,
                                        stack_id=self.stack_id)
                             .with_entities(highest_capture)
                             .scalar())

        event.listen(self.app_engine, "before_execute", self.before_exec)
        event.listen(self.app_engine, "after_execute", self.after_exec)
Beispiel #29
0
def check_stock_consistency() -> list[int]:
    """Ids of stocks whose cached dnBookedQuantity disagrees with the
    summed quantity of their non-cancelled bookings."""
    # ``== False`` is intentional: it compiles to SQL, unlike ``is False``.
    booked_quantity = func.coalesce(
        func.sum(Booking.quantity).filter(Booking.isCancelled == False), 0)
    mismatched = (db.session.query(Stock.id)
                  .outerjoin(Stock.bookings)
                  .group_by(Stock.id)
                  .having(Stock.dnBookedQuantity != booked_quantity))
    return [row[0] for row in mismatched.all()]
Beispiel #30
0
    def get_piutang(cls, p_kode, p_tahun, p_count):
        """Summarise outstanding PBB tax (piutang) for one tax object.

        Builds a per-year subquery with the assessed amount ('pokok'),
        penalties ('denda_sppt'), payments ('bayar') and remainder
        ('sisa') over the ``p_count`` tax years ending at ``p_tahun``,
        then returns an (un-executed) query totalling those columns.

        :param p_kode: raw NOP tax-object code, parsed via FixLength(NOP)
        :param p_tahun: last tax year included
        :param p_count: number of consecutive years to include
        """
        # Used to display SPPT (tax assessments) together with their payments.
        pkey = FixLength(NOP)
        pkey.set_raw(p_kode)
        # First year of the window, e.g. p_tahun=2020, p_count=3 -> "2018".
        p_tahun_awal = str(int(p_tahun) - p_count + 1)

        q1 = pbb_DBSession.query(cls.thn_pajak_sppt,(cls.pbb_yg_harus_dibayar_sppt).label('pokok'),
                                   cls.tgl_jatuh_tempo_sppt, cls.nm_wp_sppt,
                                   func.sum(PembayaranSppt.denda_sppt).label('denda_sppt'),
                                   func.sum(PembayaranSppt.jml_sppt_yg_dibayar).label('bayar'),
                                   (cls.pbb_yg_harus_dibayar_sppt - func.sum(
                                            (func.coalesce(PembayaranSppt.jml_sppt_yg_dibayar,0)-
                                             func.coalesce(PembayaranSppt.denda_sppt,0)))).label('sisa')

                                    ).\
              outerjoin(PembayaranSppt, and_(
                  cls.kd_propinsi==PembayaranSppt.kd_propinsi,
                  cls.kd_dati2==PembayaranSppt.kd_dati2,
                  cls.kd_kecamatan==PembayaranSppt.kd_kecamatan,
                  cls.kd_kelurahan==PembayaranSppt.kd_kelurahan,
                  cls.kd_blok==PembayaranSppt.kd_blok,
                  cls.no_urut==PembayaranSppt.no_urut,
                  cls.kd_jns_op==PembayaranSppt.kd_jns_op,
                  cls.thn_pajak_sppt==PembayaranSppt.thn_pajak_sppt
                  )).\
              filter(
                     cls.kd_propinsi == pkey['kd_propinsi'],
                     cls.kd_dati2 == pkey['kd_dati2'],
                     cls.kd_kecamatan == pkey['kd_kecamatan'],
                     cls.kd_kelurahan == pkey['kd_kelurahan'],
                     cls.kd_blok == pkey['kd_blok'],
                     cls.no_urut == pkey['no_urut'],
                     cls.kd_jns_op == pkey['kd_jns_op']).\
              filter(cls.thn_pajak_sppt.between(p_tahun_awal,p_tahun)
                    ).\
              group_by(cls.thn_pajak_sppt, cls.pbb_yg_harus_dibayar_sppt,
                      cls.tgl_jatuh_tempo_sppt, cls.nm_wp_sppt).subquery()

        # Collapse the per-year rows into grand totals.
        query = pbb_DBSession.query(
            func.sum(q1.c.pokok).label('pokok'),
            func.sum(q1.c.denda_sppt).label('denda_sppt'),
            func.sum(q1.c.bayar).label('bayar'),
            func.sum(q1.c.sisa).label('sisa'),
        )

        return query
Beispiel #31
0
def _compile_array_agg(element, compiler, **kw):
    """Render array_agg; when a default is configured, wrap the aggregate
    in COALESCE so empty groups yield the default array instead of NULL."""
    rendered = "%s(%s)" % (element.name, compiler.process(element.clauses))
    if element.default is None:
        return rendered
    fallback = cast(postgresql.array(element.default), element.type)
    return str(func.coalesce(text(rendered), fallback).compile(compiler))
Beispiel #32
0
    def counted(cls):
        """Labelled scalar subquery: True iff every result of this
        election is counted (False when there are no results)."""
        all_counted = func.coalesce(func.bool_and(ElectionResult.counted), False)
        return (select([all_counted])
                .where(ElectionResult.election_id == cls.id)
                .label('counted'))
Beispiel #33
0
 def _get_answered_and_talk_time(self, session, agent_id, **filters):
     """Return (answered_call_count, total_talk_time) for an agent within
     the given interval filters; (0, 0) when nothing matches."""
     answered_count = func.count(StatCallOnQueue.id)
     total_talk = func.coalesce(func.sum(StatCallOnQueue.talktime), 0)
     query = (session.query(answered_count, total_talk)
              .filter(StatAgent.agent_id == agent_id)
              .filter(StatCallOnQueue.status == 'answered')
              .join(StatAgent))
     query = self._add_interval_query(StatCallOnQueue, query, **filters)
     return query.first() or (0, 0)
Beispiel #34
0
def build_count_subquery(counter_type):
    """Subquery of (device_id, count) summing counters of the given type,
    keeping only counters updated within one second of the last processed
    packet's date."""
    total = func.coalesce(func.sum(DeviceCounters.value), 0).label('count')
    query = db.session.query(DeviceCounters.device_id, total)
    query = query.join(RowProcessed, RowProcessed.analyzer == 'packet_analyzer')
    query = query.join(Packet, Packet.id == RowProcessed.last_row)
    query = query.filter(DeviceCounters.counter_type == counter_type)
    query = query.filter(
        DeviceCounters.last_update + func.make_interval(0,0,0,1) > Packet.date)
    return query.group_by(DeviceCounters.device_id).subquery()
Beispiel #35
0
 def retrieve_url(self, short_representation: str):
     """Decode a short representation to its seed and fetch the matching
     URL row; None when decoding fails or nothing matches."""
     try:
         seed = self.encoder.decode(short_representation)
     except ValueError:
         # Not a valid short code.
         return None
     # Rows without an explicit seed fall back to their id.
     seed_column = func.coalesce(URL.seed, URL.id)
     return URL.query.filter(seed_column == seed).one_or_none()
Beispiel #36
0
def model_fetch_task(task_id):
    """Return all of the current user's tasks serialised as JSON.

    Tasks are ordered by start_time (falling back to end_time when
    start_time is NULL), then by end_time.

    NOTE(review): ``task_id`` is accepted but never used — the query
    returns every task for the current user; confirm whether filtering
    by id was intended.  A stale commented-out alternative query (a bare
    string-literal statement) was removed.
    """
    task = Task.query.filter_by(user_id=current_user.id).order_by(
        func.coalesce(Task.start_time, Task.end_time), Task.end_time)
    task_schema = TaskSchema(many=True)
    output = task_schema.dump(task)
    return jsonify({'task': output})
Beispiel #37
0
 def stalenessRating(self):
     """ Return the Queryable staleness rating of the mastery """
     seconds_since_correct = func.extract(
         'epoch', func.now() - self.lastCorrectAnswer)
     days_since_correct = cast(
         func.floor(seconds_since_correct / 86400), db.Integer)
     # NULL (no correct answer on record) rates as 0.
     return func.coalesce(days_since_correct / StalenessPeriod.days, 0)
Beispiel #38
0
def _compile_array_agg(element, compiler, **kw):
    """Compile array_agg, substituting the element's default (cast to the
    element type as a Postgres array) for NULL when a default is set."""
    base_sql = "%s(%s)" % (element.name, compiler.process(element.clauses))
    if element.default is None:
        return base_sql
    default_array = cast(postgresql.array(element.default), element.type)
    wrapped = func.coalesce(text(base_sql), default_array)
    return str(wrapped.compile(compiler))
Beispiel #39
0
    def total_spent(self):
        """Sum of distinct output values spent from this address; 0 when
        nothing has been spent."""
        spent_sum = func.coalesce(func.sum(distinct(Output.value)), 0)
        row = (db.session.query(spent_sum)
               .select_from(Input)
               .join(Output, Output.address == Input.prevout_address)
               .filter(Input.prevout_address == self.address)
               .first())
        return row[0]
Beispiel #40
0
def test_orm_expression(dburl):
    """Paging must work when ordering by an arbitrary ORM expression,
    both bare and labelled."""
    with S(dburl, echo=ECHO) as s:
        sort_key = func.coalesce(Book.a, 0) + Book.b
        for order_clause in (sort_key, sort_key.label("sort_by_me")):
            q = s.query(Book).order_by(order_clause, Book.id)
            check_paging_orm(q=q)
Beispiel #41
0
def main():
    """Entry point: scrape inmate data and persist it as a new batch.

    Returns a process exit code: 0 on success (or after --setup-db),
    1 when scraping or persisting fails.
    """
    parser = make_parser()
    args = parser.parse_args()

    if args.setup_db:
        create_tables()
        return 0

    logging.info("Starting database session")
    session = create_session()

    try:
        logging.info("Starting scraping")
        ents = scrape()
    except Exception:
        logging.exception("Scraping failed")
        return 1

    logging.info("Scraped %s inmates", len(ents))

    try:
        # Batch ids are monotonically increasing; COALESCE starts an
        # empty table at 0 so the first batch gets id 1.
        previous_batch, *_ = session.query(
            func.coalesce(func.max(Batch.id), 0)
        ).one()
        current_batch = previous_batch + 1

        batch = Batch(id=current_batch)
        session.add(batch)
        session.flush()

        for ent in ents:
            inmate = Inmate(
                batch_id=batch.id,
                booking_id=ent["booking_id"],
                name=ent["name"],
                race=ent["race"],
                sex=ent["sex"],
                height=ent["height"],
                weight=ent["weight"],
                hair=ent["hair"],
                eyes=ent["eyes"],
                booking_date=ent["date_booked"],
                next_court_date=ent["court_date"],
                bail=ent["bail"],
            )
            inmate.charges = [
                Charge(**charge, batch_id=batch.id)
                for charge in ent["charges"]
            ]

            session.add(inmate)
    except Exception:
        logging.exception("Persisting data failed")
        # Bug fix: roll back the partially-flushed batch so the session
        # is not left in a dirty state on failure.
        session.rollback()
        return 1

    session.commit()
    logging.info("Persisted scraped data")
    return 0
def generate_live_status_table(arch):
    """Render the HTML table of currently-running builds for ``arch``.

    For each scheduled package whose build has started, the table shows
    the package identity, build start time, the previous build's status
    and duration, the average duration of past (un)reproducible builds,
    and a link to the builder job log.
    """
    # Correlated scalar subquery: average duration of this package's past
    # builds that ended reproducible/unreproducible; 0 when none exist.
    averagesql = select([
        func.coalesce(func.avg(cast(stats_build.c.build_duration, Integer)), 0)
    ]).where(
        and_(
            stats_build.c.status.in_(('reproducible', 'unreproducible')),
            stats_build.c.name == sources.c.name,
            stats_build.c.suite == sources.c.suite,
            stats_build.c.architecture == sources.c.architecture,
        )
    ).as_scalar()

    query = select([
        sources.c.id,
        sources.c.suite,
        sources.c.architecture,
        sources.c.name,
        sources.c.version,
        schedule.c.date_build_started,
        results.c.status,
        results.c.build_duration,
        averagesql,
        schedule.c.job,
    ]).select_from(
        sources.join(schedule).join(results, isouter=True)
    ).where(
        and_(
            schedule.c.date_build_started != None,
            sources.c.architecture == bindparam('arch'),
        )
    ).order_by(
        schedule.c.date_scheduled
    )
    html = ''
    rows = query_db(query.params({'arch': arch}))
    html += '<p><table class="scheduled">\n' + tab
    html += '<tr><th class="center">#</th><th class="center">src pkg id</th><th class="center">suite</th><th class="center">arch</th>'
    html += '<th class=\"center\">source package</th><th class=\"center\">version</th></th>'
    html += '<th class=\"center\">build started</th><th class=\"center\">previous build status</th>'
    html += '<th class=\"center\">previous build duration</th><th class=\"center\">average build duration</th><th class=\"center\">builder job</th>'
    html += '</tr>\n'
    counter = 0
    # One table row per in-progress build; column order matches the
    # select list above.
    for row in rows:
        counter += 1
        suite = row[1]
        arch = row[2]
        pkg = row[3]
        duration = convert_into_hms_string(row[7])
        avg_duration = convert_into_hms_string(row[8])
        html += tab + '<tr><td>&nbsp;</td><td>' + str(row[0]) + '</td>'
        html += '<td>' + suite + '</td><td>' + arch + '</td>'
        html += '<td><code>' + link_package(pkg, suite, arch) + '</code></td>'
        html += '<td>' + str(row[4]) + '</td><td>' + str(row[5]) + '</td>'
        html += '<td>' + convert_into_status_html(str(row[6])) + '</td><td>' + duration + '</td><td>' + avg_duration + '</td>'
        html += '<td><a href="https://tests.reproducible-builds.org/cgi-bin/nph-logwatch?' + str(row[9]) + '">' + str(row[9]) + '</a></td>'
        html += '</tr>\n'
    html += '</table></p>\n'
    return html
def tas_gtas_combo(session, period, year):
    """ Creates a combined list of TAS and GTAS data filtered by the given period/year

        Args:
            session: DB session
            period: The period for which to get GTAS data
            year: The year for which to get GTAS data

        Returns:
            A WITH clause to use with other queries
    """
    # GTAS stores the TAS as a single concatenated string; rebuild the
    # same key from the TAS component columns, padding missing pieces
    # with their conventional defaults.
    tas_key = func.concat(func.coalesce(tas_model.allocation_transfer_agency, '000'),
                          func.coalesce(tas_model.agency_identifier, '000'),
                          func.coalesce(tas_model.beginning_period_of_availa, '0000'),
                          func.coalesce(tas_model.ending_period_of_availabil, '0000'),
                          func.coalesce(tas_model.availability_type_code, ' '),
                          func.coalesce(tas_model.main_account_code, '0000'),
                          func.coalesce(tas_model.sub_account_code, '000'))
    combo = session.query(
        gtas_model.allocation_transfer_agency.label('allocation_transfer_agency'),
        gtas_model.agency_identifier.label('agency_identifier'),
        gtas_model.beginning_period_of_availa.label('beginning_period_of_availa'),
        gtas_model.ending_period_of_availabil.label('ending_period_of_availabil'),
        gtas_model.availability_type_code.label('availability_type_code'),
        gtas_model.main_account_code.label('main_account_code'),
        gtas_model.sub_account_code.label('sub_account_code'),
        gtas_model.amount.label('amount'),
        gtas_model.line.label('line'),
        tas_model.financial_indicator2.label('financial_indicator2'),
        tas_model.fr_entity_type.label('fr_entity_type'))
    combo = combo.join(tas_model, gtas_model.tas == tas_key)
    combo = combo.filter(gtas_model.period == period)
    combo = combo.filter(gtas_model.fiscal_year == year)
    return combo.cte('tas_gtas')
Beispiel #44
0
def _query_for_billing_data(table, notification_type, start_date, end_date, service_id):
    """Aggregate billable notifications for one service/type/date range.

    Groups rows of ``table`` (a notifications-shaped table) by template,
    sender, rate multiplier, international flag, letter page count and
    postage, returning summed billable units and notification counts.
    Test-key notifications are excluded; only statuses billable for the
    given notification type are counted.
    """
    billable_type_list = {
        SMS_TYPE: NOTIFICATION_STATUS_TYPES_BILLABLE,
        EMAIL_TYPE: NOTIFICATION_STATUS_TYPES_BILLABLE,
        LETTER_TYPE: NOTIFICATION_STATUS_TYPES_BILLABLE_FOR_LETTERS
    }
    # Fall back to a per-channel default sender when sent_by is NULL.
    sent_by_func = func.coalesce(table.sent_by, case(
        [(table.notification_type == 'letter', 'dvla'),
         (table.notification_type == 'sms', 'unknown'),
         (table.notification_type == 'email', 'ses')]), )
    # Only letters carry a page count (their billable units); other
    # channels get NULL here.
    letter_page_count = case([(table.notification_type == 'letter', table.billable_units), ])

    query = db.session.query(
        table.template_id,
        table.service_id,
        table.notification_type,
        sent_by_func.label('sent_by'),
        func.coalesce(table.rate_multiplier, 1).cast(Integer).label('rate_multiplier'),
        func.coalesce(table.international, False).label('international'),
        letter_page_count.label('letter_page_count'),
        func.sum(table.billable_units).label('billable_units'),
        func.count().label('notifications_sent'),
        Service.crown,
        func.coalesce(table.postage, 'none').label('postage')
    ).filter(
        table.status.in_(billable_type_list[notification_type]),
        table.key_type != KEY_TYPE_TEST,
        table.created_at >= start_date,
        table.created_at < end_date,
        table.notification_type == notification_type,
        table.service_id == service_id
    ).group_by(
        # Every non-aggregated select column must appear here.
        table.template_id,
        table.service_id,
        table.notification_type,
        sent_by_func,
        letter_page_count,
        table.rate_multiplier,
        table.international,
        Service.crown,
        table.postage,
    ).join(
        Service
    )
    return query.all()
Beispiel #45
0
    def total_spent(self):
        """Sum of this address's output values that were spent (appear as
        a prevout of some input), excluding BID covenant outputs; 0 when
        none."""
        total = func.coalesce(func.sum(Output.value), 0)
        spent = (db.session.query(total)
                 .join(Input, Input.prevout_address == Output.address, isouter=True)
                 .filter(Output.address == self.address)
                 .filter(Output.covenant_action != 'BID')
                 .filter(Input.prevout_address != None))
        return spent.first()[0]
 def invalid_query(self, session, **kw):
     """Find languoids whose cached child counts disagree with counts
     derived from the recursive descendants CTE.

     The HAVING clause compares each stored child_*_count column (NULL
     coalesced to -1 so it always mismatches) against the number of
     descendants of the corresponding level counted from the CTE.
     """
     # Seed: every languoid that has a parent, carrying its own level.
     cte = session.query(Languoid.pk, Languoid.father_pk, Languoid.level)\
         .filter(Languoid.father_pk != None).cte(recursive=True)  # noqa
     parent = orm.aliased(Languoid)
     # Recursive step: climb to each ancestor, keeping the descendant's
     # pk and level.
     cte = cte.union_all(session.query(cte.c.pk, parent.father_pk, cte.c.level)\
         .join(parent, cte.c.father_pk == parent.pk)\
         .filter(parent.father_pk != None))  # noqa
     return session.query(Languoid)\
         .outerjoin(cte, Languoid.pk == cte.c.father_pk)\
         .group_by(Language.pk, Languoid.pk)\
         .having(or_(
             func.coalesce(Languoid.child_family_count, -1) !=
                 func.count(func.nullif(cte.c.level != LanguoidLevel.family, True)),
             func.coalesce(Languoid.child_language_count, -1) !=
                 func.count(func.nullif(cte.c.level != LanguoidLevel.language, True)),
             func.coalesce(Languoid.child_dialect_count, -1) !=
                 func.count(func.nullif(cte.c.level != LanguoidLevel.dialect, True))))\
         .order_by((Languoid.id))
Beispiel #47
0
 def common_entites(self):
     """Query entities shared by the reports: trimmed teacher name,
     discipline title, contingent title (stream, group, or course —
     first non-NULL wins) and the normtime id."""
     kont_title = func.coalesce(Potoklist.title, Kontgrp.title, Kontkurs.title)
     return [
         func.rtrim(Teacher.full_name).label("teacher"),
         func.rtrim(Discipline.title).label("discipline"),
         func.rtrim(kont_title).label("kont"),
         Normtime.id.label("nt"),
     ]
Beispiel #48
0
def get_total_amount_spent(departement_code: str = None) -> float:
    """Total money spent on non-cancelled bookings, optionally restricted
    to users of one departement; 0.0 when there are none."""
    total = func.coalesce(func.sum(Booking.amount * Booking.quantity), 0)
    query = db.session.query(total)

    if departement_code:
        query = query.join(User).filter(User.departementCode == departement_code)

    # ``== False`` is intentional: it compiles to SQL, unlike ``is False``.
    query = query.filter(Booking.isCancelled == False)
    return float(query.scalar())
Beispiel #49
0
 def is_in_digest_window(self, exclude_first_day=False):
     """Expression: does today fall inside the digest notification
     window (per-object days, falling back to the global setting)?"""
     from indico.modules.rb import settings as rb_settings
     digest_start = round_up_month(date.today(), from_day=2)
     days_left = cast(digest_start, Date) - cast(func.now(), Date)
     window = func.coalesce(self.notification_before_days,
                            rb_settings.get('notification_before_days'))
     # On the window's first day only the strict comparison matches.
     return days_left < window if exclude_first_day else days_left <= window
Beispiel #50
0
    def order_query(self, query):
        """ Orders the given query by the state of the collection. """

        direction = desc if self.direction == 'desc' else asc
        # Orders that are not plain model columns map to coalesced
        # expressions over the joined user/group tables.
        special_orders = {
            'group.name': func.coalesce(UserGroup.name, ''),
            'user.realname': func.coalesce(User.realname, ''),
            'user.username': func.coalesce(User.username, ''),
            'user.name': func.coalesce(User.realname, User.username, ''),
        }
        if self.order in inspect(self.model_class).columns.keys():
            attribute = getattr(self.model_class, self.order)
        elif self.order in special_orders:
            attribute = special_orders[self.order]
        else:
            # Unknown order keys fall back to the first issue column.
            attribute = self.model_class.first_issue

        return query.order_by(None).order_by(direction(attribute))
def fetch_billing_details_for_all_services():
    """Billing details per service, preferring service-level values and
    falling back to the owning organisation's."""
    def _coalesced(field):
        # Service value wins; organisation value is the fallback.  Both
        # models use the same attribute name, which is also the label.
        return func.coalesce(getattr(Service, field),
                             getattr(Organisation, field)).label(field)

    return db.session.query(
        Service.id.label('service_id'),
        _coalesced('purchase_order_number'),
        _coalesced('billing_contact_names'),
        _coalesced('billing_contact_email_addresses'),
        _coalesced('billing_reference'),
    ).outerjoin(Service.organisation).all()
Beispiel #52
0
def find_income_before_date(u, t):
    """Net income for user ``u`` up to and including ``t``: summed
    incoming (Income) minus summed outgoing (Incomego).

    COALESCE guarantees each SUM yields 0 rather than NULL when the user
    has no rows, so the fallbacks below only guard the result row itself.
    """
    incoming = db.session.query(
            (func.coalesce(func.sum(Income.num),0)).label('num'))\
            .filter(Income.uid == u, Income.date <= t)\
            .first()
    outgoing = db.session.query(
            (func.coalesce(func.sum(Incomego.num),0)).label('num'))\
            .filter(Incomego.uid == u, Incomego.date <= t)\
            .first()
    # Bug fix: use ``is None`` identity checks instead of ``== None``,
    # which row objects may overload with unexpected semantics.
    if incoming is None or len(incoming) == 0:
        incoming = [0]
    if outgoing is None or len(outgoing) == 0:
        outgoing = [0]
    return incoming[0] - outgoing[0]
Beispiel #53
0
 def is_in_digest_window(self, exclude_first_day=False):
     """Whether today lies inside this object's digest notification
     window (per-object setting, else the module-wide default)."""
     from indico.modules.rb import settings as rb_settings
     digest_start = round_up_month(date.today(), from_day=2)
     remaining = cast(digest_start, Date) - cast(func.now(), Date)
     window = func.coalesce(self.notification_before_days,
                            rb_settings.get('notification_before_days'))
     if exclude_first_day:
         return remaining < window
     return remaining <= window
def find_by_pro_user(
    user: User,
    booking_period: tuple[date, date],
    event_date: Optional[datetime] = None,
    venue_id: Optional[int] = None,
    page: int = 1,
    per_page_limit: int = 1000,
) -> BookingsRecapPaginated:
    """Return one page of booking recaps visible to a pro user.

    :param user: the pro user whose bookings are listed.
    :param booking_period: (start, end) dates bounding the bookings.
    :param event_date: optional filter on the event date.
    :param venue_id: optional filter on the venue.
    :param page: 1-based page number.
    :param per_page_limit: maximum number of bookings per page.
    """
    # TODO: remove this block when IMPROVE_BOOKINGS_PERF is definitely adopted
    if not FeatureToggle.IMPROVE_BOOKINGS_PERF.is_active():
        period = sorted(booking_period)
        recap_subquery = _filter_bookings_recap_subquery(
            user, period, event_date, venue_id)

        # The grand total only needs to be computed once, on the first page.
        if page != 1:
            total = 0
        else:
            total = db.session.query(
                func.coalesce(func.sum(recap_subquery.c.quantity), 0)
            ).scalar()

        recap_query = _build_bookings_recap_query(recap_subquery)
        with_duplicates = _duplicate_booking_when_quantity_is_two_legacy(
            recap_query, recap_subquery)
        offset = (page - 1) * per_page_limit
        page_rows = (
            with_duplicates
            .order_by(text('"bookingDate" DESC'))
            .offset(offset)
            .limit(per_page_limit)
            .all()
        )

        return _paginated_bookings_sql_entities_to_bookings_recap(
            paginated_bookings=page_rows,
            page=page,
            per_page_limit=per_page_limit,
            total_bookings_recap=total,
            serializer=_serialize_booking_recap_legacy,
        )

    total = _get_filtered_bookings_count(
        user, booking_period, event_date, venue_id)

    bookings = _get_filtered_booking_pro(
        pro_user=user,
        period=booking_period,
        event_date=event_date,
        venue_id=venue_id,
    )
    bookings = _duplicate_booking_when_quantity_is_two(bookings)
    page_rows = (
        bookings
        .order_by(text('"bookedAt" DESC'))
        .offset((page - 1) * per_page_limit)
        .limit(per_page_limit)
        .all()
    )

    return _paginated_bookings_sql_entities_to_bookings_recap(
        paginated_bookings=page_rows,
        page=page,
        per_page_limit=per_page_limit,
        total_bookings_recap=total,
    )
Beispiel #55
0
 def table_with_variants_same_c_copy(self, select_columns: Optional[list]):
     """
      Returns a table of variants of the same type as the ones contained in RegionAttrs.with_variants_same_c_copy, taken
      only from the individuals that own all of them on the same chromosome copy.

      Creates a new database table (with a randomized name) holding the result and returns it reflected;
      note the side effects: the intermediate table is dropped and the result table persists on the connection.
      :param select_columns: the list of column names to select from the result. If None, all the columns are taken.
     """
     if len(self.region_attrs.with_variants_same_c_copy) < 2:
         raise ValueError(
             'You must provide at least two Mutation instances in order to use this method.'
         )
     # selects only the mutations to be on the same chromosome copies (this set will be used two times) from all individuals
     # we will enforce the presence of all the given mutations in all the individuals later...
     interm_select_column_names = None  # means all columns
     if select_columns is not None:  # otherwise pick select_columns + minimum required
         # item_id/al1/al2 are required below for grouping and for the allele sums
         interm_select_column_names = set(select_columns)
         interm_select_column_names.update(['item_id', 'al1', 'al2'])
     intermediate_table = self._table_with_any_of_mutations(
         interm_select_column_names, self.my_meta_t,
         *self.region_attrs.with_variants_same_c_copy)
     # groups mutations by owner in the intermediate table, and take only the owners for which sum(al1) or sum(al2)
     # equals to the number of the given mutations. That condition automatically implies the presence of all the
     # given mutations in the same individual.
     # for those owner, take all the given mutations
     result_columns = [intermediate_table]  # means all columns
     if select_columns is not None:  # otherwise pick the columns from select_columns
         result_columns = [
             intermediate_table.c[col_name] for col_name in select_columns
         ]
     stmt_as = \
         select(result_columns) \
         .where(intermediate_table.c.item_id.in_(
             select([intermediate_table.c.item_id])
             .group_by(intermediate_table.c.item_id)
             .having(
                 (func.sum(intermediate_table.c.al1) == len(
                     self.region_attrs.with_variants_same_c_copy)) |  # the ( ) around each condition are mandatory
                 (func.sum(func.coalesce(intermediate_table.c.al2, 0)) == len(
                     self.region_attrs.with_variants_same_c_copy)))
         ))
     # materialize the selection as a new table with a collision-safe name
     target_t_name = utils.random_t_name_w_prefix('with_var_same_c_copy')
     stmt = utils.stmt_create_table_as(target_t_name, stmt_as,
                                       default_schema_to_use_name)
     if self.log_sql_commands:
         utils.show_stmt(
             self.connection, stmt, self.logger.debug,
             'INDIVIDUALS (+ THE GIVEN MUTATIONS) HAVING ALL THE SPECIFIED MUTATIONS ON THE SAME CHROMOSOME COPY'
         )
     self.connection.execute(stmt)
     # the intermediate table is no longer needed once the result is materialized
     if self.log_sql_commands:
         self.logger.debug('DROP TABLE ' + intermediate_table.name)
     intermediate_table.drop(self.connection)
     return Table(target_t_name,
                  db_meta,
                  autoload=True,
                  autoload_with=self.connection,
                  schema=default_schema_to_use_name)
    def next_dagruns_to_examine(
        cls,
        state: DagRunState,
        session: Session,
        max_number: Optional[int] = None,
    ):
        """
        Return the next DagRuns that the scheduler should attempt to schedule.

        This will return zero or more DagRun rows that are row-level-locked with a "SELECT ... FOR UPDATE"
        query; any scheduling decisions should therefore happen in a single transaction -- the rows are
        unlocked as soon as that transaction commits.

        :rtype: list[airflow.models.DagRun]
        """
        from airflow.models.dag import DagModel

        limit = cls.DEFAULT_DAGRUNS_TO_EXAMINE if max_number is None else max_number

        # TODO: Bake this query, it is run _A lot_
        runs = (
            session.query(cls)
            .filter(cls.state == state, cls.run_type != DagRunType.BACKFILL_JOB)
            .join(
                DagModel,
                DagModel.dag_id == cls.dag_id,
            )
            .filter(
                DagModel.is_paused == expression.false(),
                DagModel.is_active == expression.true(),
            )
        )
        if state == State.QUEUED:
            # Queued runs must not push a DAG past its max_active_runs cap:
            # count each DAG's currently-running runs and drop queued runs
            # for DAGs already at the limit.
            running_counts = (
                session.query(DagRun.dag_id, func.count(DagRun.state).label('num_running'))
                .filter(DagRun.state == DagRunState.RUNNING)
                .group_by(DagRun.dag_id)
                .subquery()
            )
            runs = runs.outerjoin(
                running_counts, running_counts.c.dag_id == DagRun.dag_id
            ).filter(
                func.coalesce(running_counts.c.num_running, 0) < DagModel.max_active_runs
            )
        runs = runs.order_by(
            nulls_first(cls.last_scheduling_decision, session=session),
            cls.execution_date,
        )

        if not settings.ALLOW_FUTURE_EXEC_DATES:
            runs = runs.filter(DagRun.execution_date <= func.now())

        return with_row_locks(
            runs.limit(limit), of=cls, session=session, **skip_locked(session=session)
        )
Beispiel #57
0
    def test_twelve(self):
        """Check day extraction from (current timestamp - nullable column)."""
        table = self.tables.t
        reference = datetime.datetime(2012, 5, 10, 12, 15, 25)
        # Expected delta: the database's notion of "now" minus the reference,
        # with tzinfo stripped so the subtraction stays naive.
        expected = self.bind.scalar(
            func.current_timestamp()).replace(tzinfo=None) - reference

        self._test(
            func.current_timestamp() -
            func.coalesce(table.c.dtme, func.current_timestamp()),
            {"day": expected.days})
Beispiel #58
0
def investments_amount():
    """Return a JSON payload with the coin sum of investments in the current timeframe.

    A non-positive bound from ``get_timeframes()`` means "unbounded" on
    that side; ``COALESCE`` yields 0 when no rows match.
    """
    time_from, time_to = get_timeframes()
    query = db.session.query(func.coalesce(func.sum(Investment.amount), 0))

    conditions = []
    if time_from > 0:
        conditions.append(Investment.time > time_from)
    if time_to > 0:
        conditions.append(Investment.time < time_to)
    if conditions:
        query = query.filter(*conditions)

    return jsonify({"coins": str(query.scalar())})