Code example #1
File: InstalledComponentsDB.py Project: ahaupt/DIRAC
 def __init__( self, system = null(), module = null(), cType = null() ):
   """ just defines some instance members
   """
   self.system = system
   self.module = module
   self.cType = cType
   self.installationList = []
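
In the DIRAC model above, null() (from sqlalchemy.sql.expression) serves as the default for instance members, so attributes left unset are flushed to the database as a literal SQL NULL. A minimal, self-contained sketch of the same idea, assuming SQLAlchemy 1.4+; the Component model and its columns here are hypothetical stand-ins, not DIRAC's actual schema:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.expression import null

Base = declarative_base()

class Component(Base):  # hypothetical stand-in, not DIRAC's real table
    __tablename__ = 'components'
    id = Column(Integer, primary_key=True)
    system = Column(String(32))
    module = Column(String(32))

    def __init__(self, system=null(), module=null()):
        # null() is a SQL expression; at flush time it renders as NULL
        self.system = system
        self.module = module

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Component())                        # INSERT ... VALUES (NULL, NULL)
    session.add(Component(system='Configuration'))  # module still NULL
    session.commit()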
Code example #2
    def extjsgridfilter(self, q, column, afilter):
        """Query helper to convert a extjs grid filter dict
        to a sqlalchemy query filter

        """
        if (afilter['type'] == 'numeric'):
            if (afilter['comparison'] == 'eq'):
                q = q.filter(column == afilter['value'])
            if (afilter['comparison'] == 'gt'):
                q = q.filter(column > afilter['value'])
            if (afilter['comparison'] == 'lt'):
                q = q.filter(column < afilter['value'])
        elif (afilter['type'] == 'string'):
            q = q.filter(column.contains(afilter['value']))
        elif (afilter['type'] == 'list'):
            q = q.filter(column.in_(afilter['value']))
        elif (afilter['type'] == 'boolean'):
            q = q.filter(column == afilter['value'])
        elif (afilter['type'] == 'reaction'):
            q = self.reaction_filter(q, afilter)
        elif (afilter['type'] == 'null'):
            if not afilter['value']:
                q = q.filter(column == null())  # IS NULL
            else:
                q = q.filter(column != null())  # IS NOT NULL
        return q
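
A short usage sketch of what the helper above builds for a few representative ExtJS filter dicts. The Compound model is hypothetical and exists only for demonstration; assumes SQLAlchemy 1.4+:

from sqlalchemy import Column, Float, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.expression import null

Base = declarative_base()

class Compound(Base):  # hypothetical model for demonstration only
    __tablename__ = 'compounds'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    mz = Column(Float)
    refscore = Column(Float)

session = Session(create_engine('sqlite://'))
q = session.query(Compound)
q = q.filter(Compound.mz > 300)               # {'type': 'numeric', 'comparison': 'gt', 'value': 300}
q = q.filter(Compound.name.contains('benz'))  # {'type': 'string', 'value': 'benz'}
q = q.filter(Compound.refscore == null())     # {'type': 'null', 'value': False} -> IS NULL
print(q)  # the WHERE clause ends with "compounds.refscore IS NULL"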
Code example #3
File: external_net_db.py Project: ytwxy99/neutron
 def _network_result_filter_hook(self, query, filters):
     vals = filters and filters.get(external_net.EXTERNAL, [])
     if not vals:
         return query
     if vals[0]:
         return query.filter((ExternalNetwork.network_id != expr.null()))
     return query.filter((ExternalNetwork.network_id == expr.null()))
Code example #4
File: required_signoffs.py Project: catlee/balrog
    def get(self, input_dict):
        if not self.table.select({f: input_dict.get(f) for f in self.decisionFields}):
            return problem(404, "Not Found", "Requested Required Signoff does not exist")

        try:
            page = int(connexion.request.args.get('page', 1))
            limit = int(connexion.request.args.get('limit', 100))
        except ValueError as msg:
            self.log.warning("Bad input: %s", msg)
            return problem(400, "Bad Request", str(msg))
        offset = limit * (page - 1)

        where_count = [self.table.history.data_version != null()]
        for field in self.decisionFields:
            where_count.append(getattr(self.table.history, field) == input_dict.get(field))
        total_count = self.table.history.count(where=where_count)

        where = [getattr(self.table.history, f) == input_dict.get(f) for f in self.decisionFields]
        where.append(self.table.history.data_version != null())
        revisions = self.table.history.select(
            where=where, limit=limit, offset=offset,
            order_by=[self.table.history.timestamp.desc()]
        )

        return jsonify(count=total_count, required_signoffs=revisions)
Code example #5
File: condition.py Project: spoqa/dodotable
 def __query__(self):
     q = super(NullSelectableSelectFilter, self).__query__()
     arg_name = 'select.{}'.format(self.attribute_name)
     s = self.request_args.get(arg_name, self.default)
     if s == self.NULL:
         q = self.attribute.is_(null())
     elif s == self.NOT_NULL:
         q = self.attribute.isnot(null())
     return q
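
The explicit is_(null()) / isnot(null()) used here and the == null() / != null() comparisons used in most other examples compile to the same SQL: SQLAlchemy coerces equality comparisons against NULL into IS / IS NOT. A quick runnable check:

from sqlalchemy.sql import column
from sqlalchemy.sql.expression import null

c = column('unInstallationTime')
print(c == null())      # unInstallationTime IS NULL
print(c != null())      # unInstallationTime IS NOT NULL
print(c.is_(null()))    # unInstallationTime IS NULL
print(c.isnot(null()))  # unInstallationTime IS NOT NULL (spelled is_not() in SQLAlchemy 2.0)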
Code example #6
    def geoname_roundup():

        Session = create_session(check=True, **session_args)
        session = Session()
        insert = Insert(session)
        Session = create_session(check=False, database='CtyOD')
        insert.ctyod = Session()

        # Roundup addresses in the from_address position, to intersect with next query
        subquery = (
            session.query(Movement.id.label('movement_id'))  # label so the join below can reference it
            .outerjoin(
                Association,
                and_(
                    Movement.from_address_id == Association.address_id,
                    Movement.to_address_id == Association.to_address_id,
                    )
                )
            .filter(Association.premises_id == null())
            .subquery()
            )

        # Roundup addresses in the to_address position
        query = (
            session.query(Movement.from_address_id, Movement.to_address_id)
            .outerjoin(
                Association,
                and_(
                    Movement.to_address_id == Association.address_id,
                    Movement.from_address_id == Association.from_address_id,
                    )
                )
            .filter(Association.premises_id == null())
            .join(subquery, Movement.id == subquery.c.movement_id)
            .group_by(Movement.from_address_id, Movement.to_address_id)
            )

        for movement in query:
            from_address = (
                session.query(Address)
                .filter_by(id=movement.from_address_id)
                .one()
                )
            to_address = (
                session.query(Address)
                .filter_by(id=movement.to_address_id)
                .one()
                )
            try:
                insert.premises(from_address, to_address)
                insert.session.commit()
            except Exception:
                insert.session.rollback()

        insert.session.close()
        insert.ctyod.close()
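
Both queries above are instances of the anti-join idiom that other examples in this list also rely on (e.g. #44): LEFT OUTER JOIN to the related table, then keep only the rows where a joined column == null(), i.e. where no match was found. A self-contained sketch with hypothetical tables, assuming SQLAlchemy 1.4+:

from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.expression import null

Base = declarative_base()

class Address(Base):  # hypothetical tables for demonstration only
    __tablename__ = 'address'
    id = Column(Integer, primary_key=True)

class Association(Base):
    __tablename__ = 'association'
    id = Column(Integer, primary_key=True)
    address_id = Column(Integer, ForeignKey('address.id'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    # Addresses with no Association row: the outer join leaves the right
    # side's columns NULL, and the == null() filter keeps exactly those rows.
    orphans = (
        session.query(Address.id)
        .outerjoin(Association, Association.address_id == Address.id)
        .filter(Association.id == null())
    )
    print(orphans)  # ... LEFT OUTER JOIN association ... WHERE association.id IS NULL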
Code example #7
File: scheduled_changes.py Project: nurav/balrog
    def get(self, sc_id):
        if not self.table.scheduled_changes.select({"sc_id": sc_id}):
            return Response(status=404, response="Scheduled change does not exist")

        try:
            page = int(request.args.get('page', 1))
            limit = int(request.args.get('limit', 100))
            assert page >= 1
        except (ValueError, AssertionError) as msg:
            self.log.warning("Bad input: %s", msg)
            return Response(status=400, response=json.dumps({"exception": str(msg)}))

        offset = limit * (page - 1)
        total_count = self.table.scheduled_changes.history.t.count()\
            .where(self.table.scheduled_changes.history.sc_id == sc_id)\
            .where(self.table.scheduled_changes.history.data_version != null())\
            .execute()\
            .fetchone()[0]

        # Although Scheduled Changes are stored across two tables, we don't
        # expose that through the API. Because of this, we need to look up
        # history in both and return the combined version.
        # This is done by the database layer for non-history parts of Scheduled Changes, but
        # that's not feasible for History due to the inheritance structure of the tables,
        # so we do it here instead.
        revisions = self.table.scheduled_changes.history.select(
            where=[self.table.scheduled_changes.history.sc_id == sc_id,
                   self.table.scheduled_changes.history.data_version != null()],
            limit=limit,
            offset=offset,
            order_by=[self.table.scheduled_changes.history.timestamp.desc()],
        )
        # There's a big 'ol assumption here that the primary Scheduled Changes
        # table and the conditions table always keep their data version in sync.
        for r in revisions:
            cond = self.table.scheduled_changes.conditions.history.select(
                where=[self.table.scheduled_changes.conditions.history.sc_id == r["sc_id"],
                       self.table.scheduled_changes.conditions.history.data_version == r["data_version"]],
            )
            r.update(cond[0])

        ret = {
            "count": total_count,
            "revisions": [],
        }

        for rev in revisions:
            r = {}
            for k, v in rev.iteritems():
                if k == "data_version":
                    r["sc_data_version"] = v
                else:
                    r[k.replace("base_", "")] = v
            ret["revisions"].append(r)

        return jsonify(ret)
Code example #8
 def visit_binary(binary):
     mapper = reverse_direction and self.parent_property.mapper or self.parent_property.parent
     if isinstance(binary.left, expression._BindParamClause) and binary.left.key in bind_to_col:
         # reverse order if the NULL is on the left side
         binary.left = binary.right
         binary.right = expression.null()
         binary.operator = operators.is_
     elif isinstance(binary.right, expression._BindParamClause) and binary.right.key in bind_to_col:
         binary.right = expression.null()
         binary.operator = operators.is_
Code example #9
File: InstalledComponentsDB.py Project: ahaupt/DIRAC
 def __init__( self, instance = null(),
               installationTime = null(),
               unInstallationTime = null(),
               installedBy = null(),
               unInstalledBy = null() ):
   self.instance = instance
   self.installationTime = installationTime
   self.unInstallationTime = unInstallationTime
   self.installedBy = installedBy
   self.unInstalledBy = unInstalledBy
Code example #10
File: util.py Project: pszafer/dlna_upnp_invention
 def visit_binary(binary):
     if isinstance(binary.left, expression._BindParamClause) and binary.left._identifying_key in nulls:
         # reverse order if the NULL is on the left side
         binary.left = binary.right
         binary.right = expression.null()
         binary.operator = operators.is_
         binary.negate = operators.isnot
     elif isinstance(binary.right, expression._BindParamClause) and binary.right._identifying_key in nulls:
         binary.right = expression.null()
         binary.operator = operators.is_
         binary.negate = operators.isnot
Code example #11
File: releases.py Project: nurav/balrog
    def get(self, release):
        releases = dbo.releases.getReleases(name=release, limit=1)
        if not releases:
            return Response(status=404,
                            response='Requested release does not exist')
        release = releases[0]
        table = dbo.releases.history

        try:
            page = int(request.args.get('page', 1))
            limit = int(request.args.get('limit', 10))
            assert page >= 1
        except (ValueError, AssertionError) as e:
            self.log.warning("Bad input: %s", json.dumps(e.args))
            return Response(status=400, response=json.dumps({"data": e.args}))
        offset = limit * (page - 1)
        total_count = table.t.count()\
            .where(table.name == release['name'])\
            .where(table.data_version != null())\
            .execute()\
            .fetchone()[0]

        revisions = table.select(
            where=[
                table.name == release['name'],
                table.data_version != null()
            ],
            limit=limit,
            offset=offset,
            order_by=[table.timestamp.desc()],
        )

        _mapping = [
            'data_version',
            'name',
            'product',
            'read_only',
            '_different',
            '_time_ago',
            'change_id',
            'changed_by',
            "timestamp",
        ]

        self.annotateRevisionDifferences(revisions)

        _revisions = []
        for r in revisions:
            _revisions.append(dict(
                (item, r[item])
                for item in _mapping
            ))

        return jsonify(revisions=_revisions, count=total_count)
Code example #12
File: history_all.py Project: njirap/balrog
def _get_filters(obj, history_table):
    query = get_input_dict()
    where = [getattr(history_table, f) == query.get(f) for f in query]
    where.append(history_table.data_version != null())
    if hasattr(history_table, 'product'):
        where.append(history_table.product != null())
    if request.args.get('timestamp_from'):
        where.append(history_table.timestamp >= int(request.args.get('timestamp_from')))
    if request.args.get('timestamp_to'):
        where.append(history_table.timestamp <= int(request.args.get('timestamp_to')))
    return where
Code example #13
File: required_signoffs.py Project: njirap/balrog
 def _get_filters(self):
     query = get_input_dict()
     where = [getattr(self.table.history, f) == query.get(f) for f in query]
     where.append(self.table.history.data_version != null())
     if hasattr(self.history_table, 'product'):
         where.append(self.history_table.product != null())
     request = connexion.request
     if request.args.get('timestamp_from'):
         where.append(self.history_table.timestamp >= int(request.args.get('timestamp_from')))
     if request.args.get('timestamp_to'):
         where.append(self.history_table.timestamp <= int(request.args.get('timestamp_to')))
     return where
Code example #14
File: compounds.py Project: admed/molgears
 def avg_ct(self, cell_line):
     from collections.abc import Iterable  # moved to collections.abc in Python 3
     from sqlalchemy.sql.expression import null
     if self.ctoxicity:
         if isinstance(self.ctoxicity, Iterable):
             values = [ct.ic50 for ct in self.ctoxicity if ct.cell_line == cell_line and ct.active]
         else:
             values = null()
         try:
             self._avg_ct = round(sum(values)/len(values), 4)
         except (ZeroDivisionError, TypeError):  # TypeError when values is null()
             self._avg_ct = null() # the default value
     else:
         self._avg_ct = null()
Code example #15
File: grids.py Project: AbhishekKumarSingh/galaxy
 def filter( self, trans, user, query, column_filter ):
     """ Modify query to filter histories by sharing status. """
     if column_filter == "All":
         pass
     elif column_filter:
         if column_filter == "private":
             query = query.filter( self.model_class.users_shared_with == null() )
             query = query.filter( self.model_class.importable == false() )
         elif column_filter == "shared":
             query = query.filter( self.model_class.users_shared_with != null() )
         elif column_filter == "accessible":
             query = query.filter( self.model_class.importable == true() )
         elif column_filter == "published":
             query = query.filter( self.model_class.published == true() )
     return query
Code example #16
File: models.py Project: epandurski/swpt_payments
class SuccessfulPaymentSignal(Signal):
    """Sent to the payee and the payer when an offer has been paid."""

    class __marshmallow__(Schema):
        payee_creditor_id = fields.Integer()
        offer_id = fields.Integer()
        payer_creditor_id = fields.Integer()
        payer_payment_order_seqnum = fields.Integer()
        debtor_id = fields.Integer()
        amount = fields.Integer()
        payer_note = fields.Raw()
        paid_at_ts = fields.DateTime()
        reciprocal_payment_debtor_id = fields.Integer()
        reciprocal_payment_amount = fields.Integer()
        proof_id = fields.Integer()

    payee_creditor_id = db.Column(db.BigInteger, primary_key=True)
    offer_id = db.Column(db.BigInteger, primary_key=True)
    payer_creditor_id = db.Column(db.BigInteger, primary_key=True)
    payer_payment_order_seqnum = db.Column(db.Integer, primary_key=True)
    debtor_id = db.Column(db.BigInteger, nullable=False)
    amount = db.Column(db.BigInteger, nullable=False)
    payer_note = db.Column(pg.JSON, nullable=False)
    paid_at_ts = db.Column(db.TIMESTAMP(timezone=True), nullable=False)
    reciprocal_payment_debtor_id = db.Column(db.BigInteger)
    reciprocal_payment_amount = db.Column(db.BigInteger, nullable=False)
    proof_id = db.Column(db.BigInteger, nullable=False)
    __table_args__ = (
        db.CheckConstraint(amount >= 0),
        db.CheckConstraint(reciprocal_payment_amount >= 0),
        db.CheckConstraint(or_(
            reciprocal_payment_debtor_id != null(),
            reciprocal_payment_amount == 0,
        )),
    )
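
Inside the CheckConstraint above, reciprocal_payment_debtor_id != null() compiles to IS NOT NULL, so the constraint reads "a reciprocal debtor is set, or the reciprocal amount is zero". A cut-down, hypothetical table showing the rendered DDL:

from sqlalchemy import BigInteger, CheckConstraint, Column, MetaData, Table, or_
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql.expression import null

metadata = MetaData()
debtor_id = Column('reciprocal_payment_debtor_id', BigInteger)
amount = Column('reciprocal_payment_amount', BigInteger, nullable=False)
demo = Table(  # hypothetical cut-down of the model above
    'demo_signal', metadata, debtor_id, amount,
    CheckConstraint(or_(debtor_id != null(), amount == 0)),
)
# ... CHECK (reciprocal_payment_debtor_id IS NOT NULL OR reciprocal_payment_amount = 0)
print(CreateTable(demo))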
Code example #17
File: l3_db.py Project: nitinnain/neutron
 def _network_filter_hook(self, context, original_model, conditions):
     if conditions is not None and not hasattr(conditions, "__iter__"):
         conditions = (conditions,)
     # Apply the external network filter only in non-admin context
     if not context.is_admin and hasattr(original_model, "tenant_id"):
         conditions = expr.or_(ExternalNetwork.network_id != expr.null(), *conditions)
     return conditions
Code example #18
File: handler.py Project: ashvark/galaxy
    def __check_jobs_at_startup( self ):
        """
        Checks all jobs that are in the 'new', 'queued' or 'running' state in
        the database and requeues or cleans up as necessary.  Only run as the
        job handler starts.
        In case the activation is enforced it will filter out the jobs of inactive users.
        """
        jobs_at_startup = []
        if self.track_jobs_in_database:
            in_list = ( model.Job.states.QUEUED,
                        model.Job.states.RUNNING )
        else:
            in_list = ( model.Job.states.NEW,
                        model.Job.states.QUEUED,
                        model.Job.states.RUNNING )
        if self.app.config.user_activation_on:
                jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                    .outerjoin( model.User ) \
                    .filter( model.Job.state.in_( in_list ) &
                             ( model.Job.handler == self.app.config.server_name ) &
                             or_( ( model.Job.user_id == null() ), ( model.User.active == true() ) ) ).all()
        else:
            jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                .filter( model.Job.state.in_( in_list ) &
                         ( model.Job.handler == self.app.config.server_name ) ).all()

        for job in jobs_at_startup:
            if not self.app.toolbox.has_tool( job.tool_id, job.tool_version, exact=True ):
                log.warning( "(%s) Tool '%s' removed from tool config, unable to recover job" % ( job.id, job.tool_id ) )
                self.job_wrapper( job ).fail( 'This tool was disabled before the job completed.  Please contact your Galaxy administrator.' )
            elif job.job_runner_name is not None and job.job_runner_external_id is None:
                # This could happen during certain revisions of Galaxy where a runner URL was persisted before the job was dispatched to a runner.
                log.debug( "(%s) Job runner assigned but no external ID recorded, adding to the job handler queue" % job.id )
                job.job_runner_name = None
                if self.track_jobs_in_database:
                    job.set_state( model.Job.states.NEW )
                else:
                    self.queue.put( ( job.id, job.tool_id ) )
            elif job.job_runner_name is not None and job.job_runner_external_id is not None and job.destination_id is None:
                # This is the first start after upgrading from URLs to destinations, convert the URL to a destination and persist
                job_wrapper = self.job_wrapper( job )
                job_destination = self.dispatcher.url_to_destination(job.job_runner_name)
                if job_destination.id is None:
                    job_destination.id = 'legacy_url'
                job_wrapper.set_job_destination(job_destination, job.job_runner_external_id)
                self.dispatcher.recover( job, job_wrapper )
                log.info('(%s) Converted job from a URL to a destination and recovered' % (job.id))
            elif job.job_runner_name is None:
                # Never (fully) dispatched
                log.debug( "(%s) No job runner assigned and job still in '%s' state, adding to the job handler queue" % ( job.id, job.state ) )
                if self.track_jobs_in_database:
                    job.set_state( model.Job.states.NEW )
                else:
                    self.queue.put( ( job.id, job.tool_id ) )
            else:
                # Already dispatched and running
                job_wrapper = self.__recover_job_wrapper( job )
                self.dispatcher.recover( job, job_wrapper )
        if self.sa_session.dirty:
            self.sa_session.flush()
Code example #19
File: g_engine.py Project: JuviAndaya/chellow
def get_data_sources(data_source, start_date, finish_date, forecast_date=None):

    if forecast_date is None:
        forecast_date = data_source.forecast_date

    if data_source.start_date == start_date and \
            data_source.finish_date == finish_date \
            and forecast_date == data_source.forecast_date:
        yield data_source
    else:
        for g_era in data_source.sess.query(GEra).filter(
                GEra.g_supply == data_source.g_supply,
                GEra.start_date <= finish_date,
                or_(
                    GEra.finish_date == null(),
                    GEra.finish_date >= start_date)):
            g_era_start = g_era.start_date

            if start_date < g_era_start:
                chunk_start = g_era_start
            else:
                chunk_start = start_date

            g_era_finish = g_era.finish_date

            chunk_finish = g_era_finish if \
                hh_after(finish_date, g_era_finish) else finish_date

            ds = GDataSource(
                data_source.sess, chunk_start, chunk_finish, forecast_date,
                g_era, data_source.caches, data_source.bill)
            yield ds
Code example #20
File: models.py Project: epandurski/swpt_payments
class PaymentProof(db.Model):
    payee_creditor_id = db.Column(db.BigInteger, primary_key=True)
    proof_id = db.Column(db.BigInteger, primary_key=True, autoincrement=True)
    proof_secret = db.Column(pg.BYTEA, nullable=False)
    payer_creditor_id = db.Column(db.BigInteger, nullable=False)
    debtor_id = db.Column(
        db.BigInteger,
        nullable=False,
        comment='The ID of the debtor through which the payment went.',
    )
    amount = db.Column(db.BigInteger, nullable=False)
    payer_note = db.Column(pg.JSON, nullable=False, default={})
    paid_at_ts = db.Column(db.TIMESTAMP(timezone=True), nullable=False, default=get_now_utc)
    reciprocal_payment_debtor_id = db.Column(db.BigInteger)
    reciprocal_payment_amount = db.Column(db.BigInteger, nullable=False)
    offer_id = db.Column(db.BigInteger, nullable=False)
    offer_created_at_ts = db.Column(db.TIMESTAMP(timezone=True), nullable=False)
    offer_description = db.Column(pg.JSON)
    __table_args__ = (
        db.CheckConstraint(amount >= 0),
        db.CheckConstraint(reciprocal_payment_amount >= 0),
        db.CheckConstraint(or_(
            reciprocal_payment_debtor_id != null(),
            reciprocal_payment_amount == 0,
        )),
        {
            'comment': 'Represents an evidence that a payment has been made to an offer. '
                       '(The corresponding offer has been deleted.)',
        }
    )
Code example #21
File: handler.py Project: Galaxyinternship/Galaxy
    def __check_jobs_at_startup( self ):
        """
        Checks all jobs that are in the 'new', 'queued' or 'running' state in
        the database and requeues or cleans up as necessary.  Only run as the
        job handler starts.
        In case the activation is enforced it will filter out the jobs of inactive users.
        """
        jobs_at_startup = []
        if self.track_jobs_in_database:
            in_list = ( model.Job.states.QUEUED,
                        model.Job.states.RUNNING )
        else:
            in_list = ( model.Job.states.NEW,
                        model.Job.states.QUEUED,
                        model.Job.states.RUNNING )
        if self.app.config.user_activation_on:
                jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                    .outerjoin( model.User ) \
                    .filter( model.Job.state.in_( in_list ) &
                             ( model.Job.handler == self.app.config.server_name ) &
                             or_( ( model.Job.user_id == null() ), ( model.User.active == true() ) ) ).all()
        else:
            jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                .filter( model.Job.state.in_( in_list ) &
                         ( model.Job.handler == self.app.config.server_name ) ).all()

        for job in jobs_at_startup:
            if not self.app.toolbox.has_tool( job.tool_id, job.tool_version, exact=True ):
                log.warning( "(%s) Tool '%s' removed from tool config, unable to recover job" % ( job.id, job.tool_id ) )
                self.job_wrapper( job ).fail( 'This tool was disabled before the job completed.  Please contact your Galaxy administrator.' )
            elif job.job_runner_name is not None and job.job_runner_external_id is None:
                # This could happen during certain revisions of Galaxy where a runner URL was persisted before the job was dispatched to a runner.
                log.debug( "(%s) Job runner assigned but no external ID recorded, adding to the job handler queue" % job.id )
                job.job_runner_name = None
                if self.track_jobs_in_database:
                    job.set_state( model.Job.states.NEW )
                else:
                    self.queue.put( ( job.id, job.tool_id ) )
            elif job.job_runner_name is not None and job.job_runner_external_id is not None and job.destination_id is None:
                # This is the first start after upgrading from URLs to destinations, convert the URL to a destination and persist
                job_wrapper = self.job_wrapper( job )
                job_destination = self.dispatcher.url_to_destination(job.job_runner_name)
                if job_destination.id is None:
                    job_destination.id = 'legacy_url'
                job_wrapper.set_job_destination(job_destination, job.job_runner_external_id)
                self.dispatcher.recover( job, job_wrapper )
                log.info('(%s) Converted job from a URL to a destination and recovered' % (job.id))
            elif job.job_runner_name is None:
                # Never (fully) dispatched
                log.debug( "(%s) No job runner assigned and job still in '%s' state, adding to the job handler queue" % ( job.id, job.state ) )
                if self.track_jobs_in_database:
                    job.set_state( model.Job.states.NEW )
                else:
                    self.queue.put( ( job.id, job.tool_id ) )
            else:
                # Already dispatched and running
                job_wrapper = self.__recover_job_wrapper( job )
                self.dispatcher.recover( job, job_wrapper )
        if self.sa_session.dirty:
            self.sa_session.flush()
Code example #22
File: sql.py Project: mahak/keystone
    def check_project_depth(self, max_depth):
        with sql.session_for_read() as session:
            obj_list = []
            # Using db table self outerjoin to find the project descendants.
            #
            # We'll only outerjoin the project table `max_depth` times to
            # check whether current project tree exceed the max depth limit.
            #
            # For example:
            #
            # If max_depth is 2, we will take the outerjoin 2 times, then the
            # SQL result may be like:
            #
            #  +---- +-------------+-------------+-------------+
            #  | No. | project1_id | project2_id | project3_id |
            #  +--- -+-------------+-------------+-------------+
            #  |  1  |  domain_x   |             |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  2  |  project_a  |             |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  3  |  domain_y   |  project_a  |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  4  |  project_b  |  project_c  |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  5  |  domain_y   |  project_b  |  project_c  |
            #  +- ---+-------------+-------------+-------------+
            #
            # `project1_id` column is the root. It is a project or a domain.
            # If `project1_id` is a project, there must exist a line that
            # `project1` is its domain.
            #
            # We got 5 lines here. It includes three scenarios:
            #
            # 1). The No.1 line means there is a domain `domain_x` which has no
            #     children. The depth is 1.
            #
            # 2). The No.2 and No.3 lines mean project `project_a` has no child
            # and its parent is domain `domain_y`. The depth is 2.
            #
            # 3). The No.4 and No.5 lines mean project `project_b` has a child
            #     `project_c` and its parent is domain `domain_y`. The depth is
            #     3. This tree hit the max depth
            #
            # So we can see that if column "project3_id" has value, it means
            # some trees hit the max depth limit.

            for _ in range(max_depth + 1):
                obj_list.append(orm.aliased(Project))

            query = session.query(*obj_list)

            for index in range(max_depth):
                query = query.outerjoin(
                    obj_list[index + 1],
                    obj_list[index].id == obj_list[index + 1].parent_id)
            exceeded_lines = query.filter(
                obj_list[-1].id != expression.null())

            if exceeded_lines:
                return [line[max_depth].id for line in exceeded_lines]
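
The long comment above carries the reasoning; the mechanics hinge on the final filter: obj_list[-1].id != expression.null() renders as IS NOT NULL, so only rows where the deepest alias actually joined (i.e. a parent-child chain of maximum depth exists) survive. A self-contained miniature of the aliased self-outerjoin; the Project model here is a hypothetical cut-down, not keystone's real one:

from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import Session, aliased, declarative_base
from sqlalchemy.sql.expression import null

Base = declarative_base()

class Project(Base):  # hypothetical cut-down of keystone's model
    __tablename__ = 'project'
    id = Column(String(64), primary_key=True)
    parent_id = Column(String(64))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

p1, p2, p3 = aliased(Project), aliased(Project), aliased(Project)
with Session(engine) as session:
    chains = (
        session.query(p1, p2, p3)
        .outerjoin(p2, p1.id == p2.parent_id)
        .outerjoin(p3, p2.id == p3.parent_id)
        # IS NOT NULL: keep only rows where the join reached depth 3
        .filter(p3.id != null())
    )
    print(chains)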
Code example #23
File: __init__.py Project: simonmcc/designate
    def create_tsigkey(self, context, tsigkey):
        """ Create a TSIG Key """

        if tsigkey['algorithm'] not in TSIG_SUPPORTED_ALGORITHMS:
            raise exceptions.NotImplemented('Unsupported algorithm')

        tsigkey_m = models.TsigKey()

        tsigkey_m.update({
            'designate_id': tsigkey['id'],
            'name': tsigkey['name'],
            'algorithm': tsigkey['algorithm'],
            'secret': base64.b64encode(tsigkey['secret'])
        })

        tsigkey_m.save(self.session)

        # NOTE(kiall): Prepare and execute query to install this TSIG Key on
        #              every domain. We use a manual query here since anything
        #              else would be impossibly slow.
        query_select = select([null(),
                               models.Domain.__table__.c.id,
                               "'TSIG-ALLOW-AXFR'",
                               "'%s'" % tsigkey['name']])
        query = InsertFromSelect(models.DomainMetadata.__table__, query_select)

        # NOTE(kiall): A TX is required for, at the least, SQLite.
        self.session.begin()
        self.session.execute(query)
        self.session.commit()
Code example #24
File: nsxv_db.py Project: aaronorosen/vmware-nsx
def allocate_edge_vnic_with_tunnel_index(session, edge_id, network_id):
    """Allocate an available edge vnic with tunnel index to network."""

    # TODO(berlin): temporary solution to let metadata and dhcp use
    # different vnics
    net_list = get_nsxv_internal_network(
        session, constants.InternalEdgePurposes.INTER_EDGE_PURPOSE)
    metadata_net_id = net_list[0]['network_id'] if net_list else None

    with session.begin(subtransactions=True):
        query = session.query(nsxv_models.NsxvEdgeVnicBinding)
        query = query.filter(
            nsxv_models.NsxvEdgeVnicBinding.edge_id == edge_id,
            nsxv_models.NsxvEdgeVnicBinding.network_id == expr.null())
        if metadata_net_id:
            vnic_binding = get_edge_vnic_binding(
                session, edge_id, metadata_net_id)
            if vnic_binding:
                vnic_index = vnic_binding.vnic_index
                query = query.filter(
                    nsxv_models.NsxvEdgeVnicBinding.vnic_index != vnic_index)

        binding = query.first()
        if not binding:
            msg = (_("Failed to allocate one available vnic on edge_id: "
                     ":%(edge_id)s to network_id: %(network_id)s") %
                   {'edge_id': edge_id, 'network_id': network_id})
            LOG.exception(msg)
            raise nsx_exc.NsxPluginException(err_msg=msg)
        binding['network_id'] = network_id
        session.add(binding)
        session.flush()
    return binding
Code example #25
File: InstalledComponentsDB.py Project: ahaupt/DIRAC
  def __init__( self, host = null(), **kwargs ):
    self.hostName = host
    fields = dir( self )

    for key, value in kwargs.iteritems():
      if key in fields and not re.match( '_.*', key ):
        setattr( self, key, value )
Code example #26
File: models.py Project: devcurmudgeon/storyboard
def _story_build_summary_query():
    # first create a subquery for task statuses
    select_items = []
    select_items.append(Story)
    select_items.append(
        expr.case(
            [(func.sum(Task.status.in_(
                ['todo', 'inprogress', 'review'])) > 0,
              'active'),
             ((func.sum(Task.status == 'merged')) > 0, 'merged')],
            else_='invalid'
        ).label('status')
    )
    for task_status in Task.TASK_STATUSES:
        select_items.append(expr.cast(
            func.sum(Task.status == task_status), Integer
        ).label(task_status))
    select_items.append(expr.null().label('task_statuses'))

    result = select(select_items, None,
                    expr.Join(Story, Task, onclause=Story.id == Task.story_id,
                              isouter=True)) \
        .group_by(Story.id) \
        .alias('story_summary')

    return result
Code example #27
    def chromatogram(self):
        """Returns dict with `cutoff` key with ms_intensity_cutoff and
        `scans` key which is a list of all lvl1 scans.

        Each scan is a dict with:
            * id, scan identifier
            * rt, retention time
            * intensity
        """
        scans = []

        assigned_peaks = func.count('*').label('assigned_peaks')
        ap = self.session.query(Peak.scanid, assigned_peaks)
        ap = ap.filter(Peak.assigned_molid != null())
        ap = ap.group_by(Peak.scanid).subquery()

        q = self.session.query(Scan, ap.c.assigned_peaks)
        q = q.filter_by(mslevel=1)
        for scan, assigned_peaks in q.outerjoin(ap,
                                                Scan.scanid == ap.c.scanid):
            scans.append({
                'id': scan.scanid,
                'rt': scan.rt,
                'intensity': scan.basepeakintensity,
                'ap': assigned_peaks or 0
            })

        runInfo = self.runInfo()
        if (runInfo is not None):
            return {'scans': scans, 'cutoff': runInfo.ms_intensity_cutoff}
        else:
            return {'scans': scans, 'cutoff': None}
Code example #28
    def check_project_depth(self, max_depth):
        with sql.session_for_read() as session:
            obj_list = []
            # Using db table self outerjoin to find the project descendants.
            #
            # We'll only outerjoin the project table (max_depth + 1) times to
            # check whether current project tree exceed the max depth limit.
            #
            # Note one more time here is for project act as domain.
            #
            # for example:
            # If max_depth is 2, we will take the outerjoin 3 times, then the
            # SQL result may be like:
            #  +----+-------------+-------------+-------------+-------------+
            #  | No | project1_id | project2_id | project3_id | project4_id |
            #  +----+-------------+-------------+-------------+-------------+
            #  | 1  |  project_a  |             |             |             |
            #  +----+-------------+-------------+-------------+-------------+
            #  | 2  |  domain_x   |  project_a  |             |             |
            #  +----+-------------+-------------+-------------+-------------+
            #  | 3  |  project_b  |  project_c  |             |             |
            #  +----+-------------+-------------+-------------+-------------+
            #  | 4  |  domain_x   |  project_b  |  project_c  |             |
            #  +----+-------------+-------------+-------------+-------------+
            #  | 5  |  project_d  |  project_e  |  project_f  |             |
            #  +----+-------------+-------------+-------------+-------------+
            #  | 6  |  domain_x   |  project_d  |  project_e  |  project_f  |
            #  +----+-------------+-------------+-------------+-------------+
            #
            # project1 is the root. It is a project or a domain. If project1 is
            # a project, there must exist a line that project1 is its domain.
            #
            # we got 6 lines here.
            #
            # 1). the 1, 2 line means project project_a has no child, the depth
            #    is 1.
            # 2). the 3, 4 line means project project_a has a child, the depth
            #    is 2.
            # 3). the 5, 6 line means project project_a has a grandchild, the
            #    depth is 3. this tree hit the max depth.
            # So we can see that if column "project4_id" has value, it means
            # some trees hit the max depth limit.

            outerjoin_obj_number = max_depth + 2
            for _ in range(outerjoin_obj_number):
                obj_list.append(orm.aliased(Project))

            query = session.query(*obj_list)

            outerjoin_count = max_depth + 1
            for index in range(outerjoin_count):
                query = query.outerjoin(
                    obj_list[index + 1],
                    obj_list[index].id == obj_list[index + 1].parent_id)
            exceeded_lines = query.filter(
                obj_list[-1].id != expression.null())

            if exceeded_lines:
                return [line[max_depth + 1].id for line in exceeded_lines]
Code example #29
File: sql.py Project: Boye-Z/123
    def check_project_depth(self, max_depth):
        with sql.session_for_read() as session:
            obj_list = []
            # Using db table self outerjoin to find the project descendants.
            #
            # We'll only outerjoin the project table `max_depth` times to
            # check whether current project tree exceed the max depth limit.
            #
            # For example:
            #
            # If max_depth is 2, we will take the outerjoin 2 times, then the
            # SQL result may be like:
            #
            #  +---- +-------------+-------------+-------------+
            #  | No. | project1_id | project2_id | project3_id |
            #  +--- -+-------------+-------------+-------------+
            #  |  1  |  domain_x   |             |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  2  |  project_a  |             |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  3  |  domain_y   |  project_a  |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  4  |  project_b  |  project_c  |             |
            #  +- ---+-------------+-------------+-------------+
            #  |  5  |  domain_y   |  project_b  |  project_c  |
            #  +- ---+-------------+-------------+-------------+
            #
            # `project1_id` column is the root. It is a project or a domain.
            # If `project1_id` is a project, there must exist a line that
            # `project1` is its domain.
            #
            # We got 5 lines here. It includes three scenarios:
            #
            # 1). The No.1 line means there is a domain `domain_x` which has no
            #     children. The depth is 1.
            #
            # 2). The No.2 and No.3 lines mean project `project_a` has no child
            # and its parent is domain `domain_y`. The depth is 2.
            #
            # 3). The No.4 and No.5 lines mean project `project_b` has a child
            #     `project_c` and its parent is domain `domain_y`. The depth is
            #     3. This tree hit the max depth
            #
            # So we can see that if column "project3_id" has value, it means
            # some trees hit the max depth limit.

            for _ in range(max_depth + 1):
                obj_list.append(orm.aliased(sql_model.Project))

            query = session.query(*obj_list)

            for index in range(max_depth):
                query = query.outerjoin(
                    obj_list[index + 1],
                    obj_list[index].id == obj_list[index + 1].parent_id)
            exceeded_lines = query.filter(obj_list[-1].id != expression.null())

            if exceeded_lines:
                return [line[max_depth].id for line in exceeded_lines]
Code example #30
File: script.py Project: AndyDingley/chellow
    def content():
        sess = None
        try:
            sess = session()

            sites = sess.query(Site).join(SiteEra).join(Era).filter(
                SiteEra.is_physical == true(), or_(
                    Era.finish_date == null(), Era.finish_date >= start_date),
                Era.start_date <= finish_date)
            bffr = StringIO.StringIO()
            zf = zipfile.ZipFile(bffr, 'w')  # must open for writing to use writestr()

            for site in sites:
                for group in site.groups(sess, start_date, finish_date, True):
                    outs = []
                    outs.append(
                        "Site Code, Site Name, Associated Site Codes, "
                        "Sources, Generator Types, From, To,Type,Date," +
                        ','.join(map(str, range(1, 49))))
                    associates = ' '.join(
                        site.code for site in group.sites[1:])
                    source_codes = ' '.join(
                        sorted(set(sup.source.code for sup in group.supplies)))
                    gen_types = ' '.join(
                        sorted(
                            set(
                                sup.generator_type.code for sup in
                                group.supplies
                                if sup.generator_type is not None)))
                    group_start_str = hh_format(group.start_date)
                    group_finish_str = hh_format(group.finish_date)
                    for hh in group.hh_data(sess):
                        hh_start = hh['start_date']
                        if hh_start.hour == 0 and hh_start.minute == 0:
                            outs.append(
                                "\r\n" + ','.join(
                                    '"' + str(val) + '"' for val in
                                    [
                                        site.code, site.name, associates,
                                        source_codes, gen_types,
                                        group_start_str, group_finish_str,
                                        'used',
                                        hh_start.strftime('%Y-%m-%d')]))
                        used_gen_kwh = hh['imp_gen'] - hh['exp_net'] - \
                            hh['exp_gen']
                        used_3p_kwh = hh['imp_3p'] - hh['exp_3p']
                        used_kwh = hh['imp_net'] + used_gen_kwh + used_3p_kwh
                        outs.append(',' + str(round(used_kwh, 2)))
                    zf.writestr(
                        site.code + '_' +
                        group.finish_date.strftime('%Y%m%d%M%H') + '.csv',
                        ''.join(outs))
                    yield bffr.getvalue()
                    bffr.truncate()
        except Exception:  # a bare except would also swallow GeneratorExit here
            yield traceback.format_exc()
        finally:
            if sess is not None:
                sess.close()
Code example #31
File: topic_query.py Project: 13steinj/tildes
    def _attach_visit_data(self) -> 'TopicQuery':
        """Join the data related to the user's last visit to the topic(s)."""
        if self.request.user.track_comment_visits:
            query = self.outerjoin(TopicVisit, and_(
                TopicVisit.topic_id == Topic.topic_id,
                TopicVisit.user == self.request.user,
            ))
            query = query.add_columns(
                TopicVisit.visit_time, TopicVisit.num_comments)
        else:
            # if the user has the feature disabled, just add literal NULLs
            query = self.add_columns(
                null().label('visit_time'),
                null().label('num_comments'),
            )

        return query
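
The null().label(...) trick in the else branch keeps both branches of _attach_visit_data shape-compatible: the extra columns exist either way, just as literal NULLs. The rendered SQL is easy to confirm (assumes the positional select() of SQLAlchemy 1.4+):

from sqlalchemy import select
from sqlalchemy.sql.expression import null

stmt = select(null().label('visit_time'), null().label('num_comments'))
print(stmt)  # SELECT NULL AS visit_time, NULL AS num_comments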
Code example #32
File: asset.py Project: williamchong/oice-server
 def fetch_by_library(cls, library, session=DBSession):
     library_id = library.id
     return session.query(Asset) \
         .filter(Asset.library_id == library_id) \
         .filter(Asset.is_deleted == false()) \
         .filter(Asset.storage != null()) \
         .order_by(Asset.order) \
         .all()
Code example #33
File: scheduled_changes.py Project: waseem18/balrog
    def get(self, sc_id):
        if not self.table.scheduled_changes.select({"sc_id": sc_id}):
            return Response(status=404,
                            response="Scheduled change does not exist")

        try:
            page = int(request.args.get('page', 1))
            limit = int(request.args.get('limit', 100))
            assert page >= 1
        except (ValueError, AssertionError) as msg:
            self.log.warning("Bad input: %s", msg)
            return Response(status=400,
                            response=json.dumps({"exception": str(msg)}))

        offset = limit * (page - 1)
        total_count = self.table.scheduled_changes.history.t.count()\
            .where(self.table.scheduled_changes.history.sc_id == sc_id)\
            .where(self.table.scheduled_changes.history.data_version != null())\
            .execute()\
            .fetchone()[0]

        revisions = self.table.scheduled_changes.history.select(
            where=[
                self.table.scheduled_changes.history.sc_id == sc_id,
                self.table.scheduled_changes.history.data_version != null()
            ],
            limit=limit,
            offset=offset,
            order_by=[self.table.scheduled_changes.history.timestamp.asc()],
        )

        ret = {
            "count": total_count,
            "revisions": [],
        }

        for rev in revisions:
            r = {}
            for k, v in rev.iteritems():
                if k == "data_version":
                    r["sc_data_version"] = v
                else:
                    r[k.replace("base_", "")] = v
            ret["revisions"].append(r)

        return jsonify(ret)
Code example #34
 def filter(self, trans, user, query, column_filter):
     """ Modify query to filter histories by sharing status. """
     if column_filter == "All":
         pass
     elif column_filter:
         if column_filter == "private":
             query = query.filter(
                 self.model_class.users_shared_with == null())
             query = query.filter(self.model_class.importable == false())
         elif column_filter == "shared":
             query = query.filter(
                 self.model_class.users_shared_with != null())
         elif column_filter == "accessible":
             query = query.filter(self.model_class.importable == true())
         elif column_filter == "published":
             query = query.filter(self.model_class.published == true())
     return query
Code example #35
def _create_route_query_sta_epochs(
    session,
    service,
    net,
    sta,
    loc,
    cha,
    like_escape,
):
    return (
        session.query(
            null(),
            null(),
            orm.Station.code,
            orm.Network.code,
            orm.Epoch.starttime,
            orm.Epoch.endtime,
            orm.Routing.starttime,
            orm.Routing.endtime,
            orm.Endpoint.url,
        )
        # XXX(damb): Pay attention to the correct order.
        .select_from(orm.StationEpoch, orm.ChannelEpoch)
        .join(orm.Network, orm.ChannelEpoch.network_ref == orm.Network.id)
        .join(orm.Station, orm.ChannelEpoch.station_ref == orm.Station.id)
        .join(orm.StationEpoch, orm.StationEpoch.station_ref == orm.Station.id)
        .join(orm.Epoch, orm.StationEpoch.epoch_ref == orm.Epoch.id)
        .join(orm.EpochType, orm.Epoch.epochtype_ref == orm.EpochType.id)
        .join(orm.Routing, orm.Routing.epoch_ref == orm.Epoch.id)
        .join(orm.Endpoint, orm.Routing.endpoint_ref == orm.Endpoint.id)
        .join(orm.Service, orm.Endpoint.service_ref == orm.Service.id)
        .filter(orm.Network.code.like(net, escape=like_escape))
        .filter(orm.Station.code.like(sta, escape=like_escape))
        .filter(orm.ChannelEpoch.code.like(cha, escape=like_escape))
        .filter(orm.ChannelEpoch.locationcode.like(loc, escape=like_escape))
        .filter(orm.Service.name == service)
        .filter(orm.EpochType.type == Epoch.STATION)
        .distinct())
Code example #36
 def _network_filter_hook(self, context, original_model, conditions):
     if conditions is not None and not hasattr(conditions, '__iter__'):
         conditions = (conditions, )
     # Apply the external network filter only in non-admin context
     if not context.is_admin and hasattr(original_model, 'tenant_id'):
         conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
                               *conditions)
     return conditions
Code example #37
File: script.py Project: AndyDingley/chellow
def content():
    sess = None
    try:
        sess = db.session()

        contract = Contract.get_mop_by_id(sess, contract_id)

        forecast_date = computer.forecast_date()

        yield 'Import MPAN Core, Export MPAN Core, Start Date, Finish Date'
        bill_titles = computer.contract_func(
            caches, contract, 'virtual_bill_titles', None)()
        for title in bill_titles:
            yield ',' + title
        yield '\n'

        for era in sess.query(Era).filter(
                or_(Era.finish_date == null(), Era.finish_date >= start_date),
                Era.start_date <= finish_date,
                Era.mop_contract_id == contract.id).order_by(Era.supply_id):
            import_mpan_core = era.imp_mpan_core
            if import_mpan_core is None:
                import_mpan_core_str = ''
            else:
                mpan_core = import_mpan_core
                is_import = True
                import_mpan_core_str = mpan_core

            export_mpan_core = era.exp_mpan_core
            if export_mpan_core is None:
                export_mpan_core_str = ''
            else:
                is_import = False
                mpan_core = export_mpan_core
                export_mpan_core_str = mpan_core

            yield import_mpan_core_str + ',' + export_mpan_core_str + ',' + \
                hh_format(start_date) + ',' + hh_format(finish_date) + ','
            supply_source = computer.SupplySource(
                sess, start_date, finish_date, forecast_date, era, is_import,
                None, caches)
            computer.contract_func(
                caches, contract, 'virtual_bill', None)(supply_source)
            bill = supply_source.mop_bill
            for title in bill_titles:
                if title in bill:
                    yield '"' + str(bill[title]) + '",'
                    del bill[title]
                else:
                    yield ','
            for k in sorted(bill.keys()):
                yield ',"' + k + '","' + str(bill[k]) + '"'
            yield '\n'
    except Exception:  # a bare except would also swallow GeneratorExit here
        yield traceback.format_exc()
    finally:
        if sess is not None:
            sess.close()
Code example #38
File: g_engine.py Project: cavenhe/chellow
def g_rates(sess, caches, g_contract_id, date):
    try:
        return caches['g_engine']['rates'][g_contract_id][date]
    except KeyError:
        try:
            ccache = caches['g_engine']
        except KeyError:
            ccache = caches['g_engine'] = {}

        try:
            rss_cache = ccache['rates']
        except KeyError:
            rss_cache = ccache['rates'] = {}

        try:
            cont_cache = rss_cache[g_contract_id]
        except KeyError:
            cont_cache = rss_cache[g_contract_id] = {}

        try:
            return cont_cache[date]
        except KeyError:
            month_after = date + relativedelta(months=1) + relativedelta(
                days=1)
            month_before = date - relativedelta(months=1) - relativedelta(
                days=1)

            rs = sess.query(GRateScript).filter(
                GRateScript.g_contract_id == g_contract_id,
                GRateScript.start_date <= date,
                or_(GRateScript.finish_date == null(),
                    GRateScript.finish_date >= date)).first()

            if rs is None:
                rs = sess.query(GRateScript).filter(
                    GRateScript.g_contract_id == g_contract_id).order_by(
                        GRateScript.start_date.desc()).first()
                if date < rs.start_date:
                    cstart = month_before
                    cfinish = min(month_after, rs.start_date - HH)
                else:
                    cstart = max(rs.finish_date + HH, month_before)
                    cfinish = month_after
            else:
                cstart = max(rs.start_date, month_before)
                if rs.finish_date is None:
                    cfinish = month_after
                else:
                    cfinish = min(rs.finish_date, month_after)

            vals = PropDict(
                "the local rate script for contract " + str(g_contract_id) +
                " at " + hh_format(cstart) + ".", loads(rs.script), [])
            for dt in hh_range(caches, cstart, cfinish):
                if dt not in cont_cache:
                    cont_cache[dt] = vals

            return vals
Code example #39
File: external_net_db.py Project: ytwxy99/neutron
 def _network_filter_hook(self, context, original_model, conditions):
     if conditions is not None and not hasattr(conditions, '__iter__'):
         conditions = (conditions, )
     # Apply the external network filter only in non-admin and non-advsvc
     # context
     if self.model_query_scope(context, original_model):
         conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
                               *conditions)
     return conditions
Code example #40
 def _sync_router_backlog(self):
     LOG.info(_LI('Synchronizing router (scheduling) backlog'))
     context = n_context.get_admin_context()
     query = context.session.query(l3_models.RouterHostingDeviceBinding)
     query = query.options(joinedload('router'))
     query = query.filter(l3_models.RouterHostingDeviceBinding.
                          hosting_device_id == expr.null())
     self._backlogged_routers = set(binding.router_id for binding in query)
     self._refresh_router_backlog = False
Code example #41
File: external_net_db.py Project: zlzlnet/neutron
 def _network_filter_hook(self, context, original_model, conditions):
     if conditions is not None and not hasattr(conditions, '__iter__'):
         conditions = (conditions, )
     # Apply the external network filter only in non-admin and non-advsvc
     # context
     if self.model_query_scope(context, original_model):
         conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
                               *conditions)
     return conditions
Code example #42
def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    password_table = sql.Table('password', meta, autoload=True)
    with migrate_engine.begin() as conn:
        stmt = password_table.update().where(
            password_table.c.password_hash == expression.null()).values(
            {'password_hash': password_table.c.password})
        conn.execute(stmt)
Code example #43
File: topic_query.py Project: talhadar90/bawajee
    def _attach_visit_data(self) -> "TopicQuery":
        """Join the data related to the user's last visit to the topic(s)."""
        # pylint: disable=assignment-from-no-return
        if self.request.user.track_comment_visits:
            query = self.outerjoin(
                TopicVisit,
                and_(
                    TopicVisit.topic_id == Topic.topic_id,
                    TopicVisit.user == self.request.user,
                ),
            )
            query = query.add_columns(TopicVisit.visit_time,
                                      TopicVisit.num_comments)
        else:
            # if the user has the feature disabled, just add literal NULLs
            query = self.add_columns(null().label("visit_time"),
                                     null().label("num_comments"))

        return query
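The `null().label(...)` trick keeps both branches returning the same columns, so downstream code can read `visit_time` and `num_comments` without caring whether the join happened. A minimal standalone sketch:

from sqlalchemy import select
from sqlalchemy.sql import null

stmt = select([null().label("visit_time"), null().label("num_comments")])
print(stmt)  # SELECT NULL AS visit_time, NULL AS num_comments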
Code example #44
    def _get_total_available_slots(self, context, template_id, capacity):
        """Returns available slots in idle devices based on <template_id>.

        Only slots in tenant-unbound hosting devices are counted to ensure
        there are always hosting device slots available regardless of tenant.
        """
        query = context.session.query(hd_models.HostingDevice.id)
        query = query.outerjoin(
            hd_models.SlotAllocation, hd_models.HostingDevice.id ==
            hd_models.SlotAllocation.hosting_device_id)
        query = query.filter(
            hd_models.HostingDevice.template_id == template_id,
            hd_models.HostingDevice.admin_state_up == expr.true(),
            hd_models.HostingDevice.tenant_bound == expr.null())
        query = query.group_by(hd_models.HostingDevice.id)
        query = query.having(
            func.sum(hd_models.SlotAllocation.num_allocated) == expr.null())
        num_hosting_devices = query.count()
        return num_hosting_devices * capacity
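The `HAVING sum(...) IS NULL` clause works because of the outer join: a hosting device with no SlotAllocation rows contributes only NULLs to the sum, and SUM over nothing but NULLs is NULL. A standalone SQLite sketch of the same idle-parent test (table names invented):

from sqlalchemy import (Column, ForeignKey, Integer, MetaData, Table,
                        create_engine, func, select)
from sqlalchemy.sql import expression as expr

meta = MetaData()
devices = Table('devices', meta, Column('id', Integer, primary_key=True))
slots = Table('slots', meta,
              Column('id', Integer, primary_key=True),
              Column('device_id', Integer, ForeignKey('devices.id')),
              Column('num_allocated', Integer))

engine = create_engine('sqlite://')
meta.create_all(engine)
with engine.begin() as conn:
    conn.execute(devices.insert(), [{'id': 1}, {'id': 2}])
    conn.execute(slots.insert(), [{'device_id': 1, 'num_allocated': 3}])
    q = (select([devices.c.id])
         .select_from(devices.outerjoin(
             slots, devices.c.id == slots.c.device_id))
         .group_by(devices.c.id)
         .having(func.sum(slots.c.num_allocated) == expr.null()))
    print([row[0] for row in conn.execute(q)])  # [2] -> device 2 is idle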
Code example #45
 def _sync_router_backlog(self):
     LOG.info(_LI('Synchronizing router (scheduling) backlog'))
     context = n_context.get_admin_context()
     query = context.session.query(l3_models.RouterHostingDeviceBinding)
     query = query.options(joinedload('router'))
     query = query.filter(
         l3_models.RouterHostingDeviceBinding.hosting_device_id ==
         expr.null())
     self._backlogged_routers = set(binding.router_id for binding in query)
     self._refresh_router_backlog = False
Code example #46
File: Setting.py Project: Synerty/peek-plugin-inbox
 def _case(self):
     pairs = set(self.cls.type_map.values())
     whens = [
         (
             literal_column("'%s'" % discriminator),
             cast(getattr(self.cls, attribute), String)
         ) for attribute, discriminator in pairs
         if attribute is not None
     ]
     return case(whens, self.cls.type, null())
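The three-argument form used here is the legacy `case(whens, value, else_)` calling convention from SQLAlchemy 1.x (the signature changed in 2.0); `value` is compared against each when-key. A minimal sketch:

from sqlalchemy import String, case, cast, column, literal_column
from sqlalchemy.sql import null

type_col = column('type')
int_col = column('int_value')

whens = [(literal_column("'int'"), cast(int_col, String))]
# prints roughly:
# CASE type WHEN 'int' THEN CAST(int_value AS VARCHAR) ELSE NULL END
print(case(whens, type_col, null()))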
Code example #47
def upgrade(migrate_engine):
    meta = sql.MetaData()
    meta.bind = migrate_engine

    password_table = sql.Table('password', meta, autoload=True)
    with migrate_engine.begin() as conn:
        stmt = password_table.update().where(
            password_table.c.password_hash == expression.null()).values(
                {'password_hash': password_table.c.password})
        conn.execute(stmt)
Code example #48
    def _addFilter2MoleculesQuery(self,
                                  query,
                                  sorts=None,
                                  scanid=None,
                                  filters=None,
                                  mz=None):
        filters = filters or []
        q = query

        # custom filters
        fragal = aliased(Fragment)
        if (scanid is not None):
            # TODO: add score column + order by score
            q = q.add_columns(fragal.score, fragal.deltappm, fragal.mz)
            q = q.join(fragal.molecule)
            q = q.filter(fragal.parentfragid == 0)
            q = q.filter(fragal.scanid == scanid)
            if mz is not None:
                q = q.filter(fragal.mz == mz)

        if scanid is None and mz is not None:
            raise ScanRequiredError()

        # add assigned column
        assigned = func.count('*').label('assigned')
        assign_q = self.session.query(Peak.assigned_molid, assigned)
        assign_q = assign_q.filter(Peak.assigned_molid != null())
        assign_q = assign_q.group_by(Peak.assigned_molid).subquery()
        q = q.add_columns(assign_q.c.assigned).\
            outerjoin(assign_q, Molecule.molid == assign_q.c.assigned_molid)

        for afilter in filters:
            if afilter['field'] == 'assigned':
                col = assign_q.c.assigned
                afilter['type'] = 'null'
            elif afilter['field'] == 'score':
                if scanid is not None:
                    col = fragal.score
                else:
                    raise ScanRequiredError()
            elif afilter['field'] == 'deltappm':
                if scanid is not None:
                    col = fragal.deltappm
                else:
                    raise ScanRequiredError()
            else:
                # generic filters
                ffield = afilter['field']
                col = Molecule.__dict__[ffield]  # @UndefinedVariable
            q = self.extjsgridfilter(q, col, afilter)

        q = self._addSortingToMoleculesQuery(sorts, scanid, q, fragal,
                                             assign_q)

        return q
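The `assigned` column comes from a grouped count in a subquery that is outer-joined back onto the main query, so molecules with no assigned peaks surface as NULL; that is also why the 'assigned' grid filter is rewritten to type 'null' above. A standalone SQLite sketch of the pattern (table names invented):

from sqlalchemy import (Column, Integer, MetaData, Table, create_engine,
                        func, select)
from sqlalchemy.sql import null

meta = MetaData()
molecules = Table('molecules', meta,
                  Column('molid', Integer, primary_key=True))
peaks = Table('peaks', meta,
              Column('id', Integer, primary_key=True),
              Column('assigned_molid', Integer))

engine = create_engine('sqlite://')
meta.create_all(engine)
with engine.begin() as conn:
    conn.execute(molecules.insert(), [{'molid': 1}, {'molid': 2}])
    conn.execute(peaks.insert(), [{'assigned_molid': 1},
                                  {'assigned_molid': 1},
                                  {'assigned_molid': None}])
    sub = (select([peaks.c.assigned_molid,
                   func.count('*').label('assigned')])
           .where(peaks.c.assigned_molid != null())
           .group_by(peaks.c.assigned_molid)
           .alias('a'))
    q = (select([molecules.c.molid, sub.c.assigned])
         .select_from(molecules.outerjoin(
             sub, molecules.c.molid == sub.c.assigned_molid)))
    print(conn.execute(q).fetchall())  # [(1, 2), (2, None)]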
Code example #49
File: views.py Project: sde1000/quicktill
def pubroot(request, info, session):
    date = datetime.date.today()
    # If it's the early hours of the morning, it's more useful for us
    # to consider it still to be yesterday.
    if datetime.datetime.now().hour < 4:
        date = date - datetime.timedelta(1)
    thisweek_start = date - datetime.timedelta(date.weekday())
    thisweek_end = thisweek_start + datetime.timedelta(6)
    lastweek_start = thisweek_start - datetime.timedelta(7)
    lastweek_end = thisweek_end - datetime.timedelta(7)
    weekbefore_start = lastweek_start - datetime.timedelta(7)
    weekbefore_end = lastweek_end - datetime.timedelta(7)

    weeks = [
        ("Current week", thisweek_start, thisweek_end, business_totals(session, thisweek_start, thisweek_end)),
        ("Last week", lastweek_start, lastweek_end, business_totals(session, lastweek_start, lastweek_end)),
        (
            "The week before last",
            weekbefore_start,
            weekbefore_end,
            business_totals(session, weekbefore_start, weekbefore_end),
        ),
    ]

    currentsession = Session.current(session)
    barsummary = (
        session.query(StockLine)
        .filter(StockLine.location == "Bar")
        .order_by(StockLine.dept_id, StockLine.name)
        .options(joinedload_all("stockonsale.stocktype.unit"))
        .options(undefer_group("qtys"))
        .all()
    )
    stillage = (
        session.query(StockAnnotation)
        .join(StockItem)
        .outerjoin(StockLine)
        .filter(
            tuple_(StockAnnotation.text, StockAnnotation.time).in_(
                select(
                    [StockAnnotation.text, func.max(StockAnnotation.time)], StockAnnotation.atype == "location"
                ).group_by(StockAnnotation.text)
            )
        )
        .filter(StockItem.finished == None)
        .order_by(StockLine.name != null(), StockAnnotation.time)
        .options(joinedload_all("stockitem.stocktype.unit"))
        .options(joinedload_all("stockitem.stockline"))
        .options(undefer_group("qtys"))
        .all()
    )
    return (
        "index.html",
        {"currentsession": currentsession, "barsummary": barsummary, "stillage": stillage, "weeks": weeks},
    )
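One subtlety above: `order_by(StockLine.name != null(), StockAnnotation.time)` sorts on a boolean expression, and since false orders before true, rows whose outer-joined stock line name IS NULL (items not currently on sale) float to the top of the stillage list. A tiny sketch of the rendered sort key:

from sqlalchemy import column
from sqlalchemy.sql import null

# prints: name IS NOT NULL -- usable as an ORDER BY key; rows where the
# outer-joined name is NULL evaluate false and therefore sort first
print(column('name') != null())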
Code example #50
class UserArtist(db.Model):
    user_id = Column(Integer,
                     ForeignKey('user.id',
                                onupdate="CASCADE",
                                ondelete="CASCADE"),
                     primary_key=True)

    mbid = Column(String(36),
                  ForeignKey('artist.mbid',
                             onupdate="CASCADE",
                             ondelete="CASCADE",
                             deferrable=True,
                             initially="DEFERRED"),
                  primary_key=True)
    name = Column(String(512), nullable=False)
    sort_name = Column(String(512), nullable=False)
    disambiguation = Column(String(512), nullable=False)
    art = Column(String(100),
                 nullable=True,
                 server_default=expression.null(),
                 default=None)
    date_updated = Column(DateTime(True),
                          nullable=True,
                          server_default=expression.null(),
                          default=None)
    apple_music_link = Column(String(), nullable=True)
    spotify_link = Column(String(), nullable=True)

    date_followed = Column(DateTime(True),
                           nullable=False,
                           server_default=func.now(),
                           default=func.now())
    follow_method = Column(Enum(ImportMethod))
    following = Column(Boolean(),
                       server_default=expression.false(),
                       default=True,
                       index=True)

    user = relationship(User, lazy=True, uselist=False)

    def __repr__(self):
        return '<UserArtist {} - {}>'.format(self.user_id, self.mbid)
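An explicit `server_default=expression.null()` puts `DEFAULT NULL` into the emitted DDL rather than leaving the default implicit. A standalone sketch showing the generated CREATE TABLE for a throwaway table:

from sqlalchemy import Column, DateTime, MetaData, Table
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql import expression

meta = MetaData()
t = Table('t', meta,
          Column('date_updated', DateTime(True), nullable=True,
                 server_default=expression.null()))

# prints roughly: CREATE TABLE t (date_updated DATETIME DEFAULT NULL)
print(CreateTable(t))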
Code example #51
def _get_filters(obj, history_table):
    query = get_input_dict()
    where = [getattr(history_table, f) == query.get(f) for f in query]
    where.append(history_table.data_version != null())
    if hasattr(history_table, "product"):
        where.append(history_table.product != null())
        if request.args.get("product"):
            where.append(history_table.product == request.args.get("product"))
    if hasattr(history_table, "channel"):
        where.append(history_table.channel != null())
        if request.args.get("channel"):
            where.append(history_table.channel == request.args.get("channel"))
    if request.args.get("timestamp_from"):
        where.append(
            history_table.timestamp >= int(request.args.get("timestamp_from")))
    if request.args.get("timestamp_to"):
        where.append(
            history_table.timestamp <= int(request.args.get("timestamp_to")))
    return where
Code example #52
File: manager.py Project: xingyongma/galaxy
 def __check_jobs_at_startup(self):
     if self.app.job_config.use_messaging:
         jobs_at_startup = self.app.model.context.query(Job).enable_eagerloads(False) \
             .filter((Job.state == Job.states.NEW) & (Job.handler == null())).all()
         if jobs_at_startup:
             log.info(
                 'No handler assigned at startup for the following jobs, will dispatch via message: %s',
                 ', '.join([str(j.id) for j in jobs_at_startup]))
         for job in jobs_at_startup:
             tool = self.app.toolbox.get_tool(job.tool_id, job.tool_version, exact=True)
             self.enqueue(job, tool)
Code example #53
def _update_user_domain_id(migrate_engine, user_table, child_user_table):
    join = sql.join(user_table, child_user_table,
                    user_table.c.id == child_user_table.c.user_id)
    where = user_table.c.domain_id == expression.null()
    sel = (sql.select([user_table.c.id, child_user_table.c.domain_id
                       ]).select_from(join).where(where))
    with migrate_engine.begin() as conn:
        for user in conn.execute(sel):
            values = {'domain_id': user['domain_id']}
            stmt = user_table.update().where(
                user_table.c.id == user['id']).values(values)
            conn.execute(stmt)
Code example #54
    def _get_inline_comments_query(self, repo_id, revision, pull_request):
        # TODO: johbo: Split this into two methods: One for PR and one for
        # commit.
        if revision:
            q = Session().query(ChangesetComment).filter(
                ChangesetComment.repo_id == repo_id,
                ChangesetComment.line_no != null(),
                ChangesetComment.f_path != null(),
                ChangesetComment.revision == revision)

        elif pull_request:
            pull_request = self.__get_pull_request(pull_request)
            if ChangesetCommentsModel.use_outdated_comments(pull_request):
                q = self._visible_inline_comments_of_pull_request(pull_request)
            else:
                q = self._all_inline_comments_of_pull_request(pull_request)

        else:
            raise Exception('Please specify commit or pull_request_id')
        q = q.order_by(ChangesetComment.comment_id.asc())
        return q
Code example #55
def _make_eras(sess, nov_start, year_finish, supply_id):
    eras = (sess.query(Era).join(Supply).join(Source).join(Pc).filter(
        Era.start_date <= year_finish,
        or_(Era.finish_date == null(), Era.finish_date >= nov_start),
        Source.code.in_(("net", "gen-net")),
        Pc.code == "00",
    ).order_by(Supply.id))

    if supply_id is not None:
        eras = eras.filter(Supply.id == supply_id)

    return eras
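The `or_(Era.finish_date == null(), Era.finish_date >= nov_start)` pair is the usual overlap test for periods with an open end: an era overlaps the window iff it starts on or before the window's end and either never finishes or finishes on or after the window's start. The same predicate in plain Python:

def era_overlaps(start, finish, win_start, win_end):
    # finish is None for an ongoing (open-ended) era
    return start <= win_end and (finish is None or finish >= win_start)

assert era_overlaps(1, None, 5, 10)   # ongoing era overlaps any later window
assert not era_overlaps(1, 3, 5, 10)  # finished before the window starts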
Code example #56
 def _get_filters(self):
     query = get_input_dict()
     where = [getattr(self.table.history, f) == query.get(f) for f in query]
     where.append(self.table.history.data_version != null())
     request = connexion.request
     if hasattr(self.history_table, "channel"):
         if request.args.get("channel"):
             where.append(
                 self.history_table.channel == request.args.get("channel"))
     if hasattr(self.history_table, "product"):
         where.append(self.history_table.product != null())
         if request.args.get("product"):
             where.append(
                 self.history_table.product == request.args.get("product"))
     if request.args.get("timestamp_from"):
         where.append(self.history_table.timestamp >= int(
             request.args.get("timestamp_from")))
     if request.args.get("timestamp_to"):
         where.append(self.history_table.timestamp <= int(
             request.args.get("timestamp_to")))
     return where
Code example #57
    def null_empty_string(cls, session: Type[sessionmaker], field: Type[Any] = amount_damage) -> None:
        """
        Fields to be cast to Numeric need to be NULLed first instead of left as an empty string, because CAST
        behaves differently under the hood in SELECT than it does in UPDATE queries; the UPDATE otherwise raises an
        "Incorrect DECIMAL value: '0' for column '' at row -1" error.
        This update uses the ORM to run the query immediately against the passed session.

        :param session: sqlalchemy session object to talk to the database
        :param field: Field that needs to be NULLed
        :return: None
        """
        session.query(cls).filter(field == "").update({field: sase.null()}, synchronize_session=False)
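The `sase` alias above presumably points at `sqlalchemy.sql.expression`; the runnable standalone sketch below mirrors it, with an invented `Incident` model, to show the same empty-string-to-NULL bulk update end to end:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import expression as sase

Base = declarative_base()

class Incident(Base):  # hypothetical model, not from the source project
    __tablename__ = 'incident'
    id = Column(Integer, primary_key=True)
    amount_damage = Column(String(32))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Incident(amount_damage=''),
                 Incident(amount_damage='12.50')])
session.commit()

# bulk UPDATE ... SET amount_damage = NULL WHERE amount_damage = ''
session.query(Incident).filter(Incident.amount_damage == '').update(
    {Incident.amount_damage: sase.null()}, synchronize_session=False)
session.commit()
print(session.query(Incident.amount_damage).all())  # [(None,), ('12.50',)]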
Code example #58
 def list_all_routers_on_hosting_devices(self, context):
     query = context.session.query(
         l3_models.RouterHostingDeviceBinding.router_id)
     query = query.filter(l3_models.RouterHostingDeviceBinding.
                          hosting_device_id != expr.null())
     router_ids = [item[0] for item in query]
     if router_ids:
         return self.get_sync_data_ext(context,
                                       router_ids=router_ids,
                                       active=True)
     else:
         return []
Code example #59
 def get_prev_id(self, value, change_id):
     if value:
         release_name = value["name"]
         table = self.table.history
         old_revision = table.select(
             where=[table.name == release_name,
                    table.change_id < change_id,
                    table.data_version != null()],
             limit=1,
             order_by=[table.timestamp.desc()])
         if len(old_revision) > 0:
             return old_revision[0]["change_id"]
Code example #60
def upgrade():
    # Refer http://alembic.zzzcomputing.com/en/latest/ops.html#alembic.operations.Operations.alter_column
    # MySQL can't ALTER a column without a full spec.
    # So including existing_type, existing_server_default, and existing_nullable
    for table_name in _table_names:
        with op.batch_alter_table(table_name,
                                  naming_convention=convention) as batch_op:
            batch_op.alter_column('pairing_algorithm',
                                  type_=EnumType(_NewPairingAlgorithm),
                                  existing_type=EnumType(_OldPairingAlgorithm),
                                  existing_server_default=null(),
                                  existing_nullable=True)