Example No. 1
def purge_datasets( app, cutoff_time, remove_from_disk, info_only=False, force_retry=False ):
    # Purges deleted datasets whose update_time is older than cutoff_time.  Files may or may
    # not be removed from disk.
    dataset_count = 0
    disk_space = 0
    start = time.time()
    if force_retry:
        datasets = app.sa_session.query( app.model.Dataset ) \
                                 .filter( and_( app.model.Dataset.table.c.deleted == true(),
                                                app.model.Dataset.table.c.purgable == true(),
                                                app.model.Dataset.table.c.update_time < cutoff_time ) )
    else:
        datasets = app.sa_session.query( app.model.Dataset ) \
                                 .filter( and_( app.model.Dataset.table.c.deleted == true(),
                                                app.model.Dataset.table.c.purgable == true(),
                                                app.model.Dataset.table.c.purged == false(),
                                                app.model.Dataset.table.c.update_time < cutoff_time ) )
    for dataset in datasets:
        file_size = dataset.file_size
        _purge_dataset( app, dataset, remove_from_disk, info_only=info_only )
        dataset_count += 1
        try:
            disk_space += file_size
        except TypeError:
            # file_size may be None
            pass
    stop = time.time()
    print('Purged %d datasets' % dataset_count)
    if remove_from_disk:
        print('Freed disk space: ', disk_space)
    print("Elapsed time: ", stop - start)
    print("##########################################")
Example No. 2
def get_users(
    number: int = 10,
    offset: int = 0,
    matches_all: Optional[Dict[str, Any]] = None,
    matches_any: Optional[Dict[str, Any]] = None,
    store: Optional[Store] = None,
) -> Tuple[List[User], int]:
    # Avoid the mutable-default-argument pitfall: fall back to fresh dicts here.
    matches_all = matches_all or {}
    matches_any = matches_any or {}
    query = store.session.query(User)
    query = query.outerjoin(InstitutionAssociation)
    query = query.outerjoin(Institution)
    query = query.order_by(User.id.asc())

    searches = {
        'bio': lambda a: User.bio.ilike('%{}%'.format(a)),
        'created_after': lambda a: User.date_created > a,
        'created_before': lambda a: User.date_created < a,
        'institution': lambda a: Institution.name.ilike('%{}%'.format(a)),
        'name': lambda a: User.name.ilike('%{}%'.format(a)),
    }

    filter_all = true()
    filter_any = false()

    for name, value in matches_all.items():
        filter_all = filter_all & searches.get(name, lambda _: true())(value)

    for name, value in matches_any.items():
        filter_any = filter_any | searches.get(name, lambda _: false())(value)

    # false() is a singleton, so the identity check detects whether any
    # matches_any filter was actually accumulated.
    query = query.filter(filter_all & filter_any if filter_any is not false() else filter_all)
    query = query.distinct()
    count = query.count()
    query = query.limit(number).offset(offset)

    # Materialize so the return value matches the declared Tuple[List[User], int].
    return query.all(), count
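A hedged usage sketch for the function above (the store instance and the concrete
User field values are assumptions drawn from the snippet itself, not from its project):

from datetime import datetime

# Up to 20 users whose name contains "smith" AND who registered after 2020,
# at any institution whose name contains "MIT".
users, total = get_users(
    number=20,
    matches_all={'name': 'smith', 'created_after': datetime(2020, 1, 1)},
    matches_any={'institution': 'MIT'},
    store=store,
)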
Example No. 3
def purge_libraries(app, cutoff_time, remove_from_disk, info_only=False, force_retry=False):
    # Purges deleted libraries whose update_time is older than the cutoff_time.
    # The dataset associations of each library are also marked as deleted;
    # the purge_datasets method will then purge each dataset as necessary.
    # library.purged == True simply means the library can no longer be undeleted,
    # i.e. all associated LibraryDatasets/folders are marked as deleted.
    library_count = 0
    start = time.time()
    if force_retry:
        libraries = app.sa_session.query(app.model.Library) \
                                  .filter(and_(app.model.Library.table.c.deleted == true(),
                                               app.model.Library.table.c.update_time < cutoff_time))
    else:
        libraries = app.sa_session.query(app.model.Library) \
                                  .filter(and_(app.model.Library.table.c.deleted == true(),
                                               app.model.Library.table.c.purged == false(),
                                               app.model.Library.table.c.update_time < cutoff_time))
    for library in libraries:
        _purge_folder(library.root_folder, app, remove_from_disk, info_only=info_only)
        if not info_only:
            print("Purging library id ", library.id)
            library.purged = True
            app.sa_session.add(library)
            app.sa_session.flush()
        library_count += 1
    stop = time.time()
    print('# Purged %d libraries.' % library_count)
    print("Elapsed time: ", stop - start)
    print("##########################################")
Example No. 4
  def _get_tasks_in_cycle(model):
    """Filter tasks with particular statuses and cycle.

    Filtering tasks with statuses "Assigned", "InProgress" and "Finished".
    Where the task is in current users cycle.
    """
    task_query = db.session.query(
        model.id.label('id'),
        literal(model.__name__).label('type'),
        literal(None).label('context_id'),
    ).join(
        Cycle,
        Cycle.id == model.cycle_id
    ).filter(
        Cycle.is_current == true(),
        model.contact_id == contact_id
    )
    return task_query.filter(
        Cycle.is_verification_needed == true(),
        model.status.in_([
            all_models.CycleTaskGroupObjectTask.ASSIGNED,
            all_models.CycleTaskGroupObjectTask.IN_PROGRESS,
            all_models.CycleTaskGroupObjectTask.FINISHED,
            all_models.CycleTaskGroupObjectTask.DECLINED,
        ])
    ).union_all(
        task_query.filter(
            Cycle.is_verification_needed == false(),
            model.status.in_([
                all_models.CycleTaskGroupObjectTask.ASSIGNED,
                all_models.CycleTaskGroupObjectTask.IN_PROGRESS,
            ])
        )
    )
Example No. 5
def stats():
    """Show some devices stats."""
    sq_nt = session.query(Device.address) \
        .filter(and_(Device.tracked == false(), Device.identified == true())) \
        .subquery()

    sq_ni = session.query(Device.address) \
        .filter(and_(Device.tracked == true(), Device.identified == false())) \
        .subquery()

    sq_ntni = session.query(Device.address) \
        .filter(and_(Device.tracked == false(), Device.identified == false())) \
        .subquery()

    query = session.query(Device.address_origin,
                          func.count(Device.id),
                          func.count(sq_nt.c.address),
                          func.count(sq_ni.c.address),
                          func.count(sq_ntni.c.address)) \
        .outerjoin(sq_nt, sq_nt.c.address == Device.address) \
        .outerjoin(sq_ni, sq_ni.c.address == Device.address) \
        .outerjoin(sq_ntni, sq_ntni.c.address == Device.address) \
        .group_by(Device.address_origin)

    print('--- Devices ---')
    for [address_origin, device_count, nt_count, ni_count, ntni_count] in query.all():
        print('{:12s} Total:{:5d} - not tracked:{:3d}, not identified:{:3d}, not tracked & not identified: {:3d}'
              .format(AddressOrigin(address_origin).name,
                      device_count,
                      nt_count,
                      ni_count,
                      ntni_count))
Example No. 6
    def test_is_boolean_symbols_despite_no_native(self):
        is_(
            testing.db.scalar(select([cast(true().is_(true()), Boolean)])),
            True,
        )

        is_(
            testing.db.scalar(select([cast(true().isnot(true()), Boolean)])),
            False,
        )

        is_(
            testing.db.scalar(select([cast(false().is_(false()), Boolean)])),
            True,
        )
Example No. 7
 def invalid_query(self, session, **kw):
     return session.query(Languoid)\
         .filter_by(active=True, level=LanguoidLevel.family)\
         .filter(~Languoid.children.any(and_(
             Languoid.active == true(),
             Languoid.level.in_([LanguoidLevel.family, LanguoidLevel.language]))))\
         .order_by(Languoid.id)
Example No. 8
def macroareas(args, languages, stats):
    ma_map = get_map(Macroarea)

    # We store references to languages to make computing cumulative macroareas
    # for families easier.
    lang_map = {}
    for hid, info in get_lginfo(args, lambda x: x.macro_area):
        if hid not in languages:
            languages[hid] = Languoid.get(hid, key='hid', default=None)
        if not languages[hid]:
            continue
        lang_map[languages[hid].pk] = languages[hid]
        a, r = update_relationship(languages[hid].macroareas, [ma_map[info.macro_area]])
        if a or r:
            stats.update(['macroarea'])

    for family in DBSession.query(Languoid)\
            .filter(Languoid.level == LanguoidLevel.family)\
            .filter(Language.active == true()):
        mas = []
        for lang in DBSession.query(TreeClosureTable.child_pk)\
                .filter(TreeClosureTable.parent_pk == family.pk):
            if lang[0] in lang_map:
                mas.extend(lang_map[lang[0]].macroareas)
        a, r = update_relationship(family.macroareas, mas)
        if a or r:
            stats.update(['macroarea'])
    args.log.info('macroareas done')
Example No. 9
 def index( self, trans, deleted='False', f_email=None, **kwd ):
     """
     GET /api/users
     GET /api/users/deleted
     Displays a collection (list) of users.
     """
     rval = []
     query = trans.sa_session.query( trans.app.model.User )
     deleted = util.string_as_bool( deleted )
     if f_email:
         query = query.filter(trans.app.model.User.email.like("%%%s%%" % f_email))
     if deleted:
         query = query.filter( trans.app.model.User.table.c.deleted == true() )
         # only admins can see deleted users
         if not trans.user_is_admin():
             return []
     else:
         query = query.filter( trans.app.model.User.table.c.deleted == false() )
         # special case: user can see only their own user
         # special case2: if the galaxy admin has specified that other user email/names are
         #   exposed, we don't want special case #1
         if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
             item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
             return [item]
     for user in query:
         item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
         # If NOT configured to expose_email, do not expose email UNLESS the user is self, or
         # the user is an admin
         if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin():
             del item['username']
         if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin():
             del item['email']
         # TODO: move into api_values
         rval.append( item )
     return rval
Example No. 10
def glottologmeta(request):
    q = DBSession.query(Languoid)\
        .filter(Language.active == true())\
        .filter(Languoid.status.in_(
            (LanguoidStatus.established, LanguoidStatus.unattested)))
    qt = q.filter(Languoid.father_pk == null())
    res = {
        'last_update': DBSession.query(Language.updated)
        .order_by(Language.updated.desc()).first()[0],
        'number_of_families': qt.filter(Languoid.level == LanguoidLevel.family).count(),
        'number_of_isolates': qt.filter(Languoid.level == LanguoidLevel.language).count(),
    }
    ql = q.filter(Languoid.hid != null())
    res['number_of_languages'] = {'all': ql.count()}
    res['special_families'] = OrderedDict()
    for name in SPECIAL_FAMILIES:
        l = qt.filter(Language.name == name).one()
        res['special_families'][name] = l
        res['number_of_languages'][name] = l.child_language_count

    res['number_of_languages']['l1'] = res['number_of_languages']['all'] \
        - res['number_of_languages']['Pidgin']\
        - res['number_of_languages']['Artificial Language']\
        - res['number_of_languages']['Sign Language']
    return res
Example No. 11
    def _get_query_condition(self, model, fields, values):
        from sqlalchemy import true, and_

        condition = true()

        for v in fields:
            if isinstance(v, (tuple, list)):
                v = {'name':v[0]}
            elif not isinstance(v, dict):
                v = {'name':v}
            name = v['name']
            if name in values:
                render = v.get('render')
                value = values[name]
                if not value:
                    continue
                _cond = None
                if render:
                    _cond = render(model, name, value, values)
                else:
                    column = model.c[name]
                    if 'like' in v:
                        _cond = self._make_like(column, v['like'], value)
                    elif 'op' in v:
                        _cond = self._make_op(column, v['op'], value)
                    else:
                        if isinstance(value, (tuple, list)):
                            _cond = column.in_(value)
                        else:
                            _cond = column==value
                if _cond is not None:
                    condition = and_(_cond, condition)

        log.debug("condition=%s", condition)
        return condition
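The pattern above (and the near-identical get_condition in Example No. 24) starts from
true() as the neutral element of AND and folds each parsed field into it, so an empty
filter set degrades to WHERE true. A stripped-down sketch of the same accumulation
(table and column names are illustrative):

import sqlalchemy as sa

users = sa.table('users', sa.column('active'), sa.column('name'))

condition = sa.true()
for column, value in [(users.c.active, True), (users.c.name, 'bob')]:
    condition = sa.and_(column == value, condition)

# Modern SQLAlchemy folds the true() seed away once real clauses are added;
# with no fields at all, the condition stays a plain "true".
print(condition)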
Example No. 12
 def list_published( self, filters=None, **kwargs ):
     """
     Return a list of all published items.
     """
     published_filter = self.model_class.published == true()
     filters = self._munge_filters( published_filter, filters )
     return self.list( filters=filters, **kwargs )
Example No. 13
    def deleted_histories( self, trans, **kwd ):
        """
        The number of histories that were deleted more than the specified number of days ago, but have not yet been purged.
        Also included is the number of datasets associated with the histories.
        """
        params = util.Params( kwd )
        message = ''
        # Initialize so the return below cannot raise a NameError when no
        # value was submitted.
        deleted_histories_days = ''
        if params.deleted_histories_days:
            deleted_histories_days = int( params.deleted_histories_days )
            cutoff_time = datetime.utcnow() - timedelta( days=deleted_histories_days )
            history_count = 0
            dataset_count = 0
            disk_space = 0
            histories = trans.sa_session.query( model.History ) \
                .filter( and_( model.History.table.c.deleted == true(),
                    model.History.table.c.purged == false(),
                    model.History.table.c.update_time < cutoff_time ) ) \
                .options( eagerload( 'datasets' ) )

            for history in histories:
                for hda in history.datasets:
                    if not hda.dataset.purged:
                        dataset_count += 1
                        try:
                            disk_space += hda.dataset.file_size
                        except TypeError:
                            # file_size may be None
                            pass
                history_count += 1
            message = "%d histories ( including a total of %d datasets ) were deleted more than %d days ago, but have not yet been purged, " \
                "disk space: %s." % ( history_count, dataset_count, deleted_histories_days, nice_size( disk_space, True ) )
        else:
            message = "Enter the number of days."
        return str( deleted_histories_days ), message
Example No. 14
 def _query_published( self, filters=None, **kwargs ):
     """
     Return a query for all published items.
     """
     published_filter = self.model_class.published == true()
     filters = self._munge_filters( published_filter, filters )
     return self.query( filters=filters, **kwargs )
Example No. 15
 def __init__(self, model, taxon_col, *args, **kw):
     self.taxon_col = taxon_col
     kw['choices'] = [
         (o.id, '%s %s' % (o.id, o.name)) for o in
         DBSession.query(model).filter(model.active == true()).order_by(model.id)]
     kw['model_col'] = getattr(Taxon, self.taxon_col)
     Col.__init__(self, *args, **kw)
Example No. 16
 def _get_tenant_provider_security_groups(self, context, tenant_id):
     res = context.session.query(
         NsxExtendedSecurityGroupProperties.security_group_id
     ).join(securitygroups_db.SecurityGroup).filter(
         securitygroups_db.SecurityGroup.tenant_id == tenant_id,
         NsxExtendedSecurityGroupProperties.provider == sa.true()).all()
     return [r[0] for r in res]
Example No. 17
    def list(self, trans, deleted=False):
        """
        Return a list of libraries from the DB.

        :param  deleted: if True, show only ``deleted`` libraries, if False show only ``non-deleted``
        :type   deleted: boolean (optional)

        :returns: a tuple of (query, dict): a query that will emit all
                  accessible libraries, and a dict of sets with the available
                  actions for the user's accessible libraries plus the set of
                  ids of all restricted libraries. These are used for limiting
                  the number of queries when dictifying the libraries later on.
        :rtype:   (sqlalchemy query, dict)
        """
        is_admin = trans.user_is_admin()
        query = trans.sa_session.query(trans.app.model.Library)
        library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
        restricted_library_ids = {lp.library_id for lp in (
            trans.sa_session.query(trans.model.LibraryPermissions).filter(
                trans.model.LibraryPermissions.table.c.action == library_access_action
            ).distinct())}
        prefetched_ids = {'restricted_library_ids': restricted_library_ids}
        if is_admin:
            if deleted is None:
                #  Flag is not specified, do not filter on it.
                pass
            elif deleted:
                query = query.filter(trans.app.model.Library.table.c.deleted == true())
            else:
                query = query.filter(trans.app.model.Library.table.c.deleted == false())
        else:
            #  Nonadmins can't see deleted libraries
            query = query.filter(trans.app.model.Library.table.c.deleted == false())
            current_user_role_ids = [role.id for role in trans.get_current_user_roles()]
            all_actions = trans.sa_session.query(trans.model.LibraryPermissions).filter(trans.model.LibraryPermissions.table.c.role_id.in_(current_user_role_ids))
            library_add_action = trans.app.security_agent.permitted_actions.LIBRARY_ADD.action
            library_modify_action = trans.app.security_agent.permitted_actions.LIBRARY_MODIFY.action
            library_manage_action = trans.app.security_agent.permitted_actions.LIBRARY_MANAGE.action
            accessible_restricted_library_ids = set()
            allowed_library_add_ids = set()
            allowed_library_modify_ids = set()
            allowed_library_manage_ids = set()
            for action in all_actions:
                if action.action == library_access_action:
                    accessible_restricted_library_ids.add(action.library_id)
                if action.action == library_add_action:
                    allowed_library_add_ids.add(action.library_id)
                if action.action == library_modify_action:
                    allowed_library_modify_ids.add(action.library_id)
                if action.action == library_manage_action:
                    allowed_library_manage_ids.add(action.library_id)
            query = query.filter(or_(
                not_(trans.model.Library.table.c.id.in_(restricted_library_ids)),
                trans.model.Library.table.c.id.in_(accessible_restricted_library_ids)
            ))
            prefetched_ids['allowed_library_add_ids'] = allowed_library_add_ids
            prefetched_ids['allowed_library_modify_ids'] = allowed_library_modify_ids
            prefetched_ids['allowed_library_manage_ids'] = allowed_library_manage_ids
        return query, prefetched_ids
Example No. 18
 def apply_query_filter( self, trans, query, **kwargs ):
     if self.available_tracks is None:
         self.available_tracks = trans.app.datatypes_registry.get_available_tracks()
     return query.filter( model.HistoryDatasetAssociation.extension.in_(self.available_tracks) ) \
                 .filter( model.Dataset.state == model.Dataset.states.OK ) \
                 .filter( model.HistoryDatasetAssociation.deleted == false() ) \
                 .filter( model.HistoryDatasetAssociation.visible == true() )
Example No. 19
    def index(self, trans, **kwd):
        """
        GET /api/workflows

        Displays a collection of workflows.

        :param  show_published:      if True, show also published workflows
        :type   show_published:      boolean
        """
        show_published = util.string_as_bool( kwd.get( 'show_published', 'False' ) )
        rval = []
        filter1 = ( trans.app.model.StoredWorkflow.user == trans.user )
        if show_published:
            filter1 = or_( filter1, ( trans.app.model.StoredWorkflow.published == true() ) )
        for wf in trans.sa_session.query( trans.app.model.StoredWorkflow ).filter(
                filter1, trans.app.model.StoredWorkflow.table.c.deleted == false() ).order_by(
                desc( trans.app.model.StoredWorkflow.table.c.update_time ) ).all():
            item = wf.to_dict( value_mapper={ 'id': trans.security.encode_id } )
            encoded_id = trans.security.encode_id(wf.id)
            item['url'] = url_for('workflow', id=encoded_id)
            item['owner'] = wf.user.username
            rval.append(item)
        for wf_sa in trans.sa_session.query( trans.app.model.StoredWorkflowUserShareAssociation ).filter_by(
                user=trans.user ).join( 'stored_workflow' ).filter(
                trans.app.model.StoredWorkflow.deleted == false() ).order_by(
                desc( trans.app.model.StoredWorkflow.update_time ) ).all():
            item = wf_sa.stored_workflow.to_dict( value_mapper={ 'id': trans.security.encode_id } )
            encoded_id = trans.security.encode_id(wf_sa.stored_workflow.id)
            item['url'] = url_for( 'workflow', id=encoded_id )
            item['owner'] = wf_sa.stored_workflow.user.username
            rval.append(item)
        return rval
Example No. 20
 def apply(self, query):
     if self.op == self.ops.all:
         return query
     if self.op == self.ops.yes:
         return query.filter(self.sa_col == sa.true())
     if self.op == self.ops.no:
         return query.filter(self.sa_col == sa.false())
     return FilterBase.apply(self, query)
Example No. 21
 def _get_custom_roles():
   """Objects for which the user is an 'owner'."""
   custom_roles_query = db.session.query(
       all_models.AccessControlList.object_id.label('id'),
       all_models.AccessControlList.object_type.label('type'),
       literal(None).label('context_id')
   ).join(
       all_models.AccessControlRole,
       all_models.AccessControlList.ac_role_id ==
       all_models.AccessControlRole.id
   ).filter(
       and_(
           all_models.AccessControlList.person_id == contact_id,
           all_models.AccessControlList.object_type.in_(model_names),
           all_models.AccessControlRole.my_work == true(),
           all_models.AccessControlRole.read == true()
       )
   )
   return custom_roles_query
Example No. 22
def upgrade():
    table = op.create_table(
        RESOURCE_TABLE,
        sa.Column("one_row_id", sa.Boolean, server_default=sa.true(), primary_key=True),
        sa.Column("resource_version", sa.String(255)),
        sa.CheckConstraint("one_row_id", name="kube_resource_version_one_row_id")
    )
    op.bulk_insert(table, [
        {"resource_version": ""}
    ])
Example No. 23
def upgrade():
    op.add_column('writeup_posts',
                  sa.Column('published', sa.Boolean(), nullable=False, server_default=sa.true()))
    op.add_column('writeup_post_versions',
                  sa.Column('active', sa.Boolean(), nullable=False, server_default=sa.true()))
    op.add_column('writeup_post_versions',
                  sa.Column('version', sa.Integer(), nullable=False, server_default='1'))
    op.alter_column('writeup_posts', 'published', server_default=None)
    op.alter_column('writeup_post_versions', 'active', server_default=None)
    op.alter_column('writeup_post_versions', 'version', server_default=None)
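Examples No. 22 and 23 both use the standard backfill trick for adding a NOT NULL
boolean column to a table with existing rows: create the column with a server_default
so existing rows get a value, then drop the default so future inserts must set the
column explicitly. The same pattern for a hypothetical table and column:

import sqlalchemy as sa
from alembic import op

def upgrade():
    # Backfill existing rows with FALSE, then drop the default again.
    op.add_column('articles',
                  sa.Column('archived', sa.Boolean(), nullable=False,
                            server_default=sa.false()))
    op.alter_column('articles', 'archived', server_default=None)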
Example No. 24
    def get_condition(self, values=None):
        from sqlalchemy import true, and_

        values = values or self.result
        condition = true()
        model = self.model

        for v in self.fields:
            if isinstance(v, (tuple, list)):
                v = {'name':v[0]}
            elif not isinstance(v, dict):
                v = {'name':v}
            name = v['name']
            if name in values:
                render = v.get('condition')
                value = values[name]
                if not value:
                    continue
                _cond = None
                if render:
                    _cond = render(model, name, value, values)
                else:
                    if name not in model.c:
                        log.debug("Can't found {} in model {}".format(name, model.__name__))
                        continue
                    column = model.c.get(name)
                    if column is None:
                        continue
                    if 'like' in v:
                        _cond = self._make_like(column, v['like'], value)
                    elif 'op' in v:
                        _cond = self._make_op(column, v['op'], value)
                    else:
                        if isinstance(value, (tuple, list)):
                            if v.get('range'):
                                # Collect the bounds and combine them, rather
                                # than AND-ing expressions onto an initial None.
                                conds = []
                                if len(value) > 0 and value[0]:
                                    conds.append(column >= value[0])
                                if len(value) > 1 and value[1]:
                                    conds.append(column <= value[1])
                                _cond = and_(*conds) if conds else None
                            else:
                                _cond = column.in_(value)
                        else:
                            _cond = column==value
                if _cond is not None:
                    condition = and_(_cond, condition)

        log.debug("condition=%s", condition)
        return condition
Example No. 25
def get_internal_roles_json():
  """Get a list of all access control roles"""
  with benchmark("Get access roles JSON"):
    attrs = all_models.AccessControlRole.query.options(
        sqlalchemy.orm.undefer_group("AccessControlRole_complete")
    ).filter(all_models.AccessControlRole.internal == true()).all()
    published = []
    for attr in attrs:
      published.append(publish(attr))
    published = publish_representation(published)
    return as_json(published)
Example No. 26
def downgrade():
  """Downgrade database schema and/or data back to the previous revision."""
  op.alter_column(
      "evidence",
      "send_by_default",
      nullable=True,
      server_default=None,
      existing_type=sa.Boolean(),
      existing_nullable=False,
      existing_server_default=sa.true()
  )
Example No. 27
def alter_evidence():
  """Make send_by_default column not nullable and True by default"""
  op.alter_column(
      "evidence",
      "send_by_default",
      nullable=False,
      server_default=sa.true(),
      existing_type=sa.Boolean(),
      existing_nullable=True,
      existing_server_default=None,
  )
Example No. 28
    def get_alteration(self, op, user, item_type, model_class):
        if user is None:
            roles = {WBRoleModel.anonymous_role_name}
        else:
            user = WBUserModel.query.get(user)
            roles = {r.rolename for r in user.roles}

        if roles & self.roles:
            return {'outerjoin': [], 'filter': true()}
        else:
            return {'outerjoin': [], 'filter': false()}
Example No. 29
 def get_languages(self, req, language_url_pattern):
     q = DBSession.query(Language).filter(Language.active == true()).options(
         joinedload(Language.languageidentifier, LanguageIdentifier.identifier))
     for l in page_query(q):
         yield {
             '@id': language_url_pattern.format(l.id),
             'dc:title': l.name,
             'dc:identifier': [
                 {'@id': i.url(), 'schema:name': i.name}
                 for i in l.identifiers if i.url()],
         }
Example No. 30
    def test_from_function(self):
        bookcases = self.tables.bookcases
        srf = lateral(func.generate_series(1, bookcases.c.bookcase_shelves))

        self.assert_compile(
            select([bookcases]).select_from(bookcases.join(srf, true())),
            "SELECT bookcases.bookcase_id, bookcases.bookcase_owner_id, "
            "bookcases.bookcase_shelves, bookcases.bookcase_width "
            "FROM bookcases JOIN "
            "LATERAL generate_series(:generate_series_1, "
            "bookcases.bookcase_shelves) AS anon_1 ON true"
        )
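The true() in bookcases.join(srf, true()) acts as the ON clause, yielding
"JOIN LATERAL ... ON true", i.e. an unconditional join. The same trick works for
any join; a minimal sketch (table names are illustrative, and the SQLAlchemy 1.4+
select() signature is assumed):

import sqlalchemy as sa

users = sa.table('users', sa.column('id'))
colors = sa.table('colors', sa.column('name'))

# Renders as: SELECT users.id, colors.name FROM users JOIN colors ON true
stmt = sa.select(users.c.id, colors.c.name).select_from(
    users.join(colors, sa.true()))
print(stmt)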
Example No. 31
def upgrade():

    columns_and_constraints = [
        sa.Column("one_row_id", sa.Boolean, server_default=sa.true(), primary_key=True),
        sa.Column("worker_uuid", sa.String(255)),
    ]

    conn = op.get_bind()

    # alembic creates invalid SQL for the mssql and mysql dialects
    if conn.dialect.name in {"mysql"}:
        columns_and_constraints.append(sa.CheckConstraint("one_row_id<>0", name="kube_worker_one_row_id"))
    elif conn.dialect.name not in {"mssql"}:
        columns_and_constraints.append(sa.CheckConstraint("one_row_id", name="kube_worker_one_row_id"))

    table = op.create_table(RESOURCE_TABLE, *columns_and_constraints)

    op.bulk_insert(table, [{"worker_uuid": ""}])
Example No. 32
 def index(self,
           trans: ProvidesUserContext,
           deleted: bool = False) -> QuotaSummaryList:
     """Displays a collection (list) of quotas."""
     rval = []
     query = trans.sa_session.query(model.Quota)
     if deleted:
         route = 'deleted_quota'
         query = query.filter(model.Quota.deleted == true())
     else:
         route = 'quota'
         query = query.filter(model.Quota.deleted == false())
     for quota in query:
         item = quota.to_dict(value_mapper={'id': trans.security.encode_id})
         encoded_id = trans.security.encode_id(quota.id)
         item['url'] = self._url_for(route, id=encoded_id)
         rval.append(item)
     return QuotaSummaryList.parse_obj(rval)
Example No. 33
class Dataset(Model):
    """Git commit loaded into the database."""

    __tablename__ = 'dataset'

    id = sa.Column(sa.Boolean,
                   sa.CheckConstraint('id'),
                   primary_key=True,
                   server_default=sa.true())
    git_commit = sa.Column(sa.String(40),
                           sa.CheckConstraint('length(git_commit) = 40'),
                           nullable=False,
                           unique=True)
    git_describe = sa.Column(sa.Text,
                             sa.CheckConstraint("git_describe != ''"),
                             nullable=False,
                             unique=True)
    clean = sa.Column(sa.Boolean, nullable=False)
Example No. 34
 def compute_filter(cls, **kwargs):
     f = sa.true()
     for key, value in kwargs.items():
         if '.' in key:
             rel_name, key = key.split('.', 1)
             try:
                 rel = getattr(cls, rel_name)
             except AttributeError:
                 continue
             remote_cls = rel.property.mapper.class_
             if not hasattr(remote_cls, 'compute_filter'):
                 continue
             _f = remote_cls.compute_filter(**{key: value})
             if rel.property.uselist:
                 f &= rel.any(_f)
             else:
                 f &= rel.has(_f)
     return f
Example No. 35
def list_challenge_matches(challenge_id):
    offset, limit = api_util.get_offset_limit()
    where_clause, order_clause, manual_sort = api_util.get_sort_filter(
        {
            "game_id": model.games.c.id,
            "time_played": model.games.c.time_played,
        }, ["timed_out"])

    participant_clause = sqlalchemy.true()
    where_clause &= model.games.c.challenge_id == challenge_id
    for (field, _, _) in manual_sort:
        if field == "timed_out":
            participant_clause &= model.game_participants.c.timed_out

    result = match_api.list_matches_helper(offset, limit, participant_clause,
                                           where_clause, order_clause)

    return flask.jsonify(result)
Example No. 36
    def test_join_lateral_w_select_implicit_subquery(self):
        table1 = self.tables.people
        table2 = self.tables.books

        subq = (select(table2.c.book_id).correlate(table1).where(
            table1.c.people_id == table2.c.book_owner_id).lateral())
        stmt = select(table1,
                      subq.c.book_id).select_from(table1.join(subq, true()))

        self.assert_compile(
            stmt,
            "SELECT people.people_id, people.age, people.name, "
            "anon_1.book_id "
            "FROM people JOIN LATERAL (SELECT books.book_id AS book_id "
            "FROM books "
            "WHERE people.people_id = books.book_owner_id) "
            "AS anon_1 ON true",
        )
Example No. 37
async def query_simplified_user_actions(playback_id,
                                        *,
                                        conn=None) -> List[dict]:
    """Return the final output of user actions for a given playback.

    We no longer delete entries; instead, many may be inserted, and we simply
    take into account the last vote and the skip, if any.

    :param str playback_id: The playback id we want to get the actions for
    :param conn: A connection if any open
    :return: A list of the records
    """
    sub_query = sa.select([
        db.UserAction.c.user_id,
        saf.max(db.UserAction.c.ts).label('ts'),
        db.UserAction.c.playback_id,
    ]).where(db.UserAction.c.playback_id == playback_id).group_by(
        db.UserAction.c.user_id, db.UserAction.c.playback_id,
        sa.case([
            (db.UserAction.c.user_id.is_(None), db.UserAction.c.id),
        ],
                else_=0)).alias()

    query = sa.select([
        sa.distinct(db.UserAction.c.id),
        db.UserAction.c.action,
        db.UserAction.c.playback_id,
        db.UserAction.c.ts,
        db.UserAction.c.user_id,
    ]).select_from(
        db.UserAction.join(
            sub_query,
            sa.and_(
                sub_query.c.ts == db.UserAction.c.ts,
                db.UserAction.c.playback_id == sub_query.c.playback_id,
                sa.case(
                    [(sa.and_(db.UserAction.c.user_id.is_(None),
                              sub_query.c.user_id.is_(None)), sa.true())],
                    else_=db.UserAction.c.user_id == sub_query.c.user_id))))
    async with ensure_connection(conn) as conn:
        result = []
        async for user_action in await conn.execute(query):
            result.append(dict(user_action))
        return result
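The sa.case(...) inside the join condition above implements a NULL-safe equality
test: two NULL user_ids are treated as a match (sa.true()), otherwise a plain
equality comparison is used. The idiom in isolation, using the pre-1.4 list form
of case() that the snippet itself uses (table names are illustrative):

import sqlalchemy as sa

a = sa.table('a', sa.column('user_id'))
b = sa.table('b', sa.column('user_id'))

null_safe_eq = sa.case(
    [(sa.and_(a.c.user_id.is_(None), b.c.user_id.is_(None)), sa.true())],
    else_=a.c.user_id == b.c.user_id)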
Example No. 38
    def test_filtered_counting(self):
        parse_filter = self.history_contents_filters.parse_filter
        user2 = self.user_manager.create(**user2_data)
        history = self.history_manager.create(name='history', user=user2)
        contents = []
        contents.extend([
            self.add_hda_to_history(history, name=('hda-' + str(x)))
            for x in range(3)
        ])
        contents.append(
            self.add_list_collection_to_history(history, contents[:3]))
        contents.extend([
            self.add_hda_to_history(history, name=('hda-' + str(x)))
            for x in range(4, 6)
        ])
        contents.append(
            self.add_list_collection_to_history(history, contents[4:6]))

        self.log("should show correct count with filters")
        self.hda_manager.delete(contents[1])
        self.hda_manager.delete(contents[4])
        contents[6].deleted = True
        self.app.model.context.flush()

        contents[2].visible = False
        contents[5].visible = False
        contents[6].visible = False
        self.app.model.context.flush()

        HDA = self.hda_manager.model_class
        self.assertEqual(
            self.contents_manager.contents_count(
                history, filters=[parsed_filter("orm",
                                                HDA.deleted == true())]), 3)
        filters = [parse_filter('visible', 'eq', 'False')]
        self.assertEqual(
            self.contents_manager.contents_count(history, filters=filters), 3)

        filters = [
            parse_filter('deleted', 'eq', 'True'),
            parse_filter('visible', 'eq', 'False')
        ]
        self.assertEqual(
            self.contents_manager.contents_count(history, filters=filters), 1)
Example No. 39
    def state_counts(self, history):
        """
        Return a dictionary containing the counts of all contents in each state
        keyed by the distinct states.

        Note: does not include deleted/hidden contents.
        """
        filters = [
            sql.column('deleted') == false(),
            sql.column('visible') == true()
        ]
        contents_subquery = self._union_of_contents_query(
            history, filters=filters).subquery()
        statement = (sql.select([sql.column('state'),
                                 func.count('*')
                                 ]).select_from(contents_subquery).group_by(
                                     sql.column('state')))
        counts = self.app.model.context.execute(statement).fetchall()
        return dict(counts)
Example No. 40
def get_streaks(s: sqlalchemy.orm.session.Session,
                active: Optional[bool]=None,
                limit: Optional[int]=None,
                max_age: Optional[int]=None,
                ) \
        -> Sequence[Streak]:
    """Get streaks, ordered by length (longest first).

    Parameters:
        active: only return streaks with this active flag
        limit: only return (up to) limit results
        max_age: only return streaks with a win less than this many days old

    Returns:
        List of active streaks.
    """
    # The following code is a translation of this basic SQL:
    # SELECT streaks.*, count(games.streak_id) as streak_length
    # FROM streaks
    # JOIN games ON (streaks.id = games.streak_id)
    # GROUP BY streaks.id
    # HAVING streak_length > 1
    # ORDER BY streak_length DESC
    streak_length = func.count(Game.streak_id).label('streak_length')
    streak_last_activity = func.max(Game.end).label('streak_last_activity')
    q = s.query(Streak, streak_length).join(Streak.games)
    q = q.group_by(Streak.id)
    q = q.having(streak_length > 1)
    if max_age is not None:
        q = q.having(
            streak_last_activity > func.date('now', '-%s day' % max_age))
    q = q.order_by(streak_length.desc())
    if active is not None:
        q = q.filter(Streak.active == (
            sqlalchemy.true() if active else sqlalchemy.false()))
    if limit is not None:
        q = q.limit(limit)
    streaks = q.all()
    # Since we added a column to the query, the result format is:
    # ((Streak, length), (Streak, length), ...)
    # It's annoying to deal with a custom format, and recalculating the streak
    # length for a few streaks is NBD, so just return a list of Streaks
    return [t.Streak for t in streaks]
Example No. 41
    def get_similar_email_users(self, similar: str, **kwargs):
        sort = kwargs.get('sort', None)
        order = kwargs.get('order', None)
        offset_number = kwargs.get('offset', None)
        limit_number = kwargs.get('limit', 5)

        pattern = '{}%'.format(similar)

        query = self.session.query(User) \
            .filter(User.is_activated == true(), User.email.like(pattern))
        if sort:
            if order == 'desc':
                from sqlalchemy import desc
                query = query.order_by(desc(sort))
            else:
                query = query.order_by(sort)
        if offset_number:
            query = query.offset(offset_number)

        return query.limit(limit_number).all()
Example No. 42
def get_filter_operation(model: DeclarativeMeta,
                         where: Dict[str, Any]) -> ClauseElement:
    partial_filter = partial(get_filter_operation, model)

    for name, exprs in where.items():
        if name == "_or":
            return or_(*map(partial_filter, exprs))

        if name == "_not":
            return not_(partial_filter(exprs))

        if name == "_and":
            return and_(*map(partial_filter, exprs))

        model_property = getattr(model, name)
        partial_bool = partial(get_bool_operation, model_property)
        return and_(*(starmap(partial_bool, exprs.items())))

    return true()
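Note that the builder above handles one top-level key per dict and relies on the
"_or"/"_and"/"_not" combinators for nesting. A hedged usage sketch (User, the session,
the "_eq"/"_gt" operator names, and the semantics of get_bool_operation are assumptions,
not taken from the snippet):

# WHERE name = 'alice' OR age > 30
where = {
    "_or": [
        {"name": {"_eq": "alice"}},
        {"age": {"_gt": 30}},
    ],
}
clause = get_filter_operation(User, where)
query = session.query(User).filter(clause)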
Example No. 43
def set_next_schedule_time(ctx, *, db, model):
    if not ctx._scheduler_queues:
        ctx._next_schedule_time = datetime.max
        return

    ctx._next_schedule_time = (db.query(
        sa.func.min(model.JobSchedule.next_execution_time)).filter(
            model.JobSchedule.queue.in_(ctx._scheduler_queues),
            model.JobSchedule.is_enabled == sa.true(),
        ).scalar())

    if ctx._next_schedule_time is None:
        log.debug('no pending schedules')
        ctx._next_schedule_time = datetime.max

    else:
        when = (ctx._next_schedule_time - ctx._now()).total_seconds()
        if when > 0:
            log.debug('tracking next schedule in %.3f seconds', when)
Example No. 44
def childnodes(request):
    if request.params.get('t') == 'select2':
        query = DBSession.query(Languoid.id, Languoid.name, Languoid.level)\
            .filter(icontains(Languoid.name, request.params.get('q')))
        total = query.count()
        ms = LanguoidsMultiSelect(request, None, None, url='x')
        return dict(
            results=[ms.format_result(l) for l in query.limit(100)],
            context={},
            more=total > 500)

    query = DBSession.query(
        Languoid.pk,
        Languoid.id,
        Languoid.name,
        Languoid.level,
        func.count(TreeClosureTable.child_pk).label('children'))\
        .filter(Language.pk == TreeClosureTable.parent_pk)\
        .filter(Language.active == true())

    if request.params.get('node'):
        query = query.filter(Languoid.father_pk == int(request.params['node']))
    else:
        # narrow down selection of top-level nodes in the tree:
        query = query.filter(Languoid.father_pk == null())
        if request.params.get('q'):
            query = query.filter(Language.name.contains(request.params.get('q')))

    query = query.group_by(
        Languoid.pk,
        Languoid.id,
        Languoid.name,
        Languoid.level).order_by(Language.name)
    return [{
        'label': ('%s (%s)' % (l.name, l.children - 1))
            if l.children > 1 else l.name,
        'glottocode': l.id,
        'lname': l.name,
        'id': l.pk,
        'level': l.level.value,
        #'children': l.children
        'load_on_demand': l.children > 1} for l in query]
Example No. 45
def upgrade(migrate_engine):
    metadata.bind = migrate_engine
    log.debug( "Fixing a discrepancy concerning deleted shared history items." )
    affected_items = 0
    start_time = time.time()
    for dataset in context.query( Dataset ).filter( and_( Dataset.deleted == true(), Dataset.purged == false() ) ):
        for dataset_instance in dataset.history_associations + dataset.library_associations:
            if not dataset_instance.deleted:
                dataset.deleted = False
                if dataset.file_size in [ None, 0 ]:
                    dataset.set_size()  # restore file size
                affected_items += 1
                break
    context.flush()
    log.debug( "%i items affected, and restored." % ( affected_items ) )
    log.debug( "Time elapsed: %s" % ( time.time() - start_time ) )

    #fix share before hda
    log.debug( "Fixing a discrepancy concerning cleaning up deleted history items shared before HDAs." )
    dataset_by_filename = {}
    changed_associations = 0
    start_time = time.time()
    for dataset in context.query( Dataset ).filter( Dataset.external_filename.like( '%dataset_%.dat' ) ):
        if dataset.file_name in dataset_by_filename:
            guessed_dataset = dataset_by_filename[ dataset.file_name ]
        else:
            guessed_dataset = __guess_dataset_by_filename( dataset.file_name )
            if guessed_dataset and dataset.file_name != guessed_dataset.file_name:  # not os.path.samefile( dataset.file_name, guessed_dataset.file_name )
                guessed_dataset = None
            dataset_by_filename[ dataset.file_name ] = guessed_dataset

        if guessed_dataset is not None and guessed_dataset.id != dataset.id:  # could we have a self-referential dataset?
            for dataset_instance in dataset.history_associations + dataset.library_associations:
                dataset_instance.dataset = guessed_dataset
                changed_associations += 1
            # Mark the original Dataset as deleted and purged; it is no longer in use, but do not delete the file_name contents.
            dataset.deleted = True
            dataset.external_filename = "Dataset was result of share before HDA, and has been replaced: %s mapped to Dataset %s" % ( dataset.external_filename, guessed_dataset.id )
            dataset.purged = True  # We don't really purge the file here, but we mark it as purged, since this dataset is now defunct.
    context.flush()
    log.debug( "%i items affected, and restored." % ( changed_associations ) )
    log.debug( "Time elapsed: %s" % ( time.time() - start_time ) )
Example No. 46
 def index(self, trans, deleted='False', f_email=None, **kwd):
     """
     GET /api/users
     GET /api/users/deleted
     Displays a collection (list) of users.
     """
     rval = []
     query = trans.sa_session.query(trans.app.model.User)
     deleted = util.string_as_bool(deleted)
     if f_email:
         query = query.filter(
             trans.app.model.User.email.like("%%%s%%" % f_email))
     if deleted:
         query = query.filter(
             trans.app.model.User.table.c.deleted == true())
         # only admins can see deleted users
         if not trans.user_is_admin():
             return []
     else:
         query = query.filter(
             trans.app.model.User.table.c.deleted == false())
         # special case: user can see only their own user
         # special case2: if the galaxy admin has specified that other user email/names are
         #   exposed, we don't want special case #1
          if (not trans.user_is_admin() and not trans.app.config.expose_user_name
                  and not trans.app.config.expose_user_email):
              item = trans.user.to_dict(
                  value_mapper={'id': trans.security.encode_id})
              return [item]
     for user in query:
         item = user.to_dict(value_mapper={'id': trans.security.encode_id})
         # If NOT configured to expose_email, do not expose email UNLESS the user is self, or
         # the user is an admin
          if (not trans.app.config.expose_user_name and user is not trans.user
                  and not trans.user_is_admin()):
              del item['username']
          if (not trans.app.config.expose_user_email and user is not trans.user
                  and not trans.user_is_admin()):
              del item['email']
         # TODO: move into api_values
         rval.append(item)
     return rval
Example No. 47
def _query_chunks(query, model, orderings, chunk_size):
  """Paginate query by chunks of specific size.

  The main difference here from util functions generating chunkyfied query
  provided in utils module is that here no offset is used. Chunks are queried
  here using filters which is faster comparing to offsets in case of large
  number of records in query.

  Args:
      query: Query to be paginated.
      model: Model used in `query`.
      orderings: Orderings used in `query`.
      chunk_size: Size of chunks.

  Yields:
      Objects in chunks from query query.
  """
  filters = [sa.true() for _ in orderings]
  count = query.count()
  for _ in range(0, count, chunk_size):
    # Pagination is performed by filtering here instead of using OFFSET, since
    # offsets with large values are much slower than plain filtering.
    paginated_q = query.from_self().filter(*filters).limit(chunk_size)
    chunk = paginated_q.all()
    yield chunk
    if chunk:
      # Filters must be recalculated here so the next iteration returns a new
      # chunk. Each ordering field gets ">= last value in chunk", except the
      # last ordering field, which gets "> last value in chunk".
      ge_filter_fields, gt_filter_field = orderings[:-1], orderings[-1]
      last_in_chunk = chunk[-1]

      filters = [
          op.ge(getattr(model, field), getattr(last_in_chunk, field))
          for field in ge_filter_fields
      ]
      filters.append(
          op.gt(
              getattr(model, gt_filter_field),
              getattr(last_in_chunk, gt_filter_field),
          )
      )
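The function above is keyset (seek) pagination: rather than OFFSET, it filters on the
ordering columns of the last row already seen, which stays fast no matter how deep into
the result set the iteration gets. The core idea in a stripped-down, single-column
sketch (model and session names are illustrative):

last_id = 0
while True:
    chunk = (session.query(Item)
             .filter(Item.id > last_id)
             .order_by(Item.id)
             .limit(1000)
             .all())
    if not chunk:
        break
    process(chunk)
    last_id = chunk[-1].id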
Example No. 48
def upgrade():
    columns_and_constraints = [
        sa.Column("one_row_id",
                  sa.Boolean,
                  server_default=sa.true(),
                  primary_key=True),
        sa.Column("resource_version", sa.String(255))
    ]

    conn = op.get_bind()

    # alembic creates invalid SQL for the mssql dialect
    if conn.dialect.name not in ('mssql',):
        columns_and_constraints.append(
            sa.CheckConstraint("one_row_id",
                               name="kube_resource_version_one_row_id"))

    table = op.create_table(RESOURCE_TABLE, *columns_and_constraints)

    op.bulk_insert(table, [{"resource_version": ""}])
Example No. 49
    def _get_chain_hops_for_port_by_network_side(context, port_id, side):
        reverse_side = constants.REVERSE_PORT_SIDE[side]

        query = context.session.query(bagpipe_db.BaGPipeChainHop)
        query = query.join(
            models_v2.Network,
            sa.or_(
                sa.and_(
                    models_v2.Network.id == getattr(bagpipe_db.BaGPipeChainHop,
                                                    side + '_network'),
                    bagpipe_db.BaGPipeChainHop.reverse_hop == false()),
                sa.and_(
                    models_v2.Network.id == getattr(bagpipe_db.BaGPipeChainHop,
                                                    reverse_side + '_network'),
                    bagpipe_db.BaGPipeChainHop.reverse_hop == true())))
        query = query.join(models_v2.Port,
                           models_v2.Port.network_id == models_v2.Network.id)
        query = query.filter(models_v2.Port.id == port_id)

        return query.all()
Example No. 50
    def list(self, trans, deleted=False):
        """
        Return a query for the groups in the DB.

        :returns: query that will emit all groups
        :rtype:   sqlalchemy query
        """
        is_admin = trans.user_is_admin()
        query = trans.sa_session.query(trans.app.model.Group)
        if is_admin:
            if deleted is None:
                #  Flag is not specified, do not filter on it.
                pass
            elif deleted:
                query = query.filter(trans.app.model.Group.table.c.deleted == true())
            else:
                query = query.filter(trans.app.model.Group.table.c.deleted == false())
        else:
            query = query.filter(trans.app.model.Group.table.c.deleted == false())
        return query
Example No. 51
async def db_worker():
    logging.info('Working with the database...')
    db: Session = pool()
    
    for event in db.query(Event).all():
        delta = datetime.now().date() - datetime.strptime(event.date, "%d.%m.%Y").date()
        
        if delta.days > 1:
            logging.info(f'Deleted event {event.title}')
            db.delete(event)
            db.commit()
            continue
    
    hour = datetime.now().hour
    for user in db.query(User).filter(User.banned == false(), User.subscribed == true()).all():
        if user.mailing_time == f'{hour}:00':
            await send_mailing(user.user_id)
            await asyncio.sleep(.5)
    
    db.close()
Example No. 52
class BaseModel(db.Model):
    """Base model's class"""

    __abstract__ = True

    id = db.Column(db.Integer, primary_key=True)
    is_active = db.Column(
        db.Boolean,
        default=True,
        nullable=False,
        server_default=sa.true(),
        info={"label": "Is active?"},
    )
    created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    updated_at = db.Column(
        db.DateTime,
        default=datetime.datetime.utcnow,
        onupdate=datetime.datetime.utcnow,
    )

    @property
    def is_authenticated(self):
        return True

    @property
    def is_anonymous(self):
        return False

    def __eq__(self, other):
        if isinstance(other, BaseModel):
            return self.id == other.id
        return NotImplemented

    def __ne__(self, other):
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not equal

    def get_id(self):
        return str(self.id)
Example No. 53
def lock_scheduler_queues(ctx, *, db, model, now=None):
    if now is None:
        now = ctx._now()

    Lock = model.Lock
    new_queues = set()
    for queue in (q for q in ctx._queues if q not in ctx._scheduler_queues):
        result = db.execute(
            insert(Lock.__table__).values({
                Lock.queue: queue,
                Lock.key: 'scheduler',
                Lock.lock_id: ctx._lock_id,
                Lock.worker: ctx._name,
            }).on_conflict_do_nothing().returning(Lock.queue)).scalar()
        if result is not None:
            new_queues.add(queue)

    if new_queues:
        log.info('scheduler started for queues=%s',
                 ','.join(sorted(new_queues)))
    ctx._scheduler_queues.update(new_queues)

    stale_cutoff_time = now - ctx._timeout
    stale_schedules = (db.query(model.JobSchedule).with_for_update().filter(
        model.JobSchedule.queue.in_(ctx._scheduler_queues),
        model.JobSchedule.is_enabled == sa.true(),
        model.JobSchedule.next_execution_time < stale_cutoff_time,
    ).all())
    for s in stale_schedules:
        next_execution_time = get_next_rrule_time(s.rrule, s.created_time,
                                                  stale_cutoff_time)
        log.warning(
            'execution time on schedule=%s is very old (%s seconds), skipping '
            'to next time=%s',
            s.id,
            (now - s.next_execution_time).total_seconds(),
            next_execution_time,
        )
        s.next_execution_time = next_execution_time

    set_next_schedule_time(ctx, db=db, model=model)
Example No. 54
def leave_gig():
    response = request.get_json()
    user_id = response['id']

    # Removes user from the gig
    user = User.query.filter(User.spotify_id == user_id).first()
    gig_id = user.gig_id

    if user.is_host:
        gig = Gig.query.filter(Gig.id == gig_id).first()
        users_in_gig = User.query.filter(User.gig_id == gig_id).all()
        for member in users_in_gig:
            db.session.delete(member)
        db.session.delete(gig)
        db.session.commit()
    else:
        db.session.delete(user)
        db.session.commit()
        # .first() so a single User is passed along rather than a Query
        host = User.query.filter(User.gig_id == gig_id, User.is_host == true()).first()
        update_playlist(host, gig_id)

    return jsonify({'message': 'User left successfully'})
Example No. 55
    def _get_chain_hops_for_port_by_ppg_side(context, port_id, side):
        reverse_side = constants.REVERSE_PORT_SIDE[side]

        query = context.session.query(bagpipe_db.BaGPipeChainHop)
        query = query.join(
            sfc_db.PortPairGroup,
            sfc_db.PortPairGroup.id == getattr(bagpipe_db.BaGPipeChainHop,
                                               side + '_ppg'))
        query = query.join(
            sfc_db.PortPair,
            sfc_db.PortPair.portpairgroup_id == sfc_db.PortPairGroup.id)
        query = query.filter(
            sa.or_(
                sa.and_(
                    getattr(sfc_db.PortPair, reverse_side) == port_id,
                    bagpipe_db.BaGPipeChainHop.reverse_hop == false()),
                sa.and_(
                    getattr(sfc_db.PortPair, side) == port_id,
                    bagpipe_db.BaGPipeChainHop.reverse_hop == true())))

        return query.all()
Example No. 56
def list_challenges():
    offset, limit = api_util.get_offset_limit()
    where_clause, order_clause, manual_sort = api_util.get_sort_filter({
        "issuer": model.challenges.c.issuer,
        "created": model.challenges.c.created,
        "finished": model.challenges.c.finished,
        "num_games": model.challenges.c.num_games,
        "winner": model.challenges.c.winner,
    }, ["finished", "participant"])

    participant_clause = sqlalchemy.true()
    for (field, op, val) in manual_sort:
        if field == "finished":
            where_clause &= model.challenges.c.status == "finished"
        elif field == "participant":
            participant_clause &= op(model.challenge_participants.c.user_id, val)

    result = list_challenges_helper(offset, limit,
                                    participant_clause,
                                    where_clause, order_clause)
    return flask.jsonify(result)
Example No. 57
    def fetch_lent_list(self, owner_id: str) -> List[LendingEntity]:
        result = db.session.query(
            Lending.id,
            Lending.content,
            Lending.deadline,
            User.name.label('borrower_name')
        ) \
            .outerjoin(User, Lending.borrower_id == User.id) \
            .filter(Lending.owner_id == owner_id) \
            .filter(Lending.is_returned == false()) \
            .filter(Lending.is_sent_url == true()) \
            .all()

        lent_list = [
            LendingEntity(r.id,
                          r.content,
                          r.deadline,
                          borrower_name=r.borrower_name) for r in result
        ]

        return lent_list
Example No. 58
    def to_filter(self, value=Empty):
        """
        :param value: 需要查询的值, 如果没有设置值则使用default, 如果default没有设置则使用true来代替?
        """
        field = self.column

        # If no value was passed but a default is set, use the default;
        # otherwise return true() as a neutral placeholder.
        if value is Empty:
            if self.default is not Empty:
                value = self.default
            else:
                return true()

        if self.value_process:

            if callable(self.value_process):
                value = self.value_process(value)

            value = self._value_process(value)

        return self.operator(field, value)
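A hedged sketch of how a filter object like the one above might be used (the Filter
constructor and its arguments are invented for illustration; only to_filter's behavior
is taken from the snippet):

import operator

f = Filter(column=User.age, operator=operator.ge)  # hypothetical constructor
clause = f.to_filter(18)   # -> User.age >= 18
noop = f.to_filter()       # no value, no default -> true(), a harmless no-op in and_()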
Example No. 59
 def userless_histories( self, trans, **kwd ):
     """The number of userless histories and associated datasets that have not been updated for the specified number of days."""
     params = util.Params( kwd )
     message = ''
     # Initialize so the return below cannot raise a NameError when no value
     # was submitted.
     userless_histories_days = ''
     if params.userless_histories_days:
         userless_histories_days = int( params.userless_histories_days )
         cutoff_time = datetime.utcnow() - timedelta( days=userless_histories_days )
         history_count = 0
         dataset_count = 0
         for history in trans.sa_session.query( model.History ) \
                 .filter( and_( model.History.table.c.user_id == null(),
                 model.History.table.c.deleted == true(),
                 model.History.table.c.update_time < cutoff_time ) ):
             for dataset in history.datasets:
                 if not dataset.deleted:
                     dataset_count += 1
             history_count += 1
         message = "%d userless histories ( including a total of %d datasets ) have not been updated for at least %d days." % ( history_count, dataset_count, userless_histories_days )
     else:
         message = "Enter the number of days."
     return str( userless_histories_days ), message
Example No. 60
 def index(self, trans, deleted='False', **kwd):
     """
     GET /api/quotas
     GET /api/quotas/deleted
     Displays a collection (list) of quotas.
     """
     rval = []
     deleted = util.string_as_bool(deleted)
     query = trans.sa_session.query(trans.app.model.Quota)
     if deleted:
         route = 'deleted_quota'
         query = query.filter(trans.app.model.Quota.deleted == true())
     else:
         route = 'quota'
         query = query.filter(trans.app.model.Quota.deleted == false())
     for quota in query:
         item = quota.to_dict(value_mapper={'id': trans.security.encode_id})
         encoded_id = trans.security.encode_id(quota.id)
         item['url'] = url_for(route, id=encoded_id)
         rval.append(item)
     return rval