Example #1
    def index(self, trans, **kwd):
        """
        GET /api/workflows

        Displays a collection of workflows.

        :param  show_published:      if True, also show published workflows
        :type   show_published:      boolean
        """
        show_published = util.string_as_bool( kwd.get( 'show_published', 'False' ) )
        rval = []
        filter1 = ( trans.app.model.StoredWorkflow.user == trans.user )
        if show_published:
            filter1 = or_( filter1, ( trans.app.model.StoredWorkflow.published == true() ) )
        for wf in trans.sa_session.query( trans.app.model.StoredWorkflow ).filter(
                filter1, trans.app.model.StoredWorkflow.table.c.deleted == false() ).order_by(
                desc( trans.app.model.StoredWorkflow.table.c.update_time ) ).all():
            item = wf.to_dict( value_mapper={ 'id': trans.security.encode_id } )
            encoded_id = trans.security.encode_id(wf.id)
            item['url'] = url_for('workflow', id=encoded_id)
            item['owner'] = wf.user.username
            rval.append(item)
        for wf_sa in trans.sa_session.query( trans.app.model.StoredWorkflowUserShareAssociation ).filter_by(
                user=trans.user ).join( 'stored_workflow' ).filter(
                trans.app.model.StoredWorkflow.deleted == false() ).order_by(
                desc( trans.app.model.StoredWorkflow.update_time ) ).all():
            item = wf_sa.stored_workflow.to_dict( value_mapper={ 'id': trans.security.encode_id } )
            encoded_id = trans.security.encode_id(wf_sa.stored_workflow.id)
            item['url'] = url_for( 'workflow', id=encoded_id )
            item['owner'] = wf_sa.stored_workflow.user.username
            rval.append(item)
        return rval
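The filter above compares ORM columns against SQLAlchemy's true() and false() constructs rather than Python booleans, so the comparison is rendered by the dialect's compiler and stays portable across backends with and without a native boolean type. A minimal standalone sketch (not Galaxy code, hypothetical column names) of how such comparisons compile:

from sqlalchemy import column, false, true, or_

# Same shape as the index() filter: "owned by this user OR published, and not deleted".
owner_filter = column('user_id') == 7
published_filter = column('published') == true()
print(or_(owner_filter, published_filter))  # an OR of the two comparisons
print(column('deleted') == false())         # a portable test against SQL FALSE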
Example #2
  def get_permissions_query(model_names, permission_type='read'):
    """Prepare the query based on the allowed resources

    This filters for each of the required models based on permissions on every
    object type.
    """
    if not model_names:
      # If no model names are given, the permissions query can never match
      # anything, so return false directly instead of building an empty IN
      # clause OR-ed over an empty list.
      return sa.false()

    type_queries = []
    for model_name in model_names:
      contexts, resources = permissions.get_context_resource(
          model_name=model_name,
          permission_type=permission_type,
      )

      if contexts is None:
        # A None context means the user has full permission_type access to the
        # given model.
        type_queries.append(MysqlRecordProperty.type == model_name)
      elif resources:
        type_queries.append(sa.and_(
            MysqlRecordProperty.type == model_name,
            MysqlRecordProperty.key.in_(resources),
        ))

    if not type_queries:
      return sa.false()

    return sa.or_(*type_queries)
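The early return of sa.false() matters because OR-ing together an empty list of clauses would not give a usable "match nothing" predicate. A small standalone sketch of that fallback (plain SQLAlchemy, no project dependencies):

import sqlalchemy as sa

def combine_type_queries(type_queries):
    # Mirror the fallback above: with no per-model clauses, match nothing.
    if not type_queries:
        return sa.false()
    return sa.or_(*type_queries)

print(combine_type_queries([]))                                # a constant-false predicate
print(combine_type_queries([sa.column('type') == 'Program']))  # the single clause passes through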
Example #3
    def list(self, trans, deleted=False):
        """
        Return a query that loads the accessible libraries from the DB, plus
        prefetched permission data for those libraries.

        :param  deleted: if True, show only ``deleted`` libraries; if False, show only ``non-deleted`` ones
        :type   deleted: boolean (optional)

        :returns: a query that will emit all accessible libraries, together
                  with a dict of sets holding the available actions for the
                  user's accessible libraries and the ids of all
                  access-restricted libraries. These are used for limiting the
                  number of queries when dictifying the libraries later on.
        :rtype:   tuple of (sqlalchemy query, dict)
        """
        is_admin = trans.user_is_admin()
        query = trans.sa_session.query(trans.app.model.Library)
        library_access_action = trans.app.security_agent.permitted_actions.LIBRARY_ACCESS.action
        restricted_library_ids = {lp.library_id for lp in (
            trans.sa_session.query(trans.model.LibraryPermissions).filter(
                trans.model.LibraryPermissions.table.c.action == library_access_action
            ).distinct())}
        prefetched_ids = {'restricted_library_ids': restricted_library_ids}
        if is_admin:
            if deleted is None:
                #  Flag is not specified, do not filter on it.
                pass
            elif deleted:
                query = query.filter(trans.app.model.Library.table.c.deleted == true())
            else:
                query = query.filter(trans.app.model.Library.table.c.deleted == false())
        else:
            #  Nonadmins can't see deleted libraries
            query = query.filter(trans.app.model.Library.table.c.deleted == false())
            current_user_role_ids = [role.id for role in trans.get_current_user_roles()]
            all_actions = trans.sa_session.query(trans.model.LibraryPermissions).filter(trans.model.LibraryPermissions.table.c.role_id.in_(current_user_role_ids))
            library_add_action = trans.app.security_agent.permitted_actions.LIBRARY_ADD.action
            library_modify_action = trans.app.security_agent.permitted_actions.LIBRARY_MODIFY.action
            library_manage_action = trans.app.security_agent.permitted_actions.LIBRARY_MANAGE.action
            accessible_restricted_library_ids = set()
            allowed_library_add_ids = set()
            allowed_library_modify_ids = set()
            allowed_library_manage_ids = set()
            for action in all_actions:
                if action.action == library_access_action:
                    accessible_restricted_library_ids.add(action.library_id)
                if action.action == library_add_action:
                    allowed_library_add_ids.add(action.library_id)
                if action.action == library_modify_action:
                    allowed_library_modify_ids.add(action.library_id)
                if action.action == library_manage_action:
                    allowed_library_manage_ids.add(action.library_id)
            query = query.filter(or_(
                not_(trans.model.Library.table.c.id.in_(restricted_library_ids)),
                trans.model.Library.table.c.id.in_(accessible_restricted_library_ids)
            ))
            prefetched_ids['allowed_library_add_ids'] = allowed_library_add_ids
            prefetched_ids['allowed_library_modify_ids'] = allowed_library_modify_ids
            prefetched_ids['allowed_library_manage_ids'] = allowed_library_manage_ids
        return query, prefetched_ids
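For non-admins the visibility condition reduces to: the library is either unrestricted, or it is restricted but covered by one of the user's roles. A standalone sketch of that clause shape (hypothetical ids, plain Core constructs):

from sqlalchemy import column, not_, or_

library_id = column('id')
restricted_library_ids = [3, 4, 5]        # libraries that have a LIBRARY_ACCESS permission row
accessible_restricted_library_ids = [4]   # restricted libraries the user's roles can access

visibility = or_(
    not_(library_id.in_(restricted_library_ids)),
    library_id.in_(accessible_restricted_library_ids),
)
print(visibility)  # renders as: id NOT IN (...) OR id IN (...)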
Example #4
def stats():
    """Show some devices stats."""
    sq_nt = session.query(Device.address) \
        .filter(and_(Device.tracked == false(), Device.identified == true())) \
        .subquery()

    sq_ni = session.query(Device.address) \
        .filter(and_(Device.tracked == true(), Device.identified == false())) \
        .subquery()

    sq_ntni = session.query(Device.address) \
        .filter(and_(Device.tracked == false(), Device.identified == false())) \
        .subquery()

    query = session.query(Device.address_origin, func.count(Device.id), func.count(sq_nt.c.address), func.count(sq_ni.c.address), func.count(sq_ntni.c.address)) \
        .outerjoin(sq_nt, sq_nt.c.address == Device.address) \
        .outerjoin(sq_ni, sq_ni.c.address == Device.address) \
        .outerjoin(sq_ntni, sq_ntni.c.address == Device.address) \
        .group_by(Device.address_origin)

    print('--- Devices ---')
    for [address_origin, device_count, nt_count, ni_count, ntni_count] in query.all():
        print('{:12s} Total:{:5d} - not tracked:{:3d}, not identified:{:3d}, not tracked & not identified: {:3d}'
              .format(AddressOrigin(address_origin).name,
                      device_count,
                      nt_count,
                      ni_count,
                      ntni_count))
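The stats() query relies on COUNT(column) ignoring NULLs: each filtered subquery is outer-joined back on the address, so counting its address column per group counts only the devices that matched that subquery's condition. A runnable standalone sketch of the trick (hypothetical Device stand-in, assuming SQLAlchemy 1.4+):

from sqlalchemy import Boolean, Column, Integer, String, and_, create_engine, false, func, true
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Device(Base):  # hypothetical stand-in for the model used above
    __tablename__ = 'device_demo'
    id = Column(Integer, primary_key=True)
    address = Column(String, unique=True)
    address_origin = Column(String)
    tracked = Column(Boolean, default=False)
    identified = Column(Boolean, default=False)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([
        Device(address='a', address_origin='scan', tracked=True, identified=True),
        Device(address='b', address_origin='scan', tracked=False, identified=True),
        Device(address='c', address_origin='scan', tracked=False, identified=False),
    ])
    session.commit()

    # Subquery of devices that are identified but not tracked.
    sq_nt = session.query(Device.address).filter(
        and_(Device.tracked == false(), Device.identified == true())).subquery()

    rows = session.query(Device.address_origin,
                         func.count(Device.id),
                         func.count(sq_nt.c.address)) \
        .outerjoin(sq_nt, sq_nt.c.address == Device.address) \
        .group_by(Device.address_origin).all()
    print(rows)  # [('scan', 3, 1)] -> 3 devices total, 1 not tracked but identified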
Example #5
def get_users(
    number: int=10,
    offset: int=0,
    matches_all: Dict[str, Any]={},
    matches_any: Dict[str, Any]={},
    store: Store=None,
) -> Tuple[List[User], int]:
    query = store.session.query(User)
    query = query.outerjoin(InstitutionAssociation)
    query = query.outerjoin(Institution)
    query = query.order_by(User.id.asc())

    searches = {
        'bio': lambda a: User.bio.ilike('%{}%'.format(a)),
        'created_after': lambda a: User.date_created > a,
        'created_before': lambda a: User.date_created < a,
        'institution': lambda a: Institution.name.ilike('%{}%'.format(a)),
        'name': lambda a: User.name.ilike('%{}%'.format(a)),
    }

    filter_all = true()
    filter_any = false()

    for name, value in matches_all.items():
        filter_all = filter_all & searches.get(name, lambda _: true())(value)

    for name, value in matches_any.items():
        filter_any = filter_any | searches.get(name, lambda _: false())(value)

    query = query.filter(filter_all & filter_any if filter_any is not false() else filter_all)
    query = query.distinct()
    count = query.count()
    query = query.limit(number).offset(offset)

    return query, count
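The filter_all/filter_any accumulators start from true() and false() because those are the identity elements for AND and OR respectively, so unknown search keys fall through without affecting the result. A standalone sketch of the pattern (hypothetical column names):

from sqlalchemy import column, false, true

searches = {
    'name': lambda a: column('name').ilike('%{}%'.format(a)),
    'bio': lambda a: column('bio').ilike('%{}%'.format(a)),
}

filter_all = true()
for key, value in {'name': 'ada', 'unknown_key': 'x'}.items():
    # Unknown keys contribute a no-op true() clause.
    filter_all = filter_all & searches.get(key, lambda _: true())(value)

filter_any = false()
for key, value in {'bio': 'sqlalchemy'}.items():
    filter_any = filter_any | searches.get(key, lambda _: false())(value)

print(filter_all)  # the combined AND predicate
print(filter_any)  # the combined OR predicate seeded from false()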
Example #6
 def build_initial_query(self, trans, **kwd):
     return trans.sa_session.query(model.Repository) \
                            .filter(and_(model.Repository.table.c.deleted == false(),
                                         model.Repository.table.c.deprecated == false())) \
                            .join((model.RepositoryReview.table, model.RepositoryReview.table.c.repository_id == model.Repository.table.c.id)) \
                            .join((model.User.table, model.User.table.c.id == model.Repository.table.c.user_id)) \
                            .outerjoin((model.ComponentReview.table, model.ComponentReview.table.c.repository_review_id == model.RepositoryReview.table.c.id)) \
                            .outerjoin((model.Component.table, model.Component.table.c.id == model.ComponentReview.table.c.component_id))
Example #7
 def build_initial_query(self, trans, **kwd):
     return trans.sa_session.query(model.Repository) \
                            .filter(and_(model.Repository.table.c.deleted == false(),
                                         model.Repository.table.c.deprecated == false())) \
                            .join(model.RepositoryMetadata.table) \
                            .filter(and_(model.RepositoryMetadata.table.c.downloadable == true(),
                                         model.RepositoryMetadata.table.c.includes_tools == true(),
                                         model.RepositoryMetadata.table.c.tools_functionally_correct == false())) \
                            .join(model.User.table)
Example #8
def handle_role_associations( app, role, repository, **kwd ):
    sa_session = app.model.context.current
    message = escape( kwd.get( 'message', '' ) )
    status = kwd.get( 'status', 'done' )
    repository_owner = repository.user
    if kwd.get( 'manage_role_associations_button', False ):
        in_users_list = util.listify( kwd.get( 'in_users', [] ) )
        in_users = [ sa_session.query( app.model.User ).get( x ) for x in in_users_list ]
        # Make sure the repository owner is always associated with the repository's admin role.
        owner_associated = False
        for user in in_users:
            if user.id == repository_owner.id:
                owner_associated = True
                break
        if not owner_associated:
            in_users.append( repository_owner )
            message += "The repository owner must always be associated with the repository's administrator role.  "
            status = 'error'
        in_groups_list = util.listify( kwd.get( 'in_groups', [] ) )
        in_groups = [ sa_session.query( app.model.Group ).get( x ) for x in in_groups_list ]
        in_repositories = [ repository ]
        app.security_agent.set_entity_role_associations( roles=[ role ],
                                                         users=in_users,
                                                         groups=in_groups,
                                                         repositories=in_repositories )
        sa_session.refresh( role )
        message += "Role <b>%s</b> has been associated with %d users, %d groups and %d repositories.  " % \
            ( escape( str( role.name ) ), len( in_users ), len( in_groups ), len( in_repositories ) )
    in_users = []
    out_users = []
    in_groups = []
    out_groups = []
    for user in sa_session.query( app.model.User ) \
                          .filter( app.model.User.table.c.deleted == false() ) \
                          .order_by( app.model.User.table.c.email ):
        if user in [ x.user for x in role.users ]:
            in_users.append( ( user.id, user.email ) )
        else:
            out_users.append( ( user.id, user.email ) )
    for group in sa_session.query( app.model.Group ) \
                           .filter( app.model.Group.table.c.deleted == false() ) \
                           .order_by( app.model.Group.table.c.name ):
        if group in [ x.group for x in role.groups ]:
            in_groups.append( ( group.id, group.name ) )
        else:
            out_groups.append( ( group.id, group.name ) )
    associations_dict = dict( in_users=in_users,
                              out_users=out_users,
                              in_groups=in_groups,
                              out_groups=out_groups,
                              message=message,
                              status=status )
    return associations_dict
Example #9
    def test_is_boolean_symbols_despite_no_native(self):
        is_(
            testing.db.scalar(select([cast(true().is_(true()), Boolean)])),
            True,
        )

        is_(
            testing.db.scalar(select([cast(true().isnot(true()), Boolean)])),
            False,
        )

        is_(
            testing.db.scalar(select([cast(false().is_(false()), Boolean)])),
            True,
        )
Example #10
  def get_permissions_query(model_names, permission_type='read'):
    """Prepare the query based on the allowed contexts and resources for
     each of the required objects (models).
    """
    if not model_names:
      # If no model names are given, the permissions query can never match
      # anything, so return false directly instead of building an empty IN
      # clause OR-ed over an empty list.
      return sa.false()

    type_queries = []
    for model_name in model_names:
      contexts, resources = permissions.get_context_resource(
          model_name=model_name,
          permission_type=permission_type,
      )
      statement = sa.and_(
          MysqlRecordProperty.type == model_name,
          context_query_filter(MysqlRecordProperty.context_id, contexts)
      )
      if resources:
        statement = sa.or_(sa.and_(MysqlRecordProperty.type == model_name,
                                   MysqlRecordProperty.key.in_(resources)),
                           statement)
      type_queries.append(statement)

    return sa.and_(
        MysqlRecordProperty.type.in_(model_names),
        sa.or_(*type_queries)
    )
Example #11
def get_library( name, description, synopsis ):
    return gx_context().query( galaxy.model.Library ) \
                       .filter( and_( galaxy.model.Library.table.c.name == name,
                                      galaxy.model.Library.table.c.description == description,
                                      galaxy.model.Library.table.c.synopsis == synopsis,
                                      galaxy.model.Library.table.c.deleted == false() ) ) \
                       .first()
Example #12
 def request_type_permissions(self, trans, **kwd):
     params = util.Params(kwd)
     message = util.restore_text(params.get('message', ''))
     status = params.get('status', 'done')
     request_type_id = kwd.get('id', '')
     try:
         request_type = trans.sa_session.query(trans.model.RequestType).get(trans.security.decode_id(request_type_id))
     except:
         return invalid_id_redirect(trans, 'request_type', request_type_id, 'request type', action='browse_request_types')
     roles = trans.sa_session.query(trans.model.Role) \
                             .filter(trans.model.Role.table.c.deleted == false()) \
                             .order_by(trans.model.Role.table.c.name)
     if params.get('update_roles_button', False):
         permissions = {}
         for k, v in trans.model.RequestType.permitted_actions.items():
             in_roles = [trans.sa_session.query(trans.model.Role).get(x) for x in util.listify(params.get(k + '_in', []))]
             permissions[trans.app.security_agent.get_action(v.action)] = in_roles
         trans.app.security_agent.set_request_type_permissions(request_type, permissions)
         trans.sa_session.refresh(request_type)
         message = "Permissions updated for request type '%s'" % request_type.name
     return trans.fill_template('/admin/request_type/request_type_permissions.mako',
                                request_type=request_type,
                                roles=roles,
                                status=status,
                                message=message)
Example #13
    def list( self, trans, *args, **kwargs ):
        """ List user's pages. """
        # Handle operation
        if 'operation' in kwargs and 'id' in kwargs:
            session = trans.sa_session
            operation = kwargs['operation'].lower()
            ids = util.listify( kwargs['id'] )
            for id in ids:
                item = session.query( model.Page ).get( self.decode_id( id ) )
                if operation == "delete":
                    item.deleted = True
                if operation == "share or publish":
                    return self.sharing( trans, **kwargs )
            session.flush()

        # HACK: to prevent the insertion of an entire html document inside another
        kwargs[ 'embedded' ] = True
        # Build grid HTML.
        grid = self._page_list( trans, *args, **kwargs )

        # Build list of pages shared with user.
        shared_by_others = trans.sa_session \
            .query( model.PageUserShareAssociation ) \
            .filter_by( user=trans.get_user() ) \
            .join( model.Page.table ) \
            .filter( model.Page.deleted == false() ) \
            .order_by( desc( model.Page.update_time ) ) \
            .all()

        # Render grid wrapped in panels
        return trans.fill_template( "page/index.mako", embedded_grid=grid, shared_by_others=shared_by_others )
Example #14
def purge_datasets( app, cutoff_time, remove_from_disk, info_only=False, force_retry=False ):
    # Purges deleted datasets whose update_time is older than cutoff_time.  Files may or may
    # not be removed from disk.
    dataset_count = 0
    disk_space = 0
    start = time.time()
    if force_retry:
        datasets = app.sa_session.query( app.model.Dataset ) \
                                 .filter( and_( app.model.Dataset.table.c.deleted == true(),
                                                app.model.Dataset.table.c.purgable == true(),
                                                app.model.Dataset.table.c.update_time < cutoff_time ) )
    else:
        datasets = app.sa_session.query( app.model.Dataset ) \
                                 .filter( and_( app.model.Dataset.table.c.deleted == true(),
                                                app.model.Dataset.table.c.purgable == true(),
                                                app.model.Dataset.table.c.purged == false(),
                                                app.model.Dataset.table.c.update_time < cutoff_time ) )
    for dataset in datasets:
        file_size = dataset.file_size
        _purge_dataset( app, dataset, remove_from_disk, info_only=info_only )
        dataset_count += 1
        try:
            disk_space += file_size
        except:
            pass
    stop = time.time()
    print('Purged %d datasets' % dataset_count)
    if remove_from_disk:
        print('Freed disk space: ', disk_space)
    print("Elapsed time: ", stop - start)
    print("##########################################")
Example #15
def get_categories(app):
    """Get all categories from the database."""
    sa_session = app.model.context.current
    return sa_session.query(app.model.Category) \
                     .filter(app.model.Category.table.c.deleted == false()) \
                     .order_by(app.model.Category.table.c.name) \
                     .all()
Example #16
def get_user_address( user, short_desc ):
    return gx_context().query( galaxy.model.UserAddress ) \
                       .filter( and_( galaxy.model.UserAddress.table.c.user_id == user.id,
                                      galaxy.model.UserAddress.table.c.desc == short_desc,
                                      galaxy.model.UserAddress.table.c.deleted == false() ) ) \
                       .order_by( desc( galaxy.model.UserAddress.table.c.create_time ) ) \
                       .first()
Example #17
    def define_tables(cls, metadata):

        Table('owners', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(30)))

        Table('categories', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(20)))

        Table('tests', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('owner_id', Integer, ForeignKey('owners.id'),
                     nullable=False),
              Column('category_id', Integer, ForeignKey('categories.id'),
                     nullable=False))

        Table('options', metadata,
              Column('test_id', Integer, ForeignKey('tests.id'),
                     primary_key=True),
              Column('owner_id', Integer, ForeignKey('owners.id'),
                     primary_key=True),
              Column('someoption', sa.Boolean, server_default=sa.false(),
                     nullable=False))
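Using server_default=sa.false() (as opposed to default=False) puts the default into the table's DDL, so rows inserted outside the ORM also get a value. A standalone sketch showing the emitted CREATE TABLE with the generic dialect (hypothetical table name):

import sqlalchemy as sa
from sqlalchemy.schema import CreateTable

metadata = sa.MetaData()
options_demo = sa.Table(
    'options_demo', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('someoption', sa.Boolean, server_default=sa.false(), nullable=False),
)

# The DDL carries a DEFAULT clause for someoption; a plain default=False would not.
print(CreateTable(options_demo))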
Example #18
  def _get_tasks_in_cycle(model):
    """Filter tasks with particular statuses and cycle.

    Filtering tasks with statuses "Assigned", "InProgress" and "Finished".
    Where the task is in current users cycle.
    """
    task_query = db.session.query(
        model.id.label('id'),
        literal(model.__name__).label('type'),
        literal(None).label('context_id'),
    ).join(
        Cycle,
        Cycle.id == model.cycle_id
    ).filter(
        Cycle.is_current == true(),
        model.contact_id == contact_id
    )
    return task_query.filter(
        Cycle.is_verification_needed == true(),
        model.status.in_([
            all_models.CycleTaskGroupObjectTask.ASSIGNED,
            all_models.CycleTaskGroupObjectTask.IN_PROGRESS,
            all_models.CycleTaskGroupObjectTask.FINISHED,
            all_models.CycleTaskGroupObjectTask.DECLINED,
        ])
    ).union_all(
        task_query.filter(
            Cycle.is_verification_needed == false(),
            model.status.in_([
                all_models.CycleTaskGroupObjectTask.ASSIGNED,
                all_models.CycleTaskGroupObjectTask.IN_PROGRESS,
            ])
        )
    )
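The union_all() above builds one result set from two differently filtered copies of the same base query, split on the is_verification_needed flag. A standalone Core sketch of that shape (hypothetical table and status strings, assuming SQLAlchemy 1.4+ select() style):

from sqlalchemy import column, false, select, table, true, union_all

cycle_tasks = table('cycle_tasks', column('id'), column('status'),
                    column('is_verification_needed'))

verified = select(cycle_tasks.c.id).where(
    cycle_tasks.c.is_verification_needed == true(),
    cycle_tasks.c.status.in_(['Assigned', 'In Progress', 'Finished']),
)
unverified = select(cycle_tasks.c.id).where(
    cycle_tasks.c.is_verification_needed == false(),
    cycle_tasks.c.status.in_(['Assigned', 'In Progress']),
)
print(union_all(verified, unverified))  # two SELECTs over cycle_tasks combined with UNION ALL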
Example #19
 def index( self, trans, deleted='False', f_email=None, **kwd ):
     """
     GET /api/users
     GET /api/users/deleted
     Displays a collection (list) of users.
     """
     rval = []
     query = trans.sa_session.query( trans.app.model.User )
     deleted = util.string_as_bool( deleted )
     if f_email:
         query = query.filter(trans.app.model.User.email.like("%%%s%%" % f_email))
     if deleted:
         query = query.filter( trans.app.model.User.table.c.deleted == true() )
         # only admins can see deleted users
         if not trans.user_is_admin():
             return []
     else:
         query = query.filter( trans.app.model.User.table.c.deleted == false() )
         # special case: user can see only their own user
         # special case2: if the galaxy admin has specified that other user email/names are
         #   exposed, we don't want special case #1
         if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
             item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
             return [item]
     for user in query:
         item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
         # If NOT configured to expose_email, do not expose email UNLESS the user is self, or
         # the user is an admin
         if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin():
             del item['username']
         if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin():
             del item['email']
         # TODO: move into api_values
         rval.append( item )
     return rval
Example #20
def purge_libraries( app, cutoff_time, remove_from_disk, info_only=False, force_retry=False ):
    # Purges deleted libraries whose update_time is older than the cutoff_time.
    # The dataset associations of each library are also marked as deleted.
    # The Purge Dataset method will purge each Dataset as necessary
    # library.purged == True simply means that it can no longer be undeleted
    # i.e. all associated LibraryDatasets/folders are marked as deleted
    library_count = 0
    start = time.time()
    if force_retry:
        libraries = app.sa_session.query( app.model.Library ) \
                                  .filter( and_( app.model.Library.table.c.deleted == true(),
                                                 app.model.Library.table.c.update_time < cutoff_time ) )
    else:
        libraries = app.sa_session.query( app.model.Library ) \
                                  .filter( and_( app.model.Library.table.c.deleted == true(),
                                                 app.model.Library.table.c.purged == false(),
                                                 app.model.Library.table.c.update_time < cutoff_time ) )
    for library in libraries:
        _purge_folder( library.root_folder, app, remove_from_disk, info_only=info_only )
        if not info_only:
            print "Purging library id ", library.id
            library.purged = True
            app.sa_session.add( library )
            app.sa_session.flush()
        library_count += 1
    stop = time.time()
    print('# Purged %d libraries.' % library_count)
    print("Elapsed time: ", stop - start)
    print("##########################################")
Example #21
    def show(self, trans, id, deleted='False', **kwd):
        """
        show( trans, id, deleted='False' )
        * GET /api/histories/{id}:
            return the history with ``id``
        * GET /api/histories/deleted/{id}:
            return the deleted history with ``id``
        * GET /api/histories/most_recently_used:
            return the most recently used history

        :type   id:      an encoded id string
        :param  id:      the encoded id of the history to query or the string 'most_recently_used'
        :type   deleted: boolean
        :param  deleted: if True, allow information on a deleted history to be shown.

        :param  keys: same as the use of `keys` in the `index` function above
        :param  view: same as the use of `view` in the `index` function above

        :rtype:     dictionary
        :returns:   detailed history information
        """
        history_id = id
        deleted = string_as_bool(deleted)

        if history_id == "most_recently_used":
            history = self.history_manager.most_recent(trans.user,
                filters=(self.app.model.History.deleted == false()), current_history=trans.history)
        else:
            history = self.history_manager.get_accessible(self.decode_id(history_id), trans.user, current_history=trans.history)

        return self.history_serializer.serialize_to_view(history,
            user=trans.user, trans=trans, **self._parse_serialization_params(kwd, 'detailed'))
Example #22
 def build_initial_query(self, trans, **kwd):
     user_id = trans.security.decode_id(kwd['id'])
     return trans.sa_session.query(model.RepositoryReview) \
                            .filter(and_(model.RepositoryReview.table.c.deleted == false(),
                                         model.RepositoryReview.table.c.user_id == user_id)) \
                            .join((model.Repository.table, model.RepositoryReview.table.c.repository_id == model.Repository.table.c.id)) \
                            .filter(model.Repository.table.c.deprecated == false())
Example #23
def delete_userless_histories(app, cutoff_time, info_only=False, force_retry=False):
    # Deletes userless histories whose update_time value is older than the cutoff_time.
    # The purge history script will handle marking DatasetInstances as deleted.
    # Nothing is removed from disk yet.
    history_count = 0
    start = time.time()
    if force_retry:
        histories = app.sa_session.query(app.model.History) \
                                  .filter(and_(app.model.History.table.c.user_id == null(),
                                               app.model.History.table.c.update_time < cutoff_time))
    else:
        histories = app.sa_session.query(app.model.History) \
                                  .filter(and_(app.model.History.table.c.user_id == null(),
                                               app.model.History.table.c.deleted == false(),
                                               app.model.History.table.c.update_time < cutoff_time))
    for history in histories:
        if not info_only:
            print("Deleting history id ", history.id)
            history.deleted = True
            app.sa_session.add(history)
            app.sa_session.flush()
        history_count += 1
    stop = time.time()
    print("Deleted %d histories" % history_count)
    print("Elapsed time: ", stop - start)
    print("##########################################")
Example #24
 def apply_query_filter( self, trans, query, **kwargs ):
     if self.available_tracks is None:
         self.available_tracks = trans.app.datatypes_registry.get_available_tracks()
     return query.filter( model.HistoryDatasetAssociation.extension.in_(self.available_tracks) ) \
                 .filter( model.Dataset.state == model.Dataset.states.OK ) \
                 .filter( model.HistoryDatasetAssociation.deleted == false() ) \
                 .filter( model.HistoryDatasetAssociation.visible == true() )
Example #25
  def test_move_notif_to_history(self, mocked_send_email):
    """Tests moving notifications to history table."""
    # pylint: disable=unused-argument
    # pylint: disable=unused-variable
    date_time = "2018-06-10 16:55:15"
    with freeze_time(date_time):
      _, workflow = self.wf_generator.generate_workflow(
          self.one_time_workflow)

      _, cycle = self.wf_generator.generate_cycle(workflow)
      self.wf_generator.activate_workflow(workflow)

      notif_to_be_sent_ids = db.session.query(Notification.id).filter(and_(
          Notification.sent_at == None,  # noqa
          Notification.send_on == date.today(),
          Notification.repeating == false()
      )).all()

      self.assertEqual(db.session.query(Notification).count(), 5)
      self.assertEqual(db.session.query(NotificationHistory).count(), 0)

      with freeze_time(date_time):
        common.send_daily_digest_notifications()

      notif_count = db.session.query(Notification).filter(
          Notification.id.in_(notif_to_be_sent_ids)
      ).count()

      notif_history_count = db.session.query(NotificationHistory).count()

      self.assertEqual(notif_count, 0)
      self.assertEqual(notif_history_count, len(notif_to_be_sent_ids))
Example #26
def upgrade():
    op.add_column('aim_l3outsides', sa.Column('bgp_enable', sa.Boolean(),
                                              server_default=sa.false(),
                                              nullable=False))
    op.add_column('aim_external_subnets',
                  sa.Column('aggregate', sa.String(64), server_default="",
                            nullable=False))
    op.add_column('aim_external_subnets',
                  sa.Column('scope', sa.String(64),
                            server_default="import-security", nullable=False))
    op.create_table(
        'aim_l3out_interface_bgp_peer_prefix',
        sa.Column('aim_id', sa.Integer, autoincrement=True),
        sa.Column('tenant_name', sa.String(64), nullable=False),
        sa.Column('l3out_name', sa.String(64), nullable=False),
        sa.Column('node_profile_name', sa.String(64), nullable=False),
        sa.Column('interface_profile_name', sa.String(64), nullable=False),
        sa.Column('interface_path', VARCHAR(512, charset='latin1'),
                  nullable=False),
        sa.Column('addr', sa.String(64), nullable=False),
        sa.Column('asn', sa.Integer),
        sa.Column('local_asn', sa.Integer),
        sa.Column('monitored', sa.Boolean, nullable=False, default=False),
        sa.PrimaryKeyConstraint('aim_id'),
        sa.UniqueConstraint('tenant_name', 'l3out_name', 'node_profile_name',
                            'interface_profile_name', 'interface_path',
                            'addr',
                            name='uniq_aim_l3out_interface_bgp_peer_pfx_id'),
        sa.Index('uniq_aim_l3out_interface_bgp_peer_pfx_idx', 'tenant_name',
                 'l3out_name', 'node_profile_name',
                 'interface_profile_name', 'interface_path', 'addr'))
Example #27
def determine_fetches(db_session, cred):
    for thread in db_session.query(Thread).filter_by(closed=False):
        update_thread_status(thread, cred)
    db_session.flush()
    incomplete_page_ids = (
        sa.select([ThreadPost.page_id])
        .group_by(ThreadPost.page_id)
        .having(sa.func.count(ThreadPost.id) < 40)
        .as_scalar()
    )
    incomplete_pages = sa.select(
        [ThreadPage.thread_id, ThreadPage.page_num], from_obj=sa.join(ThreadPage, Thread)
    ).where(sa.and_(ThreadPage.id.in_(incomplete_page_ids), Thread.closed == sa.false()))
    fetch_status = (
        sa.select(
            [ThreadPage.thread_id.label("thread_id"), sa.func.max(ThreadPage.page_num).label("last_fetched_page")]
        )
        .group_by(ThreadPage.thread_id)
        .alias("fetch_status")
    )
    unfetched_pages = sa.select(
        [
            Thread.id.label("thread_id"),
            sa.func.generate_series(fetch_status.c.last_fetched_page + 1, Thread.page_count).label("page_num"),
        ],
        from_obj=sa.join(Thread, fetch_status, Thread.id == fetch_status.c.thread_id),
    )
    fetched_first_pages = sa.select([ThreadPage.thread_id]).where(ThreadPage.page_num == 1).as_scalar()
    unfetched_first_pages = sa.select(
        [Thread.id.label("thread_id"), sa.literal(1, sa.Integer).label("page_num")], from_obj=Thread
    ).where(Thread.id.notin_(fetched_first_pages))
    q = sa.union(incomplete_pages, unfetched_pages, unfetched_first_pages)
    q = q.order_by(q.c.thread_id.asc(), q.c.page_num.asc())
    return db_session.execute(q).fetchall()
Example #28
  def _get_revision_type_query(model, permission_type):
    """Filter model based on availability of related objects.

    This method is used only when querying revisions. In that case only
    revisions of objects the user has the given permission on should be
    returned. That means the user must have the permission either on the
    object the revision belongs to or, for a revision of a relationship, on
    at least one side of the relationship.
    """
    allowed_resources = permissions.all_resources(permission_type)
    if not allowed_resources:
      return sa.false()

    return sa.or_(
        sa.tuple_(
            model.resource_type,
            model.resource_id,
        ).in_(
            allowed_resources,
        ),
        sa.tuple_(
            model.source_type,
            model.source_id,
        ).in_(
            allowed_resources,
        ),
        sa.tuple_(
            model.destination_type,
            model.destination_id,
        ).in_(
            allowed_resources,
        ),
    )
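sa.tuple_(...) lets the permission check compare a composite (type, id) pair against the whole list of allowed resources in a single IN clause. A standalone sketch (hypothetical column names and resource pairs):

import sqlalchemy as sa

resource_type = sa.column('resource_type')
resource_id = sa.column('resource_id')
allowed_resources = [('Program', 1), ('Audit', 7)]  # hypothetical (type, id) pairs

clause = sa.tuple_(resource_type, resource_id).in_(allowed_resources)
print(clause)  # a composite (resource_type, resource_id) IN (...) test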
Example #29
    def deleted_histories( self, trans, **kwd ):
        """
        The number of histories that were deleted more than the specified number of days ago, but have not yet been purged.
        Also included is the number of datasets associated with the histories.
        """
        params = util.Params( kwd )
        message = ''
        if params.deleted_histories_days:
            deleted_histories_days = int( params.deleted_histories_days )
            cutoff_time = datetime.utcnow() - timedelta( days=deleted_histories_days )
            history_count = 0
            dataset_count = 0
            disk_space = 0
            histories = trans.sa_session.query( model.History ) \
                .filter( and_( model.History.table.c.deleted == true(),
                    model.History.table.c.purged == false(),
                    model.History.table.c.update_time < cutoff_time ) ) \
                .options( eagerload( 'datasets' ) )

            for history in histories:
                for hda in history.datasets:
                    if not hda.dataset.purged:
                        dataset_count += 1
                        try:
                            disk_space += hda.dataset.file_size
                        except:
                            pass
                history_count += 1
            message = "%d histories ( including a total of %d datasets ) were deleted more than %d days ago, but have not yet been purged, " \
                "disk space: %s." % ( history_count, dataset_count, deleted_histories_days, nice_size( disk_space, True ) )
        else:
            deleted_histories_days = 0
            message = "Enter the number of days."
        return str( deleted_histories_days ), message
Example #30
    def list( self, trans, *args, **kwargs ):

        # Handle operation
        if 'operation' in kwargs and 'id' in kwargs:
            session = trans.sa_session
            operation = kwargs['operation'].lower()
            ids = util.listify( kwargs['id'] )
            for id in ids:
                item = session.query( model.Visualization ).get( self.decode_id( id ) )
                if operation == "delete":
                    item.deleted = True
                if operation == "share or publish":
                    return self.sharing( trans, **kwargs )
                if operation == "copy":
                    self.copy( trans, **kwargs )
            session.flush()

        # Build list of visualizations shared with user.
        shared_by_others = trans.sa_session \
            .query( model.VisualizationUserShareAssociation ) \
            .filter_by( user=trans.get_user() ) \
            .join( model.Visualization.table ) \
            .filter( model.Visualization.deleted == false() ) \
            .order_by( desc( model.Visualization.update_time ) ) \
            .all()

        kwargs[ 'embedded' ] = True
        grid = self._user_list_grid( trans, *args, **kwargs )
        return trans.fill_template( "visualization/list.mako", embedded_grid=grid, shared_by_others=shared_by_others )
Example #31
    def test_orm_filtering(self):
        user2 = self.user_manager.create(**user2_data)
        history = self.history_manager.create(name='history', user=user2)
        contents = []
        contents.extend([
            self.add_hda_to_history(history, name=('hda-' + str(x)))
            for x in range(3)
        ])
        contents.append(
            self.add_list_collection_to_history(history, contents[:3]))
        contents.extend([
            self.add_hda_to_history(history, name=('hda-' + str(x)))
            for x in range(4, 6)
        ])
        contents.append(
            self.add_list_collection_to_history(history, contents[4:6]))

        self.log("should allow filter on deleted")
        self.hda_manager.delete(contents[1])
        self.hda_manager.delete(contents[4])
        contents[6].deleted = True
        deleted = [contents[1], contents[4], contents[6]]
        self.app.model.context.flush()

        # TODO: cross db compat?
        filters = [
            parsed_filter(filter_type="orm", filter=text('deleted = 1'))
        ]
        self.assertEqual(
            self.contents_manager.contents(history, filters=filters), deleted)

        # note: SQLAlchemy can use the first model in the union (HDA) for columns across the union
        HDA = self.hda_manager.model_class
        self.assertEqual(
            self.contents_manager.contents(
                history, filters=[parsed_filter("orm",
                                                HDA.deleted == true())]),
            deleted)
        filter_limited_contents = self.contents_manager.contents(
            history,
            filters=[parsed_filter("orm", HDA.deleted == true())],
            limit=2,
            offset=1)
        self.assertEqual(filter_limited_contents, deleted[1:])

        self.log("should allow filter on visible")
        contents[2].visible = False
        contents[5].visible = False
        contents[6].visible = False
        invisible = [contents[2], contents[5], contents[6]]
        self.app.model.context.flush()

        filters = [parsed_filter("orm", text('visible = 0'))]
        self.assertEqual(
            self.contents_manager.contents(history, filters=filters),
            invisible)
        self.assertEqual(
            self.contents_manager.contents(
                history,
                filters=[parsed_filter("orm", HDA.visible == false())]),
            invisible)
        filter_limited_contents = self.contents_manager.contents(
            history,
            filters=[parsed_filter("orm", HDA.visible == false())],
            limit=2,
            offset=1)
        self.assertEqual(filter_limited_contents, invisible[1:])

        self.log("should allow filtering more than one attribute")
        deleted_and_invisible = [contents[6]]

        filters = [
            parsed_filter("orm", text('deleted = 1')),
            parsed_filter("orm", text('visible = 0'))
        ]
        self.assertEqual(
            self.contents_manager.contents(history, filters=filters),
            deleted_and_invisible)
        self.assertEqual(
            self.contents_manager.contents(
                history,
                filters=[
                    parsed_filter("orm", HDA.deleted == true()),
                    parsed_filter("orm", HDA.visible == false())
                ]), deleted_and_invisible)
        offset_too_far = self.contents_manager.contents(
            history,
            filters=[
                parsed_filter("orm", HDA.deleted == true()),
                parsed_filter("orm", HDA.visible == false())
            ],
            limit=2,
            offset=1)
        self.assertEqual(offset_too_far, [])

        self.log("should allow filtering more than one attribute")
        deleted_and_invisible = [contents[6]]
        # note the two syntaxes both work
        self.assertEqual(
            self.contents_manager.contents(
                history,
                filters=[
                    parsed_filter("orm", text('deleted = 1')),
                    parsed_filter("orm", text('visible = 0'))
                ]), deleted_and_invisible)
        self.assertEqual(
            self.contents_manager.contents(
                history,
                filters=[
                    parsed_filter("orm", HDA.deleted == true()),
                    parsed_filter("orm", HDA.visible == false())
                ]), deleted_and_invisible)
        offset_too_far = self.contents_manager.contents(
            history,
            filters=[
                parsed_filter("orm", HDA.deleted == true()),
                parsed_filter("orm", HDA.visible == false())
            ],
            limit=2,
            offset=1)
        self.assertEqual(offset_too_far, [])

        self.log("should allow filtering using like")
        # find 'hda-4'
        self.assertEqual(
            [contents[4]],
            self.contents_manager.contents(
                history, filters=[parsed_filter("orm", HDA.name.like('%-4'))]))
        # the collections added above have the default name 'test collection'
        self.assertEqual(
            self.contents_manager.subcontainers(history),
            self.contents_manager.contents(
                history,
                filters=[parsed_filter("orm", HDA.name.like('%collect%'))]))
Example #32
    def browse_requests( self, trans, **kwd ):
        if 'operation' in kwd:
            operation = kwd['operation'].lower()
            if operation == "edit":
                return trans.response.send_redirect( web.url_for( controller='requests_common',
                                                                  action='edit_basic_request_info',
                                                                  cntrller='requests',
                                                                  **kwd ) )
            if operation == "add_samples":
                return trans.response.send_redirect( web.url_for( controller='requests_common',
                                                                  action='add_samples',
                                                                  cntrller='requests',
                                                                  **kwd ) )
            if operation == "edit_samples":
                return trans.response.send_redirect( web.url_for( controller='requests_common',
                                                                  action='edit_samples',
                                                                  cntrller='requests',
                                                                  **kwd ) )
            if operation == "view_request":
                return trans.response.send_redirect( web.url_for( controller='requests_common',
                                                                  action='view_request',
                                                                  cntrller='requests',
                                                                  **kwd ) )
            if operation == "delete":
                return trans.response.send_redirect( web.url_for( controller='requests_common',
                                                                  action='delete_request',
                                                                  cntrller='requests',
                                                                  **kwd ) )
            if operation == "undelete":
                return trans.response.send_redirect( web.url_for( controller='requests_common',
                                                                  action='undelete_request',
                                                                  cntrller='requests',
                                                                  **kwd ) )
            if operation == "view_request_history":
                return trans.response.send_redirect( web.url_for( controller='requests_common',
                                                                  action='view_request_history',
                                                                  cntrller='requests',
                                                                  **kwd ) )

        # If there are requests that have been rejected, show a message as a reminder to the user
        rejected = 0
        for request in trans.sa_session.query( trans.app.model.Request ) \
                .filter( trans.app.model.Request.table.c.deleted == false() ) \
                .filter( trans.app.model.Request.table.c.user_id == trans.user.id ):
            if request.is_rejected:
                rejected = rejected + 1
        if rejected:
            status = 'warning'
            message = "%d requests (highlighted in red) were rejected.  Click on the request name for details." % rejected
            kwd[ 'status' ] = status
            kwd[ 'message' ] = message
        # Allow the user to create a new request only if they have permission to access a request type.
        accessible_request_types = trans.app.security_agent.get_accessible_request_types( trans, trans.user )
        if accessible_request_types:
            self.request_grid.global_actions = [ grids.GridAction( "Create new request", dict( controller='requests_common',
                                                                                               action='create_request',
                                                                                               cntrller='requests' ) ) ]
        else:
            self.request_grid.global_actions = []
        # Render the list view
        return self.request_grid( trans, **kwd )
Example #33
 def get_banner(self):
     pbid = parameter_required({'pbid': '品牌唯一值缺失'}).get('pbid')
     bb_list = BrandBanner.query.filter(BrandBanner.PBid == pbid, BrandBanner.isdelete == false()).order_by(
         BrandBanner.BBsort.asc(), BrandBanner.createtime.desc()).all()
     bbs = self._fill_bb(bb_list)
     return Success(data=bbs)
Example #34
def purge_folders(app,
                  cutoff_time,
                  remove_from_disk,
                  info_only=False,
                  force_retry=False):
    # Purges deleted folders whose update_time is older than the cutoff_time.
    # The dataset associations of each folder are also marked as deleted.
    # The Purge Dataset method will purge each Dataset as necessary
    # libraryFolder.purged == True simply means that it can no longer be undeleted
    # i.e. all associated LibraryDatasets/folders are marked as deleted
    folder_count = 0
    start = time.time()
    if force_retry:
        folders = app.sa_session.query(app.model.LibraryFolder) \
                                .filter(and_(app.model.LibraryFolder.table.c.deleted == true(),
                                             app.model.LibraryFolder.table.c.update_time < cutoff_time))
    else:
        folders = app.sa_session.query(app.model.LibraryFolder) \
                                .filter(and_(app.model.LibraryFolder.table.c.deleted == true(),
                                             app.model.LibraryFolder.table.c.purged == false(),
                                             app.model.LibraryFolder.table.c.update_time < cutoff_time))
    for folder in folders:
        _purge_folder(folder, app, remove_from_disk, info_only=info_only)
        folder_count += 1
    stop = time.time()
    print('# Purged %d folders.' % folder_count)
    print("Elapsed time: ", stop - start)
    print("##########################################")
Example #35
def getPubAndAuthorsAndDocsWithOffsetAndFilter(offset, limit, filters):
    """
    Get chunk of publications with corresponding authors and documents after applying filters.
    @param offset: database table offset
    @type offset: int
    @param limit: number of publications to fetch starting at offset
    @type limit: int
    @param filters: dict of filter criteria
    @type filters: dict
    @return: enumerated dict of publications
    @rtype: dict
    """
    # default is 1024 and too small for this query
    db.session.execute("SET SESSION group_concat_max_len = 100000")

    result = {i: {**r[0].to_dict(),
                'authors': uniquifySorted(
                    [
                        {'id': a,
                             'forename': r[2].split(',')[j] if r[2] is not None else '',
                             'surname': r[3].split(',')[j] if r[3] is not None else '',
                             'cleanname': r[4].split(',')[j] if r[4] is not None else ''
                        } for j, a in enumerate(r[1].split(',') if r[1] is not None else [])
                    ]),
                'documents': uniquify(
                    [
                        {
                        'id': d,
                        'publication_id': r[6].split(';')[j] if r[6] is not None else '',
                        'visible': r[7].split(';')[j] if r[7] is not None else '',
                        'remote': r[8].split(';')[j] if r[8] is not None else '',
                        'filename': r[9].split(';')[j] if r[9] is not None else ''
                         } for j, d in enumerate(r[5].split(';') if r[5] is not None else [])
                    ]),
                'keywords': uniquify(
                    [
                        {
                            'id': d,
                            'name': r[12].split(';')[k] if r[12] is not None else ''
                        } for k, d in enumerate(r[11].split(';') if r[11] is not None else [])
                    ])
                }
               for i, r in enumerate(db.session.query(
                Publications,
                func.group_concat(func.ifnull(Authors.id, '').op("ORDER BY")(Authors_publications.position)),
                func.group_concat(func.ifnull(Authors.forename, '').op("ORDER BY")(Authors_publications.position)),
                func.group_concat(func.ifnull(Authors.surname, '').op("ORDER BY")(Authors_publications.position)),
                func.group_concat(func.ifnull(Authors.cleanname, '').op("ORDER BY")(Authors_publications.position)).label('authors'),
                func.group_concat(func.ifnull(Documents.id, '').op('SEPARATOR')(literal_column('\';\''))),
                func.group_concat(func.ifnull(Documents.publication_id, '').op('SEPARATOR')(literal_column('\';\''))),
                func.group_concat(func.ifnull(Documents.visible, '').op('SEPARATOR')(literal_column('\';\''))),
                func.group_concat(func.ifnull(Documents.remote, '').op('SEPARATOR')(literal_column('\';\''))),
                func.group_concat(func.ifnull(Documents.filename, '').op('SEPARATOR')(literal_column('\';\''))),
                func.group_concat(func.ifnull(Keywords_publication.keyword_id, '').op('SEPARATOR')(literal_column('\';\''))),
                func.group_concat(Keywords.id.op('SEPARATOR')(literal_column('\';\''))),
                func.group_concat(Keywords.name.op('SEPARATOR')(literal_column('\';\'')))
                )\
                .outerjoin(Documents, Documents.publication_id == Publications.id)\
                .outerjoin(Keywords_publication, Keywords_publication.publication_id == Publications.id)\
                .outerjoin(Keywords, Keywords_publication.keyword_id == Keywords.id) \
                .filter(Publications.id == Authors_publications.publication_id) \
                .filter(Publications.public == 1) \
                .filter(Authors.id == Authors_publications.author_id) \
                .filter(Publications.year.in_(filters['year']) if 'year' in filters else true() ) \
                .filter(Publications.type.in_(filters['type']) if 'type' in filters else true() ) \
                .filter(Keywords_publication.keyword_id.in_(filters['keyword']) if 'keyword' in filters else true()) \
                .having(or_(Publications.title.like('%' + filters['search'] + '%') if 'search' in filters else true(),
                            func.group_concat(Authors.cleanname.op("ORDER BY")(Authors_publications.position)).like('%' + filters['search'] + '%') if 'search' in filters else true(),
                            func.group_concat(Authors.forename.op("ORDER BY")(Authors_publications.position)).like('%' + filters['search'] + '%') if 'search' in filters else true(),
                            func.group_concat(Authors.surname.op("ORDER BY")(Authors_publications.position)).like('%' + filters['search'] + '%') if 'search' in filters else true(),
                            func.group_concat(Keywords.name).like(
                                '%' + filters['search'] + '%') if 'search' in filters else true(),
                            func.group_concat(Publications.journal).like(
                                '%' + filters['search'] + '%') if 'search' in filters else true(),
                            func.group_concat(Publications.booktitle).like(
                                '%' + filters['search'] + '%') if 'search' in filters else true(),
                            (Publications.year == int(filters['search']) if filters['search'].isdigit() else false()) if 'search' in filters else true()
                            )
                        )\
                # .having(func.group_concat(Authors.id).op('regexp')('(^|,)' + str(filters['author']) + '(,|$)') if 'author' in filters else true())\
                .having(or_((func.group_concat(Authors.id).op('regexp')('(^|,)' + str(a) + '(,|$)') for a in filters['author'].split(',')) if 'author' in filters else true()))
                .group_by(Publications.id)\
                .order_by(Publications.year.desc(), Publications.id.desc())
                .offset(offset)
                .limit(limit))}

    db.session.close()
    return result
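
The query above repeats the pattern "criterion if key in filters else true()": when an optional filter key is absent, the criterion collapses to a SQL TRUE and effectively drops out of the WHERE/HAVING clause. A minimal hedged sketch of that pattern in isolation (the Publications model, the session and the filters dict are illustrative, not the code above):

from sqlalchemy import true

def filtered_publications(session, Publications, filters):
    # Each optional criterion degenerates to TRUE when its key is missing,
    # so only the filters that were actually supplied constrain the query.
    return (session.query(Publications)
            .filter(Publications.public == 1)
            .filter(Publications.year.in_(filters['year']) if 'year' in filters else true())
            .filter(Publications.type.in_(filters['type']) if 'type' in filters else true())
            .order_by(Publications.year.desc())
            .all())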
Beispiel #36
0
def check_and_update_repository_metadata(app, info_only=False, verbosity=1):
    """
    This method will iterate through all records in the repository_metadata
    table, checking each one for tool metadata, then checking the tool
    metadata for tests.  Each tool's metadata should look something like:
    {
      "add_to_tool_panel": true,
      "description": "",
      "guid": "toolshed.url:9009/repos/owner/name/tool_id/1.2.3",
      "id": "tool_wrapper",
      "name": "Map with Tool Wrapper",
      "requirements": [],
      "tests": [],
      "tool_config": "database/community_files/000/repo_1/tool_wrapper.xml",
      "tool_type": "default",
      "version": "1.2.3",
      "version_string_cmd": null
    }
    If the "tests" attribute is missing or empty, this script will mark the metadata record (which is specific to a changeset revision of a repository)
    not to be tested. If each "tools" attribute has at least one valid "tests" entry, this script will do nothing, and leave it available for the install
    and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
    not to be tested.
    """
    start = time.time()
    skip_metadata_ids = []
    checked_repository_ids = []
    tool_count = 0
    has_tests = 0
    no_tests = 0
    valid_revisions = 0
    invalid_revisions = 0
    records_checked = 0
    # Do not check metadata records that have an entry in the skip_tool_tests table, since they won't be tested anyway.
    print '# -------------------------------------------------------------------------------------------'
    print '# The skip_tool_test setting has been set for the following repository revisions, so they will not be tested.'
    skip_metadata_ids = []
    for skip_tool_test in app.sa_session.query(app.model.SkipToolTest):
        print '# repository_metadata_id: %s, changeset_revision: %s' % \
            ( str( skip_tool_test.repository_metadata_id ), str( skip_tool_test.initial_changeset_revision ) )
        print 'reason: %s' % str(skip_tool_test.comment)
        skip_metadata_ids.append(skip_tool_test.repository_metadata_id)
    # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
    # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
    # because it's redundant to test a revision that a user can't install.
    for repository_metadata in app.sa_session.query( app.model.RepositoryMetadata ) \
                                             .filter( and_( app.model.RepositoryMetadata.table.c.downloadable == true(),
                                                            app.model.RepositoryMetadata.table.c.includes_tools == true(),
                                                            app.model.RepositoryMetadata.table.c.do_not_test == false() ) ):
        # Initialize some items.
        missing_test_components = []
        revision_has_test_data = False
        testable_revision = False
        repository = repository_metadata.repository
        records_checked += 1
        # Check the next repository revision.
        changeset_revision = str(repository_metadata.changeset_revision)
        name = repository.name
        owner = repository.user.username
        metadata = repository_metadata.metadata
        repository = repository_metadata.repository
        if repository.id not in checked_repository_ids:
            checked_repository_ids.append(repository.id)
        print '# -------------------------------------------------------------------------------------------'
        print '# Checking revision %s of %s owned by %s.' % (
            changeset_revision, name, owner)
        if repository_metadata.id in skip_metadata_ids:
            print '# Skipping revision %s of %s owned by %s because the skip_tool_test setting has been set.' % (
                changeset_revision, name, owner)
            continue
        # If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
        # only repositories that contain tools.
        tool_dicts = metadata.get('tools', None)
        if tool_dicts is not None:
            # Clone the repository up to the changeset revision we're checking.
            repo_dir = repository.repo_path(app)
            hg_util.get_repo_for_repository(app,
                                            repository=None,
                                            repo_path=repo_dir,
                                            create=False)
            work_dir = tempfile.mkdtemp(prefix="tmp-toolshed-cafr")
            cloned_ok, error_message = hg_util.clone_repository(
                repo_dir, work_dir, changeset_revision)
            if cloned_ok:
                # Iterate through all the directories in the cloned changeset revision and determine whether there's a
                # directory named test-data. If this directory is not present update the metadata record for the changeset
                # revision we're checking.
                for root, dirs, files in os.walk(work_dir):
                    if '.hg' in dirs:
                        dirs.remove('.hg')
                    if 'test-data' in dirs:
                        revision_has_test_data = True
                        test_data_path = os.path.join(
                            root, dirs[dirs.index('test-data')])
                        break
            if revision_has_test_data:
                print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
            else:
                print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
            print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
                ( changeset_revision, name, owner )
            # Inspect each tool_dict for defined functional tests.  If there
            # are no tests, this tool should not be tested, since the tool
            # functional tests only report failure if the test itself fails,
            # not if it's missing or undefined. Filtering out those
            # repositories at this step will reduce the number of "false
            # negatives" the automated functional test framework produces.
            for tool_dict in tool_dicts:
                failure_reason = ''
                problem_found = False
                tool_has_defined_tests = False
                tool_has_test_files = False
                missing_test_files = []
                tool_count += 1
                tool_id = tool_dict['id']
                tool_version = tool_dict['version']
                tool_guid = tool_dict['guid']
                if verbosity >= 1:
                    print "# Checking tool ID '%s' in changeset revision %s of %s." % (
                        tool_id, changeset_revision, name)
                defined_test_dicts = tool_dict.get('tests', None)
                if defined_test_dicts is not None:
                    # We need to inspect the <test> tags because the following tags...
                    # <tests>
                    # </tests>
                    # ...will produce the following metadata:
                    # "tests": []
                    # And the following tags...
                    # <tests>
                    #     <test>
                    #    </test>
                    # </tests>
                    # ...will produce the following metadata:
                    # "tests":
                    #    [{"inputs": [], "name": "Test-1", "outputs": [], "required_files": []}]
                    for defined_test_dict in defined_test_dicts:
                        inputs = defined_test_dict.get('inputs', [])
                        outputs = defined_test_dict.get('outputs', [])
                        if inputs and outputs:
                            # At least one tool within the repository has a valid <test> tag.
                            tool_has_defined_tests = True
                            break
                if tool_has_defined_tests:
                    print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
                        ( tool_id, changeset_revision, name )
                    has_tests += 1
                else:
                    print '# No functional tests defined for %s.' % tool_id
                    no_tests += 1
                if tool_has_defined_tests and revision_has_test_data:
                    missing_test_files = check_for_missing_test_files(
                        defined_test_dicts, test_data_path)
                    if missing_test_files:
                        print "# Tool id '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
                            ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
                    else:
                        tool_has_test_files = True
                if not revision_has_test_data:
                    failure_reason += 'Repository does not have a test-data directory. '
                    problem_found = True
                if not tool_has_defined_tests:
                    failure_reason += 'Functional test definitions missing for %s. ' % tool_id
                    problem_found = True
                if missing_test_files:
                    failure_reason += 'One or more test files are missing for tool %s: %s' % (
                        tool_id, ', '.join(missing_test_files))
                    problem_found = True
                test_errors = dict(tool_id=tool_id,
                                   tool_version=tool_version,
                                   tool_guid=tool_guid,
                                   missing_components=failure_reason)
                # Only append this error dict if it hasn't already been added.
                if problem_found:
                    if test_errors not in missing_test_components:
                        missing_test_components.append(test_errors)
                if tool_has_defined_tests and tool_has_test_files:
                    print '# Revision %s of %s owned by %s is a testable revision.' % (
                        changeset_revision, name, owner)
                    testable_revision = True
            # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
            if os.path.exists(work_dir):
                shutil.rmtree(work_dir)
            if not missing_test_components:
                valid_revisions += 1
                print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
            else:
                invalid_revisions += 1
                print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % (
                    changeset_revision, name, owner)
                if verbosity >= 1:
                    for missing_test_component in missing_test_components:
                        if 'missing_components' in missing_test_component:
                            print '# %s' % missing_test_component[
                                'missing_components']
            if not info_only:
                # Get or create the list of tool_test_results dictionaries.
                if repository_metadata.tool_test_results is not None:
                    # We'll listify the column value in case it uses the old approach of storing the results of only a single test run.
                    tool_test_results_dicts = listify(
                        repository_metadata.tool_test_results)
                else:
                    tool_test_results_dicts = []
                if tool_test_results_dicts:
                    # Inspect the tool_test_results_dict for the last test run in case it contains only a test_environment
                    # entry.  This will occur with multiple runs of this script without running the associated
                    # install_and_test_tool_shed_repositories.sh script which will further populate the tool_test_results_dict.
                    tool_test_results_dict = tool_test_results_dicts[0]
                    if len(tool_test_results_dict) <= 1:
                        # We can re-use the mostly empty tool_test_results_dict for this run because it is either empty or it contains only
                        # a test_environment entry.  If we use it we need to temporarily eliminate it from the list of tool_test_results_dicts
                        # since it will be re-inserted later.
                        tool_test_results_dict = tool_test_results_dicts.pop(0)
                    elif (len(tool_test_results_dict) == 2
                          and 'test_environment' in tool_test_results_dict and
                          'missing_test_components' in tool_test_results_dict):
                        # We can re-use tool_test_results_dict if its only entries are "test_environment" and "missing_test_components".
                        # In this case, some tools are missing tests components while others are not.
                        tool_test_results_dict = tool_test_results_dicts.pop(0)
                    else:
                        # The latest tool_test_results_dict has been populated with the results of a test run, so it cannot be used.
                        tool_test_results_dict = {}
                else:
                    # Create a new dictionary for the most recent test run.
                    tool_test_results_dict = {}
                test_environment_dict = tool_test_results_dict.get(
                    'test_environment', {})
                # Add the current time as the approximate time that this test run occurs.  A similar value will also be
                # set to the repository_metadata.time_last_tested column, but we also store it here because the Tool Shed
                # may be configured to store multiple test run results, so each must be associated with a time stamp.
                now = time.strftime("%Y-%m-%d %H:%M:%S")
                test_environment_dict['time_tested'] = now
                test_environment_dict[
                    'tool_shed_database_version'] = get_database_version(app)
                test_environment_dict[
                    'tool_shed_mercurial_version'] = __version__.version
                test_environment_dict[
                    'tool_shed_revision'] = get_repository_current_revision(
                        os.getcwd())
                tool_test_results_dict[
                    'test_environment'] = test_environment_dict
                # The repository_metadata.time_last_tested column is not changed by this script since no testing is performed here.
                if missing_test_components:
                    # If functional test definitions or test data are missing, set do_not_test = True if no tool with valid tests has been
                    # found in this revision, and:
                    # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
                    #    In this case, the revision will never be updated with the missing components, and re-testing it would be redundant.
                    # b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable
                    #    revision. In this case, if the repository is updated with test data or functional tests, the downloadable
                    #    changeset revision that was tested will either be replaced with the new changeset revision, or a new downloadable
                    #    changeset revision will be created, either of which will be automatically checked and flagged as appropriate.
                    #    In the install and test script, this behavior is slightly different, since we do want to always run functional
                    #    tests on the most recent downloadable changeset revision.
                    if should_set_do_not_test_flag(app, repository,
                                                   changeset_revision,
                                                   testable_revision):
                        print "# Setting do_not_test to True on revision %s of %s owned by %s because it is missing test components" % (
                            changeset_revision, name, owner)
                        print "# and it is not the latest downloadable revision."
                        repository_metadata.do_not_test = True
                    if not testable_revision:
                        # Even though some tools may be missing test components, it may be possible to test other tools.  Since the
                        # install and test framework filters out repositories marked as missing test components, we'll set it only if
                        # no tools can be tested.
                        print '# Setting missing_test_components to True for revision %s of %s owned by %s because all tools are missing test components.' % (
                            changeset_revision, name, owner)
                        repository_metadata.missing_test_components = True
                        print "# Setting tools_functionally_correct to False on revision %s of %s owned by %s because it is missing test components" % (
                            changeset_revision, name, owner)
                        repository_metadata.tools_functionally_correct = False
                    tool_test_results_dict[
                        'missing_test_components'] = missing_test_components
                # Store only the configured number of test runs.
                num_tool_test_results_saved = int(
                    app.config.num_tool_test_results_saved)
                if len(tool_test_results_dicts) >= num_tool_test_results_saved:
                    test_results_index = num_tool_test_results_saved - 1
                    new_tool_test_results_dicts = tool_test_results_dicts[:test_results_index]
                else:
                    new_tool_test_results_dicts = list(tool_test_results_dicts)
                # Insert the new element into the first position in the list.
                new_tool_test_results_dicts.insert(0, tool_test_results_dict)
                repository_metadata.tool_test_results = new_tool_test_results_dicts
                app.sa_session.add(repository_metadata)
                app.sa_session.flush()
    stop = time.time()
    print '# -------------------------------------------------------------------------------------------'
    print '# Checked %d repositories with %d tools in %d changeset revisions.' % (
        len(checked_repository_ids), tool_count, records_checked)
    print '# %d revisions found with functional tests and test data for all tools.' % valid_revisions
    print '# %d revisions found with one or more tools missing functional tests and/or test data.' % invalid_revisions
    print '# Found %d tools without functional tests.' % no_tests
    print '# Found %d tools with functional tests.' % has_tests
    if info_only:
        print '# Database not updated, info_only set.'
    print "# Elapsed time: ", stop - start
    print "#############################################################################"
Beispiel #37
0
 def apply_query_filter(self, trans, query, **kwargs):
     return query.filter(self.model_class.deleted == false()).filter(
         self.model_class.published == true())
Beispiel #38
0
 def index(self, trans, **kwd):
     """
     GET /api/groups
     Displays a collection (list) of groups.
     """
     rval = []
     for group in trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.deleted == false()):
         if trans.user_is_admin():
             item = group.to_dict(value_mapper={'id': trans.security.encode_id})
             encoded_id = trans.security.encode_id(group.id)
             item['url'] = url_for('group', id=encoded_id)
             rval.append(item)
     return rval
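
In the listing above the admin check runs once per group even though it does not depend on the group. A hedged variant that short-circuits for non-admins before touching the database, assuming the same helpers (trans, false(), url_for) as the original method; the standalone function name is illustrative:

def index_groups(trans):
    # Hedged variant: bail out early for non-admins instead of checking
    # inside the loop; otherwise identical to the method above.
    if not trans.user_is_admin():
        return []
    rval = []
    query = trans.sa_session.query(trans.app.model.Group).filter(
        trans.app.model.Group.table.c.deleted == false())
    for group in query:
        item = group.to_dict(value_mapper={'id': trans.security.encode_id})
        item['url'] = url_for('group', id=trans.security.encode_id(group.id))
        rval.append(item)
    return rval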
Beispiel #39
0
def downgrade():
    notification = alembic.op.create_table(
        "notification",
        sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
        sqlalchemy.Column("message", sqlalchemy.Text(collation="en_US.utf8")),
        sqlalchemy.Column("channel", sqlalchemy.Text(collation="en_US.utf8")),
        sqlalchemy.Column("subuser", sqlalchemy.Text(collation="en_US.utf8")),
        sqlalchemy.Column("useravatar",
                          sqlalchemy.Text(collation="en_US.utf8")),
        sqlalchemy.Column("eventtime",
                          sqlalchemy.DateTime(timezone=True),
                          nullable=True),
        sqlalchemy.Column("monthcount", sqlalchemy.Integer, nullable=True),
        sqlalchemy.Column("test",
                          sqlalchemy.Boolean,
                          nullable=False,
                          server_default=sqlalchemy.false()),
    )
    alembic.op.create_index("notification_idx1", "notification", ["eventtime"])

    conn = alembic.context.get_context().bind
    meta = sqlalchemy.MetaData(bind=conn)
    meta.reflect()
    events = meta.tables['events']

    all_events = conn.execute(
        sqlalchemy.select([
            events.c.event, events.c.data, events.c.time
        ]).where(events.c.event.in_(
            {'twitch-subscription', 'twitch-resubscription', 'twitch-message'})))
    notifications = []
    for event, data, time in all_events:
        if event in {'twitch-subscription', 'twitch-resubscription'}:
            message = '%(name)s just subscribed!' % data
            if data['monthcount'] is not None:
                message += ' %(monthcount)d months in a row!' % data
            notifications.append({
                'message':
                message,
                'channel':
                alembic.context.config.get_section_option(
                    "lrrbot", "channel", "loadingreadyrun"),
                'subuser':
                data['name'],
                'useravatar':
                data['avatar'],
                'eventtime':
                time,
                'monthcount':
                data['monthcount'],
                'test':
                False,
            })
        elif event == 'twitch-message':
            notifications.append({
                'message':
                data['message'],
                'channel':
                alembic.context.config.get_section_option(
                    "lrrbot", "channel", "loadingreadyrun"),
                'subuser':
                None,
                'useravatar':
                None,
                'eventtime':
                time,
                'monthcount':
                None,
                'test':
                False,
            })
    alembic.op.bulk_insert(notification, notifications)

    alembic.op.drop_table('events')
Beispiel #40
0
def administrative_delete_datasets(app, cutoff_time, cutoff_days,
                                   tool_id, template_file,
                                   config, email_only=False,
                                   info_only=False):
    # Marks dataset history association deleted and email users
    start = time.time()
    # Get HDAs older than cutoff time (ignore tool_id at this point)
    # We really only need the id column here, but sqlalchemy barfs when
    # trying to select only 1 column
    hda_ids_query = sa.select(
        (app.model.HistoryDatasetAssociation.table.c.id,
         app.model.HistoryDatasetAssociation.table.c.deleted),
        whereclause=and_(
            app.model.Dataset.table.c.deleted == false(),
            app.model.HistoryDatasetAssociation.table.c.update_time
            < cutoff_time,
            app.model.HistoryDatasetAssociation.table.c.deleted == false()),
        from_obj=[sa.outerjoin(
                  app.model.Dataset.table,
                  app.model.HistoryDatasetAssociation.table)])

    # Add all datasets associated with Histories to our list
    hda_ids = []
    hda_ids.extend(
        [row.id for row in hda_ids_query.execute()])

    # Now find the tool_id that generated the dataset (even if it was copied)
    tool_matched_ids = []
    if tool_id is not None:
        for hda_id in hda_ids:
            this_tool_id = _get_tool_id_for_hda(app, hda_id)
            if this_tool_id is not None and tool_id in this_tool_id:
                tool_matched_ids.append(hda_id)
        hda_ids = tool_matched_ids

    deleted_instance_count = 0
    user_notifications = defaultdict(list)

    # Process each of the Dataset objects
    for hda_id in hda_ids:
        user_query = sa.select(
            [app.model.HistoryDatasetAssociation.table,
             app.model.History.table,
             app.model.User.table],
            whereclause=and_(
                app.model.HistoryDatasetAssociation.table.c.id == hda_id),
            from_obj=[sa.join(app.model.User.table,
                              app.model.History.table)
                      .join(app.model.HistoryDatasetAssociation.table)],
            use_labels=True)
        for result in user_query.execute():
            user_notifications[result[app.model.User.table.c.email]].append(
                (result[app.model.HistoryDatasetAssociation.table.c.name],
                 result[app.model.History.table.c.name]))
            deleted_instance_count += 1
            if not info_only and not email_only:
                # Get the HistoryDatasetAssociation objects
                hda = app.sa_session.query(
                    app.model.HistoryDatasetAssociation).get(hda_id)
                if not hda.deleted:
                    # Mark the HistoryDatasetAssociation as deleted
                    hda.deleted = True
                    app.sa_session.add(hda)
                    print ("Marked HistoryDatasetAssociation id %d as "
                           "deleted" % hda.id)
                app.sa_session.flush()

    emailtemplate = Template(filename=template_file)
    for (email, dataset_list) in user_notifications.iteritems():
        msgtext = emailtemplate.render(email=email,
                                       datasets=dataset_list,
                                       cutoff=cutoff_days)
        subject = "Galaxy Server Cleanup " \
            "- %d datasets DELETED" % len(dataset_list)
        fromaddr = config.email_from
        print ""
        print "From: %s" % fromaddr
        print "To: %s" % email
        print "Subject: %s" % subject
        print "----------"
        print msgtext
        if not info_only:
            galaxy.util.send_mail(fromaddr, email, subject,
                                  msgtext, config)

    stop = time.time()
    print ""
    print "Marked %d dataset instances as deleted" % deleted_instance_count
    print "Total elapsed time: ", stop - start
    print "##########################################"
Beispiel #41
0
def delete_datasets(app,
                    cutoff_time,
                    remove_from_disk,
                    info_only=False,
                    force_retry=False):
    # Marks datasets as deleted if associated items are all deleted.
    start = time.time()
    if force_retry:
        history_dataset_ids_query = sa.select(
            (app.model.Dataset.table.c.id, app.model.Dataset.table.c.state),
            whereclause=app.model.HistoryDatasetAssociation.table.c.update_time
            < cutoff_time,
            from_obj=[
                sa.outerjoin(app.model.Dataset.table,
                             app.model.HistoryDatasetAssociation.table)
            ])
        library_dataset_ids_query = sa.select(
            (app.model.LibraryDataset.table.c.id,
             app.model.LibraryDataset.table.c.deleted),
            whereclause=app.model.LibraryDataset.table.c.update_time <
            cutoff_time,
            from_obj=[app.model.LibraryDataset.table])
    else:
        # We really only need the id column here, but sqlalchemy barfs when trying to select only 1 column
        history_dataset_ids_query = sa.select(
            (app.model.Dataset.table.c.id, app.model.Dataset.table.c.state),
            whereclause=and_(
                app.model.Dataset.table.c.deleted == false(),
                app.model.HistoryDatasetAssociation.table.c.update_time <
                cutoff_time,
                app.model.HistoryDatasetAssociation.table.c.deleted == true()),
            from_obj=[
                sa.outerjoin(app.model.Dataset.table,
                             app.model.HistoryDatasetAssociation.table)
            ])
        library_dataset_ids_query = sa.select(
            (app.model.LibraryDataset.table.c.id,
             app.model.LibraryDataset.table.c.deleted),
            whereclause=and_(
                app.model.LibraryDataset.table.c.deleted == true(),
                app.model.LibraryDataset.table.c.purged == false(),
                app.model.LibraryDataset.table.c.update_time < cutoff_time),
            from_obj=[app.model.LibraryDataset.table])
    deleted_dataset_count = 0
    deleted_instance_count = 0
    skip = []
    # Handle library datasets.  This is a bit tricky, so here's some clarification.  We have a list of all
    # LibraryDatasets that were marked deleted before our cutoff_time, but have not yet been marked purged.
    # A LibraryDataset object is marked purged when all of its LibraryDatasetDatasetAssociations have been
    # marked deleted.  When a LibraryDataset has been marked purged, it can never be undeleted in the data
    # library.  We have several steps to complete here.  For each LibraryDataset, get its associated Dataset
    # and add it to our accrued list of Datasets for later processing.  We mark all of its
    # LibraryDatasetDatasetAssociations as deleted.  Then we mark the LibraryDataset as purged.  We then process our
    # list of Datasets.
    library_dataset_ids = [
        row.id for row in library_dataset_ids_query.execute()
    ]
    dataset_ids = []
    for library_dataset_id in library_dataset_ids:
        print("######### Processing LibraryDataset id:", library_dataset_id)
        # Get the LibraryDataset and the current LibraryDatasetDatasetAssociation objects
        ld = app.sa_session.query(
            app.model.LibraryDataset).get(library_dataset_id)
        ldda = ld.library_dataset_dataset_association
        # Append the associated Dataset object's id to our list of dataset_ids
        dataset_ids.append(ldda.dataset_id)
        # Mark all of the LibraryDataset's associated LibraryDatasetDatasetAssociation objects' as deleted
        if not ldda.deleted:
            ldda.deleted = True
            app.sa_session.add(ldda)
            print(
                "Marked associated LibraryDatasetDatasetAssociation id %d as deleted"
                % ldda.id)
        for expired_ldda in ld.expired_datasets:
            if not expired_ldda.deleted:
                expired_ldda.deleted = True
                app.sa_session.add(expired_ldda)
                print(
                    "Marked associated expired LibraryDatasetDatasetAssociation id %d as deleted"
                    % expired_ldda.id)
        # Mark the LibraryDataset as purged
        ld.purged = True
        app.sa_session.add(ld)
        print("Marked LibraryDataset id %d as purged" % ld.id)
        app.sa_session.flush()
    # Add all datasets associated with Histories to our list
    dataset_ids.extend([row.id for row in history_dataset_ids_query.execute()])
    # Process each of the Dataset objects
    for dataset_id in dataset_ids:
        dataset = app.sa_session.query(app.model.Dataset).get(dataset_id)
        if dataset.id in skip:
            continue
        skip.append(dataset.id)
        print("######### Processing dataset id:", dataset_id)
        if not _dataset_is_deletable(dataset):
            print(
                "Dataset is not deletable (shared between multiple histories/libraries, at least one is not deleted)"
            )
            continue
        deleted_dataset_count += 1
        for dataset_instance in dataset.history_associations + dataset.library_associations:
            # Mark each associated HDA as deleted
            _purge_dataset_instance(dataset_instance,
                                    app,
                                    remove_from_disk,
                                    include_children=True,
                                    info_only=info_only,
                                    is_deletable=True)
            deleted_instance_count += 1
    stop = time.time()
    print(
        "Examined %d datasets, marked %d datasets and %d dataset instances (HDA) as deleted"
        % (len(skip), deleted_dataset_count, deleted_instance_count))
    print("Total elapsed time: ", stop - start)
    print("##########################################")
Beispiel #42
0
 def get_recommend_product(self):
     pbid = parameter_required({'pbid': '品牌已下架'}).get('pbid')
     ProductBrand.query.filter(ProductBrand.PBid == pbid, ProductBrand.isdelete == false()).first_('品牌已下架')
     pb_list = self._recommend_pb_product(pbid).all_with_page()
     return Success(data=pb_list)
Beispiel #43
0
def handle_email_alerts(app,
                        host,
                        repository,
                        content_alert_str='',
                        new_repo_alert=False,
                        admin_only=False):
    """
    There are 2 complementary features that enable a tool shed user to receive email notification:
    1. Within User Preferences, they can elect to receive email when the first (or first valid)
       change set is produced for a new repository.
    2. When viewing or managing a repository, they can check the box labeled "Receive email alerts"
       which causes them to receive email alerts when updates to the repository occur.  This same feature
       is available on a per-repository basis on the repository grid within the tool shed.

    There are currently 4 scenarios for sending email notification when a change is made to a repository:
    1. An admin user elects to receive email when the first change set is produced for a new repository
       from User Preferences.  The change set does not have to include any valid content.  This allows for
       the capture of inappropriate content being uploaded to new repositories.
    2. A regular user elects to receive email when the first valid change set is produced for a new repository
       from User Preferences.  This differs from 1 above in that the user will not receive email until a
       change set that includes valid content is produced.
    3. An admin user checks the "Receive email alerts" check box on the manage repository page.  Since the
       user is an admin user, the email will include information about both HTML and image content that was
       included in the change set.
    4. A regular user checks the "Receive email alerts" check box on the manage repository page.  Since the
       user is not an admin user, the email will not include any information about HTML or image content
       that was included in the change set.
    """
    sa_session = app.model.context.current
    repo = hg_util.get_repo_for_repository(app,
                                           repository=repository,
                                           repo_path=None,
                                           create=False)
    sharable_link = repository_util.generate_sharable_link_for_repository_in_tool_shed(
        repository, changeset_revision=None)
    smtp_server = app.config.smtp_server
    if smtp_server and (new_repo_alert or repository.email_alerts):
        # Send email alert to users that want them.
        if app.config.email_from is not None:
            email_from = app.config.email_from
        elif host.split(':')[0] in ['localhost', '127.0.0.1', '0.0.0.0']:
            email_from = 'galaxy-no-reply@' + socket.getfqdn()
        else:
            email_from = 'galaxy-no-reply@' + host.split(':')[0]
        tip_changeset = repo.changelog.tip()
        ctx = repo.changectx(tip_changeset)
        try:
            username = ctx.user().split()[0]
        except:
            username = ctx.user()
        # We'll use 2 template bodies because we only want to send content
        # alerts to tool shed admin users.
        if new_repo_alert:
            template = new_repo_email_alert_template
        else:
            template = email_alert_template
        display_date = hg_util.get_readable_ctx_date(ctx)
        admin_body = string.Template(template).safe_substitute(
            host=host,
            sharable_link=sharable_link,
            repository_name=repository.name,
            revision='%s:%s' % (str(ctx.rev()), ctx),
            display_date=display_date,
            description=ctx.description(),
            username=username,
            content_alert_str=content_alert_str)
        body = string.Template(template).safe_substitute(
            host=host,
            sharable_link=sharable_link,
            repository_name=repository.name,
            revision='%s:%s' % (str(ctx.rev()), ctx),
            display_date=display_date,
            description=ctx.description(),
            username=username,
            content_alert_str='')
        admin_users = app.config.get("admin_users", "").split(",")
        frm = email_from
        if new_repo_alert:
            subject = "Galaxy tool shed alert for new repository named %s" % str(
                repository.name)
            subject = subject[:80]
            email_alerts = []
            for user in sa_session.query( app.model.User ) \
                                  .filter( and_( app.model.User.table.c.deleted == false(),
                                                 app.model.User.table.c.new_repo_alert == true() ) ):
                if admin_only:
                    if user.email in admin_users:
                        email_alerts.append(user.email)
                else:
                    email_alerts.append(user.email)
        else:
            subject = "Galaxy tool shed update alert for repository named %s" % str(
                repository.name)
            email_alerts = json.loads(repository.email_alerts)
        for email in email_alerts:
            to = email.strip()
            # Send it
            try:
                if to in admin_users:
                    util.send_mail(frm, to, subject, admin_body, app.config)
                else:
                    util.send_mail(frm, to, subject, body, app.config)
            except Exception:
                log.exception(
                    "An error occurred sending a tool shed repository update alert by email."
                )
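
Both alert bodies are rendered with string.Template.safe_substitute, which tolerates placeholders that are missing from the mapping instead of raising KeyError. A small self-contained illustration with a made-up template:

import string

template = "Repository $repository_name updated by $username.\n$content_alert_str"
body = string.Template(template).safe_substitute(repository_name="my_tool",
                                                 username="alice")
# $content_alert_str was not supplied, so safe_substitute leaves the
# placeholder untouched instead of raising KeyError like substitute() would.
print(body)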
Beispiel #44
0
    def set_banner(self):
        data = parameter_required({'pbid': '品牌唯一值缺失'})
        pbid = data.get('pbid')
        if is_supplizer():

            supplizer = get_current_supplizer()
            ProductBrand.query.filter(
                ProductBrand.PBid == pbid, ProductBrand.SUid == supplizer.SUid).first_('只能修改自己的品牌')
        elif is_admin():
            ProductBrand.query.filter(
                ProductBrand.PBid == pbid).first_('品牌不存在')
        else:
            raise AuthorityError()
        bbid = data.get('bbid')
        bbcontent = data.get('bbcontent')
        if bbcontent:
            try:
                bbcontent = json.dumps(bbcontent)
            except Exception as e:
                current_app.logger.info('转置json 出错 bbcontent = {} e = {}'.format(bbcontent, e))
        bbsort = data.get('bbsort')
        if bbsort:
            bbsort = self._check_sort(bbsort, model=BrandBanner, filter_args=[BrandBanner.PBid == pbid], default=1)
        with db.auto_commit():
            if bbid:
                if data.get('delete'):
                    BrandBanner.query.filter(BrandBanner.BBid == bbid, BrandBanner.isdelete == false()).delete_(
                        synchronize_session=False)
                    return Success('删除成功')

                bb = BrandBanner.query.filter(BrandBanner.BBid == bbid, BrandBanner.isdelete == false()).first()
                if bb:
                    if bbsort:
                        bb.BBsort = bbsort
                    if bbcontent:
                        bb.BBcontent = bbcontent
                    return Success('更新成功', data=bbid)
            bbid = str(uuid.uuid1())
            if not bbcontent:
                raise ParamsError('轮播图图片路由缺失')
            bb = BrandBanner.create({
                'BBid': bbid,
                'PBid': pbid,
                'BBsort': bbsort or 1,
                'BBcontent': bbcontent
            })
            db.session.add(bb)

        return Success('添加成功', data=bbid)
Beispiel #45
0
 def user_can_import_repository_archive(self, user, archive_owner):
     # This method should be called only if the current user is not an admin.
     if user.username == archive_owner:
         return True
     # A member of the IUC is authorized to create new repositories that are owned by another user.
     iuc_group = self.sa_session.query( self.model.Group ) \
                                .filter( and_( self.model.Group.table.c.name == 'Intergalactic Utilities Commission',
                                               self.model.Group.table.c.deleted == false() ) ) \
                                .first()
     if iuc_group is not None:
         for uga in iuc_group.users:
             if uga.user.id == user.id:
                 return True
     return False
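
The membership loop over iuc_group.users can be collapsed with any(); a hedged rewrite of the same authorization check, assuming the same model and session attributes as the method above:

def user_can_import_repository_archive(self, user, archive_owner):
    # Hedged rewrite of the method above: same semantics, membership via any().
    if user.username == archive_owner:
        return True
    iuc_group = self.sa_session.query(self.model.Group) \
                               .filter(and_(self.model.Group.table.c.name == 'Intergalactic Utilities Commission',
                                            self.model.Group.table.c.deleted == false())) \
                               .first()
    return iuc_group is not None and any(uga.user.id == user.id for uga in iuc_group.users)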
Beispiel #46
0
    def _fill_brand(self, brand, **kwargs):
        product_num, product_fields = kwargs.get('product_num', 3), kwargs.get('product_fields', list())
        new_product = kwargs.get('new_product', False)
        banner_show = kwargs.get('banner_show', False)
        recommend_pr = kwargs.get('recommend_pr', False)
        coupon = kwargs.get('coupon', False)

        if not product_fields:
            product_fields = self.prfields[:]

        if coupon:
            user = None
            if common_user():
                user = get_current_user()
            brand_coupon = self._get_brand_coupon(brand.SUid, user)

            if brand_coupon:
                from planet.control.CCoupon import CCoupon
                ccoupon = CCoupon()
                usid = user.USid if user else None
                ccoupon._coupon(brand_coupon, usid=usid, fill_con=False)
                product_num -= 1

                brand.fill('coupon', brand_coupon)

        # Recommended products
        if recommend_pr:
            brand_recommend_product = self._recommend_pb_product(brand.PBid).all()[:product_num]
            pr_supplement_id = list()
            if brand_recommend_product:
                for product in brand_recommend_product:
                    product.fields = product_fields
                    pr_supplement_id.append(product.PRid)

            supplement_num = product_num - len(brand_recommend_product)
            if supplement_num:
                supplement_product = Products.query.filter(
                    Products.isdelete == false(), Products.PBid == brand.PBid,
                    Products.PRid.notin_(pr_supplement_id)).order_by(
                    Products.createtime.desc()).all()
                brand_recommend_product.extend(supplement_product[:supplement_num])
            if brand_recommend_product:
                brand.fill('recommend', brand_recommend_product)

        # New product recommendations
        if new_product:
            brand_new_prodct = Products.query.filter(
                Products.isdelete == false(), Products.PBid == brand.PBid).order_by(Products.createtime.desc()).all()
            brand_new_prodct = brand_new_prodct[:self.br_new]
            if brand_new_prodct:
                for product in brand_new_prodct:
                    product.fields = product_fields
                brand.fill('new', brand_new_prodct)

        # TODO: fill in brand updates (tweets)
        # brand.fill('BrandTweets', list())

        # Fill in brand banners
        if banner_show:
            bb_list = BrandBanner.query.filter(
                BrandBanner.PBid == brand.PBid, BrandBanner.isdelete == false()).order_by(
                BrandBanner.BBsort.asc(), BrandBanner.createtime.desc()).all()
            bbs = self._fill_bb(bb_list)
            if bbs:
                brand.fill('brandbanner', bbs)
Beispiel #47
0
def translate_bool(bln):
    if bln:
        return true()
    return false()
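
translate_bool maps a Python truth value onto SQLAlchemy's true()/false() clause constructs, which render as literal SQL TRUE/FALSE, so the result can be compared against a boolean column directly. A minimal hedged usage sketch (the session and the Item model are illustrative):

def list_items(session, Item, show_deleted=False):
    # deleted == TRUE when show_deleted is set, deleted == FALSE otherwise;
    # translate_bool keeps a single code path for both cases.
    return session.query(Item).filter(Item.deleted == translate_bool(show_deleted)).all()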
Beispiel #48
0
class Instance(db.Model):  # type: ignore
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, unique=True, nullable=False)
    discarded = db.Column(db.Boolean, default=False, nullable=False)
    # aka is_open_service in jormun
    is_free = db.Column(db.Boolean, default=False, nullable=False)

    # This doesn't impact anything; is_free used to be used for this,
    # but an instance can be freely accessible without using open data
    is_open_data = db.Column(db.Boolean, default=False, nullable=False)

    authorizations = db.relationship(
        'Authorization',
        backref=backref('instance', lazy='joined'),
        lazy='dynamic',
        cascade='save-update, merge, delete',
    )

    jobs = db.relationship('Job', backref='instance', lazy='dynamic', cascade='save-update, merge, delete')

    poi_type_json = db.relationship(
        'PoiTypeJson',
        uselist=False,
        backref=backref('instance'),
        cascade='save-update, merge, delete, delete-orphan',
    )

    traveler_profiles = db.relationship('TravelerProfile', backref='instance')

    import_stops_in_mimir = db.Column(db.Boolean, default=False, nullable=False)

    import_ntfs_in_mimir = db.Column(db.Boolean, default=False, nullable=False)

    admins_from_cities_db = db.Column(db.Boolean, default=False, nullable=False)

    # ============================================================
    # params for jormungandr
    # ============================================================
    # the scenario used by jormungandr, by default we use the new default scenario (and not the default one...)
    scenario = db.Column(db.Text, nullable=False, default='distributed')

    # order of the journey, this order is for clockwise request, else it is reversed
    journey_order = db.Column(
        db.Enum('arrival_time', 'departure_time', name='journey_order'),
        default=default_values.journey_order,
        nullable=False,
    )

    max_walking_duration_to_pt = db.Column(
        db.Integer, default=default_values.max_walking_duration_to_pt, nullable=False
    )

    max_bike_duration_to_pt = db.Column(
        db.Integer, default=default_values.max_bike_duration_to_pt, nullable=False
    )

    max_bss_duration_to_pt = db.Column(db.Integer, default=default_values.max_bss_duration_to_pt, nullable=False)

    max_car_duration_to_pt = db.Column(db.Integer, default=default_values.max_car_duration_to_pt, nullable=False)

    max_car_no_park_duration_to_pt = db.Column(
        db.Integer, default=default_values.max_car_no_park_duration_to_pt, nullable=False
    )

    max_ridesharing_duration_to_pt = db.Column(
        db.Integer, default=default_values.max_ridesharing_duration_to_pt, nullable=False
    )

    max_taxi_duration_to_pt = db.Column(
        db.Integer, default=default_values.max_taxi_duration_to_pt, nullable=False
    )

    walking_speed = db.Column(db.Float, default=default_values.walking_speed, nullable=False)

    bike_speed = db.Column(db.Float, default=default_values.bike_speed, nullable=False)

    bss_speed = db.Column(db.Float, default=default_values.bss_speed, nullable=False)

    bss_rent_duration = db.Column(db.Integer, default=default_values.bss_rent_duration, nullable=False)

    bss_rent_penalty = db.Column(db.Integer, default=default_values.bss_rent_penalty, nullable=False)

    bss_return_duration = db.Column(db.Integer, default=default_values.bss_return_duration, nullable=False)

    bss_return_penalty = db.Column(db.Integer, default=default_values.bss_return_penalty, nullable=False)

    car_speed = db.Column(db.Float, default=default_values.car_speed, nullable=False)

    car_no_park_speed = db.Column(db.Float, default=default_values.car_no_park_speed, nullable=False)

    ridesharing_speed = db.Column(db.Float, default=default_values.ridesharing_speed, nullable=False)

    taxi_speed = db.Column(db.Float, default=default_values.taxi_speed, nullable=False)

    max_nb_transfers = db.Column(db.Integer, default=default_values.max_nb_transfers, nullable=False)

    min_bike = db.Column(db.Integer, default=default_values.min_bike, nullable=False)

    min_bss = db.Column(db.Integer, default=default_values.min_bss, nullable=False)

    min_car = db.Column(db.Integer, default=default_values.min_car, nullable=False)

    min_ridesharing = db.Column(db.Integer, default=default_values.min_ridesharing, nullable=False)

    min_taxi = db.Column(db.Integer, default=default_values.min_taxi, nullable=False)

    max_duration = db.Column(
        db.Integer, default=default_values.max_duration, nullable=False, server_default='86400'
    )

    arrival_transfer_penalty = db.Column(
        db.Integer, default=default_values.arrival_transfer_penalty, nullable=False, server_default='120'
    )

    walking_transfer_penalty = db.Column(
        db.Integer, default=default_values.walking_transfer_penalty, nullable=False, server_default='120'
    )

    night_bus_filter_max_factor = db.Column(
        db.Float, default=default_values.night_bus_filter_max_factor, nullable=False
    )

    night_bus_filter_base_factor = db.Column(
        db.Integer, default=default_values.night_bus_filter_base_factor, nullable=False, server_default='3600'
    )

    priority = db.Column(db.Integer, default=default_values.priority, nullable=False, server_default='0')

    bss_provider = db.Column(
        db.Boolean, default=default_values.bss_provider, nullable=False, server_default=true()
    )

    car_park_provider = db.Column(
        db.Boolean, default=default_values.car_park_provider, nullable=False, server_default=true()
    )

    max_additional_connections = db.Column(
        db.Integer, default=default_values.max_additional_connections, nullable=False, server_default='2'
    )

    successive_physical_mode_to_limit_id = db.Column(
        db.Text,
        default=default_values.successive_physical_mode_to_limit_id,
        nullable=False,
        server_default=default_values.successive_physical_mode_to_limit_id,
    )

    full_sn_geometries = db.Column(db.Boolean, default=False, nullable=False, server_default=false())

    realtime_pool_size = db.Column(db.Integer, default=default_values.realtime_pool_size)

    # parameters migrated from scenario STIF
    min_nb_journeys = db.Column(
        db.Integer, default=default_values.min_nb_journeys, nullable=False, server_default='0'
    )
    min_journeys_calls = db.Column(
        db.Integer, default=default_values.min_journeys_calls, nullable=False, server_default='1'
    )
    max_successive_physical_mode = db.Column(db.Integer, nullable=True)
    final_line_filter = db.Column(
        db.Boolean, default=default_values.final_line_filter, nullable=False, server_default=false()
    )
    max_extra_second_pass = db.Column(
        db.Integer, default=default_values.max_extra_second_pass, nullable=False, server_default='0'
    )
    max_nb_journeys = db.Column(db.Integer, nullable=True)

    # param only used by distributed scenario
    import json

    # default value is read when there is no record in db
    # server_default(dumped json) is the actual value stored in db, postgres will convert it to a dict when it's read
    max_nb_crowfly_by_mode = db.Column(
        JSONB,
        default=default_values.max_nb_crowfly_by_mode,
        server_default=json.dumps(default_values.max_nb_crowfly_by_mode),
    )

    autocomplete_backend = db.Column(db.Text, nullable=False, default=default_values.autocomplete_backend)

    additional_time_after_first_section_taxi = db.Column(
        db.Integer, default=default_values.additional_time_after_first_section_taxi, nullable=False
    )

    additional_time_before_last_section_taxi = db.Column(
        db.Integer, default=default_values.additional_time_before_last_section_taxi, nullable=False
    )

    max_walking_direct_path_duration = db.Column(
        db.Integer, default=default_values.max_walking_direct_path_duration, nullable=False
    )

    max_bike_direct_path_duration = db.Column(
        db.Integer,
        default=default_values.max_bike_direct_path_duration,
        nullable=False,
        server_default=str(default_values.max_bike_direct_path_duration),
    )

    max_bss_direct_path_duration = db.Column(
        db.Integer,
        default=default_values.max_bss_direct_path_duration,
        nullable=False,
        server_default=str(default_values.max_bss_direct_path_duration),
    )

    max_car_direct_path_duration = db.Column(
        db.Integer,
        default=default_values.max_car_direct_path_duration,
        nullable=False,
        server_default=str(default_values.max_car_direct_path_duration),
    )

    max_car_no_park_direct_path_duration = db.Column(
        db.Integer,
        default=default_values.max_car_no_park_direct_path_duration,
        nullable=False,
        server_default=str(default_values.max_car_no_park_direct_path_duration),
    )

    max_taxi_direct_path_duration = db.Column(
        db.Integer,
        default=default_values.max_taxi_direct_path_duration,
        nullable=False,
        server_default=str(default_values.max_taxi_direct_path_duration),
    )

    max_ridesharing_direct_path_duration = db.Column(
        db.Integer,
        default=default_values.max_ridesharing_direct_path_duration,
        nullable=False,
        server_default=str(default_values.max_ridesharing_direct_path_duration),
    )

    max_bike_direct_path_distance = db.Column(
        db.Integer,
        default=default_values.max_bike_direct_path_distance,
        nullable=False,
        server_default=str(default_values.max_bike_direct_path_distance),
    )

    max_bss_direct_path_distance = db.Column(
        db.Integer,
        default=default_values.max_bss_direct_path_distance,
        nullable=False,
        server_default=str(default_values.max_bss_direct_path_distance),
    )

    max_walking_direct_path_distance = db.Column(
        db.Integer,
        default=default_values.max_walking_direct_path_distance,
        nullable=False,
        server_default=str(default_values.max_walking_direct_path_distance),
    )

    max_car_direct_path_distance = db.Column(
        db.Integer,
        default=default_values.max_car_direct_path_distance,
        nullable=False,
        server_default=str(default_values.max_car_direct_path_distance),
    )

    max_car_no_park_direct_path_distance = db.Column(
        db.Integer,
        default=default_values.max_car_no_park_direct_path_distance,
        nullable=False,
        server_default=str(default_values.max_car_no_park_direct_path_distance),
    )

    max_ridesharing_direct_path_distance = db.Column(
        db.Integer,
        default=default_values.max_ridesharing_direct_path_distance,
        nullable=False,
        server_default=str(default_values.max_ridesharing_direct_path_distance),
    )

    max_taxi_direct_path_distance = db.Column(
        db.Integer,
        default=default_values.max_taxi_direct_path_distance,
        nullable=False,
        server_default=str(default_values.max_taxi_direct_path_distance),
    )

    equipment_details_providers = db.relationship(
        "EquipmentsProvider", secondary=associate_instance_equipments, backref="instances", lazy='joined'
    )

    # street_network_configurations
    street_network_car = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.street_network_car,
    )

    street_network_car_no_park = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.street_network_car_no_park,
    )

    street_network_walking = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.street_network_walking,
    )
    street_network_bike = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.street_network_bike,
    )
    street_network_bss = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.street_network_bss,
    )

    street_network_ridesharing = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.street_network_ridesharing,
    )
    street_network_taxi = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.street_network_taxi,
    )

    asgard_language = db.Column(
        db.Text,
        db.ForeignKey('streetnetwork_backend.id'),
        nullable=False,
        default=default_values.asgard_language,
    )

    poi_dataset = db.Column(db.Text, default=None, nullable=True)

    stop_points_nearby_duration = db.Column(
        db.Integer,
        default=default_values.stop_points_nearby_duration,
        nullable=False,
        server_default=str(default_values.stop_points_nearby_duration),
    )

    # Activate the asynchronous_ridesharing mode
    asynchronous_ridesharing = db.Column(db.Boolean, default=False, nullable=False)
    # Activate ridesharing service calls with an async greenlet
    greenlet_pool_for_ridesharing_services = db.Column(db.Boolean, default=False, nullable=False)
    # Ridesharing greenlet pool size
    ridesharing_greenlet_pool_size = db.Column(
        db.Integer, default=default_values.ridesharing_greenlet_pool_size, nullable=False
    )

    ridesharing_services = db.relationship(
        "RidesharingService", secondary=associate_instance_ridesharing, backref="instances", lazy='joined'
    )

    external_services = db.relationship(
        "ExternalService", secondary=associate_instance_external_service, backref="instances", lazy='joined'
    )
    # max_waiting_duration default value 4h: 4*60*60 = 14400 seconds
    max_waiting_duration = db.Column(
        db.Integer, nullable=False, server_default='{}'.format(default_values.max_waiting_duration)
    )

    places_proximity_radius = db.Column(
        db.Integer,
        default=default_values.places_proximity_radius,
        nullable=False,
        server_default=str(default_values.places_proximity_radius),
    )

    transfer_path = db.Column(
        db.Boolean,
        default=default_values.transfer_path,
        nullable=False,
        server_default=str(default_values.transfer_path),
    )

    access_points = db.Column(
        db.Boolean,
        default=default_values.access_points,
        nullable=False,
        server_default=str(default_values.access_points),
    )

    default_pt_planner = db.Column(
        db.Text,
        default=default_values.default_pt_planner,
        nullable=False,
        server_default=str(default_values.default_pt_planner),
    )

    pt_planners_configurations = db.Column(
        JSONB,
        default=default_values.pt_planners_configurations,
        nullable=False,
        server_default=json.dumps(default_values.pt_planners_configurations),
    )

    filter_odt_journeys = db.Column(
        db.Boolean,
        default=default_values.filter_odt_journeys,
        nullable=False,
        server_default=str(default_values.filter_odt_journeys),
    )

    def __init__(self, name=None, is_free=False, authorizations=None, jobs=None):
        self.name = name
        self.is_free = is_free
        if authorizations:
            self.authorizations = authorizations
        if jobs:
            self.jobs = jobs

    def last_datasets(self, nb_dataset=1, family_type=None):
        """
        return the n last dataset of each family type loaded for this instance
        """
        query = db.session.query(func.distinct(DataSet.family_type)).filter(
            Instance.id == self.id, DataSet.family_type != 'mimir'
        )
        if family_type:
            query = query.filter(DataSet.family_type == family_type)

        family_types = query.all()

        result = []
        for family_type in family_types:
            data_sets = (
                db.session.query(DataSet)
                .join(Job)
                .join(Instance)
                .filter(Instance.id == self.id, DataSet.family_type == family_type, Job.state == 'done')
                .order_by(Job.created_at.desc())
                .limit(nb_dataset)
                .all()
            )
            result += data_sets
        return result
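
    # Usage sketch (hypothetical family_type value):
    #   instance.last_datasets(nb_dataset=2, family_type='fusio')
    # returns at most the two most recent 'fusio' datasets whose loading job finished in state 'done'.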

    def running_datasets(self):
        """
        return all datasets with job state = 'running' for this instance
        """
        data_sets = (
            db.session.query(DataSet)
            .join(Job)
            .join(Instance)
            .filter(Instance.id == self.id, Job.state == 'running')
            .order_by(Job.created_at.desc())
            .all()
        )
        return data_sets

    @classmethod
    def query_existing(cls):
        return cls.query.filter_by(discarded=False)

    @classmethod
    def query_all(cls):
        return cls.query

    @classmethod
    def get_by_name(cls, name):
        res = cls.query_existing().filter_by(name=name).first()
        return res

    @classmethod
    def get_from_id_or_name(cls, id=None, name=None):
        if id:
            return cls.query.get_or_404(id)
        elif name:
            return cls.query_existing().filter_by(name=name).first_or_404()
        else:
            raise Exception({'error': 'instance is required'}, 400)

    def delete_dataset(self, _type):
        result = (
            db.session.query(DataSet, Job)
            .join(Job)
            .filter(DataSet.type == _type, Job.instance_id == self.id)
            .all()
        )

        if not result:
            return 0
        for dataset, job in result:
            # Cascade delete is not working, so manually delete the Metric rows associated with the dataset
            db.session.query(Metric).filter(Metric.dataset_id == dataset.id).delete()
            db.session.delete(dataset)

            # Delete the job if it no longer has any datasets
            if not len(job.data_sets.all()):
                db.session.delete(job)

        db.session.commit()
        return len(result)

    def delete_old_jobs_and_list_datasets(self, time_limit):
        """
        Delete jobs created before the date parameter 'time_limit' and return a list of datasets to delete
        :param time_limit: date from which jobs will be deleted
        :return: list of datasets to delete
        """
        # Keep the last dataset of each type to be able to reload data
        dataset_to_keep = self.last_datasets()
        dataset_file_to_keep = [f.name for f in dataset_to_keep]

        # Keep the jobs associated
        jobs_to_keep = set()
        for dataset in dataset_to_keep:
            job_associated = db.session.query(Job).filter(Job.data_sets.contains(dataset)).first()
            jobs_to_keep.add(job_associated)

        # Retrieve all jobs created before the time limit
        old_jobs = (
            db.session.query(Job)
            .filter(Job.instance_id == self.id, Job.created_at < time_limit, Job.state != 'running')
            .all()
        )

        # List all jobs that can be deleted
        to_delete = list(set(old_jobs) - jobs_to_keep)

        # Retrieve the datasets associated to old jobs in order to delete backups folders
        old_datasets = []
        for job_to_delete in to_delete:
            old_datasets.extend(db.session.query(DataSet).filter(DataSet.job_id == job_to_delete.id).all())
            db.session.delete(job_to_delete)

        db.session.commit()

        return [dataset.name for dataset in old_datasets if dataset.name not in dataset_file_to_keep]
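
    # Usage sketch (hypothetical cut-off value):
    #   instance.delete_old_jobs_and_list_datasets(datetime.utcnow() - timedelta(days=30))
    # deletes non-running jobs older than 30 days, keeps the jobs backing the latest dataset of each
    # family type, and returns the names of the datasets whose backup files can now be removed.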

    def __repr__(self):
        return '<Instance %r>' % self.name

    def __restarter( self ):
        log.info( 'Update repository manager restarter starting up...' )
        while self.running:
            # Make a call to the Tool Shed for each installed repository to get the latest
            # status information in the Tool Shed for the repository.  This information includes
            # items like newer installable repository revisions, current revision updates, whether
            # the repository revision is the latest installable revision, and whether the repository
            # has been deprecated in the Tool Shed.
            for repository in self.context.query( self.app.install_model.ToolShedRepository ) \
                                          .filter( self.app.install_model.ToolShedRepository.table.c.deleted == false() ):
                tool_shed_status_dict = repository_util.get_tool_shed_status_for_installed_repository( self.app, repository )
                if tool_shed_status_dict:
                    if tool_shed_status_dict != repository.tool_shed_status:
                        repository.tool_shed_status = tool_shed_status_dict
                        self.context.flush()
                else:
                    # The received tool_shed_status_dict is an empty dictionary, so coerce to None.
                    tool_shed_status_dict = None
                    if tool_shed_status_dict != repository.tool_shed_status:
                        repository.tool_shed_status = tool_shed_status_dict
                        self.context.flush()
            self.sleeper.sleep( self.seconds_to_sleep )
        log.info( 'Update repository manager restarter shutting down...' )
Beispiel #50
0
    def __search(self, tool_id, tool_version, user, input_data, job_state=None, param_dump=None, wildcard_param_dump=None):
        search_timer = ExecutionTimer()

        def replace_dataset_ids(path, key, value):
            """Exchanges dataset_ids (HDA, LDA, HDCA, not Dataset) in param_dump with dataset ids used in job."""
            if key == 'id':
                current_case = param_dump
                for p in path:
                    current_case = current_case[p]
                src = current_case['src']
                value = job_input_ids[src][value]
                return key, value
            return key, value

        conditions = [and_(model.Job.tool_id == tool_id,
                           model.Job.user == user)]

        if tool_version:
            conditions.append(model.Job.tool_version == str(tool_version))

        if job_state is None:
            conditions.append(
                model.Job.state.in_([model.Job.states.NEW,
                                     model.Job.states.QUEUED,
                                     model.Job.states.WAITING,
                                     model.Job.states.RUNNING,
                                     model.Job.states.OK])
            )
        else:
            if isinstance(job_state, string_types):
                conditions.append(model.Job.state == job_state)
            elif isinstance(job_state, list):
                o = []
                for s in job_state:
                    o.append(model.Job.state == s)
                conditions.append(
                    or_(*o)
                )

        # We now build the query filters that relate to the input datasets
        # that this job uses. We keep track of the requested dataset id in `requested_ids`,
        # the type (hda, hdca or lda) in `data_types`
        # and the ids that have been used in the job that has already been run in `used_ids`.
        requested_ids = []
        data_types = []
        used_ids = []
        for k, input_list in input_data.items():
            for type_values in input_list:
                t = type_values['src']
                v = type_values['id']
                requested_ids.append(v)
                data_types.append(t)
                identifier = type_values['identifier']
                if t == 'hda':
                    a = aliased(model.JobToInputDatasetAssociation)
                    b = aliased(model.HistoryDatasetAssociation)
                    c = aliased(model.HistoryDatasetAssociation)
                    d = aliased(model.JobParameter)
                    e = aliased(model.HistoryDatasetAssociationHistory)
                    conditions.append(and_(
                        model.Job.id == a.job_id,
                        a.name == k,
                        a.dataset_id == b.id,  # b is the HDA use for the job
                        c.dataset_id == b.dataset_id,
                        c.id == v,  # c is the requested job input HDA
                        # We need to make sure that the job we are looking for has been run with identical inputs.
                        # Here we deal with 3 requirements:
                        #  - the jobs' input dataset (=b) version is 0, meaning the job's input dataset is not yet ready
                        #  - b's update_time is older than the job create time, meaning no changes occurred
                        #  - the job has a dataset_version recorded, and that versions' metadata matches c's metadata.
                        or_(
                            and_(or_(a.dataset_version.in_([0, b.version]),
                                     b.update_time < model.Job.create_time),
                                 b.name == c.name,
                                 b.extension == c.extension,
                                 b.metadata == c.metadata,
                                 ),
                            and_(b.id == e.history_dataset_association_id,
                                 a.dataset_version == e.version,
                                 e.name == c.name,
                                 e.extension == c.extension,
                                 e._metadata == c._metadata,
                                 ),
                        ),
                        or_(b.deleted == false(), c.deleted == false())
                    ))
                    if identifier:
                        conditions.append(and_(model.Job.id == d.job_id,
                                             d.name == "%s|__identifier__" % k,
                                             d.value == json.dumps(identifier)))
                    used_ids.append(a.dataset_id)
                elif t == 'ldda':
                    a = aliased(model.JobToInputLibraryDatasetAssociation)
                    conditions.append(and_(
                        model.Job.id == a.job_id,
                        a.name == k,
                        a.ldda_id == v
                    ))
                    used_ids.append(a.ldda_id)
                elif t == 'hdca':
                    a = aliased(model.JobToInputDatasetCollectionAssociation)
                    b = aliased(model.HistoryDatasetCollectionAssociation)
                    c = aliased(model.HistoryDatasetCollectionAssociation)
                    conditions.append(and_(
                        model.Job.id == a.job_id,
                        a.name == k,
                        b.id == a.dataset_collection_id,
                        c.id == v,
                        b.name == c.name,
                        or_(and_(b.deleted == false(), b.id == v),
                            and_(or_(c.copied_from_history_dataset_collection_association_id == b.id,
                                     b.copied_from_history_dataset_collection_association_id == c.id),
                                 c.deleted == false(),
                                 )
                            )
                    ))
                    used_ids.append(a.dataset_collection_id)
                else:
                    return []

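        # Wildcard parameters: the '__id_wildcard__' placeholder in the dumped value is replaced by
        # SQL LIKE's '%' so that any dataset id is accepted when comparing stored job parameters.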
        for k, v in wildcard_param_dump.items():
            wildcard_value = json.dumps(v, sort_keys=True).replace('"id": "__id_wildcard__"', '"id": %')
            a = aliased(model.JobParameter)
            conditions.append(and_(
                model.Job.id == a.job_id,
                a.name == k,
                a.value.like(wildcard_value)
            ))

        conditions.append(and_(
            model.Job.any_output_dataset_collection_instances_deleted == false(),
            model.Job.any_output_dataset_deleted == false()
        ))

        query = self.sa_session.query(model.Job.id, *used_ids).filter(and_(*conditions))
        for job in query.all():
            # We found a job that is equal in terms of tool_id, user, state and input datasets,
            # but to be able to verify that the parameters match we need to modify all instances of
            # dataset_ids (HDA, LDDA, HDCA) in the incoming param_dump to point to those used by the
            # possibly equivalent job, which may have been run on copies of the original input data.
            job_input_ids = {}
            if len(job) > 1:
                # We do have datasets to check
                job_id, current_jobs_data_ids = job[0], job[1:]
                job_parameter_conditions = [model.Job.id == job_id]
                for src, requested_id, used_id in zip(data_types, requested_ids, current_jobs_data_ids):
                    if src not in job_input_ids:
                        job_input_ids[src] = {requested_id: used_id}
                    else:
                        job_input_ids[src][requested_id] = used_id
                new_param_dump = remap(param_dump, visit=replace_dataset_ids)
                # new_param_dump has its dataset ids remapped to those used by the job.
                # We now ask if the remapped job parameters match the current job.
                for k, v in new_param_dump.items():
                    a = aliased(model.JobParameter)
                    job_parameter_conditions.append(and_(
                        a.name == k,
                        a.value == json.dumps(v, sort_keys=True)
                    ))
            else:
                job_parameter_conditions = [model.Job.id == job]
            query = self.sa_session.query(model.Job).filter(*job_parameter_conditions)
            job = query.first()
            if job is None:
                continue
            n_parameters = 0
            # Verify that equivalent jobs had the same number of job parameters
            # We skip chrominfo, dbkey, __workflow_invocation_uuid__ and identifier
            # parameter as these are not passed along when expanding tool parameters
            # and they can differ without affecting the resulting dataset.
            for parameter in job.parameters:
                if parameter.name in {'__workflow_invocation_uuid__', 'chromInfo', 'dbkey'} or parameter.name.endswith('|__identifier__'):
                    continue
                n_parameters += 1
            if not n_parameters == len(param_dump):
                continue
            log.info("Found equivalent job %s", search_timer)
            return job
        log.info("No equivalent jobs found %s", search_timer)
        return None
Beispiel #51
0
    def get_workflows_list(self, trans, kwd):
        """
        Displays a collection of workflows.

        :param  show_published:      if True, show also published workflows
        :type   show_published:      boolean
        :param  missing_tools:       if True, include a list of missing tools per workflow
        :type   missing_tools:       boolean
        """
        missing_tools = util.string_as_bool(kwd.get('missing_tools', 'False'))
        rval = []
        filter1 = (trans.app.model.StoredWorkflow.user == trans.user)
        user = trans.get_user()
        if user is None:
            show_published = util.string_as_bool(kwd.get('show_published', 'True'))
        else:
            show_published = util.string_as_bool(kwd.get('show_published', 'False'))
        if show_published:
            filter1 = or_(filter1, (trans.app.model.StoredWorkflow.published == true()))
        for wf in trans.sa_session.query(trans.app.model.StoredWorkflow).options(
                joinedload("annotations")).options(
                joinedload("latest_workflow").undefer("step_count").lazyload("steps")).options(
                joinedload("tags")).filter(
                    filter1, trans.app.model.StoredWorkflow.table.c.deleted == false()).order_by(
                    desc(trans.app.model.StoredWorkflow.table.c.update_time)).all():
            item = wf.to_dict(value_mapper={'id': trans.security.encode_id})
            encoded_id = trans.security.encode_id(wf.id)
            item['annotations'] = [x.annotation for x in wf.annotations]
            item['url'] = url_for('workflow', id=encoded_id)
            item['owner'] = wf.user.username
            item['number_of_steps'] = wf.latest_workflow.step_count
            item['show_in_tool_panel'] = False
            if user is not None:
                item['show_in_tool_panel'] = wf.show_in_tool_panel(user_id=user.id)
            rval.append(item)
        for wf_sa in trans.sa_session.query(model.StoredWorkflowUserShareAssociation).join(
                model.StoredWorkflowUserShareAssociation.stored_workflow).options(
                joinedload("stored_workflow").joinedload("annotations")).options(
                joinedload("stored_workflow").joinedload("latest_workflow").undefer("step_count").lazyload("steps")).options(
                joinedload("stored_workflow").joinedload("user")).options(
                joinedload("stored_workflow").joinedload("tags")).filter(model.StoredWorkflowUserShareAssociation.user == trans.user).filter(
                model.StoredWorkflow.deleted == false()).order_by(
                desc(model.StoredWorkflow.update_time)).all():
            item = wf_sa.stored_workflow.to_dict(value_mapper={'id': trans.security.encode_id})
            encoded_id = trans.security.encode_id(wf_sa.stored_workflow.id)
            item['annotations'] = [x.annotation for x in wf_sa.stored_workflow.annotations]
            item['url'] = url_for('workflow', id=encoded_id)
            item['slug'] = wf_sa.stored_workflow.slug
            item['owner'] = wf_sa.stored_workflow.user.username
            item['number_of_steps'] = wf_sa.stored_workflow.latest_workflow.step_count
            item['show_in_tool_panel'] = False
            if user is not None:
                item['show_in_tool_panel'] = wf_sa.stored_workflow.show_in_tool_panel(user_id=user.id)
            rval.append(item)
        if missing_tools:
            workflows_missing_tools = []
            workflows = []
            workflows_by_toolshed = dict()
            for key, value in enumerate(rval):
                tool_ids = []
                workflow_details = self.workflow_contents_manager.workflow_to_dict(trans, self.__get_stored_workflow(trans, value['id']), style='instance')
                if 'steps' in workflow_details:
                    for step in workflow_details['steps']:
                        tool_id = workflow_details['steps'][step].get('tool_id')
                        if tool_id and tool_id not in tool_ids and self.app.toolbox.is_missing_shed_tool(tool_id):
                            tool_ids.append(tool_id)
                if len(tool_ids) > 0:
                    value['missing_tools'] = tool_ids
                    workflows_missing_tools.append(value)
            for workflow in workflows_missing_tools:
                for tool_id in workflow['missing_tools']:
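                    # Shed tool ids have the form <tool_shed>/repos/<owner>/<repository>/<tool id>/<version>,
                    # hence the six components unpacked below (the second one is the literal 'repos').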
                    toolshed, _, owner, name, tool, version = tool_id.split('/')
                    shed_url = self.__get_full_shed_url(toolshed)
                    repo_identifier = '/'.join([toolshed, owner, name])
                    if repo_identifier not in workflows_by_toolshed:
                        workflows_by_toolshed[repo_identifier] = dict(shed=shed_url.rstrip('/'), repository=name, owner=owner, tools=[tool_id], workflows=[workflow['name']])
                    else:
                        if tool_id not in workflows_by_toolshed[repo_identifier]['tools']:
                            workflows_by_toolshed[repo_identifier]['tools'].append(tool_id)
                        if workflow['name'] not in workflows_by_toolshed[repo_identifier]['workflows']:
                            workflows_by_toolshed[repo_identifier]['workflows'].append(workflow['name'])
            for repo_tag in workflows_by_toolshed:
                workflows.append(workflows_by_toolshed[repo_tag])
            return workflows
        return rval
Beispiel #52
0
class Label(Base):
    """Definition of a Label."""

    __tablename__ = 'Labels'
    id: LabelID = Column(String, primary_key=True)
    comment: Optional[str] = Column(String, nullable=True)
    labeling_time: LabelingTime = Column(Float, nullable=True)
    is_predefined: Boolean = Column(Boolean,
                                    nullable=False,
                                    server_default=false())
    status: LabelVerificationStatus = Column(
        Enum(LabelVerificationStatus),
        nullable=False,
        server_default=LabelVerificationStatus.NOT_VERIFIED.value)

    scan_id: ScanID = Column(String, ForeignKey('Scans.id',
                                                ondelete='cascade'))
    scan: Scan = relationship('Scan', back_populates='labels')

    task_id: TaskID = Column(Integer, ForeignKey('Tasks.id'), nullable=False)
    task: Task = relationship('Task')

    owner_id: int = Column(Integer, ForeignKey('Users.id'))
    owner: User = relationship('User', back_populates='labels')

    elements: List['LabelElement'] = relationship('LabelElement',
                                                  back_populates='label',
                                                  cascade='delete')

    def __init__(self,
                 user: User,
                 labeling_time: LabelingTime,
                 comment: str = None,
                 is_predefined: bool = False) -> None:
        """Initialize Label.

        By default all of the labels are not verified.

        :param user: User that entered such Label
        :param labeling_time: time that was needed to prepare such Label
        :param comment: (optional) comment added by User on Labeling Page
        :param is_predefined: (optional) mark such Label as predefined for given Scan and Task
        """
        self.id = LabelID(str(uuid.uuid4()))
        self.owner = user
        self.labeling_time = labeling_time
        self.status = LabelVerificationStatus.NOT_VERIFIED
        self.comment = comment
        self.is_predefined = is_predefined

    def __repr__(self) -> str:
        """Return string representation for Label."""
        return '<{}: {}: {}: {} {} {} {}>'.format(self.__class__.__name__,
                                                  self.id, self.scan_id,
                                                  self.task_id,
                                                  self.labeling_time,
                                                  self.owner, self.comment)

    def update_status(self, status: LabelVerificationStatus) -> 'Label':
        """Update Label's verification status.

        :param status: new status for this Label
        :return: Label object
        """
        self.status = status
        self.save()
        return self
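
    # Usage sketch (hypothetical user and values):
    #   Label(user=some_user, labeling_time=12.5, comment='hard case')
    # creates a Label that starts out NOT_VERIFIED; update_status() can later change its verification status.
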
def upgrade():
    # Create tables for the new models.
    op.create_table(
        "sl_columns",
        # AuditMixinNullable
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        # ExtraJSONMixin
        sa.Column("extra_json", sa.Text(), nullable=True),
        # ImportExportMixin
        sa.Column("uuid", UUIDType(binary=True), primary_key=False, default=uuid4),
        # Column
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("name", sa.TEXT(), nullable=False),
        sa.Column("type", sa.TEXT(), nullable=False),
        sa.Column("expression", sa.TEXT(), nullable=False),
        sa.Column("is_physical", sa.BOOLEAN(), nullable=False, default=True,),
        sa.Column("description", sa.TEXT(), nullable=True),
        sa.Column("warning_text", sa.TEXT(), nullable=True),
        sa.Column("unit", sa.TEXT(), nullable=True),
        sa.Column("is_temporal", sa.BOOLEAN(), nullable=False),
        sa.Column("is_spatial", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_partition", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_aggregation", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_additive", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column("is_increase_desired", sa.BOOLEAN(), nullable=False, default=True,),
        sa.Column(
            "is_managed_externally",
            sa.Boolean(),
            nullable=False,
            server_default=sa.false(),
        ),
        sa.Column("external_url", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    with op.batch_alter_table("sl_columns") as batch_op:
        batch_op.create_unique_constraint("uq_sl_columns_uuid", ["uuid"])

    op.create_table(
        "sl_tables",
        # AuditMixinNullable
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        # ExtraJSONMixin
        sa.Column("extra_json", sa.Text(), nullable=True),
        # ImportExportMixin
        sa.Column("uuid", UUIDType(binary=True), primary_key=False, default=uuid4),
        # Table
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("database_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("catalog", sa.TEXT(), nullable=True),
        sa.Column("schema", sa.TEXT(), nullable=True),
        sa.Column("name", sa.TEXT(), nullable=False),
        sa.Column(
            "is_managed_externally",
            sa.Boolean(),
            nullable=False,
            server_default=sa.false(),
        ),
        sa.Column("external_url", sa.Text(), nullable=True),
        sa.ForeignKeyConstraint(["database_id"], ["dbs.id"], name="sl_tables_ibfk_1"),
        sa.PrimaryKeyConstraint("id"),
    )
    with op.batch_alter_table("sl_tables") as batch_op:
        batch_op.create_unique_constraint("uq_sl_tables_uuid", ["uuid"])

    op.create_table(
        "sl_table_columns",
        sa.Column("table_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("column_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(
            ["column_id"], ["sl_columns.id"], name="sl_table_columns_ibfk_2"
        ),
        sa.ForeignKeyConstraint(
            ["table_id"], ["sl_tables.id"], name="sl_table_columns_ibfk_1"
        ),
    )

    op.create_table(
        "sl_datasets",
        # AuditMixinNullable
        sa.Column("created_on", sa.DateTime(), nullable=True),
        sa.Column("changed_on", sa.DateTime(), nullable=True),
        sa.Column("created_by_fk", sa.Integer(), nullable=True),
        sa.Column("changed_by_fk", sa.Integer(), nullable=True),
        # ExtraJSONMixin
        sa.Column("extra_json", sa.Text(), nullable=True),
        # ImportExportMixin
        sa.Column("uuid", UUIDType(binary=True), primary_key=False, default=uuid4),
        # Dataset
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("sqlatable_id", sa.INTEGER(), nullable=True),
        sa.Column("name", sa.TEXT(), nullable=False),
        sa.Column("expression", sa.TEXT(), nullable=False),
        sa.Column("is_physical", sa.BOOLEAN(), nullable=False, default=False,),
        sa.Column(
            "is_managed_externally",
            sa.Boolean(),
            nullable=False,
            server_default=sa.false(),
        ),
        sa.Column("external_url", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    with op.batch_alter_table("sl_datasets") as batch_op:
        batch_op.create_unique_constraint("uq_sl_datasets_uuid", ["uuid"])
        batch_op.create_unique_constraint(
            "uq_sl_datasets_sqlatable_id", ["sqlatable_id"]
        )

    op.create_table(
        "sl_dataset_columns",
        sa.Column("dataset_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("column_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(
            ["column_id"], ["sl_columns.id"], name="sl_dataset_columns_ibfk_2"
        ),
        sa.ForeignKeyConstraint(
            ["dataset_id"], ["sl_datasets.id"], name="sl_dataset_columns_ibfk_1"
        ),
    )

    op.create_table(
        "sl_dataset_tables",
        sa.Column("dataset_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("table_id", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(
            ["dataset_id"], ["sl_datasets.id"], name="sl_dataset_tables_ibfk_1"
        ),
        sa.ForeignKeyConstraint(
            ["table_id"], ["sl_tables.id"], name="sl_dataset_tables_ibfk_2"
        ),
    )

    # migrate existing datasets to the new models
    bind = op.get_bind()
    session = db.Session(bind=bind)  # pylint: disable=no-member

    datasets = session.query(SqlaTable).all()
    for dataset in datasets:
        dataset.fetch_columns_and_metrics(session)
        after_insert(target=dataset)
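
# A matching downgrade() is not part of this snippet; a minimal sketch, assuming it only
# needs to drop the tables created above (association tables first, then the entity tables):
def downgrade():
    op.drop_table("sl_dataset_tables")
    op.drop_table("sl_dataset_columns")
    op.drop_table("sl_table_columns")
    op.drop_table("sl_datasets")
    op.drop_table("sl_tables")
    op.drop_table("sl_columns")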
Beispiel #54
0
            RepositoryMetadata,
            foreign_keys=[
                RepositoryReview.table.c.repository_id,
                RepositoryReview.table.c.changeset_revision
            ],
            primaryjoin=((RepositoryReview.table.c.repository_id
                          == RepositoryMetadata.table.c.repository_id) &
                         (RepositoryReview.table.c.changeset_revision
                          == RepositoryMetadata.table.c.changeset_revision)),
            backref='review'),
        user=relation(User, backref="repository_reviews"),
        component_reviews=relation(
            ComponentReview,
            primaryjoin=((RepositoryReview.table.c.id
                          == ComponentReview.table.c.repository_review_id) &
                         (ComponentReview.table.c.deleted == false()))),
        private_component_reviews=relation(
            ComponentReview,
            primaryjoin=((RepositoryReview.table.c.id
                          == ComponentReview.table.c.repository_review_id) &
                         (ComponentReview.table.c.deleted == false()) &
                         (ComponentReview.table.c.private == true())))))

mapper(ComponentReview,
       ComponentReview.table,
       properties=dict(repository_review=relation(RepositoryReview),
                       component=relation(
                           Component,
                           primaryjoin=(ComponentReview.table.c.component_id ==
                                        Component.table.c.id))))
Beispiel #55
0
    sa.Column('created_at',
              sa.DateTime,
              nullable=False,
              default=datetime.utcnow,  # pass the callable so the default is evaluated per row, not at import time
              server_default=sa.text("(now() at time zone 'utc')")),
    sa.Column('updated_at',
              sa.DateTime,
              nullable=False,
              default=datetime.utcnow,
              onupdate=datetime.utcnow,
              server_onupdate=sa.text("(now() at time zone 'utc')"),
              server_default=sa.text("(now() at time zone 'utc')")),
    sa.Column('is_deleted',
              sa.Boolean,
              default=False,
              server_default=sa.false(),
              nullable=False),
    sa.UniqueConstraint('norad_cat_id', 'dt', 'source', 'is_deleted'))


def run_migrations(db_params):
    import os
    from alembic import command, config

    os.chdir(PROJECT_DIR)

    db_params.setdefault('host', 'localhost')
    db_params.setdefault('port', 5432)

    alembic_cfg = config.Config(os.path.join(PROJECT_DIR, 'alembic.ini'))
    alembic_cfg.set_main_option('sqlalchemy.url', DSN_TPL % db_params)
Beispiel #56
0
def get_all_installed_repositories(actually_installed=False):
    if actually_installed:
        return install_session.query( galaxy.model.tool_shed_install.ToolShedRepository ) \
                              .filter( and_( galaxy.model.tool_shed_install.ToolShedRepository.table.c.deleted == false(),
                                             galaxy.model.tool_shed_install.ToolShedRepository.table.c.uninstalled == false(),
                                             galaxy.model.tool_shed_install.ToolShedRepository.table.c.status == galaxy.model.tool_shed_install.ToolShedRepository.installation_status.INSTALLED ) ) \
                              .all()
    else:
        return install_session.query(
            galaxy.model.tool_shed_install.ToolShedRepository).all()
Beispiel #57
0
    def get_approval_list(self):
        data = parameter_required(('ptid', ))
        filter_starttime, filter_endtime = data.get(
            'starttime', '2018-12-01'), data.get('endtime', '2100-01-01')
        avstatus = data.get('avstatus', "")
        current_app.logger.info('get avstatus {0} '.format(avstatus))
        if avstatus and avstatus != 'all':
            avstatus = getattr(ApplyStatus, data.get('avstatus'), None)
        else:
            avstatus = None

        if is_admin():
            admin = Admin.query.filter_by_(ADid=request.user.id).first_()
            if not admin:
                current_app.logger.info('get admin failed id is {0}'.format(
                    request.user.id))
                raise NotFound("该管理员已被删除")

            pt = PermissionType.query.filter_by_(PTid=data.get('ptid')).first()
            # ptytype = ActivityType(int(data.get('pttype'))).name
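            # Join Approval -> Permission -> AdminPermission so that only approvals of the requested
            # permission type whose level matches one of this admin's permissions are returned.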
            ap_querry = Approval.query.filter(
                Approval.PTid == pt.PTid,
                Approval.AVlevel == Permission.PELevel,
                Permission.PTid == Approval.PTid,
                Permission.PIid == AdminPermission.PIid,
                AdminPermission.ADid == admin.ADid,
                Approval.isdelete == False,
                Permission.isdelete == False,
                AdminPermission.isdelete == False,
            )
            if avstatus is not None:
                current_app.logger.info('sql avstatus = {0}'.format(
                    avstatus.value))
                ap_querry = ap_querry.filter(
                    Approval.AVstatus == avstatus.value)

            ap_list = ap_querry.order_by(
                Approval.AVstatus.desc(),
                Approval.createtime.desc()).all_with_page()
        else:
            try:
                status = getattr(ApplyStatus, data.get('avstatus',
                                                       'wait_check'),
                                 'wait_check').value
            except Exception as e:
                current_app.logger.error(
                    "sup approval list status error :{}".format(e))
                status = None
            pt = PermissionType.query.filter_by_(
                PTid=data.get('ptid')).first_('审批类型不存在')
            sup = Supplizer.query.filter_by_(
                SUid=request.user.id).first_('供应商不存在')

            ap_list = Approval.query.filter_by_(
                AVstartid=sup.SUid).all_with_page()
        res = []
        for ap in ap_list:
            if not ap.AVstartdetail:
                continue
            ap.hide('AVcontentdetail', 'AVstartdetail')
            content = ap.AVcontentdetail or 'null'
            content = json.loads(content)
            if content.get('prid') and content.get('suname'):
                pr_nums = db.session.query(func.count(Product.PRid)).filter(
                    Product.isdelete == false(),
                    Product.PRstatus.in_((ProductStatus.ready.value,
                                          ProductStatus.active.value)),
                    Product.SUid == content.get('suid'),
                ).scalar() or 0  # number of this supplier's products currently on the shelf
                content[
                    'suname'] = f'{content.get("suname")} » 现有{pr_nums}件商品上架'
                start = ap.AVstartdetail or 'null'

                ap.fill('content', content)
                ap.fill('start', json.loads(start))
                ap.add('createtime')
                ap.fill('avstatus_en', ApplyStatus(ap.AVstatus).name)
                ap.fill('avstatus_zh', ApplyStatus(ap.AVstatus).zh_value)
                res.append(ap)

        return Success('获取待审批列表成功', data=res)
Beispiel #58
0
class Offer(PcObject, Model, ExtraDataMixin, DeactivableMixin,
            ProvidableMixin):
    # We redefine this so we can reference it in the baseScore column_property
    id = Column(BigInteger, primary_key=True, autoincrement=True)

    productId = Column(BigInteger,
                       ForeignKey("product.id"),
                       index=True,
                       nullable=False)

    product = relationship('Product',
                           foreign_keys=[productId],
                           backref='offers')

    venueId = Column(BigInteger,
                     ForeignKey("venue.id"),
                     nullable=False,
                     index=True)

    venue = relationship('Venue', foreign_keys=[venueId], backref='offers')

    bookingEmail = Column(String(120), nullable=True)

    type = Column(String(50),
                  CheckConstraint("type != 'None'"),
                  index=True,
                  nullable=False)

    name = Column(String(140), nullable=False)

    description = Column(Text, nullable=True)

    conditions = Column(String(120), nullable=True)

    ageMin = Column(Integer, nullable=True)
    ageMax = Column(Integer, nullable=True)

    url = Column(String(255), nullable=True)

    mediaUrls = Column(ARRAY(String(220)), nullable=False, default=[])

    durationMinutes = Column(Integer, nullable=True)

    isNational = Column(Boolean,
                        server_default=false(),
                        default=False,
                        nullable=False)

    dateCreated = Column(DateTime, nullable=False, default=datetime.utcnow)

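    # baseScore sums the scoreDelta of every Criterion linked to this offer through the
    # offer_criterion association table, falling back to 0 when no criterion is attached.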
    baseScore = column_property(
        select([func.coalesce(func.sum(Criterion.scoreDelta), 0)]).where(
            and_(Criterion.id == OfferCriterion.criterionId,
                 (OfferCriterion.offerId == id))))

    criteria = relationship('Criterion',
                            backref=db.backref('criteria', lazy='dynamic'),
                            secondary='offer_criterion')

    def errors(self):
        api_errors = super(Offer, self).errors()
        if self.venue:
            venue = self.venue
        else:
            venue = Venue.query.get(self.venueId)
        if self.isDigital and not venue.isVirtual:
            api_errors.add_error(
                'venue',
                'Une offre numérique doit obligatoirement être associée au lieu "Offre en ligne"'
            )
        elif not self.isDigital and venue.isVirtual:
            api_errors.add_error(
                'venue',
                'Une offre physique ne peut être associée au lieu "Offre en ligne"'
            )
        if self.isDigital and self._type_can_only_be_offline():
            api_errors.add_error(
                'url',
                'Une offre de type {} ne peut pas être numérique'.format(
                    self._get_label_from_type_string()))

        return api_errors

    def update_with_product_data(self, product_dict: dict):
        owning_offerer = self.product.owningOfferer
        if owning_offerer and owning_offerer == self.venue.managingOfferer:
            self.product.populate_from_dict(product_dict)

    @property
    def dateRange(self):
        if ProductType.is_thing(self.type) or not self.stocks:
            return DateTimes()

        start = min([stock.beginningDatetime for stock in self.stocks])
        end = max([stock.endDatetime for stock in self.stocks])
        return DateTimes(start, end)

    @property
    def lastStock(self):
        query = Stock.queryNotSoftDeleted()
        return query.join(Offer) \
            .filter(Offer.id == self.id) \
            .order_by(desc(Stock.bookingLimitDatetime)) \
            .first()

    @property
    def hasActiveMediation(self):
        return any(map(lambda m: m.isActive, self.mediations))

    @property
    def offerType(self):
        all_types = list(ThingType) + list(EventType)
        for possible_type in all_types:
            if str(possible_type) == self.type:
                return possible_type.as_dict()

    @property
    def isEvent(self):
        return ProductType.is_event(self.type)

    @property
    def isThing(self):
        return ProductType.is_thing(self.type)

    @property
    def isDigital(self):
        return self.url is not None and self.url != ''

    @property
    def isEditable(self):
        return self.lastProviderId is None

    @property
    def isFinished(self):
        return all(map(lambda s: not s.isBookable, self.stocks))

    @property
    def isFullyBooked(self):
        if self._has_unlimited_stock():
            return False

        bookable_stocks = list(filter(lambda s: s.isBookable, self.stocks))
        total_quantity = 0

        for stock in bookable_stocks:
            bookings = filter(lambda b: not b.isCancelled, stock.bookings)
            total_quantity += sum(map(lambda s: s.quantity, bookings))

        available_stocks = sum(
            map(lambda s: s.available if s.isBookable else 0, self.stocks))
        return total_quantity >= available_stocks

    @property
    def activeMediation(self):
        sorted_by_date_asc = sorted(self.mediations,
                                    key=lambda m: m.dateCreated)
        sorted_by_date_desc = reversed(sorted_by_date_asc)
        only_active = list(filter(lambda m: m.isActive, sorted_by_date_desc))
        return only_active[0] if only_active else None

    @property
    def stockAlertMessage(self) -> str:
        non_deleted_stocks = [
            stock for stock in self.stocks if not stock.isSoftDeleted
        ]
        total_number_stocks = len(non_deleted_stocks)
        number_of_empty_stocks = len(
            list(
                filter(lambda s: s.available == 0 or s.remainingQuantity == 0,
                       non_deleted_stocks)))
        remaining_for_all_stocks = sum(
            map(lambda s: s.remainingQuantity,
                filter(lambda s: s.available, non_deleted_stocks)))

        if total_number_stocks == 0:
            return 'pas encore de stock' if self.isThing else 'pas encore de places'

        if all([s.available is None for s in non_deleted_stocks]):
            return 'illimité'

        if self.isFullyBooked:
            return 'plus de stock' if self.isThing else 'plus de places pour toutes les dates'

        if number_of_empty_stocks >= 1:
            offer_word = pluralize(number_of_empty_stocks, 'offre')
            stock_or_place = 'stock' if self.isThing else 'places'
            return f'plus de {stock_or_place} pour {number_of_empty_stocks} {offer_word}'

        if not self.isFullyBooked:
            remaining_stock_word = 'en stock' if self.isThing else pluralize(
                remaining_for_all_stocks, 'place')
            return f'encore {remaining_for_all_stocks} {remaining_stock_word}'

    def _has_unlimited_stock(self):
        return any(map(lambda s: s.available is None, self.stocks))

    def _type_can_only_be_offline(self):
        offline_only_things = filter(
            lambda thing_type: thing_type.value['offlineOnly'], ThingType)
        offline_only_types_for_things = map(lambda x: x.__str__(),
                                            offline_only_things)
        return self.type in offline_only_types_for_things

    def _get_label_from_type_string(self):
        matching_type_thing = next(
            filter(lambda thing_type: str(thing_type) == self.type, ThingType))
        return matching_type_thing.value['proLabel']
Beispiel #59
0
class Instance(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, unique=True, nullable=False)
    discarded = db.Column(db.Boolean, default=False, nullable=False)
    # aka is_open_service in jormun
    is_free = db.Column(db.Boolean, default=False, nullable=False)

    # this doesn't impact anything, but is_free used to be used for this;
    # an instance can be freely accessible yet not based on open data
    is_open_data = db.Column(db.Boolean, default=False, nullable=False)

    authorizations = db.relationship('Authorization',
                                     backref=backref('instance',
                                                     lazy='joined'),
                                     lazy='dynamic',
                                     cascade='save-update, merge, delete')

    jobs = db.relationship('Job',
                           backref='instance',
                           lazy='dynamic',
                           cascade='save-update, merge, delete')

    poi_type_json = db.relationship(
        'PoiTypeJson',
        uselist=False,
        backref=backref('instance'),
        cascade='save-update, merge, delete, delete-orphan')

    import_stops_in_mimir = db.Column(db.Boolean,
                                      default=False,
                                      nullable=False)

    # ============================================================
    # params for jormungandr
    # ============================================================
    # the scenario used by jormungandr; by default we use the "new_default" scenario (and not the "default" one...)
    scenario = db.Column(db.Text, nullable=False, default='new_default')

    # order of the journeys; this order applies to clockwise requests and is reversed otherwise
    journey_order = db.Column(db.Enum('arrival_time',
                                      'departure_time',
                                      name='journey_order'),
                              default=default_values.journey_order,
                              nullable=False)

    max_walking_duration_to_pt = db.Column(
        db.Integer,
        default=default_values.max_walking_duration_to_pt,
        nullable=False)

    max_bike_duration_to_pt = db.Column(
        db.Integer,
        default=default_values.max_bike_duration_to_pt,
        nullable=False)

    max_bss_duration_to_pt = db.Column(
        db.Integer,
        default=default_values.max_bss_duration_to_pt,
        nullable=False)

    max_car_duration_to_pt = db.Column(
        db.Integer,
        default=default_values.max_car_duration_to_pt,
        nullable=False)

    walking_speed = db.Column(db.Float,
                              default=default_values.walking_speed,
                              nullable=False)

    bike_speed = db.Column(db.Float,
                           default=default_values.bike_speed,
                           nullable=False)

    bss_speed = db.Column(db.Float,
                          default=default_values.bss_speed,
                          nullable=False)

    car_speed = db.Column(db.Float,
                          default=default_values.car_speed,
                          nullable=False)

    max_nb_transfers = db.Column(db.Integer,
                                 default=default_values.max_nb_transfers,
                                 nullable=False)

    min_tc_with_car = db.Column(db.Integer,
                                default=default_values.min_tc_with_car,
                                nullable=False)

    min_tc_with_bike = db.Column(db.Integer,
                                 default=default_values.min_tc_with_bike,
                                 nullable=False)

    min_tc_with_bss = db.Column(db.Integer,
                                default=default_values.min_tc_with_bss,
                                nullable=False)

    min_bike = db.Column(db.Integer,
                         default=default_values.min_bike,
                         nullable=False)

    min_bss = db.Column(db.Integer,
                        default=default_values.min_bss,
                        nullable=False)

    min_car = db.Column(db.Integer,
                        default=default_values.min_car,
                        nullable=False)

    factor_too_long_journey = db.Column(
        db.Float,
        default=default_values.factor_too_long_journey,
        nullable=False)

    min_duration_too_long_journey = db.Column(
        db.Integer,
        default=default_values.min_duration_too_long_journey,
        nullable=False)

    max_duration_criteria = db.Column(
        db.Enum('time', 'duration', name='max_duration_criteria'),
        default=default_values.max_duration_criteria,
        nullable=False)

    max_duration_fallback_mode = db.Column(
        db.Enum('walking',
                'bss',
                'bike',
                'car',
                name='max_duration_fallback_mode'),
        default=default_values.max_duration_fallback_mode,
        nullable=False)

    max_duration = db.Column(db.Integer,
                             default=default_values.max_duration,
                             nullable=False,
                             server_default='86400')

    walking_transfer_penalty = db.Column(
        db.Integer,
        default=default_values.walking_transfer_penalty,
        nullable=False,
        server_default='2')

    night_bus_filter_max_factor = db.Column(
        db.Float,
        default=default_values.night_bus_filter_max_factor,
        nullable=False)

    night_bus_filter_base_factor = db.Column(
        db.Integer,
        default=default_values.night_bus_filter_base_factor,
        nullable=False,
        server_default='3600')

    priority = db.Column(db.Integer,
                         default=default_values.priority,
                         nullable=False,
                         server_default='0')

    bss_provider = db.Column(db.Boolean,
                             default=default_values.bss_provider,
                             nullable=False,
                             server_default=true())

    max_additional_connections = db.Column(
        db.Integer,
        default=default_values.max_additional_connections,
        nullable=False,
        server_default='2')

    successive_physical_mode_to_limit_id = db.Column(
        db.Text,
        default=default_values.successive_physical_mode_to_limit_id,
        nullable=False,
        server_default=default_values.successive_physical_mode_to_limit_id)

    full_sn_geometries = db.Column(db.Boolean,
                                   default=False,
                                   nullable=False,
                                   server_default=false())

    def __init__(self,
                 name=None,
                 is_free=False,
                 authorizations=None,
                 jobs=None):
        self.name = name
        self.is_free = is_free
        if authorizations:
            self.authorizations = authorizations
        if jobs:
            self.jobs = jobs

    def last_datasets(self, nb_dataset=1, family_type=None):
        """
        return the n last dataset of each family type loaded for this instance
        """
        query = db.session.query(func.distinct(DataSet.family_type)) \
            .filter(Instance.id == self.id)
        if family_type:
            query = query.filter(DataSet.family_type == family_type)

        family_types = query.all()

        result = []
        for family_type in family_types:
            data_sets = db.session.query(DataSet) \
                .join(Job) \
                .join(Instance) \
                .filter(Instance.id == self.id, DataSet.family_type == family_type, Job.state == 'done') \
                .order_by(Job.created_at.desc()) \
                .limit(nb_dataset) \
                .all()
            result += data_sets
        return result

    @classmethod
    def query_existing(cls):
        return cls.query.filter_by(discarded=False)

    @classmethod
    def get_by_name(cls, name):
        res = cls.query_existing().filter_by(name=name).first()
        return res

    @classmethod
    def get_from_id_or_name(cls, id=None, name=None):
        if id:
            return cls.query.get_or_404(id)
        elif name:
            return cls.query_existing().filter_by(name=name).first_or_404()
        else:
            raise Exception({'error': 'instance is required'}, 400)

    def __repr__(self):
        return '<Instance %r>' % self.name
Beispiel #60
0
    def index(self, trans, deleted=False, owner=None, name=None, **kwd):
        """
        GET /api/repositories
        Displays a collection of repositories with optional criteria.

        :param q:        (optional)if present search on the given query will be performed
        :type  q:        str

        :param page:     (optional)requested page of the search
        :type  page:     int

        :param page_size:     (optional)requested page_size of the search
        :type  page_size:     int

        :param jsonp:    (optional)flag whether to use jsonp format response, defaults to False
        :type  jsonp:    bool

        :param callback: (optional)name of the function to wrap callback in
                         used only when jsonp is true, defaults to 'callback'
        :type  callback: str

        :param deleted:  (optional)displays repositories that are or are not set to deleted.
        :type  deleted:  bool

        :param owner:    (optional)the owner's public username.
        :type  owner:    str

        :param name:     (optional)the repository name.
        :type  name:     str

        :returns dict:   object containing list of results

        Examples:
            GET http://localhost:9009/api/repositories
            GET http://localhost:9009/api/repositories?q=fastq
        """
        repository_dicts = []
        deleted = util.asbool(deleted)
        q = kwd.get('q', '')
        if q:
            page = kwd.get('page', 1)
            page_size = kwd.get('page_size', 10)
            try:
                page = int(page)
                page_size = int(page_size)
            except ValueError:
                raise RequestParameterInvalidException(
                    'The "page" and "page_size" parameters have to be integers.'
                )
            return_jsonp = util.asbool(kwd.get('jsonp', False))
            callback = kwd.get('callback', 'callback')
            search_results = self._search(trans, q, page, page_size)
            if return_jsonp:
                response = str('%s(%s);' %
                               (callback, json.dumps(search_results)))
            else:
                response = json.dumps(search_results)
            return response

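        # Non-search listing: exclude deprecated repositories (using SQLAlchemy's false()) and honor
        # the requested 'deleted' flag, optionally narrowing the results by owner and repository name.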
        clause_list = [
            and_(trans.app.model.Repository.table.c.deprecated == false(),
                 trans.app.model.Repository.table.c.deleted == deleted)
        ]
        if owner is not None:
            clause_list.append(
                and_(
                    trans.app.model.User.table.c.username == owner,
                    trans.app.model.Repository.table.c.user_id ==
                    trans.app.model.User.table.c.id))
        if name is not None:
            clause_list.append(trans.app.model.Repository.table.c.name == name)
        for repository in trans.sa_session.query( trans.app.model.Repository ) \
                                          .filter( *clause_list ) \
                                          .order_by( trans.app.model.Repository.table.c.name ):
            repository_dict = repository.to_dict(
                view='collection', value_mapper=self.__get_value_mapper(trans))
            repository_dict[ 'category_ids' ] = \
                [ trans.security.encode_id( x.category.id ) for x in repository.categories ]
            repository_dicts.append(repository_dict)
        return json.dumps(repository_dicts)