Esempio n. 1
0
 def recalculate_user_disk_usage( self, trans, **kwd ):
     """Recompute the stored disk usage for the user identified by kwd['id'].

     On PostgreSQL, usage is computed server-side with pgcalc(); otherwise it
     is recalculated in Python, retrying until the value is stable.  Redirects
     back to the admin users grid with a status message.
     """
     user_id = kwd.get( 'id', None )
     user = trans.sa_session.query( trans.model.User ).get( trans.security.decode_id( user_id ) )
     if not user:
         return trans.show_error_message( "User not found for id (%s)" % sanitize_text( str( user_id ) ) )
     engine = None
     if trans.app.config.database_connection:
         engine = trans.app.config.database_connection.split(':')[0]
     if engine not in ( 'postgres', 'postgresql' ):
         done = False
         while not done:
             current = user.get_disk_usage()
             new = user.calculate_disk_usage()
             trans.sa_session.refresh( user )
             # make sure usage didn't change while calculating, set done
             if user.get_disk_usage() == current:
                 done = True
             if new not in (current, None):
                 user.set_disk_usage( new )
                 trans.sa_session.add( user )
                 trans.sa_session.flush()
     else:
         # We can use the lightning fast pgcalc!
         current = user.get_disk_usage()
         # BUG FIX: the session lives on trans (as used everywhere else in
         # this method), not on the controller itself.
         new = pgcalc( trans.sa_session, user.id )
     # yes, still a small race condition between here and the flush
     if new in ( current, None ):
         message = 'Usage is unchanged at %s.' % nice_size( current )
     else:
         message = 'Usage has changed by %s to %s.' % ( nice_size( new - current ), nice_size( new ) )
     return trans.response.send_redirect( web.url_for( controller='admin',
                                                       action='users',
                                                       message=sanitize_text( message ),
                                                       status='info' ) )
def quotacheck(sa_session, user, engine):
    """Recalculate and report disk usage for a single user.

    BUG FIX: the parameter was named ``users`` while the body (and the
    recursive retry call) used ``user``, so every call raised NameError.
    The positional interface is unchanged.

    Relies on the module-level ``args`` namespace for --dryrun and on the
    ``pgcalc``/``nice_size`` helpers defined elsewhere in this script.
    """
    sa_session.refresh(user)
    current = user.get_disk_usage()
    print(user.username, '<' + user.email + '>:', end=' ')
    if engine not in ('postgres', 'postgresql'):
        new = user.calculate_disk_usage()
        sa_session.refresh(user)
        # usage changed while calculating, do it again
        if user.get_disk_usage() != current:
            print('usage changed while calculating, trying again...')
            return quotacheck(sa_session, user, engine)
    else:
        # PostgreSQL can compute (and optionally set) usage server-side.
        new = pgcalc(sa_session, user.id, dryrun=args.dryrun)
    # yes, still a small race condition between here and the flush
    print('old usage:', nice_size(current), 'change:', end=' ')
    if new in (current, None):
        print('none')
    else:
        if new > current:
            print('+%s' % (nice_size(new - current)))
        else:
            print('-%s' % (nice_size(current - new)))
        # pgcalc already persisted the new value on the postgres path.
        if not args.dryrun and engine not in ('postgres', 'postgresql'):
            user.set_disk_usage(new)
            sa_session.add(user)
            sa_session.flush()
Esempio n. 3
0
def quotacheck(sa_session, users, engine):
    # Recalculate and report disk usage for one user (legacy Python 2 code).
    # NOTE(review): the parameter is named `users` but the body and the
    # recursive call below use `user` -- as written this raises NameError;
    # the parameter should presumably be a single `user`.
    # Relies on module-level `options` for --dryrun and on pgcalc/nice_size
    # helpers defined elsewhere in this script.
    sa_session.refresh(user)
    current = user.get_disk_usage()
    print user.username, "<" + user.email + ">:",
    if engine not in ("postgres", "postgresql"):
        new = user.calculate_disk_usage()
        sa_session.refresh(user)
        # usage changed while calculating, do it again
        if user.get_disk_usage() != current:
            print "usage changed while calculating, trying again..."
            return quotacheck(sa_session, user, engine)
    else:
        # PostgreSQL path: compute (and optionally persist) usage server-side.
        new = pgcalc(sa_session, user.id, dryrun=options.dryrun)
    # yes, still a small race condition between here and the flush
    print "old usage:", nice_size(current), "change:",
    if new in (current, None):
        print "none"
    else:
        if new > current:
            print "+%s" % (nice_size(new - current))
        else:
            print "-%s" % (nice_size(current - new))
        # pgcalc already flushed on the postgres path; only persist here.
        if not options.dryrun and engine not in ("postgres", "postgresql"):
            user.set_disk_usage(new)
            sa_session.add(user)
            sa_session.flush()
Esempio n. 4
0
def quotacheck(sa_session, users, engine):
    # Recalculate and report disk usage for one user (legacy Python 2 code).
    # NOTE(review): parameter is `users` but the body uses `user` -- this
    # raises NameError as written; it should presumably take a single user
    # (the recursive call below already passes `user`).
    sa_session.refresh(user)
    current = user.get_disk_usage()
    print user.username, '<' + user.email + '>:',
    if engine not in ('postgres', 'postgresql'):
        new = user.calculate_disk_usage()
        sa_session.refresh(user)
        # usage changed while calculating, do it again
        if user.get_disk_usage() != current:
            print 'usage changed while calculating, trying again...'
            return quotacheck(sa_session, user, engine)
    else:
        # PostgreSQL path: compute (and optionally persist) usage server-side.
        new = pgcalc(sa_session, user.id, dryrun=options.dryrun)
    # yes, still a small race condition between here and the flush
    print 'old usage:', nice_size(current), 'change:',
    if new in (current, None):
        print 'none'
    else:
        if new > current:
            print '+%s' % (nice_size(new - current))
        else:
            print '-%s' % (nice_size(current - new))
        # Only persist on the non-postgres path; pgcalc already did it.
        if not options.dryrun and engine not in ('postgres', 'postgresql'):
            user.set_disk_usage(new)
            sa_session.add(user)
            sa_session.flush()
 def recalculate_user_disk_usage( self, trans, **kwd ):
     """Recompute the stored disk usage for the user identified by kwd['id'].

     On PostgreSQL, usage is computed server-side with pgcalc(); otherwise it
     is recalculated in Python, retrying until the value is stable.  Redirects
     back to the admin users grid with a status message.
     """
     user_id = kwd.get( 'id', None )
     user = trans.sa_session.query( trans.model.User ).get( trans.security.decode_id( user_id ) )
     if not user:
         return trans.show_error_message( "User not found for id (%s)" % sanitize_text( str( user_id ) ) )
     engine = None
     if trans.app.config.database_connection:
         engine = trans.app.config.database_connection.split(':')[0]
     if engine not in ( 'postgres', 'postgresql' ):
         done = False
         while not done:
             current = user.get_disk_usage()
             new = user.calculate_disk_usage()
             trans.sa_session.refresh( user )
             # make sure usage didn't change while calculating, set done
             if user.get_disk_usage() == current:
                 done = True
             if new not in (current, None):
                 user.set_disk_usage( new )
                 trans.sa_session.add( user )
                 trans.sa_session.flush()
     else:
         # We can use the lightning fast pgcalc!
         current = user.get_disk_usage()
         # BUG FIX: the session lives on trans (as used everywhere else in
         # this method), not on the controller itself.
         new = pgcalc( trans.sa_session, user.id )
     # yes, still a small race condition between here and the flush
     if new in ( current, None ):
         message = 'Usage is unchanged at %s.' % nice_size( current )
     else:
         message = 'Usage has changed by %s to %s.' % ( nice_size( new - current ), nice_size( new ) )
     return trans.response.send_redirect( web.url_for( controller='admin',
                                                       action='users',
                                                       message=sanitize_text( message ),
                                                       status='info' ) )
Esempio n. 6
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Set a short format description (peek) and a size blurb for the dataset."""
     if dataset.dataset.purged:
         # Underlying file is gone; report that instead of the format.
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = 'RNA Dot Plot format (Postscript derivative)'
         dataset.blurb = nice_size( dataset.get_size() )
Esempio n. 7
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the dataset's peek text and human-readable size blurb."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = 'RNA Dot Plot format (Postscript derivative)'
     dataset.blurb = nice_size(dataset.get_size())
Esempio n. 8
0
    def serialize_current_anonymous_user(self,
                                         user,
                                         keys,
                                         trans=None,
                                         **kwargs):
        """Serialize usage/quota stats for trans' current anonymous user.

        Only a small fixed set of keys is available; anything else in
        ``keys`` is silently skipped.
        """
        # use the current history if any to get usage stats for trans' anonymous user
        # TODO: might be better as sep. Serializer class
        usage = 0
        percent = None

        history = trans.history
        if history:
            usage = self.app.quota_agent.get_usage(trans,
                                                   history=trans.history)
            percent = self.app.quota_agent.get_percent(trans=trans,
                                                       usage=usage)

        # a very small subset of keys available
        available = {
            'id': None,
            'total_disk_usage': float(usage),
            'nice_total_disk_usage': util.nice_size(usage),
            'quota_percent': percent,
        }
        return {key: available[key] for key in keys if key in available}
Esempio n. 9
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = "Augustus model"
     dataset.blurb = nice_size(dataset.get_size())
Esempio n. 10
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Describe this UCSC binary datatype in the dataset's peek/blurb."""
     if dataset.dataset.purged:
         # File is gone from disk.
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = "Binary UCSC %s file" % self._name
         dataset.blurb = nice_size( dataset.get_size() )
Esempio n. 11
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = "Thermo Finnigan RAW file"
         dataset.blurb = nice_size(dataset.get_size())
Esempio n. 12
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = "Binary wave sequence file"
     dataset.blurb = nice_size(dataset.get_size())
Esempio n. 13
0
 def recalculate_user_disk_usage( self, trans, **kwd ):
     """Recalculate one user's disk usage and redirect to the admin users grid."""
     user_id = kwd.get( 'id', None )
     decoded_id = trans.security.decode_id( user_id )
     user = trans.sa_session.query( trans.model.User ).get( decoded_id )
     if not user:
         return trans.show_error_message( "User not found for id (%s)" % sanitize_text( str( user_id ) ) )
     # Capture usage before and after the recalculation to report the delta.
     old_usage = user.get_disk_usage()
     user.calculate_and_set_disk_usage()
     new_usage = user.get_disk_usage()
     if new_usage in ( old_usage, None ):
         message = 'Usage is unchanged at %s.' % nice_size( old_usage )
     else:
         message = 'Usage has changed by %s to %s.' % ( nice_size( new_usage - old_usage ), nice_size( new_usage ) )
     return trans.response.send_redirect( web.url_for( controller='admin',
                                                       action='users',
                                                       message=sanitize_text( message ),
                                                       status='info' ) )
Esempio n. 14
0
    def serialize(self, trans, ld):
        """Serialize the library dataset into a dictionary.

        Returns the dataset's dict form with encoded ids plus a breadcrumb
        path, expired-version info, display-formatted size/date, tags, and
        permission flags evaluated for the current user.
        """
        current_user_roles = trans.get_current_user_roles()

        # Build the full path for breadcrumb purposes.
        full_path = self._build_path(trans, ld.folder)
        dataset_item = (trans.security.encode_id(ld.id), ld.name)
        full_path.insert(0, dataset_item)
        # Path was built leaf-first; reverse it so it reads root -> dataset.
        full_path = full_path[::-1]

        # Find expired versions of the library dataset
        expired_ldda_versions = []
        for expired_ldda in ld.expired_datasets:
            expired_ldda_versions.append((trans.security.encode_id(expired_ldda.id), expired_ldda.name))

        rval = trans.security.encode_all_ids(ld.to_dict())
        if len(expired_ldda_versions) > 0:
            rval['has_versions'] = True
            rval['expired_versions'] = expired_ldda_versions
        rval['deleted'] = ld.deleted
        # Folder ids are prefixed with 'F' to distinguish them from dataset ids.
        rval['folder_id'] = 'F' + rval['folder_id']
        rval['full_path'] = full_path
        rval['file_size'] = util.nice_size(int(ld.library_dataset_dataset_association.get_size()))
        rval['date_uploaded'] = ld.library_dataset_dataset_association.create_time.strftime("%Y-%m-%d %I:%M %p")
        rval['can_user_modify'] = trans.app.security_agent.can_modify_library_item(current_user_roles, ld) or trans.user_is_admin()
        rval['is_unrestricted'] = trans.app.security_agent.dataset_is_public(ld.library_dataset_dataset_association.dataset)
        rval['tags'] = self.tag_manager.get_tags_str(ld.library_dataset_dataset_association.tags)

        #  Manage dataset permission is always attached to the dataset itself, not the ld or ldda to maintain consistency
        rval['can_user_manage'] = trans.app.security_agent.can_manage_dataset(current_user_roles, ld.library_dataset_dataset_association.dataset) or trans.user_is_admin()
        return rval
Esempio n. 15
0
 def set_peek(self, dataset, **kwd):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         # Nothing on disk to describe.
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = "Binary RGenetics file"
         dataset.blurb = nice_size(dataset.get_size())
Esempio n. 16
0
 def anon_user_api_value(self, trans):
     """Return data for an anonymous user, truncated to only usage and quota_percent"""
     usage = trans.app.quota_agent.get_usage(trans)
     quota_percent = trans.app.quota_agent.get_percent(trans=trans, usage=usage)
     return {
         'total_disk_usage': int(usage),
         'nice_total_disk_usage': util.nice_size(usage),
         'quota_percent': quota_percent,
     }
Esempio n. 17
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Describe this UCSC binary datatype in the dataset's peek/blurb."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = "Binary UCSC %s file" % self._name
     dataset.blurb = nice_size(dataset.get_size())
Esempio n. 18
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = "Binary ab1 sequence file"
         dataset.blurb = nice_size( dataset.get_size() )
Esempio n. 19
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Set the peek to the image format and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = 'Image in %s format' % dataset.extension
         dataset.blurb = nice_size( dataset.get_size() )
Esempio n. 20
0
 def set_peek(self, dataset, **kwd):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = "Binary RGenetics file"
     dataset.blurb = nice_size(dataset.get_size())
Esempio n. 21
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         # File was purged; describe the absence instead.
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = "Thermo Finnigan RAW file"
         dataset.blurb = nice_size(dataset.get_size())
Esempio n. 22
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Set the peek to the Gemini DB version and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = "Gemini SQLite Database, version %s" % ( dataset.metadata.gemini_version or 'unknown' )
         dataset.blurb = nice_size( dataset.get_size() )
Esempio n. 23
0
 def anon_user_api_value(self, trans):
     """Return data for an anonymous user, truncated to only usage and quota_percent"""
     usage = trans.app.quota_agent.get_usage(trans)
     quota_percent = trans.app.quota_agent.get_percent(trans=trans, usage=usage)
     return {
         'total_disk_usage': int(usage),
         'nice_total_disk_usage': util.nice_size(usage),
         'quota_percent': quota_percent,
     }
Esempio n. 24
0
 def set_peek(self, dataset):
     """Set the peek to the format name and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = "HTML file"
         dataset.blurb = nice_size(dataset.get_size())
Esempio n. 25
0
 def set_peek(self, dataset):
     """Set the peek to the image format and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = f'Image in {dataset.extension} format'
     dataset.blurb = nice_size(dataset.get_size())
 def get_html(self, prefix=""):
     """Render an HTML table listing the user's FTP-uploaded files.

     Shows a create/login prompt when no FTP directory is configured for
     the session, an "no files" message when the directory is missing or
     empty, and otherwise one row per file (path, nice size, ctime).
     """
     rval = FTPFileField.thead
     if self.dir is None:
         # No FTP dir => anonymous session; prompt for account creation/login.
         rval += '<tr><td colspan="4"><em>Please <a href="%s">create</a> or <a href="%s">log in to</a> a Galaxy account to view files uploaded via FTP.</em></td></tr>' % (
             url_for(controller='user',
                     action='create',
                     cntrller='user',
                     referer=url_for(controller='root')),
             url_for(controller='user',
                     action='login',
                     cntrller='user',
                     referer=url_for(controller='root')))
     elif not os.path.exists(self.dir):
         rval += '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>'
     else:
         uploads = []
         for (dirpath, dirnames, filenames) in os.walk(self.dir):
             for filename in filenames:
                 # Path shown relative to the user's FTP root.
                 path = relpath(os.path.join(dirpath, filename), self.dir)
                 # lstat so a symlink's own metadata is reported, not its target's.
                 statinfo = os.lstat(os.path.join(dirpath, filename))
                 uploads.append(
                     dict(path=path,
                          size=nice_size(statinfo.st_size),
                          ctime=time.strftime(
                              "%m/%d/%Y %I:%M:%S %p",
                              time.localtime(statinfo.st_ctime))))
         if not uploads:
             rval += '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>'
         for upload in uploads:
             rval += FTPFileField.trow % (prefix, self.name, upload['path'],
                                          upload['path'], upload['size'],
                                          upload['ctime'])
     rval += FTPFileField.tfoot
     rval += '<div class="toolParamHelp">This Galaxy server allows you to upload files via FTP.  To upload some files, log in to the FTP server at <strong>%s</strong> using your Galaxy credentials (email address and password).</div>' % self.ftp_site
     return rval
Esempio n. 27
0
def get_repository_file_contents(app,
                                 file_path,
                                 repository_id,
                                 is_admin=False):
    """Return the display-safe contents of a repository file for display in a browser.

    Symlinks, compressed and binary files are reported rather than read;
    text files are HTML-escaped and truncated to MAX_CONTENT_SIZE, then
    shrunk to basic_util.MAX_DISPLAY_SIZE if still too large.
    """
    if not is_path_browsable(app, file_path, repository_id, is_admin):
        log.warning(
            'Request tries to access a file outside of the repository location. File path: %s',
            file_path)
        return 'Invalid file path'
    # Symlink targets are checked by is_path_browsable
    if os.path.islink(file_path):
        return 'link to: ' + basic_util.to_html_string(os.readlink(file_path))
    elif checkers.is_gzip(file_path):
        return '<br/>gzip compressed file<br/>'
    elif checkers.is_bz2(file_path):
        return '<br/>bz2 compressed file<br/>'
    elif checkers.check_zip(file_path):
        return '<br/>zip compressed file<br/>'
    elif checkers.check_binary(file_path):
        return '<br/>Binary file<br/>'
    else:
        # BUG FIX: the file handle was opened without ever being closed;
        # read inside a context manager.  Also accumulate escaped lines in
        # a list instead of quadratic string concatenation.
        pieces = []
        total_len = 0
        with open(file_path) as fh:
            for line in fh:
                piece = basic_util.to_html_string(line)
                pieces.append(piece)
                total_len += len(piece)
                # Stop reading after string is larger than MAX_CONTENT_SIZE.
                if total_len > MAX_CONTENT_SIZE:
                    pieces.append(
                        '<br/>File contents truncated because file size is larger than maximum viewing size of %s<br/>' %
                        util.nice_size(MAX_CONTENT_SIZE))
                    break
        safe_str = ''.join(pieces)

        if len(safe_str) > basic_util.MAX_DISPLAY_SIZE:
            # Eliminate the middle of the file to display a file no larger than basic_util.MAX_DISPLAY_SIZE.
            # This may not be ideal if the file is larger than MAX_CONTENT_SIZE.
            join_by_str = \
                "<br/><br/>...some text eliminated here because file size is larger than maximum viewing size of %s...<br/><br/>" % \
                util.nice_size(basic_util.MAX_DISPLAY_SIZE)
            safe_str = util.shrink_string_by_size(safe_str,
                                                  basic_util.MAX_DISPLAY_SIZE,
                                                  join_by=join_by_str,
                                                  left_larger=True,
                                                  beginning_on_size_error=True)
        return safe_str
Esempio n. 28
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the peek and blurb text"""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = 'binary data'
         dataset.blurb = nice_size(dataset.get_size())
Esempio n. 29
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the peek to the Gemini DB version and the blurb to a nice file size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     version = dataset.metadata.gemini_version or 'unknown'
     dataset.peek = "Gemini SQLite Database, version %s" % (version)
     dataset.blurb = nice_size(dataset.get_size())
Esempio n. 30
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Set the peek and blurb text"""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = 'binary data'
     dataset.blurb = nice_size( dataset.get_size() )
Esempio n. 31
0
 def job_info(self, trans, **kwd):
     """Return a JSON-serializable summary of a data manager job.

     Includes each output HDA (encoded id, size, info URL), the parsed
     'data_tables' section of each output's JSON file, rerun/tool URLs,
     and any error messages encountered while reading outputs.  Non-admins
     are rejected unless enable_data_manager_user_view is configured.
     """
     not_is_admin = not trans.user_is_admin
     if not_is_admin and not trans.app.config.enable_data_manager_user_view:
         raise paste.httpexceptions.HTTPUnauthorized("This Galaxy instance is not configured to allow non-admins to view the data manager.")
     message = kwd.get('message', '')
     status = kwd.get('status', 'info')
     job_id = kwd.get('id', None)
     # A bad/undecodable id just logs and falls through to the error payload.
     try:
         job_id = trans.security.decode_id(job_id)
         job = trans.sa_session.query(trans.app.model.Job).get(job_id)
     except Exception as e:
         job = None
         log.error("Bad job id (%s) passed to job_info: %s" % (job_id, e))
     if not job:
         return {'message': "Invalid job (%s) was requested" % job_id,
                 'status': "error"}
     data_manager_id = job.data_manager_association.data_manager_id
     data_manager = trans.app.data_managers.get_manager(data_manager_id)
     hdas = [assoc.dataset for assoc in job.get_output_datasets()]
     hda_info = []
     data_manager_output = []
     error_messages = []
     for hda in hdas:
         hda_info.append({'id': hda.id,
                          'encId': trans.security.encode_id(hda.id),
                          'name': hda.name,
                          'created': unicodify(hda.create_time.strftime(trans.app.config.pretty_datetime_format)),
                          'fileSize': nice_size(hda.dataset.file_size),
                          'fileName': hda.file_name,
                          'infoUrl': web.url_for(controller='dataset',
                                                 action='show_params',
                                                 dataset_id=trans.security.encode_id(hda.id))})
         # Each output file is expected to be a JSON document; failures are
         # collected rather than aborting the whole report.
         try:
             data_manager_json = loads(open(hda.get_file_name()).read())
         except Exception as e:
             data_manager_json = {}
             error_messages.append("Unable to obtain data_table info for hda (%s): %s" % (hda.id, e))
         values = []
         for key, value in data_manager_json.get('data_tables', {}).items():
             values.append((key, value))
         data_manager_output.append(values)
     return {'jobId': job_id,
             'exitCode': job.exit_code,
             'runUrl': web.url_for(controller="tool_runner",
                                   action="rerun",
                                   job_id=trans.security.encode_id(job.id)),
             'commandLine': job.command_line,
             'dataManager': {'id': data_manager_id,
                             'name': data_manager.name,
                             'description': data_manager.description.lower(),
                             'toolUrl': web.url_for(controller='root',
                                                    tool_id=data_manager.tool.id)},
             'hdaInfo': hda_info,
             'dataManagerOutput': data_manager_output,
             'errorMessages': error_messages,
             'viewOnly': not_is_admin,
             'message': message,
             'status': status}
Esempio n. 32
0
 def __user_get_usage( self, trans ):
     """Return {'percent': n} when a quota applies, else {'usage': nice size}."""
     usage = trans.app.quota_agent.get_usage( trans )
     percent = trans.app.quota_agent.get_percent( trans=trans, usage=usage )
     if percent is None:
         # No quota configured; report the raw usage as a friendly string.
         return { 'usage': util.nice_size( usage ) }
     return { 'percent': percent }
Esempio n. 33
0
 def __user_get_usage(self, trans):
     """Return {'percent': n} when a quota applies, else {'usage': nice size}."""
     usage = trans.app.quota_agent.get_usage(trans)
     percent = trans.app.quota_agent.get_percent(trans=trans, usage=usage)
     if percent is None:
         # No quota configured; report the raw usage as a friendly string.
         return {'usage': util.nice_size(usage)}
     return {'percent': percent}
Esempio n. 34
0
def render_body(context, **pageargs):
    # NOTE(review): this is auto-generated Mako template renderer code
    # (Python 2 era -- uses `unicode`); do not hand-edit, regenerate from
    # the .mako source instead.  It renders a tool run's parameter table
    # (name, created/modified dates, filesize, dbkey, format, inputs).
    context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)

        # Delegate recursive rendering of the tool's input tree.
        def inputs_recursive(input_params, param_values, depth=1):
            return render_inputs_recursive(context.locals_(__M_locals),
                                           input_params, param_values, depth)

        tool = context.get('tool', UNDEFINED)
        params_objects = context.get('params_objects', UNDEFINED)
        hda = context.get('hda', UNDEFINED)
        history = context.get('history', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 1
        from galaxy.util import nice_size

        __M_locals.update(
            __M_dict_builtin([(__M_key, __M_locals_builtin()[__M_key])
                              for __M_key in ['nice_size']
                              if __M_key in __M_locals_builtin()]))
        __M_writer(u'\n\n')
        # SOURCE LINE 23
        __M_writer(
            u'\n\n<table class="tabletip">\n  <thead>\n    <tr><th colspan="2" style="font-size: 120%;">'
        )
        # SOURCE LINE 27
        __M_writer(unicode(tool.name))
        __M_writer(
            u'</th></tr>\n  </thead>\n  <tbody>\n    <tr><th>Created:</th><td>'
        )
        # SOURCE LINE 30
        __M_writer(unicode(history.create_time.strftime("%b %d, %Y")))
        __M_writer(u'</td></tr>\n    <tr><th>Modified:</th><td>')
        # SOURCE LINE 31
        __M_writer(unicode(history.update_time.strftime("%b %d, %Y")))
        __M_writer(u'</td></tr>\n')
        # SOURCE LINE 33
        __M_writer(u'    <tr><th>Filesize:</th><td>')
        __M_writer(unicode(nice_size(hda.dataset.file_size)))
        __M_writer(u'</td></tr>\n    <tr><th>Dbkey:</th><td>')
        # SOURCE LINE 34
        __M_writer(unicode(hda.dbkey))
        __M_writer(u'</td></tr>\n    <tr><th>Format:</th><td>')
        # SOURCE LINE 35
        __M_writer(unicode(hda.ext))
        __M_writer(
            u'</td></tr>\n    \n</table><br />\n<table class="tabletip">\n  <thead>\n    <tr>\n      <th>Input Parameter</th>\n      <th>Value</th>\n    </tr>\n  </thead>\n  <tbody>\n      '
        )
        # SOURCE LINE 46
        __M_writer(
            unicode(inputs_recursive(tool.inputs, params_objects, depth=1)))
        __M_writer(u'\n  </tbody>\n</table>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
Esempio n. 35
0
 def format(self, key, value):
     """Map a raw stat key/value to a (title, display value) pair.

     Known keys get a friendly title from TITLES; keys with a registered
     converter in CONVERSION use it, and ``*_bytes`` keys are humanized.
     """
     title = TITLES.get(key, key)
     if key in CONVERSION:
         return title, CONVERSION[key](value)
     elif key.endswith("_bytes"):
         try:
             # BUG FIX: humanize the value, not the key name --
             # nice_size(key) always raised ValueError on the key string,
             # making this branch a no-op.
             return title, nice_size(value)
         except ValueError:
             pass
     return title, value
Esempio n. 36
0
 def set_peek(self, dataset, is_multi_byte=False):
     """Set the peek to a file preview and the blurb to a sequence count or size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
     if dataset.metadata.sequences:
         # Prefer a sequence count when metadata provides one.
         dataset.blurb = "%s sequences" % util.commaify(str(dataset.metadata.sequences))
     else:
         dataset.blurb = nice_size(dataset.get_size())
Esempio n. 37
0
 def set_peek(self, dataset):
     """Set the peek to a file preview and the blurb to a sequence count or size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
     else:
         dataset.peek = get_file_peek(dataset.file_name)
         if dataset.metadata.sequences:
             # Prefer a sequence count when metadata provides one.
             dataset.blurb = f"{util.commaify(str(dataset.metadata.sequences))} sequences"
         else:
             dataset.blurb = nice_size(dataset.get_size())
Esempio n. 38
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Set the peek to a file preview and the blurb to a sequence count or size."""
     if dataset.dataset.purged:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
         return
     dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
     if dataset.metadata.sequences:
         dataset.blurb = "%s sequences" % util.commaify( str( dataset.metadata.sequences ) )
     else:
         dataset.blurb = nice_size( dataset.get_size() )
Esempio n. 39
0
    def get_quota(self, user, nice_size=False):
        """Return the effective quota in bytes for ``user``.

        Calculated like so:

            1. Anonymous users get the default quota.
            2. Logged in users start with the highest of their associated '='
               quotas or the default quota, if there are no associated '='
               quotas.  If an '=' unlimited (-1 in the database) quota is found
               during this process, the user has no quota (aka unlimited).
            3. Quota is increased or decreased by any corresponding '+' or '-'
               quotas.

        Returns None for unlimited; with ``nice_size=True`` returns a
        human-readable string ('unlimited' for None).
        """
        if not user:
            return self.default_unregistered_quota
        # Gather the distinct quotas attached via the user's groups and
        # directly to the user.
        quotas = []
        for group in [uga.group for uga in user.groups]:
            for quota in [gqa.quota for gqa in group.quotas]:
                if quota not in quotas:
                    quotas.append(quota)
        for quota in [uqa.quota for uqa in user.quotas]:
            if quota not in quotas:
                quotas.append(quota)
        use_default = True
        # Renamed from `max` to avoid shadowing the builtin.
        max_bytes = 0
        adjustment = 0
        rval = 0
        for quota in quotas:
            if quota.deleted:
                continue
            if quota.operation == '=' and quota.bytes == -1:
                # An explicit '=' unlimited quota trumps everything.
                rval = None
                break
            elif quota.operation == '=':
                use_default = False
                if quota.bytes > max_bytes:
                    max_bytes = quota.bytes
            elif quota.operation == '+':
                adjustment += quota.bytes
            elif quota.operation == '-':
                adjustment -= quota.bytes
        if use_default:
            max_bytes = self.default_registered_quota
            if max_bytes is None:
                rval = None
        if rval is not None:
            rval = max_bytes + adjustment
            if rval <= 0:
                rval = 0
        if nice_size:
            if rval is not None:
                rval = util.nice_size(rval)
            else:
                rval = 'unlimited'
        return rval
Esempio n. 40
0
    def get_quota( self, user, nice_size=False ):
        """
        Calculated like so:

            1. Anonymous users get the default quota.
            2. Logged in users start with the highest of their associated '='
               quotas or the default quota, if there are no associated '='
               quotas.  If an '=' unlimited (-1 in the database) quota is found
               during this process, the user has no quota (aka unlimited).
            3. Quota is increased or decreased by any corresponding '+' or '-'
               quotas.
        """
        if not user:
            return self.default_unregistered_quota
        # Collect the distinct quotas attached via groups and directly.
        seen = []
        for grp in [ uga.group for uga in user.groups ]:
            for q in [ gqa.quota for gqa in grp.quotas ]:
                if q not in seen:
                    seen.append( q )
        for q in [ uqa.quota for uqa in user.quotas ]:
            if q not in seen:
                seen.append( q )
        use_default = True
        base = 0
        delta = 0
        rval = 0
        for q in seen:
            if q.deleted:
                continue
            if q.operation == '=' and q.bytes == -1:
                # An explicit '=' unlimited quota wins outright.
                rval = None
                break
            elif q.operation == '=':
                use_default = False
                if q.bytes > base:
                    base = q.bytes
            elif q.operation == '+':
                delta += q.bytes
            elif q.operation == '-':
                delta -= q.bytes
        if use_default:
            base = self.default_registered_quota
            if base is None:
                rval = None
        if rval is not None:
            rval = base + delta
            if rval <= 0:
                rval = 0
        if nice_size:
            rval = util.nice_size( rval ) if rval is not None else 'unlimited'
        return rval
Esempio n. 41
0
def get_repository_file_contents(app, file_path, repository_id, is_admin=False):
    """Return the display-safe contents of a repository file for display in a browser.

    Symlinks, compressed files and binaries get a short HTML placeholder
    instead of raw contents; text files are HTML-escaped and truncated so
    the result never exceeds the configured display limits.
    """
    if not is_path_browsable(app, file_path, repository_id, is_admin):
        log.warning('Request tries to access a file outside of the repository location. File path: %s', file_path)
        return 'Invalid file path'
    # Symlink targets are checked by is_path_browsable
    if os.path.islink(file_path):
        return 'link to: ' + basic_util.to_html_string(os.readlink(file_path))
    elif checkers.is_gzip(file_path):
        return '<br/>gzip compressed file<br/>'
    elif checkers.is_bz2(file_path):
        return '<br/>bz2 compressed file<br/>'
    elif checkers.is_zip(file_path):
        return '<br/>zip compressed file<br/>'
    elif checkers.check_binary(file_path):
        return '<br/>Binary file<br/>'
    else:
        safe_str = ''
        # Use a context manager so the file handle is always closed
        # (the original leaked the handle returned by open()).
        with open(file_path) as fh:
            for line in fh:
                safe_str = '%s%s' % (safe_str, basic_util.to_html_string(line))
                # Stop reading after string is larger than MAX_CONTENT_SIZE.
                if len(safe_str) > MAX_CONTENT_SIZE:
                    large_str = \
                        '<br/>File contents truncated because file size is larger than maximum viewing size of %s<br/>' % \
                        util.nice_size(MAX_CONTENT_SIZE)
                    safe_str = '%s%s' % (safe_str, large_str)
                    break

        if len(safe_str) > basic_util.MAX_DISPLAY_SIZE:
            # Eliminate the middle of the file to display a file no larger than basic_util.MAX_DISPLAY_SIZE.
            # This may not be ideal if the file is larger than MAX_CONTENT_SIZE.
            join_by_str = \
                "<br/><br/>...some text eliminated here because file size is larger than maximum viewing size of %s...<br/><br/>" % \
                util.nice_size(basic_util.MAX_DISPLAY_SIZE)
            safe_str = util.shrink_string_by_size(safe_str,
                                                  basic_util.MAX_DISPLAY_SIZE,
                                                  join_by=join_by_str,
                                                  left_larger=True,
                                                  beginning_on_size_error=True)
        return safe_str
Esempio n. 42
0
 def format(self, key, value):
     """Return a ``(title, value)`` pair with *value* rendered for display.

     Keys with a registered converter are converted; ``*_bytes`` keys are
     humanized when numeric; whole-number values are shown as ints.
     """
     title = TITLES.get(key, key)
     if key in CONVERSION:
         return title, CONVERSION[key](value)
     if key.endswith("_bytes"):
         try:
             return title, nice_size(value)
         except ValueError:
             # Not a usable byte count -- return the raw value below.
             pass
     elif isinstance(value, numbers.Number) and value == int(value):
         value = int(value)
     return title, value
Esempio n. 43
0
 def anon_user_api_value(self, trans):
     """Return data for an anonymous user, truncated to only usage and quota_percent"""
     if not trans.user and not trans.history:
         # No user and no history yet: nothing meaningful to report.
         return {}
     quota_agent = trans.app.quota_agent
     usage = quota_agent.get_usage(trans)
     percent = quota_agent.get_percent(trans=trans, usage=usage)
     return {
         'total_disk_usage': int(usage),
         'nice_total_disk_usage': util.nice_size(usage),
         'quota_percent': percent,
     }
Esempio n. 44
0
def quotacheck(sa_session, users, engine):
    """Recalculate and report disk usage for each user in *users*.

    For every user, prints current usage and the change; unless the
    module-level ``args.dryrun`` flag is set, the recalculated usage is also
    written back to the database.

    NOTE(review): the original body referenced a bare ``user`` although the
    parameter is ``users`` (a NameError unless a same-named global existed) --
    fixed by iterating over ``users``.  ``engine`` is kept for interface
    compatibility but is unused here; ``args`` is presumably the script's
    module-level argparse namespace -- TODO confirm.
    """
    for user in users:
        # Refresh so we compare against the usage currently in the database.
        sa_session.refresh(user)
        current = user.get_disk_usage()
        print(user.username, '<' + user.email + '>:', end=' ')

        if not args.dryrun:
            # Apply new disk usage
            user.calculate_and_set_disk_usage()
            # And fetch
            new = user.get_disk_usage()
        else:
            new = user.calculate_disk_usage()

        print('old usage:', nice_size(current), 'change:', end=' ')
        if new in (current, None):
            print('none')
        else:
            if new > current:
                print('+%s' % (nice_size(new - current)))
            else:
                print('-%s' % (nice_size(current - new)))
Esempio n. 45
0
 def format(self, key, value):
     """Map a raw stat *key*/*value* onto a display title and formatted value."""
     title = TITLES.get(key, key)
     if key in CONVERSION:
         return title, CONVERSION[key](value)
     if key.endswith("_bytes"):
         try:
             return title, nice_size(value)
         except ValueError:
             # Couldn't humanize; fall back to the untouched value.
             return title, value
     if isinstance(value, numbers.Number) and value == int(value):
         # Whole-number floats read better as plain ints.
         value = int(value)
     return title, value
Esempio n. 46
0
    def serialize(self, trans, ld):
        """Serialize the library dataset into a dictionary."""
        user_roles = trans.get_current_user_roles()
        ldda = ld.library_dataset_dataset_association

        # Build the full path for breadcrumb purposes, root folder first.
        breadcrumb = self._build_path(trans, ld.folder)
        breadcrumb.insert(0, (trans.security.encode_id(ld.id), ld.name))
        breadcrumb.reverse()

        # Collect any expired (older) versions of the library dataset.
        expired_versions = [
            (trans.security.encode_id(old.id), old.name)
            for old in ld.expired_datasets
        ]

        rval = trans.security.encode_all_ids(ld.to_dict())
        if expired_versions:
            rval['has_versions'] = True
            rval['expired_versions'] = expired_versions

        job_assocs = ldda.creating_job_associations
        if job_assocs:
            creating_job = job_assocs[0].job
            if creating_job.stdout:
                rval['job_stdout'] = creating_job.stdout.strip()
            if creating_job.stderr:
                rval['job_stderr'] = creating_job.stderr.strip()
        if ldda.dataset.uuid:
            rval['uuid'] = str(ldda.dataset.uuid)
        rval['deleted'] = ld.deleted
        rval['folder_id'] = f"F{rval['folder_id']}"
        rval['full_path'] = breadcrumb
        rval['file_size'] = util.nice_size(int(ldda.get_size()))
        rval['date_uploaded'] = ldda.create_time.strftime("%Y-%m-%d %I:%M %p")
        rval['update_time'] = ldda.update_time.strftime("%Y-%m-%d %I:%M %p")
        rval['can_user_modify'] = trans.user_is_admin or trans.app.security_agent.can_modify_library_item(
            user_roles, ld)
        rval['is_unrestricted'] = trans.app.security_agent.dataset_is_public(
            ldda.dataset)
        rval['tags'] = self.tag_handler.get_tags_str(ldda.tags)

        # Manage-dataset permission is always attached to the dataset itself,
        # not the ld or ldda, to maintain consistency.
        rval['can_user_manage'] = trans.user_is_admin or trans.app.security_agent.can_manage_dataset(
            user_roles, ldda.dataset)
        return rval
Esempio n. 47
0
 def set_peek( self, dataset, is_multi_byte=False ):
     """Set the dataset's peek (table listing) and blurb (size) for a SQLite database.

     :param dataset: dataset whose ``peek``/``blurb`` attributes are set in place
     :param is_multi_byte: unused; kept for datatype API compatibility
     """
     if not dataset.dataset.purged:
         # (Removed a dead `dataset.peek = "SQLite Database"` store that was
         # immediately overwritten by the join below.)
         lines = ['SQLite Database']
         if dataset.metadata.tables:
             for table in dataset.metadata.tables:
                 try:
                     lines.append('%s [%s]' % (table, dataset.metadata.table_row_count[table]))
                 except Exception:
                     # Narrowed from a bare `except:` so KeyboardInterrupt /
                     # SystemExit are no longer swallowed; a missing row count
                     # just skips that table.
                     continue
         dataset.peek = '\n'.join(lines)
         dataset.blurb = nice_size( dataset.get_size() )
     else:
         dataset.peek = 'file does not exist'
         dataset.blurb = 'file purged from disk'
Esempio n. 48
0
    def show( self, trans, id, **kwd ):
        """
        show( self, trans, id, **kwd )
        * GET /api/libraries/datasets/{encoded_dataset_id}:
            Displays information about the dataset identified by the encoded ID.

        :param  id:      the encoded id of the dataset to query
        :type   id:      an encoded id string

        :returns:   detailed dataset information from base controller
        :rtype:     dictionary

        .. seealso:: :attr:`galaxy.web.base.controller.UsesLibraryMixinItems.get_library_dataset`
        """
        try:
            ld = self.get_library_dataset( trans, id=id, check_ownership=False, check_accessible=True )
        except Exception:
            raise exceptions.ObjectNotFound( 'Requested library_dataset was not found.' )

        roles = trans.get_current_user_roles()
        ldda = ld.library_dataset_dataset_association
        security_agent = trans.app.security_agent

        # Breadcrumb path, root folder first.
        path = self._build_path( trans, ld.folder )
        path.insert( 0, ( trans.security.encode_id( ld.id ), ld.name ) )
        path.reverse()

        # Expired (older) versions of this library dataset.
        expired = [ ( trans.security.encode_id( e.id ), e.name ) for e in ld.expired_datasets ]

        rval = trans.security.encode_all_ids( ld.to_dict() )
        if expired:
            rval[ 'has_versions' ] = True
            rval[ 'expired_versions' ] = expired
        rval[ 'deleted' ] = ld.deleted
        rval[ 'folder_id' ] = 'F' + rval[ 'folder_id' ]
        rval[ 'full_path' ] = path
        rval[ 'file_size' ] = util.nice_size( int( ldda.get_size() ) )
        rval[ 'date_uploaded' ] = ldda.create_time.strftime( "%Y-%m-%d %I:%M %p" )
        rval[ 'can_user_modify' ] = security_agent.can_modify_library_item( roles, ld ) or trans.user_is_admin()
        rval[ 'is_unrestricted' ] = security_agent.dataset_is_public( ldda.dataset )

        # Manage-dataset permission is always attached to the dataset itself,
        # not the ld or ldda, to maintain consistency.
        rval[ 'can_user_manage' ] = security_agent.can_manage_dataset( roles, ldda.dataset ) or trans.user_is_admin()
        return rval
Esempio n. 49
0
    def show( self, trans, id, **kwd ):
        """
        show( self, trans, id, **kwd )
        * GET /api/libraries/datasets/{encoded_dataset_id}:
            Displays information about the dataset identified by the encoded ID.

        :param  id:      the encoded id of the dataset to query
        :type   id:      an encoded id string

        :returns:   detailed dataset information from base controller
        :rtype:     dictionary

        .. seealso:: :attr:`galaxy.web.base.controller.UsesLibraryMixinItems.get_library_dataset`
        """
        try:
            library_dataset = self.get_library_dataset(trans, id=id, check_ownership=False, check_accessible=True)
        except Exception:
            raise exceptions.ObjectNotFound('Requested library_dataset was not found.')

        current_user_roles = trans.get_current_user_roles()
        ldda = library_dataset.library_dataset_dataset_association

        # Build the full path for breadcrumb purposes (root folder first).
        full_path = self._build_path(trans, library_dataset.folder)
        full_path.insert(0, (trans.security.encode_id(library_dataset.id), library_dataset.name))
        full_path = full_path[::-1]

        # Find expired versions of the library dataset.
        expired_ldda_versions = [(trans.security.encode_id(expired.id), expired.name)
                                 for expired in library_dataset.expired_datasets]

        rval = trans.security.encode_all_ids(library_dataset.to_dict())
        if expired_ldda_versions:
            rval['has_versions'] = True
            rval['expired_versions'] = expired_ldda_versions
        rval['deleted'] = library_dataset.deleted
        rval['folder_id'] = 'F' + rval['folder_id']
        rval['full_path'] = full_path
        rval['file_size'] = util.nice_size(int(ldda.get_size()))
        rval['date_uploaded'] = ldda.create_time.strftime("%Y-%m-%d %I:%M %p")
        rval['can_user_modify'] = trans.app.security_agent.can_modify_library_item(current_user_roles, library_dataset) or trans.user_is_admin()
        rval['is_unrestricted'] = trans.app.security_agent.dataset_is_public(ldda.dataset)

        # Manage-dataset permission is always attached to the dataset itself,
        # not the ld or ldda, to maintain consistency.
        rval['can_user_manage'] = trans.app.security_agent.can_manage_dataset(current_user_roles, ldda.dataset) or trans.user_is_admin()
        return rval
    def delete(self, trans, encoded_dataset_id, **kwd):
        """
        delete( self, trans, encoded_dataset_id, **kwd ):
        * DELETE /api/libraries/datasets/{encoded_dataset_id}
            Marks the dataset deleted or undeleted based on the value
            of the undelete flag.
            If the flag is not present it is considered False and the
            item is marked deleted.

        :param  encoded_dataset_id:      the encoded id of the dataset to change
        :type   encoded_dataset_id:      an encoded id string

        :returns:   dict containing information about the dataset
        :rtype:     dictionary
        """
        undelete = util.string_as_bool(kwd.get('undelete', False))
        try:
            dataset = self.get_library_dataset(trans,
                                               id=encoded_dataset_id,
                                               check_ownership=False,
                                               check_accessible=False)
        except Exception as e:
            raise exceptions.ObjectNotFound(
                'Requested dataset was not found.' + str(e))
        # (Un)deleting requires modify permission on the item or admin rights.
        roles = trans.get_current_user_roles()
        can_modify = trans.app.security_agent.can_modify_library_item(
            roles, dataset)
        if not (can_modify or trans.user_is_admin()):
            raise exceptions.InsufficientPermissionsException(
                'You do not have proper permissions to delete this dataset.')

        # Toggle the flag rather than destroying anything.
        dataset.deleted = not undelete

        trans.sa_session.add(dataset)
        trans.sa_session.flush()

        rval = trans.security.encode_all_ids(dataset.to_dict())
        rval['file_size'] = util.nice_size(
            int(dataset.library_dataset_dataset_association.get_size()))
        rval['update_time'] = dataset.update_time.strftime("%Y-%m-%d %I:%M %p")
        rval['deleted'] = dataset.deleted
        rval['folder_id'] = 'F' + rval['folder_id']
        return rval
def render_body(context,**pageargs):
    """Mako-generated template body: renders the tool-run info table for a
    dataset (tool name, history timestamps, file size / dbkey / format, and
    the recursive input-parameter table).

    NOTE(review): this is auto-generated Mako output (see the
    ``# SOURCE LINE n`` markers) -- do not hand-edit; regenerate from the
    .mako template instead.  The use of ``unicode`` means this was generated
    for Python 2.
    """
    # Standard Mako plumbing: push a new frame, capture locals and writer.
    context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        # Template-local helper: delegates to render_inputs_recursive with the
        # current local namespace.
        def inputs_recursive(input_params,param_values,depth=1):
            return render_inputs_recursive(context.locals_(__M_locals),input_params,param_values,depth)
        # Template variables (UNDEFINED if not supplied by the caller).
        tool = context.get('tool', UNDEFINED)
        params_objects = context.get('params_objects', UNDEFINED)
        hda = context.get('hda', UNDEFINED)
        history = context.get('history', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 1
        from galaxy.util import nice_size 
        
        # Export names imported inside the template back into the Mako locals.
        __M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin()[__M_key]) for __M_key in ['nice_size'] if __M_key in __M_locals_builtin()]))
        __M_writer(u'\n\n')
        # SOURCE LINE 23
        __M_writer(u'\n\n<table class="tabletip">\n  <thead>\n    <tr><th colspan="2" style="font-size: 120%;">')
        # SOURCE LINE 27
        __M_writer(unicode(tool.name))
        __M_writer(u'</th></tr>\n  </thead>\n  <tbody>\n    <tr><th>Created:</th><td>')
        # SOURCE LINE 30
        __M_writer(unicode(history.create_time.strftime("%b %d, %Y")))
        __M_writer(u'</td></tr>\n    <tr><th>Modified:</th><td>')
        # SOURCE LINE 31
        __M_writer(unicode(history.update_time.strftime("%b %d, %Y")))
        __M_writer(u'</td></tr>\n')
        # SOURCE LINE 33
        __M_writer(u'    <tr><th>Filesize:</th><td>')
        __M_writer(unicode(nice_size(hda.dataset.file_size)))
        __M_writer(u'</td></tr>\n    <tr><th>Dbkey:</th><td>')
        # SOURCE LINE 34
        __M_writer(unicode(hda.dbkey))
        __M_writer(u'</td></tr>\n    <tr><th>Format:</th><td>')
        # SOURCE LINE 35
        __M_writer(unicode(hda.ext))
        __M_writer(u'</td></tr>\n    \n</table><br />\n<table class="tabletip">\n  <thead>\n    <tr>\n      <th>Input Parameter</th>\n      <th>Value</th>\n    </tr>\n  </thead>\n  <tbody>\n      ')
        # SOURCE LINE 46
        __M_writer(unicode( inputs_recursive(tool.inputs, params_objects, depth=1) ))
        __M_writer(u'\n  </tbody>\n</table>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
Esempio n. 52
0
    def delete( self, trans, encoded_dataset_id, **kwd ):
        """
        delete( self, trans, encoded_dataset_id, **kwd ):
        * DELETE /api/libraries/datasets/{encoded_dataset_id}
            Marks the dataset deleted or undeleted based on the value
            of the undelete flag.
            If the flag is not present it is considered False and the
            item is marked deleted.

        :param  encoded_dataset_id:      the encoded id of the dataset to change
        :type   encoded_dataset_id:      an encoded id string

        :returns:   dict containing information about the dataset
        :rtype:     dictionary
        """
        undelete = util.string_as_bool( kwd.get( 'undelete', False ) )
        try:
            dataset = self.get_library_dataset( trans, id=encoded_dataset_id, check_ownership=False, check_accessible=False )
        except Exception as e:
            raise exceptions.ObjectNotFound( 'Requested dataset was not found.' + str(e) )

        # (Un)deleting requires modify permission on the item or admin rights.
        current_user_roles = trans.get_current_user_roles()
        allowed = trans.app.security_agent.can_modify_library_item( current_user_roles, dataset )
        if not allowed and not trans.user_is_admin():
            raise exceptions.InsufficientPermissionsException( 'You do not have proper permissions to delete this dataset.')

        # Toggle the flag rather than destroying anything.
        dataset.deleted = not undelete
        trans.sa_session.add( dataset )
        trans.sa_session.flush()

        rval = trans.security.encode_all_ids( dataset.to_dict() )
        rval[ 'file_size' ] = util.nice_size( int( dataset.library_dataset_dataset_association.get_size() ) )
        rval[ 'update_time' ] = dataset.update_time.strftime( "%Y-%m-%d %I:%M %p" )
        rval[ 'deleted' ] = dataset.deleted
        rval[ 'folder_id' ] = 'F' + rval[ 'folder_id' ]
        return rval
Esempio n. 53
0
    def serialize_current_anonymous_user(self, user, keys, trans=None, **kwargs):
        """Serialize usage/quota stats for the anonymous user bound to *trans*."""
        # use the current history if any to get usage stats for trans' anonymous user
        # TODO: might be better as sep. Serializer class
        if not trans.history:
            return None

        quota_agent = self.app.quota_agent
        usage = quota_agent.get_usage(trans, history=trans.history)
        percent = quota_agent.get_percent(trans=trans, usage=usage)

        # a very small subset of keys available
        values = {
            "id": None,
            "total_disk_usage": float(usage),
            "nice_total_disk_usage": util.nice_size(usage),
            "quota_percent": percent,
        }
        return {key: values[key] for key in keys if key in values}
Esempio n. 54
0
    def serialize_current_anonymous_user( self, user, keys, trans=None, **kwargs ):
        """Serialize usage/quota stats for trans' anonymous user.

        Raises AuthenticationRequired when there is no current history from
        which to derive usage.
        """
        # use the current history if any to get usage stats for trans' anonymous user
        # TODO: might be better as sep. Serializer class
        if not trans.history:
            raise exceptions.AuthenticationRequired( 'No history for anonymous user usage stats' )

        usage = self.app.quota_agent.get_usage( trans, history=trans.history )
        percent = self.app.quota_agent.get_percent( trans=trans, usage=usage )

        # a very small subset of keys available
        values = {
            'id'                    : None,
            'total_disk_usage'      : float( usage ),
            'nice_total_disk_usage' : util.nice_size( usage ),
            'quota_percent'         : percent,
        }
        return { key: values[ key ] for key in keys if key in values }
Esempio n. 55
0
 def get_html( self, prefix="" ):
     """Render the FTP-uploaded-files table (header, rows, footer, help text) as HTML."""
     rows = []
     if self.dir is None:
         # Anonymous user: prompt to create / log in to an account.
         rows.append( '<tr><td colspan="4"><em>Please <a href="%s">create</a> or <a href="%s">log in to</a> a Galaxy account to view files uploaded via FTP.</em></td></tr>' % ( url_for( controller='user', action='create', cntrller='user', referer=url_for( controller='root' ) ), url_for( controller='user', action='login', cntrller='user', referer=url_for( controller='root' ) ) ) )
     elif not os.path.exists( self.dir ):
         rows.append( '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>' )
     else:
         uploads = []
         for dirpath, dirnames, filenames in os.walk( self.dir ):
             for name in filenames:
                 full = os.path.join( dirpath, name )
                 info = os.lstat( full )
                 uploads.append( dict( path=relpath( full, self.dir ),
                                       size=nice_size( info.st_size ),
                                       ctime=time.strftime( "%m/%d/%Y %I:%M:%S %p", time.localtime( info.st_ctime ) ) ) )
         if not uploads:
             rows.append( '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>' )
         for upload in uploads:
             rows.append( FTPFileField.trow % ( prefix, self.name, upload['path'], upload['path'], upload['size'], upload['ctime'] ) )
     rval = FTPFileField.thead + ''.join( rows )
     rval += FTPFileField.tfoot
     rval += '<div class="toolParamHelp">This Galaxy server allows you to upload files via FTP.  To upload some files, log in to the FTP server at <strong>%s</strong> using your Galaxy credentials (email address and password). After transfering files via FTP they will appear here. To use them in further analysis you must return to this page, select these files and press the <strong>Execute</strong> button. After they are processed they will appear in your Uploaded Files project space. Consult <a href="http://wiki.galaxyproject.org/FTPUpload">the Galaxy wiki</a> for more information.</div>' % self.ftp_site
     return rval
Esempio n. 56
0
 def get_html( self, prefix="" ):
     """Render the FTP upload listing table as HTML (short help-text variant)."""
     rval = FTPFileField.thead
     if self.dir is None:
         # No FTP directory: user must create / log in to an account first.
         rval += '<tr><td colspan="4"><em>Please <a href="%s">create</a> or <a href="%s">log in to</a> a Galaxy account to view files uploaded via FTP.</em></td></tr>' % ( url_for( controller='user', action='create', cntrller='user', referer=url_for( controller='root' ) ), url_for( controller='user', action='login', cntrller='user', referer=url_for( controller='root' ) ) )
     elif not os.path.exists( self.dir ):
         rval += '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>'
     else:
         entries = []
         for dirpath, _dirnames, filenames in os.walk( self.dir ):
             for fname in filenames:
                 fullpath = os.path.join( dirpath, fname )
                 st = os.lstat( fullpath )
                 entries.append( { 'path': relpath( fullpath, self.dir ),
                                   'size': nice_size( st.st_size ),
                                   'ctime': time.strftime( "%m/%d/%Y %I:%M:%S %p", time.localtime( st.st_ctime ) ) } )
         if not entries:
             rval += '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>'
         for entry in entries:
             rval += FTPFileField.trow % ( prefix, self.name, entry['path'], entry['path'], entry['size'], entry['ctime'] )
     rval += FTPFileField.tfoot
     rval += '<div class="toolParamHelp">This Galaxy server allows you to upload files via FTP.  To upload some files, log in to the FTP server at <strong>%s</strong> using your Galaxy credentials (email address and password).</div>' % self.ftp_site
     return rval
Esempio n. 57
0
 def get_html( self, prefix="" ):
     """Render the FTP upload listing table as HTML; the help text shows this
     instance's public IP fetched from the EC2 metadata service.
     """
     rval = FTPFileField.thead
     if self.dir is None:
         # No FTP directory: user must create / log in to an account first.
         rval += '<tr><td colspan="4"><em>Please <a href="%s">create</a> or <a href="%s">log in to</a> a Galaxy account to view files uploaded via FTP.</em></td></tr>' % ( url_for( controller='user', action='create', cntrller='user', referer=url_for( controller='root' ) ), url_for( controller='user', action='login', cntrller='user', referer=url_for( controller='root' ) ) )
     elif not os.path.exists( self.dir ):
         rval += '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>'
     else:
         uploads = []
         for ( dirpath, dirnames, filenames ) in os.walk( self.dir ):
             for filename in filenames:
                 path = relpath( os.path.join( dirpath, filename ), self.dir )
                 statinfo = os.lstat( os.path.join( dirpath, filename ) )
                 uploads.append( dict( path=path,
                                       size=nice_size( statinfo.st_size ),
                                       ctime=time.strftime( "%m/%d/%Y %I:%M:%S %p", time.localtime( statinfo.st_ctime ) ) ) )
         if not uploads:
             rval += '<tr><td colspan="4"><em>Your FTP upload directory contains no files.</em></td></tr>'
         for upload in uploads:
             rval += FTPFileField.trow % ( prefix, self.name, upload['path'], upload['path'], upload['size'], upload['ctime'] )
     # Fixed: call curl without shell=True (no shell features were used, and a
     # shell string invites injection-style mistakes) and decode/strip the
     # bytes subprocess returns -- on Python 3 the raw bytes would render as
     # b'1.2.3.4\n' inside the help text.
     galaxy_ip_address = subprocess.check_output(
         ['curl', 'http://instance-data/latest/meta-data/public-ipv4']).decode().strip()
     rval += FTPFileField.tfoot
     rval += '<div class="toolParamHelp">This Galaxy server allows you to upload files via FTP.  To upload files, use an FTP program to access the FTP server at <strong>%s</strong> (using the SFTP protocol) and port <strong>2200</strong>. You will use your Galaxy credentials (email address and password) to log in to the server.</div>' % galaxy_ip_address
     return rval