def test_ban_se_cancel(self):
    """
    Ban a SE that has files queued; the active jobs must be canceled,
    while terminal jobs must be left untouched.
    """
    job_ids = [
        insert_job('dteam', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'),
        insert_job('dteam', 'gsiftp://source', 'gsiftp://destination2', 'ACTIVE'),
        insert_job('dteam', 'gsiftp://source', 'gsiftp://destination2', 'FAILED',
                   duration=10, queued=20),
    ]
    response = self.app.post(url="/ban/se", params={'storage': 'gsiftp://source'}, status=200)
    canceled_ids = json.loads(response.body)

    # Only the two active jobs are reported as canceled
    self.assertEqual(2, len(canceled_ids))
    self.assertIn(job_ids[0], canceled_ids)
    self.assertIn(job_ids[1], canceled_ids)
    self.assertNotIn(job_ids[2], canceled_ids)

    for job_id in job_ids[0:2]:
        job = Session.query(Job).get(job_id)
        transfers = Session.query(File).filter(File.job_id == job_id)
        self.assertEqual('CANCELED', job.job_state)
        self.assertNotEqual(None, job.job_finished)
        self.assertNotEqual(None, job.finish_time)
        for transfer in transfers:
            self.assertEqual('CANCELED', transfer.file_state)
            self.assertNotEqual(None, transfer.job_finished)
            self.assertNotEqual(None, transfer.finish_time)
            self.assertEqual('Storage banned', transfer.reason)

    # The job that had already failed stays failed
    job = Session.query(Job).get(job_ids[2])
    self.assertEqual(job.job_state, 'FAILED')
    transfers = Session.query(File).filter(File.job_id == job.job_id)
    for transfer in transfers:
        self.assertEqual('FAILED', transfer.file_state)
def test_cancel_terminal(self):
    """
    Canceling a job whose files are already in a terminal state must be
    a no-op: state and reason remain as they were.
    """
    job_id = self._submit()
    job = Session.query(Job).get(job_id)
    job.job_state = 'FINISHED'
    for transfer in job.files:
        transfer.file_state = 'FINISHED'
    Session.merge(job)
    Session.commit()

    response = self.app.delete(url="/jobs/%s" % job_id, status=200)
    payload = json.loads(response.body)
    self.assertEqual(payload['job_id'], job_id)
    self.assertEqual(payload['job_state'], 'FINISHED')
    self.assertNotEqual(payload['reason'], 'Job canceled by the user')

    # Confirm the database was not modified either
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, 'FINISHED')
    for transfer in job.files:
        self.assertEqual(transfer.file_state, 'FINISHED')
def _cancel_jobs(dn):
    """
    Cancel all active jobs belonging to `dn`.

    Returns the list of affected job ids.
    """
    active_jobs = Session.query(Job.job_id)\
        .filter(Job.job_state.in_(JobActiveStates))\
        .filter(Job.user_dn == dn)
    job_ids = [row[0] for row in active_jobs]
    try:
        # Use one timestamp so files and jobs are stamped consistently
        now = datetime.utcnow()
        for job_id in job_ids:
            Session.query(File)\
                .filter(File.job_id == job_id)\
                .filter(File.file_state.in_(FileActiveStates))\
                .update({
                    'file_state': 'CANCELED',
                    'reason': 'User banned',
                    'job_finished': now,
                    'finish_time': now
                }, synchronize_session=False)
            Session.query(Job)\
                .filter(Job.job_id == job_id)\
                .update({
                    'job_state': 'CANCELED',
                    'reason': 'User banned',
                    'job_finished': now,
                    'finish_time': now
                }, synchronize_session=False)
        Session.commit()
        Session.expire_all()
        return job_ids
    except Exception:
        Session.rollback()
        raise
def apiVersion(self):
    """
    Return the API version plus the HAL-style entry points of the service.

    Bug fix: the original returned 'schema': credV and 'delegation': schemaV,
    i.e. the CredentialVersion row under the 'schema' key and the
    SchemaVersion row under 'delegation'. The mapping is now consistent
    with the queried tables.
    """
    credV = Session.query(CredentialVersion)[0]
    schemaV = Session.query(SchemaVersion)[0]
    return {
        'api': _Version(0, 2, 1),
        'schema': schemaV,      # was credV (swapped with 'delegation')
        'delegation': credV,    # was schemaV
        '_links': {
            'curies': [{'name': 'fts', 'href': 'https://svnweb.cern.ch/trac/fts3'}],
            'fts:whoami': {'href': '/whoami', 'title': 'Check user certificate'},
            'fts:joblist': {
                'href': '/jobs{?vo_name,user_dn}',
                'title': 'List of active jobs',
                'templated': True
            },
            'fts:job': {
                'href': '/jobs/{id}',
                'title': 'Job information',
                'templated': True,
                'hints': {
                    'allow': ['GET', 'DELETE']
                }
            },
            'fts:configaudit': {'href': '/config/audit', 'title': 'Configuration'},
            'fts:submitschema': {'href': '/schema/submit', 'title': 'JSON schema of messages'},
            'fts:jobsubmit': {
                'href': '/jobs',
                'hints': {
                    'allow': ['POST'],
                    'representations': ['fts:submitschema']
                }
            },
        }
    }
def test_ban_se_cancel_vo(self):
    """
    Ban a SE restricting the ban to one VO: only that VO's jobs are
    canceled, the rest stay queued.
    """
    job_ids = [
        insert_job('dteam', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'),
        insert_job('atlas', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'),
        insert_job('atlas', 'gsiftp://source', 'gsiftp://destination2', 'SUBMITTED'),
    ]
    response = self.app.post(
        url="/ban/se",
        params={'storage': 'gsiftp://source', 'status': 'cancel', 'vo_name': 'dteam'},
        status=200
    )
    canceled_ids = json.loads(response.body)

    # Only the dteam job is affected
    self.assertEqual(1, len(canceled_ids))
    self.assertIn(job_ids[0], canceled_ids)

    for job_id in job_ids:
        job = Session.query(Job).get(job_id)
        transfers = Session.query(File).filter(File.job_id == job_id)
        expected = 'CANCELED' if job_id in canceled_ids else 'SUBMITTED'
        self.assertEqual(expected, job.job_state)
        for transfer in transfers:
            self.assertEqual(expected, transfer.file_state)
def test_ban_se_partial_job(self):
    """
    Ban a SE that only matches some pairs of a job: the matching files
    are canceled, but the job itself must stay alive for the other pairs.
    """
    job_id = insert_job(
        'dteam',
        multiple=[
            ('gsiftp://source', 'gsiftp://destination'),
            ('gsiftp://other', 'gsiftp://destination')
        ]
    )
    response = self.app.post(url="/ban/se", params={'storage': 'gsiftp://source'}, status=200)
    canceled_ids = json.loads(response.body)

    self.assertEqual(1, len(canceled_ids))
    self.assertEqual(job_id, canceled_ids[0])

    # The job itself remains submitted, with no termination timestamps
    job = Session.query(Job).get(job_id)
    self.assertEqual('SUBMITTED', job.job_state)
    self.assertEqual(None, job.job_finished)
    self.assertEqual(None, job.finish_time)

    # Only the files hitting the banned storage are canceled
    transfers = Session.query(File).filter(File.job_id == job_id)
    for transfer in transfers:
        if transfer.source_se == 'gsiftp://source':
            self.assertEqual('CANCELED', transfer.file_state)
            self.assertNotEqual(None, transfer.finish_time)
        else:
            self.assertEqual('SUBMITTED', transfer.file_state)
            self.assertEqual(None, transfer.job_finished)
def test_ban_se_wait_vo(self):
    """
    Ban a SE in 'wait' mode restricted to one VO: that VO's queued files
    get the wait timeout, the others are untouched.
    """
    job_ids = [
        insert_job('dteam', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'),
        insert_job('atlas', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'),
        insert_job('atlas', 'gsiftp://source', 'gsiftp://destination2', 'SUBMITTED'),
    ]
    response = self.app.post(
        url="/ban/se",
        params={'storage': 'gsiftp://source', 'status': 'wait',
                'vo_name': 'dteam', 'timeout': 33},
        status=200
    )
    waiting_ids = json.loads(response.body)

    self.assertEqual(1, len(waiting_ids))
    self.assertIn(job_ids[0], waiting_ids)

    for job_id in job_ids:
        job = Session.query(Job).get(job_id)
        transfers = Session.query(File).filter(File.job_id == job_id)
        # Nothing is canceled in 'wait' mode
        self.assertEqual('SUBMITTED', job.job_state)
        for transfer in transfers:
            self.assertEqual('SUBMITTED', transfer.file_state)
            if job_id in waiting_ids:
                self.assertEqual(33, transfer.wait_timeout)
            else:
                self.assertEqual(None, transfer.wait_timeout)
def update_app(self, client_id):
    """
    Update (or delete) a registered OAuth2 application.

    Only the owner may modify the application. If the submitted fields
    contain 'delete', the application and its tokens/codes are removed;
    otherwise the editable fields are updated.

    Raises:
        HTTPNotFound:  no application with the given client_id
        HTTPForbidden: the authenticated user is not the owner

    Fix: the original wrapped the `redirect()` calls inside the
    try/except with a bare `except:`. `redirect` raises an HTTP
    exception for control flow, so the handler ran `Session.rollback()`
    after a successful commit and re-raised. The redirects now happen
    after the transaction block, and the except clause is narrowed.
    """
    user = pylons.request.environ['fts3.User.Credentials']
    app = Session.query(OAuth2Application).get(client_id)
    if not app:
        raise HTTPNotFound('Application not found')
    if app.owner != user.user_dn:
        raise HTTPForbidden()

    # Accept either a JSON body or regular form fields
    if pylons.request.headers['Content-Type'].startswith('application/json'):
        fields = json.loads(pylons.request.body)
    else:
        fields = pylons.request.POST

    delete_requested = 'delete' in fields
    try:
        if delete_requested:
            Session.delete(app)
            Session.query(OAuth2Token).filter(OAuth2Token.client_id == client_id).delete()
            Session.query(OAuth2Code).filter(OAuth2Code.client_id == client_id).delete()
        else:
            app.description = fields.get('description', '')
            app.website = fields.get('website', '')
            app.redirect_to = fields.get('redirect_to', '')
            Session.merge(app)
        Session.commit()
    except Exception:
        Session.rollback()
        raise

    # Redirect outside the transaction block: redirect() raises, and that
    # control-flow exception must not trigger a rollback
    if delete_requested:
        redirect(url_for(controller='oauth2', action='get_my_apps'), code=HTTPSeeOther.code)
    else:
        redirect(url_for(controller='oauth2', action='get_app'), code=HTTPSeeOther.code)
def get_my_apps(self):
    """
    Return the applications registered by the user plus those the user
    has authorized, rendered as HTML or JSON depending on the Accept header.
    """
    user = pylons.request.environ['fts3.User.Credentials']
    my_apps = Session.query(OAuth2Application)\
        .filter(OAuth2Application.owner == user.user_dn).all()
    authorized_apps = Session.query(
        OAuth2Application.client_id, OAuth2Application.name,
        OAuth2Application.website, OAuth2Application.description,
        OAuth2Token.refresh_token, OAuth2Token.scope, OAuth2Token.expires
    ).filter(
        (OAuth2Token.dlg_id == user.delegation_id) &
        (OAuth2Token.client_id == OAuth2Application.client_id)
    )

    response = {'apps': my_apps, 'authorized': authorized_apps}

    if _accept_html(pylons.request.accept):
        pylons.response.headers['Content-Type'] = 'text/html; charset=UTF-8'
        response['user'] = user
        response['site'] = pylons.config['fts3.SiteName']
        return render('/apps.html', extra_vars=response)

    pylons.response.headers['Content-Type'] = 'application/json'
    # Flatten the authorized apps into plain dicts so they serialize cleanly
    response['authorized'] = [
        {
            'name': auth.name,
            'website': auth.website,
            'description': auth.description,
            'scope': auth.scope,
            'expires': auth.expires
        }
        for auth in authorized_apps
    ]
    return to_json(response)
def get_files(self, job_id):
    """
    Return the files belonging to the given job as a streaming query.

    Raises:
        HTTPNotFound:  no such job
        HTTPForbidden: the user may not inspect this job
    """
    owner = Session.query(Job.user_dn, Job.vo_name)\
        .filter(Job.job_id == job_id).first()
    if owner is None:
        raise HTTPNotFound('No job with the id "%s" has been found' % job_id)
    if not authorized(TRANSFER, resource_owner=owner[0], resource_vo=owner[1]):
        raise HTTPForbidden('Not enough permissions to check the job "%s"' % job_id)
    file_query = Session.query(File)\
        .filter(File.job_id == job_id)\
        .options(noload(File.retries))
    # Stream in batches of 100 with eager loading disabled to bound memory
    return file_query.enable_eagerloads(False).yield_per(100)
def get_files(self, job_id):
    """
    Return the list of files belonging to the given job.

    Raises:
        HTTPNotFound:  no such job
        HTTPForbidden: the user may not inspect this job
    """
    owner = Session.query(Job.user_dn, Job.vo_name)\
        .filter(Job.job_id == job_id).first()
    if owner is None:
        raise HTTPNotFound('No job with the id "%s" has been found' % job_id)
    if not authorized(TRANSFER, resource_owner=owner[0], resource_vo=owner[1]):
        raise HTTPForbidden('Not enough permissions to check the job "%s"' % job_id)
    file_query = Session.query(File)\
        .filter(File.job_id == job_id)\
        .options(noload(File.retries))
    return file_query.all()
def set_se_config(self):
    """
    Set the configuration parameters for one or more SEs.

    The input is a dict mapping storage name to a config with optional
    'se_info' (Se table fields) and 'operations' (per-VO operation limits;
    a limit <= 0 removes the entry).

    Raises:
        HTTPBadRequest: malformed configuration payload

    Fix: `se_info` and `operations` were only bound inside the loop, so an
    empty input dict made the final `return` raise NameError (surfacing as
    a 500). They are now initialized up front. Note they still reflect the
    LAST storage processed, as before.
    """
    input_dict = get_input_as_dict(request)
    se_info = None
    operations = None
    try:
        for storage, cfg in input_dict.iteritems():
            if not storage or storage.isspace():
                raise ValueError

            se_info = None
            se_info_new = cfg.get('se_info', None)
            if se_info_new:
                se_info = Session.query(Se).get(storage)
                if not se_info:
                    se_info = Se(storage=storage)
                for key, value in se_info_new.iteritems():
                    #value = validate_type(Se, key, value)
                    setattr(se_info, key, value)
                audit_configuration(
                    'set-se-config', 'Set config %s: %s' % (storage, json.dumps(cfg)))
                Session.merge(se_info)

            # Operation limits
            operations = cfg.get('operations', None)
            if operations:
                for vo, limits in operations.iteritems():
                    for op, limit in limits.iteritems():
                        limit = int(limit)
                        new_limit = Session.query(OperationConfig).get((vo, storage, op))
                        if limit > 0:
                            if not new_limit:
                                new_limit = OperationConfig(
                                    vo_name=vo, host=storage, operation=op)
                            new_limit.concurrent_ops = limit
                            Session.merge(new_limit)
                        elif new_limit:
                            # A non-positive limit removes the configuration
                            Session.delete(new_limit)
                audit_configuration(
                    'set-se-limits',
                    'Set limits for %s: %s' % (storage, json.dumps(operations)))
        Session.commit()
    except (AttributeError, ValueError):
        Session.rollback()
        raise HTTPBadRequest('Malformed configuration')
    except:
        Session.rollback()
        raise
    return (se_info, operations)
def test_ban_dn_cancel(self):
    """
    Ban a DN that owns running transfers: the active jobs must be
    canceled with reason 'User banned', the terminal ones left alone.
    """
    banned_dn = '/DC=cern/CN=someone'
    job_ids = [
        insert_job('testvo', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED',
                   user_dn=banned_dn),
        insert_job('testvo', 'gsiftp://source', 'gsiftp://destination2', 'ACTIVE',
                   user_dn=banned_dn),
        insert_job('testvo', 'gsiftp://source', 'gsiftp://destination2', 'FAILED',
                   duration=10, queued=20, user_dn=banned_dn),
    ]
    canceled_ids = self.app.post(
        url="/ban/dn",
        params={'user_dn': banned_dn},
        status=200
    ).json

    self.assertEqual(2, len(canceled_ids))
    self.assertIn(job_ids[0], canceled_ids)
    self.assertIn(job_ids[1], canceled_ids)
    self.assertNotIn(job_ids[2], canceled_ids)

    for job_id in job_ids[0:2]:
        job = Session.query(Job).get(job_id)
        transfers = Session.query(File).filter(File.job_id == job_id)
        self.assertEqual('CANCELED', job.job_state)
        self.assertNotEqual(None, job.job_finished)
        self.assertEqual('User banned', job.reason)
        for transfer in transfers:
            self.assertEqual('CANCELED', transfer.file_state)
            self.assertNotEqual(None, transfer.finish_time)
            self.assertEqual('User banned', transfer.reason)

    # The already-failed job is not touched
    job = Session.query(Job).get(job_ids[2])
    self.assertEqual(job.job_state, 'FAILED')
    transfers = Session.query(File).filter(File.job_id == job.job_id)
    for transfer in transfers:
        self.assertEqual('FAILED', transfer.file_state)
def _get_limits(source, destination):
    """
    Return the configured throughput limits for the given source and
    destination storages, keyed as 'source'/'destination'. Storages with
    no configured throughput are omitted.
    """
    src_rows = Session.query(Optimize.throughput)\
        .filter(Optimize.source_se == source)\
        .filter(Optimize.throughput != None).all()
    dst_rows = Session.query(Optimize.throughput)\
        .filter(Optimize.dest_se == destination)\
        .filter(Optimize.throughput != None).all()

    limits = {}
    if src_rows:
        limits['source'] = src_rows[0][0]
    if dst_rows:
        limits['destination'] = dst_rows[0][0]
    return limits
def persist_authorization_code(self, client_id, code, scope):
    """
    Store a freshly issued OAuth2 authorization code for the current user,
    replacing any previous codes for the same client/delegation pair.
    """
    user = pylons.request.environ['fts3.User.Credentials']
    self._insert_user(user)

    # Drop any stale codes for this client + delegation id
    Session.query(OAuth2Code).filter(
        (OAuth2Code.client_id == client_id) &
        (OAuth2Code.dlg_id == user.delegation_id)
    ).delete()

    # Persist the new code
    new_code = OAuth2Code(
        client_id=client_id,
        code=code,
        scope=scope,
        dlg_id=user.delegation_id
    )
    Session.merge(new_code)
    Session.commit()
def setUp(self):
    """
    Start each test from a clean configuration: wipe the optimizer,
    audit, operation-limit and SE tables, and prepare a reference
    host configuration payload.
    """
    super(TestConfigSe, self).setUp()
    self.setup_gridsite_environment()

    # Clear any leftover configuration rows
    for table in (Optimizer, ConfigAudit, OperationConfig, Se):
        Session.query(table).delete()
    Session.commit()

    # Reference payload used by the tests
    self.host_config = {
        'operations': {
            'atlas': {
                'delete': 22,
                'staging': 32,
            },
            'dteam': {
                'delete': 10,
                'staging': 11
            }
        },
        'se_info': {
            'ipv6': 1,
            'outbound_max_active': 55,
            'inbound_max_active': 11,
            'inbound_max_throughput': 33,
            'se_metadata': 'metadata'
        }
    }
def test_ban_se_staging(self):
    """
    Ban a storage with STAGING transfers queued, submit a new STAGING job,
    then unban: files must be held while banned and return to STAGING
    after the ban is lifted.
    """
    self.push_delegation()

    pre_job_id = insert_job('testvo', 'srm://source', 'srm://destination',
                            'STAGING', user_dn='/DC=cern/CN=someone')
    self.app.post(
        url="/ban/se",
        params={'storage': 'srm://source', 'status': 'wait', 'allow_submit': True},
        status=200
    )

    # Pre-existing staging files are put on hold
    for transfer in Session.query(File).filter(File.job_id == pre_job_id):
        self.assertEqual('ON_HOLD_STAGING', transfer.file_state)

    # A new submission while banned is accepted but also held
    job = {
        'files': [{
            'sources': ['srm://source/file'],
            'destinations': ['gsiftp://destination2/path/']
        }],
        'params': {
            'copy_pin_lifetime': 1234
        }
    }
    post_job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(job),
        status=200
    ).json['job_id']
    for transfer in Session.query(File).filter(File.job_id == post_job_id):
        self.assertEqual('ON_HOLD_STAGING', transfer.file_state)

    # Lifting the ban releases everything back to STAGING
    self.app.delete(url="/ban/se?storage=%s" % urllib.quote('srm://source'), status=204)
    held = Session.query(File).filter(File.job_id.in_((pre_job_id, post_job_id)))
    for transfer in held:
        self.assertEqual('STAGING', transfer.file_state)
def test_reset(self):
    """
    After an initial set, posting new values must overwrite the config
    and append a second audit entry.
    """
    self.test_set()
    self.app.post_json(
        url="/config/global",
        params=dict(retry=55, vo_name='dteam'),
        status=200
    )

    config = Session.query(ServerConfig).get('dteam')
    self.assertEqual(55, config.retry)

    audit_entries = Session.query(ConfigAudit).all()
    self.assertEqual(2, len(audit_entries))
def autocomplete_storage(self):
    """
    Autocomplete a storage name, matching both sources and destinations
    of the configured links. Returns the set of distinct matches.
    """
    term = request.params.get('term', 'srm://')
    src_matches = Session.query(LinkConfig.source)\
        .filter(LinkConfig.source.startswith(term)).distinct().all()
    dest_matches = Session.query(LinkConfig.destination)\
        .filter(LinkConfig.destination.startswith(term)).distinct().all()
    # Rows come back as 1-tuples; merge both sides into one set
    return {row[0] for row in src_matches} | {row[0] for row in dest_matches}
def delete_link_config(self, sym_name, start_response):
    """
    Delete an existing link configuration identified by its symbolic name,
    recording the deletion in the configuration audit.
    """
    try:
        sym_name = urllib.unquote(sym_name)
        Session.query(LinkConfig)\
            .filter(LinkConfig.symbolicname == sym_name).delete()
        audit_configuration('link-delete', 'Link %s has been deleted' % sym_name)
        Session.commit()
    except:
        Session.rollback()
        raise
    start_response('204 No Content', [])
    return ['']
def tearDown(self):
    """Remove every OAuth2-related row created during the test."""
    super(TestOAuth2, self).tearDown()
    for table in (OAuth2Application, OAuth2Token, OAuth2Code, AuthorizationByDn):
        Session.query(table).delete()
    Session.commit()
def test_delete_link(self):
    """
    Delete an existing link: the link must be gone from the database,
    a later GET must 404, and a second audit entry must exist.

    Fix: webtest's TestApp has no `get_json` method (only `get` and the
    *_json variants for requests with a body), so the 404 check used
    `self.app.get` instead.
    """
    self.test_config_link_se()
    self.app.delete("/config/links/test-link", status=204)

    links = Session.query(LinkConfig).all()
    self.assertEqual(0, len(links))

    # was self.app.get_json, which does not exist on webtest.TestApp
    self.app.get("/config/links/test-link", status=404)

    audits = Session.query(ConfigAudit).all()
    self.assertEqual(2, len(audits))
def test_ban_se_wait(self):
    """
    Ban a SE in 'wait' mode: queued jobs get a grace period instead of
    being canceled, active and terminal jobs are unaffected.
    """
    job_ids = [
        insert_job('testvo', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'),
        insert_job('testvo', 'gsiftp://source', 'gsiftp://destination2', 'ACTIVE'),
        insert_job('testvo', 'gsiftp://source', 'gsiftp://destination2', 'FAILED',
                   duration=10, queued=20),
    ]
    waiting_ids = self.app.post(
        url="/ban/se",
        params={'storage': 'gsiftp://source', 'status': 'wait', 'timeout': 1234},
        status=200
    ).json

    # Only the submitted job is put on wait
    self.assertEqual(1, len(waiting_ids))
    self.assertIn(job_ids[0], waiting_ids)
    self.assertNotIn(job_ids[1], waiting_ids)
    self.assertNotIn(job_ids[2], waiting_ids)

    for job_id in job_ids[0:2]:
        job = Session.query(Job).get(job_id)
        transfers = Session.query(File).filter(File.job_id == job_id)
        self.assertIn(job.job_state, ['ACTIVE', 'SUBMITTED'])
        self.assertEqual(None, job.job_finished)
        for transfer in transfers:
            self.assertIn(transfer.file_state, ['ACTIVE', 'ON_HOLD'])
            self.assertEqual(None, transfer.finish_time)

    # The failed job stays failed
    job = Session.query(Job).get(job_ids[2])
    self.assertEqual(job.job_state, 'FAILED')
    for transfer in Session.query(File).filter(File.job_id == job.job_id):
        self.assertEqual('FAILED', transfer.file_state)

    # And the ban itself is recorded with WAIT status
    banned = Session.query(BannedSE).get(('gsiftp://source', 'testvo'))
    self.assertEqual('WAIT', banned.status)
def test_ban_dn(self):
    """
    Ban a DN with no jobs, then unban it: the ban row must appear in
    and disappear from the database.
    """
    banned_dn = '/DC=cern/CN=someone'
    response = self.app.post(url='/ban/dn', params={'user_dn': banned_dn}, status=200)
    canceled = json.loads(response.body)
    self.assertEqual(0, len(canceled))

    banned = Session.query(BannedDN).get(banned_dn)
    self.assertNotEqual(None, banned)
    self.assertEqual(self.get_user_credentials().user_dn, banned.admin_dn)

    self.app.delete(url="/ban/dn?user_dn=%s" % urllib.quote(banned_dn), status=204)
    self.assertEqual(None, Session.query(BannedDN).get(banned_dn))
def get_file_retries(self, job_id, file_id):
    """
    Return the retry log entries for a given file of a given job.

    Raises:
        HTTPNotFound:  no such job, or no such file
        HTTPForbidden: the user may not inspect this job

    Fix: the original tested `if not f:` on a Query object, which is
    always truthy, so the file-not-found branch could never fire. The
    query is now materialized with `.first()` and compared to None.
    """
    owner = Session.query(Job.user_dn, Job.vo_name)\
        .filter(Job.job_id == job_id).all()
    if owner is None or len(owner) < 1:
        raise HTTPNotFound('No job with the id "%s" has been found' % job_id)
    if not authorized(TRANSFER, resource_owner=owner[0][0], resource_vo=owner[0][1]):
        raise HTTPForbidden('Not enough permissions to check the job "%s"' % job_id)

    # was: f = Session.query(...); if not f: -- a Query is always truthy
    f = Session.query(File.file_id).filter(File.file_id == file_id).first()
    if f is None:
        raise HTTPNotFound('No file with the id "%d" has been found' % file_id)

    retries = Session.query(FileRetryLog).filter(FileRetryLog.file_id == file_id)
    return retries.all()
def setUp(self):
    """Wipe all SE-related configuration tables before each test."""
    super(TestConfigSe, self).setUp()
    self.setup_gridsite_environment()
    for table in (Optimizer, ConfigAudit, OperationConfig, Se):
        Session.query(table).delete()
    Session.commit()
def test_remove_authz(self):
    """
    Revoke a previously granted operation for a DN: the grant disappears
    and a second audit entry is recorded.
    """
    self.test_add_authz()
    self.app.delete(
        "/config/authorize?dn=/DN=a.test.user&operation=config",
        status=204
    )

    audits = Session.query(ConfigAudit).all()
    self.assertEqual(2, len(audits))

    grant = Session.query(AuthorizationByDn).get(('/DN=a.test.user', 'config'))
    self.assertEqual(None, grant)
def test_simple_delete(self):
    """
    Submit a simple deletion job mixing plain SURLs and dict entries with
    metadata; verify the job and its DataManagement rows.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    payload = {
        'delete': [
            'root://source.es/file',
            {
                'surl': 'root://source.es/file2',
                'metadata': {'a': 'b'}
            }
        ]
    }
    job_id = self.app.put(
        url="/jobs",
        params=json.dumps(payload),
        status=200
    ).json['job_id']
    self.assertIsNotNone(job_id)

    job = Session.query(Job).get(job_id)
    self.assertEqual(job.vo_name, 'testvo')
    self.assertEqual(job.user_dn, self.TEST_USER_DN)
    self.assertEqual(job.source_se, 'root://source.es')
    self.assertEqual('DELETE', job.job_state)
    self.assertIsNotNone(job.cred_id)

    dm_rows = Session.query(DataManagement)\
        .filter(DataManagement.job_id == job_id).all()
    self.assertEqual(2, len(dm_rows))
    self.assertEqual(dm_rows[0].source_surl, 'root://source.es/file')
    self.assertEqual(dm_rows[1].source_surl, 'root://source.es/file2')
    self.assertEqual(dm_rows[1].file_metadata['a'], 'b')
    # Both entries must share the same hashed id (same scheduling bucket)
    self.assertEqual(dm_rows[0].hashed_id, dm_rows[1].hashed_id)
    for entry in dm_rows:
        self.assertEqual(entry.vo_name, 'testvo')
        self.assertEqual(entry.file_state, 'DELETE')
        self.assertEqual(entry.source_se, 'root://source.es')
    return str(job_id)
def cancel_all_by_vo(self, vo_name):
    """
    Cancel every active file, data-management task and job of the given VO.
    Root-only operation; returns the number of affected rows per category.
    """
    user = request.environ['fts3.User.Credentials']
    now = datetime.utcnow()

    if not user.is_root:
        raise HTTPForbidden(
            'User does not have root privileges'
        )

    try:
        # FTS3 daemon expects finish_time to be NULL in order to trigger the signal
        # to fts_url_copy
        file_count = Session.query(File)\
            .filter(File.vo_name == vo_name)\
            .filter(File.file_state.in_(FileActiveStates))\
            .update({
                'file_state': 'CANCELED',
                'reason': 'Job canceled by the user',
                'dest_surl_uuid': None,
                'finish_time': None
            }, synchronize_session=False)

        # However, for data management operations there is nothing to signal, so
        # set job_finished
        dm_count = Session.query(DataManagement)\
            .filter(DataManagement.vo_name == vo_name)\
            .filter(DataManagement.file_state.in_(DataManagementActiveStates))\
            .update({
                'file_state': 'CANCELED',
                'reason': 'Job canceled by the user',
                'job_finished': now,
                'finish_time': now
            }, synchronize_session=False)

        job_count = Session.query(Job)\
            .filter(Job.vo_name == vo_name)\
            .filter(Job.job_state.in_(JobActiveStates))\
            .update({
                'job_state': 'CANCELED',
                'reason': 'Job canceled by the user',
                'job_finished': now
            }, synchronize_session=False)

        Session.commit()
        Session.expire_all()
        log.info("Active jobs for VO %s canceled" % vo_name)
    except:
        Session.rollback()
        raise

    return {
        "affected_files": file_count,
        "affected_dm": dm_count,
        "affected_jobs": job_count
    }
def test_register(self):
    """
    Register an OAuth2 application and verify every field is persisted,
    with the current user recorded as owner.
    """
    self.setup_gridsite_environment()

    registration = {
        'name': 'MyApp',
        'description': 'Blah blah blah',
        'website': 'https://example.com',
        'redirect_to': 'https://mysite.com/callback'
    }
    response = self.app.post(
        url="/oauth2/register",
        content_type='application/json',
        params=json.dumps(registration),
        status=201
    )
    client_id = json.loads(response.body)
    self.assertNotEqual(None, client_id)

    app = Session.query(OAuth2Application).get(client_id)
    self.assertEqual('MyApp', app.name)
    self.assertEqual('Blah blah blah', app.description)
    self.assertEqual('https://example.com', app.website)
    self.assertEqual('https://mysite.com/callback', app.redirect_to)
    self.assertEqual('/DC=ch/DC=cern/CN=Test User', app.owner)
    return client_id
def test_no_vo(self):
    """
    Submit a valid job with no VO data in the credentials (possible with
    plain SSL). The job must be accepted and attributed to the user's
    'virtual' VO.
    """
    self.setup_gridsite_environment(no_vo=True)
    self.push_delegation()

    submission = {
        'files': [{
            'sources': ['root://source.es/file'],
            'destinations': ['root://dest.ch/file'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }
    response = self.app.put(url="/jobs", params=json.dumps(submission), status=200)

    # Make sure it was commited to the DB
    job_id = json.loads(response.body)['job_id']
    self.assertGreater(len(job_id), 0)
    self._validate_submitted(Session.query(Job).get(job_id), no_vo=True)
def test_surl_with_spaces(self):
    """
    Submit a job whose SURLs carry leading/trailing whitespace and
    newlines; the submission must still be accepted and stored.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    submission = {
        'files': [{
            'sources': ['root://source.es/file\n \r '],
            'destinations': ['\r\n root://dest.ch/file\n\n \n'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024.0,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }
    response = self.app.put(url="/jobs", params=json.dumps(submission), status=200)

    # Make sure it was commited to the DB
    job_id = json.loads(response.body)['job_id']
    self.assertGreater(len(job_id), 0)
    self._validate_submitted(Session.query(Job).get(job_id))
def test_files_balanced(self):
    """
    Check that the file 'hashed ids' are reasonably uniformly distributed.
    hashed_id is a legacy name; its purpose is to balance transfers between
    hosts regardless of how many are running at a given moment.
    """
    raise SkipTest('Disabled as it is not very reliable')
    self.setup_gridsite_environment()
    self.push_delegation()

    transfers = [
        {
            'sources': ["root://source.es/file%d" % idx],
            'destinations': ["root://dest.ch/file%d" % idx]
        }
        for idx in xrange(5000)
    ]
    response = self.app.put(
        url="/jobs", params=json.dumps({'files': transfers}), status=200)
    job_id = json.loads(response.body)['job_id']

    rows = Session.query(File.hashed_id).filter(File.job_id == job_id)
    hashed_ids = [row.hashed_id for row in rows]

    # Null hypothesis: the distribution of hashed_ids is uniform
    histogram, min_value, binsize, outsiders = scipy.stats.histogram(
        hashed_ids, defaultlimits=(0, 2 ** 16 - 1))
    chisq, pvalue = scipy.stats.chisquare(histogram)
    self.assertGreater(min_value, -1)
    self.assertEqual(outsiders, 0)
    self.assertGreater(pvalue, 0.1)
def test_expired(self):
    """
    A valid bearer token must authenticate; once its expiration date is
    moved into the past, the same token must be rejected with 403.
    """
    client_id, access_token, refresh_token, expires = self.test_get_token()
    del self.app.extra_environ['GRST_CRED_AURI_0']

    auth_header = {'Authorization': str('Bearer %s' % access_token)}

    # Token still valid: /whoami reports oauth2 authentication
    response = self.app.get(url="/whoami", headers=auth_header, status=200)
    whoami = json.loads(response.body)
    self.assertEqual('oauth2', whoami['method'])

    # Force the token to be expired
    token = Session.query(OAuth2Token).get((client_id, refresh_token))
    token.expires = datetime.utcnow() - timedelta(hours=1)
    Session.merge(token)
    Session.commit()

    # The expired token must now be denied
    self.app.get(url="/whoami", headers=auth_header, status=403)
def test_submit_max_time_in_queue_suffix(self):
    """
    Submit a job with max_time_in_queue given as a suffixed string ('4s');
    the stored value must be an absolute expiration timestamp.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    dest_surl = 'root://dest.ch:8447/file' + str(random.randint(0, 100))
    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [dest_surl],
        }],
        'params': {
            'max_time_in_queue': '4s'
        }
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']

    job = Session.query(Job).get(job_id)
    # Expiration lies in the future, within a small slack window
    self.assertGreater(job.max_time_in_queue, time.time())
    self.assertLessEqual(job.max_time_in_queue, 8 + time.time())
def test_simple_bringonline(self):
    """
    Submit a regular one-file bring-online job; it must land in the
    STAGING state together with its file.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    submission = {
        'files': [{
            'sources': ['srm://source.es/?SFN=/path/'],
            'destinations': ['srm://dest.ch/file'],
        }],
        'params': {
            'overwrite': True,
            'copy_pin_lifetime': 3600,
            'bring_online': 60,
            'verify_checksum': True
        }
    }
    response = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    )

    # Make sure it was committed to the DB
    job_id = json.loads(response.body)['job_id']
    self.assertGreater(len(job_id), 0)

    db_job = Session.query(Job).get(job_id)
    self.assertEqual(db_job.job_state, 'STAGING')
    self.assertEqual(db_job.files[0].file_state, 'STAGING')
def view(self, id):
    """
    Return the termination time of the user's stored credential with the
    given delegation id, or None when no credential exists.
    """
    user = request.environ['fts3.User.Credentials']
    cred = Session.query(Credential).get((id, user.user_dn))
    if cred is None:
        return None
    return {'termination_time': cred.termination_time}
def test_submit_multiple_sources(self):
    """
    Submit a job with multiple sources/destinations for one file: both
    alternatives must share file_index 0 and carry the file metadata.
    """
    self.setupGridsiteEnvironment()
    self.pushDelegation()

    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file', 'root://source.es/file'],
            'destinations': ['http://dest.ch:8447/file', 'root://dest.ch/file'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True}
    }
    answer = self.app.post(
        url=url_for(controller='jobs', action='submit'),
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    )

    # Validate job in the database
    jobId = json.loads(answer.body)['job_id']
    dbJob = Session.query(Job).get(jobId)
    assert len(dbJob.files) == 2
    # First alternative
    assert dbJob.files[0].file_index == 0
    assert dbJob.files[0].source_surl == 'http://source.es:8446/file'
    assert dbJob.files[0].dest_surl == 'http://dest.ch:8447/file'
    assert dbJob.files[0].file_metadata['mykey'] == 'myvalue'
    # Second alternative shares the same file index
    assert dbJob.files[1].file_index == 0
    assert dbJob.files[1].source_surl == 'root://source.es/file'
    assert dbJob.files[1].dest_surl == 'root://dest.ch/file'
    assert dbJob.files[1].file_metadata['mykey'] == 'myvalue'
def popDelegation(self):
    """Remove the stored delegated credential of the current user, if any."""
    cred = self.getUserCredentials()
    if not (cred and cred.delegation_id):
        return
    delegated = Session.query(Credential).get((cred.delegation_id, cred.user_dn))
    if delegated:
        Session.delete(delegated)
        Session.commit()
def test_submit_with_cloud_cred(self):
    """
    Submit a job whose source uses a cloud (dropbox) scheme; the single
    file must be stored with the given source and destination SURLs.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    dest_surl = 'root://dest.ch:8447/file' + str(random.randint(0, 100))
    submission = {
        'files': [{
            'sources': ['dropbox://dropbox.com/file'],
            'destinations': [dest_surl],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']

    job = Session.query(Job).get(job_id)
    self.assertEqual(1, len(job.files))
    self.assertEqual('dropbox://dropbox.com/file', job.files[0].source_surl)
    self.assertEqual(dest_surl, job.files[0].dest_surl)
def test_submit(self):
    """
    Submit a valid job and validate it against the database.

    Fix: `assertGreater(job_id, 0)` compared a string with an int, which
    is vacuously true in Python 2 regardless of the id. The assertion now
    checks the id's length, matching the sibling submit tests.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    job = {
        'files': [{
            'sources': ['root://source.es/file'],
            'destinations': ['root://dest.ch/file'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }
    answer = self.app.put(url="/jobs", params=json.dumps(job), status=200)

    # Make sure it was committed to the DB
    job_id = json.loads(answer.body)['job_id']
    # was assertGreater(job_id, 0): str-vs-int comparison, always true in Py2
    self.assertGreater(len(job_id), 0)
    self._validate_submitted(Session.query(Job).get(job_id))
    return str(job_id)
def test_submit_max_time_in_queue(self):
    """
    Submit a job with a numeric max_time_in_queue; the stored value must
    be the absolute timestamp when the job expires.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    dest_surl = 'root://dest.ch:8447/file' + str(random.randint(0, 100))
    submission = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': [dest_surl],
        }],
        'params': {
            'max_time_in_queue': 8
        }
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(submission),
        status=200
    ).json['job_id']

    # See FTS-311
    # max_time_in_queue was effectively ignored by FTS3
    # Since FTS-311, this field stores the timestamp when the job expires
    job = Session.query(Job).get(job_id)
    self.assertGreater(job.max_time_in_queue, time.time())
    self.assertLessEqual(job.max_time_in_queue, (8 * 60 * 60) + time.time())
def credential(self, dlg_id, start_response):
    """
    Second step of the delegation process: put the generated certificate

    The certificate being PUT will have to pass the following validation:
        - There is a previous certificate request done
        - The certificate subject matches the certificate issuer + '/CN=Proxy'
        - The certificate modulus matches the stored private key modulus
    """
    user = request.environ['fts3.User.Credentials']
    # The delegation id in the URL must belong to the authenticated user
    if dlg_id != user.delegation_id:
        raise HTTPForbidden('The requested ID and the credentials ID do not match')
    # A prior call to request() must have cached a certificate request
    credential_cache = Session.query(CredentialCache)\
        .get((user.delegation_id, user.user_dn))
    if credential_cache is None:
        raise HTTPBadRequest('No credential cache found')
    # The PUT body is the PEM-encoded signed proxy
    x509_proxy_pem = request.body
    log.debug("Received delegated credentials for %s" % dlg_id)
    log.debug(x509_proxy_pem)
    try:
        # Validate the proxy against the cached private key and rebuild
        # the full proxy chain (helpers defined elsewhere in this module)
        expiration_time = _validate_proxy(x509_proxy_pem, credential_cache.priv_key)
        x509_full_proxy_pem = _build_full_proxy(x509_proxy_pem, credential_cache.priv_key)
    except ProxyException, e:
        raise HTTPBadRequest('Could not process the proxy: ' + str(e))
    # NOTE(review): the visible chunk ends here. expiration_time and
    # x509_full_proxy_pem are computed but never stored in this span —
    # persisting the validated credential presumably follows outside this
    # view; confirm against the full file before relying on this method.
def _granted_level(self, role_permissions):
    """
    Get all granted levels for this user out of the configuration
    (all levels authorized for public, plus those for the given Roles)

    :param role_permissions: mapping of role name -> {operation: level},
        or None when no role configuration is available
    :return: dict of operation -> granted level for this user
    """
    # Root bypasses the configuration entirely
    if self.is_root:
        return {
            'transfer': 'all',
            'deleg': 'all',
            'config': 'all',
            'datamanagement': 'all'
        }
    granted_level = dict()
    # Public apply to anyone
    if role_permissions is not None:
        if 'public' in role_permissions:
            granted_level = copy.deepcopy(role_permissions['public'])
        # Roles from the proxy
        # (role-specific grants override/extend the public ones)
        for grant in self.roles:
            if grant in role_permissions:
                granted_level.update(copy.deepcopy(role_permissions[grant]))
    # DB Configuration
    # DNs explicitly authorized in the database get 'all' for that operation
    for grant in Session.query(AuthorizationByDn).filter(
            AuthorizationByDn.dn == self.user_dn).all():
        log.info(
            '%s granted to "%s" because it is configured in the database' % (grant.operation, self.user_dn))
        granted_level[grant.operation] = 'all'
    return granted_level
def get_dm(self, job_id):
    """
    Return the data management tasks that belong to the given job.

    Raises 404 if the job does not exist, 403 if the requester is not
    allowed to inspect it.
    """
    owner = Session.query(Job.user_dn, Job.vo_name)\
        .filter(Job.job_id == job_id).first()
    if owner is None:
        raise HTTPNotFound('No job with the id "%s" has been found' % job_id)

    user_dn, vo_name = owner[0], owner[1]
    if not authorized(TRANSFER, resource_owner=user_dn, resource_vo=vo_name):
        raise HTTPForbidden('Not enough permissions to check the job "%s"' % job_id)

    # Stream rows in batches and skip eager loading of related entities
    query = Session.query(DataManagement)\
        .filter(DataManagement.job_id == job_id)
    return query.yield_per(100).enable_eagerloads(False)
def request(self, dlg_id, start_response):
    """
    First step of the delegation process: get a certificate request

    The returned certificate request must be signed with the user's original
    credentials.
    """
    user = request.environ['fts3.User.Credentials']

    # The delegation id in the URL must match the authenticated user's id
    if dlg_id != user.delegation_id:
        raise HTTPForbidden('The requested ID and the credentials ID do not match')

    credential_cache = Session.query(CredentialCache)\
        .get((user.delegation_id, user.user_dn))

    if credential_cache is None or credential_cache.cert_request is None:
        # No usable cached request: generate a fresh key pair + certificate
        # request and persist it so the second step can find the private key
        (x509_request, private_key) = _generate_proxy_request()
        credential_cache = CredentialCache(dlg_id=user.delegation_id, dn=user.user_dn,
                                           cert_request=x509_request.as_pem(),
                                           priv_key=private_key.as_pem(cipher=None),
                                           voms_attrs=' '.join(user.voms_cred))
        try:
            Session.merge(credential_cache)
            Session.commit()
        except Exception:
            # Roll back on any DB failure and let the error propagate
            Session.rollback()
            raise
        log.debug("Generated new credential request for %s" % dlg_id)
    else:
        log.debug("Using cached request for %s" % dlg_id)

    # Return the PEM request as plain text; the delegation id goes in a header
    start_response('200 Ok', [('X-Delegation-ID', credential_cache.dlg_id),
                              ('Content-Type', 'text/plain')])
    return [credential_cache.cert_request]
def test_submit_post(self):
    """
    Submit a valid job via POST and check it reached the database.
    """
    self.setupGridsiteEnvironment()
    self.pushDelegation()

    transfer = {
        'sources': ['root://source.es/file'],
        'destinations': ['root://dest.ch/file'],
        'selection_strategy': 'orderly',
        'checksum': 'adler32:1234',
        'filesize': 1024,
        'metadata': {'mykey': 'myvalue'},
    }
    job = {
        'files': [transfer],
        'params': {'overwrite': True, 'verify_checksum': True},
    }

    answer = self.app.post(
        url=url_for(controller='jobs', action='submit'),
        content_type='application/json',
        params=json.dumps(job),
        status=200
    )

    # Make sure it was committed to the DB
    jobId = json.loads(answer.body)['job_id']
    assert len(jobId) > 0
    self._validateSubmitted(Session.query(Job).get(jobId))
    return jobId
def test_set_voms(self):
    """
    The server must regenerate a proxy with VOMS extensions
    Need a real proxy for this one
    """
    self.setup_gridsite_environment()
    creds = self.get_user_credentials()

    # Need to push a real proxy :/
    proxy_pem = self.get_real_x509_proxy()
    if proxy_pem is None:
        raise SkipTest(
            'Could not get a valid real proxy for test_set_voms')

    stored = Credential()
    stored.dn = creds.user_dn
    stored.dlg_id = creds.delegation_id
    stored.proxy = proxy_pem
    stored.termination_time = datetime.utcnow() + timedelta(hours=1)
    Session.merge(stored)
    Session.commit()

    # Now, request the voms extensions
    self.app.post_json(url="/delegation/%s/voms" % creds.delegation_id,
                       params=['dteam:/dteam/Role=lcgadmin'],
                       status=203)

    # And validate
    regenerated = Session.query(Credential).get(
        (creds.delegation_id, creds.user_dn))
    self.assertNotEqual(stored.proxy, regenerated.proxy)
    self.assertEqual('dteam:/dteam/Role=lcgadmin', regenerated.voms_attrs)
def test_cancel_reuse_small_files_and_big_files(self):
    """
    Cancel a job with small files and one big file is reused
    """
    job_id = self._submit_none_reuse(100, 1)

    response = self.app.delete(url="/jobs/%s" % job_id, status=200)
    job_dict = response.json
    self.assertEqual(job_dict['job_id'], job_id)
    self.assertEqual(job_dict['job_state'], 'CANCELED')
    self.assertEqual(job_dict['reason'], 'Job canceled by the user')

    # Is it in the database?
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, 'CANCELED')

    # Session reuse decides the expected job type
    auto_session_reuse = pylons.config.get('fts3.AutoSessionReuse', 'false')
    expected_type = 'Y' if auto_session_reuse == 'true' else 'N'
    self.assertEqual(job.job_type, expected_type)
    self.assertNotEqual(None, job.job_finished)

    for f in job.files:
        self.assertEqual(f.file_state, 'CANCELED')
        self.assertNotEqual(None, f.finish_time)
def remove_authz(self, start_response):
    """
    Revoke access for a DN for a given operation, or all

    Query parameters:
        dn        -- the distinguished name whose grants are revoked (required)
        operation -- when given, revoke only that operation; otherwise all

    Raises HTTPBadRequest when the DN is missing; responds 204 on success.
    """
    input_dict = get_input_as_dict(request, from_query=True)
    dn = input_dict.get('dn')
    op = input_dict.get('operation')
    if not dn:
        raise HTTPBadRequest('Missing DN parameter')

    to_be_removed = Session.query(AuthorizationByDn).filter(
        AuthorizationByDn.dn == dn)
    if op:
        to_be_removed = to_be_removed.filter(
            AuthorizationByDn.operation == op)

    try:
        to_be_removed.delete()
        if op:
            audit_configuration('revoke', '%s revoked for "%s"' % (op, dn))
        else:
            audit_configuration('revoke', 'All revoked for "%s"' % dn)
        Session.commit()
    # Was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt;
    # narrowed to Exception, consistent with _cancel_jobs
    except Exception:
        Session.rollback()
        raise

    start_response('204 No Content', [])
    return ['']
def test_with_activity(self):
    """
    Submit a job specifiying activities for the files
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
    first = {
        'sources': ['root://source.es/file'],
        'destinations': [dest_surl],
        'activity': 'my-activity'
    }
    second = {
        'sources': ['https://source.es/file2'],
        'destinations': ['https://dest.ch/file2'],
        'activity': 'my-second-activity'
    }

    answer = self.app.put(url="/jobs",
                          content_type='application/json',
                          params=json.dumps({'files': [first, second]}),
                          status=200)
    job_id = answer.json['job_id']

    # Make sure it was commited to the DB
    self.assertGreater(len(job_id), 0)
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.files[0].activity, 'my-activity')
    self.assertEqual(job.files[1].activity, 'my-second-activity')
def test_submit_with_cloud_cred(self):
    """
    Submit a job specifying cloud credentials

    Verifies the submission is accepted, stored, and that the chosen
    cloud credential name ends up in the job's user_cred field.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    job = {
        'files': [{
            'sources': ['dropbox://dropbox.com/file'],
            'destinations': ['root://dest.ch:8447/file'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True, 'credential': 'dropbox'}
    }

    response = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(job),
        status=200
    )
    # assertEquals is a deprecated alias; use assertEqual like the rest of the suite
    self.assertEqual(response.content_type, 'application/json')

    job_id = json.loads(response.body)['job_id']
    job = Session.query(Job).get(job_id)
    self.assertEqual(1, len(job.files))
    self.assertEqual('dropbox://dropbox.com/file', job.files[0].source_surl)
    self.assertEqual('root://dest.ch:8447/file', job.files[0].dest_surl)
    self.assertEqual('dropbox', job.user_cred)
def test_submit(self):
    """
    Submit a valid job
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
    transfer = {
        'sources': ['root://source.es/file'],
        'destinations': [dest_surl],
        'selection_strategy': 'orderly',
        'checksum': 'adler32:1234',
        'filesize': 1024,
        'metadata': {'mykey': 'myvalue'},
    }
    submission = {
        'files': [transfer],
        'params': {'overwrite': True, 'verify_checksum': True},
    }

    answer = self.app.put(url="/jobs",
                          content_type='application/json',
                          params=json.dumps(submission),
                          status=200)
    job_id = answer.json['job_id']

    # Make sure it was committed to the DB
    self.assertGreater(job_id, 0)
    self._validate_submitted(Session.query(Job).get(job_id))
    return str(job_id)
def test_with_activity(self):
    """
    Submit a job specifiying activities for the files
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    transfers = [
        {
            'sources': ['root://source.es/file'],
            'destinations': ['root://dest.ch/file'],
            'activity': 'my-activity'
        },
        {
            'sources': ['https://source.es/file2'],
            'destinations': ['https://dest.ch/file2'],
            'activity': 'my-second-activity'
        },
    ]

    answer = self.app.put(url="/jobs",
                          params=json.dumps({'files': transfers}),
                          status=200)

    # Make sure it was commited to the DB
    job_id = json.loads(answer.body)['job_id']
    self.assertGreater(len(job_id), 0)
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.files[0].activity, 'my-activity')
    self.assertEqual(job.files[1].activity, 'my-second-activity')
def test_surl_with_spaces(self):
    """
    Submit a job where the surl has spaces at the beginning and at the end
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
    # Both surls carry leading/trailing whitespace that must be stripped
    transfer = {
        'sources': ['root://source.es/file\n \r '],
        'destinations': ['\r\n' + dest_surl + '\n\n \n'],
        'selection_strategy': 'orderly',
        'checksum': 'adler32:1234',
        'filesize': 1024.0,
        'metadata': {'mykey': 'myvalue'},
    }
    submission = {
        'files': [transfer],
        'params': {'overwrite': True, 'verify_checksum': True},
    }

    answer = self.app.put(url="/jobs",
                          content_type='application/json',
                          params=json.dumps(submission),
                          status=200)
    job_id = answer.json['job_id']

    # Make sure it was commited to the DB
    self.assertGreater(len(job_id), 0)
    job = Session.query(Job).get(job_id)
    self._validate_submitted(job)
def test_no_vo(self):
    """
    Submit a valid job with no VO data in the credentials (could happen with plain SSL!)
    The job must be accepted, but assigned to the user's 'virtual' vo.
    """
    self.setup_gridsite_environment(no_vo=True)
    self.push_delegation()

    dest_surl = 'root://dest.ch/file' + str(random.randint(0, 100))
    transfer = {
        'sources': ['root://source.es/file'],
        'destinations': [dest_surl],
        'selection_strategy': 'orderly',
        'checksum': 'adler32:1234',
        'filesize': 1024,
        'metadata': {'mykey': 'myvalue'},
    }
    submission = {
        'files': [transfer],
        'params': {'overwrite': True, 'verify_checksum': True},
    }

    answer = self.app.put(url="/jobs",
                          content_type='application/json',
                          params=json.dumps(submission),
                          status=200)
    job_id = answer.json['job_id']

    # Make sure it was commited to the DB
    self.assertGreater(len(job_id), 0)
    self._validate_submitted(Session.query(Job).get(job_id), no_vo=True)
def test_set_voms(self):
    """
    The server must regenerate a proxy with VOMS extensions
    Need a real proxy for this one
    """
    self.setup_gridsite_environment()
    creds = self.get_user_credentials()

    # Need to push a real proxy :/
    proxy_pem = self.get_real_x509_proxy()
    if proxy_pem is None:
        raise SkipTest('Could not get a valid real proxy for test_set_voms')

    stored = Credential()
    stored.dn = creds.user_dn
    stored.dlg_id = creds.delegation_id
    stored.proxy = proxy_pem
    stored.termination_time = datetime.utcnow() + timedelta(hours=1)
    Session.merge(stored)
    Session.commit()

    # Now, request the voms extensions
    self.app.post(url="/delegation/%s/voms" % creds.delegation_id,
                  content_type='application/json',
                  params=json.dumps(['dteam:/dteam/Role=lcgadmin']),
                  status=203)

    # And validate
    regenerated = Session.query(Credential).get((creds.delegation_id, creds.user_dn))
    self.assertNotEqual(stored.proxy, regenerated.proxy)
    self.assertEqual('dteam:/dteam/Role=lcgadmin', regenerated.voms_attrs)
def test_job_priority_invalid(self):
    """
    Submit a job, try to change priority to an invalid value later
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    submission = {
        'files': [{
            'sources': ['root://source.es/file'],
            'destinations': ['root://dest.ch/file'],
        }],
        'params': {'priority': 2},
    }
    job_id = self.app.post_json(url="/jobs",
                                params=submission,
                                status=200).json['job_id']

    # The submitted priority must be stored
    job = Session.query(Job).get(job_id)
    self.assertEqual(2, job.priority)

    # A non-numeric priority must be rejected with 400
    self.app.post_json(url="/jobs/%s" % str(job_id),
                       params={'params': {'priority': 'axxx'}},
                       status=400)
def _getJob(self, id):
    """
    Look up a job by its id, enforcing TRANSFER authorization.

    Aborts with 404 when the job does not exist and with 403 when the
    requester may not inspect it.
    """
    job = Session.query(Job).get(id)
    if job is None:
        abort(404, 'No job with the id "%s" has been found' % id)

    allowed = authorized(TRANSFER,
                         resource_owner=job.user_dn,
                         resource_vo=job.vo_name)
    if not allowed:
        abort(403, 'Not enough permissions to check the job "%s"' % id)
    return job