def test_ban_se_cancel_vo(self):
    """
    Cancel a SE that has files queued, make sure they are canceled (with VO)

    The ban is restricted to vo_name=dteam, so only the dteam job must be
    canceled; the atlas jobs must remain SUBMITTED.
    """
    jobs = list()
    jobs.append(insert_job('dteam', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'))
    jobs.append(insert_job('atlas', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'))
    jobs.append(insert_job('atlas', 'gsiftp://source', 'gsiftp://destination2', 'SUBMITTED'))
    answer = self.app.post(
        url="/ban/se",
        params={'storage': 'gsiftp://source', 'status': 'cancel', 'vo_name': 'dteam'},
        status=200
    )
    canceled_ids = json.loads(answer.body)
    # Only the dteam job (jobs[0]) should have been canceled
    self.assertEqual(1, len(canceled_ids))
    self.assertIn(jobs[0], canceled_ids)
    # Verify the job and file states directly in the database
    for job_id in jobs:
        job = Session.query(Job).get(job_id)
        files = Session.query(File).filter(File.job_id == job_id)
        if job_id in canceled_ids:
            self.assertEqual('CANCELED', job.job_state)
        else:
            self.assertEqual('SUBMITTED', job.job_state)
        for f in files:
            if job_id in canceled_ids:
                self.assertEqual('CANCELED', f.file_state)
            else:
                self.assertEqual('SUBMITTED', f.file_state)
def test_ban_se_wait_vo(self):
    """
    Ban a SE, but instead of canceling, give jobs some time to finish (with VO)

    With status=wait nothing changes state: all jobs stay SUBMITTED, but the
    files of the affected VO get the requested wait_timeout set.
    """
    jobs = list()
    jobs.append(insert_job('dteam', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'))
    jobs.append(insert_job('atlas', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'))
    jobs.append(insert_job('atlas', 'gsiftp://source', 'gsiftp://destination2', 'SUBMITTED'))
    answer = self.app.post(
        url="/ban/se",
        params={'storage': 'gsiftp://source', 'status': 'wait', 'vo_name': 'dteam', 'timeout': 33},
        status=200
    )
    waiting_ids = json.loads(answer.body)
    # Only the dteam job is affected by the VO-scoped ban
    self.assertEqual(1, len(waiting_ids))
    self.assertIn(jobs[0], waiting_ids)
    for job_id in jobs:
        job = Session.query(Job).get(job_id)
        files = Session.query(File).filter(File.job_id == job_id)
        # No job may have been canceled in wait mode
        self.assertEqual('SUBMITTED', job.job_state)
        for f in files:
            self.assertEqual('SUBMITTED', f.file_state)
            # Only files of the banned VO carry the timeout
            if job_id in waiting_ids:
                self.assertEqual(33, f.wait_timeout)
            else:
                self.assertEqual(None, f.wait_timeout)
def cancel(self, id, **kwargs):
    """
    DELETE /jobs/id: cancel an existing job.

    If the job is still in an active state, mark it and all of its
    still-active files as CANCELED; files already in a terminal state are
    left untouched. Returns the (re-fetched) job entity.

    Raises (via _getJob): 404 if the job does not exist, 403 if the caller
    is not authorized.
    """
    job = self._getJob(id)
    if job.job_state in JobActiveStates:
        # Use UTC for timestamps, consistently with the rest of the code base
        now = datetime.utcnow()
        job.job_state = 'CANCELED'
        job.finish_time = now
        job.job_finished = now
        job.reason = 'Job canceled by the user'
        # Only cancel files that have not already reached a terminal state
        for f in job.files:
            if f.file_state in JobActiveStates:
                f.file_state = 'CANCELED'
                f.job_finished = now
                f.finish_time = now
                f.reason = 'Job canceled by the user'
        Session.merge(job)
        Session.commit()
        # Re-fetch so the caller gets the persisted state
        job = self._getJob(id)
    # Touch the relationship so the files are loaded before returning
    # (the original bound this to an unused local; the access is the point)
    job.files
    return job
def test_get_retries(self):
    """
    Get the retries for a file, forcing one
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    job_id = self._submit()

    # Find out the id of the first file of the job
    files = json.loads(self.app.get(url="/jobs/%s/files" % job_id, status=200).body)
    file_id = files[0]['file_id']

    # Inject one retry entry directly into the database
    forced = FileRetryLog()
    forced.file_id = file_id
    forced.attempt = 1
    forced.datetime = datetime.utcnow()
    forced.reason = 'Blahblahblah'
    Session.merge(forced)
    Session.commit()

    # The REST API must report exactly that one retry
    answer = self.app.get(url="/jobs/%s/files/%d/retries" % (job_id, file_id), status=200)
    retries = json.loads(answer.body)
    self.assertEqual(1, len(retries))
    self.assertEqual(1, retries[0]['attempt'])
    self.assertEqual('Blahblahblah', retries[0]['reason'])
def test_expired(self):
    """
    Get a token, the token expires, so it should be denied

    First verifies the bearer token works via /whoami, then forces the
    expiry date into the past directly in the DB and expects a 403.
    """
    client_id, access_token, refresh_token, expires = self.test_get_token()
    # Drop the gridsite credentials so only the bearer token authenticates us
    del self.app.extra_environ['GRST_CRED_AURI_0']
    response = self.app.get(
        url="/whoami",
        headers={'Authorization': str('Bearer %s' % access_token)},
        status=200
    )
    whoami = json.loads(response.body)
    self.assertEqual('oauth2', whoami['method'])
    # Expire the token behind the scenes
    token = Session.query(OAuth2Token).get((client_id, refresh_token))
    token.expires = datetime.utcnow() - timedelta(hours=1)
    Session.merge(token)
    Session.commit()
    # The same token must now be rejected
    response = self.app.get(
        url="/whoami",
        headers={'Authorization': str('Bearer %s' % access_token)},
        status=403
    )
def test_ban_se_cancel(self):
    """
    Ban a SE that has files queued, make sure they are canceled

    Active jobs (SUBMITTED, ACTIVE) must be canceled; the job already in a
    terminal state (FAILED) must not be touched.
    """
    jobs = list()
    jobs.append(insert_job('dteam', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'))
    jobs.append(insert_job('dteam', 'gsiftp://source', 'gsiftp://destination2', 'ACTIVE'))
    jobs.append(insert_job('dteam', 'gsiftp://source', 'gsiftp://destination2', 'FAILED', duration=10, queued=20))
    answer = self.app.post(url="/ban/se", params={'storage': 'gsiftp://source'}, status=200)
    canceled_ids = json.loads(answer.body)
    # Only the two active jobs are reported as canceled
    self.assertEqual(2, len(canceled_ids))
    self.assertIn(jobs[0], canceled_ids)
    self.assertIn(jobs[1], canceled_ids)
    self.assertNotIn(jobs[2], canceled_ids)
    # The canceled jobs and their files must be fully terminated
    for job_id in jobs[0:2]:
        job = Session.query(Job).get(job_id)
        files = Session.query(File).filter(File.job_id == job_id)
        self.assertEqual('CANCELED', job.job_state)
        self.assertNotEqual(None, job.job_finished)
        self.assertNotEqual(None, job.finish_time)
        for f in files:
            self.assertEqual('CANCELED', f.file_state)
            self.assertNotEqual(None, f.job_finished)
            self.assertNotEqual(None, f.finish_time)
            self.assertEqual('Storage banned', f.reason)
    # The terminal job must keep its FAILED state
    job = Session.query(Job).get(jobs[2])
    self.assertEqual(job.job_state, 'FAILED')
    files = Session.query(File).filter(File.job_id == job.job_id)
    for f in files:
        self.assertEqual('FAILED', f.file_state)
def test_set_voms(self):
    """
    The server must regenerate a proxy with VOMS extensions
    Need a real proxy for this one

    Pushes a genuine X509 proxy into the credential store, then asks the
    server to add VOMS extensions and checks the stored proxy changed.
    """
    self.setup_gridsite_environment()
    creds = self.get_user_credentials()

    # Need to push a real proxy :/
    proxy_pem = self.get_real_x509_proxy()
    if proxy_pem is None:
        raise SkipTest('Could not get a valid real proxy for test_set_voms')

    proxy = Credential()
    proxy.dn = creds.user_dn
    proxy.dlg_id = creds.delegation_id
    proxy.termination_time = datetime.utcnow() + timedelta(hours=1)
    proxy.proxy = proxy_pem
    Session.merge(proxy)
    Session.commit()

    # Now, request the voms extensions
    self.app.post(url="/delegation/%s/voms" % creds.delegation_id,
                  content_type='application/json',
                  params=json.dumps(['dteam:/dteam/Role=lcgadmin']),
                  status=203)

    # And validate: the proxy must have been regenerated with the attributes
    proxy2 = Session.query(Credential).get((creds.delegation_id, creds.user_dn))
    self.assertNotEqual(proxy.proxy, proxy2.proxy)
    self.assertEqual('dteam:/dteam/Role=lcgadmin', proxy2.voms_attrs)
def apiVersion(self):
    """
    Return the API version information plus the HAL links of the entry points.
    """
    credV = Session.query(CredentialVersion)[0]
    schemaV = Session.query(SchemaVersion)[0]
    return {'api': _Version(0, 2, 1),
            # Fixed: these two were swapped — 'schema' must expose the
            # SchemaVersion and 'delegation' the CredentialVersion
            'schema': schemaV,
            'delegation': credV,
            '_links': {
                'curies': [{'name': 'fts', 'href': 'https://svnweb.cern.ch/trac/fts3'}],

                'fts:whoami': {'href': '/whoami', 'title': 'Check user certificate'},

                'fts:joblist': {'href': '/jobs{?vo_name,user_dn}', 'title': 'List of active jobs', 'templated': True},
                'fts:job': {
                    'href': '/jobs/{id}', 'title': 'Job information', 'templated': True,
                    'hints': {
                        'allow': ['GET', 'DELETE']
                    }
                },

                'fts:configaudit': {'href': '/config/audit', 'title': 'Configuration'},

                'fts:submitschema': {'href': '/schema/submit', 'title': 'JSON schema of messages'},
                'fts:jobsubmit': {
                    'href': '/jobs',
                    'hints': {
                        'allow': ['POST'],
                        'representations': ['fts:submitschema']
                    }
                },
            }
    }
def popDelegation(self):
    """Remove the stored delegated credential for the current user, if any."""
    creds = self.getUserCredentials()
    if not (creds and creds.delegation_id):
        return
    stored = Session.query(Credential).get((creds.delegation_id, creds.user_dn))
    if stored:
        Session.delete(stored)
        Session.commit()
def get_my_apps(self):
    """
    Returns the list of registered apps

    Renders HTML when the client accepts it, JSON otherwise. The JSON
    branch re-serializes the authorized apps into plain dictionaries.
    """
    user = pylons.request.environ['fts3.User.Credentials']
    # Apps owned by the requesting user
    my_apps = Session.query(OAuth2Application).filter(OAuth2Application.owner == user.user_dn).all()
    # Apps the user has granted a token to (join application + token columns)
    authorized_apps = Session.query(
        OAuth2Application.client_id, OAuth2Application.name, OAuth2Application.website,
        OAuth2Application.description, OAuth2Token.refresh_token, OAuth2Token.scope,
        OAuth2Token.expires
    ).filter((OAuth2Token.dlg_id == user.delegation_id) & (OAuth2Token.client_id == OAuth2Application.client_id))
    response = {'apps': my_apps, 'authorized': authorized_apps}
    if _accept_html(pylons.request.accept):
        pylons.response.headers['Content-Type'] = 'text/html; charset=UTF-8'
        response['user'] = user
        response['site'] = pylons.config['fts3.SiteName']
        return render('/apps.html', extra_vars=response)
    else:
        pylons.response.headers['Content-Type'] = 'application/json'
        # Better serialization for authorized apps
        # (row tuples are not JSON-friendly, so build plain dicts)
        authorized = list()
        for auth in authorized_apps:
            authorized.append({
                'name': auth.name,
                'website': auth.website,
                'description': auth.description,
                'scope': auth.scope,
                'expires': auth.expires
            })
        response['authorized'] = authorized
        return to_json(response)
def test_ban_se_partial_job(self):
    """
    Ban a SE that has files queued. If a job has other pairs, the job must remain!
    """
    job_id = insert_job(
        'dteam',
        multiple=[('gsiftp://source', 'gsiftp://destination'), ('gsiftp://other', 'gsiftp://destination')]
    )
    canceled_ids = json.loads(
        self.app.post(url="/ban/se", params={'storage': 'gsiftp://source'}, status=200).body
    )
    self.assertEqual(1, len(canceled_ids))
    self.assertEqual(job_id, canceled_ids[0])

    # The job itself must survive, since it still has a usable pair
    job = Session.query(Job).get(job_id)
    self.assertEqual('SUBMITTED', job.job_state)
    self.assertEqual(None, job.job_finished)
    self.assertEqual(None, job.finish_time)

    # Only the transfers from the banned storage are canceled
    for f in Session.query(File).filter(File.job_id == job_id):
        if f.source_se == 'gsiftp://source':
            self.assertEqual('CANCELED', f.file_state)
            self.assertNotEqual(None, f.finish_time)
        else:
            self.assertEqual('SUBMITTED', f.file_state)
            self.assertEqual(None, f.job_finished)
def pop_delegation(self):
    """
    Remove the mock proxy from the database
    """
    cred = self.get_user_credentials()
    if cred is None or not cred.delegation_id:
        return
    existing = Session.query(Credential).get((cred.delegation_id, cred.user_dn))
    if existing is not None:
        Session.delete(existing)
        Session.commit()
def pushDelegation(self, lifetime=timedelta(hours=7)):
    """
    Store a mock delegated credential for the current user.

    :param lifetime: how long the mock proxy should remain valid
    """
    creds = self.getUserCredentials()
    delegated = Credential()
    delegated.dlg_id = creds.delegation_id
    delegated.dn = creds.user_dn
    delegated.proxy = '-NOT USED-'
    delegated.voms_attrs = None
    # Use UTC: expiry checks elsewhere compare against datetime.utcnow(),
    # so a naive local datetime.now() could make the proxy appear expired
    # (or over-valid) depending on the host timezone
    delegated.termination_time = datetime.utcnow() + lifetime
    Session.merge(delegated)
    Session.commit()
def test_ban_dn_submission(self):
    """
    If a DN is banned, submissions from this user must not be accepted
    """
    # Ban the current user's DN directly in the database
    banned = BannedDN()
    banned.dn = self.get_user_credentials().user_dn
    Session.merge(banned)
    Session.commit()

    # Even with a valid delegation, the submission must be rejected
    self.push_delegation()
    self.app.post(url="/jobs", content_type='application/json', params='[]', status=403)
def get_files(self, job_id):
    """
    Get the files within a job

    Raises HTTPNotFound if the job does not exist and HTTPForbidden if the
    caller may not inspect it.
    """
    owner = Session.query(Job.user_dn, Job.vo_name).filter(Job.job_id == job_id).first()
    if owner is None:
        raise HTTPNotFound('No job with the id "%s" has been found' % job_id)
    allowed = authorized(TRANSFER, resource_owner=owner[0], resource_vo=owner[1])
    if not allowed:
        raise HTTPForbidden('Not enough permissions to check the job "%s"' % job_id)
    # noload: retries are served by a dedicated endpoint, do not fetch them here
    query = Session.query(File).filter(File.job_id == job_id).options(noload(File.retries))
    return query.all()
def setUp(self):
    # Inject a Dropbox app so cloud-storage tests have a registered service.
    # NOTE(review): the 'cloudStorage_name' kwarg presumably mirrors the
    # legacy t_cloudStorage column name — confirm against the CloudStorage
    # model before renaming it to the 'storage_name' used elsewhere.
    cs = CloudStorage(
        cloudStorage_name='DROPBOX',
        app_key='1234',
        app_secret='sssh',
        service_api_url='https://api.dropbox.com'
    )
    Session.merge(cs)
    Session.commit()
    self.setup_gridsite_environment()
def _get_limits(source, destination):
    """
    Return the configured throughput limits for a source and a destination SE.

    :param source:      source storage element
    :param destination: destination storage element
    :return: dict with optional 'source' and 'destination' throughput entries
    """
    # NOTE: '!= None' is intentional — SQLAlchemy overloads the operator to
    # generate 'IS NOT NULL'; 'is not None' would not translate to SQL.
    # Use first() instead of all(): only one row is ever used, so there is
    # no point in fetching every matching row.
    source_thr = Session.query(Optimize.throughput)\
        .filter(Optimize.source_se == source).filter(Optimize.throughput != None).first()
    dest_thr = Session.query(Optimize.throughput)\
        .filter(Optimize.dest_se == destination).filter(Optimize.throughput != None).first()

    limits = dict()
    if source_thr:
        limits['source'] = source_thr[0]
    if dest_thr:
        limits['destination'] = dest_thr[0]
    return limits
def get_file_retries(self, job_id, file_id):
    """
    Get the retries for a given file

    :raises HTTPNotFound:  if the job or the file does not exist
    :raises HTTPForbidden: if the caller may not inspect the job
    """
    owner = Session.query(Job.user_dn, Job.vo_name).filter(Job.job_id == job_id).first()
    if owner is None:
        raise HTTPNotFound('No job with the id "%s" has been found' % job_id)
    if not authorized(TRANSFER, resource_owner=owner[0], resource_vo=owner[1]):
        raise HTTPForbidden('Not enough permissions to check the job "%s"' % job_id)
    # Bug fix: a Query object is always truthy, so the original
    # 'if not f' check could never trigger the 404. Materialize the row
    # to really verify the file exists.
    f = Session.query(File.file_id).filter(File.file_id == file_id).first()
    if f is None:
        raise HTTPNotFound('No file with the id "%d" has been found' % file_id)
    retries = Session.query(FileRetryLog).filter(FileRetryLog.file_id == file_id)
    return retries.all()
def test_ban_dn(self):
    """
    Just ban a DN and unban it, make sure changes go into the DB
    """
    answer = self.app.post(url='/ban/dn', params={'user_dn': '/DC=cern/CN=someone'}, status=200)
    self.assertEqual(0, len(json.loads(answer.body)))

    # The ban must now be visible in the database
    entry = Session.query(BannedDN).get('/DC=cern/CN=someone')
    self.assertNotEqual(None, entry)
    self.assertEqual(self.get_user_credentials().user_dn, entry.admin_dn)

    # Unban and verify the entry is gone
    self.app.delete(url="/ban/dn?user_dn=%s" % urllib.quote('/DC=cern/CN=someone'), status=204)
    entry = Session.query(BannedDN).get('/DC=cern/CN=someone')
    self.assertEqual(None, entry)
def _getJob(self, id):
    """
    Fetch a job by its id, aborting with 404 if it does not exist and
    403 if the caller is not allowed to see it.
    """
    job = Session.query(Job).get(id)
    if job is None:
        abort(404, 'No job with the id "%s" has been found' % id)
    allowed = authorized(TRANSFER, resource_owner=job.user_dn, resource_vo=job.vo_name)
    if not allowed:
        abort(403, 'Not enough permissions to check the job "%s"' % id)
    return job
def test_submit_multiple_sources(self):
    """
    Submit a job with several source/destination alternatives for one file.

    Both alternatives must be stored with the same file_index (0), since
    they are replicas of the same logical transfer.
    """
    self.setupGridsiteEnvironment()
    self.pushDelegation()
    job = {'files': [{'sources':      ['http://source.es:8446/file', 'root://source.es/file'],
                      'destinations': ['http://dest.ch:8447/file', 'root://dest.ch/file'],
                      'selection_strategy': 'orderly',
                      'checksum': 'adler32:1234',
                      'filesize': 1024,
                      'metadata': {'mykey': 'myvalue'},
                      }],
           'params': {'overwrite': True}}

    answer = self.app.post(url = url_for(controller = 'jobs', action = 'submit'),
                           content_type = 'application/json',
                           params = json.dumps(job),
                           status = 200)

    # Validate job in the database
    jobId = json.loads(answer.body)['job_id']
    dbJob = Session.query(Job).get(jobId)
    assert len(dbJob.files) == 2
    # Both pairs share file_index 0: they are alternatives, not separate files
    assert dbJob.files[0].file_index == 0
    assert dbJob.files[0].source_surl == 'http://source.es:8446/file'
    assert dbJob.files[0].dest_surl == 'http://dest.ch:8447/file'
    assert dbJob.files[0].file_metadata['mykey'] == 'myvalue'
    assert dbJob.files[1].file_index == 0
    assert dbJob.files[1].source_surl == 'root://source.es/file'
    assert dbJob.files[1].dest_surl == 'root://dest.ch/file'
    assert dbJob.files[1].file_metadata['mykey'] == 'myvalue'
def test_register(self):
    """
    Test the registration of an app
    """
    self.setup_gridsite_environment()
    app_description = {
        'name': 'MyApp',
        'description': 'Blah blah blah',
        'website': 'https://example.com',
        'redirect_to': 'https://mysite.com/callback'
    }
    response = self.app.post(
        url="/oauth2/register",
        content_type='application/json',
        params=json.dumps(app_description),
        status=201
    )
    client_id = json.loads(response.body)
    self.assertNotEqual(None, client_id)

    # Everything submitted must have been persisted verbatim
    stored = Session.query(OAuth2Application).get(client_id)
    self.assertEqual('MyApp', stored.name)
    self.assertEqual('Blah blah blah', stored.description)
    self.assertEqual('https://example.com', stored.website)
    self.assertEqual('https://mysite.com/callback', stored.redirect_to)
    self.assertEqual('/DC=ch/DC=cern/CN=Test User', stored.owner)
    return client_id
def test_simple_bringonline(self):
    """
    Test a regular, one file, bring online job
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    submission = {
        'files': [{
            'sources': ['srm://source.es/?SFN=/path/'],
            'destinations': ['srm://dest.ch/file'],
        }],
        'params': {
            'overwrite': True,
            'copy_pin_lifetime': 3600,
            'bring_online': 60,
            'verify_checksum': True
        }
    }
    answer = self.app.post(url="/jobs",
                           content_type='application/json',
                           params=json.dumps(submission),
                           status=200)

    # Make sure it was committed to the DB
    job_id = json.loads(answer.body)['job_id']
    self.assertGreater(len(job_id), 0)

    # bring_online requests must enter in STAGING, not SUBMITTED
    db_job = Session.query(Job).get(job_id)
    self.assertEqual(db_job.job_state, 'STAGING')
    self.assertEqual(db_job.files[0].file_state, 'STAGING')
def test_submit_with_cloud_cred(self):
    """
    Submit a job specifying cloud credentials

    The 'credential' parameter must end up stored in Job.user_cred.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    job = {
        'files': [{
            'sources': ['dropbox://dropbox.com/file'],
            'destinations': ['root://dest.ch:8447/file'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True, 'credential': 'dropbox'}
    }

    response = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(job),
        status=200
    )
    self.assertEquals(response.content_type, 'application/json')

    # Verify the submission and the stored cloud credential name
    job_id = json.loads(response.body)['job_id']
    job = Session.query(Job).get(job_id)
    self.assertEqual(1, len(job.files))
    self.assertEqual('dropbox://dropbox.com/file', job.files[0].source_surl)
    self.assertEqual('root://dest.ch:8447/file', job.files[0].dest_surl)
    self.assertEqual('dropbox', job.user_cred)
def test_files_balanced(self):
    """
    Checks the distribution of the file 'hashed ids' is reasonably uniformely distributed.
    hashed_id is a legacy name, its purpose is balance the transfers between hosts
    regardless of the number running in a giving moment

    NOTE(review): this test is disabled below via SkipTest, so everything
    after the raise is currently dead code kept for reference.
    """
    raise SkipTest('Disabled as it is not very reliable')
    self.setup_gridsite_environment()
    self.push_delegation()
    files = []
    for r in xrange(5000):
        files.append({
            'sources': ["root://source.es/file%d" % r],
            'destinations': ["root://dest.ch/file%d" % r]
        })
    job = {'files': files}
    answer = self.app.put(url="/jobs", params=json.dumps(job), status=200)
    job_id = json.loads(answer.body)['job_id']
    files = Session.query(File.hashed_id).filter(File.job_id == job_id)
    hashed_ids = map(lambda f: f.hashed_id, files)
    # Null hypothesis: the distribution of hashed_ids is uniform
    histogram, min_value, binsize, outsiders = scipy.stats.histogram(hashed_ids, defaultlimits=(0, 2 ** 16 - 1))
    chisq, pvalue = scipy.stats.chisquare(histogram)
    self.assertGreater(min_value, -1)
    self.assertEqual(outsiders, 0)
    self.assertGreater(pvalue, 0.1)
def view(self, id):
    """
    Return the termination time of the stored credential for this user,
    or None when no credential is stored.
    """
    user = request.environ['fts3.User.Credentials']
    cred = Session.query(Credential).get((id, user.user_dn))
    if cred:
        return {'termination_time': cred.termination_time}
    return None
def test_surl_with_spaces(self):
    """
    Submit a job where the surl has spaces at the beginning and at the end

    The surrounding whitespace (spaces, newlines, carriage returns) must be
    stripped by the server, so the stored job validates as a normal one.
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    job = {
        'files': [{
            'sources': ['root://source.es/file\n \r '],
            'destinations': ['\r\n root://dest.ch/file\n\n \n'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024.0,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }

    answer = self.app.put(url="/jobs", params=json.dumps(job), status=200)

    # Make sure it was commited to the DB
    job_id = json.loads(answer.body)['job_id']
    self.assertGreater(len(job_id), 0)

    job = Session.query(Job).get(job_id)
    self._validate_submitted(job)
def credential(self, dlg_id, start_response):
    """
    Second step of the delegation process: put the generated certificate

    The certificate being PUT will have to pass the following validation:
        - There is a previous certificate request done
        - The certificate subject matches the certificate issuer + '/CN=Proxy'
        - The certificate modulus matches the stored private key modulus
    """
    user = request.environ['fts3.User.Credentials']
    # Callers may only upload a proxy for their own delegation id
    if dlg_id != user.delegation_id:
        raise HTTPForbidden('The requested ID and the credentials ID do not match')
    credential_cache = Session.query(CredentialCache)\
        .get((user.delegation_id, user.user_dn))
    # A prior request (step one) must have stored the private key
    if credential_cache is None:
        raise HTTPBadRequest('No credential cache found')
    x509_proxy_pem = request.body
    log.debug("Received delegated credentials for %s" % dlg_id)
    log.debug(x509_proxy_pem)
    try:
        expiration_time = _validate_proxy(x509_proxy_pem, credential_cache.priv_key)
        x509_full_proxy_pem = _build_full_proxy(x509_proxy_pem, credential_cache.priv_key)
    # Python 2 except syntax; any proxy problem becomes a client error
    except ProxyException, e:
        raise HTTPBadRequest('Could not process the proxy: ' + str(e))
    # NOTE(review): expiration_time and x509_full_proxy_pem are unused in
    # the visible chunk — persisting the credential presumably continues
    # past this excerpt; confirm against the full source.
def test_submit_post(self):
    """Submit a valid job via POST and validate the DB entry."""
    self.setupGridsiteEnvironment()
    self.pushDelegation()
    job = {
        'files': [{
            'sources': ['root://source.es/file'],
            'destinations': ['root://dest.ch/file'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }

    answer = self.app.post(url = url_for(controller = 'jobs', action = 'submit'),
                           content_type = 'application/json',
                           params = json.dumps(job),
                           status = 200)

    # Make sure it was committed to the DB
    jobId = json.loads(answer.body)['job_id']
    assert len(jobId) > 0
    self._validateSubmitted(Session.query(Job).get(jobId))
    return jobId
def test_submit(self):
    """
    Submit a valid job
    """
    self.setup_gridsite_environment()
    self.push_delegation()

    job = {
        'files': [{
            'sources': ['root://source.es/file'],
            'destinations': ['root://dest.ch/file'],
            'selection_strategy': 'orderly',
            'checksum': 'adler32:1234',
            'filesize': 1024,
            'metadata': {'mykey': 'myvalue'},
        }],
        'params': {'overwrite': True, 'verify_checksum': True}
    }

    answer = self.app.put(url="/jobs", params=json.dumps(job), status=200)

    # Make sure it was committed to the DB
    job_id = json.loads(answer.body)['job_id']
    # Fixed: the original compared the job_id *string* to 0, which is
    # vacuously true in Python 2; the intent is a non-empty id
    self.assertGreater(len(job_id), 0)

    self._validate_submitted(Session.query(Job).get(job_id))

    return str(job_id)
def get_access_requested(self):
    """
    First step of the Dropbox OAuth1 flow: obtain a request token and
    store it associated with the current user.
    """
    dropbox_info = self._get_dropbox_info()
    request_tokens = self._make_call(
        dropboxApiEndpoint + "/1/oauth/request_token",
        'OAuth oauth_version="1.0", oauth_signature_method="PLAINTEXT",'
        'oauth_consumer_key="' + dropbox_info.app_key + '", oauth_signature="' + dropbox_info.app_secret + '&"',
        None
    )

    # It returns: oauth_token_secret=b9q1n5il4lcc&oauth_token=mh7an9dkrg59
    # NOTE(review): the index-based split assumes the secret always comes
    # first in the response — confirm this ordering is guaranteed.
    tokens = request_tokens.split('&')
    newuser = CloudStorageUser(
        user_dn=self.user_dn,
        storage_name=dropbox_info.storage_name,
        request_token=tokens[1].split('=')[1],
        request_token_secret=tokens[0].split('=')[1],
        vo_name=''
    )

    try:
        Session.add(newuser)
        Session.commit()
    except:
        # Roll back before propagating, so the session stays usable
        Session.rollback()
        raise
    return request_tokens
def test_cancel_some_terminal(self):
    """
    Cancel a job with some files in terminal state

    FINISHED files must keep their state and reason; only the still-active
    files are switched to CANCELED.
    """
    job_id = self._submit(10)

    # Force half of the files into a terminal state beforehand
    job = Session.query(Job).get(job_id)
    job.job_state = 'ACTIVE'
    for f in job.files:
        if f.file_id % 2 == 0:
            f.file_state = 'FINISHED'
    Session.merge(job)
    Session.commit()

    job = self.app.delete(url="/jobs/%s" % job_id, status=200).json
    self.assertEqual(job['job_id'], job_id)
    self.assertEqual(job['job_state'], 'CANCELED')
    self.assertEqual(job['reason'], 'Job canceled by the user')

    # Is it in the database?
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, 'CANCELED')
    for f in job.files:
        if f.file_id % 2 == 0:
            # Terminal files are left untouched by the cancelation
            self.assertEqual(f.file_state, 'FINISHED')
            self.assertNotEqual(f.reason, 'Job canceled by the user')
        else:
            self.assertEqual(f.file_state, 'CANCELED')
def test_cancel_running(self):
    """
    Cancel a job, but the transfer is running (pid is set)
    """
    job_id = self._submit()

    # Simulate a running transfer by attaching a pid
    running = Session.query(File).filter(File.job_id == job_id).first()
    running.pid = 1234
    Session.merge(running)
    Session.commit()

    response = self.app.delete(url="/jobs/%s" % job_id, status=200).json
    self.assertEqual(response['job_id'], job_id)
    self.assertEqual(response['job_state'], 'CANCELED')
    self.assertEqual(response['reason'], 'Job canceled by the user')

    # Is it in the database?
    db_job = Session.query(Job).get(job_id)
    self.assertEqual(db_job.job_state, 'CANCELED')
    self.assertNotEqual(None, db_job.job_finished)
    # finish_time stays NULL: the daemon uses that to signal fts_url_copy
    for f in db_job.files:
        self.assertEqual(f.file_state, 'CANCELED')
        self.assertEqual(None, f.finish_time)
def get_access_granted(self):
    """
    Final step of the Dropbox OAuth1 flow: exchange the stored request
    token for an access token and persist it for the current user.
    """
    dropbox_user_info = self._get_dropbox_user_info()
    if not dropbox_user_info:
        raise HTTPBadRequest('No registered user for the service "%s" has been found' % self.service)

    dropbox_info = self._get_dropbox_info()
    if not dropbox_info:
        raise HTTPNotFound('Dropbox info not found in the database')

    access_tokens = self._make_call(
        dropboxApiEndpoint + "/1/oauth/access_token",
        'OAuth oauth_version="1.0", oauth_signature_method="PLAINTEXT", oauth_consumer_key="'
        + dropbox_info.app_key + '", oauth_token="' + dropbox_user_info.request_token
        + '", oauth_signature="' + dropbox_info.app_secret + '&' + dropbox_user_info.request_token_secret + '"',
        None
    )

    # It returns: oauth_token=<access-token>&oauth_token_secret=<access-token-secret>&uid=<user-id>
    # NOTE(review): index-based parsing assumes this exact field order —
    # confirm Dropbox guarantees it.
    access_tokens = access_tokens.split('&')
    dropbox_user_info.access_token = access_tokens[1].split('=')[1]
    dropbox_user_info.access_token_secret = access_tokens[0].split('=')[1]
    try:
        Session.add(dropbox_user_info)
        Session.commit()
    except:
        # Roll back before propagating, so the session stays usable
        Session.rollback()
        raise
    return access_tokens
def cancel_all(self):
    """
    Cancel all files

    Root-only bulk operation: marks every active file, data-management
    operation and job as CANCELED via bulk UPDATEs. Returns the number of
    affected rows per table.
    """
    user = request.environ['fts3.User.Credentials']
    now = datetime.utcnow()

    if not user.is_root:
        raise HTTPForbidden('User does not have root privileges')

    try:
        # FTS3 daemon expects finish_time to be NULL in order to trigger the signal
        # to fts_url_copy
        file_count = Session.query(File).filter(File.file_state.in_(FileActiveStates))\
            .update({
                'file_state': 'CANCELED', 'reason': 'Job canceled by the user',
                'dest_surl_uuid':None, 'finish_time': None
            }, synchronize_session=False)

        # However, for data management operations there is nothing to signal, so
        # set job_finished
        dm_count = Session.query(DataManagement)\
            .filter(DataManagement.file_state.in_(DataManagementActiveStates))\
            .update({
                'file_state': 'CANCELED', 'reason': 'Job canceled by the user',
                'job_finished': now, 'finish_time': now
            }, synchronize_session=False)

        job_count = Session.query(Job).filter(Job.job_state.in_(JobActiveStates))\
            .update({
                'job_state': 'CANCELED', 'reason': 'Job canceled by the user',
                'job_finished': now
            }, synchronize_session=False)
        Session.commit()
        # Bulk UPDATEs bypass the session, so drop any stale cached state
        Session.expire_all()
        log.info("Active jobs canceled")
    except:
        Session.rollback()
        raise
    return {
        "affected_files": file_count,
        "affected_dm": dm_count,
        "affected_jobs": job_count
    }
def _prepare_and_test_created_jobs_to_cancel(self, files_per_job=8):
    """
    Helper function to prepare and test created jobs for cancel tests

    Submits one job per file state (active + terminal), forces each job's
    files into that state, and verifies the setup before returning the ids.
    """
    all_states = FileActiveStates + FileTerminalStates
    job_ids = [self._submit(files_per_job) for _ in range(len(all_states))]

    for job_id, state in zip(job_ids, all_states):
        job = Session.query(Job).get(job_id)
        # STARTED is a file-level state only; the job itself is STAGING
        job.job_state = 'STAGING' if state == 'STARTED' else state
        for f in job.files:
            f.file_state = state
        Session.merge(job)
        Session.commit()

    # Sanity-check that every job/file ended up in the intended state
    for job_id, state in zip(job_ids, all_states):
        job = Session.query(Job).get(job_id)
        expected_job_state = 'STAGING' if state == 'STARTED' else state
        self.assertEqual(job.job_state, expected_job_state)
        for f in job.files:
            self.assertEqual(f.file_state, state)

    return job_ids
def test_submit_reuse_auto_small(self):
    """
    Submit small files with reuse not set (auto). It should be enabled.

    Whether reuse kicks in depends on the fts3.AutoSessionReuse server
    option, so the test branches on the active configuration.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    job = {
        'files': [{
            'sources': ['http://source.es:8446/file'],
            'destinations': ['http://dest.ch:8447/file'],
            'filesize': 1024,
        },
        {
            'sources': ['http://source.es:8446/otherfile'],
            'destinations': ['http://dest.ch:8447/otherfile'],
            'filesize': 1024,
        }],
        'params': {
            'overwrite': True,
            'reuse': None
        }
    }

    job_id = self.app.post(url="/jobs",
                           content_type='application/json',
                           params=json.dumps(job),
                           status=200).json['job_id']

    job = Session.query(Job).get(job_id)
    auto_session_reuse = pylons.config.get('fts3.AutoSessionReuse', 'false')
    if auto_session_reuse == 'true':
        # 'Y' marks a session-reuse job; all files share the hashed_id
        self.assertEqual(job.job_type, 'Y')
        files = Session.query(File).filter(File.job_id == job_id)
        hashed = files[0].hashed_id
        for f in files:
            self.assertEqual(1024, f.user_filesize)
            self.assertEqual(hashed, f.hashed_id)
    else:
        self.assertEqual(job.job_type, 'N')
def test_submit_combination(self):
    """
    Submit a job combining a multi-source file and a plain file, and
    validate that all three resulting transfers are stored correctly.
    """
    self.setupGridsiteEnvironment()
    self.pushDelegation()
    job = {'files': [{'sources':      ['srm://source.es:8446/file', 'srm://source.fr:8443/file'],
                      'destinations': ['srm://dest.ch:8447/file'],
                      'selection_strategy': 'orderly',
                      'checksum': 'adler32:1234',
                      'filesize': 1024,
                      'metadata': {'mykey': 'myvalue'},
                      },
                     {'sources':      ['https://host.com/another/file'],
                      'destinations': ['https://dest.net/another/destination'],
                      'selection_strategy': 'whatever',
                      'checksum': 'adler32:56789',
                      'filesize': 512,
                      'metadata': {'flag': True}
                      }],
           'params': {'overwrite': True, 'verify_checksum': True}}

    answer = self.app.post(url = url_for(controller = 'jobs', action = 'submit'),
                           content_type = 'application/json',
                           params = json.dumps(job),
                           status = 200)

    # Validate job in the database
    jobId = json.loads(answer.body)['job_id']
    dbJob = Session.query(Job).get(jobId)
    assert len(dbJob.files) == 3
    # The two alternatives of the first file share file_index 0
    assert dbJob.files[0].file_index == 0
    assert dbJob.files[0].source_surl == 'srm://source.es:8446/file'
    assert dbJob.files[0].dest_surl == 'srm://dest.ch:8447/file'
    assert dbJob.files[0].checksum == 'adler32:1234'
    assert dbJob.files[0].user_filesize == 1024
    assert dbJob.files[0].file_metadata['mykey'] == 'myvalue'
    assert dbJob.files[1].file_index == 0
    assert dbJob.files[1].source_surl == 'srm://source.fr:8443/file'
    assert dbJob.files[1].dest_surl == 'srm://dest.ch:8447/file'
    assert dbJob.files[1].checksum == 'adler32:1234'
    assert dbJob.files[1].user_filesize == 1024
    # Fixed: this assertion checked files[0] a second time instead of files[1]
    assert dbJob.files[1].file_metadata['mykey'] == 'myvalue'
    # The second logical file gets its own file_index
    assert dbJob.files[2].file_index == 1
    assert dbJob.files[2].source_surl == 'https://host.com/another/file'
    assert dbJob.files[2].dest_surl == 'https://dest.net/another/destination'
    assert dbJob.files[2].checksum == 'adler32:56789'
    assert dbJob.files[2].user_filesize == 512
    assert dbJob.files[2].file_metadata['flag'] == True
def remove_cloud_storage(self, storage_name, start_response):
    """
    Remove a registered cloud storage

    Deletes the storage entry plus every user registration bound to it,
    answering 204 on success and 404 if it does not exist.
    """
    storage = Session.query(CloudStorage).get(storage_name)
    if storage is None:
        raise HTTPNotFound('The storage does not exist')
    try:
        # Drop the dependent user registrations first, then the storage itself
        users = Session.query(CloudStorageUser).filter(CloudStorageUser.storage_name == storage_name)
        users.delete()
        Session.delete(storage)
        Session.commit()
    except:
        Session.rollback()
        raise
    start_response('204 No Content', [])
    return ['']
def test_ban_se_cancel_vo(self):
    """
    Cancel a SE that has files queued, make sure they are canceled (with VO)

    The ban targets vo_name=testvo, so only the testvo job is canceled
    while the atlas jobs stay SUBMITTED.
    """
    jobs = list()
    jobs.append(
        insert_job('testvo', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'))
    jobs.append(
        insert_job('atlas', 'gsiftp://source', 'gsiftp://destination', 'SUBMITTED'))
    jobs.append(
        insert_job('atlas', 'gsiftp://source', 'gsiftp://destination2', 'SUBMITTED'))

    canceled_ids = self.app.post(url="/ban/se", params={
        'storage': 'gsiftp://source',
        'status': 'cancel',
        'vo_name': 'testvo'
    }, status=200).json

    # Only the testvo job (jobs[0]) should have been canceled
    self.assertEqual(1, len(canceled_ids))
    self.assertIn(jobs[0], canceled_ids)

    # Verify the job and file states directly in the database
    for job_id in jobs:
        job = Session.query(Job).get(job_id)
        files = Session.query(File).filter(File.job_id == job_id)
        if job_id in canceled_ids:
            self.assertEqual('CANCELED', job.job_state)
        else:
            self.assertEqual('SUBMITTED', job.job_state)
        for f in files:
            if job_id in canceled_ids:
                self.assertEqual('CANCELED', f.file_state)
            else:
                self.assertEqual('SUBMITTED', f.file_state)
def test_reset_and_add(self):
    """
    Set, reset, add a new one
    """
    self.test_reset()
    new_config = dict(
        retry=42, max_time_queue=22, global_timeout=55,
        sec_per_mb=1, show_user_dn=True, vo_name='atlas'
    )
    self.app.post_json(url="/config/global", params=new_config, status=200)

    # Both the new (atlas) and the pre-existing (dteam) entries must exist
    for vo in ('atlas', 'dteam'):
        self.assertIsNotNone(Session.query(ServerConfig).get(vo))

    audit = Session.query(ConfigAudit).all()
    self.assertEqual(3, len(audit))
def test_multihop_lfc(self):
    """
    Submit a multihop transfer with a final LFC hop
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    job = {
        'files': [
            {
                'sources': ['http://source.es:8446/file'],
                'destinations': ['http://intermediate.ch:8447/file'],
            },
            {
                'sources': ['http://intermediate.ch:8447/file'],
                'destinations': ['lfc://lfc.ch/lfn']
            }
        ],
        'params': {'overwrite': True, 'multihop': True}
    }

    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(job),
        status=200
    ).json['job_id']

    # The hashed ID must be the same for all files!
    # Also, the reuse flag must be 'H' in the database
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_type, 'H')
    files = Session.query(File).filter(File.job_id == job_id).all()
    self.assertEquals(2, len(files))
    # All hops must be scheduled on the same host (same hashed_id)
    hashed = files[0].hashed_id
    for f in files:
        self.assertEqual(hashed, f.hashed_id)
def test_ban_dn(self):
    """
    Just ban a DN and unban it, make sure changes go into the DB
    """
    canceled = self.app.post(url='/ban/dn', params={
        'user_dn': '/DC=cern/CN=someone',
        'message': 'TEST BAN'
    }, status=200).json
    self.assertEqual(0, len(canceled))

    # The ban, including the message, must be stored
    entry = Session.query(BannedDN).get('/DC=cern/CN=someone')
    self.assertNotEqual(None, entry)
    self.assertEqual(self.get_user_credentials().user_dn, entry.admin_dn)
    self.assertEqual('TEST BAN', entry.message)

    # Unban and verify the entry disappears
    self.app.delete(url="/ban/dn?user_dn=%s" % urllib.quote('/DC=cern/CN=someone'), status=204)
    entry = Session.query(BannedDN).get('/DC=cern/CN=someone')
    self.assertEqual(None, entry)
def list_authz(self):
    """
    List granted accesses
    """
    filters = get_input_as_dict(request, from_query=True)
    query = Session.query(AuthorizationByDn)
    # Narrow by DN and/or operation when supplied in the query string
    dn_filter = filters.get('dn')
    if dn_filter:
        query = query.filter(AuthorizationByDn.dn == dn_filter)
    op_filter = filters.get('operation')
    if op_filter:
        query = query.filter(AuthorizationByDn.operation == op_filter)
    return query.all()
def get_global_config(self):
    """
    Get the global configuration
    """
    # Only retry is bound to a VO; the other settings are global (no VO)
    result = {'*': ServerConfig()}
    for entry in Session.query(ServerConfig).all():
        if not entry:
            continue
        # NULL and '*' vo_name rows both represent the global entry
        key = '*' if entry.vo_name in (None, '*') else entry.vo_name
        result[key] = entry
    return result
def hosts_activity(self):
    """
    What are the hosts doing
    """
    response = dict()
    # Hosts with staging (STARTED) files, keyed by the agent DN
    staging_rows = Session.execute("SELECT COUNT(*), agent_dn "
                                   " FROM t_file "
                                   " WHERE file_state = 'STARTED' "
                                   " GROUP BY agent_dn")
    for (n_staging, host) in staging_rows:
        response[host] = dict(staging=n_staging)
    # Hosts with ACTIVE transfers, keyed by the transfer host
    active_rows = Session.execute("SELECT COUNT(*), transferHost "
                                  " FROM t_file "
                                  " WHERE file_state = 'ACTIVE' "
                                  " GROUP BY transferHost")
    for (n_active, host) in active_rows:
        response.setdefault(host, dict())['active'] = n_active
    return response
def test_authorize_config_via_db(self):
    """
    Credentials with no vo extensions, if the DN is in the database as
    authorized, configuration should be allowed
    """
    del self.creds
    del self.env['fts3.User.Credentials']
    env = dict(GRST_CRED_AURI_0='dn:' + TestAuthorization.DN)

    def reload_creds():
        # Rebuild the credentials from the bare environment
        self.creds = fts3auth.UserCredentials(env, TestAuthorization.ROLES)
        self.env['fts3.User.Credentials'] = self.creds

    reload_creds()
    # Without a DB grant, CONFIG must be denied
    self.assertFalse(fts3auth.authorized(fts3auth.CONFIG, env=self.env))

    grant = AuthorizationByDn(dn=TestAuthorization.DN, operation=fts3auth.CONFIG)
    Session.merge(grant)
    Session.commit()

    # Force reload of creds
    reload_creds()
    self.assertTrue(fts3auth.authorized(fts3auth.CONFIG, env=self.env))
def test_ban_se(self):
    """
    Just ban a SE and unban it, make sure changes go into the DB
    """
    storage = 'gsiftp://nowhere'
    canceled = self.app.post(
        url="/ban/se",
        params={'storage': storage, 'message': 'TEST BAN 42'},
        status=200
    ).json
    self.assertEqual(0, len(canceled))

    entry = Session.query(BannedSE).filter(BannedSE.se == storage).first()
    self.assertNotEqual(None, entry)
    self.assertEqual(self.get_user_credentials().user_dn, entry.admin_dn)
    self.assertEqual('CANCEL', entry.status)
    self.assertEqual('TEST BAN 42', entry.message)

    # Lift the ban and verify the row disappears
    self.app.delete(url="/ban/se?storage=%s" % urllib.quote(storage), status=204)
    entry = Session.query(BannedSE).filter(BannedSE.se == storage).first()
    self.assertEqual(None, entry)
def test_ban_se_vo(self):
    """
    Just ban a SE and unban it, specifying a VO
    """
    canceled = self.app.post(url="/ban/se", params={
        'storage': 'gsiftp://nowhere',
        'vo_name': 'testvo'
    }, status=200).json
    self.assertEqual(0, len(canceled))
    banned = Session.query(BannedSE).get(('gsiftp://nowhere', 'testvo'))
    self.assertNotEqual(None, banned)
    self.assertEqual(self.get_user_credentials().user_dn, banned.admin_dn)
    self.assertEqual('CANCEL', banned.status)
    self.assertEqual('testvo', banned.vo)
    self.app.delete(url="/ban/se?storage=%s&vo_name=testvo" % urllib.quote('gsiftp://nowhere'), status=204)
    # Fixed: the post-delete check used the key ('gsiftp://nowhere', 'someone'),
    # which is never banned, so the assertion was vacuously true even if the
    # DELETE had failed. Query the VO that was actually banned ('testvo').
    banned = Session.query(BannedSE).get(('gsiftp://nowhere', 'testvo'))
    self.assertEqual(None, banned)
def test_set_activity_share(self, legacy=False):
    """
    Set a collection of activity shares for a given VO
    """
    self.setup_gridsite_environment()
    # The legacy wire format is a list of single-key dicts; the current
    # format is one flat dict. Both must end up identical in the DB.
    if legacy:
        share = [{"High": 80}, {"Medium": 15}, {"Low": 5}]
    else:
        share = {"High": 80, "Medium": 15, "Low": 5}
    msg = {"vo": "dteam", "active": True, "share": share}
    self.app.post_json(url="/config/activity_shares", params=msg, status=200)

    stored = Session.query(ActivityShare).get("dteam")
    self.assertIsNotNone(stored)
    self.assertEqual(stored.vo, "dteam")
    self.assertTrue(stored.active)
    # Regardless of the submission format, this is what FTS3 expects on the DB
    self.assertEqual(
        stored.activity_share,
        [{"High": 80}, {"Medium": 15}, {"Low": 5}]
    )
    audit = Session.query(ConfigAudit).order_by(ConfigAudit.datetime.desc())[0]
    self.assertEqual(audit.action, "activity-share")
def _get_proxy():
    """
    Dump the user's delegated proxy to a temporary PEM file and return it.

    Raises HTTPAuthenticationTimeout when no proxy exists or it has expired.
    """
    user = request.environ['fts3.User.Credentials']
    cred = Session.query(Credential).get((user.delegation_id, user.user_dn))
    if cred is None:
        raise HTTPAuthenticationTimeout('No delegated proxy available')
    if cred.termination_time <= datetime.utcnow():
        raise HTTPAuthenticationTimeout('Delegated proxy expired (%s)' % user.delegation_id)
    # delete=False: the caller owns the file and is responsible for cleanup
    proxy_file = tempfile.NamedTemporaryFile(mode='w', suffix='.pem',
                                             prefix='rest-proxy-', delete=False)
    proxy_file.write(cred.proxy)
    # Make sure the content hits the disk before anyone else reads the path
    proxy_file.flush()
    os.fsync(proxy_file.fileno())
    return proxy_file
def _terminal(self, state, window):
    """
    Submit a job and force it (and its files) into a terminal state,
    back-dating the finish time by `window`.
    """
    job_id = self._submit()
    end_time = datetime.utcnow() - window
    job = Session.query(Job).get(job_id)
    job.finish_time = end_time
    job.job_finished = end_time
    job.job_state = state
    Session.merge(job)
    for transfer in Session.query(File).filter(File.job_id == job_id):
        transfer.finish_time = end_time
        transfer.job_finished = end_time
        transfer.file_state = state
        Session.merge(transfer)
    Session.commit()
    return job_id
def test_set_drain(self):
    """
    Set one host to drain
    """
    self.app.post_json(url="/config/drain",
                       params=dict(hostname='host1.cern.ch', drain=True),
                       status=200)
    # Only the targeted host must be draining, both in the DB...
    for db_host in Session.query(Host).all():
        if db_host.hostname == 'host1.cern.ch':
            self.assertTrue(db_host.drain)
        else:
            self.assertFalse(db_host.drain)
    # ...and as reported by the config endpoint
    reported = self.app.get_json(url="/config", status=200).json
    for item in reported:
        if item['hostname'] == 'host1.cern.ch':
            self.assertTrue(item['drain'])
        else:
            self.assertFalse(item['drain'])
    self.assertEqual(1, len(Session.query(ConfigAudit).all()))
def test_set_se_config(self):
    """
    Set SE config
    """
    config = {'test.cern.ch': self.host_config}
    self.app.post_json("/config/se", params=config, status=200)

    self.assertEqual(2, len(Session.query(ConfigAudit).all()))

    op_rows = Session.query(OperationConfig).filter(
        OperationConfig.host == 'test.cern.ch').all()
    self.assertEqual(4, len(op_rows))
    # Every stored operation limit must match what was submitted
    for row in op_rows:
        expected = config[row.host]['operations'][row.vo_name][row.operation]
        self.assertEqual(expected, row.concurrent_ops)

    se = Session.query(Se).filter(Se.storage == 'test.cern.ch').first()
    self.assertEqual(1, se.ipv6)
    self.assertEqual(55, se.outbound_max_active)
    self.assertEqual(11, se.inbound_max_active)
    self.assertEqual(33, se.inbound_max_throughput)
def _become_root(self):
    """
    Helper function to become root superuser
    """
    root_dn = '/C=CH/O=CERN/OU=hosts/OU=cern.ch/CN=ftsdummyhost.cern.ch'
    # Present the host certificate DN both as the client credential and
    # as the server DN
    self.app.extra_environ.update({
        'GRST_CRED_AURI_0': 'dn:' + root_dn,
        'SSL_SERVER_S_DN': root_dn,
    })
    creds = self.get_user_credentials()
    # Store a (dummy) delegated proxy so delegation checks pass
    delegated = Credential()
    delegated.dlg_id = creds.delegation_id
    delegated.dn = root_dn
    delegated.proxy = '-NOT USED-'
    delegated.voms_attrs = None
    delegated.termination_time = datetime.utcnow() + timedelta(hours=7)
    Session.merge(delegated)
    Session.commit()
def test_cancel_only_file(self):
    """
    Cancel the only file in a job. The job must go to CANCELED.
    """
    job_id = self._submit(1)
    file_list = self.app.get(url="/jobs/%s/files" % job_id, status=200).json
    only_file_id = file_list[0]['file_id']
    self.app.delete(url="/jobs/%s/files/%s" % (job_id, only_file_id))
    # Canceling the single file must cancel the whole job
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, 'CANCELED')
    self.assertEqual('CANCELED', job.files[0].file_state)
def _test_cancel_file_asserts(self, job_id, expect_job, expect_files):
    """
    Helper for test_cancel_remaining_file
    """
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, expect_job)
    # job_finished is set only once the job leaves the active states
    if expect_job in JobActiveStates:
        self.assertIsNone(job.job_finished)
    else:
        self.assertIsNotNone(job.job_finished)
    canceled_file, remaining_files = job.files[0], job.files[1:]
    self.assertEqual('CANCELED', canceled_file.file_state)
    self.assertIsNotNone(canceled_file.finish_time)
    for remaining in remaining_files:
        self.assertEqual(expect_files, remaining.file_state)
def test_job_priority(self):
    """
    Submit a job, change priority later
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    submission = {
        'files': [{
            'sources': ['root://source.es/file'],
            'destinations': ['root://dest.ch/file'],
        }],
        'params': {'priority': 2}
    }
    job_id = self.app.post_json(
        url="/jobs",
        params=submission,
        status=200
    ).json['job_id']
    self.assertEqual(2, Session.query(Job).get(job_id).priority)

    # Bump the priority via the modification endpoint
    self.app.post_json(
        url="/jobs/%s" % str(job_id),
        params={'params': {'priority': 4}},
        status=200
    )
    self.assertEqual(4, Session.query(Job).get(job_id).priority)
def test_submit_reuse_auto_big(self):
    """
    Submit big files with reuse not set (auto). It should be disabled.
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    job = {
        'files': [
            {
                'sources': ['http://source.es:8446/file'],
                'destinations': ['http://dest.ch:8447/file'],
                'filesize': 2**30,
            },
            {
                'sources': ['http://source.es:8446/otherfile'],
                'destinations': ['http://dest.ch:8447/otherfile'],
                'filesize': 2**30,
            }
        ],
        'params': {'overwrite': True, 'reuse': None}
    }
    job_id = self.app.post(
        url="/jobs",
        content_type='application/json',
        params=json.dumps(job),
        status=200
    ).json['job_id']
    # Big files => session reuse must be auto-disabled ('N' = normal job)
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_type, 'N')
    # Fixed: removed dead local `hashed = files[0].hashed_id` left over from
    # the reuse/multihop tests — it was never asserted against here.
    files = Session.query(File).filter(File.job_id == job_id)
    for f in files:
        self.assertEqual(2**30, f.user_filesize)
def get_my_apps(self):
    """
    Returns the list of registered apps
    """
    user = pylons.request.environ['fts3.User.Credentials']
    # Applications owned by the requesting user
    my_apps = Session.query(OAuth2Application).filter(
        OAuth2Application.owner == user.user_dn).all()
    # Applications the user has granted a token to
    authorized_apps = Session.query(
        OAuth2Application.client_id, OAuth2Application.name,
        OAuth2Application.website, OAuth2Application.description,
        OAuth2Token.refresh_token, OAuth2Token.scope,
        OAuth2Token.expires, OAuth2Application.scope).filter(
            (OAuth2Token.dlg_id == user.delegation_id) &
            (OAuth2Token.client_id == OAuth2Application.client_id))
    response = {'apps': my_apps, 'authorized': authorized_apps}
    if _accept_html(pylons.request.accept):
        pylons.response.headers['Content-Type'] = 'text/html; charset=UTF-8'
        response['user'] = user
        response['site'] = pylons.config['fts3.SiteName']
        return render('/apps.html', extra_vars=response)
    pylons.response.headers['Content-Type'] = 'application/json'
    # Better serialization for authorized apps
    response['authorized'] = [
        {
            'name': auth.name,
            'website': auth.website,
            'description': auth.description,
            'scope': auth.scope,
            'expires': auth.expires,
        }
        for auth in authorized_apps
    ]
    return [to_json(response)]