def test_expired(self):
    """
    Get a token, the token expires, so it should be denied
    """
    client_id, access_token, refresh_token, expires = self.test_get_token()
    del self.app.extra_environ['GRST_CRED_AURI_0']

    auth = {'Authorization': str('Bearer %s' % access_token)}

    # While still valid, the token must authenticate us via oauth2
    response = self.app.get(url="/whoami", headers=auth, status=200)
    whoami = json.loads(response.body)
    self.assertEqual('oauth2', whoami['method'])

    # Force the token to have expired one hour ago
    token = Session.query(OAuth2Token).get((client_id, refresh_token))
    token.expires = datetime.utcnow() - timedelta(hours=1)
    Session.merge(token)
    Session.commit()

    # The same token must now be rejected
    response = self.app.get(url="/whoami", headers=auth, status=403)
def test_cancel_running(self):
    """
    Cancel a job, but the transfer is running (pid is set)
    """
    job_id = self._submit()

    # Simulate a running transfer by attaching a pid to it
    transfer = Session.query(File).filter(File.job_id == job_id).first()
    transfer.pid = 1234
    Session.merge(transfer)
    Session.commit()

    job = self.app.delete(url="/jobs/%s" % job_id, status=200).json
    self.assertEqual(job['job_id'], job_id)
    self.assertEqual(job['job_state'], 'CANCELED')
    self.assertEqual(job['reason'], 'Job canceled by the user')

    # Check the database: job terminal, files canceled, but the files'
    # finish_time left unset (presumably filled in later by the server)
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, 'CANCELED')
    self.assertNotEqual(None, job.job_finished)
    for f in job.files:
        self.assertEqual(f.file_state, 'CANCELED')
        self.assertEqual(None, f.finish_time)
def test_cancel_some_terminal(self):
    """
    Cancel a job with some files in terminal state
    """
    job_id = self._submit(10)

    # Put the job in ACTIVE, with every even-id file already FINISHED
    job = Session.query(Job).get(job_id)
    job.job_state = 'ACTIVE'
    for f in job.files:
        if f.file_id % 2 == 0:
            f.file_state = 'FINISHED'
    Session.merge(job)
    Session.commit()

    job = self.app.delete(url="/jobs/%s" % job_id, status=200).json
    self.assertEqual(job['job_id'], job_id)
    self.assertEqual(job['job_state'], 'CANCELED')
    self.assertEqual(job['reason'], 'Job canceled by the user')

    # Finished files must keep their state and reason; the rest are canceled
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, 'CANCELED')
    for f in job.files:
        if f.file_id % 2 == 0:
            self.assertEqual(f.file_state, 'FINISHED')
            self.assertNotEqual(f.reason, 'Job canceled by the user')
        else:
            self.assertEqual(f.file_state, 'CANCELED')
def update_app(self, client_id):
    """
    Update an application
    """
    user = pylons.request.environ['fts3.User.Credentials']
    app = Session.query(OAuth2Application).get(client_id)
    if not app:
        raise HTTPNotFound('Application not found')
    # Only the owner may modify or delete the registration
    if app.owner != user.user_dn:
        raise HTTPForbidden()
    # Accept either a JSON body or a regular form POST
    if pylons.request.headers['Content-Type'].startswith('application/json'):
        fields = json.loads(pylons.request.body)
    else:
        fields = pylons.request.POST
    try:
        if 'delete' not in fields:
            # Plain update of the mutable fields
            app.description = fields.get('description', '')
            app.website = fields.get('website', '')
            app.redirect_to = fields.get('redirect_to', '')
            Session.merge(app)
            Session.commit()
            # NOTE(review): redirect() raises an HTTP exception, so it passes
            # through the bare except below (rollback after a successful
            # commit is a no-op) and is re-raised
            redirect(url_for(controller='oauth2', action='get_app'), code=HTTPSeeOther.code)
        else:
            # Delete the application and everything issued on its behalf
            Session.delete(app)
            Session.query(OAuth2Token).filter(OAuth2Token.client_id == client_id).delete()
            Session.query(OAuth2Code).filter(OAuth2Code.client_id == client_id).delete()
            Session.commit()
            redirect(url_for(controller='oauth2', action='get_my_apps'), code=HTTPSeeOther.code)
    except:
        Session.rollback()
        raise
def set_activity_shares(self):
    """
    Set a new/modify an activity share

    Expects 'vo', 'share' (activity name -> numeric weight) and an optional
    'active' flag (defaults to True) in the request body.

    Raises:
        HTTPBadRequest: on missing fields, non-numeric weights, or a
                        ValueError raised while building the share
    """
    input_dict = get_input_as_dict(request)
    if not input_dict.get('vo', None):
        raise HTTPBadRequest('Missing VO')
    if not input_dict.get('share', None):
        raise HTTPBadRequest('Missing share')
    if 'active' not in input_dict:
        input_dict['active'] = True
    input_dict['share'] = _normalize_activity_share_format(input_dict['share'])

    # Make sure the share weights are numbers
    # NOTE(review): the exact type check (not isinstance) also rejects bool
    # and long values; kept as-is to preserve validation behaviour
    for entry in input_dict['share']:
        for key, value in entry.iteritems():
            if not type(value) in (float, int):
                raise HTTPBadRequest('Share weight must be a number')

    try:
        activity_share = ActivityShare(vo=input_dict['vo'],
                                       active=input_dict['active'],
                                       activity_share=input_dict['share'])
        Session.merge(activity_share)
        audit_configuration('activity-share', json.dumps(input_dict))
        Session.commit()
    # FIX: 'except ValueError, e' is legacy syntax; 'as' works on 2.6+
    except ValueError as e:
        raise HTTPBadRequest(str(e))
def request(self, dlg_id, start_response):
    """
    First step of the delegation process: get a certificate request

    The returned certificate request must be signed with the user's original
    credentials.
    """
    user = request.environ['fts3.User.Credentials']
    # The delegation id is bound to the credentials; they must agree
    if dlg_id != user.delegation_id:
        raise HTTPForbidden('The requested ID and the credentials ID do not match')
    credential_cache = Session.query(CredentialCache)\
        .get((user.delegation_id, user.user_dn))
    if credential_cache is None or credential_cache.cert_request is None:
        # No usable cached request: generate a key pair + CSR and store it
        (x509_request, private_key) = _generate_proxy_request()
        credential_cache = CredentialCache(dlg_id=user.delegation_id, dn=user.user_dn,
                                           cert_request=x509_request.as_pem(),
                                           priv_key=private_key.as_pem(cipher=None),
                                           voms_attrs=' '.join(user.voms_cred))
        try:
            Session.merge(credential_cache)
            Session.commit()
        except Exception:
            Session.rollback()
            raise
        log.debug("Generated new credential request for %s" % dlg_id)
    else:
        log.debug("Using cached request for %s" % dlg_id)
    # str() so the WSGI header value is a native string
    start_response('200 Ok', [('X-Delegation-ID', str(credential_cache.dlg_id)),
                              ('Content-Type', 'text/plain')])
    return [credential_cache.cert_request]
def add_user_to_cloud_storage(self, storage_name, start_response):
    """
    Add a user or a VO credentials to the storage
    """
    storage = Session.query(CloudStorage).get(storage_name)
    if not storage:
        raise HTTPNotFound('The storage does not exist')

    input_dict = get_input_as_dict(request)
    # Exactly one of user_dn / vo_name must identify the credential owner
    if not input_dict.get('user_dn', None) and not input_dict.get('vo_name', None):
        raise HTTPBadRequest('One of user_dn or vo_name must be specified')
    elif input_dict.get('user_dn', None) and input_dict.get('vo_name', None):
        raise HTTPBadRequest('Only one of user_dn or vo_name must be specified')

    # Accept both S3-style (access_key/secret_key) and OAuth-style
    # (access_token/access_token_secret) field names
    cuser = CloudStorageUser(
        user_dn=input_dict.get('user_dn', ''),
        storage_name=storage_name,
        access_token=input_dict.get('access_key', input_dict.get('access_token', None)),
        access_token_secret=input_dict.get('secret_key', input_dict.get('access_token_secret', None)),
        request_token=input_dict.get('request_token'),
        request_token_secret=input_dict.get('request_token_secret'),
        vo_name=input_dict.get('vo_name', ''),
    )
    try:
        Session.merge(cuser)
        Session.commit()
    except:
        Session.rollback()
        raise
    start_response('201 Created', [])
    return dict(storage_name=cuser.storage_name, user_dn=cuser.user_dn, vo_name=cuser.vo_name)
def test_set_voms(self):
    """
    The server must regenerate a proxy with VOMS extensions
    Need a real proxy for this one
    """
    self.setup_gridsite_environment()
    creds = self.get_user_credentials()

    # A real proxy is mandatory here; skip if none is available
    proxy_pem = self.get_real_x509_proxy()
    if proxy_pem is None:
        raise SkipTest('Could not get a valid real proxy for test_set_voms')

    proxy = Credential()
    proxy.dn = creds.user_dn
    proxy.dlg_id = creds.delegation_id
    proxy.termination_time = datetime.utcnow() + timedelta(hours=1)
    proxy.proxy = proxy_pem
    Session.merge(proxy)
    Session.commit()

    # Request the voms extensions
    self.app.post_json(url="/delegation/%s/voms" % creds.delegation_id,
                       params=['dteam:/dteam/Role=lcgadmin'],
                       status=203)

    # The stored proxy must be a new one carrying the attributes
    stored = Session.query(Credential).get((creds.delegation_id, creds.user_dn))
    self.assertNotEqual(proxy.proxy, stored.proxy)
    self.assertEqual('dteam:/dteam/Role=lcgadmin', stored.voms_attrs)
def test_expired(self):
    """
    Get a token, the token expires, so it should be denied
    """
    raise SkipTest('Disabled as code is not used atm')

    client_id, access_token, refresh_token, expires = self.test_get_token()
    del self.app.extra_environ['GRST_CRED_AURI_0']

    headers = {'Authorization': str('Bearer %s' % access_token)}

    # Valid token: authenticated via oauth2
    whoami = self.app.get(url="/whoami", headers=headers, status=200).json
    self.assertEqual('oauth2', whoami['method'])

    # Expire the token one hour in the past
    token = Session.query(OAuth2Token).get((client_id, refresh_token))
    token.expires = datetime.utcnow() - timedelta(hours=1)
    Session.merge(token)
    Session.commit()

    # Expired token: denied
    self.app.get(url="/whoami", headers=headers, status=403)
def test_set_voms(self):
    """
    The server must regenerate a proxy with VOMS extensions
    Need a real proxy for this one
    """
    self.setup_gridsite_environment()
    creds = self.get_user_credentials()

    # Push a real proxy, or skip if we cannot obtain one
    proxy_pem = self.get_real_x509_proxy()
    if proxy_pem is None:
        raise SkipTest('Could not get a valid real proxy for test_set_voms')

    proxy = Credential()
    proxy.dn = creds.user_dn
    proxy.dlg_id = creds.delegation_id
    proxy.termination_time = datetime.utcnow() + timedelta(hours=1)
    proxy.proxy = proxy_pem
    Session.merge(proxy)
    Session.commit()

    # Request the voms extensions (raw POST with a JSON body)
    self.app.post(url="/delegation/%s/voms" % creds.delegation_id,
                  content_type='application/json',
                  params=json.dumps(['dteam:/dteam/Role=lcgadmin']),
                  status=203)

    # Validate: the proxy was replaced and carries the attributes
    regenerated = Session.query(Credential).get((creds.delegation_id, creds.user_dn))
    self.assertNotEqual(proxy.proxy, regenerated.proxy)
    self.assertEqual('dteam:/dteam/Role=lcgadmin', regenerated.voms_attrs)
def test_get_retries(self):
    """
    Get the retries for a file, forcing one
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    job_id = self._submit()

    files = self.app.get(url="/jobs/%s/files" % job_id, status=200).json
    file_id = files[0]['file_id']

    # Inject a single retry entry for the first file
    retry = FileRetryLog()
    retry.file_id = file_id
    retry.attempt = 1
    retry.datetime = datetime.utcnow()
    retry.reason = 'Blahblahblah'
    Session.merge(retry)
    Session.commit()

    url = "/jobs/%s/files/%d/retries" % (job_id, file_id)
    retries = self.app.get(url=url, status=200).json

    self.assertEqual(1, len(retries))
    self.assertEqual(1, retries[0]['attempt'])
    self.assertEqual('Blahblahblah', retries[0]['reason'])
def request(self, dlg_id, start_response):
    """
    First step of the delegation process: get a certificate request

    The returned certificate request must be signed with the user's original
    credentials.
    """
    user = request.environ['fts3.User.Credentials']
    # The delegation id is bound to the credentials; they must agree
    if dlg_id != user.delegation_id:
        raise HTTPForbidden('The requested ID and the credentials ID do not match')
    credential_cache = Session.query(CredentialCache)\
        .get((user.delegation_id, user.user_dn))
    if credential_cache is None or credential_cache.cert_request is None:
        # No usable cached request: generate a key pair + CSR and store it
        (x509_request, private_key) = _generate_proxy_request()
        credential_cache = CredentialCache(dlg_id=user.delegation_id, dn=user.user_dn,
                                           cert_request=x509_request.as_pem(),
                                           priv_key=private_key.as_pem(cipher=None),
                                           voms_attrs=' '.join(user.voms_cred))
        try:
            Session.merge(credential_cache)
            Session.commit()
        except Exception:
            Session.rollback()
            raise
        log.debug("Generated new credential request for %s" % dlg_id)
    else:
        log.debug("Using cached request for %s" % dlg_id)
    # FIX: wrap dlg_id in str() so the WSGI header value is a native string
    # (consistent with the other implementations of this handler)
    start_response('200 Ok', [('X-Delegation-ID', str(credential_cache.dlg_id)),
                              ('Content-Type', 'text/plain')])
    return [credential_cache.cert_request]
def set_drain(self):
    """
    Set the drain status of a server

    Expects 'hostname' (string) and optionally 'drain' (bool, default True)
    in the request body. All rows for the host are updated.

    Raises:
        HTTPBadRequest: invalid types or unknown host
    """
    input_dict = get_input_as_dict(request)
    hostname = input_dict.get('hostname', None)
    drain = input_dict.get('drain', True)
    if not isinstance(drain, bool) or not isinstance(hostname, basestring):
        raise HTTPBadRequest('Invalid drain request')
    entries = Session.query(Host).filter(Host.hostname == hostname).all()
    if not entries:
        raise HTTPBadRequest('Host not found')
    try:
        # FIX: audit message used to read 'Turning drain True the drain mode'
        audit_configuration('drain', 'Set drain to %s for %s' % (drain, hostname))
        for entry in entries:
            entry.drain = drain
            Session.merge(entry)
        Session.commit()
    except:
        Session.rollback()
        raise
def cancel(self, id, **kwargs):
    """DELETE /jobs/id: Delete an existing item"""
    job = self._getJob(id)
    if job.job_state in JobActiveStates:
        # NOTE(review): local time, while most of this codebase uses
        # datetime.utcnow(); confirm which one the schema expects
        now = datetime.now()
        job.job_state = 'CANCELED'
        job.finish_time = now
        job.job_finished = now
        job.reason = 'Job canceled by the user'
        # Only files still active are canceled; terminal files keep
        # their state and reason
        for f in job.files:
            if f.file_state in JobActiveStates:
                f.file_state = 'CANCELED'
                f.job_finished = now
                f.finish_time = now
                f.reason = 'Job canceled by the user'
        Session.merge(job)
        Session.commit()
    # Re-read so the caller gets the persisted state
    job = self._getJob(id)
    # Touching the relationship loads the files before returning; the
    # local name itself is otherwise unused
    files = job.files
    return job
def test_get_retries(self):
    """
    Get the retries for a file, forcing one
    """
    self.setup_gridsite_environment()
    self.push_delegation()
    job_id = self._submit()

    answer = self.app.get(url="/jobs/%s/files" % job_id, status=200)
    file_id = json.loads(answer.body)[0]['file_id']

    # Force one retry log entry for that file
    retry = FileRetryLog()
    retry.file_id = file_id
    retry.attempt = 1
    retry.datetime = datetime.utcnow()
    retry.reason = 'Blahblahblah'
    Session.merge(retry)
    Session.commit()

    answer = self.app.get(url="/jobs/%s/files/%d/retries" % (job_id, file_id),
                          status=200)
    retries = json.loads(answer.body)

    self.assertEqual(1, len(retries))
    self.assertEqual(1, retries[0]['attempt'])
    self.assertEqual('Blahblahblah', retries[0]['reason'])
def test_cancel_terminal(self):
    """
    Cancel a job with files in terminal state
    """
    job_id = self._submit()

    # Force the job and all its files into FINISHED
    job = Session.query(Job).get(job_id)
    job.job_state = 'FINISHED'
    for f in job.files:
        f.file_state = 'FINISHED'
    Session.merge(job)
    Session.commit()

    answer = self.app.delete(url="/jobs/%s" % job_id, status=200)
    job = json.loads(answer.body)
    self.assertEqual(job['job_id'], job_id)
    self.assertEqual(job['job_state'], 'FINISHED')
    self.assertNotEqual(job['reason'], 'Job canceled by the user')

    # A terminal job must be left untouched in the database
    job = Session.query(Job).get(job_id)
    self.assertEqual(job.job_state, 'FINISHED')
    for f in job.files:
        self.assertEqual(f.file_state, 'FINISHED')
def _prepare_and_test_created_jobs_to_cancel(self, files_per_job=8):
    """
    Helper function to prepare and test created jobs for cancel tests
    """
    all_states = FileActiveStates + FileTerminalStates
    job_ids = [self._submit(files_per_job) for _ in range(len(all_states))]

    # Drive each job's files into one of the states
    for job_id, state in zip(job_ids, all_states):
        job = Session.query(Job).get(job_id)
        # STARTED maps to a STAGING job state
        job.job_state = 'STAGING' if state == 'STARTED' else state
        for f in job.files:
            f.file_state = state
        Session.merge(job)
        Session.commit()

    # Verify the setup took effect
    for job_id, state in zip(job_ids, all_states):
        job = Session.query(Job).get(job_id)
        expected = 'STAGING' if state == 'STARTED' else state
        self.assertEqual(job.job_state, expected)
        for f in job.files:
            self.assertEqual(f.file_state, state)

    return job_ids
def register(self):
    """
    Register a new third party application
    """
    # Accept either a JSON body or a regular form POST
    if pylons.request.content_type.split(';')[0].strip() == 'application/json':
        req = json.loads(pylons.request.body)
        scopes = req.get('scope', list())
    else:
        req = pylons.request.POST
        scopes = req.getall('scope')
    # A comma-separated string is also accepted for the scopes
    if isinstance(scopes, basestring):
        scopes = scopes.split(',')

    if not req.get('name', None):
        raise HTTPBadRequest('Missing application name')
    if not req.get('website', None):
        raise HTTPBadRequest('Missing application website')
    if not req.get('redirect_to', None):
        raise HTTPBadRequest('Missing redirect urls')
    # Every requested scope must be a known operation
    for s in scopes:
        if str(s) not in VALID_OPERATIONS:
            raise HTTPBadRequest('Invalid scope (%s)' % s)

    user = pylons.request.environ['fts3.User.Credentials']

    app_id = _generate_app_id()
    app = OAuth2Application(client_id=app_id,
                            client_secret=_generate_app_secret(),
                            name=req['name'],
                            description=req.get('description', ''),
                            website=req['website'],
                            scope=scopes,
                            redirect_to=req['redirect_to'],
                            owner=user.user_dn)
    try:
        Session.merge(app)
        Session.commit()
    except IntegrityError:
        # The insert conflicted — treated as a duplicate application name
        Session.rollback()
        raise HTTPForbidden('The name already exists')
    except:
        Session.rollback()
        raise
    log.info("New application registered: %s (%s)" % (req['name'], app_id))
    # Browsers get redirected to the application list; API clients get JSON
    if _accept_html(pylons.request.accept):
        redirect(url_for(controller='oauth2', action='get_my_apps'),
                 code=HTTPSeeOther.code)
    else:
        pylons.response.status_int = HTTPCreated.code
        pylons.response.headers['Content-Type'] = 'application/json'
        return [to_json(app.client_id)]
def request(self, dlg_id, start_response):
    """
    First step of the delegation process: get a certificate request

    The returned certificate request must be signed with the user's original
    credentials.
    """
    user = request.environ['fts3.User.Credentials']
    if dlg_id != user.delegation_id:
        raise HTTPForbidden(
            'The requested ID and the credentials ID do not match')
    credential_cache = Session.query(CredentialCache)\
        .get((user.delegation_id, user.user_dn))

    # Match the request's key length to the client certificate's key length,
    # defaulting to 2048 bits when no certificate is available
    user_cert = self.certificate()
    request_key_len = 2048
    if user_cert:
        user_key = X509.load_cert_string(user_cert)
        request_key_len = user_key.get_pubkey().size() * 8

    # A cached request is only reusable if its key length still matches
    cached = credential_cache is not None and credential_cache.cert_request is not None
    if cached:
        cached_key_len = X509.load_request_string(
            credential_cache.cert_request).get_pubkey().size() * 8
        if cached_key_len != request_key_len:
            cached = False
            log.debug(
                "Invalidating cache due to key length missmatch between client and cached certificates"
            )

    if not cached:
        # Generate a fresh key pair + CSR at the required length and cache it
        (x509_request, private_key) = _generate_proxy_request(request_key_len)
        credential_cache = CredentialCache(
            dlg_id=user.delegation_id, dn=user.user_dn,
            cert_request=x509_request.as_pem(),
            priv_key=private_key.as_pem(cipher=None),
            voms_attrs=' '.join(user.voms_cred))
        try:
            Session.merge(credential_cache)
            Session.commit()
        except Exception:
            Session.rollback()
            raise
        log.debug("Generated new credential request for %s" % dlg_id)
    else:
        log.debug("Using cached request for %s" % dlg_id)

    # str() so the WSGI header value is a native string
    start_response('200 Ok',
                   [('X-Delegation-ID', str(credential_cache.dlg_id)),
                    ('Content-Type', 'text/plain')])
    return [credential_cache.cert_request]
def setUp(self):
    # Inject a Dropbox app so the cloud-storage endpoints have a target
    dropbox_app = CloudStorage(storage_name='DROPBOX',
                               app_key='1234',
                               app_secret='sssh',
                               service_api_url='https://api.dropbox.com')
    Session.merge(dropbox_app)
    Session.commit()
    self.setup_gridsite_environment()
def cancel_files(self, job_id, file_ids):
    """
    Cancel individual files - comma separated for multiple - within a job

    Returns the resulting state (or explanation) for each requested file id,
    as a list, or a single value when only one id was given.
    """
    job = self._get_job(job_id)

    if job.job_type != 'N':
        raise HTTPBadRequest('Multihop or reuse jobs must be cancelled at once (%s)' % str(job.job_type))

    file_ids = file_ids.split(',')
    changed_states = list()

    try:
        # Mark files in the list as CANCELED
        for file_id in file_ids:
            file = Session.query(File).get(file_id)
            if not file or file.job_id != job_id:
                changed_states.append('File does not belong to the job')
                continue
            if file.file_state not in FileActiveStates:
                changed_states.append(file.file_state)
                continue
            file.file_state = 'CANCELED'
            file.finish_time = datetime.utcnow()
            file.dest_surl_uuid = None
            changed_states.append('CANCELED')
            Session.merge(file)

        # Mark job depending on the status of the rest of files
        # FIX: file_ids holds strings (from split) while File.file_id is
        # numeric, so 'f.file_id not in file_ids' never matched; compare
        # the stringified id instead
        not_changed_states = map(lambda f: f.file_state,
                                 filter(lambda f: str(f.file_id) not in file_ids, job.files))
        all_states = not_changed_states + changed_states

        # All files within the job have been canceled
        if len(not_changed_states) == 0:
            job.job_state = 'CANCELED'
            job.cancel_job = True
            job.job_finished = datetime.utcnow()
            log.warning('Cancelling all remaining files within the job %s' % job_id)
        # No files in non-terminal, mark the job as CANCELED too
        elif len(filter(lambda s: s in FileActiveStates, all_states)) == 0:
            log.warning('Cancelling a file within a job with others in terminal state (%s)' % job_id)
            job.job_state = 'CANCELED'
            job.cancel_job = True
            job.job_finished = datetime.utcnow()
        else:
            log.warning('Cancelling files within a job with others still active (%s)' % job_id)

        Session.merge(job)
        Session.commit()
    except:
        Session.rollback()
        raise

    return changed_states if len(changed_states) > 1 else changed_states[0]
def test_ban_dn_submission(self):
    """
    If a DN is banned, submissions from this user must not be accepted
    """
    # Ban the test user's DN
    banned = BannedDN()
    banned.dn = self.get_user_credentials().user_dn
    Session.merge(banned)
    Session.commit()

    self.push_delegation()
    # Even an empty submission must be rejected with 403
    self.app.post(url="/jobs",
                  content_type='application/json',
                  params='[]',
                  status=403)
def pushDelegation(self, lifetime=timedelta(hours=7)):
    """
    Push a fake delegated credential into the database.
    """
    creds = self.getUserCredentials()
    delegated = Credential(
        dlg_id=creds.delegation_id,
        dn=creds.user_dn,
        proxy='-NOT USED-',
        voms_attrs=None,
        termination_time=datetime.now() + lifetime,
    )
    Session.merge(delegated)
    Session.commit()
def _insert_user(self, user):
    """
    Ensure the user exists in t_credential_cache, creating a minimal
    entry when missing.
    """
    cred = Session.query(CredentialCache).filter(
        CredentialCache.dlg_id == user.delegation_id).first()
    if cred:
        return
    cred = CredentialCache(dlg_id=user.delegation_id,
                           dn=user.user_dn,
                           cert_request=None,
                           priv_key=None,
                           voms_attrs='\n'.join(user.voms_cred))
    Session.merge(cred)
def pushDelegation(self, lifetime=timedelta(hours=7)):
    """
    Insert a mock delegated proxy valid for the given lifetime.
    """
    creds = self.getUserCredentials()
    delegated = Credential()
    delegated.dlg_id = creds.delegation_id
    delegated.dn = creds.user_dn
    delegated.proxy = '-NOT USED-'
    delegated.voms_attrs = None
    # now() (local time) kept to match the original behaviour
    delegated.termination_time = datetime.now() + lifetime
    Session.merge(delegated)
    Session.commit()
def set_se_config(self):
    """
    Set the configuration parameters for a given SE

    The input is a dictionary keyed by storage name; each value may carry
    'se_info' (attributes to set on the Se row) and 'operations'
    (per-VO, per-operation concurrency limits; a limit <= 0 deletes the row).

    Returns:
        (se_info, operations) of the LAST storage processed (None if absent)
    """
    input_dict = get_input_as_dict(request)
    # FIX: initialize here — with an empty input the loop never runs and the
    # final return used to raise NameError
    se_info = None
    operations = None
    try:
        for storage, cfg in input_dict.iteritems():
            if not storage or storage.isspace():
                raise ValueError
            se_info = None
            # Update (or create) the Se row with the given attributes
            se_info_new = cfg.get('se_info', None)
            if se_info_new:
                se_info = Session.query(Se).get(storage)
                if not se_info:
                    se_info = Se(storage=storage)
                for key, value in se_info_new.iteritems():
                    #value = validate_type(Se, key, value)
                    setattr(se_info, key, value)
                audit_configuration(
                    'set-se-config',
                    'Set config %s: %s' % (storage, json.dumps(cfg)))
                Session.merge(se_info)

            # Operation limits: positive limit upserts, otherwise delete
            operations = cfg.get('operations', None)
            if operations:
                for vo, limits in operations.iteritems():
                    for op, limit in limits.iteritems():
                        limit = int(limit)
                        new_limit = Session.query(OperationConfig).get(
                            (vo, storage, op))
                        if limit > 0:
                            if not new_limit:
                                new_limit = OperationConfig(
                                    vo_name=vo, host=storage, operation=op)
                            new_limit.concurrent_ops = limit
                            Session.merge(new_limit)
                        elif new_limit:
                            Session.delete(new_limit)
                audit_configuration(
                    'set-se-limits',
                    'Set limits for %s: %s' % (storage, json.dumps(operations)))
        Session.commit()
    except (AttributeError, ValueError):
        Session.rollback()
        raise HTTPBadRequest('Malformed configuration')
    except:
        Session.rollback()
        raise
    return (se_info, operations)
def setUp(self):
    # Inject a Dropbox app
    # NOTE(review): this fixture uses the 'cloudStorage_name' keyword while
    # a sibling fixture uses 'storage_name' — verify against the
    # CloudStorage model definition
    cs = CloudStorage(
        cloudStorage_name='DROPBOX',
        app_key='1234',
        app_secret='sssh',
        service_api_url='https://api.dropbox.com'
    )
    Session.merge(cs)
    Session.commit()
    self.setup_gridsite_environment()
def persist_authorization_code(self, client_id, code, scope):
    """
    Store a new authorization code for the client, dropping any previous
    code issued for the same user.
    """
    user = pylons.request.environ['fts3.User.Credentials']
    self._insert_user(user)

    # Remove previous codes for this (client, delegation id) pair
    Session.query(OAuth2Code).filter(
        (OAuth2Code.client_id == client_id) &
        (OAuth2Code.dlg_id == user.delegation_id)).delete()

    # Persist the new code
    new_code = OAuth2Code(client_id=client_id,
                          code=code,
                          scope=scope,
                          dlg_id=user.delegation_id)
    Session.merge(new_code)
    Session.commit()
def _ban_dn(dn):
    """
    Mark in the db the given DN as banned
    """
    user = request.environ['fts3.User.Credentials']

    entry = BannedDN()
    entry.dn = dn
    entry.addition_time = datetime.utcnow()
    # Record who performed the ban
    entry.admin_dn = user.user_dn

    try:
        Session.merge(entry)
        Session.commit()
    except Exception:
        Session.rollback()
        raise
def _terminal(self, state, window):
    """
    Helper: submit a job, then force it and all its files into the given
    terminal state, finished 'window' ago. Returns the job id.
    """
    job_id = self._submit()
    finish_time = datetime.utcnow() - window

    job = Session.query(Job).get(job_id)
    job.finish_time = finish_time
    job.job_finished = finish_time
    job.job_state = state
    Session.merge(job)

    for transfer in Session.query(File).filter(File.job_id == job_id):
        transfer.finish_time = finish_time
        transfer.job_finished = finish_time
        transfer.file_state = state
        Session.merge(transfer)

    Session.commit()
    return job_id
def _ban_dn(dn, message):
    """
    Mark in the db the given DN as banned, recording the reason
    """
    user = request.environ['fts3.User.Credentials']

    entry = BannedDN()
    entry.dn = dn
    entry.addition_time = datetime.utcnow()
    # Record who performed the ban and why
    entry.admin_dn = user.user_dn
    entry.message = message

    try:
        Session.merge(entry)
        Session.commit()
    except Exception:
        Session.rollback()
        raise
def update_app(self, client_id):
    """
    Update an application
    """
    user = pylons.request.environ['fts3.User.Credentials']
    app = Session.query(OAuth2Application).get(client_id)
    if not app:
        raise HTTPNotFound('Application not found')
    # Only the owner may modify or delete the registration
    if app.owner != user.user_dn:
        raise HTTPForbidden()
    # Accept either a JSON body or a regular form POST
    if pylons.request.headers['Content-Type'].startswith(
            'application/json'):
        fields = json.loads(pylons.request.body)
        scopes = fields.get('scope', list())
    else:
        fields = pylons.request.POST
        scopes = fields.getall('scope')
    # A comma-separated string is also accepted for the scopes
    if isinstance(scopes, basestring):
        scopes = scopes.split(',')
    # Every requested scope must be a known operation
    for s in scopes:
        if str(s) not in VALID_OPERATIONS:
            raise HTTPBadRequest('Invalid scope (%s)' % s)
    try:
        if 'delete' not in fields:
            # Plain update of the mutable fields
            app.description = fields.get('description', '')
            app.website = fields.get('website', '')
            app.redirect_to = fields.get('redirect_to', '')
            app.scope = scopes
            Session.merge(app)
            Session.commit()
            # NOTE(review): redirect() raises, so it passes through the bare
            # except below (rollback after commit is a no-op) and is re-raised
            redirect(url_for(controller='oauth2', action='get_app'),
                     code=HTTPSeeOther.code)
        else:
            # Delete the application and anything issued on its behalf
            Session.delete(app)
            Session.query(OAuth2Token).filter(
                OAuth2Token.client_id == client_id).delete()
            Session.query(OAuth2Code).filter(
                OAuth2Code.client_id == client_id).delete()
            Session.commit()
            redirect(url_for(controller='oauth2', action='get_my_apps'),
                     code=HTTPSeeOther.code)
    except:
        Session.rollback()
        raise
def register(self):
    """
    Register a new third party application
    """
    # Accept either a JSON body or a regular form POST
    if pylons.request.content_type.split(';')[0].strip() == 'application/json':
        req = json.loads(pylons.request.body)
    else:
        req = pylons.request.POST

    if not req.get('name', None):
        raise HTTPBadRequest('Missing application name')
    if not req.get('website', None):
        raise HTTPBadRequest('Missing application website')
    if not req.get('redirect_to', None):
        raise HTTPBadRequest('Missing redirect urls')

    user = pylons.request.environ['fts3.User.Credentials']

    app_id = _generate_app_id()
    app = OAuth2Application(
        client_id=app_id,
        client_secret=_generate_app_secret(),
        name=req['name'],
        description=req.get('description', ''),
        website=req['website'],
        redirect_to=req['redirect_to'],
        owner=user.user_dn
    )
    try:
        Session.merge(app)
        Session.commit()
    except IntegrityError:
        # The insert conflicted — treated as a duplicate application name
        Session.rollback()
        raise HTTPForbidden('The name already exists')
    except:
        Session.rollback()
        raise
    log.info("New application registered: %s (%s)" % (req['name'], app_id))
    # Browsers get redirected to the application list; API clients get JSON
    if _accept_html(pylons.request.accept):
        redirect(url_for(controller='oauth2', action='get_my_apps'),
                 code=HTTPSeeOther.code)
    else:
        pylons.response.status_int = HTTPCreated.code
        pylons.response.headers['Content-Type'] = 'application/json'
        return to_json(app.client_id)
def _insert_job(self):
    """
    Insert one archived job with a single archived file; return the job id.
    """
    job = ArchivedJob()
    job.job_id = '111-222-333'
    job.job_state = 'CANCELED'

    archived = ArchivedFile()
    archived.job_id = job.job_id
    archived.file_id = 1234
    archived.file_state = 'CANCELED'
    archived.source_se = 'srm://source'
    archived.dest_se = 'srm://dest'

    for entity in (job, archived):
        Session.merge(entity)
    Session.commit()

    return job.job_id
def persist_token_information(self, client_id, scope, access_token, token_type,
                              expires_in, refresh_token, data):
    """
    Store a freshly issued token, replacing any previous token for the same
    client and delegation id.
    """
    # Remove previous tokens
    Session.query(OAuth2Token).filter(
        (OAuth2Token.dlg_id == data['dlg_id']) &
        (OAuth2Token.client_id == client_id)).delete()

    # Add the new one
    expiration = datetime.utcnow() + timedelta(seconds=expires_in)
    token = OAuth2Token(client_id=client_id,
                        scope=scope,
                        access_token=access_token,
                        token_type=token_type,
                        expires=expiration,
                        refresh_token=refresh_token,
                        dlg_id=data['dlg_id'])
    Session.merge(token)
    Session.commit()
def push_delegation(self, lifetime=timedelta(hours=7)):
    """
    Push into the database a mock delegated credential

    Args:
        lifetime: The mock credential lifetime
    """
    creds = self.get_user_credentials()
    delegated = Credential(
        dlg_id=creds.delegation_id,
        dn=creds.user_dn,
        proxy='-NOT USED-',
        voms_attrs=None,
        termination_time=datetime.utcnow() + lifetime,
    )
    Session.merge(delegated)
    Session.commit()
def test_optimizer_respected(self):
    """
    Submitting a job with an existing OptimizerActive entry must respect
    the existing value
    """
    self.test_submit()

    # Bump the existing entry to 20
    pair = ('root://source.es', 'root://dest.ch')
    oa = Session.query(OptimizerActive).get(pair)
    oa.active = 20
    Session.merge(oa)
    Session.flush()
    Session.commit()

    # Submit a second job
    self.test_submit()

    # The entry must still be 20
    oa2 = Session.query(OptimizerActive).get(pair)
    self.assertEqual(20, oa2.active)
def run(self):
    """
    Thread logic

    Periodically refreshes this host's heartbeat row until self.interval
    becomes falsy. Failures are logged and retried on the next tick.
    """
    host = Host(
        hostname=socket.getfqdn(),
        service_name=self.tag,
    )
    while self.interval:
        host.beat = datetime.utcnow()
        try:
            Session.merge(host)
            Session.commit()
            # FIX: log message typo ('Hearbeat')
            log.debug('Heartbeat')
        # FIX: legacy 'except Exception, e' syntax
        except Exception as e:
            # Best effort: keep the thread alive and try again next tick
            log.warning("Failed to update the heartbeat: %s" % str(e))
        time.sleep(self.interval)
def _insert_job(self):
    """
    Insert an archived job owned by the test user, with one archived file.
    Returns the job id.
    """
    job = ArchivedJob()
    job.job_id = '111-222-333'
    job.job_state = 'CANCELED'
    job.user_dn = TestController.TEST_USER_DN

    archived = ArchivedFile()
    archived.job_id = job.job_id
    archived.file_id = 1234
    archived.file_state = 'CANCELED'
    archived.source_se = 'srm://source'
    archived.dest_se = 'srm://dest'

    for entity in (job, archived):
        Session.merge(entity)
    Session.commit()

    return job.job_id
def set_share(self, start_response):
    """
    Add or modify a share

    Expects 'source', 'destination', 'vo' and a non-negative integer 'share'.

    Raises:
        HTTPBadRequest: missing fields, bad share value, or invalid URLs
    """
    input_dict = get_input_as_dict(request)
    source = input_dict.get('source')
    destination = input_dict.get('destination')
    vo = input_dict.get('vo')

    # FIX: validate outside the try, otherwise the negative-share error was
    # swallowed by the except and reported as 'Bad share value'
    try:
        share = int(input_dict.get('share'))
    except (TypeError, ValueError):
        raise HTTPBadRequest('Bad share value')
    if share < 0:
        raise HTTPBadRequest('Shares values cannot be negative')

    # FIX: share == 0 is valid; the old 'not share' test rejected it here
    if not source or not destination or not vo:
        raise HTTPBadRequest('Missing source, destination, vo and/or share')

    source = urlparse(source)
    if not source.scheme or not source.hostname:
        raise HTTPBadRequest('Invalid source')
    source = "%s://%s" % (source.scheme, source.hostname)

    destination = urlparse(destination)
    if not destination.scheme or not destination.hostname:
        # FIX: this branch used to say 'Invalid source'
        raise HTTPBadRequest('Invalid destination')
    destination = "%s://%s" % (destination.scheme, destination.hostname)

    try:
        share_cfg = ShareConfig(source=source, destination=destination,
                                vo=vo, share=share)
        Session.merge(share_cfg)
        audit_configuration(
            'share-set',
            'Share %s, %s, %s has been set to %d' % (source, destination, vo, share))
        Session.commit()
    except:
        Session.rollback()
        raise
    return share
def _submit_and_mark_all_but_one(self, count, states):
    """
    Helper for test_cancel_remaining_file
    Submit a job, mark all files except the first one with the state 'state'
    state can be a list with count-1 final states
    """
    job_id = self._submit(count)
    files = self.app.get(url="/jobs/%s/files" % job_id, status=200).json

    # Normalize: a single state applies to every file but the first
    if isinstance(states, str):
        states = [states] * (count - 1)

    for i in range(1, count):
        entry = Session.query(File).get(files[i]['file_id'])
        entry.file_state = states[i - 1]
        Session.merge(entry)
    Session.commit()

    return job_id, files
def test_401(self):
    """
    Get 401

    NOTE(review): despite the name, the request asserts status=200 — confirm
    the intended status against the controller's error handling.
    """
    csu = CloudStorageUser(storage_name='DROPBOX',
                           user_dn='/DC=ch/DC=cern/CN=Test User',
                           access_token=None,
                           vo_name='')
    Session.merge(csu)
    Session.commit()
    self.setup_gridsite_environment()

    def overridden_info(self):
        # Any failure inside the connector; FIX: removed the unreachable
        # "return '401'" that followed the raise
        raise Exception

    info = None
    with mock.patch.object(DropboxConnector, '_make_call', overridden_info):
        info = self.app.get(url="/cs/access_request/dropbox/", status=200)
def insert_job(vo, source=None, destination=None, state='SUBMITTED', multiple=None,
               duration=None, queued=None, thr=None, reason=None,
               user_dn='/DC=ch/DC=cern/CN=Test User'):
    """
    Insert a job and its transfers into the database; return the job id.

    Either pass source+destination for a single transfer, or 'multiple' as a
    list of (source, destination) pairs.

    Args:
        vo:          VO name set on the job and every transfer
        source:      single source storage (used when multiple is None)
        destination: single destination storage (used when multiple is None)
        state:       state set on the job and every transfer
        multiple:    optional list of (source, destination) pairs
        duration:    transfer duration in seconds (sets finish_time/tx_duration)
        queued:      queueing time in seconds (sets start_time, adds to finish_time)
        thr:         throughput recorded on each transfer
        reason:      failure reason recorded on each transfer
        user_dn:     DN of the job owner

    Returns:
        The generated job id (uuid4 string)
    """
    assert(multiple is not None or (destination is not None and source is not None))

    job = Job()
    job.user_dn = user_dn
    job.vo_name = vo
    job.source_se = source
    job.job_state = state
    job.submit_time = datetime.utcnow()
    # finish_time accounts for the queueing time when both are given
    if duration and queued:
        job.finish_time = job.submit_time + timedelta(seconds=duration+queued)
    elif duration:
        job.finish_time = job.submit_time + timedelta(seconds=duration)
    job.job_id = str(uuid.uuid4())

    Session.merge(job)

    if multiple is None:
        multiple = [(source, destination)]
    for (s, d) in multiple:
        transfer = File()
        transfer.job_id = job.job_id
        transfer.vo_name = vo
        transfer.source_se = s
        transfer.source_surl = s + '/path'
        transfer.dest_se = d
        transfer.dest_surl = d + '/path'
        transfer.file_state = state
        if queued:
            transfer.start_time = job.submit_time + timedelta(seconds=queued)
        if duration:
            transfer.tx_duration = duration
        if reason:
            transfer.reason = reason
        if thr:
            transfer.throughput = thr
        Session.merge(transfer)

    Session.commit()
    return job.job_id
def _ban_se(storage, vo_name, allow_submit, status, timeout):
    """
    Mark in the db the given storage as banned
    """
    user = request.environ['fts3.User.Credentials']

    banned = BannedSE()
    banned.se = storage
    banned.addition_time = datetime.utcnow()
    banned.admin_dn = user.user_dn
    banned.vo = vo_name
    # WAIT_AS encodes 'wait, but still allow submissions'
    banned.status = 'WAIT_AS' if (allow_submit and status == 'WAIT') else status
    banned.wait_timeout = timeout

    try:
        Session.merge(banned)
        Session.commit()
    except Exception:
        Session.rollback()
        raise
def credential(self, id, start_response):
    """
    Store the delegated proxy uploaded by the client.

    The request body is the signed proxy certificate (PEM); it is combined
    with the cached private key and persisted as a Credential, keyed by the
    delegation id and the user DN. Responds '201 CREATED' with an empty body.
    """
    user = request.environ['fts3.User.Credentials']
    cached = Session.query(CredentialCache).get((id, user.user_dn))

    proxy_pem = request.body
    proxy_cert = X509.load_cert_string(proxy_pem)
    # Stored naive: the timezone info is dropped from the certificate's notAfter
    expiration_time = proxy_cert.get_not_after().get_datetime().replace(tzinfo=None)

    full_proxy_pem = self._buildFullProxyPEM(proxy_pem, cached.priv_key)

    stored = Credential(
        dlg_id=id,
        dn=user.user_dn,
        proxy=full_proxy_pem,
        voms_attrs=cached.voms_attrs,
        termination_time=expiration_time
    )
    Session.merge(stored)
    Session.commit()

    start_response('201 CREATED', [])
    return ''
if credential.termination_time <= datetime.utcnow(): raise HTTPForbidden('Delegated proxy already expired') try: voms_client = voms.VomsClient(credential.proxy) (new_proxy, new_termination_time) = voms_client.init(voms_list) except voms.VomsException, e: # Error generating the proxy because of the request itself raise HTTPMethodFailure(str(e)) credential.proxy = new_proxy credential.termination_time = new_termination_time credential.voms_attrs = ' '.join(voms_list) try: Session.merge(credential) Session.commit() except Exception: Session.rollback() raise start_response('203 Non-Authoritative Information', [('Content-Type', 'text/plain')]) return [str(new_termination_time)] @require_certificate def delegation_page(self): """ Render an HTML form to delegate the credentials """ user = request.environ['fts3.User.Credentials'] return render(
def _cancel_transfers(storage=None, vo_name=None):
    """
    Cancel every non-terminal transfer whose source or destination matches
    the given storage, optionally restricted to the given VO, then mark each
    affected job CANCELED once all of its files are in a terminal state.

    Returns:
        The set of affected job ids.
    """
    affected_job_ids = set()
    # Files in any active state, plus NOT_USED alternatives, touching the storage
    files = Session.query(File)\
        .filter((File.source_se == storage) | (File.dest_se == storage))\
        .filter(File.file_state.in_(FileActiveStates + ['NOT_USED']))
    if vo_name:
        files = files.filter(File.vo_name == vo_name)
    now = datetime.utcnow()
    try:
        for file in files:
            affected_job_ids.add(file.job_id)
            # Cancel the affected file
            file.file_state = 'CANCELED'
            file.reason = 'Storage banned'
            file.finish_time = now
            Session.merge(file)
            # If there are alternatives, enable them
            Session.query(File).filter(File.job_id == file.job_id)\
                .filter(File.file_index == file.file_index)\
                .filter(File.file_state == 'NOT_USED').update({'file_state': 'SUBMITTED'})
        # Or next queries will not see the changes!
        Session.commit()
    except Exception:
        Session.rollback()
        raise
    # Set each job terminal state if needed
    try:
        for job_id in affected_job_ids:
            # NOTE(review): reuse_flag is fetched but never used below —
            # looks like a leftover; confirm before removing
            reuse_flag = Session.query(Job.reuse_job).filter(Job.job_id == job_id)[0][0]
            # Count totals and per-terminal-state files for this job
            n_files = Session.query(func.count(distinct(File.file_id))).filter(File.job_id == job_id).all()[0][0]
            n_canceled = Session.query(func.count(distinct(File.file_id)))\
                .filter(File.job_id == job_id).filter(File.file_state == 'CANCELED').all()[0][0]
            n_finished = Session.query(func.count(distinct(File.file_id)))\
                .filter(File.job_id == job_id).filter(File.file_state == 'FINISHED').all()[0][0]
            n_failed = Session.query(func.count(distinct(File.file_id)))\
                .filter(File.job_id == job_id).filter(File.file_state == 'FAILED').all()[0][0]
            n_terminal = n_canceled + n_finished + n_failed
            # Job finished!
            if n_terminal == n_files:
                # NOTE(review): reason is always None here, so the job-level
                # reason is cleared even though each file got 'Storage banned'
                # — confirm this is intended
                reason = None
                Session.query(Job).filter(Job.job_id == job_id).update({
                    'job_state': 'CANCELED',
                    'job_finished': now,
                    'finish_time': now,
                    'reason': reason
                })
                Session.query(File).filter(File.job_id == job_id).update({
                    'job_finished': now
                })
        Session.commit()
    except Exception:
        Session.rollback()
        raise
    return affected_job_ids
# Validate that there are no bad combinations if _has_multiple_options(files): if job['reuse_job'] == 'Y': raise HTTPBadRequest('Can not specify reuse and multiple replicas at the same time') job['reuse_job'] = 'R' # Update the optimizer unique_pairs = set(map(lambda f: (f['source_se'], f['dest_se']), files)) for (source_se, dest_se) in unique_pairs: optimizer_active = OptimizerActive() optimizer_active.source_se = source_se optimizer_active.dest_se = dest_se optimizer_active.ema = 0 optimizer_active.datetime = datetime.utcnow() Session.merge(optimizer_active) # Update the database try: Session.execute(Job.__table__.insert(), [job]) if len(files): Session.execute(File.__table__.insert(), files) if len(datamanagement): Session.execute(DataManagement.__table__.insert(), datamanagement) Session.commit() except: Session.rollback() raise if len(files): log.info("Job %s submitted with %d transfers" % (job['job_id'], len(files)))