def deploy(self):
    """Deploy a package to a destination host.

    Reads the deploy parameters from the request, triggers the
    deployment and maps the backend result onto an HTTP response:
    202 Accepted with a deployment-status URL, 404 when the package
    is not installed, or 500 carrying the backend errors.  Any
    unexpected exception is logged and returned as a 500 with the
    traceback in the detail.
    """
    logger.debug("======= deploy =======")
    try:
        err, pkg_name, dest, email, passwd = \
            self.__get_deploy_request_params()
        if err:
            return HTTPBadRequest(err)

        result = deploy_package(pkg_name, dest, email, passwd)
        if result['status'] == 202:
            deployment_id = result['deployment_id']
            status_url = 'deployment/{0}'.format(deployment_id)
            contents = json.dumps(result, sort_keys=True)
            response = HTTPAccepted()
            response.headers = {
                'Content-Type': 'application/json; charset=UTF-8',
                'Location': status_url
            }
            # Response.body must be bytes on Python 3 (WebOb rejects
            # str); encode the JSON payload explicitly.
            response.body = contents.encode('utf-8')
            return response
        elif result['status'] == 404:
            msg = 'Cannot find package "{0}" installed.'.format(pkg_name)
            return HTTPNotFound(detail=msg)
        else:
            return HTTPInternalServerError(detail=result['errors'])
    except Exception:
        stack_info = traceback.format_exc()
        logger.exception(stack_info)
        return HTTPInternalServerError(detail=stack_info)
def stop(self):
    """ PATCH /arc2box/communities/<community>/ {"action": "stop"}

    Tell the archiver to stop copying content from this community to
    box and take the community out of the 'copying' state.  The
    community must currently be in the 'copying' or 'reviewing'
    state.  Afterwards it returns to normal operation and is in no
    archiving state at all.
    """
    community = self.context
    current = getattr(community, 'archive_status', None)
    if current not in ('copying', 'reviewing', 'exception'):
        return HTTPBadRequest(
            "Community must be in 'copying' or 'reviewing' state.")

    # Put the workflow-managed ACL back in place of the custom one.
    workflow = get_workflow(ICommunity, 'security', community)
    workflow.reset(community)
    del community.__custom_acl__

    # Dropping the status makes the archiver skip this community if it
    # is still sitting in the copy queue.
    del community.archive_status
    community.archive_copied = None

    logger.info('arc2box: stop community: ' + community.title)
    return HTTPAccepted()
def copy(self):
    """ PATCH /arc2box/communities/<community>/ {"action": "copy"}

    Tell the archiver to start copying content from this community to
    box.  The community must not already be in any archive state.
    The community is placed in the 'copying' state; the archiver
    moves it to 'reviewing' when the copy completes.  Returns a
    status of '202 Accepted'.
    """
    community = self.context

    # Everyone except KarlAdmin is restricted to VIEW while archiving.
    restricted = []
    for allow, principal, permissions in community.__acl__:
        if allow == Allow and principal != 'group.KarlAdmin':
            permissions = (VIEW, )
        restricted.append((allow, principal, permissions))
    modify_acl(community, restricted)

    # Hand the community to the archiver's copy queue.
    self.queue.queue_for_copy(community)
    community.archive_status = 'copying'

    logger.info('arc2box: copy community: ' + community.title)
    return HTTPAccepted()
def _wait_for_serial(self, serial):
    """Return *serial* once the keyfs has committed it.

    Raises HTTPNotFound when asked for anything beyond the next
    serial, and HTTPAccepted (with the current serial in the
    X-DEVPI-SERIAL header) when the next transaction does not arrive
    within MAX_REPLICA_BLOCK_TIME.
    """
    keyfs = self.xom.keyfs
    upcoming = keyfs.get_next_serial()
    if serial > upcoming:
        raise HTTPNotFound("can only wait for next serial")
    if serial == upcoming:
        arrived = keyfs.wait_tx_serial(
            serial, timeout=self.MAX_REPLICA_BLOCK_TIME)
        if not arrived:
            raise HTTPAccepted(
                "no new transaction yet",
                headers={
                    str("X-DEVPI-SERIAL"): str(keyfs.get_current_serial())})
    return serial
def add_addon_hash(request):
    """Registers a new hash for a given addon.

    The registration is asynchronous: it is queued as a task so this
    endpoint stays cheap and does not disturb the more critical ones.

    The parameters should be passed in the body of the request::

        {'id': '*****@*****.**',
         'sha256': 'the hash of the addon, to check'}

    The server should answer with a 202 Accepted HTTP status code.
    """
    validated = request.validated
    record_new_hash.delay(validated['id'], validated['sha256'])
    return HTTPAccepted()
def login_with_token(request: 'pyramid.request.Request'):
    """Use a Bearer token to log in and get a Pyramid session ticket.

    Args:
        request (pyramid.request.Request): The Pyramid request

    Returns:
        pyramid.response.Response: An empty response if the login
        worked, but with the session cookie headers.
    """
    # Validate the Bearer token against the OIDC provider and log in.
    protector = ResourceProtector()
    protector.register_token_validator(
        IntrospectTokenValidator(request.registry.oidc.fedora))
    token = protector.validate_request([SCOPES], request)

    # Attach the session cookie headers to an otherwise-empty response.
    response = HTTPAccepted()
    get_and_store_user(request, token["access_token"], response)
    return response
def test_no_userinfo(self):
    """Test when the OIDC server has no userinfo."""
    request = testing.DummyRequest(
        path="/oidc/login",
        headers={"Authorization": "Bearer TOKEN"},
    )
    request.registry = self.registry
    request.db = self.db
    error_payload = {
        "error": "invalid_request",
        "error_description": "No userinfo for token",
    }
    patched_send = mock.patch(
        'requests.sessions.Session.send',
        side_effect=fake_send({"UserInfo": error_payload}))
    with patched_send:
        with pytest.raises(InvalidTokenError) as exc:
            get_and_store_user(request, "TOKEN", HTTPAccepted())
    assert str(exc.value) == "invalid_token: No userinfo for token"
def update_ticket(request):
    """Update a ticket's open state and optionally append a comment.

    Looks the ticket up from the ``id`` matchdict entry.  Returns
    202 Accepted on success and 404 when the ticket does not exist.
    If the transaction commit fails, the session is rolled back and
    the original error is re-raised.
    """
    ticket = request.db.query(models.Ticket).get(request.matchdict['id'])
    if not ticket:
        return HTTPNotFound()

    ticket.is_open = request.POST['is_open']
    if request.POST.get('message'):
        ticket.comments.append(
            models.Comment(author=request.POST['comment_author'],
                           message=request.POST['message']))
    try:
        transaction.commit()
    except Exception:
        # Was a bare ``except:`` (E722).  Roll the session back so it
        # remains usable, then re-raise so the caller sees the failure.
        request.db.rollback()
        raise
    return HTTPAccepted()
def _wait_for_entry(self, serial):
    """Block until transaction *serial* exists, then return its raw
    changelog entry.

    Raises HTTPNotFound when asked for anything beyond the next
    serial, and HTTPAccepted (with the current serial in the
    X-DEVPI-SERIAL header) when the wait budget is exhausted before
    the transaction arrives.
    """
    # Total wait budget expressed as a number of short wakeups.
    max_wakeups = self.MAX_REPLICA_BLOCK_TIME / self.WAKEUP_INTERVAL
    keyfs = self.xom.keyfs
    with keyfs.notifier.cv_new_transaction:
        next_serial = keyfs.get_next_serial()
        if serial > next_serial:
            raise HTTPNotFound("can only wait for next serial")
        with threadlog.around("debug", "waiting for tx%s", serial):
            num_wakeups = 0
            while serial >= keyfs.get_next_serial():
                if num_wakeups >= max_wakeups:
                    # Give up politely: 202 tells the replica to retry,
                    # reporting how far we currently are.
                    raise HTTPAccepted("no new transaction yet",
                        headers={
                            str("X-DEVPI-SERIAL"):
                                str(keyfs.get_current_serial())
                        })
                # we loop because we want control-c to get through
                keyfs.notifier.cv_new_transaction.wait(
                    self.WAKEUP_INTERVAL)
                num_wakeups += 1
    return keyfs._fs.get_raw_changelog_entry(serial)
def mothball(self):
    """ PATCH /arc2box/communities/<community>/ {"action": "mothball"}

    Tell the archiver to remove all content from the community in
    Karl.  This operation cannot be stopped or reversed.  The
    community must be in the 'reviewing' state.  It is placed in the
    'removing' state; the archiver moves it to 'archived' when the
    mothball operation completes.
    """
    community = self.context
    if getattr(community, 'archive_status', None) != 'reviewing':
        return HTTPBadRequest("Community must be in 'reviewing' state.")

    # Hand the community to the archiver's mothball queue.
    self.queue.queue_for_mothball(community)
    community.archive_status = 'removing'

    logger.info('arc2box: mothball community: ' + community.title)
    return HTTPAccepted()
def delete(self):
    """Remove the context record from the database.

    Marks the response as 202 Accepted and returns a confirmation
    payload containing the deleted record's id.
    """
    session = self.request.dbsession
    self.context.delete(session)
    self.request.response = HTTPAccepted()
    return {'result': 'Record deleted.', 'data': {'id': self.context.id}}
def request_quote(request):
    # type: (PyramidRequest) -> AnyViewResponse
    """
    Request a quotation for a process.

    Validates that quoting is supported by the instance configuration
    and by the requested process, stores a new quote, and dispatches
    the estimation as a Celery task.  Depending on the ``Prefer``
    header, either waits synchronously for the estimate (201 Created
    with the full quote) or returns immediately (202 Accepted with a
    partial quote and a ``Location`` header pointing at it).
    """
    settings = get_settings(request)
    weaver_config = get_weaver_configuration(settings)
    # Quoting is only available on configurations that support it.
    if weaver_config not in WeaverFeature.QUOTING:
        raise HTTPBadRequest(f"Unsupported quoting request for configuration '{weaver_config}'.")

    process_id = request.matchdict.get("process_id")
    process_store = get_db(request).get_store(StoreProcesses)
    try:
        process = process_store.fetch_by_id(process_id)  # type: Process
    except ProcessNotFound:
        # Re-raise with an OGC API - Processes compliant JSON body.
        raise ProcessNotFound(json={
            "title": "NoSuchProcess",
            "type": "http://www.opengis.net/def/exceptions/ogcapi-processes-1/1.0/no-such-process",
            "detail": "Process with specified reference identifier does not exist.",
            "status": ProcessNotFound.code,
            "cause": str(process_id)
        })

    # Only application/workflow processes can be quoted, and workflows
    # additionally require a remote-capable instance configuration.
    if (
        (process.type not in [ProcessType.APPLICATION, ProcessType.WORKFLOW])
        or (process.type == ProcessType.WORKFLOW and weaver_config not in WeaverFeature.REMOTE)
    ):
        raise HTTPBadRequest(json={
            "title": "UnsupportedOperation",
            "detail": f"Unsupported quoting process type '{process.type}' on '{weaver_config}' instance.",
            "status": HTTPBadRequest.code,
            "instance": process.href(settings)
        })

    # Validate the submitted quote parameters against the schema.
    try:
        process_params = sd.QuoteProcessParametersSchema().deserialize(request.json)
    except colander.Invalid as exc:
        raise OWSMissingParameterValue(json={
            "title": "MissingParameterValue",
            "cause": f"Invalid schema: [{exc.msg!s}]",
            "error": exc.__class__.__name__,
            "value": exc.value
        })

    # Persist the quote before dispatching the estimation task.
    quote_store = get_db(request).get_store(StoreQuotes)
    quote_user = request.authenticated_userid
    quote_info = {
        "process": process_id,
        "processParameters": process_params,
        "user": quote_user
    }
    quote = Quote(**quote_info)
    quote = quote_store.save_quote(quote)

    # Resolve the execution mode (sync/async) from the Prefer header,
    # bounded by the configured maximum synchronous wait time.
    max_wait = as_int(settings.get("weaver.quote_sync_max_wait"), default=20)
    mode, wait, applied = parse_prefer_header_execute_mode(request.headers, process.jobControlOptions, max_wait)
    result = process_quote_estimator.delay(quote.id)
    LOGGER.debug("Celery pending task [%s] for quote [%s].", result.id, quote.id)
    if mode == ExecuteMode.SYNC and wait:
        LOGGER.debug("Celery task requested as sync if it completes before (wait=%ss)", wait)
        try:
            result.wait(timeout=wait)
        except CeleryTaskTimeoutError:
            pass
        if result.ready():
            # Estimation finished within the wait budget: return the
            # fully resolved quote.
            quote = quote_store.fetch_by_id(quote.id)
            data = quote.json()
            data.update({"description": sd.CreatedQuoteResponse.description})
            data.update({"links": quote.links(settings)})
            data = sd.CreatedQuoteResponse().deserialize(data)
            return HTTPCreated(json=data)
        else:
            LOGGER.debug("Celery task requested as sync took too long to complete (wait=%ss). Continue in async.", wait)
            # sync not respected, therefore must drop it
            # since both could be provided as alternative preferences, drop only async with limited subset
            prefer = get_header("Preference-Applied", applied, pop=True)
            _, _, async_applied = parse_prefer_header_execute_mode({"Prefer": prefer}, [ExecuteMode.ASYNC])
            applied = async_applied

    # Async path (requested or fallback): return the partial quote with
    # a Location header for polling its status.
    data = quote.partial()
    data.update({"description": sd.AcceptedQuoteResponse.description})
    headers = {"Location": quote.href(settings)}
    headers.update(applied)
    return HTTPAccepted(headers=headers, json=data)