def __init__(self, sha1_name, *args):
    self.sha1_name = sha1_name
    self.ref_name = request.context['ref']
    self.project = Project.query.get(request.context['project_id'])
    self.repos = Repo.query.filter_by(project=self.project, ref=self.ref_name)
    if sha1_name != 'latest':
        self.repos = self.repos.filter_by(sha1=sha1_name).all()
        request.context['sha1'] = sha1_name
    else:
        # If the url contains distro and distro_version we want to filter by
        # that as well. This avoids a bug where a sha1 would be used for a
        # distro/distro_version that might not have a ready repo, resulting
        # in the further controllers giving a 504.
        if len(args) >= 2:
            flavor = "default"
            if 'flavors' in args:
                flavor = args[3]
            self.repos = Repo.query.filter_by(
                project=self.project,
                ref=self.ref_name,
                distro=args[0],
                distro_version=args[1],
                flavor=flavor,
            )
        latest_repo = self.repos.filter_by(status='ready').order_by(desc(Repo.modified)).first()
        if not latest_repo:
            abort(504, "no repository is ready for: %s/%s" % (self.project.name, self.ref_name))
        self.repos = [latest_repo]
        request.context['sha1'] = latest_repo.sha1

def delete(self, ref_or_id):
    """Delete a policy.

    Handles requests:
        POST /policies/1?_method=delete
        DELETE /policies/1
        DELETE /policies/mypack.mypolicy
    """
    op = 'DELETE /policies/%s/' % ref_or_id

    db_model = self._get_by_ref_or_id(ref_or_id=ref_or_id)
    LOG.debug('%s found object: %s', op, db_model)

    try:
        validate_not_part_of_system_pack(db_model)
    except ValueValidationException as e:
        LOG.exception('%s unable to delete object from system pack.', op)
        abort(http_client.BAD_REQUEST, str(e))

    try:
        self.access.delete(db_model)
    except Exception as e:
        LOG.exception('%s unable to delete object: %s', op, db_model)
        abort(http_client.INTERNAL_SERVER_ERROR, str(e))
        return

    LOG.debug('%s deleted object: %s', op, db_model)
    LOG.audit('Policy deleted. Policy.id=%s' % (db_model.id), extra={'policy_db': db_model})

    return None

def _lookup(self, collection, *remainder):
    # If the collection exists in the extension-to-service-plugins map, then
    # we assume the collection is the service plugin and needs to be
    # remapped.
    # Example: https://neutron.endpoint/v2.0/lbaas/loadbalancers
    if (remainder and
            manager.NeutronManager.get_resources_for_path_prefix(
                collection)):
        collection = remainder[0]
        remainder = remainder[1:]
    controller = manager.NeutronManager.get_controller_for_resource(
        collection)
    if not controller:
        LOG.warning("No controller found for: %s - returning response "
                    "code 404", collection)
        pecan.abort(404)
    # Store resource and collection names in pecan request context so that
    # hooks can leverage them if necessary. The following code uses
    # attributes from the controller instance to ensure names have been
    # properly sanitized (eg: replacing dashes with underscores).
    request.context['resource'] = controller.resource
    request.context['collection'] = controller.collection
    # NOTE(blogan): initialize a dict to store the ids of the items walked
    # in the path, for example: /networks/1234 would cause uri_identifiers
    # to contain: {'network_id': '1234'}
    # This is for backwards compatibility with legacy extensions that
    # defined their own controllers and expected kwargs to be passed in
    # with the uri_identifiers.
    request.context['uri_identifiers'] = {}
    return controller, remainder

def _authorize(self, uid=None):
    if not endpoint_active:
        abort(403)
    # Shortcircuit the authorization for testing purposes
    # return
    if not request.remote_user:
        request.remote_user = request.headers.get('Remote-User')
    if not request.remote_user:
        request.remote_user = request.headers.get('X-Remote-User')
    if request.remote_user == '(null)':
        if request.headers.get('Authorization'):
            auth_header = request.headers.get('Authorization').split()[1]
            request.remote_user = base64.b64decode(
                auth_header).split(':')[0]
    if (request.remote_user == "admin" and
            request.headers.get('Admin-Token')):
        sent_admin_token = request.headers.get('Admin-Token')
        # If remote-user is admin and an admin-token is passed,
        # authorize if the token is correct
        if sent_admin_token == admin_token:
            return 'admin'
    else:
        # If the uid targeted by the request is the same
        # as the requester, then authorize
        if uid and uid == request.remote_user:
            return uid
    abort(401)

def put(self, instance, ref_or_id):
    op = 'PUT /policies/%s/' % ref_or_id

    db_model = self._get_by_ref_or_id(ref_or_id=ref_or_id)
    LOG.debug('%s found object: %s', op, db_model)
    db_model_id = db_model.id

    try:
        validate_not_part_of_system_pack(db_model)
    except ValueValidationException as e:
        LOG.exception('%s unable to update object from system pack.', op)
        abort(http_client.BAD_REQUEST, str(e))

    if not getattr(instance, 'pack', None):
        instance.pack = db_model.pack

    try:
        db_model = self.model.to_model(instance)
        db_model.id = db_model_id
        db_model = self.access.add_or_update(db_model)
    except (ValidationError, ValueError) as e:
        LOG.exception('%s unable to update object: %s', op, db_model)
        abort(http_client.BAD_REQUEST, str(e))
        return

    LOG.debug('%s updated object: %s', op, db_model)
    LOG.audit('Policy updated. Policy.id=%s' % (db_model.id), extra={'policy_db': db_model})

    return self.model.from_model(db_model)

def decorate(self, *args, **kwargs):
    try:
        return func(self, *args, **kwargs)
    except exc.OAuthException as o_exc:
        # Extract the parameters
        error = o_exc.error
        error_description = o_exc.msg or _("No details available.")

        # If we have a redirect URL, build the error redirect.
        if o_exc.redirect_uri:
            # Split the redirect_url apart
            parts = urlparse(o_exc.redirect_uri)

            # Add the error and error_description
            if parts.query:
                params = parse_qsl(parts.query)
            else:
                params = []
            params.append(('error', error))
            params.append(('error_description', error_description))

            # Overwrite the old query params and reconstruct the URL
            parts_list = list(parts)
            parts_list[4] = urlencode(params)
            location = urlunparse(parts_list)

            redirect(location)
        else:
            error_body = {
                'error': error,
                'error_description': error_description
            }
            response.json = error_body
            abort(o_exc.code, error_description, json_body=error_body)

def before(self, state):
    if state.request.method not in self.ACTION_MAP:
        pecan.abort(405)
    neutron_context = state.request.context.get('neutron_context')
    resource = state.request.context.get('resource')
    is_update = (state.request.method == 'PUT')
    items = state.request.resources
    policy.init()
    action = '%s_%s' % (self.ACTION_MAP[state.request.method], resource)
    for item in items:
        if is_update:
            obj = copy.copy(state.request.original_object)
            obj.update(item)
            obj[const.ATTRIBUTES_TO_UPDATE] = item.keys()
            item = obj
        try:
            policy.enforce(
                neutron_context, action, item,
                pluralized=attribute_population._plural(resource))
        except oslo_policy.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying its own object, it's safe to
                # return a 403. Otherwise, pretend that it doesn't exist
                # to avoid giving away information.
                if (is_update and
                        neutron_context.tenant_id != obj['tenant_id']):
                    ctxt.reraise = False
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

def delete(self):
    if not self.controller_delete:
        pecan.abort(405)
    pecan.response.status = 204
    shim_request = ShimRequest(request.context["neutron_context"])
    uri_identifiers = request.context["uri_identifiers"]
    return self.controller_delete(shim_request, self.item, **uri_identifiers)

def create(self):
    if not self.controller_create:
        pecan.abort(405)
    pecan.response.status = 201
    shim_request = ShimRequest(request.context["neutron_context"])
    uri_identifiers = request.context["uri_identifiers"]
    return self.controller_create(shim_request, request.json, **uri_identifiers)

def delete(self, action_ref_or_id):
    """Delete an action.

    Handles requests:
        POST /actions/1?_method=delete
        DELETE /actions/1
        DELETE /actions/mypack.myaction
    """
    action_db = self._get_by_ref_or_id(ref_or_id=action_ref_or_id)
    action_id = action_db.id

    try:
        validate_not_part_of_system_pack(action_db)
    except ValueValidationException as e:
        abort(http_client.BAD_REQUEST, str(e))

    LOG.debug('DELETE /actions/ lookup with ref_or_id=%s found object: %s',
              action_ref_or_id, action_db)

    try:
        Action.delete(action_db)
    except Exception as e:
        LOG.error('Database delete encountered exception during delete of id="%s". '
                  'Exception was %s', action_id, e)
        abort(http_client.INTERNAL_SERVER_ERROR, str(e))
        return

    extra = {'action_db': action_db}
    LOG.audit('Action deleted. Action.id=%s' % (action_db.id), extra=extra)

    return None

def put(self, branch_id, branch):
    """Modify this branch.

    :param branch_id: An ID of the branch.
    :param branch: A branch within the request body.
    """
    branch_dict = branch.as_dict(omit_unset=True)

    if "expiration_date" in six.iterkeys(branch_dict):
        abort(400, _("Can't change expiration date."))

    if "expired" in six.iterkeys(branch_dict):
        if branch_dict["expired"]:
            branch_dict["expiration_date"] = datetime.now(tz=pytz.utc)
        else:
            branch_dict["expiration_date"] = None

    if branch.project_id:
        original_branch = branches_api.branch_get(branch_id)

        if not original_branch:
            raise exc.NotFound(_("Branch %s not found") % branch_id)

        if branch.project_id != original_branch.project_id:
            abort(400, _("You can't associate branch %s "
                         "with another project.") % branch_id)

    result = branches_api.branch_update(branch_id, branch_dict)

    if result:
        return wmodels.Branch.from_db_model(result)
    else:
        raise exc.NotFound(_("Branch %s not found") % branch_id)

def post(self, triggertype):
    """Create a new triggertype.

    Handles requests:
        POST /triggertypes/
    """
    try:
        triggertype_db = TriggerTypeAPI.to_model(triggertype)
        triggertype_db = TriggerType.add_or_update(triggertype_db)
    except (ValidationError, ValueError) as e:
        LOG.exception('Validation failed for triggertype data=%s.', triggertype)
        abort(http_client.BAD_REQUEST, str(e))
        return
    except StackStormDBObjectConflictError as e:
        LOG.warn('TriggerType creation of %s failed with uniqueness conflict. Exception : %s',
                 triggertype, str(e))
        abort(http_client.CONFLICT, str(e), body={'conflict-id': e.conflict_id})
        return
    else:
        extra = {'triggertype_db': triggertype_db}
        LOG.audit('TriggerType created. TriggerType.id=%s' % (triggertype_db.id), extra=extra)
        if not triggertype_db.parameters_schema:
            TriggerTypeController._create_shadow_trigger(triggertype_db)

    triggertype_api = TriggerTypeAPI.from_model(triggertype_db)

    return triggertype_api

def post(self, action_alias):
    """Create a new ActionAlias.

    Handles requests:
        POST /actionalias/
    """
    try:
        action_alias_db = ActionAliasAPI.to_model(action_alias)
        LOG.debug('/actionalias/ POST verified ActionAliasAPI and formulated ActionAliasDB=%s',
                  action_alias_db)
        action_alias_db = ActionAlias.add_or_update(action_alias_db)
    except (ValidationError, ValueError, ValueValidationException) as e:
        LOG.exception('Validation failed for action alias data=%s.', action_alias)
        pecan.abort(http_client.BAD_REQUEST, str(e))
        return
    except StackStormDBObjectConflictError as e:
        LOG.warn('ActionAlias creation of %s failed with uniqueness conflict.', action_alias,
                 exc_info=True)
        pecan.abort(http_client.CONFLICT, str(e), body={'conflict-id': e.conflict_id})
        return

    extra = {'action_alias_db': action_alias_db}
    LOG.audit('Action alias created. ActionAlias.id=%s' % (action_alias_db.id), extra=extra)

    action_alias_api = ActionAliasAPI.from_model(action_alias_db)

    return action_alias_api

def post(self, action):
    """Create a new action.

    Handles requests:
        POST /actions/
    """
    if not hasattr(action, 'pack'):
        setattr(action, 'pack', DEFAULT_PACK_NAME)

    try:
        action_validator.validate_action(action)
    except ValueValidationException as e:
        abort(http_client.BAD_REQUEST, str(e))
        return

    # ActionsController._validate_action_parameters(action, runnertype_db)
    action_model = ActionAPI.to_model(action)

    LOG.debug('/actions/ POST verified ActionAPI object=%s', action)
    action_db = Action.add_or_update(action_model)
    LOG.debug('/actions/ POST saved ActionDB object=%s', action_db)

    extra = {'action_db': action_db}
    LOG.audit('Action created. Action.id=%s' % (action_db.id), extra=extra)
    action_api = ActionAPI.from_model(action_db)

    return action_api

def index(self):
    if self.resource not in self.inverted_member_actions["GET"]:
        pecan.abort(404)
    shim_request = ShimRequest(request.context["neutron_context"])
    uri_identifiers = request.context["uri_identifiers"]
    method = getattr(self.controller, self.resource)
    return method(shim_request, self.item, **uri_identifiers)

def __get_by_id(id):
    try:
        return RunnerType.get_by_id(id)
    except (ValueError, ValidationError) as e:
        msg = 'Database lookup for id="%s" resulted in exception. %s' % (id, e)
        LOG.exception(msg)
        abort(http_client.NOT_FOUND, msg)

def post(self, **kwargs):
    logger.info('Client requests authentication.')
    back = kwargs.get('back')
    if not back:
        logger.error('Client requests authentication without back url.')
        abort(422)
    username = kwargs.get('username')
    password = kwargs.get('password')
    if username and password:
        valid_user = self.check_valid_user(username, password)
        if not valid_user:
            logger.error('Client requests authentication with wrong'
                         ' credentials.')
            response.status = 401
            return render('login.html',
                          dict(back=back, message='Authorization failed.'))
        email, lastname, sshkey = valid_user
        logger.info('Client requests authentication success %s' % username)
        common.setup_response(username, back, email, lastname, sshkey)
    else:
        logger.error('Client requests authentication without credentials.')
        response.status = 401
        return render('login.html',
                      dict(back=back, message='Authorization failed.'))

def on_put(self, external_project_id, **kwargs):
    if (not pecan.request.content_type or
            pecan.request.content_type == 'application/json'):
        pecan.abort(
            415,
            u._("Content-Type of '{content_type}' is not supported for "
                "PUT.").format(content_type=pecan.request.content_type)
        )

    transport_key_id = kwargs.get('transport_key_id')

    payload = pecan.request.body
    if not payload:
        raise exception.NoDataToProcess()

    if validators.secret_too_big(payload):
        raise exception.LimitExceeded()

    if self.secret.encrypted_data or self.secret.secret_store_metadata:
        _secret_already_has_data()

    project_model = res.get_or_create_project(external_project_id)
    content_type = pecan.request.content_type
    content_encoding = pecan.request.headers.get('Content-Encoding')

    plugin.store_secret(
        unencrypted_raw=payload,
        content_type_raw=content_type,
        content_encoding=content_encoding,
        secret_model=self.secret,
        project_model=project_model,
        transport_key_id=transport_key_id)

    LOG.info(u._LI('Updated secret for project: %s'), external_project_id)

def put(self, rule_ref_or_id, rule):
    rule_db = self._get_by_ref_or_id(rule_ref_or_id)
    LOG.debug('PUT /rules/ lookup with id=%s found object: %s', rule_ref_or_id, rule_db)

    try:
        if rule.id is not None and rule.id != '' and rule.id != rule_ref_or_id:
            LOG.warning('Discarding mismatched id=%s found in payload and using uri_id=%s.',
                        rule.id, rule_ref_or_id)

        old_rule_db = rule_db
        rule_db = RuleAPI.to_model(rule)

        # Check referenced trigger and action permissions
        # Note: This needs to happen after the "to_model" call since to_model performs some
        # validation (trigger exists, etc.)
        assert_request_user_has_rule_trigger_and_action_permission(request=pecan.request,
                                                                   rule_api=rule)

        rule_db.id = rule_ref_or_id
        rule_db = Rule.add_or_update(rule_db)
        # After the rule has been added, modify the ref_count. This way a failure to add
        # the rule due to violated constraints will have no impact on ref_count.
        increment_trigger_ref_count(rule_api=rule)
    except (ValueValidationException, jsonschema.ValidationError, ValueError) as e:
        LOG.exception('Validation failed for rule data=%s', rule)
        abort(http_client.BAD_REQUEST, str(e))
        return

    # Use old_rule_db for cleanup.
    cleanup_trigger_db_for_rule(old_rule_db)

    extra = {'old_rule_db': old_rule_db, 'new_rule_db': rule_db}
    LOG.audit('Rule updated. Rule.id=%s.' % (rule_db.id), extra=extra)
    rule_api = RuleAPI.from_model(rule_db)

    return rule_api

def post(self, **kw):
    """Used to add a new ingredient into the recipe."""
    if request.pecan.get('validation_errors'):
        abort(400)

    # Look up the addition entity by name
    cls = getattr(entities, kw.get('type'), None)

    # Clean up the namespace a bit
    kw.pop('type')
    ingredient = kw.pop('ingredient')

    unit = ingredient.default_unit
    kw['amount'] = 0
    kw['unit'] = unit

    #
    # If it's a hop addition, copy defaults for AA
    #
    if getattr(ingredient, 'alpha_acid', None):
        kw['alpha_acid'] = ingredient.alpha_acid

    #
    # Create the entity and assign the ingredient
    # to the correct attribute (e.g., `fermentable`,
    # `hop`, `yeast`)
    #
    entity = cls(**kw)
    setattr(entity, ingredient.row_type, ingredient)
    entity.recipe = request.context['recipe']

    request.context['recipe'].touch()

    return self.__rendered__()

def before(self, state):
    context_kwargs = {}
    if "X-Project-ID" in state.request.headers:
        context_kwargs["tenant"] = state.request.headers["X-Project-ID"]
        context_kwargs["base_url"] = state.request.host_url + "/".join(
            state.request.path.split("/")[0:2])
        # Hack: if the configuration is set, the project_id
        # will be appended onto the base_url
        if cfg.CONF.project_id_in_url:
            context_kwargs["base_url"] = "/".join([
                context_kwargs["base_url"],
                state.request.headers["X-Project-ID"]
            ])
    if "X-Auth-Token" in state.request.headers:
        context_kwargs["auth_token"] = state.request.headers["X-Auth-Token"]

    # If we still don't have a tenant, then return a 400
    if "tenant" not in context_kwargs:
        pecan.abort(400, detail="The Project ID must be provided.")

    request_context = PoppyRequestContext(**context_kwargs)
    state.request.context = request_context
    local.store.context = request_context

    # Attach tenant_id as member variable project_id to the controller.
    state.controller.__self__.project_id = getattr(local.store.context,
                                                   "tenant", None)
    state.controller.__self__.base_url = getattr(local.store.context,
                                                 "base_url", None)

    # Attach auth_token as a member variable to the controller.
    state.controller.__self__.auth_token = getattr(local.store.context,
                                                   "auth_token", None)

def post(self, story_id, task):
    """Create a new task.

    :param story_id: An ID of the story.
    :param task: A task within the request body.
    """
    if not task.story_id:
        task.story_id = story_id

    if task.story_id != story_id:
        abort(400, _("URL story_id and task.story_id do not match"))

    task = task_is_valid_post(task)

    creator_id = request.current_user_id
    task.creator_id = creator_id

    # We can't set due dates when creating tasks at the moment.
    task_dict = task.as_dict()
    if "due_dates" in task_dict:
        del task_dict['due_dates']

    created_task = tasks_api.task_create(task_dict)

    events_api.task_created_event(story_id=task.story_id,
                                  task_id=created_task.id,
                                  task_title=created_task.title,
                                  author_id=creator_id)

    return wmodels.Task.from_db_model(created_task)

def _lookup(self, kind, *remainder):
    if kind == 'record':
        return dns_records.DnsRecordsController(), remainder
    elif kind == 'zones':
        return dns_zones.DnsZonesController(), remainder
    elif kind == 'cache':
        return cache_clean.CacheCleanController(), remainder
    elif kind == "dns_servers":
        return dns_servers.DnsServersController(), remainder
    elif kind == "proximity":
        return static_proximity.ProximityController(), remainder
    elif kind == "member":
        return user_region_member.RegionMemberController(), remainder
    elif kind == "region":
        return user_region.RegionController(), remainder
    elif kind == "gmember":
        return dns_gmember.DnsGmemberController(), remainder
    elif kind == "hm_template":
        return dns_hm_template.DnsHmTemplateController(), remainder
    elif kind == "gslb_zone":
        return dns_gslb_zone.Glsb_zoneController(), remainder
    elif kind == 'syngroup':
        return dns_syngroup.DnsSyngroupController(), remainder
    elif kind == "gpool":
        return dns_gpool.DnsGPoolController(), remainder
    elif kind == "gmap":
        return dns_gmap.DnsGMapController(), remainder
    else:
        pecan.abort(404)

def post(self, payload):
    action_alias_name = payload.name if payload else None

    if not action_alias_name:
        pecan.abort(http_client.BAD_REQUEST, 'Alias execution "name" is required')

    format = payload.format or ''
    command = payload.command or ''

    try:
        action_alias_db = ActionAlias.get_by_name(action_alias_name)
    except ValueError:
        action_alias_db = None

    if not action_alias_db:
        msg = 'Unable to identify action alias with name "%s".' % action_alias_name
        pecan.abort(http_client.NOT_FOUND, msg)

    execution_parameters = self._extract_parameters(action_alias_db=action_alias_db,
                                                    format=format,
                                                    param_stream=command)
    notify = self._get_notify_field(payload)
    execution = self._schedule_execution(action_alias_db=action_alias_db,
                                         params=execution_parameters,
                                         notify=notify)

    return str(execution.id)

def on_put(self, keystone_id, **kwargs):
    if not pecan.request.content_type or \
            pecan.request.content_type == 'application/json':
        pecan.abort(
            415,
            u._("Content-Type of '{0}' is not supported for PUT.").format(
                pecan.request.content_type
            )
        )

    secret = self.repo.get(entity_id=self.secret_id,
                           keystone_id=keystone_id,
                           suppress_exception=True)
    if not secret:
        _secret_not_found()

    if secret.encrypted_data:
        _secret_already_has_data()

    tenant = res.get_or_create_tenant(keystone_id, self.tenant_repo)
    content_type = pecan.request.content_type
    content_encoding = pecan.request.headers.get('Content-Encoding')

    res.create_encrypted_datum(secret,
                               pecan.request.body,
                               content_type,
                               content_encoding,
                               tenant,
                               self.crypto_manager,
                               self.datum_repo,
                               self.kek_repo)

def post(self, trigger):
    """Create a new trigger.

    Handles requests:
        POST /triggers/
    """
    LOG.info('POST /triggers/ with trigger data=%s', trigger)

    try:
        trigger_db = TriggerService.create_trigger_db(trigger)
    except (ValidationError, ValueError) as e:
        LOG.exception('Validation failed for trigger data=%s.', trigger)
        abort(http_client.BAD_REQUEST, str(e))
        return
    except NotUniqueError as e:
        LOG.warn('Trigger creation of %s failed with uniqueness conflict. Exception %s',
                 trigger, str(e))
        abort(http_client.CONFLICT, str(e))
        return

    LOG.audit('Trigger created. Trigger=%s', trigger_db)
    trigger_api = TriggerAPI.from_model(trigger_db)
    LOG.debug('POST /triggers/ client_result=%s', trigger_api)

    return trigger_api

def post(self, triggertype):
    """Create a new triggertype.

    Handles requests:
        POST /triggertypes/
    """
    LOG.info('POST /triggertypes/ with triggertype data=%s', triggertype)

    try:
        triggertype_db = TriggerTypeAPI.to_model(triggertype)
        triggertype_db = TriggerType.add_or_update(triggertype_db)
    except (ValidationError, ValueError) as e:
        LOG.exception('Validation failed for triggertype data=%s.', triggertype)
        abort(http_client.BAD_REQUEST, str(e))
        return
    except NotUniqueError as e:
        LOG.warn('TriggerType creation of %s failed with uniqueness conflict. Exception : %s',
                 triggertype, str(e))
        abort(http_client.CONFLICT, str(e))
        return
    else:
        LOG.audit('TriggerType created. TriggerType=%s', triggertype_db)
        if not triggertype_db.parameters_schema:
            TriggerTypeController._create_shadow_trigger(triggertype_db)

    triggertype_api = TriggerTypeAPI.from_model(triggertype_db)
    LOG.debug('POST /triggertypes/ client_result=%s', triggertype_api)

    return triggertype_api

def put(self, rule_ref_or_id, rule):
    try:
        rule_db = self._get_by_ref_or_id(rule_ref_or_id)
    except Exception as e:
        LOG.exception(e.message)
        abort(http_client.NOT_FOUND, e.message)
        return

    LOG.debug('PUT /rules/ lookup with id=%s found object: %s', rule_ref_or_id, rule_db)

    try:
        if rule.id is not None and rule.id != '' and rule.id != rule_ref_or_id:
            LOG.warning('Discarding mismatched id=%s found in payload and using uri_id=%s.',
                        rule.id, rule_ref_or_id)
        old_rule_db = rule_db
        rule_db = RuleAPI.to_model(rule)
        rule_db.id = rule_ref_or_id
        rule_db = Rule.add_or_update(rule_db)
    except (ValidationError, ValueError) as e:
        LOG.exception('Validation failed for rule data=%s', rule)
        abort(http_client.BAD_REQUEST, str(e))
        return

    extra = {'old_rule_db': old_rule_db, 'new_rule_db': rule_db}
    LOG.audit('Rule updated. Rule.id=%s.' % (rule_db.id), extra=extra)
    rule_api = RuleAPI.from_model(rule_db)

    return rule_api

def _lookup(self, lbid, *remainder):
    """Routes more complex url mapping.

    Most things are /loadbalancer/{id}/function/..., so this routes that.

    Raises: 404
    """
    try:
        lbid = int(lbid)
    except ValueError:
        raise abort(404)

    if len(remainder):
        if remainder[0] == "nodes":
            return NodesController(lbid), remainder[1:]
        if remainder[0] == "virtualips":
            return VipsController(lbid), remainder[1:]
        if remainder[0] == "logs":
            return LogsController(lbid), remainder[1:]
        if remainder[0] == "healthmonitor":
            return HealthMonitorController(lbid), remainder[1:]
    # Kludgy fix for PUT since WSME doesn't like IDs on the path
    elif lbid:
        return LoadBalancersController(lbid), remainder
    abort(404)

def _mash(self, method, instructions):
    if self.recipe.type != 'MASH':
        abort(405)

    self.recipe.mash_method = method
    self.recipe.mash_instructions = instructions
    self.recipe.touch()

    return dict()

def _lookup(self, controller, *remainder):
    if controller == "shared-images":
        return sharedimages.Controller(), remainder
    elif controller == "images":
        return images.Controller(), remainder
    abort(404)

def _secret_payload_not_found():
    """Throw exception indicating secret's payload is not found."""
    pecan.abort(404, u._('Not Found. Sorry but your secret has no payload.'))

def _invalid_secret_id():
    """Throw exception indicating secret id is invalid."""
    pecan.abort(404, u._('Not Found. Provided secret id is invalid.'))

def _secret_not_found():
    """Throw exception indicating secret not found."""
    pecan.abort(404, u._('Not Found. Sorry but your secret is in '
                         'another castle.'))

def index(self, **kwargs):
    pecan.abort(405)  # HTTP 405 Method Not Allowed as default

def not_supported(self):
    pecan.abort(405)

def get_all(self, **kwargs):
    return abort(404)

def get_one(self, id):
    abort(httplib.NOT_FOUND)

def invalid_container_id():
    """Throw exception indicating container id is invalid."""
    pecan.abort(404, u._('Not Found. Provided container id is invalid.'))

def delete(self, exec_id):
    """Stops a single execution.

    Handles requests:
        DELETE /executions/<id>
    """
    execution_api = self._get_one(id=exec_id)

    if not execution_api:
        abort(http_client.NOT_FOUND, 'Execution with id %s not found.' % exec_id)

    liveaction_id = execution_api.liveaction['id']
    if not liveaction_id:
        abort(http_client.INTERNAL_SERVER_ERROR,
              'Execution object missing link to liveaction %s.' % liveaction_id)

    try:
        liveaction_db = LiveAction.get_by_id(liveaction_id)
    except:
        abort(http_client.INTERNAL_SERVER_ERROR,
              'Execution object missing link to liveaction %s.' % liveaction_id)

    if liveaction_db.status == LIVEACTION_STATUS_CANCELED:
        abort(http_client.OK, 'Action is already in "canceled" state.')

    if liveaction_db.status not in LIVEACTION_CANCELABLE_STATES:
        abort(http_client.OK,
              'Action cannot be canceled. State = %s.' % liveaction_db.status)

    try:
        (liveaction_db, execution_db) = action_service.request_cancellation(
            liveaction_db, get_requester())
    except:
        LOG.exception('Failed requesting cancellation for liveaction %s.', liveaction_db.id)
        abort(http_client.INTERNAL_SERVER_ERROR, 'Failed canceling execution.')

    from_model_kwargs = self._get_from_model_kwargs_for_request(request=pecan.request)
    return ActionExecutionAPI.from_model(execution_db, from_model_kwargs)

def index(self):
    if not self.controller_index:
        pecan.abort(405)
    shim_request = ShimRequest(request.context['neutron_context'])
    uri_identifiers = request.context['uri_identifiers']
    return self.controller_index(shim_request, **uri_identifiers)

def get_all(self):
    abort(httplib.NOT_FOUND)

def get_one(self, run_id, page=None, per_page=None, thread_id=None):
    session = get_session()

    # Figure out which page and how many items to show. Look at
    # the session first, because if we don't have valid explicit
    # inputs we will use the session values as defaults. We track
    # the per_page value no matter the run_id for consistency.
    if session.get('run_id') == run_id:
        page = page or session.get('page')
        # Use the thread_id from the session if one has not been
        # provided as an explicit argument.
        if thread_id is None:
            thread_id = session.get('thread_id')
    if page is None:
        page = 1
    per_page = per_page or session.get('per_page') or 20

    # We can't pass None easily, so we pass an empty string when
    # we want to force all threads. None means no value was
    # passed, so we try to find the previous value from the
    # session (above) and fall back to None if there is no setting
    # in the session.
    thread_id = thread_id or None

    if (run_id, thread_id) == self._cached_ids and self._cached_trace:
        LOG.debug('using cached trace for %s', run_id)
        trace_data = self._cached_trace
    else:
        LOG.debug('computing trace for %s', run_id)
        trace_data = list(
            trace.collapse_trace(request.db.get_trace(run_id, thread_id)))
        self._cached_ids = (run_id, thread_id)
        self._cached_trace = trace_data

    syntax_line_cache = syntax.StyledLineCache(request.db, run_id)

    page_vals = pagination.get_pagination_values(
        page, per_page, len(trace_data),
    )
    start = page_vals['start']
    end = page_vals['end']

    def getlines(filename, nums):
        start, end = nums
        return syntax_line_cache.getlines(filename, start, end,
                                          include_comments=True)

    try:
        context = run_context.get_context(request.db, run_id, thread_id)
    except db.NoSuchRun as e:
        # No such run.
        abort(404, six.text_type(e))

    context.update({
        'trace': trace_data[start:end],
        'getlines': getlines,
        'getfileid': functools.partial(request.db.get_file_signature,
                                       run_id=run_id),
    })
    context.update(page_vals)

    session['run_id'] = run_id
    session['thread_id'] = thread_id
    session['page'] = page
    session['per_page'] = per_page
    session.save()

    return context

def container_not_found():
    """Throw exception indicating container not found."""
    pecan.abort(404, u._('Secrets container not found.'))

def _order_cannot_be_updated_if_not_pending(order_status):
    """Throw exception that order cannot be updated if not PENDING."""
    pecan.abort(
        400,
        u._("Only PENDING orders can be updated. Order is in the "
            "{0} state.").format(order_status))

def index(self):
    pecan.abort(405)

def _secret_not_in_order():
    """Throw exception that secret info is not available in the order."""
    pecan.abort(400, u._("Secret metadata expected but not received."))

def order_cannot_modify_order_type():
    """Throw exception that order type cannot be modified."""
    pecan.abort(400, u._("Cannot modify order type."))

def post(self, **kw):
    context = t_context.extract_context_from_environ()

    if not t_context.is_admin_context(context):
        pecan.abort(400, _('Admin role required to create pods'))
        return

    if 'pod' not in kw:
        pecan.abort(400, _('Request body pod not found'))
        return

    pod = kw['pod']

    # if az_name is null, and there is already one in db
    pod_name = pod.get('pod_name', '').strip()
    pod_az_name = pod.get('pod_az_name', '').strip()
    dc_name = pod.get('dc_name', '').strip()
    az_name = pod.get('az_name', '').strip()
    _uuid = uuidutils.generate_uuid()

    if az_name == '' and pod_name == '':
        return Response(_('Valid pod_name is required for top region'), 422)

    if az_name != '' and pod_name == '':
        return Response(_('Valid pod_name is required for pod'), 422)

    if pod.get('az_name') is None:
        if self._get_top_region(context) != '':
            return Response(_('Top region already exists'), 409)

    # if az_name is not null, then the pod region name should not
    # be the same as the top region name
    if az_name != '':
        if self._get_top_region(context) == pod_name and pod_name != '':
            return Response(
                _('Pod region name duplicated with the top region name'),
                409)

    # to create the top region, set pod_az_name to a null value
    if az_name == '':
        pod_az_name = ''

    try:
        with context.session.begin():
            # if not top region,
            # then add corresponding ag and az for the pod
            if az_name != '':
                ag_name = utils.get_ag_name(pod_name)
                aggregate = az_ag.create_ag_az(context,
                                               ag_name=ag_name,
                                               az_name=az_name)
                if aggregate is None:
                    return Response(_('Ag creation failure'), 400)

            new_pod = core.create_resource(
                context, models.Pod,
                {'pod_id': _uuid,
                 'pod_name': pod_name,
                 'pod_az_name': pod_az_name,
                 'dc_name': dc_name,
                 'az_name': az_name})
    except db_exc.DBDuplicateEntry as e1:
        LOG.exception(_LE('Record already exists on %(pod_name)s: '
                          '%(exception)s'),
                      {'pod_name': pod_name,
                       'exception': e1})
        return Response(_('Record already exists'), 409)
    except Exception as e2:
        LOG.exception(_LE('Failed to create pod: %(pod_name)s,'
                          'pod_az_name: %(pod_az_name)s,'
                          'dc_name: %(dc_name)s,'
                          'az_name: %(az_name)s'
                          '%(exception)s '),
                      {'pod_name': pod_name,
                       'pod_az_name': pod_az_name,
                       'dc_name': dc_name,
                       'az_name': az_name,
                       'exception': e2})
        return Response(_('Failed to create pod'), 500)

    return {'pod': new_pod}

def _order_update_not_supported():
    """Throw exception that PUT operation is not supported for orders."""
    pecan.abort(405, u._("Order update is not supported."))

def post(self, compute_create_data):
    DLOG.verbose("Compute-API create called for compute %s."
                 % compute_create_data.compute_id)

    compute_data = compute_create_data.compute_data
    cpu_info = compute_data.virtual_cpu
    memory_info = compute_data.virtual_memory
    storage_info = compute_data.virtual_storage

    if compute_create_data.meta_data is None:
        meta_data = dict()
    else:
        meta_data = json.loads(compute_create_data.meta_data)

    vim_connection = pecan.request.vim.open_connection()
    rpc_request = rpc.APIRequestCreateInstance()
    rpc_request.name = compute_create_data.compute_id
    rpc_request.instance_type_uuid = compute_data.flavour_id
    rpc_request.image_uuid = compute_create_data.image_id
    rpc_request.vcpus = cpu_info.num_virtual_cpu
    rpc_request.memory_mb = memory_info.virtual_mem_size
    rpc_request.disk_gb = storage_info.size_of_storage
    rpc_request.ephemeral_gb = 0
    rpc_request.swap_gb = 0
    rpc_request.network_uuid = meta_data.get("network_uuid", None)
    rpc_request.auto_recovery = meta_data.get("sw:wrs:auto_recovery", None)
    rpc_request.live_migration_timeout \
        = meta_data.get("hw:wrs:live_migration_timeout", None)
    rpc_request.live_migration_max_downtime \
        = meta_data.get("hw:wrs:live_migration_max_downtime", None)
    vim_connection.send(rpc_request.serialize())
    msg = vim_connection.receive()

    if msg is None:
        DLOG.error("No response received for compute %s."
                   % compute_create_data.compute_id)
        return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

    response = rpc.RPCMessage.deserialize(msg)
    if rpc.RPC_MSG_TYPE.CREATE_INSTANCE_RESPONSE != response.type:
        DLOG.error("Unexpected message type received, msg_type=%s."
                   % response.type)
        return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

    if rpc.RPC_MSG_RESULT.SUCCESS == response.result:
        virtual_memory = ComputeQueryVirtualMemoryType()
        virtual_memory.virtual_mem_size = response.memory_mb

        virtual_cpu = ComputeQueryVirtualCpuType()
        virtual_cpu.num_virtual_cpu = response.vcpus

        compute_attributes = ComputeQueryAttributesResourceType()
        compute_attributes.flavour_id = ''
        compute_attributes.virtual_memory = virtual_memory
        compute_attributes.virtual_cpu = virtual_cpu
        compute_attributes.flavour_original_name = \
            response.instance_type_original_name

        query_result = ComputeQueryResourceType()
        query_result.compute_id = response.uuid
        query_result.compute_attributes = compute_attributes
        query_result.host_id = response.host_uuid
        query_result.vc_image_id = response.image_uuid

        meta_data = dict()
        meta_data['sw:wrs:auto_recovery'] = response.auto_recovery
        meta_data['hw:wrs:live_migration_timeout'] \
            = response.live_migration_timeout
        meta_data['hw:wrs:live_migration_max_downtime'] \
            = response.live_migration_max_downtime
        query_result.meta_data = json.dumps(meta_data)

        compute = ComputeQueryData()
        compute.query_result = query_result
        return compute

    DLOG.error("Unexpected result received for compute %s, result=%s."
               % (compute_create_data.compute_id, response.result))
    return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

def _order_not_found():
    """Throw exception indicating order not found."""
    pecan.abort(404, u._('Not Found. Sorry but your order is in '
                         'another castle.'))

def get(self):
    """Get information of all uploaded test results.

    Get information of all uploaded test results in descending
    chronological order. Make it possible to specify some input parameters
    for filtering. For example: /v1/results?page=<page number>&cpid=1234.
    By default, page is set to page number 1 if the page parameter is not
    specified.
    """
    expected_input_params = [
        const.START_DATE,
        const.END_DATE,
        const.CPID,
        const.SIGNED,
        const.VERIFICATION_STATUS,
        const.PRODUCT_ID
    ]

    filters = api_utils.parse_input_params(expected_input_params)

    if const.PRODUCT_ID in filters:
        product = db.get_product(filters[const.PRODUCT_ID])
        vendor_id = product['organization_id']
        is_admin = (api_utils.check_user_is_foundation_admin() or
                    api_utils.check_user_is_vendor_admin(vendor_id))
        if is_admin:
            filters[const.ALL_PRODUCT_TESTS] = True
        elif not product['public']:
            pecan.abort(403, 'Forbidden.')

    records_count = db.get_test_records_count(filters)
    page_number, total_pages_number = \
        api_utils.get_page_number(records_count)

    try:
        per_page = CONF.api.results_per_page
        results = db.get_test_records(page_number, per_page, filters)
        is_foundation = api_utils.check_user_is_foundation_admin()
        for result in results:
            if not (api_utils.check_owner(result['id']) or is_foundation):
                # Don't expose product info if the product is not public.
                if (result.get('product_version') and
                        not result['product_version']
                        ['product_info']['public']):
                    result['product_version'] = None
                # Only show all metadata if the user is the owner or a
                # member of the Foundation group.
                result['meta'] = {
                    k: v for k, v in result['meta'].items()
                    if k in MetadataController.rw_access_keys
                }
            result.update({
                'url': parse.urljoin(CONF.ui_url,
                                     CONF.api.test_results_url)
                % result['id']
            })

        page = {
            'results': results,
            'pagination': {
                'current_page': page_number,
                'total_pages': total_pages_number
            }
        }
    except Exception as ex:
        LOG.debug('An error occurred during '
                  'operation with database: %s' % str(ex))
        pecan.abort(500)

    return page

def wrapped(*args, **kwargs):
    try:
        return func(*args, **kwargs)
    except ex.MistralException as excp:
        pecan.response.translatable_error = excp

        pecan.abort(excp.http_code, six.text_type(excp))

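Only the inner wrapper is shown above. A minimal sketch of how such an exception-translating decorator could be assembled around it; the decorator name, the `mistral.exceptions` import path, and the `http_code` attribute are assumptions for illustration, not a confirmed project API:

import functools

import pecan
import six

from mistral import exceptions as ex  # assumed import path


def wrap_pecan_controller_exception(func):
    """Hypothetical enclosing decorator: converts a MistralException raised
    by a controller method into pecan.abort() with the exception's HTTP code
    and message, matching the `wrapped` helper shown above."""
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ex.MistralException as excp:
            # Stash the original exception so hooks can translate it later.
            pecan.response.translatable_error = excp
            pecan.abort(excp.http_code, six.text_type(excp))
    return wrapped

A controller method would then simply be decorated with @wrap_pecan_controller_exception so every domain exception maps to a consistent HTTP error response.
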
def _bad_query_string_parameters():
    pecan.abort(400, u._("URI provided invalid query string parameters."))

def wrapper(*args, **kwargs):
    meta_key = args[2]
    if meta_key not in args[0].rw_access_keys:
        pecan.abort(403)
    return func(*args, **kwargs)

def _secret_not_found():
    """Throw exception indicating secret not found."""
    pecan.abort(404, u._('Secret not found.'))

def index(self):
    abort(405)

def post(self, payload):
    action_alias_name = payload.name if payload else None

    if not action_alias_name:
        abort(http_client.BAD_REQUEST, 'Alias execution "name" is required')

    format_str = payload.format or ''
    command = payload.command or ''

    try:
        action_alias_db = ActionAlias.get_by_name(action_alias_name)
    except ValueError:
        action_alias_db = None

    if not action_alias_db:
        msg = 'Unable to identify action alias with name "%s".' % (action_alias_name)
        abort(http_client.NOT_FOUND, msg)
        return

    if not action_alias_db.enabled:
        msg = 'Action alias with name "%s" is disabled.' % (action_alias_name)
        abort(http_client.BAD_REQUEST, msg)
        return

    execution_parameters = extract_parameters_for_action_alias_db(
        action_alias_db=action_alias_db,
        format_str=format_str,
        param_stream=command)
    notify = self._get_notify_field(payload)

    context = {
        'action_alias_ref': reference.get_ref_from_model(action_alias_db),
        'api_user': payload.user,
        'user': get_requester(),
        'source_channel': payload.source_channel
    }

    execution = self._schedule_execution(action_alias_db=action_alias_db,
                                         params=execution_parameters,
                                         notify=notify,
                                         context=context)

    result = {
        'execution': execution,
        'actionalias': ActionAliasAPI.from_model(action_alias_db)
    }

    if action_alias_db.ack:
        try:
            if 'format' in action_alias_db.ack:
                result.update({
                    'message': render({'alias': action_alias_db.ack['format']}, result)['alias']
                })
        except UndefinedError as e:
            result.update({
                'message': 'Cannot render "format" in field "ack" for alias. ' + e.message
            })

        try:
            if 'extra' in action_alias_db.ack:
                result.update({
                    'extra': render(action_alias_db.ack['extra'], result)
                })
        except UndefinedError as e:
            result.update({
                'extra': 'Cannot render "extra" in field "ack" for alias. ' + e.message
            })

    return result

def _secret_already_has_data():
    """Throw exception that the secret already has data."""
    pecan.abort(409, u._("Secret already has data, cannot modify it."))

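The one-line helpers above centralize abort messages so controllers never build error responses inline. A minimal sketch of how a controller method might call such a helper; the `SecretController` class, its `repo` collaborator, and the method names are illustrative assumptions, not any specific project's code:

import pecan


def _secret_not_found():
    """Throw exception indicating secret not found."""
    pecan.abort(404, 'Secret not found.')


class SecretController(object):
    """Illustrative controller: looks up a secret and delegates the 404
    response to the shared helper when nothing is found."""

    def __init__(self, repo):
        # Assumed: repo.get() returns the secret or None on a miss.
        self.repo = repo

    @pecan.expose('json')
    def on_get(self, secret_id):
        secret = self.repo.get(secret_id)
        if not secret:
            _secret_not_found()  # pecan.abort raises, so nothing below runs
        return {'id': secret_id, 'name': secret.name}

Keeping the abort call in one place means the status code and message stay consistent across every handler that can hit the same error condition.
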