def parse(self, annotation, data):
    """Validate *data* against the JSON schema registered for *annotation*.

    Raises AXIllegalArgumentException when the annotation has no registered
    schema or when the data fails schema validation.
    """
    try:
        schema = self._annotations.get(annotation)
        if not schema:
            raise AXIllegalArgumentException(
                "annotation {} is not supported".format(annotation))
        validate(data, schema)
    except ValidationError as e:
        # Surface validation failures as our own argument exception,
        # keeping the original error attached for debugging.
        raise AXIllegalArgumentException(e.message, detail=e)
def subdomain_name_check(subdomain):
    """Raise AXIllegalArgumentException unless *subdomain* matches
    subdomain_re in its entirety (a prefix match is not enough)."""
    mobj = subdomain_re.match(subdomain)
    # Both failure modes (no match / partial match) raise the same message.
    if mobj is None or len(mobj.group(0)) != len(subdomain):
        raise AXIllegalArgumentException(
            "subdomain {} must match regex {}".format(subdomain, subdomain_re.pattern))
def update_fixture_class(self, fix_class):
    """Updates an existing fixture class into database.

    :param fix_class: dict carrying at least the 'id' of a class that is
        already registered in self._fixture_classes.
    :raises AXIllegalArgumentException: if 'id' is missing.
    :raises AXException: if no class with that id exists.
    """
    if not fix_class.get('id'):
        raise AXIllegalArgumentException("Class id required for updates")
    # Explicit check instead of `assert`: asserts are stripped under -O,
    # which would turn a missing class into an opaque KeyError below.
    # Matches the error-handling style of update_fixture_instance.
    if fix_class['id'] not in self._fixture_classes:
        raise AXException("Class does not exist")
    # Deep-copy so later mutation of the caller's dict cannot alter our state.
    fix_class = copy.deepcopy(fix_class)
    self._fixture_classes[fix_class['id']].update(fix_class)
def __init__(self, route53, name, aws_zone=None):
    """
    Args:
        route53: Route53 client wrapper object
        name: The name of the hosted zone
        aws_zone: optional pre-fetched AWS hosted-zone dict (provides "Id")
    """
    if not isinstance(route53, Route53):
        # BUG FIX: the original assert message formatted type(Route53),
        # which prints the metaclass, not the expected class. Also use an
        # explicit raise so validation survives `python -O`.
        raise AXIllegalArgumentException(
            "Instance of client needs to be of type {}".format(Route53))
    self.client = route53
    # Route53 stores zone names with a trailing dot; validate without it.
    if name.endswith("."):
        check_name = name[:-1]
        self.name = name
    else:
        check_name = name
        self.name = name + "."
    if not hostname_validator(check_name):
        raise AXIllegalArgumentException(
            "Hosted Zone {} needs to be a valid domain name".format(name))
    self._id = None
    if aws_zone:
        self._id = aws_zone["Id"]
def login(self, registry, username, password):
    """Log into a docker registry.

    Returns a base64 encoded "username:password" token only if login is
    successful. Raises AXUnauthorizedException on 401, AXNotFoundException
    on 404, AXIllegalArgumentException on a 500 caused by an unknown CA
    certificate, and re-raises the original APIError otherwise.
    """
    try:
        self._conn.login(username, password=password, registry=registry, reauth=True)
    except APIError as e:
        status = e.response.status_code
        if status == 401:
            # docker surfaces bad credentials as
            # "401 Client Error: Unauthorized"
            raise AXUnauthorizedException(e.explanation)
        if status == 404:
            raise AXNotFoundException(e.explanation)
        if status == 500 and "x509: certificate signed by unknown authority" in e.response.text:
            raise AXIllegalArgumentException(
                "Certificate signed by unknown authority for {}".format(registry))
        raise e
    return base64.b64encode("{}:{}".format(username, password))
def volumepool_get_volume(pool_name):
    """HTTP handler: lease a volume from *pool_name*, tagged with the
    mandatory ?ref= query parameter."""
    ref = request.args.get('ref')
    if ref is None:
        raise AXIllegalArgumentException("Volumepool get must contain a ref=something query parameter")
    volname = VolumeManager().get_from_pool(pool_name, ref)
    return jsonify(result=volname)
def post(cls):
    """Email API for sending emails.

    Takes a json body containing the email information. If a custom 'smtp'
    config is supplied inline it is used directly; otherwise every active
    SMTP configuration in the database is tried in turn.
    """
    logger.info("Received email sending request: %s", Common.pretty_json(request.json))
    email_info = {}
    try:
        email_info = email_schema(request.json)
    except Exception as exc:
        # BUG FIX: `except MultipleInvalid or Exception` evaluated to
        # `except MultipleInvalid` only, so any other parse failure escaped
        # as a 500 instead of the intended 400.
        logger.exception("Fail to parse argument. %s", str(exc))
        abort(400, **(AXIllegalArgumentException(str(exc)).json()))
    # If customized smtp configuration is supplied in the payload
    if email_info.get('smtp'):
        smtp_info = email_info['smtp']
        try:
            cls.send_mail_smtp(smtp_info, email_content=email_info, test=False)
        except Exception as exc:
            logger.exception("Fail to send email. %s", str(exc))
            abort(400, **(AXIllegalOperationException(str(exc)).json()))
        return {}
    else:
        # Using the configs in the database
        smtp_result = Configuration.query.filter_by(active=True, type='smtp')
        if smtp_result.count() == 0:
            error_msg = \
                "No active SMTP Server found. Please configure " \
                "it in the configuration page."
            logger.exception(error_msg)
            abort(400, **(AXIllegalOperationException(error_msg).json()))
        # Try each active config; first success wins, failures accumulate.
        error_msg = ""
        for smtp_info in smtp_result:
            try:
                cls.send_mail_smtp(json.loads(smtp_info.config), email_content=email_info, test=False)
                return {}
            except Exception as exc:
                error_msg += str(exc)
                logger.exception("Fail to send email. %s", str(exc))
        logger.error("Fail to send email. %s", error_msg)
        msg = "Fail to send email using the current SMTP configuration settings. " \
              "Please check the Notification section in Configurations."
        abort(
            400,
            **(AXIllegalOperationException(message=msg, detail=error_msg).json()))
def update_fixture_instance(self, instance):
    """Updates an existing instance in the database."""
    instance_id = instance.get('id')
    if not instance_id:
        raise AXIllegalArgumentException(
            "Instance id required for updates")
    existing = self._fixture_instances.get(instance_id)
    if not existing:
        raise AXException("Instance does not exists")
    existing.update(instance)
    # NOTE(review): this serializes the incoming dict, not the merged
    # `existing` record — confirm that is intended.
    self._serialize_json_attrs(instance)
def update_volume(self, volume):
    """Insert or update a volume record keyed by its 'id'."""
    volume_id = volume.get('id')
    if not volume_id:
        raise AXIllegalArgumentException("Volume id required for updates")
    existing = self._volumes.get(volume_id)
    if not existing:
        # New volume: store the dict as-is.
        self._volumes[volume_id] = volume
    else:
        existing.update(volume)
    self._serialize_json_attrs(self._volumes[volume_id])
def post(cls):
    """Test an SMTP configuration supplied in the request body by
    attempting a real send in test mode."""
    logger.info("Received POST configuration test request")
    config, config_detail = Common.parse_smtp_argument(request.json, test=True)
    if config['type'] == 'smtp':
        try:
            EmailNotificationAPI.send_mail_smtp(config_detail, test=True)
        except Exception as exc:
            logger.exception("Internal Error. %s", str(exc))
            abort(400, **(AXIllegalArgumentException(str(exc)).json()))
    return config_detail
def __init__(self, dnsname):
    """Split *dnsname* into host + domain and bind a Route53 hosted zone
    for the domain part."""
    if not hostname_validator(dnsname):
        raise AXIllegalArgumentException("dns name {} is illegal".format(dnsname))
    self.name, _, self.domain = dnsname.partition(".")
    self.ax_meta = {}
    # 60s connect/read timeouts for the route53 client
    boto_cfg = Config(connect_timeout=60, read_timeout=60)
    route53_client = Route53(boto3.client("route53", config=boto_cfg))
    self.zone = Route53HostedZone(route53_client, self.domain)
def __init__(self, name, boto_client=None):
    """Validate the ELB name and load managed-ELB configuration.

    :param name: load balancer name (a-zA-Z0-9 and inner hyphens, <=32 chars)
    :param boto_client: optional pre-built boto "elb" client (for testing)
    """
    if not NAME_REGEX.match(name):
        raise AXIllegalArgumentException("Name can only contain a-zA-Z0-9 and hyphen and not start or end with hyphen. Max length is 32 characters")
    self.name = name
    self._npm = NodePortManager()
    self._vars = ManagedElbVars()
    (self.name_id, self.bucket, self.terraform_dir, self.region,
     self.placement, self.trusted_cidrs, self.s3) = self._vars.get_vars()
    # Build a default client in our region unless one was injected.
    self._boto = boto_client if boto_client is not None else boto3.client("elb", region_name=self.region)
def parse_smtp_argument(cls, argument, test=False):
    """Validate a configuration payload and derive its SMTP details.

    :param argument: json dict of the configuration
    :param test: when True, tolerate a missing 'id' by stubbing it to "NA"
    :returns: (config, config_detail) tuple
    Aborts with 400 on schema errors or an unsupported notification type.
    """
    if test and 'id' not in argument:
        argument['id'] = "NA"
    try:
        config = configuration_schema(argument)
        config_detail = config
        if config['type'] == 'smtp':
            config_detail = smtp_schema(argument)
    except Exception as exc:
        # BUG FIX: `except MultipleInvalid or Exception` evaluated to
        # `except MultipleInvalid` only, letting other schema errors escape
        # as 500s. The try is narrowed to the schema calls so that the
        # abort() below is not swallowed by this handler.
        logger.exception("Fail to parse argument. %s", str(exc))
        abort(400, **(AXIllegalArgumentException(str(exc)).json()))
    if config['type'] != 'smtp':
        error_msg = \
            "Unsupported notification type: {}".format(config['type'])
        logger.error(error_msg)
        abort(400, **(AXIllegalArgumentException(error_msg).json()))
    if not config_detail['port']:
        # Default ports: 465 when TLS is enabled, 25 for plain SMTP.
        config_detail['port'] = 465 if config_detail['use_tls'] else 25
    return config, config_detail
def substitute_attributes(value, instance):
    """Substitutes occurrences of %%attributes.attr_name%% with instance attributes.

    Resolution order: the instance.attributes mapping, then the 'id'/'name'/
    'status' fields, then %%attributes.class%% with the class name.

    :param value: template string possibly containing placeholders
    :param instance: object exposing .attributes (dict), .id, .name,
        .status and .class_name
    :raises AXIllegalArgumentException: if any %%attributes.*%% placeholder
        remains unresolved after substitution.
    """
    for attr_name, attr_val in instance.attributes.items():
        value = value.replace("%%attributes.{}%%".format(attr_name), str(attr_val))
    for field in ['id', 'name', 'status']:
        # BUG FIX: wrap in str() so non-string field values (e.g. numeric
        # ids) do not crash str.replace(), matching the attribute loop above.
        value = value.replace("%%attributes.{}%%".format(field), str(getattr(instance, field)))
    value = value.replace("%%attributes.class%%", instance.class_name)
    remaining = re.search(r"(%%attributes\.(\w+)%%)", value)
    if remaining:
        raise AXIllegalArgumentException(
            "Template parameters had unresolvable attributes: {}".format(
                remaining.group(1)))
    return value
def put(cls):
    """Create the configuration when the id is new, otherwise update the
    existing configuration with that id."""
    logger.info("Received PUT config list request.")
    config_result = {}
    try:
        config_result = Configuration.query.filter(
            Configuration.id == request.json['id'])
    except Exception as exc:
        logger.exception(str(exc))
        abort(400, **(AXIllegalArgumentException(str(exc)).json()))
    if config_result.count() == 0:
        # Unknown id: delegate to the create path.
        return SMTPConfigurationListAPI.post()
    return SMTPConfigurationAPI.put(request.json['id'])
def __init__(self, resource_id, category, resource, ttl, timestamp=None, detail=None):
    """Build a workflow resource record.

    :param resource_id: non-empty identifier of the resource
    :param category: non-empty category string
    :param resource: payload wrapped in AXWorkflowResource
    :param ttl: time-to-live, coerced to int
    :param timestamp: optional creation timestamp
    :param detail: optional dict of extra detail (defaults to {})
    :raises AXIllegalArgumentException: on missing/invalid parameters.
    """
    try:
        # Explicit raise instead of `assert`, which is stripped under -O;
        # the message matches the original assertion message exactly.
        if not (resource_id and category and resource and ttl):
            raise ValueError('missing parameters')
        self.resource_id = resource_id
        self.category = category
        self.resource = AXWorkflowResource(resource)
        self.ttl = int(ttl)
        self.timestamp = timestamp
        self.detail = detail if detail else {}
    except Exception as e:
        raise AXIllegalArgumentException(str(e))
def _generate_probe_spec(spec):
    """Build a kubernetes V1Probe from an argo container probe.

    @type spec argo.template.v1.container.ContainerProbe
    Returns None when the probe defines neither an exec nor an httpGet
    action. Raises AXIllegalArgumentException on any processing error.
    """
    try:
        probe = swagger_client.V1Probe()
        probe.initial_delay_seconds = spec.initial_delay_seconds
        probe.timeout_seconds = spec.timeout_seconds
        probe.period_seconds = spec.period_seconds
        probe.failure_threshold = spec.failure_threshold
        probe.success_threshold = spec.success_threshold
        if spec.exec_probe:
            exec_action = swagger_client.V1ExecAction()
            exec_action.command = shlex.split(spec.exec_probe.command)
            # the swagger model exposes the exec field as `_exec`
            probe._exec = exec_action
            return probe
        if spec.http_get:
            http_action = swagger_client.V1HTTPGetAction()
            http_action.path = spec.http_get.path
            http_action.port = spec.http_get.port
            http_action.http_headers = []
            for hdr in spec.http_get.http_headers or []:
                header_obj = swagger_client.V1HTTPHeader()
                header_obj.name = hdr["name"]
                header_obj.value = hdr["value"]
                http_action.http_headers.append(header_obj)
            probe.http_get = http_action
            return probe
        logger.debug("Cannot handle probe {}".format(spec))
    except Exception as e:
        raise AXIllegalArgumentException(
            "Probe {} cannot be processed due to error {}".format(spec, e))
    return None
def generate_spec(self):
    """Build and return the kubernetes V1Container spec for this container."""
    container = swagger_client.V1Container()
    container.name = self.name
    container.image = self.image

    if self.resources is not None:
        container.resources = swagger_client.V1ResourceRequirements()
        container.resources.requests = {}
        container.resources.limits = {}
        # Each resource entry is (request, optional-limit).
        if "cpu_cores" in self.resources:
            cpu = self.resources["cpu_cores"]
            container.resources.requests["cpu"] = str(cpu[0])
            if cpu[1] is not None:
                container.resources.limits["cpu"] = str(cpu[1])
        if "mem_mib" in self.resources:
            mem = self.resources["mem_mib"]
            container.resources.requests["memory"] = "{}Mi".format(mem[0])
            if mem[1] is not None:
                container.resources.limits["memory"] = "{}Mi".format(mem[1])

    # Kubernetes 1.5 requires init container must specify image pull policy.
    # Since we are setting a pull policy for all containers, we want to
    # replicate the kubernetes default behavior of pulling the image if
    # tag is "latest"
    if self.image.endswith(':latest'):
        container.image_pull_policy = ContainerImagePullPolicy.PullAlways
    else:
        container.image_pull_policy = self.image_pull_policy or ContainerImagePullPolicy.PullIfNotPresent

    if self.command:
        container.command = self.command
    if self.args:
        container.args = self.args

    container.volume_mounts = [vol.get_container_spec() for _, vol in self.vmap.iteritems()]
    container.env = [env for _, env in self.env_map.iteritems()]

    if self.privileged is not None:
        container.security_context = swagger_client.V1SecurityContext()
        container.security_context.privileged = self.privileged

    for probe_name, probe_spec in self.probes.items():
        probe_k8s_spec = Container._generate_probe_spec(probe_spec)
        if probe_name == Container.LIVENESS_PROBE:
            container.liveness_probe = probe_k8s_spec
        elif probe_name == Container.READINESS_PROBE:
            container.readiness_probe = probe_k8s_spec
        else:
            raise AXIllegalArgumentException(
                "Unexpected probe type {} found with spec {}".format(
                    probe_name, probe_spec))
    return container
def update_cluster_sg_aws():
    """
    Ensure argo cluster is opened to the given trusted_cidrs
    Data input format:
    {
        "trusted_cidrs": ["1.1.1.1/32", "2.2.2.2/32", "3.3.3.3/32"]
    }
    :return:
    """
    data = request.get_json()
    ip_ranges = data.get("trusted_cidrs", None)
    if not ip_ranges:
        # Nothing to change; echo back the (empty/missing) input.
        return jsonify(trusted_cidrs=ip_ranges)
    if not isinstance(ip_ranges, list):
        raise AXIllegalArgumentException("Trusted CIDRs must be a list")

    @retry_unless(status_code=[404, 422])
    def _do_update_axops(ip):
        # Patch the axops service so its load balancer only accepts the
        # given source ranges.
        spec = {
            "spec": {
                "loadBalancerSourceRanges": ip
            }
        }
        kubectl.api.patch_namespaced_service(spec, name="axops", namespace="axsys")

    # Serialize concurrent trusted-CIDR updates against the cluster config.
    with cfg_lock:
        cluster_config = AXClusterConfig(cluster_name_id=cluster_name_id)
        current_trusted_cidrs = cluster_config.get_trusted_cidr()

        # Update node security groups
        cloud_util = AXBootstrap(cluster_name_id=cluster_name_id,
                                 region=cluster_config.get_region())
        try:
            cloud_util.modify_node_security_groups(
                old_cidr=current_trusted_cidrs,
                new_cidr=ip_ranges,
                action_name="UserInitiatedTrustedCidrChange"
            )
        except AWSClientError as ace:
            # In case of client error, ensure current CIDRs are reverted back.
            # The only inconsistency could be, any CIDRs that user wants to add
            # and does not trigger client error are added to node security groups
            # which is fine as long as we return proper error message to UI, and
            # leave users to fix and retry.
            # Not catching exception here because any CIDR ranges persisted to
            # cluster config should are guaranteed to be acceptable by cloud
            # provider
            # TODO (harry): not efficient here as it potentially checks CIDRs that are not removed
            cloud_util.modify_node_security_groups(
                old_cidr=[],
                new_cidr=current_trusted_cidrs,
                action_name="EnsureExistingDueToError"
            )
            if "InvalidParameterValue" in str(ace):
                raise AXIllegalArgumentException("InvalidParameterValue: {}".format(str(ace)))
            else:
                raise ace

        # Update axops security groups
        _do_update_axops(ip=ip_ranges)

        # Persist cluster config. We need to do it the last as if any of the
        # previous option fails, we should not show up the updated trusted
        # CIDRs on UI from any subsequent GET call
        cluster_config.set_trusted_cidr(cidrs=ip_ranges)
        cluster_config.save_config()
    return jsonify(trusted_cidrs=ip_ranges)
def create_webhook():
    """
    Create a kubernetes service load balancer connecting external traffic to axops,
    with a range of trusted ips
    Data input format:
    {
        "port_spec": [
            {
                "name": "webhook",
                "port": 8443,
                "targetPort": 8087
            }
        ],
        "ip_ranges": ["0.0.0.0/0"]
    }
    :return:
    {
        "ingress": "xxxxxx.us-west-2.elb.amazonaws.com",
        "detail": V1Service
    }
    """
    data = request.get_json()
    port_spec = data.get("port_spec", None)
    ip_ranges = data.get("ip_ranges", ["0.0.0.0/0"])
    if not port_spec:
        raise AXIllegalArgumentException("No port spec provided")
    webhook_svc_name = "axops-webhook"

    # Assemble the LoadBalancer service object targeting the axops deployment.
    srv = swagger_client.V1Service()
    srv.metadata = swagger_client.V1ObjectMeta()
    srv.metadata.name = webhook_svc_name
    srv.metadata.labels = {
        "app": webhook_svc_name,
        "tier": "platform",
        "role": "axcritical"
    }
    spec = swagger_client.V1ServiceSpec()
    spec.selector = {
        'app': "axops-deployment"
    }
    spec.type = "LoadBalancer"
    spec.ports = port_spec
    spec.load_balancer_source_ranges = ip_ranges
    srv.spec = spec

    # Don't have to retry here as creating webhook is a manual process
    # and it is fair to throw proper error message to user and have them
    # retry manually
    need_update = False
    try:
        kubectl.api.create_namespaced_service(body=srv, namespace="axsys")
    except ApiException as ae:
        if ae.status == 409:
            # Service already exists; fall through to patch it instead.
            need_update = True
        elif ae.status == 422:
            raise AXIllegalArgumentException("Unable to create webhook due to invalid argument", detail=str(ae))
        else:
            raise AXPlatformException("Unable to create webhook due to Kubernetes internal error", detail=str(ae))
    except Exception as e:
        raise AXPlatformException("Unable to create webhook", detail=str(e))

    if need_update:
        update_body = {
            "spec": {
                "ports": port_spec,
                "load_balancer_source_ranges": ip_ranges
            }
        }
        try:
            kubectl.api.patch_namespaced_service(body=update_body, namespace="axsys", name=webhook_svc_name)
        except Exception as e:
            raise AXPlatformException("Unable to update webhook", detail=str(e))

    trail = 0
    rst = {
        "port_spec": port_spec,
        "ip_ranges": ip_ranges
    }
    # Poll (60 tries x 3s) for the cloud load balancer to publish an
    # ingress hostname; return it as soon as it appears.
    while trail < 60:
        time.sleep(3)
        try:
            svc = kubectl.api.read_namespaced_service_status(namespace="axsys", name=webhook_svc_name)
            if svc.status.load_balancer and svc.status.load_balancer.ingress:
                rst["hostname"] = svc.status.load_balancer.ingress[0].hostname
                return jsonify(rst)
        except ApiException:
            pass
        trail += 1

    # Timed out: clean up the service we created (a 404 means it is
    # already gone, which is fine).
    try:
        kubectl.api.delete_namespaced_service(namespace="axsys", name=webhook_svc_name)
    except ApiException as ae:
        if ae.status != 404:
            raise ae
    raise AXPlatformException("Webhook creation timeout")