def get_all_events(from_time: str, to_time: str,
                   configuration: Configuration,
                   secrets: Secrets) -> InstanaResponse:
    """
    Get all events from Instana within a time window given by `from_time`
    and `to_time`.

    For details of the API see
    https://instana.github.io/openapi/#tag/Events
    """
    logger.debug("get_all_events")
    instana_host = configuration.get("instana_host")
    instana_api_token = secrets.get("instana_api_token")
    if not (instana_host and instana_api_token):
        raise ActivityFailed(
            "No Instana host or API token secret was found.")

    url = "{}/api/events".format(instana_host)
    params = {}
    if from_time:
        params["from"] = from_time
    if to_time:
        params["to"] = to_time
    result = execute_instana_get_request(url, params, secrets)
    return result
def create_ibmcloud_databases_client(
        configuration: Configuration) -> CloudDatabasesV5:
    api_key = configuration.get("api_key")
    service_url = configuration.get("service_url")
    os.environ["CLOUD_DATABASES_URL"] = service_url
    os.environ["CLOUD_DATABASES_APIKEY"] = api_key
    service = CloudDatabasesV5.new_instance()
    return service
def auth(configuration: Configuration, secrets: Secrets) -> Dict[str, str]:
    """
    Authenticate with the Cloud Foundry API endpoint.

    The `configuration` mapping must include the `"cf_api_url"` key, set to
    the URL of the API server, for example: `"https://api.local.pcfdev.io"`.
    When testing against a secured endpoint exposing a self-signed
    certificate, you should set `"cf_verify_ssl"` to `False`.

    The `secrets` mapping must contain:

    `"cf_username"`: the user to authenticate with
    `"cf_password"`: the user's password
    `"cf_client_id"`: the client id to authenticate with, defaults to `"cf"`
    `"cf_client_secret"`: the client's secret, defaults to `""`

    Returns a mapping with the `access_token` and `refresh_token` keys, as per
    http://docs.cloudfoundry.org/api/uaa/version/4.8.0/index.html#password-grant
    """
    api_url = configuration.get("cf_api_url")
    verify_ssl = configuration.get("cf_verify_ssl", True)
    username = secrets.get("cf_username")
    password = secrets.get("cf_password")
    client_id = secrets.get("cf_client_id", "cf")
    client_secret = secrets.get("cf_client_secret", "")

    logger.debug(
        "Querying a new access token for client '{c}'".format(c=client_id))

    return get_tokens(api_url, username, password, client_id,
                      client_secret, verify_ssl)
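# Illustrative usage (not part of the original module): a minimal,
# hypothetical configuration/secrets pair matching the keys read by `auth`
# above. All values are placeholders, assuming a local PCF Dev instance.
#
#     configuration = {
#         "cf_api_url": "https://api.local.pcfdev.io",
#         "cf_verify_ssl": False,
#     }
#     secrets = {
#         "cf_username": "user",
#         "cf_password": "pass",
#         "cf_client_id": "cf",
#         "cf_client_secret": "",
#     }
#     tokens = auth(configuration, secrets)
#     access_token = tokens["access_token"]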
def create_jaeger_tracer(configuration: Configuration = None,
                         secrets: Secrets = None):
    """
    Create a Jaeger tracer.
    """
    from jaeger_client.config import DEFAULT_REPORTING_PORT
    from jaeger_client.constants import TRACE_ID_HEADER, \
        BAGGAGE_HEADER_PREFIX
    from jaeger_client import Config

    host = configuration.get("tracing_host", "localhost")
    port = configuration.get("tracing_port", DEFAULT_REPORTING_PORT)

    tracer_config = Config(
        config={
            'sampler': {
                'type': 'const',
                'param': 1,
            },
            'logging': True,
            'propagation': configuration.get('tracing_propagation', None),
            'trace_id_header': configuration.get(
                "tracing_id_name", TRACE_ID_HEADER),
            'baggage_header_prefix': configuration.get(
                "baggage_prefix", BAGGAGE_HEADER_PREFIX),
            'local_agent': {
                'reporting_host': host,
                'reporting_port': port
            }
        },
        service_name='chaostoolkit',
        validate=True,
    )

    addr = "{}:{}".format(host, port)
    logger.debug("Configured Jaeger tracer to send to '{}'".format(addr))
    return tracer_config.initialize_tracer()
def vsphere_client(configuration: Configuration = None,
                   secrets: Secrets = None):
    """
    Private function that authenticates against the vSphere API.
    """
    # connection properties
    host = configuration.get("vsphere_server")
    port = configuration.get("vsphere_port", 443)
    verify_ssl = configuration.get("vsphere_verify_ssl", True)

    if secrets:
        username = secrets.get("vsphere_username")
        password = secrets.get("vsphere_password")
    else:
        username = os.getenv("VSPHERE_USERNAME")
        password = os.getenv("VSPHERE_PASSWORD")

    # now connect to your vCenter/vSphere; only disable certificate
    # verification when explicitly requested via `vsphere_verify_ssl`
    context = None
    if not verify_ssl and hasattr(ssl, '_create_unverified_context'):
        context = ssl._create_unverified_context()

    connection = SmartConnect(host=host, user=username, pwd=password,
                              port=port, sslContext=context)
    if not connection:
        print("Could not connect to the specified host using specified "
              "username and password")
    return connection
def aws_client(resource_name: str, configuration: Configuration = None,
               secrets: Secrets = None):
    """
    Create a boto3 client for the given resource.

    You may pass the `aws_region` key in the `configuration` object to be
    explicit about which region you want to use.

    You may pass the `aws_profile_name` value in the `configuration` object
    so that we load the appropriate profile to converse with the AWS
    services. In that case, make sure your local `~/.aws/credentials` config
    is properly set up, as per
    https://boto3.readthedocs.io/en/latest/guide/configuration.html#aws-config-file

    Also, if you want to assume a role, you should set up that file as per
    https://boto3.readthedocs.io/en/latest/guide/configuration.html#assume-role-provider
    as we do not read those settings from the `secrets` object.
    """  # noqa: E501
    configuration = configuration or {}
    region = configuration.get("aws_region", "us-east-1")
    creds = get_credentials(secrets)

    if boto3.DEFAULT_SESSION is None:
        profile_name = configuration.get("aws_profile_name")
        # we must create our own session so that we can populate the profile
        # name when it is provided. Only create the default session once.
        boto3.setup_default_session(profile_name=profile_name,
                                    region_name=region, **creds)

    return boto3.client(resource_name, region_name=region, **creds)
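# Illustrative usage (not part of the original module): a hypothetical
# configuration selecting a region and a named local profile for
# `aws_client` above. Values are placeholders.
#
#     configuration = {
#         "aws_region": "eu-west-1",
#         "aws_profile_name": "chaos",
#     }
#     ec2 = aws_client("ec2", configuration=configuration)
#     ec2.describe_instances()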
def load_secrets_from_vault(secrets_info: Dict[str, Dict[str, str]],
                            configuration: Configuration = None) -> Secrets:
    secrets = {}

    url = configuration.get("vault_addr")
    token = configuration.get("vault_token")

    client = None
    if HAS_HVAC:
        client = hvac.Client(url=url, token=token)

    for (target, keys) in secrets_info.items():
        secrets[target] = {}

        for (key, value) in keys.items():
            if isinstance(value, dict) and value.get("type") == "vault":
                if not HAS_HVAC:
                    logger.error(
                        "Install the `hvac` package to fetch secrets "
                        "from Vault: `pip install chaostoolkit-lib[vault]`.")
                    return {}
                secrets[target][key] = client.read(value["key"])

        if not secrets[target]:
            secrets.pop(target)

    return secrets
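# Illustrative input (not part of the original module): a hypothetical
# `secrets_info` mapping in the shape this function iterates over, where
# entries with `"type": "vault"` are resolved through the hvac client.
# The Vault path, address and token are placeholders.
#
#     secrets_info = {
#         "myapp": {
#             "db_password": {"type": "vault", "key": "secret/myapp/db"},
#         }
#     }
#     configuration = {"vault_addr": "http://127.0.0.1:8200",
#                      "vault_token": "s.placeholder"}
#     secrets = load_secrets_from_vault(secrets_info, configuration)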
def detach_volume_from_worker(configuration: Configuration, cluster_id: str,
                              volume_id: str, worker_id: str):
    """
    detach_volume_from_worker: detach a volume from a worker node

    attr cluster_id str: Cluster ID
    attr volume_id str: Volume ID
    attr worker_id str: Worker node ID
    """
    # Retrieve the API key from the configuration and create an
    # authenticator object
    api_key = configuration.get("api_key")
    authenticator = IAMAuthenticator(api_key)

    # Create a VpcV1 API service to retrieve the volume attachment ID needed
    # by the delete attachment method
    service = VpcV1('2020-06-02', authenticator=authenticator,
                    generation=int(2))
    url = configuration.get("service_url",
                            'https://dallas.iaas.cloud.ibm.com/v1')
    service.service_url = url

    volume = service.get_volume(volume_id)._to_dict()
    attachments = volume['result']['volume_attachments']
    if not attachments:
        raise ValueError('This disk is not attached to any worker nodes')
    volume_attachment = attachments[0]['id']

    # Create the delete attachment model
    deleteAttachement = DeleteAttachementModel(
        cluster=cluster_id,
        volumeID=volume_id,
        volumeAttachmentID=volume_attachment,
        worker=worker_id)

    # Instantiate ContainerService to connect to the containers API
    service = ContainerService(authenticator)
    service.delete_raw_block_storage(deleteAttachement)
def create_ibmcloud_api_client(configuration: Configuration) -> VpcV1:
    api_key = configuration.get("api_key")
    gen = configuration.get("generation", 2)
    url = configuration.get(
        "service_url",
        'https://api.au-syd.databases.cloud.ibm.com/v5/ibm/')
    authenticator = IAMAuthenticator(api_key)
    service = VpcV1('2020-06-02', authenticator=authenticator,
                    generation=int(gen))
    service.service_url = url
    return service
def aws_client(resource_name: str, configuration: Configuration = None,
               secrets: Secrets = None):
    """
    Create a boto3 client for the given resource.

    You may pass the `aws_region` key in the `configuration` object to be
    explicit about which region you want to use.

    You may pass the `aws_profile_name` value in the `configuration` object
    so that we load the appropriate profile to converse with the AWS
    services. In that case, make sure your local `~/.aws/credentials` config
    is properly set up, as per
    https://boto3.readthedocs.io/en/latest/guide/configuration.html#aws-config-file

    Also, if you want to assume a role, you should set up that file as per
    https://boto3.readthedocs.io/en/latest/guide/configuration.html#assume-role-provider
    as we do not read those settings from the `secrets` object.
    """  # noqa: E501
    configuration = configuration or {}
    region = configuration.get("aws_region", "us-east-1")
    creds = get_credentials(secrets)
    aws_assume_role_arn = configuration.get("aws_assume_role_arn")

    if boto3.DEFAULT_SESSION is None:
        profile_name = configuration.get("aws_profile_name")
        # we must create our own session so that we can populate the profile
        # name when it is provided. Only create the default session once.
        boto3.setup_default_session(profile_name=profile_name,
                                    region_name=region, **creds)

    if not aws_assume_role_arn:
        # default configuration
        logger.debug("Using default AWS role")
        return boto3.client(resource_name, region_name=region, **creds)
    else:
        logger.info("Assuming role: " + aws_assume_role_arn)
        # connect to the STS client
        client = boto3.client('sts', region_name=region, **creds)
        # get temporary credentials for the role we want to assume
        response = client.assume_role(
            RoleArn=aws_assume_role_arn,
            RoleSessionName="tempDetourSession")['Credentials']
        # build a new credentials dictionary from the STS response
        new_creds = dict(
            aws_access_key_id=response['AccessKeyId'],
            aws_secret_access_key=response['SecretAccessKey'],
            aws_session_token=response['SessionToken'])
        # return a client using the assumed-role credentials
        return boto3.client(resource_name, region_name=region, **new_creds)
def get_context(configuration: Configuration,
                secrets: Secrets = None) -> GCEContext:
    """
    Collate all the GCE context information.
    """
    return GCEContext(
        project_id=configuration.get("gce_project_id"),
        cluster_name=configuration.get("gce_cluster_name"),
        region=configuration.get("gce_region"),
        zone=configuration.get("gce_zone"),
    )
def build_baseUrl(configuration: Configuration) -> str:
    """
    Construct the Toxiproxy base URL from variables in the configuration.

    If `toxiproxy_url` is set, it is used as-is; otherwise the URL is built
    from `toxiproxy_host` and `toxiproxy_port` (defaulting to port 8474).
    """
    toxiproxy_host = configuration.get("toxiproxy_host")
    toxiproxy_port = configuration.get("toxiproxy_port")
    toxiproxy_url = configuration.get("toxiproxy_url")

    if not toxiproxy_url:
        if not toxiproxy_port:
            toxiproxy_port = 8474
        url = "http://{}:{}".format(toxiproxy_host, toxiproxy_port)
    else:
        url = toxiproxy_url

    logger.debug("Calculated toxiproxy URL is: {}".format(url))
    return url
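# Illustrative configurations (not part of the original module) showing the
# two ways `build_baseUrl` above resolves the Toxiproxy endpoint; host and
# URL values are placeholders.
#
#     build_baseUrl({"toxiproxy_host": "toxiproxy.local"})
#     # -> "http://toxiproxy.local:8474"
#
#     build_baseUrl({"toxiproxy_url": "http://toxiproxy.local:9999"})
#     # -> "http://toxiproxy.local:9999"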
def configure_control(configuration: Configuration, secrets: Secrets):
    global grafana_host
    global grafana_port
    global api_token
    global protocol
    global cert_file
    global grafana_annotation_api_endpoint
    global exp_start_time
    global exp_end_time
    global dashboardId
    global only_actions
    global tags

    # defaults
    grafana = configuration.get('grafana_api_token', {})
    grafana_host = grafana.get('host', 'localhost')
    grafana_port = grafana.get('port', 3000)
    protocol = grafana.get('protocol', 'http')
    cert_file = grafana.get('cert_file', None)
    api_token = grafana.get('api_token', '')
    exp_start_time = int(round(time.time() * 1000))
    exp_end_time = int(round(time.time() * 1000))
    dashboardId = grafana.get('dashboardId')
    only_actions = grafana.get('only_actions', 0)
    tags = grafana.get('tags', [])
    grafana_annotation_api_endpoint = '/api/annotations'
    return 1
def configure_control(configuration: Configuration, secrets: Secrets):
    global grafana_host
    global grafana_port
    global grafana_annotation_api_endpoint
    global grafana_user
    global grafana_pass
    global exp_start_time
    global exp_end_time
    global dashboardId
    global only_actions
    global tags

    # defaults
    grafana = configuration.get('grafana', {})
    grafana_user = grafana.get('username', 'admin')
    grafana_pass = grafana.get('password', 'admin')
    grafana_host = grafana.get('host', 'localhost')
    grafana_port = grafana.get('port', 3000)
    exp_start_time = int(round(time.time() * 1000))
    exp_end_time = int(round(time.time() * 1000))
    dashboardId = grafana.get('dashboardId', 1)
    only_actions = grafana.get('only_actions', 0)
    tags = grafana.get('tags', [])
    grafana_annotation_api_endpoint = '/api/annotations'
    return 1
def apply_python_control(level: str, control: Control, experiment: Experiment,
                         context: Union[Activity, Experiment],
                         state: Union[Journal, Run, List[Run]] = None,
                         configuration: Configuration = None,
                         secrets: Secrets = None):
    """
    Apply a control by calling a function matching the given level.
    """
    provider = control["provider"]
    func_name = _level_mapping.get(level)
    func = load_func(control, func_name)
    if not func:
        return

    arguments = deepcopy(provider.get("arguments", {}))

    if configuration or secrets:
        arguments = substitute(arguments, configuration, secrets)

    sig = inspect.signature(func)
    if "secrets" in provider and "secrets" in sig.parameters:
        arguments["secrets"] = {}
        for s in provider["secrets"]:
            arguments["secrets"].update(secrets.get(s, {}).copy())

    if "configuration" in sig.parameters:
        arguments["configuration"] = configuration.copy()

    if "state" in sig.parameters:
        arguments["state"] = state

    if "experiment" in sig.parameters:
        arguments["experiment"] = experiment

    func(context=context, **arguments)
def check_no_alert_for_dashboard(
        dashboard_id: int, configuration: Configuration = None,
        secrets: Secrets = None) -> bool:
    """
    Check alerts for a dashboard in Grafana.

    :param dashboard_id: dashboard id in Grafana
    :return: True if no alerts exist for the specified dashboard,
        False otherwise
    """
    grafana_host = configuration.get('grafana_host')
    secrets = secrets or {}
    grafana_token = get_grafana_token(secrets)
    headers = {"Authorization": "Bearer %s" % grafana_token}
    parameters = {"dashboardId": dashboard_id, "state": "alerting"}
    endpoint = urljoin(grafana_host, "/api/alerts")
    alerts = requests.get(endpoint, headers=headers,
                          params=parameters).json()
    retval = len(alerts) == 0
    for alert in alerts:
        for match in alert['evalData']['evalMatches']:
            logger.debug("Alert for node {n}".format(n=match))
    return retval
def configure_control(config: Configuration, secrets: Secrets,
                      settings: Settings):
    global value_from_config
    if config:
        value_from_config = config.get("dummy-key", "default")
    elif settings:
        value_from_config = settings.get("dummy-key", "default")
def run_python_activity(activity: Activity, configuration: Configuration,
                        secrets: Secrets) -> Any:
    """
    Run a Python activity.

    A Python activity is a function from any importable module. The result
    of that function is returned as the activity's output.

    This should be considered a private function.
    """
    provider = activity["provider"]
    mod_path = provider["module"]
    func_name = provider["func"]
    mod = importlib.import_module(mod_path)
    func = getattr(mod, func_name)
    arguments = provider.get("arguments", {}).copy()

    if configuration or secrets:
        arguments = substitute(arguments, configuration, secrets)

    sig = inspect.signature(func)
    if "secrets" in provider and "secrets" in sig.parameters:
        arguments["secrets"] = {}
        for s in provider["secrets"]:
            arguments["secrets"].update(secrets.get(s, {}).copy())

    if "configuration" in sig.parameters:
        arguments["configuration"] = configuration.copy()

    try:
        return func(**arguments)
    except Exception as x:
        raise FailedActivity(
            traceback.format_exception_only(
                type(x), x)[0].strip()).with_traceback(sys.exc_info()[2])
def all_nodes_are_ok(label_selector: str = None,
                     configuration: Configuration = None,
                     secrets: Secrets = None):
    """
    Check that all Kubernetes worker nodes in your cluster are healthy.

    You may filter nodes by specifying a label selector. Nodes with taints
    listed in the `taints-ignore-list` configuration entry are excluded.
    """
    retval = True
    ignore_list = []
    if configuration is not None:
        ignore_list = load_taint_list_from_dict(
            configuration.get("taints-ignore-list", {}))

    resp, k8s_api_v1 = get_active_nodes(label_selector, ignore_list, secrets)

    for item in resp.items:
        localresult = True
        for condition in item.status.conditions:
            if condition.type == "Ready" and condition.status == "False":
                logger.debug("{p} Ready=False".format(p=item.metadata.name))
                localresult = False
        if item.spec.unschedulable:
            logger.debug("{p} unschedulable".format(p=item.metadata.name))
            localresult = False
        # if item.spec.taints and len(item.spec.taints) > 0:
        #     logger.debug("{p} Tainted node".format(p=item.metadata.name))
        #     localresult = False
        if not localresult:
            logger.debug("{p} is not healthy".format(p=item.metadata.name))
            retval = False

    return retval
def check_quorum(dc: str, service_name: str,
                 configuration: Configuration = None):
    """
    Check that the service has more live endpoints than dead ones.

    :param service_name: service name to check
    :param configuration: injected by chaostoolkit
    :return: True if more endpoints are passing the healthcheck,
        False otherwise
    """
    retval = False
    consul_host = configuration.get('consul_host')
    consul_client = consul.Consul(host=consul_host)
    service_name = service_name.replace('.', '--')
    try:
        nodes = consul_client.health.service(service_name, dc=dc)[1]
        if nodes:
            total_nodes = len(nodes)
            good_nodes = get_good_nodes(nodes)
            total_good_nodes = len(good_nodes)
            if (total_nodes - total_good_nodes) < total_good_nodes:
                retval = True
    except (ValueError, IndexError) as e:
        logger.error(e)
    return retval
def damage_quorum(service_name: str, dc: str, num_of_instances_to_kill: int,
                  seconds_to_be_dead: int, configuration: Configuration):
    """
    Works only for a specific service that supports the "play dead" command.

    :param service_name: service to kill
    :param dc: in which DC to kill instances
    :param num_of_instances_to_kill: how many instances to kill
    :param seconds_to_be_dead: number of seconds to play dead
    :param configuration: chaostoolkit will inject this parameter
    :return:
    """
    consul_host = configuration.get('consul_host')
    consul_client = consul.Consul(host=consul_host)
    service_name = service_name.replace('.', '--')
    try:
        nodes = consul_client.catalog.service(service_name, dc=dc)[1]
        if nodes:
            if len(nodes) < num_of_instances_to_kill:
                num_of_instances_to_kill = len(nodes)
            for i in range(0, num_of_instances_to_kill):
                kill_instance(nodes[i], seconds_to_be_dead)
    except (ValueError, IndexError) as e:
        logger.error(e)
def get_random_namespace(configuration: Configuration = None,
                         secrets: Secrets = None):
    """
    Get a random namespace from the cluster. Supports the `ns-ignore-list`
    value in the configuration.

    :param secrets: chaostoolkit will inject this dictionary
    :param configuration: chaostoolkit will inject this dictionary
    :return: a random namespace, or None if no eligible namespace exists
    """
    ns_ignore_list = []
    if configuration is not None:
        ns_ignore_list = configuration.get("ns-ignore-list", [])

    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    ret = v1.list_namespace()

    namespace = None
    clean_ns = [
        ns for ns in ret.items
        if ns.metadata.name not in ns_ignore_list
    ]
    if len(clean_ns) > 0:
        namespace = random.choice(clean_ns)
    return namespace
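# Illustrative configuration (not part of the original module): a
# hypothetical `ns-ignore-list` keeping system namespaces out of the random
# selection performed by `get_random_namespace` above.
#
#     configuration = {
#         "ns-ignore-list": ["kube-system", "kube-public", "default"],
#     }
#     ns = get_random_namespace(configuration=configuration, secrets=secrets)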
def load_timeout(experiment_configuration: Configuration) -> int:
    """
    Defaults to 600 seconds if no timeout is given.
    """
    result = 600
    if experiment_configuration:
        result = experiment_configuration.get("timeout", result)
    return result
def create_vault_client(configuration: Configuration = None):
    """
    Initialize a Vault client from a token, an AppRole, or a Kubernetes
    service account.
    """
    client = None
    if HAS_HVAC:
        url = configuration.get("vault_addr")
        client = hvac.Client(url=url)

        client.secrets.kv.default_kv_version = str(configuration.get(
            "vault_kv_version", "2"))
        logger.debug(
            "Using Vault secrets KV version {}".format(
                client.secrets.kv.default_kv_version))

        if "vault_token" in configuration:
            client.token = configuration.get("vault_token")
        elif "vault_role_id" in configuration and \
                "vault_role_secret" in configuration:
            role_id = configuration.get("vault_role_id")
            role_secret = configuration.get("vault_role_secret")
            try:
                app_role = client.auth_approle(role_id, role_secret)
            except Exception as ve:
                raise InvalidExperiment(
                    "Failed to connect to Vault with the AppRole: {}".format(
                        str(ve)))

            client.token = app_role['auth']['client_token']
        elif "vault_sa_role" in configuration:
            sa_token_path = configuration.get(
                "vault_sa_token_path", "") or \
                "/var/run/secrets/kubernetes.io/serviceaccount/token"

            mount_point = configuration.get(
                "vault_k8s_mount_point", "kubernetes")

            try:
                with open(sa_token_path) as sa_token:
                    jwt = sa_token.read()
                    role = configuration.get("vault_sa_role")
                    client.auth_kubernetes(
                        role=role, jwt=jwt, use_token=True,
                        mount_point=mount_point)
            except IOError:
                raise InvalidExperiment(
                    "Failed to get service account token at: {path}".format(
                        path=sa_token_path))
            except Exception as e:
                raise InvalidExperiment(
                    "Failed to connect to Vault using service account with "
                    "errors: '{errors}'".format(errors=str(e)))

    return client
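# Illustrative configurations (not part of the original module) for the
# three authentication paths handled by `create_vault_client` above; all
# values are placeholders.
#
#     # static token
#     configuration = {"vault_addr": "http://127.0.0.1:8200",
#                      "vault_token": "s.placeholder"}
#
#     # AppRole
#     configuration = {"vault_addr": "http://127.0.0.1:8200",
#                      "vault_role_id": "role-id-placeholder",
#                      "vault_role_secret": "secret-id-placeholder"}
#
#     # Kubernetes service account
#     configuration = {"vault_addr": "http://vault.vault.svc:8200",
#                      "vault_sa_role": "chaostoolkit",
#                      "vault_k8s_mount_point": "kubernetes"}
#
#     client = create_vault_client(configuration)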
def build_baseUrl(configuration: Configuration) -> str:
    toxiproxy_host = configuration.get("toxiproxy_host")
    toxiproxy_port = configuration.get("toxiproxy_port")
    if not toxiproxy_port:
        toxiproxy_port = 8474
    url = "http://{}:{}".format(toxiproxy_host, toxiproxy_port)
    logger.debug("Calculated toxiproxy URL is: {}".format(url))
    return url
def tag_virtual_instance(instance_id: str, configuration: Configuration,
                         service: VpcV1, tagname: str):
    ins = service.get_instance(id=instance_id)._to_dict()
    crn = ins['result']['crn']
    resource = {'resource_id': crn}
    resources = [resource]
    api_key = configuration.get("api_key")
    authenticator = IAMAuthenticator(api_key)
    global_tagging = GlobalTaggingV1(authenticator=authenticator)
    global_tagging.attach_tag(resources=resources,
                              tag_names=['chaostoolkit', tagname])
def configure_control(
    experiment: Experiment,
    configuration: Configuration,
    secrets: Secrets,
    settings: Settings,
):
    if configuration:
        experiment["control-value"] = configuration.get("dummy-key",
                                                        "default")
    elif settings:
        experiment["control-value"] = settings.get("dummy-key", "default")
def tag_lb(lb_id: str, service: VpcV1, configuration: Configuration, *args):
    lb_ins = service.get_load_balancer(lb_id)._to_dict()
    crn = lb_ins['result']['crn']
    resource = {'resource_id': crn}
    resources = [resource]
    api_key = configuration.get("api_key")
    authenticator = IAMAuthenticator(api_key)
    global_tagging = GlobalTaggingV1(authenticator=authenticator)
    # start from the default tag and add any extra tag names passed in;
    # extend() accepts any number of extra tags, whereas append(*args) would
    # fail when more than one is given
    tag_names = ['chaostoolkit']
    tag_names.extend(args)
    global_tagging.attach_tag(resources=resources, tag_names=tag_names)
def get_value_from_configuration(conf: Configuration, field_name: str):
    """
    Extract a value from the chaostoolkit Configuration object, with a check
    for None.

    :param conf: chaostoolkit Configuration object
    :param field_name: name of the field to extract from the root of conf
    :return: value of the field named field_name, None otherwise
    """
    retval = None
    if conf is not None and field_name in conf:
        retval = conf[field_name]
    return retval
def get_event(event_id: str, configuration: Configuration,
              secrets: Secrets) -> InstanaResponse:
    """
    Get an event from Instana with the provided event_id.

    For details of the API see
    https://instana.github.io/openapi/#operation/getEvent
    """
    logger.debug("get_event")
    instana_host = configuration.get("instana_host")
    instana_api_token = secrets.get("instana_api_token")
    if not (instana_host and instana_api_token):
        raise ActivityFailed(
            "No Instana host or API token secret was found.")

    url = "{}/api/events/{}".format(instana_host, event_id)
    params = {}
    result = execute_instana_get_request(url, params, secrets)
    return result