def _get_lambda_env():
    """Build the environment-variable dict used for the Faasm Lambda deploy.

    Redis hosts come from the [AWS] section of the Faasm config; everything
    else is a fixed setting for the Lambda host type.
    """
    faasm_conf = get_faasm_config()

    return {
        "FUNCTION_STORAGE": "s3",
        "HOST_TYPE": "lambda",
        "LOG_LEVEL": "debug",
        "CGROUP_MODE": "off",
        "NETNS_MODE": "off",
        "BUCKET_NAME": "faasm-runtime",
        "QUEUE_NAME": "faasm-messages",
        "SERIALISATION": "proto",
        "REDIS_STATE_HOST": faasm_conf["AWS"]["redis_state"],
        "REDIS_QUEUE_HOST": faasm_conf["AWS"]["redis_queue"],
        "NO_SCHEDULER": "1",
        "GLOBAL_MESSAGE_BUS": "redis",
        "AWS_LOG_LEVEL": "info",
        "GLOBAL_MESSAGE_TIMEOUT": "120000",
        "UNBOUND_TIMEOUT": "30000",
        "THREADS_PER_WORKER": "10",
    }
def _do_redis_command(sub_cmd, local, docker, knative, ibm):
    """Run a redis-cli subcommand against whichever Redis the deployment uses.

    Exactly one of the mode flags is expected to be set; precedence is
    local > docker > ibm > knative, falling back to a plain local
    ``redis-cli`` when none is set.

    :param sub_cmd: redis-cli subcommand string to run
    :param local/docker/knative/ibm: mutually exclusive deployment selectors
    """
    if local:
        cmd = ["redis-cli", sub_cmd]
    elif docker:
        cmd = ["docker-compose", "exec", "redis-queue", "redis-cli", sub_cmd]
    elif ibm:
        # IBM Redis is reached over its public host from the config
        faasm_conf = get_faasm_config()
        cmd = [
            "redis-cli",
            "-h {}".format(faasm_conf["IBM"]["redis_host_public"]),
            sub_cmd
        ]
    elif knative:
        cmd = [
            "kubectl", "exec",
            "-n faasm",
            "redis-queue",
            "--",
            "redis-cli", sub_cmd
        ]
    else:
        cmd = ["redis-cli", sub_cmd]

    cmd_string = " ".join(cmd)
    print(cmd_string)

    # Fix: reuse the already-joined command string instead of joining again
    ret_code = call(cmd_string, shell=True, cwd=PROJ_ROOT)

    if ret_code != 0:
        print("Command failed: {}".format(cmd_string))
def _add_sqs_event_source(client, func_name):
    """Wire the configured SQS queue up as an event source for a Lambda.

    Updates the existing mapping if one is already present, otherwise
    creates a fresh one (batch size 1 either way).
    """
    faasm_conf = get_faasm_config()
    arn = faasm_conf["AWS"]["sqs_arn"]
    url = faasm_conf["AWS"]["sqs_url"]

    print("Adding SQS source for {} from queue {}".format(func_name, url))

    # See whether a mapping already exists for this function
    existing = client.list_event_source_mappings(
        EventSourceArn=arn,
        FunctionName=func_name,
        MaxItems=1,
    )
    mappings = existing["EventSourceMappings"]

    if mappings:
        mapping_uuid = mappings[0]["UUID"]
        print("Already have event source mapping, attempting to update UUID ",
              mapping_uuid)

        client.update_event_source_mapping(
            UUID=mapping_uuid,
            FunctionName=func_name,
            Enabled=True,
            BatchSize=1,
        )
    else:
        print("Creating new event source mapping")

        client.create_event_source_mapping(
            EventSourceArn=arn,
            FunctionName=func_name,
            Enabled=True,
            BatchSize=1,
        )
def deploy_knative(ctx, replicas, local=False, ibm=False):
    """Deploy the Faasm worker and supporting K8s resources to Knative.

    :param replicas: number of worker replicas to run
    :param local: apply the local config variant rather than bare-metal remote
    :param ibm: target an IBM cluster (custom kubeconfig + IBM storage env)
    """
    faasm_conf = get_faasm_config()

    shell_env = {}
    if ibm:
        # IBM requires specifying custom kubeconfig
        shell_env["KUBECONFIG"] = get_ibm_kubeconfig()

        extra_env = {
            "FUNCTION_STORAGE": "ibm",
            "IBM_API_KEY": faasm_conf["IBM"]["api_key"],
        }
    else:
        extra_env = {
            "FUNCTION_STORAGE": "fileserver",
            "FILESERVER_URL": "http://upload:8002",
        }

    # Deploy the other K8s stuff (e.g. redis)
    _kubectl_apply(join(COMMON_CONF, "namespace.yml"), env=shell_env)
    _kubectl_apply(COMMON_CONF, env=shell_env)
    _kubectl_apply(BARE_METAL_CONF)

    _kubectl_apply(LOCAL_CONF if local else BARE_METAL_REMOTE_CONF)

    _deploy_knative_fn(
        FAASM_WORKER_NAME,
        FAASM_WORKER_IMAGE,
        replicas,
        4,
        FAASM_WORKER_ANNOTATIONS,
        extra_env=extra_env,
        shell_env=shell_env,
    )
def get_kubernetes_host_port():
    """Return the (host, port) used to invoke functions on Kubernetes.

    Returns (None, None) when the config has no [Kubernetes] section.
    Note the fallback port is the int 8080, whereas a configured value is
    read straight from the config parser (presumably a string) -- callers
    should tolerate either.
    """
    conf = get_faasm_config()

    if not conf.has_section("Kubernetes"):
        return None, None

    k8s_section = conf["Kubernetes"]
    return (k8s_section.get("invoke_host", "localhost"),
            k8s_section.get("invoke_port", 8080))
def get_kubernetes_upload_host(knative, host):
    """Resolve the upload host for a knative deployment.

    With knative, the configured [Kubernetes] upload_host wins; without a
    [Kubernetes] section the given host (or "localhost") is used. When
    ``knative`` is falsy the input host is returned untouched.
    """
    faasm_conf = get_faasm_config()

    if knative:
        if faasm_conf.has_section("Kubernetes"):
            host = faasm_conf["Kubernetes"]["upload_host"]
        else:
            host = host if host else "localhost"

    return host
def ibm_k8s_config(ctx):
    """Fetch the kubeconfig for the configured IBM Kubernetes cluster."""
    faasm_conf = get_faasm_config()

    print("Getting cluster config")

    config_cmd = [
        "ibmcloud", "ks", "cluster", "config",
        "--cluster", faasm_conf["IBM"]["k8s_cluster_id"],
    ]
    call(" ".join(config_cmd), shell=True)
def _get_github_instance():
    """Return an authenticated Github client, or None if no token is set up.

    Fix: the missing-config branch previously only printed a warning and then
    fell through to the token lookup, which raised a KeyError/NoSectionError;
    it now bails out early with None.
    """
    conf = get_faasm_config()

    if not conf.has_section("Github") or not conf.has_option(
            "Github", "access_token"):
        print("Must set up Github config with access token")
        return None

    token = conf["Github"]["access_token"]
    return Github(token)
def ibm_login(ctx):
    """Log in to IBM Cloud and target the dev space for the configured user."""
    conf = get_faasm_config()
    api_key = conf["IBM"]["api_key"]
    email = conf["IBM"]["email"]

    login_cmd = ["ibmcloud", "login", "--apikey", api_key]
    call(" ".join(login_cmd), shell=True)

    target_cmd = ["ibmcloud", "target", "-o", email, "-s", "dev"]
    call(" ".join(target_cmd), shell=True)
def get_upload_host_port(host_in, port_in):
    """Resolve the upload (host, port).

    An explicit host argument always wins; otherwise the [Kubernetes] config
    section is consulted, falling back to 127.0.0.1:8002.
    """
    faasm_config = get_faasm_config()

    # Explicit host (or no K8s config) means we use the args / defaults
    if host_in or not faasm_config.has_section("Kubernetes"):
        resolved_host = host_in if host_in else "127.0.0.1"
        resolved_port = port_in if port_in else 8002
        return resolved_host, resolved_port

    k8s_section = faasm_config["Kubernetes"]
    return (k8s_section.get("upload_host", "127.0.0.1"),
            k8s_section.get("upload_port", 8002))
def get_invoke_host_port(host_in=None, port_in=None):
    """Resolve the invoke (host, port).

    With no explicit host and a [Faasm] config section present, the config
    values are used (defaults localhost:8080); otherwise the arguments win,
    falling back to 127.0.0.1:8080.
    """
    faasm_config = get_faasm_config()

    if not host_in and faasm_config.has_section("Faasm"):
        faasm_section = faasm_config["Faasm"]
        return (faasm_section.get("invoke_host", "localhost"),
                faasm_section.get("invoke_port", 8080))

    resolved_host = host_in if host_in else "127.0.0.1"
    resolved_port = port_in if port_in else 8080
    return resolved_host, resolved_port
def list_event_sources(ctx, func_name):
    """Print every SQS event source mapping attached to a Lambda function."""
    client = boto3.client("lambda", region_name=AWS_REGION)
    sqs_arn = get_faasm_config()["AWS"]["sqs_arn"]

    mappings = client.list_event_source_mappings(
        EventSourceArn=sqs_arn,
        FunctionName=func_name,
    )["EventSourceMappings"]

    for mapping in mappings:
        print(mapping)
def ibm_k8s_add_knative(ctx):
    """Enable the Knative add-on on the configured IBM Kubernetes cluster."""
    faasm_conf = get_faasm_config()

    print("Enabling knative")

    addon_cmd = [
        "ibmcloud", "ks", "cluster-addon-enable", "knative",
        "--cluster", faasm_conf["IBM"]["k8s_cluster_id"],
    ]
    call(" ".join(addon_cmd), shell=True)
def aws(ctx):
    """Populate the [AWS] section of the Faasm config and write it to disk.

    Pulls Elasticache URLs, VPC subnet/security-group IDs and SQS details
    from AWS and stores them for later deploy steps.
    """
    config = get_faasm_config()
    aws_section = config["AWS"]

    aws_section["redis_state"] = get_elasticache_url("faasm-redis-state")
    aws_section["redis_queue"] = get_elasticache_url("faasm-redis-queue")

    aws_section["subnet_ids"] = ",".join(get_private_subnet_ids())
    aws_section["security_group_ids"] = ",".join(get_security_group_ids())

    queue_url, queue_arn = get_sqs_info()
    aws_section["sqs_url"] = queue_url
    aws_section["sqs_arn"] = queue_arn

    with open(FAASM_CONFIG_FILE, "w") as fh:
        config.write(fh)
def _do_deploy_knative_native(func_name, image_name, replicas):
    """Deploy a native (non-wasm) function to Knative.

    Returns 1 (error) when the Faasm config has no [Kubernetes] section,
    which is needed to resolve the invoke host/port for chaining.
    """
    faasm_config = get_faasm_config()
    if not faasm_config.has_section("Kubernetes"):
        print("Must have faasm config set up with kubernetes section")
        return 1

    # Host and port required for chaining native functions
    invoke_host, invoke_port = get_kubernetes_host_port()

    chaining_env = {
        "FAASM_INVOKE_HOST": invoke_host,
        "FAASM_INVOKE_PORT": invoke_port,
    }

    _deploy_knative_fn(
        func_name,
        image_name,
        replicas,
        1,
        NATIVE_WORKER_ANNOTATIONS,
        extra_env=chaining_env,
    )
def ibm_deploy_worker(ctx, update=False):
    """Create (or, with update=True, update) the Faasm worker action on
    IBM Cloud Functions."""
    conf = get_faasm_config()
    redis_host = conf["IBM"]["redis_host_private"]
    api_key = conf["IBM"]["api_key"]

    # Note that concurrency here is _intra_ container, i.e. how many concurrent
    # invocations can each container support
    action_cmd = [
        "ibmcloud", "fn", "action",
        "update" if update else "create",
        "--param FUNCTION_STORAGE ibm",
        "--param REDIS_QUEUE_HOST {}".format(redis_host),
        "--param REDIS_STATE_HOST {}".format(redis_host),
        "--param IBM_API_KEY {}".format(api_key),
        "--param CGROUP_MODE off",
        "--param NETNS_MODE off",
        "--memory 1024",
        "--timeout 30000",
        "--concurrency 20",
        "faasm-worker",
        "--docker", "faasm/ibm-worker:0.1.1",
    ]

    cmd_string = " ".join(action_cmd)
    print(cmd_string)
    call(cmd_string, shell=True)

    print("Done")
def is_kubernetes():
    """True when the Faasm config contains a [Kubernetes] section."""
    return get_faasm_config().has_section("Kubernetes")
def _get_sqs_client():
    """Return an (SQS client, queue URL) pair from the [AWS] config section."""
    queue_url = get_faasm_config()["AWS"]["sqs_url"]
    sqs = boto3.client("sqs", region_name=AWS_REGION)
    return sqs, queue_url
def _do_deploy(func_name,
               memory=DEFAULT_LAMBDA_MEM,
               timeout=DEFAULT_LAMBDA_TIMEOUT,
               concurrency=DEFAULT_LAMBDA_CONCURRENCY,
               environment=None,
               zip_file_path=None,
               s3_bucket=None,
               s3_key=None,
               sqs=False):
    """Create or update an AWS Lambda function.

    The function code comes either from a local zip file (zip_file_path) or
    from an S3 object (s3_bucket + s3_key) -- exactly one source must be
    given. Existing functions get only their code updated; new functions are
    created with the full VPC / role / memory / timeout configuration and
    the optional environment variables. In both cases reserved concurrency
    is applied afterwards, and with sqs=True the configured SQS queue is
    attached as an event source.

    NOTE(review): VPC and role settings are only applied on creation, not on
    update -- presumably intentional, but worth confirming.
    """
    assert zip_file_path or (
        s3_bucket and s3_key), "Must give either a zip file or S3 bucket and key"

    client = boto3.client("lambda", region_name=AWS_REGION)

    if zip_file_path:
        assert exists(zip_file_path), "Expected zip file at {}".format(
            zip_file_path)

    # Get subnet IDs and security groups
    conf = get_faasm_config()
    subnet_ids = conf["AWS"]["subnet_ids"].split(",")
    security_group_ids = conf["AWS"]["security_group_ids"].split(",")

    # Check if function exists (get_function raises ClientError when missing)
    is_existing = True
    try:
        client.get_function(FunctionName=func_name, )
    except ClientError:
        is_existing = False

    kwargs = {
        "FunctionName": func_name,
    }

    # Read the zip contents up front so both branches can use them
    content = None
    if zip_file_path:
        with open(zip_file_path, "rb") as fh:
            content = fh.read()

    if is_existing:
        print("{} already exists, updating".format(func_name))

        if zip_file_path:
            kwargs["ZipFile"] = content
        else:
            kwargs["S3Bucket"] = s3_bucket
            kwargs["S3Key"] = s3_key

        client.update_function_code(**kwargs)
    else:
        print("{} does not already exist, creating".format(func_name))

        # Full creation config: custom runtime, IAM role, VPC placement
        kwargs.update({
            "Runtime": "provided",
            "Role": "arn:aws:iam::{}:role/{}".format(AWS_ACCOUNT_ID,
                                                     AWS_LAMBDA_ROLE),
            "Handler": func_name,
            "MemorySize": memory,
            "Timeout": timeout,
            "VpcConfig": {
                "SubnetIds": subnet_ids,
                "SecurityGroupIds": security_group_ids
            }
        })

        if zip_file_path:
            kwargs["Code"] = {"ZipFile": content}
        else:
            kwargs["Code"] = {"S3Bucket": s3_bucket, "S3Key": s3_key}

        if environment:
            lambda_env = {"Variables": environment}
            kwargs["Environment"] = lambda_env

        client.create_function(**kwargs)

    # Set up concurrency
    client.put_function_concurrency(
        FunctionName=func_name,
        ReservedConcurrentExecutions=concurrency,
    )

    if sqs:
        _add_sqs_event_source(client, func_name)
def create_faasm_config(ctx):
    """Create the Faasm config file.

    Loading the config is presumably enough to create a skeleton file as a
    side effect -- see get_faasm_config (TODO confirm).
    """
    get_faasm_config()
def ibm_ssh_redis(ctx):
    """Open an SSH session (as root) to the public IBM Redis host."""
    redis_host = get_faasm_config()["IBM"]["redis_host_public"]
    ssh_cmd = ["ssh", "root@{}".format(redis_host)]
    call(" ".join(ssh_cmd), shell=True)
def create(ctx):
    """ Set up skeleton Faasm config """
    # Loading the config is presumably enough to create the skeleton file
    # as a side effect -- see get_faasm_config (TODO confirm)
    get_faasm_config()
do_post(url, input, json=True) def invoke_impl(user, func, host=None, port=None, input=None, py=False, async=False, knative=True, native=False, ibm=False, poll=False, poll_interval_ms=1000): faasm_config = get_faasm_config() # Provider-specific stuff if ibm: host = faasm_config["IBM"]["k8s_subdomain"] port = 8080 elif knative: host, port = get_kubernetes_host_port() # Defaults host = host if host else "127.0.0.1" port = port if port else 8080 # Polling always requires async if poll: async = True
def get_ibm_kubeconfig():
    """Return the path to the kubeconfig for the IBM cluster.

    NOTE: we assume LON02 datacentre here.
    """
    cluster_id = get_faasm_config()["IBM"]["k8s_cluster_id"]

    return join(
        HOME_DIR,
        ".bluemix/plugins/container-service/clusters",
        cluster_id,
        "kube-config-lon02-faasm.yml",
    )