def build_resource_with_yaml(yaml_file):
    """Create the Kubernetes resource described by *yaml_file* unless it
    already exists (checked via ``kubectl get``)."""
    already_exists = check_kube_resource("kubectl get -f {0}".format(yaml_file))
    if not already_exists:
        utils.execute_command(
            "kubectl create -f {0} --save-config".format(yaml_file), False)
    else:
        utils.write_line(
            "Resource {0} already exists, skipping\n".format(yaml_file))
def print_configuration(self):
    """Write a one-line, brace-delimited summary of this configuration."""
    parts = [
        "{",
        "vpc_stack_name:" + str(self.vpc_stack_name),
        ", eks_cluster_name:" + str(self.eks_cluster_name),
        ", worker_nodes: " + str(self.worker_nodes),
        "}",
    ]
    utils.write_line("".join(parts))
def load_aws_instances():
    """Load the bundled AWS instance-type catalogs into the cloud module."""
    print("Loading AWS Instance Types")
    catalogs = (
        ("./resources/aws_general.json", "General Purpose"),
        ("./resources/aws_compute.json", "Compute Optimized"),
        ("./resources/aws_memory.json", "Memory Optimized"),
    )
    try:
        for json_path, category in catalogs:
            cloud.load_instance_type(json_path, category)
    except IOError:
        # Best-effort: missing resource files are reported, not fatal.
        utils.write_line("Failed to load instance type information")
def build_ns(path, namespace):
    """Write a v1 Namespace manifest as <namespace>.yaml in *path* and apply it."""
    utils.write_line("Building namespace")
    # NOTE(review): cleanup targets "namespace.yaml" while the file written
    # below is "<namespace>.yaml" — confirm this mismatch is intentional.
    check_and_cleanup_file(path, "namespace.yaml")
    manifest_lines = [
        "kind: Namespace\n",
        "apiVersion: v1\n",
        "metadata:\n",
        " name: {0}\n".format(namespace),
        " labels:\n",
        " name: {0}\n".format(namespace),
    ]
    with open("{0}/{1}.yaml".format(path, namespace), "w") as ns_yaml:
        ns_yaml.writelines(manifest_lines)
    yaml_file = path + "/" + namespace + ".yaml"
    build_resource_with_yaml(yaml_file)
def build_custom_pod(path, cbcluster_config, name, image):
    """Write app-pod.yaml describing a single-container Deployment for
    *name* running *image*.

    NOTE(review): cbcluster_config is accepted but unused here — kept for
    signature compatibility with callers.
    """
    utils.write_line("Deploying Application Pods")
    check_and_cleanup_file(path, "app-pod.yaml")
    manifest_lines = [
        "apiVersion: apps/v1\n",
        "kind: Deployment\n",
        "metadata:\n",
        " name: {}\n".format(name),
        "spec:\n",
        " selector:\n",
        " matchLabels:\n",
        " app: {}\n".format(name),
        " template:\n",
        " metadata:\n",
        " labels:\n",
        " app: {}\n".format(name),
        " spec:\n",
        " containers:\n",
        " - name: {}\n".format(name),
        " image: {}\n".format(image),
        " imagePullPolicy: Always\n",
    ]
    with open("{0}/app-pod.yaml".format(path), "w") as app_pod:
        app_pod.writelines(manifest_lines)
def build_operator_role_binding(path, namespace):
    """Write operator-role-binding.yaml binding the couchbase-operator Role
    to its ServiceAccount in *namespace*, then apply it via kubectl."""
    utils.write_line("Building operator role binding")
    check_and_cleanup_file(path, "operator-role-binding.yaml")
    manifest_lines = [
        "apiVersion: rbac.authorization.k8s.io/v1\n",
        "kind: RoleBinding\n",
        "metadata:\n",
        " creationTimestamp: null\n",
        " name: couchbase-operator\n",
        "roleRef:\n",
        " apiGroup: rbac.authorization.k8s.io\n",
        " kind: Role\n",
        " name: couchbase-operator\n",
        "subjects:\n",
        "- kind: ServiceAccount\n",
        " name: couchbase-operator\n",
        " namespace: {0}\n".format(namespace),
    ]
    with open("{0}/operator-role-binding.yaml".format(path), "w") as orb_yaml:
        orb_yaml.writelines(manifest_lines)
    yaml_file = path + "/operator-role-binding.yaml"
    build_resource_with_yaml("{0} --namespace {1}".format(yaml_file, namespace))
def build_sgw_config(path, cbcluster_config, is_import):
    """Write sgw-config.json for a Sync Gateway pointed at the cluster's
    first bucket; *is_import* additionally enables import_docs.

    NOTE(review): credentials are hard-coded ("Administrator"/"password") —
    confirm this is only for test/demo environments.
    """
    utils.write_line("Deploying SGW")
    check_and_cleanup_file(path, "sgw-config.json")
    config_lines = [
        "{\n",
        " \"logging\": {\n",
        " \"log_file_path\": \"/var/tmp/sglogs\",\n",
        " \"console\": {\n",
        " \"enabled\": true,\n",
        " \"log_level\": \"info\",\n",
        " \"log_keys\": [\"*\"]\n",
        " }\n",
        " },\n",
        " \"databases\": {\n",
        " \"db\": {\n",
        " \"server\": \"{0}-0000.{0}.{1}.svc:8091\",\n".format(
            cbcluster_config.clustername, cbcluster_config.namespace),
        " \"bucket\": \"{0}\",\n".format(list(cbcluster_config.buckets)[0]),
        " \"username\": \"Administrator\",\n",
        " \"password\": \"password\",\n",
        " \"users\": { \"GUEST\": { \"disabled\": false, \"admin_channels\": [\"*\"] } },\n",
        " \"allow_conflicts\": false,\n",
        " \"revs_limit\": 20,\n",
    ]
    if is_import:
        config_lines.append(" \"enable_shared_bucket_access\": true,\n")
        config_lines.append(" \"import_docs\": true\n")
    else:
        config_lines.append(" \"enable_shared_bucket_access\": true\n")
    config_lines.extend([" }\n", " }\n", "}\n"])
    with open("{0}/sgw-config.json".format(path), "w") as sgw_config:
        sgw_config.writelines(config_lines)
def check_cluster_running(cbcluster_config):
    """Poll kubectl until every expected Couchbase pod reports READY "1/1".

    Pods are named "<clustername>-NNNN" with a zero-padded ordinal that
    increments across all servers' pods. For each pod, polls up to
    cbcluster_config.attempts times, sleeping cbcluster_config.wait_sec
    between polls.

    Returns:
        True if every pod became ready; False as soon as any pod exhausts
        its attempts without reaching the "1/1" state.
    """
    utils.write_line("Checking couchbasecluster status")
    counter = "0"  # pod ordinal, zero-padded to 4 digits in the pod name
    ret_val = True
    #command = kubectl get pods testcluster-0000 --namespace testns | tail -1 | tr -s [:blank:] | cut -d' ' -f2
    for itr in cbcluster_config.servers:
        server = cbcluster_config.servers[itr]
        if server.size >= 1:
            for i in range(0, int(server.size)):
                my_attempts = 1
                run = True
                ready = False
                while my_attempts <= cbcluster_config.attempts and run:
                    utils.write_line("Checking pod status...")
                    result = utils.execute_command_with_return(
                        "kubectl get pods {0}-{1} --namespace {2} | tail -1 | tr -s [:blank:] | cut -d' ' -f2"
                        .format(cbcluster_config.clustername, counter.zfill(4),
                                cbcluster_config.namespace), False, False,
                        True)[0]
                    utils.write_line("Got result {}".format(result))
                    if result == "1/1":
                        run = False
                        ready = True
                    else:
                        # BUG FIX: my_attempts was never incremented, so a pod
                        # that never became ready looped forever instead of
                        # honoring cbcluster_config.attempts.
                        my_attempts += 1
                        time.sleep(cbcluster_config.wait_sec)
                if not ready:
                    # Give up on the whole cluster at the first failed pod.
                    ret_val = False
                    return ret_val
                else:
                    counter = str(int(counter) + 1)
    return ret_val
def print_worker_nodes(self):
    """Print each worker-node name alongside its configuration object."""
    for node_name, node_cfg in self.worker_nodes.items():
        utils.write_line(node_name + " -> " + str(node_cfg))
def deploy_operator_sa(namespace):
    """Apply the operator service-account manifest for the configured
    operator *version* into *namespace*."""
    # Fixed typo in the log message ("Autonmous" -> "Autonomous").
    utils.write_line("Deploying Couchbase Autonomous Operator Service Account")
    build_resource_with_yaml(
        "./resources/cbao/{0}/operator-service-account.yaml --namespace {1}".
        format(version, namespace))
def deploy_operator_role(namespace):
    """Apply the operator Role manifest for the configured operator
    *version* into *namespace*."""
    # Fixed typo in the log message ("Autonmous" -> "Autonomous").
    utils.write_line("Deploying Couchbase Autonomous Operator Role")
    build_resource_with_yaml(
        "./resources/cbao/{0}/operator-role.yaml --namespace {1}".format(
            version, namespace))
def deploy_crd():
    """Apply the CouchbaseCluster CRD manifest for the configured operator version."""
    utils.write_line("Deploying CRD")
    crd_manifest = "./resources/cbao/{0}/crd.yaml".format(version)
    build_resource_with_yaml(crd_manifest)
def setup_tls(cb_config):
    """Generate TLS material for the Couchbase cluster and load it into
    Kubernetes secrets.

    Clones the external easy-rsa project into ./work/<name>/easy-rsa, builds
    a passphrase-less CA, issues a server certificate with cluster-specific
    SANs, converts the key to PEM, and creates the three secrets
    (couchbase-server-tls, couchbase-operator-admission,
    couchbase-operator-tls) in the cluster's namespace.
    """
    #Pulled from earlier work on testing internal CA and couchbasesummit
    #TODO - Convert to code generation and not external project
    utils.write_line("Generating TLS certificate")
    # Start from a clean easy-rsa checkout for this cluster's work directory.
    if os.path.exists("./work/{0}/easy-rsa".format(cb_config.name)):
        utils.execute_command(
            "rm -rf ./work/{0}/easy-rsa".format(cb_config.name), False)
    utils.execute_command(
        "git clone https://github.com/OpenVPN/easy-rsa ./work/{0}/easy-rsa".
        format(cb_config.name), False)
    # easy-rsa locates its PKI directory through the EASYRSA_PKI env var.
    os.environ['EASYRSA_PKI'] = "./work/{0}/easy-rsa/easyrsa3/pki".format(
        cb_config.name)
    utils.write_line("EASYRSA_PKI set to : {}".format(
        os.environ['EASYRSA_PKI']))
    utils.execute_command(
        "sh ./work/{0}/easy-rsa/easyrsa3/easyrsa init-pki".format(
            cb_config.name), False)
    # CA prompts are answered from the bundled couchbase_tls.txt via stdin.
    utils.execute_command(
        "sh ./work/{0}/easy-rsa/easyrsa3/easyrsa build-ca nopass < ./resources/cbao/{1}/couchbase_tls.txt"
        .format(cb_config.name, version), False)
    # SANs cover *.<cluster>.<ns>.svc, *.<ns>.svc and *.<cluster>.<dns>.
    # NOTE: format arg {0} (cb_config.name) is unused by this format string.
    san_string = "--subject-alt-name=\"DNS:*.{1}.{2}.svc,DNS:*.{2}.svc,DNS:*.{1}.{3}\"".format(
        cb_config.name,
        cb_config.get_cbcluster_config().clustername,
        cb_config.get_cbcluster_config().namespace,
        cb_config.get_cbcluster_config().dns)
    utils.execute_command(
        "sh ./work/{0}/easy-rsa/easyrsa3/easyrsa {1} build-server-full couchbase-server nopass"
        .format(cb_config.name, san_string), False)
    # Round-trip the private key PEM -> DER -> PEM — presumably to normalize
    # the key encoding easy-rsa produced; confirm before simplifying.
    utils.execute_command(
        "openssl rsa -in ./work/{0}/easy-rsa/easyrsa3/pki/private/couchbase-server.key -out ./work/{0}/easy-rsa/easyrsa3/pki/private/pkey.key.der -outform DER"
        .format(cb_config.name), False)
    utils.execute_command(
        "openssl rsa -in ./work/{0}/easy-rsa/easyrsa3/pki/private/pkey.key.der -inform DER -out ./work/{0}/easy-rsa/easyrsa3/pki/private/pkey.key -outform PEM"
        .format(cb_config.name), False)
    # Copy the issued server cert under the filenames each secret expects.
    utils.execute_command(
        "cp -p ./work/{0}/easy-rsa/easyrsa3/pki/issued/couchbase-server.crt ./work/{0}/easy-rsa/easyrsa3/pki/issued/chain.pem"
        .format(cb_config.name), False)
    utils.execute_command(
        "cp -p ./work/{0}/easy-rsa/easyrsa3/pki/issued/couchbase-server.crt ./work/{0}/easy-rsa/easyrsa3/pki/issued/tls-cert-file"
        .format(cb_config.name), False)
    utils.execute_command(
        "cp -p ./work/{0}/easy-rsa/easyrsa3/pki/private/pkey.key ./work/{0}/easy-rsa/easyrsa3/pki/private/tls-private-key-file"
        .format(cb_config.name), False)
    PRIVATE_PATH = "./work/{0}/easy-rsa/easyrsa3/pki/private".format(
        cb_config.name)
    ISSUED_PATH = "./work/{0}/easy-rsa/easyrsa3/pki/issued".format(
        cb_config.name)
    # Server pods' TLS key + cert chain.
    utils.execute_command(
        "kubectl create secret generic couchbase-server-tls --from-file {0} --from-file {1} --namespace {2}"
        .format(PRIVATE_PATH + "/pkey.key", ISSUED_PATH + "/chain.pem",
                cb_config.get_cbcluster_config().namespace), False)
    # Admission controller cert/key pair.
    utils.execute_command(
        "kubectl create secret generic couchbase-operator-admission --from-file {0} --from-file {1} --namespace {2}"
        .format(ISSUED_PATH + "/tls-cert-file",
                PRIVATE_PATH + "/tls-private-key-file",
                cb_config.get_cbcluster_config().namespace), False)
    # CA certificate for the operator.
    utils.execute_command(
        "kubectl create secret generic couchbase-operator-tls --from-file {0} --namespace {1}"
        .format(
            "./work/{0}/easy-rsa/easyrsa3/pki/ca.crt".format(cb_config.name),
            cb_config.get_cbcluster_config().namespace), False)
def build_cluster(cb_config):
    """Provision a full Couchbase deployment on the current Kubernetes
    cluster: namespace, TLS secrets, operator (CRD/role/SA/binding),
    the CouchbaseCluster resource, optional app/couchmart pods, optional
    Sync Gateway, and optional ExternalDNS.

    Relies on module-level state: `w` (GUI/app context holding cb_config),
    `ext_dns` (checkbox-style variable), `copyfile`, `kube_utils`, `cloud`
    and `utils`. Aborts via utils.on_error if the cluster does not come up.
    """
    # Update kubernetes config with settings
    # w.cb_config.get_cbcluster_config().namespace = w.TEntry_NS.get()
    # w.cb_config.get_cbcluster_config().clustername = w.TEntry_Cluster.get()
    # w.cb_config.get_cbcluster_config().version = versionbox
    # w.cb_config.
    update_config(cb_config)
    # Storage class used by the cluster's volume claim templates.
    kube_utils.build_resource_with_yaml("./resources/cbao/{0}/io1.yaml".format(kube_utils.version))
    utils.check_dir(w.cb_config.name, "kube")
    kube_utils.build_ns("./work/{0}/kube".format(w.cb_config.name),
                        w.cb_config.get_cbcluster_config().namespace)
    # Operator prerequisites: TLS secrets, CRD, RBAC, service account.
    kube_utils.setup_tls(cb_config)
    kube_utils.deploy_crd()
    kube_utils.deploy_operator_role(w.cb_config.get_cbcluster_config().namespace)
    kube_utils.deploy_operator_sa(w.cb_config.get_cbcluster_config().namespace)
    kube_utils.build_operator_role_binding("./work/{0}/kube".format(w.cb_config.name),
                                           w.cb_config.get_cbcluster_config().namespace)
    kube_utils.deploy_operator(w.cb_config.get_cbcluster_config().namespace)
    kube_utils.deploy_secret(w.cb_config.get_cbcluster_config().namespace)
    # Generate and apply the CouchbaseCluster manifest, then wait for pods.
    kube_utils.build_cb_cluster("./work/{0}/kube".format(w.cb_config.name),
                                cb_config.get_cbcluster_config())
    kube_utils.build_resource_with_yaml("./work/{0}/kube/couchbase-cluster.yaml --namespace {1}".format(
        w.cb_config.name, w.cb_config.get_cbcluster_config().namespace))
    running = kube_utils.check_cluster_running(cb_config.get_cbcluster_config())
    if not running:
        utils.on_error("Couchbase Cluster is not running")
        return
    # App Servers — one Deployment per requested app pod. Each iteration
    # rewrites app-pod.yaml before applying it.
    for i in range(0, int(w.cb_config.get_cbcluster_config().app)):
        kube_utils.build_custom_pod("./work/{0}/kube".format(w.cb_config.name),
                                    cb_config.get_cbcluster_config(),
                                    "couchbase-{0}".format(i),
                                    "couchbase/server:enterprise-{0}".format(
                                        w.cb_config.get_cbcluster_config().version))
        kube_utils.build_resource_yaml_no_check("./work/{0}/kube/app-pod.yaml --namespace {1}".format(
            w.cb_config.name, w.cb_config.get_cbcluster_config().namespace))
    # Couchmart
    for i in range(0, int(w.cb_config.get_cbcluster_config().couchmart)):
        kube_utils.build_custom_pod("./work/{0}/kube".format(w.cb_config.name),
                                    cb_config.get_cbcluster_config(),
                                    "couchmart-{0}".format(i),
                                    "cbck/couchmart:python2")
        kube_utils.build_resource_yaml_no_check("./work/{0}/kube/app-pod.yaml --namespace {1}".format(
            w.cb_config.name, w.cb_config.get_cbcluster_config().namespace))
    # kube_utils.build_resource_yaml_no_check("./resources/cbao/{0}/couchmart.yaml --namespace {1}".format(
    #     kube_utils.version, w.cb_config.get_cbcluster_config().namespace
    # ))
    # SGW — a user-supplied config file takes precedence over the generated one.
    try:
        sgw_conf = w.cb_config.get_cbcluster_config().sgw_conf
    except AttributeError:
        sgw_conf = None
    if sgw_conf is not None:
        # User-supplied Sync Gateway config: create the secret, template the
        # deployment with replica count and config filename via sed.
        utils.write_line("\nCreating sync gateway with conf: {}".format(sgw_conf))
        utils.execute_command("kubectl create secret generic sgw-config --from-file {0} --namespace {1}".format(
            sgw_conf, w.cb_config.get_cbcluster_config().namespace), False)
        copyfile("./resources/cbao/{0}/sgw-deployment.yaml".format(kube_utils.version),
                 "./work/{0}/kube/sgw-deployment.yaml".format(w.cb_config.name))
        utils.execute_command("sed -i .bkup s/###replica###/{0}/g ./work/{1}/kube/sgw-deployment.yaml".format(
            str(int(w.cb_config.get_cbcluster_config().sgw)), w.cb_config.name), False)
        cffile_array = sgw_conf.split("/")
        cffile = cffile_array[len(cffile_array) - 1]
        print("File = {}".format(cffile))
        utils.execute_command("sed -i .bkup s/###conffile###/{0}/g ./work/{1}/kube/sgw-deployment.yaml".format(
            cffile, w.cb_config.name), False)
        kube_utils.build_resource_with_yaml(
            "./work/{0}/kube/sgw-deployment.yaml --namespace {1}".format(
                w.cb_config.name, w.cb_config.get_cbcluster_config().namespace))
    else:
        # Generated config: first iteration deploys the import-enabled SGW,
        # second deploys the non-import SGW with the remaining replicas.
        for i in range(0, min(int(w.cb_config.get_cbcluster_config().sgw), 2)):
            if i == 0:
                kube_utils.build_sgw_config("./work/{0}/kube".format(w.cb_config.name),
                                            w.cb_config.get_cbcluster_config(), True)
                utils.execute_command(
                    "kubectl create secret generic sgw-config-import --from-file {0} --namespace {1}".format(
                        "./work/{0}/kube/sgw-config.json".format(w.cb_config.name),
                        w.cb_config.get_cbcluster_config().namespace), False)
                kube_utils.build_resource_with_yaml(
                    "./resources/cbao/{0}/sgw-deployment-import.yaml --namespace {1}".format(
                        kube_utils.version, w.cb_config.get_cbcluster_config().namespace))
            else:
                # Build non-import SGW using replicas
                kube_utils.build_sgw_config("./work/{0}/kube".format(w.cb_config.name),
                                            w.cb_config.get_cbcluster_config(), False)
                utils.execute_command("kubectl create secret generic sgw-config --from-file {0} --namespace {1}".format(
                    "./work/{0}/kube/sgw-config.json".format(w.cb_config.name),
                    w.cb_config.get_cbcluster_config().namespace), False)
                copyfile("./resources/cbao/{0}/sgw-deployment.yaml".format(kube_utils.version),
                         "./work/{0}/kube/sgw-deployment.yaml".format(w.cb_config.name))
                utils.execute_command("sed -i .bkup s/###replica###/{0}/g ./work/{1}/kube/sgw-deployment.yaml".format(
                    str(int(w.cb_config.get_cbcluster_config().sgw) - 1), w.cb_config.name), False)
                utils.execute_command("sed -i .bkup s/###conffile###/{0}/g ./work/{1}/kube/sgw-deployment.yaml".format(
                    "sgw-config.json", w.cb_config.name), False)
                kube_utils.build_resource_with_yaml(
                    "./work/{0}/kube/sgw-deployment.yaml --namespace {1}".format(
                        w.cb_config.name, w.cb_config.get_cbcluster_config().namespace))
    # ExternalDNS is only deployed when the GUI checkbox is set.
    if ext_dns.get() == 1:
        utils.write_line("\nDeploying external DNS\n")
        kube_utils.build_resource_with_yaml("./resources/cbao/{0}/externaldns-sa.yaml --namespace {1}".format(
            kube_utils.version, w.cb_config.get_cbcluster_config().namespace))
        kube_utils.build_resource_with_yaml("./resources/cbao/{0}/externaldns-cr.yaml".format(
            kube_utils.version))
        # Externaldns-crb
        copyfile("./resources/cbao/{0}/externaldns-crb.yaml".format(kube_utils.version),
                 "./work/{0}/kube/externaldns-crb.yaml".format(w.cb_config.name))
        utils.execute_command("sed -i .bkup s/###namespace###/{0}/g ./work/{1}/kube/externaldns-crb.yaml".format(
            w.cb_config.get_cbcluster_config().namespace, w.cb_config.name), False)
        kube_utils.build_resource_with_yaml("./work/{0}/kube/externaldns-crb.yaml --namespace {1}".format(
            w.cb_config.name, w.cb_config.get_cbcluster_config().namespace))
        # Externaldns-deployment
        copyfile("./resources/cbao/{0}/externaldns-deployment.yaml".format(kube_utils.version),
                 "./work/{0}/kube/externaldns-deployment.yaml".format(w.cb_config.name))
        # All worker-node groups must accept the policy or ExternalDNS is skipped.
        extdns_policy_pass = True
        for i in w.cb_config.get_eks_config().worker_nodes:
            utils.write_line("Attaching External policy for worker nodes {}".format(i))
            extdns_policy_pass = extdns_policy_pass and \
                cloud.attach_externaldns_policy(w.cb_config.get_cbcluster_config().dns, i)
        if extdns_policy_pass:
            #print("sed -i .bkup s/###hostedzone###/{0}/g ./work/{1}/kube/externaldns-deployment.yaml".format(
            #    cloud.get_hosted_zone(w.cb_config.get_cbcluster_config().dns), w.cb_config.name
            #))
            utils.execute_command(
                "sed -i .bkup s/###hostedzone###/{0}/g ./work/{1}/kube/externaldns-deployment.yaml".format(
                    cloud.get_hosted_zone(w.cb_config.get_cbcluster_config().dns),
                    w.cb_config.name), False)
            kube_utils.build_resource_with_yaml("./work/{0}/kube/externaldns-deployment.yaml --namespace {1}".format(
                w.cb_config.name, w.cb_config.get_cbcluster_config().namespace))
        else:
            utils.write_warn("Unable attach policy and deploy ExternalDNS\n")
    utils.write_line("Build of Couchbase Cluster is complete")
def build_cluster_exec(eks_config, cb_config):
    """Provision the full EKS stack: VPC, control plane, worker nodes,
    auth map, node labels, and node-group links."""
    attempts = eks_config.get_attempts()
    wait_sec = eks_config.get_wait_sec()
    cluster_name = eks_config.get_eks_cluster_name()

    utils.write_line("Building VPC")
    build_vpc(eks_config.get_vpc_stack_name(), attempts, wait_sec)

    utils.write_line("Building EKS Cluster")
    build_kube_cluster(cluster_name, attempts, wait_sec, eks_config.get_arn())

    utils.write_line("Connecting to Kubernetes Cluster")
    connect_to_eks_cluster(cluster_name)

    utils.write_line("Building Worker Nodes")
    wrk_nodes = eks_config.get_work_nodes()
    for inst in wrk_nodes:
        build_work_nodes(wrk_nodes[inst], cluster_name, attempts, wait_sec,
                         eks_config.get_name())

    utils.write_line("Applying auth map")
    apply_auth_map(eks_config.get_name())

    utils.write_line("Validating Nodes are ready")
    if validate_nodes_ready(attempts, wait_sec):
        utils.write_line("Applying labels")
        for inst in wrk_nodes:
            apply_labels(wrk_nodes[inst])
    else:
        utils.write_error("Worker nodes not ready")
        utils.on_error("Worker nodes not ready")

    utils.write_line("Linking Node Groups")
    link_node_groups(cb_config)
    utils.write_line("Build of Kubernetes Cluster is complete")
def read_input(self):
    """Read one command from the console prompt and dispatch it.

    Numeric options query or mutate AWS/EKS state through the `cloud`
    module and self.config; "q" clears EKSConsole.should_run to stop the
    console loop. Unrecognized input falls through with no action.
    """
    myInput = ""
    myInput = utils.prompt_input("> ")
    # 0-9: read-only queries (regions, instances, VPCs, stacks, clusters).
    if str(myInput) == "0":
        regions = utils.parse_results(cloud.list_regions("\"Regions[*].{RegionName:RegionName}\""))
        for reg in regions:
            utils.write_line(str(regions[reg]["RegionName"]))
    elif str(myInput) == "1":
        data = utils.parse_results(
            cloud.get_running_instances(
                "\"Reservations[*].Instances[*].{Type:InstanceType,LaunchTime:LaunchTime,AZ:Placement.AvailabilityZone,Tags:Tags}\""))
        for inst in data:
            utils.write_line("")
            utils.write_line(str(data[inst]))
    elif str(myInput) == "2":
        region = utils.prompt_input("enter new region: ")
        cloud.switch_region(region)
    elif str(myInput) == "3":
        profile = utils.prompt_input("enter new profile: ")
        cloud.switch_profile(profile)
    elif str(myInput) == "4":
        instances = self.get_instance_type()
        for inst in instances:
            utils.write_line(inst)
    elif str(myInput) == "5":
        vpcs = utils.parse_results(cloud.list_vpc())
        for vpc in vpcs:
            utils.write_line(str(vpcs[vpc]))
    elif str(myInput) == "6":
        stacks = utils.parse_results(cloud.get_stacks("\"Stacks[*].{Name:StackName,Status:StackStatus}\""))
        for stack in stacks:
            utils.write_line(str(stacks[stack]))
    elif str(myInput) == "7":
        utils.write_line(cloud.list_kube_clusters())
    elif str(myInput) == "8":
        utils.write_line("Option not yet implemented")
    elif str(myInput) == "9":
        utils.write_line("Option not yet implemented")
    # 10-13: build/connect VPC and EKS cluster.
    elif str(myInput) == "10":
        cloud.build_vpc(self.config.get_vpc_stack_name(), self.config.get_attempts(),
                        self.config.get_wait_sec())
    elif str(myInput) == "11":
        vpc_name = utils.prompt_input("Enter a vpc to connect to [Blank for default name]: ")
        if len(vpc_name) > 1:
            cloud.connect_to_vpc(vpc_name)
        else:
            cloud.connect_to_vpc(self.config.get_vpc_stack_name())
    elif str(myInput) == "12":
        cloud.build_kube_cluster(self.config.get_eks_cluster_name(), self.config.get_attempts(),
                                 self.config.get_wait_sec(), EKSConfiguration._eks_role_arn)
    elif str(myInput) == "13":
        eks_name = utils.prompt_input("Enter an eks cluster [Blank for default name]: ")
        if len(eks_name) > 1:
            cloud.connect_to_kube_cluster(eks_name)
        else:
            cloud.connect_to_kube_cluster(self.config.get_eks_cluster_name())
    # 14-16: manage worker-node configurations.
    elif str(myInput) == "14":
        wrk_node = utils.prompt_input("Enter a worker node config to edit [Blank to create new one]: ")
        if len(wrk_node) > 1:
            self.config.configure_worker_nodes(self.config.worker_nodes[wrk_node])
        else:
            self.config.configure_worker_nodes(None)
    elif str(myInput) == "15":
        worker_node_cfg = utils.prompt_input("Enter a worker node configuration to delete")
        self.config.del_worker_node(worker_node_cfg)
    elif str(myInput) == "16":
        self.config.print_worker_nodes()
    elif str(myInput) == "17":
        version = utils.prompt_input("Enter a K8S version [Blank for all]: ")
        if len(version) >= 1:
            utils.write_line(version + " -> " + cloud.get_ami_version(version))
        else:
            results = cloud.get_ami_all()
            for inst in results:
                utils.write_line(inst + " -> " + results[inst]['ami'])
    # 18: build one or all worker-node groups, then auth-map and labels.
    elif str(myInput) == "18":
        wrk_node_name = utils.prompt_input("Enter a worker node configuration to build [Blank for all]: ")
        if len(wrk_node_name) > 1:
            cloud.build_work_nodes(self.config.get_work_nodes()[wrk_node_name],
                                   self.config.get_eks_cluster_name(),
                                   self.config.get_attempts(), self.config.get_wait_sec(),
                                   self.config.get_name())
        else:
            wrk_nodes = self.config.get_work_nodes()
            for inst in wrk_nodes:
                cloud.build_work_nodes(wrk_nodes[inst], self.config.get_eks_cluster_name(),
                                       self.config.get_attempts(), self.config.get_wait_sec(),
                                       self.config.get_name())
        cloud.apply_auth_map(self.config.get_name())
        if cloud.validate_nodes_ready(self.config.get_attempts(), self.config.get_wait_sec()):
            if len(wrk_node_name) > 1:
                cloud.apply_labels(self.config.get_work_nodes()[wrk_node_name])
            else:
                wrk_nodes = self.config.get_work_nodes()
                for inst in wrk_nodes:
                    cloud.apply_labels(wrk_nodes[inst])
        else:
            utils.on_error("Worker nodes not ready")
    elif str(myInput) == "19":
        print(cloud.validate_nodes_ready(self.config.get_attempts(), self.config.get_wait_sec()))
    elif str(myInput) == "20":
        cloud.get_current_cluster()
    elif str(myInput) == "21":
        work_nodes = self.config.get_work_nodes()
        for itr in work_nodes:
            cloud.apply_labels(work_nodes[itr])
    #elif str(myInput) == "22":
    #    cloud.load_instance_type()
    elif str(myInput) == "q":
        # Signal the console loop (elsewhere in the file) to exit.
        EKSConsole.should_run = False
def build_cb_cluster(path, cbcluster_config):
    """Generate couchbase-cluster.yaml (a CouchbaseCluster resource) in
    *path* from *cbcluster_config*.

    Emits general metadata, optional TLS block, admin-console exposure,
    exposed features, DNS, cluster settings, buckets, and per-server specs
    (services, pod resources, node selectors, volume mounts). Volume claim
    templates are accumulated across servers and written once at the end.
    Returns early (leaving a partial file) on validation errors: bucket
    management enabled with zero buckets, or zero servers.
    """
    utils.write_line("Deploying Couchbase Cluster")
    check_and_cleanup_file(path, "couchbase-cluster.yaml")
    with open("{0}/couchbase-cluster.yaml".format(path), "w") as cb_yaml:
        #General Info
        cb_yaml.write("apiVersion: couchbase.com/v1\n")
        cb_yaml.write("kind: CouchbaseCluster\n")
        cb_yaml.write("metadata:\n")
        cb_yaml.write(" name: {0}\n".format(cbcluster_config.clustername))
        cb_yaml.write(" namespace: {0}\n".format(cbcluster_config.namespace))
        cb_yaml.write("spec:\n")
        cb_yaml.write(" baseImage: couchbase/server\n")
        cb_yaml.write(" version: enterprise-{0}\n".format(
            cbcluster_config.version))
        cb_yaml.write(" paused: false\n")
        if cbcluster_config.antiaffinity:
            cb_yaml.write(" antiAffinity: true\n")
        else:
            cb_yaml.write(" antiAffinity: false\n")
        #TODO - TLS Section
        # TLS is required whenever any feature is exposed or external DNS is on.
        if cbcluster_config.tls or cbcluster_config.expose_features['admin'] or \
                cbcluster_config.expose_features['xdcr'] or cbcluster_config.expose_features['client'] or \
                cbcluster_config.external_dns:
            cb_yaml.write(" tls:\n")
            cb_yaml.write(" static:\n")
            cb_yaml.write(" member:\n")
            cb_yaml.write(" serverSecret: couchbase-server-tls\n")
            cb_yaml.write(" operatorSecret: couchbase-operator-tls\n")
        # NOTE(review): authSecret placement reconstructed as unconditional —
        # confirm it was not meant to live inside the TLS block.
        cb_yaml.write(" authSecret: cb-example-auth\n")
        #Admin Services
        if cbcluster_config.expose_admin_console:
            cb_yaml.write(" exposeAdminConsole: true\n")
            cb_yaml.write(" adminConsoleServiceType: LoadBalancer\n")
            cb_yaml.write(" adminConsoleServices:\n")
            # NOTE(review): expose_admin_svcs is compared as a single string,
            # so at most one service entry is emitted — confirm intended.
            if cbcluster_config.expose_admin_svcs == "data":
                cb_yaml.write(" - data\n")
            if cbcluster_config.expose_admin_svcs == "index":
                cb_yaml.write(" - index\n")
            if cbcluster_config.expose_admin_svcs == "query":
                cb_yaml.write(" - query\n")
            if cbcluster_config.expose_admin_svcs == "search":
                cb_yaml.write(" - search\n")
            if cbcluster_config.expose_admin_svcs == "eventing":
                cb_yaml.write(" - eventing\n")
            if cbcluster_config.expose_admin_svcs == "analytics":
                cb_yaml.write(" - analytics\n")
        else:
            cb_yaml.write(" exposeAdminConsole: false\n")
        #Exposed Features
        if cbcluster_config.expose_features['admin'] or cbcluster_config.expose_features['xdcr'] or \
                cbcluster_config.expose_features['client']:
            cb_yaml.write(" exposedFeatures:\n")
            for itr in cbcluster_config.expose_features:
                if cbcluster_config.expose_features[itr]:
                    cb_yaml.write(" - {}\n".format(itr))
            cb_yaml.write(" exposedFeatureServiceType: LoadBalancer\n")
        if cbcluster_config.disable_bucket_management:
            cb_yaml.write(" disableBucketManagement: true\n")
        else:
            cb_yaml.write(" disableBucketManagement: false\n")
        #DNS
        cb_yaml.write(" dns:\n")
        cb_yaml.write(" domain: {}\n".format(cbcluster_config.dns))
        #Cluster
        cb_yaml.write(" cluster:\n")
        for itr in cbcluster_config.cluster:
            cb_yaml.write(" {0}: {1}\n".format(
                itr, cbcluster_config.cluster[itr]))
        #Buckets
        cb_yaml.write(" buckets:\n")
        if not cbcluster_config.disable_bucket_management and len(
                cbcluster_config.buckets) < 1:
            utils.write_error(
                "Bucket management is enabled and 0 buckets defined")
            return
        else:
            for itr in cbcluster_config.buckets:
                bucket = cbcluster_config.buckets[itr]
                cb_yaml.write(" - name: {}\n".format(bucket.name))
                cb_yaml.write(" type: {}\n".format(bucket.type))
                cb_yaml.write(" memoryQuota: {}\n".format(
                    bucket.memoryQuota))
                cb_yaml.write(" replicas: {}\n".format(bucket.replicas))
                cb_yaml.write(" ioPriority: {}\n".format(
                    bucket.ioPriority))
                cb_yaml.write(" evictionPolicy: {}\n".format(
                    bucket.evictionPolicy))
                cb_yaml.write(" conflictResolution: {}\n".format(
                    bucket.conflictResolution))
                cb_yaml.write(" enableFlush: {}\n".format(
                    bucket.enableFlush))
                cb_yaml.write(" enableIndexReplica: false\n")
                cb_yaml.write(" compressionMode: passive\n")
        #Servers
        if len(cbcluster_config.servers) < 1:
            utils.write_error("At least one server must be configured")
            return
        else:
            # vct_string/vct_map accumulate deduplicated volumeClaimTemplates
            # across all servers; written once after the loop.
            vct_string = ""
            vct_map = {}
            cb_yaml.write(" servers:\n")
            for itr in cbcluster_config.servers:
                server = cbcluster_config.servers[itr]
                cb_yaml.write(" - size: {}\n".format(server.size))
                cb_yaml.write(" name: {}\n".format(server.name))
                cb_yaml.write(" services:\n")
                # Services are flagged "1" (enabled) per service name.
                for svc_itr in server.services:
                    if server.services[svc_itr] == "1":
                        cb_yaml.write(" - {}\n".format(svc_itr))
                #server.pod
                if check_pod(server):
                    cb_yaml.write(" pod:\n")
                    if check_requests(server) or check_limts(server):
                        cb_yaml.write(" resources:\n")
                        if check_limts(server):
                            cb_yaml.write(" limits:\n")
                            # Zero values mean "not set" and are omitted.
                            if int(server.pod.limits['cpu']) > 0:
                                cb_yaml.write(
                                    " cpu: \"{}\"\n".format(
                                        server.pod.limits['cpu']))
                            if int(server.pod.limits['memory']) > 0:
                                cb_yaml.write(
                                    " memory: \"{0}{1}\"\n".format(
                                        server.pod.limits['memory'],
                                        server.pod.limits['memory_size']))
                            #if int(server.pod.limits['storage']) > 0:
                            #    cb_yaml.write(" storage: \"{0}{1}\"\n".format(
                            #        server.pod.limits['storage'], server.pod.limits['storage_size']
                            #    ))
                        if check_requests(server):
                            cb_yaml.write(" requests:\n")
                            if int(server.pod.requests['cpu']) > 0:
                                cb_yaml.write(
                                    " cpu: \"{}\"\n".format(
                                        server.pod.requests['cpu']))
                            if int(server.pod.requests['memory']) > 0:
                                cb_yaml.write(
                                    " memory: \"{0}{1}\"\n".format(
                                        server.pod.requests['memory'],
                                        server.pod.requests['memory_size']))
                            #if int(server.pod.requests['storage']) > 0:
                            #    cb_yaml.write(" storage: \"{0}{1}\"\n".format(
                            #        server.pod.requests['storage'], server.pod.requests['storage_size']
                            #    ))
                    if len(server.pod.nodeselector) >= 1:
                        cb_yaml.write(" nodeSelector:\n")
                        for itr in server.pod.nodeselector:
                            cb_yaml.write(" {0}: {1}\n".format(
                                itr, server.pod.nodeselector[itr]))
                    #Volume Mounts
                    #TODO - Review Volume Mounts (Possible timeout issue)
                    if check_volume_mount(server):
                        cb_yaml.write(" volumeMounts:\n")
                        if server.pod.volume_mount['default'] != "":
                            cb_yaml.write(" default: {0}\n".format(
                                server.pod.volume_mount['default']))
                        #if server.pod.volume_mount['data'] != "":
                        #    cb_yaml.write(" data: {0}\n".format(server.pod.volume_mount['data']))
                        #if server.pod.volume_mount['index'] != "":
                        #    cb_yaml.write(" index: {0}\n".format(server.pod.volume_mount['index']))
                        #vm_analytics = " analytics:\n"
                        #for itr in server.pod.volume_mount['analytics']:
                        #    if itr != "":
                        #        vm_analytics = vm_analytics + " - {}\n".format(itr)
                        #if vm_analytics != " analytics:\n":
                        #    cb_yaml.write(vm_analytics)
                if len(cbcluster_config.vct) >= 1:
                    if len(vct_string) < 1:
                        vct_string = vct_string + " volumeClaimTemplates:\n"
                    for itr in cbcluster_config.vct:
                        tmp_vct = cbcluster_config.vct[itr]
                        # Deduplicate templates by name across servers.
                        if tmp_vct.name not in vct_map:
                            vct_map[tmp_vct.name] = "Added"
                            vct_string = vct_string + " - metadata:\n"
                            vct_string = vct_string + " name: {}\n".format(
                                tmp_vct.name)
                            vct_string = vct_string + " spec:\n"
                            vct_string = vct_string + " storageClassName: \"{}\"\n".format(
                                tmp_vct.storage_class)
                            vct_string = vct_string + " resources:\n"
                            vct_string = vct_string + " requests:\n"
                            vct_string = vct_string + " storage: \"{0}{1}\"\n".format(
                                tmp_vct.size, tmp_vct.size_type)
            #After all the servers are added to the yaml configure the VCT once
            if len(vct_string) > 1:
                cb_yaml.write(vct_string)
def deploy_secret(namespace):
    """Apply the Couchbase administrator-credentials secret manifest for the
    configured operator version into *namespace*."""
    utils.write_line("Deploying Couchbase Administrator Password")
    secret_manifest = "./resources/cbao/{0}/secret.yaml --namespace {1}".format(
        version, namespace)
    build_resource_with_yaml(secret_manifest)