options = {'bootstrap_servers': self._bootstrap_endpoint()} options.update(kwargs) return options def _bootstrap_endpoint(self): v1 = kubernetes.client.CoreV1Api(self._cluster.Kubectl.client()) port = v1.read_namespaced_service(namespace=self._namespace, name='kafka-cluster-kafka-external-bootstrap').spec.ports[0].node_port return f"{self._master.ip}:{port}" def admin(self, **kwargs): self._expose() options = self._add_default_options(kwargs) return kafka.KafkaAdminClient(**options) def consumer(self, *topics, **kwargs): self._expose() options = self._add_default_options(kwargs) return kafka.KafkaConsumer(*topics, **options) def producer(self, **kwargs): self._expose() options = self._add_default_options(kwargs) return kafka.KafkaProducer(**options) def ping(self): return self.consumer().topics() cluster_plugins.register('Kafka', Kafka)
lambda: self._num_ready_pods() == len(self._cluster.hosts), timeout=120) logging.debug(f"Deployment created. status={res.metadata.name}") def kill(self): if not self.running: logging.debug("nothing to remove") return logging.debug("trying to remove automation-proxy daemonset") try: self._k8s_v1_client.delete_namespaced_daemon_set( name=self.daemon_set_name, namespace='default') except ApiException as e: logging.exception( "Exception when calling AppsV1Api->create_namespaced_daemon_set: %s\n" % e) waiter.wait_for_predicate(lambda: not self.running) for host in self._cluster.hosts.values(): host.TunnelManager.clear() logging.debug("removed successfully!") def restart(self): self.run() def _num_ready_pods(self): return self._k8s_v1_client.read_namespaced_daemon_set( name=self.daemon_set_name, namespace="default").status.number_ready cluster_plugins.register("ProxyDaemonSet", ProxyDaemonSet)
if self._api_token: return ssh = self._master.SshDirect try: ssh.execute("sudo kubectl create sa automation-admin") ssh.execute( "sudo kubectl create clusterrolebinding automation-admin --serviceaccount=default:automation-admin --clusterrole=cluster-admin" ) except SSHCalledProcessError as e: pass get_sa_token = lambda: ssh.execute( '''sudo kubectl get secrets -n default -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='automation-admin')].data.token}"|base64 --decode''' ).strip() waiter.wait_for_predicate(get_sa_token, timeout=30) self._api_token = get_sa_token() def client(self, **kwargs): config = self._create_config(**kwargs) return ApiClient(config) def verify_functionality(self): api = kubernetes.client.CoreV1Api(self.client()) res = api.list_pod_for_all_namespaces(watch=False) for i in res.items: print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) cluster_plugins.register('Kubectl', Kubectl)
from infra.model import cluster_plugins
from devops_automation_infra.utils import s3
import boto3


class Seaweed:
    """Cluster plugin giving S3-protocol access to the in-cluster SeaweedFS."""

    def __init__(self, cluster):
        self._cluster = cluster
        # In-cluster service endpoints: the S3 gateway and the filer.
        self.DNS = 'seaweedfs-s3.default.svc.cluster.local'
        self.PORT = 8333
        self.filer_host = 'seaweedfs-filer.default.svc.cluster.local'
        self.filer_port = 8888

    @property
    def _master(self):
        return self._cluster.K8SMaster()

    @property
    def _tunnel(self):
        # Shared tunnel to the S3 service, created lazily on first use.
        return self._master.TunnelManager.get_or_create("Seaweed-s3", self.DNS, self.PORT)

    def create_client(self):
        """Return a boto3 S3 client that reaches SeaweedFS through the tunnel."""
        local_host, local_port = self._tunnel.host_port
        endpoint = f"http://{local_host}:{local_port}"
        # Placeholder credentials — presumably not validated by SeaweedFS; confirm.
        return boto3.client('s3',
                            endpoint_url=endpoint,
                            aws_secret_access_key='any',
                            aws_access_key_id='any')


cluster_plugins.register('Seaweed', Seaweed)
class K8SMaster:
    """Callable plugin that locates the Kubernetes master node of the cluster.

    Calling an instance probes every host concurrently with ``kubectl get po``
    and returns the first host whose probe succeeds.

    Raises:
        Exception: if the winning IP cannot be matched back to a cluster host.
    """

    def __init__(self, cluster):
        self._cluster = cluster

    def __call__(self):
        # Probe all hosts in parallel; the first successful kubectl call wins.
        jobs = {
            host.ip: partial(self._kubectl, host)
            for host in self._cluster.hosts.values()
        }
        bg = concurrently.Background(jobs)
        bg.start()
        master_ip = next(
            iter(
                bg.wait(return_when=concurrently.Completion.WAIT_FIRST_SUCCESS
                        ).keys()))
        # BUGFIX: next() without a default raises StopIteration when no host
        # matches, which made the guard below dead code. Supply None so the
        # intended, explicit exception is raised instead.
        master = next((host for host in self._cluster.hosts.values()
                       if host.ip == master_ip), None)
        if master is None:
            raise Exception("Couldn't find running masters nodes")
        return master

    def _kubectl(self, host):
        # Retry until kubectl answers — the cluster may still be coming up.
        return waiter.wait_nothrow(
            lambda: host.SshDirect.execute("sudo kubectl get po"),
            timeout=150)


cluster_plugins.register('K8SMaster', K8SMaster)
wait=True, timeout="120", force=True, **kwargs): kwargs["global.localRegistry"] = docker_registry kwargs["global.pullPolicy"] = image_pull_policy cmd_options = "" if self.app_exists(app_name) and force: logging.info(f"{app_name} is Already exists, Running upgrade...") self.upgrade_app(app_name, version, **kwargs) else: rancher_cmd = "rancher app install" for k, v in kwargs.items(): cmd_options += f" --set {k}={v}" cmd_options += f" --version {version} --namespace {namespace} {app_name} {app_name} --no-prompt" cmd = f"{rancher_cmd} {cmd_options}" logging.debug(cmd) self.cli_execute(cmd) if wait: self.wait_for_app(app_name, timeout) def delete_app(self, app_name): self.cli_execute(f"rancher app delete {app_name}") def app_exists(self, app_name): res = requests.get(f"{self.base_url}/v3/project/local:p-8n6zr/apps/p-8n6zr%3A{app_name}", headers=self.auth_header, verify=False) assert res.status_code == 200 or res.status_code == 404 return res.status_code != 404 cluster_plugins.register("Rancher", Rancher)
class Postgresql(object):
    """Cluster plugin that opens psycopg2 connections to the in-cluster postgres."""

    def __init__(self, cluster):
        self._cluster = cluster
        self.DNS = 'postgres.default.svc.cluster.local'
        self.PORT = 5432

    @property
    def _master(self):
        return self._cluster.K8SMaster()

    @property
    def password(self):
        """Fetch and decode the postgres password from the k8s secret."""
        secret = kubectl_utils.get_secret_data(self._cluster.Kubectl.client(),
                                               namespace="default",
                                               name='postgres-secret',
                                               path='password')
        return secret.decode()

    def connection(self):
        """Return a new psycopg2 connection routed through an SSH tunnel."""
        tunnel = self._master.TunnelManager.get_or_create(
            'postgres', self.DNS, self.PORT)
        local_host, local_port = tunnel.host_port
        return psycopg2.connect(host=local_host,
                                port=local_port,
                                user='******',
                                password=self.password,
                                database='anv_db')


cluster_plugins.register('Postgresql', Postgresql)
from devops_automation_infra.k8s_plugins.k8s_master import K8SMaster
from infra.model import cluster_plugins


class Gravity:
    """Cluster plugin that runs commands via ``gravity exec`` on the master."""

    def __init__(self, cluster):
        self._cluster = cluster
        # Resolve the k8s master once, at construction time.
        self._master = self._cluster.K8SMaster()

    def exec(self, command):
        """Execute *command* inside gravity on the master over SSH."""
        gravity_cmd = f"sudo gravity exec {command}"
        return self._master.SshDirect.execute(gravity_cmd)


cluster_plugins.register('Gravity', Gravity)
import redis
from devops_automation_infra.plugins import tunnel_manager
from infra.model import cluster_plugins


class Redis(object):
    """Cluster plugin exposing a redis client through an SSH tunnel."""

    def __init__(self, cluster):
        self._cluster = cluster

    @property
    def _tunnel(self):
        # Tunnel to the redis-master service via the cluster's master host.
        master_host = self._cluster.master
        transport = master_host.SSH.get_transport()
        return master_host.TunnelManager.get_or_create(
            'redis',
            dns_name="redis-master",
            port=6379,
            transport=transport)

    def create_client(self, db=0):
        """Return a redis client bound to the local end of the tunnel."""
        local_port = self._tunnel.local_port
        return redis.Redis(host="127.0.0.1", port=local_port, db=db)


cluster_plugins.register('Redis', Redis)
from devops_automation_infra.utils import kubectl


class Consul(object):
    """Cluster plugin for talking to the consul-server running in k8s."""

    def __init__(self, cluster):
        self._cluster = cluster
        self.NAME = "consul-server"
        self.DNS_NAME = f'{self.NAME}.default.svc.cluster.local'
        self.URI = "/consul"
        self.PORT = 8500

    @property
    def _master(self):
        return self._cluster.K8SMaster()

    @property
    def _tunnel(self):
        # One shared tunnel to the consul service, created lazily.
        return self._master.TunnelManager.get_or_create(
            'consul', self.DNS_NAME, self.PORT)

    def create_client(self):
        """Return a consul client connected through the tunnel."""
        local_host, local_port = self._tunnel.host_port
        return consul.Consul(local_host, local_port)

    def clear_data(self):
        """Wipe the consul-server stateful-set's data via kubectl."""
        k8s_client = self._cluster.Kubectl.client()
        kubectl.delete_stateful_set_data(k8s_client, self.NAME)


cluster_plugins.register('Consul', Consul)