def test():
    """Spin up several ReplicaManagers against a local `kubectl proxy`.

    Creates `num` managers on consecutive ports starting at `base`, each
    pointed at a fake pod list so no real cluster state is needed.
    """
    num = 3
    base = 27020

    k8s = HTTPClient(KubeConfig.from_service_account())
    # Redirect the client at a local API proxy instead of the real server.
    k8s.url = 'http://127.0.0.1:8001'
    k8s.session = k8s.build_session()

    def get_mongo_pods():
        # Build one fake Pod per test port, mirroring the hostnames below.
        pods = []
        for port in range(base, base + num):
            pods.append(Pod(None, {
                'metadata': {
                    'labels': {'hostname': 'fb-1.db.waverbase.com:%d' % port}
                },
                'status': {'podIP': '127.0.0.1:%d' % port}
            }))
        return pods

    for port in range(base, base + num):
        manager = ReplicaManager(
            app_name='testapp',
            creator_name='testcreator',
            hostname='fb-1.db.waverbase.com:%d' % port,
            k8s=k8s,
            local_mongo_server_conn='mongodb://127.0.0.1:%d' % port,
            external_ip='127.0.0.1:%d' % port)
        manager.local_pod_ip = '127.0.0.1:%d' % port
        manager.get_mongo_pods = get_mongo_pods
        manager.start()
def __init__(self, app, nworkers, **kwargs):
    """Initialize the Kubernetes job runner.

    Registers the k8s-specific runner parameter specs, starts the parent
    runner, then builds the pykube client either from the pod's service
    account or from a kubeconfig file.

    :param app: the application instance
    :param nworkers: number of worker threads to start
    :param kwargs: forwarded to the base runner (extended with
        ``runner_param_specs``)
    """
    # Check if pykube was importable, fail if not
    assert operator is not None, K8S_IMPORT_MESSAGE
    runner_param_specs = dict(
        k8s_config_path=dict(map=str, default=os_environ.get('KUBECONFIG', None)),
        k8s_use_service_account=dict(map=bool, default=False),
        k8s_persistent_volume_claim_name=dict(map=str),
        k8s_persistent_volume_claim_mount_path=dict(map=str),
        k8s_namespace=dict(map=str, default="default"),
        # BUG FIX: validator previously read `int > 0`, which compares the
        # `int` type object itself and never inspects the value.
        k8s_pod_retrials=dict(map=int, valid=lambda x: x > 0, default=3))
    if 'runner_param_specs' not in kwargs:
        kwargs['runner_param_specs'] = dict()
    kwargs['runner_param_specs'].update(runner_param_specs)

    # Start the job runner parent object
    super(KubernetesJobRunner, self).__init__(app, nworkers, **kwargs)

    if "k8s_use_service_account" in self.runner_params and self.runner_params[
            "k8s_use_service_account"]:
        # Running inside the cluster: use the mounted service-account token.
        self._pykube_api = HTTPClient(KubeConfig.from_service_account())
    else:
        self._pykube_api = HTTPClient(
            KubeConfig.from_file(self.runner_params["k8s_config_path"]))
    self._galaxy_vol_name = "pvc-galaxy"  # TODO this needs to be read from params!!

    self._init_monitor_thread()
    self._init_worker_threads()
def deploy_service(template_pod, template_service, branch, domain_zone): #api = HTTPClient(KubeConfig.from_file("{0}/.kube/config".format(os.environ['HOME']))) api = HTTPClient(KubeConfig.from_service_account()) with open(template_service) as t_file: ts = yaml.load(t_file) svc_name = ts['metadata']['name'] name = "{0}-{1}".format(svc_name, branch) ts['spec']['type'] = 'LoadBalancer' ts['spec']['selector']['name'] = name ts['metadata']['name'] = name ts['metadata']['labels']['name'] = name new = Service(api, ts) new.create() print "New service created" with open(template_pod) as t_file: tp = yaml.load(t_file) name = tp['metadata']['name'] name = "{0}-{1}".format(name, branch) image = tp['spec']['containers'][0]['image'] image = "{0}:{1}".format(image, branch) tp['spec']['containers'][0]['image'] = image tp['spec']['containers'][0]['name'] = name tp['metadata']['name'] = name tp['metadata']['labels']['name'] = name new = Pod(api, tp) new.create() print "New pod created" print "Waiting for ELB to spawn" lb_name = get_service_lb.wait_for_lb_name(name) print "Got ELB {0}".format(lb_name) return lb_name, svc_name
def __init__(self, app, nworkers, **kwargs):
    """Set up the Kubernetes job runner and its cluster API client.

    :param app: the application instance
    :param nworkers: number of worker threads to start
    :param kwargs: forwarded to the base runner (extended with
        ``runner_param_specs``)
    """
    # Check if pykube was importable, fail if not
    assert KubeConfig is not None, K8S_IMPORT_MESSAGE
    runner_param_specs = dict(
        k8s_config_path=dict(map=str, default=os_environ.get('KUBECONFIG', None)),
        k8s_use_service_account=dict(map=bool, default=False),
        k8s_persistent_volume_claim_name=dict(map=str),
        k8s_persistent_volume_claim_mount_path=dict(map=str),
        k8s_namespace=dict(map=str, default="default"),
        # BUG FIX: `int > 0` compared the `int` type object, ignoring the
        # actual value; the validator must test x.
        k8s_pod_retrials=dict(map=int, valid=lambda x: x > 0, default=3))
    if 'runner_param_specs' not in kwargs:
        kwargs['runner_param_specs'] = dict()
    kwargs['runner_param_specs'].update(runner_param_specs)

    # Start the job runner parent object
    super(KubernetesJobRunner, self).__init__(app, nworkers, **kwargs)

    if "k8s_use_service_account" in self.runner_params and self.runner_params["k8s_use_service_account"]:
        # In-cluster credentials via the mounted service-account token.
        self._pykube_api = HTTPClient(KubeConfig.from_service_account())
    else:
        self._pykube_api = HTTPClient(KubeConfig.from_file(self.runner_params["k8s_config_path"]))
    self._galaxy_vol_name = "pvc-galaxy"  # TODO this needs to be read from params!!

    self._init_monitor_thread()
    self._init_worker_threads()
def __init__(self, task_name=None):
    """Bind to the cluster API using the pod's service account.

    :param task_name: optional task identifier; falsy values are stored
        as ``None`` (preserves the original normalization)
    """
    self.kube_api = HTTPClient(KubeConfig.from_service_account())
    # NOTE(review): TLS certificate verification is disabled for these
    # in-cluster API calls — confirm this is intentional.
    self.kube_api.session.verify = False
    self.task_name = task_name if task_name else None
    # Namespace must be provided by the pod environment.
    self.namespace = os.environ['NAMESPACE']
def _init_kubernetes(self):
    """Build the pykube client for the configured auth method, then
    create this object's identifier.

    Raises ValueError for any auth_method other than "kubeconfig" or
    "service-account".
    """
    if self.auth_method == "service-account":
        self.__kube_api = HTTPClient(KubeConfig.from_service_account())
    elif self.auth_method == "kubeconfig":
        self.__kube_api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path))
    else:
        raise ValueError("Illegal auth_method")
    self.create_id()
def pykube_client_from_dict(params):
    """Return an ``HTTPClient`` built from a params dict.

    Uses the in-cluster service account when ``k8s_use_service_account``
    is set; otherwise resolves a kubeconfig path from the params, the
    ``KUBECONFIG`` environment variable, or ``~/.kube/config`` in turn.
    """
    if params.get("k8s_use_service_account"):
        return HTTPClient(KubeConfig.from_service_account())

    config_path = params.get("k8s_config_path")
    if config_path is None:
        config_path = os.environ.get('KUBECONFIG')
    if config_path is None:
        config_path = '~/.kube/config'
    return HTTPClient(KubeConfig.from_file(config_path))
def _init_kubernetes(self):
    """Create the kube API client and a unique luigi job name.

    Raises ValueError for any auth_method other than "kubeconfig" or
    "service-account".
    """
    self.__logger = logger
    self.__logger.debug("Kubernetes auth method: " + self.auth_method)
    if self.auth_method == "service-account":
        self.__kube_api = HTTPClient(KubeConfig.from_service_account())
    elif self.auth_method == "kubeconfig":
        self.__kube_api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path))
    else:
        raise ValueError("Illegal auth_method")
    self.job_uuid = str(uuid.uuid4().hex)
    # Name pattern: <task-name>-luigi-<uuid-hex>
    self.uu_name = "%s-luigi-%s" % (self.name, self.job_uuid)
def _init_kubernetes(self):
    """Create the kube API client and a timestamped unique job name.

    Raises ValueError for any auth_method other than "kubeconfig" or
    "service-account".
    """
    self.__logger = logger
    self.__logger.debug("Kubernetes auth method: " + self.auth_method)
    if self.auth_method == "service-account":
        self.__kube_api = HTTPClient(KubeConfig.from_service_account())
    elif self.auth_method == "kubeconfig":
        self.__kube_api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path))
    else:
        raise ValueError("Illegal auth_method")
    self.job_uuid = str(uuid.uuid4().hex)
    # Name pattern: <task-name>-<UTC timestamp>-<first 16 uuid hex chars>
    stamp = datetime.utcnow().strftime('%Y%m%d%H%M%S')
    self.uu_name = "%s-%s-%s" % (self.name, stamp, self.job_uuid[:16])
def __init__(self, app, nworkers, **kwargs):
    """Initialize the Kubernetes job runner.

    Registers the k8s runner parameter specs, starts the parent runner,
    builds the pykube client (service account or kubeconfig), resolves
    the pod security/identity settings, and starts the worker threads.

    :param app: the application instance
    :param nworkers: number of worker threads to start
    :param kwargs: forwarded to the base runner (extended with
        ``runner_param_specs``)
    """
    # Check if pykube was importable, fail if not
    assert KubeConfig is not None, K8S_IMPORT_MESSAGE
    runner_param_specs = dict(
        k8s_config_path=dict(map=str, default=os.environ.get('KUBECONFIG', None)),
        k8s_use_service_account=dict(map=bool, default=False),
        k8s_persistent_volume_claims=dict(map=str),
        k8s_namespace=dict(map=str, default="default"),
        k8s_galaxy_instance_id=dict(map=str),
        # BUG FIX: the int validators below previously compared the `int`
        # type object itself (`int > 0` / `int >= 0`) and never looked at
        # the value being validated.
        k8s_timeout_seconds_job_deletion=dict(
            map=int, valid=lambda x: x > 0, default=30),
        k8s_job_api_version=dict(map=str, default="batch/v1"),
        k8s_supplemental_group_id=dict(map=str),
        k8s_pull_policy=dict(map=str, default="Default"),
        k8s_run_as_user_id=dict(
            map=str, valid=lambda s: s == "$uid" or s.isdigit()),
        k8s_run_as_group_id=dict(
            map=str, valid=lambda s: s == "$gid" or s.isdigit()),
        k8s_fs_group_id=dict(map=int),
        k8s_default_requests_cpu=dict(map=str, default=None),
        k8s_default_requests_memory=dict(map=str, default=None),
        k8s_default_limits_cpu=dict(map=str, default=None),
        k8s_default_limits_memory=dict(map=str, default=None),
        k8s_pod_retrials=dict(map=int, valid=lambda x: x >= 0, default=3))
    if 'runner_param_specs' not in kwargs:
        kwargs['runner_param_specs'] = dict()
    kwargs['runner_param_specs'].update(runner_param_specs)

    # Start the job runner parent object
    super(KubernetesJobRunner, self).__init__(app, nworkers, **kwargs)

    if "k8s_use_service_account" in self.runner_params and self.runner_params[
            "k8s_use_service_account"]:
        # In-cluster credentials via the mounted service-account token.
        self._pykube_api = HTTPClient(KubeConfig.from_service_account())
    else:
        self._pykube_api = HTTPClient(
            KubeConfig.from_file(self.runner_params["k8s_config_path"]))

    # Resolve identity/security settings from the registered params.
    self._galaxy_instance_id = self.__get_galaxy_instance_id()
    self._run_as_user_id = self.__get_run_as_user_id()
    self._run_as_group_id = self.__get_run_as_group_id()
    self._supplemental_group = self.__get_supplemental_group()
    self._fs_group = self.__get_fs_group()
    self._default_pull_policy = self.__get_pull_policy()

    self._init_monitor_thread()
    self._init_worker_threads()
    self.setup_volumes()
def run():
    """Build a ReplicaManager from environment configuration and start it."""
    k8s = HTTPClient(KubeConfig.from_service_account())
    mongo_connection_string = os.environ.get('MONGO_CONNECTION_STRING',
                                             'mongodb://127.0.0.1')
    logging.info('Mongo server %s', mongo_connection_string)
    manager = ReplicaManager(
        app_name=os.environ['APP_NAME'],
        creator_name=os.environ['CREATOR_NAME'],
        hostname=os.environ['MONGO_HOSTNAME'],
        k8s=k8s,
        local_mongo_server_conn=mongo_connection_string,
        external_ip=os.environ['EXTERNAL_IP'])
    manager.start()
def run():
    """Start a ReplicaManager configured entirely from the environment.

    Required variables: APP_NAME, CREATOR_NAME, MONGO_HOSTNAME,
    EXTERNAL_IP; MONGO_CONNECTION_STRING is optional.
    """
    api_client = HTTPClient(KubeConfig.from_service_account())
    conn = os.environ.get('MONGO_CONNECTION_STRING', 'mongodb://127.0.0.1')
    logging.info('Mongo server %s', conn)
    ReplicaManager(app_name=os.environ['APP_NAME'],
                   creator_name=os.environ['CREATOR_NAME'],
                   hostname=os.environ['MONGO_HOSTNAME'],
                   k8s=api_client,
                   local_mongo_server_conn=conn,
                   external_ip=os.environ['EXTERNAL_IP']).start()
def test():
    """Exercise several ReplicaManagers against a local `kubectl proxy`.

    Uses a stubbed pod list so no real cluster objects are required.
    """
    num = 3
    base = 27020

    k8s = HTTPClient(KubeConfig.from_service_account())
    k8s.url = 'http://127.0.0.1:8001'  # local API proxy
    k8s.session = k8s.build_session()

    def get_mongo_pods():
        # One fake Pod per test port, matching the hostnames used below.
        return [
            Pod(None, {
                'metadata': {
                    'labels': {'hostname': 'fb-1.db.waverbase.com:%d' % port}
                },
                'status': {'podIP': '127.0.0.1:%d' % port}
            })
            for port in range(base, base + num)
        ]

    for port in range(base, base + num):
        rm = ReplicaManager(
            app_name='testapp',
            creator_name='testcreator',
            hostname='fb-1.db.waverbase.com:%d' % port,
            k8s=k8s,
            local_mongo_server_conn='mongodb://127.0.0.1:%d' % port,
            external_ip='127.0.0.1:%d' % port)
        rm.local_pod_ip = '127.0.0.1:%d' % port
        rm.get_mongo_pods = get_mongo_pods
        rm.start()
"service_name": service_name, "is_default_endpoint": is_default_endpoint, "edge_num": i, "edge_location": edge_location, "edge_target": edge_target, "run_id": pod_run_id , "additional" : additional} else: print('Unable to get details of the tool {} from API due to errors. Empty endpoints will be returned'.format(docker_image)) else: print('Unable to get details of the tool {} from API due to errors. Empty endpoints will be returned'.format(docker_image)) else: print('Unable to get details of a RunID {} from API due to errors'.format(pod_run_id)) return service_list kube_api = HTTPClient(KubeConfig.from_service_account()) kube_api.session.verify = False edge_kube_service = Service.objects(kube_api).filter(selector={EDGE_SVC_ROLE_LABEL: EDGE_SVC_ROLE_LABEL_VALUE}) if len(edge_kube_service.response['items']) == 0: print('EDGE service is not found by label: cloud-pipeline/role=EDGE') exit(1) else: edge_kube_service_object = edge_kube_service.response['items'][0] edge_kube_service_object_metadata = edge_kube_service_object['metadata'] if 'labels' in edge_kube_service_object_metadata and EDGE_SVC_HOST_LABEL in edge_kube_service_object_metadata['labels']: edge_service_external_ip = edge_kube_service_object_metadata['labels'][EDGE_SVC_HOST_LABEL] if 'labels' in edge_kube_service_object_metadata and EDGE_SVC_PORT_LABEL in edge_kube_service_object_metadata['labels']: edge_service_port = edge_kube_service_object_metadata['labels'][EDGE_SVC_PORT_LABEL]
def __init__(self):
    """Connect to the cluster API with service-account credentials."""
    self.__kube_api = HTTPClient(KubeConfig.from_service_account())
    # NOTE(review): certificate verification is disabled for in-cluster
    # API calls — confirm this is intentional.
    self.__kube_api.session.verify = False
if str in ['true', 'True']: return True return False def parse_service(service): data = Bunch(service.annotations) data.ip = service.obj['spec']['clusterIP'] data.proxy_web_socket = str2bool(data.proxy_web_socket) data.proxy_http = str2bool(data.proxy_http) data.proxy_https = str2bool(data.proxy_https) data.proxy_https_redirect = str2bool(data.proxy_https_redirect) data.port = service.obj['spec']['ports'][0]['port'] return data if __name__ == "__main__": config = KubeConfig.from_service_account() api = HTTPClient(config) services = [] for namespace in os.getenv('PROXYED_NAMESPACES', 'default').split(','): services += Service.objects(api).filter(namespace=namespace, selector={'proxied': 'true'}) data = [] for service in services: data.append(parse_service(service)) result = render(data) with open('/etc/nginx/nginx.conf', 'w') as file: file.write(result)