def get_log_urls(self, service_instance_id):
    """Return the (live, archived) log URLs for this pod's main container.

    Args:
        service_instance_id: Service instance id used as the S3 key segment
            under which the archived log is stored.

    Returns:
        Tuple ``(url_run, url_done)``: ``url_run`` is the Kubernetes API path
        for streaming the live container log (follow mode), ``url_done`` is
        the bucket-relative path of the archived log written after the
        container exits.

    Raises:
        AXPlatformException: if the main container has no container_id yet,
            i.e. the pod has not started.
    """
    cname = self.get_main_container_name()
    # Kubernetes follow-mode log endpoint for the live stream.
    url_run = "/api/v1/namespaces/{}/pods/{}/log?container={}&follow=true".format(
        self.namespace, self.name, cname)
    docker_id = None
    pod = self._get_status_obj()
    cstats = pod.status.container_statuses
    for cstat in cstats:
        if cstat.name != cname:
            continue
        if cstat.container_id is None:
            # Running: The pod has been bound to a node, and all of the containers have been created.
            # At least one container is still running, or is in the process of starting or restarting.
            raise AXPlatformException(
                "log urls can only be obtained after pod {} has started. Current status of container is {}"
                .format(self.name, cstat))
        # container_id is reported as "docker://<id>"; strip the scheme prefix.
        docker_id = cstat.container_id[len("docker://"):]
    # BUGFIX: format arguments were swapped (pod name filled the container
    # slot and vice versa); now consistent with get_log_urls_for_container().
    assert docker_id is not None, "Docker ID of created container {} in pod {} was not found".format(
        cname, self.name)
    name_id = AXClusterId().get_cluster_name_id()
    bucket = AXClusterDataPath(name_id).bucket()
    prefix = AXClusterDataPath(name_id).artifact()
    url_done = "/{}/{}/{}/{}.{}.log".format(bucket, prefix, service_instance_id,
                                            cname, docker_id)
    return url_run, url_done
def _set_s3(self):
    """Resolve and validate the S3 buckets and key prefixes used for logs.

    Populates the user-data bucket attributes (``_bucket_name``, ``_bucket``,
    ``_log_s3_prefix``) and the AX-log bucket attributes
    (``_bucket_name_ax``, ``_bucket_ax``, ``_log_s3_prefix_ax``,
    ``_bucket_ax_is_external``), asserting that both buckets exist.
    """
    logger.info("Setting up s3 ...")
    cluster_name_id = AXClusterId().get_cluster_name_id()

    # User-data bucket and its artifact prefix.
    self._bucket_name = AXClusterDataPath(cluster_name_id).bucket()
    self._bucket = Cloud().get_bucket(self._bucket_name)
    self._log_s3_prefix = AXClusterDataPath(cluster_name_id).artifact()

    # AX log bucket (may live outside the cluster account) and its prefix.
    self._bucket_ax_is_external = AXLogPath(cluster_name_id).is_external()
    self._bucket_name_ax = AXLogPath(cluster_name_id).bucket()
    self._bucket_ax = Cloud().get_bucket(self._bucket_name_ax)
    self._log_s3_prefix_ax = AXLogPath(cluster_name_id).artifact()

    # Both buckets must already exist; we never create them here.
    assert self._bucket.exists(), "S3 bucket {} DOES NOT exist".format(
        self._bucket_name)
    assert self._bucket_ax.exists(), "S3 bucket {} DOES NOT exist".format(
        self._bucket_name_ax)

    logger.info("Using S3 bucket %s, with log prefix %s",
                self._bucket.get_bucket_name(), self._log_s3_prefix)
    logger.info("Using S3 bucket %s, with log prefix %s for AX",
                self._bucket_ax.get_bucket_name(), self._log_s3_prefix_ax)
def get_log_urls_for_container(pstat, podname, containername, instance_id):
    """Build the (live, archived) log URLs for one container of a pod.

    Args:
        pstat: Kubernetes pod status object; ``metadata.self_link`` must be set.
        podname: Pod name, used only in error messages.
        containername: Name of the container whose logs are wanted.
        instance_id: Service instance id forming the archived-log key segment.

    Returns:
        Tuple ``(url_run, url_done)`` — the live log-stream API path and the
        bucket-relative path of the archived log.

    Raises:
        AXPlatformException: if the container has not started (no container_id).
    """
    assert pstat.metadata.self_link, "Pod status does not have self_link"
    stream_url = "{}/log?container={}".format(pstat.metadata.self_link, containername)

    container_docker_id = None
    for status in pstat.status.container_statuses:
        if status.name != containername:
            continue
        if status.container_id is None:
            # Running: The pod has been bound to a node, and all of the containers have been created.
            # At least one container is still running, or is in the process of starting or restarting.
            raise AXPlatformException(
                "log urls can only be obtained after pod {} has started. Current status of container is {}"
                .format(podname, status))
        # container_id looks like "docker://<id>"; drop the scheme prefix.
        container_docker_id = status.container_id[len("docker://"):]

    assert container_docker_id is not None, "Docker ID of created container {} in pod {} was not found".format(
        containername, podname)

    cluster_name_id = AXClusterId().get_cluster_name_id()
    archive_bucket = AXClusterDataPath(cluster_name_id).bucket()
    archive_prefix = AXClusterDataPath(cluster_name_id).artifact()
    archived_url = "/{}/{}/{}/{}.{}.log".format(archive_bucket, archive_prefix,
                                                instance_id, containername,
                                                container_docker_id)
    return stream_url, archived_url
def __init__(self, name, namespace="axuser"):
    """Initialize the pod wrapper.

    Args:
        name: Pod name.
        namespace: Kubernetes namespace the pod lives in (default "axuser").
    """
    self.name = name
    self.namespace = namespace

    # Proxied Kubernetes API client used for all cluster calls.
    self.client = KubernetesApiClient(use_proxy=True)

    # this is the argo.services.service.Service object
    self.service = None
    self._host_vols = []
    self.software_info = SoftwareInfo()
    self._resources = AXResources()

    # Cluster identity drives the S3 locations for logs and artifacts.
    self._name_id = AXClusterId().get_cluster_name_id()
    self._s3_bucket_ax_is_external = AXLogPath(self._name_id).is_external()
    self._s3_bucket_ax = AXLogPath(self._name_id).bucket()
    self._s3_key_prefix_ax = AXLogPath(self._name_id).artifact()
    self._s3_bucket = AXClusterDataPath(self._name_id).bucket()
    self._s3_key_prefix = AXClusterDataPath(self._name_id).artifact()
def __init__(self):
    """Initialize the job wrapper with a proxied Kubernetes batch client."""
    # Proxied Kubernetes API client; jobs go through the batch API group.
    self.client = KubernetesApiClient(use_proxy=True)
    self.batchapi = self.client.batchv

    self.kube_namespace = "axuser"
    self.jobname = None

    # this is the argo.services.service.Service object
    self.service = None
    self._host_vols = []

    # Maps logical attribute names to Kubernetes field selectors.
    self._attribute_map = {"uuid": "metadata.uid"}
    self.software_info = SoftwareInfo()
    self._ax_resources = {}

    # Cluster identity drives the S3 locations for logs and artifacts.
    self._name_id = AXClusterId().get_cluster_name_id()
    self._s3_bucket_ax_is_external = AXLogPath(self._name_id).is_external()
    self._s3_bucket_ax = AXLogPath(self._name_id).bucket()
    self._s3_key_prefix_ax = AXLogPath(self._name_id).artifact()
    self._s3_bucket = AXClusterDataPath(self._name_id).bucket()
    self._s3_key_prefix = AXClusterDataPath(self._name_id).artifact()
def _update_data_bucket(self):
    """Ensure the cluster data bucket exists and carries the CORS config.

    Raises:
        AXPlatformException: if the bucket cannot be created.
    """
    bucket_name = AXClusterDataPath(name_id=self._name_id).bucket()
    bucket = Cloud().get_bucket(bucket_name,
                                aws_profile=self._aws_profile,
                                region=self._aws_region)

    # create() is idempotent; a falsy return means the bucket is unusable.
    if not bucket.create():
        raise AXPlatformException("Failed to create S3 bucket {}".format(bucket.get_bucket_name()))

    # Update CORS config for data bucket too.
    logger.info("Checking CORS config for %s.", bucket.get_bucket_name())
    bucket.put_cors(DATA_CORS_CONFIG)

    logger.info("Created %s bucket ... DONE", bucket.get_bucket_name())
def _delete_data_bucket(self):
    """Delete every object belonging to this cluster from the data bucket.

    Only objects under the ``<cluster_name>/`` prefix are removed; the bucket
    itself is left in place (it may be shared with other clusters).
    """
    logger.info("Deleting applatix-data bucket contents for cluster %s ...", self._name_id)

    bucket = Cloud().get_bucket(AXClusterDataPath(name_id=self._name_id).bucket(),
                                aws_profile=self._aws_profile,
                                region=self._aws_region)

    # All of this cluster's objects live under "<cluster_name>/".
    cluster_name = AXClusterId(name=self._name_id).get_cluster_name()
    cluster_prefix = cluster_name + "/"

    logger.info("Deleting objects for cluster %s from bucket %s. This may take some while.",
                cluster_name, bucket.get_bucket_name())
    bucket.delete_all(obj_prefix=cluster_prefix)
    logger.info("Deleting objects for cluster %s from bucket %s ... DONE",
                cluster_name, bucket.get_bucket_name())