Example #1
def main():
    parser = argparse.ArgumentParser(description='artifactmanager')
    parser.add_argument('--version', action='version', version="%(prog)s {}".format(__version__))
    parser.add_argument('--port', type=int, default=ARTIFACTMANAGER_DEFAULT_PORT, help="Run server on the specified port")
    args = parser.parse_args()

    logging.basicConfig(format="%(asctime)s %(levelname)s %(name)s %(lineno)d %(threadName)s: %(message)s",
                        datefmt="%Y-%m-%dT%H:%M:%S",
                        stream=sys.stdout)
    logging.getLogger("ax").setLevel(logging.DEBUG)

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGUSR1, signal_debugger)

    try:
        Cloud().set_target_cloud(Cloud().own_cloud())
        rest.artifact_manager = ArtifactManager()
        rest.artifact_manager.init()
        rest.artifact_manager.start_background_process()  # start retention thread
        server = pywsgi.WSGIServer(('', args.port), rest.app)
        logger.info("Artifact manager %s serving on port %s", __version__, args.port)
        server.serve_forever()
    except SystemExit:
        raise
    except Exception as err:
        logger.exception("Unhandled exception: %s", err)
        if rest.artifact_manager:
            rest.artifact_manager.stop_background_process()  # stop retention thread
        sys.exit(1)
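The snippet registers signal_handler and signal_debugger without showing them; a minimal sketch of what such handlers might look like (hypothetical, not the project's actual implementation):

import traceback

def signal_handler(signum, frame):
    # Hypothetical: stop the retention thread, then exit cleanly.
    if rest.artifact_manager:
        rest.artifact_manager.stop_background_process()
    sys.exit(0)

def signal_debugger(signum, frame):
    # Hypothetical: dump every thread's stack on SIGUSR1 for debugging.
    for thread_id, stack in sys._current_frames().items():
        traceback.print_stack(stack)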
Example #2
    def _validate_and_set_cloud_provider(self):
        all_errs = []
        if self.cloud_provider not in Cloud.VALID_TARGET_CLOUD_INPUT:
            all_errs.append(
                "Cloud provider {} not supported. Please choose from {}".
                format(self.cloud_provider, Cloud.VALID_TARGET_CLOUD_INPUT))
        else:
            try:
                # Validate placement only for AWS
                c = Cloud(target_cloud=self.cloud_provider)
                if c.target_cloud_aws():
                    ec2 = EC2(profile=self.cloud_profile,
                              region=self.cloud_region)
                    zones = ec2.get_availability_zones()
                    if self.cloud_placement:
                        if self.cloud_placement not in zones:
                            all_errs.append(
                                "Invalid cloud placement {}. Please choose from {}"
                                .format(self.cloud_placement, zones))
                    else:
                        self.cloud_placement = random.choice(zones)
                        logger.info(
                            "Cloud placement not provided, setting it to %s from currently available zones %s",
                            self.cloud_placement, zones)

            except Exception as e:
                all_errs.append(
                    "Cloud provider validation error: {}".format(e))
        return all_errs
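An illustrative call site for the validator above (the helper name and the cfg object are assumptions; only _validate_and_set_cloud_provider comes from the snippet):

def check_cloud_provider(cfg):
    # Hypothetical helper: surface validation errors and fail fast.
    errs = cfg._validate_and_set_cloud_provider()
    if errs:
        raise ValueError("; ".join(errs))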
Example #3
File: client.py Project: nuaays/argo
    def _parse_config(self, kube_config):
        """Return config information from current kubernetes context"""
        cred_info = {}
        # cluster info
        context = next(c['context'] for c in kube_config['contexts']
                       if c['name'] == kube_config['current-context'])
        cred_info.update(context)
        context_cluster = next(c['cluster'] for c in kube_config['clusters']
                               if c['name'] == context['cluster'])
        cred_info['host'] = context_cluster['server'].split('/')[-1]
        if 'certificate-authority-data' in context_cluster:
            cred_info['certificate-authority-data'] = context_cluster[
                'certificate-authority-data']
        # user info
        context_user = next(c['user'] for c in kube_config['users']
                            if c['name'] == context['user'])
        if Cloud().target_cloud_aws():
            if 'token' in context_user:
                cred_info['token'] = "Bearer " + context_user['token']
            else:
                if 'username' in context_user:
                    cred_info['token'] = urllib3.util.make_headers(
                        basic_auth=context_user['username'] + ':' +
                        context_user['password']).get('authorization')
                # minikube
                if 'client-certificate' in context_user:
                    cred_info['client-certificate'] = context_user[
                        'client-certificate']
                if 'client-key' in context_user:
                    cred_info['client-key'] = context_user['client-key']

        elif Cloud().target_cloud_gcp():
            cred_info['token'] = GCPToken().token
        return cred_info
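A minimal kubeconfig-shaped dict (all values illustrative) showing the structure _parse_config expects; the real input is a parsed kubeconfig file:

kube_config = {
    "current-context": "dev",
    "contexts": [{"name": "dev",
                  "context": {"cluster": "dev-cluster", "user": "dev-user"}}],
    "clusters": [{"name": "dev-cluster",
                  "cluster": {"server": "https://10.0.0.1:6443"}}],
    "users": [{"name": "dev-user", "user": {"token": "abc123"}}],
}
# With an AWS target cloud, cred_info would then contain
# host "10.0.0.1:6443" and token "Bearer abc123".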
Example #4
    def validate(self):
        all_errs = []
        all_errs += self._validate_critical_directories()

        # Because we validate strictly during installation, we can assume the
        # cluster has a valid name and cluster config
        if not self.cluster_name:
            all_errs.append("Please provide cluster name to pause the cluster")

        if self.cloud_provider not in Cloud.VALID_TARGET_CLOUD_INPUT:
            all_errs.append(
                "Cloud provider {} not supported. Please choose from {}".
                format(self.cloud_provider, Cloud.VALID_TARGET_CLOUD_INPUT))
        else:
            # Cloud singleton should be instantiated during validation stage so
            # we can ensure customer ID
            Cloud(target_cloud=self.cloud_provider)
            if self.force_uninstall and self.cloud_region and self.cloud_placement:
                try:
                    # Validate placement only for AWS
                    c = Cloud(target_cloud=self.cloud_provider)
                    if c.target_cloud_aws():
                        ec2 = EC2(profile=self.cloud_profile,
                                  region=self.cloud_region)
                        zones = ec2.get_availability_zones()
                        if self.cloud_placement not in zones:
                            all_errs.append(
                                "Invalid cloud placement {}. Please choose from {}"
                                .format(self.cloud_placement, zones))
                except Exception as e:
                    all_errs.append(
                        "Cloud provider validation error: {}".format(e))

        return all_errs
Example #5
File: cluster_id.py Project: nuaays/argo
    def _instantiate_bucket_if_needed(self):
        if not self._bucket:
            logger.info("Instantiating cluster bucket ...")
            self._bucket = Cloud().get_bucket(self._bucket_name,
                                              aws_profile=self._aws_profile)
            assert self._bucket.exists(), "Bucket {} not created yet".format(
                self._bucket.get_bucket_name())
Example #6
    def _update_cluster_bucket(self):
        bucket_name = AXClusterConfigPath(name_id=self._name_id).bucket()
        cluster_bucket = Cloud().get_bucket(bucket_name, aws_profile=self._aws_profile, region=self._aws_region)

        if not cluster_bucket.create():
            raise AXPlatformException("Failed to create S3 bucket {}".format(cluster_bucket.get_bucket_name()))
        logger.info("Created %s bucket ... DONE", cluster_bucket.get_bucket_name())
Example #7
File: cluster_id.py Project: nuaays/argo
    def create_cluster_name_id(self):
        """
        User inputs a cluster name in the format "<name>" or "<name>-<id>", and this function creates
        a record in S3. If the name the caller passed in does not include an ID, we generate one.

        If we already have a cluster name/id record in S3, this function should not be called, to avoid
        overriding existing clusters' records.
        :return: <cluster-name>-<cluster-id>
        """
        assert not self._cluster_name_id, "Cluster {} has its name id already created".format(
            self._cluster_name_id)
        assert self._input_name, "Must provide input name to create cluster name id"
        name, cid = self._format_name_id(self._input_name)
        if cid is None:
            logger.info("Cluster id not provided, generate one.")
            if Cloud().target_cloud_gcp():
                cid = str(uuid.uuid4())[:8]
            elif Cloud().target_cloud_aws():
                cid = str(uuid.uuid1())
            else:
                assert False, "Must provide valid target cloud to create cluster name id. Currently target cloud is set to {}".format(
                    Cloud().target_cloud())
        logger.info("Created new name-id %s", name + "-" + cid)

        # fill in cluster name id info
        self._cluster_name = name
        self._cluster_id = cid
        self._cluster_name_id = self._cluster_name + "-" + self._cluster_id
        return self._cluster_name_id
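For illustration, the two ID styles generated above (output values are examples only):

import uuid

# GCP: first 8 characters of a random UUID, e.g. "515d9828"
print(str(uuid.uuid4())[:8])
# AWS: a full time-based UUID, e.g. "515d9828-7515-11e6-9b3e-a0999b1b4e15"
print(str(uuid.uuid1()))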
Example #8
    def _format_name_id(input_name):
        if Cloud().target_cloud_aws():
            return AXClusterNameIdParser.parse_cluster_name_id_aws(input_name)
        elif Cloud().target_cloud_gcp():
            return AXClusterNameIdParser.parse_cluster_name_id_gcp(input_name)
        else:
            assert False, "Invalid cloud provider: {}. Only aws and gcp are supported".format(
                Cloud().target_cloud())
Example #9
    def __init__(self,
                 cluster_name_id,
                 kube_config=None,
                 key_file=None,
                 metadata=None,
                 aws_profile=None):
        """
        Config file initialization

        :param cluster_name_id: Cluster name_id in format of name-uuid, lcj-cluster-515d9828-7515-11e6-9b3e-a0999b1b4e15
        :param kube_config: kubernetes saved config file.
        :param key_file: cluster ssh key path
        :param metadata: path to cluster metadata
        :param aws_profile: AWS profile to access S3.
        """
        assert AXEnv().is_in_pod() or cluster_name_id, \
            "Must specify cluster name from outside cluster"
        self._aws_profile = aws_profile
        self._cluster_name_id = cluster_name_id

        self._config = AXClusterConfig(cluster_name_id=cluster_name_id,
                                       aws_profile=aws_profile)
        tmp_kube_config = kube_config if kube_config else self.default_config_path.format(
            cluster_name_id)
        self._kube_config = os.getenv("ARGO_KUBE_CONFIG_PATH", tmp_kube_config)
        self._key_file = key_file if key_file else self.default_key_path.format(
            cluster_name_id)
        self._metadata_file = metadata if metadata else self.default_cluster_meta_path

        config_path = AXClusterConfigPath(name_id=cluster_name_id)
        self._bucket_name = config_path.bucket()
        self._bucket = Cloud().get_bucket(self._bucket_name,
                                          aws_profile=aws_profile)
        self._s3_kube_config_key = config_path.kube_config()
        self._s3_cluster_ssh_key = config_path.kube_ssh()
        self._s3_cluster_state_before_pause = config_path.state_before_pause()
        self._s3_cluster_meta = config_path.cluster_metadata()
        self._s3_cluster_software_info = config_path.versions()
        self._s3_platform_manifest_dir = config_path.platform_manifest_dir()
        self._s3_platform_config = config_path.platform_config()
        self._s3_cluster_current_state = config_path.current_state()
        self._s3_portal_support_flag = config_path.portal_support()

        self._s3_master_config_prefix = config_path.master_config_dir()
        self._s3_master_attributes_path = config_path.master_attributes_path()
        self._s3_master_user_data_path = config_path.master_user_data_path()

        # For cluster staging info, stage1 and stage2 can be uploaded, downloaded, and deleted with AXClusterInfo.
        # stage0 can only be downloaded with AXClusterInfo. It is uploaded during cluster information
        # initialization (i.e. uploading cluster id and cluster config), and deleted during cluster information
        # clean up (i.e. during axinstaller uninstall)
        self._staging_info = {
            "stage0": config_path.cluster_install_stage0_key(),
            "stage1": config_path.cluster_install_stage1_key(),
            "stage2": config_path.cluster_install_stage2_key()
        }
Example #10
File: kube_object.py Project: zhan849/argo
    def wait_for_svc_lb_validator(status):
        if bool(status["loadBalancer"] and status["loadBalancer"]["ingress"]
                and len(status["loadBalancer"]["ingress"]) == 1):
            if Cloud().target_cloud_aws():
                return "elb.amazonaws.com" in status["loadBalancer"]["ingress"][0]["hostname"]
            elif Cloud().target_cloud_gcp():
                return "ip" in status["loadBalancer"]["ingress"][0]
        return False
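Hypothetical status payloads the validator above would accept, depending on the target cloud:

# AWS: a single ingress entry exposing an ELB hostname
aws_status = {"loadBalancer": {"ingress": [
    {"hostname": "a1b2c3.us-west-2.elb.amazonaws.com"}]}}
# GCP: a single ingress entry exposing an IP
gcp_status = {"loadBalancer": {"ingress": [{"ip": "35.1.2.3"}]}}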
Example #11
    def __init__(self, cluster_name_id=None, aws_profile=None, config=None):
        self._cluster_name_id = AXClusterId(
            name=cluster_name_id,
            aws_profile=aws_profile).get_cluster_name_id()
        self._bucket_name = AXClusterConfigPath(self._cluster_name_id).bucket()
        self._bucket = Cloud().get_bucket(self._bucket_name,
                                          aws_profile=aws_profile)
        self._cluster_config_key = AXClusterConfigPath(
            self._cluster_name_id).cluster_config()
        self._conf = config
Example #12
    def __init__(self, customer_id, cluster_name_id, aws_profile):
        self._customer_id = customer_id
        self._cluster_name_id = cluster_name_id
        self._cluster_name = AXClusterId(cluster_name_id).get_cluster_name()
        self._aws_profile = aws_profile

        cluster_bucket_name = AXClusterConfigPath(cluster_name_id).bucket()
        self._cluster_bucket = Cloud().get_bucket(cluster_bucket_name, aws_profile=self._aws_profile)

        support_bucket_name = AXSupportConfigPath(cluster_name_id).bucket()
        self._support_bucket = Cloud().get_bucket(support_bucket_name, aws_profile=self._aws_profile)
Example #13
    def _get_bucket_region_from_aws(self):
        def _do_get_region(start_region):
            s3 = boto3.Session(
                profile_name=self._aws_profile,
                region_name=start_region).client(
                    "s3",
                    aws_access_key_id=os.environ.get("ARGO_S3_ACCESS_KEY_ID",
                                                     None),
                    aws_secret_access_key=os.environ.get(
                        "ARGO_S3_ACCESS_KEY_SECRET", None),
                    endpoint_url=os.environ.get("ARGO_S3_ENDPOINT", None),
                    config=Config(signature_version='s3v4'))
            logger.debug(
                "Finding region for bucket %s with initial region %s",
                self._name, start_region)
            try:
                response = s3.head_bucket(Bucket=self._name)
                logger.debug("Head_bucket returned OK %s", response)
            except ClientError as e:
                # head_bucket from the wrong region fails, but the error
                # response still carries the bucket region in its headers,
                # so fall through and inspect them.
                response = getattr(e, "response", {})
                logger.debug(
                    "Head_bucket returned error %s, inspecting headers",
                    response)
            headers = response.get("ResponseMetadata",
                                   {}).get("HTTPHeaders", {})
            region = headers.get("x-amz-bucket-region",
                                 headers.get("x-amz-region", None))
            logger.debug("Found region %s from head_bucket for %s, headers %s",
                         region, self._name, headers)
            return region

        if Cloud().own_cloud() == Cloud.CLOUD_AWS:
            # When running on AWS instance, we query metadata server for initial region to get bucket region
            return _do_get_region(Cloud().meta_data().get_region())
        else:
            # Assume we don't have AWS metadata server access
            for r in PARTITION_DEFAULT_REGIONS:
                logger.debug(
                    "Trying partition default region %s to get bucket region.",
                    r)
                bucket_region = None
                try:
                    bucket_region = _do_get_region(r)
                except ClientError as e:
                    logger.info(
                        "Get region failed with: %s. Assuming region is None ...",
                        e)
                if bucket_region:
                    return bucket_region
        return None
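PARTITION_DEFAULT_REGIONS is defined elsewhere in the module; a plausible shape (an assumption, not the project's actual value) is one starting region per AWS partition:

# Hypothetical: one probe region per AWS partition.
PARTITION_DEFAULT_REGIONS = ["us-east-1", "cn-north-1", "us-gov-west-1"]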
Example #14
File: axtool.py Project: teddybearz/argo
    def cluster(self, args):
        from ax.platform.ax_cluster_info import AXClusterInfo
        from ax.meta import AXClusterId
        from ax.platform_client.env import AXEnv

        Cloud().set_target_cloud(args.target_cloud)

        assert AXEnv().is_in_pod() or args.cluster_name, \
            "Must specify cluster name from outside cluster"

        if args.subcommand in ['start', 'create']:
            logger.error("=" * 80)
            logger.error(
                "axtool cluster start/create has been moved to axinstaller")
            logger.error("=" * 80)
            sys.exit(1)
        elif args.subcommand in ['stop', 'delete']:
            logger.error("=" * 80)
            logger.error(
                "axtool cluster stop/delete has been moved to axinstaller")
            logger.error("=" * 80)
            sys.exit(1)
        elif args.subcommand == 'show':
            import subprocess
            name_id = AXClusterId(args.cluster_name,
                                  args.aws_profile).get_cluster_name_id()
            AXClusterInfo(name_id,
                          aws_profile=args.aws_profile).download_kube_key()
            conf_file = AXClusterInfo(
                name_id, aws_profile=args.aws_profile).download_kube_config()
            logger.info("Kubeconfig")
            with open(conf_file, "r") as f:
                conf = f.read()
            logger.info("%s", conf)
            subprocess.call(
                ["kubectl", "--kubeconfig", conf_file, "cluster-info"])
            subprocess.call(
                ["kubectl", "--kubeconfig", conf_file, "get", "no"])
            subprocess.call([
                "kubectl", "--kubeconfig", conf_file, "--namespace", "axsys",
                "get", "po"
            ])
        elif args.subcommand == 'download-config':
            name_id = AXClusterId(args.cluster_name,
                                  args.aws_profile).get_cluster_name_id()
            if Cloud().target_cloud_aws():
                AXClusterInfo(
                    name_id, aws_profile=args.aws_profile).download_kube_key()
            AXClusterInfo(name_id,
                          aws_profile=args.aws_profile).download_kube_config()
Example #15
    def _delete_cluster_bucket(self):
        logger.info("Deleting applatix-cluster bucket contents for cluster %s ...", self._name_id)
        cluster_bucket = Cloud().get_bucket(AXClusterConfigPath(name_id=self._name_id).bucket(),
                                            aws_profile=self._aws_profile, region=self._aws_region)

        idobj = AXClusterId(name=self._name_id)
        cluster_config_path = AXClusterConfigPath(name_id=self._name_id)
        cluster_name = idobj.get_cluster_name()
        prefix = cluster_name + "/"

        # TODO: Not idempotent here.
        # Consider the following case: if an exception is thrown while deleting S3 objects, install stage 1
        # information may already be deleted even though not everything was deleted successfully; the next
        # time the user executes "delete", this program will assume install stage 1 has been cleaned up.
        exempt = [idobj.get_cluster_id_s3_key(), cluster_config_path.cluster_install_stage0_key()]
        logger.info("Deleting objects for cluster %s from bucket %s. This may take some while.",
                    cluster_name,
                    cluster_bucket.get_bucket_name())
        cluster_bucket.delete_all(obj_prefix=prefix, exempt=exempt)
        logger.info("Deleting objects for cluster %s from bucket %s ... DONE",
                    cluster_name, cluster_bucket.get_bucket_name())
        logger.info("Deleting stage0 information ...")
        for item in exempt:
            cluster_bucket.delete_object(item)
        logger.info("Deleting stage0 information ... DONE")
Example #16
    def __init__(self, customer_image):
        super(InitContainerPullImage,
              self).__init__(INIT_CONTAINER_NAME_PULLIMAGE, customer_image)
        nothing_hostpath = ContainerVolume("bin-nothing", "/bin/nothing")
        if Cloud().in_cloud_aws():
            nothing_hostpath.set_type("HOSTPATH", "/bin/nothing")
        elif Cloud().in_cloud_gcp():
            nothing_hostpath.set_type("HOSTPATH", "/etc/nothing")
        self.add_volume(nothing_hostpath)

        self.command = ["/bin/nothing"]

        # AA-3175: CPU and memory are set to lowest possible so that pod requests are kept at a minimum
        self.add_resource_constraints("cpu_cores", 0.001)
        self.add_resource_constraints("mem_mib", 4)
Example #17
File: pod.py Project: zhan849/argo
    def enable_docker(self, size_in_mb):
        if "main" not in self.cmap:
            raise AXPlatformException("Pod needs to have main container before enabling docker")

        # create the dind sidecar container
        dind_c = SidecarDockerDaemon(size_in_mb)
        if Cloud().in_cloud_aws():
            dind_c.args = ["--storage-driver=overlay2"]
        elif Cloud().in_cloud_gcp():
            # Current GKE defaults to overlay.
            dind_c.args = ["--storage-driver=overlay"]
        self.cmap["dind"] = dind_c
        self.cmap["main"].add_env("DOCKER_HOST", value="tcp://localhost:2375")

        return dind_c
Example #18
    def __init__(self, customer_image, namespace, version):
        super(SidecarTask, self).__init__(SIDEKICK_WAIT_CONTAINER_NAME,
                                          customer_image, namespace, version)

        # Sidecar needs to manage logs, so add the log path here
        logpath = ContainerVolume("containerlogs", "/logs")
        if Cloud().in_cloud_aws():
            logpath.set_type("HOSTPATH", "/mnt/ephemeral/docker/containers")
        elif Cloud().in_cloud_gcp():
            logpath.set_type("HOSTPATH", "/var/lib/docker/containers")
        self.add_volume(logpath)
        self.add_env("LOGMOUNT_PATH", "/logs")
        self.add_env("AX_CLUSTER_NAME_ID", os.getenv("AX_CLUSTER_NAME_ID"))

        # set the arguments
        self.args = ["post"]
Example #19
    def __init__(self, cfg):
        cfg.cluster_size = AXClusterSize.CLUSTER_USER_PROVIDED
        cfg.cluster_type = "standard"
        cfg.vpc_id = None
        cfg.vpc_cidr_base = None
        cfg.subnet_mask_size = None
        cfg.trusted_cidrs = ClusterInstallDefaults.TRUSTED_CIDR
        cfg.user_on_demand_nodes = None
        cfg.spot_instances_option = "none"
        cfg.cluster_autoscaling_scan_interval = None
        cfg.support_object_store_name = ""
        cfg.enable_sandbox = None
        cfg.software_version_info = None

        self.cluster_size = cfg.cluster_size
        if cfg.cloud_provider == "minikube":
            self.service_manifest_root = "/ax/config/service/argo-wfe"
            self.platform_bootstrap_config = "/ax/config/service/config/argo-wfe-platform-bootstrap.cfg"
            Cloud(target_cloud="aws")
        elif cfg.cloud_provider == "gke":
            self.service_manifest_root = "/ax/config/service/argo-gke"
            self.platform_bootstrap_config = "/ax/config/service/config/argo-wfe-platform-bootstrap.cfg"
            Cloud(target_cloud="aws")
        else:
            self.service_manifest_root = "/ax/config/service/argo-all"
            self.platform_bootstrap_config = "/ax/config/service/config/argo-all-platform-bootstrap.cfg"

        super(PlatformOnlyInstallConfig, self).__init__(cfg)
        self.install_config = ClusterInstallConfig(cfg=cfg)
        self.install_config.validate()

        self.cluster_bucket = cfg.cluster_bucket
        self.kube_config = cfg.kubeconfig
        try:
            self.bucket_endpoint = cfg.endpoint
            self.access_key = cfg.access_key
            self.secret_key = cfg.secret_key
        except Exception:  # cfg may not define endpoint/access_key/secret_key
            self.bucket_endpoint = None
            self.access_key = None
            self.secret_key = None

        # Overwrite the manifest_root and bootstrap_config.
        self.install_config.manifest_root = self.service_manifest_root
        self.install_config.bootstrap_config = self.platform_bootstrap_config

Example #20
    def __init__(self, host_ip=None):
        self._host_ip = host_ip if host_ip else Cloud().meta_data().get_private_ip()
        assert self._host_ip, "Kubelet Client is not properly initialized: Missing host ip"
        logger.info("Kubelet client uses host ip %s", self._host_ip)
        self._kubelet_url = "http://{}:{}".format(self._host_ip,
                                                  self.KUBELET_RO_PORT)
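KUBELET_RO_PORT is a class constant not shown in this excerpt. Assuming it is the kubelet's conventional read-only port, the constructed URL looks like:

# Assuming KUBELET_RO_PORT = 10255 and a host IP of 10.0.0.5 (illustrative),
# the snippet above yields the URL http://10.0.0.5:10255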
Example #21
    def _generate_raw_cluster_config_dict(self):
        """
        This is a standalone method to generate a cluster config dictionary based on install config. We might want to
        move it to the ax.platform.cluster_config package for sanity
        :return:
        """
        config_file_name = CLUSTER_CONFIG_TEMPLATES[self._cfg.cluster_size]
        config_file_full_path = os.path.join(CLUSTER_CONFIG_ROOT, self._cfg.cluster_type, config_file_name)
        with open(config_file_full_path, "r") as f:
            config = json.load(f)

        if Cloud().target_cloud_aws():
            return self._generate_raw_cluster_config_dict_aws(config)
        elif Cloud().target_cloud_gcp():
            return self._generate_raw_cluster_config_dict_gcp(config)
        else:
            # Should never come here as aws/gcp is ensured at CLI validation level
            return config
Example #22
    def _update_data_bucket(self):
        data_bucket = Cloud().get_bucket(AXClusterDataPath(name_id=self._name_id).bucket(),
                                         aws_profile=self._aws_profile, region=self._aws_region)

        if not data_bucket.create():
            raise AXPlatformException("Failed to create S3 bucket {}".format(data_bucket.get_bucket_name()))
        # Update CORS config for data bucket too.
        logger.info("Checking CORS config for %s.", data_bucket.get_bucket_name())
        data_bucket.put_cors(DATA_CORS_CONFIG)

        logger.info("Created %s bucket ... DONE", data_bucket.get_bucket_name())
Example #23
File: kube_object.py Project: zhan849/argo
    def poll_for_elb_exists(poll_result):
        if not poll_result:
            return False
        assert isinstance(poll_result,
                          list), "Poll result should be a list of objects"
        for svc in poll_result:
            try:
                if bool(svc.status.load_balancer.ingress
                        and len(svc.status.load_balancer.ingress) == 1):
                    if Cloud().target_cloud_aws():
                        return "elb.amazonaws.com" in svc.status.load_balancer.ingress[0].hostname
                    elif Cloud().target_cloud_gcp():
                        return hasattr(svc.status.load_balancer.ingress[0], "ip")
                return False
            except Exception:
                return False
        return True
Example #24
    def get_region(self):
        if os.environ.get("AX_AWS_REGION", None):
            return os.environ.get("AX_AWS_REGION")
        if Cloud().own_cloud() != Cloud.CLOUD_AWS:
            return "unknown-region"

        url = self._meta_url + "placement/availability-zone"
        retry = AXRetry(retry_exception=(Exception, ))
        r = ax_retry(requests.get, retry, url, timeout=10)
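        # The metadata endpoint returns an availability zone such as
        # "us-west-2a"; stripping the trailing zone letter yields the
        # region, e.g. "us-west-2".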
        return r.text[:-1]
Example #25
    def _ensure_customer_id(cloud_profile):
        if os.getenv("AX_CUSTOMER_ID", None):
            logger.info("Using customer ID %s", os.getenv("AX_CUSTOMER_ID"))
            return

        # TODO (#111): set customer id to GCP
        if Cloud().target_cloud_aws():
            account_info = SecurityToken(aws_profile=cloud_profile).get_caller_identity()
            customer_id = str(uuid.uuid5(uuid.NAMESPACE_OID, account_info["Account"]))
            logger.info("Using AWS account ID hash (%s) for customer id", customer_id)
            os.environ["AX_CUSTOMER_ID"] = customer_id
Example #26
    def _clean_up_kubernetes_cluster(self):
        """
        This step cleans up Kubernetes if needed. It only touches components in the cloud provider that
        Kubernetes needs, including:
            - Load Balancers
            - Instances
            - Auto scaling groups
            - launch configurations
            - Volumes
            - Security groups
            - Elastic IPs
            - VPCs (If this VPC is not shared)
        :return:
        """
        if not check_cluster_staging(
                cluster_info_obj=self._cluster_info,
                stage="stage0") and not self._cfg.force_uninstall:
            logger.info("Skip clean up Kubernetes cluster")
            return

        logger.info("Cluster uninstall step: Clean Up Kubernetes Cluster")

        if self._cfg.force_uninstall:
            msg = "{}\n\nIt is possible that cluster S3 bucket is accidentally deleted,\n".format(
                COLOR_YELLOW)
            msg += "or S3 bucket information has been altered unintentionally. In this\n"
            msg += "case, we still try to delete cluster since this is force uninstall.\n"
            msg += "NOTE: cluster deletion might NOT be successful and still requires\n"
            msg += "user to clean up left-over resources manually.{}\n".format(
                COLOR_NORM)
            logger.warning(msg)

        env = {
            "KUBERNETES_PROVIDER": self._cfg.cloud_provider,
            "KUBE_AWS_ZONE": self._cfg.cloud_placement,
            "KUBE_AWS_INSTANCE_PREFIX": self._name_id
        }

        if self._cfg.cloud_profile:
            env["AWS_DEFAULT_PROFILE"] = self._cfg.cloud_profile
        else:
            env["AWS_DEFAULT_PROFILE"] = AWS_DEFAULT_PROFILE

        logger.info("\n\n%sCalling kube-down ...%s\n", COLOR_GREEN, COLOR_NORM)
        AXKubeUpDown(cluster_name_id=self._name_id,
                     env=env,
                     aws_profile=self._cfg.cloud_profile).down()

        # TODO (#111): revise volume teardown in GCP
        if Cloud().target_cloud_aws():
            delete_tagged_ebs(aws_profile=self._cfg.cloud_profile,
                              tag_key=COMMON_CLOUD_RESOURCE_TAG_KEY,
                              tag_value=self._name_id,
                              region=self._cfg.cloud_region)
Example #27
    def _parse_config(self, kube_config):
        """Return config information from current kubernetes context"""
        cred_info = {}
        # cluster info
        context = next(c['context'] for c in kube_config['contexts']
                       if c['name'] == kube_config['current-context'])
        cred_info.update(context)
        context_cluster = next(c['cluster'] for c in kube_config['clusters']
                               if c['name'] == context['cluster'])
        cred_info['host'] = context_cluster['server'].split('/')[-1]
        cred_info['certificate-authority-data'] = context_cluster[
            'certificate-authority-data']
        # user info
        context_user = next(c['user'] for c in kube_config['users']
                            if c['name'] == context['user'])
        if Cloud().target_cloud_aws():
            cred_info['token'] = context_user['token']
        elif Cloud().target_cloud_gcp():
            cred_info['token'] = GCPToken().token
        return cred_info
Example #28
File: rest.py Project: zhan849/argo
def axmon_domains_list():
    """
    Return a list of hosted zones (domains) that the cluster has access to
    Returns:
        { 'result': [list of domains] }
    """
    if Cloud().target_cloud_gcp():
        return jsonify([])
    else:
        r53client = Route53(boto3.client("route53"))
        return jsonify(result=[x.name for x in r53client.list_hosted_zones()])
Example #29
    def __init__(self, size_in_mb):
        super(SidecarDockerDaemon, self).__init__(DIND_CONTAINER_NAME,
                                                  "argoproj/dind:1.12.6")

        # Mount /lib/modules so dind can load the aufs kernel module.
        libmodule_hostpath = ContainerVolume("kernel-lib-module",
                                             "/lib/modules")
        libmodule_hostpath.set_type("HOSTPATH", "/lib/modules")
        self.add_volume(libmodule_hostpath)

        # Add per-node docker graph storage (dgs) to the sidecar
        dgs_vol = ContainerVolume("docker-graph-storage", "/var/lib/docker")
        if Cloud().target_cloud_aws():
            dgs_vol.set_type("DOCKERGRAPHSTORAGE", size_in_mb)
        elif Cloud().target_cloud_gcp():
            dgs_vol.set_type("EMPTYDIR")
        self.add_volume(dgs_vol)

        # dind daemon needs to be privileged!
        self.privileged = True
Example #30
File: rest.py Project: nuaays/argo
def axmon_domains_domain(domainname):
    """
    Return a list of records for the domain
    Returns:
        { 'result': [list of records for domain] }
    """
    if Cloud().target_cloud_gcp():
        return jsonify([])
    else:
        r53client = Route53(boto3.client("route53"))
        zone = Route53HostedZone(r53client, domainname)
        return jsonify(result=list(zone.list_records()))