def __init__(self, args, connection_info):
    self.args = args
    self.env = os.environ.copy()
    if _has_not_blank_property(connection_info, 'accessKey'):
        self.env['AWS_ACCESS_KEY_ID'] = connection_info['accessKey']
    if _has_not_blank_property(connection_info, 'secretKey'):
        self.env['AWS_SECRET_ACCESS_KEY'] = connection_info['secretKey']
    if _has_not_blank_property(connection_info, 'region'):
        self.env['AWS_DEFAULT_REGION'] = connection_info['region']
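
These examples all rely on two small helpers that the listing never defines. A minimal sketch, assuming "blank" means None or a whitespace-only value:

def _is_none_or_blank(x):
    # blank covers None and whitespace-only strings
    return x is None or len(str(x).strip()) == 0

def _has_not_blank_property(d, k):
    # true when the key is present and its value is not blank
    return k in d and not _is_none_or_blank(d[k])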
Example 2
def get_cluster_from_connection_info(config_connection_info,
                                     plugin_config_connection_info):
    credentials = None
    if _has_not_blank_property(plugin_config_connection_info, 'credentials'):
        credentials = get_credentials_from_json_or_file(
            plugin_config_connection_info['credentials'])
    return Clusters(config_connection_info.get("projectId", None),
                    config_connection_info.get("zone", None), credentials)
Example 3
def get_first_kube_config(kube_config_path=None):
    if kube_config_path is None:
        if _has_not_blank_property(os.environ, 'KUBECONFIG'):
            kube_config_path = os.environ['KUBECONFIG'].split(':')[0]
        else:
            kube_config_path = os.path.join(os.environ['HOME'], '.kube',
                                            'config')
    return kube_config_path
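
A usage sketch: KUBECONFIG follows kubectl's colon-separated convention and only the first entry is taken (the paths below are hypothetical):

os.environ['KUBECONFIG'] = '/tmp/kube_a:/tmp/kube_b'
get_first_kube_config()      # -> '/tmp/kube_a'
os.environ.pop('KUBECONFIG')
get_first_kube_config()      # -> $HOME/.kube/config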
Example 4
    def stop(self, data):
        connection_info = self.config.get('connectionInfo', {})

        args = ['delete', 'cluster']
        args = args + ['-v', '4']
        args = args + ['--name', self.cluster_id]
        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]
        c = EksctlCommand(args, connection_info)

        if c.run_and_log() != 0:
            raise Exception("Failed to stop cluster")
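
The region resolution above (explicit connection setting first, then the ambient environment) recurs in every command builder in this listing; a hypothetical helper that would factor it out:

def _append_region(args, connection_info):
    # prefer the region configured on the connection, else fall back to the env
    if _has_not_blank_property(connection_info, 'region'):
        return args + ['--region', connection_info['region']]
    if 'AWS_DEFAULT_REGION' in os.environ:
        return args + ['--region', os.environ['AWS_DEFAULT_REGION']]
    return args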
Example 5
            def add_tags(resources, tag, connection_info, command_outputs):
                args = ['ec2', 'create-tags']

                if _has_not_blank_property(connection_info, 'region'):
                    args = args + ['--region', connection_info['region']]
                elif 'AWS_DEFAULT_REGION' in os.environ:
                    args = args + [
                        '--region', os.environ['AWS_DEFAULT_REGION']
                    ]

                args = args + ["--resources"] + resources
                args = args + ["--tags", tag]

                c = AwsCommand(args, connection_info)
                command_outputs.append(c.run())
                if command_outputs[-1][1] != 0:
                    return make_html(command_outputs)
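
For reference, a sketch of the aws CLI call these args assemble (resource IDs and tag are placeholders):

# aws ec2 create-tags --region <region> \
#     --resources subnet-aaa subnet-bbb \
#     --tags Key=kubernetes.io/role/elb,Value=1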
Example 6
    def start(self):
        connection_info = self.config.get('connectionInfo', {})
        networking_settings = self.config["networkingSettings"]

        args = ['create', 'cluster']
        args = args + ['-v', '4']

        if not self.config.get('advanced'):
            args = args + ['--name', self.cluster_id]

            if _has_not_blank_property(connection_info, 'region'):
                args = args + ['--region', connection_info['region']]
            elif 'AWS_DEFAULT_REGION' in os.environ:
                args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

            args = args + ['--full-ecr-access']

            subnets = networking_settings.get('subnets', [])
            if networking_settings.get('privateNetworking', False):
                args = args + ['--node-private-networking']
                private_subnets = networking_settings.get('privateSubnets', [])
                if len(private_subnets) > 0:
                    args = args + [
                        '--vpc-private-subnets', ','.join(private_subnets)
                    ]
            if len(subnets) > 0:
                args = args + ['--vpc-public-subnets', ','.join(subnets)]

            security_groups = networking_settings.get('securityGroups', [])
            if len(security_groups) > 0:
                args = args + [
                    '--node-security-groups', ','.join(security_groups)
                ]

            node_pool = self.config.get('nodePool', {})
            if 'machineType' in node_pool:
                args = args + ['--node-type', node_pool['machineType']]
            if 'diskType' in node_pool:
                args = args + ['--node-volume-type', node_pool['diskType']]
            if 'diskSizeGb' in node_pool and node_pool['diskSizeGb'] > 0:
                args = args + [
                    '--node-volume-size',
                    str(node_pool['diskSizeGb'])
                ]

            args = args + ['--nodes', str(node_pool.get('numNodes', 3))]
            if node_pool.get('numNodesAutoscaling', False):
                args = args + ['--asg-access']
                args = args + [
                    '--nodes-min',
                    str(node_pool.get('minNumNodes', 2))
                ]
                args = args + [
                    '--nodes-max',
                    str(node_pool.get('maxNumNodes', 5))
                ]

            k8s_version = self.config.get("k8sVersion", None)
            if not _is_none_or_blank(k8s_version):
                args = args + ['--version', k8s_version.strip()]
        else:
            yaml_dict = yaml.safe_load(self.config.get("advancedYaml"))
            yaml_loc = os.path.join(os.getcwd(),
                                    self.cluster_id + '_advanced.yaml')
            with open(yaml_loc, 'w') as outfile:
                yaml.dump(yaml_dict, outfile, default_flow_style=False)

            args = args + ['-f', yaml_loc]

        # we don't add the context to the main config file, both to avoid an oversized config
        # and because 2 different clusters could be editing the config file concurrently
        kube_config_path = os.path.join(os.getcwd(), 'kube_config')
        args = args + ['--kubeconfig', kube_config_path]

        c = EksctlCommand(args, connection_info)
        if c.run_and_log() != 0:
            raise Exception("Failed to start cluster")

        args = ['get', 'cluster']
        args = args + ['--name', self.cluster_id]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]
        args = args + ['-o', 'json']

        if _has_not_blank_property(connection_info, 'accessKey') \
                and _has_not_blank_property(connection_info, 'secretKey'):
            creds_in_env = {
                'AWS_ACCESS_KEY_ID': connection_info['accessKey'],
                'AWS_SECRET_ACCESS_KEY': connection_info['secretKey']
            }
            add_authenticator_env(kube_config_path, creds_in_env)

        if not self.config.get('advanced'):
            if node_pool.get('numNodesAutoscaling', False):
                logging.info("Nodegroup is autoscaling, ensuring autoscaler")
                add_autoscaler_if_needed(self.cluster_id, kube_config_path)
        elif self.config.get('clusterAutoScaling'):
            logging.info("Nodegroup is autoscaling, ensuring autoscaler")
            add_autoscaler_if_needed(self.cluster_id, kube_config_path)

        c = EksctlCommand(args, connection_info)
        cluster_info = json.loads(c.run_and_get_output())[0]

        with open(kube_config_path, "r") as f:
            kube_config = yaml.safe_load(f)

        # collect and prepare the overrides so that DSS can know where and how to use the cluster
        overrides = make_overrides(self.config, kube_config, kube_config_path)
        return [
            overrides, {
                'kube_config_path': kube_config_path,
                'cluster': cluster_info
            }
        ]
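
On the non-advanced path, the accumulated args amount to an eksctl call of roughly this shape (values are placeholders; bracketed flags appear only when configured):

# eksctl create cluster -v 4 --name <cluster_id> --region <region> --full-ecr-access \
#     [--node-private-networking] [--vpc-private-subnets s1,s2] [--vpc-public-subnets s3,s4] \
#     [--node-security-groups sg1,sg2] [--node-type <type>] [--node-volume-type <type>] \
#     [--node-volume-size <gb>] --nodes <n> [--asg-access --nodes-min <m> --nodes-max <M>] \
#     [--version <k8s_version>] --kubeconfig <cwd>/kube_config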
Example 7
    def run(self, progress_callback):
        cluster_data, dss_cluster_settings, dss_cluster_config = get_cluster_from_dss_cluster(
            self.config['clusterId'])

        if get_cluster_generic_property(dss_cluster_settings,
                                        'alb-ingress.controller',
                                        'false') == 'true':
            raise Exception("ALB controller already installed, remove first")

        # retrieve the actual name in the cluster's data
        if cluster_data is None:
            raise Exception("No cluster data (not started?)")
        cluster_def = cluster_data.get("cluster", None)
        if cluster_def is None:
            raise Exception("No cluster definition (starting failed?)")
        cluster_id = cluster_def["Name"]
        kube_config_path = dss_cluster_settings.get_raw()['containerSettings'][
            'executionConfigsGenericOverrides']['kubeConfigPath']
        connection_info = dss_cluster_config.get('config',
                                                 {}).get('connectionInfo', {})

        env = os.environ.copy()
        env['KUBECONFIG'] = kube_config_path

        command_outputs = []
        keep_going = True

        # set up the IAM OIDC provider via eksctl
        args = ['utils', 'associate-iam-oidc-provider', '--approve']
        #args = args + ['-v', '4']
        args = args + ['--cluster', cluster_id]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

        c = EksctlCommand(args, connection_info)
        command_outputs.append(c.run())
        if command_outputs[-1][1] != 0:
            return make_html(command_outputs)

        # checking if we need to create the policy
        policy_name = self.config.get('policyName',
                                      'ALBIngressControllerIAMPolicy')

        args = ['iam', 'list-policies']

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

        c = AwsCommand(args, connection_info)
        command_outputs.append(c.run())
        if command_outputs[-1][1] != 0:
            return make_html(command_outputs)

        policy_arn = None
        for policy in json.loads(command_outputs[-1][2])['Policies']:
            if policy.get('PolicyName', None) == policy_name:
                policy_arn = policy.get('Arn', None)

        if policy_arn is None:
            if not self.config.get("createPolicy", False):
                raise Exception(
                    "Policy %s doesn't exist and the macro isn't allowed to create it"
                    % policy_name)
            # create the policy
            policy_document_url = 'https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.8/docs/examples/iam-policy.json'
            policy_document = requests.get(policy_document_url).text
            with open("policy.json", "w") as p:
                p.write(policy_document)

            args = ['iam', 'create-policy']
            args = args + ['--policy-name', policy_name]
            args = args + ['--policy-document', 'file://policy.json']

            if _has_not_blank_property(connection_info, 'region'):
                args = args + ['--region', connection_info['region']]
            elif 'AWS_DEFAULT_REGION' in os.environ:
                args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

            c = AwsCommand(args, connection_info)
            command_outputs.append(c.run())
            if command_outputs[-1][1] != 0:
                return make_html(command_outputs)

            policy_arn = json.loads(command_outputs[-1][2])['Policy'].get(
                'Arn', None)

        # create the role on the cluster
        cmd = [
            'kubectl', 'apply', '-f',
            'https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.4/docs/examples/rbac-role.yaml'
        ]
        logging.info("Run : %s" % json.dumps(cmd))
        try:
            out, err = run_with_timeout(cmd, env=env, timeout=100)
            command_outputs.append((cmd, 0, out, err))
        except KubeCommandException as e:
            command_outputs.append((cmd, e.rv, e.out, e.err))
            keep_going = False

        if not keep_going:
            return make_html(command_outputs)

        # attach the role to the policy

        args = [
            'create', 'iamserviceaccount',
            '--override-existing-serviceaccounts', '--approve'
        ]
        #args = args + ['-v', '4']
        args = args + ['--name', 'alb-ingress-controller']  # the service account name in rbac-role.yaml
        args = args + ['--namespace', 'kube-system']  # the namespace in rbac-role.yaml
        args = args + ['--cluster', cluster_id]
        args = args + ['--attach-policy-arn', policy_arn]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

        c = EksctlCommand(args, connection_info)
        command_outputs.append(c.run())
        if command_outputs[-1][1] != 0:
            return make_html(command_outputs)

        r = requests.get(
            'https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.4/docs/examples/alb-ingress-controller.yaml'
        )
        service_data = r.text  # text (str) rather than content (bytes): re.sub below takes a str pattern
        cluster_flag_pattern = '#.*cluster\\-name=.*'
        cluster_flag_replacement = '- --cluster-name=%s' % cluster_id
        service_data = re.sub(cluster_flag_pattern, cluster_flag_replacement,
                              service_data)

        print(service_data)
        with open('./alb-ingress-controller.yaml', 'w') as f:
            f.write(service_data)

        cmd = ['kubectl', 'apply', '-f', './alb-ingress-controller.yaml']
        logging.info("Run : %s" % json.dumps(cmd))
        try:
            out, err = run_with_timeout(cmd, env=env, timeout=100)
            command_outputs.append((cmd, 0, out, err))
        except KubeCommandException as e:
            command_outputs.append((cmd, e.rv, e.out, e.err))
            keep_going = False

        if not keep_going:
            return make_html(command_outputs)

        if self.config.get("tagSubnets", False):
            networking_settings = dss_cluster_config.get('config', {}).get(
                'networkingSettings', {})
            subnets = networking_settings.get('subnets', [])
            if networking_settings.get('privateNetworking', False):
                private_subnets = dss_cluster_config.get('config', {}).get(
                    'networkingSettings', {}).get('privateSubnets', [])
            else:
                private_subnets = []

            def add_tags(resources, tag, connection_info, command_outputs):
                args = ['ec2', 'create-tags']

                if _has_not_blank_property(connection_info, 'region'):
                    args = args + ['--region', connection_info['region']]
                elif 'AWS_DEFAULT_REGION' in os.environ:
                    args = args + [
                        '--region', os.environ['AWS_DEFAULT_REGION']
                    ]

                args = args + ["--resources"] + resources
                args = args + ["--tags", tag]

                c = AwsCommand(args, connection_info)
                command_outputs.append(c.run())
                if command_outputs[-1][1] != 0:
                    return make_html(command_outputs)

            if len(subnets) > 0:
                add_tags(subnets, 'Key=kubernetes.io/role/elb,Value=1',
                         connection_info, command_outputs)
            if len(private_subnets) > 0:
                add_tags(private_subnets,
                         'Key=kubernetes.io/role/internal-elb,Value=1',
                         connection_info, command_outputs)

        set_cluster_generic_property(dss_cluster_settings,
                                     'alb-ingress.controller', 'true', True)

        return make_html(command_outputs)
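
End to end, the macro above runs this sequence (a sketch; URLs as in the code):

# 1. eksctl utils associate-iam-oidc-provider --approve --cluster <cluster_id> --region <region>
# 2. aws iam list-policies              (find the policy by name; create it if allowed)
# 3. kubectl apply -f .../rbac-role.yaml
# 4. eksctl create iamserviceaccount --name alb-ingress-controller --namespace kube-system \
#        --cluster <cluster_id> --attach-policy-arn <policy_arn> \
#        --override-existing-serviceaccounts --approve
# 5. kubectl apply -f ./alb-ingress-controller.yaml   (with --cluster-name patched in)
# 6. optionally, aws ec2 create-tags on the public and private subnets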
Example 8
    def run(self, progress_callback):
        cluster_data, dss_cluster_settings, dss_cluster_config = get_cluster_from_dss_cluster(
            self.config['clusterId'])

        # retrieve the actual name in the cluster's data
        if cluster_data is None:
            raise Exception("No cluster data (not started?)")
        cluster_def = cluster_data.get("cluster", None)
        if cluster_def is None:
            raise Exception("No cluster definition (starting failed?)")
        cluster_id = cluster_def["Name"]

        connection_info = dss_cluster_config.get('config',
                                                 {}).get('connectionInfo', {})

        node_group_id = self.config.get('nodeGroupId', None)
        if node_group_id is None or len(node_group_id) == 0:
            args = ['get', 'nodegroup']
            #args = args + ['-v', '4']
            args = args + ['--cluster', cluster_id]

            if _has_not_blank_property(connection_info, 'region'):
                args = args + ['--region', connection_info['region']]
            elif 'AWS_DEFAULT_REGION' in os.environ:
                args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

            args = args + ['-o', 'json']

            c = EksctlCommand(args, connection_info)
            node_groups = json.loads(c.run_and_get_output())
            node_group_ids = [node_group['Name'] for node_group in node_groups]
            if len(node_group_ids) != 1:
                raise Exception(
                    "Cluster has %s node groups, cannot resize. Specify a node group explicitly among %s"
                    % (len(node_group_ids), json.dumps(node_group_ids)))
            node_group_id = node_group_ids[0]

        args = ['get', 'nodegroup']
        #args = args + ['-v', '4']
        args = args + ['--cluster', cluster_id]
        args = args + ['--name', node_group_id]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

        args = args + ['-o', 'json']

        c = EksctlCommand(args, connection_info)
        node_group_batch = json.loads(c.run_and_get_output())
        if len(node_group_batch) == 0:
            raise Exception("Unable to retrieve info of node group %s" %
                            node_group_id)

        node_group = node_group_batch[0]

        desired_count = self.config['numNodes']
        logging.info("Resize to %s" % desired_count)
        if desired_count == 0:
            args = ['delete', 'nodegroup']
            args = args + ['-v', '4']
            args = args + ['--cluster', cluster_id]
            args = args + ['--name', node_group_id]

            if _has_not_blank_property(connection_info, 'region'):
                args = args + ['--region', connection_info['region']]
            elif 'AWS_DEFAULT_REGION' in os.environ:
                args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

            c = EksctlCommand(args, connection_info)
            rv, out, err = c.run_and_get()
            if rv == 0:
                logging.info("Cluster node group deleted")
                return '<div>Deleted</div><pre class="debug">%s</pre>' % node_group_id
            else:
                logging.info("Cluster node group failed to delete")
                return '<div>Failed to delete the node group</div><pre class="debug">%s</pre>' % (
                    err)

        else:
            args = ['scale', 'nodegroup']
            args = args + ['-v', '4']
            args = args + ['--cluster', cluster_id]
            args = args + ['--name', node_group_id]
            args = args + ['--nodes', str(desired_count)]
            desired_min_count = self.config.get('minNumNodes', -1)
            desired_max_count = self.config.get('maxNumNodes', -1)
            if desired_min_count > 0:
                args = args + ['--nodes-min', str(desired_min_count)]
            if desired_max_count > 0:
                args = args + ['--nodes-max', str(desired_max_count)]

            if _has_not_blank_property(connection_info, 'region'):
                args = args + ['--region', connection_info['region']]
            elif 'AWS_DEFAULT_REGION' in os.environ:
                args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

            c = EksctlCommand(args, connection_info)
            rv, out, err = c.run_and_get()
            if rv == 0:
                logging.info("Cluster node group resized")
                return '<div>Resized</div><pre class="debug">%s</pre>' % node_group_id
            else:
                logging.info("Cluster node group failed to resize")
                return '<div>Failed to resize the node group</div><pre class="debug">%s</pre>' % (
                    err)
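
In CLI terms, the macro issues one of these two calls (values are placeholders; min/max flags only when positive):

# desired_count == 0:
#   eksctl delete nodegroup -v 4 --cluster <cluster_id> --name <node_group_id> --region <region>
# desired_count > 0:
#   eksctl scale nodegroup -v 4 --cluster <cluster_id> --name <node_group_id> \
#       --nodes <n> [--nodes-min <m>] [--nodes-max <M>] --region <region>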
Example 9
    def run(self, progress_callback):
        cluster_data, dss_cluster_settings, dss_cluster_config = get_cluster_from_dss_cluster(self.config['clusterId'])

        if get_cluster_generic_property(dss_cluster_settings, 'alb-ingress.controller', 'false') != 'true':
            raise Exception("ALB controller not installed (or not by the installation macro)")

        # retrieve the actual name in the cluster's data
        if cluster_data is None:
            raise Exception("No cluster data (not started?)")
        cluster_def = cluster_data.get("cluster", None)
        if cluster_def is None:
            raise Exception("No cluster definition (starting failed?)")
        cluster_id = cluster_def["Name"]
        kube_config_path = dss_cluster_settings.get_raw()['containerSettings']['executionConfigsGenericOverrides']['kubeConfigPath']
        connection_info = dss_cluster_config.get('config', {}).get('connectionInfo', {})
        
        env = os.environ.copy()
        env['KUBECONFIG'] = kube_config_path

        command_outputs = []
        keep_going = True
        
        # delete the controller
        cmd = ['kubectl', 'delete', '-f', 'https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.4/docs/examples/alb-ingress-controller.yaml']
        logging.info("Run : %s" % json.dumps(cmd))
        try:
            out, err = run_with_timeout(cmd, env=env, timeout=100)
            command_outputs.append((cmd, 0, out, err))
        except KubeCommandException as e:
            command_outputs.append((cmd, e.rv, e.out, e.err))
            keep_going = False

        if not keep_going:
            return make_html(command_outputs)

        # detach the role from the policy
        args = ['delete', 'iamserviceaccount']
        #args = args + ['-v', '4']
        args = args + ['--name', 'alb-ingress-controller'] # the service account name in rbac-role.yaml
        args = args + ['--namespace', 'kube-system'] # the namespace in rbac-role.yaml
        args = args + ['--cluster', cluster_id]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

        c = EksctlCommand(args, connection_info)
        command_outputs.append(c.run())
        if command_outputs[-1][1] != 0:
            return make_html(command_outputs)
        
        # delete the role on the cluster
        cmd = ['kubectl', 'delete', '-f', 'https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.4/docs/examples/rbac-role.yaml']
        logging.info("Run : %s" % json.dumps(cmd))
        try:
            out, err = run_with_timeout(cmd, env=env, timeout=100)
            command_outputs.append((cmd, 0, out, err))
        except KubeCommandException as e:
            command_outputs.append((cmd, e.rv, e.out, e.err))

        set_cluster_generic_property(dss_cluster_settings, 'alb-ingress.controller', 'false', True)

        return make_html(command_outputs)
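
The teardown mirrors the installation macro in reverse (a sketch):

# 1. kubectl delete -f .../alb-ingress-controller.yaml
# 2. eksctl delete iamserviceaccount --name alb-ingress-controller --namespace kube-system \
#        --cluster <cluster_id> --region <region>
# 3. kubectl delete -f .../rbac-role.yaml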
Example 10
    def run(self, progress_callback):
        cluster_data, dss_cluster_settings, dss_cluster_config = get_cluster_from_dss_cluster(
            self.config['clusterId'])

        # retrieve the actual name in the cluster's data
        if cluster_data is None:
            raise Exception("No cluster data (not started?)")
        cluster_def = cluster_data.get("cluster", None)
        if cluster_def is None:
            raise Exception("No cluster definition (starting failed?)")
        cluster_id = cluster_def["Name"]

        connection_info = dss_cluster_config.get('config',
                                                 {}).get('connectionInfo', {})

        node_group_id = self.config.get('nodeGroupId', None)
        if node_group_id is None or len(node_group_id) == 0:
            args = ['get', 'nodegroup']
            #args = args + ['-v', '4']
            args = args + ['--cluster', cluster_id]

            if _has_not_blank_property(connection_info, 'region'):
                args = args + ['--region', connection_info['region']]
            elif 'AWS_DEFAULT_REGION' in os.environ:
                args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

            args = args + ['-o', 'json']

            c = EksctlCommand(args, connection_info)
            node_groups = json.loads(c.run_and_get_output())
            node_group_ids = [node_group['Name'] for node_group in node_groups]
        else:
            node_group_ids = [node_group_id]

        node_groups = []
        for node_group_id in node_group_ids:
            args = ['get', 'nodegroup']
            #args = args + ['-v', '4']
            args = args + ['--cluster', cluster_id]
            args = args + ['--name', node_group_id]

            if _has_not_blank_property(connection_info, 'region'):
                args = args + ['--region', connection_info['region']]
            elif 'AWS_DEFAULT_REGION' in os.environ:
                args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

            args = args + ['-o', 'json']

            c = EksctlCommand(args, connection_info)
            node_group_batch = json.loads(c.run_and_get_output())
            if len(node_group_batch) == 0:
                node_groups.append(
                    '<h5>%s</h5><div class="alert alert-error">Unable to get details</div>'
                    % (node_group_id))
                continue

            node_group = node_group_batch[0]

            node_group_stack_name = node_group['StackName']

            args = ['cloudformation', 'describe-stack-resources']
            args = args + ['--stack-name', node_group_stack_name]

            c = AwsCommand(args, connection_info)
            node_group_resources = json.loads(c.run_and_get_output()).get(
                'StackResources', [])

            # find the auto-scaling-group
            auto_scaling_resource = None
            for r in node_group_resources:
                if r.get('ResourceType',
                         '') == 'AWS::AutoScaling::AutoScalingGroup':
                    auto_scaling_resource = r

            if auto_scaling_resource is None:
                node_groups.append(
                    '<h5>%s</h5><div class="alert alert-error">Unable to get auto-scaling group</div><pre class="debug">%s</pre>'
                    % (node_group_id, json.dumps(node_group, indent=2)))
                continue

            node_group_auto_scaling_id = auto_scaling_resource[
                'PhysicalResourceId']

            args = ['autoscaling', 'describe-auto-scaling-groups']
            args = args + [
                '--auto-scaling-group-names', node_group_auto_scaling_id
            ]

            c = AwsCommand(args, connection_info)
            auto_scaling_resources = json.loads(c.run_and_get_output()).get(
                'AutoScalingGroups', [])

            if len(auto_scaling_resources) == 0:
                node_groups.append(
                    '<h5>%s</h5><div class="alert alert-error">Unable to get auto-scaling group\'s resources</div><pre class="debug">%s</pre>'
                    % (node_group_id, json.dumps(node_group, indent=2)))
                continue

            auto_scaling_resource = auto_scaling_resources[0]

            min_instances = auto_scaling_resource.get('MinSize', '')
            cur_instances = len(auto_scaling_resource.get('Instances', []))
            max_instances = auto_scaling_resource.get('MaxSize', '')
            node_groups.append(
                '<h5>%s</h5><pre class="debug">%s</pre><div>Min=%s, current=%s, max=%s</div><pre class="debug">%s</pre>'
                % (node_group_id, json.dumps(node_group, indent=2),
                   min_instances, cur_instances, max_instances,
                   json.dumps(auto_scaling_resource.get('Instances', []),
                              indent=2)))

        return '<div>%s</div>' % ''.join(node_groups)
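
The lookup above chains two aws CLI calls to get from a nodegroup to its live instance counts; in sketch form:

# aws cloudformation describe-stack-resources --stack-name <nodegroup_stack>
#     -> take the PhysicalResourceId of the AWS::AutoScaling::AutoScalingGroup resource
# aws autoscaling describe-auto-scaling-groups --auto-scaling-group-names <asg_id>
#     -> read MinSize, MaxSize and the Instances list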
Example 11
    def run(self, progress_callback):
        cluster_data, dss_cluster_settings, dss_cluster_config = get_cluster_from_dss_cluster(
            self.config['clusterId'])

        # retrieve the actual name in the cluster's data
        if cluster_data is None:
            raise Exception("No cluster data (not started?)")
        cluster_def = cluster_data.get("cluster", None)
        if cluster_def is None:
            raise Exception("No cluster definition (starting failed?)")
        cluster_id = cluster_def["Name"]

        # the cluster is accessible via the kubeconfig
        kube_config_path = dss_cluster_settings.get_raw()['containerSettings'][
            'executionConfigsGenericOverrides']['kubeConfigPath']

        connection_info = dss_cluster_config.get('config',
                                                 {}).get('connectionInfo', {})

        node_group_id = self.config.get('nodeGroupId', None)

        args = ['create', 'nodegroup']
        args = args + ['-v', '4']
        args = args + ['--cluster', cluster_id]
        if node_group_id is not None and len(node_group_id) > 0:
            args = args + ['--name', node_group_id]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

        if dss_cluster_config['config'].get('useEcr', False):
            args = args + ['--full-ecr-access']

        if dss_cluster_config.get('privateNetworking', False):
            args = args + ['--node-private-networking']

        security_groups = dss_cluster_config['config'].get(
            'securityGroups', [])
        if len(security_groups) > 0:
            args = args + ['--node-security-groups', ','.join(security_groups)]

        node_pool = self.config.get('nodePool', {})
        if 'machineType' in node_pool:
            args = args + ['--node-type', node_pool['machineType']]
        if 'diskType' in node_pool:
            args = args + ['--node-volume-type', node_pool['diskType']]
        if 'diskSizeGb' in node_pool and node_pool['diskSizeGb'] > 0:
            args = args + ['--node-volume-size', str(node_pool['diskSizeGb'])]

        args = args + ['--nodes', str(node_pool.get('numNodes', 3))]
        if node_pool.get('numNodesAutoscaling', False):
            args = args + ['--asg-access']
            args = args + ['--nodes-min', str(node_pool.get('minNumNodes', 2))]
            args = args + ['--nodes-max', str(node_pool.get('maxNumNodes', 5))]

        c = EksctlCommand(args, connection_info)
        if c.run_and_log() != 0:
            raise Exception("Failed to add nodegroup")

        if node_pool.get('numNodesAutoscaling', False):
            logging.info("Nodegroup is autoscaling, ensuring autoscaler")
            add_autoscaler_if_needed(cluster_id, kube_config_path)

        args = ['get', 'nodegroup']
        #args = args + ['-v', '4']
        args = args + ['--cluster', cluster_id]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]

        args = args + ['-o', 'json']

        c = EksctlCommand(args, connection_info)
        node_groups_str = c.run_and_get_output()

        return '<h5>Nodegroup added</h5><pre class="debug">%s</pre>' % node_groups_str
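
As a CLI sketch, the nodegroup creation above amounts to (values are placeholders; bracketed flags only when configured):

# eksctl create nodegroup -v 4 --cluster <cluster_id> [--name <node_group_id>] --region <region> \
#     [--full-ecr-access] [--node-private-networking] [--node-security-groups sg1,sg2] \
#     [--node-type <type>] [--node-volume-type <type>] [--node-volume-size <gb>] \
#     --nodes <n> [--asg-access --nodes-min <m> --nodes-max <M>]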
Example 12
    def start(self):
        cluster_id = self.config['clusterId']
        # retrieve the cluster info from EKS
        # this will fail if the cluster doesn't exist, but the API message is enough
        connection_info = self.config.get('connectionInfo', {})
        args = ['get', 'cluster']
        args = args + ['--name', cluster_id]

        if _has_not_blank_property(connection_info, 'region'):
            args = args + ['--region', connection_info['region']]
        elif 'AWS_DEFAULT_REGION' in os.environ:
            args = args + ['--region', os.environ['AWS_DEFAULT_REGION']]
        args = args + ['-o', 'json']

        c = EksctlCommand(args, connection_info)
        cluster_info = json.loads(c.run_and_get_output())[0]

        kube_config_str = """
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: %s
    server: %s
  name: cluster-__CLUSTER_ID__
contexts:
- context:
    cluster: cluster-__CLUSTER_ID__
    user: user-__CLUSTER_ID__
  name: context-__CLUSTER_ID__
current-context: context-__CLUSTER_ID__
kind: Config
preferences: {}
users:
- name: user-__CLUSTER_ID__
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      args:
      - token
      - -i
      - %s
      command: aws-iam-authenticator
      env: null
        """ % (cluster_info['CertificateAuthority']['Data'],
               cluster_info['Endpoint'], cluster_id)
        kube_config_str = kube_config_str.replace(
            "__CLUSTER_ID__", cluster_id
        )  # cluster_id is as good as anything, since this kubeconfig won't be merged into another one

        # build the config file for kubectl
        # we don't add the context to the main config file, both to avoid an oversized config
        # and because 2 different clusters could be editing the config file concurrently
        kube_config_path = os.path.join(os.getcwd(), 'kube_config')
        with open(kube_config_path, 'w') as f:
            f.write(kube_config_str)

        if _has_not_blank_property(connection_info, 'accessKey') \
                and _has_not_blank_property(connection_info, 'secretKey'):
            creds_in_env = {
                'AWS_ACCESS_KEY_ID': connection_info['accessKey'],
                'AWS_SECRET_ACCESS_KEY': connection_info['secretKey']
            }
            add_authenticator_env(kube_config_path, creds_in_env)

        kube_config = yaml.safe_load(kube_config_str)

        # collect and prepare the overrides so that DSS can know where and how to use the cluster
        overrides = make_overrides(self.config, kube_config, kube_config_path)
        return [
            overrides, {
                'kube_config_path': kube_config_path,
                'cluster': cluster_info
            }
        ]
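
The exec stanza in the kubeconfig template makes kubectl fetch a token on demand by running, in effect:

# aws-iam-authenticator token -i <cluster_id>

and add_authenticator_env presumably injects the access/secret key pair into that exec environment so the authenticator can sign the request.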