Example #1
File: certs.py  Project: talkhasib/magma
def add(ctx, self_signed):
    """Add creates application and self signed(optional) certs

    Args:
        ctx ([type]): Click context
        self_signed ([type]): add self_signed certs. Defaults to False.
    """
    run_playbook(certs_cmd(ctx.obj, self_signed))
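For context, a minimal sketch of how a Click command like add is typically wired up. The group name, flag declaration, and context setup below are illustrative assumptions, not taken from the magma source:

import click

@click.group()
@click.pass_context
def certs(ctx):
    # Hypothetical parent group; in practice ctx.obj carries the
    # constants dict that certs_cmd() consumes.
    ctx.obj = {}

@certs.command()
@click.option('--self-signed', is_flag=True, default=False,
              help='Also generate self-signed certs.')
@click.pass_context
def add(ctx, self_signed):
    run_playbook(certs_cmd(ctx.obj, self_signed))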
Example #2
def verify_sanity(ctx, namespace):
    # If KUBECONFIG is not set, find a kubeconfig file in the project
    # directory and set the environment variable.
    constants = ctx.obj

    # set kubeconfig
    kubeconfig = os.environ.get('KUBECONFIG')
    if not kubeconfig:
        kubeconfigs = glob.glob(constants['project_dir'] + "/kubeconfig_*")
        if len(kubeconfigs) != 1:
            if len(kubeconfigs) == 0:
                print_error_msg('No kubeconfig found!!!')
            else:
                print_error_msg("multiple kubeconfigs found %s!!!" %
                                repr(kubeconfigs))
            return
        kubeconfig = kubeconfigs[0]
        set_kubeconfig_environ(kubeconfig)

    # check if we have a valid namespace
    all_namespaces = get_all_namespaces(kubeconfig)
    while namespace not in all_namespaces:
        namespace = click.prompt('Provide orc8r namespace',
                                 type=click.Choice(all_namespaces))

    rc = run_playbook(verify_cmd(ctx.obj, namespace))
    if rc != 0:
        print_error_msg("Post deployment verification checks failed!!!")
        sys.exit(1)
    print_success_msg("Post deployment verification ran successfully")
Example #3
def raw_cleanup(
        constants: dict,
        override_dict: dict = None,
        dryrun: bool = False,
        max_retries: int = 2):
    """Perform raw cleanup of resources using internal commands

    Args:
        constants (dict): config dict
        override_dict (dict, optional): override dict. Defaults to None.
        dryrun (bool, optional): flag to indicate dryrun. Defaults to False.
        max_retries (int, optional): maximum number of retries. Defaults to 2.
    """
    if not override_dict and not constants.get('cleanup_state'):
        backup_fn = tf_backup_fn(constants['project_dir'])
        if Path(backup_fn).exists():
            constants['cleanup_state'] = backup_fn
    if override_dict:
        constants.update(override_dict)

    # Cleanups sometimes do not complete fully due to timing-related
    # resource dependencies, so run the playbook a few times to remove
    # all resources.
    for i in range(max_retries):
        rc = run_playbook(cleanup_cmd(constants, dryrun))
        if rc != 0:
            print_error_msg("Failed cleaning up resources!!!")
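A hypothetical invocation of raw_cleanup; 'project_dir' is the only key the snippet clearly requires, and the other values below are placeholders:

constants = {'project_dir': '/opt/magma/deploy'}
raw_cleanup(
    constants,
    override_dict={'cleanup_state': 'terraform.tfstate.backup'},  # placeholder path
    dryrun=True,
    max_retries=1,
)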
Example #4
def precheck(ctx):
    """
    Performs various checks to ensure successful upgrade
    """
    rc = run_playbook(precheck_cmd(ctx.obj))
    if rc != 0:
        print_error_msg("Upgrade prechecks failed!!!")
        sys.exit(1)
    print_success_msg("Upgrade prechecks ran successfully")
Example #5
    def destroy_gateways(self, cluster_config):
        """ destroy the AWS gateways instantiated through test cluster
        Args:
            cluster_config (dict): Test cluster configuration
        """
        if not cluster_config.gateways:
            print("Gateway information not found in cluster configs")
            return

        project_dir = self.constants["project_dir"]
        playbook_dir = self.constants["cloudstrapper_playbooks"]

        # cloudstrapper expects secrets.yaml to be present
        Path(f"{project_dir}/secrets.yaml").touch()

        aws_configs = get_aws_configs()
        cstrap_dict = {
            "testClusterStacks":
            [gw.gateway_id for gw in cluster_config.gateways],
            "dirLocalInventory": self.constants["project_dir"],
            "idSite": "TestCluster",
            "awsAccessKey": aws_configs["aws_access_key_id"],
            "awsSecretKey": aws_configs["aws_secret_access_key"],
            "dirSecretsLocal": self.constants["secret_dir"],
        }

        cluster_cleanup = AnsiblePlay(
            playbook=f"{playbook_dir}/cluster-provision.yaml",
            tags=['clusterCleanup'],
            extra_vars=cstrap_dict,
        )
        network_cleanup = AnsiblePlay(
            playbook=f"{playbook_dir}/agw-provision.yaml",
            tags=['cleanupBridge', 'cleanupNet'],
            skip_tags=['attachIface'],
            extra_vars=cstrap_dict,
        )

        for playbook in [cluster_cleanup, network_cleanup]:
            print(f"Running playbook {playbook}")
            rc = run_playbook(playbook)
            if rc != 0:
                raise ClusterDestroyError("Failed destroying cluster")
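AnsiblePlay is not defined on this page; judging from the keyword arguments used across these examples, it is likely a plain parameter container. A minimal sketch under that assumption:

from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

@dataclass
class AnsiblePlay:
    # Assumed structure: fields mirror the keyword arguments used in the
    # examples on this page; the real class may differ.
    playbook: str
    tags: List[str] = field(default_factory=list)
    skip_tags: List[str] = field(default_factory=list)
    extra_vars: Dict[str, Any] = field(default_factory=dict)
    inventory: Optional[str] = None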
Example #6
    def create_orc8r(
        self,
        constants: dict,
        cluster_uuid: str,
        template: ClusterTemplate = None,
        skip_certs: bool = False,
        skip_precheck: bool = False,
    ) -> Dict[str, Any]:
        """ Create an orc8r instance in the test cluster

        Args:
            constants (dict): Constants dictionary
            cluster_uuid (str): UUID assigned to the new cluster
            template (ClusterTemplate, optional): Cluster template definition. Defaults to None.
            skip_certs (bool, optional): Skip certs creation. Defaults to False.
            skip_precheck (bool, optional): Skip prechecks. Defaults to False.

        Raises:
            ClusterCreateError: Exception raised when cluster creation fails

        Returns:
            Dict[str, Any]: cluster configuration dictionary
        """
        # configure deployment
        template.orc8r.infra['magma_uuid'] = cluster_uuid
        template.orc8r.infra.update(get_aws_configs())

        # set elastic deploy role based on current state
        k = 'deploy_elasticsearch_service_linked_role'
        template.orc8r.platform[k] = check_elastic_role_not_exists()

        mgr = ConfigManager(constants)
        template_dict = template.orc8r.to_dict()
        for component, configs in template_dict.items():
            for k, v in configs.items():
                mgr.set(component, k, v)
            mgr.commit(component)

        # run playbooks in order
        if not skip_certs:
            logging.debug("Adding self signed and application certs")
            rc = run_playbook(certs_cmd(constants, self_signed=True))
            if rc != 0:
                raise ClusterCreateError(f"Failed running adding certs")

        if not skip_precheck:
            logging.debug("Running installation prechecks")
            rc = run_playbook(precheck_cmd(constants))
            if rc != 0:
                raise ClusterCreateError(f"Failed running prechecks")

        # create the orc8r cluster
        rc = tf_install(constants, warn=False)
        if rc != 0:
            raise ClusterCreateError(f"Failed installing cluster")

        # update dns record for parent domain
        dns_dict = {
            "domain_name": template.orc8r.infra["orc8r_domain_name"],
        }
        dns_dict.update(constants)
        rc = run_playbook(
            AnsiblePlay(
                playbook=f"{constants['playbooks']}/main.yml",
                tags=['update_dns_records'],
                extra_vars=dns_dict,
            ),
        )
        if rc != 0:
            raise ClusterCreateError(
                "Failed updating dns records for parent domain")

        cluster_config_dict = {
            "uuid": cluster_uuid,
            "cluster_type": ClusterType.AWS,
            "template": template,
        }
        return cluster_config_dict
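check_elastic_role_not_exists is also project-internal. A plausible boto3-based sketch that returns True when the Elasticsearch service-linked role is absent (the role name and error handling are assumptions):

import boto3
from botocore.exceptions import ClientError

def check_elastic_role_not_exists() -> bool:
    # Assumed behavior: Terraform should create the service-linked role
    # only if the account does not already have one.
    iam = boto3.client('iam')
    try:
        iam.get_role(RoleName='AWSServiceRoleForAmazonElasticsearchService')
        return False
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchEntity':
            return True
        raise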
Example #7
    def create_gateways(
        self,
        constants: dict,
        cluster_uuid: str,
        template: ClusterTemplate = None,
    ) -> Dict[str, Any]:
        """ Create AGW gateways in the test cluster

        Args:
            constants (dict): Constants dictionary
            cluster_uuid (str): UUID assigned to the new cluster
            template (ClusterTemplate, optional): Cluster template definition. Defaults to None.

        Raises:
            ClusterCreateError: Exception raised when cluster creation fails

        Returns:
            Dict[str, Any]: cluster configuration dictionary
        """
        # create edge network
        project_dir = constants["project_dir"]
        playbook_dir = constants["cloudstrapper_playbooks"]

        # cloudstrapper expects secrets.yaml to be present
        Path(f"{project_dir}/secrets.yaml").touch()

        aws_configs = get_aws_configs()
        cluster_stack = AWSClusterFactory.generate_cluster_stack(
            template.gateway.prefix,
            template.gateway.count,
        )

        cstrap_dict = {
            "clusterUuid": cluster_uuid,
            "dirLocalInventory": constants["project_dir"],
            "idSite": "TestCluster",
            "testClusterStacks": cluster_stack,
            "awsAccessKey": aws_configs["aws_access_key_id"],
            "awsSecretKey": aws_configs["aws_secret_access_key"],
            "idGw": "dummy_gateway",
            "dirSecretsLocal": constants["secret_dir"],
            "awsAgwAmi": template.gateway.ami,
            "awsCloudstrapperAmi": template.gateway.cloudstrapper_ami,
            "awsAgwRegion": template.gateway.region,
            "awsAgwAz": template.gateway.az,
            "orc8rDomainName": template.orc8r.infra['orc8r_domain_name'],
        }

        key_create = AnsiblePlay(
            playbook=f"{playbook_dir}/aws-prerequisites.yaml",
            tags=['keyCreate'],
            extra_vars=cstrap_dict,
        )
        bridge_gw_create = AnsiblePlay(
            playbook=f"{playbook_dir}/agw-provision.yaml",
            tags=['createNet', 'createBridge', 'inventory'],
            skip_tags=['attachIface'],
            extra_vars=cstrap_dict,
        )

        # create test instances
        test_inst_create = AnsiblePlay(
            playbook=f"{playbook_dir}/cluster-provision.yaml",
            tags=['clusterStart'],
            extra_vars=cstrap_dict,
        )

        jump_config_dict = {"agws": "tag_Name_TestClusterBridge"}
        jump_config_dict.update(cstrap_dict)
        test_ssh_configure = AnsiblePlay(
            playbook=f"{playbook_dir}/cluster-provision.yaml",
            tags=['clusterJump'],
            extra_vars=jump_config_dict,
        )

        # configure test instances
        agws_config_dict = {
            "agws": f"tag_Name_{template.gateway.prefix}*",
        }
        agws_config_dict.update(template.gateway.service_config)
        agws_config_dict.update(cstrap_dict)
        test_inst_configure = AnsiblePlay(
            inventory=f"{project_dir}/common_instance_aws_ec2.yaml",
            playbook=f"{playbook_dir}/cluster-configure.yaml",
            tags=['exporter', 'clusterConfigure'],
            extra_vars=agws_config_dict,
        )

        max_retries = 3
        for i in range(max_retries):
            fail = False
            for playbook in [
                    key_create,
                    bridge_gw_create,
                    test_inst_create,
                    test_ssh_configure,
                    test_inst_configure,
            ]:
                print(f"Running playbook {playbook}")
                rc = run_playbook(playbook)
                if rc != 0:
                    fail = True
                    print("Failed creating gateway cluster...trying again")
                    break
                # brief pause between playbooks
                time.sleep(10)

            if not fail:
                break

        # get the newly instantiated gateways
        gateways = []
        for gateway_id, hostname in get_gateways(template.gateway.prefix):
            gateways.append(
                GatewayConfig(
                    gateway_id=gateway_id,
                    hostname=hostname,
                    hardware_id="",
                ),
            )
        internal_config = ClusterInternalConfig(bastion_ip=get_bastion_ip())
        cluster_config_dict = {
            "uuid": cluster_uuid,
            "internal_config": internal_config,
            "cluster_type": ClusterType.AWS,
            "template": template,
            "gateways": gateways,
        }
        return cluster_config_dict
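get_gateways is likewise internal. A plausible boto3 sketch that yields (gateway_id, hostname) pairs for EC2 instances whose Name tag matches the gateway prefix (the tag filter and returned fields are assumptions):

import boto3
from typing import Iterator, Tuple

def get_gateways(prefix: str) -> Iterator[Tuple[str, str]]:
    # Assumed implementation: list running EC2 instances tagged
    # Name=<prefix>*, yielding (instance id, private DNS name).
    ec2 = boto3.client('ec2')
    paginator = ec2.get_paginator('describe_instances')
    filters = [
        {'Name': 'tag:Name', 'Values': [prefix + '*']},
        {'Name': 'instance-state-name', 'Values': ['running']},
    ]
    for page in paginator.paginate(Filters=filters):
        for reservation in page['Reservations']:
            for instance in reservation['Instances']:
                yield instance['InstanceId'], instance.get('PrivateDnsName', '')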