Example #1
def run_in_pod(pod_name, command, deployment_target=None, errors_to_ignore=None, print_command=True, verbose=False, is_interactive=False):
    """Execute an arbitrary linux command inside the given pod.
    Assumes there's only 1 instance with the given pod_name.

    Args:
        pod_name (str): either the pod's "name" label (eg. 'phenotips' or 'nginx'), or the full pod name (eg. "phenotips-cdd4d7dc9-vgmjx")
        command (str): linux command to execute inside the pod
        deployment_target (string): value from DEPLOYMENT_TARGETS - eg. "minikube", "gcloud-dev", etc.
        errors_to_ignore (list): if the command's return code isn't in ok_return_codes, but its
            output contains one of the strings in this list, the bad return code will be ignored,
            and this function will return None. Otherwise, it raises a RuntimeError.
        print_command (bool): whether to print the command before running it
        verbose (bool): whether to print the command's output
        is_interactive (bool): whether the command expects input from the user
    """

    full_pod_name = get_pod_name(pod_name, deployment_target=deployment_target)
    if not full_pod_name:
        # assume it's already a full pod name
        full_pod_name = pod_name

    it_arg = "-it" if is_interactive else ""
    run("kubectl exec %(it_arg)s %(full_pod_name)s -- %(command)s" % locals(),
        errors_to_ignore=errors_to_ignore,
        print_command=print_command,
        verbose=verbose,
        is_interactive=is_interactive)
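
All of these examples delegate to a shared run() helper that is not shown on this page. As a rough sketch only, assuming the behavior documented in the docstring above (command echoing, errors_to_ignore substring matching, and a RuntimeError on other non-zero exit codes), it could look like:

import subprocess

def run(command, ok_return_codes=(0,), errors_to_ignore=None,
        print_command=True, verbose=False, is_interactive=False):
    """Hypothetical stand-in for the shared run() helper these examples assume."""
    if print_command:
        print("==> %s" % command)
    # interactive commands keep the terminal's stdin/stdout; others are captured
    result = subprocess.run(command, shell=True, text=True,
                            capture_output=not is_interactive)
    output = (result.stdout or "") + (result.stderr or "")
    if result.returncode not in ok_return_codes:
        if errors_to_ignore and any(e in output for e in errors_to_ignore):
            return None  # ignore the bad return code, as documented above
        raise RuntimeError("Command failed with exit code %s: %s\n%s" % (
            result.returncode, command, output))
    if verbose and output:
        print(output)
    return result.stdout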
Example #2
def deploy_secrets(settings, components=None):
    """Deploys or updates k8s secrets."""

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    print_separator("secrets")

    create_namespace(settings)

    if not components:
        components = SECRETS.keys()

    # delete any existing secrets before recreating them
    for secret_label in components:
        run("kubectl delete secret {}-secrets".format(secret_label), verbose=False, errors_to_ignore=["not found"])

    for secret_label in components:
        secret_files = SECRETS.get(secret_label)
        if not secret_files:
            raise Exception('Invalid secret component {}'.format(secret_label))

        secret_command = ['kubectl create secret generic {secret_label}-secrets'.format(secret_label=secret_label)]
        secret_command += [
            '--from-file deploy/secrets/gcloud/{secret_label}/{file}'.format(secret_label=secret_label, file=file)
            for file in secret_files
        ]
        if secret_label == GCLOUD_CLIENT:
            secret_command.append('--from-file deploy/secrets/shared/gcloud/boto')
        run(" ".join(secret_command).format(deploy_to=settings['DEPLOY_TO']), errors_to_ignore=["already exists"])
Example #3
def copy_files_to_or_from_pod(component,
                              deployment_target,
                              source_path,
                              dest_path,
                              direction=1):
    """Copy file(s) to or from the given component.

    Args:
        component (string): component label (eg. "postgres")
        deployment_target (string): value from DEPLOYMENT_TARGETS - eg. "gcloud-dev"
        source_path (string): source file path. If copying files to the component, it should be a local path. Otherwise, it should be a file path inside the component pod.
        dest_path (string): destination file path. If copying files from the component, it should be a local path. Otherwise, it should be a file path inside the component pod.
        direction (int): If > 0 the file will be copied to the pod. If < 0, then it will be copied from the pod.
    """
    full_pod_name = get_pod_name(component,
                                 deployment_target=deployment_target)
    if not full_pod_name:
        raise ValueError(
            "No '%(pod_name)s' pods found. Is the kubectl environment configured in this terminal? and has this type of pod been deployed?"
            % locals())

    if direction < 0:  # copy from pod
        source_path = "%s:%s" % (full_pod_name, source_path)
    elif direction > 0:  # copy to pod
        dest_path = "%s:%s" % (full_pod_name, dest_path)

    run("kubectl cp '%(source_path)s' '%(dest_path)s'" % locals())
Example #4
def create_namespace(settings):
    run("kubectl create -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/namespace.yaml"
        % settings,
        errors_to_ignore=["already exists"])

    # switch kubectl to use the new namespace
    run("kubectl config set-context $(kubectl config current-context) --namespace=%(NAMESPACE)s"
        % settings)
Example #5
def _set_elasticsearch_kubernetes_resources():
    has_kube_resource = run('kubectl explain elasticsearch',
                            errors_to_ignore=[
                                "server doesn't have a resource type",
                                "couldn't find resource for"
                            ])
    if not has_kube_resource:
        run('kubectl apply -f deploy/kubernetes/elasticsearch/kubernetes-elasticsearch-all-in-one.yaml'
            )
Example #6
def deploy_kube_scan(settings):
    print_separator("kube-scan")

    if settings["DELETE_BEFORE_DEPLOY"]:
        run("kubectl delete -f https://raw.githubusercontent.com/octarinesec/kube-scan/master/kube-scan.yaml")

        if settings["ONLY_PUSH_TO_REGISTRY"]:
            return

    run("kubectl apply -f https://raw.githubusercontent.com/octarinesec/kube-scan/master/kube-scan.yaml")
Example #7
def troubleshoot_component(component, deployment_target):
    """Runs kubectl command to print detailed debug output for the given component.

    Args:
        component (string): component label (eg. "postgres")
        deployment_target (string): value from DEPLOYMENT_TARGETS - eg. "gcloud-dev"
    """

    pod_name = get_pod_name(component, deployment_target=deployment_target)

    run("kubectl get pods -o yaml %(pod_name)s" % locals(), verbose=True)
Example #8
def deploy_external_connector(settings, connector_name):
    if connector_name not in ["elasticsearch"]:
        raise ValueError("Invalid connector name: %s" % connector_name)

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    print_separator("external-%s-connector" % connector_name)

    run((
        "kubectl apply -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/external-connectors/"
        % settings) + "external-%(connector_name)s.yaml" % locals())
Example #9
def delete_pod(component_label, settings, custom_yaml_filename=None):
    deployment_target = settings["DEPLOY_TO"]

    yaml_filename = custom_yaml_filename or (component_label+".gcloud.yaml")

    if is_pod_running(component_label, deployment_target):
        run(" ".join([
            "kubectl delete",
            "-f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/"+component_label+"/"+yaml_filename,
            ]) % settings, errors_to_ignore=["not found"])

    logger.info("waiting for \"%s\" to exit Running status" % component_label)
    while is_pod_running(component_label, deployment_target):
        time.sleep(5)
Example #10
def set_environment(deployment_target):
    """Configure the shell environment to point to the given deployment_target using 'gcloud config set-context' and other commands.

    Args:
        deployment_target (string): value from DEPLOYMENT_TARGETS - eg. "gcloud-dev", etc.
    """

    settings = collections.OrderedDict()
    load_settings([
        "deploy/kubernetes/shared-settings.yaml",
        "deploy/kubernetes/%(deployment_target)s-settings.yaml" % locals(),
    ], settings)

    if deployment_target.startswith("gcloud"):
        os.environ["KUBECONFIG"] = os.path.expanduser("~/.kube/config")
        run("gcloud config set core/project %(GCLOUD_PROJECT)s" % settings,
            print_command=True)
        run("gcloud config set compute/zone %(GCLOUD_ZONE)s" % settings,
            print_command=True)
        run("gcloud container clusters get-credentials --zone=%(GCLOUD_ZONE)s %(CLUSTER_NAME)s"
            % settings,
            print_command=True)
    else:
        raise ValueError("Unexpected deployment_target value: %s" %
                         (deployment_target, ))

    run("kubectl config set-context $(kubectl config current-context) --namespace=%(NAMESPACE)s"
        % settings)
Example #11
def delete_all(deployment_target):
    """Runs kubectl and gcloud commands to delete the given cluster and all objects in it.

    Args:
        deployment_target (string): value from DEPLOYMENT_TARGETS - eg. "gcloud-dev"

    """
    run('deploy/kubectl_helpers/utils/check_context.sh {}'.format(
        deployment_target.replace('gcloud-', '')))
    settings = {}

    load_settings([
        "deploy/kubernetes/shared-settings.yaml",
        "deploy/kubernetes/%(deployment_target)s-settings.yaml" % locals(),
    ], settings)

    run("gcloud container clusters delete --project %(GCLOUD_PROJECT)s --zone %(GCLOUD_ZONE)s --no-async %(CLUSTER_NAME)s"
        % settings,
        is_interactive=True)
    run('gcloud sql instances delete postgres-{}'.format(
        deployment_target.replace('gcloud-', '')))

    for disk_label in [d.strip() for d in settings['DISKS'].split(',') if d]:
        for disk_name in get_disk_names(disk_label, settings):
            run('gcloud compute disks delete --zone {zone} {disk_name}'.format(
                zone=settings['GCLOUD_ZONE'], disk_name=disk_name),
                is_interactive=True)
Example #12
def _get_resource_info(
        resource_type="pod",
        labels={},
        json_path="{.items[].metadata.name}",
        errors_to_ignore=("array index out of bounds:",),
        verbose=False,
    ):
    """Runs 'kubectl get <resource_type> -l <label1=value1,label2=value2...> -o jsonpath=<json path>' and returns its output.

    Args:
        resource_type (string): "pod", "service", etc.
        labels (dict): (eg. {'name': 'phenotips'})
        json_path (string): a json path query string (for example, "{.items[*].metadata.name}")
        errors_to_ignore (list): error-message substrings that allow a non-zero exit code to be ignored
        verbose (bool): whether to print the command's output

    Returns:
        (string) kubectl command output (eg. "postgres-410765475-1vtkn") or None if the kubectl command returned nothing
    """

    l_arg = "-l {}".format(",".join(["%s=%s" % (key, value) for key, value in labels.items()])) if labels else ""

    output = run(
        "kubectl get %(resource_type)s %(l_arg)s -o jsonpath=%(json_path)s" % locals(),
        errors_to_ignore=errors_to_ignore,
        print_command=False,
        verbose=verbose,
    )

    return output.strip('\n') if output is not None else None
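
get_pod_name(), used throughout these examples, is not shown, but it can be read as a thin wrapper over _get_resource_info(). A sketch, assuming pods are looked up by their "name" label:

def get_pod_name(pod_name, deployment_target=None, pod_number=0):
    # assumed implementation: find pods whose "name" label matches and return the
    # pod_number-th one; deployment_target is accepted for interface parity, and
    # the real helper may use it for additional filtering
    return _get_resource_info(
        resource_type="pod",
        labels={"name": pod_name},
        json_path="{.items[%d].metadata.name}" % pod_number,
    )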
Example #13
def deploy_seqr(settings):
    print_separator("seqr")

    if settings["BUILD_DOCKER_IMAGES"]:
        seqr_git_hash = run("git log -1 --pretty=%h",
                            errors_to_ignore=["Not a git repository"])
        seqr_git_hash = (
            ":" + seqr_git_hash.strip()) if seqr_git_hash is not None else ""

        docker_build("seqr", settings, [
            "--build-arg SEQR_SERVICE_PORT=%s" % settings["SEQR_SERVICE_PORT"],
            "--build-arg SEQR_UI_DEV_PORT=%s" % settings["SEQR_UI_DEV_PORT"],
            "-f deploy/docker/seqr/Dockerfile",
            "-t %(DOCKER_IMAGE_NAME)s" + seqr_git_hash,
        ])

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    restore_seqr_db_from_backup = settings.get("RESTORE_SEQR_DB_FROM_BACKUP")
    reset_db = settings.get("RESET_DB")

    deployment_target = settings["DEPLOY_TO"]
    postgres_pod_name = get_pod_name("postgres",
                                     deployment_target=deployment_target)

    if settings["DELETE_BEFORE_DEPLOY"]:
        delete_pod("seqr", settings)
    elif reset_db or restore_seqr_db_from_backup:
        seqr_pod_name = get_pod_name('seqr',
                                     deployment_target=deployment_target)
        if seqr_pod_name:
            sleep_until_pod_is_running("seqr",
                                       deployment_target=deployment_target)

            run_in_pod(seqr_pod_name,
                       "/usr/local/bin/stop_server.sh",
                       verbose=True)

    if reset_db:
        _drop_seqr_db(postgres_pod_name)
    if restore_seqr_db_from_backup:
        _drop_seqr_db(postgres_pod_name)
        _restore_seqr_db_from_backup(postgres_pod_name,
                                     restore_seqr_db_from_backup)
    else:
        run_in_pod(
            postgres_pod_name,
            "psql -U postgres postgres -c 'create database seqrdb'",
            errors_to_ignore=["already exists"],
            verbose=True,
        )
        run_in_pod(
            postgres_pod_name,
            "psql -U postgres postgres -c 'create database reference_data_db'",
            errors_to_ignore=["already exists"],
            verbose=True,
        )

    deploy_pod("seqr", settings, wait_until_pod_is_ready=True)
Example #14
def docker_build(component_label, settings, custom_build_args=()):
    params = dict(settings)  # make a copy before modifying
    params["COMPONENT_LABEL"] = component_label
    params["DOCKER_IMAGE_NAME"] = "%(DOCKER_IMAGE_PREFIX)s/%(COMPONENT_LABEL)s" % params

    docker_tags = set([
        "",
        ":latest",
        ":%(TIMESTAMP)s" % settings,
    ])

    if settings.get("DOCKER_IMAGE_TAG"):
        docker_tags.add(params["DOCKER_IMAGE_TAG"])
    if component_label == 'elasticsearch' and settings.get(
            'ELASTICSEARCH_VERSION'):
        docker_tags.add("%(DOCKER_IMAGE_TAG)s-%(ELASTICSEARCH_VERSION)s" %
                        settings)

    if not settings["BUILD_DOCKER_IMAGES"]:
        logger.info(
            "Skipping docker build step. Use --build-docker-image to build a new image (and --force to build from the beginning)"
        )
    else:
        docker_build_command = ""
        docker_build_command += "docker build deploy/docker/%(COMPONENT_LABEL)s/ "
        docker_build_command += (" ".join(custom_build_args) + " ")
        if settings["FORCE_BUILD_DOCKER_IMAGES"]:
            docker_build_command += "--no-cache "

        for tag in docker_tags:
            docker_image_name_with_tag = params["DOCKER_IMAGE_NAME"] + tag
            docker_build_command += "-t %(docker_image_name_with_tag)s " % locals()

        run(docker_build_command % params, verbose=True)

    if settings["PUSH_TO_REGISTRY"]:
        for tag in docker_tags:
            docker_image_name_with_tag = params["DOCKER_IMAGE_NAME"] + tag
            docker_push_command = ""
            docker_push_command += "docker push %(docker_image_name_with_tag)s" % locals(
            )
            run(docker_push_command, verbose=True)
            logger.info(
                "==> Finished uploading image: %(docker_image_name_with_tag)s"
                % locals())
Example #15
def _restore_seqr_db_from_backup(postgres_pod_name, seqrdb_backup, reference_data_backup=None):
    run_in_pod(postgres_pod_name, "psql -U postgres postgres -c 'create database seqrdb'", verbose=True)
    run_in_pod(postgres_pod_name, "psql -U postgres postgres -c 'create database reference_data_db'", verbose=True)
    run("kubectl cp '{backup}' {postgres_pod_name}:/root/$(basename {backup})".format(
        postgres_pod_name=postgres_pod_name, backup=seqrdb_backup), verbose=True)
    run_in_pod(
        postgres_pod_name, "/root/restore_database_backup.sh postgres seqrdb /root/$(basename {backup})".format(
            backup=seqrdb_backup), verbose=True)
    run_in_pod(postgres_pod_name, "rm /root/$(basename {backup})".format(backup=seqrdb_backup, verbose=True))

    if reference_data_backup:
        run("kubectl cp '{backup}' {postgres_pod_name}:/root/$(basename {backup})".format(
            postgres_pod_name=postgres_pod_name, backup=reference_data_backup), verbose=True)
        run_in_pod(
            postgres_pod_name, "/root/restore_database_backup.sh postgres reference_data_db /root/$(basename {backup})".format(
                backup=reference_data_backup), verbose=True)
        run_in_pod(postgres_pod_name, "rm /root/$(basename {backup})".format(backup=reference_data_backup, verbose=True))
Example #16
def deploy_pod(component_label, settings, wait_until_pod_is_running=True, wait_until_pod_is_ready=False):
    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    if settings["DELETE_BEFORE_DEPLOY"]:
        delete_pod(component_label, settings)

    run(" ".join([
        "kubectl apply",
        "-f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/"+component_label+"/"+component_label+".gcloud.yaml"
    ]) % settings)

    if wait_until_pod_is_running:
        sleep_until_pod_is_running(component_label, deployment_target=settings["DEPLOY_TO"])

    if wait_until_pod_is_ready:
        sleep_until_pod_is_ready(component_label, deployment_target=settings["DEPLOY_TO"])
Example #17
def deploy(deployment_target,
           components,
           output_dir=None,
           runtime_settings={}):
    """Deploy one or more components to the kubernetes cluster specified as the deployment_target.

    Args:
        deployment_target (string): value from DEPLOYMENT_ENVS - eg. "gcloud-dev"
            identifying which cluster to deploy these components to
        components (list): The list of component names to deploy (eg. "postgres", "redis" - each string must be in
            constants.DEPLOYABLE_COMPONENTS). Order doesn't matter.
        output_dir (string): path of directory where to put deployment logs and rendered config files
        runtime_settings (dict): a dictionary of other key-value pairs that override settings file(s) values.
    """
    if not components:
        raise ValueError("components list is empty")

    if components and "init-cluster" not in components:
        run('deploy/kubectl_helpers/utils/check_context.sh {}'.format(
            deployment_target.replace('gcloud-', '')))

    settings = prepare_settings_for_deployment(deployment_target, output_dir,
                                               runtime_settings)

    # make sure namespace exists
    if "init-cluster" not in components and not runtime_settings.get(
            "ONLY_PUSH_TO_REGISTRY"):
        create_namespace(settings)

    if components[0] == 'secrets':
        deploy_secrets(settings, components=components[1:])
        return

    # call deploy_* functions for each component in "components" list, in the order that these components are listed in DEPLOYABLE_COMPONENTS
    for component in DEPLOYABLE_COMPONENTS:
        if component in components:
            # only deploy requested components
            func_name = "deploy_" + component.replace("-", "_")
            f = globals().get(func_name)
            if f is not None:
                f(settings)
            else:
                raise ValueError(
                    "'{}' function not found. Is '{}' a valid component name?"
                    .format(func_name, component))
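
The loop above requires a deploy_<component>() function for every entry in DEPLOYABLE_COMPONENTS, and that list fixes the deployment order. Its actual contents are not shown here; a hypothetical ordering assembled from the functions on this page:

DEPLOYABLE_COMPONENTS = [  # hypothetical ordering, for illustration only
    'init-cluster', 'settings', 'secrets', 'linkerd', 'nginx',
    'elasticsearch', 'kibana', 'seqr', 'kube-scan',
]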
Example #18
def deploy_elasticsearch(settings):
    print_separator("elasticsearch")

    docker_build("elasticsearch", settings, ["--build-arg ELASTICSEARCH_SERVICE_PORT=%s" % settings["ELASTICSEARCH_SERVICE_PORT"]])

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    _set_elasticsearch_kubernetes_resources()

    # create persistent volumes
    pv_template_path = 'deploy/kubernetes/elasticsearch/persistent-volumes/es-data.yaml'
    disk_names = get_disk_names('es-data', settings)
    for disk_name in disk_names:
        volume_settings = {'DISK_NAME': disk_name}
        volume_settings.update(settings)
        _process_templates(volume_settings, [pv_template_path])
        run('kubectl create -f {}/{}'.format(settings['DEPLOYMENT_TEMP_DIR'], pv_template_path),
            print_command=True, errors_to_ignore=['already exists'])

    deploy_pod("elasticsearch", settings, wait_until_pod_is_running=False)

    wait_for_not_resource(
        'elasticsearch', resource_type='elasticsearch', json_path='{.items[0].status.phase}', invalid_status='Invalid',
        deployment_target=settings["DEPLOY_TO"], verbose_template='elasticsearch status')

    total_pods = 0
    for num_pods in ['ES_DATA_NUM_PODS', 'ES_CLIENT_NUM_PODS', 'ES_MASTER_NUM_PODS', 'ES_LOADING_NUM_PODS']:
        total_pods += settings.get(num_pods, 0)
    for pod_number_i in range(total_pods):
        sleep_until_pod_is_running('elasticsearch', deployment_target=settings["DEPLOY_TO"], pod_number=pod_number_i)
    for pod_number_i in range(total_pods):
        sleep_until_pod_is_ready('elasticsearch', deployment_target=settings["DEPLOY_TO"], pod_number=pod_number_i)

    wait_for_resource(
        'elasticsearch', resource_type='elasticsearch', json_path='{.items[0].status.phase}', expected_status='Ready',
        deployment_target=settings["DEPLOY_TO"], verbose_template='elasticsearch status')

    wait_for_resource(
        'elasticsearch', resource_type='elasticsearch', json_path='{.items[0].status.health}', expected_status='green',
        deployment_target=settings["DEPLOY_TO"], verbose_template='elasticsearch health')
Example #19
def create_vpc(gcloud_project, network_name):
    run(
        " ".join([
            #"gcloud compute networks create seqr-project-custom-vpc --project=%(GCLOUD_PROJECT)s --mode=custom"
            "gcloud compute networks create %(network_name)s",
            "--project=%(gcloud_project)s",
            "--subnet-mode=auto"
        ]) % locals(),
        errors_to_ignore=["already exists"])

    # add recommended firewall rules to enable ssh, etc.
    run(" ".join([
        "gcloud compute firewall-rules create custom-vpc-allow-tcp-udp-icmp",
        "--project %(gcloud_project)s",
        "--network %(network_name)s",
        "--allow tcp,udp,icmp",
        "--source-ranges 10.0.0.0/8",
    ]) % locals(),
        errors_to_ignore=["already exists"])

    run(" ".join([
        "gcloud compute firewall-rules create custom-vpc-allow-ports",
        "--project %(gcloud_project)s",
        "--network %(network_name)s",
        "--allow tcp:22,tcp:3389,icmp",
        "--source-ranges 10.0.0.0/8",
    ]) % locals(),
        errors_to_ignore=["already exists"])
Example #20
def deploy_init_cluster(settings):
    """Provisions a GKE cluster, persistent disks, and any other prerequisites for deployment."""

    print_separator("init-cluster")

    # initialize the VM
    _init_cluster_gcloud(settings)

    node_name = get_node_name()
    if not node_name:
        raise Exception("Unable to retrieve node name. Was the cluster created successfully?")

    set_environment(settings["DEPLOY_TO"])

    create_namespace(settings)

    # create priority classes - "Priority affects scheduling order of Pods and out-of-resource eviction ordering
    # on the Node... A PriorityClass is a non-namespaced object... The higher the value, the higher the priority."
    # (from https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)
    run("kubectl create priorityclass medium-priority --value=1000", errors_to_ignore=["already exists"])
    run("kubectl create priorityclass high-priority --value=10000", errors_to_ignore=["already exists"])

    # print cluster info
    run("kubectl cluster-info", verbose=True)

    # wait for the cluster to initialize
    for retry_i in range(1, 5):
        try:
            deploy_settings(settings)
            break
        except RuntimeError as e:
            logger.error(("Error when deploying config maps: %(e)s. This sometimes happens while the cluster is "
                          "initializing. Retrying...") % locals())
            time.sleep(5)
Example #21
def _init_gcloud_disks(settings):
    for disk_label in [d.strip() for d in settings['DISKS'].split(',') if d]:
        setting_prefix = disk_label.upper().replace('-', '_')

        disk_names = get_disk_names(disk_label, settings)

        snapshots = [
            d.strip() for d in settings.get(
                '{}_SNAPSHOTS'.format(setting_prefix), '').split(',') if d
        ]
        if snapshots and len(snapshots) != len(disk_names):
            raise Exception(
                'Invalid configuration for {}: {} disks to create and {} snapshots'
                .format(disk_label, len(disk_names), len(snapshots)))

        for i, disk_name in enumerate(disk_names):
            command = [
                'gcloud compute disks create',
                disk_name,
                '--zone',
                settings['GCLOUD_ZONE'],
            ]
            if settings.get('{}_DISK_TYPE'.format(setting_prefix)):
                command += [
                    '--type', settings['{}_DISK_TYPE'.format(setting_prefix)]
                ]
            if snapshots:
                command += ['--source-snapshot', snapshots[i]]
            else:
                command += [
                    '--size',
                    str(settings['{}_DISK_SIZE'.format(setting_prefix)])
                ]

            run(' '.join(command),
                verbose=True,
                errors_to_ignore=['lready exists'])  # substring matches both "already exists" and "Already exists"
Example #22
def deploy_nginx(settings):
    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    print_separator("nginx")
    run("kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.41.2/deploy/static/provider/cloud/deploy.yaml" % locals())
    if settings["DELETE_BEFORE_DEPLOY"]:
        run("kubectl delete -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/nginx/nginx.yaml" % settings, errors_to_ignore=["not found"])
    run("kubectl apply -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/nginx/nginx.yaml" % settings)
Example #23
def deploy_elasticsearch_sharded(settings, component):
    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    print_separator(component)

    if component == "es-master":
        config_files = [
            "%(DEPLOYMENT_TEMP_DIR)s/hail_elasticsearch_pipelines/kubernetes/elasticsearch-sharded/es-discovery-svc.yaml",
            "%(DEPLOYMENT_TEMP_DIR)s/hail_elasticsearch_pipelines/kubernetes/elasticsearch-sharded/es-master.yaml",
        ]
    elif component == "es-client":
        config_files = [
            "%(DEPLOYMENT_TEMP_DIR)s/hail_elasticsearch_pipelines/kubernetes/elasticsearch-sharded/es-svc.yaml",
            "%(DEPLOYMENT_TEMP_DIR)s/hail_elasticsearch_pipelines/kubernetes/elasticsearch-sharded/es-client.yaml",
        ]
    elif component == "es-data":
        config_files = [
            "%(DEPLOYMENT_TEMP_DIR)s/hail_elasticsearch_pipelines/kubernetes/elasticsearch-sharded/es-data-svc.yaml",
            "%(DEPLOYMENT_TEMP_DIR)s/hail_elasticsearch_pipelines/kubernetes/elasticsearch-sharded/es-data-stateful.yaml",
        ]
    elif component == "es-kibana":
        config_files = [
            "%(DEPLOYMENT_TEMP_DIR)s/hail_elasticsearch_pipelines/kubernetes/elasticsearch-sharded/es-kibana.yaml",
        ]
    elif component == "kibana":
        config_files = [
            "%(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/kibana/kibana.%(DEPLOY_TO_PREFIX)s.yaml",
        ]
    else:
        raise ValueError("Unexpected component: " + component)

    if settings["DELETE_BEFORE_DEPLOY"]:
        for config_file in config_files:
            run("kubectl delete -f " + config_file % settings,
                errors_to_ignore=["not found"])

    for config_file in config_files:
        run("kubectl apply -f " + config_file % settings)

    if component in ["es-client", "es-master", "es-data", "es-kibana"]:
        # wait until all replicas are running
        num_pods = int(
            settings.get(component.replace("-", "_").upper() + "_NUM_PODS", 1))
        for pod_number_i in range(num_pods):
            sleep_until_pod_is_running(component,
                                       deployment_target=settings["DEPLOY_TO"],
                                       pod_number=pod_number_i)

    if component == "es-client":
        run("kubectl describe svc elasticsearch")
Example #24
def deploy_linkerd(settings):
    print_separator('linkerd')

    version_match = run("linkerd version | awk '/Client/ {print $3}'")
    if version_match.strip() != settings["LINKERD_VERSION"]:
        raise Exception("Your locally installed linkerd version does not match %s. "
                        "Download the correct version from https://github.com/linkerd/linkerd2/releases/tag/%s" % \
                        (settings['LINKERD_VERSION'], settings['LINKERD_VERSION']))

    has_namespace = run('kubectl get namespace linkerd', errors_to_ignore=['namespaces "linkerd" not found'])
    if not has_namespace:
        run('linkerd install | kubectl apply -f -')

        run('linkerd check')
Example #25
def deploy_seqr(settings):
    print_separator("seqr")

    if settings["BUILD_DOCKER_IMAGES"]:
        seqr_git_hash = run("git log -1 --pretty=%h",
                            errors_to_ignore=["Not a git repository"])
        seqr_git_hash = (
            ":" + seqr_git_hash.strip()) if seqr_git_hash is not None else ""

        docker_build("seqr", settings, [
            "--build-arg SEQR_SERVICE_PORT=%s" % settings["SEQR_SERVICE_PORT"],
            "-f deploy/docker/seqr/Dockerfile",
            "-t %(DOCKER_IMAGE_NAME)s" + seqr_git_hash,
        ])

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    if settings["DELETE_BEFORE_DEPLOY"]:
        delete_pod("seqr", settings)

    deploy_pod("seqr", settings, wait_until_pod_is_ready=True)
Example #26
def deploy_elasticsearch_snapshot_infra(settings):
    print_separator('elasticsearch snapshot infra')

    if settings['ES_CONFIGURE_SNAPSHOTS']:
        # create the bucket
        run("gsutil mb -p seqr-project -c STANDARD -l US-CENTRAL1 gs://%(ES_SNAPSHOTS_BUCKET)s" % settings,
            errors_to_ignore=["already exists"])
        # create the IAM user
        run(" ".join([
            "gcloud iam service-accounts create %(ES_SNAPSHOTS_ACCOUNT_NAME)s",
            "--display-name %(ES_SNAPSHOTS_ACCOUNT_NAME)s"]) % settings,
            errors_to_ignore="already exists within project projects/seqr-project")
        # grant storage admin permissions on the snapshot bucket
        run(" ".join([
            "gsutil iam ch",
            "serviceAccount:%(ES_SNAPSHOTS_ACCOUNT_NAME)[email protected]:roles/storage.admin",
            "gs://%(ES_SNAPSHOTS_BUCKET)s"]) % settings)
Example #27
def deploy_elasticsearch_snapshot_config(settings):
    print_separator('elasticsearch snapshot configuration')

    docker_build("curl", settings)

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    if settings['ES_CONFIGURE_SNAPSHOTS']:
        # run the k8s job to set up the repo
        run('kubectl apply -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/elasticsearch/configure-snapshot-repo.yaml' % settings)
        wait_for_resource(
            'configure-es-snapshot-repo', resource_type='job', json_path='{.items[0].status.conditions[0].type}',
            expected_status='Complete')
        # clean up the job after completion
        run('kubectl delete -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/elasticsearch/configure-snapshot-repo.yaml' % settings)
        # Set up the monthly cron job
        run('kubectl apply -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/elasticsearch/snapshot-cronjob.yaml' % settings)
Example #28
def deploy_settings(settings):
    """Deploy settings as a config map"""
    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    # write out a ConfigMap file
    configmap_file_path = os.path.join(settings["DEPLOYMENT_TEMP_DIR"], "deploy/kubernetes/all-settings.properties")
    with open(configmap_file_path, "w") as f:
        for key, value in settings.items():
            if value is None:
                continue

            f.write('%s=%s\n' % (key, value))

    create_namespace(settings)

    run("kubectl delete configmap all-settings", errors_to_ignore=["not found"])
    run("kubectl create configmap all-settings --from-file=%(configmap_file_path)s" % locals())
    run("kubectl get configmaps all-settings -o yaml")
Example #29
def delete_component(component, deployment_target=None):
    """Runs kubectl commands to delete any running deployment, service, or pod objects for the given component(s).

    Args:
        component (string): component to delete (eg. 'postgres' or 'nginx').
        deployment_target (string): value from DEPLOYMENT_TARGETS - eg. "gcloud-dev"
    """
    if component == "cockpit":
        run("kubectl delete rc cockpit", errors_to_ignore=["not found"])
    elif component == 'elasticsearch':
        run('kubectl delete elasticsearch elasticsearch',
            errors_to_ignore=['not found'])
        # Deleting a released persistent volume does not delete the data on the underlying disk
        wait_for_resource(component,
                          '{.items[0].status.phase}',
                          'Released',
                          deployment_target=deployment_target,
                          resource_type='pv')
        pv = get_resource_name(component,
                               resource_type='pv',
                               deployment_target=deployment_target)
        while pv:
            run('kubectl delete pv {}'.format(pv))
            pv = get_resource_name(component,
                                   resource_type='pv',
                                   deployment_target=deployment_target)
    elif component == 'kibana':
        run('kubectl delete kibana kibana', errors_to_ignore=['not found'])
    elif component == "nginx":
        raise ValueError("TODO: implement deleting nginx")

    run("kubectl delete deployments %(component)s" % locals(),
        errors_to_ignore=["not found"])
    run("kubectl delete services %(component)s" % locals(),
        errors_to_ignore=["not found"])

    pod_name = get_pod_name(component, deployment_target=deployment_target)
    if pod_name:
        run("kubectl delete pods %(pod_name)s" % locals(),
            errors_to_ignore=["not found"])

        logger.info("waiting for \"%s\" to exit Running status" % component)
        while is_pod_running(component, deployment_target):
            time.sleep(5)

    # print services and pods status
    run("kubectl get services" % locals(), verbose=True)
    run("kubectl get pods" % locals(), verbose=True)
Example #30
def show_dashboard():
    """Opens the kubernetes dashboard in a new browser window."""

    p = run_in_background('kubectl proxy')
    run('open http://localhost:8001/ui')
    p.wait()
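
run_in_background() is assumed to start a long-running process without waiting for it to finish; a minimal sketch using subprocess (the actual helper may differ):

import subprocess

def run_in_background(command):
    # launch the command through a shell and return the Popen handle immediately
    return subprocess.Popen(command, shell=True)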