Example #1
def deploy_kibana(settings):
    print_separator("kibana")

    docker_build("kibana", settings)

    _set_elasticsearch_kubernetes_resources()

    deploy_pod("kibana", settings, wait_until_pod_is_ready=True)

    wait_for_resource(
        'kibana', resource_type='kibana', json_path='{.items[0].status.health}', expected_status='green',
        deployment_target=settings["DEPLOY_TO"], verbose_template='kibana health')
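
These deployment steps all rely on a wait_for_resource helper whose definition is not shown in the examples. Below is a minimal sketch of such a polling loop, assuming it shells out to kubectl, reads a value with the given JSONPath expression, and compares it against the expected status; the label selector, the poll interval, and the reuse of the surrounding module's run and logger helpers are assumptions, not the project's actual implementation.

import time

def wait_for_resource(component, json_path, expected_status, deployment_target=None,
                      resource_type='pod', verbose_template=None, poll_interval_seconds=5):
    """Hypothetical sketch: polls `kubectl get` until json_path evaluates to expected_status."""
    while True:
        # Assumes resources are labeled with the component name; deployment_target is accepted
        # for signature parity and would presumably select the kubectl context or namespace.
        status = run(
            "kubectl get {resource_type} -l name={component} -o jsonpath='{json_path}'".format(
                resource_type=resource_type, component=component, json_path=json_path),
            errors_to_ignore=['not found'])
        if status and status.strip() == expected_status:
            break
        if verbose_template:
            logger.info('waiting for {}: current value {}'.format(verbose_template, status))
        time.sleep(poll_interval_seconds)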
Example #2
def delete_component(component, deployment_target=None):
    """Runs kubectl commands to delete any running deployment, service, or pod objects for the given component(s).

    Args:
        component (string): component to delete (eg. 'postgres' or 'nginx').
        deployment_target (string): value from DEPLOYMENT_TARGETS - eg. "gcloud-dev"
    """
    pod_name = run('deploy/kubectl_helpers/utils/get_pod_name.sh {} {}'.format(
        deployment_target.replace('gcloud-', ''), component))

    if component == "cockpit":
        run("kubectl delete rc cockpit", errors_to_ignore=["not found"])
    elif component == 'elasticsearch':
        run('kubectl delete elasticsearch elasticsearch',
            errors_to_ignore=['not found'])
        # Deleting a released persistent volume does not delete the data on the underlying disk
        wait_for_resource(component,
                          '{.items[0].status.phase}',
                          'Released',
                          deployment_target=deployment_target,
                          resource_type='pv')
        pv = get_resource_name(component,
                               resource_type='pv',
                               deployment_target=deployment_target)
        while pv:
            run('kubectl delete pv {}'.format(pv))
            pv = get_resource_name(component,
                                   resource_type='pv',
                                   deployment_target=deployment_target)
    elif component == 'kibana':
        run('kubectl delete kibana kibana', errors_to_ignore=['not found'])
    elif component == 'postgres':
        run('gcloud sql instances delete postgres-{}'.format(
            deployment_target.replace('gcloud-', '')))
    elif component == "nginx":
        raise ValueError("TODO: implement deleting nginx")

    run("kubectl delete deployments %(component)s" % locals(),
        errors_to_ignore=["not found"])
    run("kubectl delete services %(component)s" % locals(),
        errors_to_ignore=["not found"])

    if pod_name:
        run("kubectl delete pods %(pod_name)s" % locals(),
            errors_to_ignore=["not found"])

        logger.info("waiting for \"%s\" to exit Running status" % component)
        while is_pod_running(component, deployment_target):
            time.sleep(5)

    # print services and pods status
    run("kubectl get services" % locals(), verbose=True)
    run("kubectl get pods" % locals(), verbose=True)
Example #3
def deploy_elasticsearch_snapshot_config(settings):
    print_separator('elasticsearch snapshot configuration')

    docker_build("curl", settings)

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    if settings['ES_CONFIGURE_SNAPSHOTS']:
        # run the k8s job to set up the repo
        run('kubectl apply -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/elasticsearch/configure-snapshot-repo.yaml' % settings)
        wait_for_resource(
            'configure-es-snapshot-repo', resource_type='job', json_path='{.items[0].status.conditions[0].type}',
            expected_status='Complete')
        # clean up the job after completion
        run('kubectl delete -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/elasticsearch/configure-snapshot-repo.yaml' % settings)
        # Set up the monthly cron job
        run('kubectl apply -f %(DEPLOYMENT_TEMP_DIR)s/deploy/kubernetes/elasticsearch/snapshot-cronjob.yaml' % settings)
Example #4
def deploy_elasticsearch(settings):
    print_separator("elasticsearch")

    docker_build("elasticsearch", settings, ["--build-arg ELASTICSEARCH_SERVICE_PORT=%s" % settings["ELASTICSEARCH_SERVICE_PORT"]])

    if settings["ONLY_PUSH_TO_REGISTRY"]:
        return

    _set_elasticsearch_kubernetes_resources()

    # create persistent volumes
    pv_template_path = 'deploy/kubernetes/elasticsearch/persistent-volumes/es-data.yaml'
    disk_names = get_disk_names('es-data', settings)
    for disk_name in disk_names:
        volume_settings = {'DISK_NAME': disk_name}
        volume_settings.update(settings)
        _process_templates(volume_settings, [pv_template_path])
        run('kubectl create -f {}/{}'.format(settings['DEPLOYMENT_TEMP_DIR'], pv_template_path),
            print_command=True, errors_to_ignore=['already exists'])

    deploy_pod("elasticsearch", settings, wait_until_pod_is_running=False)

    wait_for_not_resource(
        'elasticsearch', resource_type='elasticsearch', json_path='{.items[0].status.phase}', invalid_status='Invalid',
        deployment_target=settings["DEPLOY_TO"], verbose_template='elasticsearch status')

    total_pods = 0
    for num_pods in ['ES_DATA_NUM_PODS', 'ES_CLIENT_NUM_PODS', 'ES_MASTER_NUM_PODS', 'ES_LOADING_NUM_PODS']:
        total_pods += settings.get(num_pods, 0)
    for pod_number_i in range(total_pods):
        sleep_until_pod_is_running('elasticsearch', deployment_target=settings["DEPLOY_TO"], pod_number=pod_number_i)
    for pod_number_i in range(total_pods):
        sleep_until_pod_is_ready('elasticsearch', deployment_target=settings["DEPLOY_TO"], pod_number=pod_number_i)

    wait_for_resource(
        'elasticsearch', resource_type='elasticsearch', json_path='{.items[0].status.phase}', expected_status='Ready',
        deployment_target=settings["DEPLOY_TO"], verbose_template='elasticsearch status')

    wait_for_resource(
        'elasticsearch', resource_type='elasticsearch', json_path='{.items[0].status.health}', expected_status='green',
        deployment_target=settings["DEPLOY_TO"], verbose_template='elasticsearch health')
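
Example #4 also waits on numbered pods with sleep_until_pod_is_running and sleep_until_pod_is_ready. A minimal sketch of the readiness variant, assuming it indexes into the pod list by pod_number and checks the container ready flag; the JSONPath, label selector, and poll interval are illustrative assumptions rather than the project's actual helper.

import time

def sleep_until_pod_is_ready(component, deployment_target=None, pod_number=0, poll_interval_seconds=5):
    """Hypothetical sketch: blocks until the pod_number-th pod for the component reports ready."""
    json_path = '{{.items[{}].status.containerStatuses[0].ready}}'.format(pod_number)
    while True:
        ready = run(
            "kubectl get pods -l name={} -o jsonpath='{}'".format(component, json_path),
            errors_to_ignore=['not found'])
        if ready and ready.strip() == 'true':
            return
        time.sleep(poll_interval_seconds)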