Esempio n. 1
0
def run_on_kubernetes(args):
    """Render daemon.yaml for the current cluster and apply it.

    Fills the template's {image}, {namespace} and {pull_policy} placeholders,
    writes the result to a temporary file, and hands that file to
    util.update_daemonset (which invokes kubectl by filename).
    """
    # NOTE(review): dropped the unused `context = util.get_cluster_prefix()`
    # assignment; confirm the getter has no required side effects.
    namespace = util.get_current_namespace()
    tag = util.get_tag(args, NAME, build)
    # 'with' closes the template handle deterministically (the original
    # open(...).read() relied on garbage collection to close it).
    with open('daemon.yaml') as f:
        t = f.read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image       = tag,
                           namespace   = namespace,  # reuse the value computed above
                           pull_policy = util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_daemonset(tmp.name)
Esempio n. 2
0
def run_on_kubernetes(args):
    """Render storage-daemon.yaml and update the daemonset on the cluster.

    Forces a gcloud-style tag (args.local = False), fills in the template
    placeholders, and applies the rendered YAML via util.update_daemonset.
    """
    # NOTE(review): dropped the unused `context = util.get_kube_context()`
    # assignment; confirm the getter has no required side effects.
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Close the template handle deterministically instead of open(...).read().
    with open('storage-daemon.yaml') as f:
        t = f.read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image        = tag,
                           namespace    = namespace,  # reuse the value computed above
                           pull_policy  = util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_daemonset(tmp.name)
Esempio n. 3
0
def run_on_kubernetes(args):
    """Render storage-daemon.yaml and update the daemonset.

    Ensures the rethinkdb-password secret exists (even blank) first, so the
    daemon can start with reduced functionality when no password is set.
    """
    # NOTE(review): dropped the unused `context = util.get_cluster_prefix()`
    # assignment; confirm the getter has no required side effects.
    namespace = util.get_current_namespace()
    tag = util.get_tag(args, NAME, build)
    # ensure there is a rethinkdb secret, even if blank, so that daemon will
    # start with reduced functionality
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    # Close the template handle deterministically instead of open(...).read().
    with open('storage-daemon.yaml') as f:
        t = f.read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(
            t.format(image=tag,
                     namespace=namespace,  # reuse the value computed above
                     pull_policy=util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_daemonset(tmp.name)
Esempio n. 4
0
def run_on_kubernetes(args):
    """Render storage-daemon.yaml with a gcloud tag and update the daemonset.

    Ensures the rethinkdb-password secret exists (even blank) so the daemon
    can start with reduced functionality when no password is configured.
    """
    # NOTE(review): dropped the unused `context = util.get_cluster_prefix()`
    # assignment; confirm the getter has no required side effects.
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # ensure there is a rethinkdb secret, even if blank, so that daemon will
    # start with reduced functionality
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    # Close the template handle deterministically instead of open(...).read().
    with open('storage-daemon.yaml') as f:
        t = f.read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image        = tag,
                           namespace    = namespace,  # reuse the value computed above
                           pull_policy  = util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_daemonset(tmp.name)
Esempio n. 5
0
def run_on_kubernetes(args):
    """Deploy one node per requested number, creating disks as needed.

    When args.number is empty, one node per existing persistent disk is
    assumed (at least node 0). For each node, ensures its persistent disk
    exists, renders the per-node template, and updates the deployment.
    """
    context = util.get_kube_context()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(
            max(1, len(get_persistent_disks(context, namespace))))
    ensure_services_exist()
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Read the template once, outside the loop; 'with' closes the handle
    # deterministically (the original open(...).read() leaked it to GC).
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size,
                                      args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(
                t.format(image=tag,
                         number=number,
                         pd_name=pd_name(context=context,
                                         namespace=namespace,
                                         number=number),
                         health_delay=args.health_delay,
                         pull_policy=util.pull_policy(args)))
            tmp.flush()  # ensure content is on disk before kubectl reads the file
            util.update_deployment(tmp.name)
Esempio n. 6
0
def run_on_kubernetes(args):
    """Deploy one node per requested number with sized resource requests.

    In test mode the CPU/memory requests are scaled down. When args.number
    is empty, one node per existing persistent disk is assumed (at least
    node 0). Each node's disk is ensured before its deployment is updated.
    """
    if args.test:
        cpu_request = '10m'
        memory_request = '200Mi'
    else:
        cpu_request = '500m'
        memory_request = '2Gi'

    context = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(max(1, len(get_persistent_disks(context, namespace))))
    ensure_services_exist()
    util.ensure_secret_exists('rethinkdb-password', 'rethinkdb')
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Read the template once, outside the loop; 'with' closes the handle.
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size, args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(t.format(image          = tag,
                               number         = number,
                               pd_name        = pd_name(context=context, namespace=namespace, number=number),
                               health_delay   = args.health_delay,
                               cpu_request    = cpu_request,
                               memory_request = memory_request,
                               pull_policy    = util.pull_policy(args)))
            tmp.flush()  # ensure content is on disk before kubectl reads the file
            util.update_deployment(tmp.name)
Esempio n. 7
0
def run_on_kubernetes(args):
    """Deploy one storage node per requested number, backed by gcloud.

    Creates the gcloud secret and the storage-projects service if missing,
    ensures each node's persistent disk exists, renders the per-node
    template and updates the deployment.
    """
    create_gcloud_secret()
    context   = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(max(1, len(get_persistent_disks(context, namespace))))
    if 'storage-projects' not in util.get_services():
        util.run(['kubectl', 'create', '-f', 'conf/service.yaml'])
    args.local = False  # so tag is for gcloud

    tag = util.get_tag(args, NAME, build)
    if not args.tag:
        tag = tag[:tag.rfind('-')]   # get rid of the final -[service] part of the tag.

    # Read the template once, outside the loop; 'with' closes the handle.
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()

    ensure_ssh()
    # NOTE(review): dropped the unused `deployment_name = ...` assignment the
    # original computed each iteration; it was never referenced.
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size, args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(t.format(image         = tag,
                               number        = number,
                               gcloud_bucket = gcloud_bucket(namespace=namespace),
                               pd_name       = pd_name(context=context, namespace=namespace, number=number),
                               health_delay  = args.health_delay,
                               pull_policy   = util.pull_policy(args)))
            tmp.flush()  # ensure content is on disk before kubectl reads the file
            util.update_deployment(tmp.name)
Esempio n. 8
0
def run_on_kubernetes(args):
    """Render the project template with resources/preemptible settings and deploy.

    The resources dict is serialized through yaml.dump and collapsed onto a
    single line in YAML flow style ('{k: v,k: v}') so it can be substituted
    into the template via str.format without breaking YAML indentation.
    """
    validate_project_ids(args)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()

    resources = {
        'requests': {
            'memory': "40Mi",
            'cpu': '5m'
        },
        'limits': {
            'memory': "1000Mi",
            'cpu': "1000m"
        }
    }
    # Join the dumped lines with commas, drop the trailing separator ([:-1]),
    # and wrap in braces to form a one-line flow mapping.
    resources = '{' + yaml.dump(resources).replace('\n', ',')[:-1] + '}'

    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(
            t.format(image=tag,
                     project_id=args.project_id,
                     namespace=util.get_current_namespace(),
                     storage_server=args.storage_server,
                     disk_size=args.disk_size,
                     resources=resources,
                     preemptible='true' if args.preemptible else 'false',
                     pull_policy=util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
Esempio n. 9
0
def config():
    """Render the datadog template and yield the temporary file's path.

    Raises:
        Exception: if no API key file exists at datadog_fn.

    The temp file is deleted when the generator resumes past the yield (the
    NamedTemporaryFile context exits), so callers must consume the path
    before advancing the generator.
    """
    # 'with' closes each file handle deterministically (the original
    # open(...).read() calls relied on garbage collection).
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()
    if not os.path.exists(datadog_fn):
        raise Exception('No datadog API key stored in "%s"' % datadog_fn)
    with open(datadog_fn) as f:
        API_KEY = f.read().strip()
    NS = util.get_current_namespace()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(API_KEY=API_KEY, namespace=NS))
        tmp.flush()
        # report back the temp filename
        yield tmp.name
Esempio n. 10
0
def run_on_kubernetes(args):
    """Render the project template and update its deployment.

    Validates the project ids, forces a gcloud-style tag, fills in the
    template placeholders, and applies the result via util.update_deployment.
    """
    validate_project_ids(args)
    # NOTE(review): dropped the unused `context = util.get_cluster_prefix()`
    # assignment; confirm the getter has no required side effects.
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image          = tag,
                           project_id     = args.project_id,
                           namespace      = namespace,
                           storage_server = args.storage_server,
                           disk_size      = args.disk_size,
                           pull_policy    = util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
Esempio n. 11
0
def delete(args):
    """Stop the requested node deployments, optionally deleting their disks.

    Obliterating disks requires naming the nodes explicitly and passing the
    confirmation token --obliterate-disk=<cluster-prefix>-<namespace>.
    """
    if not args.number:
        # Refuse to wipe disks on an implicit "all nodes" selection.
        if args.obliterate_disk:
            raise ValueError("you must explicitly specify the nodes when using --obliterate-disk")
        args.number = all_node_numbers()
    for n in args.number:
        util.stop_deployment("{name}{number}".format(name=NAME, number=n))
    if args.obliterate_disk and args.number:
        prefix = util.get_cluster_prefix()
        ns = util.get_current_namespace()
        expected = "%s-%s" % (prefix, ns)
        # Guard clause: reject a mismatched confirmation token before deleting.
        if args.obliterate_disk != expected:
            raise ValueError("to obliterate the disk you must do --obliterate-disk=%s" % expected)
        delete_persistent_disks(prefix, ns, args.number)
Esempio n. 12
0
def run_on_kubernetes(args):
    """Render hub.template.yaml with sized resources and deploy the hub.

    Resource requests are scaled down for test runs (either --test or the
    'test' namespace). Ensures the sendgrid/zendesk secrets exist, fills in
    one image tag per entry of IMAGES plus the rethinkdb-proxy image, and
    exposes the deployment as a service if not already exposed.
    """
    if args.test or util.get_current_namespace() == 'test':
        rethink_cpu_request = hub_cpu_request = proxy_cpu_request = '10m'
        rethink_memory_request = hub_memory_request = proxy_memory_request = '200Mi'
    else:
        hub_cpu_request = '500m'
        hub_memory_request = '1Gi'
        proxy_cpu_request = '200m'
        proxy_memory_request = '500Mi'
        rethink_cpu_request = '500m'
        rethink_memory_request = '2Gi'

    util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
    util.ensure_secret_exists('zendesk-api-key', 'zendesk')
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)

    opts = {
        'replicas': args.replicas,
        'pull_policy': util.pull_policy(args),
        'min_read_seconds': args.gentle,
        'smc_db_pool': args.database_pool_size,
        'smc_db_concurrent_warn': args.database_concurrent_warn,
        'hub_cpu_request': hub_cpu_request,
        'hub_memory_request': hub_memory_request,
        'proxy_cpu_request': proxy_cpu_request,
        'proxy_memory_request': proxy_memory_request,
        'rethink_cpu_request': rethink_cpu_request,
        'rethink_memory_request': rethink_memory_request
    }
    # One image_<name> template key per image in IMAGES.
    for image in IMAGES:
        opts['image_{image}'.format(image=image)] = get_tag(args, image)

    # The proxy tag comes from its own CLI flag, so build a minimal stand-in
    # args namespace for util.get_tag.
    from argparse import Namespace
    ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
    opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy')
    filename = 'hub.template.yaml'
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', filename)) as f:
        t = f.read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(**opts))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)

    if NAME not in util.get_services():
        util.run(['kubectl', 'expose', 'deployment', NAME])
Esempio n. 13
0
def run_on_kubernetes(args):
    """Render the project template and update its deployment.

    Validates the project ids, forces a gcloud-style tag, fills in the
    template placeholders, and applies the result via util.update_deployment.
    """
    validate_project_ids(args)
    # NOTE(review): dropped the unused `context = util.get_cluster_prefix()`
    # assignment; confirm the getter has no required side effects.
    namespace = util.get_current_namespace()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(
            t.format(image=tag,
                     project_id=args.project_id,
                     namespace=namespace,
                     storage_server=args.storage_server,
                     disk_size=args.disk_size,
                     pull_policy=util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
Esempio n. 14
0
def run_on_kubernetes(args):
    """Render the service template, update the deployment, and expose it.

    Ensures the SSL secret exists, defaults the replica count from the
    cluster's desired replicas, and forces a gcloud-style tag.
    """
    ensure_ssl()
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    print("tag='{tag}', replicas='{replicas}'".format(tag=tag, replicas=args.replicas))
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()
    namespace = util.get_current_namespace()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image       = tag,
                           replicas    = args.replicas,
                           pull_policy = util.pull_policy(args),
                           namespace   = namespace))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
    expose()
Esempio n. 15
0
def run_on_kubernetes(args):
    """Render the service template, update the deployment, and expose it.

    Ensures the SSL secret exists, defaults the replica count from the
    cluster's desired replicas, and forces a gcloud-style tag.
    """
    ensure_ssl()
    if args.replicas is None:
        args.replicas = util.get_desired_replicas(NAME, 2)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    print("tag='{tag}', replicas='{replicas}'".format(tag=tag,
                                                      replicas=args.replicas))
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()
    namespace = util.get_current_namespace()
    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(
            t.format(image=tag,
                     replicas=args.replicas,
                     pull_policy=util.pull_policy(args),
                     namespace=namespace))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
    expose()
Esempio n. 16
0
def run_on_kubernetes(args):
    """Render the template with a default project image and deploy.

    The default smc-project image is taken from --project-tag when given,
    otherwise the most recent image in the gcloud repo is used.
    """
    create_kubectl_secret()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()

    if args.project_tag:
        default_image = util.gcloud_docker_repo('smc-project:' + args.project_tag)
    else:
        default_image = util.gcloud_most_recent_image('smc-project')

    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image          = tag,
                           namespace      = util.get_current_namespace(),
                           cluster_prefix = util.get_cluster_prefix(),
                           default_image  = default_image,
                           node_selector  = node_selector(),
                           pull_policy    = util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
Esempio n. 17
0
def run_on_kubernetes(args):
    """Render the template with a default project image and deploy.

    The default smc-project image is taken from --project-tag when given,
    otherwise the most recent image in the gcloud repo is used.
    """
    create_kubectl_secret()
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()

    if args.project_tag:
        default_image = util.gcloud_docker_repo('smc-project:' + args.project_tag)
    else:
        default_image = util.gcloud_most_recent_image('smc-project')

    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image          = tag,
                           namespace      = util.get_current_namespace(),
                           cluster_prefix = util.get_cluster_prefix(),
                           default_image  = default_image,
                           node_selector  = node_selector(),
                           pull_policy    = util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
Esempio n. 18
0
def delete(args):
    """Stop the requested node deployments, optionally deleting their disks.

    Obliterating disks requires naming the nodes explicitly and passing the
    confirmation token --obliterate-disk=<cluster-prefix>-<namespace>.
    """
    if not args.number:
        # Disk deletion must target explicitly chosen nodes, never "all".
        if args.obliterate_disk:
            raise ValueError(
                "you must explicitly specify the nodes when using --obliterate-disk"
            )
        args.number = all_node_numbers()
    for n in args.number:
        util.stop_deployment("{name}{number}".format(name=NAME, number=n))
    if args.obliterate_disk and args.number:
        prefix = util.get_cluster_prefix()
        ns = util.get_current_namespace()
        token = "%s-%s" % (prefix, ns)
        if args.obliterate_disk == token:
            delete_persistent_disks(prefix, ns, args.number)
        else:
            raise ValueError(
                "to obliterate the disk you must do --obliterate-disk=%s" %
                token)
Esempio n. 19
0
def run_on_kubernetes(args):
    """Render the project template with resources/preemptible settings and deploy.

    The resources dict is serialized through yaml.dump and collapsed onto a
    single line in YAML flow style ('{k: v,k: v}') so it can be substituted
    into the template via str.format without breaking YAML indentation.
    """
    validate_project_ids(args)
    args.local = False  # so tag is for gcloud
    tag = util.get_tag(args, NAME, build)
    # Close the template handle deterministically instead of open(...).read().
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()

    resources = {'requests': {'memory': "40Mi", 'cpu': '5m'}, 'limits': {'memory': "1000Mi", 'cpu': "1000m"}}
    # Join the dumped lines with commas, drop the trailing separator ([:-1]),
    # and wrap in braces to form a one-line flow mapping.
    resources = '{' + yaml.dump(resources).replace('\n', ',')[:-1] + '}'

    with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
        tmp.write(t.format(image          = tag,
                           project_id     = args.project_id,
                           namespace      = util.get_current_namespace(),
                           storage_server = args.storage_server,
                           disk_size      = args.disk_size,
                           resources      = resources,
                           preemptible    = 'true' if args.preemptible else 'false',
                           pull_policy    = util.pull_policy(args)))
        tmp.flush()  # ensure content is on disk before kubectl reads the file
        util.update_deployment(tmp.name)
Esempio n. 20
0
def run_on_kubernetes(args):
    """Deploy one storage node per requested number, backed by gcloud.

    Creates the gcloud secret and the storage-projects service if missing,
    ensures each node's persistent disk exists, renders the per-node
    template and updates the deployment.
    """
    create_gcloud_secret()
    context = util.get_cluster_prefix()
    namespace = util.get_current_namespace()
    if len(args.number) == 0:
        # Figure out the nodes based on the names of persistent disks, or just node 0 if none.
        args.number = range(
            max(1, len(get_persistent_disks(context, namespace))))
    if 'storage-projects' not in util.get_services():
        util.run(['kubectl', 'create', '-f', 'conf/service.yaml'])
    args.local = False  # so tag is for gcloud

    tag = util.get_tag(args, NAME, build)
    if not args.tag:
        tag = tag[:tag.rfind(
            '-')]  # get rid of the final -[service] part of the tag.

    # Read the template once, outside the loop; 'with' closes the handle.
    with open(join('conf', '{name}.template.yaml'.format(name=NAME))) as f:
        t = f.read()

    ensure_ssh()
    # NOTE(review): dropped the unused `deployment_name = ...` assignment the
    # original computed each iteration; it was never referenced.
    for number in args.number:
        ensure_persistent_disk_exists(context, namespace, number, args.size,
                                      args.type)
        with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
            tmp.write(
                t.format(image=tag,
                         number=number,
                         gcloud_bucket=gcloud_bucket(namespace=namespace),
                         pd_name=pd_name(context=context,
                                         namespace=namespace,
                                         number=number),
                         health_delay=args.health_delay,
                         pull_policy=util.pull_policy(args)))
            tmp.flush()  # ensure content is on disk before kubectl reads the file
            util.update_deployment(tmp.name)