Exemple #1
0
def parse_role(component, topology, configmaps):
    """Schedule deployment of one service described by *component*.

    Creates the service's files/meta/workflow ConfigMaps, prepares each
    container's pre/post jobs, and submits either a DaemonSet or a
    Deployment to Kubernetes depending on the service "kind".

    :param component: dict with "service_dir", "service_content" and
        "component_name" keys.
    :param topology: mapping of service name to scheduling info; services
        missing from it are skipped.
    :param configmaps: tuple of already-created shared ConfigMap objects;
        extended locally with the per-service ConfigMaps.
    :raises RuntimeError: if replicas are configured for a DaemonSet service.
    """
    service_dir = component["service_dir"]
    role = component["service_content"]
    component_name = component["component_name"]
    service = role["service"]
    service_name = service["name"]

    if service_name not in topology:
        LOG.info("Service %s not in topology config, skipping deploy",
                 service_name)
        return
    LOG.info("Scheduling service %s deployment", service_name)
    _expand_files(service, role.get("files"))

    files_cm = _create_files_configmap(
        service_dir, service_name, role.get("files"))
    meta_cm = _create_meta_configmap(service)

    workflows = _parse_workflows(service)
    workflow_cm = _create_workflow(workflows, service_name)
    # Rebinds the local name only; the caller's tuple is not mutated.
    configmaps = configmaps + (files_cm, meta_cm, workflow_cm)

    if CONF.action.dry_run:
        cm_version = 'dry-run'
    else:
        cm_version = _get_configmaps_version(
            configmaps, service_dir, role.get("files"), CONF.configs._dict)

    for cont in service["containers"]:
        daemon_cmd = cont["daemon"]
        daemon_cmd["name"] = cont["name"]

        _create_pre_jobs(service, cont, component_name)
        _create_post_jobs(service, cont, component_name)
        # NOTE(review): cm_version is stamped on every container, presumably
        # so pods are recreated when ConfigMap contents change — confirm.
        cont['cm_version'] = cm_version

    cont_spec = templates.serialize_daemon_pod_spec(service)
    affinity = templates.serialize_affinity(service, topology)

    replicas = CONF.replicas.get(service_name)
    if service.get("kind") == 'DaemonSet':
        # DaemonSets run on all matching nodes; an explicit replica count
        # is contradictory, so reject it loudly.
        if replicas is not None:
            LOG.error("Replicas was specified for %s, but it's implemented "
                      "using Kubernetes DaemonSet that will deploy service on "
                      "all matching nodes (section 'nodes' in config file)",
                      service_name)
            raise RuntimeError("Replicas couldn't be specified for services "
                               "implemented using Kubernetes DaemonSet")

        obj = templates.serialize_daemonset(service_name, cont_spec,
                                            affinity, component_name)
    else:
        # Deployments default to a single replica when none is configured.
        replicas = replicas or 1
        obj = templates.serialize_deployment(service_name, cont_spec,
                                             affinity, replicas,
                                             component_name)
    kubernetes.process_object(obj)

    _create_service(service)
    LOG.info("Service %s successfuly scheduled", service_name)
Exemple #2
0
 def _create_pod(self, pod_spec):
     """Submit a copy of *pod_spec* to Kubernetes as a v1 Pod."""
     pod = copy.deepcopy(pod_spec)
     metadata = pod["metadata"]
     if "labels" not in metadata:
         metadata["labels"] = {}
     pod["kind"] = "Pod"
     pod["apiVersion"] = "v1"
     kubernetes.process_object(pod)
Exemple #3
0
    def _create_configmap(self):
        """Assemble this action's ConfigMap payload and submit it."""
        payload = {"config": CONF.configs._json(sort_keys=True)}
        payload["nodes-config"] = utils.get_nodes_config(CONF.nodes)
        payload["workflow"] = self._get_workflow()
        # File templates are merged last, matching the original precedence.
        payload.update(self._get_file_templates())
        kubernetes.process_object(
            templates.serialize_configmap(self.k8s_name, payload))
Exemple #4
0
    def test_object_create(self):
        """process_object creates the object when it does not yet exist."""
        manifest = {'kind': self.kind, 'metadata': {'name': 'test'}}
        fake_obj = mock.Mock(exists=mock.Mock(return_value=False))
        patched = self.useFixture(fixtures.MockPatch(
            'pykube.{}'.format(self.kind), return_value=fake_obj))

        kubernetes.process_object(manifest, client=mock.Mock())
        patched.mock.assert_called_once_with(mock.ANY, manifest)
        fake_obj.create.assert_called_once_with()
Exemple #5
0
def _create_registry_secret():
    """Create the dockercfg Secret used to pull images from the registry."""
    credentials = {
        "username": CONF.registry.username,
        "password": CONF.registry.password,
    }
    payload = json.dumps({CONF.registry.address: credentials}, sort_keys=True)
    secret = templates.serialize_secret(
        "registry-key", "kubernetes.io/dockercfg", {".dockercfg": payload})
    kubernetes.process_object(secret)
Exemple #6
0
def _create_registry_secret():
    """Register the "registry-key" dockercfg Secret with Kubernetes."""
    auth = {
        CONF.registry.address: {
            "username": CONF.registry.username,
            "password": CONF.registry.password,
        },
    }
    kubernetes.process_object(templates.serialize_secret(
        "registry-key", "kubernetes.io/dockercfg",
        {".dockercfg": json.dumps(auth, sort_keys=True)}))
Exemple #7
0
    def _create_configmap(self):
        """Merge action parameters into config, then publish the ConfigMap."""
        CONF.configs._update(action_parameters=self._get_custom_parameters())
        payload = {"config": CONF.configs._json(sort_keys=True)}
        payload["secret-config"] = CONF.secret_configs._json(sort_keys=True)
        payload["nodes-config"] = utils.get_nodes_config(CONF.nodes)
        payload["workflow"] = self._get_workflow()
        # File templates are merged last, matching the original precedence.
        payload.update(self._get_file_templates())
        kubernetes.process_object(
            templates.serialize_configmap(self.k8s_name, payload))
Exemple #8
0
def _create_exports_configmap(exports_map):
    """Create config map of files from fuel-ccp-repo/exports dirs."""
    contents = {name: entry['body'] for name, entry in exports_map.items()}
    return kubernetes.process_object(
        templates.serialize_configmap(templates.EXPORTS_CONFIG, contents))
Exemple #9
0
def _create_exports_configmap(exports_map):
    """Build a ConfigMap holding exported files from all repositories."""
    body_by_name = {}
    for name in exports_map:
        body_by_name[name] = exports_map[name]['body']
    cm = templates.serialize_configmap(templates.EXPORTS_CONFIG, body_by_name)
    return kubernetes.process_object(cm)
Exemple #10
0
def _create_meta_configmap(service):
    """Create the per-service meta ConfigMap (name and host-net flag)."""
    meta = {"service-name": service["name"],
            "host-net": service.get("hostNetwork", False)}
    cm_name = "%s-%s" % (service["name"], templates.META_CONFIG)
    payload = {templates.META_CONFIG: json.dumps(meta, sort_keys=True)}
    return kubernetes.process_object(
        templates.serialize_configmap(cm_name, payload))
Exemple #11
0
def _create_service(service):
    """Create a Kubernetes Service exposing the ports of *service*.

    Each entry of service["ports"] may be "<port>" or "<port>:<node-port>";
    when a node port is present it is published under the "node-port" key.
    Services without a "ports" section are silently skipped.
    """
    template_ports = service.get("ports")
    if not template_ports:
        return
    ports = []
    for port in template_ports:
        source_port, _, node_port = str(port).partition(":")
        source_port = int(source_port)
        entry = {"port": source_port, "name": str(source_port)}
        # Merged the original duplicated `if node_port:` checks into one.
        if node_port:
            entry["node-port"] = int(node_port)
        ports.append(entry)
    template = templates.serialize_service(service["name"], ports)
    kubernetes.process_object(template)
Exemple #12
0
def _create_meta_configmap(service):
    """Publish service metadata (name, host networking) as a ConfigMap."""
    service_name = service["name"]
    payload = {
        templates.META_CONFIG: json.dumps(
            {"service-name": service_name,
             "host-net": service.get("hostNetwork", False)}, sort_keys=True),
    }
    cm = templates.serialize_configmap(
        "%s-%s" % (service_name, templates.META_CONFIG), payload)
    return kubernetes.process_object(cm)
Exemple #13
0
 def _create_job(self, pod_spec):
     """Run this action as a Kubernetes Job and log a successful launch."""
     spec = templates.serialize_job(
         name=self.k8s_name, spec=pod_spec,
         component_name=self.component, app_name=self.name)
     spec["metadata"]["labels"]["ccp-action"] = "true"
     if kubernetes.process_object(spec):
         LOG.info('%s: action "%s" has been successfully run',
                  self.component, self.k8s_name)
Exemple #14
0
def _create_files_configmap(service_name, files, macros_imports):
    """Create the "<service>-files" ConfigMap from file templates.

    :param service_name: name of the service the ConfigMap belongs to.
    :param files: mapping of target filename to a dict whose "content" key
        is the path of the source file on disk; may be None or empty.
    :param macros_imports: jinja import header prepended to every file.
    :returns: result of kubernetes.process_object for the ConfigMap.
    """
    configmap_name = "%s-%s" % (service_name, templates.FILES_CONFIG)
    data = {}
    if files:
        # Renamed the file handle: the original shadowed the dict value
        # `f` with the open file object, which was error-prone.
        for filename, file_desc in files.items():
            with open(file_desc["content"], "r") as src:
                data[filename] = macros_imports + src.read()
    # Keep at least one key so the ConfigMap data is never empty.
    data["placeholder"] = ""
    template = templates.serialize_configmap(configmap_name, data)
    return kubernetes.process_object(template)
Exemple #15
0
def _create_files_configmap(service_name, files, macros_imports):
    """Build and submit the files ConfigMap for *service_name*."""
    cm_name = "%s-%s" % (service_name, templates.FILES_CONFIG)
    contents = {}
    for target, spec in (files or {}).items():
        with open(spec["content"], "r") as source:
            contents[target] = macros_imports + source.read()
    contents["placeholder"] = ""
    return kubernetes.process_object(
        templates.serialize_configmap(cm_name, contents))
Exemple #16
0
 def _create_job(self, pod_spec):
     """Submit a Job for this action; log when it was actually created."""
     job = templates.serialize_job(name=self.k8s_name,
                                   spec=pod_spec,
                                   component_name=self.component,
                                   app_name=self.name)
     labels = job["metadata"]["labels"]
     labels.update({"ccp-action": "true"})
     created = kubernetes.process_object(job)
     if created:
         LOG.info('%s: action "%s" has been successfully run',
                  self.component, self.k8s_name)
Exemple #17
0
def _create_files_configmap(service_dir, service_name, files):
    """Create the files ConfigMap, reading sources from <service_dir>/files."""
    cm_name = "%s-%s" % (service_name, templates.FILES_CONFIG)
    contents = {}
    if files:
        for target, spec in files.items():
            path = os.path.join(service_dir, "files", spec["content"])
            with open(path, "r") as source:
                contents[target] = source.read()
    contents["placeholder"] = ""
    return kubernetes.process_object(
        templates.serialize_configmap(cm_name, contents))
Exemple #18
0
def _create_start_script_configmap():
    """Create a ConfigMap carrying the fuel-ccp-entrypoint start script."""
    script_path = os.path.join(
        CONF.repositories.path, "fuel-ccp-entrypoint",
        "fuel_ccp_entrypoint", "start_script.py")
    with open(script_path) as script:
        payload = {templates.SCRIPT_CONFIG: script.read()}
    cm = templates.serialize_configmap(templates.SCRIPT_CONFIG, payload)
    return kubernetes.process_object(cm)
Exemple #19
0
def _create_nodes_configmap(nodes):
    """Publish the rendered nodes configuration as a ConfigMap."""
    payload = {templates.NODES_CONFIG: utils.get_nodes_config(nodes)}
    return kubernetes.process_object(
        templates.serialize_configmap(templates.NODES_CONFIG, payload))
Exemple #20
0
def _create_nodes_configmap(nodes):
    """Create the nodes-config ConfigMap from the given nodes mapping."""
    rendered = utils.get_nodes_config(nodes)
    configmap = templates.serialize_configmap(
        templates.NODES_CONFIG, {templates.NODES_CONFIG: rendered})
    return kubernetes.process_object(configmap)
Exemple #21
0
def _create_namespace(configs):
    """Create the deployment namespace unless this is a dry run."""
    if not CONF.action.dry_run:
        kubernetes.process_object(
            templates.serialize_namespace(configs['namespace']))
Exemple #22
0
def _create_namespace(configs):
    """Ensure the target namespace exists (no-op on dry runs)."""
    if CONF.action.dry_run:
        return
    namespace = templates.serialize_namespace(configs['namespace'])
    kubernetes.process_object(namespace)
Exemple #23
0
def _create_globals_configmap(config):
    """Publish the global configuration as a ConfigMap."""
    serialized = config._json(sort_keys=True)
    return kubernetes.process_object(templates.serialize_configmap(
        templates.GLOBAL_CONFIG, {templates.GLOBAL_CONFIG: serialized}))
Exemple #24
0
def create_start_script_configmap():
    """Create the ConfigMap holding the container start script."""
    payload = {templates.SCRIPT_CONFIG: get_start_script()}
    return kubernetes.process_object(
        templates.serialize_configmap(templates.SCRIPT_CONFIG, payload))
Exemple #25
0
def _create_service_configmap(service_name, service_config):
    """Store per-service configuration in its own ConfigMap."""
    cm_name = "%s-%s" % (service_name, templates.SERVICE_CONFIG)
    payload = {templates.SERVICE_CONFIG: service_config._json(sort_keys=True)}
    return kubernetes.process_object(
        templates.serialize_configmap(cm_name, payload))
Exemple #26
0
def _create_service_configmap(service_name, service_config):
    """Serialize *service_config* into the service's config ConfigMap."""
    serialized = service_config._json(sort_keys=True)
    cm = templates.serialize_configmap(
        "%s-%s" % (service_name, templates.SERVICE_CONFIG),
        {templates.SERVICE_CONFIG: serialized})
    return kubernetes.process_object(cm)
Exemple #27
0
def create_start_script_configmap():
    """Publish the entrypoint start script via a ConfigMap."""
    script = get_start_script()
    cm = templates.serialize_configmap(
        templates.SCRIPT_CONFIG, {templates.SCRIPT_CONFIG: script})
    return kubernetes.process_object(cm)
Exemple #28
0
def _create_job(service, container, job, component_name):
    """Serialize and submit a Kubernetes Job for one job entry."""
    container_spec = templates.serialize_job_container_spec(container, job)
    pod_spec = templates.serialize_job_pod_spec(service, job, container_spec)
    kubernetes.process_object(
        templates.serialize_job(job["name"], pod_spec, component_name))
Exemple #29
0
def _create_workflow(workflow, name):
    """Create the role ConfigMap holding the given workflow mapping."""
    cm_name = "%s-%s" % (name, templates.ROLE_CONFIG)
    return kubernetes.process_object(
        templates.serialize_configmap(cm_name, workflow))
Exemple #30
0
def deploy_components(components_map, components):
    """Deploy the requested components, deferring those that need upgrade.

    :param components_map: mapping of service name to its parsed component
        definition.
    :param components: set of service names to deploy; when empty, every
        service present in both the topology and components_map is used.
    :raises ValueError: if a requested component is absent from the topology.
    """

    topology = _make_topology(CONF.nodes, CONF.roles, CONF.replicas)
    if not components:
        components = set(topology.keys()) & set(components_map.keys())
    else:
        diff = components - set(topology.keys())
        if diff:
            raise ValueError('The next components are not '
                             'defined in topology: %s' % list(diff))

    deploy_validation.validate_requested_components(components, components_map)

    if CONF.action.export_dir:
        os.makedirs(os.path.join(CONF.action.export_dir, 'configmaps'))

    # Cluster-wide bootstrap objects shared by every service.
    _create_namespace(CONF.configs)
    _create_registry_secret()
    _create_globals_configmap(CONF.configs)
    _create_globals_secret(CONF.secret_configs)
    _create_nodes_configmap(CONF.nodes)
    start_script_cm = create_start_script_configmap()

    # load exported j2 templates, which can be used across all repositories
    exports_map = utils.get_repositories_exports()
    j2_imports_files_header = jinja_utils.generate_jinja_imports(exports_map)

    exports_cm = _create_exports_configmap(exports_map)
    exports_ctx = {'files_header': j2_imports_files_header, 'map': exports_map}

    configmaps = (start_script_cm, exports_cm)

    upgrading_components = {}
    for service_name in components:
        service = components_map[service_name]
        service["service_content"]['service']['exports_ctx'] = exports_ctx
        objects_gen = parse_role(service, topology, configmaps, components_map)
        objects = list(itertools.chain.from_iterable(objects_gen))
        component_name = service['component_name']
        # Once a component is marked for upgrade, all of its remaining
        # services are deferred as well.
        do_upgrade = component_name in upgrading_components
        if not do_upgrade and service['component']['upgrades']:
            res = check_images_change(objects)
            do_upgrade = bool(res)
            if do_upgrade:
                from_image, to_image = res
                from_version, to_version = version_diff(from_image, to_image)
                upgrading_components[component_name] = {
                    '_meta': {
                        'from': from_version,
                        'to': to_version,
                        'component': service['component']
                    },
                }
                LOG.info('Upgrade will be triggered for %s'
                         ' from version %s to version %s because image for %s'
                         ' changed from %s to %s',
                         component_name, from_version, to_version,
                         service_name, from_image, to_image)

        if not do_upgrade:
            # Plain deploy: create the Kubernetes objects immediately.
            for obj in objects:
                kubernetes.process_object(obj)
        else:
            # Deferred: the objects are consumed by create_upgrade_jobs.
            upgrading_components[component_name][service_name] = objects

    for component_name, component_upg in upgrading_components.items():
        create_upgrade_jobs(component_name, component_upg, configmaps,
                            topology, exports_ctx)

    if 'keystone' in components:
        conf = utils.get_rendering_config()
        _create_openrc(conf)
Exemple #31
0
def create_upgrade_jobs(component_name, upgrade_data, configmaps, topology,
                        exports_ctx):
    """Schedule the upgrade workflow jobs for one component.

    :param component_name: name of the component being upgraded.
    :param upgrade_data: mapping with a '_meta' entry (from/to versions and
        the component definition) plus, per service name, the Kubernetes
        objects deferred during deploy.
    :param configmaps: shared ConfigMap objects (unused in this body).
    :param topology: node/service topology passed to the pre jobs.
    :param exports_ctx: jinja exports context ('files_header' and 'map').
    :raises RuntimeError: on an unknown upgrade step type.
    """
    from_version = upgrade_data['_meta']['from']
    to_version = upgrade_data['_meta']['to']
    component = upgrade_data['_meta']['component']
    upgrade_def = component['upgrades']['default']['upgrade']
    files = component['upgrades']['default'].get('files')
    # Unique name prefix for every object created by this upgrade.
    prefix = '{}-{}-{}'.format(upgrade_def['name'], from_version, to_version)

    LOG.info("Scheduling component %s upgrade", component_name)
    # Replace per-step file name lists with the full file definitions.
    for step in upgrade_def['steps']:
        if step.get('files'):
            step['files'] = {f: files[f] for f in step['files']}

    process_files(files, component['service_dir'])
    _create_files_configmap(prefix, files, exports_ctx['files_header'])
    # Synthetic service/container wrapping the upgrade jobs.
    container = {
        "name": prefix,
        "pre": [],
        "daemon": {},
        "image": upgrade_def['image'],
    }
    service = {
        "name": prefix,
        "containers": [container],
        "exports_ctx": exports_ctx,
    }
    _create_meta_configmap(service)
    _create_service_configmap(prefix, _yaml.AttrDict())

    workflows = {prefix: ""}
    jobs = container["pre"]
    # Each step depends on the previous one, forming a linear chain.
    last_deps = []

    for step in upgrade_def['steps']:
        step_type = step.get('type', 'single')
        job_name = "{}-{}".format(prefix, step['name'])
        job = {"name": step['name'], "type": "single"}
        for key in ['files', 'volumes', 'topology_key']:
            if step.get(key):
                job[key] = step[key]
        jobs.append(job)
        workflow = {
            'name': job_name,
            'dependencies': last_deps,
        }
        last_deps = [job_name]
        if step_type == 'single':
            # One-shot command step.
            workflow['job'] = job = {}
            _fill_cmd(job, step)
            _push_files_to_workflow(workflow, step.get('files'))
        elif step_type == 'rolling-upgrade':
            # Roll the deferred objects of the listed (or all) services.
            services = step.get('services')
            if services is None:
                services = [s for s in upgrade_data if s != '_meta']
            workflow['roll'] = roll = []
            for service_name in services:
                roll.extend(upgrade_data[service_name])
        elif step_type == 'kill-services':
            # Kill only the Deployment objects of the targeted services.
            services = step.get('services')
            if services is None:
                services = [s for s in upgrade_data if s != '_meta']
            workflow['kill'] = kill = []
            for service_name in services:
                for object_dict in upgrade_data[service_name]:
                    if object_dict['kind'] == 'Deployment':
                        kill.append(object_dict)
        else:
            raise RuntimeError("Unsupported upgrade step type: %s" % step_type)
        workflows[step['name']] = \
            json.dumps({'workflow': workflow}, sort_keys=True)

    _create_workflow(workflows, prefix)

    job_specs = _create_pre_jobs(service, container, component_name, topology)
    for job_spec in job_specs:
        kubernetes.process_object(job_spec)

    LOG.info("Upgrade of component %s successfuly scheduled", component_name)
Exemple #32
0
def _create_workflow(workflow, name):
    """Store workflow definitions in the "<name>-<role>" ConfigMap."""
    template = templates.serialize_configmap(
        "%s-%s" % (name, templates.ROLE_CONFIG), workflow)
    return kubernetes.process_object(template)
Exemple #33
0
def deploy_components(components_map, components):
    """Deploy the requested components, deferring those that need upgrade.

    :param components_map: mapping of service name to its parsed component
        definition.
    :param components: set of service names to deploy; when empty, every
        service present in both the topology and components_map is used.
    :raises ValueError: if a requested component is absent from the topology.
    """

    topology = _make_topology(CONF.nodes, CONF.roles, CONF.replicas)
    if not components:
        components = set(topology.keys()) & set(components_map.keys())
    else:
        diff = components - set(topology.keys())
        if diff:
            raise ValueError('The next components are not '
                             'defined in topology: %s' % list(diff))

    deploy_validation.validate_requested_components(components, components_map)

    if CONF.action.export_dir:
        os.makedirs(os.path.join(CONF.action.export_dir, 'configmaps'))

    # Cluster-wide bootstrap objects shared by every service.
    _create_namespace(CONF.configs)
    _create_registry_secret()
    _create_globals_configmap(CONF.configs)
    _create_globals_secret(CONF.secret_configs)
    _create_nodes_configmap(CONF.nodes)
    start_script_cm = create_start_script_configmap()

    # load exported j2 templates, which can be used across all repositories
    exports_map = utils.get_repositories_exports()
    j2_imports_files_header = jinja_utils.generate_jinja_imports(exports_map)

    exports_cm = _create_exports_configmap(exports_map)
    exports_ctx = {'files_header': j2_imports_files_header, 'map': exports_map}

    configmaps = (start_script_cm, exports_cm)

    upgrading_components = {}
    for service_name in components:
        service = components_map[service_name]
        service["service_content"]['service']['exports_ctx'] = exports_ctx
        objects_gen = parse_role(service, topology, configmaps, components_map)
        objects = list(itertools.chain.from_iterable(objects_gen))
        component_name = service['component_name']
        # Once a component is marked for upgrade, all of its remaining
        # services are deferred as well.
        do_upgrade = component_name in upgrading_components
        if not do_upgrade and service['component']['upgrades']:
            res = check_images_change(objects)
            do_upgrade = bool(res)
            if do_upgrade:
                from_image, to_image = res
                from_version, to_version = version_diff(from_image, to_image)
                upgrading_components[component_name] = {
                    '_meta': {
                        'from': from_version,
                        'to': to_version,
                        'component': service['component']
                    },
                }
                LOG.info(
                    'Upgrade will be triggered for %s'
                    ' from version %s to version %s because image for %s'
                    ' changed from %s to %s', component_name, from_version,
                    to_version, service_name, from_image, to_image)

        if not do_upgrade:
            # Plain deploy: create the Kubernetes objects immediately.
            for obj in objects:
                kubernetes.process_object(obj)
        else:
            # Deferred: the objects are consumed by create_upgrade_jobs.
            upgrading_components[component_name][service_name] = objects

    for component_name, component_upg in upgrading_components.items():
        create_upgrade_jobs(component_name, component_upg, configmaps,
                            topology, exports_ctx)

    if 'keystone' in components:
        conf = utils.get_rendering_config()
        _create_openrc(conf)
Exemple #34
0
def _create_globals_configmap(config):
    """Create the ConfigMap carrying the serialized global config."""
    payload = {templates.GLOBAL_CONFIG: config._json(sort_keys=True)}
    cm = templates.serialize_configmap(templates.GLOBAL_CONFIG, payload)
    return kubernetes.process_object(cm)
Exemple #35
0
 def _create_pod(self, pod_spec):
     """Register a deep copy of *pod_spec* as a Kubernetes v1 Pod."""
     body = copy.deepcopy(pod_spec)
     body["metadata"].setdefault("labels", {})
     body["kind"] = "Pod"
     body["apiVersion"] = "v1"
     kubernetes.process_object(body)
Exemple #36
0
def _create_globals_secret(conf):
    """Store secret global configuration in a Kubernetes Secret."""
    payload = {templates.GLOBAL_SECRET_CONFIG: conf._json(sort_keys=True)}
    return kubernetes.process_object(templates.serialize_secret(
        templates.GLOBAL_SECRET_CONFIG, data=payload))
Exemple #37
0
def create_upgrade_jobs(component_name, upgrade_data, configmaps, topology,
                        exports_ctx):
    """Schedule the upgrade workflow jobs for one component.

    :param component_name: name of the component being upgraded.
    :param upgrade_data: mapping with a '_meta' entry (from/to versions and
        the component definition) plus, per service name, the Kubernetes
        objects deferred during deploy.
    :param configmaps: shared ConfigMap objects (unused in this body).
    :param topology: node/service topology passed to the pre jobs.
    :param exports_ctx: jinja exports context ('files_header' and 'map').
    :raises RuntimeError: on an unknown upgrade step type.
    """
    from_version = upgrade_data['_meta']['from']
    to_version = upgrade_data['_meta']['to']
    component = upgrade_data['_meta']['component']
    upgrade_def = component['upgrades']['default']['upgrade']
    files = component['upgrades']['default'].get('files')
    # Unique name prefix for every object created by this upgrade.
    prefix = '{}-{}-{}'.format(upgrade_def['name'], from_version, to_version)

    LOG.info("Scheduling component %s upgrade", component_name)
    # Replace per-step file name lists with the full file definitions.
    for step in upgrade_def['steps']:
        if step.get('files'):
            step['files'] = {f: files[f] for f in step['files']}

    process_files(files, component['service_dir'])
    _create_files_configmap(prefix, files, exports_ctx['files_header'])
    # Synthetic service/container wrapping the upgrade jobs.
    container = {
        "name": prefix,
        "pre": [],
        "daemon": {},
        "image": upgrade_def['image'],
    }
    service = {
        "name": prefix,
        "containers": [container],
        "exports_ctx": exports_ctx,
    }
    _create_meta_configmap(service)
    _create_service_configmap(prefix, _yaml.AttrDict())

    workflows = {prefix: ""}
    jobs = container["pre"]
    # Each step depends on the previous one, forming a linear chain.
    last_deps = []

    for step in upgrade_def['steps']:
        step_type = step.get('type', 'single')
        job_name = "{}-{}".format(prefix, step['name'])
        job = {"name": step['name'], "type": "single"}
        for key in ['files', 'volumes', 'topology_key']:
            if step.get(key):
                job[key] = step[key]
        jobs.append(job)
        workflow = {
            'name': job_name,
            'dependencies': last_deps,
        }
        last_deps = [job_name]
        if step_type == 'single':
            # One-shot command step.
            workflow['job'] = job = {}
            _fill_cmd(job, step)
            _push_files_to_workflow(workflow, step.get('files'))
        elif step_type == 'rolling-upgrade':
            # Roll the deferred objects of the listed (or all) services.
            services = step.get('services')
            if services is None:
                services = [s for s in upgrade_data if s != '_meta']
            workflow['roll'] = roll = []
            for service_name in services:
                roll.extend(upgrade_data[service_name])
        elif step_type == 'kill-services':
            # Kill only the Deployment objects of the targeted services.
            services = step.get('services')
            if services is None:
                services = [s for s in upgrade_data if s != '_meta']
            workflow['kill'] = kill = []
            for service_name in services:
                for object_dict in upgrade_data[service_name]:
                    if object_dict['kind'] == 'Deployment':
                        kill.append(object_dict)
        else:
            raise RuntimeError("Unsupported upgrade step type: %s" % step_type)
        workflows[step['name']] = \
            json.dumps({'workflow': workflow}, sort_keys=True)

    _create_workflow(workflows, prefix)

    job_specs = _create_pre_jobs(service, container, component_name, topology)
    for job_spec in job_specs:
        kubernetes.process_object(job_spec)

    LOG.info("Upgrade of component %s successfuly scheduled", component_name)
Exemple #38
0
def _create_globals_secret(conf):
    """Serialize *conf* into the global secret-config Secret."""
    serialized = conf._json(sort_keys=True)
    secret = templates.serialize_secret(
        templates.GLOBAL_SECRET_CONFIG,
        data={templates.GLOBAL_SECRET_CONFIG: serialized})
    return kubernetes.process_object(secret)