Example #1
    def stop_deployment(self):
        log.info('Job started for {}.'.format(self.deployment_id))

        self.job.set_progress(10)

        try:
            if Deployment.is_component(self.deployment):
                self.stop_component()
            elif Deployment.is_application(self.deployment):
                self.stop_application()
            elif Deployment.is_application_kubernetes(self.deployment):
                self.stop_application_kubernetes()
        except Exception as ex:
            log.error('Failed to {0} {1}: {2}'.format(self.job['action'],
                                                      self.deployment_id, ex))
            try:
                self.job.set_status_message(repr(ex))
                self.api_dpl.set_state_error(self.deployment_id)
            except Exception as ex_state:
                log.error('Failed to set error state for {0}: {1}'.format(
                    self.deployment_id, ex_state))

            raise ex

        self.try_delete_deployment_credentials(self.deployment_id)

        self.api_dpl.set_state_stopped(self.deployment_id)

        return 0
Example #2
    def start_application_kubernetes(self, deployment: dict):
        deployment_id = Deployment.id(deployment)

        connector = initialize_connector(kubernetes_cli_connector, self.job,
                                         deployment)

        module_content = Deployment.module_content(deployment)
        deployment_owner = Deployment.owner(deployment)
        docker_compose = module_content['docker-compose']
        registries_auth = self.private_registries_auth(deployment)

        result, services = connector.start(
            docker_compose=docker_compose,
            stack_name=Deployment.uuid(deployment),
            env=get_env(deployment),
            files=module_content.get('files'),
            registries_auth=registries_auth)

        self.job.set_status_message(result)

        self.create_deployment_parameter(
            deployment_id=deployment_id,
            user_id=deployment_owner,
            param_name=DeploymentParameter.HOSTNAME['name'],
            param_value=self.get_hostname(),
            param_description=DeploymentParameter.HOSTNAME['description'])

        application_params_update(self.api_dpl, deployment, services)
Example #3
 def create_user_output_params(self, deployment):
     module_content = Deployment.module_content(deployment)
     for output_param in module_content.get('output-parameters', []):
         self.create_deployment_parameter(
             deployment_id=Deployment.id(deployment),
             user_id=Deployment.owner(deployment),
             param_name=output_param['name'],
             param_description=output_param.get('description'))
Example #4
 def get_connector_name(deployment):
     if Deployment.is_component(deployment):
         return 'docker_service'
     elif Deployment.is_application(deployment):
         is_compose = Deployment.is_compatibility_docker_compose(deployment)
         return 'docker_compose' if is_compose else 'docker_stack'
     elif Deployment.is_application_kubernetes(deployment):
         return 'kubernetes'
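
For reference, a minimal standalone sketch of the same dispatch, with hypothetical subtype/compatibility strings standing in for the real Deployment.is_* helpers:

def connector_name_for(subtype, compatibility='swarm'):
    # Hypothetical stand-in for the Deployment.is_* checks used above.
    if subtype == 'component':
        return 'docker_service'
    if subtype == 'application':
        return 'docker_compose' if compatibility == 'docker-compose' else 'docker_stack'
    if subtype == 'application_kubernetes':
        return 'kubernetes'

assert connector_name_for('application', 'docker-compose') == 'docker_compose'
assert connector_name_for('application_kubernetes') == 'kubernetes'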
Example #5
 def get_deployment_api(self, deployment_id):
     creds = Deployment._get_attr(
         Deployment(self.api).get(deployment_id), 'api-credentials')
     insecure = not self.api.session.verify
     api = Api(endpoint=self.api.endpoint,
               insecure=insecure,
               persist_cookie=False,
               reauthenticate=True)
     api.login_apikey(creds['api-key'], creds['api-secret'])
     return Deployment(api)
Example #6
 def get_update_params_docker_stack(self, deployment, registries_auth):
     module_content = Deployment.module_content(deployment)
     kwargs = {
         'env': get_env(deployment),
         'files': module_content.get('files'),
         'stack_name': Deployment.uuid(deployment),
         'docker_compose': module_content['docker-compose'],
         'registries_auth': registries_auth
     }
     return kwargs
Example #7
    def handle_deployment(self, deployment: dict):

        if Deployment.is_component(deployment):
            self.start_component(deployment)
        elif Deployment.is_application(deployment):
            self.start_application(deployment)
        elif Deployment.is_application_kubernetes(deployment):
            self.start_application_kubernetes(deployment)

        self.create_user_output_params(deployment)
Example #8
def push_state(deployment):
    try:
        if Deployment.is_component(deployment):
            ds.get_component_state(deployment)
        elif Deployment.is_application(deployment):
            ds.get_application_state(deployment)
        elif Deployment.is_application_kubernetes(deployment):
            ds.get_application_kubernetes_state(deployment)
    except Exception as ex:
        logging.exception('Failed to get deployment state for {}: {}'
                          .format(Deployment.id(deployment), ex))
Example #9
def application_params_update(api_dpl, deployment, services):
    if services:
        for service in services:
            node_id = service['node-id']
            for key, value in service.items():
                api_dpl.set_parameter_create_if_needed(
                    Deployment.id(deployment),
                    Deployment.owner(deployment),
                    f'{node_id}.{key}',
                    param_value=value,
                    node_id=node_id)
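
A quick illustration (plain Python, no Nuvla API) of the "<node-id>.<key>" parameter names this function produces, using a hypothetical services list:

services = [{'node-id': 'web', 'image': 'nginx:1.25', 'replicas.running': '1'}]
for service in services:
    node_id = service['node-id']
    for key, value in service.items():
        print(f'{node_id}.{key} = {value}')
# -> web.node-id = web, web.image = nginx:1.25, web.replicas.running = 1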
Example #10
    def stop_application(self):
        if Deployment.is_compatibility_docker_compose(self.deployment):
            connector = initialize_connector(docker_compose_cli_connector,
                                             self.job, self.deployment)
        else:
            connector = initialize_connector(docker_cli_connector, self.job,
                                             self.deployment)

        result = connector.stop(stack_name=Deployment.uuid(self.deployment),
                                docker_compose=Deployment.module_content(
                                    self.deployment)['docker-compose'])

        self.job.set_status_message(result)
Example #11
    def get_application_state(self):
        stack_name = Deployment.uuid(self.deployment)

        if Deployment.is_compatibility_docker_compose(self.deployment):
            module_content = Deployment.module_content(self.deployment)
            compose_file   = module_content['docker-compose']
            connector      = initialize_connector(docker_compose_cli_connector, self.job, self.deployment)
            services       = connector.stack_services(stack_name, compose_file)
        else:
            connector = initialize_connector(docker_cli_connector, self.job, self.deployment)
            services  = connector.stack_services(stack_name)

        application_params_update(self.api_dpl, self.deployment, services)
Example #12
    def stop_application_kubernetes(self):
        connector = initialize_connector(kubernetes_cli_connector, self.job,
                                         self.deployment)

        result = connector.stop(stack_name=Deployment.uuid(self.deployment))

        self.job.set_status_message(result)
Example #13
    def do_work(self):
        log.info('Job started for {}.'.format(self.deployment_id))
        self.job.set_progress(10)

        try:
            if Deployment.is_component(self.deployment):
                self.get_component_state()
            elif Deployment.is_application(self.deployment):
                self.get_application_state()
            elif Deployment.is_application_kubernetes(self.deployment):
                self.get_application_kubernetes_state()
        except Exception as ex:
            log.error('Failed to {0} {1}: {2}'.format(self.job['action'], self.deployment_id, ex))
            self.job.set_status_message(repr(ex))
            raise ex

        return 0
Example #14
 def get_update_params_docker_service(self, deployment, registries_auth):
     module_content = Deployment.module_content(deployment)
     restart_policy = module_content.get('restart-policy', {})
     module_ports = module_content.get('ports')
     kwargs = {
         'service_name': Deployment.uuid(deployment),
         'env': get_env(deployment),
         'image': module_content['image'],
         'mounts_opt': module_content.get('mounts'),
         'cpu_ratio': module_content.get('cpus'),
         'memory': module_content.get('memory'),
         'ports_opt': module_ports,
         'registries_auth': registries_auth,
         'restart_policy_condition': restart_policy.get('condition'),
         'restart_policy_delay': restart_policy.get('delay'),
         'restart_policy_max_attempts': restart_policy.get('max-attempts'),
         'restart_policy_window': restart_policy.get('window')
     }
     return kwargs
Example #15
def initialize_connector(connector_class, job, deployment):
    credential_id = Deployment.credential_id(deployment)
    credential = get_from_context(job, credential_id)
    infrastructure_service = copy.deepcopy(
        get_from_context(job, credential['parent']))
    # In pull mode, Swarm deployments only work through the NuvlaBox compute-api,
    # which means standalone ISs and k8s-capable NuvlaBoxes are not supported.
    if job.is_in_pull_mode and infrastructure_service.get('subtype',
                                                          '') == 'swarm':
        infrastructure_service['endpoint'] = 'https://compute-api:5000'
    return connector_class.instantiate_from_cimi(infrastructure_service,
                                                 credential)
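
The pull-mode endpoint override can be summarised as a small pure function; a sketch with plain dicts standing in for the infrastructure-service document (values are hypothetical):

def effective_endpoint(infra_service, in_pull_mode):
    # Mirrors the override in initialize_connector: pull-mode Swarm deployments
    # are redirected to the local NuvlaBox compute-api endpoint.
    if in_pull_mode and infra_service.get('subtype', '') == 'swarm':
        return 'https://compute-api:5000'
    return infra_service['endpoint']

assert effective_endpoint({'subtype': 'swarm',
                           'endpoint': 'https://10.0.0.2:2376'}, True) == 'https://compute-api:5000'
assert effective_endpoint({'subtype': 'kubernetes',
                           'endpoint': 'https://10.0.0.3:6443'}, True) == 'https://10.0.0.3:6443'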
Example #16
def get_env(deployment: dict):
    env_variables = {
        'NUVLA_DEPLOYMENT_UUID': deployment['id'].split('/')[-1],
        'NUVLA_DEPLOYMENT_ID': deployment['id'],
        'NUVLA_API_KEY': deployment['api-credentials']['api-key'],
        'NUVLA_API_SECRET': deployment['api-credentials']['api-secret'],
        'NUVLA_ENDPOINT': deployment['api-endpoint']
    }

    module_content = Deployment.module_content(deployment)

    for env_var in module_content.get('environmental-variables', []):
        env_variables[env_var['name']] = env_var.get('value')

    return env_variables
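
For a hypothetical deployment document, the resulting environment would look like this (a sketch, assuming module content with a single 'environmental-variables' entry):

deployment = {
    'id': 'deployment/aaa-bbb-ccc',                      # hypothetical id
    'api-endpoint': 'https://nuvla.io',
    'api-credentials': {'api-key': 'credential/xyz', 'api-secret': 's3cret'},
}
# With Deployment.module_content(deployment) returning
#   {'environmental-variables': [{'name': 'MY_VAR', 'value': '42'}]},
# get_env(deployment) would produce:
#   {'NUVLA_DEPLOYMENT_UUID': 'aaa-bbb-ccc',
#    'NUVLA_DEPLOYMENT_ID': 'deployment/aaa-bbb-ccc',
#    'NUVLA_API_KEY': 'credential/xyz',
#    'NUVLA_API_SECRET': 's3cret',
#    'NUVLA_ENDPOINT': 'https://nuvla.io',
#    'MY_VAR': '42'}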
Example #17
    def stop_component(self):
        deployment_id = Deployment.id(self.deployment)

        connector = initialize_connector(docker_connector, self.job,
                                         self.deployment)
        filter_params = 'parent="{}" and name="service-id"'.format(
            deployment_id)

        deployment_params = self.api.search(
            'deployment-parameter',
            filter=filter_params,
            select='node-id,name,value').resources

        if len(deployment_params) > 0:
            service_id = deployment_params[0].data.get('value')
            if service_id is not None:
                connector.stop(service_id=service_id)
            else:
                self.job.set_status_message(
                    "Deployment parameter {} doesn't have a value!".format(
                        deployment_params[0].data.get('id')))
        else:
            self.job.set_status_message(
                'No deployment parameters with service ID found!')
Example #18
    def fetch_log(self, deployment_log):

        service_name = deployment_log['service']

        last_timestamp = deployment_log.get('last-timestamp')

        since = deployment_log.get('since')

        lines = deployment_log.get('lines', 200)

        deployment_id = deployment_log['parent']

        deployment = self.api_dpl.get(deployment_id)

        deployment_uuid = Deployment.uuid(deployment)

        tmp_since = last_timestamp or since

        if Deployment.is_application_kubernetes(deployment):
            connector = initialize_connector(kubernetes_cli_connector,
                                             self.job, deployment)
            since_opt = ['--since-time', tmp_since] if tmp_since else []
            list_opts = [
                service_name, '--timestamps=true', '--tail',
                str(lines), '--namespace', deployment_uuid
            ] + since_opt
        else:
            is_docker_compose = Deployment.is_compatibility_docker_compose(
                deployment)

            if is_docker_compose:
                connector = initialize_connector(docker_compose_cli_connector,
                                                 self.job, deployment)
                no_trunc = []
            else:
                connector = initialize_connector(docker_cli_connector,
                                                 self.job, deployment)
                no_trunc = ['--no-trunc']

            if Deployment.is_application(deployment):
                if is_docker_compose:
                    log.info(deployment_id)
                    log.info(service_name)

                    docker_service_name = self.api_dpl.get_parameter(
                        deployment_id, service_name,
                        service_name + '.service-id')
                else:
                    docker_service_name = deployment_uuid + '_' + service_name
            else:
                docker_service_name = deployment_uuid

            since_opt = ['--since', tmp_since] if tmp_since else []

            list_opts = ['-t'] + no_trunc + since_opt + [docker_service_name]

        result = connector.log(list_opts).strip().split('\n')[:lines]

        new_last_timestamp = DeploymentLogFetchJob.extract_last_timestamp(
            result)

        update_deployment_log = {'log': result}

        if new_last_timestamp:
            update_deployment_log['last-timestamp'] = new_last_timestamp

        self.api.edit(deployment_log['id'], update_deployment_log)
Example #19
 def __init__(self, _, job):
     self.job = job
     self.api = job.api
     self.api_dpl = Deployment(self.api)
Example #20
class DeploymentLogFetchJob(object):
    def __init__(self, _, job):
        self.job = job
        self.api = job.api
        self.api_dpl = Deployment(self.api)

    @staticmethod
    def extract_last_timestamp(result):
        timestamp = result[-1].strip().split(' ')[0]
        # truncate to millisecond precision to stay compatible with the server
        return timestamp[:23] + 'Z' if timestamp else None

    def fetch_log(self, deployment_log):

        service_name = deployment_log['service']

        last_timestamp = deployment_log.get('last-timestamp')

        since = deployment_log.get('since')

        lines = deployment_log.get('lines', 200)

        deployment_id = deployment_log['parent']

        deployment = self.api_dpl.get(deployment_id)

        deployment_uuid = Deployment.uuid(deployment)

        tmp_since = last_timestamp or since

        if Deployment.is_application_kubernetes(deployment):
            connector = initialize_connector(kubernetes_cli_connector,
                                             self.job, deployment)
            since_opt = ['--since-time', tmp_since] if tmp_since else []
            list_opts = [
                service_name, '--timestamps=true', '--tail',
                str(lines), '--namespace', deployment_uuid
            ] + since_opt
        else:
            is_docker_compose = Deployment.is_compatibility_docker_compose(
                deployment)

            if is_docker_compose:
                connector = initialize_connector(docker_compose_cli_connector,
                                                 self.job, deployment)
                no_trunc = []
            else:
                connector = initialize_connector(docker_cli_connector,
                                                 self.job, deployment)
                no_trunc = ['--no-trunc']

            if Deployment.is_application(deployment):
                if is_docker_compose:
                    log.info(deployment_id)
                    log.info(service_name)

                    docker_service_name = self.api_dpl.get_parameter(
                        deployment_id, service_name,
                        service_name + '.service-id')
                else:
                    docker_service_name = deployment_uuid + '_' + service_name
            else:
                docker_service_name = deployment_uuid

            since_opt = ['--since', tmp_since] if tmp_since else []

            list_opts = ['-t'] + no_trunc + since_opt + [docker_service_name]

        result = connector.log(list_opts).strip().split('\n')[:lines]

        new_last_timestamp = DeploymentLogFetchJob.extract_last_timestamp(
            result)

        update_deployment_log = {'log': result}

        if new_last_timestamp:
            update_deployment_log['last-timestamp'] = new_last_timestamp

        self.api.edit(deployment_log['id'], update_deployment_log)

    def fetch_deployment_log(self):
        deployment_log_id = self.job['target-resource']['href']

        log.info('Job started for {}.'.format(deployment_log_id))

        deployment_log = self.api.get(
            deployment_log_id,
            select='id, parent, service, since, lines, last-timestamp').data

        self.job.set_progress(10)

        try:
            self.fetch_log(deployment_log)
        except Exception as ex:
            log.error('Failed to {0} {1}: {2}'.format(self.job['action'],
                                                      deployment_log_id, ex))
            try:
                self.job.set_status_message(repr(ex))
            except Exception as ex_state:
                log.error('Failed to set error state for {0}: {1}'.format(
                    deployment_log_id, ex_state))

            raise ex

        return 0

    def do_work(self):
        return self.fetch_deployment_log()
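
A quick check of the extract_last_timestamp truncation on a hypothetical log line carrying an RFC 3339 nanosecond timestamp:

log_lines = ['2023-05-04T12:00:00.123456789Z container started']   # hypothetical output
timestamp = log_lines[-1].strip().split(' ')[0]
print(timestamp[:23] + 'Z')   # -> 2023-05-04T12:00:00.123Z (millisecond precision)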
Example #21
    def get_component_state(self):
        connector = initialize_connector(docker_connector, self.job, self.deployment)

        did = Deployment.id(self.deployment)
        # FIXME: at the moment deployment UUID is the service name.
        sname = self.api_dpl.uuid(self.deployment)

        desired = connector.service_replicas_desired(sname)

        tasks = sorted(connector.service_tasks(filters={'service': sname}),
                       key=lambda x: x['CreatedAt'], reverse=True)

        if len(tasks) > 0:
            current_task    = tasks[0]
            current_desired = current_task.get('DesiredState')
            current_state   = None
            current_error   = None
            current_status  = current_task.get('Status')
            if current_status is not None:
                current_state = current_status.get('State')
                current_error = current_status.get('Err', "no error")

            if current_desired is not None:
                self.api_dpl.set_parameter_ignoring_errors(
                    did, sname, DeploymentParameter.CURRENT_DESIRED['name'], current_desired)

            if current_state is not None:
                self.api_dpl.set_parameter_ignoring_errors(
                    did, sname, DeploymentParameter.CURRENT_STATE['name'], current_state)

            if current_error is not None:
                self.api_dpl.set_parameter_ignoring_errors(
                    did, sname, DeploymentParameter.CURRENT_ERROR['name'], current_error)

        t_running = list(filter(lambda x:
                                x['DesiredState'] == 'running' and
                                x['Status']['State'] == 'running', tasks))
        t_failed = list(filter(lambda x:
                               x['DesiredState'] == 'shutdown' and
                               x['Status']['State'] == 'failed', tasks))
        t_rejected = list(filter(lambda x:
                                 x['DesiredState'] == 'shutdown' and
                                 x['Status']['State'] == 'rejected', tasks))

        self.api_dpl.set_parameter(did, sname, DeploymentParameter.CHECK_TIMESTAMP['name'],
                                   utcnow())

        self.api_dpl.set_parameter(did, sname, DeploymentParameter.REPLICAS_DESIRED['name'],
                                   str(desired))

        self.api_dpl.set_parameter(did, sname, DeploymentParameter.REPLICAS_RUNNING['name'],
                                   str(len(t_running)))

        if len(t_failed) > 0:
            self.api_dpl.set_parameter(did, sname, DeploymentParameter.RESTART_NUMBER['name'],
                                       str(len(t_failed)))

            self.api_dpl.set_parameter(did, sname, DeploymentParameter.RESTART_TIMESTAMP['name'],
                                       t_failed[0].get('CreatedAt', ''))

            self.api_dpl.set_parameter(did, sname, DeploymentParameter.RESTART_ERR_MSG['name'],
                                       t_failed[0].get('Status', {}).get('Err', ''))

            exit_code = str(
                t_failed[0].get('Status', {}).get('ContainerStatus', {}).get('ExitCode', ''))
            self.api_dpl.set_parameter(did, sname, DeploymentParameter.RESTART_EXIT_CODE['name'],
                                       exit_code)
        elif len(t_rejected) > 0:
            self.api_dpl.set_parameter(did, sname, DeploymentParameter.RESTART_NUMBER['name'],
                                       str(len(t_rejected)))

            self.api_dpl.set_parameter(did, sname, DeploymentParameter.RESTART_TIMESTAMP['name'],
                                       t_rejected[0].get('CreatedAt', ''))

            self.api_dpl.set_parameter(did, sname, DeploymentParameter.RESTART_ERR_MSG['name'],
                                       t_rejected[0].get('Status', {}).get('Err', ''))

        # update any port mappings that are available
        services = connector.list(filters={"name": sname})
        if services:
            ports_mapping = connector.extract_vm_ports_mapping(services[0])
            self.api_dpl.update_port_parameters(self.deployment, ports_mapping)
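
The task classification above can be checked in isolation on hypothetical Swarm task records:

tasks = [
    {'DesiredState': 'running',  'Status': {'State': 'running'}},
    {'DesiredState': 'shutdown', 'Status': {'State': 'failed',   'Err': 'task: non-zero exit (1)'}},
    {'DesiredState': 'shutdown', 'Status': {'State': 'rejected', 'Err': 'No such image'}},
]
t_running  = [t for t in tasks if t['DesiredState'] == 'running'  and t['Status']['State'] == 'running']
t_failed   = [t for t in tasks if t['DesiredState'] == 'shutdown' and t['Status']['State'] == 'failed']
t_rejected = [t for t in tasks if t['DesiredState'] == 'shutdown' and t['Status']['State'] == 'rejected']
print(len(t_running), len(t_failed), len(t_rejected))   # -> 1 1 1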
Example #22
 def get_application_kubernetes_state(self):
     connector  = initialize_connector(kubernetes_cli_connector, self.job, self.deployment)
     stack_name = Deployment.uuid(self.deployment)
     services   = connector.stack_services(stack_name)
     application_params_update(self.api_dpl, self.deployment, services)
Example #23
    def start_component(self, deployment: dict):
        connector = initialize_connector(docker_connector, self.job,
                                         deployment)

        deployment_id = Deployment.id(deployment)
        node_instance_name = Deployment.uuid(deployment)
        deployment_owner = Deployment.owner(deployment)
        module_content = Deployment.module_content(deployment)

        restart_policy = module_content.get('restart-policy', {})

        # create deployment parameters (with empty values) for all port mappings
        module_ports = module_content.get('ports')
        for port in (module_ports or []):
            target_port = port.get('target-port')
            protocol = port.get('protocol', 'tcp')
            if target_port is not None:
                self.create_deployment_parameter(
                    deployment_id=deployment_id,
                    user_id=deployment_owner,
                    param_name="{}.{}".format(protocol, str(target_port)),
                    param_description="mapping for {} port {}".format(
                        protocol, str(target_port)),
                    node_id=node_instance_name)

        registries_auth = self.private_registries_auth(deployment)

        _, service = connector.start(
            service_name=node_instance_name,
            image=module_content['image'],
            env=get_env(deployment),
            mounts_opt=module_content.get('mounts'),
            ports_opt=module_ports,
            cpu_ratio=module_content.get('cpus'),
            memory=module_content.get('memory'),
            restart_policy_condition=restart_policy.get('condition'),
            restart_policy_delay=restart_policy.get('delay'),
            restart_policy_max_attempts=restart_policy.get('max-attempts'),
            restart_policy_window=restart_policy.get('window'),
            registry_auth=registries_auth[0] if registries_auth else None)

        # FIXME: get number of desired replicas of Replicated service from deployment. 1 for now.
        desired = 1

        deployment_parameters = (
            (DeploymentParameter.SERVICE_ID, connector.extract_vm_id(service)),
            (DeploymentParameter.HOSTNAME, self.get_hostname()),
            (DeploymentParameter.REPLICAS_DESIRED, str(desired)),
            (DeploymentParameter.REPLICAS_RUNNING, '0'),
            (DeploymentParameter.CURRENT_DESIRED, ''),
            (DeploymentParameter.CURRENT_STATE, ''),
            (DeploymentParameter.CURRENT_ERROR, ''),
            (DeploymentParameter.RESTART_EXIT_CODE, ''),
            (DeploymentParameter.RESTART_ERR_MSG, ''),
            (DeploymentParameter.RESTART_TIMESTAMP, ''),
            (DeploymentParameter.RESTART_NUMBER, ''),
            (DeploymentParameter.CHECK_TIMESTAMP, ''),
        )

        for deployment_parameter, value in deployment_parameters:
            self.create_deployment_parameter(
                param_name=deployment_parameter['name'],
                param_value=value,
                param_description=deployment_parameter['description'],
                deployment_id=deployment_id,
                node_id=node_instance_name,
                user_id=deployment_owner)

        # immediately update any port mappings that are already available
        ports_mapping = connector.extract_vm_ports_mapping(service)
        self.api_dpl.update_port_parameters(deployment, ports_mapping)
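
For reference, the port-mapping parameters created at the top of start_component are named "<protocol>.<target-port>"; a small illustration with hypothetical module ports:

module_ports = [{'target-port': 80, 'protocol': 'tcp'}, {'target-port': 53, 'protocol': 'udp'}]
for port in module_ports:
    if port.get('target-port') is not None:
        print('{}.{}'.format(port.get('protocol', 'tcp'), port['target-port']))
# -> tcp.80
#    udp.53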
Example #24
 def get_hostname(self):
     credential_id = Deployment.credential_id(self.deployment)
     credential = self.get_from_context(credential_id)
     endpoint = self.get_from_context(credential['parent'])['endpoint']
     return re.search('(?:http.*://)?(?P<host>[^:/ ]+)',
                      endpoint).group('host')
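
The host-extraction regex used by get_hostname accepts endpoints with or without a scheme; a standalone check with hypothetical endpoint values:

import re

for endpoint in ('https://10.0.128.20:2376', 'http://swarm.example.com/api', '10.0.0.1:6443'):
    host = re.search('(?:http.*://)?(?P<host>[^:/ ]+)', endpoint).group('host')
    print(endpoint, '->', host)
# -> 10.0.128.20, swarm.example.com, 10.0.0.1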