def _get_parameters(self, ctxt, image_format, image_loc, image_name,
                    assem, ports, osc, t_logger):
    """Assemble the Heat stack parameters for the assembly's image.

    Handles the 'docker' and 'vm' image formats; any other format marks
    the assembly as ERROR, uploads the task log, and yields None.
    """
    if image_format == 'docker':
        glance_img_uuid = image_loc
        LOG.debug("Image id:%s" % glance_img_uuid)
        LOG.debug("Specified ports:%s" % ports)
        LOG.debug("Picking first port..")
        port_to_use = ports[0]
        LOG.debug("Application port:%s" % port_to_use)
        params = {'app_name': assem.name,
                  'image': glance_img_uuid,
                  'port': port_to_use}
        params.update(heat_utils.get_network_parameters(osc))
        return params

    if image_format == 'vm':
        # Render every port as a docker-style publish flag, e.g.
        # "-p 80:80 -p 8080:8080".
        publish = ' '.join('-p {pt}:{pt}'.format(pt=p) for p in ports)
        return {'name': str(assem.uuid),
                'flavor': cfg.CONF.deployer.flavor,
                'key_name': cfg.CONF.deployer.key_name,
                'image': cfg.CONF.deployer.image,
                'location': image_loc,
                'du': image_name,
                'publish_ports': publish}

    # Unsupported image format: record the failure and surface it in
    # the per-assembly task log before bailing out.
    LOG.debug("Image format %s is not supported." % image_format)
    update_assembly(ctxt, assem.id, {'status': STATES.ERROR})
    t_logger.log(logging.DEBUG, "Solum config error: Image format.")
    t_logger.upload()
    return None
def _get_parameters(self, ctxt, image_format, image_loc, image_name,
                    assem, ports, osc, t_logger):
    """Build the Heat stack parameters for deploying *assem*.

    :param ctxt: request context, used when flagging the assembly ERROR.
    :param image_format: 'docker' or 'vm'; anything else is rejected.
    :param image_loc: image id (docker) or DU location URL (vm).
    :param image_name: name of the deployment unit (vm format only).
    :param assem: the Assembly object being deployed.
    :param ports: list of application ports to expose.
    :param osc: OpenStack clients wrapper, for network parameters.
    :param t_logger: per-task logger, uploaded on configuration errors.
    :returns: dict of Heat parameters, or None for an unsupported format.
    """
    parameters = None
    if image_format == "docker":
        glance_img_uuid = image_loc
        LOG.debug("Image id:%s" % glance_img_uuid)
        LOG.debug("Specified ports:%s" % ports)
        LOG.debug("Picking first port..")
        port_to_use = ports[0]
        LOG.debug("Application port:%s" % port_to_use)
        parameters = {"app_name": assem.name,
                      "image": glance_img_uuid,
                      "port": port_to_use}
        parameters.update(heat_utils.get_network_parameters(osc))
    elif image_format == "vm":
        # Fix: 'key_name' was missing from this variant, so the VM would
        # boot without the operator's keypair; restored for consistency
        # with the sibling implementation of this method.
        parameters = {"name": str(assem.uuid),
                      "flavor": cfg.CONF.deployer.flavor,
                      "key_name": cfg.CONF.deployer.key_name,
                      "image": cfg.CONF.deployer.image}
        ports_str = ""
        for port in ports:
            ports_str += " -p {pt}:{pt}".format(pt=port)
        parameters["location"] = image_loc
        parameters["du"] = image_name
        parameters["publish_ports"] = ports_str.strip()
    else:
        # Unsupported format: mark the assembly failed and surface the
        # error in the uploaded task log.
        LOG.debug("Image format %s is not supported." % image_format)
        update_assembly(ctxt, assem.id, {"status": STATES.ERROR})
        t_logger.log(logging.DEBUG, "Solum config error: Image format.")
        t_logger.upload()
    return parameters
def deploy(self, ctxt, assembly_id, image_id):
    """Create or update the Heat stack that runs an assembly.

    Looks up the assembly, resolves the 'basic' template from the
    catalog, then updates the existing stack if one is found, otherwise
    creates a new stack and records it as a Component. Finally marks the
    assembly DEPLOYING and kicks off status polling.

    :param ctxt: request context.
    :param assembly_id: id of the Assembly to deploy.
    :param image_id: glance image id passed to the stack as 'image'.
    """
    osc = clients.OpenStackClients(ctxt)
    assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)

    parameters = {'app_name': assem.name,
                  'image': image_id}
    parameters.update(heat_utils.get_network_parameters(osc))

    # TODO(asalkeld) support template flavors (maybe an autoscaling one)
    # this could also be stored in glance.
    template_flavor = 'basic'
    try:
        template = catalog.get('templates', template_flavor)
    except exception.ObjectNotFound as onf_ex:
        # Fix: was LOG.excepion (typo) — that raised AttributeError and
        # masked the real "template not found" failure.
        LOG.exception(onf_ex)
        assem.status = STATES.ERROR
        assem.save(ctxt)
        return

    stack_name = self._get_stack_name(assem)
    stack_id = self._find_id_if_stack_exists(osc, assem)

    if stack_id is not None:
        osc.heat().stacks.update(stack_id,
                                 stack_name=stack_name,
                                 template=template,
                                 parameters=parameters)
    else:
        created_stack = osc.heat().stacks.create(stack_name=stack_name,
                                                 template=template,
                                                 parameters=parameters)
        stack_id = created_stack['stack']['id']

        comp_name = 'Heat_Stack_for_%s' % assem.name
        # safe_load: the template only needs its 'description' read;
        # yaml.load without an explicit Loader is unsafe and deprecated.
        comp_description = 'Heat Stack %s' % (
            yaml.safe_load(template).get('description'))
        objects.registry.Component.assign_and_create(
            ctxt, assem, comp_name, 'Heat Stack', comp_description,
            created_stack['stack']['links'][0]['href'], stack_id)

    assem.status = STATES.DEPLOYING
    assem.save(ctxt)

    self._update_assembly_status(ctxt, assem, osc, stack_id)
def deploy(self, ctxt, assembly_id, image_id):
    """Deploy an assembly by creating or updating its Heat stack.

    Fetches the assembly and the 'basic' catalog template, updates the
    stack in place when one already exists, otherwise creates it and
    registers a Component pointing at the new stack. The assembly is
    then marked DEPLOYING and its status is polled.

    :param ctxt: request context.
    :param assembly_id: id of the Assembly to deploy.
    :param image_id: glance image id passed to the stack as 'image'.
    """
    osc = clients.OpenStackClients(ctxt)
    assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)

    parameters = {'app_name': assem.name,
                  'image': image_id}
    parameters.update(heat_utils.get_network_parameters(osc))

    # TODO(asalkeld) support template flavors (maybe an autoscaling one)
    # this could also be stored in glance.
    template_flavor = 'basic'
    try:
        template = catalog.get('templates', template_flavor)
    except exception.ObjectNotFound as onf_ex:
        # Fix: LOG.excepion was a typo; it raised AttributeError instead
        # of logging the missing-template error.
        LOG.exception(onf_ex)
        assem.status = STATES.ERROR
        assem.save(ctxt)
        return

    stack_name = self._get_stack_name(assem)
    stack_id = self._find_id_if_stack_exists(osc, assem)

    if stack_id is not None:
        osc.heat().stacks.update(stack_id,
                                 stack_name=stack_name,
                                 template=template,
                                 parameters=parameters)
    else:
        created_stack = osc.heat().stacks.create(stack_name=stack_name,
                                                 template=template,
                                                 parameters=parameters)
        stack_id = created_stack['stack']['id']

        comp_name = 'Heat_Stack_for_%s' % assem.name
        # safe_load is sufficient (we only read 'description') and avoids
        # the arbitrary-object construction of a bare yaml.load.
        comp_description = 'Heat Stack %s' % (
            yaml.safe_load(template).get('description'))
        objects.registry.Component.assign_and_create(
            ctxt, assem, comp_name, 'Heat Stack', comp_description,
            created_stack['stack']['links'][0]['href'], stack_id)

    assem.status = STATES.DEPLOYING
    assem.save(ctxt)

    self._update_assembly_status(ctxt, assem, osc, stack_id)
def _build_execution_context(self, pipeline):
    """Return the execution context for *pipeline*.

    Reuses the context of the previous execution when one exists;
    otherwise builds a fresh one from the service catalog, the plan's
    artifacts, and a newly created empty Heat stack.
    """
    # Prefer the context persisted by the last execution.
    previous = self._get_context_from_last_execution(pipeline)
    if previous is not None:
        return previous

    keystone = self._clients.keystone()
    service_catalog = keystone.client.service_catalog

    ctx = {
        # service urls.
        'heat_service_url': service_catalog.url_for(
            service_type='orchestration', interface='publicURL'),
        'build_service_url': service_catalog.url_for(
            service_type='image_builder', interface='publicURL'),
    }

    # extract context from the plan
    # TODO(asalkeld) this should be versioned.
    plan_obj = objects.registry.Plan.get_by_id(self.context,
                                               pipeline.plan_id)
    ctx['name'] = plan_obj.name
    # NOTE: with several artifacts the last one wins, matching the
    # original behavior of overwriting these keys per iteration.
    for artifact in plan_obj.raw_content.get('artifacts', []):
        ctx['source_uri'] = artifact['content']['href']
        ctx['base_image_id'] = artifact.get('language_pack', 'auto')
        ctx['source_format'] = artifact.get('artifact_type', 'heroku')
        ctx['image_format'] = artifact.get('image_format',
                                           CONF.api.image_format)

    ctx['template'] = catalog.get('templates', 'basic')

    # TODO(asalkeld) add support to the plan to pass heat parameters.
    ctx['parameters'] = {'app_name': pipeline.name}
    ctx['parameters'].update(
        heat_utils.get_network_parameters(self._clients))

    ctx['stack_id'] = self._create_empty_stack(pipeline)
    ctx['stack_name'] = pipeline.name
    # TODO(asalkeld) integrate the Environment into the context.
    return ctx
def _build_execution_context(self, pipeline):
    """Build (or recover) the execution context for *pipeline*.

    A context saved by the previous execution is returned unchanged;
    otherwise one is assembled from the keystone service catalog, the
    plan's artifacts, and a freshly created empty stack.
    """
    # try and read the context from the previous execution
    cached = self._get_context_from_last_execution(pipeline)
    if cached is not None:
        return cached

    ctx = {}

    # service urls.
    service_catalog = self._clients.keystone().client.service_catalog
    ctx["heat_service_url"] = service_catalog.url_for(
        service_type="orchestration", endpoint_type="publicURL")
    ctx["build_service_url"] = service_catalog.url_for(
        service_type="image_builder", endpoint_type="publicURL")

    # extract context from the plan
    # TODO(asalkeld) this should be versioned.
    plan_obj = objects.registry.Plan.get_by_id(self.context,
                                               pipeline.plan_id)
    ctx["name"] = plan_obj.name
    # Each artifact overwrites these keys, so the last artifact wins —
    # same behavior as before.
    for artifact in plan_obj.raw_content.get("artifacts", []):
        ctx["source_uri"] = artifact["content"]["href"]
        ctx["base_image_id"] = artifact.get("language_pack", "auto")
        ctx["source_format"] = artifact.get("artifact_type", "heroku")
        ctx["image_format"] = artifact.get("image_format",
                                           CONF.api.image_format)

    ctx["template"] = catalog.get("templates", "basic")

    # TODO(asalkeld) add support to the plan to pass heat parameters.
    parameters = {"app_name": pipeline.name}
    parameters.update(heat_utils.get_network_parameters(self._clients))
    ctx["parameters"] = parameters

    ctx["stack_id"] = self._create_empty_stack(pipeline)
    ctx["stack_name"] = pipeline.name
    # TODO(asalkeld) integrate the Environment into the context.
    return ctx