def test_lb_dict_to_provider_dict(self, mock_load_cert, mock_secret,
                                  mock_get_session, mock_get_flavor):
    cert1 = data_models.TLSContainer(certificate='cert 1')
    cert2 = data_models.TLSContainer(certificate='cert 2')
    cert3 = data_models.TLSContainer(certificate='cert 3')
    mock_secret.side_effect = ['X509 POOL CA CERT FILE',
                               'X509 POOL CRL FILE',
                               'ca cert', 'X509 CRL FILE',
                               'ca cert', 'X509 CRL FILE',
                               'X509 POOL CA CERT FILE',
                               'X509 CRL FILE']
    listener_certs = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]}
    pool_cert = data_models.TLSContainer(certificate='pool cert')
    pool_certs = {'tls_cert': pool_cert, 'sni_certs': []}
    mock_load_cert.side_effect = [pool_certs, listener_certs,
                                  listener_certs, listener_certs,
                                  listener_certs]
    mock_get_flavor.return_value = {'shaved_ice': 'cherry'}
    test_lb_dict = {'name': 'lb1',
                    'project_id': self.sample_data.project_id,
                    'vip_subnet_id': self.sample_data.subnet_id,
                    'vip_port_id': self.sample_data.port_id,
                    'vip_address': self.sample_data.ip_address,
                    'vip_network_id': self.sample_data.network_id,
                    'vip_qos_policy_id': self.sample_data.qos_policy_id,
                    'id': self.sample_data.lb_id,
                    'listeners': [],
                    'pools': [],
                    'description': '',
                    'admin_state_up': True,
                    'provisioning_status': constants.PENDING_CREATE,
                    'operating_status': constants.OFFLINE,
                    'flavor_id': 'flavor_id',
                    'provider': 'noop_driver'}
    ref_listeners = copy.deepcopy(self.sample_data.provider_listeners)
    ref_prov_lb_dict = {
        'vip_address': self.sample_data.ip_address,
        'admin_state_up': True,
        'loadbalancer_id': self.sample_data.lb_id,
        'vip_subnet_id': self.sample_data.subnet_id,
        'listeners': ref_listeners,
        'description': '',
        'project_id': self.sample_data.project_id,
        'vip_port_id': self.sample_data.port_id,
        'vip_qos_policy_id': self.sample_data.qos_policy_id,
        'vip_network_id': self.sample_data.network_id,
        'pools': self.sample_data.provider_pools,
        'flavor': {'shaved_ice': 'cherry'},
        'name': 'lb1'}
    vip = data_models.Vip(ip_address=self.sample_data.ip_address,
                          network_id=self.sample_data.network_id,
                          port_id=self.sample_data.port_id,
                          subnet_id=self.sample_data.subnet_id,
                          qos_policy_id=self.sample_data.qos_policy_id)

    provider_lb_dict = utils.lb_dict_to_provider_dict(
        test_lb_dict, vip=vip, db_pools=self.sample_data.test_db_pools,
        db_listeners=self.sample_data.test_db_listeners)

    self.assertEqual(ref_prov_lb_dict, provider_lb_dict)
def put(self, id, load_balancer):
    """Updates a load balancer."""
    load_balancer = load_balancer.loadbalancer
    context = pecan_request.context.get('octavia_context')

    db_lb = self._get_db_lb(context.session, id, show_deleted=False)

    self._auth_validate_action(context, db_lb.project_id,
                               constants.RBAC_PUT)

    if not isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType):
        network_driver = utils.get_network_driver()
        validate.qos_extension_enabled(network_driver)
        if load_balancer.vip_qos_policy_id is not None:
            if db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id:
                validate.qos_policy_exists(load_balancer.vip_qos_policy_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(db_lb.provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_status(lock_session, id)

        # Prepare the data for the driver data model
        lb_dict = load_balancer.to_dict(render_unsets=False)
        lb_dict['id'] = id
        vip_dict = lb_dict.pop('vip', {})
        lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict)
        if 'qos_policy_id' in vip_dict:
            lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']

        # Also prepare the baseline object data
        old_provider_lb = (
            driver_utils.db_loadbalancer_to_provider_loadbalancer(
                db_lb, for_delete=True))

        # Dispatch to the driver
        LOG.info("Sending update Load Balancer %s to provider "
                 "%s", id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.loadbalancer_update,
            old_provider_lb, driver_dm.LoadBalancer.from_dict(lb_dict))

        db_lb_dict = load_balancer.to_dict(render_unsets=False)
        if 'vip' in db_lb_dict:
            db_vip_dict = db_lb_dict.pop('vip')
            self.repositories.vip.update(lock_session, id, **db_vip_dict)
        if db_lb_dict:
            self.repositories.load_balancer.update(lock_session, id,
                                                   **db_lb_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()
    db_lb = self._get_db_lb(context.session, id)
    result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
    return lb_types.LoadBalancerRootResponse(loadbalancer=result)
def put(self, id, load_balancer):
    """Updates a load balancer."""
    load_balancer = load_balancer.loadbalancer
    context = pecan.request.context.get('octavia_context')

    db_lb = self._get_db_lb(context.session, id, show_deleted=False)

    self._auth_validate_action(context, db_lb.project_id,
                               constants.RBAC_PUT)

    if not isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType):
        network_driver = utils.get_network_driver()
        validate.qos_extension_enabled(network_driver)
        if load_balancer.vip_qos_policy_id is not None:
            if db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id:
                validate.qos_policy_exists(load_balancer.vip_qos_policy_id)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(db_lb.provider)

    with db_api.get_lock_session() as lock_session:
        self._test_lb_status(lock_session, id)

        # Prepare the data for the driver data model
        lb_dict = load_balancer.to_dict(render_unsets=False)
        lb_dict['id'] = id
        vip_dict = lb_dict.pop('vip', {})
        lb_dict = driver_utils.lb_dict_to_provider_dict(lb_dict)
        if 'qos_policy_id' in vip_dict:
            lb_dict['vip_qos_policy_id'] = vip_dict['qos_policy_id']

        # Also prepare the baseline object data
        old_provider_lb = (
            driver_utils.db_loadbalancer_to_provider_loadbalancer(db_lb))

        # Dispatch to the driver
        LOG.info("Sending update Load Balancer %s to provider "
                 "%s", id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.loadbalancer_update,
            old_provider_lb, driver_dm.LoadBalancer.from_dict(lb_dict))

        db_lb_dict = load_balancer.to_dict(render_unsets=False)
        if 'vip' in db_lb_dict:
            db_vip_dict = db_lb_dict.pop('vip')
            self.repositories.vip.update(lock_session, id, **db_vip_dict)
        if db_lb_dict:
            self.repositories.load_balancer.update(lock_session, id,
                                                   **db_lb_dict)

    # Force SQL alchemy to query the DB, otherwise we get inconsistent
    # results
    context.session.expire_all()
    db_lb = self._get_db_lb(context.session, id)
    result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse)
    return lb_types.LoadBalancerRootResponse(loadbalancer=result)
def test_lb_dict_to_provider_dict(self, mock_load_cert, mock_secret):
    cert1 = data_models.TLSContainer(certificate='cert 1')
    cert2 = data_models.TLSContainer(certificate='cert 2')
    cert3 = data_models.TLSContainer(certificate='cert 3')
    mock_secret.side_effect = ['X509 POOL CA CERT FILE',
                               'X509 POOL CRL FILE',
                               'ca cert', 'X509 CRL FILE',
                               'ca cert', 'X509 CRL FILE',
                               'X509 POOL CA CERT FILE',
                               'X509 CRL FILE']
    listener_certs = {'tls_cert': cert1, 'sni_certs': [cert2, cert3]}
    pool_cert = data_models.TLSContainer(certificate='pool cert')
    pool_certs = {'tls_cert': pool_cert, 'sni_certs': []}
    mock_load_cert.side_effect = [pool_certs, listener_certs,
                                  listener_certs, listener_certs,
                                  listener_certs]
    test_lb_dict = {'name': 'lb1',
                    'project_id': self.sample_data.project_id,
                    'vip_subnet_id': self.sample_data.subnet_id,
                    'vip_port_id': self.sample_data.port_id,
                    'vip_address': self.sample_data.ip_address,
                    'vip_network_id': self.sample_data.network_id,
                    'vip_qos_policy_id': self.sample_data.qos_policy_id,
                    'id': self.sample_data.lb_id,
                    'listeners': [],
                    'pools': [],
                    'description': '',
                    'admin_state_up': True,
                    'provisioning_status': constants.PENDING_CREATE,
                    'operating_status': constants.OFFLINE,
                    'flavor_id': '',
                    'provider': 'noop_driver'}
    ref_prov_lb_dict = {
        'vip_address': self.sample_data.ip_address,
        'admin_state_up': True,
        'loadbalancer_id': self.sample_data.lb_id,
        'vip_subnet_id': self.sample_data.subnet_id,
        'listeners': self.sample_data.provider_listeners,
        'description': '',
        'project_id': self.sample_data.project_id,
        'vip_port_id': self.sample_data.port_id,
        'vip_qos_policy_id': self.sample_data.qos_policy_id,
        'vip_network_id': self.sample_data.network_id,
        'pools': self.sample_data.provider_pools,
        'name': 'lb1'}
    vip = data_models.Vip(ip_address=self.sample_data.ip_address,
                          network_id=self.sample_data.network_id,
                          port_id=self.sample_data.port_id,
                          subnet_id=self.sample_data.subnet_id,
                          qos_policy_id=self.sample_data.qos_policy_id)

    provider_lb_dict = utils.lb_dict_to_provider_dict(
        test_lb_dict, vip=vip, db_pools=self.sample_data.test_db_pools,
        db_listeners=self.sample_data.test_db_listeners)

    self.assertEqual(ref_prov_lb_dict, provider_lb_dict)
def test_lb_dict_to_provider_dict(self, mock_load_cert):
    cert1 = data_models.TLSContainer(certificate='cert 1')
    cert2 = data_models.TLSContainer(certificate='cert 2')
    cert3 = data_models.TLSContainer(certificate='cert 3')
    mock_load_cert.return_value = {'tls_cert': cert1,
                                   'sni_certs': [cert2, cert3]}
    test_lb_dict = {'name': 'lb1',
                    'project_id': self.sample_data.project_id,
                    'vip_subnet_id': self.sample_data.subnet_id,
                    'vip_port_id': self.sample_data.port_id,
                    'vip_address': self.sample_data.ip_address,
                    'vip_network_id': self.sample_data.network_id,
                    'vip_qos_policy_id': self.sample_data.qos_policy_id,
                    'id': self.sample_data.lb_id,
                    'listeners': [],
                    'pools': [],
                    'description': '',
                    'admin_state_up': True,
                    'provisioning_status': constants.PENDING_CREATE,
                    'operating_status': constants.OFFLINE,
                    'flavor_id': '',
                    'provider': 'noop_driver'}
    ref_prov_lb_dict = {
        'vip_address': self.sample_data.ip_address,
        'admin_state_up': True,
        'loadbalancer_id': self.sample_data.lb_id,
        'vip_subnet_id': self.sample_data.subnet_id,
        'listeners': self.sample_data.provider_listeners,
        'description': '',
        'project_id': self.sample_data.project_id,
        'flavor_id': '',
        'vip_port_id': self.sample_data.port_id,
        'vip_qos_policy_id': self.sample_data.qos_policy_id,
        'vip_network_id': self.sample_data.network_id,
        'pools': self.sample_data.provider_pools,
        'name': 'lb1'}
    vip = data_models.Vip(ip_address=self.sample_data.ip_address,
                          network_id=self.sample_data.network_id,
                          port_id=self.sample_data.port_id,
                          subnet_id=self.sample_data.subnet_id,
                          qos_policy_id=self.sample_data.qos_policy_id)

    provider_lb_dict = utils.lb_dict_to_provider_dict(
        test_lb_dict, vip=vip, db_pools=self.sample_data.test_db_pools,
        db_listeners=self.sample_data.test_db_listeners)

    self.assertEqual(ref_prov_lb_dict, provider_lb_dict)
def post(self, load_balancer):
    """Creates a load balancer."""
    load_balancer = load_balancer.loadbalancer
    context = pecan_request.context.get('octavia_context')

    if not load_balancer.project_id and context.project_id:
        load_balancer.project_id = context.project_id

    if not load_balancer.project_id:
        raise exceptions.ValidationException(detail=_(
            "Missing project ID in request where one is required. "
            "An administrator should check the keystone settings "
            "in the Octavia configuration."))

    self._auth_validate_action(context, load_balancer.project_id,
                               constants.RBAC_POST)

    self._validate_vip_request_object(load_balancer, context=context)

    self._validate_flavor(context.session, load_balancer)

    self._validate_availability_zone(context.session, load_balancer)

    provider = self._get_provider(context.session, load_balancer)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    lock_session = db_api.get_session(autocommit=False)
    try:
        if self.repositories.check_quota_met(
                context.session,
                lock_session,
                data_models.LoadBalancer,
                load_balancer.project_id):
            raise exceptions.QuotaException(
                resource=data_models.LoadBalancer._name())

        db_lb, db_pools, db_lists = None, None, None

        lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict(
            render_unsets=False
        ))
        vip_dict = lb_dict.pop('vip', {})

        # Make sure we store the right provider in the DB
        lb_dict['provider'] = driver.name

        # NoneType can be weird here, have to force type a second time
        listeners = lb_dict.pop('listeners', []) or []
        pools = lb_dict.pop('pools', []) or []

        flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver,
                                                    lb_dict)

        az_dict = self._validate_and_return_az_dict(lock_session, driver,
                                                    lb_dict)
        # Validate the network as soon as we have the AZ data
        validate.network_allowed_by_config(
            load_balancer.vip_network_id,
            valid_networks=az_dict.get(constants.VALID_VIP_NETWORKS))

        db_lb = self.repositories.create_load_balancer_and_vip(
            lock_session, lb_dict, vip_dict)

        # Pass the flavor dictionary through for the provider drivers
        # This is a "virtual" lb_dict item that includes the expanded
        # flavor dict instead of just the flavor_id we store in the DB.
        lb_dict['flavor'] = flavor_dict

        # Do the same with the availability_zone dict
        lb_dict['availability_zone'] = az_dict

        # See if the provider driver wants to manage the VIP port
        # This will still be called if the user provided a port to
        # allow drivers to collect any required information about the
        # VIP port.
        octavia_owned = False
        try:
            provider_vip_dict = driver_utils.vip_dict_to_provider_dict(
                vip_dict)
            vip_dict = driver_utils.call_provider(
                driver.name, driver.create_vip_port, db_lb.id,
                db_lb.project_id, provider_vip_dict)
            vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict)
        except exceptions.ProviderNotImplementedError:
            # create vip port if not exist, driver didn't want to create
            # the VIP port
            vip = self._create_vip_port_if_not_exist(db_lb)
            LOG.info('Created VIP port %s for provider %s.',
                     vip.port_id, driver.name)
            # If a port_id wasn't passed in and we made it this far
            # we created the VIP
            if 'port_id' not in vip_dict or not vip_dict['port_id']:
                octavia_owned = True

        # Check if the driver claims octavia owns the VIP port.
        if vip.octavia_owned:
            octavia_owned = True

        self.repositories.vip.update(
            lock_session, db_lb.id, ip_address=vip.ip_address,
            port_id=vip.port_id, network_id=vip.network_id,
            subnet_id=vip.subnet_id, octavia_owned=octavia_owned)

        if listeners or pools:
            db_pools, db_lists = self._graph_create(
                context.session, lock_session, db_lb, listeners, pools)

        # Prepare the data for the driver data model
        driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
            lb_dict, vip, db_pools, db_lists)

        # Dispatch to the driver
        LOG.info("Sending create Load Balancer %s to provider %s",
                 db_lb.id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.loadbalancer_create,
            driver_dm.LoadBalancer.from_dict(driver_lb_dict))

        lock_session.commit()
    except odb_exceptions.DBDuplicateEntry:
        lock_session.rollback()
        raise exceptions.IDAlreadyExists()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()

    db_lb = self._get_db_lb(context.session, db_lb.id)

    result = self._convert_db_to_type(
        db_lb, lb_types.LoadBalancerFullResponse)
    return lb_types.LoadBalancerFullRootResponse(loadbalancer=result)
def generate(flow_list, output_directory):
    # Create the diagrams
    base_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             os.path.pardir)
    diagram_list = []
    with open(os.path.join(base_path, flow_list), 'r') as flowlist:
        for row in flowlist:
            if row.startswith('#'):
                continue
            current_tuple = tuple(row.strip().split(' '))
            current_class = getattr(
                importlib.import_module(current_tuple[0]), current_tuple[1])
            current_instance = current_class()
            get_flow_method = getattr(current_instance, current_tuple[2])
            if (current_tuple[1] == 'AmphoraFlows' and
                    current_tuple[2] == 'get_failover_amphora_flow'):
                amp1 = dmh.generate_amphora()
                amp2 = dmh.generate_amphora()
                lb = dmh.generate_load_balancer(amphorae=[amp1, amp2])
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    amp1 = amp1.to_dict()
                current_engine = engines.load(get_flow_method(amp1, 2))
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] == 'get_create_load_balancer_flow'):
                current_engine = engines.load(
                    get_flow_method(constants.TOPOLOGY_ACTIVE_STANDBY))
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] == 'get_delete_load_balancer_flow'):
                lb = dmh.generate_load_balancer()
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    delete_flow = get_flow_method(lb)
                else:
                    delete_flow, store = get_flow_method(lb)
                current_engine = engines.load(delete_flow)
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] ==
                  'get_cascade_delete_load_balancer_flow'):
                listeners = [{constants.LISTENER_ID:
                              '368dffc7-7440-4ee0-aca5-11052d001b05'},
                             {constants.LISTENER_ID:
                              'd9c45ec4-9dbe-491b-9f21-6886562348bf'}]
                pools = [{constants.POOL_ID:
                          '6886a40b-1f2a-41a3-9ece-5c51845a7ac4'},
                         {constants.POOL_ID:
                          '08ada7a2-3eff-42c6-bdd8-b6f2ecd73358'}]
                lb = dmh.generate_load_balancer()
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    delete_flow = get_flow_method(lb, listeners, pools)
                else:
                    delete_flow, store = get_flow_method(lb)
                current_engine = engines.load(delete_flow)
            elif (current_tuple[1] == 'LoadBalancerFlows' and
                  current_tuple[2] == 'get_failover_LB_flow'):
                amp1 = dmh.generate_amphora()
                amp2 = dmh.generate_amphora()
                lb = dmh.generate_load_balancer(
                    amphorae=[amp1, amp2],
                    topology=constants.TOPOLOGY_ACTIVE_STANDBY)
                if 'v2' in current_tuple[0]:
                    lb = utils.lb_dict_to_provider_dict(lb.to_dict())
                    flavor = {constants.LOADBALANCER_TOPOLOGY:
                              constants.TOPOLOGY_ACTIVE_STANDBY}
                    lb[constants.FLAVOR] = flavor
                    amp1 = amp1.to_dict()
                    amp2 = amp2.to_dict()
                current_engine = engines.load(
                    get_flow_method([amp1, amp2], lb))
            elif (current_tuple[1] == 'MemberFlows' and
                  current_tuple[2] == 'get_batch_update_members_flow'):
                current_engine = engines.load(get_flow_method([], [], []))
            else:
                current_engine = engines.load(get_flow_method())
            current_engine.compile()
            # We need to render svg and not dot here so we can scale
            # the image in the restructured text page
            src = graphviz.Source(
                current_engine.compilation.execution_graph.export_to_dot())
            src.format = 'svg'
            src.render(filename=current_tuple[1] + '-' + current_tuple[2],
                       directory=os.path.join(base_path, output_directory),
                       cleanup=True)
            diagram_list.append((current_tuple[1], current_tuple[2]))

    # Create the class docs
    diagram_list = sorted(diagram_list, key=getDiagKey)
    class_tracker = None
    current_doc_file = None
    for doc_tuple in diagram_list:
        # If we are still working on the same class, append
        if doc_tuple[0] == class_tracker:
            current_doc_file.write('\n')
            current_doc_file.write(doc_tuple[1] + '\n')
            current_doc_file.write('-' * len(doc_tuple[1]) + '\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: html\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('        :width: 660px\n')
            current_doc_file.write('        :target: ../../../_images/' +
                                   doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: latex\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('        :width: 660px\n')
        # First or new class, create the file
        else:
            if current_doc_file is not None:
                current_doc_file.close()
            current_doc_file = open(
                os.path.join(base_path, output_directory,
                             doc_tuple[0] + '.rst'), 'w+')
            class_tracker = doc_tuple[0]
            file_title = constants.FLOW_DOC_TITLES.get(doc_tuple[0],
                                                       'Unknown Flows')
            current_doc_file.write('=' * len(file_title) + '\n')
            current_doc_file.write(file_title + '\n')
            current_doc_file.write('=' * len(file_title) + '\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. contents::\n')
            current_doc_file.write('   :depth: 2\n')
            current_doc_file.write('   :backlinks: top\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: html\n')
            current_doc_file.write('\n')
            current_doc_file.write('    Click on any flow to view full '
                                   'size.\n')
            current_doc_file.write('\n')
            current_doc_file.write(doc_tuple[1] + '\n')
            current_doc_file.write('-' * len(doc_tuple[1]) + '\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: html\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('        :width: 660px\n')
            current_doc_file.write('        :target: ../../../_images/' +
                                   doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('\n')
            current_doc_file.write('.. only:: latex\n')
            current_doc_file.write('\n')
            current_doc_file.write('    .. image:: ' + doc_tuple[0] + '-' +
                                   doc_tuple[1] + '.svg\n')
            current_doc_file.write('        :width: 660px\n')

    current_doc_file.close()
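
# The sorted() call above relies on a getDiagKey() helper that is not shown
# in this excerpt. A minimal sketch of what such a key function could look
# like (hypothetical; the real module may define it differently):


def getDiagKey(item):
    # Sort diagrams by flow class name, then by flow method name, so the
    # diagrams for one class are grouped together in the generated docs.
    return item[0] + '-' + item[1]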
def post(self, load_balancer):
    """Creates a load balancer."""
    load_balancer = load_balancer.loadbalancer
    context = pecan.request.context.get('octavia_context')

    if not load_balancer.project_id and context.project_id:
        load_balancer.project_id = context.project_id

    if not load_balancer.project_id:
        raise exceptions.ValidationException(detail=_(
            "Missing project ID in request where one is required."))

    self._auth_validate_action(context, load_balancer.project_id,
                               constants.RBAC_POST)

    self._validate_vip_request_object(load_balancer)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(load_balancer.provider)

    lock_session = db_api.get_session(autocommit=False)
    try:
        if self.repositories.check_quota_met(context.session,
                                             lock_session,
                                             data_models.LoadBalancer,
                                             load_balancer.project_id):
            raise exceptions.QuotaException

        db_lb, db_pools, db_lists = None, None, None

        lb_dict = db_prepare.create_load_balancer(
            load_balancer.to_dict(render_unsets=False))
        vip_dict = lb_dict.pop('vip', {})

        # Make sure we store the right provider in the DB
        lb_dict['provider'] = driver.name

        # NoneType can be weird here, have to force type a second time
        listeners = lb_dict.pop('listeners', []) or []
        pools = lb_dict.pop('pools', []) or []

        # TODO(johnsom) Remove flavor from the lb_dict
        # as it has not been implemented beyond the API yet.
        # Remove this line when it is implemented.
        lb_dict.pop('flavor', None)

        db_lb = self.repositories.create_load_balancer_and_vip(
            lock_session, lb_dict, vip_dict)

        # See if the provider driver wants to create the VIP port
        try:
            provider_vip_dict = driver_utils.vip_dict_to_provider_dict(
                vip_dict)
            vip_dict = driver_utils.call_provider(driver.name,
                                                  driver.create_vip_port,
                                                  db_lb.id,
                                                  db_lb.project_id,
                                                  provider_vip_dict)
            vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict)
        except exceptions.ProviderNotImplementedError:
            # create vip port if not exist, driver didn't want to create
            # the VIP port
            vip = self._create_vip_port_if_not_exist(db_lb)
            LOG.info('Created VIP port %s for provider %s.',
                     vip.port_id, driver.name)

        self.repositories.vip.update(lock_session, db_lb.id,
                                     ip_address=vip.ip_address,
                                     port_id=vip.port_id,
                                     network_id=vip.network_id,
                                     subnet_id=vip.subnet_id)

        if listeners or pools:
            db_pools, db_lists = self._graph_create(
                context.session, lock_session, db_lb, listeners, pools)

        # Prepare the data for the driver data model
        driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
            lb_dict, vip, db_pools, db_lists)

        # Dispatch to the driver
        LOG.info("Sending create Load Balancer %s to provider %s",
                 db_lb.id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.loadbalancer_create,
            driver_dm.LoadBalancer.from_dict(driver_lb_dict))

        lock_session.commit()
    except odb_exceptions.DBDuplicateEntry:
        lock_session.rollback()
        raise exceptions.IDAlreadyExists()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()

    db_lb = self._get_db_lb(context.session, db_lb.id)
    result = self._convert_db_to_type(db_lb,
                                      lb_types.LoadBalancerFullResponse)
    return lb_types.LoadBalancerFullRootResponse(loadbalancer=result)
def post(self, load_balancer):
    """Creates a load balancer."""
    load_balancer = load_balancer.loadbalancer
    context = pecan.request.context.get('octavia_context')

    if not load_balancer.project_id and context.project_id:
        load_balancer.project_id = context.project_id

    if not load_balancer.project_id:
        raise exceptions.ValidationException(detail=_(
            "Missing project ID in request where one is required."))

    self._auth_validate_action(context, load_balancer.project_id,
                               constants.RBAC_POST)

    self._validate_vip_request_object(load_balancer)

    self._validate_flavor(context.session, load_balancer)

    provider = self._get_provider(context.session, load_balancer)

    # Load the driver early as it also provides validation
    driver = driver_factory.get_driver(provider)

    lock_session = db_api.get_session(autocommit=False)
    try:
        if self.repositories.check_quota_met(
                context.session,
                lock_session,
                data_models.LoadBalancer,
                load_balancer.project_id):
            raise exceptions.QuotaException(
                resource=data_models.LoadBalancer._name())

        db_lb, db_pools, db_lists = None, None, None

        lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict(
            render_unsets=False
        ))
        vip_dict = lb_dict.pop('vip', {})

        # Make sure we store the right provider in the DB
        lb_dict['provider'] = driver.name

        # NoneType can be weird here, have to force type a second time
        listeners = lb_dict.pop('listeners', []) or []
        pools = lb_dict.pop('pools', []) or []

        flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver,
                                                    lb_dict)

        db_lb = self.repositories.create_load_balancer_and_vip(
            lock_session, lb_dict, vip_dict)

        # Pass the flavor dictionary through for the provider drivers
        # This is a "virtual" lb_dict item that includes the expanded
        # flavor dict instead of just the flavor_id we store in the DB.
        lb_dict['flavor'] = flavor_dict

        # See if the provider driver wants to create the VIP port
        octavia_owned = False
        try:
            provider_vip_dict = driver_utils.vip_dict_to_provider_dict(
                vip_dict)
            vip_dict = driver_utils.call_provider(
                driver.name, driver.create_vip_port, db_lb.id,
                db_lb.project_id, provider_vip_dict)
            vip = driver_utils.provider_vip_dict_to_vip_obj(vip_dict)
        except exceptions.ProviderNotImplementedError:
            # create vip port if not exist, driver didn't want to create
            # the VIP port
            vip = self._create_vip_port_if_not_exist(db_lb)
            LOG.info('Created VIP port %s for provider %s.',
                     vip.port_id, driver.name)
            # If a port_id wasn't passed in and we made it this far
            # we created the VIP
            if 'port_id' not in vip_dict or not vip_dict['port_id']:
                octavia_owned = True

        self.repositories.vip.update(
            lock_session, db_lb.id, ip_address=vip.ip_address,
            port_id=vip.port_id, network_id=vip.network_id,
            subnet_id=vip.subnet_id, octavia_owned=octavia_owned)

        if listeners or pools:
            db_pools, db_lists = self._graph_create(
                context.session, lock_session, db_lb, listeners, pools)

        # Prepare the data for the driver data model
        driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
            lb_dict, vip, db_pools, db_lists)

        # Dispatch to the driver
        LOG.info("Sending create Load Balancer %s to provider %s",
                 db_lb.id, driver.name)
        driver_utils.call_provider(
            driver.name, driver.loadbalancer_create,
            driver_dm.LoadBalancer.from_dict(driver_lb_dict))

        lock_session.commit()
    except odb_exceptions.DBDuplicateEntry:
        lock_session.rollback()
        raise exceptions.IDAlreadyExists()
    except Exception:
        with excutils.save_and_reraise_exception():
            lock_session.rollback()

    db_lb = self._get_db_lb(context.session, db_lb.id)

    result = self._convert_db_to_type(
        db_lb, lb_types.LoadBalancerFullResponse)
    return lb_types.LoadBalancerFullRootResponse(loadbalancer=result)
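
# The _create_vip_port_if_not_exist() fallback used in the post() methods
# above is not included in this excerpt. A minimal sketch of such a helper
# (hypothetical; shown only to illustrate the path taken when the provider
# driver raises ProviderNotImplementedError and Octavia allocates the VIP
# port itself through the configured network driver):


def _create_vip_port_if_not_exist(self, load_balancer_db):
    """Create the VIP port via the network driver if one does not exist."""
    network_driver = utils.get_network_driver()
    return network_driver.allocate_vip(load_balancer_db)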