def sync_resources(context, resource_type):
    """Queue a journal 'create' entry for every resource of the given type.

    Used during full sync to replay all existing resources to ODL.
    """
    sync_driver = base_driver.get_driver(resource_type)
    to_sync = sync_driver.get_resources_for_full_sync(context, resource_type)
    with db_api.autonested_transaction(context.session):
        for res in to_sync:
            journal.record(context, resource_type, res['id'],
                           odl_const.ODL_CREATE, res)
def sync_from_callback_precommit(self, context, operation, res_type, res_id,
                                 resource_dict, **kwargs):
    """Record a journal entry for a callback-driven resource change.

    Runs in the precommit phase. Security group creates are delegated to
    a dedicated helper; security group deletes additionally journal
    deletes for the rules that the DB layer removes by cascade.
    """
    object_type = res_type.singular
    # Unwrap the payload from its {object_type: {...}} envelope when present.
    if resource_dict is not None:
        resource_dict = resource_dict[object_type]
    if (operation == odl_const.ODL_CREATE and
            object_type == odl_const.ODL_SG):
        self._sync_security_group_create_precommit(context, operation,
                                                   object_type, res_id,
                                                   resource_dict)
        return
    # On create the id exists only in the payload; otherwise use res_id.
    object_uuid = (resource_dict.get('id')
                   if operation == 'create' else res_id)
    # NOTE(yamahata): DB auto deletion
    # Security Group Rule under this Security Group needs to
    # be deleted. At NeutronDB layer rules are auto deleted with
    # cascade='all,delete'.
    if (object_type == odl_const.ODL_SG and
            operation == odl_const.ODL_DELETE):
        for rule_id in kwargs['security_group_rule_ids']:
            # Each rule delete depends on the parent SG's uuid.
            journal.record(context, odl_const.ODL_SG_RULE, rule_id,
                           odl_const.ODL_DELETE, [object_uuid])
    assert object_uuid is not None
    journal.record(context, object_type, object_uuid, operation,
                   resource_dict)
def _test_dependency_processing(
        self, test_operation, test_object, test_id, test_data,
        dep_operation, dep_object, dep_id, dep_data):
    """Verify a row whose dependency is 'processing' is deferred, not sent.

    Creates a dependency row stuck in PROCESSING, then a dependent test
    row, runs the journal thread, and asserts the test row returns to
    PENDING without any REST call being made.
    """
    # Mock sendjson to verify that it never gets called.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    ctxt = self.db_context
    journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data)
    row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
    db.update_db_row_state(self.db_context, row[0], odl_const.PROCESSING)

    # Create test row with dependent ID.
    journal.record(ctxt, test_object, test_id, test_operation, test_data)

    # Call journal thread.
    self.thread.sync_pending_entries()

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_context,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # Verify that _json_data was not called.
    self.assertFalse(mock_sendjson.call_count)
def sync_resources(context, resource_type):
    """Journal a 'create' for each existing resource of resource_type.

    Runs inside a savepoint so full sync recording is atomic per type.
    """
    driver = base_driver.get_driver(resource_type)
    pending = driver.get_resources_for_full_sync(context, resource_type)
    with db_api.CONTEXT_WRITER.savepoint.using(context):
        for item in pending:
            journal.record(context, resource_type, item['id'],
                           odl_const.ODL_CREATE, item)
def _test_dependency_processing(self, test_operation, test_object, test_id,
                                test_data, dep_operation, dep_object,
                                dep_id, dep_data):
    """Verify a row whose dependency is 'processing' is deferred, not sent.

    Records a dependency row stuck in PROCESSING and a dependent test
    row, runs the journal thread, and asserts the test row goes back to
    PENDING without any REST traffic.
    """
    # Mock sendjson to verify that it never gets called.
    mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                      'sendjson').start()

    # Create dependency db row and mark as 'processing' so it won't
    # be processed by the journal thread.
    ctxt = self.db_context
    journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data)
    row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)

    # Create test row with dependent ID.
    journal.record(ctxt, test_object, test_id, test_operation, test_data)

    # Call journal thread.
    self.thread.sync_pending_entries()

    # Verify that dependency row is still set at 'processing'.
    rows = db.get_all_db_rows_by_state(self.db_session,
                                       odl_const.PROCESSING)
    self.assertEqual(1, len(rows))

    # Verify that the test row was processed and set back to 'pending'
    # to be processed again.
    rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
    self.assertEqual(1, len(rows))

    # Verify that _json_data was not called.
    self.assertFalse(mock_sendjson.call_count)
def _sync_resources(plugin, context, object_type, collection_name):
    """Fetch every <collection_name> from the plugin and journal a create
    entry for each one."""
    getter = getattr(plugin, 'get_%s' % collection_name)
    for res in getter(context):
        journal.record(context, object_type, res['id'],
                       odl_const.ODL_CREATE, res)
def _sync_security_group_create_precommit(self, context, operation,
                                          object_type, res_id,
                                          resource_dict):
    """Journal a security group create (and its default rules) by
    inspecting the objects newly added to the DB session.

    The rules are not delivered with the precommit event, so they are
    recovered from ``context.session.new``.
    """
    # TODO(yamahata): remove this work around once
    # https://review.openstack.org/#/c/281693/
    # is merged.
    # For now, SG rules aren't passed down with
    # precommit event. We resort to get it by query.
    new_objects = context.session.new
    sgs = [
        sg for sg in new_objects
        if isinstance(sg, securitygroup.SecurityGroup)
    ]
    # Narrow to the specific SG when a resource id was supplied.
    if res_id is not None:
        sgs = [sg for sg in sgs if sg.id == res_id]
    for sg in sgs:
        sg_id = sg['id']
        res = self._make_security_group_dict(sg)
        journal.record(context, object_type, sg_id, operation, res)
        # NOTE(yamahata): when security group is created, default rules
        # are also created.
        # NOTE(yamahata): at this point, rule.security_group_id isn't
        # populated. but it has rule.security_group
        rules = [
            rule for rule in new_objects
            if (isinstance(rule, securitygroup.SecurityGroupRule) and
                rule.security_group == sg)
        ]
        for rule in rules:
            res_rule = self._make_security_group_rule_dict(rule, sg_id)
            journal.record(context, odl_const.ODL_SG_RULE, rule['id'],
                           odl_const.ODL_CREATE, res_rule)
def sync_from_callback_precommit(self, context, operation, res_type, res_id,
                                 resource_dict, **kwargs):
    """Record a journal entry for a callback-driven resource change.

    Security group creates are delegated to a helper; deletes get
    special handling so cascaded SG rule removals and SG rule parent
    dependencies are journaled correctly.
    """
    object_type = res_type.singular
    # Unwrap the payload from its {object_type: {...}} envelope when present.
    if resource_dict is not None:
        resource_dict = resource_dict[object_type]

    if (operation == odl_const.ODL_CREATE and
            object_type == odl_const.ODL_SG):
        self._sync_security_group_create_precommit(
            context, operation, object_type, res_id, resource_dict)
        return

    # On create the id exists only in the payload; otherwise use res_id.
    object_uuid = (resource_dict.get('id')
                   if operation == 'create' else res_id)
    data = resource_dict

    if (operation == odl_const.ODL_DELETE):
        # NOTE(yamahata): DB auto deletion
        # Security Group Rule under this Security Group needs to
        # be deleted. At NeutronDB layer rules are auto deleted with
        # cascade='all,delete'.
        if (object_type == odl_const.ODL_SG):
            for rule_id in kwargs['security_group_rule_ids']:
                journal.record(context, odl_const.ODL_SG_RULE,
                               rule_id, odl_const.ODL_DELETE,
                               [object_uuid])
        elif (object_type == odl_const.ODL_SG_RULE):
            # Set the parent security group id so that dependencies
            # to this security rule deletion can be properly found
            # in the journal.
            data = [kwargs['security_group_id']]

    assert object_uuid is not None
    journal.record(context, object_type, object_uuid, operation, data)
def update_router(self, context, router_id, router):
    """Update the router in Neutron DB and journal the update for ODL."""
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        updated = super(OpenDaylightL3RouterPlugin, self).update_router(
            context, router_id, router)
        journal.record(context, odl_const.ODL_ROUTER, router_id,
                       odl_const.ODL_UPDATE, updated)
    return updated
def delete_router_assoc_precommit(self, context, router_assoc):
    """Journal a BGPVPN update reflecting removal of a router association."""
    vpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id'])
    # Precommit runs inside the DB transaction, so the router is still
    # associated in the fetched dict; drop it explicitly so the
    # journaled state reflects the post-delete association set.
    vpn['routers'].remove(router_assoc['router_id'])
    journal.record(context, odl_const.ODL_BGPVPN, vpn['id'],
                   odl_const.ODL_UPDATE, vpn)
def delete_router_assoc_precommit(self, context, router_assoc):
    """Record the router disassociation as a BGPVPN update journal entry."""
    bgpvpn_id = router_assoc['bgpvpn_id']
    router_id = router_assoc['router_id']
    bgpvpn = self.get_bgpvpn(context, bgpvpn_id)
    # Within the precommit transaction the router is still listed;
    # remove it so the journaled dict shows the post-delete state.
    bgpvpn['routers'].remove(router_id)
    journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'],
                   odl_const.ODL_UPDATE, bgpvpn)
def create_router(self, context, router):
    """Create the router in Neutron DB and journal the create for ODL."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        new_router = super(OpenDaylightL3RouterPlugin, self).create_router(
            context, router)
        journal.record(context, odl_const.ODL_ROUTER, new_router['id'],
                       odl_const.ODL_CREATE, new_router)
    return new_router
def sync_from_callback(self, operation, res_type, res_id, resource_dict):
    """Record a journal entry for a resource change reported via callback.

    :param operation: 'create', 'update' or 'delete'.
    :param res_type: resource type descriptor; its ``singular`` attribute
        names the object type key in the payload.
    :param res_id: resource id supplied by the callback (authoritative
        except on create, where the id lives in the payload).
    :param resource_dict: {object_type: {...}} payload envelope, or None.
    """
    object_type = res_type.singular
    # Unwrap the payload BEFORE deriving the UUID: the previous code
    # subscripted resource_dict prior to its None check.
    if resource_dict is not None:
        resource_dict = resource_dict[object_type]
    # On create the id exists only in the payload; otherwise use res_id.
    object_uuid = (resource_dict['id'] if operation == 'create' else res_id)
    journal.record(db_api.get_session(), object_type, object_uuid,
                   operation, resource_dict)
def delete_bgpvpn_postcommit(self, context, bgpvpn):
    """Journal a BGPVPN delete after the DB transaction commits.

    TODO(vivekanandan): move the journal write to _precommit once
    networking-bgpvpn invokes _precommit for the delete_bgpvpn
    lifecycle.
    """
    journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'],
                   odl_const.ODL_DELETE, [])
    self._postcommit()
def _router_del_association(self, resource, event, trigger, payload=None):
    """Journal a router delete when its l3-flavor association is removed."""
    context = payload.context
    router_id = payload.latest_state.id
    if not self._validate_l3_flavor(context, router_id):
        return
    # TODO(yamahata): process floating ip etc. or just raise error?
    deps = [payload.latest_state.gw_port_id]
    journal.record(context, odl_const.ODL_ROUTER, router_id,
                   odl_const.ODL_DELETE, deps)
def _router_del_association(self, resource, event, trigger, **kwargs):
    """Journal a router delete for the l3-flavor disassociation callback."""
    context = kwargs['context']
    router_db = kwargs['router_db']
    if not self._validate_l3_flavor(context, router_db.id):
        return
    # TODO(yamahata): process floating ip etc. or just raise error?
    journal.record(context, odl_const.ODL_ROUTER, router_db.id,
                   odl_const.ODL_DELETE, [router_db.gw_port_id])
def sync_from_callback(self, context, operation, res_type, res_id,
                       resource_dict):
    """Record a journal entry for a resource change reported via callback.

    :param context: request context for the journal record.
    :param operation: 'create', 'update' or 'delete'.
    :param res_type: resource type descriptor; ``singular`` names the
        object type key in the payload.
    :param res_id: resource id (authoritative except on create).
    :param resource_dict: {object_type: {...}} payload envelope, or None.
    """
    object_type = res_type.singular
    # Unwrap the payload BEFORE deriving the UUID: the previous code
    # subscripted resource_dict prior to its None check.
    if resource_dict is not None:
        resource_dict = resource_dict[object_type]
    object_uuid = (resource_dict['id'] if operation == 'create' else res_id)
    # NOTE(review): the second positional argument was None in the
    # original call (presumably an unused plugin-context slot in this
    # journal.record signature) — preserved as-is; confirm against the
    # journal module.
    journal.record(context, None, object_type, object_uuid, operation,
                   resource_dict)
def _router_del_association(self, resource, event, trigger, **kwargs):
    """Record a journal delete entry for a router leaving the ODL flavor."""
    router_db = kwargs['router_db']
    context = kwargs['context']
    router_id = router_db.id
    if not self._validate_l3_flavor(context, router_id):
        return
    # TODO(yamahata): process floating ip etc. or just raise error?
    dependency_list = [router_db.gw_port_id]
    journal.record(context, odl_const.ODL_ROUTER, router_id,
                   odl_const.ODL_DELETE, dependency_list)
def _router_add_association(self, resource, event, trigger, **kwargs):
    """Journal a router create when it is associated with the ODL flavor."""
    context = kwargs['context']
    router_dict = kwargs['router']
    # The request dict lacks the gateway port; pull it from the DB row.
    router_dict['gw_port_id'] = kwargs['router_db'].gw_port_id
    if not self._validate_l3_flavor(context, kwargs['router_id']):
        return
    journal.record(context, odl_const.ODL_ROUTER, router_dict['id'],
                   odl_const.ODL_CREATE, router_dict)
def sync_from_callback(self, operation, res_type_uri, res_id,
                       resource_dict):
    """Record a journal entry for a callback-driven resource change.

    :param operation: 'create', 'update' or 'delete'.
    :param res_type_uri: REST collection name (e.g. 'security-groups');
        converted to the singular underscored object type.
    :param res_id: resource id (authoritative except on create).
    :param resource_dict: {object_type: {...}} payload envelope, or None.
    """
    object_type = res_type_uri.replace('-', '_')[:-1]
    # Unwrap the payload BEFORE deriving the UUID: the previous code
    # subscripted resource_dict prior to its None check.
    if resource_dict is not None:
        resource_dict = resource_dict[object_type]
    # On create the id exists only in the payload; otherwise use res_id.
    object_uuid = (resource_dict['id'] if operation == 'create' else res_id)
    journal.record(db_api.get_session(), object_type, object_uuid,
                   operation, resource_dict)
def _router_add_association(self, resource, event, trigger, **kwargs):
    """Record a journal create entry for a newly flavor-associated router."""
    context = kwargs['context']
    router = kwargs['router']
    # Enrich the request dict with the gateway port from the DB row.
    router['gw_port_id'] = kwargs['router_db'].gw_port_id
    router_id = kwargs['router_id']
    if not self._validate_l3_flavor(context, router_id):
        return
    journal.record(context, odl_const.ODL_ROUTER, router['id'],
                   odl_const.ODL_CREATE, router)
def _floatingip_delete_precommit(self, resource, event, trigger, **kwargs):
    """Journal a floating IP delete with its router/network dependencies."""
    context = kwargs['context']
    port_id = kwargs['port']['id']
    fip = l3_obj.FloatingIP.get_objects(context,
                                        floating_port_id=port_id)[0]
    if not self._validate_l3_flavor(context, fip.router_id):
        return
    # The delete depends on the owning router and the external network.
    journal.record(context, odl_const.ODL_FLOATINGIP, fip.id,
                   odl_const.ODL_DELETE,
                   [fip.router_id, fip.floating_network_id])
def delete_router_assoc_postcommit(self, context, router_assoc):
    """Journal a BGPVPN update after a router association delete commits.

    TODO(vivekanandan): move the journal write to _precommit once
    networking-bgpvpn invokes _precommit for the delete_router_assoc
    lifecycle.
    """
    vpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id'])
    journal.record(context, odl_const.ODL_BGPVPN, vpn['id'],
                   odl_const.ODL_UPDATE, vpn)
    self._postcommit()
def create_l2_gateway_connection_precommit(self, context,
                                           l2_gateway_connection):
    """Journal an L2 gateway connection create with the key ODL expects.

    ODL wants 'gateway_id' where Neutron uses 'l2_gateway_id', so a
    deep copy is rekeyed before recording.
    """
    odl_conn = copy.deepcopy(l2_gateway_connection)
    odl_conn['gateway_id'] = odl_conn.pop('l2_gateway_id')
    journal.record(context, odl_const.ODL_L2GATEWAY_CONNECTION,
                   odl_conn['id'], odl_const.ODL_CREATE, odl_conn)
def delete_router(self, context, router_id):
    """Delete the router from Neutron DB and journal the delete for ODL."""
    session = db_api.get_writer_session()
    # Capture the gateway port before the row disappears; the journal
    # entry depends on it.
    gw_port_id = self.get_router(context, router_id)['gw_port_id']
    with session.begin(subtransactions=True):
        super(OpenDaylightL3RouterPlugin, self).delete_router(context,
                                                              router_id)
        journal.record(context, odl_const.ODL_ROUTER, router_id,
                       odl_const.ODL_DELETE, [gw_port_id])
def _record_in_journal(context, object_type, operation, data=None):
    """Journal an ML2 operation; data defaults to the current resource
    dict from the plugin context."""
    journal.record(context._plugin_context, object_type,
                   context.current['id'], operation,
                   context.current if data is None else data,
                   ml2_context=context)
def _router_add_association(self, resource, event, trigger, payload=None):
    """Journal a router create from the payload-style callback."""
    context = payload.context
    router_dict = payload.request_body
    # The request body lacks the gateway port; take it from the latest
    # DB state carried by the payload.
    router_dict['gw_port_id'] = payload.latest_state.gw_port_id
    if not self._validate_l3_flavor(context, payload.resource_id):
        return
    journal.record(context, odl_const.ODL_ROUTER, router_dict['id'],
                   odl_const.ODL_CREATE, router_dict)
def _floatingip_update_precommit(self, resource, event, trigger, **kwargs):
    """Journal a floating IP update from the kwargs-style callback."""
    context = kwargs['context']
    fip_db = kwargs['floatingip_db']
    fip_dict = kwargs['floatingip']
    # The update dict lacks the id; take it from the DB row.
    fip_dict['id'] = fip_db.id
    if not self._validate_l3_flavor(context, fip_db.router_id):
        return
    self._update_floatingip_status(context, fip_dict)
    journal.record(context, odl_const.ODL_FLOATINGIP, fip_dict['id'],
                   odl_const.ODL_UPDATE, fip_dict)
def delete_floatingip(self, context, floatingip_id):
    """Delete a floating IP and journal the delete with its dependencies."""
    session = db_api.get_writer_session()
    fip = self.get_floatingip(context, floatingip_id)
    # The delete depends on the owning router and the external network.
    deps = [fip['router_id'], fip['floating_network_id']]
    with session.begin(subtransactions=True):
        super(OpenDaylightL3RouterPlugin, self).delete_floatingip(
            context, floatingip_id)
        journal.record(context, odl_const.ODL_FLOATINGIP, floatingip_id,
                       odl_const.ODL_DELETE, deps)
def _floatingip_update_precommit(self, resource, event, trigger, **kwargs):
    """Record a journal update entry for a changed floating IP."""
    context = kwargs['context']
    fip = kwargs['floatingip']
    fip['id'] = kwargs['floatingip_db'].id
    router_id = kwargs['floatingip_db'].router_id
    if not self._validate_l3_flavor(context, router_id):
        return
    self._update_floatingip_status(context, fip)
    journal.record(context, odl_const.ODL_FLOATINGIP, fip['id'],
                   odl_const.ODL_UPDATE, fip)
def create_router_assoc_precommit(self, context, router_assoc):
    """Journal a BGPVPN update for a new router association.

    ODL supports at most one router per BGPVPN, so an existing
    association with a different router is rejected.
    """
    bgpvpn_id = router_assoc['bgpvpn_id']
    for existing in self.get_router_assocs(context, bgpvpn_id):
        if (router_assoc["router_id"] != existing["router_id"]):
            raise bgpvpn_ext.BGPVPNMultipleRouterAssocNotSupported(
                driver="OpenDaylight V2")
    vpn = self.get_bgpvpn(context, bgpvpn_id)
    journal.record(context, odl_const.ODL_BGPVPN, vpn['id'],
                   odl_const.ODL_UPDATE, vpn)
def _floatingip_delete_precommit(self, resource, event, trigger, **kwargs):
    """Record a journal delete entry for a floating IP being removed."""
    context = kwargs['context']
    fip_data = l3_obj.FloatingIP.get_objects(
        context, floating_port_id=kwargs['port']['id'])[0]
    router_id = fip_data.router_id
    if not self._validate_l3_flavor(context, router_id):
        return
    # Dependencies: owning router and external network.
    journal.record(context, odl_const.ODL_FLOATINGIP, fip_data.id,
                   odl_const.ODL_DELETE,
                   [router_id, fip_data.floating_network_id])
def create_router_assoc_precommit(self, context, router_assoc):
    """Reject multi-router associations, then journal the BGPVPN update.

    ODL allows only a single router per BGPVPN.
    """
    new_router = router_assoc["router_id"]
    assocs = self.get_router_assocs(context, router_assoc['bgpvpn_id'])
    if any(assoc["router_id"] != new_router for assoc in assocs):
        raise bgpvpn_ext.BGPVPNMultipleRouterAssocNotSupported(
            driver="OpenDaylight V2")
    bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id'])
    journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'],
                   odl_const.ODL_UPDATE, bgpvpn)
def _sync_security_group_create_precommit(self, context, operation,
                                          object_type, res_id, sg_dict):
    """Journal a security group create plus its auto-created rules."""
    journal.record(context, object_type, sg_dict['id'], operation, sg_dict)
    # Creating a security group also creates its default rules, so each
    # of those is journaled as a separate create.
    for sg_rule in sg_dict['security_group_rules']:
        journal.record(context, odl_const.ODL_SG_RULE, sg_rule['id'],
                       odl_const.ODL_CREATE, sg_rule)
def _sync_security_group_create_precommit(
        self, context, operation, object_type, res_id, sg_dict):
    """Record journal entries for a new SG and its default rules."""
    journal.record(context, object_type, sg_dict['id'], operation, sg_dict)
    # A security group create implicitly creates default rules; journal
    # them too so ODL sees the full initial state.
    for default_rule in sg_dict['security_group_rules']:
        journal.record(context, odl_const.ODL_SG_RULE,
                       default_rule['id'], odl_const.ODL_CREATE,
                       default_rule)
def create_floatingip(self, context, floatingip,
                      initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
    """Create a floating IP in Neutron DB and journal the create."""
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        fip = super(OpenDaylightL3RouterPlugin, self).create_floatingip(
            context, floatingip, initial_status)
        journal.record(context, odl_const.ODL_FLOATINGIP, fip['id'],
                       odl_const.ODL_CREATE, fip)
    return fip
def full_sync(context):
    """Replay all resources to ODL when a full sync is required.

    Stale pending rows are purged first; a canary network entry is
    journaled last so completion can be detected.
    """
    if not _full_sync_needed(context):
        return
    db.delete_pending_rows(context, _OPS_TO_DELETE_ON_SYNC)
    for resource_type in _ORDERED_ODL_RESOURCES:
        handler = FULL_SYNC_RESOURCES.get(resource_type)
        if handler is not None:
            _sync_resources(context, resource_type, handler)
    journal.record(context, odl_const.ODL_NETWORK, _CANARY_NETWORK_ID,
                   odl_const.ODL_CREATE, _CANARY_NETWORK_DATA)
def full_sync(context):
    """Trigger a complete resync of all resources to ODL if needed."""
    if not _full_sync_needed(context):
        return
    db.delete_pending_rows(context.session, _OPS_TO_DELETE_ON_SYNC)
    for res_type in _ORDERED_ODL_RESOURCES:
        sync_handler = FULL_SYNC_RESOURCES.get(res_type)
        if sync_handler:
            _sync_resources(context, res_type, sync_handler)
    # The canary row marks the point where full sync finished queueing.
    journal.record(context, odl_const.ODL_NETWORK, _CANARY_NETWORK_ID,
                   odl_const.ODL_CREATE, _CANARY_NETWORK_DATA)
def _sync_resource_to_odl(context, row, operation_type, exists_on_odl):
    """Re-journal a maintenance row based on the resource's current state.

    If the resource still exists in Neutron it is journaled with
    operation_type; if it is gone but exists_on_odl, a delete is
    journaled instead. The processed row is then marked complete.
    """
    resource = None
    try:
        resource = _get_latest_resource(context, row)
    except nexc.NotFound:
        # Resource no longer in Neutron; remove it from ODL if present.
        if exists_on_odl:
            journal.record(context, row.object_type, row.object_uuid,
                           odl_const.ODL_DELETE, [])
    else:
        # Resource found; queue it with the requested operation.
        journal.record(context, row.object_type, row.object_uuid,
                       operation_type, resource)
    journal.entry_complete(context, row)
def _floatingip_create_precommit(self, resource, event, trigger, **kwargs):
    """Journal a floating IP create from the kwargs-style callback."""
    context = kwargs['context']
    fip_db = kwargs['floatingip_db']
    if not self._validate_l3_flavor(context, fip_db.router_id):
        return
    fip_dict = copy.deepcopy(kwargs['floatingip'])
    fip_dict['id'] = kwargs['floatingip_id']
    self._update_floatingip_status(context, fip_dict)
    # The request may omit the address when it was auto-allocated;
    # backfill it from the DB row.
    if fip_dict['floating_ip_address'] is None:
        fip_dict['floating_ip_address'] = fip_db.floating_ip_address
    journal.record(context, odl_const.ODL_FLOATINGIP, fip_dict['id'],
                   odl_const.ODL_CREATE, fip_dict)
def _sync_resource_to_odl(context, row, operation_type, exists_on_odl):
    """Re-journal a maintenance row from the resource's current state.

    Journals the latest resource with operation_type when it still
    exists; journals a delete when it is gone but present on ODL.
    Finally marks the row complete.
    """
    resource = None
    try:
        resource = _get_latest_resource(context, row)
    except nexc.NotFound:
        # Gone from Neutron; remove from ODL if it exists there.
        if exists_on_odl:
            journal.record(context, row.object_type, row.object_uuid,
                           odl_const.ODL_DELETE, [])
    else:
        # Still present; queue the requested operation with fresh data.
        journal.record(context, row.object_type, row.object_uuid,
                       operation_type, resource)
    journal.entry_complete(context, row)
def _sync_resource_to_odl(context, row, operation_type, exists_on_odl):
    """Re-journal a maintenance row from the resource's current state.

    Journals the latest resource with operation_type when it still
    exists; journals a delete when it is gone but present on ODL.
    Finally marks the DB row COMPLETED.
    """
    resource = None
    try:
        resource = _get_latest_resource(context, row)
    except nexc.NotFound:
        # Gone from Neutron; remove from ODL if it exists there.
        if exists_on_odl:
            journal.record(context, row.object_type, row.object_uuid,
                           odl_const.ODL_DELETE, [])
    else:
        # Still present; queue the requested operation with fresh data.
        journal.record(context, row.object_type, row.object_uuid,
                       operation_type, resource)
    db.update_db_row_state(context.session, row, odl_const.COMPLETED)
def _floatingip_create_precommit(self, resource, event, trigger, **kwargs):
    """Record a journal create entry for a newly created floating IP."""
    context = kwargs['context']
    fip = copy.deepcopy(kwargs['floatingip'])
    fip_db = kwargs['floatingip_db']
    if not self._validate_l3_flavor(context, fip_db.router_id):
        return
    fip['id'] = kwargs['floatingip_id']
    self._update_floatingip_status(context, fip)
    # Backfill an auto-allocated address from the DB row when the
    # request did not specify one.
    if fip['floating_ip_address'] is None:
        fip['floating_ip_address'] = fip_db.floating_ip_address
    journal.record(context, odl_const.ODL_FLOATINGIP, fip['id'],
                   odl_const.ODL_CREATE, fip)
def create_net_assoc_precommit(self, context, net_assoc):
    """Journal a BGPVPN update for a new network association.

    ODL allows a network to belong to only one BGPVPN, so the request
    is rejected if the network is already associated with another one.
    """
    our_bgpvpn = None
    bgpvpns = self.get_bgpvpns(context)
    for bgpvpn in bgpvpns:
        # ODL only allows a network to be associated with one BGPVPN
        if bgpvpn['id'] == net_assoc['bgpvpn_id']:
            our_bgpvpn = bgpvpn
        else:
            if bgpvpn['networks'] and (net_assoc['network_id'] in
                                       bgpvpn['networks']):
                raise bgpvpn_ext.BGPVPNNetworkAssocExistsAnotherBgpvpn(
                    driver="OpenDaylight V2",
                    network=net_assoc['network_id'],
                    bgpvpn=bgpvpn['id'])
    # NOTE(review): if net_assoc['bgpvpn_id'] is not among the fetched
    # BGPVPNs, our_bgpvpn stays None and the call below raises
    # TypeError — presumably the id is validated upstream; confirm.
    journal.record(context, odl_const.ODL_BGPVPN, our_bgpvpn['id'],
                   odl_const.ODL_UPDATE, our_bgpvpn)
def _router_update_precommit(self, resource, event, trigger, **kwargs):
    """Journal a router update, handling both callback argument styles.

    Accepts either the new payload object or the legacy kwargs form,
    extracts the router data, ensures gw_port_id is present, and
    records the update for ODL.
    """
    # NOTE(manjeets) router update bypasses the driver controller
    # and argument type is different.
    payload = kwargs.get('payload', None)
    if payload:
        context = payload.context
        router_id = payload.states[0]['id']
        router_dict = payload.request_body
        gw_port_id = payload.states[0]['gw_port_id']
    else:
        # TODO(manjeets) Remove this shim once payload is fully adapted
        # https://bugs.launchpad.net/neutron/+bug/1747747
        context = kwargs['context']
        router_id = kwargs['router_db'].id
        router_dict = kwargs['router']
        gw_port_id = kwargs['router_db'].gw_port_id
    if not self._validate_l3_flavor(context, router_id):
        return
    # The update dict may omit the gateway port; backfill it so the
    # journaled data is complete.
    if 'gw_port_id' not in router_dict:
        router_dict['gw_port_id'] = gw_port_id
    journal.record(context, odl_const.ODL_ROUTER, router_id,
                   odl_const.ODL_UPDATE, router_dict)
def test_record_logs_recording(self):
    """journal.record() should log the row's identifying fields."""
    log_fixture = self.useFixture(fixtures.FakeLogger())
    journal.record(self.db_context, *self.UPDATE_ROW)
    for recorded_field in self.UPDATE_ROW[:3]:
        self.assertIn(recorded_field, log_fixture.output)
def _record_in_journal(context, trunk_id, operation, data):
    """Queue a trunk operation in the journal for ODL processing."""
    journal.record(context, odl_const.ODL_TRUNK, trunk_id, operation, data)
def _record_in_journal(self, context, op_const, qos_policy):
    """Journal a QoS policy operation in the rule format ODL expects."""
    policy_data = qos_utils.convert_rules_format(qos_policy.to_dict())
    journal.record(context, odl_const.ODL_QOS_POLICY, policy_data['id'],
                   op_const, policy_data)
def delete_bgpvpn_precommit(self, context, bgpvpn):
    """Journal a BGPVPN delete; the delete carries no dependency data."""
    journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'],
                   odl_const.ODL_DELETE, [])
def update_bgpvpn_precommit(self, context, bgpvpn):
    """Journal a BGPVPN update with the full updated dict."""
    journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'],
                   odl_const.ODL_UPDATE, bgpvpn)
def _sync_resources(context, object_type, handler):
    """Journal a create entry for every resource yielded by handler."""
    for res in handler(context, object_type):
        journal.record(context, object_type, res['id'],
                       odl_const.ODL_CREATE, res)
def _record_in_journal(context, object_type, operation, object_id, data):
    """Queue an operation on the given object in the journal for ODL."""
    journal.record(context, object_type, object_id, operation, data)
def _journal_record(self, context, obj_type, obj_id, operation, obj):
    """Journal an LBaaS operation (type namespaced under 'lbaas/') and
    wake the sync thread."""
    journal.record(context, "lbaas/%s" % obj_type, obj_id, operation, obj)
    self.journal.set_sync_event()
def _record_in_journal(context, object_type, operation, data=None):
    """Journal an ML2 operation; data defaults to the current resource
    dict from the plugin context."""
    journal.record(context._plugin_context, object_type,
                   context.current['id'], operation,
                   context.current if data is None else data)
def test_record_logs_dependencies(self):
    """journal.record() should log the seqnum of a pre-existing entry
    it depends on."""
    pending = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    log_fixture = self.useFixture(
        fixtures.FakeLogger(level=logging.DEBUG))
    journal.record(self.db_context, *self.UPDATE_ROW)
    self.assertIn(str(pending.seqnum), log_fixture.output)