def create_test_recordset():
    """Build an A recordset for www.example.org. with two address records."""
    addresses = ('192.0.2.1', '192.0.2.2')
    return objects.RecordSet(
        name='www.example.org.',
        type='A',
        records=objects.RecordList(
            objects=[objects.Record(data=ip) for ip in addresses]))
def test_action_delete_plus_update(self):
    """A DELETE mixed with an UPDATE must surface as UPDATE."""
    records = objects.RecordList(objects=[
        objects.Record(data='192.0.2.1', action='DELETE'),
        objects.Record(data='192.0.2.2', action='UPDATE'),
    ])
    record_set = objects.RecordSet(name='www.example.org.', type='A',
                                   records=records)
    self.assertEqual(record_set.action, 'UPDATE')
def test_managed(self):
    """One managed record is enough to mark the whole set as managed."""
    records = objects.RecordList(objects=[
        objects.Record(data='192.0.2.1', managed=True),
        objects.Record(data='192.0.2.2'),
    ])
    rs = objects.RecordSet(name='www.example.org.', type='A',
                           records=records)
    self.assertTrue(rs.managed)
def test_action_create_plus_update(self):
    """CREATE combined with UPDATE collapses to UPDATE."""
    records = objects.RecordList(objects=[
        objects.Record(data='192.0.2.1', action='CREATE'),
        objects.Record(data='192.0.2.2', action='UPDATE'),
    ])
    rs = objects.RecordSet(name='www.example.org.', type='A',
                           records=records)
    self.assertEqual('UPDATE', rs.action)
def test_action_delete_only(self):
    """When every record is a DELETE the recordset action is DELETE."""
    rs = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data=addr, action='DELETE')
            for addr in ('192.0.2.1', '192.0.2.2')
        ]))
    self.assertEqual('DELETE', rs.action)
def test_status_pending2(self):
    """One PENDING record forces the whole set to PENDING."""
    pairs = [('192.0.2.3', 'ACTIVE'), ('192.0.2.2', 'PENDING')]
    rs = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data=addr, status=st) for addr, st in pairs
        ]))
    self.assertEqual('PENDING', rs.status)
def test__convert_to_rrset(self):
    """_convert_to_rrset should produce one entry per record in the set."""
    zone = objects.Zone.from_dict({'ttl': 1234})
    rs = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data=ip)
            for ip in ('192.0.2.1', '192.0.2.2')
        ]))
    converted = self.handler._convert_to_rrset(zone, rs)
    self.assertEqual(2, len(converted))
def test_status_error(self):
    """ERROR wins over PENDING and ACTIVE in every ordering."""
    for ordering in itertools.permutations(('ERROR', 'PENDING', 'ACTIVE')):
        records = [
            objects.Record(data='192.0.2.%d' % (i + 1), status=status)
            for i, status in enumerate(ordering)
        ]
        record_set = objects.RecordSet(
            name='www.example.org.', type='A',
            records=objects.RecordList(objects=records))
        self.assertEqual(record_set.status, 'ERROR')
def create_record(domain_id):
    """Create a single record under *domain_id* (v1 API handler).

    Validates the JSON request body, locates or creates the containing
    recordset, then asks central over RPC to persist the record.
    Returns a JSON response with a Location header for the new record.

    :raises exceptions.BadRequest: for records of type SOA, which cannot
        be created manually.
    """
    context = flask.request.environ.get('context')
    values = flask.request.json

    # Schema validation rejects a malformed body before any RPC is made.
    record_schema.validate(values)

    if values['type'] == 'SOA':
        raise exceptions.BadRequest('SOA records cannot be manually created.')

    # v1 exposes no recordset resource, so the containing recordset is
    # created on demand from the record's name/type/ttl.
    recordset = _find_or_create_recordset(context,
                                          domain_id,
                                          values['name'],
                                          values['type'],
                                          values.get('ttl', None))

    record = objects.Record(**_extract_record_values(values))

    central_api = central_rpcapi.CentralAPI.get_instance()
    record = central_api.create_record(context, domain_id,
                                       recordset['id'], record)

    record = _format_record_v1(record, recordset)

    response = flask.jsonify(record_schema.filter(record))
    # NOTE(review): flask/werkzeug responses expose ``status_code``;
    # ``status_int`` is webob-style — confirm the response class in use
    # actually honors this attribute, otherwise the reply stays 200.
    response.status_int = 201
    response.location = flask.url_for('.get_record', domain_id=domain_id,
                                      record_id=record['id'])

    return response
def test_parse_rrset_object_preserves_changes(self):
    """Adapter parse must track both the original and the new records."""
    old_ip = '1.1.1.1'
    new_ip = '8.8.8.8'

    def addresses(record_list):
        return {record.data for record in record_list}

    record_set = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[objects.Record(data=old_ip)]))

    record_set = DesignateAdapter.parse(
        'API_v2', {'records': [new_ip]}, record_set)

    self.assertIn('records', record_set.obj_what_changed())
    self.assertEqual(
        {old_ip},
        addresses(record_set.obj_get_original_value('records')))
    self.assertEqual(
        {new_ip},
        addresses(record_set.obj_get_changes()['records']))
def test_action_create(self):
    """A single CREATE record yields a CREATE recordset action."""
    record_set = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data='192.0.2.1', action='CREATE'),
        ]))
    self.assertEqual(record_set.action, 'CREATE')
def test_status_active(self):
    """A single ACTIVE record yields an ACTIVE recordset."""
    record_set = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data='192.0.2.3', status='ACTIVE'),
        ]))
    self.assertEqual('ACTIVE', record_set.status)
def test_status_deleted(self):
    """A single DELETED record yields a DELETED recordset."""
    rs = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data='192.0.2.2', status='DELETED'),
        ]))
    self.assertEqual('DELETED', rs.status)
def _parse_object(cls, new_recordset, recordset, *args, **kwargs):
    """Apply API-supplied record values onto an existing RecordSet.

    Diffs the record data currently on *recordset* against the
    ``records`` list in *new_recordset*, removing and appending Record
    objects in place so that object change tracking sees only the
    actual delta.  The remaining keys are delegated to the parent
    adapter.

    :raises exceptions.InvalidObject: when ``records`` is present but
        not a list.
    """
    # TODO(Graham): Remove this when
    # https://bugs.launchpad.net/designate/+bug/1432842 is fixed
    try:
        recordset.records
    except exceptions.RelationNotLoaded:
        recordset.records = objects.RecordList()

    # Record data currently stored on the recordset
    original_records = set()
    for record in recordset.records:
        original_records.add(record.data)

    # Get new list of Records
    new_records = set()
    if 'records' in new_recordset:
        if isinstance(new_recordset['records'], list):
            for record in new_recordset['records']:
                new_records.add(record)
        else:
            # Reject a non-list 'records' value with a schema-style error
            # rather than iterating it (a string would iterate per-char).
            errors = objects.ValidationErrorList()
            e = objects.ValidationError()
            e.path = ['records']
            e.validator = 'type'
            e.validator_value = ["list"]
            e.message = ("'%(data)s' is not a valid list of records"
                         % {'data': new_recordset['records']})
            # Add it to the list for later
            errors.append(e)
            raise exceptions.InvalidObject(
                "Provided object does not match "
                "schema", errors=errors, object=cls.ADAPTER_OBJECT())

    # Get differences of Records
    records_to_add = new_records.difference(original_records)
    records_to_rm = original_records.difference(new_records)

    # Update all items except records
    record_update = False
    if 'records' in new_recordset:
        record_update = True
        del new_recordset['records']

    # Remove deleted records if we have provided a records array
    if record_update:
        recordset.records[:] = [
            record for record in recordset.records
            if record.data not in records_to_rm
        ]

    # Add new records
    for record in records_to_add:
        recordset.records.append(objects.Record(data=record))

    return super(RecordSetAPIv2Adapter, cls)._parse_object(
        new_recordset, recordset, *args, **kwargs)
def test_status_error(self):
    """ERROR must dominate PENDING/ACTIVE regardless of record order."""
    statuses = ('ERROR', 'PENDING', 'ACTIVE')
    for s1, s2, s3 in itertools.permutations(statuses):
        rs = objects.RecordSet(
            name='www.example.org.', type='A',
            records=objects.RecordList(objects=[
                objects.Record(data='192.0.2.1', status=s1),
                objects.Record(data='192.0.2.2', status=s2),
                objects.Record(data='192.0.2.3', status=s3),
            ]))
        # Assert directly with a descriptive message instead of printing
        # to stdout and deferring to a boolean flag: the failing
        # permutation now appears in the assertion output itself.
        self.assertEqual(
            'ERROR', rs.status,
            'status was %s for permutation %s %s %s'
            % (rs.status, s1, s2, s3))
def test_action(self):
    """The record's action propagates up to the recordset action."""
    expected = 'CREATE'
    rs = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data='192.0.2.1', action=expected),
        ]))
    self.assertEqual(expected, rs.action)
def test_validate_mx_signed_zero(self):
    """An MX priority of '-0' must fail validation."""
    bad_record = objects.Record(data='-0 mail.example.org.',
                                status='ACTIVE')
    record_set = objects.RecordSet(
        name='www.example.org.', type='MX',
        records=objects.RecordList(objects=[bad_record]))
    self.assertRaises(InvalidObject, record_set.validate)
def test_status_many_expect_active(self):
    """Any ACTIVE record amid DELETED ones keeps the set ACTIVE."""
    pairs = [
        ('192.0.2.2', 'ACTIVE'),
        ('192.0.2.3', 'DELETED'),
        ('192.0.2.4', 'DELETED'),
        ('192.0.2.5', 'DELETED'),
        ('192.0.2.6', 'ACTIVE'),
        ('192.0.2.7', 'ACTIVE'),
        ('192.0.2.8', 'DELETED'),
        ('192.0.2.9', 'ACTIVE'),
        ('192.0.2.10', 'ACTIVE'),
    ]
    rs = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data=addr, status=st) for addr, st in pairs
        ]))
    self.assertEqual('ACTIVE', rs.status)
def load(self, context, request, body):
    """Extract a "central" compatible dict from an API call"""
    valid_keys = ('name', 'type', 'ttl', 'description', 'records')
    result = self._load(context, request, body, valid_keys)

    # Wrap raw record data strings into Record/RecordList objects so
    # central receives fully-typed values.
    if 'records' in result:
        wrapped = [objects.Record(data=entry)
                   for entry in result['records']]
        result['records'] = objects.RecordList(objects=wrapped)
    return result
def test_validate_sshfp_signed_zero_fptype(self):
    """An SSHFP fingerprint type of '-0' must fail validation."""
    bad_record = objects.Record(
        data='0 -0 72d30d211ce8c464de2811e534de23b9be9b4dc4',
        status='ACTIVE')
    rs = objects.RecordSet(
        name='www.example.org.', type='SSHFP',
        records=objects.RecordList(objects=[bad_record]))
    self.assertRaises(InvalidObject, rs.validate)
def create_record(self, domain, recordset, **kwargs):
    """Create a record via the central service using fixture defaults.

    Remaining keyword arguments override the fixture's values.
    """
    ctxt = kwargs.pop('context', self.admin_context)
    fixture_index = kwargs.pop('fixture', 0)
    record_values = self.get_record_fixture(
        recordset['type'], fixture=fixture_index, values=kwargs)
    return self.central_service.create_record(
        ctxt, domain['id'], recordset['id'],
        record=objects.Record(**record_values))
def create_record(self, domain, recordset, fixture=0, values=None,
                  context=None):
    """Create a record in storage; returns (fixture_values, record)."""
    values = values or {}
    context = context or self.admin_context

    record_values = self.get_record_fixture(recordset['type'], fixture,
                                            values)
    created = self.storage.create_record(
        context, domain['id'], recordset['id'],
        objects.Record(**record_values))
    return record_values, created
def test__handle_record_query_forbidden(self):
    """A Forbidden zone lookup must be answered with REFUSED."""
    self.storage.find_recordset.return_value = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(objects=[
            objects.Record(data='192.0.2.2'),
        ]))
    self.storage.find_zone.side_effect = exceptions.Forbidden

    query = dns.message.make_query('www.example.org.', dns.rdatatype.A)
    query.environ = {'context': 'ctx'}

    answers = tuple(self.handler._handle_record_query(query))
    self.assertEqual(1, len(answers))
    self.assertEqual(dns.rcode.REFUSED, answers[0].rcode())
def dnspythonrecord_to_recordset(rname, rdataset):
    """Convert a dnspython rdataset into a Designate RecordSet.

    :param rname: dnspython name object for the owner name
    :param rdataset: dnspython rdataset carrying type, ttl and rdata
    :returns: an objects.RecordSet with its records populated
    """
    record_type = rdatatype.to_text(rdataset.rdtype)

    # Create the other recordsets
    values = {'name': rname.to_text(), 'type': record_type}

    # Only propagate a non-zero TTL.
    # NOTE: was `0L`, a Python 2-only long literal (SyntaxError on
    # Python 3); plain `0` compares identically on both.
    if rdataset.ttl != 0:
        values['ttl'] = rdataset.ttl

    rrset = objects.RecordSet(**values)
    rrset.records = objects.RecordList()

    for rdata in rdataset:
        rr = objects.Record(data=rdata.to_text())
        rrset.records.append(rr)

    return rrset
def dnspythonrecord_to_recordset(rname, rdataset):
    """Convert a dnspython rdataset into a Designate RecordSet."""
    name = rname.to_text()
    # On Python 3, dnspython may hand back bytes; normalize to text.
    if six.PY3 and isinstance(name, bytes):
        name = name.decode('utf-8')

    values = {'name': name, 'type': rdatatype.to_text(rdataset.rdtype)}
    if rdataset.ttl != 0:
        values['ttl'] = rdataset.ttl

    rrset = objects.RecordSet(**values)
    rrset.records = objects.RecordList()
    for entry in rdataset:
        rrset.records.append(objects.Record(data=entry.to_text()))

    return rrset
def test_create_record(self):
    """A freshly created record gets id/hash/created_at and tenant set."""
    _, domain = self.create_domain()
    _, recordset = self.create_recordset(domain, type='A')

    values = {
        'data': '192.0.2.1',
    }

    result = self.storage.create_record(self.admin_context,
                                        domain['id'],
                                        recordset['id'],
                                        record=objects.Record(**values))

    self.assertIsNotNone(result['id'])
    self.assertIsNotNone(result['created_at'])
    self.assertIsNotNone(result['hash'])
    self.assertIsNone(result['updated_at'])
    # assertEqual(expected, actual) — consistent with the rest of the
    # suite so failure messages label the sides correctly.
    self.assertEqual(self.admin_context.tenant, result['tenant_id'])
    self.assertEqual(values['data'], result['data'])
    self.assertIn('status', result)
def _parse_object(cls, new_recordset, recordset, *args, **kwargs):
    """Apply API-supplied record values onto an existing RecordSet.

    Diffs the record data currently on *recordset* against the
    ``records`` list in *new_recordset*, removing and appending Record
    objects in place; remaining keys are delegated to the parent
    adapter.

    :raises exceptions.InvalidObject: when ``records`` is present but
        not a list.
    """
    # TODO(Graham): Remove this when
    # https://bugs.launchpad.net/designate/+bug/1432842 is fixed
    try:
        recordset.records
    except exceptions.RelationNotLoaded:
        recordset.records = objects.RecordList()

    # Record data currently stored on the recordset
    original_records = set()
    for record in recordset.records:
        original_records.add(record.data)

    # Get new list of Records
    new_records = set()
    if 'records' in new_recordset:
        # Validate the type up front: iterating a non-list (e.g. a
        # string) would silently add each character as a "record".
        # Matches the validating variant of this adapter elsewhere in
        # the codebase.
        if isinstance(new_recordset['records'], list):
            for record in new_recordset['records']:
                new_records.add(record)
        else:
            errors = objects.ValidationErrorList()
            e = objects.ValidationError()
            e.path = ['records']
            e.validator = 'type'
            e.validator_value = ["list"]
            e.message = ("'%(data)s' is not a valid list of records"
                         % {'data': new_recordset['records']})
            # Add it to the list for later
            errors.append(e)
            raise exceptions.InvalidObject(
                "Provided object does not match "
                "schema", errors=errors, object=cls.ADAPTER_OBJECT())

    # Get differences of Records
    records_to_add = new_records.difference(original_records)
    records_to_rm = original_records.difference(new_records)

    # Update all items except records
    record_update = False
    if 'records' in new_recordset:
        record_update = True
        del new_recordset['records']

    # Remove deleted records if we have provided a records array
    if record_update:
        recordset.records[:] = [
            record for record in recordset.records
            if record.data not in records_to_rm
        ]

    # Add new records
    for record in records_to_add:
        recordset.records.append(objects.Record(data=record))

    return super(RecordSetAPIv2Adapter, cls)._parse_object(
        new_recordset, recordset, *args, **kwargs)
def test_parse_rrset_object_preserves_changes_multiple_rrs(self):
    """Partially-overlapping old/new record sets must both be tracked."""
    old_ips = ['1.1.1.1', '2.2.2.2']
    new_ips = ['2.2.2.2', '8.8.8.8']

    def addresses(record_list):
        return {record.data for record in record_list}

    record_set = objects.RecordSet(
        name='www.example.org.', type='A',
        records=objects.RecordList(
            objects=[objects.Record(data=ip) for ip in old_ips]))

    record_set = DesignateAdapter.parse(
        'API_v2', {'records': new_ips}, record_set)

    self.assertIn('records', record_set.obj_what_changed())
    self.assertEqual(
        set(old_ips),
        addresses(record_set.obj_get_original_value('records')))
    self.assertEqual(
        set(new_ips),
        addresses(record_set.obj_get_changes()['records']))
def _find_recordsets_with_records(self, context, criterion, zones_table,
                                  recordsets_table, records_table,
                                  one=False, marker=None, limit=None,
                                  sort_key=None, sort_dir=None, query=None,
                                  apply_tenant_criteria=True,
                                  force_index=False):
    """Find recordsets and eagerly load their records, in two passes.

    Pass 1 selects the paginated set of matching recordset IDs (plus
    their zone names); pass 2 fetches the joined recordset/record rows
    for exactly those IDs and assembles RecordSet objects with their
    RecordList populated.  The split avoids putting a LIMIT inside an
    IN-subquery, which MySQL does not support (see link below).

    :returns: a (total_count, RecordSetList) tuple; total_count is
        None when ``context.hide_counts`` is set.
    :raises exceptions.InvalidSortKey: for an unknown sort column.
    :raises exceptions.ValueError: for invalid limit/marker values when
        called directly (the API layer normally validates these).
    """
    sort_key = sort_key or 'created_at'
    sort_dir = sort_dir or 'asc'

    # 'data' and 'status' live on the records table, not recordsets;
    # pull them out of the criterion so they are applied separately.
    data = criterion.pop('data', None)
    status = criterion.pop('status', None)
    filtering_records = data or status

    # sort key will be used for the ORDER BY key in query,
    # needs to use the correct table index for different sort keys
    index_hint = utils.get_rrset_index(sort_key) if force_index else None

    rzjoin = recordsets_table.join(
        zones_table,
        recordsets_table.c.zone_id == zones_table.c.id)

    # Only join records when a record-level filter was requested;
    # otherwise the join would multiply rows for nothing.
    if filtering_records:
        rzjoin = rzjoin.join(
            records_table,
            recordsets_table.c.id == records_table.c.recordset_id)

    inner_q = select([recordsets_table.c.id,  # 0 - RS ID
                      zones_table.c.name]  # 1 - ZONE NAME
                     ).select_from(rzjoin).\
        where(zones_table.c.deleted == '0')

    count_q = select([func.count(distinct(recordsets_table.c.id))]).\
        select_from(rzjoin).where(zones_table.c.deleted == '0')

    if index_hint:
        inner_q = inner_q.with_hint(recordsets_table, index_hint)

    if marker is not None:
        marker = utils.check_marker(recordsets_table, marker,
                                    self.session)

    try:
        inner_q = utils.paginate_query(
            inner_q, recordsets_table, limit,
            [sort_key, 'id'], marker=marker,
            sort_dir=sort_dir)
    except oslodb_utils.InvalidSortKey as sort_key_error:
        raise exceptions.InvalidSortKey(six.text_type(sort_key_error))
    # Any ValueErrors are propagated back to the user as is.
    # Limits, sort_dir and sort_key are checked at the API layer.
    # If however central or storage is called directly, invalid values
    # show up as ValueError
    except ValueError as value_error:
        raise exceptions.ValueError(six.text_type(value_error))

    if apply_tenant_criteria:
        inner_q = self._apply_tenant_criteria(
            context, recordsets_table, inner_q,
            include_null_tenant=False)
        count_q = self._apply_tenant_criteria(
            context, recordsets_table, count_q,
            include_null_tenant=False)

    inner_q = self._apply_criterion(recordsets_table, inner_q, criterion)
    count_q = self._apply_criterion(recordsets_table, count_q, criterion)

    if filtering_records:
        records_criterion = dict((k, v) for k, v in (
            ('data', data), ('status', status)) if v is not None)
        inner_q = self._apply_criterion(records_table, inner_q,
                                        records_criterion)
        count_q = self._apply_criterion(records_table, count_q,
                                        records_criterion)

    inner_q = self._apply_deleted_criteria(context, recordsets_table,
                                           inner_q)
    count_q = self._apply_deleted_criteria(context, recordsets_table,
                                           count_q)

    # Get the list of IDs needed.
    # This is a separate call due to
    # http://dev.mysql.com/doc/mysql-reslimits-excerpt/5.6/en/subquery-restrictions.html  # noqa
    inner_rproxy = self.session.execute(inner_q)
    rows = inner_rproxy.fetchall()
    if len(rows) == 0:
        return 0, objects.RecordSetList()

    # Map recordset id -> zone name for annotating results later.
    id_zname_map = {}
    for r in rows:
        id_zname_map[r[0]] = r[1]
    formatted_ids = six.moves.map(operator.itemgetter(0), rows)

    # Count query does not scale well for large amount of recordsets,
    # don't do it if the header 'OpenStack-DNS-Hide-Counts: True' exists
    if context.hide_counts:
        total_count = None
    else:
        resultproxy = self.session.execute(count_q)
        result = resultproxy.fetchone()
        total_count = 0 if result is None else result[0]

    # Join the 2 required tables
    rjoin = recordsets_table.outerjoin(
        records_table,
        records_table.c.recordset_id == recordsets_table.c.id)

    query = select([
        # RS Info
        recordsets_table.c.id,                   # 0 - RS ID
        recordsets_table.c.version,              # 1 - RS Version
        recordsets_table.c.created_at,           # 2 - RS Created
        recordsets_table.c.updated_at,           # 3 - RS Updated
        recordsets_table.c.tenant_id,            # 4 - RS Tenant
        recordsets_table.c.zone_id,              # 5 - RS Zone
        recordsets_table.c.name,                 # 6 - RS Name
        recordsets_table.c.type,                 # 7 - RS Type
        recordsets_table.c.ttl,                  # 8 - RS TTL
        recordsets_table.c.description,          # 9 - RS Desc
        # R Info
        records_table.c.id,                      # 10 - R ID
        records_table.c.version,                 # 11 - R Version
        records_table.c.created_at,              # 12 - R Created
        records_table.c.updated_at,              # 13 - R Updated
        records_table.c.tenant_id,               # 14 - R Tenant
        records_table.c.zone_id,                 # 15 - R Zone
        records_table.c.recordset_id,            # 16 - R RSet
        records_table.c.data,                    # 17 - R Data
        records_table.c.description,             # 18 - R Desc
        records_table.c.hash,                    # 19 - R Hash
        records_table.c.managed,                 # 20 - R Mngd Flg
        records_table.c.managed_plugin_name,     # 21 - R Mngd Plg
        records_table.c.managed_resource_type,   # 22 - R Mngd Type
        records_table.c.managed_resource_region,  # 23 - R Mngd Rgn
        records_table.c.managed_resource_id,     # 24 - R Mngd ID
        records_table.c.managed_tenant_id,       # 25 - R Mngd T ID
        records_table.c.status,                  # 26 - R Status
        records_table.c.action,                  # 27 - R Action
        records_table.c.serial                   # 28 - R Serial
    ]).select_from(rjoin)

    query = query.where(recordsets_table.c.id.in_(formatted_ids))

    # These make looking up indexes for the Raw Rows much easier,
    # and maintainable
    rs_map = {
        "id": 0,
        "version": 1,
        "created_at": 2,
        "updated_at": 3,
        "tenant_id": 4,
        "zone_id": 5,
        "name": 6,
        "type": 7,
        "ttl": 8,
        "description": 9,
    }

    r_map = {
        "id": 10,
        "version": 11,
        "created_at": 12,
        "updated_at": 13,
        "tenant_id": 14,
        "zone_id": 15,
        "recordset_id": 16,
        "data": 17,
        "description": 18,
        "hash": 19,
        "managed": 20,
        "managed_plugin_name": 21,
        "managed_resource_type": 22,
        "managed_resource_region": 23,
        "managed_resource_id": 24,
        "managed_tenant_id": 25,
        "status": 26,
        "action": 27,
        "serial": 28,
    }

    query, sort_dirs = utils.sort_query(query, recordsets_table,
                                        [sort_key, 'id'],
                                        sort_dir=sort_dir)

    try:
        resultproxy = self.session.execute(query)
        raw_rows = resultproxy.fetchall()
    # Any ValueErrors are propagated back to the user as is.
    # If however central or storage is called directly, invalid values
    # show up as ValueError
    except ValueError as value_error:
        raise exceptions.ValueError(six.text_type(value_error))

    # Assemble RecordSet objects from the flat joined rows.  Rows are
    # ordered so all records of one recordset are adjacent; a change in
    # column 0 (RS ID) marks the start of a new recordset.
    rrsets = objects.RecordSetList()
    rrset_id = None
    current_rrset = None

    for record in raw_rows:
        # If we're looking at the first, or a new rrset
        if record[0] != rrset_id:
            if current_rrset is not None:
                # If this isn't the first iteration
                rrsets.append(current_rrset)
            # Set up a new rrset
            current_rrset = objects.RecordSet()

            rrset_id = record[rs_map['id']]

            # Add all the loaded vars into RecordSet object
            for key, value in rs_map.items():
                setattr(current_rrset, key, record[value])

            current_rrset.zone_name = id_zname_map[current_rrset.id]
            current_rrset.obj_reset_changes(['zone_name'])

            current_rrset.records = objects.RecordList()

            # Outer join: the record columns are NULL when a recordset
            # has no records at all.
            if record[r_map['id']] is not None:
                rrdata = objects.Record()

                for key, value in r_map.items():
                    setattr(rrdata, key, record[value])

                current_rrset.records.append(rrdata)
        else:
            # We've already got an rrset, add the rdata
            if record[r_map['id']] is not None:
                rrdata = objects.Record()

                for key, value in r_map.items():
                    setattr(rrdata, key, record[value])

                current_rrset.records.append(rrdata)

    # If the last record examined was a new rrset, or there is only 1 rrset
    if len(rrsets) == 0 or \
            (len(rrsets) != 0 and rrsets[-1] != current_rrset):
        if current_rrset is not None:
            rrsets.append(current_rrset)

    return total_count, rrsets