def test_poll_for_specific_record_data(self):
    """digaas completes a poll once the zone's A record shows the new data."""
    # create a fresh zone on the nameserver with a known serial + A record
    zone_name = datagen.random_zone_name()
    serial = 123456
    ip = datagen.random_ip()
    tools.add_new_zone_to_bind(zone_name, serial=serial, ip=ip)
    self.assertTrue(dig.zone_exists(zone_name, NAMESERVER))
    self.assertEqual(dig.get_serial(zone_name, NAMESERVER), serial)
    self.assertEqual(dig.get_record_data(zone_name, NAMESERVER, "A"), ip)

    new_ip = datagen.random_ip()
    assert new_ip != ip

    # have digaas poll until the A record's data matches the new address
    resp = self.client.post_poll_request(
        query_name=zone_name,
        nameserver=NAMESERVER,
        serial=0,
        condition="data=%s" % new_ip,
        rdatatype="A",
        start_time=time.time(),
        timeout=15,
        frequency=1,
    )
    self.assertEqual(resp.status_code, 202)
    poll_id = resp.json()["id"]

    # hold off briefly before updating the zone's A record, so the measured
    # poll duration has a known lower bound
    min_duration = 2
    time.sleep(min_duration)
    tools.update_zone(zone_name, serial, new_ip)

    # block until digaas reports the poll request finished, then fetch it
    self.client.wait_for_completed_poll_request(poll_id)
    body = self.client.get_poll_request(poll_id).json()

    # check the entire response body
    self.assertGreater(body["duration"], min_duration)
    self.assertEqual(body["query_name"], zone_name)
    self.assertEqual(body["nameserver"], NAMESERVER)
    self.assertEqual(body["frequency"], 1)
    self.assertEqual(body["timeout"], 15)
    self.assertEqual(body["condition"], "data=%s" % new_ip)
    self.assertEqual(body["rdatatype"], "A")
    self.assertEqual(body["status"], "COMPLETED")
    self.assertEqual(body["id"], poll_id)
def test_timeout_on_polling_for_specific_record_data(self):
    """A poll for record data that never appears ends in ERROR at timeout."""
    # create a fresh zone on the nameserver with a known serial + A record
    zone_name = datagen.random_zone_name()
    serial = 123456
    ip = datagen.random_ip()
    tools.add_new_zone_to_bind(zone_name, serial=serial, ip=ip)
    self.assertTrue(dig.zone_exists(zone_name, NAMESERVER))
    self.assertEqual(dig.get_serial(zone_name, NAMESERVER), serial)
    self.assertEqual(dig.get_record_data(zone_name, NAMESERVER, "A"), ip)

    new_ip = datagen.random_ip()
    assert new_ip != ip

    # have digaas poll for an ip address we never publish, so the poll
    # request is guaranteed to time out
    resp = self.client.post_poll_request(
        query_name=zone_name,
        nameserver=NAMESERVER,
        serial=0,
        condition="data=%s" % new_ip,
        rdatatype="A",
        start_time=time.time(),
        timeout=4,
        frequency=1,
    )
    self.assertEqual(resp.status_code, 202)
    poll_id = resp.json()["id"]

    # block until digaas gives up on the poll, then fetch the final state
    self.client.wait_for_errored_poll_request(poll_id)
    body = self.client.get_poll_request(poll_id).json()

    # check the entire response body; duration is unset on a timed-out poll
    self.assertEqual(body["duration"], None)
    self.assertEqual(body["query_name"], zone_name)
    self.assertEqual(body["nameserver"], NAMESERVER)
    self.assertEqual(body["frequency"], 1)
    self.assertEqual(body["timeout"], 4)
    self.assertEqual(body["condition"], "data=%s" % new_ip)
    self.assertEqual(body["rdatatype"], "A")
    self.assertEqual(body["status"], "ERROR")
    self.assertEqual(body["id"], poll_id)
def _do_modify_record(self):
    """PATCH /zones/ID/recordsets/ID

    Pick a random tenant and one of its recordsets, PUT new record data and
    a new ttl, then (optionally) register the change with digaas and poll
    until the recordset goes ACTIVE or errors out.
    """
    tenant = self.select_random_tenant()
    if not tenant:
        return
    client = self.designate_client.as_user(tenant)
    recordset = tenant.data.select_recordset_for_get()
    if not recordset:
        LOG.error("%s has no recordsets for updating", tenant)
        return
    payload = {
        "records": [datagen.random_ip()],
        "ttl": random.randint(2400, 7200),
    }
    start_time = time.time()
    put_resp = client.put_recordset(
        recordset.zone.id,
        recordset.id,
        data=json.dumps(payload),
        name="/v2/zones/ID/recordsets/ID",
    )
    if not put_resp.ok:
        return
    if CONFIG.use_digaas:
        get_zone = client.get_zone(recordset.zone.id, name='/v2/zones/ID')
        if not get_zone.ok:
            # BUG FIX: this previously logged `zone.id`, but no `zone` local
            # exists in this function (NameError when the branch fired); the
            # zone id lives on the selected recordset.
            LOG.error(
                "Failed to fetch zone %s to grab serial. We need the "
                "serial for digaas to poll for the recordset update",
                recordset.zone.id)
        else:
            self.digaas_behaviors.observe_zone_update(get_zone, start_time)
    # re-fetch the recordset to check whether the update has propagated
    api_call = lambda: client.get_recordset(
        zone_id=put_resp.json()['zone_id'],
        recordset_id=put_resp.json()['id'],
        name='/v2/zones/ID/recordsets/ID - status check')
    self._poll_until_active_or_error(
        api_call=api_call,
        status_function=lambda r: r.json()['status'],
        success_function=lambda: self.async_success(
            put_resp, start_time, '/v2/zones/ID/recordsets/ID - async',
        ),
        failure_function=lambda msg: self.async_failure(
            put_resp, start_time, '/v2/zones/ID/recordsets/ID - async', msg
        ),
    )
def _do_modify_record(self):
    """PATCH /zones/ID/recordsets/ID

    Pick a random tenant and one of its recordsets, PUT new record data and
    a new ttl, then (optionally) register the change with digaas and poll
    until the recordset goes ACTIVE or errors out.
    """
    tenant = self.select_random_tenant()
    if not tenant:
        return
    client = self.designate_client.as_user(tenant)
    recordset = tenant.data.select_recordset_for_get()
    if not recordset:
        LOG.error("%s has no recordsets for updating", tenant)
        return
    payload = {
        "records": [datagen.random_ip()],
        "ttl": random.randint(2400, 7200),
    }
    start_time = time.time()
    put_resp = client.put_recordset(
        recordset.zone.id,
        recordset.id,
        data=json.dumps(payload),
        name="/v2/zones/ID/recordsets/ID",
    )
    if not put_resp.ok:
        return
    if CONFIG.use_digaas:
        get_zone = client.get_zone(recordset.zone.id, name='/v2/zones/ID')
        if not get_zone.ok:
            # BUG FIX: this previously logged `zone.id`, but no `zone` local
            # exists in this function (NameError when the branch fired); the
            # zone id lives on the selected recordset.
            LOG.error(
                "Failed to fetch zone %s to grab serial. We need the "
                "serial for digaas to poll for the recordset update",
                recordset.zone.id
            )
        else:
            self.digaas_behaviors.observe_zone_update(get_zone, start_time)
    # re-fetch the recordset to check whether the update has propagated
    api_call = lambda: client.get_recordset(
        zone_id=put_resp.json()['zone_id'],
        recordset_id=put_resp.json()['id'],
        name='/v2/zones/ID/recordsets/ID - status check')
    self._poll_until_active_or_error(
        api_call=api_call,
        status_function=lambda r: r.json()['status'],
        success_function=lambda: self.async_success(
            put_resp, start_time, '/v2/zones/ID/recordsets/ID - async',
        ),
        failure_function=lambda msg: self.async_failure(
            put_resp, start_time, '/v2/zones/ID/recordsets/ID - async', msg
        ),
    )
def create_recordset(self, zone): record_name = "{0}.{1}".format(randomize("record"), zone.name) payload = { "name" : record_name, "type" : "A", "ttl" : 3600, "records" : [ random_ip() ] } resp = self.client.post_recordset(zone.id, data=json.dumps(payload)) check_resp(resp) recordset = Recordset( zone = zone, id = resp.json()['id'], data = resp.json()['records'][0], type = resp.json()['type']) print '%s: Created recordset %s' % (self.tenant.id, record_name) return recordset
def _do_create_record(self):
    """Create a random A recordset on a random tenant's zone and track it."""
    tenant = self.select_random_tenant()
    if not tenant:
        return
    client = self.designate_client.as_user(tenant)

    zone = tenant.data.select_zone_for_get()
    if zone is None:
        LOG.warning("don't know of any zones to create records on")
        return

    record_name = "{0}.{1}".format(datagen.randomize("record"), zone.name)
    payload = {
        "name": record_name,
        "type": "A",
        "ttl": 3600,
        "records": [datagen.random_ip()],
    }

    start_time = time.time()
    post_resp = client.post_recordset(
        zone.id,
        data=json.dumps(payload),
        name='/v2/zones/ID/recordsets',
    )
    if not post_resp.ok:
        return

    if CONFIG.use_digaas:
        # the recordset response carries no serial, which we need to
        # confidently poll for the update. so fetch the zone and use whatever
        # serial comes back - not perfect, digaas may record slightly longer
        # propagation times than actual.
        get_zone = client.get_zone(zone.id, name='/v2/zones/ID')
        if get_zone.ok:
            self.digaas_behaviors.observe_zone_update(get_zone, start_time)
        else:
            LOG.error(
                "Failed to fetch zone %s to grab serial. We need the "
                "serial for digaas to poll for the recordset create",
                zone.id)

    def check_status():
        # re-fetch the recordset to see whether it has gone ACTIVE or ERROR
        return client.get_recordset(
            zone_id=zone.id,
            recordset_id=post_resp.json()['id'],
            name='/v2/zones/ID/recordsets/ID - status check')

    self._poll_until_active_or_error(
        api_call=check_status,
        status_function=lambda r: r.json()['status'],
        success_function=lambda: self.async_success(
            post_resp, start_time, '/v2/zones/ID/recordsets - async',
        ),
        failure_function=lambda msg: self.async_failure(
            post_resp, start_time, '/v2/zones/ID/recordsets - async', msg
        ),
    )

    # if the recordset went ACTIVE, remember it for deletion later, to help
    # us not run out of recordsets to delete
    resp = check_status()
    if resp.ok and resp.json()['status'] == 'ACTIVE':
        recordset = Recordset(
            zone=zone,
            id=resp.json()['id'],
            data=resp.json()['records'][0],
            type=resp.json()['type'])
        LOG.info("%s -- Added recordset %s", tenant, recordset)
        tenant.data.recordsets_for_delete.append(recordset)
        LOG.info("have %s records", tenant.data.recordset_count())
def _do_create_record(self):
    """POST a random A recordset to one of a random tenant's zones."""
    tenant = self.select_random_tenant()
    if not tenant:
        return
    client = self.designate_client.as_user(tenant)
    zone = tenant.data.select_zone_for_get()
    if zone is None:
        LOG.warning("don't know of any zones to create records on")
        return

    record_name = "{0}.{1}".format(datagen.randomize("record"), zone.name)
    start_time = time.time()
    post_resp = client.post_recordset(
        zone.id,
        data=json.dumps({
            "name": record_name,
            "type": "A",
            "ttl": 3600,
            "records": [datagen.random_ip()],
        }),
        name='/v2/zones/ID/recordsets',
    )
    if not post_resp.ok:
        return

    if CONFIG.use_digaas:
        # we need the zone's serial to confidently poll for the update, but
        # the recordset response doesn't include it. instead, grab the zone
        # and use whatever serial we get. this is not perfect - digaas may
        # record slightly longer propagation times than actual.
        get_zone = client.get_zone(zone.id, name='/v2/zones/ID')
        if not get_zone.ok:
            LOG.error(
                "Failed to fetch zone %s to grab serial. We need the "
                "serial for digaas to poll for the recordset create",
                zone.id)
        else:
            self.digaas_behaviors.observe_zone_update(get_zone, start_time)

    # status check: re-fetch the recordset until it is ACTIVE or errors
    recordset_id = post_resp.json()['id']
    api_call = lambda: client.get_recordset(
        zone_id=zone.id,
        recordset_id=recordset_id,
        name='/v2/zones/ID/recordsets/ID - status check')
    self._poll_until_active_or_error(
        api_call=api_call,
        status_function=lambda r: r.json()['status'],
        success_function=lambda: self.async_success(
            post_resp, start_time, '/v2/zones/ID/recordsets - async',
        ),
        failure_function=lambda msg: self.async_failure(
            post_resp, start_time, '/v2/zones/ID/recordsets - async', msg),
    )

    # on success, track the recordset so deletes have something to work on
    final = api_call()
    if final.ok and final.json()['status'] == 'ACTIVE':
        body = final.json()
        recordset = Recordset(
            zone=zone,
            id=body['id'],
            data=body['records'][0],
            type=body['type'])
        LOG.info("%s -- Added recordset %s", tenant, recordset)
        tenant.data.recordsets_for_delete.append(recordset)
        LOG.info("have %s records", tenant.data.recordset_count())