def get_in_interval(first_month, last_month):
    """Return all assignments whose period lies within [first_month, last_month].

    :param first_month: lower bound compared against assignments.first_month
    :param last_month: upper bound compared against assignments.last_month
    :returns: list of Assignment instances (via Recordset.to_list)
    :raises AllocatException: if the query fails to execute
    """
    # Use a prepared statement with bound values instead of '%s' string
    # interpolation: the original formatting was vulnerable to SQL injection
    # and broke on month values containing quote characters.
    qry = QtSql.QSqlQuery()
    qry.prepare("SELECT * FROM assignments "
                "WHERE first_month>=? "
                "AND last_month<=?")
    qry.addBindValue(first_month)
    qry.addBindValue(last_month)
    if not qry.exec_():
        raise AllocatException(qry.lastError().text())
    return Recordset.to_list(qry, Assignment)
def get_all():
    """Fetch every assignment, joined with its project nickname and employee name.

    :returns: list of Assignment instances (via Recordset.to_list)
    :raises AllocatException: if the query fails to execute
    """
    statement = ("SELECT asn.*, prj.nickname as project_name, emp.name as employee_name "
                 "FROM assignments asn "
                 "JOIN projects prj on (asn.project_id=prj.id) "
                 "JOIN employees emp on (asn.employee_id=emp.id)")
    query = QtSql.QSqlQuery()
    if query.exec_(statement):
        return Recordset.to_list(query, Assignment)
    raise AllocatException(query.lastError().text())
def get_projects(self):
    """Return the Project rows this record is assigned to, or None if there
    are no assignments.

    :raises AllocatException: if the projects query fails to execute
    """
    from models import Project
    self.set_assignments()
    if not self.assignments:
        return None
    # De-duplicate project ids before building the IN clause.
    unique_ids = {asn.project_id for asn in self.assignments}
    id_list = ','.join(str(pid) for pid in unique_ids)
    statement = "SELECT nickname FROM projects WHERE id IN (%s)" % id_list
    if not self.query.exec_(statement):
        raise AllocatException(self.query.lastError().text())
    return Recordset.to_list(self.query, Project)
def set_assignments(self):
    """Lazily load and cache this record's assignments.

    No-op when ``self.assignments`` is already populated.

    :raises AllocatException: if the query fails to execute
    """
    if self.assignments:
        return
    from models import Assignment
    statement = ("SELECT asn.*, prj.nickname as project_name, emp.name as employee_name "
                 "FROM assignments asn "
                 "JOIN projects prj on (asn.project_id=prj.id) "
                 "JOIN employees emp on (asn.employee_id=emp.id) "
                 "WHERE asn.employee_id=%s" % self.id)
    if not self.query.exec_(statement):
        raise AllocatException(self.query.lastError().text())
    self.assignments = Recordset.to_list(self.query, Assignment)
def create_recordset(self, zone): record_name = "{0}.{1}".format(randomize("record"), zone.name) payload = { "name" : record_name, "type" : "A", "ttl" : 3600, "records" : [ random_ip() ] } resp = self.client.post_recordset(zone.id, data=json.dumps(payload)) check_resp(resp) recordset = Recordset( zone = zone, id = resp.json()['id'], data = resp.json()['records'][0], type = resp.json()['type']) print '%s: Created recordset %s' % (self.tenant.id, record_name) return recordset
def gather_recordsets(self):
    """Consume one pagination link from the frontier and bucket its A records.

    Fetches a single page of recordsets for one (zone, tenant) pair, queues
    the 'next' pagination link when present, and distributes the A records
    between the tenant's GET and DELETE pools, keeping the two roughly
    balanced. Fires ``done_gathering`` once the frontier is exhausted.
    """
    if self.done():
        LOG.debug("we're done!")
        self.done_gathering.fire()
        return
    zone, link, tenant = self.frontier.pop_next_recordset_link()
    if not link:
        return
    path, params = self.frontier.parse_url(link)
    params['sort_key'] = 'id'
    client = self.designate_client.as_user(tenant)
    resp = client.get(path, name='/v2/zones/ID/recordsets', params=params)
    if not resp.ok:
        LOG.error("failed to list recordsets while gathering recordsets")
        return
    recordsets = resp.json()['recordsets']
    links = resp.json()['links']
    LOG.info("%s -- fetched %s recordsets for tenant %s",
             resp.request.url, len(recordsets), tenant)
    if 'next' in links:
        self.frontier.add_recordset_link(zone, links['next'], tenant)
    for r in recordsets:
        # Case-insensitive type check for consistency with
        # list_a_recordsets, which already normalizes via .upper().
        if r['type'].upper() != 'A':
            continue
        # we're assuming only one record per recordset.
        # this is guaranteed as long we're in control of data creation
        recordset = Recordset(zone, r['id'], r['records'][0], r['type'])
        # Keep the GET and DELETE pools roughly the same size.
        if len(tenant.data.recordsets_for_get) <= len(tenant.data.recordsets_for_delete):
            tenant.data.recordsets_for_get.append(recordset)
        else:
            tenant.data.recordsets_for_delete.append(recordset)
def list_a_recordsets(self, zone, limit): frontier = PaginationFrontier([self.tenant]) found_recordsets = [] initial_link = '/v2/zones/%s/recordsets' % zone.id frontier.add_recordset_link(zone, initial_link, self.tenant) while not frontier.is_empty(): zone, link, tenant = frontier.pop_next_recordset_link() if not link: return found_recordsets path, params = frontier.parse_url(link) params['sort_key'] = 'id' print "%s: GET %s" % (self.tenant.id, link) resp = self.client.get(path, params=params) check_resp(resp) recordsets = resp.json()['recordsets'] links = resp.json()['links'] if 'next' in links: next_link = links['next'] frontier.add_recordset_link(zone, next_link, tenant) for r in recordsets: if r['type'].upper() != 'A': continue recordset = Recordset(zone, r['id'], r['records'][0], r['type']) found_recordsets.append(recordset) if len(found_recordsets) >= limit: return found_recordsets return found_recordsets
def _do_create_record(self):
    """Create a random A recordset on a random tenant's zone and poll it to
    ACTIVE, recording async timing results.

    Picks a random tenant and one of its zones, POSTs a new recordset,
    optionally notifies digaas with the zone's serial, then polls the
    recordset status until ACTIVE or error. On success the recordset is
    added to the tenant's delete pool.
    """
    tenant = self.select_random_tenant()
    # No tenants known yet -- nothing to do.
    if not tenant:
        return
    client = self.designate_client.as_user(tenant)
    zone = tenant.data.select_zone_for_get()
    if zone is None:
        LOG.warning("don't know of any zones to create records on")
        return
    record_name = "{0}.{1}".format(datagen.randomize("record"), zone.name)
    payload = { "name": record_name,
                "type": "A",
                "ttl": 3600,
                "records": [datagen.random_ip()] }
    # start_time anchors both the digaas observation and the async timing.
    start_time = time.time()
    post_resp = client.post_recordset(
        zone.id, data=json.dumps(payload),
        name='/v2/zones/ID/recordsets',
    )
    # If the POST itself failed there is nothing to poll for.
    if not post_resp.ok:
        return
    if CONFIG.use_digaas:
        # we need the zone's serial to confidently poll for the update.
        # the recordset doesn't have the serial. instead, grab the zone
        # and use whatever serial we get. this is not perfect - digaas may
        # record slightly longer propagation times than actual.
        get_zone = client.get_zone(zone.id, name='/v2/zones/ID')
        if not get_zone.ok:
            LOG.error(
                "Failed to fetch zone %s to grab serial. We need the "
                "serial for digaas to poll for the recordset create",
                zone.id)
        else:
            self.digaas_behaviors.observe_zone_update(get_zone, start_time)
    # Status-check call reused by the poll loop and the final re-check below.
    api_call = lambda: client.get_recordset(
        zone_id=zone.id,
        recordset_id=post_resp.json()['id'],
        name='/v2/zones/ID/recordsets/ID - status check')
    self._poll_until_active_or_error(
        api_call=api_call,
        status_function=lambda r: r.json()['status'],
        success_function=lambda: self.async_success(
            post_resp, start_time,
            '/v2/zones/ID/recordsets - async',
        ),
        failure_function=lambda msg: self.async_failure(
            post_resp, start_time,
            '/v2/zones/ID/recordsets - async', msg),
    )
    # if we successfully created the recordset, add it to our list
    resp = api_call()
    if resp.ok and resp.json()['status'] == 'ACTIVE':
        recordset = Recordset(zone=zone,
                              id=resp.json()['id'],
                              data=resp.json()['records'][0],
                              type=resp.json()['type'])
        # add to the list of things for deleting, to help us not run
        # out of zones to delete
        LOG.info("%s -- Added recordset %s", tenant, recordset)
        tenant.data.recordsets_for_delete.append(recordset)
        LOG.info("have %s records", tenant.data.recordset_count())
def get_all():
    """Fetch every employee as a dict keyed by name.

    :returns: dict of Employee instances keyed by 'name' (via Recordset.to_dict)
    :raises AllocatException: if the query fails to execute
    """
    query = QtSql.QSqlQuery()
    if query.exec_("SELECT * FROM employees"):
        return Recordset.to_dict(query, Employee, 'name')
    raise AllocatException(query.lastError().text())
def get_all():
    """Fetch every project as a dict keyed by nickname.

    :returns: dict of Project instances keyed by 'nickname' (via Recordset.to_dict)
    :raises AllocatException: if the query fails to execute
    """
    query = QtSql.QSqlQuery()
    if query.exec_("SELECT * FROM projects"):
        return Recordset.to_dict(query, Project, 'nickname')
    raise AllocatException(query.lastError().text())