def _tsv(json):
    # These should be the names of the original fields in the index document.
    download_fields = vnutil.download_field_list()
    values = []
    for x in download_fields:
        if json.has_key(x):
            if x == 'dynamicproperties':
                dp = vnutil.format_json(json[x])
                values.append(unicode(dp.rstrip()))
            else:
                values.append(unicode(json[x]).rstrip())
        else:
            values.append(u'')
    return u'\t'.join(values).encode('utf-8')
def _tsv(json):
    # These should be the names of the original fields in the index document.
    download_fields = vnutil.download_field_list()
    values = []
    for x in download_fields:
        if json.has_key(x):
            if x == 'dynamicproperties':
                dp = vnutil.format_json(json[x])
                values.append(unicode(dp.rstrip()))
            else:
                values.append(unicode(json[x]).rstrip())
        else:
            values.append(u'')
#    logging.debug('%s: JSON: %s' % (DOWNLOAD_VERSION, json))
#    logging.debug('%s: DOWNLOAD_FIELDS: %s' % (UTIL_VERSION, download_fields))
#    logging.debug('%s: VALUES: %s' % (DOWNLOAD_VERSION, values))
    return u'\t'.join(values).encode('utf-8')
def tsv(self):
    # Note similar functionality in download.py _tsv(json)
    json = self.json
#    json['datasource_and_rights'] = json.get('url')
#    download_fields = vnutil.DWC_HEADER_LIST
    download_fields = vnutil.download_field_list()
    values = []
    for x in download_fields:
        if json.has_key(x):
            if x == 'dynamicproperties':
#                logging.info('dynamicproperties before: %s' % json[x])
                dp = vnutil.format_json(json[x])
#                logging.info('dynamicproperties after: %s' % dp)
                values.append(unicode(dp.rstrip()))
            else:
                values.append(unicode(json[x]).rstrip())
        else:
            values.append(u'')
    return u'\t'.join(values).encode('utf-8')
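# --- Illustrative usage sketch (assumption, not part of the source) ---
# _tsv() serializes one index document into a single UTF-8 encoded,
# tab-separated row whose column order comes from vnutil.download_field_list();
# fields absent from the record are emitted as empty strings so every row has
# the same column count. The record and its field names below are hypothetical.
if __name__ == '__main__':
    example_record = {
        'scientificname': u'Puma concolor',
        'dynamicproperties': u'{"weight": "5.2 kg"}',
    }
    print _tsv(example_record)  # one TSV line, empty strings for missing fields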
def get(self):
#    logging.info('API search request: %s\nVersion: %s' % (self.request, API_VERSION))
    request = json.loads(self.request.get('q'))
    q, c, limit = map(request.get, ['q', 'c', 'l'])

    # Set the limit to 400 by default. This value is based on the results
    # of substantial performance testing.
    if not limit:
        limit = 400
    if limit > 1000:
        # 1000 is the maximum value allowed by Google.
        limit = 1000
    if limit < 0:
        limit = 1

    curs = None
    if c:
        curs = search.Cursor(web_safe_string=c)
    else:
        curs = search.Cursor()

    result = vnsearch.query(q, limit, 'dwc', sort=None, curs=curs)
    response = None

    if len(result) == 4:
        recs, cursor, count, query_version = result
        if not c:
            type = 'query'
            query_count = count
        else:
            type = 'query-view'
            query_count = limit
        if cursor:
            cursor = cursor.web_safe_string

        # If count > 10,000, do not return the actual value of count
        # because it will be unreliable. Extensive testing revealed that
        # even for relatively small queries (>10,000 but <30,000 records),
        # it can be in error by one or more orders of magnitude.
        if count > 10000:
            count = '>10000'

        d = datetime.utcnow()

        # Process dynamicProperties JSON formatting
        for r in recs:
            if r.has_key('dynamicproperties'):
                r['dynamicproperties'] = vnutil.format_json(
                    r['dynamicproperties'])

        response = json.dumps(
            dict(recs=recs, cursor=cursor, matching_records=count,
                 limit=limit, response_records=len(recs),
                 api_version=API_VERSION, query_version=query_version,
                 request_date=d.isoformat(), request_origin=self.cityLatLong,
                 submitted_query=q))
#        logging.info('API search recs: %s\nVersion: %s' % (recs, API_VERSION))

        res_counts = vnutil.search_resource_counts(recs)
        params = dict(api_version=API_VERSION, count=len(recs),
                      latlon=self.cityLatLong, matching_records=count,
                      query=q, query_version=query_version,
                      request_source='SearchAPI', response_records=len(recs),
                      res_counts=json.dumps(res_counts), type=type)
        taskqueue.add(url='/apitracker', params=params,
                      queue_name="apitracker")
    else:
        error = result[0].__class__.__name__
        params = dict(error=error, query=q, type='query',
                      latlon=self.cityLatLong)
        taskqueue.add(url='/apitracker', params=params,
                      queue_name="apitracker")
        self.response.clear()
        message = 'Please try again. Error: %s' % error
        self.response.set_status(500, message=message)
        response = message

    self.response.out.headers['Content-Type'] = 'application/json'
    self.response.headers['charset'] = 'utf-8'
    self.response.out.write(response)
def get(self):
    s = 'API Version: %s' % API_VERSION
    s += '\nAPI search request: %s' % self.request
    logging.info(s)

    request = json.loads(self.request.get('q'))
    q, c, limit = map(request.get, ['q', 'c', 'l'])

    # Set the limit to 400 by default. This value is based on the results
    # of substantial performance testing.
    if not limit:
        limit = 400
    if limit > 1000:
        # 1000 is the maximum value allowed by Google.
        limit = 1000
    if limit < 0:
        limit = 1

    curs = None
    if c:
        curs = search.Cursor(web_safe_string=c)
    else:
        curs = search.Cursor()

    result = vnsearch.query(q, limit, 'dwc', sort=None, curs=curs)
    response = None

    if len(result) == 4:
        recs, cursor, count, query_version = result
        if not c:
            type = 'query'
            query_count = count
        else:
            type = 'query-view'
            query_count = limit
        if cursor:
            cursor = cursor.web_safe_string

        # If count > 10,000, do not return the actual value of count
        # because it will be unreliable. Extensive testing revealed that
        # even for relatively small queries (>10,000 but <30,000 records),
        # it can be in error by one or more orders of magnitude.
        if count > 10000:
            count = '>10000'

        d = datetime.utcnow()

        # Process dynamicProperties JSON formatting
        for r in recs:
            if r.has_key('dynamicproperties'):
                r['dynamicproperties'] = vnutil.format_json(
                    r['dynamicproperties'])

        response = json.dumps(
            dict(recs=recs, cursor=cursor, matching_records=count,
                 limit=limit, response_records=len(recs),
                 api_version=API_VERSION, query_version=query_version,
                 request_date=d.isoformat(), request_origin=self.cityLatLong,
                 submitted_query=q))
#        logging.info('API search recs: %s\nVersion: %s' % (recs, API_VERSION))

        res_counts = vnutil.search_resource_counts(recs)
        params = dict(api_version=API_VERSION, count=len(recs),
                      latlon=self.cityLatLong, matching_records=count,
                      query=q, query_version=query_version,
                      request_source='SearchAPI', response_records=len(recs),
                      res_counts=json.dumps(res_counts), type=type)
        taskqueue.add(url='/apitracker', params=params,
                      queue_name="apitracker")
    else:
        error = result[0].__class__.__name__
        params = dict(error=error, query=q, type='query',
                      latlon=self.cityLatLong)
        taskqueue.add(url='/apitracker', params=params,
                      queue_name="apitracker")
        self.response.clear()
        message = 'Please try again. Error: %s' % error
        self.response.set_status(500, message=message)
        response = message

    self.response.out.headers['Content-Type'] = 'application/json'
    self.response.headers['charset'] = 'utf-8'
    self.response.out.write(response)
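# --- Illustrative client sketch (assumption, not part of the source) ---
# The handler above reads a single form parameter 'q' whose value is a JSON
# object with keys 'q' (query string), 'c' (web-safe paging cursor) and
# 'l' (limit, defaulting to 400 and capped at 1000). The endpoint URL and the
# example query string below are placeholders, not taken from the source.
import json
import urllib
import urllib2

def search_page(api_url, query, cursor=None, limit=400):
    # Wrap the query the way the handler expects and fetch one page of results.
    payload = {'q': query, 'l': limit}
    if cursor:
        payload['c'] = cursor
    params = urllib.urlencode({'q': json.dumps(payload)})
    resp = urllib2.urlopen('%s?%s' % (api_url, params))
    return json.loads(resp.read())

# Example: page through results using the 'cursor' value from each response.
# page = search_page('https://example.org/api/search', 'genus:puma')
# next_page = search_page('https://example.org/api/search', 'genus:puma',
#                         cursor=page.get('cursor'))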