def post(self):
    # Re-put records in batches of 50, chaining through the task queue
    # until the query is exhausted.
    n = 50
    count = 0
    curs = None
    if self.request.get('cursor'):
        curs = Cursor(urlsafe=self.request.get('cursor'))
    if self.request.get('count'):
        count = int(self.request.get('count'))
    query = APIData.query().order(APIData.created_time)
    data, cursor, more = query.fetch_page(n, start_cursor=curs)
    # Re-put the batch so the entities are re-indexed.
    if data:
        ndb.put_multi(data)
        count += len(data)
    logging.debug('count: ' + str(count))
    # Pass the cursor to the next task in the chain.
    if len(data) == n and cursor:
        taskqueue.add(
            url=('/api/v1/JMKr5roUu0EQyssRVv8mvkgXsmQBt3sgNDbfoBIkwoUi59dz'
                 'zQJnvmQ5jIlNtC4c'),
            params={
                'cursor': cursor.urlsafe(),
                'count': str(count)
            })
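# A hedged usage sketch (not from the original source): the chain above is
# typically started by enqueuing a first task with no cursor, e.g. from a
# cron job or admin action. The URL is the same opaque endpoint the handler
# re-enqueues itself on.
def kick_off_reput_chain():
    taskqueue.add(
        url=('/api/v1/JMKr5roUu0EQyssRVv8mvkgXsmQBt3sgNDbfoBIkwoUi59dz'
             'zQJnvmQ5jIlNtC4c'),
        params={})  # no cursor/count: the handler starts from the beginning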
def project_count_images(project_object):
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    environment_key = ndb.Key('Environment', 'PUBLIC')
    query = query.filter(APIData.environment == environment_key)
    tag = create_indexed_tag('type', 'IMAGE')
    query = query.filter(APIData.indexed_data == tag)
    tag = create_indexed_tag('project_code', project_object['code'])
    query = query.filter(APIData.indexed_data == tag)
    image_count = 0
    more = True
    cursor = None
    while more:
        if not cursor:
            images, cursor, more = query.fetch_page(200, keys_only=True)
        else:
            images, cursor, more = query.fetch_page(
                200, start_cursor=cursor, keys_only=True)
        image_count += len(images)
    return image_count
def code_to_project_id(code):
    response = APIData.query(
        APIData.indexed_data == 'TYPE->PROJECT',
        APIData.indexed_data == 'CODE->' + code.upper().strip()).get(
            keys_only=True)
    if response:
        return response.id()
    else:
        return None
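# create_indexed_tag is used throughout but not shown in this section.
# Judging from the literal filters above ('TYPE->PROJECT', 'CODE->' + code),
# a minimal sketch would be (an assumption, not the original implementation):
def create_indexed_tag(key, value):
    """Build a 'KEY->VALUE' token for the APIData.indexed_data list."""
    return str(key).upper().strip() + '->' + str(value).upper().strip()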
def get(self):
    self.tv['page_geoprocessing'] = True
    # self.tv["program"] = "all"
    self.tv['project_code'] = self.request.get('project_code')
    self.tv['system_url'] = 'openroads-geostore.appspot.com'
    if self.tv['project_code']:
        query = APIData.query(APIData.indexed_data == create_indexed_tag(
            'project_code', str(self.tv['project_code'])))
        query = query.filter(
            APIData.indexed_data == create_indexed_tag('type', 'KML'))
        query = query.order(-APIData.created_time)
        kmls = query.fetch(20)
        logging.debug(kmls)
        self.tv['kmls'] = []
        query = APIData.query(APIData.indexed_data == create_indexed_tag(
            'parent_code', str(self.tv['project_code'])))
        query = query.filter(
            APIData.indexed_data == create_indexed_tag('type', 'DATASET'))
        query = query.order(-APIData.created_time)
        datasets = query.fetch(25)
        for kml in kmls:
            self.tv['kmls'].append({
                'filename': find_between_r(kml.file_url + ' ', '/', ' '),
                'kml_id': kml.key.id(),
                'file_url': kml.file_url,
                'parent_code': kml.additional_data['parent_code']
            })
        self.tv['datasets'] = {}
        for dataset in datasets:
            self.tv['datasets'][dataset.additional_data.get(
                'code')] = dataset.created_time.strftime('%B %d, %Y')
    self.tv['asset_values'] = json.dumps(ASSET_VALUES)
    self.render('geoprocessing-classification.html')
def post(self, data_id=None):
    # TODO: Rewrite this for environments
    response = {}
    if not data_id:
        desc = "ID is missing from the request."
        response["success"] = False
        response["response"] = "MissingParametersError"
        response["description"] = desc
        response["code"] = 400
        wrap_response(self, response)
        return
    data = APIData.get_by_id(normalize_id(data_id))
    if not data:
        desc = "Cannot find the package."
        response["success"] = False
        response["response"] = "InvalidIDError"
        response["description"] = desc
        response["code"] = 400
        wrap_response(self, response)
        return
    if self.user.key != data.user:
        if not self.user.teams:
            msg = "You have insufficient rights to access this application."
            response["success"] = False
            response["response"] = "InvalidIDError"
            response["description"] = msg
            response["code"] = 400
            wrap_response(self, response)
            return
        has_access = False
        for team in self.user.teams:
            if team in data.access_lock:
                has_access = True
        if not has_access:
            msg = "You have insufficient rights to access this application."
            response["success"] = False
            response["response"] = "InvalidIDError"
            response["description"] = msg
            response["code"] = 400
            wrap_response(self, response)
            return
    # Publish: swap the PRIVATE lock for PUBLIC.
    if "PRIVATE" in data.access_lock:
        data.access_lock.remove("PRIVATE")
    data.access_lock.append("PUBLIC")
    data.access_lock = uniquify(data.access_lock)
    data.put()
    msg = "The data has been published and is now public."
    response["success"] = True
    response["response"] = "Success"
    response["description"] = msg
    response["code"] = 200
    wrap_response(self, response)
def get(self):
    if not self.request.get('project_code') \
            and not self.request.get('parent_code'):
        logging.error('missing project code')
        self.error(400)
        return
    project_code = self.request.get('project_code')
    parent_code = self.request.get('parent_code')
    kml_id = self.request.get('kml_id')
    if not kml_id:
        kml = APIData.query()
        kml = kml.filter(APIData.indexed_data == 'TYPE->KML')
        kml = kml.filter(
            APIData.indexed_data == 'PROJECT_CODE->' + project_code.upper())
        kml = kml.get()
        kml_id = str(kml.key.id())
    else:
        kml = APIData.get_by_id(normalize_id(kml_id))
        if not kml:
            logging.error('cannot find kml_id: ' + kml_id)
            self.error(404)
            return
    # Check whether a pre-generated GCSFile already exists for this KML.
    if self.request.get('output') and self.request.get('output') == 'json':
        file_type = 'JSON'
    else:
        file_type = 'KML'
    gcs_file = GCSFile.query(
        GCSFile.kml_id == kml_id,
        GCSFile.file_type == file_type).order(-GCSFile.created_time).get()
    if gcs_file:
        # If no classification is newer than the cached file, it is still
        # up to date and can be served directly.
        classification_check = APIData.query(
            APIData.indexed_data == create_indexed_tag(
                'TYPE', 'CLASSIFICATION')).order(-APIData.created_time).get()
        if classification_check and \
                classification_check.created_time < gcs_file.created_time:
            self.redirect(gcs_file.link.encode('utf-8'))
            return
    output = self.request.get('output')
    deferred.defer(merge_kml, parent_code, project_code, output, kml)
    self.tv['project_code'] = project_code
    self.render('kml-download.html')
    return
def get(self):
    self.tv['page_geoprocessing'] = True
    self.tv['project_code'] = self.request.get('project_code')
    if self.tv['project_code']:
        tag = create_indexed_tag('project_code', str(self.tv['project_code']))
        query = APIData.query(APIData.indexed_data == tag)
        tag = create_indexed_tag('type', 'KML')
        query = query.filter(APIData.indexed_data == tag)
        query = query.order(-APIData.created_time)
        kmls = query.fetch(20)
        logging.debug(kmls)
        self.tv['kmls'] = []
        tag = create_indexed_tag('parent_code', str(self.tv['project_code']))
        query = APIData.query(APIData.indexed_data == tag)
        tag = create_indexed_tag('type', 'DATASET')
        query = query.filter(APIData.indexed_data == tag)
        query = query.order(-APIData.created_time)
        datasets = query.fetch(25)
        logging.debug(datasets)
        for kml in kmls:
            self.tv['kmls'].append({
                'filename': find_between_r(kml.file_url + ' ', '/', ' '),
                'kml_id': kml.key.id(),
                'file_url': kml.file_url,
                'parent_code': kml.additional_data['parent_code']
            })
        self.tv['datasets'] = []
        for dataset in datasets:
            self.tv['datasets'].append(dataset.to_api_object())
    self.tv['asset_values'] = json.dumps(ASSET_VALUES)
    self.render('geoprocessing/new-classification-summary.html')
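# find_between_r is not shown in this section. Given calls like
# find_between_r(kml.file_url + ' ', '/', ' ') -- extracting the filename
# between the last '/' and the trailing space -- a plausible sketch is
# (an assumption about the helper, not its actual source):
def find_between_r(s, first, last):
    """Return the substring between the last `first` and the last `last`."""
    try:
        start = s.rindex(first) + len(first)
        end = s.rindex(last, start)
        return s[start:end]
    except ValueError:
        return ''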
def get(self, data_id=None):
    """
    Handles the /api/v1/data/[data_id] endpoint.
    Returns the details of the data_id provided.
    """
    response = {
        "code": 200,
        "type": "Data details.",
        "method": "GET",
        "response": "OK",
        "data": {}
    }
    try:
        data_id = int(data_id)
    except ValueError:
        data_id = str(data_id)
    except Exception as e:
        logging.exception(e)
        wrap_response(self, response)
        return
    data = APIData.get_by_id(normalize_id(data_id))
    if data:
        # TODO: Add check for authorization
        response["data"] = data.to_api_object()
        if self.request.get('show_environments'):
            public = False
            for environment_key in data.environment:
                logging.debug(str(data.environment))
                if environment_key.id() == 'PUBLIC':
                    public = True
                else:
                    environment = environment_key.get()
                    if environment:
                        response["data"]["environment_object"] = \
                            environment.to_api_object()
            if public:
                response["data"]["public"] = True
            else:
                response["data"]["public"] = False
    if self.GET("callback"):
        callback = self.GET("callback")
        d = json.dumps(response)
        self.response.out.write(callback + "(" + d + ");")
    else:
        wrap_response(self, response)
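# normalize_id is referenced throughout but defined elsewhere. Since
# datastore IDs here may be integer auto-IDs or string key names, a minimal
# sketch consistent with its call sites would be (assumed, not the original):
def normalize_id(data_id):
    """Coerce an ID to int when possible, else keep it as a string key name."""
    try:
        return int(data_id)
    except (TypeError, ValueError):
        return str(data_id)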
def get_project_kmls(project_id, dataset=None):
    query = APIData.query()
    query = query.filter(APIData.indexed_data == 'TYPE->KML')
    query = query.filter(APIData.indexed_data == 'PROJECT_CODE->' + project_id)
    if dataset:
        query = query.filter(APIData.indexed_data == 'PARENT_CODE->' + dataset)
    n = 100
    result, cursor, more = query.fetch_page(n)
    resp = {}
    for kml in result:
        resp[kml.file_url.split('/')[-1]] = compute_kml_length(kml.key.id())
    return resp
def project_has_classification(project_object):
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    environment_key = ndb.Key('Environment', 'PUBLIC')
    query = query.filter(APIData.environment == environment_key)
    tag = create_indexed_tag('type', 'CLASSIFICATION')
    query = query.filter(APIData.indexed_data == tag)
    tag = create_indexed_tag('project_code', project_object['code'])
    query = query.filter(APIData.indexed_data == tag)
    classification = query.get(keys_only=True)
    if classification:
        return 1
    else:
        return 0
def project_has_image(project_object):
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    environment_key = ndb.Key('Environment', 'PUBLIC')
    query = query.filter(APIData.environment == environment_key)
    tag = create_indexed_tag('type', 'IMAGE')
    query = query.filter(APIData.indexed_data == tag)
    tag = create_indexed_tag('project_code', project_object['code'])
    query = query.filter(APIData.indexed_data == tag)
    image = query.get(keys_only=True)
    if image:
        return 1
    else:
        return 0
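# project_has_classification and project_has_image differ only in the type
# tag they filter on. A possible consolidation (a sketch, not part of the
# original code) that both could delegate to:
def project_has_type(project_object, type_value):
    """Return 1 if the public, non-archived project has data of this type."""
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    query = query.filter(
        APIData.environment == ndb.Key('Environment', 'PUBLIC'))
    query = query.filter(
        APIData.indexed_data == create_indexed_tag('type', type_value))
    query = query.filter(APIData.indexed_data == create_indexed_tag(
        'project_code', project_object['code']))
    return 1 if query.get(keys_only=True) else 0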
def post(self):
    if self.request.get('auth') == 'bd2c952a4b2febc39b81c967cd8556cd':
        response = {}
        response['cursor'] = ''
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'COA->0')
        n = 20
        if self.request.get('n'):
            n = int(self.request.get('n'))
        if self.GET("cursor"):
            curs = Cursor(urlsafe=self.request.get("cursor"))
            data, cursor, more = query.fetch_page(n, start_cursor=curs)
        else:
            data, cursor, more = query.fetch_page(n)
        things_to_put = []
        for d in data:
            d.additional_data['coa'] = '0'
            things_to_put.append(d)
        if things_to_put:
            ndb.put_multi(things_to_put)
        # fetch_page may return no cursor on the last page.
        if cursor:
            response['cursor'] = cursor.urlsafe()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(response))
def compute_kml_length(kml_id):
    kml_id = normalize_id(kml_id)
    kml = APIData.get_by_id(kml_id)
    location = urllib.unquote(kml.file_url).replace(
        "https://storage.googleapis.com", '')
    location = location.replace("http://storage.googleapis.com", '')
    # Initialize 'points' so the length loop below is safe even when the
    # file cannot be read.
    response = {'points': []}
    try:
        with gcs.open(location) as f:
            if kml.file_url.endswith('.kmz'):
                # A .kmz is a zip archive whose main document is doc.kml.
                zf = ZipFile(StringIO(f.read()))
                xml_data = zf.read('doc.kml')
            else:
                xml_data = f.read()
        response['points'] = get_kml_points(xml_data)
    except Exception as e:
        response['error'] = 'Error reading KML/KMZ.'
        logging.debug(e)
    response['length'] = []
    for points in response['points']:
        # Sum the distances between consecutive points of each path.
        p = 0
        for i in range(1, len(points)):
            p += get_distance_in_km(points[i - 1], points[i])
        response['length'].append(p)
    return response
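# get_distance_in_km is defined elsewhere. Assuming it takes two (lat, lng)
# points in degrees and returns great-circle kilometres, a standard
# haversine sketch would look like this (an illustration, not the original):
import math

def haversine_km(p1, p2):
    """Great-circle distance in km between two (lat, lng) pairs in degrees."""
    lat1, lng1 = (float(x) for x in p1)
    lat2, lng2 = (float(x) for x in p2)
    dlat = math.radians(lat2 - lat1)
    dlng = math.radians(lng2 - lng1)
    a = (math.sin(dlat / 2) ** 2 +
         math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
         math.sin(dlng / 2) ** 2)
    return 6371.0 * 2 * math.asin(math.sqrt(a))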
if self.GET("cursor"): curs = Cursor(urlsafe=self.GET("cursor")) environments, cursor, more = query.fetch_page(30, start_cursor=curs) else: environments, cursor, more = query.fetch_page(30) self.tv["datasets"] = [] self.tv["environments"] = [] if environments: for environment in environments: self.tv["environments"].append(environment.to_object()) tag = create_indexed_tag("type", "DATASET") query = APIData.query() query = query.filter(APIData.indexed_data == tag) if self.GET("current_environment").lower() != "all": key = ndb.Key("Environment", int(self.GET("current_environment"))) query = query.filter(APIData.environment == key) if self.GET("cursor"): curs = Cursor(urlsafe=self.GET("cursor")) datasets, cursor, more = query.fetch_page(50, start_cursor=curs) else: datasets, cursor, more = query.fetch_page(50) if datasets: for d in datasets: self.tv["datasets"].append(d.to_api_object())
def get(self):
    """
    Handles the /api/v1/data endpoint.
    Returns list of datasets.
    """
    response = {
        "code": 200,
        "type": "List of geostore saved data.",
        "method": "GET",
        "response": "OK",
        "data": []
    }
    # Default number of entities to be retrieved is 50, capped at 100.
    n = 50
    if self.GET("n"):
        n = int(self.GET("n"))
    if n > 100:
        n = 100
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    if not self.user:
        if "Authorization" in self.request.headers:
            token = Token.get_by_id(self.request.headers["Authorization"])
            if not token:
                logging.info("Cannot find token: " +
                             str(self.request.headers["Authorization"]))
                desc = "The token you provided is invalid."
                response["success"] = False
                response["response"] = "InvalidTokenError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            logging.info(token)
            session = token.session.get()
            if not session:
                logging.info("Cannot find session")
                desc = "The token has already expired."
                response["success"] = False
                response["response"] = "InvalidTokenError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            logging.info(session)
            if session.expires < datetime.datetime.now() \
                    or session.status is False:
                logging.info("token has expired or not active")
                desc = "The token has already expired."
                response["success"] = False
                response["response"] = "InvalidTokenError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            owner = session.owner.get()
            if not owner:
                logging.info("Cannot find user")
                desc = "Cannot find user."
                response["success"] = False
                response["response"] = "InvalidUserError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            self.user = owner
    if self.user and self.GET('workspace'):
        environment = ndb.Key(urlsafe=self.GET('workspace')).get()
        if environment:
            if self.user.key in environment.users:
                environment_key = environment.key
                query = query.filter(APIData.environment == environment_key)
    elif self.user and not self.GET('workspace') and (
            not self.GET('type') or
            self.GET('type') not in ['PROJECT', 'SUBPROJECT']):
        environments_user = Environment.query(
            Environment.users == self.user.key).fetch(keys_only=True)
        environments_user.append(ndb.Key('Environment', 'PUBLIC'))
        query = query.filter(
            APIData.environment.IN(environments_user)).order(APIData._key)
    else:
        query = query.filter(
            APIData.environment == ndb.Key('Environment', 'PUBLIC'))
    if self.GET("_search_"):
        response["type"] = "Search geostore saved data."
        response["query"] = self.GET("_search_")
        search = self.GET("_search_").strip().upper()
        query = query.filter(APIData.tags >= search)
        query = query.order(APIData.tags)
    else:
        for arg in self.request.arguments():
            if arg.lower() in ("callback", "_", "order", "cursor", "n",
                               "_search_", "show_image_dates",
                               "start_updated_from", "start_created_from"):
                continue
            ad_value = self.GET(arg)
            tag = create_indexed_tag(arg, ad_value)
            query = query.filter(APIData.indexed_data == tag)
        if self.GET("order"):
            if self.GET("order").lower() in ["asc", "ascending"]:
                query = query.order(APIData.updated_time)
            elif self.GET("order").lower() in ["desc", "descending"]:
                query = query.order(-APIData.updated_time)
            elif self.GET("order").lower() == "created_asc":
                query = query.order(APIData.created_time)
            elif self.GET("order").lower() == "created_desc":
                query = query.order(-APIData.created_time)
            elif self.GET("order").lower() == "modified":
                query = query.order(APIData.updated_time)
        else:
            query = query.order(-APIData.created_time)
        if self.GET("start_updated_from"):
            logging.debug(self.GET("start_updated_from"))
            if self.GET("order").lower() in ["desc", "descending"]:
                query = query.filter(
                    APIData.updated_time <= datetime.datetime.fromtimestamp(
                        int(self.GET("start_updated_from"))))
            else:
                query = query.filter(
                    APIData.updated_time >= datetime.datetime.fromtimestamp(
                        int(self.GET("start_updated_from"))))
        elif self.GET('start_created_from'):
            logging.debug(self.GET("start_created_from"))
            if self.GET("order").lower() in ["desc", "descending"]:
                query = query.filter(
                    APIData.created_time <= datetime.datetime.fromtimestamp(
                        int(self.GET("start_created_from"))))
            else:
                query = query.filter(
                    APIData.created_time >= datetime.datetime.fromtimestamp(
                        int(self.GET("start_created_from"))))
    logging.info(query)
    if self.GET("cursor"):
        curs = Cursor(urlsafe=self.GET("cursor"))
        data, cursor, more = query.fetch_page(n, start_cursor=curs)
    else:
        data, cursor, more = query.fetch_page(n)
    if data:
        response["cursor"] = ""
        for d in data:
            try:
                response["data"].append(d.to_api_object())
            except Exception as e:
                logging.exception(e)
        if more:
            response["cursor"] = cursor.urlsafe()
    if self.GET('show_image_dates'):
        if self.GET('type') and self.GET('type').upper() == 'CLASSIFICATION':
            if 'data' in response:
                # Join each classification with its source image's date
                # and location.
                image_ids = []
                for classification in response['data']:
                    image_ids.append(
                        ndb.Key('APIData', int(classification['image_id'])))
                images = ndb.get_multi(image_ids)
                for image in images:
                    image_latlng = image.additional_data['latlng']
                    if 'date' in image.additional_data:
                        date = image.additional_data['date']
                    else:
                        date = image.created_time.strftime('%Y:%m:%d %H:%M:%S')
                    for i in range(0, len(response['data'])):
                        if response['data'][i]['image_id'] == \
                                str(image.key.id()):
                            response['data'][i]['image_date'] = date
                            response['data'][i]['image_latlng'] = image_latlng
    if self.GET("callback"):
        callback = self.GET("callback")
        d = json.dumps(response)
        self.response.out.write(callback + "(" + d + ");")
    else:
        wrap_response(self, response)
def post(self, program=None, code=None, report=None, dataset=None):
    headers = {}
    params = {
        'file_image': self.request.POST['file'],
        'indexed_program': program.upper(),
        'indexed_project_code': code,
        'indexed_project_id': self.request.POST['project-id'],
        'indexed_dataset_code': report,
        'indexed_dataset_id': self.request.POST['dataset-id'],
        'indexed_parent_code': report,
        'unindexed_data_collectors': self.request.POST['data-collectors'],
        'unindexed_last_modified_date': self.request.POST['last_modified_date']
    }
    if self.request.get('subproject'):
        params['indexed_subproject_code'] = report
        params['indexed_subproject_id'] = self.request.POST['subproject-id']
        params['indexed_dataset_code'] = dataset
        params['indexed_parent_code'] = dataset
    if self.request.get('special'):
        logging.debug('FILE RECEIVED')
        params['file_file'] = params.pop('file_image')
        params['indexed_type'] = 'FILE'
        if self.request.POST['file'].filename.lower().endswith(
                ('.jpg', '.jpeg')):
            image = JpegFile.fromString(params['file_file'].file.read())
            try:
                lat, lng = image.get_geo()
                params['unindexed_latlng'] = str(lat) + ',' + str(lng)
                logging.debug('FILE HAS GPS')
            except Exception:
                logging.debug('FILE HAS NO GPS')
            params['file_file'].file.seek(0)
    else:
        if self.request.POST['file'].filename.endswith(('.kml', '.kmz')):
            logging.debug('KML RECEIVED')
            params['file_kml'] = params.pop('file_image')
            params['indexed_type'] = 'KML'
            kml_type = 'PROJECT'
            if params['file_kml'].filename.startswith('ACCESS-'):
                kml_type = 'ACCESS'
            ext = params['file_kml'].filename.split('.')[-1]
            params['file_kml'].filename = kml_type + '-' + code + '-'
            params['file_kml'].filename += self.request.get('kml')
            params['file_kml'].filename += '.' + ext
            query = APIData.query()
            query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
            query = query.filter(APIData.indexed_data == 'CODE->' + code)
            query = query.filter(APIData.indexed_data == 'HAS_KML->1')
            project = query.get(keys_only=True)
            logging.debug('PROJECT QUERIED AND HAS KML')
            logging.debug(project)
            if not project:
                # Flag the project as having a KML. Use a separate payload
                # so the upload params are not clobbered before write_to_api.
                flag_params = {'indexed_has_kml': '1'}
                update_api_data(self.request.POST['project-id'], flag_params,
                                self.user, 'application/json')
        elif self.request.POST['file'].filename.lower().endswith(
                ('.jpg', '.jpeg')):
            logging.debug('IMAGE RECEIVED')
            try:
                image = JpegFile.fromString(params['file_image'].file.read())
            except Exception as e:
                logging.debug(e)
                params['file_image'].file.seek(0)
                try:
                    image = JpegFile.fromString(
                        StringIO(params['file_image'].file.read()).read())
                except Exception as e:
                    logging.debug('Using StringIO')
                    traceback.print_exc()
                    self.response.set_status(500)
                    self.response.write('Image has invalid metadata')
                    return
            try:
                lat, lng = image.get_geo()
            except Exception as e:
                logging.debug('Image does not have GPS data')
                traceback.print_exc()
                self.response.set_status(500)
                self.response.write('Image does not have GPS data')
                return
            params['unindexed_latlng'] = str(lat) + ',' + str(lng)
            params['unindexed_date'] = ''
            if image.exif.primary.GPS[0x7]:
                # Build HH:MM:SS from the GPSTimeStamp rationals (tag 0x7).
                logging.debug(image.exif.primary.GPS[0x7])
                gps_time = ''
                for a in image.exif.primary.GPS[0x7]:
                    if a.num < 10:
                        gps_time += '0' + str(a.num)
                    else:
                        gps_time += str(a.num)
                    gps_time += ':'
                gps_time = gps_time[:-1]
                logging.debug(gps_time)
                # GPSDateStamp (tag 0x1d) plus the time component.
                params['unindexed_date'] = \
                    image.exif.primary.GPS[0x1d] + ' ' + gps_time
                logging.debug(params['unindexed_date'])
            else:
                try:
                    params['unindexed_date'] = \
                        image.exif.primary.ExtendedEXIF.DateTimeOriginal
                except Exception:
                    params['unindexed_date'] = datetime.datetime.now(
                    ).strftime('%Y:%m:%d %H:%M:%S')
            params['file_image'].file.seek(0)
            params['indexed_type'] = 'IMAGE'
            query = APIData.query()
            query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
            query = query.filter(APIData.indexed_data == 'CODE->' + code)
            query = query.filter(APIData.indexed_data == 'HAS_IMAGE->1')
            project = query.get(keys_only=True)
            logging.debug('PROJECT QUERIED AND HAS IMAGE')
            logging.debug(project)
            if not project:
                # Flag the project as having images (separate payload, as
                # above).
                flag_params = {'indexed_has_image': '1'}
                update_api_data(self.request.POST['project-id'], flag_params,
                                self.user, 'application/json')
        else:
            self.response.set_status(500)
            self.response.write('File is invalid')
            return
    headers['Authorization'] = API_KEY
    headers['Content-Type'] = 'multipart/form-data'
    logging.debug(params)
    write_to_api(params, self.user, headers['Content-Type'])
def run_counter(counter_id=None, cursor_urlsafe=None,
                set_classification_flags=False):
    if not counter_id:
        counter_id = generate_counter_id()
        counter_instance = Counter(id=counter_id)
        counters_data = {}
    else:
        counter_instance = Counter.get_by_id(counter_id)
        counters_data = counter_instance.data
    n = 50
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    environment_key = ndb.Key('Environment', 'PUBLIC')
    query = query.filter(APIData.environment == environment_key)
    tag = create_indexed_tag('type', 'PROJECT')
    query = query.filter(APIData.indexed_data == tag)
    query = query.order(APIData._key)
    if cursor_urlsafe:
        curs = Cursor(urlsafe=cursor_urlsafe)
        projects, cursor, more = query.fetch_page(n, start_cursor=curs)
    else:
        projects, cursor, more = query.fetch_page(n)
    new_projects = []
    for project in projects:
        project_data = get_project_data(project)
        counters_data = increment_counts_with_province(counters_data,
                                                       project_data)
        if set_classification_flags:
            if project_data['has_image']:
                project.indexed_data.append(
                    create_indexed_tag('has_image', '1'))
                project.additional_data['has_image'] = '1'
            else:
                project.indexed_data.append(
                    create_indexed_tag('has_image', '0'))
                project.additional_data['has_image'] = '0'
            if project_data['has_kml']:
                project.indexed_data.append(create_indexed_tag('has_kml', '1'))
                project.additional_data['has_kml'] = '1'
            else:
                project.indexed_data.append(create_indexed_tag('has_kml', '0'))
                project.additional_data['has_kml'] = '0'
            if project_data['has_classification']:
                project.indexed_data.append(
                    create_indexed_tag('has_classification', '1'))
                project.additional_data['has_classification'] = '1'
            else:
                project.indexed_data.append(
                    create_indexed_tag('has_classification', '0'))
                project.additional_data['has_classification'] = '0'
            project.indexed_data = uniquify(project.indexed_data)
            new_projects.append(project)
    counter_instance.data = counters_data
    if not cursor:
        counter_instance.done = True
    counter_instance.put()
    if set_classification_flags and new_projects:
        ndb.put_multi(new_projects)
    return {
        "counter_id": counter_id,
        "cursor": cursor.urlsafe() if cursor else None
    }
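# uniquify appears throughout (indexed_data, tags, access_lock). An
# order-preserving de-duplication sketch consistent with those uses
# (assumed; the original definition is not shown here):
def uniquify(seq):
    """Return seq without duplicates, keeping first-seen order."""
    seen = set()
    return [x for x in seq if x not in seen and not seen.add(x)]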
def merge_kml(parent_code, project_code, output, kml):
    # Collect all classification attributes for this project/dataset.
    query = APIData.query(
        APIData.indexed_data == create_indexed_tag('TYPE', 'CLASSIFICATION'))
    if parent_code:
        query = query.filter(APIData.indexed_data == create_indexed_tag(
            'parent_code', parent_code))
    else:
        query = query.filter(APIData.indexed_data == create_indexed_tag(
            'project_code', project_code))
    cursor = None
    i = 1
    n = 100
    all_classifications = []
    while i < 50:  # hard stop after 50 pages
        classifications, cursor, more = query.fetch_page(
            n, start_cursor=cursor)
        i = i + 1
        for classification in classifications:
            try:
                all_classifications.append({
                    'classification_type':
                        classification.additional_data[
                            'classification_type'].upper().strip(),
                    'classification':
                        classification.additional_data[
                            'classification'].upper().strip(),
                    'latlng':
                        classification.additional_data['lat'] + ',' +
                        classification.additional_data['lng']
                })
            except KeyError:
                pass
        if len(classifications) < n:
            break
    logging.debug("i: " + str(i))
    # Generate the new merged KML (or JSON) file.
    kml_id = str(kml.key.id())
    try:
        if output and output.lower() == 'json':
            content = merge_kml_and_attributes_json(all_classifications,
                                                    kml.file_url)
            filename = 'merged_kml_and_attributes.json'
            gcs_filename = BUCKET_NAME
            gcs_filename += random_string(128) + "/"
            gcs_filename += filename
            gcs_options = {'x-goog-acl': 'public-read'}
            gcs_file = gcs.open(gcs_filename, 'w', options=gcs_options)
            gcs_file.write(json.dumps(content).encode('utf-8'))
            gcs_file.close()
            full_url = "https://storage.googleapis.com" + gcs_filename
            full_url = urllib.quote(full_url, safe="%/:=&?~#+!$,;'@()*[]")
            gcsfile = GCSFile()
            gcsfile.kml_id = kml_id
            gcsfile.link = full_url
            gcsfile.file_type = 'JSON'
            gcsfile.put()
            return
        else:
            content = merge_kml_and_attributes(all_classifications,
                                               kml.file_url)
    except Exception as e:
        logging.debug(e)
        return
    filename = find_between_r(kml.file_url + ' ', '/', ' ')
    filename = filename.replace('.kmz', '-kmz') + '.kml'
    # Save the merged KML to GCS and record it for later reuse.
    gcs_filename = BUCKET_NAME
    gcs_filename += random_string(128) + "/"
    gcs_filename += filename
    gcs_options = {'x-goog-acl': 'public-read'}
    gcs_file = gcs.open(gcs_filename, 'w', options=gcs_options)
    gcs_file.write(content.encode('utf-8'))
    gcs_file.close()
    full_url = "https://storage.googleapis.com" + gcs_filename
    full_url = urllib.quote(full_url, safe="%/:=&?~#+!$,;'@()*[]")
    gcsfile = GCSFile()
    gcsfile.kml_id = kml_id
    gcsfile.link = full_url
    gcsfile.file_type = 'KML'
    gcsfile.put()
def get(self):
    '''
    Query projects that have a classification, then put the total number
    of projects in memcache. Optionally set a version.
    '''
    self.response.headers['Content-Type'] = 'application/json'
    date = datetime.now()
    query = Statistics.query()
    query = query.order(-Statistics.created)
    statistics = query.get()
    self.response.write(json.dumps(statistics.statistics))
    # NOTE: this early return serves the precomputed statistics; the
    # memcache-backed recomputation below is currently unreachable.
    return
    coa_projects_geoprocessed = memcache.get(
        'statistics_coa_projects_geoprocessed')
    if not coa_projects_geoprocessed:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'HAS_CLASSIFICATION->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->COA')
        coa_projects_geoprocessed = len(query.fetch(keys_only=True))
        memcache.set('statistics_coa_projects_geoprocessed',
                     coa_projects_geoprocessed, 86400)
    prdp_projects_geoprocessed = memcache.get(
        'statistics_prdp_projects_geoprocessed')
    if not prdp_projects_geoprocessed:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'HAS_CLASSIFICATION->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->PRDP')
        prdp_projects_geoprocessed = len(query.fetch(keys_only=True))
        memcache.set('statistics_prdp_projects_geoprocessed',
                     prdp_projects_geoprocessed, 86400)
    bub_projects_geoprocessed = memcache.get(
        'statistics_bub_projects_geoprocessed')
    if not bub_projects_geoprocessed:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'HAS_CLASSIFICATION->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->BUB')
        bub_projects_geoprocessed = len(query.fetch(keys_only=True))
        memcache.set('statistics_bub_projects_geoprocessed',
                     bub_projects_geoprocessed, 86400)
    gaa_projects_geoprocessed = memcache.get(
        'statistics_gaa_projects_geoprocessed')
    if not gaa_projects_geoprocessed:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'HAS_CLASSIFICATION->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->GAA')
        gaa_projects_geoprocessed = len(query.fetch(keys_only=True))
        memcache.set('statistics_gaa_projects_geoprocessed',
                     gaa_projects_geoprocessed, 86400)
    trip_projects_geoprocessed = memcache.get(
        'statistics_trip_projects_geoprocessed')
    if not trip_projects_geoprocessed:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'HAS_CLASSIFICATION->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->TRIP')
        trip_projects_geoprocessed = len(query.fetch(keys_only=True))
        memcache.set('statistics_trip_projects_geoprocessed',
                     trip_projects_geoprocessed, 86400)
    coa_projects = memcache.get('statistics_coa_projects')
    if not coa_projects:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'PROGRAM->COA')
        coa_projects = len(query.fetch(keys_only=True))
        memcache.set('statistics_coa_projects', coa_projects, 86400)
    prdp_projects = memcache.get('statistics_prdp_projects')
    if not prdp_projects:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'PROGRAM->PRDP')
        prdp_projects = len(query.fetch(keys_only=True))
        memcache.set('statistics_prdp_projects', prdp_projects, 86400)
    bub_projects = memcache.get('statistics_bub_projects')
    if not bub_projects:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'PROGRAM->BUB')
        bub_projects = len(query.fetch(keys_only=True))
        memcache.set('statistics_bub_projects', bub_projects, 86400)
    gaa_projects = memcache.get('statistics_gaa_projects')
    if not gaa_projects:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'PROGRAM->GAA')
        gaa_projects = len(query.fetch(keys_only=True))
        memcache.set('statistics_gaa_projects', gaa_projects, 86400)
    trip_projects = memcache.get('statistics_trip_projects')
    if not trip_projects:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
        query = query.filter(APIData.indexed_data == 'PROGRAM->TRIP')
        trip_projects = len(query.fetch(keys_only=True))
        memcache.set('statistics_trip_projects', trip_projects, 86400)
    coa_geotag_images = memcache.get('statistics_coa_geotag_images')
    if not coa_geotag_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'IS_ROAD->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->COA')
        coa_geotag_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_coa_geotag_images', coa_geotag_images, 86400)
    coa_images = memcache.get('statistics_coa_images')
    if not coa_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'PROGRAM->COA')
        coa_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_coa_images', coa_images, 86400)
    prdp_geotag_images = memcache.get('statistics_prdp_geotag_images')
    if not prdp_geotag_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'IS_ROAD->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->PRDP')
        prdp_geotag_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_prdp_geotag_images', prdp_geotag_images,
                     86400)
    prdp_images = memcache.get('statistics_prdp_images')
    if not prdp_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'PROGRAM->PRDP')
        prdp_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_prdp_images', prdp_images, 86400)
    bub_geotag_images = memcache.get('statistics_bub_geotag_images')
    if not bub_geotag_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'IS_ROAD->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->BUB')
        bub_geotag_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_bub_geotag_images', bub_geotag_images, 86400)
    bub_images = memcache.get('statistics_bub_images')
    if not bub_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'PROGRAM->BUB')
        bub_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_bub_images', bub_images, 86400)
    gaa_geotag_images = memcache.get('statistics_gaa_geotag_images')
    if not gaa_geotag_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'IS_ROAD->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->GAA')
        gaa_geotag_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_gaa_geotag_images', gaa_geotag_images, 86400)
    gaa_images = memcache.get('statistics_gaa_images')
    if not gaa_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'PROGRAM->GAA')
        gaa_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_gaa_images', gaa_images, 86400)
    trip_geotag_images = memcache.get('statistics_trip_geotag_images')
    if not trip_geotag_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'IS_ROAD->1')
        query = query.filter(APIData.indexed_data == 'PROGRAM->TRIP')
        trip_geotag_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_trip_geotag_images', trip_geotag_images,
                     86400)
    trip_images = memcache.get('statistics_trip_images')
    if not trip_images:
        query = APIData.query()
        query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
        query = query.filter(APIData.indexed_data == 'PROGRAM->TRIP')
        trip_images = len(query.fetch(keys_only=True))
        memcache.set('statistics_trip_images', trip_images, 86400)
    self.response.headers['Content-Type'] = 'application/json'
    self.response.write(
        json.dumps({
            'coa_projects_geoprocessed':
                memcache.get('statistics_coa_projects_geoprocessed'),
            'prdp_projects_geoprocessed':
                memcache.get('statistics_prdp_projects_geoprocessed'),
            'bub_projects_geoprocessed':
                memcache.get('statistics_bub_projects_geoprocessed'),
            'gaa_projects_geoprocessed':
                memcache.get('statistics_gaa_projects_geoprocessed'),
            'trip_projects_geoprocessed':
                memcache.get('statistics_trip_projects_geoprocessed'),
            'coa_projects': memcache.get('statistics_coa_projects'),
            'prdp_projects': memcache.get('statistics_prdp_projects'),
            'bub_projects': memcache.get('statistics_bub_projects'),
            'gaa_projects': memcache.get('statistics_gaa_projects'),
            'trip_projects': memcache.get('statistics_trip_projects'),
            'coa_geotag_images': memcache.get('statistics_coa_geotag_images'),
            'coa_images': memcache.get('statistics_coa_images'),
            'prdp_geotag_images':
                memcache.get('statistics_prdp_geotag_images'),
            'prdp_images': memcache.get('statistics_prdp_images'),
            'bub_geotag_images': memcache.get('statistics_bub_geotag_images'),
            'bub_images': memcache.get('statistics_bub_images'),
            'gaa_geotag_images': memcache.get('statistics_gaa_geotag_images'),
            'gaa_images': memcache.get('statistics_gaa_images'),
            'trip_geotag_images':
                memcache.get('statistics_trip_geotag_images'),
            'trip_images': memcache.get('statistics_trip_images'),
        }))
    return
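# The handler above repeats one pattern per program: check memcache, run a
# keys-only count query, cache for a day. A consolidation sketch (not part
# of the original code) that each block could call instead:
def cached_count(cache_key, index_filters, ttl=86400):
    """Return a cached keys-only count of APIData matching the given tags."""
    value = memcache.get(cache_key)
    if value is None:
        query = APIData.query()
        for tag in index_filters:
            query = query.filter(APIData.indexed_data == tag)
        value = len(query.fetch(keys_only=True))
        memcache.set(cache_key, value, ttl)
    return value

# e.g. cached_count('statistics_coa_projects_geoprocessed',
#                   ['TYPE->PROJECT', 'HAS_CLASSIFICATION->1', 'PROGRAM->COA'])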
def post(self):
    logging.debug(self.request.POST)
    self.response.headers['Content-Type'] = 'application/json'
    date = datetime.now()
    query = Statistics.query()
    query = query.order(-Statistics.created)
    statistics = query.get()
    if statistics:
        if statistics.created.date() == date.date():
            if statistics.done:
                logging.debug({'done': True})
                return
            else:
                logging.debug({'done': False})
        else:
            statistics = Statistics()
    else:
        statistics = Statistics()
    logging.debug(statistics.statistics)
    if statistics.statistics:
        statistics_ = statistics.statistics
    else:
        statistics_ = {}
    query = Program.query()
    programs = query.fetch()
    for program in programs:
        logging.debug(program.name)
        if program.name.upper() not in statistics_:
            statistics_[program.name.upper()] = {}
    query = APIData.query()
    query = query.filter(APIData.indexed_data == 'TYPE->PROJECT')
    if self.request.get('cursor'):
        logging.debug('HAS CURSOR')
        c = Cursor(urlsafe=self.request.get('cursor'))
        results, cursor, more = query.fetch_page(200, start_cursor=c)
    else:
        logging.debug('NO CURSOR')
        results, cursor, more = query.fetch_page(200)
    logging.debug(len(results))
    logging.debug(more)
    for project in results:
        program = project.additional_data['program'].upper()
        year = date.strftime('%Y')
        if program not in statistics_:
            logging.debug('PROGRAM DOES NOT EXIST')
            statistics_[program] = {}
        if 'year' in project.additional_data:
            year = project.additional_data['year']
        else:
            # Fall back to any '*year*' field, preferring budget-year fields.
            for x in project.additional_data:
                if 'year' in x:
                    if 'budget' in x:
                        if project.additional_data[x]:
                            year = project.additional_data[x]
                            break
                        else:
                            continue
                    else:
                        if project.additional_data[x]:
                            year = project.additional_data[x]
        if year not in statistics_[program]:
            statistics_[program][year] = {}
            statistics_[program][year]['projects'] = 0
            statistics_[program][year]['geoprocessed_images'] = 0
            statistics_[program][year]['geoprocessed_projects'] = 0
            statistics_[program][year]['geoprocessed_projects_details'] = {}
        statistics_[program][year]['projects'] += 1
        if 'has_geoprocessed_images' in project.additional_data:
            if project.additional_data['has_geoprocessed_images'] == '1':
                statistics_[program][year]['geoprocessed_projects'] += 1
                statistics_[program][year]['geoprocessed_images'] += int(
                    project.additional_data['geoprocessed_images'])
                statistics_[program][year]['geoprocessed_projects_details'][
                    project.additional_data['code']] = int(
                        project.additional_data['geoprocessed_images'])
    statistics.statistics = statistics_
    statistics.put()
    if cursor:
        params = {'cursor': cursor.urlsafe()}
        logging.debug(params)
        taskqueue.add(url='/statistics/generate', params=params)
    else:
        # Last page: add per-program totals, then mark the run as done.
        for x in statistics_:
            logging.debug(x.upper())
            query = APIData.query()
            query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
            query = query.filter(
                APIData.indexed_data == 'PROGRAM->' + x.upper())
            total_geotag_images = len(query.fetch(keys_only=True))
            statistics_[x.upper()]['total_geotag_images'] = total_geotag_images
            logging.debug('total_geotag_images:' + str(total_geotag_images))
            query = APIData.query()
            query = query.filter(APIData.indexed_data == 'TYPE->KML')
            query = query.filter(
                APIData.indexed_data == 'PROGRAM->' + x.upper())
            kml = len(query.fetch(keys_only=True))
            statistics_[x.upper()]['kml'] = kml
            logging.debug('kml:' + str(kml))
            query = APIData.query()
            query = query.filter(APIData.indexed_data == 'TYPE->IMAGE')
            query = query.filter(APIData.indexed_data == 'IS_ROAD->1')
            query = query.filter(
                APIData.indexed_data == 'PROGRAM->' + x.upper())
            geotag_images = len(query.fetch(keys_only=True))
            statistics_[x.upper()]['geotag_images'] = geotag_images
            logging.debug('geotag_images:' + str(geotag_images))
        statistics.statistics = statistics_
        statistics.done = True
        statistics.put()
    return
def post(self):
    if self.request.get('target') and self.request.get('id') \
            and self.request.get('permission'):
        if self.request.get('permission') not in ['PUBLIC', 'PRIVATE']:
            logging.error('Permission not valid')
            logging.error(self.request.get('permission'))
            self.response.write('Invalid Permission Value')
            self.error(400)
            return
        target = self.request.get('target')
        if target == 'DATA':
            # Edit the permission setting of a single data entity.
            data = APIData.get_by_id(normalize_id(self.request.get('id')))
            if data:
                if data.additional_data['type'] not in ['PROJECT', 'DATASET']:
                    data.permission = self.request.get('permission')
                    data.put()
                    return
                else:
                    self.response.write(
                        'Cannot modify permission for Project or Dataset')
                    self.error(400)
                    return
            else:
                self.response.write('Data not found')
                logging.error('data not found')
                logging.error(self.request.get('id'))
                self.error(404)
                return
        elif target == 'DATASET':
            # Edit a dataset's environment-based visibility.
            if self.user.role in ["CLUSTERDIRECTOR", 'GEOSTOREADMIN']:
                data = APIData.get_by_id(normalize_id(self.request.get('id')))
                if data:
                    if data.additional_data['type'] == 'DATASET':
                        # Modify the environment keys.
                        if self.request.get('permission') == 'PUBLIC':
                            if ndb.Key('Environment',
                                       'PUBLIC') not in data.environment:
                                data.environment.append(
                                    ndb.Key('Environment', 'PUBLIC'))
                            else:
                                logging.error(
                                    'dataset already in a public environment')
                        elif self.request.get('permission') == 'PRIVATE':
                            # Guard the remove so a missing key does not
                            # raise ValueError.
                            if ndb.Key('Environment',
                                       'PUBLIC') in data.environment:
                                data.environment.remove(
                                    ndb.Key('Environment', 'PUBLIC'))
                        else:
                            logging.error('unknown permission value')
                            logging.error(self.request.get('permission'))
                            self.error(400)
                            return
                        data.put()
                    else:
                        self.response.write('Dataset only allowed')
                        self.error(400)
                        return
                else:
                    self.response.write('Dataset not found')
                    self.error(404)
                    return
            else:
                self.response.write('Access Denied')
                self.error(403)
    else:
        logging.error('Missing Parameters')
        self.response.write('Missing parameters')
        self.error(400)
def update_api_data(data_id=None, items=None, user=None, content_type=None,
                    handler=None):
    # `handler` (the originating request handler) is required for the
    # non-JSON branch, which reads form arguments and uploaded files; the
    # original referenced an undefined `self` there.
    data = APIData.get_by_id(normalize_id(data_id))
    if not data:
        return
    if content_type == "application/json":
        tags = []
        try:
            for key, value in items.items():
                if key.startswith('unindexed_'):
                    ad_key = key.replace("unindexed_", "")
                    data.additional_data[ad_key] = value.strip()
                if key.startswith('indexed_'):
                    ad_key = key.replace("indexed_", "")
                    data.additional_data[ad_key] = value
                    # Drop stale indexed tags for this key before appending
                    # the new one.
                    for d in data.indexed_data:
                        if d.startswith(ad_key.upper()):
                            try:
                                data.indexed_data.remove(d)
                            except Exception as e:
                                logging.exception(e)
                                logging.info("Cannot remove from list")
                    data.indexed_data.append(create_indexed_tag(key, value))
                    try:
                        tags += create_tags(value)
                    except Exception as e:
                        logging.exception("Cannot create tag from: ")
            if user:
                data.username = user.name
                data.user = user.key
            logging.info(tags)
            data.indexed_data = uniquify(data.indexed_data)
            data.tags = uniquify(tags)
            data.put()
        except Exception as e:
            logging.exception(e)
    else:
        tags = []
        try:
            for arg in handler.request.arguments():
                for d in data.indexed_data:
                    ad_key = arg.replace("indexed_", "")
                    if d.startswith(ad_key.upper()):
                        try:
                            data.indexed_data.remove(d)
                        except Exception as e:
                            logging.exception(e)
                if arg.startswith('unindexed_'):
                    ad_key = arg.replace("unindexed_", "")
                    ad_value = handler.request.POST.get(arg)
                    data.additional_data[ad_key] = ad_value.strip()
                if arg.startswith('indexed_'):
                    ad_key = arg.replace("indexed_", "")
                    ad_value = handler.request.POST.get(arg)
                    data.additional_data[ad_key] = ad_value
                    try:
                        tags += create_tags(ad_value)
                    except Exception as e:
                        logging.exception("Cannot create tag from: ")
                    data.indexed_data.append(
                        create_indexed_tag(arg, handler.request.POST.get(arg)))
                if arg.startswith('file_'):
                    filename = BUCKET_NAME
                    filename += random_string(20) + "/"
                    ad_key = arg.replace("file_", "")
                    data.additional_data[ad_key] = {}
                    try:
                        file_name = items.get(arg).filename
                        filename += file_name
                        gcs_options = {'x-goog-acl': 'public-read'}
                        gcs_file = gcs.open(filename, 'w', options=gcs_options)
                        gcs_file.write(handler.request.get(arg))
                        gcs_file.close()
                        full_url = "https://storage.googleapis.com" + filename
                        data.file_url = full_url
                        data.additional_data[ad_key]["file_url"] = full_url
                        try:
                            blob_key = blobstore.create_gs_key("/gs" + filename)
                            data.serving_url = images.get_serving_url(blob_key)
                            data.additional_data[ad_key]["serving_url"] = \
                                data.serving_url
                            data.gcs_key = blobstore.BlobKey(blob_key)
                        except Exception as e:
                            # Not an image; fall back to the raw file URL.
                            logging.exception(e)
                            data.additional_data[ad_key]["serving_url"] = \
                                full_url
                    except AttributeError as e:
                        logging.exception(e)
            if user:
                data.username = user.name
                data.user = user.key
            data.indexed_data = uniquify(data.indexed_data)
            data.tags = uniquify(tags)
            data.put()
        except Exception as e:
            logging.exception(e)
def write_to_api_params(items=None, user=None, content_type=None,
                        imported=False, user_request=None):
    data = APIData()
    data.additional_data = {}
    if user:
        tags = []
        try:
            for arg in items:
                if arg.startswith('unindexed_'):
                    ad_key = arg.replace("unindexed_", "")
                    ad_value = user_request.request.get(arg)
                    data.additional_data[ad_key] = ad_value.strip()
                if arg.startswith('indexed_'):
                    ad_key = arg.replace("indexed_", "")
                    ad_value = user_request.request.get(arg)
                    data.additional_data[ad_key] = ad_value
                    try:
                        tags += create_tags(ad_value)
                    except Exception as e:
                        logging.exception("Cannot create tag from: ")
                    data.indexed_data.append(
                        create_indexed_tag(arg, user_request.request.get(arg)))
                if arg.startswith('file_'):
                    logging.debug(arg)
                    filename = BUCKET_NAME
                    filename += random_string(128) + "/"
                    ad_key = arg.replace("file_", "")
                    data.additional_data[ad_key] = {}
                    try:
                        if not user_request:
                            file_name = items[arg].filename
                            filename += file_name
                            gcs_options = {'x-goog-acl': 'public-read'}
                            gcs_file = gcs.open(filename, 'w',
                                                options=gcs_options)
                            gcs_file.write(items[arg].file.read())
                            gcs_file.close()
                        else:
                            file_name = user_request.request.POST.get(arg)
                            file_name = file_name.filename
                            filename += file_name
                            gcs_options = {'x-goog-acl': 'public-read'}
                            gcs_file = gcs.open(filename, 'w',
                                                options=gcs_options)
                            gcs_file.write(user_request.request.get(arg))
                            gcs_file.close()
                        full_url = "https://storage.googleapis.com" + filename
                        data.file_url = full_url
                        data.additional_data[ad_key]["file_url"] = full_url
                        try:
                            blob_key = blobstore.create_gs_key("/gs" + filename)
                            data.serving_url = images.get_serving_url(blob_key)
                            data.additional_data[ad_key]["serving_url"] = \
                                data.serving_url
                            data.gcs_key = blobstore.BlobKey(blob_key)
                        except Exception as e:
                            logging.exception(e)
                            logging.error("FILE IS NOT AN IMAGE")
                            data.additional_data[ad_key]["serving_url"] = \
                                full_url
                    except AttributeError as e:
                        logging.exception(e)
                        logging.exception("NO FILE ATTACHED")
            data.username = user.name
            data.user = user.key
            data.indexed_data.append(
                create_indexed_tag("USER_ID", str(user.key.id())))
            data.tags = uniquify(tags)
            data.put()
            return data
        except Exception as e:
            logging.exception('ERROR')
            logging.debug(e)
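# random_string is used above for unique GCS path segments. A plausible
# sketch (assumed; the original definition may differ):
import random
import string

def random_string(length):
    """Return a random alphanumeric string of the given length."""
    return ''.join(
        random.choice(string.ascii_letters + string.digits)
        for _ in range(length))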
def post(self, team_id=None):
    if team_id:
        response = {}
        response["code"] = 200
        response["data"] = []
        response["description"] = ""
        response["success"] = True
        team = Environment.get_by_id(int(team_id))
        if team:
            if self.POST("action"):
                if self.POST("action") == "delete_invited_user":
                    email = self.POST("email").strip().lower()
                    if email in team.invited_users:
                        team.invited_users.remove(email)
                        team.put()
                        response["data"] = team.to_object()
                        response['description'] = (
                            'Invitation to ' + email + ' has been cancelled.')
                elif self.POST("action") == "remove_member":
                    user = User.get_by_id(int(self.POST("user_id")))
                    if user.key in team.users:
                        team.users.remove(user.key)
                        team.put()
                        if str(team.key.id()) in user.access_key:
                            user.access_key.remove(str(team.key.id()))
                        if str(team.key.id()) in user.teams:
                            user.teams.remove(str(team.key.id()))
                        user.put()
                        response["data"] = team.to_object()
                elif self.POST("action") == "invite_users":
                    if self.POST("email"):
                        for email in self.POST("email").strip().split(","):
                            email = email.strip().lower()
                            query = User.query()
                            query = query.filter(User.current_email == email)
                            user = query.get()
                            if user:
                                # Existing user: add directly.
                                team.users.append(user.key)
                                team.users_email.append(user.current_email)
                                team.put()
                                content = {
                                    "sender": self.user.name,
                                    "team_name": team.title,
                                    "team_id": str(team.key.id()),
                                    "receiver_name": "",
                                    "receiver_email": email,
                                    "subject": "You have been added to an environment",
                                    "email_type": "environment_add"
                                }
                            else:
                                # Unknown email: record a pending invitation.
                                team.invited_users.append(email)
                                team.put()
                                content = {
                                    "sender": self.user.name,
                                    "team_name": team.title,
                                    "team_id": str(team.key.id()),
                                    'user_email': base64.b64encode(email),
                                    "receiver_name": "",
                                    "receiver_email": email,
                                    "subject": "You have been invited to join an environment",
                                    "email_type": "environment_invite"
                                }
                            taskqueue.add(
                                url="/tasks/email/send",
                                params=content,
                                method="POST")
                    response["data"] = team.to_object()
                elif self.POST("action") == "leave_team":
                    # team.users holds ndb keys, so compare and remove keys
                    # (the original compared string IDs against keys).
                    if self.user.key in team.users:
                        team.users.remove(self.user.key)
                        team.put()
                        self.user.teams.remove(str(team.key.id()))
                        self.user.put()
                        response["data"] = team.to_object()
                    else:
                        response["success"] = False
                        response["description"] = (
                            "User is not part of the environment.")
                elif self.POST("action") == "join_environment":
                    if self.user.current_email.lower() in team.invited_users:
                        if self.user.key not in team.users:
                            team.users.append(self.user.key)
                            team.users_email.append(self.user.current_email)
                        try:
                            team.invited_users.remove(
                                self.user.current_email.lower())
                        except ValueError:
                            logging.info("Email is not in invited users")
                        team.put()
                        response["data"] = team.to_object()
                    else:
                        response['success'] = False
                        if self.user.key in team.users:
                            response['description'] = (
                                "You are already a member of " +
                                team.title.upper() + " environment.")
                        else:
                            response['description'] = (
                                "You have insufficient rights to join "
                                "the environment.")
                elif self.POST("action") == "update_team":
                    if self.POST("environment_name"):
                        query = Environment.query()
                        query = query.filter(
                            Environment.title ==
                            self.POST("environment_name").strip().upper())
                        team2 = query.get()
                        logging.info(team2)
                        logging.info(team)
                        if team2:
                            if str(team2.key.id()) != str(team.key.id()):
                                response["success"] = False
                                response["description"] = (
                                    "Environment name " +
                                    self.POST("environment_name").strip().upper() +
                                    " already exists.")
                                wrap_response(self, response)
                                return
                        team.title = self.POST("environment_name")
                    if self.POST("environment_description"):
                        team.description = self.POST("environment_description")
                    if self.POST("visibility") == 'PUBLIC' and team.private:
                        # Add the PUBLIC environment to every dataset in this
                        # environment.
                        datasets = APIData.query(
                            APIData.environment == team.key).fetch(5000)
                        modified_datasets = []
                        for dataset in datasets:
                            has_public = False
                            for environment in dataset.environment:
                                if environment.id() == 'PUBLIC':
                                    has_public = True
                            if not has_public:
                                dataset.environment.append(
                                    ndb.Key('Environment', 'PUBLIC'))
                                modified_datasets.append(dataset)
                        if modified_datasets:
                            ndb.put_multi(modified_datasets)
                        team.private = False
                        team.put()
                        response['success'] = True
                        response['description'] = \
                            'Environment Visibility Updated'
                    elif self.POST("visibility") == 'PRIVATE' \
                            and not team.private:
                        # Remove the PUBLIC environment from every dataset in
                        # this environment.
                        datasets = APIData.query(
                            APIData.environment == team.key).fetch(5000)
                        modified_datasets = []
                        for dataset in datasets:
                            has_public = False
                            new_dataset_environments = []
                            for environment in dataset.environment:
                                if environment.id() != 'PUBLIC':
                                    new_dataset_environments.append(
                                        environment)
                                else:
                                    has_public = True
                            if has_public:
                                dataset.environment = new_dataset_environments
                                modified_datasets.append(dataset)
                        if modified_datasets:
                            ndb.put_multi(modified_datasets)
                        team.private = True
                        team.put()
                        response['success'] = True
                        response['description'] = \
                            'Environment Visibility Updated'
                    team.put()
                    response["data"] = team.to_object()
                elif self.POST('action') == 'add_user_group':
                    response["user_groups"] = []
                    if self.POST('user_groups'):
                        user_groups = json.loads(self.POST('user_groups'))
                        for g in user_groups:
                            group = UserGroup.get_by_id(int(g))
                            if group:
                                if group.key not in team.user_groups:
                                    if group.users:
                                        for u in group.users:
                                            if u not in team.users:
                                                user = u.get()
                                                if user:
                                                    if user.current_email in \
                                                            team.invited_users:
                                                        team.invited_users.remove(
                                                            user.current_email.lower())
                                                    team.users_email.append(
                                                        user.current_email)
                                                    team.users.append(u)
                                    group.environments.append(team.key)
                                    group.put()
                                    response["user_groups"].append(
                                        group.to_object())
                                    team.user_groups.append(group.key)
                        team.put()
                        response['description'] = (
                            'User group(s) have been added to ' +
                            team.title.upper() + ' environment.')
                    else:
                        response['success'] = False
                        response['description'] = 'Invalid user group.'
                    response['environment'] = team.to_object()
                elif self.POST('action') == 'remove_user_group':
                    if self.POST('group_id'):
                        group = UserGroup.get_by_id(int(self.POST('group_id')))
                        if group:
                            if group.key in team.user_groups:
                                if group.users:
                                    for u in group.users:
                                        if u in team.users:
                                            if u != team.owner:
                                                team.users.remove(u)
                                if team.key in group.environments:
                                    group.environments.remove(team.key)
                                    group.put()
                                team.user_groups.remove(group.key)
                                team.put()
                                response['description'] = (
                                    'User group(s) have been removed from ' +
                                    team.title.upper() + ' environment.')
                                response['environment'] = team.to_object()
                                response['user_group'] = group.to_object()
                        else:
                            response['success'] = False
                            response['description'] = 'Invalid user group.'
        wrap_response(self, response)
    else:
        if self.POST("environment_name") \
                and self.POST("environment_description"):
            # Create Environment.
            # Only the CLUSTERDIRECTOR role can create an environment.
            if self.user.role != "CLUSTERDIRECTOR":
                msg = "You have insufficient rights to access this application."
                error_message(self, msg)
                self.redirect("/environment")
                return
            query = Environment.query()
            query = query.filter(
                Environment.title ==
                self.POST("environment_name").strip().upper())
            environment = query.get()
            if environment:
                msg = "Could not create the environment. "
                msg += self.POST("environment_name").strip()
                msg += " already exists."
                error_message(self, msg)
            else:
                environment = Environment()
                environment.title = \
                    self.POST("environment_name").strip().upper()
                environment.description = \
                    self.POST("environment_description").strip()
                environment.owner = self.user.key
                environment.users.append(self.user.key)
                environment.users_email.append(self.user.current_email)
                environment.put()
                if self.request.get_all('user_group'):
                    for group_id in self.request.get_all('user_group'):
                        user_group = UserGroup.get_by_id(int(group_id))
                        if user_group:
                            if user_group.users:
                                for u in user_group.users:
                                    user = u.get()
                                    if user:
                                        user.user_groups.append(
                                            str(user_group.key.id()))
                                        environment.users.append(user.key)
                                        environment.users_email.append(
                                            user.current_email)
                            environment.user_groups.append(user_group.key)
                            user_group.environments.append(environment.key)
                            user_group.put()
                if self.POST("environment_member_emails"):
                    for email in self.POST(
                            "environment_member_emails").split(","):
                        query = User.query()
                        query = query.filter(
                            User.current_email == email.strip().lower())
                        user = query.get()
                        if user:
                            environment.users.append(user.key)
                            environment.users_email.append(user.current_email)
                            environment.put()
                            content = {
                                "sender": self.user.name,
                                "team_name": environment.title,
                                "team_id": str(environment.key.id()),
                                "receiver_name": "",
                                "receiver_email": email,
                                "subject": "You have been added to an environment",
                                "email_type": "environment_add"
                            }
                        else:
                            environment.invited_users.append(
                                email.strip().lower())
                            environment.put()
                            content = {
                                "sender": self.user.name,
                                "team_name": environment.title,
                                "team_id": str(environment.key.id()),
                                "receiver_name": "",
                                "receiver_email": email,
                                "subject": "You have been invited to join an environment",
                                "email_type": "environment_invite"
                            }
                        taskqueue.add(
                            url="/tasks/email/send",
                            params=content,
                            method="POST")
                msg = "Environment has been saved."
                success_message(self, msg)
                self.redirect('/environment?current_environment=' +
                              str(environment.key.id()))
                return
        self.redirect("/environment")
def post(self, data_id=None):
    response = {}
    response["success"] = True
    logging.info(self.request.headers)
    content_type = self.request.headers.get("Content-Type")

    if not self.user:
        if content_type == "application/json":
            if "Authorization" not in self.request.headers:
                logging.info("No Authorization in headers")
                desc = "You must be logged in to use the API."
                response["success"] = False
                response["response"] = "AuthorizationError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            if self.request.headers["Authorization"] == API_KEY:
                # Server-to-server call: the acting user is identified by
                # the From header instead of a session token.
                if not self.request.headers.get("From"):
                    logging.info("No email defined")
                    desc = "Cannot find user."
                    response["success"] = False
                    response["response"] = "InvalidUserError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
                user_email = self.request.headers["From"].lower()
                query = User.query()
                owner = query.filter(
                    User.current_email == user_email).get()
                if not owner:
                    logging.info("Cannot find user")
                    desc = "Cannot find user."
                    response["success"] = False
                    response["response"] = "InvalidUserError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
            else:
                token = Token.get_by_id(
                    self.request.headers["Authorization"])
                if not token:
                    logging.info("Cannot find token: " + str(
                        self.request.headers["Authorization"]))
                    desc = "The token you provided is invalid."
                    response["success"] = False
                    response["response"] = "InvalidTokenError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
                logging.info(token)
                session = token.session.get()
                if not session:
                    logging.info("Cannot find session")
                    desc = "The token has already expired."
                    # Was response["error"] = False; use the "success" flag
                    # like every other branch.
                    response["success"] = False
                    response["response"] = "InvalidTokenError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
                logging.info(session)
                if (session.expires < datetime.datetime.now()
                        or session.status is False):
                    logging.info("token has expired or not active")
                    desc = "The token has already expired."
                    response["success"] = False
                    response["response"] = "InvalidTokenError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
        else:
            desc = "You must be logged in to use the API."
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
                self.redirect(url)
            # Anonymous non-JSON requests must not fall through; the
            # original was missing this return.
            return

    if not data_id:
        desc = "ID is missing from the request."
        if content_type == "application/json":
            response["success"] = False
            response["response"] = "MissingParametersError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
        else:
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
                self.redirect(url)
        return

    data = APIData.get_by_id(normalize_id(data_id))
    if not data:
        desc = "Cannot find the package."
        if content_type == "application/json":
            response["success"] = False
            response["response"] = "InvalidIDError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
        else:
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
                self.redirect(url)
        return

    if data.archived:
        # Archived packages are treated as if they do not exist.
        desc = "Cannot find the package."
        if content_type == "application/json":
            response["success"] = False
            response["response"] = "InvalidIDError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
        else:
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
                self.redirect(url)
        return

    desc = "There are missing parameters in your request."
    if content_type == "application/json":
        if not self.request.body:
            response["success"] = False
            response["response"] = "MissingParametersError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
            return
        try:
            body = json.loads(self.request.body)
        except Exception as e:
            logging.info(e)
            desc = "Invalid JSON format."
            response["success"] = False
            response["response"] = "InvalidJSONError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
            return
        tags = []
        try:
            for key, value in body.items():
                try:
                    tags += create_tags(value)
                except Exception as e:
                    logging.info("Cannot create tag from: ")
                    logging.info(e)
                if key.startswith('unindexed_'):
                    ad_key = key.replace("unindexed_", "")
                    data.additional_data[ad_key] = value.strip()
                if key.startswith('indexed_'):
                    ad_key = key.replace("indexed_", "")
                    data.additional_data[ad_key] = value
                    # Iterate over a copy: removing items from the list
                    # being iterated skips elements.
                    for d in list(data.indexed_data):
                        if d.startswith(ad_key.upper()):
                            try:
                                data.indexed_data.remove(d)
                            except Exception as e:
                                logging.exception(e)
                                logging.info("Cannot remove from list")
                    data.indexed_data.append(create_indexed_tag(key, value))
            if self.user:
                data.username = self.user.name
                data.user = self.user.key
            data.indexed_data = uniquify(data.indexed_data)
            data.tags = uniquify(tags)
            data.put()
            desc = "Data has been saved."
            response["success"] = True
            response["response"] = "Success"
            response["description"] = desc
            response["code"] = 200
            response["data"] = data.to_api_object()
            wrap_response(self, response)
        except Exception as e:
            logging.exception(e)
            desc = "A server error occurred. Please try again later."
            response["success"] = False
            response["response"] = "ServerError"
            response["description"] = desc
            response["code"] = 500
            wrap_response(self, response)
    else:
        if not self.request.arguments():
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
                self.redirect(url)
            return
        tags = []
        try:
            for arg in self.request.arguments():
                # Drop any existing indexed tags this argument replaces;
                # iterate over a copy so removal does not skip elements.
                ad_key = arg.replace("indexed_", "")
                for d in list(data.indexed_data):
                    if d.startswith(ad_key.upper()):
                        try:
                            data.indexed_data.remove(d)
                        except Exception as e:
                            logging.exception(e)
                            logging.info("Cannot remove from list")
                if arg.startswith('unindexed_'):
                    ad_key = arg.replace("unindexed_", "")
                    ad_value = self.request.POST.get(arg)
                    data.additional_data[ad_key] = ad_value.strip()
                    try:
                        tags += create_tags(ad_value)
                    except Exception as e:
                        logging.info("Cannot create tag from: ")
                        logging.info(e)
                if arg.startswith('indexed_'):
                    ad_key = arg.replace("indexed_", "")
                    ad_value = self.request.POST.get(arg)
                    data.additional_data[ad_key] = ad_value
                    try:
                        tags += create_tags(ad_value)
                    except Exception as e:
                        logging.info("Cannot create tag from: ")
                        logging.info(e)
                    data.indexed_data.append(
                        create_indexed_tag(arg, self.request.POST.get(arg)))
                if arg.startswith('file_'):
                    filename = BUCKET_NAME
                    filename += random_string(20) + "/"
                    ad_key = arg.replace("file_", "")
                    data.additional_data[ad_key] = {}
                    try:
                        file_name = self.request.POST.get(arg).filename
                        filename += file_name
                        gcs_file = gcs.open(
                            filename, 'w',
                            options={'x-goog-acl': 'public-read'})
                        gcs_file.write(self.request.get(arg))
                        gcs_file.close()
                        full_url = ("https://storage.googleapis.com" +
                                    filename)
                        data.file_url = full_url
                        data.additional_data[ad_key]["file_url"] = full_url
                        try:
                            blob_key = blobstore.create_gs_key(
                                "/gs" + filename)
                            data.serving_url = images.get_serving_url(
                                blob_key)
                            data.additional_data[ad_key][
                                "serving_url"] = data.serving_url
                            data.gcs_key = blobstore.BlobKey(blob_key)
                        except Exception as e:
                            # get_serving_url only works for images; fall
                            # back to the raw GCS URL.
                            logging.exception(e)
                            logging.error("not an image??")
                            data.additional_data[ad_key][
                                "serving_url"] = full_url
                    except AttributeError as e:
                        logging.exception(e)
                        logging.exception("NO FILE ATTACHED")
            if self.user:
                data.username = self.user.name
                data.user = self.user.key
            data.indexed_data = uniquify(data.indexed_data)
            data.tags = uniquify(tags)
            data.put()
            desc = "Data has been updated."
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?success=" + urllib.quote(desc)
                self.redirect(url)
            else:
                response["success"] = True
                response["response"] = "Success"
                response["description"] = desc
                response["code"] = 200
                response["data"] = data.to_api_object()
                wrap_response(self, response)
        except Exception as e:
            logging.exception(e)
            desc = "A server error occurred. Please try again later."
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
                self.redirect(url)
            else:
                response["success"] = False
                response["response"] = "ServerError"
                response["description"] = desc
                response["code"] = 500
                wrap_response(self, response)
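# Editor's note (illustrative sketch): a minimal JSON client for the update
# handler above. The route (/api/v1/data/<data_id>) and the host are
# hypothetical -- neither appears in this file -- but the Content-Type,
# Authorization, and indexed_/unindexed_ conventions are the ones the
# handler actually checks. Python 2 / urllib2 to match the codebase.
import json
import urllib2

def update_package(data_id, token, fields):
    # fields example: {"indexed_status": "ACTIVE",
    #                  "unindexed_notes": "resurveyed 2015"}
    req = urllib2.Request(
        'https://your-app.appspot.com/api/v1/data/' + str(data_id),
        data=json.dumps(fields),
        headers={'Content-Type': 'application/json',
                 'Authorization': token})
    # The handler replies with {"success": ..., "code": ..., "data": ...}.
    return json.loads(urllib2.urlopen(req).read())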
def post(self):
    # query all images
    count = self.request.get('count')
    if count:
        logging.info(str(count))
        count = int(count)
    else:
        count = 0
    query = APIData.query(
        APIData.indexed_data == 'TYPE->IMAGE').order(-APIData.created_time)
    cursor = None
    if self.request.get('cursor'):
        cursor = Cursor(urlsafe=self.request.get("cursor"))
    if cursor:
        results, cursor2, more = query.fetch_page(50, start_cursor=cursor)
    else:
        results, cursor2, more = query.fetch_page(50)
    images = []
    for result in results:
        project_key = None
        # dataset_id
        if 'dataset_id' not in result.additional_data:
            if 'dataset_code' in result.additional_data:
                dataset_key = APIData.query(
                    APIData.indexed_data == 'CODE->' +
                    result.additional_data['dataset_code']).get(
                        keys_only=True)
            else:
                dataset_key = APIData.query(
                    APIData.indexed_data == 'CODE->' +
                    result.additional_data['parent_code']).get(
                        keys_only=True)
            result.additional_data['dataset_id'] = str(dataset_key.id())
            result.indexed_data.append(
                create_indexed_tag('dataset_id', str(dataset_key.id())))
        # project_id
        if 'project_id' not in result.additional_data:
            project_key = APIData.query(
                APIData.indexed_data == 'CODE->' +
                result.additional_data['project_code']).get(keys_only=True)
            result.additional_data['project_id'] = str(project_key.id())
            result.indexed_data.append(
                create_indexed_tag('project_id', str(project_key.id())))
        # subproject_id
        if 'subproject_code' in result.additional_data:
            if 'subproject_id' not in result.additional_data:
                if not project_key:
                    project_key = APIData.query(
                        APIData.indexed_data == 'CODE->' +
                        result.additional_data['project_code']).get(
                            keys_only=True)
                result.additional_data['subproject_id'] = str(
                    project_key.id())
                result.indexed_data.append(
                    create_indexed_tag('subproject_id',
                                       str(project_key.id())))
        # parent_id
        result.additional_data['parent_id'] = result.additional_data[
            'dataset_id']
        result.indexed_data.append(
            create_indexed_tag('parent_id',
                               result.additional_data['parent_id']))
        images.append(result)
    if images:
        ndb.put_multi(images)
        count += len(images)
    logging.info(str(count))
    if more:
        taskqueue.add(url="/tasks/images",
                      params={'cursor': cursor2.urlsafe(), 'count': count},
                      method="POST")
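# Editor's note (illustrative sketch): the handler above re-enqueues itself
# with the query cursor until fetch_page() reports no more results, the same
# cursor-chaining pattern used by the other backfill tasks in this file. A
# backfill like this is typically kicked off with a single parameterless
# task; the /tasks/images URL is taken from the handler itself, and the
# helper name below is hypothetical.
from google.appengine.api import taskqueue

def start_image_backfill():
    # First run carries no cursor or count; the handler defaults count to 0
    # and fetches the first page of 50 images.
    taskqueue.add(url='/tasks/images', params={}, method='POST')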
def write_to_api(items=None, user=None, content_type=None, imported=False,
                 user_request=None):
    logging.debug(items)
    logging.debug(user)
    logging.debug(content_type)
    logging.debug(imported)
    logging.debug(user_request)
    if imported:
        # Imported records keep a stable, caller-supplied ID.
        data = APIData(id=str(items['indexed_code']))
    else:
        data = APIData()
    if user == '*****@*****.**':
        # Sentinel account (address redacted in the source): resolve the
        # string to its User entity.
        user = User.query(User.current_email == '*****@*****.**').get()
    data.additional_data = {}
    if content_type == "application/json":
        tags = []
        logging.debug(items)
        try:
            for key, value in items.items():
                if key.startswith('unindexed_'):
                    ad_key = key.replace("unindexed_", "")
                    data.additional_data[ad_key] = value.strip()
                if key.startswith('indexed_'):
                    ad_key = key.replace('indexed_', '').replace('_array', '')
                    if key.endswith('_array'):
                        # JSON-encoded list: index each element separately.
                        value_arr = json.loads(value)
                        for v_arr in value_arr:
                            data.indexed_data.append(
                                create_indexed_tag(key, v_arr))
                    else:
                        data.indexed_data.append(
                            create_indexed_tag(key, value))
                    data.additional_data[ad_key] = value
                    try:
                        tags += create_tags(value)
                    except Exception as e:
                        logging.exception("Cannot create tag from: ")
            if user:
                data.username = user.name
                data.user = user.key
                data.indexed_data.append(
                    create_indexed_tag("USER_ID", str(user.key.id())))
            data.indexed_data = uniquify(data.indexed_data)
            data.tags = uniquify(tags)
            data.put()
            return data
        except Exception as e:
            logging.exception(e)
    else:
        tags = []
        try:
            for arg in items:
                if arg.startswith('unindexed_'):
                    # unindexed_
                    ad_key = arg.replace("unindexed_", "")
                    ad_value = items.get(arg)
                    data.additional_data[ad_key] = ad_value.strip()
                if arg.startswith('indexed_'):
                    ad_key = arg.replace("indexed_", "")
                    ad_value = items.get(arg)
                    data.additional_data[ad_key] = ad_value
                    try:
                        tags += create_tags(ad_value)
                    except Exception as e:
                        logging.exception("Cannot create tag from: ")
                    data.indexed_data.append(
                        create_indexed_tag(arg, items.get(arg)))
                if arg.startswith('file_'):
                    logging.debug(arg)
                    filename = BUCKET_NAME
                    filename += random_string(128) + "/"
                    ad_key = arg.replace("file_", "")
                    data.additional_data[ad_key] = {}
                    try:
                        if not user_request:
                            # Expects one of these upload fields to be
                            # present in items.
                            if 'file_image' in items:
                                file_field = 'file_image'
                            if 'file_file' in items:
                                file_field = 'file_file'
                            if 'file_kml' in items:
                                file_field = 'file_kml'
                            file_name = items[file_field].filename
                            filename += file_name
                            gcs_options = {'x-goog-acl': 'public-read'}
                            gcs_file = gcs.open(filename, 'w',
                                                options=gcs_options)
                            gcs_file.write(items[file_field].file.read())
                            gcs_file.close()
                        else:
                            file_name = user_request.request.POST.get('file')
                            file_name = file_name.filename
                            filename += file_name
                            gcs_options = {'x-goog-acl': 'public-read'}
                            gcs_file = gcs.open(filename, 'w',
                                                options=gcs_options)
                            gcs_file.write(user_request.request.get('file'))
                            gcs_file.close()
                        full_url = ("https://storage.googleapis.com" +
                                    filename)
                        data.file_url = full_url
                        data.additional_data[ad_key]["file_url"] = full_url
                        try:
                            blob_key = blobstore.create_gs_key(
                                "/gs" + filename)
                            data.serving_url = images.get_serving_url(
                                blob_key)
                            data.additional_data[ad_key][
                                "serving_url"] = data.serving_url
                            data.gcs_key = blobstore.BlobKey(blob_key)
                        except Exception as e:
                            # Non-image files have no serving URL; fall back
                            # to the raw GCS URL.
                            logging.exception(e)
                            logging.error("FILE IS NOT AN IMAGE")
                            data.additional_data[ad_key][
                                "serving_url"] = full_url
                    except AttributeError as e:
                        logging.exception(e)
                        logging.exception("NO FILE ATTACHED")
            if user:
                data.username = user.name
                data.user = user.key
                data.indexed_data.append(
                    create_indexed_tag("USER_ID", str(user.key.id())))
            data.tags = uniquify(tags)
            data.put()
            return data
        except Exception as e:
            logging.exception('ERROR')
            logging.debug(e)
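# Editor's note (illustrative sketch): how a JSON ingest handler might hand
# its decoded payload to write_to_api(). The handler name and the payload
# field values are hypothetical; the indexed_/unindexed_ prefixes follow the
# convention the function parses, and wrap_response()/to_api_object() are
# the helpers used elsewhere in this file.
def ingest_json(self):
    body = json.loads(self.request.body)
    # e.g. {"indexed_code": "PRJ-001", "indexed_type": "PROJECT",
    #       "unindexed_notes": "imported from legacy system"}
    record = write_to_api(items=body, user=self.user,
                          content_type="application/json")
    if record:
        wrap_response(self, {"success": True, "code": 200,
                             "data": record.to_api_object()})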