def project_count_images(project_object):
    """Count non-archived PUBLIC-environment IMAGE records tagged with
    this project's code, paging through keys 200 at a time."""
    q = APIData.query()
    q = q.filter(APIData.archived == False)
    q = q.filter(APIData.environment == ndb.Key('Environment', 'PUBLIC'))
    q = q.filter(APIData.indexed_data == create_indexed_tag('type', 'IMAGE'))
    q = q.filter(APIData.indexed_data ==
                 create_indexed_tag('project_code', project_object['code']))
    total = 0
    cursor = None
    more = True
    while more:
        # keys_only keeps the count cheap; the cursor resumes each page.
        if cursor:
            page, cursor, more = q.fetch_page(
                200, start_cursor=cursor, keys_only=True)
        else:
            page, cursor, more = q.fetch_page(200, keys_only=True)
        total += len(page)
    return total
def project_has_image(project_object):
    """Return 1 if the project has at least one live PUBLIC image, else 0."""
    q = APIData.query()
    q = q.filter(APIData.archived == False)
    q = q.filter(APIData.environment == ndb.Key('Environment', 'PUBLIC'))
    q = q.filter(APIData.indexed_data == create_indexed_tag('type', 'IMAGE'))
    q = q.filter(APIData.indexed_data ==
                 create_indexed_tag('project_code', project_object['code']))
    # keys_only: existence check, no need to load the entity.
    return 1 if q.get(keys_only=True) else 0
def project_has_classification(project_object):
    """Return 1 if the project has at least one live PUBLIC classification,
    else 0."""
    q = APIData.query()
    q = q.filter(APIData.archived == False)
    q = q.filter(APIData.environment == ndb.Key('Environment', 'PUBLIC'))
    q = q.filter(APIData.indexed_data ==
                 create_indexed_tag('type', 'CLASSIFICATION'))
    q = q.filter(APIData.indexed_data ==
                 create_indexed_tag('project_code', project_object['code']))
    # keys_only: existence check, no need to load the entity.
    return 1 if q.get(keys_only=True) else 0
def create_initial_dataset(cls, dataset_name, dataset_desc, category, department, uacs, user, license_id, odi=""):
    """ Creates a new dataset. """
    # CKAN slug: lower-case, trimmed, spaces become dashes.
    dataset_ckan_name = dataset_name.lower().strip().replace(" ", "-")
    if cls.check_dataset_exist(dataset_ckan_name):
        raise DatasetExistsError(dataset_name)
    user_id = str(user.key.id())
    dataset = APIData()
    dataset.additional_data = {
        "private": False,
        "type": "DATASET",
        "status": "FOR REVIEW",
        "user_id": user_id,
        "dataset_title": dataset_name,
        "dataset_description": dataset_desc,
        "dataset_category": category,
        "dataset_ckan_name": dataset_ckan_name,
        "department": department,
        "uacs_id": uacs,
        "odi_certificate": odi,
    }
    # Resolve the license title from the known LICENSE table, if present.
    for entry in LICENSE:
        if entry["code"] == license_id.lower():
            dataset.additional_data["license_id"] = license_id
            dataset.additional_data["license_title"] = entry["name"]
            break
    # Queryable key->value tags mirroring the fields above.
    for tag_key, tag_value in (
            ("type", "DATASET"),
            ("status", "FOR REVIEW"),
            ("user_id", user_id),
            ("dataset_ckan_name", dataset_ckan_name),
            ("uacs_id", uacs),
            ("department", department),
            ("dataset_title", dataset_name),
            ("dataset_category", category)):
        dataset.indexed_data.append(create_indexed_tag(tag_key, tag_value))
    dataset.username = user.name
    dataset.user = user.key
    dataset.tags = create_tags(dataset_name) + create_tags(department)
    dataset.put()
    return dataset
def get(self):
    """Render the geoprocessing classification page.

    When a project_code is supplied, loads up to 20 KML records tagged
    with that project code and up to 25 DATASET records whose
    parent_code matches, exposing both to the template.
    """
    self.tv['page_geoprocessing'] = True
    # self.tv["program"] = "all"
    self.tv['project_code'] = self.request.get('project_code')
    self.tv['system_url'] = 'openroads-geostore.appspot.com'
    if self.tv['project_code']:
        code = str(self.tv['project_code'])
        kml_query = APIData.query(
            APIData.indexed_data == create_indexed_tag('project_code', code))
        kml_query = kml_query.filter(
            APIData.indexed_data == create_indexed_tag('type', 'KML'))
        kml_query = kml_query.order(-APIData.created_time)
        kmls = kml_query.fetch(20)
        logging.debug(kmls)
        self.tv['kmls'] = []
        ds_query = APIData.query(
            APIData.indexed_data == create_indexed_tag('parent_code', code))
        ds_query = ds_query.filter(
            APIData.indexed_data == create_indexed_tag('type', 'DATASET'))
        ds_query = ds_query.order(-APIData.created_time)
        datasets = ds_query.fetch(25)
        for kml in kmls:
            self.tv['kmls'].append({
                'filename': find_between_r(kml.file_url + ' ', '/', ' '),
                'kml_id': kml.key.id(),
                'file_url': kml.file_url,
                'parent_code': kml.additional_data['parent_code']
            })
        # Map dataset code -> human-readable creation date.
        self.tv['datasets'] = {}
        for dataset in datasets:
            key = dataset.additional_data.get('code')
            self.tv['datasets'][key] = dataset.created_time.strftime('%B %d, %Y')
    self.tv['asset_values'] = json.dumps(ASSET_VALUES)
    self.render('geoprocessing-classification.html')
def get(self):
    """Render the new classification summary page.

    When a project_code is supplied, loads up to 20 KML records tagged
    with that project code and up to 25 child DATASET records, exposing
    both to the template.
    """
    self.tv['page_geoprocessing'] = True
    self.tv['project_code'] = self.request.get('project_code')
    if self.tv['project_code']:
        code = str(self.tv['project_code'])
        kml_query = APIData.query(
            APIData.indexed_data == create_indexed_tag('project_code', code))
        kml_query = kml_query.filter(
            APIData.indexed_data == create_indexed_tag('type', 'KML'))
        kml_query = kml_query.order(-APIData.created_time)
        kmls = kml_query.fetch(20)
        logging.debug(kmls)
        self.tv['kmls'] = []
        ds_query = APIData.query(
            APIData.indexed_data == create_indexed_tag('parent_code', code))
        ds_query = ds_query.filter(
            APIData.indexed_data == create_indexed_tag('type', 'DATASET'))
        ds_query = ds_query.order(-APIData.created_time)
        datasets = ds_query.fetch(25)
        logging.debug(datasets)
        for kml in kmls:
            self.tv['kmls'].append({
                'filename': find_between_r(kml.file_url + ' ', '/', ' '),
                'kml_id': kml.key.id(),
                'file_url': kml.file_url,
                'parent_code': kml.additional_data['parent_code']
            })
        self.tv['datasets'] = [dataset.to_api_object() for dataset in datasets]
    self.tv['asset_values'] = json.dumps(ASSET_VALUES)
    self.render('geoprocessing/new-classification-summary.html')
def check_dataset_exist(cls, dataset_name):
    """ Checks if dataset already exists. """
    # NOTE(review): the tag key is upper-case "DATASET_CKAN_NAME" here
    # while writers use lower-case "dataset_ckan_name" — presumably
    # create_indexed_tag normalizes case; confirm.
    tag = create_indexed_tag("DATASET_CKAN_NAME", dataset_name)
    existing = cls.query().filter(cls.indexed_data == tag).get()
    if existing:
        return True
    return False
def add_additional_info_dataset(cls, dataset_id, additional_info):
    """ Updates the additional info of the dataset. """
    dataset = cls.get_by_id(int(dataset_id))
    if not dataset:
        return
    logging.error(additional_info)
    for info_key, info_value in additional_info.items():
        dataset.additional_data[info_key.lower()] = info_value
        # Only truthy values become queryable tags.
        if info_value:
            dataset.indexed_data.append(create_indexed_tag(info_key, info_value))
    dataset.indexed_data = uniquify(dataset.indexed_data)
    dataset.put()
    return dataset
def get(self):
    """Serve a merged KML (or JSON) download for a project.

    Requires 'project_code' or 'parent_code'. If a cached GCSFile exists
    and is newer than the most recent classification, redirect straight
    to it; otherwise defer a merge task and render a waiting page.
    """
    if not self.request.get('project_code') and not self.request.get('parent_code'):
        logging.error('missing project code')
        self.error(400)
        return
    project_code = self.request.get('project_code')
    parent_code = self.request.get('parent_code')
    kml_id = self.request.get('kml_id')
    if not kml_id:
        # No explicit KML requested: look one up by project code.
        kml = APIData.query()
        kml = kml.filter(APIData.indexed_data == 'TYPE->KML')
        kml = kml.filter(APIData.indexed_data == 'PROJECT_CODE->' + project_code.upper())
        kml = kml.get()
        # NOTE(review): if no KML matches, kml is None and .key raises
        # AttributeError before the not-found check below — confirm a
        # match is guaranteed for valid project codes.
        kml_id = str(kml.key.id())
    else:
        kml = APIData.get_by_id(normalize_id(kml_id))
    if not kml:
        logging.error('cannot find kml_id: ' + kml_id)
        self.error(404)
        return
    # check if has GCSFile
    if self.request.get('output') and self.request.get('output') == 'json':
        file_type = 'JSON'
    else:
        file_type = 'KML'
    gcs_file = GCSFile.query(GCSFile.kml_id == kml_id, GCSFile.file_type == file_type).order(-GCSFile.created_time).get()
    if gcs_file:
        # check if classification
        # Latest classification overall; if it predates the cached file,
        # the cache is still fresh and we can redirect to it.
        classification_check = APIData.query(APIData.indexed_data == create_indexed_tag('TYPE', 'CLASSIFICATION')).order(-APIData.created_time).get()
        if classification_check and classification_check.created_time < gcs_file.created_time:
            # has file and still updated
            self.redirect(gcs_file.link.encode('utf-8'))
            return
    # Cache missing or stale: rebuild asynchronously.
    output = self.request.get('output')
    deferred.defer(merge_kml, parent_code, project_code, output, kml)
    self.tv['project_code'] = project_code
    self.render('kml-download.html')
    return
def post(self):
    """Task handler: backfill dataset_id / project_id / subproject_id /
    parent_id onto IMAGE records, 50 per run, re-enqueuing itself with a
    cursor until all pages are processed.
    """
    # query all images
    count = self.request.get('count')
    if count:
        logging.info(str(count))
        count = int(count)
    else:
        count = 0
    query = APIData.query(
        APIData.indexed_data == 'TYPE->IMAGE').order(-APIData.created_time)
    cursor = None
    if self.request.get('cursor'):
        cursor = Cursor(urlsafe=self.request.get("cursor"))
    if cursor:
        results, cursor2, more = query.fetch_page(50, start_cursor=cursor)
    else:
        results, cursor2, more = query.fetch_page(50)
    images = []
    for result in results:
        project_key = None
        # dataset_id
        if 'dataset_id' not in result.additional_data:
            # Resolve the parent dataset by its CODE tag; fall back to
            # parent_code when dataset_code is absent.
            if 'dataset_code' in result.additional_data:
                dataset_key = APIData.query(
                    APIData.indexed_data == 'CODE->' +
                    result.additional_data['dataset_code']).get(
                        keys_only=True)
            else:
                dataset_key = APIData.query(
                    APIData.indexed_data == 'CODE->' +
                    result.additional_data['parent_code']).get(
                        keys_only=True)
            result.additional_data['dataset_id'] = str(dataset_key.id())
            result.indexed_data.append(
                create_indexed_tag('dataset_id', str(dataset_key.id())))
        # project_id
        if 'project_id' not in result.additional_data:
            project_key = APIData.query(
                APIData.indexed_data == 'CODE->' +
                result.additional_data['project_code']).get(keys_only=True)
            result.additional_data['project_id'] = str(project_key.id())
            result.indexed_data.append(
                create_indexed_tag('project_id', str(project_key.id())))
        # subproject_id
        # NOTE(review): subproject_id is resolved from project_code (not
        # subproject_code), so it ends up equal to the project's id —
        # looks like a copy/paste slip; confirm intended behavior.
        if 'subproject_code' in result.additional_data:
            if 'subproject_id' not in result.additional_data:
                if not project_key:
                    project_key = APIData.query(
                        APIData.indexed_data == 'CODE->' +
                        result.additional_data['project_code']).get(
                            keys_only=True)
                result.additional_data['subproject_id'] = str(
                    project_key.id())
                result.indexed_data.append(
                    create_indexed_tag('subproject_id',
                                       str(project_key.id())))
        # parent_id
        result.additional_data['parent_id'] = result.additional_data[
            'dataset_id']
        result.indexed_data.append(
            create_indexed_tag('parent_id',
                               result.additional_data['parent_id']))
        images.append(result)
    if images:
        ndb.put_multi(images)
    count += len(images)
    logging.info(str(count))
    # Re-enqueue for the next page of images.
    if more:
        taskqueue.add(url="/tasks/images", params={
            'cursor': cursor2.urlsafe(),
            'count': count
        }, method="POST")
def get(self):
    """ Handles the /api/v1/data endpoint. Returns list of datasets. """
    response = {
        "code": 200,
        "type": "List of geostore saved data.",
        "method": "GET",
        "response": "OK",
        "data": []
    }
    # Default number of entities to be retrieved is 50.
    n = 50
    if self.GET("n"):
        n = int(self.GET("n"))
    # if the number of entities to be retrieved given is
    # greater than 100. Switch back to default which is 100
    if n > 100:
        n = 100
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    # No logged-in user: try to authenticate via the Authorization
    # header (token -> session -> owner).
    if not self.user:
        if "Authorization" in self.request.headers:
            token = Token.get_by_id(self.request.headers["Authorization"])
            if not token:
                logging.info("Cannot find token: " +
                             str(self.request.headers["Authorization"]))
                desc = "The token you provided is invalid."
                response["success"] = False
                response["response"] = "InvalidTokenError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            logging.info(token)
            session = token.session.get()
            if not session:
                logging.info("Cannot find session")
                desc = "The token has already expired."
                # NOTE(review): sets "error" where sibling branches set
                # "success" — looks like a copy/paste slip; confirm.
                response["error"] = False
                response["response"] = "InvalidTokenError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            logging.info(session)
            if session.expires < datetime.datetime.now(
            ) or session.status is False:
                logging.info("token has expired or not active")
                desc = "The token has already expired."
                response["success"] = False
                response["response"] = "InvalidTokenError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            owner = session.owner.get()
            if not owner:
                logging.info("Cannot find user")
                desc = "Cannot find user."
                response["success"] = False
                response["response"] = "InvalidUserError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            self.user = owner
    # Environment scoping: explicit workspace, the user's environments
    # plus PUBLIC, or PUBLIC only.
    if self.user and self.GET('workspace'):
        environment = ndb.Key(urlsafe=self.GET('workspace')).get()
        if environment:
            if self.user.key in environment.users:
                environment_key = environment.key
                query = query.filter(
                    APIData.environment == environment_key)
    elif self.user and not self.GET('workspace') and (
            not self.GET('type') or
            not self.GET('type') in ['PROJECT', 'SUBPROJECT']):
        environments_user = Environment.query(
            Environment.users == self.user.key).fetch(keys_only=True)
        environments_user.append(ndb.Key('Environment', 'PUBLIC'))
        # NOTE(review): orders by the private APIData._key attribute;
        # IN() queries require a key order here — confirm against the
        # model definition.
        query = query.filter(
            APIData.environment.IN(environments_user)).order(APIData._key)
    else:
        query = query.filter(
            APIData.environment == ndb.Key('Environment', 'PUBLIC'))
    if self.GET("_search_"):
        # Prefix search over the tags list.
        response["type"] = "Search geostore saved data."
        response["query"] = self.GET("_search_")
        search = self.GET("_search_").strip().upper()
        query = query.filter(APIData.tags >= search)
        query = query.order(APIData.tags)
    else:
        # Every remaining query arg becomes an indexed_data equality
        # filter; reserved args are skipped.
        for arg in self.request.arguments():
            if arg.lower() == "callback" \
                    or arg.lower() == "_" \
                    or arg.lower() == "order" \
                    or arg.lower() == "cursor" \
                    or arg.lower() == "n" \
                    or arg.lower() == "_search_" \
                    or arg.lower() == "show_image_dates" \
                    or arg.lower() == "start_updated_from" \
                    or arg.lower() == "start_created_from":
                continue
            ad_value = self.GET(arg)
            tag = create_indexed_tag(arg, ad_value)
            query = query.filter(APIData.indexed_data == tag)
        if self.GET("order"):
            if self.GET("order").lower() in ["asc", "ascending"]:
                query = query.order(APIData.updated_time)
            elif self.GET("order").lower() in ["desc", "descending"]:
                query = query.order(-APIData.updated_time)
            elif self.GET("order").lower() == "created_asc":
                query = query.order(APIData.created_time)
            elif self.GET("order").lower() == "created_desc":
                query = query.order(-APIData.created_time)
            elif self.GET("order").lower() == "modified":
                query = query.order(APIData.updated_time)
        else:
            query = query.order(-APIData.created_time)
        # Optional time-window filters; direction follows the sort order.
        if self.GET("start_updated_from"):
            logging.debug(self.GET("start_updated_from"))
            if self.GET("order").lower() in ["desc", "descending"]:
                query = query.filter(
                    APIData.updated_time <= datetime.datetime.
                    fromtimestamp(int(self.GET("start_updated_from"))))
            else:
                query = query.filter(
                    APIData.updated_time >= datetime.datetime.
                    fromtimestamp(int(self.GET("start_updated_from"))))
        elif self.GET('start_created_from'):
            logging.debug(self.GET("start_created_from"))
            if self.GET("order").lower() in ["desc", "descending"]:
                query = query.filter(
                    APIData.created_time <= datetime.datetime.
                    fromtimestamp(int(self.GET("start_created_from"))))
            else:
                query = query.filter(
                    APIData.created_time >= datetime.datetime.
                    fromtimestamp(int(self.GET("start_created_from"))))
    logging.info(query)
    if self.GET("cursor"):
        curs = Cursor(urlsafe=self.GET("cursor"))
        data, cursor, more = query.fetch_page(n, start_cursor=curs)
    else:
        data, cursor, more = query.fetch_page(n)
    if data:
        response["cursor"] = ""
        for d in data:
            try:
                response["data"].append(d.to_api_object())
            except Exception as e:
                logging.exception(e)
        if more:
            response["cursor"] = cursor.urlsafe()
    # Optionally decorate CLASSIFICATION results with the capture date
    # and latlng of their source image.
    if self.GET('show_image_dates'):
        if self.GET('type'):
            if self.GET('type').upper() == 'CLASSIFICATION':
                if 'data' in response:
                    image_ids = []
                    for classification in response['data']:
                        image_ids.append(
                            ndb.Key('APIData',
                                    int(classification['image_id'])))
                    images = ndb.get_multi(image_ids)
                    for image in images:
                        date = ''
                        image_latlng = image.additional_data['latlng']
                        if 'date' in image.additional_data:
                            date = image.additional_data['date']
                        else:
                            date = image.created_time.strftime(
                                '%Y:%m:%d %H:%M:%S')
                        for i in range(0, len(response['data'])):
                            if response['data'][i]['image_id'] == str(
                                    image.key.id()):
                                response['data'][i]['image_date'] = date
                                response['data'][i][
                                    'image_latlng'] = image_latlng
    # JSONP when a callback is supplied, plain JSON otherwise.
    if self.GET("callback"):
        callback = self.GET("callback")
        d = json.dumps(response)
        self.response.out.write(callback + "(" + d + ");")
    else:
        wrap_response(self, response)
def merge_kml(parent_code, project_code, output, kml):
    """Deferred task: gather classification attributes for a project (or
    parent) and merge them into the KML, writing the result (KML or JSON)
    to Google Cloud Storage and recording a GCSFile entry.
    """
    # run this
    # get all the attributes
    query = APIData.query(APIData.indexed_data == create_indexed_tag('TYPE', 'CLASSIFICATION'))
    if parent_code:
        query = query.filter(APIData.indexed_data == create_indexed_tag('parent_code', parent_code))
    else:
        query = query.filter(APIData.indexed_data == create_indexed_tag('project_code', project_code))
    cursor = None
    i = 1
    n = 100
    all_classifications = []
    # Page through at most ~49 pages of 100 (hard safety cap).
    while i < 50:
        # logging.debug('querying...')
        classifications, cursor, more = query.fetch_page(n, start_cursor=cursor)
        i = i + 1
        for classification in classifications:
            # Skip records missing any of the three required attributes.
            try:
                all_classifications.append({
                    'classification_type': classification.additional_data['classification_type'].upper().strip(),
                    'classification': classification.additional_data['classification'].upper().strip(),
                    'latlng': classification.additional_data['lat'] + ',' + classification.additional_data['lng']
                })
            except KeyError:
                pass
        # A short page means we've consumed the final page.
        if len(classifications) < n:
            break
    logging.debug("i: " + str(i))
    # generate new kml
    kml_id = str(kml.key.id())
    try:
        if output:
            if output.lower() == 'json':
                # JSON output path: write the merged attributes and
                # return early.
                content = merge_kml_and_attributes_json(all_classifications, kml.file_url)
                filename = 'merged_kml_and_attributes.json'
                gcs_filename = BUCKET_NAME
                gcs_filename += random_string(128) + "/"
                gcs_filename += filename
                gcs_options = {'x-goog-acl': 'public-read'}
                gcs_file = gcs.open(gcs_filename, 'w', options=gcs_options)
                gcs_file.write(json.dumps(content).encode('utf-8'))
                gcs_file.close()
                full_url = "https://storage.googleapis.com" + gcs_filename
                full_url = urllib.quote(full_url, safe="%/:=&?~#+!$,;'@()*[]")
                gcsfile = GCSFile()
                gcsfile.kml_id = kml_id
                gcsfile.link = full_url
                gcsfile.file_type = 'JSON'
                gcsfile.put()
                return
        else:
            # NOTE(review): a truthy output other than 'json' leaves
            # `content` unassigned and the write below would raise
            # NameError; callers appear to pass only '' or 'json' —
            # confirm.
            content = merge_kml_and_attributes(all_classifications, kml.file_url)
    except Exception as e:
        logging.debug(e)
        return
    filename = find_between_r(kml.file_url + ' ', '/', ' ')
    filename = filename.replace('.kmz', '-kmz') + '.kml'
    # save file to gcs
    gcs_filename = BUCKET_NAME
    gcs_filename += random_string(128) + "/"
    gcs_filename += filename
    gcs_options = {'x-goog-acl': 'public-read'}
    gcs_file = gcs.open(gcs_filename, 'w', options=gcs_options)
    gcs_file.write(content.encode('utf-8'))
    gcs_file.close()
    full_url = "https://storage.googleapis.com" + gcs_filename
    full_url = urllib.quote(full_url, safe="%/:=&?~#+!$,;'@()*[]")
    gcsfile = GCSFile()
    gcsfile.kml_id = kml_id
    gcsfile.link = full_url
    gcsfile.file_type = 'KML'
    gcsfile.put()
def get(self):
    """ Handles the /api/v1/logs endpoint. Returns list of logs. """
    response = {
        "code": 200,
        "type": "List of geostore logs.",
        "method": "GET",
        "response": "OK",
        "data": []
    }
    # Default number of entities to be retrieved is 150.
    n = 150
    if self.GET("n"):
        n = int(self.GET("n"))
    # Cap the requested page size at 500.
    if n > 500:
        n = 500
    query = SL.query()
    # Every remaining query arg becomes an indexed_data equality filter;
    # reserved args are skipped.
    for arg in self.request.arguments():
        if arg.lower() == "callback" \
                or arg.lower() == "_" \
                or arg.lower() == "order" \
                or arg.lower() == "cursor" \
                or arg.lower() == "n" \
                or arg.lower() == "_search_" \
                or arg.lower() == "show_image_dates" \
                or arg.lower() == "start_updated_from" \
                or arg.lower() == "csv" \
                or arg.lower() == "start_created_from":
            continue
        ad_value = self.GET(arg)
        tag = create_indexed_tag(arg, ad_value)
        query = query.filter(SL.indexed_data == tag)
    query = query.order(-SL.created)
    logging.info(query)
    if self.GET("cursor"):
        curs = Cursor(urlsafe=self.GET("cursor"))
        data, cursor, more = query.fetch_page(n, start_cursor=curs)
    else:
        data, cursor, more = query.fetch_page(n)
    if data:
        response["cursor"] = ""
        # Logs are fetched newest-first; reverse to oldest-first so each
        # entry can be diffed against the previous one, then reverse the
        # serialized output back to newest-first.
        data.reverse()
        previous = None
        for d in data:
            try:
                response["data"].append(d.to_api_object(change_against=previous))
                previous = d
            except Exception as e:
                logging.debug(e)
        response["data"].reverse()
        if more:
            response["cursor"] = cursor.urlsafe()
    # JSONP when a callback is supplied, plain JSON otherwise.
    if self.GET("callback"):
        callback = self.GET("callback")
        d = json.dumps(response)
        self.response.out.write(callback + "(" + d + ");")
    else:
        wrap_response(self, response)
def post(self, data_id=None):
    """Update an existing APIData record.

    Two request modes: JSON body (application/json, authenticated via
    API key + From header or a session token) and form POST (browser
    flow, responds with redirects). Keys prefixed 'unindexed_' update
    additional_data only; 'indexed_' also replace the matching indexed
    tag; 'file_' (form mode only) upload a file to GCS.
    """
    response = {}
    response["success"] = True
    logging.info(self.request.headers)
    # NOTE(review): header key "Content_Type" (underscore) — presumably
    # the framework normalizes header names; raises KeyError if the
    # header is absent. Confirm.
    content_type = self.request.headers["Content_Type"]
    if not self.user:
        if content_type == "application/json":
            if "Authorization" not in self.request.headers:
                logging.info("No Authorization in headers")
                desc = "You must be logged in to use the API."
                response["success"] = False
                response["response"] = "AuthorizationError"
                response["description"] = desc
                response["code"] = 400
                wrap_response(self, response)
                return
            if self.request.headers["Authorization"] == API_KEY:
                # API-key auth: identify the acting user via the From
                # header.
                if not self.request.headers["From"]:
                    logging.info("No email defined")
                    desc = "Cannot find user."
                    response["success"] = False
                    response["response"] = "InvalidUserError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
                user_email = self.request.headers["From"].lower()
                query = User.query()
                owner = query.filter(
                    User.current_email == user_email).get()
                if not owner:
                    logging.info("Cannot find user")
                    desc = "Cannot find user."
                    response["success"] = False
                    response["response"] = "InvalidUserError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
                # NOTE(review): owner is validated but never assigned to
                # self.user, so the username/user fields below stay
                # unset on API-key requests — confirm intended.
            else:
                # Token auth: token -> session -> expiry check.
                token = Token.get_by_id(
                    self.request.headers["Authorization"])
                if not token:
                    logging.info(
                        "Cannot find token: " +
                        str(self.request.headers["Authorization"]))
                    desc = "The token you provided is invalid."
                    response["success"] = False
                    response["response"] = "InvalidTokenError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
                logging.info(token)
                session = token.session.get()
                if not session:
                    logging.info("Cannot find session")
                    desc = "The token has already expired."
                    # NOTE(review): sets "error" where sibling branches
                    # set "success" — looks like a copy/paste slip.
                    response["error"] = False
                    response["response"] = "InvalidTokenError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
                logging.info(session)
                if session.expires < datetime.datetime.now(
                ) or session.status is False:
                    logging.info("token has expired or not active")
                    desc = "The token has already expired."
                    response["success"] = False
                    response["response"] = "InvalidTokenError"
                    response["description"] = desc
                    response["code"] = 400
                    wrap_response(self, response)
                    return
        else:
            # Browser flow without a login: bounce back with an error.
            desc = "You must be logged in to use the API."
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
            self.redirect(url)
            # NOTE(review): no return after this redirect — execution
            # falls through and keeps processing the request. Confirm.
    if not data_id:
        desc = "ID is missing from the request."
        if content_type == "application/json":
            response["success"] = False
            response["response"] = "MissingParametersError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
        else:
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
            self.redirect(url)
        return
    data = APIData.get_by_id(normalize_id(data_id))
    if not data:
        desc = "Cannot find the package."
        if content_type == "application/json":
            response["success"] = False
            response["response"] = "InvalidIDError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
        else:
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
            self.redirect(url)
        return
    # Archived records are treated as not found.
    if data.archived:
        desc = "Cannot find the package."
        if content_type == "application/json":
            response["success"] = False
            response["response"] = "InvalidIDError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
        else:
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
            self.redirect(url)
        return
    desc = "There are missing parameters in your request."
    if content_type == "application/json":
        # --- JSON body mode ---
        if not self.request.body:
            response["success"] = False
            response["response"] = "MissingParametersError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
            return
        try:
            body = json.loads(self.request.body)
        except Exception as e:
            logging.info(e)
            desc = "Invalid JSON format."
            response["success"] = False
            response["response"] = "InvalidJSONError"
            response["description"] = desc
            response["code"] = 400
            wrap_response(self, response)
            return
        tags = []
        try:
            for key, value in body.items():
                try:
                    tags += create_tags(value)
                except Exception as e:
                    logging.info("Cannot create tag from: ")
                    logging.info(e)
                if key.startswith('unindexed_'):
                    # unindexed_
                    ad_key = key.replace("unindexed_", "")
                    data.additional_data[ad_key] = value.strip()
                if key.startswith('indexed_'):
                    ad_key = key.replace("indexed_", "")
                    data.additional_data[ad_key] = value
                    # Drop any existing tag for this key before adding
                    # the replacement.
                    for d in data.indexed_data:
                        ad_key = key.replace("indexed_", "")
                        if d.startswith(ad_key.upper()):
                            try:
                                data.indexed_data.remove(d)
                            except Exception as e:
                                logging.exception(e)
                                logging.info("Cannot remove from list")
                    data.indexed_data.append(create_indexed_tag(
                        key, value))
            if self.user:
                data.username = self.user.name
                data.user = self.user.key
            data.indexed_data = uniquify(data.indexed_data)
            data.tags = uniquify(tags)
            data.put()
            desc = "Data has been saved."
            response["success"] = True
            response["response"] = "Success"
            response["description"] = desc
            response["code"] = 200
            response["data"] = data.to_api_object()
            wrap_response(self, response)
        except Exception as e:
            logging.exception(e)
            desc = "A server error occured. Please try again later."
            response["success"] = False
            response["response"] = "ServerError"
            response["description"] = desc
            response["code"] = 500
            wrap_response(self, response)
    else:
        # --- Form POST mode ---
        if not self.request.arguments():
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
            self.redirect(url)
            return
        tags = []
        try:
            for arg in self.request.arguments():
                # Remove any existing tag matching this key before the
                # replacement is appended below.
                for d in data.indexed_data:
                    ad_key = arg.replace("indexed_", "")
                    if d.startswith(ad_key.upper()):
                        try:
                            data.indexed_data.remove(d)
                        except Exception as e:
                            logging.exception(e)
                            logging.info("Cannot remove from list")
                if arg.startswith('unindexed_'):
                    ad_key = arg.replace("unindexed_", "")
                    ad_value = self.request.POST.get(arg)
                    data.additional_data[ad_key] = ad_value.strip()
                    try:
                        tags += create_tags(ad_value)
                    except Exception as e:
                        logging.info("Cannot create tag from: ")
                        logging.info(e)
                if arg.startswith('indexed_'):
                    ad_key = arg.replace("indexed_", "")
                    ad_value = self.request.POST.get(arg)
                    data.additional_data[ad_key] = ad_value
                    try:
                        tags += create_tags(ad_value)
                    except Exception as e:
                        logging.info("Cannot create tag from: ")
                        logging.info(e)
                    data.indexed_data.append(
                        create_indexed_tag(arg, self.request.POST.get(arg)))
                if arg.startswith('file_'):
                    # Upload the attached file to GCS under a random
                    # directory, publicly readable.
                    filename = BUCKET_NAME
                    filename += random_string(20) + "/"
                    ad_key = arg.replace("file_", "")
                    data.additional_data[ad_key] = {}
                    try:
                        # try:
                        file_name = self.request.POST.get(arg).filename
                        filename += file_name
                        gcs_file = gcs.open(
                            filename,
                            'w',
                            options={'x-goog-acl': 'public-read'})
                        gcs_file.write(self.request.get(arg))
                        gcs_file.close()
                        full_url = "https://storage.googleapis.com" + filename
                        # data.additional_data["file"]["file_url"] = full_url
                        data.file_url = full_url
                        data.additional_data[ad_key]["file_url"] = full_url
                        # Try to register the file with the images
                        # service; non-images fall back to the raw URL.
                        try:
                            blob_key = blobstore.create_gs_key("/gs" + filename)
                            data.serving_url = images.get_serving_url(
                                blob_key)
                            data.additional_data[ad_key][
                                "serving_url"] = data.serving_url
                            data.gcs_key = blobstore.BlobKey(blob_key)
                        except Exception as e:
                            logging.exception(e)
                            logging.error("not an image??")
                            data.additional_data[ad_key][
                                "serving_url"] = full_url
                    except AttributeError, e:
                        # Field was posted without an actual file.
                        logging.exception(e)
                        logging.exception("NO FILE ATTACHED")
            if self.user:
                data.username = self.user.name
                data.user = self.user.key
            data.indexed_data = uniquify(data.indexed_data)
            data.tags = uniquify(tags)
            data.put()
            desc = "Data has been updated."
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?success=" + urllib.quote(desc)
                self.redirect(url)
            else:
                response["success"] = True
                response["response"] = "Success"
                response["description"] = desc
                response["code"] = 200
                response["data"] = data.to_api_object()
                wrap_response(self, response)
        except Exception as e:
            logging.exception(e)
            desc = "A server error occured. Please try again later."
            if self.POST("r"):
                url = urllib.unquote(str(self.POST("r")))
            else:
                url = self.request.referer
            if url:
                if "?" in url:
                    url = url.split("?")[0]
                url += "?error=" + urllib.quote(desc)
                self.redirect(url)
            else:
                response["success"] = False
                response["response"] = "ServerError"
                response["description"] = desc
                response["code"] = 500
                wrap_response(self, response)
def run_counter(counter_id=None, cursor_urlsafe=None, set_classification_flags=False):
    """Accumulate per-province project counts into a Counter entity, one
    page of 50 PROJECT records per invocation.

    When set_classification_flags is True, also stamp has_image /
    has_kml / has_classification ('1'/'0') onto each project's
    additional_data and indexed tags. Returns the counter id and the
    cursor for the next page (None when finished).
    """
    if not counter_id:
        # First run: create a fresh counter.
        counter_id = generate_counter_id()
        counter_instance = Counter(id=counter_id)
        counters_data = {}
    else:
        # Continuation: resume the existing counter's accumulated data.
        counter_instance = Counter.get_by_id(counter_id)
        counters_data = counter_instance.data
    n = 50
    query = APIData.query()
    query = query.filter(APIData.archived == False)
    environment_key = ndb.Key('Environment', 'PUBLIC')
    query = query.filter(APIData.environment == environment_key)
    tag = create_indexed_tag('type', 'PROJECT')
    query = query.filter(APIData.indexed_data == tag)
    # NOTE(review): orders by the private APIData._key attribute for
    # stable paging — confirm against the model definition.
    query = query.order(APIData._key)
    if cursor_urlsafe:
        curs = Cursor(urlsafe=cursor_urlsafe)
        projects, cursor, more = query.fetch_page(n, start_cursor=curs)
    else:
        projects, cursor, more = query.fetch_page(n)
    new_projects = []
    for project in projects:
        project_data = get_project_data(project)
        counters_data = increment_counts_with_province(counters_data,
                                                       project_data)
        if set_classification_flags:
            # NOTE(review): old flag tags (e.g. a previous '0' when the
            # new value is '1') are not removed; uniquify only drops
            # exact duplicates — confirm stale tags cannot linger.
            if project_data['has_image']:
                project.indexed_data.append(
                    create_indexed_tag('has_image', '1'))
                project.additional_data['has_image'] = '1'
            else:
                project.indexed_data.append(
                    create_indexed_tag('has_image', '0'))
                project.additional_data['has_image'] = '0'
            if project_data['has_kml']:
                project.indexed_data.append(create_indexed_tag('has_kml', '1'))
                project.additional_data['has_kml'] = '1'
            else:
                project.indexed_data.append(create_indexed_tag('has_kml', '0'))
                project.additional_data['has_kml'] = '0'
            if project_data['has_classification']:
                project.indexed_data.append(
                    create_indexed_tag('has_classification', '1'))
                project.additional_data['has_classification'] = '1'
            else:
                project.indexed_data.append(
                    create_indexed_tag('has_classification', '0'))
                project.additional_data['has_classification'] = '0'
            project.indexed_data = uniquify(project.indexed_data)
            new_projects.append(project)
    counter_instance.data = counters_data
    # NOTE(review): completion is detected via a falsy cursor rather
    # than the `more` flag; fetch_page can return a cursor even on the
    # final page — confirm this terminates as intended.
    if not cursor:
        counter_instance.done = True
    counter_instance.put()
    if set_classification_flags and new_projects:
        ndb.put_multi(new_projects)
    return {
        "counter_id": counter_id,
        "cursor": cursor.urlsafe() if cursor else None
    }