def save(self, uploader=None, save_to_git=True):
    """Persist this campaign's JSON and GeoJSON documents to S3.

    Bumps the version, records the editor, regenerates the static
    thumbnail, then writes the campaign document (minus geometry) and
    the geometry GeoJSON as two separate S3 objects.

    :param uploader: uploader who created the change
    :type uploader: str
    :param save_to_git: kept for interface compatibility; not read here
    :type save_to_git: bool
    """
    self.version += 1
    if uploader:
        self.edited_by = uploader

    # Refresh the campaign thumbnail before serialising.
    self.generate_static_map()

    data = self.to_dict()
    Campaign.validate(data, self.uuid)

    # Geometry lives in its own GeoJSON object, not in the JSON document.
    geometry = data.pop('geometry')

    # save updated campaign to json
    S3Data().create(self.json_path, Campaign.serialize(data))
    S3Data().create(self.geojson_path, json.dumps(geometry))
def parse_json_file(self): """ Parse json file for this campaign. If file is corrupted, it will raise Campaign.CorruptedFile exception. """ # campaign data if self.json_path: try: content = S3Data().fetch(self.json_path) content_json = parse_json_string(content) Campaign.validate(content_json, self.uuid) self._content_json = content_json attributes = self.get_attributes() for key, value in content_json.items(): if key in attributes: setattr(self, key, value) except json.decoder.JSONDecodeError: raise JsonModel.CorruptedFile self.types = Campaign.parse_types_string(json.dumps(self.types)) # geometry data if self.geojson_path: try: content = S3Data().fetch(self.geojson_path) geometry = parse_json_string(content) self.geometry = geometry self._content_json['geometry'] = geometry except json.decoder.JSONDecodeError: raise JsonModel.CorruptedFile
def post(self, uuid, feature_name):
    """Export a campaign feature set as a downloadable file.

    Accepts ``fileFormat`` ('csv' or 'osm') and an optional ``filter``
    dict in the request body; returns the filtered data as an
    attachment, or None when no format is requested.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('fileFormat', type=str)
    parser.add_argument('filter', type=dict)
    args = parser.parse_args()
    file_format = args.get('fileFormat', None)
    # Fix: reqparse stores None (not {}) for a missing argument, which
    # would have been passed straight into filter_json/filter_xml.
    filters = args.get('filter') or {}

    if file_format == "csv":
        file_buffer = StringIO()
        data = S3Data().fetch(f'campaigns/{uuid}/{feature_name}.json')
        # Fix: guard the empty-data case (previously IndexError on data[0]).
        headers = list(data[0].keys()) if data else []
        data = filter_json(data, filters)
        if len(data) > 0:
            # Attribute names become extra 1/0 presence columns.
            headers.extend(data[0]['attributes'])
            headers.extend(data[0]['missing_attributes'])
        for row in data:
            for item in row['attributes']:
                row[item] = 1
            for item in row['missing_attributes']:
                row[item] = 0
            del row['attributes']
            del row['missing_attributes']
        if headers:
            headers.remove('missing_attributes')
            headers.remove('attributes')
        writer = csv.DictWriter(file_buffer, fieldnames=headers)
        writer.writeheader()
        writer.writerows(data)
        mimetype = 'text/csv'
        # send_file needs a binary stream; re-encode the CSV text.
        response_file = BytesIO()
        file_buffer.seek(0)
        response_file.write(file_buffer.getvalue().encode('utf-8'))
        response_file.seek(0)
    elif file_format == "osm":
        file_buffer = BytesIO()
        mimetype = 'text/xml'
        s3 = S3Data().s3
        key = f'campaigns/{uuid}/overpass/{feature_name}.xml'
        bucket = S3Data().bucket
        data = s3.get_object(Bucket=bucket, Key=key)['Body'].read()
        data = filter_xml(data, filters)
        file_buffer.write(data)
        file_buffer.seek(0)
        response_file = file_buffer
    else:
        # Fix: previously any format other than 'csv'/'osm'/None fell
        # through to an unbound ``mimetype``/``response_file`` NameError.
        return

    resp = send_file(response_file,
                     as_attachment=True,
                     attachment_filename=f'{feature_name}.{file_format}',
                     mimetype=mimetype)
    return resp
def get_contributor(uuid, osm_name):
    """Render the contributor page for one OSM user in a campaign.

    Computes the user's total edits, overall attribute completeness,
    and a top-5 per-feature-type completeness ranking.
    """
    context = get_campaign_data(uuid)
    context['mapper'] = osm_name
    # Fix: removed an unused fetch of campaign.json (dead network call).
    features = [
        f['type'].replace(' ', '_') for _, f in context['types'].items()
    ]
    # Data for ranking panel: gather every feature of every type.
    all_features = []
    for feature in features:
        all_features += S3Data().fetch(f'campaigns/{uuid}/{feature}.json')
    user_features = [
        f for f in all_features if f['last_edited_by'] == osm_name
    ]
    context['total_edits'] = len(user_features)

    all_attr_complete = 0
    all_attr_total = len(user_features)
    contrib_features = {}
    for feature in user_features:
        is_complete = not feature['missing_attributes']
        if is_complete:
            all_attr_complete += 1
        ftype = feature['type']
        # Fix: the original initialised a first occurrence with total=1 and
        # then unconditionally incremented again (the follow-up ``if in
        # keys`` was always true), double-counting every feature.
        if ftype not in contrib_features:
            contrib_features[ftype] = {'total': 0, 'complete': 0}
        contrib_features[ftype]['total'] += 1
        if is_complete:
            contrib_features[ftype]['complete'] += 1

    # Fix: guard a contributor with no edits (ZeroDivisionError before).
    if all_attr_total:
        pct = (all_attr_complete * 100) / all_attr_total
    else:
        pct = 0
    context['all_attr_completeness'] = round(pct)

    contrib_features = {
        k: round((v['complete'] * 100) / v['total'])
        for k, v in contrib_features.items()
    }
    attr_ranking = sorted(contrib_features.items(),
                          key=operator.itemgetter(1),
                          reverse=True)
    context['attr_ranking'] = attr_ranking[:5]

    # Map all features from user.
    context['types'] = [
        c['type'].replace(' ', '_') for _, c in context['types'].items()
    ]
    context['s3_campaign_url'] = S3Data().url(uuid)
    return render_template('contributor.html', **context)
def get(self, uuid):
    """Bundle every per-grid-cell PDF of a campaign into one zip.

    Reads the campaign's grid definition to discover the cell ids,
    collects all PDFs stored under each cell's prefix, and streams
    them back as ``pdf_bundle.zip``.
    """
    client = S3Data()
    s3 = client.s3
    grid_obj = s3.get_object(Bucket=client.bucket,
                             Key=f"campaigns/{uuid}/pdf/grid.geojson")
    grid_geojson = json.loads(grid_obj['Body'].read())
    grid_ids = [cell['properties']['id']
                for cell in grid_geojson['features']]

    archive = BytesIO()
    with ZipFile(archive, 'w') as bundle:
        for grid_id in grid_ids:
            prefix = f'campaigns/{uuid}/pdf/{grid_id}/'
            listing = s3.list_objects_v2(Bucket=client.bucket,
                                         Prefix=prefix)
            try:
                keys = [entry['Key'] for entry in listing['Contents']]
            except KeyError:
                # Nothing rendered for this grid cell yet — skip it.
                continue
            for key in keys:
                body = s3.get_object(Bucket=client.bucket,
                                     Key=key)['Body'].read()
                # Keep the '<grid_id>/<filename>' suffix inside the zip.
                bundle.writestr('/'.join(key.split('/')[-2:]), body)

    archive.seek(0)
    return send_file(archive,
                     as_attachment=True,
                     attachment_filename='pdf_bundle.zip',
                     mimetype='application/zip')
def __init__(self, uuid=None):
    """Load a campaign from S3 by uuid.

    With no uuid the instance is left bare (no S3 access happens).
    """
    if not uuid:
        return
    self.uuid = uuid
    self.json_path = Campaign.get_json_file(uuid)
    self.geojson_path = Campaign.get_geojson_file(uuid)
    self.edited_at = S3Data().get_last_modified_date(self.json_path)
    self.parse_json_file()
def get_mbtile():
    """Return the S3 url of the mbtiles file whose campaign polygon
    lies closest (by centroid distance) to the posted coordinates."""
    # decoding to geojson
    client = S3Data()
    coords = json.loads(request.values.get('coordinates'))
    target = shapely_geometry.Polygon(coords)

    prefix = 'campaigns/{0}/mbtiles/'.format(request.values.get('uuid'))
    tiles_index = client.fetch('{0}tiles.geojson'.format(prefix))

    # Only top-level campaign polygons (no parent) take part in matching.
    top_level = [
        f for f in tiles_index['features']
        if f['properties']['parent'] is None
    ]
    candidates = [
        shapely_geometry.polygon.orient(
            shapely_geometry.Polygon(f['geometry']['coordinates'][0]))
        for f in top_level
    ]
    gaps = [target.centroid.distance(p.centroid) for p in candidates]
    nearest = gaps.index(min(gaps))
    tiles_id = top_level[nearest]['properties']['id']

    # Build the public file url for the chosen tileset.
    file_path = '{0}{1}'.format(prefix, '{0}.mbtiles'.format(tiles_id))
    aws_url = 'https://s3-us-west-2.amazonaws.com'
    file_url = '{0}/{1}/{2}'.format(aws_url, client.bucket, file_path)
    return Response(json.dumps({'file_url': file_url}))
def all():
    """Return the uuid of every stored survey.

    The modified timestamp returned by the S3 listing is dropped;
    callers only need the uuids.
    """
    return [survey['uuid'] for survey in S3Data().list('surveys')]
def get_campaign_data(uuid):
    """Get campaign details.

    Builds the template context for a campaign page: S3 url,
    participant name lists, dates, status and map attribution.
    Aborts with 404 when the campaign cannot be loaded.
    """
    from campaign_manager.models.campaign import Campaign
    from campaign_manager.aws import S3Data
    try:
        campaign = Campaign.get(uuid)
    except Exception:
        # Fix: was a bare ``except:`` (also swallowed SystemExit etc.).
        abort(404)
    context = campaign.to_dict()
    context['s3_campaign_url'] = S3Data().url(uuid)

    campaign_manager_names = [
        manager['name']
        for manager in parse_json_string(campaign.campaign_managers)
    ]
    campaign_viewer_names = [
        viewer['name']
        for viewer in parse_json_string(campaign.campaign_viewers)
    ]
    campaign_contributor_names = [
        contributor['name']
        for contributor in parse_json_string(campaign.campaign_contributors)
    ]

    context['oauth_consumer_key'] = OAUTH_CONSUMER_KEY
    context['oauth_secret'] = OAUTH_SECRET
    context['map_provider'] = map_provider()
    context['campaign_manager_names'] = campaign_manager_names
    context['campaign_viewer_names'] = campaign_viewer_names
    context['campaign_contributor_names'] = campaign_contributor_names
    # Fix: campaign.campaign_managers is parsed with parse_json_string
    # above, i.e. it is a JSON string — len() on it counted characters,
    # not managers. Count the parsed manager list instead.
    context['participants'] = len(campaign_manager_names)
    context['pct_covered_areas'] = campaign.calculate_areas_covered()
    if campaign.map_type != '':
        context['attribution'] = find_attribution(campaign.map_type)

    # Start date: '-' placeholders when the stored date is None.
    try:
        start_date = datetime.strptime(campaign.start_date, '%Y-%m-%d')
        context['start_date_date'] = start_date.strftime('%d %b')
        context['start_date_year'] = start_date.strftime('%Y')
    except TypeError:
        context['start_date_date'] = '-'
        context['start_date_year'] = '-'

    context['current_status'] = campaign.get_current_status()
    if context['current_status'] == 'active':
        context['current_status'] = 'running'

    # End date: same placeholder handling as the start date.
    try:
        end_date = datetime.strptime(campaign.end_date, '%Y-%m-%d')
        context['end_date_date'] = end_date.strftime('%d %b')
        context['end_date_year'] = end_date.strftime('%Y')
    except TypeError:
        context['end_date_date'] = '-'
        context['end_date_year'] = '-'
    return context
def get(self, osm_id):
    """Return campaign data for every project the OSM user joined."""
    user = S3Data().fetch(f'user_campaigns/{osm_id}.json')
    if not user:
        return []
    result = []
    for project in user['projects']:
        result.append(get_data_from_s3(project["uuid"], ""))
    return result
def get_data_from_s3(uuid, modified):
    """Fetch a campaign's JSON document, attach its GeoJSON and the
    supplied modified timestamp, and return the combined dict."""
    client = S3Data()
    # Make a request to get the campaign json and geojson.
    campaign = client.fetch('campaigns/{0}/campaign.json'.format(uuid))
    campaign['geojson'] = client.fetch(
        'campaigns/{0}/campaign.geojson'.format(uuid))
    campaign['modified'] = modified
    return campaign
def get(self, uuid):
    """Return all features of a campaign, optionally restricted to the
    ones last edited by the ``username`` query parameter."""
    username = request.args.get('username', None)
    campaign = S3Data().fetch(f'campaigns/{uuid}/campaign.json')
    type_names = [
        t['type'].replace(' ', '_') for t in campaign['types'].values()
    ]
    all_features = []
    for type_name in type_names:
        all_features += S3Data().fetch(
            f'campaigns/{uuid}/{type_name}.json')
    if username:
        return [
            f for f in all_features if f['last_edited_by'] == username
        ]
    return all_features
def get_types():
    """ Get all types in json

    :return: json of survey of type
    :rtype: dict
    """
    # Map each listed survey uuid to its parsed survey document.
    return {
        entry['uuid']: get_survey_json(entry['uuid'])
        for entry in S3Data().list('surveys')
    }
def get_feature_summary(uuid, feature_name):
    """Summarise one feature type of a campaign.

    :return: counts of total/complete/incomplete features plus the tag
        names taken from the first feature.
    :rtype: dict
    """
    feature = S3Data().fetch(f'campaigns/{uuid}/{feature_name}.json')
    data = {'feature_count': 0, 'complete': 0, 'incomplete': 0, 'tags': []}
    # Fix: guard the empty case — feature[0] raised IndexError before.
    if not feature:
        return data
    # Tag names are taken from the first feature's attribute lists.
    data['tags'] += feature[0]['attributes']
    data['tags'] += feature[0]['missing_attributes']
    for f in feature:
        data['feature_count'] += 1
        if len(f['missing_attributes']) > 0:
            data['incomplete'] += 1
        else:
            data['complete'] += 1
    return data
def generate_static_map(self):
    """ Download static map from http://staticmap.openstreetmap.de
    with marker, then save it thumbnail folder

    Uses the Mapbox static-image API when MAPBOX_TOKEN is set
    (rendering the campaign geometry as an overlay); otherwise falls
    back to the OSM static map centred on the union polygon's centroid.
    """
    if 'MAPBOX_TOKEN' in os.environ:
        url = 'https://api.mapbox.com/styles/v1/hot/' \
              'cj7hdldfv4d2e2qp37cm09tl8/static/geojson({overlay})/' \
              'auto/{width}x{height}?' \
              'access_token=' + os.environ['MAPBOX_TOKEN']
        if len(self.geometry['features']) > 1:
            # Several features: overlay the union of all polygons.
            geometry = {
                'type': 'Feature',
                'properties': {},
                'geometry': mapping(self.get_union_polygons())
            }
            # Compact separators keep the overlay inside url limits.
            geometry = json.dumps(geometry, separators=(',', ':'))
        else:
            geometry = json.dumps(self.geometry['features'][0],
                                  separators=(',', ':'))
        url = url.format(overlay=geometry, width=512, height=300)
    else:
        polygon = self.get_union_polygons()
        url = 'http://staticmap.openstreetmap.de/staticmap.php?' \
              'center={y},{x}&zoom=10&size=512x300&maptype=mapnik' \
              '&markers={y},{x},lightblue'.format(
                  y=polygon.centroid.y, x=polygon.centroid.x)
    image_path = 'campaigns/{}/thumbnail.png'.format(self.uuid)
    request = requests.get(url, stream=True)
    if request.status_code == 200:
        request.raw.decode_content = True
        from io import BytesIO
        S3Data().create(image_path, BytesIO(request.content))
    # NOTE(review): thumbnail url is set even when the download failed —
    # presumably pointing at a previous thumbnail; confirm.
    self.thumbnail = S3Data().thumbnail_url(self.uuid)
def get_s3_types(self):
    """Return the rendered type prefixes for this campaign, or None
    when nothing has been rendered yet."""
    client = S3Data()
    prefix = 'campaigns/{}/render/'.format(self.uuid)
    listing = client.s3.list_objects(Bucket=client.bucket,
                                     Prefix=prefix,
                                     Delimiter='/')
    # An empty prefix has no 'CommonPrefixes' key at all.
    if 'CommonPrefixes' not in listing:
        return None
    return [entry['Prefix'] for entry in listing['CommonPrefixes']]
def get(self):
    """Get all campaigns.

    :return: S3 listing of every campaign.
    :rtype: list
    """
    # Fix: removed unreachable code that followed this return — it
    # referenced an undefined ``campaign_status`` and could never run.
    return S3Data().list('campaigns')
def home():
    """Home page view.

    On this page a summary campaign manager view will shown.
    """
    context = {
        'oauth_consumer_key': OAUTH_CONSUMER_KEY,
        'oauth_secret': OAUTH_SECRET,
        'map_provider': map_provider(),
        'bucket_url': S3Data().bucket_url(),
    }
    # noinspection PyUnresolvedReferences
    return render_template('index.html', **context)
def campaigns_list(osm_id):
    """List the user's campaigns

    A summary campaign manager view with all the users campaigns
    """
    context = {
        'oauth_consumer_key': OAUTH_CONSUMER_KEY,
        'oauth_secret': OAUTH_SECRET,
        'all': True,
        'map_provider': map_provider(),
        'bucket_url': S3Data().bucket_url(),
        'osm_id': osm_id,
    }
    return render_template('campaign_index.html', **context)
def get_campaign(uuid):
    """Render the campaign detail page with feature and contributor
    statistics plus the viewer's edit permission."""
    context = get_campaign_data(uuid)
    context['types'] = list(
        map(lambda type: type[1]['type'], context['types'].items()))
    # Fix: removed an unused fetch of campaign.json (dead network call).
    features = [f.replace(' ', '_') for f in context['types']]
    all_features = []
    contributors_data = {}
    for feature in features:
        feature_json = S3Data().fetch(f'campaigns/{uuid}/{feature}.json')
        all_features += feature_json
    context['total_features'] = len(all_features)
    for feature in all_features:
        # Only the number of distinct editors is used downstream.
        if feature['last_edited_by'] not in contributors_data:
            contributors_data[feature['last_edited_by']] = feature
    context['total_contributors'] = len(contributors_data)
    context['complete'] = len(
        [f for f in all_features if f['status'] == "Complete"])
    context['incomplete'] = len(
        [f for f in all_features if f['status'] == "Incomplete"])
    # NOTE(review): this is the integer ratio complete/incomplete, not a
    # percentage of the total — confirm the template expects this.
    complete_pct = 0
    if context['incomplete'] > 0:
        complete_pct = int(context['complete'] / context['incomplete'])
    context['complete_pct'] = complete_pct
    # Only campaign managers may edit.
    can_edit = False
    if 'user' in session:
        user_id, _ = session['user']
        manager_ids = [m['osm_id'] for m in context['campaign_managers']]
        can_edit = user_id in manager_ids
    context['can_edit'] = can_edit
    return render_template('campaign_detail.html', **context)
def get_campaign(uuid):
    """Get campaign details.

    Render the campaign detail page for ``uuid``; aborts with 404 when
    the campaign cannot be loaded.
    """
    from campaign_manager.models.campaign import Campaign
    from campaign_manager.aws import S3Data
    try:
        campaign = Campaign.get(uuid)
    except Exception:
        # Fix: was a bare ``except:`` (also swallowed SystemExit etc.).
        abort(404)
    context = campaign.to_dict()
    context['s3_campaign_url'] = S3Data().url(uuid)
    context['types'] = list(
        map(lambda type: type[1]['type'], context['types'].items()))
    context['oauth_consumer_key'] = OAUTH_CONSUMER_KEY
    context['oauth_secret'] = OAUTH_SECRET
    context['map_provider'] = map_provider()
    # NOTE(review): elsewhere campaign_managers is a JSON *string*; if so
    # len() counts characters here, not managers — confirm and align with
    # get_campaign_data().
    context['participants'] = len(campaign.campaign_managers)
    context['pct_covered_areas'] = campaign.calculate_areas_covered()
    if campaign.map_type != '':
        context['attribution'] = find_attribution(campaign.map_type)

    # Start date: '-' placeholders when the stored date is None.
    try:
        start_date = datetime.strptime(campaign.start_date, '%Y-%m-%d')
        context['start_date_date'] = start_date.strftime('%d %b')
        context['start_date_year'] = start_date.strftime('%Y')
    except TypeError:
        context['start_date_date'] = '-'
        context['start_date_year'] = '-'

    context['current_status'] = campaign.get_current_status()
    if context['current_status'] == 'active':
        context['current_status'] = 'running'

    # End date: same placeholder handling as the start date.
    try:
        end_date = datetime.strptime(campaign.end_date, '%Y-%m-%d')
        context['end_date_date'] = end_date.strftime('%d %b')
        context['end_date_year'] = end_date.strftime('%Y')
    except TypeError:
        context['end_date_date'] = '-'
        context['end_date_year'] = '-'
    return render_template('campaign_detail.html', **context)
def all(campaign_status=None, **kwargs):
    """Get all campaigns

    :param campaign_status: status of campaign, active or inactive
        (or 'all' to skip the status filter)
    :type campaign_status: str

    :return: Campaigns that found or none
    :rtype: [Campaign]
    """
    # campaigns is kept ordered by inserting each campaign at the
    # bisect position of its sort key within the parallel sort_list.
    sort_list = []
    campaigns = []
    for campaign_uuid in S3Data().list('campaigns'):
        try:
            campaign = Campaign.get(campaign_uuid)
            if campaign_status == 'all':
                allowed = True
            elif campaign_status == campaign.get_current_status():
                allowed = True
            else:
                allowed = False
            if allowed:
                # Default sort key is the campaign name.
                sort_object = campaign.name
                if 'sort_by' in kwargs:
                    if kwargs['sort_by'][0] == 'recent':
                        # Seconds since last edit (smaller = more recent).
                        # NOTE(review): strftime('%s') is a platform-
                        # specific extension (not portable) — confirm.
                        sort_object = int(
                            datetime.today().strftime('%s')) - int(
                            datetime.strptime(
                                campaign.edited_at,
                                "%a %b %d %H:%M:%S %Y").strftime('%s'))
                position = bisect.bisect(sort_list, sort_object)
                bisect.insort(sort_list, sort_object)
                campaigns.insert(position, campaign)
        except Campaign.DoesNotExist:
            pass
    # Optional pagination via per_page / page query values.
    if 'per_page' in kwargs:
        per_page = int(kwargs['per_page'][0])
        page = 1
        if 'page' in kwargs:
            page = int(kwargs['page'][0])
        start_index = (page - 1) * per_page
        campaigns = campaigns[start_index:start_index + per_page]
    return campaigns
def generate_static_map(self, simplify=False):
    """ Download static map from http://staticmap.openstreetmap.de
    with marker, then save it thumbnail folder.

    On a failed download of the full-detail map, retries once with a
    simplified geometry (overlay urls can exceed provider limits).

    :param simplify: if set to True, it will simplify the GeoJSON.
    :type simplify: boolean
    """
    url = self.generate_static_map_url(simplify)
    image_path = 'campaigns/{}/thumbnail.png'.format(self.uuid)
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        response.raw.decode_content = True
        from io import BytesIO
        S3Data().create(image_path, BytesIO(response.content))
    elif not simplify:
        # Fix: the fallback was gated on ``simplify`` already being True,
        # which recursed on the exact same failing request (potential
        # infinite recursion) and never actually fell back from the
        # full-detail map to the simplified one.
        return self.generate_static_map(simplify=True)
    self.thumbnail = S3Data().thumbnail_url(self.uuid)
def create(data, uploader):
    """Validate found dict based on campaign class.

    uuid should be same as uuid file. Writes the campaign JSON (without
    geometry) and the geometry GeoJSON as two separate S3 objects.

    :param data: data that will be inserted
    :type data: dict

    :param uploader: uploader who created
    :type uploader: str
    """
    # First parse includes 'geometry'; only its uuid is used below.
    campaign_data = Campaign.parse_campaign_data(data, uploader)
    geometry = data['geometry']
    del data['geometry']
    # NOTE(review): parse_campaign_data is invoked a second time *after*
    # 'geometry' was removed from ``data``, so this serialized payload
    # differs from ``campaign_data`` above — confirm this is intended
    # before deduplicating the two calls.
    json_str = Campaign.serialize(
        Campaign.parse_campaign_data(data, uploader))
    campaign_key = Campaign.get_json_file(campaign_data['uuid'])
    campaign_body = json_str
    S3Data().create(campaign_key, campaign_body)
    geocampaign_key = Campaign.get_geojson_file(campaign_data['uuid'])
    # geometry arrives as a JSON string; re-parse then re-dump it.
    geocampaign_body = json.dumps(parse_json_string(geometry))
    S3Data().create(geocampaign_key, geocampaign_body)
def get_data_from_s3(uuid, modified):
    """Fetch a campaign document, enrich it with per-type feature
    counts, completeness and geojson, and return the combined dict."""
    client = S3Data()
    # Make a request to get the campaign json and geojson.
    campaign = client.fetch('campaigns/{0}/campaign.json'.format(uuid))
    type_names = [
        t['type'].replace(' ', '_') for t in campaign['types'].values()
    ]
    all_features = []
    for name in type_names:
        all_features += S3Data().fetch(f'campaigns/{uuid}/{name}.json')
    complete = [f for f in all_features if f['status'] == 'Complete']
    campaign['complete_features'] = len(complete)
    campaign['feature_total'] = len(all_features)
    try:
        # NOTE(review): rounding to 0 decimals yields only 0.0 or 1.0 —
        # a percentage may have been intended; behavior kept as-is.
        completeness = round(len(complete) / len(all_features), 0)
    except ZeroDivisionError:
        completeness = 0
    campaign['completeness'] = completeness
    campaign['geojson'] = client.fetch(
        'campaigns/{0}/campaign.geojson'.format(uuid))
    campaign['modified'] = modified
    return campaign
def get(self):
    """Return data for every campaign, served from the local cache
    directory when available and from S3 otherwise."""
    uuids = S3Data().list('campaigns')
    cache_dir = os.path.join(config.CACHE_DIR, 'campaigns')
    # Check that folder exists. If not, create it.
    if not os.path.isdir(cache_dir):
        os.mkdir(cache_dir)
    cached = get_uuids_from_cache(cache_dir)
    # Get all campaign information from either s3 or cache directory.
    return [get_data(uuid, cached, cache_dir) for uuid in uuids]
def get(self, uuid, grid_id):
    """Zip every PDF stored for one grid cell and return it as an
    attachment named ``<grid_id>.zip``."""
    dir_path = f'campaigns/{uuid}/pdf/{grid_id}/'
    s3_client = S3Data()
    s3 = s3_client.s3
    kwargs = {"Bucket": s3_client.bucket, "Prefix": dir_path}
    resp = s3.list_objects_v2(**kwargs)
    try:
        pdfs = [obj['Key'] for obj in resp['Contents']]
    except KeyError:
        # Fix: an empty prefix has no 'Contents' key; return an empty
        # zip instead of a 500 (matches the bundle endpoint's guard).
        pdfs = []
    bundle_buffer = BytesIO()
    with ZipFile(bundle_buffer, 'w') as zip_obj:
        for pdf in pdfs:
            pdf_file = s3.get_object(Bucket=s3_client.bucket, Key=pdf)
            # Store under the bare filename (strip the prefix path).
            pdf_filename = pdf.split('/')[-1]
            zip_obj.writestr(pdf_filename, pdf_file['Body'].read())
    bundle_buffer.seek(0)
    resp = send_file(bundle_buffer,
                     as_attachment=True,
                     attachment_filename=f'{grid_id}.zip',
                     mimetype='application/zip')
    return resp
def get_type_geojsons(self, type):
    """Download and decode every gzipped geojson stored directly under
    the given type prefix; returns None when the prefix is empty."""
    client = S3Data()
    # For each type we get first level data.
    listing = client.s3.list_objects(Bucket=client.bucket,
                                     Prefix=type,
                                     Delimiter='/')
    if 'Contents' not in listing:
        return None
    keys = [
        obj['Key'] for obj in listing['Contents']
        if 'geojson' in obj['Key']
    ]
    # Now we have to download from s3 the geojson and return dictionary.
    geojsons = []
    for key in keys:
        raw = client.s3.get_object(Bucket=client.bucket,
                                   Key=key)['Body'].read()
        # Objects are stored gzip-compressed; 16 + MAX_WBITS selects
        # gzip-wrapped decoding in zlib.
        geojsons.append(json.loads(
            zlib.decompress(raw, 16 + zlib.MAX_WBITS)))
    return geojsons
def get_campaign_features(uuid):
    """Render per-type feature counts and completeness for a campaign."""
    context = get_campaign_data(uuid)
    for key, values in context['types'].items():
        # Fetch the feature json file for this type.
        file_name = 'campaigns/{0}/{1}.json'.format(
            uuid, values['type'].replace(' ', '_'))
        features = S3Data().fetch(file_name)
        values["feature_count"] = len(features)
        values['complete'] = 0
        values['incomplete'] = 0
        # Fix: removed the no-op self-assignment of 'element_type'.
        for f in features:
            if len(f['missing_attributes']) > 0:
                values['incomplete'] += 1
            else:
                values['complete'] += 1
        completeness = 0
        if values['feature_count'] > 0:
            completeness = values['complete'] / values['feature_count']
        values['completeness'] = round(completeness * 100)
        values['complete_status'] = completeness
    return render_template('campaign_features.html', **context)
def setUp(self):
    """Prepare the S3 client and a fixture campaign uuid."""
    self.s3_data = S3Data()
    self.uuid = 'ff6ff8fcfdd847c48dd1bc3c9107b397'