def update_rw_layer_year(ds_id, current_year, new_year):
    '''
    Given a Resource Watch dataset's API ID, the current year it is showing data for,
    and the year we want to change it to, this function will update all layers to show
    data for the new year.
    INPUT   ds_id: Resource Watch API dataset ID (string)
            current_year: current year used in dataset layers (integer)
            new_year: year we want to change the layers to show data for (integer)
    '''
    # convert once instead of on every replace call
    old_str, new_str = str(current_year), str(new_year)
    # pull the dataset we want to update
    dataset = lmi.Dataset(ds_id)
    # go through and update each of the layers
    for layer in dataset.layers:
        # Replace the year in the layer config's SQL query
        appConfig = layer.attributes['layerConfig']
        options = appConfig['body']['layers'][0]['options']
        options['sql'] = options['sql'].replace(old_str, new_str)
        payload = {'layerConfig': {**appConfig}}
        layer = layer.update(update_params=payload, token=API_TOKEN)
        # Replace the year in each interaction config output property
        interactionConfig = layer.attributes['interactionConfig']
        for element in interactionConfig['output']:
            element['property'] = element['property'].replace(old_str, new_str)
        payload = {'interactionConfig': {**interactionConfig}}
        layer = layer.update(update_params=payload, token=API_TOKEN)
        # Replace the year in the layer name and description; guard against a
        # None description (layers without one would crash .replace)
        new_name = layer.attributes['name'].replace(old_str, new_str)
        new_description = (layer.attributes['description'] or '').replace(old_str, new_str)
        payload = {'name': new_name, 'description': new_description}
        layer = layer.update(update_params=payload, token=API_TOKEN)
        print(layer)
# stdlib import was missing: os.getenv is used below but `os` was never imported
import os

import LMIPy as lmi
import dotenv

# load environment variables (RW_API_KEY) from a .env file
dotenv.load_dotenv('')

# input widget API ID for the empty advanced widget you have created and want to overwrite
widget_to_overwrite = ''

# enter embed url that you want to be shown in widget. see example below:
# url_to_embed = r"https://resourcewatch.org/embed/data/explore/cli006-Polar-Sea-Ice-Monthly-Median-Extents?section=All%20data&zoom=0.8955740754997088&lat=4.40585468259109&lng=-51.06249926194696&pitch=0&bearing=0&basemap=dark&labels=light&layers=%255B%257B%2522dataset%2522%253A%2522b1ebea96-5963-4c2c-9273-7d08536ac07d%2522%252C%2522opacity%2522%253A1%252C%2522layer%2522%253A%2522d87bb471-1ac0-4f79-818a-e270f04185bf%2522%257D%252C%257B%2522dataset%2522%253A%2522e740efec-c673-431a-be2c-b214613f641a%2522%252C%2522opacity%2522%253A1%252C%2522layer%2522%253A%2522d0713c73-941e-446f-a94d-8e75951e3b03%2522%257D%252C%257B%2522dataset%2522%253A%2522484fbba1-ac34-402f-8623-7b1cc9c34f17%2522%252C%2522opacity%2522%253A1%252C%2522layer%2522%253A%2522b92c01ee-eb2c-4835-8625-d138db75a1cd%2522%257D%255D&page=1&sort=most-viewed&sortDirection=-1&topics=%255B%2522sea_ice%2522%255D"
url_to_embed = r""

# create payload to send to API
payload = {"widgetConfig": {"url": f"{url_to_embed}"}}

# load in API credentials
API_TOKEN = os.getenv('RW_API_KEY')

# load the widget we are going to overwrite
widget = lmi.Widget(widget_to_overwrite)

# Update the widget
widget = widget.update(update_params=payload, token=API_TOKEN)
def duplicate_wb_layers(ds_id, update_years):
    '''
    Given a Resource Watch dataset's API ID and a list of years we want to add to it,
    this function will create new layers on Resource Watch for those years.
    INPUT   ds_id: Resource Watch API dataset ID (string)
            update_years: list of years for which we want to add layers to this dataset (list of integers)
    '''
    # pull the dataset we want to update
    dataset = lmi.Dataset(ds_id)
    # pull out its first layer to use as a template to create new layers
    layer_to_clone = dataset.layers[0]

    # get attributes that might need to change:
    name = layer_to_clone.attributes['name']
    description = layer_to_clone.attributes['description']
    appConfig = layer_to_clone.attributes['layerConfig']
    sql = appConfig['body']['layers'][0]['options']['sql']
    order = str(appConfig['order'])
    timeLineLabel = appConfig['timelineLabel']
    interactionConfig = layer_to_clone.attributes['interactionConfig']

    # pull out the year from the example layer's name - we will use this to find all
    # instances of the year within our example layer so that we can replace it with the
    # correct year in the new layers
    # NOTE(review): assumes the template layer's name BEGINS with a 4-digit year - confirm
    replace_string = name[:4]

    # replace year in example layer with '{}' to build naming conventions
    name_convention = name.replace(replace_string, '{}')
    description_convention = description.replace(replace_string, '{}')
    sql_convention = sql.replace(replace_string, '{}')
    order_convention = order.replace(replace_string, '{}')
    timeLineLabel_convention = timeLineLabel.replace(replace_string, '{}')
    for dictionary in interactionConfig.get('output'):
        for key, value in dictionary.items():
            # identity check for None (PEP 8) instead of `!= None`
            if value is not None and replace_string in value:
                dictionary[key] = value.replace(replace_string, '{}')

    # go through each year we want to add a layer for
    for year in update_years:
        # generate the layer attributes with the correct year
        new_layer_name = name_convention.replace('{}', str(year))
        new_description = description_convention.replace('{}', str(year))
        new_sql = sql_convention.replace('{}', str(year))
        new_timeline_label = timeLineLabel_convention.replace('{}', str(year))
        new_order = int(order_convention.replace('{}', str(year)))

        # Clone the example layer to make a new layer
        clone_attributes = {
            'name': new_layer_name,
            'description': new_description
        }
        new_layer = layer_to_clone.clone(token=API_TOKEN, env='production',
                                         layer_params=clone_attributes,
                                         target_dataset_id=ds_id)

        # Replace layerConfig with new values
        appConfig = new_layer.attributes['layerConfig']
        appConfig['body']['layers'][0]['options']['sql'] = new_sql
        appConfig['order'] = new_order
        appConfig['timelineLabel'] = new_timeline_label
        payload = {'layerConfig': {**appConfig}}
        new_layer = new_layer.update(update_params=payload, token=API_TOKEN)

        # Replace interaction config with new values
        interactionConfig = new_layer.attributes['interactionConfig']
        for element in interactionConfig['output']:
            if '{}' in element.get('property'):
                element['property'] = element['property'].replace('{}', str(year))
        payload = {'interactionConfig': {**interactionConfig}}
        new_layer = new_layer.update(update_params=payload, token=API_TOKEN)

        # Replace layer name and description
        payload = {
            'name': new_layer_name,
            'description': new_description
        }
        new_layer = new_layer.update(update_params=payload, token=API_TOKEN)
        print(new_layer)
        print('\n')
def clone_ds_ly_from_json(dataset_json, layer_files, token=None, dataset_pub=False, layer_pub=True):
    """
    Create a clone of a target Dataset as a new staging or prod Dataset.
    INPUT   dataset_json: minimal json of dataset info (dictionary)
            layer_files: list of layer json files (list of strings)
            token: RW API token (string)
            dataset_pub: should the dataset be published when it is created (boolean)
            layer_pub: should the layers be published when it is created (boolean)
    NOTE(review): relies on module-level names `layer_json_data_loc`, `create_headers`
    and `display` being defined elsewhere in the notebook - confirm before reuse.
    """
    clone_server = 'https://api.resourcewatch.org'
    if not token:
        raise ValueError(
            f'[token] Resource Watch API token required to clone.')
    else:
        ### update dataset
        # strip the id and server-managed fields before re-posting
        dataset_json.pop("id", None)
        dataset_json['attributes']['published'] = dataset_pub
        dataset_fields_to_drop = ["createdAt", "updatedAt", "userId"]
        for field in dataset_fields_to_drop:
            dataset_json['attributes'].pop(field, None)

        ### clone dataset
        url = f'{clone_server}/dataset'
        headers = create_headers()
        payload = {'dataset': {**dataset_json['attributes']}}
        display(json.dumps(payload))
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        if r.status_code == 200:
            clone_dataset_id = r.json()['data']['id']
            clone_dataset = LMIPy.Dataset(id_hash=clone_dataset_id, server=clone_server)
            print('Dataset created:')
            print(r.json()['data']['id'])
        else:
            # NOTE(review): on failure clone_dataset_id stays unbound and the layer
            # loop below raises NameError - confirm intended failure behavior
            print(r.status_code)

        ### clone layers
        layer_fields_to_drop = ["createdAt", "updatedAt", "userId", "dataset"]
        for layer_file in layer_files:
            # context manager so the file handle is always closed (was leaked before)
            with open(os.path.join(layer_json_data_loc, layer_file)) as layer_f:
                layer_json = json.load(layer_f)
            layer_json.pop("id", None)
            layer_json['attributes']['published'] = layer_pub
            for field in layer_fields_to_drop:
                layer_json['attributes'].pop(field, None)
            url = f'{clone_server}/dataset/{clone_dataset_id}/layer'
            print(url)
            payload = {'layer': {**layer_json['attributes']}}
            r = requests.post(url, data=json.dumps(payload), headers=headers)
            if r.status_code == 200:
                clone_layer_id = r.json()['data']['id']
                clone_layer = LMIPy.Layer(id_hash=clone_layer_id, server=clone_server)
                print('Layer created:')
                print(r.json()['data']['id'])
            else:
                print(r.status_code)
def clone_ds(self, token=None, enviro='preproduction', clone_server=None, dataset_params=None, clone_children=True, clone_first_layer_only=True, clone_default_widget_only=True, published=False):
    """
    Create a clone of a target Dataset as a new staging or prod Dataset.
    A set of attributes can be specified for the clone Dataset.
    The argument `clone_server` specifies the server to clone to. Default server = https://api.resourcewatch.org
    Set clone_children=True to clone all child layers, and widgets.
    Set published=True to publish the layer.
    """
    # default to the source dataset's own server when no clone target is given
    if not clone_server:
        clone_server = self.server
    if not token:
        raise ValueError(
            f'[token] Resource Watch API token required to clone.')
    else:
        # NOTE(review): dataset_params defaults to None but is dereferenced
        # unconditionally here - calling without dataset_params raises.
        # Confirm all callers pass a dict.
        name = dataset_params.get('name', self.attributes['name'] + 'CLONE')
        clone_dataset_attr = {**self.attributes, 'name': name}
        # overwrite any attribute explicitly supplied by the caller
        for k, v in clone_dataset_attr.items():
            if k in dataset_params:
                clone_dataset_attr[k] = dataset_params.get(k, '')
        # minimal payload the RW API needs to create the dataset
        payload = {
            'dataset': {
                'application': ['rw'],
                'connectorType': clone_dataset_attr['connectorType'],
                'connectorUrl': clone_dataset_attr['connectorUrl'],
                'tableName': clone_dataset_attr['tableName'],
                'provider': clone_dataset_attr['provider'],
                'published': published,
                'env': enviro,
                'name': clone_dataset_attr['name'],
                'widgetRelevantProps': clone_dataset_attr['widgetRelevantProps'],
                'geoInfo': clone_dataset_attr['geoInfo'],
                'layerRelevantProps': clone_dataset_attr['layerRelevantProps'],
                'type': clone_dataset_attr['type']
            }
        }
        # optional attributes are only sent when present on the source dataset
        if 'applicationConfig' in clone_dataset_attr:
            payload['dataset'].update(
                {'applicationConfig': clone_dataset_attr['applicationConfig']})
        if 'subscribable' in clone_dataset_attr:
            payload['dataset'].update(
                {'subscribable': clone_dataset_attr['subscribable']})
        print(f'Creating clone dataset')
        url = f'{clone_server}/dataset'
        headers = {
            'Authorization': f'Bearer {token}',
            'Content-Type': 'application/json',
            'Cache-Control': 'no-cache'
        }
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        if r.status_code == 200:
            clone_dataset_id = r.json()['data']['id']
            clone_dataset = LMIPy.Dataset(id_hash=clone_dataset_id,
                                          server=clone_server)
        else:
            print(r.status_code)
        # NOTE(review): if the POST above failed, clone_dataset_id is unbound and
        # this print raises NameError - confirm intended failure behavior.
        print(f'{clone_server}/v1/dataset/{clone_dataset_id}')
        if clone_children:
            # --- clone child layers ---
            layers = self.layers
            if len(layers) > 0:
                if clone_first_layer_only:
                    # only the first layer is cloned in this mode
                    l = layers[0]
                    if l.attributes['application'] == ['rw']:
                        try:
                            layer_name = l.attributes['name']
                            print('Cloning layer: {}'.format(layer_name))
                            l.clone(token=token, env='production', layer_params={
                                    'name': layer_name}, target_dataset_id=clone_dataset_id)
                            # brief pause between successive API writes
                            time.sleep(2)
                        except:
                            raise ValueError(
                                f'Layer cloning failed for {l.id}')
                else:
                    for l in layers:
                        if l.attributes['application'] == ['rw']:
                            try:
                                layer_name = l.attributes['name']
                                print('Cloning layer: {}'.format(layer_name))
                                l.clone(token=token, env='production', layer_params={
                                        'name': layer_name}, target_dataset_id=clone_dataset_id)
                                time.sleep(2)
                            except:
                                raise ValueError(
                                    f'Layer cloning failed for {l.id}')
            else:
                print("No child layers to clone!")
            #clone widgets
            try:
                url = f'{self.server}/v1/dataset/{self.id}?includes=vocabulary,metadata,layer,widget'
                r = requests.get(url)
                widgets = r.json()['data']['attributes']['widget']
            except:
                # NOTE(review): widgets stays unbound if retrieval fails, so the
                # len(widgets) check below would raise NameError.
                print("Could not retrieve widgets.")
            if len(widgets) > 0:
                if clone_default_widget_only:
                    # only clone the widget flagged as the default editable one
                    for w in widgets:
                        widget = w['attributes']
                        if widget['defaultEditableWidget']:
                            try:
                                # NOTE(review): the trailing commas make name and
                                # widget_config 1-tuples (still truthy, so the
                                # check below passes) - likely unintended.
                                name = widget['name'],
                                widget_config = widget['widgetConfig'],
                                app = widget['application']
                                ds_id = clone_dataset_id
                                if app == ['rw']:
                                    if name and widget_config and app:
                                        widget_payload = {
                                            "name": widget['name'],
                                            "description": widget.get('description', None),
                                            "env": widget['env'],
                                            "widgetConfig": widget['widgetConfig'],
                                            "application": ['rw']
                                        }
                                        try:
                                            url = f'{self.server}/v1/dataset/{ds_id}/widget'
                                            print(url)
                                            headers = {
                                                'Authorization': 'Bearer ' + token,
                                                'Content-Type': 'application/json'
                                            }
                                            r = requests.post(
                                                url, data=json.dumps(
                                                    widget_payload), headers=headers)
                                            print(r.json())
                                        except:
                                            raise ValueError(
                                                f'Widget creation failed.')
                                        if r.status_code == 200:
                                            print(f'Widget created.')
                                            # self.attributes = self.get_dataset()
                                        else:
                                            print(
                                                f'Failed with error code {r.status_code}'
                                            )
                                    else:
                                        raise ValueError(
                                            f'Widget creation requires name string, application list and a widgetConfig object.'
                                        )
                                    # clone_dataset.add_widget(token=token, widget_params=widget_payload)
                                else:
                                    print("Non-rw app. Not cloning.")
                            except:
                                # NOTE(review): widget is a dict here, so
                                # widget.id in the message would itself fail.
                                raise ValueError(
                                    f'Widget cloning failed for {widget.id}')
                else:
                    # clone every widget, not just the default one
                    for w in widgets:
                        widget = w['attributes']
                        try:
                            # NOTE(review): same trailing-comma tuples as above.
                            name = widget['name'],
                            widget_config = widget['widgetConfig'],
                            app = widget['application']
                            ds_id = clone_dataset_id
                            if app == ['rw']:
                                if name and widget_config and app:
                                    widget_payload = {
                                        "name": widget['name'],
                                        "description": widget.get('description', None),
                                        "env": widget['env'],
                                        "widgetConfig": widget['widgetConfig'],
                                        "application": ['rw']
                                    }
                                    try:
                                        url = f'{self.server}/v1/dataset/{ds_id}/widget'
                                        print(url)
                                        headers = {
                                            'Authorization': 'Bearer ' + token,
                                            'Content-Type': 'application/json'
                                        }
                                        r = requests.post(
                                            url, data=json.dumps(widget_payload), headers=headers)
                                        print(r.json())
                                    except:
                                        raise ValueError(
                                            f'Widget creation failed.')
                                    if r.status_code == 200:
                                        print(f'Widget created.')
                                        #self.attributes = self.get_dataset()
                                    else:
                                        print(
                                            f'Failed with error code {r.status_code}'
                                        )
                                else:
                                    raise ValueError(
                                        f'Widget creation requires name string, application list and a widgetConfig object.'
                                    )
                                #clone_dataset.add_widget(token=token, widget_params=widget_payload)
                            else:
                                print("Non-rw app. Not cloning.")
                        except:
                            raise ValueError(
                                f'Widget cloning failed for {widget.id}')
            else:
                print("No child widgets to clone!")
            # --- clone vocabularies (tags) ---
            vocabs = self.vocabulary
            if len(vocabs) > 0:
                for v in vocabs:
                    vocab = v.attributes
                    if vocab['application'] == 'rw':
                        vocab_payload = {
                            'application': vocab['application'],
                            'name': vocab['name'],
                            'tags': vocab['tags']
                        }
                        try:
                            clone_dataset.add_vocabulary(
                                vocab_params=vocab_payload, token=token)
                        except:
                            raise ValueError('Failed to clone Vocabulary.')
            # --- clone metadata ---
            metas = self.metadata
            if len(metas) > 0:
                for m in metas:
                    meta = m.attributes
                    if meta['application'] == 'rw':
                        meta_payload = {
                            "dataset": meta['dataset'],
                            'application': meta['application'],
                            'language': meta['language'],
                            "name": meta['name'],
                            'description': meta['description'],
                            "source": meta['source'],
                            'info': meta['info'],
                        }
                        if 'columns' in meta:
                            meta_payload.update({'columns': meta['columns']})
                        try:
                            rw_api_url = 'https://api.resourcewatch.org/v1/dataset/{}/metadata'.format(
                                clone_dataset.id)
                            res = requests.request(
                                "POST", rw_api_url, data=json.dumps(meta_payload), headers=create_headers())
                            print('Metadata created.')
                        except:
                            raise ValueError('Failed to clone Metadata.')
    # self.attributes = Dataset(clone_dataset_id, server=clone_server).attributes
    return clone_dataset_id
# NOTE(review): this chunk previously began with an orphaned duplicate of the tail
# of clone_ds() (the metadata payload and `return clone_dataset_id`), which is not
# valid at module level; the duplicate has been removed and only the usage script kept.

# Make a copy of the dataset identified by `dataset_id`
# (dataset_id, new_dataset_name, clone_first_layer_only and
# clone_default_widget_only are expected to be defined earlier in the notebook)
dataset_to_copy = LMIPy.Dataset(dataset_id)
clone_attributes = {'name': new_dataset_name}

# Clone dataset
new_dataset_id = clone_ds(dataset_to_copy, token=API_TOKEN, enviro='production',
                          dataset_params=clone_attributes, clone_children=True,
                          clone_first_layer_only=clone_first_layer_only,
                          clone_default_widget_only=clone_default_widget_only)
print('new dataset API ID:' + new_dataset_id)