def publish(self):
    """
    Publish the store's json reference (if exists) to the repository.

    Writes a json state file describing this wms store, commits it to the
    borg state repository via mercurial, and attempts to push the commit.
    The push lock is always released in the finally block.
    """
    json_filename = self.json_filename_abs;
    # acquire (or join) the push lock for the "wmsserver" owner before touching the repo
    try_set_push_owner("wmsserver")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.name
        # NOTE(review): assigns self.get_capability_url without calling it —
        # presumably a property; confirm it is not a bound method.
        json_out["capability_url"] = self.get_capability_url
        json_out["username"] = self.user or ""
        json_out["password"] = self.password or ""
        json_out["workspace"] = self.workspace.name
        json_out["publish_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        if self.geoserver_setting:
            # geoserver_setting is stored as a json string; embed it as structured data
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Update wms store {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("wmsserver",hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("wmsserver")
def custom_delete_selected(self, request, queryset):
    """
    Admin action: delete the selected normalises with custom repository logic.

    First request shows the standard confirmation page; once the user has
    confirmed (POST contains post=yes), each normalise is deleted inside its
    own transaction, then the accumulated repository changes are pushed.
    Per-object failures are collected and reported via admin messages
    instead of aborting the whole batch.
    """
    if request.POST.get('post') != 'yes':
        #the confirm page, or user not confirmed
        return self.default_delete_action[0](self, request, queryset)

    #user confirm to delete the normalisees, execute the custom delete logic.
    result = None
    failed_normalises = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("normalise_admin", enforce=True)
    warning_message = None
    try:
        for normalise in queryset:
            try:
                # each delete is atomic so a failure doesn't leave partial rows
                with transaction.atomic():
                    normalise.delete()
            except:
                error = sys.exc_info()
                failed_normalises.append( (normalise.name, traceback.format_exception_only(error[0], error[1])))
                #remove failed, continue to process the next normalise
                continue
        try:
            # push whatever was committed, even if some deletes failed
            try_push_to_repository('normalise_admin', enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only( error[0], error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("normalise_admin", enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_normalises or warning_message:
        if failed_normalises:
            if warning_message:
                messages.warning( request, mark_safe( "<ul><li>{0}</li><li>Some selected normalises are deleted failed:<ul>{1}</ul></li></ul>" .format( warning_message, "".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_normalises ]))))
            else:
                messages.warning( request, mark_safe( "Some selected normalises are deleted failed:<ul>{0}</ul>" .format("".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_normalises ]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success( request, "All selected normalises are deleted successfully")
def empty_gwc(self, request, queryset):
    """
    Admin action: empty the GWC (geowebcache) for the selected layer groups.

    Skips groups that were never published, calls empty_gwc() on the rest,
    then pushes the accumulated repository changes. Per-object failures are
    collected and reported via admin messages instead of aborting the batch.
    """
    result = None
    failed_objects = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("layergroup_admin", enforce=True)
    warning_message = None
    try:
        for g in queryset:
            try:
                if g.publish_status.unpublished:
                    #Not published before.
                    failed_objects.append(
                        ("{0}:{1}".format(g.workspace, g.name),
                         "Not published before, no need to empty gwc."))
                    continue
                g.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                # BUGFIX: was "{0}:{1}".format(l.server, l.name) — `l` is not
                # defined in this loop (copy/paste from a layer action), which
                # raised NameError instead of recording the failure.
                failed_objects.append(
                    ("{0}:{1}".format(g.workspace, g.name),
                     traceback.format_exception_only(error[0], error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('layergroup_admin', enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(
                error[0], error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("layergroup_admin", enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(
                    request,
                    mark_safe(
                        "<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>"
                        .format(
                            warning_message, "".join([
                                "<li>{0} : {1}</li>".format(o[0], o[1])
                                for o in failed_objects
                            ]))))
            else:
                messages.warning(
                    request,
                    mark_safe(
                        "Some selected layers are processed failed:<ul>{0}</ul>"
                        .format("".join([
                            "<li>{0} : {1}</li>".format(o[0], o[1])
                            for o in failed_objects
                        ]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(
            request, "All selected layers are processed successfully.")
def empty_gwc(self):
    """
    Update the layer group's json for 'empty gwc' to the repository.

    Writes an action file instructing the downstream consumer to empty the
    geowebcache for this group, commits it and tries to push. No-op when
    the group is not in a published state.
    """
    if self.status not in [ResourceStatus.PUBLISHED,ResourceStatus.UPDATED]:
        #layer is not published, no need to empty gwc
        return

    json_filename = self.json_filename_abs;
    try_set_push_owner("layergroup")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.name
        json_out["workspace"] = self.workspace.name
        # "action" tells the consumer this file requests a cache flush, not a publish
        json_out["action"] = "empty_gwc"
        json_out["empty_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        if self.geoserver_setting:
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Empty GWC of layer group {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("layergroup",hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("layergroup")
def publish_meta_data(self,request,queryset):
    """
    Admin action: publish meta data for the selected publishes.

    Calls publish_meta_data() on each object, then pushes the accumulated
    repository changes. Per-object failures are collected and reported via
    admin messages instead of aborting the whole batch.
    """
    result = None
    failed_objects = []
    #import ipdb;ipdb.set_trace()
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("publish_admin",enforce=True)
    warning_message = None
    try:
        for publish in queryset:
            try:
                publish.publish_meta_data()
            except:
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(publish.workspace.name,publish.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('publish_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_admin",enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Pushing changes to repository failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Publish meta data failed for some selected publishs:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "Publish meta data successfully for all selected publishs")
def unpublish(self):
    """
    Remove the store's json reference (if exists) from the repository.

    Returns:
        True if the store's state file existed and was removed/committed;
        False if no state file existed in the repository.
    """
    json_files = [ self.json_filename_abs(action) for action in [ 'publish' ] ]
    #get all existing files.
    json_files = [ f for f in json_files if os.path.exists(f) ]
    if json_files:
        #file exists, layers is published, remove it.
        try_set_push_owner("liveserver")
        hg = None
        try:
            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
            hg.remove(files=json_files)
            hg.commit(include=json_files,addremove=True, user="******", message="Remove live store {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()

            try_push_to_repository("liveserver",hg)
        finally:
            if hg: hg.close()
            try_clear_push_owner("liveserver")
        return True
    else:
        return False
def empty_gwc(self, request, queryset):
    """
    Admin action: empty the GWC (geowebcache) for the selected publishes.

    Skips disabled publishes, calls empty_gwc() on the rest, then pushes the
    accumulated repository changes. Per-object failures are collected and
    reported via admin messages instead of aborting the whole batch.
    """
    result = None
    failed_objects = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("publish_admin", enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                if l.publish_status not in [ResourceStatus.Enabled]:
                    #Publish is disabled.
                    failed_objects.append( ("{0}:{1}".format(l.workspace.name, l.name), "Disabled, no need to empty gwc."))
                    continue
                l.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append( ("{0}:{1}".format(l.workspace.name, l.name), traceback.format_exception_only(error[0], error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('publish_admin', enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only( error[0], error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_admin", enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning( request, mark_safe( "<ul><li>{0}</li><li>Some selected publishs are processed failed:<ul>{1}</ul></li></ul>" .format( warning_message, "".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_objects ]))))
            else:
                messages.warning( request, mark_safe( "Some selected publishs are processed failed:<ul>{0}</ul>" .format("".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_objects ]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success( request, "All selected publishs are processed successfully.")
def unpublish(self): """ remove store's json reference (if exists) from the repository, return True if store is removed for repository; return false, if layers does not existed in repository. """ #remove it from catalogue service res = requests.delete("{}/catalogue/api/records/{}:{}/".format(settings.CSW_URL,self.datasource.workspace.name,self.kmi_name),auth=(settings.CSW_USER,settings.CSW_PASSWORD)) if res.status_code != 404: res.raise_for_status() json_files = [ self.json_filename_abs(action) for action in [ 'publish','empty_gwc' ] ] #get all existing files. json_files = [ f for f in json_files if os.path.exists(f) ] if json_files: #file exists, layers is published, remove it. try_set_push_owner("livelayer") hg = None try: hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY) hg.remove(files=json_files) hg.commit(include=json_files,addremove=True, user="******", message="Remove live layer {}.{}".format(self.datasource.workspace.name, self.kmi_name)) increase_committed_changes() try_push_to_repository("livelayer",hg) finally: if hg: hg.close() try_clear_push_owner("livelayer") return True else: return False
def empty_gwc(self):
    """
    Update the layer's json for 'empty gwc' to the repository.

    Writes an action file instructing the downstream consumer to empty the
    geowebcache for this live layer, commits it and tries to push.

    Raises:
        ValidationError: if the layer has not been published before.
    """
    if self.publish_status.unpublished:
        #layer is not published, no need to empty gwc
        raise ValidationError("The wms layer({0}) is not published before.".format(self.kmi_name))

    json_filename = self.json_filename_abs('empty_gwc');
    try_set_push_owner("livelayer")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.kmi_name
        json_out["workspace"] = self.datasource.workspace.name
        json_out["store"] = self.datasource.name
        # "action" tells the consumer this file requests a cache flush, not a publish
        json_out["action"] = "empty_gwc"
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Empty GWC of live layer {}.{}".format(self.datasource.workspace.name, self.kmi_name))
        increase_committed_changes()

        try_push_to_repository("livelayer",hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("livelayer")
def _change_status(self, request, queryset, status, update_fields=None):
    """
    Admin action helper: move the selected wms layers towards `status`.

    For each layer, computes the next status via get_next_status(); saving
    is skipped when the status would not change. Accumulated repository
    changes are pushed at the end; per-object failures are collected and
    reported via admin messages instead of aborting the whole batch.
    """
    result = None
    failed_objects = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("wmslayer_admin", enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                target_status = l.get_next_status(l.status, status)
                if target_status == l.status:
                    #status not changed
                    continue
                else:
                    l.status = target_status
                    l.save(update_fields=update_fields)
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append( ("{0}:{1}".format(l.server, l.name), traceback.format_exception_only(error[0], error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('wmslayer_admin', enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only( error[0], error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmslayer_admin", enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning( request, mark_safe( "<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>" .format( warning_message, "".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_objects ]))))
            else:
                messages.warning( request, mark_safe( "Some selected layers are processed failed:<ul>{0}</ul>" .format("".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_objects ]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success( request, "All selected layers are processed successfully.")
def custom_delete_selected(self, request, queryset):
    """
    Admin action: delete the selected wms servers with custom repository logic.

    First request shows the standard confirmation page; once the user has
    confirmed (POST contains post=yes), each server is deleted, then the
    accumulated repository changes are pushed. Per-object failures are
    collected and reported via admin messages instead of aborting the batch.
    """
    if request.POST.get('post') != 'yes':
        #the confirm page, or user not confirmed
        return self.default_delete_action[0](self, request, queryset)

    result = None
    failed_objects = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("wmsserver_admin", enforce=True)
    warning_message = None
    try:
        for server in queryset:
            #import ipdb;ipdb.set_trace()
            try:
                #delete the server
                server.delete()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(
                    ("{0}:{1}".format(server.workspace.name, server.name),
                     traceback.format_exception_only(error[0], error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some deletes failed
            try_push_to_repository('wmsserver_admin', enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(
                error[0], error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmsserver_admin", enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(
                    request,
                    mark_safe(
                        "<ul><li>{0}</li><li>Some selected servers are deleted failed:<ul>{1}</ul></li></ul>"
                        .format(
                            warning_message, "".join([
                                "<li>{0} : {1}</li>".format(o[0], o[1])
                                for o in failed_objects
                            ]))))
            else:
                messages.warning(
                    request,
                    mark_safe(
                        "Some selected servers are deleted failed:<ul>{0}</ul>"
                        .format("".join([
                            "<li>{0} : {1}</li>".format(o[0], o[1])
                            for o in failed_objects
                        ]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        # BUGFIX: success message typo — was "All selected ervers ..."
        messages.success(request,
                         "All selected servers are deleted successfully.")
def _change_status(self, request, queryset, action, update_fields=None):
    """
    Admin action helper: apply `action` to the selected layer groups.

    For each group, computes the next status via next_status(); saving is
    skipped only when the status is unchanged AND no publish/unpublish is
    pending. Accumulated repository changes are pushed at the end;
    per-object failures are collected and reported via admin messages
    instead of aborting the whole batch.
    """
    result = None
    failed_objects = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("layergroup_admin", enforce=True)
    warning_message = None
    try:
        for group in queryset:
            #import ipdb;ipdb.set_trace()
            try:
                target_status = group.next_status(action)
                if target_status == group.status and not group.publish_required and not group.unpublish_required:
                    #status not changed
                    continue
                else:
                    group.status = target_status
                    group.save(update_fields=update_fields)
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append( ("{0}".format(group.name), traceback.format_exception_only(error[0], error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('layergroup_admin', enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only( error[0], error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("layergroup_admin", enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning( request, mark_safe( "<ul><li>{0}</li><li>Some selected groups are processed failed:<ul>{1}</ul></li></ul>" .format( warning_message, "".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_objects ]))))
            else:
                messages.warning( request, mark_safe( "Some selected groups are processed failed:<ul>{0}</ul>" .format("".join([ "<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_objects ]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All groups are processed successfully.")
def publish(self):
    """
    Publish the layer group's json state file to the repository.

    Only member layers/publishes/sub-groups that are already published are
    included. Also updates the catalogue service, removes any stale
    'empty_gwc' action file, commits the state file and tries to push.

    Raises:
        LayerGroupEmpty: if no member is currently published.
    """
    json_filename = self.json_filename_abs('publish');
    try_set_push_owner("layergroup")
    hg = None
    try:
        # update the catalogue service first; its response seeds the json payload
        json_out = self.update_catalogue_service(extra_datas={"publication_date": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")})
        layers = []
        # collect the published members in display order; each entry records
        # its type so the consumer knows how to resolve it
        for group_layer in LayerGroupLayers.objects.filter(group=self).order_by("order"):
            if group_layer.layer and group_layer.layer.is_published:
                layers.append({"type":"wms_layer","name":group_layer.layer.name,"store":group_layer.layer.server.name,"workspace":group_layer.layer.server.workspace.name})
            elif group_layer.publish and group_layer.publish.is_published:
                layers.append({"type":"publish","name":group_layer.publish.name,"workspace":group_layer.publish.workspace.name})
            elif group_layer.sub_group and group_layer.sub_group.is_published:
                layers.append({"type":"group","name":group_layer.sub_group.name,"workspace":group_layer.sub_group.workspace.name})
        if not layers:
            #layergroup is empty,remove it.
            raise LayerGroupEmpty("Layer group can't be empty.")
        json_out["layers"] = layers
        json_out["srs"] = self.srs or None
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        # record the published groups that include this one as dependencies
        inclusions = self.get_inclusions()
        dependent_groups = []
        for group in inclusions[2].keys():
            if group.is_published:
                dependent_groups.append({"name":group.name,"workspace":group.workspace.name})
        json_out["dependent_groups"] = dependent_groups

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

        #remove other related json files
        json_files = [ self.json_filename_abs(action) for action in [ 'empty_gwc' ] ]
        #get all existing files.
        json_files = [ f for f in json_files if os.path.exists(f) ]
        if json_files:
            hg.remove(files=json_files)

        json_files.append(json_filename)
        hg.commit(include=json_files, user="******",addremove=True, message="Update layer group {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("layergroup",hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("layergroup")
def publish(self):
    """
    Publish the layer group's json reference (if exists) to the repository.

    Only member layers/sub-groups that are already published are included
    (publish members are included unconditionally here). Commits the state
    file and tries to push.

    Raises:
        LayerGroupEmpty: if no member qualifies for inclusion.
    """
    json_filename = self.json_filename_abs;
    try_set_push_owner("layergroup")
    hg = None
    try:
        layers = []
        # collect the members in display order; each entry records its type
        # so the consumer knows how to resolve it
        for group_layer in LayerGroupLayers.objects.filter(group=self).order_by("order"):
            if group_layer.layer and group_layer.layer.is_published:
                layers.append({"type":"wms_layer","name":group_layer.layer.layer_name,"store":group_layer.layer.server.name,"workspace":group_layer.layer.server.workspace.name})
            elif group_layer.publish :
                # NOTE(review): unlike layer/sub_group, publish members are not
                # filtered by is_published here — confirm this is intentional.
                layers.append({"type":"publish","name":group_layer.publish.name,"workspace":group_layer.publish.workspace.name})
            elif group_layer.sub_group and group_layer.sub_group.is_published:
                layers.append({"type":"group","name":group_layer.sub_group.name,"workspace":group_layer.sub_group.workspace.name})
        if not layers:
            #layergroup is empty,remove it.
            raise LayerGroupEmpty("Layer group can't be empty.")

        json_out = {}
        json_out["layers"] = layers;
        json_out["name"] = self.name
        json_out["title"] = self.title or ""
        json_out["abstract"] = self.abstract or ""
        json_out["workspace"] = self.workspace.name
        json_out["srs"] = self.srs
        json_out["publish_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")

        # record the published groups that include this one as dependencies
        inclusions = self.get_inclusions()
        dependent_groups = []
        for group in inclusions[2].keys():
            if group.is_published:
                dependent_groups.append({"name":group.name,"workspace":group.workspace.name})
        json_out["dependent_groups"] = dependent_groups

        if self.geoserver_setting:
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename], user="******",addremove=True, message="Update layer group {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("layergroup",hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("layergroup")
def publish(self):
    """
    Publish the store's json reference (if exists) to the repository.

    Writes a separate meta-data file (connection details for the live
    store), then a json state file that points at the meta file with its
    md5 checksum, commits the state file and tries to push.
    """
    try_set_push_owner("liveserver")
    hg = None
    try:
        meta_data = {}
        meta_data["name"] = self.name
        meta_data["host"] = self.host
        meta_data["port"] = self.port
        meta_data["database"] = self.db_name
        meta_data["user"] = self.user
        meta_data["passwd"] = self.password
        meta_data["schema"] = self.schema
        meta_data["workspace"] = self.workspace.name

        if self.geoserver_setting:
            meta_data["geoserver_setting"] = json.loads(self.geoserver_setting)

        #write meta data file
        file_name = "{}.{}.meta.json".format(self.workspace.name,self.name)
        meta_file = os.path.join(BorgConfiguration.LIVE_STORE_DIR,file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))

        with open(meta_file,"wb") as output:
            json.dump(meta_data, output, indent=4)

        # the state file references the meta file by path and md5 so the
        # consumer can detect meta-data changes
        json_out = {}
        json_out['meta'] = {"file":"{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file),"md5":file_md5(meta_file)}
        json_out['action'] = 'publish'
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        json_filename = self.json_filename_abs('publish');

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Update live store {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("liveserver",hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("liveserver")
def unpublish(self):
    """
    Unpublish the wms store: commit a 'remove' action file to the repository.

    Writes a meta-data file identifying the store, then a json state file
    with action='remove' that points at the meta file with its md5 checksum,
    commits the state file and tries to push.
    """
    try_set_push_owner("wmsserver")
    hg = None
    try:
        meta_data = {}
        meta_data["name"] = self.name
        meta_data["workspace"] = self.workspace.name

        #write meta data file
        file_name = "{}.meta.json".format(self.name)
        meta_file = os.path.join(BorgConfiguration.UNPUBLISH_DIR, self.workspace.publish_channel.name, self.workspace.name, "stores", file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))

        with open(meta_file, "wb") as output:
            json.dump(meta_data, output, indent=4)

        # the state file references the meta file by path and md5 so the
        # consumer can detect meta-data changes
        json_out = {}
        json_out['meta'] = { "file": "{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file), "md5": file_md5(meta_file) }
        json_out['action'] = 'remove'
        json_out["remove_time"] = timezone.localtime( timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        json_filename = self.json_filename_abs('unpublish')

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename], addremove=True, user="******", message="Unpublish wms store {}.{}".format( self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("wmsserver", hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("wmsserver")
def publish(self):
    """
    Publish the layer's json reference (if exists) to the repository.

    Updates the catalogue service (whose response becomes the meta data),
    writes the meta file, then a json state file pointing at the meta file
    with its md5 checksum. Any stale 'empty_gwc' action file is removed in
    the same commit, which is then pushed.
    """
    json_filename = self.json_filename_abs('publish');
    try_set_push_owner("livelayer")
    hg = None
    try:
        # the catalogue service response doubles as the layer's meta data
        meta_data = self.update_catalogue_service(md5=True,extra_datas={"publication_date":datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")})

        #write meta data file
        file_name = "{}.{}.meta.json".format(self.datasource.workspace.name,self.kmi_name)
        meta_file = os.path.join(BorgConfiguration.LIVE_LAYER_DIR,file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))

        with open(meta_file,"wb") as output:
            json.dump(meta_data, output, indent=4)

        # the state file references the meta file by path and md5 so the
        # consumer can detect meta-data changes
        json_out = {}
        json_out['meta'] = {"file":"{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file),"md5":file_md5(meta_file)}
        json_out['action'] = "publish"
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

        #remove other related json files
        json_files = [ self.json_filename_abs(action) for action in [ 'empty_gwc' ] ]
        #get all existing files.
        json_files = [ f for f in json_files if os.path.exists(f) ]
        if json_files:
            hg.remove(files=json_files)

        json_files.append(json_filename)
        hg.commit(include=json_files,addremove=True, user="******", message="update live layer {}.{}".format(self.datasource.workspace.name, self.kmi_name))
        increase_committed_changes()

        try_push_to_repository("livelayer",hg)
    finally:
        if hg: hg.close()
        try_clear_push_owner("livelayer")
def empty_gwc(self,request,queryset):
    """
    Admin action: empty the GWC for the selected wms layers.

    Skips layers never published; for each emptied layer, also empties the
    cache of any containing layer group that requires publishing. Then the
    accumulated repository changes are pushed; per-object failures are
    collected and reported via admin messages instead of aborting the batch.
    """
    result = None
    failed_objects = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("wmslayer_admin",enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                if l.publish_status.unpublished:
                    #Not published before.
                    failed_objects.append(("{0}:{1}".format(l.server,l.name),"Not published before, no need to empty gwc."))
                    continue
                l.empty_gwc()
                #empty the related layergroup's cache
                for layer in LayerGroupLayers.objects.filter(layer = l):
                    # NOTE(review): target_status is assigned but never read;
                    # next_status() may have side effects that feed the
                    # publish_required check below — confirm before removing.
                    target_status = layer.group.next_status(ResourceAction.CASCADE_PUBLISH)
                    if layer.group.publish_required:
                        layer.group.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(l.server,l.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('wmslayer_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmslayer_admin",enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected layers are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected layers are processed successfully.")
def empty_gwc(self,request,queryset):
    """
    Admin action: empty the GWC for the selected wms layers.

    Skips layers not in a published state; for each emptied layer, also
    empties the cache of containing layer groups whose next status would be
    PUBLISH. Then the accumulated repository changes are pushed; per-object
    failures are collected and reported via admin messages instead of
    aborting the batch.
    """
    result = None
    failed_objects = []
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("wmslayer_admin",enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                if l.status not in [ResourceStatus.PUBLISHED,ResourceStatus.UPDATED]:
                    #Not published before.
                    failed_objects.append(("{0}:{1}".format(l.server,l.name),"Not published before, no need to empty gwc."))
                    continue
                l.empty_gwc()
                #empty the related layergroup's cache
                for layer in LayerGroupLayers.objects.filter(layer = l):
                    target_status = layer.group.get_next_status(layer.group.status,ResourceStatus.SIDE_PUBLISH)
                    # NOTE(review): compares against ResourceStatus.PUBLISH while
                    # the check above uses PUBLISHED — confirm PUBLISH is a
                    # distinct, valid status value here.
                    if target_status == ResourceStatus.PUBLISH:
                        layer.group.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(l.server,l.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('wmslayer_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmslayer_admin",enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected layers are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected layers are processed successfully.")
def disable_publish(self,request,queryset):
    """
    Admin action: unpublish and disable the selected publishes.

    Each publish is unpublished, then its status is set to Disabled and all
    job/pending fields are cleared (skipped when already disabled). The
    accumulated repository changes are pushed at the end; per-object
    failures are collected and reported via admin messages instead of
    aborting the whole batch.
    """
    result = None
    failed_objects = []
    #import ipdb;ipdb.set_trace()
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("publish_admin",enforce=True)
    warning_message = None
    try:
        for publish in queryset:
            try:
                publish.unpublish()
                if publish.status != ResourceStatus.Disabled.name:
                    # clear job tracking state along with the status change
                    publish.status = ResourceStatus.Disabled.name
                    publish.pending_actions = None
                    publish.job_id = None
                    publish.job_batch_id = None
                    publish.job_status = None
                    publish.save(update_fields=['status','pending_actions','job_id','job_batch_id','job_status'])
            except:
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(publish.workspace.name,publish.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('publish_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_admin",enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Pushing changes to repository failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Disable failed for some selected publishs:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "Disable successfully for all selected publishs")
def disable_publish(self,request,queryset):
    """
    Admin action: remove from repository and disable the selected publishes.

    Each publish has its repository state removed, then its status is set
    to disabled and all job/pending fields are cleared (skipped when
    already disabled). The accumulated repository changes are pushed at the
    end; per-object failures are collected and reported via admin messages
    instead of aborting the whole batch.
    """
    result = None
    failed_objects = []
    #import ipdb;ipdb.set_trace()
    # enforce ownership of the push lock for the duration of the batch
    try_set_push_owner("publish_admin",enforce=True)
    warning_message = None
    try:
        for publish in queryset:
            try:
                publish.remove_publish_from_repository()
                if publish.status != DisabledStatus.instance().name:
                    # clear job tracking state along with the status change
                    publish.status = DisabledStatus.instance().name
                    publish.pending_actions = None
                    publish.job_id = None
                    publish.job_batch_id = None
                    publish.job_status = None
                    publish.save(update_fields=['status','pending_actions','job_id','job_batch_id','job_status'])
            except:
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(publish.workspace.name,publish.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            # push whatever was committed, even if some objects failed
            try_push_to_repository('publish_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_admin",enforce=True)

    # report the outcome: push warning and/or per-object failures, else success
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Pushing changes to repository failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Disable failed for some selected publishs:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "Disable successfully for all selected publishs")
def _change_status(self,request,queryset,status,update_fields=None):
    """Admin action helper: advance each selected WMS server towards
    ``status`` (via ``get_next_status``) and push the batch to the
    borg state repository.

    Per-server failures are accumulated and shown as warnings; a push
    failure downgrades the whole action to a warning.
    """
    result = None
    failed_objects = []
    warning_message = None
    try_set_push_owner("wmsserver_admin",enforce=True)
    try:
        for server in queryset:
            try:
                next_status = server.get_next_status(server.status,status)
                if next_status != server.status:
                    server.status = next_status
                    server.save(update_fields=update_fields)
                # identical status: nothing to persist for this server
            except:
                logger.error(traceback.format_exc())
                exc_type, exc_value = sys.exc_info()[:2]
                failed_objects.append((
                    "{0}:{1}".format(server.workspace.name,server.name),
                    traceback.format_exception_only(exc_type, exc_value),
                ))
        try:
            try_push_to_repository('wmsserver_admin',enforce=True)
        except:
            exc_type, exc_value = sys.exc_info()[:2]
            warning_message = traceback.format_exception_only(exc_type, exc_value)
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmsserver_admin",enforce=True)

    if not failed_objects and not warning_message:
        messages.success(request, "All selected servers are processed successfully.")
        return

    if failed_objects:
        items = "".join("<li>{0} : {1}</li>".format(name, err) for name, err in failed_objects)
        if warning_message:
            messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected servers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message, items)))
        else:
            messages.warning(request, mark_safe("Some selected servers are processed failed:<ul>{0}</ul>".format(items)))
    else:
        messages.warning(request, mark_safe(warning_message))
def empty_gwc(self):
    """
    update layer's json for empty gwc to the repository

    Writes an ``empty_gwc`` action file for this WMS layer, commits it to
    the borg state repository and pushes.  Raises ``ValidationError`` if
    the layer has never been published.
    """
    if self.publish_status.unpublished:
        #layer is not published, no need to empty gwc
        raise ValidationError(
            "The wms layer({0}) is not published before.".format(
                self.name))

    json_filename = self.json_filename_abs('empty_gwc')
    try_set_push_owner("wmslayer")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.kmi_name
        json_out["workspace"] = self.server.workspace.name
        json_out["store"] = self.server.name
        json_out["action"] = "empty_gwc"
        json_out["publish_time"] = timezone.localtime(
            timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        # NOTE(review): "wb" + json.dump implies Python 2; on Python 3
        # json.dump writes str and would fail on a binary file — confirm
        # the target interpreter.
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],
                  addremove=True,
                  user="******",
                  message="Empty GWC of wms layer {}.{}".format(
                      self.server.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("wmslayer", hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("wmslayer")
def custom_delete_selected(self,request,queryset):
    """Admin action: delete the selected publish channels after the user has
    confirmed on the standard delete-confirmation page.

    Each deletion runs in its own transaction; failures are collected and
    reported, and one repository push is attempted for the whole batch.
    """
    if request.POST.get('post') != 'yes':
        #the confirm page, or user not confirmed
        return self.default_delete_action[0](self,request,queryset)

    #user confirm to delete the workspaces, execute the custom delete logic.
    result = None
    failed_publish_channels = []
    try_set_push_owner("publish_channel_admin",enforce=True)
    warning_message = None
    try:
        for publish_channel in queryset:
            try:
                with transaction.atomic():
                    publish_channel.delete()
            except:
                error = sys.exc_info()
                # BUG FIX: previously appended `workspace.name`, an undefined
                # name here, raising NameError in the error path; report the
                # failed channel's own name instead.
                failed_publish_channels.append((publish_channel.name,traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish_channel
                continue
        # Single push for the whole batch; failure becomes a warning.
        try:
            try_push_to_repository('publish_channel_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_channel_admin",enforce=True)
    if failed_publish_channels or warning_message:
        if failed_publish_channels:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected publish channels are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_publish_channels]))))
            else:
                messages.warning(request, mark_safe("Some selected publish channels are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_publish_channels]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected publish channels are deleted successfully")
def _change_status(self,request,queryset,action,update_fields=None):
    """Admin action helper: apply ``action`` to each selected live layer
    (via ``next_status``) and push the batch to the state repository.

    A layer is saved when its status changes or when a publish/unpublish
    is still pending; failures are accumulated and shown as warnings.
    """
    result = None
    failed_objects = []
    warning_message = None
    try_set_push_owner("livelayer_admin",enforce=True)
    try:
        for layer in queryset:
            try:
                new_status = layer.next_status(action)
                unchanged = (new_status == layer.status
                             and not layer.publish_required
                             and not layer.unpublish_required)
                if not unchanged:
                    layer.status = new_status
                    layer.save(update_fields=update_fields)
            except:
                logger.error(traceback.format_exc())
                exc_type, exc_value = sys.exc_info()[:2]
                failed_objects.append((
                    "{0}:{1}".format(layer.datasource,layer.kmi_name),
                    traceback.format_exception_only(exc_type, exc_value),
                ))
        try:
            try_push_to_repository('livelayer_admin',enforce=True)
        except:
            exc_type, exc_value = sys.exc_info()[:2]
            warning_message = traceback.format_exception_only(exc_type, exc_value)
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("livelayer_admin",enforce=True)

    if not failed_objects and not warning_message:
        messages.success(request, "All selected layers are processed successfully.")
        return

    if failed_objects:
        items = "".join("<li>{0} : {1}</li>".format(name, err) for name, err in failed_objects)
        if warning_message:
            messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message, items)))
        else:
            messages.warning(request, mark_safe("Some selected layers are processed failed:<ul>{0}</ul>".format(items)))
    else:
        messages.warning(request, mark_safe(warning_message))
def custom_delete_selected(self,request,queryset):
    """Admin action: delete the selected WMS servers after the user has
    confirmed on the standard delete-confirmation page.

    Failures are collected per server and reported; one repository push is
    attempted for the whole batch.
    """
    if request.POST.get('post') != 'yes':
        #the confirm page, or user not confirmed
        return self.default_delete_action[0](self,request,queryset)

    result = None
    failed_objects = []
    try_set_push_owner("wmsserver_admin",enforce=True)
    warning_message = None
    try:
        for server in queryset:
            #import ipdb;ipdb.set_trace()
            try:
                #delete the server
                server.delete()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(server.workspace.name,server.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        # Single push for the whole batch; failure becomes a warning.
        try:
            try_push_to_repository('wmsserver_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmsserver_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected servers are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected servers are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        # BUG FIX: user-facing message typo "ervers" -> "servers".
        messages.success(request, "All selected servers are deleted successfully.")
def empty_gwc(self,request,queryset):
    """Admin action: trigger an empty-GWC request for each selected publish.

    Publishes that are not Enabled are skipped and reported as failures;
    one repository push is attempted for the whole batch.
    """
    result = None
    failed_objects = []
    try_set_push_owner("publish_admin",enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                if l.publish_status not in [ResourceStatus.Enabled]:
                    #Publish is disabled.
                    failed_objects.append(("{0}:{1}".format(l.workspace.name,l.name),"Disabled, no need to empty gwc."))
                    continue
                l.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(l.workspace.name,l.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        # Single push for the whole batch; failure becomes a warning.
        try:
            try_push_to_repository('publish_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected publishs are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected publishs are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected publishs are processed successfully.")
def empty_gwc(self,request,queryset):
    """Admin action: trigger an empty-GWC request for each selected layer
    group.

    Groups that were never published are skipped and reported as failures;
    one repository push is attempted for the whole batch.
    """
    result = None
    failed_objects = []
    try_set_push_owner("layergroup_admin",enforce=True)
    warning_message = None
    try:
        for g in queryset:
            try:
                if g.publish_status.unpublished:
                    #Not published before.
                    failed_objects.append(("{0}:{1}".format(g.workspace,g.name),"Not published before, no need to empty gwc."))
                    continue
                g.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                # BUG FIX: previously referenced `l.server, l.name` — `l` is
                # undefined here (the loop variable is `g`), so any failure
                # raised NameError instead of being recorded.
                failed_objects.append(("{0}:{1}".format(g.workspace,g.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        # Single push for the whole batch; failure becomes a warning.
        try:
            try_push_to_repository('layergroup_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("layergroup_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected layers are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected layers are processed successfully.")
def publish(self):
    """
    publish layer's json reference (if exists) to the repository,

    Builds the layer's json descriptor (name/title/abstract, workspace,
    store, linked applications and optional geoserver settings), writes it,
    commits it to the borg state repository and pushes.
    """
    # presumably json_filename_abs is a property returning the absolute
    # path — the same pattern is used elsewhere in this file; TODO confirm.
    json_filename = self.json_filename_abs;
    try_set_push_owner("wmslayer")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.layer_name
        json_out["native_name"] = self.name
        json_out["title"] = self.layer_title
        json_out["abstract"] = self.layer_abstract
        json_out["workspace"] = self.server.workspace.name
        json_out["store"] = self.server.name
        json_out["publish_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        # local import: avoids a circular import at module load time
        from application.models import Application_Layers
        json_out["applications"] = ["{0}:{1}".format(o.application,o.order) for o in Application_Layers.objects.filter(wmslayer=self)]
        if self.geoserver_setting:
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="update wms layer {}.{}".format(self.server.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("wmslayer",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("wmslayer")
def unpublish(self):
    """
    remove store's json reference (if exists) from the repository,
    return True if store is removed for repository;
    return false, if layers does not existed in repository.
    """
    json_filename = self.json_filename_abs;
    if os.path.exists(json_filename):
        #file exists, layers is published, remove it.
        try_set_push_owner("layergroup")
        hg = None
        try:
            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
            # Stage the removal, then commit it in the same step.
            hg.remove(files=[json_filename])
            hg.commit(include=[json_filename],addremove=True, user="******", message="Remove layer group {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()

            try_push_to_repository("layergroup",hg)
        finally:
            if hg:
                hg.close()
            try_clear_push_owner("layergroup")
        return True
    else:
        # Nothing in the repository for this object; nothing to do.
        return False
def unpublish(self):
    """
    unpublish layer group

    Removes every json action file of the group ('publish' and
    'empty_gwc') from the borg state repository.  Returns True if files
    were removed, False if the group was never published.
    """
    json_files = [ self.json_filename_abs(action) for action in [ 'publish','empty_gwc' ] ]
    #get all existing files.
    json_files = [ f for f in json_files if os.path.exists(f) ]
    if json_files:
        #file exists, layers is published, remove it.
        try_set_push_owner("layergroup")
        hg = None
        try:
            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
            hg.remove(files=json_files)
            hg.commit(include=json_files,addremove=True, user="******", message="Remove layer group {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()

            try_push_to_repository("layergroup",hg)
        finally:
            if hg:
                hg.close()
            try_clear_push_owner("layergroup")
        return True
    else:
        return False
def create(self):
    """Publish the meta data of every layer listed in ``self.data['layers']``.

    Each entry has the form ``"workspace:name"``.  The name is resolved in
    order against Publish, LiveLayer, LiveSqlViewLayer and WmsLayer; the
    first match is published.  Returns a dict mapping each layer key to a
    per-layer ``{"status", "message"}`` result plus an overall ``"status"``
    flag.  All generated files are pushed to the repository in one step.
    """
    resp = {"status": True}
    result = None
    try_set_push_owner("meta_resource")
    try:
        for layer in self.data.get('layers') or []:
            workspace, name = layer.split(":")
            resp[layer] = {}
            #get the workspace object
            try:
                workspaces = Workspace.objects.filter(name=workspace)
                if not len(workspaces):
                    #workspace does not exist
                    resp["status"] = False
                    resp[layer]["status"] = False
                    # NOTE(review): .format(name) is a no-op here — the
                    # string has no placeholder; the layer name never
                    # appears in the message.
                    resp[layer][
                        "message"] = "Workspace does not exist.".format(
                            name)
                    continue
                try:
                    #try to locate it from publishs, and publish the meta data if found
                    pub = Publish.objects.get(workspace__in=workspaces,
                                              name=name)
                    try:
                        pub.publish_meta_data()
                        resp[layer]["status"] = True
                        resp[layer]["message"] = "Succeed."
                        continue
                    except:
                        msg = traceback.format_exc()
                        logger.error(msg)
                        resp["status"] = False
                        resp[layer]["status"] = False
                        resp[layer][
                            "message"] = "Publish meta data failed!{}".format(
                                msg)
                        continue
                except Publish.DoesNotExist:
                    pass
                #not a publish object, try to locate it from live layers, and publish it if found
                try:
                    livelayer = LiveLayer.objects.filter(
                        datasource__workspace__in=workspaces).get(
                            Q(name=name) | Q(table=name))
                    try:
                        target_status = livelayer.next_status(
                            ResourceAction.PUBLISH)
                        livelayer.status = target_status
                        livelayer.save(
                            update_fields=["status", "last_publish_time"])
                        resp[layer]["status"] = True
                        resp[layer]["message"] = "Succeed."
                        continue
                    except:
                        msg = traceback.format_exc()
                        logger.error(msg)
                        resp["status"] = False
                        resp[layer]["status"] = False
                        resp[layer][
                            "message"] = "Publish live layer failed!{}".format(
                                msg)
                        continue
                except LiveLayer.DoesNotExist:
                    pass
                #not a publish object, try to locate it from live sqlview layers, and publish it if found
                try:
                    livelayer = LiveSqlViewLayer.objects.get(
                        datasource__workspace__in=workspaces, name=name)
                    try:
                        target_status = livelayer.next_status(
                            ResourceAction.PUBLISH)
                        livelayer.status = target_status
                        livelayer.save(
                            update_fields=["status", "last_publish_time"])
                        resp[layer]["status"] = True
                        resp[layer]["message"] = "Succeed."
                        continue
                    except:
                        msg = traceback.format_exc()
                        logger.error(msg)
                        resp["status"] = False
                        resp[layer]["status"] = False
                        resp[layer][
                            "message"] = "Publish live sqlview layer failed!{}".format(
                                msg)
                        continue
                except LiveSqlViewLayer.DoesNotExist:
                    pass
                #not a publish object, try to locate it from wms layers, and publish it if found
                try:
                    wmslayer = WmsLayer.objects.get(
                        server__workspace__in=workspaces, kmi_name=name)
                    try:
                        target_status = wmslayer.next_status(
                            ResourceAction.PUBLISH)
                        wmslayer.status = target_status
                        wmslayer.save(
                            update_fields=["status", "last_publish_time"])
                        resp[layer]["status"] = True
                        resp[layer]["message"] = "Succeed."
                        continue
                    except:
                        msg = traceback.format_exc()
                        logger.error(msg)
                        resp["status"] = False
                        resp[layer]["status"] = False
                        resp[layer][
                            "message"] = "Publish wms layer failed!{}".format(
                                msg)
                        continue
                except WmsLayer.DoesNotExist:
                    #layer does not exist,
                    resp["status"] = False
                    resp[layer]["status"] = False
                    resp[layer]["message"] = "Does not exist.".format(name)
                    continue
            except:
                msg = traceback.format_exc()
                logger.error(msg)
                resp["status"] = False
                resp[layer]["status"] = False
                resp[layer]["message"] = msg
                continue
        #push all files into repository at once.
        try:
            try_push_to_repository('meta_resource', enforce=True)
        except:
            #push failed, set status to false, and proper messages for related layers.
            msg = traceback.format_exc()
            logger.error(msg)
            resp["status"] = False
            for layer in self.data.get('layers') or []:
                if resp[layer]["status"]:
                    #publish succeed but push failed
                    resp[layer]["status"] = False
                    resp[layer][
                        "message"] = "Push to repository failed!{}".format(
                            msg)
    finally:
        try_clear_push_owner("meta_resource")

    return resp
def create(self):
    """Publish the meta data of every layer listed in ``self.data['layers']``.

    Older variant of ``create``: resolves each ``"workspace:name"`` entry
    against Publish, then LiveLayer, then WmsLayer (no sqlview layers).
    Returns a dict of per-layer ``{"status", "message"}`` results plus an
    overall ``"status"`` flag; all files are pushed in one step.
    """
    resp = {"status":True}
    result = None
    try_set_push_owner("meta_resource")
    try:
        for layer in self.data.get('layers') or []:
            workspace,name = layer.split(":")
            resp[layer] = {}
            #get the workspace object
            try:
                workspaces = Workspace.objects.filter(name=workspace)
                if not len(workspaces):
                    #workspace does not exist
                    resp["status"] = False
                    resp[layer]["status"] = False
                    # NOTE(review): .format(name) is a no-op — the message
                    # string contains no placeholder.
                    resp[layer]["message"] = "Workspace does not exist.".format(name)
                    continue
                try:
                    #try to locate it from publishs, and publish the meta data if found
                    pub = Publish.objects.get(workspace__in=workspaces,name=name)
                    try:
                        pub.publish_meta_data()
                        resp[layer]["status"] = True
                        resp[layer]["message"] = "Succeed."
                    except:
                        msg = traceback.format_exc()
                        logger.error(msg)
                        resp["status"] = False
                        resp[layer]["status"] = False
                        resp[layer]["message"] = "Publish meta data failed!{}".format(msg)
                        continue
                except Publish.DoesNotExist:
                    #not a publish object, try to locate it from live layers, and publish it if found
                    try:
                        livelayer = LiveLayer.objects.filter(datasource__workspace__in=workspaces).get(Q(name=name) | Q(table=name))
                        try:
                            target_status = livelayer.next_status(ResourceAction.PUBLISH)
                            livelayer.status = target_status
                            livelayer.save(update_fields=["status","last_publish_time"])
                            resp[layer]["status"] = True
                            resp[layer]["message"] = "Succeed."
                        except :
                            msg = traceback.format_exc()
                            logger.error(msg)
                            resp["status"] = False
                            resp[layer]["status"] = False
                            resp[layer]["message"] = "Publish live layer failed!{}".format(msg)
                            continue
                    except LiveLayer.DoesNotExist:
                        #not a publish object, try to locate it from wms layers, and publish it if found
                        try:
                            wmslayer = WmsLayer.objects.get(server__workspace__in=workspaces,kmi_name=name)
                            try:
                                target_status = wmslayer.next_status(ResourceAction.PUBLISH)
                                wmslayer.status = target_status
                                wmslayer.save(update_fields=["status","last_publish_time"])
                                resp[layer]["status"] = True
                                resp[layer]["message"] = "Succeed."
                            except:
                                msg = traceback.format_exc()
                                logger.error(msg)
                                resp["status"] = False
                                resp[layer]["status"] = False
                                resp[layer]["message"] = "Publish wms layer failed!{}".format(msg)
                                continue
                        except WmsLayer.DoesNotExist:
                            #layer does not exist,
                            resp["status"] = False
                            resp[layer]["status"] = False
                            resp[layer]["message"] = "Does not exist.".format(name)
                            continue
            except :
                msg = traceback.format_exc()
                logger.error(msg)
                resp["status"] = False
                resp[layer]["status"] = False
                resp[layer]["message"] = msg
                continue
        #push all files into repository at once.
        try:
            try_push_to_repository('meta_resource',enforce=True)
        except:
            #push failed, set status to false, and proper messages for related layers.
            msg = traceback.format_exc()
            logger.error(msg)
            resp["status"] = False
            for layer in self.data.get('layers') or []:
                if resp[layer]["status"]:
                    #publish succeed but push failed
                    resp[layer]["status"] = False
                    resp[layer]["message"] = "Push to repository failed!{}".format(msg)
    finally:
        try_clear_push_owner("meta_resource")

    return resp
def unpublish(self):
    """Write a 'remove' action file for this WMS store, commit and push it.

    If the last publish task was not itself a removal, a fresh remove
    descriptor is built, reusing name/workspace/channel/sync settings from
    the last published meta data when available; otherwise the previous
    publish json is re-used as-is with only the remove time updated.
    """
    publish_file = self.json_filename_abs('publish')
    publish_json = None
    if os.path.exists(publish_file):
        with open(publish_file, "r") as f:
            publish_json = json.loads(f.read())
    else:
        publish_json = {}

    json_file = self.json_filename_abs('unpublish')
    json_out = None

    try_set_push_owner("wmsserver")
    hg = None
    try:
        if publish_json.get("action", "publish") != "remove":
            json_out = {}
            json_out["name"] = self.name
            json_out["workspace"] = self.workspace.name
            json_out["channel"] = self.workspace.publish_channel.name
            json_out['action'] = 'remove'
            json_out[
                "sync_geoserver_data"] = self.workspace.publish_channel.sync_geoserver_data

            #retrieve meta data from the last published task
            meta_json = publish_json
            if "meta" in publish_json and "file" in publish_json["meta"]:
                # Strip the master path prefix to get the local file path.
                meta_file = publish_json["meta"]["file"][
                    len(BorgConfiguration.MASTER_PATH_PREFIX):]
                if os.path.exists(meta_file):
                    with open(meta_file, "r") as f:
                        meta_json = json.loads(f.read())
                else:
                    meta_json = {}

            # Prefer the values recorded at publish time over current ones.
            for key in [
                    "name", "workspace", "channel", "sync_geoserver_data"
            ]:
                if key in meta_json:
                    json_out[key] = meta_json[key]
        else:
            # Already a remove descriptor: re-use it verbatim.
            json_out = publish_json

        json_out["remove_time"] = timezone.localtime(
            timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_file)):
            os.makedirs(os.path.dirname(json_file))

        with open(json_file, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_file],
                  addremove=True,
                  user="******",
                  message="Unpublish wms store {}.{}".format(
                      self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("wmsserver", hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("wmsserver")
def publish(self):
    """
    publish store's json reference (if exists) to the repository,

    Writes the store's meta data file, then a 'publish' action file
    referencing it (path + md5), commits both to the borg state
    repository and pushes.
    """
    try_set_push_owner("wmsserver")
    hg = None
    try:
        meta_data = {}
        meta_data["name"] = self.name
        meta_data["capability_url"] = self.get_capability_url
        meta_data["channel"] = self.workspace.publish_channel.name
        meta_data["username"] = self.user or ""
        meta_data["password"] = self.password or ""
        meta_data["workspace"] = self.workspace.name
        meta_data[
            "sync_geoserver_data"] = self.workspace.publish_channel.sync_geoserver_data

        if self.geoserver_setting:
            meta_data["geoserver_setting"] = json.loads(
                self.geoserver_setting)

        #write meta data file
        file_name = "{}.{}.meta.json".format(self.workspace.name, self.name)
        meta_file = os.path.join(BorgConfiguration.WMS_STORE_DIR, file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))
        with open(meta_file, "wb") as output:
            json.dump(meta_data, output, indent=4)

        # Action file points at the meta file with an md5 for verification.
        json_out = {}
        json_out['meta'] = {
            "file":
            "{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file),
            "md5":
            file_md5(meta_file)
        }
        json_out['action'] = 'publish'
        json_out["publish_time"] = timezone.localtime(
            timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        json_filename = self.json_filename_abs('publish')

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],
                  addremove=True,
                  user="******",
                  message="Update wms store {}.{}".format(
                      self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("wmsserver", hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("wmsserver")
def publish(self):
    """
    publish layer's json reference (if exists) to the repository,

    Updates the catalogue service, writes the layer's meta data file and
    a 'publish' action file referencing it, removes any stale
    'empty_gwc' action files, then commits everything and pushes.
    """
    json_filename = self.json_filename_abs('publish')

    try_set_push_owner("wmslayer")
    hg = None
    try:
        meta_data = self.update_catalogue_service(
            extra_datas={
                "publication_date":
                datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
            })

        #write meta data file
        file_name = "{}.{}.meta.json".format(self.server.workspace.name,
                                             self.kmi_name)
        meta_file = os.path.join(BorgConfiguration.WMS_LAYER_DIR, file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))
        with open(meta_file, "wb") as output:
            json.dump(meta_data, output, indent=4)

        # Action file points at the meta file with an md5 for verification.
        json_out = {}
        json_out['meta'] = {
            "file":
            "{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file),
            "md5":
            file_md5(meta_file)
        }
        json_out['action'] = "publish"
        json_out["publish_time"] = timezone.localtime(
            timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

        #remove other related json files
        json_files = [
            self.json_filename_abs(action) for action in ['empty_gwc']
        ]
        #get all existing files.
        json_files = [f for f in json_files if os.path.exists(f)]
        if json_files:
            hg.remove(files=json_files)

        json_files.append(json_filename)
        hg.commit(include=json_files,
                  addremove=True,
                  user="******",
                  message="update wms layer {}.{}".format(
                      self.server.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("wmslayer", hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("wmslayer")
def unpublish(self):
    """
    unpublish layer group

    Deletes the group's catalogue record (404 is tolerated), writes a
    'remove' meta data + action file pair, removes stale 'empty_gwc'
    files, then commits everything and pushes.
    """
    #remove it from catalogue service
    res = requests.delete("{}/catalogue/api/records/{}:{}/".format(
        settings.CSW_URL, self.workspace.name, self.name),
                          auth=(settings.CSW_USER, settings.CSW_PASSWORD))
    # 404 means the record was already absent — not an error here.
    if res.status_code != 404:
        res.raise_for_status()

    json_filename = self.json_filename_abs('unpublish')
    try_set_push_owner("layergroup")
    hg = None
    try:
        meta_data = {}
        #add extra data to meta data
        meta_data["workspace"] = self.workspace.name
        meta_data["name"] = self.name
        meta_data["native_name"] = self.name
        meta_data["auth_level"] = self.workspace.auth_level
        meta_data["spatial_data"] = True
        meta_data["channel"] = self.workspace.publish_channel.name
        meta_data[
            "sync_geoserver_data"] = self.workspace.publish_channel.sync_geoserver_data

        #write meta data file
        file_name = "{}.meta.json".format(self.name)
        meta_file = os.path.join(BorgConfiguration.UNPUBLISH_DIR,
                                 self.workspace.publish_channel.name,
                                 self.workspace.name, "layergroups",
                                 file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))
        with open(meta_file, "wb") as output:
            json.dump(meta_data, output, indent=4)

        # Action file points at the meta file with an md5 for verification.
        json_out = {}
        json_out['meta'] = {
            "file":
            "{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file),
            "md5":
            file_md5(meta_file)
        }
        json_out['action'] = "remove"
        json_out["remove_time"] = timezone.localtime(
            timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

        #remove other related json files
        json_files = [
            self.json_filename_abs(action) for action in ['empty_gwc']
        ]
        #get all existing files.
        json_files = [f for f in json_files if os.path.exists(f)]
        if json_files:
            hg.remove(files=json_files)

        json_files.append(json_filename)
        hg.commit(include=json_files,
                  user="******",
                  addremove=True,
                  message="Unpublish layer group {}.{}".format(
                      self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("layergroup", hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("layergroup")
def publish(self):
    """
    Only publish the member layers which is already published.

    Builds the group descriptor from its published members (wms layers,
    publishes and sub-groups, in configured order) and published
    dependent groups, writes it, removes stale 'empty_gwc' files, then
    commits and pushes.  Raises LayerGroupEmpty if no member is
    published.
    """
    json_filename = self.json_filename_abs('publish')

    try_set_push_owner("layergroup")
    hg = None
    try:
        json_out = self.update_catalogue_service(
            extra_datas={
                "publication_date":
                datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
            })

        layers = []
        # Members keep their configured ordering; only published members
        # are included.
        for group_layer in LayerGroupLayers.objects.filter(
                group=self).order_by("order"):
            if group_layer.layer and group_layer.layer.is_published:
                layers.append({
                    "type": "wms_layer",
                    "name": group_layer.layer.kmi_name,
                    "store": group_layer.layer.server.name,
                    "workspace": group_layer.layer.server.workspace.name
                })
            elif group_layer.publish and group_layer.publish.is_published:
                layers.append({
                    "type": "publish",
                    "name": group_layer.publish.name,
                    "workspace": group_layer.publish.workspace.name
                })
            elif group_layer.sub_group and group_layer.sub_group.is_published:
                layers.append({
                    "type": "group",
                    "name": group_layer.sub_group.name,
                    "workspace": group_layer.sub_group.workspace.name
                })
        if not layers:
            #layergroup is empty,remove it.
            raise LayerGroupEmpty("Layer group can't be empty.")
        json_out["layers"] = layers
        json_out["srs"] = self.srs or None
        json_out["publish_time"] = timezone.localtime(
            timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        inclusions = self.get_inclusions()
        dependent_groups = []
        for group in inclusions[2].keys():
            if group.is_published:
                dependent_groups.append({
                    "name": group.name,
                    "workspace": group.workspace.name
                })

        json_out["dependent_groups"] = dependent_groups

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))

        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

        #remove other related json files
        json_files = [
            self.json_filename_abs(action) for action in ['empty_gwc']
        ]
        #get all existing files.
        json_files = [f for f in json_files if os.path.exists(f)]
        if json_files:
            hg.remove(files=json_files)

        json_files.append(json_filename)
        hg.commit(include=json_files,
                  user="******",
                  addremove=True,
                  message="Update layer group {}.{}".format(
                      self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("layergroup", hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("layergroup")
def unpublish(self):
    """
    unpublish layer group

    Deletes the group's catalogue record (404 is tolerated), then writes
    a 'remove' action file — reusing name/workspace/channel/spatial/sync
    values from the last published meta data when available — removes
    stale 'empty_gwc' files, and finally commits and pushes.
    """
    #remove it from catalogue service
    res = requests.delete("{}/catalogue/api/records/{}:{}/".format(
        settings.CSW_URL, self.workspace.name, self.name),
                          auth=(settings.CSW_USER, settings.CSW_PASSWORD),
                          verify=settings.CSW_CERT_VERIFY)
    # 404 means the record was already absent — not an error here.
    if res.status_code != 404:
        res.raise_for_status()

    publish_file = self.json_filename_abs('publish')
    publish_json = None
    if os.path.exists(publish_file):
        with open(publish_file, "r") as f:
            publish_json = json.loads(f.read())
    else:
        publish_json = {}

    json_file = self.json_filename_abs('unpublish')
    json_out = None

    try_set_push_owner("layergroup")
    hg = None
    try:
        if publish_json.get("action", "publish") != "remove":
            json_out = {}
            json_out["name"] = self.name
            json_out["workspace"] = self.workspace.name
            json_out["spatial_data"] = True
            json_out["channel"] = self.workspace.publish_channel.name
            json_out[
                "sync_geoserver_data"] = self.workspace.publish_channel.sync_geoserver_data
            json_out['action'] = "remove"

            #retrieve meta data from the last publish task
            meta_json = publish_json
            if "meta" in publish_json and "file" in publish_json["meta"]:
                # Strip the master path prefix to get the local file path.
                meta_file = publish_json["meta"]["file"][
                    len(BorgConfiguration.MASTER_PATH_PREFIX):]
                if os.path.exists(meta_file):
                    with open(meta_file, "r") as f:
                        meta_json = json.loads(f.read())
                else:
                    meta_json = {}

            # Prefer the values recorded at publish time over current ones.
            for key in [
                    "name", "workspace", "channel", "spatial_data",
                    "sync_geoserver_data"
            ]:
                if key in meta_json:
                    json_out[key] = meta_json[key]
        else:
            # Already a remove descriptor: re-use it verbatim.
            json_out = publish_json

        json_out["remove_time"] = timezone.localtime(
            timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

        #create the dir if required
        if not os.path.exists(os.path.dirname(json_file)):
            os.makedirs(os.path.dirname(json_file))

        with open(json_file, "wb") as output:
            json.dump(json_out, output, indent=4)

        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

        #remove other related json files
        json_files = [
            self.json_filename_abs(action) for action in ['empty_gwc']
        ]
        #get all existing files.
        json_files = [f for f in json_files if os.path.exists(f)]
        if json_files:
            hg.remove(files=json_files)

        json_files.append(json_file)
        hg.commit(include=json_files,
                  user="******",
                  addremove=True,
                  message="Unpublish layer group {}.{}".format(
                      self.workspace.name, self.name))
        increase_committed_changes()

        try_push_to_repository("layergroup", hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("layergroup")