def publish_meta_data(self,request,queryset):
    """
    Django admin action: publish meta data for every selected publish object,
    then push all resulting changes to the repository in a single batch.

    Per-object failures are collected in ``failed_objects`` and reported via
    ``messages.warning``; a repository-push failure is reported separately.
    """
    result = None
    failed_objects = []
    #import ipdb;ipdb.set_trace()
    # Claim exclusive push ownership so all commits below are pushed together.
    try_set_push_owner("publish_admin",enforce=True)
    warning_message = None
    try:
        for publish in queryset:
            try:
                publish.publish_meta_data()
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
            except:
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(publish.workspace.name,publish.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        # Push everything committed above in one operation.
        try:
            try_push_to_repository('publish_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        # Always release push ownership, even if publishing/pushing failed.
        try_clear_push_owner("publish_admin",enforce=True)
    # Report the combined outcome to the admin user.
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Pushing changes to repository failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Publish meta data failed for some selected publishs:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "Publish meta data successfully for all selected publishs")
def publish(self):
    """
    Publish this wms store's json reference (if exists) to the repository.

    Writes the store's metadata to ``json_filename_abs``, commits it to the
    borg state repository and pushes via the shared push-owner mechanism.
    """
    json_filename = self.json_filename_abs;
    try_set_push_owner("wmsserver")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.name
        json_out["capability_url"] = self.get_capability_url
        json_out["username"] = self.user or ""
        json_out["password"] = self.password or ""
        json_out["workspace"] = self.workspace.name
        json_out["publish_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        if self.geoserver_setting:
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        # NOTE(review): "wb" + json.dump is Python-2 only; on Python 3 json.dump
        # needs a text-mode file — confirm target runtime.
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Update wms store {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()
        try_push_to_repository("wmsserver",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("wmsserver")
def empty_gwc(self):
    """
    Update this layer's 'empty_gwc' json in the repository, instructing the
    downstream consumer to empty the layer's GWC (GeoWebCache) cache.

    Raises ValidationError if the layer has never been published.
    """
    if self.publish_status.unpublished:
        #layer is not published, no need to empty gwc
        raise ValidationError("The wms layer({0}) is not published before.".format(self.kmi_name))
    json_filename = self.json_filename_abs('empty_gwc');
    try_set_push_owner("livelayer")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.kmi_name
        json_out["workspace"] = self.datasource.workspace.name
        json_out["store"] = self.datasource.name
        json_out["action"] = "empty_gwc"
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Empty GWC of live layer {}.{}".format(self.datasource.workspace.name, self.kmi_name))
        increase_committed_changes()
        try_push_to_repository("livelayer",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("livelayer")
def empty_gwc(self):
    """
    Update this layer group's 'empty_gwc' json in the repository, instructing
    the downstream consumer to empty the group's GWC cache.

    Silently returns (no-op) when the group is not currently published.
    """
    if self.status not in [ResourceStatus.PUBLISHED,ResourceStatus.UPDATED]:
        #layer is not published, no need to empty gwc
        return
    json_filename = self.json_filename_abs;
    try_set_push_owner("layergroup")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.name
        json_out["workspace"] = self.workspace.name
        json_out["action"] = "empty_gwc"
        json_out["empty_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        if self.geoserver_setting:
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Empty GWC of layer group {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()
        try_push_to_repository("layergroup",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("layergroup")
def unpublish(self): """ remove store's json reference (if exists) from the repository, return True if store is removed for repository; return false, if layers does not existed in repository. """ #remove it from catalogue service res = requests.delete("{}/catalogue/api/records/{}:{}/".format(settings.CSW_URL,self.datasource.workspace.name,self.kmi_name),auth=(settings.CSW_USER,settings.CSW_PASSWORD)) if res.status_code != 404: res.raise_for_status() json_files = [ self.json_filename_abs(action) for action in [ 'publish','empty_gwc' ] ] #get all existing files. json_files = [ f for f in json_files if os.path.exists(f) ] if json_files: #file exists, layers is published, remove it. try_set_push_owner("livelayer") hg = None try: hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY) hg.remove(files=json_files) hg.commit(include=json_files,addremove=True, user="******", message="Remove live layer {}.{}".format(self.datasource.workspace.name, self.kmi_name)) increase_committed_changes() try_push_to_repository("livelayer",hg) finally: if hg: hg.close() try_clear_push_owner("livelayer") return True else: return False
def unpublish(self):
    """
    Remove this live store's json reference (if it exists) from the repository.

    Returns True if the store's file was removed from the repository; False if
    no json file existed (i.e. the store was not published).
    """
    json_files = [ self.json_filename_abs(action) for action in [ 'publish' ] ]
    #get all existing files.
    json_files = [ f for f in json_files if os.path.exists(f) ]
    if json_files:
        #file exists, layers is published, remove it.
        try_set_push_owner("liveserver")
        hg = None
        try:
            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
            hg.remove(files=json_files)
            hg.commit(include=json_files,addremove=True, user="******", message="Remove live store {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()
            try_push_to_repository("liveserver",hg)
        finally:
            if hg:
                hg.close()
            try_clear_push_owner("liveserver")
        return True
    else:
        return False
def publish(self):
    """
    Publish this layer group's json to the repository.

    Only member layers which are already published are included; if no member
    is published the group is considered empty and LayerGroupEmpty is raised.
    Also refreshes the catalogue service record and removes any stale
    'empty_gwc' json file in the same commit.
    """
    json_filename = self.json_filename_abs('publish');
    try_set_push_owner("layergroup")
    hg = None
    try:
        # Update the catalogue record first; its returned metadata seeds json_out.
        json_out = self.update_catalogue_service(extra_datas={"publication_date": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")})
        layers = []
        # Collect published members in declared order; each member is exactly
        # one of: wms layer, publish, or nested sub-group.
        for group_layer in LayerGroupLayers.objects.filter(group=self).order_by("order"):
            if group_layer.layer and group_layer.layer.is_published:
                layers.append({"type":"wms_layer","name":group_layer.layer.name,"store":group_layer.layer.server.name,"workspace":group_layer.layer.server.workspace.name})
            elif group_layer.publish and group_layer.publish.is_published:
                layers.append({"type":"publish","name":group_layer.publish.name,"workspace":group_layer.publish.workspace.name})
            elif group_layer.sub_group and group_layer.sub_group.is_published:
                layers.append({"type":"group","name":group_layer.sub_group.name,"workspace":group_layer.sub_group.workspace.name})
        if not layers:
            #layergroup is empty,remove it.
            raise LayerGroupEmpty("Layer group can't be empty.")
        json_out["layers"] = layers
        json_out["srs"] = self.srs or None
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")
        inclusions = self.get_inclusions()
        dependent_groups = []
        # inclusions[2] presumably maps dependent group objects — TODO confirm
        # against get_inclusions().
        for group in inclusions[2].keys():
            if group.is_published:
                dependent_groups.append({"name":group.name,"workspace":group.workspace.name})
        json_out["dependent_groups"] = dependent_groups
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        #remove other related json files
        json_files = [ self.json_filename_abs(action) for action in [ 'empty_gwc' ] ]
        #get all existing files.
        json_files = [ f for f in json_files if os.path.exists(f) ]
        if json_files:
            hg.remove(files=json_files)
        json_files.append(json_filename)
        hg.commit(include=json_files, user="******",addremove=True, message="Update layer group {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()
        try_push_to_repository("layergroup",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("layergroup")
def publish(self):
    """
    Publish this layer group's json reference (if exists) to the repository.

    Builds the member-layer list, group metadata and dependent-group list,
    writes them to ``json_filename_abs``, commits and pushes. Raises
    LayerGroupEmpty when no member qualifies for inclusion.
    """
    json_filename = self.json_filename_abs;
    try_set_push_owner("layergroup")
    hg = None
    try:
        layers = []
        for group_layer in LayerGroupLayers.objects.filter(group=self).order_by("order"):
            if group_layer.layer and group_layer.layer.is_published:
                layers.append({"type":"wms_layer","name":group_layer.layer.layer_name,"store":group_layer.layer.server.name,"workspace":group_layer.layer.server.workspace.name})
            # NOTE(review): unlike the layer/sub_group branches, this one does
            # not check group_layer.publish.is_published — confirm intentional.
            elif group_layer.publish :
                layers.append({"type":"publish","name":group_layer.publish.name,"workspace":group_layer.publish.workspace.name})
            elif group_layer.sub_group and group_layer.sub_group.is_published:
                layers.append({"type":"group","name":group_layer.sub_group.name,"workspace":group_layer.sub_group.workspace.name})
        if not layers:
            #layergroup is empty,remove it.
            raise LayerGroupEmpty("Layer group can't be empty.")
        json_out = {}
        json_out["layers"] = layers;
        json_out["name"] = self.name
        json_out["title"] = self.title or ""
        json_out["abstract"] = self.abstract or ""
        json_out["workspace"] = self.workspace.name
        json_out["srs"] = self.srs
        json_out["publish_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        inclusions = self.get_inclusions()
        dependent_groups = []
        for group in inclusions[2].keys():
            if group.is_published:
                dependent_groups.append({"name":group.name,"workspace":group.workspace.name})
        json_out["dependent_groups"] = dependent_groups
        if self.geoserver_setting:
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename], user="******",addremove=True, message="Update layer group {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()
        try_push_to_repository("layergroup",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("layergroup")
def publish(self):
    """
    Publish this live store's json reference (if exists) to the repository.

    Writes a separate meta-data file (connection details) to LIVE_STORE_DIR,
    then commits a small json pointer (path + md5) into the borg state
    repository and pushes it.
    """
    try_set_push_owner("liveserver")
    hg = None
    try:
        meta_data = {}
        meta_data["name"] = self.name
        meta_data["host"] = self.host
        meta_data["port"] = self.port
        meta_data["database"] = self.db_name
        meta_data["user"] = self.user
        meta_data["passwd"] = self.password
        meta_data["schema"] = self.schema
        meta_data["workspace"] = self.workspace.name
        if self.geoserver_setting:
            meta_data["geoserver_setting"] = json.loads(self.geoserver_setting)
        #write meta data file
        file_name = "{}.{}.meta.json".format(self.workspace.name,self.name)
        meta_file = os.path.join(BorgConfiguration.LIVE_STORE_DIR,file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))
        with open(meta_file,"wb") as output:
            json.dump(meta_data, output, indent=4)
        # The committed json only references the meta file by path and md5;
        # the meta file itself lives outside the state repository.
        json_out = {}
        json_out['meta'] = {"file":"{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file),"md5":file_md5(meta_file)}
        json_out['action'] = 'publish'
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")
        json_filename = self.json_filename_abs('publish');
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="Update live store {}.{}".format(self.workspace.name, self.name))
        increase_committed_changes()
        try_push_to_repository("liveserver",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("liveserver")
def publish(self):
    """
    Publish this live layer's json reference (if exists) to the repository.

    Updates the catalogue service, writes the returned meta data to
    LIVE_LAYER_DIR, commits a pointer json (path + md5) and removes any stale
    'empty_gwc' json in the same commit, then pushes.
    """
    json_filename = self.json_filename_abs('publish');
    try_set_push_owner("livelayer")
    hg = None
    try:
        meta_data = self.update_catalogue_service(md5=True,extra_datas={"publication_date":datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")})
        #write meta data file
        file_name = "{}.{}.meta.json".format(self.datasource.workspace.name,self.kmi_name)
        meta_file = os.path.join(BorgConfiguration.LIVE_LAYER_DIR,file_name)
        #create the dir if required
        if not os.path.exists(os.path.dirname(meta_file)):
            os.makedirs(os.path.dirname(meta_file))
        with open(meta_file,"wb") as output:
            json.dump(meta_data, output, indent=4)
        json_out = {}
        json_out['meta'] = {"file":"{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, meta_file),"md5":file_md5(meta_file)}
        json_out['action'] = "publish"
        json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        #remove other related json files
        json_files = [ self.json_filename_abs(action) for action in [ 'empty_gwc' ] ]
        #get all existing files.
        json_files = [ f for f in json_files if os.path.exists(f) ]
        if json_files:
            hg.remove(files=json_files)
        json_files.append(json_filename)
        hg.commit(include=json_files,addremove=True, user="******", message="update live layer {}.{}".format(self.datasource.workspace.name, self.kmi_name))
        increase_committed_changes()
        try_push_to_repository("livelayer",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("livelayer")
def empty_gwc(self,request,queryset):
    """
    Django admin action: empty the GWC cache of every selected wms layer,
    cascading to any layer group containing the layer, then push all changes
    to the repository in one batch. Outcomes are reported via messages.
    """
    result = None
    failed_objects = []
    try_set_push_owner("wmslayer_admin",enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                if l.status not in [ResourceStatus.PUBLISHED,ResourceStatus.UPDATED]:
                    #Not published before.
                    failed_objects.append(("{0}:{1}".format(l.server,l.name),"Not published before, no need to empty gwc."))
                    continue
                l.empty_gwc()
                #empty the related layergroup's cache
                for layer in LayerGroupLayers.objects.filter(layer = l):
                    target_status = layer.group.get_next_status(layer.group.status,ResourceStatus.SIDE_PUBLISH)
                    # Only groups that would end up in PUBLISH state get their
                    # cache emptied as a side effect.
                    if target_status == ResourceStatus.PUBLISH:
                        layer.group.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(l.server,l.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            try_push_to_repository('wmslayer_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmslayer_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected layers are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected layers are processed successfully.")
def disable_publish(self,request,queryset):
    """
    Django admin action: disable every selected publish — remove its files
    from the repository and reset its status/job fields — then push all
    changes in one batch. Outcomes are reported via messages.
    """
    result = None
    failed_objects = []
    #import ipdb;ipdb.set_trace()
    try_set_push_owner("publish_admin",enforce=True)
    warning_message = None
    try:
        for publish in queryset:
            try:
                publish.remove_publish_from_repository()
                # Only touch the DB row if it is not already disabled.
                if publish.status != DisabledStatus.instance().name:
                    publish.status = DisabledStatus.instance().name
                    publish.pending_actions = None
                    publish.job_id = None
                    publish.job_batch_id = None
                    publish.job_status = None
                    publish.save(update_fields=['status','pending_actions','job_id','job_batch_id','job_status'])
            except:
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(publish.workspace.name,publish.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            try_push_to_repository('publish_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Pushing changes to repository failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Disable failed for some selected publishs:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "Disable successfully for all selected publishs")
def _change_status(self,request,queryset,status,update_fields=None):
    """
    Shared admin-action helper: apply a status transition to every selected
    wms server and push the resulting changes in one batch.

    status        -- the requested transition/action passed to get_next_status.
    update_fields -- optional field list forwarded to Model.save().
    """
    result = None
    failed_objects = []
    try_set_push_owner("wmsserver_admin",enforce=True)
    warning_message = None
    try:
        for server in queryset:
            #import ipdb;ipdb.set_trace()
            try:
                target_status = server.get_next_status(server.status,status)
                if target_status == server.status:
                    #status not changed
                    continue
                else:
                    server.status = target_status
                    server.save(update_fields=update_fields)
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(server.workspace.name,server.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            try_push_to_repository('wmsserver_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmsserver_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected servers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected servers are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected servers are processed successfully.")
def custom_delete_selected(self,request,queryset):
    """
    Django admin action: delete the selected publish channels after the
    standard confirmation page, then push the resulting repository changes in
    one batch. Per-channel failures and push failures are reported via
    messages.
    """
    if request.POST.get('post') != 'yes':
        #the confirm page, or user not confirmed
        return self.default_delete_action[0](self,request,queryset)
    #user confirm to delete the publish channels, execute the custom delete logic.
    result = None
    failed_publish_channels = []
    try_set_push_owner("publish_channel_admin",enforce=True)
    warning_message = None
    try:
        for publish_channel in queryset:
            try:
                # Each delete runs in its own transaction so one failure does
                # not roll back the others.
                with transaction.atomic():
                    publish_channel.delete()
            except:
                error = sys.exc_info()
                # BUG FIX: the original referenced the undefined name
                # 'workspace' here, raising NameError whenever a delete
                # failed; report the loop variable's name instead.
                failed_publish_channels.append((publish_channel.name,traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish_channel
                continue
        try:
            try_push_to_repository('publish_channel_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_channel_admin",enforce=True)
    if failed_publish_channels or warning_message:
        if failed_publish_channels:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected publish channels are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_publish_channels]))))
            else:
                messages.warning(request, mark_safe("Some selected publish channels are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_publish_channels]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected publish channels are deleted successfully")
def _change_status(self,request,queryset,action,update_fields=None):
    """
    Shared admin-action helper: apply a status transition to every selected
    live layer and push the resulting changes in one batch.

    action        -- the requested action passed to next_status().
    update_fields -- optional field list forwarded to Model.save().
    """
    result = None
    failed_objects = []
    try_set_push_owner("livelayer_admin",enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                target_status = l.next_status(action)
                # Skip only if nothing would change AND no publish/unpublish
                # is still pending for the layer.
                if target_status == l.status and not l.publish_required and not l.unpublish_required:
                    #status not changed
                    continue
                else:
                    l.status = target_status
                    l.save(update_fields=update_fields)
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(l.datasource,l.kmi_name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            try_push_to_repository('livelayer_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("livelayer_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected layers are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected layers are processed successfully.")
def custom_delete_selected(self,request,queryset):
    """
    Django admin action: delete the selected wms servers after the standard
    confirmation page, then push the resulting repository changes in one
    batch. Per-server failures and push failures are reported via messages.
    """
    if request.POST.get('post') != 'yes':
        #the confirm page, or user not confirmed
        return self.default_delete_action[0](self,request,queryset)
    result = None
    failed_objects = []
    try_set_push_owner("wmsserver_admin",enforce=True)
    warning_message = None
    try:
        for server in queryset:
            #import ipdb;ipdb.set_trace()
            try:
                #delete the server
                server.delete()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(server.workspace.name,server.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            try_push_to_repository('wmsserver_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("wmsserver_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected servers are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected servers are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        # BUG FIX: original message read "All selected ervers ..." (typo).
        messages.success(request, "All selected servers are deleted successfully.")
def empty_gwc(self,request,queryset):
    """
    Django admin action: empty the GWC cache of every selected (enabled)
    publish, then push all changes to the repository in one batch. Disabled
    publishes are reported as failures without being processed.
    """
    result = None
    failed_objects = []
    try_set_push_owner("publish_admin",enforce=True)
    warning_message = None
    try:
        for l in queryset:
            try:
                if l.publish_status not in [ResourceStatus.Enabled]:
                    #Publish is disabled.
                    failed_objects.append(("{0}:{1}".format(l.workspace.name,l.name),"Disabled, no need to empty gwc."))
                    continue
                l.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                failed_objects.append(("{0}:{1}".format(l.workspace.name,l.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next publish
                continue
        try:
            try_push_to_repository('publish_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("publish_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected publishs are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected publishs are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected publishs are processed successfully.")
def empty_gwc(self,request,queryset):
    """
    Django admin action: empty the GWC cache of every selected (published)
    layer group, then push all changes to the repository in one batch.
    Unpublished groups are reported as failures without being processed.
    """
    result = None
    failed_objects = []
    try_set_push_owner("layergroup_admin",enforce=True)
    warning_message = None
    try:
        for g in queryset:
            try:
                if g.publish_status.unpublished:
                    #Not published before.
                    failed_objects.append(("{0}:{1}".format(g.workspace,g.name),"Not published before, no need to empty gwc."))
                    continue
                g.empty_gwc()
            except:
                logger.error(traceback.format_exc())
                error = sys.exc_info()
                # BUG FIX: the original referenced 'l.server,l.name' here, but
                # the loop variable is 'g' (a layer group) — any failure raised
                # NameError instead of being recorded. Use the group's
                # workspace/name, matching the unpublished branch above.
                failed_objects.append(("{0}:{1}".format(g.workspace,g.name),traceback.format_exception_only(error[0],error[1])))
                #remove failed, continue to process the next group
                continue
        try:
            try_push_to_repository('layergroup_admin',enforce=True)
        except:
            error = sys.exc_info()
            warning_message = traceback.format_exception_only(error[0],error[1])
            logger.error(traceback.format_exc())
    finally:
        try_clear_push_owner("layergroup_admin",enforce=True)
    if failed_objects or warning_message:
        if failed_objects:
            if warning_message:
                messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected layers are processed failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
            else:
                messages.warning(request, mark_safe("Some selected layers are processed failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
        else:
            messages.warning(request, mark_safe(warning_message))
    else:
        messages.success(request, "All selected layers are processed successfully.")
def publish(self):
    """
    Publish this wms layer's json reference (if exists) to the repository.

    Writes the layer metadata (names, titles, workspace/store, linked
    applications, optional geoserver settings) to ``json_filename_abs``,
    commits and pushes.
    """
    json_filename = self.json_filename_abs;
    try_set_push_owner("wmslayer")
    hg = None
    try:
        json_out = {}
        json_out["name"] = self.layer_name
        json_out["native_name"] = self.name
        json_out["title"] = self.layer_title
        json_out["abstract"] = self.layer_abstract
        json_out["workspace"] = self.server.workspace.name
        json_out["store"] = self.server.name
        json_out["publish_time"] = timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        # Local import — presumably avoids a circular import at module load;
        # verify before hoisting to the top of the file.
        from application.models import Application_Layers
        json_out["applications"] = ["{0}:{1}".format(o.application,o.order) for o in Application_Layers.objects.filter(wmslayer=self)]
        if self.geoserver_setting:
            json_out["geoserver_setting"] = json.loads(self.geoserver_setting)
        #create the dir if required
        if not os.path.exists(os.path.dirname(json_filename)):
            os.makedirs(os.path.dirname(json_filename))
        with open(json_filename, "wb") as output:
            json.dump(json_out, output, indent=4)
        hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
        hg.commit(include=[json_filename],addremove=True, user="******", message="update wms layer {}.{}".format(self.server.workspace.name, self.name))
        increase_committed_changes()
        try_push_to_repository("wmslayer",hg)
    finally:
        if hg:
            hg.close()
        try_clear_push_owner("wmslayer")
def unpublish(self):
    """
    Unpublish this layer group: remove its 'publish' and 'empty_gwc' json
    files (if present) from the repository and push the change.

    Returns True if files were removed; False if the group had no json files
    in the repository (i.e. was not published).
    """
    json_files = [ self.json_filename_abs(action) for action in [ 'publish','empty_gwc' ] ]
    #get all existing files.
    json_files = [ f for f in json_files if os.path.exists(f) ]
    if json_files:
        #file exists, layers is published, remove it.
        try_set_push_owner("layergroup")
        hg = None
        try:
            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
            hg.remove(files=json_files)
            hg.commit(include=json_files,addremove=True, user="******", message="Remove layer group {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()
            try_push_to_repository("layergroup",hg)
        finally:
            if hg:
                hg.close()
            try_clear_push_owner("layergroup")
        return True
    else:
        return False
def create(self):
    """
    Publish every layer listed in ``self.data['layers']`` (each item formatted
    as "workspace:name"), then push all changes to the repository at once.

    Resolution order per layer: Publish (meta data only) -> LiveLayer ->
    WmsLayer; the first match wins. Returns a dict with an overall "status"
    flag plus a per-layer {"status", "message"} entry.
    """
    resp = {"status":True}
    result = None
    try_set_push_owner("meta_resource")
    try:
        for layer in self.data.get('layers') or []:
            workspace,name = layer.split(":")
            resp[layer] = {}
            #get the workspace object
            try:
                workspaces = Workspace.objects.filter(name=workspace)
                if not len(workspaces):
                    #workspace does not exist
                    resp["status"] = False
                    resp[layer]["status"] = False
                    # NOTE(review): "...".format(name) is a no-op — the string
                    # has no placeholder; probably meant to include the name.
                    resp[layer]["message"] = "Workspace does not exist.".format(name)
                    continue
                try:
                    #try to locate it from publishs, and publish the meta data if found
                    pub = Publish.objects.get(workspace__in=workspaces,name=name)
                    try:
                        pub.publish_meta_data()
                        resp[layer]["status"] = True
                        resp[layer]["message"] = "Succeed."
                    except:
                        msg = traceback.format_exc()
                        logger.error(msg)
                        resp["status"] = False
                        resp[layer]["status"] = False
                        resp[layer]["message"] = "Publish meta data failed!{}".format(msg)
                        continue
                except Publish.DoesNotExist:
                    #not a publish object, try to locate it from live layers, and publish it if found
                    try:
                        # A live layer may match on either its name or table.
                        livelayer = LiveLayer.objects.filter(datasource__workspace__in=workspaces).get(Q(name=name) | Q(table=name))
                        try:
                            target_status = livelayer.next_status(ResourceAction.PUBLISH)
                            livelayer.status = target_status
                            livelayer.save(update_fields=["status","last_publish_time"])
                            resp[layer]["status"] = True
                            resp[layer]["message"] = "Succeed."
                        except :
                            msg = traceback.format_exc()
                            logger.error(msg)
                            resp["status"] = False
                            resp[layer]["status"] = False
                            resp[layer]["message"] = "Publish live layer failed!{}".format(msg)
                            continue
                    except LiveLayer.DoesNotExist:
                        #not a live layer, try to locate it from wms layers, and publish it if found
                        try:
                            wmslayer = WmsLayer.objects.get(server__workspace__in=workspaces,kmi_name=name)
                            try:
                                target_status = wmslayer.next_status(ResourceAction.PUBLISH)
                                wmslayer.status = target_status
                                wmslayer.save(update_fields=["status","last_publish_time"])
                                resp[layer]["status"] = True
                                resp[layer]["message"] = "Succeed."
                            except:
                                msg = traceback.format_exc()
                                logger.error(msg)
                                resp["status"] = False
                                resp[layer]["status"] = False
                                resp[layer]["message"] = "Publish wms layer failed!{}".format(msg)
                                continue
                        except WmsLayer.DoesNotExist:
                            #layer does not exist,
                            resp["status"] = False
                            resp[layer]["status"] = False
                            # NOTE(review): no-op .format(name) again — see above.
                            resp[layer]["message"] = "Does not exist.".format(name)
                            continue
            except :
                # Catch-all for unexpected errors while resolving this layer.
                msg = traceback.format_exc()
                logger.error(msg)
                resp["status"] = False
                resp[layer]["status"] = False
                resp[layer]["message"] = msg
                continue
        #push all files into repository at once.
        try:
            try_push_to_repository('meta_resource',enforce=True)
        except:
            #push failed, set status to false, and proper messages for related layers.
            msg = traceback.format_exc()
            logger.error(msg)
            resp["status"] = False
            for layer in self.data.get('layers') or []:
                if resp[layer]["status"]:
                    #publish succeed but push failed
                    resp[layer]["status"] = False
                    resp[layer]["message"] = "Push to repository failed!{}".format(msg)
    finally:
        try_clear_push_owner("meta_resource")
    return resp