def load_plot(job_id, alg_name, name):
    """Deprecated. Loads a plot.

    Retrieves the plot_model matching the given name, job_id and
    alg_name and returns its stored bokeh components.

    Args:
        job_id: The job id which the plot is associated with
        alg_name: The name of the algorithm
        name: The name of the plot

    Returns:
        A tuple with the plot script and the plot html, None when no
        matching plot exists, or False when an unexpected error occurs.
    """
    log = logger().get_log('plotter')
    try:
        query = plot_model.objects.filter(name=name)\
                                  .filter(job_id=job_id)\
                                  .filter(alg_name=alg_name)
        if not query:
            return None
        model = query[0]
        # The components are stored directly on the model (they used to
        # be pickled to disk).
        sc = model.script
        html = model.html
        return (sc, html)
    except Exception:
        log.warning(traceback.format_exc())
        return False
def start(self, data):
    """This method runs the algorithm job.

    Starts an algorithm asynchronously. When the data parameter contains
    the key 'algorithm_id', the algorithm_info_model with that id is
    retrieved, its data is resolved through the algorithm_manager, and an
    algorithm_model is created and saved; saving the model starts an
    algorithm_task asynchronously. Otherwise a legacy fallback builds the
    model directly from data['algorithm']['name'].

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute will be True if the
        algorithm has started, False otherwise.

    @see algorithm_info_model
    @link avi.models.algorithm_info_model
    @see algorithm_model
    @link avi.models.algorithm_model
    @see algorithm_task
    @link avi.task.algorithm_task
    """
    log = logger().get_log("algorithm_task")
    log.info("job %s", data)
    if "algorithm_id" in data:
        mng = algorithm_manager()
        # A missing, None or empty id sequence means there is nothing to
        # run. (One truthiness check replaces the previous redundant
        # pair of None and len() < 1 tests.)
        if not data.get('algorithm_id'):
            self.job_data.ok = False
            return self.job_data
        alg = algorithm_info_model.objects.get(pk=data['algorithm_id'][0])
        result = mng.get_algorithm_data(data['algorithm_id'][0],
                                        alg.name,
                                        alg.definition_file,
                                        data)
        m = algorithm_model(alg_name=alg.name, params=result,
                            results=result)
        m.save()
        self.job_data.data = m
        self.job_data.ok = True
        return self.job_data
    # Legacy fallback: the caller provided the full algorithm description
    # instead of an algorithm id.
    m = algorithm_model(alg_name=data['algorithm']['name'],
                        params=data,
                        results=data)
    m.save()
    self.job_data.data = m
    self.job_data.ok = True
    return self.job_data
def run(self):
    """Runs the task.

    Builds a herschel_query_task from this model's query attributes and
    executes it.

    Args:
        self: The object pointer

    Raises:
        Exception
    """
    log = logger().get_log('risea')
    log.info('deavi_task run...')
    task = herschel_query_task()
    # NOTE(review): attribute is literally named
    # 'herschel_query_model_model' -- confirm against the request class.
    task.task_id = self.request.herschel_query_model_model.pk
    # Copy the query attributes over one by one, preserving the key
    # order the task expects.
    fields = ('name_coord', 'name', 'input_file', 'ra', 'dec', 'shape',
              'radius', 'width', 'height', 'polygon',
              'positional_images', 'table', 'instrument', 'level',
              'params')
    payload = {field: getattr(self, field) for field in fields}
    payload['output_file'] = self.file_name
    payload['adql'] = self.adql
    task.task_data.data = payload
    task.run()
def __init__(self):
    """Constructor

    Initializes the log and sets some of the initial values of the
    warehouse from the Django settings module.

    Args:
        self: The object pointer
    """
    self.log = logger().get_log(self.str_log_header)
    try:
        from django.conf import settings
        self.log.debug("output path: %s", settings.OUTPUT_PATH)
        self.log.debug("media path: %s", settings.MEDIA_URL)
        self.log.debug("static path: %s", settings.STATIC_URL)
        self.log.debug("media root: %s", settings.MEDIA_ROOT)
        # TODO: ? set media and static paths?
        self.OUTPUT_PATH = settings.OUTPUT_PATH
        self.INPUT_PATH = settings.INPUT_PATH
        # Optional settings only override the warehouse values when they
        # are set to something truthy.
        if settings.AVI_URL_NAME:
            self.AVI_URL_NAME = settings.AVI_URL_NAME
        if settings.PORTAL_URL:
            self.PORTAL_URL = settings.PORTAL_URL
        if settings.AVI_ROOT_URL:
            self.AVI_URL = settings.AVI_ROOT_URL
    #except ImportError:
    except Exception:
        # Outside Django (or with an incomplete settings module) any of
        # the accesses above may fail; errors are deliberately swallowed
        # so the warehouse can still be constructed.
        pass
    # NOTE(review): if the try block failed before OUTPUT_PATH was
    # assigned, the next line raises AttributeError unless the class
    # declares a default OUTPUT_PATH -- confirm a class-level default
    # exists.
    self.RESOURCES_PATH = self.OUTPUT_PATH
def start(self, data):
    """Collects the pipeline status of every archive query.

    Walks over all gaia and herschel query models and builds a
    dictionary mapping a running index to a (label, status) tuple.

    Args:
        self: The object pointer.
        data: Input data dictionary (unused).

    Returns:
        The job_data attribute with the collected statuses.
    """
    log = logger().get_log("get_queries_status")
    statuses = {}
    idx = 0
    ms = None
    for label, model_cls in (("Gaia", gaia_query_model),
                             ("HSA", herschel_query_model)):
        ms = model_cls.objects.all()
        for query in ms:
            log.info("query %s", query.name)
            state = query.request.pipe_state.state
            log.info("status: %s", state)
            statuses[idx] = ("%s %s" % (label, query.name), state)
            idx += 1
    self.job_data.data = statuses
    # ok mirrors the last (herschel) queryset, as before.
    self.job_data.ok = ms is not None
    return self.job_data
def start(self, data):
    """This method runs the get_files_list job.

    Fetches every resource_model; the ok attribute reflects whether
    the query itself returned a (possibly empty) result set.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute will be True if there
        are files retrieved, False otherwise.

    @see resource_model
    @link avi.models.resource_model
    """
    log = logger().get_log('algorithm_manager')
    resources = resource_model.objects.all()
    self.job_data.ok = resources is not None
    if not resources:
        # Nothing to list.
        self.job_data.data = None
    return self.job_data
def init(self, cfg):
    """Initializes the attributes.

    Copies the connection settings out of the given configuration
    mapping onto this object and creates the log.

    Args:
        self: The object pointer
        cfg: The configuration to be loaded

    Returns:
        True if everything is initialized correctly, False otherwise
    """
    # Attribute name -> configuration key, in assignment order.
    mapping = (
        ('httphdr', 'http-header'),
        ('host', 'host'),
        ('port', 'port'),
        ('tap_server', 'tap-server'),
        ('pathinfo', 'async-pathinfo'),
        ('pathlogin', 'login-pathinfo'),
        ('pathlogout', 'logout-pathinfo'),
        ('pathupload', 'upload-pathinfo'),
        ('pathresults', 'results-pathinfo'),
        ('jobidtag', 'jobid-tag'),
        ('phasetag', 'phase-tag'),
        ('cookie', 'cookie'),
    )
    try:
        for attr, key in mapping:
            setattr(self, attr, cfg[key])
        self.log = logger().get_log('connection')
        return True
    except KeyError:
        # A missing configuration key leaves initialization incomplete.
        return False
def save_plot(job_id, alg_name, plot):
    """Saves the given plot.

    Splits the given bokeh plot into its script/html components, stores
    them in a new plot_model and registers the model in the data_file of
    the given job.

    Args:
        job_id: The job id to be associated with
        alg_name: The name of the algorithm
        plot: A bokeh plot object

    Returns:
        True if the plot was saved, False otherwise.
    """
    log = logger().get_log('plotter')
    try:
        sc, html = components(plot)
        # NOTE(review): the plot name is hard-coded to "name"; if plots
        # ever need to be looked up individually this should become a
        # parameter -- confirm with load_plot() usage.
        model = plot_model(name="name", job_id=job_id, alg_name=alg_name,
                           script=sc, html=html)
        model.save()
        df = data_file(job_id)
        df.add_plot(model)
    except Exception:
        log.warning(traceback.format_exc())
        return False
    return True
def start(self, data):
    """This method runs the relaunch_algorithm job.

    The data parameter must have the key 'pk' containing a string
    representation of the dictionary with all the data of the algorithm
    to be relaunched. The string is parsed, the algorithm_info_model is
    looked up by name and its primary key injected, and the absolute
    Gaia/HSA source paths are stripped from the file parameters so only
    the bare file names remain.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute with the algorithm parameters to be
        relaunched.
    """
    gwh = wh_global_config().get()
    log = logger().get_log("relaunch")
    log.info("inside relaunch_algorithm job")
    # data['pk'] is a string; literal_eval turns it back into a dict
    # without evaluating arbitrary code.
    qparams = literal_eval(data['pk'])
    # Resolve the algorithm's primary key from its name.
    m = algorithm_info_model.objects.get(name=qparams['algorithm']['name'])
    qparams['algorithm']['pk'] = m.pk
    keys = qparams['algorithm']['params'].keys()
    # Strip the absolute source directory from the Gaia/HSA file params.
    if "Gaia" in keys:
        path_to_eliminate = str(gwh.SOURCES_PATH) + "/gaia/"
        qparams['algorithm']['params']['Gaia'] = qparams['algorithm'][
            'params']['Gaia'].replace(path_to_eliminate, '')
        log.info(qparams['algorithm']['params']['Gaia'])
    if "HSA" in keys:
        path_to_eliminate = str(gwh.SOURCES_PATH) + "/hsa/"
        qparams['algorithm']['params']['HSA'] = qparams['algorithm'][
            'params']['HSA'].replace(path_to_eliminate, '')
        log.info(qparams['algorithm']['params']['HSA'])
    self.job_data.data = qparams['algorithm']
    log.info("params " + str(qparams['algorithm']))
    return self.job_data
def __init__(self):
    """Constructor.

    Sets up the logger for this component using the class log header.

    Args:
        self: The object pointer
    """
    self.log = logger().get_log(self.str_log_header)
def start(self, data):
    """This method runs the abort job.

    The data parameter must have the key 'pk' containing the primary key
    of the job to be aborted and the key 'type' containing the type of
    asynchronous job to be aborted. The job is looked up and aborted
    unless it is already aborted or its state is 'SUCCESS' or 'FAILURE'.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute will be True if the
        job has been aborted, False otherwise.
    """
    log = logger().get_log("views")
    log.info("inside the job")
    # Map the requested job type onto its model class. Previously an
    # unknown type left 'm' unbound and raised UnboundLocalError below.
    model_map = {
        'algorithm': algorithm_model,
        'gaia': gaia_query_model,
        'hsa': herschel_query_model,
        'sim': sim_query_model,
    }
    model_cls = model_map.get(data['type'])
    m = None
    if model_cls is not None:
        # filter().first() returns None for a missing pk instead of
        # raising DoesNotExist, which makes the existence check below
        # actually reachable.
        m = model_cls.objects.filter(pk=data['pk']).first()
    self.job_data.data = {}
    self.job_data.ok = m is not None
    if not m:
        return self.job_data
    log.info("abandon_job")
    log.info(m.request.pipeline_state.state)
    # Finished or already-aborted jobs are left untouched.
    if m.request.pipeline_state.state == 'SUCCESS' or \
       m.request.pipeline_state.state == 'FAILURE' or \
       m.is_aborted:
        return self.job_data
    log.info("aborting job...")
    m.request.abandon_job('User request')
    m.is_aborted = True
    m.request.delete_job_task()
    log.info(m.request.pipeline_state.state)
    m.request.pipeline_state.save()
    m.request.save()
    m.save()
    return self.job_data
def __init__(self):
    """The pipeline_manager constructor.

    Only responsibility is to initialize the log.

    Args:
        self: The object pointer.
    """
    self.log = logger().get_log(self.str_log_header)
def __init__(self):
    """The constructor.

    Creates the 'configuration_manager' log for this instance.

    Args:
        self: The object pointer
    """
    self.log = logger().get_log('configuration_manager')
def __init__(self):
    """The class constructor.

    Initializes the log; nothing else is set up here.

    Args:
        self: The object pointer.
    """
    self.log = logger().get_log(self.str_log_header)
def start(self, data):
    """This method is deprecated."""
    log = logger().get_log('views')
    log.info(data)
    from avi.core.algorithm.algorithm_manager import algorithm_manager
    # The retrieved definition is not propagated; kept for parity with
    # the historical behavior of this deprecated job.
    definition = algorithm_manager().get_algorithm(data)
    self.job_data.ok = True
    self.job_data.data = {}
    return self.job_data
def __init__(self):
    """Constructor

    Initializes the log, the warehouses, the configuration and the
    interface_manager.

    Args:
        self: The object pointer
    """
    # Configuration files live in ../config relative to this module.
    ipath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         '..', 'config')
    #wh_global_config().get().INPUT_PATH
    self.str_log_config_file = os.path.join(ipath, 'log_config.xml')
    self.str_config_file = os.path.join(ipath, 'config.xml')
    self.str_global_config_file = os.path.join(ipath, 'global_config.xml')
    # The log configuration must be loaded before the first get_log().
    from avi.utils.config_manager import logger_configuration
    log_cfg = logger_configuration()
    log_cfg.load(self.str_log_config_file)
    self.log = logger().get_log(self.str_log_header)
    self.log.info("Initialization of RISEA...")
    self.log.info("RISEA ptr : %s", self.get_ptr())
    # loading cfg; a failure aborts initialization early.
    from avi.utils.config_manager import configuration_manager
    self.cfg = configuration_manager()
    if not self.cfg.load(self.str_config_file):
        self.log.error("Failed to load the configuration %s",
                       self.str_config_file)
        return
    # Initialization of the warehouses from the global configuration.
    cfg = configuration_manager()
    if not cfg.load(self.str_global_config_file):
        self.log.error("Failed to load the configuration %s",
                       self.str_global_config_file)
        return
    wh_global_config().get().load(cfg.get("global"))
    wh_frontend_config().get().load(cfg.get("frontend"))
    # Both the current and home paths start at the global resources path.
    wh_frontend_config().get().\
        CURRENT_PATH = os.path.\
        normpath("".join(wh_global_config().get().RESOURCES_PATH))
    wh_frontend_config().get().\
        HOME_PATH = os.path.\
        normpath("".join(wh_global_config().get().RESOURCES_PATH))
    self.log.info("current id : %s\n resources id : %s",
                  id(wh_frontend_config().get().CURRENT_PATH),
                  id(wh_global_config().get().RESOURCES_PATH))
    # Initialization of the interface manager
    from .interface.interface_manager import interface_manager
    self.interface_manager = interface_manager()
    self.interface_manager.init(self.cfg)
    # Initialization of the resources
    from avi.utils.resources_manager import resources_manager
    resources_manager().init()
def get_job(self, name, container):
    """Returns the job object.

    Dynamically imports the module "avi.core.pipeline.job_<name>" and
    instantiates the class of the same name.

    Args:
        self: The object pointer.
        name: The name of the job to be created.
        container: Deprecated.

    Returns:
        The job object if it does exist, None otherwise.
    """
    package_str = "avi.core.pipeline.job_" + name
    module_str = "job_" + name
    log = logger().get_log('risea')
    log.info("Package str : %s - %s", package_str, module_str)
    # Import once; the previous implementation called __import__ twice
    # (the first time only to log the module), relying on the module
    # cache to make the second call cheap.
    mod = __import__(package_str, fromlist=[module_str])
    log.info(mod)
    if not mod:
        log.info("module not loaded")
        return None
    return getattr(mod, name)()
def __init__(self):
    """The herschel constructor.

    Calls the parent class constructor to create the connection object
    and then initializes the log.

    Args:
        self: The object pointer

    See:
        connection: avi.core.interface.connection.connection.connection
    """
    super().__init__()
    self.log = logger().get_log('herschel')
def __init__(self):
    """The gaia constructor.

    Calls the parent class constructor to create the connection object
    and then initializes the log.

    Args:
        self: the object pointer

    See:
        connection: avi.core.interface.connection.connection.connection
    """
    super().__init__()
    self.log = logger().get_log('gaia')
def load(self, log_config):
    """Loads the log configuration from the given XML file.

    Falls back to the default log configuration when the file does not
    exist.

    Args:
        self: The object pointer
        log_config: Path to the XML log configuration file

    Returns:
        The logger instance.
    """
    self.log_config = {}
    log = logger()
    if not os.path.isfile(log_config):
        print('Warning, log configuration file ' + log_config +
              ' not found!')
        print('Will now initialize default log configuration...')
        log.default_log()
        return log
    xml_root = xml.etree.ElementTree.parse(log_config).getroot()
    # Dispatch each recognized top-level tag to its loader.
    handlers = {'config': self.load_config, 'level': self.load_level}
    for child in xml_root:
        handler = handlers.get(child.tag)
        if handler is not None:
            handler(child, log)
    return log
def start(self, data):
    """This method runs the get_results job.

    The data parameter must be the primary key of the algorithm
    execution. Retrieves all the resources and plots created by that
    execution.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute will be True if the
        results were retrieved correctly, False otherwise.

    @see results_model
    @link avi.models.results_model
    """
    log = logger().get_log("views")
    log.info("%s", str(data))
    matches = results_model.objects.filter(job_id=data)
    if not matches:
        log.info("no results")
        self.job_data.ok = False
        self.job_data.data = None
        return self.job_data
    log.info("results %s", data)
    model = matches[0]
    plots = {}
    for plot in model.plots.all():
        log.info("%s", str(plot.job_id))
        plots[plot.pk] = plot.pk
    resources = {}
    for resource in model.resources.all():
        log.info("%s", str(resource.name))
        resources[resource.pk] = resource.name
    self.job_data.ok = True
    self.job_data.data = {'plots': plots, 'resources': resources}
    return self.job_data
def start(self, data):
    """This method runs the delete file job.

    The data parameter must have the key 'pk' containing the primary key
    of the resource to be deleted. The resource_model is deleted and the
    corresponding file is removed from disk.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute will be True if the
        resource has been deleted, False otherwise.
    """
    log = logger().get_log("views")
    log.info("inside delete file job")
    pk = data['pk']
    # filter().first() returns None for a missing pk instead of raising
    # DoesNotExist, so the existence check below is actually effective.
    m = resource_model.objects.filter(pk=pk).first()
    self.job_data.data = {}
    self.job_data.ok = m is not None
    if not m:
        return self.job_data
    path = m.path
    name = m.name
    m.delete()
    full_path = os.path.join(path, name)
    # A file already gone from disk should not make the deletion of the
    # database entry look like a failure.
    if os.path.isfile(full_path):
        os.remove(full_path)
    return self.job_data
def start(self, data):
    """This method runs the get_query_status job.

    The data parameter must contain the primary key of the query
    execution ('id') and the mission identifier ('mission'). The method
    retrieves the pipeline status of the given query.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute will be True if the
        information was retrieved correctly, False otherwise.

    @see results_model
    @link avi.models.results_model
    """
    log = logger().get_log("views")
    # Map the mission identifier onto its query model.
    model_map = {'gaia': gaia_query_model, 'hsa': herschel_query_model}
    model_cls = model_map.get(data.get('mission'))
    if model_cls is None:
        # Unknown mission: previously the method fell through and
        # implicitly returned None instead of job_data.
        self.job_data.ok = False
        return self.job_data
    # filter().first() yields None for a missing pk instead of raising
    # DoesNotExist, making the None check below meaningful.
    m = model_cls.objects.filter(pk=data['id']).first()
    if m is None:
        self.job_data.ok = False
        return self.job_data
    self.job_data.ok = True
    self.job_data.data = m.request.pipeline_state.state
    return self.job_data
def __init__(self, id):
    """Constructor.

    Binds this object to the results_model associated with the given
    job id, creating and saving a fresh model when none exists yet.

    Args:
        self: The object pointer
        id: The job_id associated with the results_model

    See:
        results_model: avi.models.results_model
    """
    self.log = logger().get_log('data_file')
    try:
        self.log.info("getting results")
        self.res = results_model.objects.get(job_id=id)
    except results_model.DoesNotExist:
        self.log.info("Creating results")
        fresh = results_model(job_id=id)
        fresh.save()
        self.res = fresh
def run(self):
    """Runs the task.

    Builds a sim_query_task from this model's simulation attributes and
    executes it.

    Args:
        self: The object pointer

    Raises:
        Exception
    """
    log = logger().get_log('risea')
    log.info('deavi_task run...')
    task = sim_query_task()
    # NOTE(review): attribute is literally named 'sim_query_model_model'
    # -- confirm against the request class.
    task.task_id = self.request.sim_query_model_model.pk
    fields = ('total_mass', 'virial_ratio', 'half_mass_radius',
              'fractal_dimension', 'mass_segregation_degree',
              'binary_fraction')
    payload = {'name': task.task_id}
    for field in fields:
        payload[field] = getattr(self, field)
    task.task_data.data = payload
    task.run()
def run(self):
    """Runs the scientific algorithm.

    Loads the scientific algorithm module named in the task_data, fills
    its input parameters and executes it.

    Args:
        self: The object pointer.

    Raises:
        err: If the task data is invalid, the algorithm cannot be found
            in the database, or the algorithm script fails.
    """
    log = logger().get_log("algorithm_task")
    log.info("running algorithm")
    data = self.__get_data(self.task_data.data)
    if not data:
        log.error("Invalid data provided")
        # The unreachable 'return' that used to follow this raise has
        # been removed.
        raise err("Invalid data provided")
    try:
        alg_name = data['algorithm']['name']
    except Exception as exc:
        raise err("No algorithm name provided") from exc
    try:
        alg_info = algorithm_info_model.objects.get(name=alg_name)
    except Exception as exc:
        raise err("Inconsistent database") from exc
    try:
        sc_path = alg_info.source_file
    except Exception as exc:
        raise err("No algorithm_info could have been retrieved") from exc
    log.info(sc_path)
    # Derive the package path from the directory of the source file.
    head, tail = os.path.split(os.path.normpath(sc_path))
    package_str = self._get_package_str(head) + alg_name
    module_str = alg_name
    log.info("from %s import %s", package_str, module_str)
    # FIXME: dynamic import of user-provided algorithm code.
    mod = __import__(package_str, fromlist=[module_str])
    log.info("Getting algorithm obj")
    alg = getattr(mod, alg_name)()
    log.info("Filling algorithm params")
    for k, v in data['algorithm']['params'].items():
        setattr(alg, k, v)
    log.info("Running algorithm %i", self.task_id)
    try:
        alg.run(self.task_id)
    except Exception as exc:
        raise err("Script has issues") from exc
def start(self, data):
    """This method retrieves the algorithm information.

    The data parameter must have the key 'id' containing the algorithm
    id whose info will be retrieved. The method retrieves the
    algorithm_info_model with the given id and extracts the parameters
    information from the definition file provided by that model. If the
    algorithm has gaia, herschel, results or user files in its input,
    the matching file names are also collected from the resource_model.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute will be True if the
        algorithm exists, False otherwise.

    @see algorithm_info_model
    @link avi.models.algorithm_info_model
    @see resource_model
    @link avi.models.resource_model
    """
    log = logger().get_log('algorithm_manager')
    log.info(data)
    # filter().first() returns None for a missing id (get() would raise
    # DoesNotExist before the check below could run).
    am = algorithm_info_model.objects.filter(pk=data['id']).first()
    if not am:
        # Fixed: previously assigned to the non-existent self.log_data.
        self.job_data.ok = False
        return self.job_data
    alg_info = algorithm_manager().get_algorithm(am.definition_file)
    self.job_data.data = dict()
    self.job_data.data['algorithm'] = alg_info['algorithm']
    # Sort the input parameters by their declared position.
    inp = []
    for _, i in alg_info['algorithm']['input'].items():
        log.info(i)
        inp.append(i)
    inp.sort(key=lambda x: x['position'])
    self.job_data.data['algorithm_input'] = inp
    ms = resource_model.objects.all()
    # One bucket per file type the algorithm can consume.
    has_gaia = algorithm_manager().has_param_type(am.definition_file,
                                                  'gaia_table')
    if has_gaia:
        self.job_data.data['gaia'] = []
    has_hsa = algorithm_manager().has_param_type(am.definition_file,
                                                 'hsa_table')
    if has_hsa:
        self.job_data.data['hsa'] = []
    log.info(am.definition_file)
    has_results = algorithm_manager().has_param_type(am.definition_file,
                                                     'results_data')
    if has_results:
        self.job_data.data['res'] = []
    has_user = algorithm_manager().has_param_type(am.definition_file,
                                                  'user_data')
    if has_user:
        self.job_data.data['user'] = []
    for i in ms:
        log.info("file %s - %s", i.name, i.file_type)
        if has_gaia and i.file_type == 'gaia':
            self.job_data.data['gaia'].append(i.name)
        if has_hsa and i.file_type == 'hsa':
            self.job_data.data['hsa'].append(i.name)
        if has_results and i.file_type == 'result':
            self.job_data.data['res'].append(i.name)
        if has_user and i.file_type == 'user':
            self.job_data.data['user'].append(i.name)
    self.job_data.ok = alg_info is not None
    return self.job_data
def start(self, data):
    """This method runs the get_files job.

    Retrieves all the allowed files and directories to be shown in the
    user interface. It uses the resources_manager to get the path
    information and discard_files to drop entries that should not be
    shown, then paginates the results with the current page retrieved
    from the wh_frontend_config warehouse.

    Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

    Returns:
        The job_data attribute. The ok attribute provides the pages
        information as a (num_pages, current, next, previous) tuple.

    @see resources_manager
    @link avi.utils.resources_manager.resources_manager
    @see wh_frontend_config
    @link avi.warehouse.wh_frontend_config
    """
    log = logger().get_log("views")
    wh = wh_frontend_config().get()
    gwh = wh_global_config().get()
    # A leading '-' only flags descending order; strip it for matching.
    sorting_wh = wh.SORTING_RESOURCES_BY
    if sorting_wh.startswith('-'):
        sorting_wh = sorting_wh[1:]
    all_files = self.discard_files(resources_manager().get_list(
        wh.CURRENT_PATH))
    # Per-category file info for the sources/results/user areas.
    gaia_files = self.discard_files(
        resources_manager().get_list("/data/output/sources/gaia"))
    gaia = resources_manager().get_info(gaia_files,
                                        "/data/output/sources/gaia")
    hsa_files = self.discard_files(
        resources_manager().get_list("/data/output/sources/hsa"))
    hsa = resources_manager().get_info(hsa_files,
                                       "/data/output/sources/hsa")
    sim_files = self.discard_files(
        resources_manager().get_list("/data/output/sources/sim"))
    sim = resources_manager().get_info(sim_files,
                                       "/data/output/sources/sim")
    results_files = self.discard_files(
        resources_manager().get_list("/data/output/results"))
    results_data = resources_manager().get_info(results_files,
                                                "/data/output/results")
    user_files = self.discard_files(
        resources_manager().get_list("/data/output/user"))
    user_data = resources_manager().get_info(user_files,
                                             "/data/output/user")
    all_files.sort()
    pg = Paginator(all_files, wh.MAX_RESOURCES_PER_PAGE)
    # Clamp the current page to the valid range.
    page = wh.CURRENT_RESOURCES_PAGE
    if page < 1:
        wh.CURRENT_RESOURCES_PAGE = 1
    elif page > pg.num_pages:
        wh.CURRENT_RESOURCES_PAGE = pg.num_pages
    files = pg.page(wh.CURRENT_RESOURCES_PAGE)
    f, d = resources_manager().get_info(files, wh.CURRENT_PATH)
    log.info(f)
    log.info(sorting_wh)
    if sorting_wh == 'size':
        f = collections.OrderedDict(sorted(f.items(),
                                           key=lambda x: x[1]))
        d = collections.OrderedDict(sorted(d.items(),
                                           key=lambda x: x[1]))
    elif sorting_wh == 'name':
        f = collections.OrderedDict(sorted(f.items(),
                                           key=lambda x: x[0]))
        d = collections.OrderedDict(sorted(d.items(),
                                           key=lambda x: x[0]))
    # Parse for the filemanager breadcrumb: strip the resources root
    # from the current path. Plain str.replace is used instead of
    # re.sub, which treated the path as a regular expression and broke
    # on paths containing metacharacters.
    p = wh.CURRENT_PATH
    path_to_eliminate = str(gwh.RESOURCES_PATH)
    p = p.replace(path_to_eliminate, '')
    p = p.split("/")
    self.job_data.data = [f, d, p, gaia, hsa, sim, results_data,
                          user_data]
    self.job_data.ok = (pg.num_pages, wh.CURRENT_RESOURCES_PAGE,
                        wh.CURRENT_RESOURCES_PAGE + 1,
                        wh.CURRENT_RESOURCES_PAGE - 1)
    return self.job_data
def __init__(self):
    """Constructor.

    Creates the 'resources_manager' log for this instance.

    Args:
        self: The object pointer
    """
    self.log = logger().get_log('resources_manager')
def run(self):
    """Runs the query to the herschel archive.

    If the task_data contains the 'input_file' key (with
    name_coord == 'file') it reads that file and calls
    get_herschel_data() once per input entry found. If the task_data
    contains the 'adql' key (with name_coord == 'adql') it queries the
    archive through the interface_manager using that query and saves
    the result. Otherwise it calls get_herschel_data() with the input
    from task_data directly.

    Args:
        self: The object pointer.

    Raises:
        task_exception: avi.task.task.task_exception

    See:
        interface_manager:
            avi.core.interface.interface_manager.interface_manager
    """
    # The deprecated local stub 'get_herschel_data' that used to be
    # defined here was never called (all calls go through
    # self.get_herschel_data) and has been removed.
    log = logger().get_log('herschel_query_task')
    data = self.task_data.data
    jm = json_manager()
    if data.get('input_file') and data.get('name_coord') == 'file':
        log.info('There is an input file')
        try:
            d = jm.read_herschel_input(data['input_file'])
            for i in d:
                if i.get('name'):
                    i['name_coord'] = 'name'
                if i.get('wavelength'):
                    # Map the wavelength onto the matching point-source
                    # catalogue table.
                    wl = int(i['wavelength'])
                    # NOTE(review): the key is literally 'tablee' --
                    # confirm downstream consumers before renaming.
                    if wl == 70 or wl == 100 or wl == 160:
                        i['tablee'] = "hsa.pacs_point_source_%s" % (
                            str(wl).zfill(3))
                    elif wl == 250 or wl == 350 or wl == 500:
                        i['tablee'] = "hsa.spire_point_source_%s" % (wl)
                if i.get('positional_source'):
                    if i['positional_source'] == 'False':
                        i['positional_images'] = True
                    else:
                        i['positional_images'] = False
                else:
                    i['positional_images'] = True
                self.get_herschel_data(log, i)
        except Exception:
            log.error("Exception while retrieving data from herschel")
            log.error(traceback.format_exc())
            raise err(traceback.format_exc())
        return
    elif data.get('adql') and data.get('name_coord') == 'adql':
        log.info('ADQL query')
        im = risea().get().interface_manager
        fm = file_manager()
        adql = data['adql']
        if not im:
            log.error('There is no interface manager initialized!')
            raise err("There is no interface manager initialized!")
        src = im._archive_herschel_get_adql(adql)
        if src is not None:
            if not data.get('output_file'):
                file_name = wh().get().SOURCES_FMT % {
                    "mission": "hsa",
                    "date": str(round(time.time())),
                    "name": "data"
                }
            else:
                file_name = wh().get().SOURCES_FMT % {
                    "mission": "hsa",
                    "date": str(round(time.time())),
                    "name": data['output_file']
                }
            fm.save_file_plain_data(src, "%s.vot" % (file_name),
                                    wh().get().HSA_PATH,
                                    self.task_id, "hsa", timezone.now())
            log.info("Everything done!")
            return
    else:
        if data.get('shape') == 'polygon':
            jm.set_vertexes(data, data['polygon'])
            log.info("added vertexes %s", str(data))
        self.get_herschel_data(log, data)
        return