Exemple #1
0
        def __init__(self):
            """Constructor.

            Initializes the log, the warehouses, the configuration and the
            interface_manager.

            Loads, in order: the logger configuration, the application
            configuration (config.xml) and the global configuration
            (global_config.xml), then populates the global/frontend
            warehouses and initializes the interface and resources managers.

            Args:
            self: The object pointer
            """
            # Configuration directory: ../config relative to this file.
            ipath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'config')
            #wh_global_config().get().INPUT_PATH
            self.str_log_config_file = os.path.join(ipath, 'log_config.xml')
            self.str_config_file = os.path.join(ipath, 'config.xml')
            self.str_global_config_file = os.path.join(ipath, 'global_config.xml')
            # Configure logging before anything else so later steps can log.
            from avi.utils.config_manager import logger_configuration
            log_cfg = logger_configuration()
            log_cfg.load(self.str_log_config_file)
            # NOTE(review): self.str_log_header is read here but not assigned
            # in this constructor — presumably a class attribute; confirm.
            self.log = logger().get_log(self.str_log_header)
            self.log.info("Initialization of RISEA...")
            self.log.info("RISEA ptr : %s",self.get_ptr())

            # loading cfg
            from avi.utils.config_manager import configuration_manager
            self.cfg = configuration_manager()
            if not self.cfg.load(self.str_config_file):
                # NOTE(review): returning here leaves the object without
                # interface_manager/resources initialization — confirm callers
                # tolerate a partially-initialized instance.
                self.log.error("Failed to load the configuration %s",
                               self.str_config_file)
                return
            
            # Initialization of the warehouses
            cfg = configuration_manager()
            if not cfg.load(self.str_global_config_file):
                self.log.error("Failed to load the configuration %s",
                               self.str_global_config_file)
                return
            
            wh_global_config().get().load(cfg.get("global"))
            wh_frontend_config().get().load(cfg.get("frontend"))
            # Both CURRENT_PATH and HOME_PATH start at the resources path.
            wh_frontend_config().get().\
                CURRENT_PATH = os.path.\
                normpath("".join(wh_global_config().get().RESOURCES_PATH))
            wh_frontend_config().get().\
                HOME_PATH = os.path.\
                normpath("".join(wh_global_config().get().RESOURCES_PATH))
            self.log.info("current id : %s\n resources id : %s",
                          id(wh_frontend_config().get().CURRENT_PATH),
                          id(wh_global_config().get().RESOURCES_PATH))
            # Initialization of the interface manager
            from .interface.interface_manager import interface_manager
            self.interface_manager = interface_manager()
            self.interface_manager.init(self.cfg)
            # Initialization of the resources
            from avi.utils.resources_manager import resources_manager
            resources_manager().init()
Exemple #2
0
 def rename_directory(self, name, new_name):
     """Deprecated: renames a directory inside the current warehouse path.

     Args:
     self: The object pointer
     name: The current directory name
     new_name: The new directory name
     """
     base = wh_frontend_config().get().CURRENT_PATH
     src = base + "/" + name
     dst = base + "/" + new_name
     # Guard clauses: source must exist and destination must not.
     if not os.path.isdir(src):
         self.log.info('The directory does not exists.')
     elif os.path.isdir(dst):
         self.log.info('The directory already exists.')
     else:
         os.rename(src, dst)
Exemple #3
0
 def rename_file(self, name, new_name):
     """Deprecated: renames a file inside the current warehouse path.

     Args:
     self: The object pointer
     name: The current file name
     new_name: The new file name
     """
     base = wh_frontend_config().get().CURRENT_PATH
     src = base + "/" + name
     dst = base + "/" + new_name
     # Guard clauses: source must exist and destination must not.
     if not os.path.isfile(src):
         self.log.info('The file does not exists.')
     elif os.path.isfile(dst):
         self.log.info('The file already exists.')
     else:
         os.rename(src, dst)
Exemple #4
0
    def start(self, data):
        """This method runs the relaunch_algorithm job.

        This method will relaunch the asynchronous job provided in the data
        parameter.

        The data parameter must have the key 'pk' containing (as a literal
        string) all the data of the algorithm to be relaunched.

        It will first parse the data parameter to a dictionary and save it
        into the 'qparams' variable, then get the algorithm model by name
        and store its primary key into 'qparams'.

        If the algorithm input parameters contain 'Gaia' or 'HSA', the
        absolute sources path is stripped from the corresponding file
        parameter so only the relative file name is kept.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The job_data attribute with the algorithm parameters to be relaunched.
        """
        gwh = wh_global_config().get()

        log = logger().get_log("relaunch")
        log.info("inside relaunch_algorithm job")
        # data['pk'] is a string; transform it to a dictionary
        qparams = literal_eval(data['pk'])
        # get the model of the algorithm by the name in qparams
        m = algorithm_info_model.objects.get(name=qparams['algorithm']['name'])
        # add the pk of the algorithm
        qparams['algorithm']['pk'] = m.pk
        # get the keys of the algorithm parameters
        keys = qparams['algorithm']['params'].keys()
        # Strip the absolute sources path from mission file parameters so
        # only the file name is relaunched (previously duplicated per mission).
        for key, subdir in (("Gaia", "gaia"), ("HSA", "hsa")):
            if key in keys:
                path_to_eliminate = str(gwh.SOURCES_PATH) + "/" + subdir + "/"
                qparams['algorithm']['params'][key] = \
                    qparams['algorithm']['params'][key].replace(
                        path_to_eliminate, '')
                log.info(qparams['algorithm']['params'][key])

        self.job_data.data = qparams['algorithm']
        log.info("params " + str(qparams['algorithm']))
        return self.job_data
Exemple #5
0
    def start(self, data):
        """This method runs the sorting_by job.

        This method changes the current sorting method stored in the
        wh_frontend_config warehouse.

        The data parameter must have the key 'page' containing the name of
        the web page whose sorting method will change. It must also have the
        key 'sort_by' with the sorting method to be changed with.

        Here we just change the current sorting method in the warehouse,
        without checking if it is valid or not. Those error controls will be
        made in other jobs.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The job_data attribute. The ok attribute will be True always.

        @see wh_frontend_config @link avi.warehouse.wh_frontend_config
        """
        wh = wh_frontend_config().get()

        # Map each page name to the warehouse attribute that holds its
        # sorting method (was a five-way duplicated if/elif chain).
        attr_by_page = {
            "pipeline_status": "SORTING_EXEC_BY",
            "query_status": "SORTING_QUERY_BY",
            "algorithm": "SORTING_ALG_BY",
            "resources": "SORTING_RESOURCES_BY",
            "results": "SORTING_RESULTS_BY",
        }
        attr = attr_by_page.get(data['page'])
        if attr is not None:
            current = getattr(wh, attr)
            if self.are_equal(current, data['sort_by']):
                # Selecting the same method again toggles its direction.
                setattr(wh, attr, self.invert(current))
            else:
                setattr(wh, attr, data['sort_by'])

        self.job_data.data = {}
        self.job_data.ok = True
        return self.job_data
Exemple #6
0
    def directory_down(self, path, down_dir):
        """Moves the current warehouse path into the given subdirectory.

        This method changes the CURRENT_PATH stored in the wh_frontend_config
        warehouse to the down_dir directory located under path.

        Args:
        self: The object pointer
        path: The path from which the down_dir directory must be
        down_dir: The new current path

        Returns:
        The new current path if everything went correctly

        See:
        wh_frontend_config: avi.warehouse.wh_frontend_config
        """
        target = os.path.join(path, down_dir)
        wh_frontend_config().get().CURRENT_PATH = \
            self.move_absolute_directory(target)
        return wh_frontend_config().get().CURRENT_PATH
Exemple #7
0
 def directory_up(self):
     """Moves the warehouse current path to the parent directory.

     Args:
     self: The object pointer

     Returns:
     The warehouse current path
     """
     from avi.utils.resources_manager import resources_manager
     current = wh_frontend_config().get().CURRENT_PATH
     return resources_manager().directory_up(current)
Exemple #8
0
 def create_directory(self, directory):
     """Deprecated: creates a directory under the current warehouse path.

     Only names made of letters, digits, underscores and spaces are
     accepted; any other name is rejected.
     """
     import re, errno
     # Whitelist: alphanumerics, underscore and space only.
     if re.match("^[a-zA-Z0-9_ ]*$", directory):
         path_full = \
         wh_frontend_config().get().CURRENT_PATH + "/" + directory
         try:
             os.makedirs(path_full)
         except OSError as e:
             # Ignore "already exists"; re-raise any other OS error.
             if e.errno != errno.EEXIST:
                 raise
     else:
         # NOTE(review): a bare `raise` with no active exception raises
         # RuntimeError at runtime — presumably a specific exception (e.g.
         # ValueError) was intended; confirm before changing, as callers may
         # rely on the current behavior.
         raise
Exemple #9
0
 def move_default_directory(self):
     """Moves the warehouse current path to the home directory.

     Args:
     self: The object pointer

     Returns:
     The warehouse current path
     """
     from avi.utils.resources_manager import resources_manager
     home = "".join(wh_frontend_config().get().HOME_PATH)
     return resources_manager().move_absolute_directory(home)
Exemple #10
0
    def directory_up(self, path):
        """Moves the current warehouse path to the parent of the given path.

        This method changes the CURRENT_PATH stored in the wh_frontend_config
        warehouse to the parent of the given path. Moving above the home
        directory is not allowed.

        Args:
        self: The object pointer
        path: The path whose parent will be the new current path

        Returns:
        The new current path if everything went correctly, None otherwise

        See:
        wh_frontend_config: avi.warehouse.wh_frontend_config
        """
        # Refuse to climb above the home directory.
        if path == wh_frontend_config().get().HOME_PATH:
            self.log.info('The directory does not exist.')
            return None
        parent = os.path.split(path)[0]
        wh_frontend_config().get().CURRENT_PATH = \
            self.move_absolute_directory(parent)
        return wh_frontend_config().get().CURRENT_PATH
Exemple #11
0
        def directory_down(self, folder_local):
            """Moves the warehouse current path into the given subdirectory.

            Args:
            self: The object pointer
            folder_local: The directory to enter

            Returns:
            The warehouse current path
            """
            from avi.utils.resources_manager import resources_manager
            current = wh_frontend_config().get().CURRENT_PATH
            return resources_manager().directory_down(current, folder_local)
Exemple #12
0
    def start(self, data):
        """This method runs the change page job.

        This method changes the current page stored in the
        wh_frontend_config warehouse.

        The data parameter must have the key 'page' containing the name of
        the web page whose page number will change. It must also have the
        key 'number' with the number of the page.

        Here we just change the current page in the warehouse, without checking
        if it is valid or not. Those error controls will be made in other jobs.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The job_data attribute. The ok attribute will be True always.

        @see wh_frontend_config @link avi.warehouse.wh_frontend_config
        """
        #log = logger().get_log('get_queries_status')
        wh = wh_frontend_config().get()
        # Map each page name to the warehouse attribute that holds its
        # current page number (was five duplicated if blocks).
        attr_by_page = {
            "pipeline_status": "CURRENT_EXEC_PAGE",
            "query_status": "CURRENT_QUERY_PAGE",
            "algorithm": "CURRENT_ALG_PAGE",
            "resources": "CURRENT_RESOURCES_PAGE",
            "results": "CURRENT_RESULTS_PAGE",
        }
        attr = attr_by_page.get(data['page'])
        if attr is not None:
            setattr(wh, attr, int(data['number']))

        self.job_data.data = {}
        self.job_data.ok = True
        return self.job_data
Exemple #13
0
 def delete_directory(self, directory):
     """Deprecated: removes a directory (and its contents) under the
     current warehouse path."""
     import shutil
     target = \
         wh_frontend_config().get().CURRENT_PATH + "/" + directory
     if not os.path.isdir(target):
         self.log.info('The directory does not exist.')
         return
     # Empty the directory first: unlink files, rmtree subdirectories.
     for entry in os.listdir(target):
         entry_path = os.path.join(target, entry)
         if os.path.isfile(entry_path):
             os.unlink(entry_path)
         elif os.path.isdir(entry_path):
             shutil.rmtree(entry_path)
         else:
             self.log.info('The directory does not exist.')
     # Finally remove the (now empty) directory itself.
     if os.path.isdir(target):
         os.rmdir(target)
     else:
         self.log.info('The directory does not exist.')
Exemple #14
0
    def move_absolute_directory(self, path):
        """Moves the current warehouse path to the given path.

        This method changes the CURRENT_PATH stored in the wh_frontend_config
        warehouse to the given path.

        Args:
        self: The object pointer
        path: The new current path

        Returns:
        The new current path if everything went correctly, False otherwise

        See:
        wh_frontend_config: avi.warehouse.wh_frontend_config
        """
        if not self.dir_exists(path):
            self.log.info('The directory does not exist.')
            return False
        wh_frontend_config().get().CURRENT_PATH = path
        return path
Exemple #15
0
    def delete_file(self, file_name):
        """Deletes the given file.

        The deletion path defaults to the warehouse current path, but is
        redirected to a fixed sources/results/user directory when the file
        name carries a known prefix and the current path does not already
        point there.

        Args:
        self: The object pointer
        file_name: the file to be deleted

        Raises:
        Exception: if an unknown error happens
        """
        path_to_delete = wh_frontend_config().get().CURRENT_PATH
        self.log.info(path_to_delete)
        self.log.info(file_name)
        self.log.info(file_name.startswith('gaia'))
        # (prefix, substring expected in the current path, forced path)
        overrides = (
            ('gaia', 'gaia', "/data/output/sources/gaia"),
            ('hsa', 'hsa', "/data/output/sources/hsa"),
            ('res', 'results', "/data/output/results"),
            ('user', 'user', "/data/output/user"),
        )
        for prefix, marker, forced_path in overrides:
            if file_name.startswith(prefix):
                if marker not in path_to_delete:
                    path_to_delete = forced_path
                break

        try:
            file_manager().remove_file(file_name, path_to_delete)
        except OSError as e:
            # errno.ENOENT = no such file or directory; anything else is
            # unexpected and re-raised.
            if e.errno != errno.ENOENT:
                raise
Exemple #16
0
    def start(self, data):
        """This method runs the launch job.

        This method will launch the asynchronous job provided in the data
        parameter.

        The data parameter must have the key 'id' containing the primary key
        of the query and the mission to be launched, joined by '-'.

        It will first split the key 'id' by '-' and save the primary key into
        data 'id' and the mission into data 'mission'.

        Then it will get all the algorithms and take the valid algorithms
        for the query by comparing the data 'mission' and the inputs of
        the algorithms.

        Finally it will set all the parameters to be returned into
        the 'ret' variable.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The mission, the algorithms and the files of the query.
        """
        log = logger().get_log("launch")
        log.info("inside launch job")

        ret = {}
        cont = 0
        ids = []
        # 'id' arrives as "<pk>-<mission>": split it once.
        id_parts = data['id'].split('-')
        data['mission'] = id_parts[1]
        data['id'] = id_parts[0]
        ret['mission'] = data['mission']
        ret['algorithms'] = {}
        pk = {}

        # Take the valid algorithms for the query
        m = get_algorithms.start(self, None)
        log.info(m.data)
        for group in m.data['algorithms']:
            for alg in group['algorithms']:
                ids.append(alg[0])
        for alg_id in ids:
            pk['id'] = alg_id
            model = get_algorithm_info.start(self, pk)
            for a in list(model.data['algorithm']['input'].keys()):
                # An algorithm is valid when one of its input types mentions
                # the query's mission.
                if ret['mission'].lower() in model.data['algorithm']['input'][a]['type'].lower():
                    ret['algorithms'][cont] = {
                        'pk': pk['id'],
                        'view_name': model.data['algorithm']['view_name'],
                    }
                    log.info(cont)
                    cont += 1
                    break

        # Set all the return data
        query_info = get_query_info.start(self, data)
        query_info_data = dict(query_info.data)
        ret['files'] = query_info_data.get('files', 'No data found')
        log.info("return of job_launch: " + str(ret))
        self.job_data.data = ret
        return self.job_data
    def start(self, data):
        """This method runs the get_queries_status job.

        This method will retrieve the gaia_query_models, the
        herschel_query_models and the sim_query_models and it will sort them
        by the current sorting method provided by the wh_frontend_config
        warehouse.

        Then it will paginate the results with the current page retrieved
        from the wh_frontend_config warehouse.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The job_data attribute. The ok attribute provides the pages information.

        @see gaia_query_model @link avi.models.gaia_query_model
        @see herschel_query_model @link avi.models.herschel_query_model
        @see wh_frontend_config @link avi.warehouse.wh_frontend_config
        """
        log = logger().get_log("get_queries_status")

        current_started = wh_common().get().queries_started
        count_started = 0

        # Translate the warehouse sorting method into a Django order_by
        # field and an equivalent Python sort key for the merged list.
        wh = wh_frontend_config().get()
        sorting_wh = wh.SORTING_QUERY_BY
        order_by = 'request__pipeline_state__started_time'
        order_lambda = lambda query: (query.request.pipeline_state.
                                      started_time, query)
        if sorting_wh == 'name':
            order_by = 'name'
            order_lambda = lambda query: ("%s %s %s" % (query.archive, query.
                                                        name, query.pk), query)
        elif sorting_wh == '-name':
            order_by = '-name'
            order_lambda = lambda query: ("%s %s %s" % (query.archive, query.
                                                        name, query.pk), query)
        elif sorting_wh == '-date':
            order_by = '-request__pipeline_state__started_time'
            order_lambda = lambda query: (query.request.pipeline_state.
                                          started_time, query)
        elif sorting_wh == 'status':
            order_by = 'request__pipeline_state__state'
            order_lambda = lambda query: (query.request.pipeline_state.state,
                                          query)
        elif sorting_wh == '-status':
            order_by = '-request__pipeline_state__state'
            order_lambda = lambda query: (query.request.pipeline_state.state,
                                          query)

        all_gaia = gaia_query_model.objects.all().order_by(order_by, 'pk')
        all_hsa = herschel_query_model.objects.all().order_by(order_by, 'pk')
        all_sim = sim_query_model.objects.all().order_by(order_by, 'pk')

        # Zero-pad pks in the name keys so e.g. pk 2 sorts before pk 10.
        len_all = len(str(len(all_gaia) + len(all_hsa) + len(all_sim)))
        order_lambda = lambda query: (query.request.pipeline_state.
                                      started_time, query)
        if sorting_wh == 'name':
            order_lambda = lambda query: ("%s %s %s" % (
                query.archive, query.name, str(query.pk).zfill(len_all)), query
                                          )
        elif sorting_wh == '-name':
            order_lambda = lambda query: ("%s %s %s" % (
                query.archive, query.name, str(query.pk).zfill(len_all)), query
                                          )

        log.info("models retrieved")

        self.job_data.data = dict([(0, False), (1, {})])
        self.job_data.ok = False

        #pgg = Paginator(all_gaia, wh.MAX_QUERY_PER_PAGE)
        #pgh = Paginator(all_hsa, wh.MAX_QUERY_PER_PAGE)

        #page_gaia = wh.CURRENT_QUERY_PAGE
        #if page_gaia < 1:
        #    wh.CURRENT_QUERY_PAGE = 1
        #elif page_gaia > pgg.num_pages:
        #    wh.CURRENT_QUERY_PAGE = pgg.num_pages

        #ms_gaia = pgg.page(wh.CURRENT_QUERY_PAGE)

        #if wh.CURRENT_QUERY_PAGE > pgh.num_pages:
        #    wh.CURRENT_QUERY_PAGE = pgh.num_pages

        #ms_hsa = pgh.page(wh.CURRENT_QUERY_PAGE)

        reverse = False
        if order_by[0] == '-':
            reverse = True
            # NOTE(review): str.replace returns a new string; this result is
            # discarded, so order_by keeps its leading '-'. Harmless because
            # order_by is not used again below, but the intent was probably
            # order_by = order_by.replace('-', '', 1).
            order_by.replace('-', '', 1)
        call = chain(all_gaia, all_hsa, all_sim)
        #for call_ in call:
        #    call_.name = "query %s"%(call_.name)
        #    if isinstance(call_, gaia_query_model):
        #        call_.name = "Gaia %s"%(call_.name)
        #    elif isinstance(call_, herschel_query_model):
        #        call_.name = "HSA %s"%(call_.name)
        #    elif isinstance(call_, sim_query_model):
        #        call_.name = "SIM %s"%(call_.name)
        #try:
        all_ms = sorted(
            call,
            #key=attrgetter("name"),
            key=order_lambda,
            reverse=reverse)
        #for am in all_ms:
        #    log.info("%s %s %s", am.name, am.request.pipeline_state.started_time, am.request.pipeline_state.state)
        #except TypeError:
        #    log.info("whaaat")
        #    all_ms = sorted(call ,
        #key=attrgetter("name"),
        #                    key = lambda query: (query.name is None, query),
        #                    reverse=True)

        log.info("models chained and sorted")

        pg = Paginator(all_ms, wh.MAX_QUERY_PER_PAGE)
        page = wh.CURRENT_QUERY_PAGE
        #log.info(page)
        #------------------
        #---all data queries
        # Build a summary of ALL queries (not just the current page), keyed
        # by an incrementing index.
        allqueries = {}
        k = 0
        for h in pg.object_list:
            # Some requests expose pipe_state, others pipeline_state.
            try:
                status = h.request.pipe_state.state
                date = h.request.pipe_state.started_time
            except AttributeError:
                status = h.request.pipeline_state.state
                date = h.request.pipeline_state.started_time
            #status = h.request.pipeline_state.state
            #date = h.request.pipeline_state.started_time
            name = "query %s" % (h.name)
            if isinstance(h, gaia_query_model):
                name = "Gaia %s" % (h.name)
            elif isinstance(h, herschel_query_model):
                name = "HSA %s" % (h.name)
            elif isinstance(h, sim_query_model):
                name = "SIM %s" % (h.name)
            allqueries[k] = (name, status, date, h.pk, h.archive)
            k += 1
        #------------------
        log.info("paginator done")
        # Clamp the current page to the valid range before paging.
        if page < 1:
            wh.CURRENT_QUERY_PAGE = 1
        elif page > pg.num_pages:
            wh.CURRENT_QUERY_PAGE = pg.num_pages

        all_ms = pg.page(wh.CURRENT_QUERY_PAGE)

        if not all_ms:
            return self.job_data

        data = {}

        #------query info -----
        #query_id_mission = {}
        #query_info = {}
        #------query info -----

        # Build the detailed per-query tuples for the current page only.
        i = 0
        for q in all_ms:
            # NOTE(review): 'error' is only assigned in the AttributeError
            # branch. If the try succeeds on the first iteration, the read at
            # 'if not error' below raises NameError; on later iterations it
            # silently reuses the previous query's value — confirm intended.
            try:
                status = q.request.pipe_state.state
                date = q.request.pipe_state.started_time
            except AttributeError:
                status = q.request.pipeline_state.state
                date = q.request.pipeline_state.started_time
                error = q.request.pipeline_state.exception
                pos = error.rfind("Exception: ")
                error = error[pos + 11:]

                #------query info -----
                #query_id_mission['mission'] = q.archive
                #query_id_mission['id'] = q.pk
                #query_info = get_query_info.start(self,query_id_mission)
                #log.info("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
                #log.info(query_info.data)
                #log.info("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
                #------query info -----

            if not error or error == "":
                error = "OK"
            if q.is_aborted:
                error = "Aborted"

            log.info("status: %s", status)
            if status == "STARTED":
                count_started += 1
            #data[q.pk] = (q.name, status)

            #------query info -----
            #data[i] = ("query %s"%(q.name), status, date, error, q.pk, q.archive, query_info.data)
            #------query info -----

            name = "query %s" % (q.name)
            if isinstance(q, gaia_query_model):
                name = "Gaia %s" % (q.name)
            elif isinstance(q, herschel_query_model):
                name = "HSA %s" % (q.name)
            elif isinstance(q, sim_query_model):
                name = "SIM %s" % (q.name)
            data[i] = (name, status, date, error, q.pk, q.archive)
            log.info("loop")

            i += 1

        log.info("looping through the paginator")

        wh_common().get().queries_started = count_started
        #if current_started != 0:
        log.info("asdasd")
        log.info(self.job_data.data)
        # data[0] flags whether the number of running queries decreased
        # since the last poll (i.e. something finished).
        self.job_data.data[0] = count_started < current_started
        #self.job_data.data.append(count_started < current_started)
        self.job_data.ok = [pg.num_pages, wh.CURRENT_QUERY_PAGE, \
                            wh.CURRENT_QUERY_PAGE + 1, wh.CURRENT_QUERY_PAGE - 1, allqueries, wh.SORTING_QUERY_BY]
        try:
            log.info(self.job_data.data)
            #log.info(self.job_data.data[1])
            log.info(data)
            aux = data
            log.info(type(aux))
            if isinstance(self.job_data.data, dict):
                #log.info(type(self.job_data.data[1]))
                self.job_data.data[
                    1] = aux  #{0:('sim','success'),1:('sim20','success')}
            else:
                self.job_data.data.append(data)
            #self.job_data.data.append(data)
        except Exception as e:
            log.info(e)
        log.info("job_get_queries_status end")
        return self.job_data
        # OLD
        # NOTE(review): everything below is unreachable dead code kept from
        # an older version of this job — candidate for removal.

        ms = gaia_query_model.objects.all()

        data = {}
        i = 0
        for q in ms:
            log.info("query %s", q.name)
            try:
                status = q.request.pipe_state.state
                date = q.request.pipe_state.started_time
            except AttributeError:
                status = q.request.pipeline_state.state
                date = q.request.pipeline_state.started_time
                error = q.request.pipeline_state.exception
                pos = error.rfind("Exception: ")
                error = error[pos + 11:]

            if not error or error == "":
                error = "OK"
            if q.is_aborted:
                error = "Aborted"

            log.info("status: %s", status)
            if status == "STARTED":
                count_started += 1
            #data[q.pk] = (q.name, status)
            data[i] = ("Gaia %s" % (q.name), status, date, error, q.pk, "gaia")
            i += 1
            #data.append((q.name, status))

        ms = herschel_query_model.objects.all()

        for q in ms:
            log.info("query %s", q.name)
            try:
                status = q.request.pipe_state.state
                date = q.request.pipe_state.started_time
            except AttributeError:
                status = q.request.pipeline_state.state
                date = q.request.pipeline_state.started_time
                error = q.request.pipeline_state.exception
                pos = error.rfind("Exception: ")
                error = error[pos + 11:]

            if not error or error == "":
                error = "OK"
            if q.is_aborted:
                error = "Aborted"
            log.info("status: %s", status)
            if status == "STARTED":
                count_started += 1
            data[i] = ("HSA %s" % (q.name), status, date, error, q.pk, "hsa")
            i += 1

        sorted_index = sorted(data, key=lambda x: data[x][1], reverse=True)

        res = {}
        i = 0
        for ind in sorted_index:
            res[i] = data[ind]
            i += 1
        self.job_data.data = {}
        self.job_data.data[1] = res
        wh_common().get().queries_started = count_started
        #if current_started != 0:
        self.job_data.data[0] = count_started < current_started
        #else:
        #    self.job_data.data[0] = False

        self.job_data.ok = ms is not None
        return self.job_data

        self.job_data.data = {}
        i = 0
        for ind in sorted_index:
            self.job_data.data[i] = data[ind]
            i += 1

        self.job_data.ok = ms is not None
        return self.job_data
Exemple #18
0
    def start(self, data):
        """This method runs the get_files job.

        This method will retrieve all the allowed files and directories to be
        shown in the user interface.

        It uses the resources_manager to get the path information and then uses
        the discard_files to discard the ones that should not be shown.

        After that it will paginate the results with the current page retrieved
        from the wh_frontend_config warehouse.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The job_data attribute. The ok attribute provides the pages information.

        @see resources_manager @link avi.utils.resources_manager.resources_manager
        @see wh_frontend_config @link avi.warehouse.wh_frontend_config
        """
        log = logger().get_log("views")

        wh = wh_frontend_config().get()
        gwh = wh_global_config().get()

        # Sorting criterion selected in the front-end; a leading '-' marks
        # descending order, but only the field name is used here.
        sorting_wh = wh.SORTING_RESOURCES_BY
        if sorting_wh[0] == '-':
            sorting_wh = sorting_wh[1:]

        def _info_for(path):
            # List the given path, drop the entries that must not be shown
            # and return the resources_manager info for the remainder.
            visible = self.discard_files(resources_manager().get_list(path))
            return resources_manager().get_info(visible, path)

        all_files = self.discard_files(resources_manager().get_list(
            wh.CURRENT_PATH))

        # Fixed output locations shown alongside the current directory.
        gaia = _info_for("/data/output/sources/gaia")
        hsa = _info_for("/data/output/sources/hsa")
        sim = _info_for("/data/output/sources/sim")
        results_data = _info_for("/data/output/results")
        user_data = _info_for("/data/output/user")

        all_files.sort()

        # Clamp the requested page into [1, num_pages] before paging.
        pg = Paginator(all_files, wh.MAX_RESOURCES_PER_PAGE)
        page = wh.CURRENT_RESOURCES_PAGE
        if page < 1:
            wh.CURRENT_RESOURCES_PAGE = 1
        elif page > pg.num_pages:
            wh.CURRENT_RESOURCES_PAGE = pg.num_pages

        files = pg.page(wh.CURRENT_RESOURCES_PAGE)

        f, d = resources_manager().get_info(files, wh.CURRENT_PATH)

        log.info(f)
        log.info(sorting_wh)
        # Re-order files and directories by size (value) or name (key).
        if sorting_wh == 'size':
            f = collections.OrderedDict(sorted(f.items(), key=lambda x: x[1]))
            d = collections.OrderedDict(sorted(d.items(), key=lambda x: x[1]))
        elif sorting_wh == 'name':
            f = collections.OrderedDict(sorted(f.items(), key=lambda x: x[0]))
            d = collections.OrderedDict(sorted(d.items(), key=lambda x: x[0]))

        # Build the file-manager breadcrumb: strip the resources root from
        # the current path and split the remainder into its components.
        # NOTE(review): RESOURCES_PATH is used as a regex pattern here;
        # assumed to contain no special regex characters — confirm.
        p = wh.CURRENT_PATH
        path_to_eliminate = gwh.RESOURCES_PATH
        p = re.sub(path_to_eliminate, '', p)
        p = p.split("/")

        self.job_data.data = [f, d, p, gaia, hsa, sim, results_data, user_data]
        self.job_data.ok = (pg.num_pages, wh.CURRENT_RESOURCES_PAGE,
                            wh.CURRENT_RESOURCES_PAGE + 1,
                            wh.CURRENT_RESOURCES_PAGE - 1)
        return self.job_data
    def start(self, data):
        """This method runs the get_pipeline_status job.

        This method will retrieve the algorithm_models and it will sort them
        by the current sorting method provided by the wh_frontend_config
        warehouse.

        Then it will paginate the results with current page retrieved from
        the wh_frontend_config warehouse.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The job_data attribute. The ok attribute provides the pages information.

        @see algorithm_model @link avi.models.algorithm_model
        @see wh_frontend_config @link avi.warehouse.wh_frontend_config
        """
        log = logger().get_log("get_pipeline_status")
        wh = wh_frontend_config().get()

        # Map the front-end sorting selection to a Django order_by clause;
        # the default is ascending start time.
        sorting_wh = wh.SORTING_EXEC_BY
        order_by = 'request__pipeline_state__started_time'
        if sorting_wh == 'name':
            order_by = 'alg_name'
        elif sorting_wh == '-name':
            order_by = '-alg_name'
        elif sorting_wh == '-date':
            order_by = '-request__pipeline_state__started_time'
        elif sorting_wh == 'status':
            order_by = 'request__pipeline_state__state'
        elif sorting_wh == '-status':
            order_by = '-request__pipeline_state__state'

        all_ms = algorithm_model.objects.all().order_by(order_by, 'pk')

        self.job_data.data = {}
        self.job_data.ok = all_ms is not None
        if not all_ms:
            self.job_data.ok = False
            return self.job_data

        pg = Paginator(all_ms, wh.MAX_EXEC_PER_PAGE)
        page = wh.CURRENT_EXEC_PAGE

        # Summary of every execution (all pages), keyed by position.
        alldata = {}
        k = 0
        for h in pg.object_list:
            # Older models expose pipe_state; newer ones pipeline_state.
            try:
                status = h.request.pipe_state.state
                date = h.request.pipe_state.started_time
            except AttributeError:
                status = h.request.pipeline_state.state
                date = h.request.pipeline_state.started_time
            params = {'algorithm': {'name': h.alg_name, 'params': {}}}
            ainfo_ms = algorithm_info_model.objects.filter(name=h.alg_name)[0]
            if ainfo_ms:
                # Translate stored parameter names to their view names
                # using the algorithm definition file.
                qparams = literal_eval(h.params)
                mng = algorithm_manager()
                ainfo = mng.get_info(ainfo_ms.definition_file, 'input')
                for field in ainfo:
                    entry = ainfo[field]
                    view_key = entry['view_name'] if 'view_name' in entry \
                        else entry['name']
                    params['algorithm']['params'][view_key] = \
                        qparams['algorithm']['params'][entry['name']]
                params = str(params)
            else:
                params = h.params
            alldata[k] = (h.alg_name, h.pk, params, date, status)
            log.info(params)
            k += 1

        # Clamp the requested page into [1, num_pages] before paging.
        if page < 1:
            wh.CURRENT_EXEC_PAGE = 1
        elif page > pg.num_pages:
            wh.CURRENT_EXEC_PAGE = pg.num_pages

        ms = pg.page(wh.CURRENT_EXEC_PAGE)

        self.job_data.data = {}
        self.job_data.ok = ms is not None
        if not ms:
            self.job_data.ok = False
            return self.job_data

        data = {}
        i = 0
        for j in ms:
            name = j.alg_name
            params = {'algorithm': {'name': j.alg_name, 'params': {}}}
            # BUG FIX: 'error' used to be assigned only inside the except
            # branch, so on the pipe_state path it was either unbound
            # (NameError) or stale from a previous iteration. Reset it
            # per item; an empty error is reported as "OK" below.
            error = ""
            try:
                status = j.request.pipe_state.state
                date = j.request.pipe_state.started_time
            except AttributeError:
                status = j.request.pipeline_state.state
                date = j.request.pipeline_state.started_time
                error = j.request.pipeline_state.exception
                # Keep only the text after the last "Exception: " marker.
                pos = error.rfind("Exception: ")
                error = error[pos + 11:]
                ainfo_ms = algorithm_info_model.objects.filter(
                    name=j.alg_name)[0]
                if ainfo_ms:
                    # Same name translation as above, but values are
                    # stringified here (kept as in the original).
                    qparams = literal_eval(j.params)
                    mng = algorithm_manager()
                    ainfo = mng.get_info(ainfo_ms.definition_file, 'input')
                    for field in ainfo:
                        entry = ainfo[field]
                        view_key = entry['view_name'] \
                            if 'view_name' in entry else entry['name']
                        params['algorithm']['params'][view_key] = str(
                            qparams['algorithm']['params'][entry['name']])
                    params = str(params)
                else:
                    params = j.params
            if not error or error == "":
                error = "OK"
            if j.is_aborted:
                error = "Aborted"
            data[i] = (name, status, date, error, j.pk, params)
            i += 1

        self.job_data.ok = (pg.num_pages, wh.CURRENT_EXEC_PAGE,
                            wh.CURRENT_EXEC_PAGE + 1, wh.CURRENT_EXEC_PAGE - 1,
                            alldata, wh.SORTING_EXEC_BY)
        self.job_data.data = data
        return self.job_data
Exemple #20
0
 def get_file_list(self):
     """Deprecated: return the file list for the current front-end path."""
     from avi.utils.resources_manager import resources_manager
     current_path = wh_frontend_config().get().CURRENT_PATH
     return resources_manager().get_file_list(current_path)
Exemple #21
0
    def start(self, data):
        """This method runs the get_algorithms job.

        If the algorithms are not loaded it will load them. Then it will
        retrieve all the algorithm_info_models, group them by their
        algorithm group and return the grouped, position-sorted data.

        Args:
        self: The object pointer.
        data: A dictionary containing the input data for the job.

        Returns:
        The job_data attribute. The ok attribute will be True if there are
        algorithms retrieved, False otherwise.

        @see algorithm_info_model @link avi.models.algorithm_info_model
        """
        log = logger().get_log('algorithm_manager')

        wh_f = wh_frontend_config().get()

        # Lazy one-time initialization of the algorithm definitions.
        if not wh().get().ALGORITHMS_LOADED:
            from avi.core.algorithm.algorithm_manager import algorithm_manager
            algorithm_manager().init()
            wh().get().ALGORITHMS_LOADED = True

        from avi.models import algorithm_info_model, algorithm_group_model
        all_ms = algorithm_info_model.objects.all().order_by(
            'name_view', 'name', 'pk')

        self.job_data.data = {}
        self.job_data.ok = all_ms is not None
        if not all_ms:
            return self.job_data

        # Clamp the current page for the algorithms paginator.
        pg = Paginator(all_ms, wh_f.MAX_ALG_PER_PAGE)
        page = wh_f.CURRENT_ALG_PAGE
        if page < 1:
            wh_f.CURRENT_ALG_PAGE = 1
        elif page > pg.num_pages:
            wh_f.CURRENT_ALG_PAGE = pg.num_pages

        ms = pg.page(wh_f.CURRENT_ALG_PAGE)

        all_ms_g = algorithm_group_model.objects.all().order_by(
            'name_view', 'name', 'pk')
        pg_g = Paginator(all_ms_g, wh_f.MAX_ALG_PER_PAGE)
        page = wh_f.CURRENT_ALG_PAGE
        if page < 1:
            wh_f.CURRENT_ALG_PAGE = 1
        # BUG FIX: this clamp compared against pg.num_pages (the algorithms
        # paginator) but pg_g.page() below needs pg_g.num_pages, otherwise
        # it can raise EmptyPage when the group paginator has fewer pages.
        elif page > pg_g.num_pages:
            wh_f.CURRENT_ALG_PAGE = pg_g.num_pages
        ms_g = pg_g.page(wh_f.CURRENT_ALG_PAGE)

        # One entry per group, ordered by the group position.
        data = []
        for g in ms_g:
            data.append({"group": g, "algorithms": []})
        data.sort(key=lambda x: x["group"].position, reverse=False)

        # Assign each algorithm to its group. Tuple layout:
        # (pk, name, name_view, algorithm_type, algorithm_group, position)
        for j in ms:
            for g in data:
                if j.algorithm_group == g["group"].name:
                    g["algorithms"].append(
                        (j.pk, j.name, j.name_view, j.algorithm_type,
                         j.algorithm_group, j.position))
        # BUG FIX: the sort key used index 4 (algorithm_group), which is
        # identical for every entry within a group and so sorted nothing;
        # index 5 is the position field, matching the group ordering above.
        for g in data:
            g["algorithms"].sort(key=lambda x: x[5], reverse=False)

        for g in data:
            log.info(g["group"].name)
            log.info(g["group"].position)
            for a in g["algorithms"]:
                log.info(a[5])
                log.info(a[1])

        res = {}
        res["algorithms"] = data
        res["max_pages"] = pg.num_pages
        res["current_page"] = wh_f.CURRENT_ALG_PAGE
        res["next_page"] = wh_f.CURRENT_ALG_PAGE + 1
        res["prev_page"] = wh_f.CURRENT_ALG_PAGE - 1
        self.job_data.data = res
        return self.job_data
Exemple #22
0
    def get_algorithm_list(self):
        """Deprecated

        Returns the algorithm list.

        This method scans the algorithm path for *.json definition files
        that have a matching *.py implementation next to them, validates
        each JSON and collects the algorithm information.

        Args:
        self: The object pointer.

        Returns:
        A dictionary with the information of all the algorithms under the
        "alg_data" key, keyed by algorithm name.
        """
        path = wh_global_config().get().ALGORITHM_PATH
        self.log.info("Reading algorithms data from %s", path)
        data = {}
        jm = json_manager()
        for f in os.listdir(path):
            if not f.endswith(".json"):
                continue
            self.log.info("File %s", f)
            # A definition is valid only if a .py implementation with the
            # same base name exists next to the .json file.
            name, fext = os.path.splitext(f)
            name = name + ".py"
            self.log.info("Checking file %s", name)
            # NOTE(review): assumes ALGORITHM_PATH ends with a separator;
            # os.path.join would be safer — kept as-is for compatibility.
            if not os.path.isfile(path + name):
                continue
            self.log.info("Algorithm file found, reading data file now")
            jm.load(path + f)
            self.log.info("Data loaded: %s", jm.json_root)
            if not self.__check_json(jm.json_root):
                continue
            self.log.info("JSON checked correctly")
            alg_name = jm.json_root['algorithm']['name']
            data[alg_name] = jm.json_root['algorithm']

        # Removed: an unused sample literal ("data2"), unused pagination
        # locals, and an unreachable second return after the first one.
        return {"alg_data": data}