Example #1
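The publishDataset method below reads a framework job report (fjr), imports any parent datasets from the global DBS into the local DBS when needed, then registers the primary and processed datasets for every file listed in the report, returning '0' on success and '1' on failure.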
    def publishDataset(self,file):
        """
        """
        try:
            jobReport = readJobReport(file)[0]
            self.exit_status = '0'
        except IndexError:
            self.exit_status = '1'
            msg = "Error: Problem with "+file+" file"
            common.logger.info(msg)
            return self.exit_status

        if (len(self.dataset_to_import) != 0):
            for dataset in self.dataset_to_import:
                common.logger.info("--->>> Importing parent dataset in the dbs: " + dataset)
                status_import = self.importParentDataset(self.globalDBS, dataset)
                if (status_import == 1):
                    common.logger.info('Problem with parent ' + dataset + ' import from the global DBS ' + self.globalDBS + ' to the local one ' + self.DBSURL)
                    self.exit_status = '1'
                    return self.exit_status
                else:
                    common.logger.info('Import ok of dataset ' + dataset)

        
        if (len(jobReport.files) <= 0):
            self.exit_status = '1'
            msg = "Error: No EDM file to publish in the xml file " + file
            common.logger.info(msg)
            return self.exit_status
        else:
            msg = "fjr contains some files to publish"
            common.logger.debug(msg)

        #### dataset creation in DBS
        #// DBS to contact: read and write against the same DBS instance
        dbsReader = DBSReader(self.DBSURL,level='ERROR')
        dbswriter = DBSWriter(self.DBSURL)
        #####

        self.published_datasets = [] 
        for fileinfo in jobReport.files:
            datasets_info = fileinfo.dataset
            if len(datasets_info)<=0:
                self.exit_status = '1'
                msg = "Error: No info about dataset in the xml file "+file
                common.logger.info(msg)
                return self.exit_status
            else:
                for dataset in datasets_info:
                    #### for production data
                    self.processedData = dataset['ProcessedDataset']
                    if (dataset['PrimaryDataset'] == 'null'):
                        dataset['PrimaryDataset'] = self.userprocessedData
                    elif self.datasetpath.upper() != 'NONE':
                        dataset['ParentDataset']= self.datasetpath

                    dataset['PSetContent'] = self.content
                    cfgMeta = {'name' : self.pset , 'Type' : 'user' , 'annotation': 'user cfg', 'version' : 'private version'} # add real name of user cfg
                    common.logger.info("PrimaryDataset = %s"%dataset['PrimaryDataset'])
                    common.logger.info("ProcessedDataset = %s"%dataset['ProcessedDataset'])
                    common.logger.info("<User Dataset Name> = /"+dataset['PrimaryDataset']+"/"+dataset['ProcessedDataset']+"/USER")
                    
                    self.dataset_to_check="/"+dataset['PrimaryDataset']+"/"+dataset['ProcessedDataset']+"/USER"


                    self.published_datasets.append(self.dataset_to_check)

                    common.logger.log(10-1,"--->>> Inserting primary: %s processed : %s"%(dataset['PrimaryDataset'],dataset['ProcessedDataset']))
                    
                    #### check if dataset already exists in the DBS
                    result = dbsReader.matchProcessedDatasets(dataset['PrimaryDataset'], 'USER', dataset['ProcessedDataset'])
                    if (len(result) != 0):
                       result = dbsReader.listDatasetFiles(self.dataset_to_check)

                    primary = DBSWriterObjects.createPrimaryDataset( dataset, dbswriter.dbs)
                    common.logger.log(10-1,"Primary:  %s "%primary)
                    print "primary = ", primary 

                    algo = DBSWriterObjects.createAlgorithm(dataset, cfgMeta, dbswriter.dbs)
                    common.logger.log(10-1,"Algo:  %s "%algo)

                    processed = DBSWriterObjects.createProcessedDataset(primary, algo, dataset, dbswriter.dbs)
                    common.logger.log(10-1,"Processed:  %s "%processed)
                    print "processed = ", processed 

                    common.logger.log(10-1,"Inserted primary %s processed %s"%(primary,processed))
                    #######################################################################################
                
        common.logger.log(10-1,"exit_status = %s "%self.exit_status)
        return self.exit_status
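
A minimal calling sketch. The `Publisher` class name, its `cfg_params` constructor argument, and the fjr filename below are assumptions for illustration; only `publishDataset(file)`, its '0'/'1' return convention, and the `published_datasets` attribute come from the example itself:

    # Hypothetical driver: Publisher and cfg_params are assumed names,
    # not taken from the example above.
    publisher = Publisher(cfg_params)
    exit_status = publisher.publishDataset('crab_fjr_1.xml')
    if exit_status == '0':
        # published_datasets was filled while looping over the fjr files
        for name in publisher.published_datasets:
            common.logger.info('Published dataset: ' + name)
    else:
        common.logger.info('Publication failed, exit_status = ' + exit_status)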
Example #2
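The same publishDataset method as in Example #1, with identical logic but PEP 8-style line wrapping.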
    def publishDataset(self, file):
        """
        """
        try:
            jobReport = readJobReport(file)[0]
            self.exit_status = '0'
        except IndexError:
            self.exit_status = '1'
            msg = "Error: Problem with " + file + " file"
            common.logger.info(msg)
            return self.exit_status

        if (len(self.dataset_to_import) != 0):
            for dataset in self.dataset_to_import:
                common.logger.info(
                    "--->>> Importing parent dataset in the dbs: " + dataset)
                status_import = self.importParentDataset(
                    self.globalDBS, dataset)
                if (status_import == 1):
                    common.logger.info('Problem with parent ' + dataset +
                                       ' import from the global DBS ' +
                                       self.globalDBS + ' to the local one ' +
                                       self.DBSURL)
                    self.exit_status = '1'
                    return self.exit_status
                else:
                    common.logger.info('Import ok of dataset ' + dataset)

        if (len(jobReport.files) <= 0):
            self.exit_status = '1'
            msg = "Error: No EDM file to publish in xml file" + file + " file"
            common.logger.info(msg)
            return self.exit_status
        else:
            msg = "fjr contains some files to publish"
            common.logger.debug(msg)

        #### dataset creation in DBS
        #// DBS to contact: read and write against the same DBS instance
        dbsReader = DBSReader(self.DBSURL, level='ERROR')
        dbswriter = DBSWriter(self.DBSURL)
        #####

        self.published_datasets = []
        for fileinfo in jobReport.files:
            datasets_info = fileinfo.dataset
            if len(datasets_info) <= 0:
                self.exit_status = '1'
                msg = "Error: No info about dataset in the xml file " + file
                common.logger.info(msg)
                return self.exit_status
            else:
                for dataset in datasets_info:
                    #### for production data
                    self.processedData = dataset['ProcessedDataset']
                    if (dataset['PrimaryDataset'] == 'null'):
                        dataset['PrimaryDataset'] = self.userprocessedData
                    elif self.datasetpath.upper() != 'NONE':
                        dataset['ParentDataset'] = self.datasetpath

                    dataset['PSetContent'] = self.content
                    cfgMeta = {
                        'name': self.pset,
                        'Type': 'user',
                        'annotation': 'user cfg',
                        'version': 'private version'
                    }  # add real name of user cfg
                    common.logger.info("PrimaryDataset = %s" %
                                       dataset['PrimaryDataset'])
                    common.logger.info("ProcessedDataset = %s" %
                                       dataset['ProcessedDataset'])
                    common.logger.info("<User Dataset Name> = /" +
                                       dataset['PrimaryDataset'] + "/" +
                                       dataset['ProcessedDataset'] + "/USER")

                    self.dataset_to_check = "/" + dataset[
                        'PrimaryDataset'] + "/" + dataset[
                            'ProcessedDataset'] + "/USER"

                    self.published_datasets.append(self.dataset_to_check)

                    common.logger.log(
                        10 - 1, "--->>> Inserting primary: %s processed : %s" %
                        (dataset['PrimaryDataset'],
                         dataset['ProcessedDataset']))

                    #### check if dataset already exists in the DBS
                    result = dbsReader.matchProcessedDatasets(
                        dataset['PrimaryDataset'], 'USER',
                        dataset['ProcessedDataset'])
                    if (len(result) != 0):
                        result = dbsReader.listDatasetFiles(
                            self.dataset_to_check)

                    primary = DBSWriterObjects.createPrimaryDataset(
                        dataset, dbswriter.dbs)
                    common.logger.log(10 - 1, "Primary:  %s " % primary)
                    print "primary = ", primary

                    algo = DBSWriterObjects.createAlgorithm(
                        dataset, cfgMeta, dbswriter.dbs)
                    common.logger.log(10 - 1, "Algo:  %s " % algo)

                    processed = DBSWriterObjects.createProcessedDataset(
                        primary, algo, dataset, dbswriter.dbs)
                    common.logger.log(10 - 1, "Processed:  %s " % processed)
                    print "processed = ", processed

                    common.logger.log(
                        10 - 1, "Inserted primary %s processed %s" %
                        (primary, processed))
                    #######################################################################################

        common.logger.log(10 - 1, "exit_status = %s " % self.exit_status)
        return self.exit_status