def setDataset(self, datasetName, primaryType, datasetType,
               physicsGroup=None, overwrite=False, valid=1):
    """
    _setDataset_

    Set all the information concerning a single dataset, including
    the primary, processed and tier info.
    """
    if self.hasDataset() and not overwrite:
        # Do nothing, we already have a dataset
        return

    Lexicon.primaryDatasetType(primaryType)

    if datasetType not in ['VALID', 'PRODUCTION', 'INVALID', 'DEPRECATED', 'DELETED']:
        msg = "Invalid processedDatasetType %s\n" % datasetType
        logging.error(msg)
        raise DBSBlockException(msg)

    try:
        if datasetName[0] == '/':
            junk, primary, processed, tier = datasetName.split('/')
        else:
            primary, processed, tier = datasetName.split('/')
    except Exception as ex:
        msg = "Invalid dataset name %s" % datasetName
        logging.error(msg)
        raise DBSBlockException(msg)
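# Illustrative sketch, not part of the original module: the try/except above
# parses the canonical "/Primary/Processed/Tier" dataset name. The helper
# below reproduces that split logic standalone; the name splitDatasetName is
# hypothetical and used only for demonstration.
def splitDatasetName(datasetName):
    # A leading '/' yields an empty first element from split('/'),
    # which the original code discards as 'junk'.
    if datasetName[0] == '/':
        _, primary, processed, tier = datasetName.split('/')
    else:
        primary, processed, tier = datasetName.split('/')
    return primary, processed, tier

# Example: splitDatasetName('/Cosmics/Run2012A-v1/RAW')
# returns ('Cosmics', 'Run2012A-v1', 'RAW').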
def setDataset(self, datasetName, primaryType, datasetType,
               physicsGroup=None, prep_id=None, overwrite=False):
    """
    _setDataset_

    Set all the information concerning a single dataset, including
    the primary, processed and tier info.
    """
    if self.getDataset() is not None and not overwrite:
        # Do nothing, we already have a dataset
        return

    Lexicon.primaryDatasetType(primaryType)

    if datasetType not in ['VALID', 'PRODUCTION', 'INVALID', 'DEPRECATED', 'DELETED']:
        msg = "Invalid processedDatasetType %s\n" % datasetType
        logging.error(msg)
        raise DBSBufferBlockException(msg)

    try:
        if datasetName[0] == '/':
            _, primary, processed, tier = datasetName.split('/')
        else:
            primary, processed, tier = datasetName.split('/')
    except Exception:
        msg = "Invalid dataset name %s" % datasetName
        logging.error(msg)
        raise DBSBufferBlockException(msg)

    # Do the primary dataset
    self.data['primds']['primary_ds_name'] = primary
    self.data['primds']['primary_ds_type'] = primaryType
    self.data['primds']['create_by'] = "WMAgent"
    self.data['primds']['creation_date'] = int(time.time())

    # Do the processed
    self.data['dataset']['physics_group_name'] = physicsGroup
    self.data['dataset']['processed_ds_name'] = processed
    self.data['dataset']['data_tier_name'] = tier
    self.data['dataset']['dataset_access_type'] = datasetType
    self.data['dataset']['dataset'] = datasetName
    self.data['dataset']['prep_id'] = prep_id

    # Add misc meta data.
    self.data['dataset']['create_by'] = "WMAgent"
    self.data['dataset']['last_modified_by'] = "WMAgent"
    self.data['dataset']['creation_date'] = int(time.time())
    self.data['dataset']['last_modification_date'] = int(time.time())
    return
def setDataset(self, datasetName, primaryType, datasetType,
               physicsGroup=None, prep_id=None, overwrite=False):
    """
    _setDataset_

    Set all the information concerning a single dataset, including
    the primary, processed and tier info.
    """
    if self.getDataset() is not None and not overwrite:
        # Do nothing, we already have a dataset
        return

    Lexicon.primaryDatasetType(primaryType)

    if datasetType not in ['VALID', 'PRODUCTION', 'INVALID', 'DEPRECATED', 'DELETED']:
        msg = "Invalid processedDatasetType %s\n" % datasetType
        logging.error(msg)
        raise DBSBufferBlockException(msg)

    try:
        if datasetName[0] == '/':
            junk, primary, processed, tier = datasetName.split('/')
        else:
            primary, processed, tier = datasetName.split('/')
    except Exception:
        msg = "Invalid dataset name %s" % datasetName
        logging.error(msg)
        raise DBSBufferBlockException(msg)

    # Do the primary dataset
    self.data['primds']['primary_ds_name'] = primary
    self.data['primds']['primary_ds_type'] = primaryType
    self.data['primds']['create_by'] = "WMAgent"
    self.data['primds']['creation_date'] = int(time.time())

    # Do the processed
    self.data['dataset']['physics_group_name'] = physicsGroup
    self.data['dataset']['processed_ds_name'] = processed
    self.data['dataset']['data_tier_name'] = tier
    self.data['dataset']['dataset_access_type'] = datasetType
    self.data['dataset']['dataset'] = datasetName
    self.data['dataset']['prep_id'] = prep_id

    # Add misc meta data.
    self.data['dataset']['create_by'] = "WMAgent"
    self.data['dataset']['last_modified_by'] = "WMAgent"
    self.data['dataset']['creation_date'] = int(time.time())
    self.data['dataset']['last_modification_date'] = int(time.time())
    return
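# Illustrative usage sketch, not part of the original module. It assumes the
# enclosing class stores its payload in self.data with 'primds' and 'dataset'
# sub-dictionaries and exposes getDataset(), and that this module's imports
# (time, logging, Lexicon) are in scope. FakeBlock is a hypothetical stand-in
# for the real block class, used only to show the call pattern.
class FakeBlock(object):
    def __init__(self):
        self.data = {'primds': {}, 'dataset': {}}

    def getDataset(self):
        # None until setDataset() has filled in the dataset record
        return self.data['dataset'].get('dataset')

FakeBlock.setDataset = setDataset  # attach the method defined above

block = FakeBlock()
block.setDataset(datasetName='/MinBias/Summer12-v1/GEN-SIM',
                 primaryType='mc',
                 datasetType='VALID',
                 physicsGroup='NoGroup')
# block.data['dataset']['dataset'] is now '/MinBias/Summer12-v1/GEN-SIM'
# and block.data['primds']['primary_ds_name'] is 'MinBias'.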
def createPrimaryDataset(primaryName, primaryDatasetType='mc', apiRef=None):
    """
    _createPrimaryDataset_

    Build a DbsPrimaryDataset object and, if an API reference is given,
    insert it into DBS.
    """
    logging.debug("Inserting PrimaryDataset %s with Type %s"
                  % (primaryName, primaryDatasetType))
    Lexicon.primaryDatasetType(primaryDatasetType)

    primary = DbsPrimaryDataset(Name=primaryName, Type=primaryDatasetType)

    if apiRef:
        try:
            apiRef.insertPrimaryDataset(primary)
        except DbsException as ex:
            msg = "Error in DBSInterface.createPrimaryDataset(%s)\n" % primaryName
            msg += formatEx(ex)
            logging.error(msg)
            raise DBSInterfaceError(msg)
def createPrimaryDataset(primaryName, primaryDatasetType='mc', apiRef=None):
    """
    _createPrimaryDataset_

    Build a DbsPrimaryDataset object and, if an API reference is given,
    insert it into DBS.
    """
    logging.debug("Inserting PrimaryDataset %s with Type %s"
                  % (primaryName, primaryDatasetType))
    Lexicon.primaryDatasetType(primaryDatasetType)

    primary = DbsPrimaryDataset(Name=primaryName, Type=primaryDatasetType)

    if apiRef:
        try:
            apiRef.insertPrimaryDataset(primary)
        except DbsException as ex:
            msg = "Error in DBSInterface.createPrimaryDataset(%s)\n" % primaryName
            msg += formatEx(ex)
            logging.error(msg)
            raise DBSInterfaceError(msg)
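# Illustrative usage sketch, not part of the original module. It assumes the
# DBS client classes (DbsPrimaryDataset, DbsException) and Lexicon are
# importable as they are for this module; StubDbsApi is a hypothetical object
# standing in for the real DBS API reference and only needs to expose
# insertPrimaryDataset(), the single call the function makes on it.
class StubDbsApi(object):
    def insertPrimaryDataset(self, primary):
        print("Would insert primary dataset: %s" % primary)

# Validates the type via Lexicon, builds the DbsPrimaryDataset and, since an
# apiRef is supplied, hands it to insertPrimaryDataset().
createPrimaryDataset(primaryName='MinBias',
                     primaryDatasetType='mc',
                     apiRef=StubDbsApi())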
def setDataset(self, datasetName, primaryType, datasetType,
               physicsGroup=None, prep_id=None, overwrite=False, valid=1):
    """
    _setDataset_

    Set all the information concerning a single dataset, including
    the primary, processed and tier info.
    """
    if self.hasDataset() and not overwrite:
        # Do nothing, we already have a dataset
        return

    Lexicon.primaryDatasetType(primaryType)

    if datasetType not in ['VALID', 'PRODUCTION', 'INVALID', 'DEPRECATED', 'DELETED']:
        msg = "Invalid processedDatasetType %s\n" % datasetType
        logging.error(msg)
        raise DBSBlockException(msg)

    try:
        if datasetName[0] == '/':
            junk, primary, processed, tier = datasetName.split('/')
        else:
            primary, processed, tier = datasetName.split('/')
    except Exception as ex:
        msg = "Invalid dataset name %s" % datasetName
        logging.error(msg)
        raise DBSBlockException(msg)