def __init__(self, config=None):
    """
    Set up the storage, site, dataset and popularity managers.

    :param config: parsed configuration dict, handed unchanged to every
        manager. Defaults to an empty dict.
    """
    # None sentinel instead of a mutable default argument (a default
    # dict() would be shared across all instances).
    if config is None:
        config = dict()
    self.logger = logging.getLogger(__name__)
    self.config = config
    self.storage = StorageManager(self.config)
    self.sites = SiteManager(self.config)
    self.datasets = DatasetManager(self.config)
    self.popularity = PopularityManager(self.config)
def test_managers(self): "Test managers" # sites = SiteManager(config=self.config) # sites.initiate_db() # sites.update_db() # sites.update_cpu() datasets = DatasetManager(config=self.config) # datasets.initiate_db() # datasets.update_db() replicas = datasets.get_current_num_replicas()
def __init__(self, config=None):
    """
    Set up managers and ranking parameters.

    :param config: parsed configuration dict; must provide
        config['rocker_board']['max_replicas'] and
        config['threading']['max_threads'].
    """
    # None sentinel instead of a mutable default argument (a default
    # dict() would be shared across all instances).
    if config is None:
        config = dict()
    self.logger = logging.getLogger(__name__)
    self.config = config
    self.sites = SiteManager(self.config)
    self.datasets = DatasetManager(self.config)
    self.popularity = PopularityManager(self.config)
    self.storage = StorageManager(self.config)
    # Read via self.config for consistency with the sibling __init__s
    # in this file (same object, so behavior is unchanged).
    self.max_replicas = int(self.config['rocker_board']['max_replicas'])
    self.MAX_THREADS = int(self.config['threading']['max_threads'])
    # Filled by the popularity workers; maps dataset name -> popularity.
    self.dataset_popularity = dict()
def __init__(self, config=dict()):
    """
    Wire up the external services (PhEDEx, MIT DB) and managers used by
    the rocker-board balancing algorithm.

    :param config: parsed configuration dict; must contain
        config['rocker_board']['max_gb'].
    """
    # NOTE(review): config=dict() is a mutable default argument shared
    # across calls; safe only while nothing mutates it.
    self.logger = logging.getLogger(__name__)
    self.config = config
    self.phedex = PhEDExService(self.config)
    self.mit_db = MITDBService(self.config)
    self.datasets = DatasetManager(self.config)
    self.sites = SiteManager(self.config)
    self.popularity = PopularityManager(self.config)
    self.storage = StorageManager(self.config)
    self.rankings = Ranker(self.config)
    # Upper bound (GB) on the data volume subscribed per run.
    self.max_gb = int(self.config['rocker_board']['max_gb'])
    self.csv_data = list()
def __init__(self, config=None):
    """
    Set up the popularity-DB service and supporting managers.

    :param config: parsed configuration dict; must provide
        config['threading']['max_threads'].
    """
    # None sentinel instead of a mutable default argument (a default
    # dict() would be shared across all instances).
    if config is None:
        config = dict()
    self.logger = logging.getLogger(__name__)
    self.config = config
    self.pop_db = PopDBService(self.config)
    self.sites = SiteManager(self.config)
    self.datasets = DatasetManager(self.config)
    self.storage = StorageManager(self.config)
    # Read via self.config for consistency with the sibling __init__s.
    self.MAX_THREADS = int(self.config['threading']['max_threads'])
def __init__(self, config=dict()):
    """
    Build every collaborator the rocker-board algorithm relies on and
    read the per-run subscription budget from the configuration.
    """
    cfg = config
    self.logger = logging.getLogger(__name__)
    self.config = cfg
    self.phedex = PhEDExService(cfg)
    self.mit_db = MITDBService(cfg)
    self.datasets = DatasetManager(cfg)
    self.sites = SiteManager(cfg)
    self.popularity = PopularityManager(cfg)
    self.storage = StorageManager(cfg)
    self.rankings = Ranker(cfg)
    # Maximum number of gigabytes to subscribe in one run.
    self.max_gb = int(self.config['rocker_board']['max_gb'])
    self.csv_data = []
class Initiate(object):
    """
    Initiate Database
    """
    def __init__(self, config=None):
        """
        Set up the site, dataset and popularity managers.

        :param config: parsed configuration dict passed to each manager.
        """
        # None sentinel instead of a mutable default argument (a default
        # dict() would be shared across all instances).
        if config is None:
            config = dict()
        self.logger = logging.getLogger(__name__)
        self.config = config
        self.sites = SiteManager(self.config)
        self.datasets = DatasetManager(self.config)
        self.popularity = PopularityManager(self.config)

    def start(self):
        """
        Begin Initiating Database

        Initializes the sites, datasets and popularity stores in order,
        then logs the total wall-clock duration.
        """
        t1 = datetime.datetime.utcnow()
        self.sites.initiate_db()
        self.datasets.initiate_db()
        self.popularity.initiate_db()
        t2 = datetime.datetime.utcnow()
        td = t2 - t1
        self.logger.info('Initiate took %s', str(td))
def __init__(self, config=dict()):
    """
    Set up managers, paths and classifier containers for the 'generic'
    prediction model.

    :param config: parsed configuration dict; must provide
        config['rocker_board']['max_replicas'], config['paths']['data']
        and config['tools']['valid_tiers'] (comma-separated string).
    """
    # NOTE(review): config=dict() is a mutable default argument shared
    # across calls; safe only while nothing mutates it.
    self.logger = logging.getLogger(__name__)
    self.config = config
    self.sites = SiteManager(self.config)
    self.datasets = DatasetManager(self.config)
    self.popularity = PopularityManager(self.config)
    self.storage = StorageManager(self.config)
    self.max_replicas = int(config['rocker_board']['max_replicas'])
    # Model identifier; presumably overridden by subclasses — confirm.
    self.name = 'generic'
    self.data_path = self.config['paths']['data']
    # Valid data tiers, e.g. parsed from a comma-separated config value.
    self.data_tiers = config['tools']['valid_tiers'].split(',')
    # Per-dataset caches: preprocessed features and fitted classifiers
    # (trend and average).
    self.preprocessed_data = dict()
    self.clf_trend = dict()
    self.clf_avg = dict()
class UpdateDB(object):
    """
    Update DB with new dataset and site data
    """
    def __init__(self, config=None):
        """
        Set up the storage, site, dataset and popularity managers.

        :param config: parsed configuration dict passed to each manager.
        """
        # None sentinel instead of a mutable default argument (a default
        # dict() would be shared across all instances).
        if config is None:
            config = dict()
        self.logger = logging.getLogger(__name__)
        self.config = config
        self.storage = StorageManager(self.config)
        self.sites = SiteManager(self.config)
        self.datasets = DatasetManager(self.config)
        self.popularity = PopularityManager(self.config)

    def start(self):
        """
        Begin Database Update

        Refreshes sites, datasets and popularity in order, then logs the
        total wall-clock duration.
        """
        t1 = datetime.datetime.utcnow()
        self.sites.update_db()
        self.datasets.update_db()
        self.popularity.update_db()
        t2 = datetime.datetime.utcnow()
        td = t2 - t1
        self.logger.info('Update DB took %s', str(td))
class UpdateDB(object):
    """
    Update DB with new dataset and site data
    """

    def __init__(self, config=dict()):
        """Create the storage, site, dataset and popularity managers."""
        self.logger = logging.getLogger(__name__)
        self.config = config
        cfg = self.config
        self.storage = StorageManager(cfg)
        self.sites = SiteManager(cfg)
        self.datasets = DatasetManager(cfg)
        self.popularity = PopularityManager(cfg)

    def start(self):
        """
        Begin Database Update

        Runs the three update passes (sites, datasets, popularity) and
        logs how long the whole refresh took.
        """
        started_at = datetime.datetime.utcnow()
        self.sites.update_db()
        self.datasets.update_db()
        self.popularity.update_db()
        finished_at = datetime.datetime.utcnow()
        elapsed = finished_at - started_at
        self.logger.info("Update DB took %s", str(elapsed))
class RockerBoard(object):
    """
    RockerBoard is a system balancing algorithm using popularity metrics to predict popularity
    and make appropriate replications to keep the system balanced
    """

    def __init__(self, config=dict()):
        """
        Wire up the external services (PhEDEx, MIT DB) and managers used
        by the balancing algorithm.

        :param config: parsed configuration dict; must contain
            config["rocker_board"]["max_gb"].
        """
        # NOTE(review): config=dict() is a mutable default argument
        # shared across calls; safe only while nothing mutates it.
        self.logger = logging.getLogger(__name__)
        self.config = config
        self.phedex = PhEDExService(self.config)
        self.mit_db = MITDBService(self.config)
        self.datasets = DatasetManager(self.config)
        self.sites = SiteManager(self.config)
        self.popularity = PopularityManager(self.config)
        self.storage = StorageManager(self.config)
        self.rankings = Ranker(self.config)
        # Upper bound (GB) on the data volume subscribed per run.
        self.max_gb = int(self.config["rocker_board"]["max_gb"])
        self.csv_data = list()

    def start(self, date=datetime_day(datetime.datetime.utcnow())):
        """
        Begin Rocker Board Algorithm

        Ranks datasets and sites, converts dataset ranks into replica
        deltas, creates subscriptions and submits them to PhEDEx.
        NOTE(review): the default `date` is evaluated once at import
        time, not per call.
        """
        t1 = datetime.datetime.utcnow()
        # Get goals
        dataset_rankings = self.rankings.get_dataset_rankings(date)
        site_rankings = self.rankings.get_site_rankings(date)
        self.change_dataset_rankings(dataset_rankings)
        subscriptions = self.replicate(dataset_rankings, site_rankings)
        self.logger.info("SUBSCRIPTIONS")
        for subscription in subscriptions:
            self.logger.info("site: %s\tdataset: %s", subscription[1], subscription[0])
        # Deletion pass currently disabled.
        # site_storage = self.rankings.get_site_storage_rankings(subscriptions)
        # deletions = self.clean(dataset_rankings, site_storage)
        # self.logger.info('DELETIONS')
        # for deletion in deletions:
        #     self.logger.info('site: %s\tdataset: %s', deletion[1], deletion[0])
        # self.delete(deletions)
        self.subscribe(subscriptions)
        # self.datasets.update_replicas(subscriptions, deletions)
        t2 = datetime.datetime.utcnow()
        td = t2 - t1
        self.logger.info("Rocker Board took %s", str(td))

    def change_dataset_rankings(self, dataset_rankings):
        """
        Change the ranks from being the target number of replicas to being the change
        in number of replicas required to reach the goal

        :param dataset_rankings: dict {dataset_name: target_replicas},
            mutated in place.
        """
        current_replicas = self.datasets.get_current_num_replicas()
        for dataset in current_replicas:
            dataset_rankings[dataset["name"]] -= dataset["n_replicas"]

    def replicate(self, dataset_rankings, site_rankings):
        """
        Balance system by creating new replicas based on popularity

        Repeatedly takes the highest-ranked dataset, picks a hosting
        site by weighted random choice among sites with enough storage
        that do not already hold a replica, and stops once max_gb is
        reached or no candidate remains. Mutates both ranking dicts.

        :returns: list of (dataset_name, site_name) subscriptions.
        """
        subscriptions = list()
        subscribed_gb = 0
        sites_available_storage_gb = self.sites.get_all_available_storage()
        while (subscribed_gb < self.max_gb) and site_rankings:
            # Per-dataset working copy so candidate sites can be pruned
            # without touching the global site rankings.
            tmp_site_rankings = dict()
            for k, v in site_rankings.items():
                tmp_site_rankings[k] = v
            dataset = max(dataset_rankings.iteritems(), key=operator.itemgetter(1))
            dataset_name = dataset[0]
            dataset_rank = dataset[1]
            # Stop when no dataset still needs an extra replica.
            if (not dataset_name) or (dataset_rank < 1):
                break
            size_gb = self.datasets.get_size(dataset_name)
            # Sites that already hold a replica are not candidates.
            unavailable_sites = set(self.datasets.get_sites(dataset_name))
            for site_name in tmp_site_rankings.keys():
                if (self.sites.get_available_storage(site_name) < size_gb) or (tmp_site_rankings[site_name] <= 0):
                    unavailable_sites.add(site_name)
            for site_name in unavailable_sites:
                try:
                    del tmp_site_rankings[site_name]
                except:
                    # NOTE(review): bare except; only KeyError expected.
                    continue
            if not tmp_site_rankings:
                # Dataset cannot be placed anywhere; drop it.
                del dataset_rankings[dataset_name]
                continue
            site_name = weighted_choice(tmp_site_rankings)
            subscription = (dataset_name, site_name)
            subscriptions.append(subscription)
            subscribed_gb += size_gb
            sites_available_storage_gb[site_name] -= size_gb
            self.logger.info("%s : added", dataset_name)
            # Full sites leave the candidate pool entirely.
            if sites_available_storage_gb[site_name] <= 0:
                del site_rankings[site_name]
            dataset_rankings[dataset_name] -= 1
        self.logger.info("Subscribed %dGB", subscribed_gb)
        return subscriptions

    def clean(self, dataset_rankings, site_rankings):
        """
        Suggest deletions based on dataset and site rankings

        Repeatedly takes the lowest-ranked dataset and removes one of
        its replicas from a weighted-randomly chosen hosting site, until
        no site remains in site_rankings. Mutates both ranking dicts.

        :returns: list of (dataset_name, site_name) deletions.
        """
        deletions = list()
        deleted_gb = 0
        while site_rankings:
            tmp_site_rankings = dict()
            dataset = min(dataset_rankings.iteritems(), key=operator.itemgetter(1))
            dataset_name = dataset[0]
            size_gb = self.datasets.get_size(dataset_name)
            available_sites = set(self.datasets.get_sites(dataset_name))
            for site_name in available_sites:
                try:
                    tmp_site_rankings[site_name] = site_rankings[site_name]
                except:
                    # NOTE(review): bare except; only KeyError expected
                    # (hosting site not present in site_rankings).
                    continue
            if not tmp_site_rankings:
                # No deletable replica for this dataset; drop it.
                del dataset_rankings[dataset_name]
                continue
            site_name = weighted_choice(tmp_site_rankings)
            deletion = (dataset_name, site_name)
            deletions.append(deletion)
            deleted_gb += size_gb
            site_rankings[site_name] -= size_gb
            dataset_rankings[dataset_name] += 1
            if site_rankings[site_name] <= 0:
                del site_rankings[site_name]
        self.logger.info("Deleted %dGB", deleted_gb)
        return deletions

    def subscribe(self, subscriptions):
        """
        Make subscriptions to phedex
        subscriptions = [(dataset_name, site_name), ...]
        """
        # Group dataset names by destination site for one request each.
        new_subscriptions = dict()
        for subscription in subscriptions:
            dataset_name = subscription[0]
            site_name = subscription[1]
            try:
                new_subscriptions[site_name].append(dataset_name)
            except:
                # First dataset for this site (KeyError expected).
                new_subscriptions[site_name] = list()
                new_subscriptions[site_name].append(dataset_name)
        for site_name, dataset_names in new_subscriptions.items():
            data = self.phedex.generate_xml(dataset_names)
            comments = (
                "This dataset is predicted to become popular and has therefore been automatically replicated by cuadrnt"
            )
            api = "subscribe"
            params = [
                ("node", site_name),
                ("data", data),
                ("level", "dataset"),
                ("move", "n"),
                ("custodial", "n"),
                ("group", "AnalysisOps"),
                ("request_only", "n"),
                ("no_mail", "n"),
                ("comments", comments),
            ]
            json_data = self.phedex.fetch(api=api, params=params, method="post")
            # insert into db
            group_name = "AnalysisOps"
            request_id = 0
            request_type = 0
            try:
                request = json_data["phedex"]
                request_id = request["request_created"][0]["id"]
                request_created = timestamp_to_datetime(request["request_timestamp"])
            except:
                # Malformed/failed PhEDEx response; skip this site.
                self.logger.warning(
                    "Subscription did not succeed\n\tSite:%s\n\tDatasets: %s", str(site_name), str(dataset_names)
                )
                continue
            for dataset_name in dataset_names:
                # Fetch today's delta_rank for the dataset to record with
                # the request.
                coll = "dataset_rankings"
                date = datetime_day(datetime.datetime.utcnow())
                pipeline = list()
                match = {"$match": {"name": dataset_name, "date": date}}
                pipeline.append(match)
                project = {"$project": {"delta_rank": 1, "_id": 0}}
                pipeline.append(project)
                data = self.storage.get_data(coll=coll, pipeline=pipeline)
                dataset_rank = data[0]["delta_rank"]
                # Parameterized SQL; values are bound by mit_db.query.
                query = "INSERT INTO Requests(RequestId, RequestType, DatasetId, SiteId, GroupId, Rank, Date) SELECT %s, %s, Datasets.DatasetId, Sites.SiteId, Groups.GroupId, %s, %s FROM Datasets, Sites, Groups WHERE Datasets.DatasetName=%s AND Sites.SiteName=%s AND Groups.GroupName=%s"
                values = (request_id, request_type, dataset_rank, request_created, dataset_name, site_name, group_name)
                self.mit_db.query(query=query, values=values, cache=False)

    def delete(self, deletions):
        """
        Make deletions to phedex
        deletions = [(dataset_name, site_name), ...]
        """
        # Group dataset names by site for one deletion request each.
        new_deletions = dict()
        for deletion in deletions:
            dataset_name = deletion[0]
            site_name = deletion[1]
            try:
                new_deletions[site_name].append(dataset_name)
            except:
                # First dataset for this site (KeyError expected).
                new_deletions[site_name] = list()
                new_deletions[site_name].append(dataset_name)
        for site_name, dataset_names in new_deletions.items():
            data = self.phedex.generate_xml(dataset_names)
            comments = "This dataset is predicted to become less popular and has therefore been automatically deleted by cuadrnt"
            api = "delete"
            params = [
                ("node", site_name),
                ("data", data),
                ("level", "dataset"),
                ("rm_subscriptions", "y"),
                ("comments", comments),
            ]
            json_data = self.phedex.fetch(api=api, params=params, method="post")
            # insert into db
            group_name = "AnalysisOps"
            request_id = 0
            # request_type 1 marks a deletion (0 is a subscription).
            request_type = 1
            try:
                request = json_data["phedex"]
                request_id = request["request_created"][0]["id"]
                request_created = timestamp_to_datetime(request["request_timestamp"])
            except:
                # Malformed/failed PhEDEx response; skip this site.
                self.logger.warning(
                    "Deletion did not succeed\n\tSite:%s\n\tDatasets: %s", str(site_name), str(dataset_names)
                )
                continue
            for dataset_name in dataset_names:
                # Fetch today's delta_rank for the dataset to record with
                # the request.
                coll = "dataset_rankings"
                date = datetime_day(datetime.datetime.utcnow())
                pipeline = list()
                match = {"$match": {"name": dataset_name, "date": date}}
                pipeline.append(match)
                project = {"$project": {"delta_rank": 1, "_id": 0}}
                pipeline.append(project)
                data = self.storage.get_data(coll=coll, pipeline=pipeline)
                dataset_rank = data[0]["delta_rank"]
                # Parameterized SQL; values are bound by mit_db.query.
                query = "INSERT INTO Requests(RequestId, RequestType, DatasetId, SiteId, GroupId, Rank, Date) SELECT %s, %s, Datasets.DatasetId, Sites.SiteId, Groups.GroupId, %s, %s FROM Datasets, Sites, Groups WHERE Datasets.DatasetName=%s AND Sites.SiteName=%s AND Groups.GroupName=%s"
                values = (request_id, request_type, dataset_rank, request_created, dataset_name, site_name, group_name)
                self.mit_db.query(query=query, values=values, cache=False)
class Ranker(object):
    """
    Generic Ranking class
    """
    def __init__(self, config=dict()):
        """
        Set up managers and ranking parameters.

        :param config: parsed configuration dict; must provide
            config['rocker_board']['max_replicas'] and
            config['threading']['max_threads'].
        """
        # NOTE(review): config=dict() is a mutable default argument
        # shared across calls; safe only while nothing mutates it.
        self.logger = logging.getLogger(__name__)
        self.config = config
        self.sites = SiteManager(self.config)
        self.datasets = DatasetManager(self.config)
        self.popularity = PopularityManager(self.config)
        self.storage = StorageManager(self.config)
        self.max_replicas = int(config['rocker_board']['max_replicas'])
        self.MAX_THREADS = int(config['threading']['max_threads'])
        # Shared result dict filled by the worker threads.
        self.dataset_popularity = dict()

    def get_dataset_rankings(self, date=datetime_day(datetime.datetime.utcnow())):
        """
        Generate dataset rankings

        Fans dataset-popularity lookups out to MAX_THREADS daemon worker
        threads, waits for the queue to drain, then normalizes the
        collected popularities into integer ranks.
        NOTE(review): the default `date` is evaluated once at import
        time; the daemon workers loop forever and are never joined.

        :returns: dict {dataset_name: rank}.
        """
        self.dataset_popularity = dict()
        dataset_names = self.datasets.get_db_datasets()
        q = Queue.Queue()
        for i in range(self.MAX_THREADS):
            worker = threading.Thread(target=self.get_dataset_popularity, args=(q,))
            worker.daemon = True
            worker.start()
        # self.dataset_features = self.popularity.get_features(dataset_names, date)
        # self.dataset_tiers = self.datasets.get_data_tiers(dataset_names)
        for dataset_name in dataset_names:
            q.put((dataset_name, date))
        q.join()
        dataset_rankings = self.normalize_popularity(date)
        return dataset_rankings

    def get_site_rankings(self, date=datetime_day(datetime.datetime.utcnow())):
        """
        Generate site rankings

        rank = (performance * has_free_storage) / popularity, where
        has_free_storage is a 0/1 flag. Each rank is also upserted into
        the 'site_rankings' collection.

        :returns: dict {site_name: rank}.
        """
        # get all sites which can be replicated to
        site_names = self.sites.get_available_sites()
        site_rankings = dict()
        for site_name in site_names:
            # get popularity
            popularity = self.get_site_popularity(site_name, date)
            # get cpu and storage (performance)
            performance = self.sites.get_performance(site_name)
            # get available storage
            available_storage_tb = self.sites.get_available_storage(site_name)/10**3
            # Collapse storage to a binary has-space flag.
            if available_storage_tb <= 0:
                available_storage_tb = 0
            else:
                available_storage_tb = 1
            #calculate rank
            try:
                rank = (performance*available_storage_tb)/popularity
            except:
                # NOTE(review): bare except; presumably guards division
                # by zero popularity.
                rank = 0.0
            # store into dict
            site_rankings[site_name] = rank
            # insert into database
            coll = 'site_rankings'
            query = {'name':site_name, 'date':date}
            data = {'$set':{'name':site_name, 'date':date, 'rank':rank, 'popularity':popularity}}
            self.storage.update_data(coll=coll, query=query, data=data, upsert=True)
        return site_rankings

    def get_dataset_popularity(self, q):
        """
        Get the estimated popularity for dataset

        Worker-thread loop: consumes (dataset_name, date) tuples from q
        forever and stores the average popularity into
        self.dataset_popularity.
        """
        while True:
            # collect features
            data = q.get()
            dataset_name = data[0]
            date = data[1]
            popularity = 0.0
            # get average
            popularity = self.popularity.get_average_popularity(dataset_name, date)
            self.dataset_popularity[dataset_name] = popularity
            q.task_done()

    def get_site_popularity(self, site_name, date=datetime_day(datetime.datetime.utcnow())):
        """
        Get popularity for site

        Sums the popularity of all datasets replicated at the site,
        using two aggregation queries ('dataset_data' for replicas,
        'dataset_rankings' for popularity).

        :returns: total popularity (float), 0.0 when no data is found.
        """
        # get all datasets with a replica at the site and how many replicas it has
        coll = 'dataset_data'
        pipeline = list()
        match = {'$match':{'replicas':site_name}}
        pipeline.append(match)
        project = {'$project':{'name':1, '_id':0}}
        pipeline.append(project)
        data = self.storage.get_data(coll=coll, pipeline=pipeline)
        popularity = 0.0
        dataset_names = [dataset_data['name'] for dataset_data in data]
        # get the popularity of the dataset and decide by number of replicas
        coll = 'dataset_rankings'
        pipeline = list()
        match = {'$match':{'date':date}}
        pipeline.append(match)
        match = {'$match':{'name':{'$in':dataset_names}}}
        pipeline.append(match)
        group = {'$group':{'_id':'$date', 'total_popularity':{'$sum':'$popularity'}}}
        pipeline.append(group)
        project = {'$project':{'total_popularity':1, '_id':0}}
        pipeline.append(project)
        data = self.storage.get_data(coll=coll, pipeline=pipeline)
        try:
            popularity = data[0]['total_popularity']
        except:
            # NOTE(review): bare except; empty result list expected here.
            popularity = 0.0
        return popularity

    def get_site_storage_rankings(self, subscriptions):
        """
        Return the amount over the soft limit sites are including new subscriptions
        If site is not over just set to 0

        :param subscriptions: list of (dataset_name, site_name) pairs.
        :returns: dict {site_name: GB over soft limit}; sites under the
            limit are removed.
        """
        site_rankings = dict()
        available_sites = self.sites.get_available_sites()
        for site_name in available_sites:
            site_rankings[site_name] = self.sites.get_over_soft_limit(site_name)
        # Account for the storage the pending subscriptions will add.
        for subscription in subscriptions:
            site_rankings[subscription[1]] += self.datasets.get_size(subscription[0])
        for site_name in available_sites:
            if site_rankings[site_name] < 0:
                del site_rankings[site_name]
        return site_rankings

    def normalize_popularity(self, date):
        """
        Normalize popularity values to be between 1 and max_replicas

        Linear rescaling of self.dataset_popularity; each rank is also
        upserted into the 'dataset_rankings' collection.

        :returns: dict {dataset_name: integer rank}.
        """
        dataset_rankings = dict()
        max_pop = max(self.dataset_popularity.iteritems(), key=operator.itemgetter(1))[1]
        min_pop = min(self.dataset_popularity.iteritems(), key=operator.itemgetter(1))[1]
        # Linear map: min_pop -> 1, max_pop -> max_replicas.
        n = float(min_pop + (self.max_replicas - 1))/max_pop
        m = 1 - n*min_pop
        for dataset_name, popularity in self.dataset_popularity.items():
            # store into dict
            rank = int(n*self.dataset_popularity[dataset_name] + m)
            dataset_rankings[dataset_name] = rank
            coll = 'dataset_rankings'
            # NOTE(review): the chained `query = data = {...}` is
            # redundant — data is reassigned on the next line.
            query = data = {'name':dataset_name, 'date':date}
            data = {'$set':{'name':dataset_name, 'date':date, 'rank':rank, 'popularity':popularity}}
            self.storage.update_data(coll=coll, query=query, data=data, upsert=True)
        return dataset_rankings
class Ranker(object):
    """
    Generic Ranking class
    """
    def __init__(self, config=dict()):
        """
        Set up managers and ranking parameters.

        :param config: parsed configuration dict; must provide
            config['rocker_board']['max_replicas'] and
            config['threading']['max_threads'].
        """
        # NOTE(review): mutable default argument; shared across calls.
        self.logger = logging.getLogger(__name__)
        self.config = config
        self.sites = SiteManager(self.config)
        self.datasets = DatasetManager(self.config)
        self.popularity = PopularityManager(self.config)
        self.storage = StorageManager(self.config)
        self.max_replicas = int(config['rocker_board']['max_replicas'])
        self.MAX_THREADS = int(config['threading']['max_threads'])
        # Shared result dict filled by the worker threads.
        self.dataset_popularity = dict()

    def get_dataset_rankings(self, date=datetime_day(datetime.datetime.utcnow())):
        """
        Generate dataset rankings

        Spawns MAX_THREADS daemon workers to fetch per-dataset
        popularity, waits for the queue to drain, then normalizes into
        integer ranks. NOTE(review): the default `date` is evaluated
        once at import time; workers loop forever and are never joined.

        :returns: dict {dataset_name: rank}.
        """
        self.dataset_popularity = dict()
        dataset_names = self.datasets.get_db_datasets()
        q = Queue.Queue()
        for i in range(self.MAX_THREADS):
            worker = threading.Thread(target=self.get_dataset_popularity, args=(q, ))
            worker.daemon = True
            worker.start()
        # self.dataset_features = self.popularity.get_features(dataset_names, date)
        # self.dataset_tiers = self.datasets.get_data_tiers(dataset_names)
        for dataset_name in dataset_names:
            q.put((dataset_name, date))
        q.join()
        dataset_rankings = self.normalize_popularity(date)
        return dataset_rankings

    def get_site_rankings(self, date=datetime_day(datetime.datetime.utcnow())):
        """
        Generate site rankings

        rank = (performance * has_free_storage) / popularity, where
        has_free_storage is a 0/1 flag. Each rank is also upserted into
        the 'site_rankings' collection.

        :returns: dict {site_name: rank}.
        """
        # get all sites which can be replicated to
        site_names = self.sites.get_available_sites()
        site_rankings = dict()
        for site_name in site_names:
            # get popularity
            popularity = self.get_site_popularity(site_name, date)
            # get cpu and storage (performance)
            performance = self.sites.get_performance(site_name)
            # get available storage
            available_storage_tb = self.sites.get_available_storage(site_name) / 10**3
            # Collapse storage to a binary has-space flag.
            if available_storage_tb <= 0:
                available_storage_tb = 0
            else:
                available_storage_tb = 1
            #calculate rank
            try:
                rank = (performance * available_storage_tb) / popularity
            except:
                # NOTE(review): bare except; presumably guards division
                # by zero popularity.
                rank = 0.0
            # store into dict
            site_rankings[site_name] = rank
            # insert into database
            coll = 'site_rankings'
            query = {'name': site_name, 'date': date}
            data = {'$set': {'name': site_name, 'date': date, 'rank': rank, 'popularity': popularity}}
            self.storage.update_data(coll=coll, query=query, data=data, upsert=True)
        return site_rankings

    def get_dataset_popularity(self, q):
        """
        Get the estimated popularity for dataset

        Worker-thread loop: consumes (dataset_name, date) tuples from q
        forever and stores the average popularity into
        self.dataset_popularity.
        """
        while True:
            # collect features
            data = q.get()
            dataset_name = data[0]
            date = data[1]
            popularity = 0.0
            # get average
            popularity = self.popularity.get_average_popularity(dataset_name, date)
            self.dataset_popularity[dataset_name] = popularity
            q.task_done()

    def get_site_popularity(self, site_name, date=datetime_day(datetime.datetime.utcnow())):
        """
        Get popularity for site

        Sums the popularity of all datasets replicated at the site via
        two aggregation queries ('dataset_data', 'dataset_rankings').

        :returns: total popularity (float), 0.0 when no data is found.
        """
        # get all datasets with a replica at the site and how many replicas it has
        coll = 'dataset_data'
        pipeline = list()
        match = {'$match': {'replicas': site_name}}
        pipeline.append(match)
        project = {'$project': {'name': 1, '_id': 0}}
        pipeline.append(project)
        data = self.storage.get_data(coll=coll, pipeline=pipeline)
        popularity = 0.0
        dataset_names = [dataset_data['name'] for dataset_data in data]
        # get the popularity of the dataset and decide by number of replicas
        coll = 'dataset_rankings'
        pipeline = list()
        match = {'$match': {'date': date}}
        pipeline.append(match)
        match = {'$match': {'name': {'$in': dataset_names}}}
        pipeline.append(match)
        group = {'$group': {'_id': '$date', 'total_popularity': {'$sum': '$popularity'}}}
        pipeline.append(group)
        project = {'$project': {'total_popularity': 1, '_id': 0}}
        pipeline.append(project)
        data = self.storage.get_data(coll=coll, pipeline=pipeline)
        try:
            popularity = data[0]['total_popularity']
        except:
            # NOTE(review): bare except; empty result list expected here.
            popularity = 0.0
        return popularity

    def get_site_storage_rankings(self, subscriptions):
        """
        Return the amount over the soft limit sites are including new subscriptions
        If site is not over just set to 0

        :param subscriptions: list of (dataset_name, site_name) pairs.
        :returns: dict {site_name: GB over soft limit}; sites under the
            limit are removed.
        """
        site_rankings = dict()
        available_sites = self.sites.get_available_sites()
        for site_name in available_sites:
            site_rankings[site_name] = self.sites.get_over_soft_limit(site_name)
        # Account for the storage the pending subscriptions will add.
        for subscription in subscriptions:
            site_rankings[subscription[1]] += self.datasets.get_size(subscription[0])
        for site_name in available_sites:
            if site_rankings[site_name] < 0:
                del site_rankings[site_name]
        return site_rankings

    def normalize_popularity(self, date):
        """
        Normalize popularity values to be between 1 and max_replicas

        Linear rescaling of self.dataset_popularity; each rank is also
        upserted into the 'dataset_rankings' collection.

        :returns: dict {dataset_name: integer rank}.
        """
        dataset_rankings = dict()
        max_pop = max(self.dataset_popularity.iteritems(), key=operator.itemgetter(1))[1]
        min_pop = min(self.dataset_popularity.iteritems(), key=operator.itemgetter(1))[1]
        # Linear map: min_pop -> 1, max_pop -> max_replicas.
        n = float(min_pop + (self.max_replicas - 1)) / max_pop
        m = 1 - n * min_pop
        for dataset_name, popularity in self.dataset_popularity.items():
            # store into dict
            rank = int(n * self.dataset_popularity[dataset_name] + m)
            dataset_rankings[dataset_name] = rank
            coll = 'dataset_rankings'
            # NOTE(review): the chained `query = data = {...}` is
            # redundant — data is reassigned on the next line.
            query = data = {'name': dataset_name, 'date': date}
            data = {'$set': {'name': dataset_name, 'date': date, 'rank': rank, 'popularity': popularity}}
            self.storage.update_data(coll=coll, query=query, data=data, upsert=True)
        return dataset_rankings
class RockerBoard(object):
    """
    RockerBoard is a system balancing algorithm using popularity metrics to predict popularity
    and make appropriate replications to keep the system balanced
    """
    def __init__(self, config=dict()):
        """
        Wire up the external services (PhEDEx, MIT DB) and managers used
        by the balancing algorithm.

        :param config: parsed configuration dict; must contain
            config['rocker_board']['max_gb'].
        """
        # NOTE(review): mutable default argument; shared across calls.
        self.logger = logging.getLogger(__name__)
        self.config = config
        self.phedex = PhEDExService(self.config)
        self.mit_db = MITDBService(self.config)
        self.datasets = DatasetManager(self.config)
        self.sites = SiteManager(self.config)
        self.popularity = PopularityManager(self.config)
        self.storage = StorageManager(self.config)
        self.rankings = Ranker(self.config)
        # Upper bound (GB) on the data volume subscribed per run.
        self.max_gb = int(self.config['rocker_board']['max_gb'])
        self.csv_data = list()

    def start(self, date=datetime_day(datetime.datetime.utcnow())):
        """
        Begin Rocker Board Algorithm

        Ranks datasets and sites, converts dataset ranks into replica
        deltas and computes (but, with subscribe commented out, does not
        submit) the subscriptions — effectively a dry run.
        NOTE(review): the default `date` is evaluated once at import
        time, not per call.
        """
        t1 = datetime.datetime.utcnow()
        # Get goals
        dataset_rankings = self.rankings.get_dataset_rankings(date)
        site_rankings = self.rankings.get_site_rankings(date)
        self.change_dataset_rankings(dataset_rankings)
        subscriptions = self.replicate(dataset_rankings, site_rankings)
        self.logger.info('SUBSCRIPTIONS')
        for subscription in subscriptions:
            self.logger.info('site: %s\tdataset: %s', subscription[1], subscription[0])
        # Submission currently disabled.
        # self.subscribe(subscriptions)
        t2 = datetime.datetime.utcnow()
        td = t2 - t1
        self.logger.info('Rocker Board took %s', str(td))

    def change_dataset_rankings(self, dataset_rankings):
        """
        Change the ranks from being the target number of replicas to being the change
        in number of replicas required to reach the goal

        :param dataset_rankings: dict {dataset_name: target_replicas},
            mutated in place.
        """
        current_replicas = self.datasets.get_current_num_replicas()
        for dataset in current_replicas:
            dataset_rankings[dataset['name']] -= dataset['n_replicas']

    def replicate(self, dataset_rankings, site_rankings):
        """
        Balance system by creating new replicas based on popularity

        Repeatedly takes the highest-ranked dataset, picks a hosting
        site by weighted random choice among sites with enough storage
        that do not already hold a replica, and stops once max_gb is
        reached or no candidate remains. Mutates both ranking dicts.

        :returns: list of (dataset_name, site_name) subscriptions.
        """
        subscriptions = list()
        subscribed_gb = 0
        sites_available_storage_gb = self.sites.get_all_available_storage()
        while (subscribed_gb < self.max_gb) and site_rankings:
            # Per-dataset working copy so candidate sites can be pruned
            # without touching the global site rankings.
            tmp_site_rankings = dict()
            for k, v in site_rankings.items():
                tmp_site_rankings[k] = v
            dataset = max(dataset_rankings.iteritems(), key=operator.itemgetter(1))
            dataset_name = dataset[0]
            dataset_rank = dataset[1]
            # Stop when no dataset still needs an extra replica.
            if (not dataset_name) or (dataset_rank < 1):
                break
            size_gb = self.datasets.get_size(dataset_name)
            # Sites that already hold a replica are not candidates.
            unavailable_sites = set(self.datasets.get_sites(dataset_name))
            for site_name in tmp_site_rankings.keys():
                if (self.sites.get_available_storage(site_name) < size_gb) or (tmp_site_rankings[site_name] <= 0):
                    unavailable_sites.add(site_name)
            for site_name in unavailable_sites:
                try:
                    del tmp_site_rankings[site_name]
                except:
                    # NOTE(review): bare except; only KeyError expected.
                    continue
            if not tmp_site_rankings:
                # Dataset cannot be placed anywhere; drop it.
                del dataset_rankings[dataset_name]
                continue
            site_name = weighted_choice(tmp_site_rankings)
            subscription = (dataset_name, site_name)
            subscriptions.append(subscription)
            subscribed_gb += size_gb
            sites_available_storage_gb[site_name] -= size_gb
            self.logger.info('%s : added', dataset_name)
            # Full sites leave the candidate pool entirely.
            if sites_available_storage_gb[site_name] <= 0:
                del site_rankings[site_name]
            dataset_rankings[dataset_name] -= 1
        self.logger.info('Subscribed %dGB', subscribed_gb)
        return subscriptions

    def subscribe(self, subscriptions):
        """
        Make subscriptions to phedex
        subscriptions = [(dataset_name, site_name), ...]
        """
        # Group dataset names by destination site for one request each.
        new_subscriptions = dict()
        for subscription in subscriptions:
            dataset_name = subscription[0]
            site_name = subscription[1]
            try:
                new_subscriptions[site_name].append(dataset_name)
            except:
                # First dataset for this site (KeyError expected).
                new_subscriptions[site_name] = list()
                new_subscriptions[site_name].append(dataset_name)
        for site_name, dataset_names in new_subscriptions.items():
            data = self.phedex.generate_xml(dataset_names)
            comments = 'This dataset is predicted to become popular and has therefore been automatically replicated by cuadrnt'
            api = 'subscribe'
            params = [('node', site_name), ('data', data), ('level','dataset'), ('move', 'n'), ('custodial', 'n'), ('group', 'AnalysisOps'), ('request_only', 'n'), ('no_mail', 'n'), ('comments', comments)]
            json_data = self.phedex.fetch(api=api, params=params, method='post')
            # insert into db
            group_name = 'AnalysisOps'
            request_id = 0
            request_type = 0
            try:
                request = json_data['phedex']
                request_id = request['request_created'][0]['id']
                request_created = timestamp_to_datetime(request['request_timestamp'])
            except:
                # Malformed/failed PhEDEx response; skip this site.
                self.logger.warning('Subscription did not succeed\n\tSite:%s\n\tDatasets: %s', str(site_name), str(dataset_names))
                continue
            for dataset_name in dataset_names:
                # Fetch today's delta_rank for the dataset to record with
                # the request.
                coll = 'dataset_rankings'
                date = datetime_day(datetime.datetime.utcnow())
                pipeline = list()
                match = {'$match':{'name':dataset_name, 'date':date}}
                pipeline.append(match)
                project = {'$project':{'delta_rank':1, '_id':0}}
                pipeline.append(project)
                data = self.storage.get_data(coll=coll, pipeline=pipeline)
                dataset_rank = data[0]['delta_rank']
                # Parameterized SQL; values are bound by mit_db.query.
                query = "INSERT INTO Requests(RequestId, RequestType, DatasetId, SiteId, GroupId, Rank, Date) SELECT %s, %s, Datasets.DatasetId, Sites.SiteId, Groups.GroupId, %s, %s FROM Datasets, Sites, Groups WHERE Datasets.DatasetName=%s AND Sites.SiteName=%s AND Groups.GroupName=%s"
                values = (request_id, request_type, dataset_rank, request_created, dataset_name, site_name, group_name)
                self.mit_db.query(query=query, values=values, cache=False)