def _create_lock(self, request, user, dn):
    """
    Insert detox lock rows for the requested item, one per (site, group) combination,
    and return a list of dictionaries summarizing the new locks.

    @param request  Dict with keys 'item' and 'expires' (UNIX timestamp), and optionally
                    'service', 'sites', 'groups', 'comment'.
    @param user     User name recorded with the lock.
    @param dn       User DN recorded with the lock.
    @return List of dicts, one per inserted lock row.
    """
    # Resolve the requesting service name to its id; stays 0 when absent or unknown.
    service_id = 0
    if 'service' in request:
        try:
            service_id = self.registry.db.query('SELECT `id` FROM `user_services` WHERE `name` = %s', request['service'])[0]
        except IndexError:
            # Unknown service name: fall back to service_id = 0.
            pass

    columns = ('item', 'sites', 'groups', 'lock_date', 'expiration_date', 'user', 'dn', 'service_id', 'comment')

    comment = None
    if 'comment' in request:
        comment = request['comment']

    # Template row: sites (index 1) and groups (index 2) start as None and are
    # filled in below when the request specifies them.
    values = [(request['item'], None, None, MySQL.bare('NOW()'), MySQL.bare('FROM_UNIXTIME(%d)' % request['expires']), user, dn, service_id, comment)]

    # Expand the row list into the cross product of requested sites and groups:
    # first one row per site, then each of those rows once per group.
    if 'sites' in request:
        new_values = []
        for site in request['sites']:
            for v in values:
                new_values.append(v[:1] + (site,) + v[2:])
        values = new_values

    if 'groups' in request:
        new_values = []
        for group in request['groups']:
            for v in values:
                new_values.append(v[:2] + (group,) + v[3:])
        values = new_values

    new_locks = []
    for v in values:
        lock_id = self.registry.db.insert_get_id('detox_locks', columns, v)

        new_lock = {
            'lockid': lock_id,
            'user': user,
            'dn': dn,
            'item': request['item'],
            'locked': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime()),
            'expires': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(request['expires']))
        }
        # Tuple indices mirror the `columns` tuple above:
        # v[1] = sites, v[2] = groups, v[7] = service_id.
        if v[7] != 0:
            new_lock['service'] = request['service']
        if v[1] is not None:
            new_lock['sites'] = v[1]
        if v[2] is not None:
            new_lock['groups'] = v[2]
        if 'comment' in request:
            new_lock['comment'] = request['comment']

        new_locks.append(new_lock)

    return new_locks
def create_request(self, caller, items, sites, sites_original, group, ncopies):
    """
    Create a new copy request, recording it in the registry and mirroring it into history.

    @param caller          Requesting user (object with .name and .dn).
    @param items           Item names to copy.
    @param sites           Destination site names (registry entry).
    @param sites_original  Site names as originally requested (history entry).
    @param group           Group name owning the copies.
    @param ncopies         Number of copies requested.
    @return The new request object, fetched back through get_requests().
    """
    now = int(time.time())

    if self._read_only:
        # No database writes allowed; hand back a transient request object.
        return CopyRequest(0, caller.name, caller.dn, group, ncopies, 'new', now, now, 1)

    # Registry entry
    request_time = MySQL.bare('FROM_UNIXTIME(%d)' % now)
    request_id = self.registry.db.insert_get_id(
        'copy_requests',
        ('group', 'num_copies', 'user', 'dn', 'first_request_time', 'last_request_time'),
        (group, ncopies, caller.name, caller.dn, request_time, request_time)
    )

    self.registry.db.insert_many('copy_request_sites', ('request_id', 'site'), lambda site: (request_id, site), sites)
    self.registry.db.insert_many('copy_request_items', ('request_id', 'item'), lambda item: (request_id, item), items)

    # History entry
    user_ids = self.history.save_users([(caller.name, caller.dn)], get_ids=True)
    site_ids = self.history.save_sites(sites_original, get_ids=True)
    group_ids = self.history.save_groups([group], get_ids=True)
    dataset_ids, block_ids = self._save_items(items)

    sql = 'INSERT INTO `copy_requests` (`id`, `group_id`, `num_copies`, `user_id`, `request_time`)'
    sql += ' VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))'
    self.history.db.query(sql, request_id, group_ids[0], ncopies, user_ids[0], now)

    self.history.db.insert_many('copy_request_sites', ('request_id', 'site_id'), lambda sid: (request_id, sid), site_ids)
    self.history.db.insert_many('copy_request_datasets', ('request_id', 'dataset_id'), lambda did: (request_id, did), dataset_ids)
    self.history.db.insert_many('copy_request_blocks', ('request_id', 'block_id'), lambda bid: (request_id, bid), block_ids)

    return self.get_requests(request_id=request_id)[request_id]
def create_request(self, caller, items, sites, sites_original, group, ncopies):
    """
    Create a new copy request, recording it in the registry and mirroring it into history.

    @param caller          Requesting user (object with .name and .dn).
    @param items           Item names to copy.
    @param sites           Destination site names (registry entry).
    @param sites_original  Site names as originally requested (history entry).
    @param group           Group name owning the copies.
    @param ncopies         Number of copies requested.
    @return The new request object, fetched back through get_requests().
    """
    now = int(time.time())

    if self._read_only:
        # No database writes allowed; hand back a transient request object.
        return CopyRequest(0, caller.name, caller.dn, group, ncopies, 'new', now, now, 1)

    # Make an entry in registry
    columns = ('group', 'num_copies', 'user', 'dn', 'first_request_time', 'last_request_time')
    values = (group, ncopies, caller.name, caller.dn, MySQL.bare('FROM_UNIXTIME(%d)' % now), MySQL.bare('FROM_UNIXTIME(%d)' % now))
    request_id = self.registry.db.insert_get_id('copy_requests', columns, values)

    mapping = lambda site: (request_id, site)
    self.registry.db.insert_many('copy_request_sites', ('request_id', 'site'), mapping, sites)
    mapping = lambda item: (request_id, item)
    self.registry.db.insert_many('copy_request_items', ('request_id', 'item'), mapping, items)

    # Make an entry in history
    history_user_ids = self.history.save_users([(caller.name, caller.dn)], get_ids=True)
    history_site_ids = self.history.save_sites(sites_original, get_ids=True)
    history_group_ids = self.history.save_groups([group], get_ids=True)
    history_dataset_ids, history_block_ids = self._save_items(items)

    # History table reuses the registry-assigned request id.
    sql = 'INSERT INTO `copy_requests` (`id`, `group_id`, `num_copies`, `user_id`, `request_time`)'
    sql += ' VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))'
    self.history.db.query(sql, request_id, history_group_ids[0], ncopies, history_user_ids[0], now)

    mapping = lambda sid: (request_id, sid)
    self.history.db.insert_many('copy_request_sites', ('request_id', 'site_id'), mapping, history_site_ids)
    mapping = lambda did: (request_id, did)
    self.history.db.insert_many('copy_request_datasets', ('request_id', 'dataset_id'), mapping, history_dataset_ids)
    mapping = lambda bid: (request_id, bid)
    self.history.db.insert_many('copy_request_blocks', ('request_id', 'block_id'), mapping, history_block_ids)

    return self.get_requests(request_id=request_id)[request_id]
def new_cycle(self, partition, policy_text, comment='', test=False):
    """
    Set up a new deletion cycle for the partition.
    @param partition   Partition name string
    @param policy_text Full text of the policy
    @param comment     Comment string
    @param test        If True, create a deletion_test cycle.
    @return cycle number.
    """
    if self._read_only:
        return 0

    part_id = self.save_partitions([partition], get_ids=True)[0]
    policy_id = self.save_policy(policy_text)

    operation_str = 'deletion_test' if test else 'deletion'

    return self.db.insert_get_id(
        'deletion_cycles',
        columns=('operation', 'partition_id', 'policy_id', 'comment', 'time_start'),
        values=(operation_str, part_id, policy_id, comment, MySQL.bare('NOW()'))
    )
def new_cycle(self, partition, policy_text, comment='', test=False):
    """
    Set up a new deletion cycle for the partition.
    @param partition   Partition name string
    @param policy_text Full text of the policy
    @param comment     Comment string
    @param test        If True, create a deletion_test cycle.
    @return cycle number.
    """
    if self._read_only:
        return 0

    part_id = self.save_partitions([partition], get_ids=True)[0]
    policy_id = self.save_policy(policy_text)

    if test:
        operation_str = 'deletion_test'
    else:
        operation_str = 'deletion'

    columns = ('operation', 'partition_id', 'policy_id', 'comment', 'time_start')
    values = (operation_str, part_id, policy_id, comment, MySQL.bare('NOW()'))
    return self.db.insert_get_id('deletion_cycles', columns=columns, values=values)
def new_cycle(self, partition, comment='', test=False):
    """
    Set up a new copy cycle for the partition.
    @param partition partition name string
    @param comment   comment string
    @param test      if True, create a copy_test cycle.
    @return cycle number.
    """
    if self._read_only:
        return 0

    part_id = self.save_partitions([partition], get_ids=True)[0]

    operation_str = 'copy_test' if test else 'copy'

    return self.db.insert_get_id(
        'copy_cycles',
        columns=('operation', 'partition_id', 'comment', 'time_start'),
        values=(operation_str, part_id, comment, MySQL.bare('NOW()'))
    )
def create_request(self, caller, items, sites):
    """
    Create a new deletion request, recording it in the registry and mirroring it into history.

    @param caller  Requesting user (object with .name and .dn).
    @param items   Item names to delete.
    @param sites   Site names the deletion applies to.
    @return The new request object, fetched back through get_requests().
    """
    now = int(time.time())

    if self._read_only:
        # No database writes allowed; hand back a transient request object.
        return DeletionRequest(0, caller.name, caller.dn, 'new', now, None)

    # Make an entry in registry
    columns = ('user', 'dn', 'request_time')
    values = (caller.name, caller.dn, MySQL.bare('FROM_UNIXTIME(%d)' % now))
    request_id = self.registry.db.insert_get_id('deletion_requests', columns, values)

    mapping = lambda site: (request_id, site)
    self.registry.db.insert_many('deletion_request_sites', ('request_id', 'site'), mapping, sites)
    mapping = lambda item: (request_id, item)
    self.registry.db.insert_many('deletion_request_items', ('request_id', 'item'), mapping, items)

    # Make an entry in history
    history_user_ids = self.history.save_users([(caller.name, caller.dn)], get_ids=True)
    history_site_ids = self.history.save_sites(sites, get_ids=True)
    history_dataset_ids, history_block_ids = self._save_items(items)

    # Insert using the saved user id directly, mirroring the copy-request version.
    # The previous INSERT ... SELECT cross-joined `groups` with no join condition,
    # which would insert one row per row of `groups`.
    sql = 'INSERT INTO `deletion_requests` (`id`, `user_id`, `request_time`)'
    sql += ' VALUES (%s, %s, FROM_UNIXTIME(%s))'
    self.history.db.query(sql, request_id, history_user_ids[0], now)

    mapping = lambda sid: (request_id, sid)
    self.history.db.insert_many('deletion_request_sites', ('request_id', 'site_id'), mapping, history_site_ids)
    mapping = lambda did: (request_id, did)
    self.history.db.insert_many('deletion_request_datasets', ('request_id', 'dataset_id'), mapping, history_dataset_ids)
    mapping = lambda bid: (request_id, bid)
    self.history.db.insert_many('deletion_request_blocks', ('request_id', 'block_id'), mapping, history_block_ids)

    return self.get_requests(request_id=request_id)[request_id]
def make_entry(self, site_name):
    """Open a new copy-operation record for *site_name* and return it as a HistoryRecord."""
    operation_id = 0
    if not self._read_only:
        site_id = self.save_sites([site_name], get_ids=True)[0]
        operation_id = self.db.insert_get_id(
            'copy_operations',
            columns=('timestamp', 'site_id'),
            values=(MySQL.bare('NOW()'), site_id)
        )

    return HistoryRecord(HistoryRecord.OP_COPY, operation_id, site_name, int(time.time()))
def _connect(self): #override
    """Register this server in the `servers` table and store its row id in self._server_id."""
    hostname = socket.gethostname()

    if self._host == 'localhost' or self._host == hostname:
        # This is the master server; wipe the table clean
        self._mysql.query('DELETE FROM `servers`')
        self._mysql.query('ALTER TABLE `servers` AUTO_INCREMENT = 1')
    else:
        # Remove any stale entry for this host before re-registering.
        self._mysql.query('DELETE FROM `servers` WHERE `hostname` = %s', hostname)

    # id of this server
    self._server_id = self._mysql.insert_get_id(
        'servers',
        columns=('hostname', 'last_heartbeat'),
        values=(hostname, MySQL.bare('NOW()'))
    )
def create_cached_request(self, caller, item, sites_original, group, ncopies):
    """
    Insert a 'new' cached copy request into the registry.

    @param caller          Requesting user (object with .name and .dn).
    @param item            Item name to copy.
    @param sites_original  Sites as originally requested.
    @param group           Group name owning the copies.
    @param ncopies         Number of copies requested.
    @return Dict with keys 'request_id', 'item', 'sites'.
    """
    now = int(time.time())

    # Make an entry in registry
    columns = ('item', 'sites', 'group', 'num_copies', 'user', 'dn', 'request_time', 'status')
    values = (item, sites_original, group, ncopies, caller.name, caller.dn, MySQL.bare('FROM_UNIXTIME(%d)' % now), 'new')
    LOG.info(values)

    cached_request_id = self.registry.db.insert_get_id('cached_copy_requests', columns, values)

    return {
        'request_id': cached_request_id,
        'item': item,
        'sites': sites_original
    }
def _cancel(self, task_ids, optype):
    """
    Cancel the FTS jobs backing the given tasks.

    @param task_ids  Task ids whose FTS file transfers/deletions should be cancelled.
    @param optype    'transfer' or 'deletion'; selects the fts_* tables to query.
    """
    sql = 'SELECT b.`job_id`, f.`fts_file_id` FROM `fts_{op}_tasks` AS f'
    sql += ' INNER JOIN `fts_{op}_batches` AS b ON b.`id` = f.`fts_batch_id`'
    result = self.db.execute_many(sql.format(op=optype), MySQL.bare('f.`id`'), task_ids)

    # Group the FTS file ids by their FTS job so each job is cancelled once.
    by_job = collections.defaultdict(list)
    for job_id, file_id in result:
        by_job[job_id].append(file_id)

    if not self._read_only:
        for job_id, ids in by_job.iteritems():
            try:
                self._ftscall('cancel', job_id, file_ids=ids)
            # Narrowed from a bare except, which would also swallow
            # KeyboardInterrupt / SystemExit. Best-effort: log and continue.
            except Exception:
                LOG.error('Failed to cancel FTS job %s', job_id)
def save_policy(self, policy_text):
    """Return the id of the stored policy text, inserting a new row when not yet recorded."""
    digest = hashlib.md5(policy_text).hexdigest()

    # Look up by hash first; compare full texts to guard against (near-impossible) collisions.
    matches = self.db.query('SELECT `id`, `text` FROM `deletion_policies` WHERE `hash` = UNHEX(%s)', digest)
    for policy_id, text in matches:
        if text == policy_text:
            return policy_id

    # No row with matching hash, or hash matched but text differed: store a new policy.
    return self.db.insert_get_id(
        'deletion_policies',
        columns=('hash', 'text'),
        values=(MySQL.bare("UNHEX('%s')" % digest), policy_text)
    )
def _cancel(self, task_ids, optype):
    """
    Cancel the FTS jobs backing the given tasks.

    @param task_ids  Task ids whose FTS file transfers/deletions should be cancelled.
    @param optype    'transfer' or 'deletion'; selects the fts_* tables to query.
    """
    sql = 'SELECT b.`job_id`, f.`fts_file_id` FROM `fts_{op}_tasks` AS f'
    sql += ' INNER JOIN `fts_{op}_batches` AS b ON b.`id` = f.`fts_batch_id`'
    result = self.db.execute_many(sql.format(op=optype), MySQL.bare('f.`id`'), task_ids)

    # Group the FTS file ids by their FTS job so each job is cancelled once.
    by_job = collections.defaultdict(list)
    for job_id, file_id in result:
        by_job[job_id].append(file_id)

    if not self._read_only:
        for job_id, ids in by_job.iteritems():
            try:
                self._ftscall('cancel', job_id, file_ids=ids)
            # Narrowed from a bare except, which would also swallow
            # KeyboardInterrupt / SystemExit. Best-effort: log and continue.
            except Exception:
                LOG.error('Failed to cancel FTS job %s', job_id)
def create_request(self, caller, items, sites):
    """
    Create a new deletion request, recording it in the registry and mirroring it into history.

    @param caller  Requesting user (object with .name and .dn).
    @param items   Item names to delete.
    @param sites   Site names the deletion applies to.
    @return The new request object, fetched back through get_requests().
    """
    now = int(time.time())

    if self._read_only:
        # No database writes allowed; hand back a transient request object.
        return DeletionRequest(0, caller.name, caller.dn, 'new', now, None)

    # Make an entry in registry
    columns = ('user', 'dn', 'request_time')
    values = (caller.name, caller.dn, MySQL.bare('FROM_UNIXTIME(%d)' % now))
    request_id = self.registry.db.insert_get_id('deletion_requests', columns, values)

    mapping = lambda site: (request_id, site)
    self.registry.db.insert_many('deletion_request_sites', ('request_id', 'site'), mapping, sites)
    mapping = lambda item: (request_id, item)
    self.registry.db.insert_many('deletion_request_items', ('request_id', 'item'), mapping, items)

    # Make an entry in history
    history_user_ids = self.history.save_users([(caller.name, caller.dn)], get_ids=True)
    history_site_ids = self.history.save_sites(sites, get_ids=True)
    history_dataset_ids, history_block_ids = self._save_items(items)

    # Insert using the saved user id directly, mirroring the copy-request version.
    # The previous INSERT ... SELECT cross-joined `groups` with no join condition,
    # which would insert one row per row of `groups`.
    sql = 'INSERT INTO `deletion_requests` (`id`, `user_id`, `request_time`)'
    sql += ' VALUES (%s, %s, FROM_UNIXTIME(%s))'
    self.history.db.query(sql, request_id, history_user_ids[0], now)

    mapping = lambda sid: (request_id, sid)
    self.history.db.insert_many('deletion_request_sites', ('request_id', 'site_id'), mapping, history_site_ids)
    mapping = lambda did: (request_id, did)
    self.history.db.insert_many('deletion_request_datasets', ('request_id', 'dataset_id'), mapping, history_dataset_ids)
    mapping = lambda bid: (request_id, bid)
    self.history.db.insert_many('deletion_request_blocks', ('request_id', 'block_id'), mapping, history_block_ids)

    return self.get_requests(request_id=request_id)[request_id]
def new_cycle(self, partition, comment='', test=False):
    """
    Set up a new copy cycle for the partition.
    @param partition partition name string
    @param comment   comment string
    @param test      if True, create a copy_test cycle.
    @return cycle number.
    """
    if self._read_only:
        return 0

    part_id = self.save_partitions([partition], get_ids=True)[0]

    if test:
        operation_str = 'copy_test'
    else:
        operation_str = 'copy'

    columns = ('operation', 'partition_id', 'comment', 'time_start')
    values = (operation_str, part_id, comment, MySQL.bare('NOW()'))
    return self.db.insert_get_id('copy_cycles', columns=columns, values=values)
def _create_lock(self, request, user, dn):
    """
    Insert detox lock rows for the requested item, one per (site, group) combination,
    and return a list of dictionaries summarizing the new locks.

    @param request  Dict with keys 'item' and 'expires' (UNIX timestamp), and optionally
                    'service', 'sites', 'groups', 'comment'.
    @param user     User name recorded with the lock.
    @param dn       User DN recorded with the lock.
    @return List of dicts, one per inserted lock row.
    """
    # Resolve the requesting service name to its id; stays 0 when absent or unknown.
    service_id = 0
    if 'service' in request:
        try:
            service_id = self.registry.db.query('SELECT `id` FROM `user_services` WHERE `name` = %s', request['service'])[0]
        except IndexError:
            pass

    columns = ('item', 'sites', 'groups', 'lock_date', 'expiration_date', 'user', 'dn', 'service_id', 'comment')

    comment = request['comment'] if 'comment' in request else None

    # Template row; sites (index 1) and groups (index 2) start as None.
    base_row = (request['item'], None, None, MySQL.bare('NOW()'), MySQL.bare('FROM_UNIXTIME(%d)' % request['expires']), user, dn, service_id, comment)
    rows = [base_row]

    # Expand into the cross product of requested sites and groups.
    if 'sites' in request:
        rows = [row[:1] + (site,) + row[2:] for site in request['sites'] for row in rows]
    if 'groups' in request:
        rows = [row[:2] + (group,) + row[3:] for group in request['groups'] for row in rows]

    new_locks = []
    for row in rows:
        lock_id = self.registry.db.insert_get_id('detox_locks', columns, row)

        new_lock = {
            'lockid': lock_id,
            'user': user,
            'dn': dn,
            'item': request['item'],
            'locked': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime()),
            'expires': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(request['expires']))
        }
        # Tuple indices mirror `columns`: row[1] = sites, row[2] = groups, row[7] = service_id.
        if row[7] != 0:
            new_lock['service'] = request['service']
        if row[1] is not None:
            new_lock['sites'] = row[1]
        if row[2] is not None:
            new_lock['groups'] = row[2]
        if 'comment' in request:
            new_lock['comment'] = request['comment']

        new_locks.append(new_lock)

    return new_locks
def save_policy(self, policy_text):
    """
    Return the id of the stored policy text, inserting a new row when not yet recorded.

    @param policy_text  Full policy text.
    @return id of the matching or newly inserted `deletion_policies` row.
    """
    md5 = hashlib.md5(policy_text).hexdigest()

    # Look up by hash first; compare full texts to guard against collisions.
    result = self.db.query('SELECT `id`, `text` FROM `deletion_policies` WHERE `hash` = UNHEX(%s)', md5)
    for policy_id, text in result:
        if text == policy_text:
            return policy_id

    # no row with matching hash or no row with matching text although hash matches (basically impossible)
    # new policy
    columns = ('hash', 'text')
    return self.db.insert_get_id('deletion_policies', columns=columns, values=(MySQL.bare('UNHEX(\'%s\')' % md5), policy_text))
def make_entry(self, site_name):
    """
    Open a new copy-operation record for *site_name*.

    @param site_name  Site the copy operation targets.
    @return HistoryRecord for the new operation (operation id 0 in read-only mode).
    """
    if self._read_only:
        operation_id = 0
    else:
        site_id = self.save_sites([site_name], get_ids=True)[0]
        operation_id = self.db.insert_get_id('copy_operations', columns=('timestamp', 'site_id'), values=(MySQL.bare('NOW()'), site_id))

    return HistoryRecord(HistoryRecord.OP_COPY, operation_id, site_name, int(time.time()))
def _update_status(self, optype):
    """
    Poll the transfer/deletion backends for completed tasks, archive the results
    into the history database, update or delete the corresponding subscriptions,
    and clean up finished tasks and batches.

    @param optype  'transfer' or 'deletion'; selects the table set and query objects.
    @return List of subscription ids whose task finished successfully.
    """
    # Build the per-optype SQL fragments: transfers have both a source and a
    # destination site, deletions only one site.
    if optype == 'transfer':
        site_columns = 'ss.`name`, sd.`name`'
        site_joins = ' INNER JOIN `sites` AS ss ON ss.`id` = q.`source_id`'
        site_joins += ' INNER JOIN `sites` AS sd ON sd.`id` = u.`site_id`'
    else:
        site_columns = 's.`name`'
        site_joins = ' INNER JOIN `sites` AS s ON s.`id` = u.`site_id`'

    get_task_data = 'SELECT u.`id`, f.`name`, f.`size`, UNIX_TIMESTAMP(q.`created`), ' + site_columns + ' FROM `{op}_tasks` AS q'
    get_task_data += ' INNER JOIN `file_subscriptions` AS u ON u.`id` = q.`subscription_id`'
    get_task_data += ' INNER JOIN `files` AS f ON f.`id` = u.`file_id`'
    get_task_data += site_joins
    get_task_data += ' WHERE q.`id` = %s'
    get_task_data = get_task_data.format(op = optype)

    if optype == 'transfer':
        history_table_name = 'file_transfers'
        history_site_fields = ('source_id', 'destination_id')
    else:
        history_table_name = 'file_deletions'
        history_site_fields = ('site_id',)

    history_fields = ('file_id', 'exitcode', 'message', 'batch_id', 'created', 'started', 'finished', 'completed') + history_site_fields

    # Failed-transfer bookkeeping only exists for transfers.
    if optype == 'transfer':
        insert_failure = 'INSERT INTO `failed_transfers` (`id`, `subscription_id`, `source_id`, `exitcode`)'
        insert_failure += ' SELECT `id`, `subscription_id`, `source_id`, %s FROM `transfer_tasks` WHERE `id` = %s'
        insert_failure += ' ON DUPLICATE KEY UPDATE `id`=VALUES(`id`)'
        delete_failures = 'DELETE FROM `failed_transfers` WHERE `subscription_id` = %s'

    get_subscription_status = 'SELECT `status` FROM `file_subscriptions` WHERE `id` = %s'
    update_subscription = 'UPDATE `file_subscriptions` SET `status` = %s, `last_update` = NOW() WHERE `id` = %s'
    delete_subscription = 'DELETE FROM `file_subscriptions` WHERE `id` = %s'
    delete_task = 'DELETE FROM `{op}_tasks` WHERE `id` = %s'.format(op = optype)
    delete_batch = 'DELETE FROM `{op}_batches` WHERE `id` = %s'.format(op = optype)

    done_subscriptions = []
    num_success = 0
    num_failure = 0
    num_cancelled = 0

    # Collect completed tasks
    for batch_id in self.db.query('SELECT `id` FROM `{op}_batches`'.format(op = optype)):
        # Ask each configured backend for this batch's status; the first backend
        # that returns any results wins.
        results = []
        if optype == 'transfer':
            for _, query in self.transfer_queries:
                results = query.get_transfer_status(batch_id)
                if len(results) != 0:
                    break
        else:
            for _, query in self.deletion_queries:
                results = query.get_deletion_status(batch_id)
                if len(results) != 0:
                    break
        # NOTE(review): `query` is reused after these loops via Python's
        # loop-variable leak; if the query list were empty this would raise
        # NameError at `batch_complete` handling below — presumably never empty.

        batch_complete = True

        for task_id, status, exitcode, message, start_time, finish_time in results:
            # start_time and finish_time can be None
            LOG.debug('%s result: %d %s %d %s %s', optype, task_id, FileQuery.status_name(status), exitcode, start_time, finish_time)

            if status == FileQuery.STAT_DONE:
                num_success += 1
            elif status == FileQuery.STAT_FAILED:
                num_failure += 1
            elif status == FileQuery.STAT_CANCELLED:
                num_cancelled += 1
            else:
                # Task still in flight: the batch is not complete yet.
                batch_complete = False
                continue

            try:
                task_data = self.db.query(get_task_data, task_id)[0]
            except IndexError:
                # Task row disappeared; drop the backend's record and our row.
                LOG.warning('%s task %d got lost.', optype, task_id)
                if optype == 'transfer':
                    query.forget_transfer_status(task_id)
                else:
                    query.forget_deletion_status(task_id)

                if not self._read_only:
                    self.db.query(delete_task, task_id)

                continue

            subscription_id, lfn, size, create_time = task_data[:4]

            # Map the site name(s) to history-db ids.
            if optype == 'transfer':
                source_name, dest_name = task_data[4:]
                history_site_ids = (
                    self.history_db.save_sites([source_name], get_ids = True)[0],
                    self.history_db.save_sites([dest_name], get_ids = True)[0]
                )
            else:
                site_name = task_data[4]
                history_site_ids = (self.history_db.save_sites([site_name], get_ids = True)[0],)

            file_id = self.history_db.save_files([(lfn, size)], get_ids = True)[0]

            # Convert UNIX timestamps to naive local-time datetimes for SQL.
            if start_time is None:
                sql_start_time = None
            else:
                sql_start_time = datetime.datetime(*time.localtime(start_time)[:6])

            if finish_time is None:
                sql_finish_time = None
            else:
                sql_finish_time = datetime.datetime(*time.localtime(finish_time)[:6])

            values = (file_id, exitcode, message, batch_id, datetime.datetime(*time.localtime(create_time)[:6]), sql_start_time, sql_finish_time, MySQL.bare('NOW()')) + history_site_ids

            if optype == 'transfer':
                LOG.debug('Archiving transfer of %s from %s to %s (exitcode %d)', lfn, source_name, dest_name, exitcode)
            else:
                LOG.debug('Archiving deletion of %s at %s (exitcode %d)', lfn, site_name, exitcode)

            if self._read_only:
                history_id = 0
            else:
                history_id = self.history_db.db.insert_get_id(history_table_name, history_fields, values)

            if optype == 'transfer':
                query.write_transfer_history(self.history_db, task_id, history_id)
            else:
                query.write_deletion_history(self.history_db, task_id, history_id)

            # We check the subscription status and update accordingly. Need to lock the tables.
            if not self._read_only:
                self.db.lock_tables(write = ['file_subscriptions'])

            try:
                subscription_status = self.db.query(get_subscription_status, subscription_id)[0]

                if subscription_status == 'inbatch':
                    if status == FileQuery.STAT_DONE:
                        LOG.debug('Subscription %d done.', subscription_id)
                        if not self._read_only:
                            self.db.query(update_subscription, 'done', subscription_id)
                    elif status == FileQuery.STAT_FAILED:
                        LOG.debug('Subscription %d failed (exit code %d). Flagging retry.', subscription_id, exitcode)
                        if not self._read_only:
                            self.db.query(update_subscription, 'retry', subscription_id)
                elif subscription_status == 'cancelled':
                    # subscription is cancelled and task terminated -> delete the subscription now, irrespective of the task status
                    LOG.debug('Subscription %d is cancelled.', subscription_id)
                    if not self._read_only:
                        self.db.query(delete_subscription, subscription_id)
            finally:
                if not self._read_only:
                    self.db.unlock_tables()

            if not self._read_only:
                if optype == 'transfer':
                    if subscription_status == 'cancelled' or (subscription_status == 'inbatch' and status == FileQuery.STAT_DONE):
                        # Delete entries from failed_transfers table
                        self.db.query(delete_failures, subscription_id)
                    elif subscription_status == 'inbatch' and status == FileQuery.STAT_FAILED:
                        # Insert entry to failed_transfers table
                        self.db.query(insert_failure, exitcode, task_id)

                self.db.query(delete_task, task_id)

            if status == FileQuery.STAT_DONE:
                done_subscriptions.append(subscription_id)

            if optype == 'transfer':
                query.forget_transfer_status(task_id)
            else:
                query.forget_deletion_status(task_id)

            if self.cycle_stop.is_set():
                break

        if batch_complete:
            # Every task in the batch reached a terminal state: drop the batch.
            if not self._read_only:
                self.db.query(delete_batch, batch_id)

            if optype == 'transfer':
                query.forget_transfer_batch(batch_id)
            else:
                query.forget_deletion_batch(batch_id)

    # Summarize at INFO only when something actually happened.
    if num_success + num_failure + num_cancelled != 0:
        LOG.info('Archived file %s: %d succeeded, %d failed, %d cancelled.', optype, num_success, num_failure, num_cancelled)
    else:
        LOG.debug('Archived file %s: %d succeeded, %d failed, %d cancelled.', optype, num_success, num_failure, num_cancelled)

    return done_subscriptions