def _do_get_next_application(self, read_only, blocked_apps): #override
    sql = 'SELECT `applications`.`id`, 0+`auth_level`, `title`, `path`, `args`, `timeout`, `users`.`name`, `user_host` FROM `applications`'
    sql += ' INNER JOIN `users` ON `users`.`id` = `applications`.`user_id`'
    sql += ' WHERE `status` = \'new\''
    if read_only:
        sql += ' AND `auth_level` != \'write\''
    if len(blocked_apps) != 0:
        sql += ' AND `title` NOT IN %s' % MySQL.stringify_sequence(blocked_apps)
    sql += ' ORDER BY `applications`.`id` LIMIT 1'

    result = self._mysql.query(sql)

    if len(result) == 0:
        return None
    else:
        appid, auth_level, title, path, args, timeout, uname, uhost = result[0]
        return {
            'appid': appid, 'auth_level': auth_level,
            'user_name': uname, 'user_host': uhost,
            'title': title, 'path': path, 'args': args, 'timeout': timeout
        }
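# The queries in this module build IN clauses through MySQL.stringify_sequence, whose
# implementation is not shown here. A minimal sketch of the assumed behaviour -- rendering a
# Python sequence as a parenthesized, quoted SQL tuple -- is given below purely for
# illustration; the real helper may quote and escape values differently.
def _example_stringify_sequence(seq):
    """Illustrative stand-in for MySQL.stringify_sequence: ['a', 'b'] -> "('a','b')"."""
    return '(%s)' % ','.join("'%s'" % str(x).replace("'", "\\'") for x in seq)

# e.g. '... WHERE `title` NOT IN %s' % _example_stringify_sequence(['app1', 'app2'])
#   -> "... WHERE `title` NOT IN ('app1','app2')"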
def _make_history_constraints(self, request_id, statuses, users, items, sites):
    if users is not None:
        history_user_ids = self.history.db.select_many('users', 'id', 'name', users)
    else:
        history_user_ids = None

    if items is not None:
        history_dataset_ids, history_block_ids = self._get_saved_item_ids(items)
    else:
        history_dataset_ids = None
        history_block_ids = None

    if sites is not None:
        history_site_ids = self.history.db.select_many('sites', 'id', 'name', sites)
    else:
        history_site_ids = None

    constraints = []
    if request_id is not None:
        constraints.append('r.`id` = %d' % request_id)

    if statuses is not None:
        constraints.append('r.`status` IN ' + MySQL.stringify_sequence(statuses))

    if users is not None:
        constraints.append('r.`user_id` IN ' + MySQL.stringify_sequence(history_user_ids))

    if items is not None or sites is not None:
        temp_table = self._make_temp_history_tables(history_dataset_ids, history_block_ids, history_site_ids)
        constraints.append('r.`id` IN (SELECT `id` FROM {0})'.format(temp_table))

    if len(constraints) != 0:
        return ' WHERE ' + ' AND '.join(constraints)
    else:
        return ''
def _make_registry_constraints(self, request_id, statuses, users, items, sites):
    constraints = []
    if request_id is not None:
        constraints.append('r.`id` = %d' % request_id)

    if statuses is not None:
        constraints.append('r.`status` IN ' + MySQL.stringify_sequence(statuses))

    if users is not None:
        constraints.append('r.`user` IN ' + MySQL.stringify_sequence(users))

    if items is not None or sites is not None:
        temp_table = self._make_temp_registry_tables(items, sites)
        constraints.append('r.`id` IN (SELECT `id` FROM {0})'.format(temp_table))

    if len(constraints) != 0:
        return ' WHERE ' + ' AND '.join(constraints)
    else:
        return ''
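# Illustration (not from the original source): both constraint builders above return either an
# empty string or a ready-to-append WHERE clause. For example, with request_id = None,
# statuses = ['new', 'activated'], users = ['someuser'], items = None and sites = None,
# _make_registry_constraints would produce a string of the form
#   " WHERE r.`status` IN (...) AND r.`user` IN (...)"
# where each parenthesized list is whatever MySQL.stringify_sequence renders for the sequence.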
def _get_lock(self, request, valid_only = False):
    sql = 'SELECT l.`id`, l.`user`, l.`dn`, s.`name`, l.`item`, l.`sites`, l.`groups`,'
    sql += ' UNIX_TIMESTAMP(l.`lock_date`), UNIX_TIMESTAMP(l.`expiration_date`), l.`comment`'
    sql += ' FROM `detox_locks` AS l'
    sql += ' LEFT JOIN `user_services` AS s ON s.`id` = l.`service_id`'

    constraints = []
    args = []

    user_const = -1

    if 'lockid' in request:
        constraints.append('l.`id` IN %s' % MySQL.stringify_sequence(request['lockid']))

    if 'user' in request:
        user_const = len(constraints)
        constraints.append('l.`user` IN %s' % MySQL.stringify_sequence(request['user']))

    if 'service' in request:
        constraints.append('s.`name` = %s')
        args.append(request['service'])

    if 'item' in request:
        constraints.append('l.`item` = %s')
        args.append(request['item'])

    if 'sites' in request:
        constraints.append('l.`sites` IN %s' % MySQL.stringify_sequence(request['sites']))

    if 'groups' in request:
        constraints.append('l.`groups` IN %s' % MySQL.stringify_sequence(request['groups']))

    if 'created_before' in request:
        constraints.append('l.`lock_date` <= FROM_UNIXTIME(%s)')
        args.append(request['created_before'])

    if 'created_after' in request:
        constraints.append('l.`lock_date` >= FROM_UNIXTIME(%s)')
        args.append(request['created_after'])

    if 'expires_before' in request:
        constraints.append('l.`expiration_date` <= FROM_UNIXTIME(%s)')
        args.append(request['expires_before'])

    if 'expires_after' in request:
        constraints.append('l.`expiration_date` >= FROM_UNIXTIME(%s)')
        args.append(request['expires_after'])

    if len(constraints) != 0:
        sql += ' WHERE ' + ' AND '.join(constraints)

    existing = []

    for lock_id, user, dn, service, item, site, group, lock_date, expiration_date, comment in self.registry.db.xquery(sql, *args):
        lock = {
            'lockid': lock_id,
            'user': user,
            'dn': dn,
            'item': item,
            'locked': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(lock_date)),
            'expires': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(expiration_date))
        }
        if service is not None:
            lock['service'] = service
        if site is not None:
            lock['sites'] = site
        if group is not None:
            lock['groups'] = group
        if comment is not None:
            lock['comment'] = comment

        existing.append(lock)

    if valid_only or ('lockid' in request and len(existing) != 0):
        return existing

    sql = 'SELECT l.`id`, u.`name`, u.`dn`, s.`name`, l.`item`, l.`sites`, l.`groups`,'
    sql += ' UNIX_TIMESTAMP(l.`lock_date`), UNIX_TIMESTAMP(l.`unlock_date`), UNIX_TIMESTAMP(l.`expiration_date`), l.`comment`'
    sql += ' FROM `detox_locks` AS l'
    sql += ' LEFT JOIN `users` AS u ON u.`id` = l.`user_id`'
    sql += ' LEFT JOIN `user_services` AS s ON s.`id` = l.`service_id`'

    if len(constraints) != 0:
        if user_const != -1:
            constraints[user_const] = 'u.`name` IN %s' % MySQL.stringify_sequence(request['user'])

        sql += ' WHERE ' + ' AND '.join(constraints)

    for lock_id, user, dn, service, item, site, group, lock_date, unlock_date, expiration_date, comment in self.history.db.xquery(sql, *args):
        lock = {
            'lockid': lock_id,
            'user': user,
            'dn': dn,
            'item': item,
            'locked': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(lock_date)),
            'unlocked': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(unlock_date)),
            'expires': time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(expiration_date))
        }
        if service is not None:
            lock['service'] = service
        if site is not None:
            lock['sites'] = site
        if group is not None:
            lock['groups'] = group
        if comment is not None:
            lock['comment'] = comment

        existing.append(lock)

    return existing
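# Illustrative only: a request dict of the shape _get_lock understands, with keys drawn from the
# checks above. The handler object and the commented-out call are assumptions -- this snippet
# does not show how the method is actually invoked. 'time' is already imported by the module
# (it is used in _get_lock above).
_example_lock_request = {
    'user': ['someuser'],               # matched against l.`user` (registry) or u.`name` (history)
    'sites': ['T2_EX_Site'],            # matched against l.`sites`
    'expires_after': int(time.time())   # only locks whose expiration_date is later than now
}
# existing_locks = handler._get_lock(_example_lock_request, valid_only = True)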
def get_subscriptions(self, inventory, op = None, status = None):
    """
    Return a list containing Subscription and Desubscription objects ordered by the id.
    @param inventory  Dynamo inventory
    @param op         If set to 'transfer' or 'deletion', limit to the operation type.
    @param status     If not None, set to list of status strings to limit the query.
    """
    # First convert all pre-subscriptions
    self.convert_pre_subscriptions(inventory)

    subscriptions = []

    get_all = 'SELECT u.`id`, u.`status`, u.`delete`, f.`block_id`, f.`name`, s.`name`, u.`hold_reason` FROM `file_subscriptions` AS u'
    get_all += ' INNER JOIN `files` AS f ON f.`id` = u.`file_id`'
    get_all += ' INNER JOIN `sites` AS s ON s.`id` = u.`site_id`'

    constraints = []
    if op == 'transfer':
        constraints.append('u.`delete` = 0')
    elif op == 'deletion':
        constraints.append('u.`delete` = 1')
    if status is not None:
        constraints.append('u.`status` IN ' + MySQL.stringify_sequence(status))

    if len(constraints) != 0:
        get_all += ' WHERE ' + ' AND '.join(constraints)

    get_all += ' ORDER BY s.`id`, f.`block_id`'

    get_tried_sites = 'SELECT s.`name`, f.`exitcode` FROM `failed_transfers` AS f'
    get_tried_sites += ' INNER JOIN `sites` AS s ON s.`id` = f.`source_id`'
    get_tried_sites += ' WHERE f.`subscription_id` = %s'

    _destination_name = ''
    _block_id = -1

    no_source = []
    all_failed = []
    to_done = []

    COPY = 0
    DELETE = 1

    for row in self.db.query(get_all):
        sub_id, st, optype, block_id, file_name, site_name, hold_reason = row

        if site_name != _destination_name:
            _destination_name = site_name
            try:
                destination = inventory.sites[site_name]
            except KeyError:
                # Site disappeared from the inventory - weird but can happen!
                destination = None

            _block_id = -1

        if destination is None:
            continue

        if block_id != _block_id:
            lfile = inventory.find_file(file_name)
            if lfile is None:
                # Dataset, block, or file was deleted from the inventory earlier in this process (deletion not reflected in the inventory store yet)
                continue

            _block_id = block_id
            block = lfile.block
            dest_replica = block.find_replica(destination)
        else:
            lfile = block.find_file(file_name)
            if lfile is None:
                # Dataset, block, or file was deleted from the inventory earlier in this process (deletion not reflected in the inventory store yet)
                continue

        if dest_replica is None and st != 'cancelled':
            LOG.debug('Destination replica for %s does not exist. Canceling the subscription.', file_name)

            # Replica was invalidated
            sql = 'UPDATE `file_subscriptions` SET `status` = \'cancelled\''
            sql += ' WHERE `id` = %s'
            if not self._read_only:
                self.db.query(sql, sub_id)

            if status is not None and 'cancelled' not in status:
                # We are not asked to return cancelled subscriptions
                continue

            st = 'cancelled'

        if optype == COPY:
            disk_sources = None
            tape_sources = None
            failed_sources = None

            if st not in ('done', 'held', 'cancelled'):
                if dest_replica.has_file(lfile):
                    LOG.debug('%s already exists at %s', file_name, site_name)
                    to_done.append(sub_id)
                    st = 'done'
                else:
                    disk_sources = []
                    tape_sources = []
                    for replica in block.replicas:
                        if replica.site == destination or replica.site.status != Site.STAT_READY:
                            continue

                        if replica.has_file(lfile):
                            if replica.site.storage_type == Site.TYPE_DISK:
                                disk_sources.append(replica.site)
                            elif replica.site.storage_type == Site.TYPE_MSS:
                                tape_sources.append(replica.site)

                    if len(disk_sources) + len(tape_sources) == 0:
                        LOG.warning('Transfer of %s to %s has no source.', file_name, site_name)
                        no_source.append(sub_id)
                        st = 'held'

            if st == 'retry':
                failed_sources = {}
                for source_name, exitcode in self.db.query(get_tried_sites, sub_id):
                    try:
                        source = inventory.sites[source_name]
                    except KeyError:
                        # this site may have been deleted in this process
                        continue

                    try:
                        failed_sources[source].append(exitcode)
                    except KeyError:
                        if source not in disk_sources and source not in tape_sources:
                            # this is not a source site any more
                            continue

                        failed_sources[source] = [exitcode]

                if len(failed_sources) == len(disk_sources) + len(tape_sources):
                    # transfers from all sites failed at least once
                    for codes in failed_sources.itervalues():
                        if codes[-1] not in irrecoverable_errors:
                            # This site failed for a recoverable reason
                            break
                    else:
                        # last failure from all sites due to irrecoverable errors
                        LOG.warning('Transfer of %s to %s failed from all sites.', file_name, site_name)
                        all_failed.append(sub_id)
                        st = 'held'

            # st value may have changed - filter again
            if status is None or st in status:
                subscription = RLFSM.Subscription(sub_id, st, lfile, destination, disk_sources, tape_sources, failed_sources, hold_reason)
                subscriptions.append(subscription)

        elif optype == DELETE:
            if st not in ('done', 'held', 'cancelled') and not dest_replica.has_file(lfile):
                LOG.debug('%s is already gone from %s', file_name, site_name)
                to_done.append(sub_id)
                st = 'done'

            if status is None or st in status:
                desubscription = RLFSM.Desubscription(sub_id, st, lfile, destination)
                subscriptions.append(desubscription)

    if len(to_done) + len(no_source) + len(all_failed) != 0:
        msg = 'Subscriptions terminated directly: %d done' % len(to_done)
        if len(no_source) != 0:
            msg += ', %d held with reason "no_source"' % len(no_source)
        if len(all_failed) != 0:
            msg += ', %d held with reason "all_failed"' % len(all_failed)

        LOG.info(msg)

    if not self._read_only:
        self.db.execute_many('UPDATE `file_subscriptions` SET `status` = \'done\', `last_update` = NOW()', 'id', to_done)
        self.db.execute_many('UPDATE `file_subscriptions` SET `status` = \'held\', `hold_reason` = \'no_source\', `last_update` = NOW()', 'id', no_source)
        self.db.execute_many('UPDATE `file_subscriptions` SET `status` = \'held\', `hold_reason` = \'all_failed\', `last_update` = NOW()', 'id', all_failed)

        # Clean up subscriptions for deleted files / sites
        sql = 'DELETE FROM u USING `file_subscriptions` AS u'
        sql += ' LEFT JOIN `files` AS f ON f.`id` = u.`file_id`'
        sql += ' LEFT JOIN `sites` AS s ON s.`id` = u.`site_id`'
        sql += ' WHERE f.`name` IS NULL OR s.`name` IS NULL'
        self.db.query(sql)

        sql = 'DELETE FROM f USING `failed_transfers` AS f'
        sql += ' LEFT JOIN `file_subscriptions` AS u ON u.`id` = f.`subscription_id`'
        sql += ' WHERE u.`id` IS NULL'
        self.db.query(sql)

    return subscriptions
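# Illustrative only: how a caller might drive get_subscriptions. 'rlfsm' and 'inventory' are
# assumed to exist in the caller's context, and the status strings are limited to values that
# appear in the method above ('retry', 'held', 'done', 'cancelled').
# transfer_subs = rlfsm.get_subscriptions(inventory, op = 'transfer', status = ['retry', 'held'])
# deletion_subs = rlfsm.get_subscriptions(inventory, op = 'deletion')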