def download(self, acc, container, u_agent, delay=0, request_tries=3):
        self.logger.info('Prefetching objects with InternalClient with ' + str(delay) + ' seconds of delay.')
        time.sleep(delay)
        swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
        headers = {}

        prefetch_list = []
        bytes_count = 0
        for o in swift.iter_objects(acc, container):
            if bytes_count + int(o['bytes']) < self.cache_max_size:
                prefetch_list.append(o['name'])
                bytes_count += int(o['bytes'])
            else:
                break

        for name in prefetch_list:
            object_path = '/v1/' + acc + '/' + container + '/' + name
            oid = hashlib.md5(object_path).hexdigest()

            status, resp_headers, it = swift.get_object(acc, container, name, headers, ACCEPTABLE_STATUS)

            object_size = int(resp_headers.get('Content-Length'))
            object_etag = resp_headers.get('Etag')

            object_storage_policy_id = '0'  # FIXME hardcoded
            to_evict = self.cache.access_cache("PUT", oid, object_size, object_etag, object_storage_policy_id)
            for ev_object_id in to_evict:
                os.remove(os.path.join(self.cache_path, ev_object_id))

            self.logger.info('Prefetch Filter - Object ' + name + ' stored in cache with ID: ' + oid)
        with open(os.path.join(self.cache_path, oid), 'wb') as f:  # 'wb': object data is raw bytes
                for el in it:
                    f.write(el)
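
For reference, the cache ID used above is just the MD5 hex digest of the object's proxy path. A standalone sanity check of that derivation (hypothetical account, container, and object names):

import hashlib

object_path = '/v1/AUTH_test/photos/cat.jpg'  # hypothetical example path
oid = hashlib.md5(object_path.encode('utf-8')).hexdigest()
print(oid)  # 32-character hex string, used above as the cache filename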
Example #2
 def __init__(self, conf):
     self.conf = conf
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.interval = int(conf.get('interval', 30))
     conf_path = conf.get('__file__') or \
         '/etc/swift/container-reconciler.conf'
     self.logger = get_logger(conf, log_route='container-reconciler')
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path, 'Swift Container Reconciler',
                                 request_tries)
     self.stats = defaultdict(int)
     self.last_stat_time = time.time()
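
The options read above map onto a config section along these lines (a sketch only; the values shown are the in-code defaults):

[container-reconciler]
# seconds between reconciler passes
interval = 30
# how long to keep retrying before giving up (7 days)
reclaim_age = 604800
# retries for InternalClient requests
request_tries = 3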
Example #3
def download(oid, acc, container, name, u_agent, token, delay=0, request_tries=5):
    print 'Prefetching object with InternalClient: ' + oid + ' after ' + str(delay) + ' seconds of delay.'
    #time.sleep(delay)
    start_time = dt.now()
    swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
    headers = {}
    headers['X-Auth-Token'] = token
    headers['X-No-Prefetch'] = 'True'
    status, head, it = swift.get_object(acc, container, name, headers, acc_status)
    data = [el for el in it]
    end_time = dt.now()
    diff = end_time - start_time
    return (oid, data, head, end_time, diff)
Example #4
def make_swift_request(op, account, container=None, obj=None):
    """
    Makes a swift request via a local proxy (cost-expensive)
    :param op: operation (PUT, GET, DELETE, HEAD)
    :param account: swift account
    :param container: swift container
    :param obj: swift object
    :returns: swift.common.swob.Response instance
    """
    iclient = InternalClient(LOCAL_PROXY, 'Zion', 1)
    path = iclient.make_path(account, container, obj)
    resp = iclient.make_request(op, path, {'PATH_INFO': path}, [200])

    return resp
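
A minimal usage sketch for the helper above (hypothetical account, container, and object; assumes LOCAL_PROXY points at a working internal-client config):

resp = make_swift_request('GET', 'AUTH_test', container='images', obj='logo.png')
print(resp.status_int)  # 200 on success

Note that the helper whitelists only status 200, so any other response (for example the 204 of a typical container HEAD) causes InternalClient to raise UnexpectedResponse.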
Example #5
def make_swift_request(op, account, container=None, obj=None):
    """
    Makes a swift request via a local proxy (cost-expensive)

    :param op: operation (PUT, GET, DELETE, HEAD)
    :param account: swift account
    :param container: swift container
    :param obj: swift object
    :returns: swift.common.swob.Response instance
    """
    iclient = InternalClient(LOCAL_PROXY, 'SA', 1)
    path = iclient.make_path(account, container, obj)
    resp = iclient.make_request(op, path, {'PATH_INFO': path}, [200])

    return resp
Example #6
 def __init__(self, conf):
     self.conf = conf
     # This option defines how long an un-processable misplaced object
     # marker will be retried before it is abandoned.  It is not coupled
     # with the tombstone reclaim age in the consistency engine.
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.interval = int(conf.get('interval', 30))
     conf_path = conf.get('__file__') or \
         '/etc/swift/container-reconciler.conf'
     self.logger = get_logger(conf, log_route='container-reconciler')
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path, 'Swift Container Reconciler',
                                 request_tries)
     self.stats = defaultdict(int)
     self.last_stat_time = time.time()
Example #7
def main():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--config', default='/etc/swift/internal-client.conf',
                        help=('internal-client config file '
                              '(default: /etc/swift/internal-client.conf)'))
    parser.add_argument('--request-tries', type=int, default=3,
                        help='(default: 3)')
    parser.add_argument('account', help='account from which to delete')
    parser.add_argument('container', help='container from which to delete')
    parser.add_argument(
        '--prefix', default='',
        help='only delete objects with this prefix (default: none)')
    parser.add_argument(
        '--marker', default='',
        help='only delete objects after this marker (default: none)')
    parser.add_argument(
        '--end-marker', default='',
        help='only delete objects before this end-marker (default: none)')
    parser.add_argument(
        '--timestamp', type=Timestamp, default=Timestamp.now(),
        help='delete all objects as of this time (default: now)')
    args = parser.parse_args()

    swift = InternalClient(
        args.config, 'Swift Container Deleter', args.request_tries)
    for deleted, marker in mark_for_deletion(
            swift, args.account, args.container,
            args.marker, args.end_marker, args.prefix, args.timestamp):
        if marker is None:
            print('Finished. Marked %d objects for deletion.' % deleted)
        else:
            print('Marked %d objects for deletion, through %r' % (
                deleted, marker))
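
The same machinery can be driven programmatically (a sketch: mark_for_deletion is the generator iterated above; the account, container, and prefix here are hypothetical):

from swift.common.internal_client import InternalClient
from swift.common.utils import Timestamp

swift = InternalClient('/etc/swift/internal-client.conf',
                       'Swift Container Deleter', 3)
for deleted, marker in mark_for_deletion(
        swift, 'AUTH_test', 'old-logs',  # account, container
        '', '', 'tmp/',                  # marker, end_marker, prefix
        Timestamp.now()):                # delete objects as of now
    if marker is None:
        print('Finished. Marked %d objects for deletion.' % deleted)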
Example #8
 def __init__(self, conf, logger=None, swift=None):
     self.conf = conf
     # This option defines how long an un-processable misplaced object
     # marker will be retried before it is abandoned.  It is not coupled
     # with the tombstone reclaim age in the consistency engine.
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.interval = float(conf.get('interval', 30))
     conf_path = conf.get('__file__') or \
         '/etc/swift/container-reconciler.conf'
     self.logger = logger or get_logger(conf,
                                        log_route='container-reconciler')
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = swift or InternalClient(conf_path,
                                          'Swift Container Reconciler',
                                          request_tries,
                                          use_replication_network=True)
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.stats = defaultdict(int)
     self.last_stat_time = time.time()
     self.ring_check_interval = float(conf.get('ring_check_interval', 15))
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     if self.processes < 0:
         raise ValueError(
             'processes must be an integer greater than or equal to 0')
     self.process = int(self.conf.get('process', 0))
     if self.process < 0:
         raise ValueError(
             'process must be an integer greater than or equal to 0')
     if self.processes and self.process >= self.processes:
         raise ValueError('process must be less than processes')
Example #9
 def __init__(self, conf):
     self.conf = conf
     self.container_ring = Ring('/etc/swift', ring_name='container')
     self.logger = get_logger(conf, log_route='object-restorer')
     self.logger.set_statsd_prefix('s3-object-restorer')
     self.interval = int(conf.get('interval') or 300)
     self.restoring_object_account = '.s3_restoring_objects'
     self.expiring_restored_account = '.s3_expiring_restored_objects'
     self.glacier_account_prefix = '.glacier_'
     self.todo_container = 'todo'
     self.restoring_container = 'restoring'
     conf_path = '/etc/swift/s3-object-restorer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.glacier = self._init_glacier()
     self.glacier_tmpdir = conf.get('temp_path', '/var/cache/s3/')
     self.swift = InternalClient(conf_path,
                                 'Swift Object Restorer',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.client = Client(self.conf.get('sentry_sdn', ''))
Example #10
    def read_conf_for_queue_access(self, swift):
        if self.conf.get('auto_create_account_prefix'):
            self.logger.warning('Option auto_create_account_prefix is '
                                'deprecated. Configure '
                                'auto_create_account_prefix under the '
                                'swift-constraints section of '
                                'swift.conf. This option will '
                                'be ignored in a future release.')
            auto_create_account_prefix = \
                self.conf['auto_create_account_prefix']
        else:
            auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX

        self.expiring_objects_account = auto_create_account_prefix + \
            (self.conf.get('expiring_objects_account_name') or
             'expiring_objects')

        # This is for common parameter with general task queue in future
        self.task_container_prefix = ''

        request_tries = int(self.conf.get('request_tries') or 3)
        self.swift = swift or InternalClient(self.ic_conf_path,
                                             'Swift Object Expirer',
                                             request_tries,
                                             use_replication_network=True)

        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
Example #11
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='utilization-aggregator')
     self.interval = int(conf.get('interval') or 60)
     self.aggregate_account = '.utilization'
     self.sample_account = '.transfer_record'
     conf_path = conf.get('__file__') or \
                 '/etc/swift/swift-utilization-aggregator.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Utilization Aggregator',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 60)
     self.report_first_time = self.report_last_time = time()
     self.report_containers = 0
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.container_ring = Ring('/etc/swift', ring_name='container')
     self.sample_rate = int(self.conf.get('sample_rate', 600))
     self.last_chk = iso8601_to_timestamp(self.conf.get(
         'service_start'))
     self.kinx_api_url = self.conf.get('kinx_api_url')
Example #12
 def __init__(self, conf):
     super(ObjectExpirer, self).__init__(conf)
     self.conf = conf
     self.logger = get_logger(conf, log_route='s3-object-expirer')
     self.logger.set_statsd_prefix('s3-object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.s3_expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         (conf.get('expiring_objects_account_name') or
          's3_expiring_objects')
     conf_path = conf.get('__file__') or '/etc/swift/s3-object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Object Expirer',
                                 request_tries)
     self.glacier = self._init_glacier()
     self.glacier_account_prefix = '.glacier_'
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.client = Client(self.conf.get('sentry_sdn', ''))
Example #13
 def __init__(self, conf, logger=None, swift=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         (conf.get('expiring_objects_account_name') or 'expiring_objects')
     conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = swift or InternalClient(conf_path, 'Swift Object Expirer',
                                          request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     # This option defines how long an un-processable expired object
     # marker will be retried before it is abandoned.  It is not coupled
     # with the tombstone reclaim age in the consistency engine.
     self.reclaim_age = int(conf.get('reclaim_age', 604800))
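
The options consumed by this constructor map onto an object-expirer.conf section roughly like the following (a sketch; the values are the in-code defaults):

[object-expirer]
interval = 300
report_interval = 300
request_tries = 3
concurrency = 1
processes = 0
process = 0
recon_cache_path = /var/cache/swift
# retry window for unprocessable expired-object markers (7 days)
reclaim_age = 604800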
Example #14
def create_internal_client(conf, swift_dir):
    ic_config = conf.get('internal_client_path',
                         os.path.join(swift_dir, 'internal-client.conf'))
    if not os.path.exists(ic_config):
        ic_config = ConfigString(INTERNAL_CLIENT_CONFIG)

    ic_name = conf.get('internal_client_logname', 'ContainerCrawler')
    return InternalClient(ic_config, ic_name, 3)
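
When no internal-client.conf exists on disk, the helper above falls back to an in-memory ConfigString. A minimal pipeline of the kind such a constant usually carries (an assumption, not necessarily the literal INTERNAL_CLIENT_CONFIG of this project):

[DEFAULT]

[pipeline:main]
pipeline = catch_errors proxy-logging cache proxy-server

[app:proxy-server]
use = egg:swift#proxy
account_autocreate = true

[filter:cache]
use = egg:swift#memcache

[filter:proxy-logging]
use = egg:swift#proxy_logging

[filter:catch_errors]
use = egg:swift#catch_errors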
Example #15
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path, 'Swift Object Expirer',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
Example #16
    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)
Example #17
 def run(self):
     self.logger.debug('Prefetching object with InternalClient: ' +
                       self.oid + ' after ' + str(self.delay) +
                       ' seconds of delay.')
     eventlet.sleep(self.delay)
     start_time = dt.now()
     swift = InternalClient(PROXY_PATH,
                            self.user_agent,
                            request_tries=self.request_tries)
     headers = {}
     headers['X-Auth-Token'] = self.token
     headers['X-No-Prefetch'] = 'True'
     status, head, it = swift.get_object(self.acc, self.container,
                                         self.objname, headers, acc_status)
     data = [el for el in it]
     end_time = dt.now()
     diff = end_time - start_time
     self.log_results(self.oid, data, diff)
     self.delete_memory()
Example #18
 def __init__(self, status_dir, settings):
     self._status_dir = status_dir
     self._account = settings['account']
     self._container = settings['container']
     ic_config = ConfigString(self.INTERNAL_CLIENT_CONFIG)
     self._swift_client = InternalClient(ic_config, 'Metadata sync', 3)
     self._status_file = os.path.join(self._status_dir, self._account,
                                      self._container)
     self._status_account_dir = os.path.join(self._status_dir,
                                             self._account)
Example #19
def download(oid,
             acc,
             container,
             name,
             u_agent,
             token,
             delay=0,
             request_tries=5):
    print 'Prefetching object with InternalClient: ' + oid + ' after ' + str(
        delay.total_seconds()) + ' seconds of delay.'
    time.sleep(delay.total_seconds())
    swift = InternalClient(PROXY_PATH, u_agent, request_tries=request_tries)
    headers = {}
    headers['X-Auth-Token'] = token
    headers['X-No-Prefetch'] = 'True'
    status, head, it = swift.get_object(acc, container, name, headers,
                                        acc_status)
    print 'Request to Swift - Response Status: ' + str(
        status) + ' Response headers: ' + str(head)
    data = [el for el in it]
    return (oid, data, head)
Example #20
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path, 'Swift Object Expirer',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
Example #21
 def __init__(self, conf):
     self.conf = conf
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.interval = int(conf.get('interval', 30))
     conf_path = conf.get('__file__') or \
         '/etc/swift/container-reconciler.conf'
     self.logger = get_logger(conf, log_route='container-reconciler')
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Container Reconciler',
                                 request_tries)
     self.stats = defaultdict(int)
     self.last_stat_time = time.time()
Example #22
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route="object-expirer")
     self.interval = int(conf.get("interval") or 300)
     self.expiring_objects_account = (conf.get("auto_create_account_prefix") or ".") + "expiring_objects"
     conf_path = conf.get("__file__") or "/etc/swift/object-expirer.conf"
     request_tries = int(conf.get("request_tries") or 3)
     self.swift = InternalClient(conf_path, "Swift Object Expirer", request_tries)
     self.report_interval = int(conf.get("report_interval") or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift")
     self.rcache = join(self.recon_cache_path, "object.recon")
Example #23
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expiring_objects_account = \
         (conf.get('auto_create_account_prefix') or '.') + \
         'expiring_objects'
     conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path, 'Swift Object Expirer',
         request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
Example #24
    def setUp(self):
        if len(POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token, self.account,
         self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)
Example #25
    def read_conf_for_queue_access(self, swift):
        self.expiring_objects_account = \
            (self.conf.get('auto_create_account_prefix') or '.') + \
            (self.conf.get('expiring_objects_account_name') or
             'expiring_objects')

        # This is for common parameter with general task queue in future
        self.task_container_prefix = ''

        request_tries = int(self.conf.get('request_tries') or 3)
        self.swift = swift or InternalClient(
            self.ic_conf_path, 'Swift Object Expirer', request_tries)

        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
Example #26
 def __init__(self, conf):
     self.conf = conf
     # This option defines how long an un-processable misplaced object
     # marker will be retried before it is abandoned.  It is not coupled
     # with the tombstone reclaim age in the consistency engine.
     self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
     self.interval = int(conf.get('interval', 30))
     conf_path = conf.get('__file__') or \
         '/etc/swift/container-reconciler.conf'
     self.logger = get_logger(conf, log_route='container-reconciler')
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Container Reconciler',
                                 request_tries)
     self.stats = defaultdict(int)
     self.last_stat_time = time.time()
Example #27
 def setUp(cls):
     cls.conn = Connection(config)
     cls.conn.authenticate()
     cls.account = Account(cls.conn,
                           config.get('account',
                           config['username']))
     cls.account.delete_containers()
     cls.container = cls.account.container(Utils.create_name())
     if not cls.container.create():
         raise ResponseError(cls.conn.response)
     cls.file_size = 8
     cls.root_dir = os.path.join('/mnt/gluster-object',
                             cls.account.conn.storage_url.split('/')[2].split('_')[1])
     devices = config.get('devices', '/mnt/gluster-object')
     cls.client = InternalClient('/etc/swift/object-expirer.conf',
                                 'Test Object Expirer', 1)
     cls.expirer = Manager(['object-expirer'])
Example #28
class InternalBrainClient(object):

    def __init__(self, conf_file, account='AUTH_test'):
        self.swift = InternalClient(conf_file, 'probe-test', 3)
        self.account = account

    @translate_client_exception
    def put_container(self, container_name, headers):
        return self.swift.create_container(self.account, container_name,
                                           headers=headers)

    @translate_client_exception
    def post_container(self, container_name, headers):
        return self.swift.set_container_metadata(self.account, container_name,
                                                 headers)

    @translate_client_exception
    def delete_container(self, container_name):
        return self.swift.delete_container(self.account, container_name)

    def parse_qs(self, query_string):
        if query_string is not None:
            return {k: v[-1] for k, v in parse_qs(query_string).items()}

    @translate_client_exception
    def put_object(self, container_name, object_name, headers, contents,
                   query_string=None):
        return self.swift.upload_object(StringIO(contents), self.account,
                                        container_name, object_name,
                                        headers=headers,
                                        params=self.parse_qs(query_string))

    @translate_client_exception
    def delete_object(self, container_name, object_name):
        return self.swift.delete_object(
            self.account, container_name, object_name)

    @translate_client_exception
    def head_object(self, container_name, object_name):
        return self.swift.get_object_metadata(
            self.account, container_name, object_name)

    @translate_client_exception
    def get_object(self, container_name, object_name, query_string=None):
        status, headers, resp_iter = self.swift.get_object(
            self.account, container_name, object_name,
            params=self.parse_qs(query_string))
        return headers, ''.join(resp_iter)
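
A short usage sketch for the wrapper above (hypothetical conf path, container, and object; contents is a plain str because put_object wraps it in StringIO):

client = InternalBrainClient('/etc/swift/internal-client.conf')
client.put_container('photos', headers={'X-Storage-Policy': 'gold'})
client.put_object('photos', 'cat.jpg', headers={}, contents='meow')
print(client.head_object('photos', 'cat.jpg'))  # object metadata dict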
Example #29
    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)
Example #30
    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest("Need more than one policy")

        self.expirer = Manager(["object-expirer"])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest("Unable to verify object-expirer service")

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, "probe-test", 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = "container-%s" % uuid.uuid4()
        self.object_name = "object-%s" % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name)
Example #31
    def setUp(self):
        if len(POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token,
         self.account, self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)
Example #32
 def __init__(self,
              account,
              auth_url=None,
              password=None,
              container='rings',
              internal=True):
     """ """
     self.internal = internal
     self.account = account
     self.auth_url = auth_url
     self.password = password
     self.container = container
     self.conn = None
     retry_times = 3
     if self.internal:
         try:
             conf_path = join(abspath(dirname(__file__)), 'stub.conf')
             self.conn = InternalClient(conf_path, 'swift_ring_sync',
                                        retry_times)
         except IOError, msg:
             raise RingSyncError('InternalClient Init Error: [%s]' % msg)
         except UnexpectedResponse, (msg, resp):
             raise RingSyncError('InternalClient Init Error: [%s]' % msg)
Example #33
 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='restored-object-expirer')
     self.logger.set_statsd_prefix('s3-restored-object-expirer')
     self.interval = int(conf.get('interval') or 300)
     self.expire_restored_account = '.s3_expiring_restored_objects'
     conf_path = '/etc/swift/s3-restored-object-expirer.conf'
     request_tries = int(conf.get('request_tries') or 3)
     self.swift = InternalClient(conf_path,
                                 'Swift Restored Object Expirer',
                                 request_tries)
     self.report_interval = int(conf.get('report_interval') or 300)
     self.report_first_time = self.report_last_time = time()
     self.report_objects = 0
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = join(self.recon_cache_path, 'object.recon')
     self.concurrency = int(conf.get('concurrency', 1))
     if self.concurrency < 1:
         raise ValueError("concurrency must be set to at least 1")
     self.processes = int(self.conf.get('processes', 0))
     self.process = int(self.conf.get('process', 0))
     self.client = Client(self.conf.get('sentry_sdn', ''))
Example #34
class TestObjectExpirer(ReplProbeTest):

    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):

            if self.object_name == obj['name']:
                return True

        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_doesnt_make_async_pendings(self):
        # The object expirer cleans up its own queue. The inner loop
        # basically looks like this:
        #
        #    for obj in stuff_to_delete:
        #        delete_the_object(obj)
        #        remove_the_queue_entry(obj)
        #
        # By default, upon receipt of a DELETE request for an expiring
        # object, the object servers will create async_pending records to
        # clean the expirer queue. Since the expirer cleans its own queue,
        # this is unnecessary. The expirer can make requests in such a way
        # that the object server does not write out any async pendings; this
        # test asserts that this is the case.

        # Make an expiring object in each policy
        for policy in ENABLED_POLICIES:
            container_name = "expirer-test-%d" % policy.idx
            container_headers = {'X-Storage-Policy': policy.name}
            client.put_container(self.url, self.token, container_name,
                                 headers=container_headers)

            now = time.time()
            delete_at = int(now + 2.0)
            client.put_object(
                self.url, self.token, container_name, "some-object",
                headers={'X-Delete-At': str(delete_at),
                         'X-Timestamp': Timestamp(now).normal},
                contents='dontcare')

        time.sleep(2.0)
        # make sure auto-created expirer-queue containers get in the account
        # listing so the expirer can find them
        Manager(['container-updater']).once()

        # Make sure there's no async_pendings anywhere. Probe tests only run
        # on single-node installs anyway, so this set should be small enough
        # that an exhaustive check doesn't take too long.
        all_obj_nodes = self.get_all_object_nodes()
        pendings_before = self.gather_async_pendings(all_obj_nodes)

        # expire the objects
        Manager(['object-expirer']).once()
        pendings_after = self.gather_async_pendings(all_obj_nodes)
        self.assertEqual(pendings_after, pendings_before)

    def test_expirer_object_should_not_be_expired(self):

        # The current object-expirer checks correctness via the
        # x-if-delete-at header before deleting. If an object either
        # doesn't have an x-delete-at header as metadata, or has an
        # x-delete-at value different from the x-if-delete-at value, the
        # object-expirer's delete will fail with 412 Precondition Failed.
        # However, if some of the objects are on handoff nodes, the
        # expirer can put a tombstone with a timestamp equal to
        # x-delete-at, and the object consistency will be resolved with
        # the newer timestamp winning (in particular, the overwritten
        # case without x-delete-at). This test asserts that, at least, an
        # overwritten object with a larger timestamp than the original
        # expiration date remains safe.

        def put_object(headers):
            # use internal client to PUT objects so that X-Timestamp in headers
            # is effective
            headers['Content-Length'] = '0'
            path = self.client.make_path(
                self.account, self.container_name, self.object_name)
            try:
                self.client.make_request('PUT', path, headers, (2,))
            except UnexpectedResponse as e:
                self.fail(
                    'Expected 201 for PUT object but got %s' % e.resp.status)

        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        #   < T(expirer_executed)
        # The recreated object should appear in any split-brain case

        obj_brain.put_container()

        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        # so here, T(obj_created) < T(obj_deleted with x-delete-at)
        now = time.time()
        delete_at = int(now + 2.0)
        recreate_at = delete_at + 1.0
        put_object(headers={'X-Delete-At': str(delete_at),
                            'X-Timestamp': Timestamp(now).normal})

        # stop some object servers to create a situation where the
        # object-expirer can put a tombstone on the primary nodes
        obj_brain.stop_primary_half()

        # increment the X-Timestamp explicitly
        # (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
        put_object(headers={'X-Object-Meta-Expired': 'False',
                            'X-Timestamp': Timestamp(recreate_at).normal})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # sanity, the newer object is still there
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep until after recreate_at
        while time.time() <= recreate_at:
            time.sleep(0.1)
        # now run the expirer, after the object has been recreated
        self.expirer.once()

        # verify that original object was deleted by expirer
        obj_brain.stop_handoff_half()
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,))
        except UnexpectedResponse as e:
            self.fail(
                'Expected 404 for HEAD object but got %s' % e.resp.status)
        obj_brain.start_handoff_half()

        # and inconsistent state of objects is recovered by replicator
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

    def _test_expirer_delete_outdated_object_version(self, object_exists):
        # This test simulates a case where the expirer tries to delete
        # an outdated version of an object.
        # One case is where the expirer gets a 404, whereas the newest version
        # of the object is offline.
        # Another case is where the expirer gets a 412, since the old version
        # of the object mismatches the expiration time sent by the expirer.
        # In any of these cases, the expirer should retry deleting the object
        # later, for as long as a reclaim age has not passed.
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        obj_brain.put_container()

        if object_exists:
            obj_brain.put_object()

        # currently, the object either doesn't exist, or does not have
        # an expiration

        # stop primary servers and put a newer version of the object, this
        # time with an expiration. only the handoff servers will have
        # the new version
        obj_brain.stop_primary_half()
        now = time.time()
        delete_at = int(now + 2.0)
        obj_brain.put_object({'X-Delete-At': str(delete_at)})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()

        # update object record in the container listing
        Manager(['container-replicator']).once()

        # take handoff servers down, and bring up the outdated primary servers
        obj_brain.start_primary_half()
        obj_brain.stop_handoff_half()

        # wait until object expiration time
        while time.time() <= delete_at:
            time.sleep(0.1)

        # run expirer against the outdated servers. it should fail since
        # the outdated version does not match the expiration time
        self.expirer.once()

        # bring all servers up, and run replicator to update servers
        obj_brain.start_handoff_half()
        Manager(['object-replicator']).once()

        # verify the deletion has failed by checking the container listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # run expirer again, delete should now succeed
        self.expirer.once()

        # verify the deletion by checking the container listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

    def test_expirer_delete_returns_outdated_404(self):
        self._test_expirer_delete_outdated_object_version(object_exists=False)

    def test_expirer_delete_returns_outdated_412(self):
        self._test_expirer_delete_outdated_object_version(object_exists=True)
Example #35
class ObjectExpirer(Daemon):
    """
    Daemon that queries the internal hidden expiring_objects_account to
    discover objects that need to be deleted.

    :param conf: The daemon configuration.
    """

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Object Expirer',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final progress
        if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(self.expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self.expiring_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue
                    timestamp, actual_obj = obj.split('-', 1)
                    timestamp = int(timestamp)
                    if timestamp > int(time()):
                        break
                    pool.spawn_n(
                        self.delete_object, actual_obj, timestamp,
                        container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(
                        self.expiring_objects_account,
                        container,
                        acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
                except (Exception, Timeout), err:
                    self.logger.exception(
                        _('Exception while deleting container %s %s') %
                        (container, str(err)))
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            self.logger.exception(_('Unhandled exception'))
Example #36
class ObjectExpirer(Daemon):
    """
    Daemon that queries the internal hidden expiring_objects_account to
    discover objects that need to be deleted.

    :param conf: The daemon configuration.
    """

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Object Expirer',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final progress
        if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(self.expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self.expiring_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue
                    timestamp, actual_obj = obj.split('-', 1)
                    timestamp = int(timestamp)
                    if timestamp > int(time()):
                        break
                    pool.spawn_n(
                        self.delete_object, actual_obj, timestamp,
                        container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(
                        self.expiring_objects_account,
                        container,
                        acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
                except (Exception, Timeout) as err:
                    self.logger.exception(
                        _('Exception while deleting container %s %s') %
                        (container, str(err)))
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            self.logger.exception(_('Unhandled exception'))
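
    # Queue layout assumed by run_once above (matching the parsing in the
    # loop): the hidden account holds containers named by an integer
    # timestamp, and each entry is named '<delete-at-ts>-<acct>/<cont>/<obj>'.
    # Containers and entries whose leading timestamp is still in the future
    # are skipped until their time arrives.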

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                self.logger.exception(_('Unhandled exception'))
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.

        Otherwise, return processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods.  They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes

        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process

        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')

        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')

        if processes and process >= processes:
            raise ValueError(
                'process must be less than or equal to processes')

        return processes, process
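
    # Illustrative usage (hypothetical values): two expirer workers can split
    # the queue by passing complementary kwargs, e.g.
    #
    #     daemon.run_once(processes=2, process=0)   # first worker
    #     daemon.run_once(processes=2, process=1)   # second worker
    #
    # run_once routes each entry by md5('<container>/<obj>') % processes, so
    # the workers cover disjoint halves.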

    def delete_object(self, actual_obj, timestamp, container, obj):
        start_time = time()
        try:
            self.delete_actual_object(actual_obj, timestamp)
            self.swift.delete_object(self.expiring_objects_account,
                                     container, obj)
            self.report_objects += 1
            self.logger.increment('objects')
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            self.logger.exception(
                _('Exception while deleting object %s %s %s') %
                (container, obj, str(err)))
        self.logger.timing_since('timing', start_time)
        self.report()

    def delete_actual_object(self, actual_obj, timestamp):
        """
        Deletes the end-user object indicated by the actual object name given
        '<account>/<container>/<object>' if and only if the X-Delete-At value
        of the object is exactly the timestamp given.

        :param actual_obj: The name of the end-user object to delete:
                           '<account>/<container>/<object>'
        :param timestamp: The timestamp the X-Delete-At value must match to
                          perform the actual delete.
        """
        path = '/v1/' + urllib.quote(actual_obj.lstrip('/'))
        self.swift.make_request('DELETE', path,
                                {'X-If-Delete-At': str(timestamp)},
                                (2, HTTP_NOT_FOUND, HTTP_PRECONDITION_FAILED))
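
    # For reference, the request above amounts to (sketch, not a verbatim
    # client call):
    #
    #     DELETE /v1/<account>/<container>/<object>
    #     X-If-Delete-At: <timestamp>
    #
    # A 412 Precondition Failed response means the object's X-Delete-At no
    # longer matches (e.g. the object was overwritten after the queue entry
    # was created), so nothing is deleted.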
Example #37
0
class ContainerSync(Daemon):
    """
    Daemon to sync syncable containers.

    This is done by scanning the local devices for container databases and
    checking for x-container-sync-to and x-container-sync-key metadata values.
    If they exist, newer rows since the last sync will trigger PUTs or DELETEs
    to the other container.

    .. note::

        Container sync will sync object POSTs only if the proxy server is set
        to use "object_post_as_copy = true", which is the default. So-called
        fast object posts, "object_post_as_copy = false", do not update the
        container listings and therefore can't be detected for
        synchronization.

    The actual syncing is slightly more complicated to make use of the three
    (or number-of-replicas) main nodes for a container without each trying to
    do the exact same work but also without missing work if one node happens to
    be down.

    Two sync points are kept per container database. All rows between the two
    sync points trigger updates. Any rows newer than both sync points cause
    updates depending on the node's position for the container (primary nodes
    do one third, etc. depending on the replica count of course). After a sync
    run, the first sync point is set to the newest ROWID known and the second
    sync point is set to newest ROWID for which all updates have been sent.

    An example may help. Assume replica count is 3 and perfectly matching
    ROWIDs starting at 1.

        First sync run, database has 6 rows:

            * SyncPoint1 starts as -1.
            * SyncPoint2 starts as -1.
            * No rows between points, so no "all updates" rows.
            * Six rows newer than SyncPoint1, so a third of the rows are sent
              by node 1, another third by node 2, remaining third by node 3.
            * SyncPoint1 is set as 6 (the newest ROWID known).
            * SyncPoint2 is left as -1 since no "all updates" rows were synced.

        Next sync run, database has 12 rows:

            * SyncPoint1 starts as 6.
            * SyncPoint2 starts as -1.
            * The rows between -1 and 6 all trigger updates (most of which
              should short-circuit on the remote end as having already been
              done).
            * Six more rows newer than SyncPoint1, so a third of the rows are
              sent by node 1, another third by node 2, remaining third by node
              3.
            * SyncPoint1 is set as 12 (the newest ROWID known).
            * SyncPoint2 is set as 6 (the newest "all updates" ROWID).

    In this way, under normal circumstances each node sends its share of
    updates each run and just sends a batch of older updates to ensure nothing
    was missed.

    :param conf: The dict of configuration values from the [container-sync]
                 section of the container-server.conf
    :param container_ring: If None, the <swift_dir>/container.ring.gz will be
                           loaded. This is overridden by unit tests.
    """

    def __init__(self, conf, container_ring=None, logger=None):
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = logger or get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
        #: it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: ContainerSyncCluster instance for validating sync-to values.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(
                conf.get('swift_dir', '/etc/swift'),
                'container-sync-realms.conf'),
            self.logger)
        #: The list of hosts we're allowed to send syncs to. This can be
        #: overridden by data in self.realms_conf
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        self.http_proxies = [
            a.strip()
            for a in conf.get('sync_proxy', '').split(',')
            if a.strip()]
        #: ContainerSyncStore instance for iterating over synced containers
        self.sync_store = ContainerSyncStore(self.devices,
                                             self.logger,
                                             self.mount_check)
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers whose sync has been turned off, but
        #: are not yet cleared from the sync store.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0
        #: Time of last stats report.
        self.reported = time()
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(self.swift_dir,
                                                     ring_name='container')
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        self._myips = whataremyips(bind_ip)
        self._myport = int(conf.get('bind_port', 6001))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self.conn_timeout = float(conf.get('conn_timeout', 5))
        request_tries = int(conf.get('request_tries') or 3)

        internal_client_conf_path = conf.get('internal_client_conf_path')
        if not internal_client_conf_path:
            self.logger.warning(
                _('Configuration option internal_client_conf_path not '
                  'defined. Using default configuration, See '
                  'internal-client.conf-sample for options'))
            internal_client_conf = ConfigString(ic_conf_body)
        else:
            internal_client_conf = internal_client_conf_path
        try:
            self.swift = InternalClient(
                internal_client_conf, 'Swift Container Sync', request_tries)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise SystemExit(
                _('Unable to load internal client from config: %r (%s)') %
                (internal_client_conf_path, err))

    def get_object_ring(self, policy_idx):
        """
        Get the ring object to use based on its policy.

        :policy_idx: policy index as defined in swift.conf
        :returns: appropriate ring object
        """
        return POLICIES.get_object_ring(policy_idx, self.swift_dir)

    def run_forever(self, *args, **kwargs):
        """
        Runs container sync scans until stopped.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            for path in self.sync_store.synced_containers_generator():
                self.container_sync(path)
                if time() - self.reported >= 3600:  # once an hour
                    self.report()
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(self.interval - elapsed)

    def run_once(self, *args, **kwargs):
        """
        Runs a single container sync scan.
        """
        self.logger.info(_('Begin container sync "once" mode'))
        begin = time()
        for path in self.sync_store.synced_containers_generator():
            self.container_sync(path)
            if time() - self.reported >= 3600:  # once an hour
                self.report()
        self.report()
        elapsed = time() - begin
        self.logger.info(
            _('Container sync "once" mode completed: %.02fs'), elapsed)

    def report(self):
        """
        Writes a report of the stats to the logger and resets the stats for the
        next report.
        """
        self.logger.info(
            _('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
              'puts], %(skip)s skipped, %(fail)s failed'),
            {'time': ctime(self.reported),
             'sync': self.container_syncs,
             'delete': self.container_deletes,
             'put': self.container_puts,
             'skip': self.container_skips,
             'fail': self.container_failures})
        self.reported = time()
        self.container_syncs = 0
        self.container_deletes = 0
        self.container_puts = 0
        self.container_skips = 0
        self.container_failures = 0

    def container_sync(self, path):
        """
        Checks the given path for a container database, determines if syncing
        is turned on for that database and, if so, sends any updates to the
        other container.

        :param path: the path to a container db
        """
        broker = None
        try:
            broker = ContainerBroker(path)
            # The path we pass to the ContainerBroker is a real path of
            # a container DB. If we get here, however, it means that this
            # path is linked from the sync_containers dir. In rare cases
            # of race or processes failures the link can be stale and
            # the get_info below will raise a DB doesn't exist exception
            # In this case we remove the stale link and raise an error
            # since in most cases the db should be there.
            try:
                info = broker.get_info()
            except DatabaseConnectionError as db_err:
                if str(db_err).endswith("DB doesn't exist"):
                    self.sync_store.remove_synced_container(broker)
                raise

            x, nodes = self.container_ring.get_nodes(info['account'],
                                                     info['container'])
            for ordinal, node in enumerate(nodes):
                if is_local_device(self._myips, self._myport,
                                   node['ip'], node['port']):
                    break
            else:
                return
            if not broker.is_deleted():
                sync_to = None
                user_key = None
                sync_point1 = info['x_container_sync_point1']
                sync_point2 = info['x_container_sync_point2']
                for key, (value, timestamp) in broker.metadata.items():
                    if key.lower() == 'x-container-sync-to':
                        sync_to = value
                    elif key.lower() == 'x-container-sync-key':
                        user_key = value
                if not sync_to or not user_key:
                    self.container_skips += 1
                    self.logger.increment('skips')
                    return
                err, sync_to, realm, realm_key = validate_sync_to(
                    sync_to, self.allowed_sync_hosts, self.realms_conf)
                if err:
                    self.logger.info(
                        _('ERROR %(db_file)s: %(validate_sync_to_err)s'),
                        {'db_file': str(broker),
                         'validate_sync_to_err': err})
                    self.container_failures += 1
                    self.logger.increment('failures')
                    return
                stop_at = time() + self.container_time
                next_sync_point = None
                while time() < stop_at and sync_point2 < sync_point1:
                    rows = broker.get_items_since(sync_point2, 1)
                    if not rows:
                        break
                    row = rows[0]
                    if row['ROWID'] > sync_point1:
                        break
                    key = hash_path(info['account'], info['container'],
                                    row['name'], raw_digest=True)
                    # This node will only initially sync out one third of the
                    # objects (if 3 replicas, 1/4 if 4, etc.) and will skip
                    # problematic rows as needed in case of faults.
                    # This section will attempt to sync previously skipped
                    # rows in case the previous attempts by any of the nodes
                    # didn't succeed.
                    if not self.container_sync_row(
                            row, sync_to, user_key, broker, info, realm,
                            realm_key):
                        if not next_sync_point:
                            next_sync_point = sync_point2
                    sync_point2 = row['ROWID']
                    broker.set_x_container_sync_points(None, sync_point2)
                if next_sync_point:
                    broker.set_x_container_sync_points(None, next_sync_point)
                while time() < stop_at:
                    rows = broker.get_items_since(sync_point1, 1)
                    if not rows:
                        break
                    row = rows[0]
                    key = hash_path(info['account'], info['container'],
                                    row['name'], raw_digest=True)
                    # This node will only initially sync out one third of the
                    # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
                    # around to the section above and attempt to sync
                    # previously skipped rows in case the other nodes didn't
                    # succeed or in case it failed to do so the first time.
                    if unpack_from('>I', key)[0] % \
                            len(nodes) == ordinal:
                        self.container_sync_row(
                            row, sync_to, user_key, broker, info, realm,
                            realm_key)
                    sync_point1 = row['ROWID']
                    broker.set_x_container_sync_points(sync_point1, None)
                self.container_syncs += 1
                self.logger.increment('syncs')
        except (Exception, Timeout):
            self.container_failures += 1
            self.logger.increment('failures')
            self.logger.exception(_('ERROR Syncing %s'),
                                  broker if broker else path)

    def container_sync_row(self, row, sync_to, user_key, broker, info,
                           realm, realm_key):
        """
        Sends the update the row indicates to the sync_to container.

        :param row: The updated row in the local database triggering the sync
                    update.
        :param sync_to: The URL to the remote container.
        :param user_key: The X-Container-Sync-Key to use when sending requests
                         to the other container.
        :param broker: The local container database broker.
        :param info: The get_info result from the local container database
                     broker.
        :param realm: The realm from self.realms_conf, if there is one.
            If None, fallback to using the older allowed_sync_hosts
            way of syncing.
        :param realm_key: The realm key from self.realms_conf, if there
            is one. If None, fallback to using the older
            allowed_sync_hosts way of syncing.
        :returns: True on success
        """
        try:
            start_time = time()
            if row['deleted']:
                try:
                    headers = {'x-timestamp': row['created_at']}
                    if realm and realm_key:
                        nonce = uuid.uuid4().hex
                        path = urlparse(sync_to).path + '/' + quote(
                            row['name'])
                        sig = self.realms_conf.get_sig(
                            'DELETE', path, headers['x-timestamp'], nonce,
                            realm_key, user_key)
                        headers['x-container-sync-auth'] = '%s %s %s' % (
                            realm, nonce, sig)
                    else:
                        headers['x-container-sync-key'] = user_key
                    delete_object(sync_to, name=row['name'], headers=headers,
                                  proxy=self.select_http_proxy(),
                                  logger=self.logger,
                                  timeout=self.conn_timeout)
                except ClientException as err:
                    if err.http_status != HTTP_NOT_FOUND:
                        raise
                self.container_deletes += 1
                self.logger.increment('deletes')
                self.logger.timing_since('deletes.timing', start_time)
            else:
                part, nodes = \
                    self.get_object_ring(info['storage_policy_index']). \
                    get_nodes(info['account'], info['container'],
                              row['name'])
                shuffle(nodes)
                exc = None
                looking_for_timestamp = Timestamp(row['created_at'])
                timestamp = -1
                headers = body = None
                # look up for the newest one
                headers_out = {'X-Newest': True,
                               'X-Backend-Storage-Policy-Index':
                               str(info['storage_policy_index'])}
                try:
                    source_obj_status, source_obj_info, source_obj_iter = \
                        self.swift.get_object(info['account'],
                                              info['container'], row['name'],
                                              headers=headers_out,
                                              acceptable_statuses=(2, 4))

                except (Exception, UnexpectedResponse, Timeout) as err:
                    source_obj_info = {}
                    source_obj_iter = None
                    exc = err
                timestamp = Timestamp(source_obj_info.get(
                                      'x-timestamp', 0))
                headers = source_obj_info
                body = source_obj_iter
                if timestamp < looking_for_timestamp:
                    if exc:
                        raise exc
                    raise Exception(
                        _('Unknown exception trying to GET: '
                          '%(account)r %(container)r %(object)r') %
                        {'account': info['account'],
                         'container': info['container'],
                         'object': row['name']})
                for key in ('date', 'last-modified'):
                    if key in headers:
                        del headers[key]
                if 'etag' in headers:
                    headers['etag'] = headers['etag'].strip('"')
                if 'content-type' in headers:
                    headers['content-type'] = clean_content_type(
                        headers['content-type'])
                headers['x-timestamp'] = row['created_at']
                if realm and realm_key:
                    nonce = uuid.uuid4().hex
                    path = urlparse(sync_to).path + '/' + quote(row['name'])
                    sig = self.realms_conf.get_sig(
                        'PUT', path, headers['x-timestamp'], nonce, realm_key,
                        user_key)
                    headers['x-container-sync-auth'] = '%s %s %s' % (
                        realm, nonce, sig)
                else:
                    headers['x-container-sync-key'] = user_key
                put_object(sync_to, name=row['name'], headers=headers,
                           contents=FileLikeIter(body),
                           proxy=self.select_http_proxy(), logger=self.logger,
                           timeout=self.conn_timeout)
                self.container_puts += 1
                self.logger.increment('puts')
                self.logger.timing_since('puts.timing', start_time)
        except ClientException as err:
            if err.http_status == HTTP_UNAUTHORIZED:
                self.logger.info(
                    _('Unauth %(sync_from)r => %(sync_to)r'),
                    {'sync_from': '%s/%s' %
                        (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to})
            elif err.http_status == HTTP_NOT_FOUND:
                self.logger.info(
                    _('Not found %(sync_from)r => %(sync_to)r '
                      '- object %(obj_name)r'),
                    {'sync_from': '%s/%s' %
                        (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to, 'obj_name': row['name']})
            else:
                self.logger.exception(
                    _('ERROR Syncing %(db_file)s %(row)s'),
                    {'db_file': str(broker), 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        except (Exception, Timeout) as err:
            self.logger.exception(
                _('ERROR Syncing %(db_file)s %(row)s'),
                {'db_file': str(broker), 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        return True

    def select_http_proxy(self):
        return choice(self.http_proxies) if self.http_proxies else None
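
    # Hedged sketch of the signed-sync header built in container_sync_row
    # above (names mirror that code; get_sig comes from ContainerSyncRealms):
    #
    #     nonce = uuid.uuid4().hex
    #     path = urlparse(sync_to).path + '/' + quote(row['name'])
    #     sig = self.realms_conf.get_sig('PUT', path, headers['x-timestamp'],
    #                                    nonce, realm_key, user_key)
    #     headers['x-container-sync-auth'] = '%s %s %s' % (realm, nonce, sig)
    #
    # Without a realm, only the plain 'x-container-sync-key' header is sent.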
Example #38
0
class TestObjectExpirer(ReplProbeTest):
    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest("Need more than one policy")

        self.expirer = Manager(["object-expirer"])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest("Unable to verify object-expirer service")

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, "probe-test", 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = "container-%s" % uuid.uuid4()
        self.object_name = "object-%s" % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={"X-Delete-After": 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
        )
        create_timestamp = Timestamp(metadata["x-timestamp"])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(["object-updater"]).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(["container-updater"]).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4,),
            headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
        )
        self.assertTrue("x-backend-timestamp" in metadata)
        self.assertEqual(Timestamp(metadata["x-backend-timestamp"]), create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj["name"]:
                break
        else:
            self.fail("Did not find listing for %s" % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj["name"]:
                self.fail("Found listing for %s" % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4,),
                headers={"X-Backend-Storage-Policy-Index": int(policy)},
            )
            if "x-backend-timestamp" in metadata:
                if found_in_policy:
                    self.fail("found object in %s and also %s" % (found_in_policy, policy))
                found_in_policy = policy
                self.assertTrue("x-backend-timestamp" in metadata)
                self.assertTrue(Timestamp(metadata["x-backend-timestamp"]) > create_timestamp)
Example #39
0
    def __init__(self, conf, container_ring=None, logger=None):
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = logger or get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
        #: it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: ContainerSyncCluster instance for validating sync-to values.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(conf.get('swift_dir', '/etc/swift'),
                         'container-sync-realms.conf'), self.logger)
        #: The list of hosts we're allowed to send syncs to. This can be
        #: overridden by data in self.realms_conf
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()
        ]
        self.http_proxies = [
            a.strip() for a in conf.get('sync_proxy', '').split(',')
            if a.strip()
        ]
        #: ContainerSyncStore instance for iterating over synced containers
        self.sync_store = ContainerSyncStore(self.devices, self.logger,
                                             self.mount_check)
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers whose sync has been turned off, but
        #: are not yet cleared from the sync store.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0

        #: Per container stats. These are collected per container.
        #: puts - the number of puts that were done for the container
        #: deletes - the number of deletes that were done for the container
        #: bytes - the total number of bytes transferred for the container
        self.container_stats = collections.defaultdict(int)
        self.container_stats.clear()

        #: Time of last stats report.
        self.reported = time()
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(self.swift_dir,
                                                     ring_name='container')
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        self._myips = whataremyips(bind_ip)
        self._myport = int(conf.get('bind_port', 6201))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self.conn_timeout = float(conf.get('conn_timeout', 5))
        request_tries = int(conf.get('request_tries') or 3)

        internal_client_conf_path = conf.get('internal_client_conf_path')
        if not internal_client_conf_path:
            self.logger.warning(
                _('Configuration option internal_client_conf_path not '
                  'defined. Using default configuration, See '
                  'internal-client.conf-sample for options'))
            internal_client_conf = ConfigString(ic_conf_body)
        else:
            internal_client_conf = internal_client_conf_path
        try:
            self.swift = InternalClient(internal_client_conf,
                                        'Swift Container Sync', request_tries)
        except (OSError, IOError) as err:
            if err.errno != errno.ENOENT and \
                    not str(err).endswith(' not found'):
                raise
            raise SystemExit(
                _('Unable to load internal client from config: '
                  '%(conf)r (%(error)s)') % {
                      'conf': internal_client_conf_path,
                      'error': err
                  })
Example #40
0
class TestObjectExpirer(ReplProbeTest):
    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj['name']:
                return True
        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice(
            [p for p in ENABLED_POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_object_should_not_be_expired(self):

        # The object-expirer uses the x-if-delete-at header to verify that an
        # object may be deleted. If an object has no x-delete-at metadata, or
        # its x-delete-at value differs from the x-if-delete-at value sent,
        # the expirer's DELETE fails with 412 Precondition Failed. However,
        # if some copies of the object are on handoff nodes, the expirer can
        # still write a tombstone whose timestamp equals x-delete-at, and
        # consistency is later resolved in favor of the newest timestamp (in
        # particular, an overwrite without x-delete-at). This test asserts
        # that, at least, an overwritten object whose timestamp is later than
        # the original expiration date remains safe.

        def put_object(headers):
            # use internal client to PUT objects so that X-Timestamp in headers
            # is effective
            headers['Content-Length'] = '0'
            path = self.client.make_path(self.account, self.container_name,
                                         self.object_name)
            try:
                self.client.make_request('PUT', path, headers, (2, ))
            except UnexpectedResponse as e:
                self.fail('Expected 201 for PUT object but got %s' %
                          e.resp.status)

        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        #   < T(expirer_executed)
        # Recreated obj should be appeared in any split brain case

        obj_brain.put_container()

        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        # so here, T(obj_created) < T(obj_deleted with x-delete-at)
        now = time.time()
        delete_at = int(now + 2.0)
        recreate_at = delete_at + 1.0
        put_object(headers={
            'X-Delete-At': str(delete_at),
            'X-Timestamp': Timestamp(now).normal
        })

        # stop some object servers to create a situation where the
        # object-expirer can put its tombstone on the primary nodes.
        obj_brain.stop_primary_half()

        # increment the X-Timestamp explicitly
        # (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
        put_object(
            headers={
                'X-Object-Meta-Expired': 'False',
                'X-Timestamp': Timestamp(recreate_at).normal
            })

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # sanity, the newer object is still there
        try:
            metadata = self.client.get_object_metadata(self.account,
                                                       self.container_name,
                                                       self.object_name)
        except UnexpectedResponse as e:
            self.fail('Expected 200 for HEAD object but got %s' %
                      e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep until after recreated_at
        while time.time() <= recreate_at:
            time.sleep(0.1)
        # Now, expirer runs at the time after obj is recreated
        self.expirer.once()

        # verify that original object was deleted by expirer
        obj_brain.stop_handoff_half()
        try:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ))
        except UnexpectedResponse as e:
            self.fail('Expected 404 for HEAD object but got %s' %
                      e.resp.status)
        obj_brain.start_handoff_half()

        # and inconsistent state of objects is recovered by replicator
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        try:
            metadata = self.client.get_object_metadata(self.account,
                                                       self.container_name,
                                                       self.object_name)
        except UnexpectedResponse as e:
            self.fail('Expected 200 for HEAD object but got %s' %
                      e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

    def _test_expirer_delete_outdated_object_version(self, object_exists):
        # This test simulates a case where the expirer tries to delete an
        # outdated version of an object.
        # One case is where the expirer gets a 404 while the newest version
        # of the object is offline.
        # Another case is where the expirer gets a 412 because the old
        # version of the object does not match the expiration time sent by
        # the expirer.
        # In either case, the expirer should retry deleting the object later,
        # for as long as the reclaim age has not passed.
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        obj_brain.put_container()

        if object_exists:
            obj_brain.put_object()

        # currently, the object either doesn't exist, or does not have
        # an expiration

        # stop primary servers and put a newer version of the object, this
        # time with an expiration. only the handoff servers will have
        # the new version
        obj_brain.stop_primary_half()
        now = time.time()
        delete_at = int(now + 2.0)
        obj_brain.put_object({'X-Delete-At': str(delete_at)})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()

        # update object record in the container listing
        Manager(['container-replicator']).once()

        # take handoff servers down, and bring up the outdated primary servers
        obj_brain.start_primary_half()
        obj_brain.stop_handoff_half()

        # wait until object expiration time
        while time.time() <= delete_at:
            time.sleep(0.1)

        # run expirer against the outdated servers. it should fail since
        # the outdated version does not match the expiration time
        self.expirer.once()

        # bring all servers up, and run replicator to update servers
        obj_brain.start_handoff_half()
        Manager(['object-replicator']).once()

        # verify the deletion has failed by checking the container listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # run expirer again, delete should now succeed
        self.expirer.once()

        # this is mainly to paper over lp bug #1652323
        self.get_to_final_state()

        # verify the deletion by checking the container listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

    def test_expirer_delete_returns_outdated_404(self):
        self._test_expirer_delete_outdated_object_version(object_exists=False)

    def test_expirer_delete_returns_outdated_412(self):
        self._test_expirer_delete_outdated_object_version(object_exists=True)
Example #41
0
class ObjectRestorer(Daemon):
    """
    Daemon that queries the internal hidden .s3_restoring_objects account to
    discover objects that need to be restored from Glacier.

    :param conf: The daemon configuration.
    """

    def __init__(self, conf):
        self.conf = conf
        self.container_ring = Ring('/etc/swift', ring_name='container')
        self.logger = get_logger(conf, log_route='object-restorer')
        self.logger.set_statsd_prefix('s3-object-restorer')
        self.interval = int(conf.get('interval') or 300)
        self.restoring_object_account = '.s3_restoring_objects'
        self.expiring_restored_account = '.s3_expiring_restored_objects'
        self.glacier_account_prefix = '.glacier_'
        self.todo_container = 'todo'
        self.restoring_container = 'restoring'
        conf_path = '/etc/swift/s3-object-restorer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.glacier = self._init_glacier()
        self.glacier_tmpdir = conf.get('temp_path', '/var/cache/s3/')
        self.swift = InternalClient(conf_path,
                                    'Swift Object Restorer',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.client = Client(self.conf.get('sentry_sdn', ''))

    def _init_glacier(self):
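        # boto Glacier Layer2 connection; the region and vault name here are
        # deployment-specific values.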
        con = Layer2(region_name='ap-northeast-1')
        return con.get_vault('swift-s3-transition')

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final progress
        if final=True.

        :param final: Set to True for the last report once the restore pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects restored') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects restored') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))

            for o in self.swift.iter_objects(self.restoring_object_account,
                                             self.todo_container):
                obj = o['name'].encode('utf8')
                if processes > 0:
                    obj_process = int(
                        hashlib.md5('%s/%s' % (self.todo_container, obj)).
                        hexdigest(), 16)
                    if obj_process % processes != process:
                        continue
                pool.spawn_n(self.start_object_restoring, obj)

            pool.waitall()

            for o in self.swift.iter_objects(self.restoring_object_account,
                                             self.restoring_container):
                obj = o['name'].encode('utf8')
                if processes > 0:
                    obj_process = int(
                        hashlib.md5('%s/%s' % (self.restoring_container, obj)).
                        hexdigest(), 16)
                    if obj_process % processes != process:
                        continue
                pool.spawn_n(self.check_object_restored, obj)

            pool.waitall()

            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout) as e:
            report_exception(self.logger, _('Unhandled exception'), self.client)

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                report_exception(self.logger, _('Unhandled exception'), self.client)
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.

        Otherwise, return processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods.  They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes

        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process

        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')

        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')

        if processes and process >= processes:
            raise ValueError(
                'process must be less than or equal to processes')

        return processes, process

    def start_object_restoring(self, obj):
        start_time = time()
        try:
            actual_obj = obj
            account, container, obj = actual_obj.split('/', 2)
            archiveId = self.get_archiveid(account, container, obj)

            if archiveId is None:
                self.swift.delete_object(self.restoring_object_account,
                                         self.todo_container, actual_obj)
                return

            jobId = self.glacier.retrieve_archive(archiveId).id
            restoring_obj = make_glacier_hidden_object_name(actual_obj, jobId)

            meta_prefix = 'X-Object-Meta'
            meta = self.swift.get_object_metadata(account, container, obj,
                                                  metadata_prefix=meta_prefix)
            meta = {'X-Object-Meta' + key: value for key, value in
                    meta.iteritems()}
            self.update_action_hidden(self.restoring_object_account,
                                      self.restoring_container,
                                      restoring_obj, metadata=meta)

            self.swift.delete_object(self.restoring_object_account,
                                     self.todo_container, actual_obj)
            self.report_objects += 1
            self.logger.increment('start')
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            report_exception(self.logger,
                             _('Exception while restoring object %s. %s') %
                             (obj, str(err)), self.client)
        self.logger.timing_since('timing', start_time)
        self.report()

    def get_archiveid(self, account, container, obj):
        glacier_account = '%s%s' % (self.glacier_account_prefix, account)

        glacier_obj = None
        for o in get_objects_by_prefix(glacier_account, container, obj,
                                       swift_client=self.swift):
            name = get_glacier_objname_from_hidden_object(o)
            if name == obj:
                glacier_obj = o
                break
        if glacier_obj is None:
            return None

        return get_glacier_key_from_hidden_object(glacier_obj)
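
    # Note (assumption): the glacier helper functions used above are not
    # shown in this example; they pack the real object path and a Glacier
    # archive or job id into a single hidden object name, and unpack them via
    # get_glacier_objname_from_hidden_object /
    # get_glacier_key_from_hidden_object.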

    def check_object_restored(self, restoring_object):
        actual_obj = get_glacier_objname_from_hidden_object(restoring_object)
        jobId = get_glacier_key_from_hidden_object(restoring_object)
        try:
            path = '/v1/%s' % actual_obj
            resp = self.swift.make_request('GET', path, {}, (2, 4,))
            if resp.status_int == 404:
                raise Exception('Object Not Found: %s' % actual_obj)

            job = self.glacier.get_job(job_id=jobId)
            if not job.completed:
                return
            self.complete_restore(actual_obj, job)
        except Exception as e:
            # If the Glacier job ID has expired, retry the restore.
            if not e.message.startswith('Object Not Found:'):
                self.start_object_restoring(actual_obj)
            self.logger.info(e)

        self.swift.delete_object(self.restoring_object_account,
                                 self.restoring_container, restoring_object)

    def complete_restore(self, actual_obj, job):
        tmppath = tempfile.NamedTemporaryFile(bufsize=0, delete=False,
                                              dir=self.glacier_tmpdir).name
        try:
            job.download_to_file(filename=tmppath)

            prefix = 'X-Object-Meta'
            a, c, o = actual_obj.split('/', 2)
            metadata = self.swift.get_object_metadata(a, c, o,
                                                      metadata_prefix=prefix)
            metadata = {'X-Object-Meta' + key: value
                        for key, value in metadata.iteritems()}
            days = int(metadata['X-Object-Meta-s3-restore-expire-days'])
            exp_time = normalize_delete_at_timestamp(calc_nextDay(time()) +
                                                     (days - 1) * 86400)

            # send restored object to proxy server
            path = '/v1/%s' % actual_obj
            metadata['X-Object-Meta-S3-Restored'] = True
            exp_date = strftime("%a, %d %b %Y %H:%M:%S GMT",
                                gmtime(float(exp_time)))

            metadata['X-Object-Meta-s3-restore'] = 'ongoing-request="false", ' \
                                                   'expiry-date="%s"' % exp_date
            metadata['Content-Length'] = os.path.getsize(tmppath)
            del metadata['X-Object-Meta-s3-restore-expire-days']

            obj_body = open(tmppath, 'rb')
            self.swift.make_request('PUT', path, metadata, (2,),
                                    body_file=obj_body)

            # Add to .s3_expiring_restored_objects
            self.update_action_hidden(self.expiring_restored_account,
                                      exp_time, actual_obj)
            obj_body.close()
            self.logger.increment('done')
        except UnexpectedResponse as e:
            if e.resp.status_int == 404:
                self.logger.error('Restoring object not found - %s' %
                                  actual_obj)
        except Exception as e:
            self.logger.increment('errors')
            self.logger.debug(e)
        finally:
            os.remove(tmppath)

    def compute_obj_md5(self, obj):
        etag = hashlib.md5()
        etag.update(obj)
        etag = etag.hexdigest()
        return etag

    def update_action_hidden(self, account, container, obj, metadata=None):
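        # Writes a zero-byte "hidden" listing entry directly to every
        # container-server replica, bypassing the proxy; any metadata rides
        # along as extra headers on the container PUT.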
        hidden_path = '/%s/%s/%s' % (account, container, obj)
        part, nodes = self.container_ring.get_nodes(account, container)
        for node in nodes:
            ip = node['ip']
            port = node['port']
            dev = node['device']
            action_headers = dict()
            action_headers['user-agent'] = 'restore-daemon'
            action_headers['X-Timestamp'] = normalize_timestamp(time())
            action_headers['referer'] = 'restore-daemon'
            action_headers['x-size'] = '0'
            action_headers['x-content-type'] = "text/plain"
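            # md5 of the empty string: the hidden marker is a zero-byte
            # object, so its listing row advertises the empty-object etag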
            action_headers['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'

            if metadata:
                action_headers.update(metadata)

            conn = http_connect(ip, port, dev, part, 'PUT', hidden_path,
                                action_headers)
            response = conn.getresponse()
            response.read()
Example #42
class TestObjectExpirer(unittest.TestCase):

    def setUp(self):
        if len(POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token,
         self.account, self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(list(POLICIES))
        wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assert_('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assert_('x-backend-timestamp' in metadata)
                self.assert_(Timestamp(metadata['x-backend-timestamp']) >
                             create_timestamp)
Example #43
class UtilizationAggregator(Daemon):
    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='utilization-aggregator')
        self.interval = int(conf.get('interval') or 60)
        self.aggregate_account = '.utilization'
        self.sample_account = '.transfer_record'
        conf_path = conf.get('__file__') or \
                    '/etc/swift/swift-utilization-aggregator.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Utilization Aggregator',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 60)
        self.report_first_time = self.report_last_time = time()
        self.report_containers = 0
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.container_ring = Ring('/etc/swift', ring_name='container')
        self.sample_rate = int(self.conf.get('sample_rate', 600))
        self.last_chk = iso8601_to_timestamp(self.conf.get(
            'service_start'))
        self.kinx_api_url = self.conf.get('kinx_api_url')

    def report(self, final=False):
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d containers,'
                               ' %d objects aggregated') %
                             (elapsed, self.report_containers,
                              self.report_objects))
            dump_recon_cache({'object_aggregation_pass': elapsed,
                              'aggregation_last_pass': self.report_containers},
                             self.rcache, self.logger)

        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects aggregated') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.report_containers = 0
        containers_to_delete = []
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.sample_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(self.sample_account):
                container = c['name']
                try:
                    timestamp, account = container.split('_', 1)
                    timestamp = float(timestamp)
                except ValueError:
                    self.logger.debug('ValueError: %s, '
                                      'need more than 1 value to unpack' % \
                                      container)
                else:
                    if processes > 0:
                        obj_proc = int(hashlib.md5(container).hexdigest(), 16)
                        if obj_proc % processes != process:
                            continue
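                    # floor the current time to a sample_rate boundary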
                    n = (float(time()) // self.sample_rate) * self.sample_rate
                    if timestamp <= n:
                        containers_to_delete.append(container)
                        pool.spawn_n(self.aggregate_container, container)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.logger.debug('delete container: %s' % container)
                    self.swift.delete_container(self.sample_account, container,
                                                acceptable_statuses=(
                                                    2, HTTP_NOT_FOUND,
                                                    HTTP_CONFLICT))
                except (Exception, Timeout) as err:
                    self.logger.exception(
                        _('Exception while deleting container %s %s') %
                        (container, str(err)))

            tenants_to_fillup = list()
            for c in self.swift.iter_containers(self.aggregate_account):
                tenant_id = c['name']
                if processes > 0:
                    c_proc = int(hashlib.md5(tenant_id).hexdigest(), 16)
                    if c_proc % processes != process:
                        continue
                tenants_to_fillup.append(tenant_id)
            # fill up lost usage data
            self.fillup_lossed_usage_data(tenants_to_fillup)

            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            self.logger.exception(_('Unhandled exception'))

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for samples to aggregate.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                self.logger.exception(_('Unhandled exception'))
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes and process values from kwargs if they exist.

        Otherwise, returns the processes and process values set in the
        config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods.  They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes

        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process

        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')

        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')

        if processes and process >= processes:
            raise ValueError(
                'process must be less than processes')

        return processes, process

    def aggregate_container(self, container):
        start_time = time()
        try:
            objs_to_delete = list()
            bytes_recvs = dict()
            bytes_sents = dict()

            ts, tenant_id, account = container.split('_', 2)
            ts = int(float(ts))

            for o in self.swift.iter_objects(self.sample_account, container):
                name = o['name']
                objs_to_delete.append(name)
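                # sample names look like
                # '<ts>/<bytes_recv>/<bytes_sent>/<trans_id>/<client_ip>'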
                ts, bytes_rv, bytes_st, trans_id, client_ip = name.split('/')
                bill_type = self.get_billtype_by_client_ip(client_ip, ts)
                bytes_recvs[bill_type] = bytes_recvs.get(bill_type,
                                                         0) + int(bytes_rv)
                bytes_sents[bill_type] = bytes_sents.get(bill_type,
                                                         0) + int(bytes_st)
                self.report_objects += 1

            for o in objs_to_delete:
                self.swift.delete_object(self.sample_account, container, o)

            for bill_type, bt_rv in bytes_recvs.items():
                t_object = 'transfer/%d/%d/%d_%d_%d' % (ts, bill_type, bt_rv,
                                                        bytes_sents[bill_type],
                                                        self.report_objects)
                self._hidden_update(tenant_id, t_object)
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            self.logger.exception(
                _('Exception while aggregating sample %s %s') %
                (container, str(err)))

        self.logger.timing_since('timing', start_time)
        self.report()

    def account_info(self, tenant_id, timestamp):
        path = '/v1/%s/%s?prefix=usage/%d&limit=1' % (self.aggregate_account,
                                                      tenant_id, timestamp)
        resp = self.swift.make_request('GET', path, {}, (2,))
        if len(resp.body) == 0:
            return 0, 0, 0
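        # the listing row is shaped like
        # 'usage/<timestamp>/<containers>_<objects>_<bytes>' (these rows are
        # written by fillup_lossed_usage_data below)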
        usages = resp.body.split('/', 2)[2].rstrip()
        cont_cnt, obj_cnt, bt_used = usages.split('_')
        return int(cont_cnt), int(obj_cnt), int(bt_used)

    def _hidden_update(self, container, obj, method='PUT'):
        hidden_path = '/%s/%s/%s' % (self.aggregate_account, container, obj)
        part, nodes = self.container_ring.get_nodes(self.aggregate_account,
                                                    container)
        for node in nodes:
            ip = node['ip']
            port = node['port']
            dev = node['device']
            action_headers = dict()
            action_headers['user-agent'] = 'aggregator'
            action_headers['X-Timestamp'] = normalize_timestamp(time())
            action_headers['referer'] = 'aggregator-daemon'
            action_headers['x-size'] = '0'
            action_headers['x-content-type'] = "text/plain"
            action_headers['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'

            conn = http_connect(ip, port, dev, part, method, hidden_path,
                                action_headers)
            response = conn.getresponse()
            response.read()

    def fillup_lossed_usage_data(self, tenants):
        now = (float(time()) // self.sample_rate) * self.sample_rate
        path = '/v1/%s/%s?prefix=usage/%d&limit=1'

        for t in tenants:
            last = self.last_chk
            cont_cnt = obj_cnt = bt_used = -1
            while last <= now:
                p = path % (self.aggregate_account, t, last)
                resp = self.swift.make_request('GET', p, {}, (2,))
                if len(resp.body) != 0:
                    usages = resp.body.split('/', 2)[2].rstrip()
                    c, o, bt = usages.split('_')
                    cont_cnt = int(c)
                    obj_cnt = int(o)
                    bt_used = int(bt)
                else:
                    before = last - self.sample_rate
                    if cont_cnt == -1:
                        cont_cnt, obj_cnt, bt_used = \
                            self.account_info(t, before)
                    obj = 'usage/%d/%d_%d_%d' % (last, cont_cnt, obj_cnt,
                                                 bt_used)
                    self._hidden_update(t, obj)
                last += self.sample_rate
        self.last_chk = now

    def get_billtype_by_client_ip(self, client_ip, timestamp):
        end_ts = timestamp_to_iso8601(timestamp + self.sample_rate - 1)
        start_ts = timestamp_to_iso8601(timestamp)

        params = {'start': start_ts, 'end': end_ts}
        path = self.kinx_api_url + '/?%s' % (urllib.urlencode(params))

        data = json.loads(urllib.urlopen(path).read())
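        # Expected (assumed) response shape:
        #   {"ip_ranges": [{"bill_type": <int>,
        #                   "ip_range": ["10.0.0.0/8", ...]}, ...]}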
        bill_type = -1
        for r in data['ip_ranges']:
            bill_type = r['bill_type']
            for cidr in r['ip_range']:
                if self.ip_in_cidr(client_ip, cidr):
                    return bill_type
        return bill_type

    def ip_in_cidr(self, client_ip, cidr):
        bt_to_bits = lambda b: bin(int(b))[2:].rjust(8, '0')
        ip_to_bits = lambda ip: ''.join([bt_to_bits(b) for b in ip.split('.')])
        client_ip_bits = ip_to_bits(client_ip)
        ip, snet = cidr.split('/')
        ip_bits = ip_to_bits(ip)
        return client_ip_bits[:int(snet)] == ip_bits[:int(snet)]
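    # Example: ip_in_cidr('10.0.1.5', '10.0.0.0/16') compares the leading
    # 16 bits of both addresses, which match, so it returns True.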
Example #44
class ContainerSync(Daemon):
    """
    Daemon to sync syncable containers.

    This is done by scanning the local devices for container databases and
    checking for x-container-sync-to and x-container-sync-key metadata values.
    If they exist, newer rows since the last sync will trigger PUTs or DELETEs
    to the other container.

    The actual syncing is slightly more complicated to make use of the three
    (or number-of-replicas) main nodes for a container without each trying to
    do the exact same work but also without missing work if one node happens to
    be down.

    Two sync points are kept per container database. All rows between the two
    sync points trigger updates. Any rows newer than both sync points cause
    updates depending on the node's position for the container (primary nodes
    do one third, etc. depending on the replica count of course). After a sync
    run, the first sync point is set to the newest ROWID known and the second
    sync point is set to newest ROWID for which all updates have been sent.

    An example may help. Assume replica count is 3 and perfectly matching
    ROWIDs starting at 1.

        First sync run, database has 6 rows:

            * SyncPoint1 starts as -1.
            * SyncPoint2 starts as -1.
            * No rows between points, so no "all updates" rows.
            * Six rows newer than SyncPoint1, so a third of the rows are sent
              by node 1, another third by node 2, remaining third by node 3.
            * SyncPoint1 is set as 6 (the newest ROWID known).
            * SyncPoint2 is left as -1 since no "all updates" rows were synced.

        Next sync run, database has 12 rows:

            * SyncPoint1 starts as 6.
            * SyncPoint2 starts as -1.
            * The rows between -1 and 6 all trigger updates (most of which
              should short-circuit on the remote end as having already been
              done).
            * Six more rows newer than SyncPoint1, so a third of the rows are
              sent by node 1, another third by node 2, remaining third by node
              3.
            * SyncPoint1 is set as 12 (the newest ROWID known).
            * SyncPoint2 is set as 6 (the newest "all updates" ROWID).

    In this way, under normal circumstances each node sends its share of
    updates each run and just sends a batch of older updates to ensure nothing
    was missed.

    :param conf: The dict of configuration values from the [container-sync]
                 section of the container-server.conf
    :param container_ring: If None, the <swift_dir>/container.ring.gz will be
                           loaded. This is overridden by unit tests.
    """
    def __init__(self, conf, container_ring=None, logger=None):
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = logger or get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
        #: it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: ContainerSyncCluster instance for validating sync-to values.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(conf.get('swift_dir', '/etc/swift'),
                         'container-sync-realms.conf'), self.logger)
        #: The list of hosts we're allowed to send syncs to. This can be
        #: overridden by data in self.realms_conf
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()
        ]
        self.http_proxies = [
            a.strip() for a in conf.get('sync_proxy', '').split(',')
            if a.strip()
        ]
        #: ContainerSyncStore instance for iterating over synced containers
        self.sync_store = ContainerSyncStore(self.devices, self.logger,
                                             self.mount_check)
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers whose sync has been turned off, but
        #: are not yet cleared from the sync store.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0

        #: Per container stats. These are collected per container.
        #: puts - the number of puts that were done for the container
        #: deletes - the number of deletes that were done for the container
        #: bytes - the total number of bytes transferred for the container
        self.container_stats = collections.defaultdict(int)
        self.container_stats.clear()

        #: Time of last stats report.
        self.reported = time()
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(self.swift_dir,
                                                     ring_name='container')
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        self._myips = whataremyips(bind_ip)
        self._myport = int(conf.get('bind_port', 6201))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self.conn_timeout = float(conf.get('conn_timeout', 5))
        request_tries = int(conf.get('request_tries') or 3)

        internal_client_conf_path = conf.get('internal_client_conf_path')
        if not internal_client_conf_path:
            self.logger.warning(
                _('Configuration option internal_client_conf_path not '
                  'defined. Using default configuration, See '
                  'internal-client.conf-sample for options'))
            internal_client_conf = ConfigString(ic_conf_body)
        else:
            internal_client_conf = internal_client_conf_path
        try:
            self.swift = InternalClient(internal_client_conf,
                                        'Swift Container Sync', request_tries)
        except (OSError, IOError) as err:
            if err.errno != errno.ENOENT and \
                    not str(err).endswith(' not found'):
                raise
            raise SystemExit(
                _('Unable to load internal client from config: '
                  '%(conf)r (%(error)s)') % {
                      'conf': internal_client_conf_path,
                      'error': err
                  })

    def run_forever(self, *args, **kwargs):
        """
        Runs container sync scans until stopped.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            for path in self.sync_store.synced_containers_generator():
                self.container_stats.clear()
                self.container_sync(path)
                if time() - self.reported >= 3600:  # once an hour
                    self.report()
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(self.interval - elapsed)

    def run_once(self, *args, **kwargs):
        """
        Runs a single container sync scan.
        """
        self.logger.info(_('Begin container sync "once" mode'))
        begin = time()
        for path in self.sync_store.synced_containers_generator():
            self.container_sync(path)
            if time() - self.reported >= 3600:  # once an hour
                self.report()
        self.report()
        elapsed = time() - begin
        self.logger.info(_('Container sync "once" mode completed: %.02fs'),
                         elapsed)

    def report(self):
        """
        Writes a report of the stats to the logger and resets the stats for the
        next report.
        """
        self.logger.info(
            _('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
              'puts], %(skip)s skipped, %(fail)s failed'), {
                  'time': ctime(self.reported),
                  'sync': self.container_syncs,
                  'delete': self.container_deletes,
                  'put': self.container_puts,
                  'skip': self.container_skips,
                  'fail': self.container_failures
              })
        self.reported = time()
        self.container_syncs = 0
        self.container_deletes = 0
        self.container_puts = 0
        self.container_skips = 0
        self.container_failures = 0

    def container_report(self, start, end, sync_point1, sync_point2, info,
                         max_row):
        self.logger.info(
            _('Container sync report: %(container)s, '
              'time window start: %(start)s, '
              'time window end: %(end)s, '
              'puts: %(puts)s, '
              'posts: %(posts)s, '
              'deletes: %(deletes)s, '
              'bytes: %(bytes)s, '
              'sync_point1: %(point1)s, '
              'sync_point2: %(point2)s, '
              'total_rows: %(total)s'), {
                  'container': '%s/%s' % (info['account'], info['container']),
                  'start': start,
                  'end': end,
                  'puts': self.container_stats['puts'],
                  'posts': 0,
                  'deletes': self.container_stats['deletes'],
                  'bytes': self.container_stats['bytes'],
                  'point1': sync_point1,
                  'point2': sync_point2,
                  'total': max_row
              })

    def container_sync(self, path):
        """
        Checks the given path for a container database, determines if syncing
        is turned on for that database and, if so, sends any updates to the
        other container.

        :param path: the path to a container db
        """
        broker = None
        try:
            broker = ContainerBroker(path, logger=self.logger)
            # The path we pass to the ContainerBroker is a real path of
            # a container DB. If we get here, however, it means that this
            # path is linked from the sync_containers dir. In rare cases
            # of race or processes failures the link can be stale and
            # the get_info below will raise a DB doesn't exist exception
            # In this case we remove the stale link and raise an error
            # since in most cases the db should be there.
            try:
                info = broker.get_info()
            except DatabaseConnectionError as db_err:
                if str(db_err).endswith("DB doesn't exist"):
                    self.sync_store.remove_synced_container(broker)
                raise

            x, nodes = self.container_ring.get_nodes(info['account'],
                                                     info['container'])
            for ordinal, node in enumerate(nodes):
                if is_local_device(self._myips, self._myport, node['ip'],
                                   node['port']):
                    break
            else:
                return
            if not broker.is_deleted():
                sync_to = None
                user_key = None
                sync_point1 = info['x_container_sync_point1']
                sync_point2 = info['x_container_sync_point2']
                for key, (value, timestamp) in broker.metadata.items():
                    if key.lower() == 'x-container-sync-to':
                        sync_to = value
                    elif key.lower() == 'x-container-sync-key':
                        user_key = value
                if not sync_to or not user_key:
                    self.container_skips += 1
                    self.logger.increment('skips')
                    return
                err, sync_to, realm, realm_key = validate_sync_to(
                    sync_to, self.allowed_sync_hosts, self.realms_conf)
                if err:
                    self.logger.info(
                        _('ERROR %(db_file)s: %(validate_sync_to_err)s'), {
                            'db_file': str(broker),
                            'validate_sync_to_err': err
                        })
                    self.container_failures += 1
                    self.logger.increment('failures')
                    return
                start_at = time()
                stop_at = start_at + self.container_time
                next_sync_point = None
                sync_stage_time = start_at
                try:
                    while time() < stop_at and sync_point2 < sync_point1:
                        rows = broker.get_items_since(sync_point2, 1)
                        if not rows:
                            break
                        row = rows[0]
                        if row['ROWID'] > sync_point1:
                            break
                        # This node will only initially sync out one third
                        # of the objects (if 3 replicas, 1/4 if 4, etc.)
                        # and will skip problematic rows as needed in case of
                        # faults.
                        # This section will attempt to sync previously skipped
                        # rows in case the previous attempts by any of the
                        # nodes didn't succeed.
                        if not self.container_sync_row(row, sync_to, user_key,
                                                       broker, info, realm,
                                                       realm_key):
                            if not next_sync_point:
                                next_sync_point = sync_point2
                        sync_point2 = row['ROWID']
                        broker.set_x_container_sync_points(None, sync_point2)
                    if next_sync_point:
                        broker.set_x_container_sync_points(
                            None, next_sync_point)
                    else:
                        next_sync_point = sync_point2
                    sync_stage_time = time()
                    while sync_stage_time < stop_at:
                        rows = broker.get_items_since(sync_point1, 1)
                        if not rows:
                            break
                        row = rows[0]
                        key = hash_path(info['account'],
                                        info['container'],
                                        row['name'],
                                        raw_digest=True)
                        # This node will only initially sync out one third of
                        # the objects (if 3 replicas, 1/4 if 4, etc.).
                        # It'll come back around to the section above
                        # and attempt to sync previously skipped rows in case
                        # the other nodes didn't succeed or in case it failed
                        # to do so the first time.
                        if unpack_from('>I', key)[0] % \
                                len(nodes) == ordinal:
                            self.container_sync_row(row, sync_to, user_key,
                                                    broker, info, realm,
                                                    realm_key)
                        sync_point1 = row['ROWID']
                        broker.set_x_container_sync_points(sync_point1, None)
                        sync_stage_time = time()
                    self.container_syncs += 1
                    self.logger.increment('syncs')
                finally:
                    self.container_report(start_at, sync_stage_time,
                                          sync_point1, next_sync_point, info,
                                          broker.get_max_row())
        except (Exception, Timeout):
            self.container_failures += 1
            self.logger.increment('failures')
            self.logger.exception(_('ERROR Syncing %s'),
                                  broker if broker else path)

    def _update_sync_to_headers(self, name, sync_to, user_key, realm,
                                realm_key, method, headers):
        """
        Updates container sync headers

        :param name: The name of the object
        :param sync_to: The URL to the remote container.
        :param user_key: The X-Container-Sync-Key to use when sending requests
                         to the other container.
        :param realm: The realm from self.realms_conf, if there is one.
            If None, fallback to using the older allowed_sync_hosts
            way of syncing.
        :param realm_key: The realm key from self.realms_conf, if there
            is one. If None, fallback to using the older
            allowed_sync_hosts way of syncing.
        :param method: HTTP method to create sig with
        :param headers: headers to update with container sync headers
        """
        if realm and realm_key:
            nonce = uuid.uuid4().hex
            path = urlparse(sync_to).path + '/' + quote(name)
            sig = self.realms_conf.get_sig(method, path,
                                           headers.get('x-timestamp', 0),
                                           nonce, realm_key, user_key)
            headers['x-container-sync-auth'] = '%s %s %s' % (realm, nonce, sig)
        else:
            headers['x-container-sync-key'] = user_key

    def _object_in_remote_container(self, name, sync_to, user_key, realm,
                                    realm_key, timestamp):
        """
        Performs head object on remote to eliminate extra remote put and
        local get object calls

        :param name: The name of the object in the updated row in the local
                     database triggering the sync update.
        :param sync_to: The URL to the remote container.
        :param user_key: The X-Container-Sync-Key to use when sending requests
                         to the other container.
        :param realm: The realm from self.realms_conf, if there is one.
            If None, fallback to using the older allowed_sync_hosts
            way of syncing.
        :param realm_key: The realm key from self.realms_conf, if there
            is one. If None, fallback to using the older
            allowed_sync_hosts way of syncing.
        :param timestamp: last modified date of local object
        :returns: True if object already exists in remote
        """
        headers = {'x-timestamp': timestamp.internal}
        self._update_sync_to_headers(name, sync_to, user_key, realm, realm_key,
                                     'HEAD', headers)
        try:
            metadata, _ = head_object(sync_to,
                                      name=name,
                                      headers=headers,
                                      proxy=self.select_http_proxy(),
                                      logger=self.logger,
                                      retries=0)
            remote_ts = Timestamp(metadata.get('x-timestamp', 0))
            self.logger.debug("remote obj timestamp %s local obj %s" %
                              (remote_ts.internal, timestamp.internal))
            if timestamp <= remote_ts:
                return True
            # Object in remote should be updated
            return False
        except ClientException as http_err:
            # Object not in remote
            if http_err.http_status == 404:
                return False
            raise http_err

    def container_sync_row(self, row, sync_to, user_key, broker, info, realm,
                           realm_key):
        """
        Sends the update the row indicates to the sync_to container.
        Update can be either delete or put.

        :param row: The updated row in the local database triggering the sync
                    update.
        :param sync_to: The URL to the remote container.
        :param user_key: The X-Container-Sync-Key to use when sending requests
                         to the other container.
        :param broker: The local container database broker.
        :param info: The get_info result from the local container database
                     broker.
        :param realm: The realm from self.realms_conf, if there is one.
            If None, fallback to using the older allowed_sync_hosts
            way of syncing.
        :param realm_key: The realm key from self.realms_conf, if there
            is one. If None, fallback to using the older
            allowed_sync_hosts way of syncing.
        :returns: True on success
        """
        try:
            start_time = time()
            # extract last modified time from the created_at value
            ts_data, ts_ctype, ts_meta = decode_timestamps(row['created_at'])
            if row['deleted']:
                # when sync'ing a deleted object, use ts_data - this is the
                # timestamp of the source tombstone
                try:
                    headers = {'x-timestamp': ts_data.internal}
                    self._update_sync_to_headers(row['name'], sync_to,
                                                 user_key, realm, realm_key,
                                                 'DELETE', headers)
                    delete_object(sync_to,
                                  name=row['name'],
                                  headers=headers,
                                  proxy=self.select_http_proxy(),
                                  logger=self.logger,
                                  timeout=self.conn_timeout)
                except ClientException as err:
                    if err.http_status != HTTP_NOT_FOUND:
                        raise
                self.container_deletes += 1
                self.container_stats['deletes'] += 1
                self.logger.increment('deletes')
                self.logger.timing_since('deletes.timing', start_time)
            else:
                # when sync'ing a live object, use ts_meta - this is the time
                # at which the source object was last modified by a PUT or POST
                if self._object_in_remote_container(row['name'], sync_to,
                                                    user_key, realm, realm_key,
                                                    ts_meta):
                    return True
                exc = None
                # look up for the newest one; the symlink=get query-string has
                # no effect unless symlinks are enabled in the internal client
                # in which case it ensures that symlink objects retain their
                # symlink property when sync'd.
                headers_out = {
                    'X-Newest': True,
                    'X-Backend-Storage-Policy-Index':
                        str(info['storage_policy_index'])}
                try:
                    source_obj_status, headers, body = \
                        self.swift.get_object(info['account'],
                                              info['container'], row['name'],
                                              headers=headers_out,
                                              acceptable_statuses=(2, 4),
                                              params={'symlink': 'get'})

                except (Exception, UnexpectedResponse, Timeout) as err:
                    headers = {}
                    body = None
                    exc = err
                timestamp = Timestamp(headers.get('x-timestamp', 0))
                if timestamp < ts_meta:
                    if exc:
                        raise exc
                    raise Exception(
                        _('Unknown exception trying to GET: '
                          '%(account)r %(container)r %(object)r') % {
                            'account': info['account'],
                            'container': info['container'],
                            'object': row['name']})
                for key in ('date', 'last-modified'):
                    if key in headers:
                        del headers[key]
                if 'etag' in headers:
                    headers['etag'] = normalize_etag(headers['etag'])
                if 'content-type' in headers:
                    headers['content-type'] = clean_content_type(
                        headers['content-type'])
                self._update_sync_to_headers(row['name'], sync_to, user_key,
                                             realm, realm_key, 'PUT', headers)
                put_object(sync_to,
                           name=row['name'],
                           headers=headers,
                           contents=FileLikeIter(body),
                           proxy=self.select_http_proxy(),
                           logger=self.logger,
                           timeout=self.conn_timeout)
                self.container_puts += 1
                self.container_stats['puts'] += 1
                self.container_stats['bytes'] += row['size']
                self.logger.increment('puts')
                self.logger.timing_since('puts.timing', start_time)
        except ClientException as err:
            if err.http_status == HTTP_UNAUTHORIZED:
                self.logger.info(
                    _('Unauth %(sync_from)r => %(sync_to)r'), {
                        'sync_from': '%s/%s' % (quote(info['account']),
                                                quote(info['container'])),
                        'sync_to': sync_to
                    })
            elif err.http_status == HTTP_NOT_FOUND:
                self.logger.info(
                    _('Not found %(sync_from)r => %(sync_to)r '
                      '- object %(obj_name)r'), {
                        'sync_from': '%s/%s' % (quote(info['account']),
                                                quote(info['container'])),
                        'sync_to': sync_to,
                        'obj_name': row['name']
                    })
            else:
                self.logger.exception(_('ERROR Syncing %(db_file)s %(row)s'), {
                    'db_file': str(broker),
                    'row': row
                })
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        except (Exception, Timeout) as err:
            self.logger.exception(_('ERROR Syncing %(db_file)s %(row)s'), {
                'db_file': str(broker),
                'row': row
            })
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        return True

    def select_http_proxy(self):
        return choice(self.http_proxies) if self.http_proxies else None
Example #45
class ObjectExpirer(Daemon):
    def __init__(self, conf):
        super(ObjectExpirer, self).__init__(conf)
        self.conf = conf
        self.logger = get_logger(conf, log_route='s3-object-expirer')
        self.logger.set_statsd_prefix('s3-object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.s3_expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            (conf.get('expiring_objects_account_name') or
             's3_expiring_objects')
        conf_path = conf.get('__file__') or '/etc/swift/s3-object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Object Expirer',
                                    request_tries)
        self.glacier = self._init_glacier()
        self.glacier_account_prefix = '.glacier_'
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.client = Client(self.conf.get('sentry_sdn', ''))

    def _init_glacier(self):
        con = Layer2(region_name='ap-northeast-1')
        return con.get_vault('swift-s3-transition')

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or of the final
        progress if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.s3_expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))

            for c in self.swift.iter_containers(
                    self.s3_expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(
                        self.s3_expiring_objects_account, container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue

                    pool.spawn_n(self.delete_object, container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(
                        self.s3_expiring_objects_account,
                        container,
                        acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
                except (Exception, Timeout) as err:
                    report_exception(self.logger,
                                     _('Exception while deleting container '
                                       '%s %s') % (container, str(err)),
                                     self.client)
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            report_exception(self.logger, _('Unhandled exception'), self.client)

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                report_exception(self.logger, _('Unhandled exception'), self.client)
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes and process values from kwargs if they exist.

        Otherwise, returns the processes and process values set in the
        config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods.  They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes

        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process

        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')

        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')

        if processes and process >= processes:
            raise ValueError(
                'process must be less than processes')

        return processes, process

    def delete_object(self, hidden_container, obj):
        start_time = time()
        try:
            account, container, obj_name = obj.split('/', 2)
            lifecycle = Lifecycle(account, container, obj_name,
                                  swift_client=self.swift)
            object_header = lifecycle.object.headers
            object_rule = lifecycle.get_object_rule_by_action('Expiration')
            last_modified = gmt_to_timestamp(object_header['Last-Modified'])

            validation_flg = lifecycle.object_lifecycle_validation()
            if (validation_flg == LIFECYCLE_OK) or \
                    (validation_flg == DISABLED_TRANSITION):
                times = calc_when_actions_do(object_rule, last_modified)
                actual_expire_time = int(times['Expiration'])
                if actual_expire_time == int(hidden_container):
                    self.delete_actual_object(obj)
                if lifecycle.get_s3_storage_class() == 'GLACIER':
                    self.delete_glacier_object(obj)
                self.report_objects += 1
                self.logger.increment('objects')
                self.swift.delete_object(self.s3_expiring_objects_account,
                                         hidden_container, obj)
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            report_exception(self.logger,
                             _('Exception while deleting object %s %s %s') %
                             (hidden_container, obj, str(err)), self.client)

        self.logger.timing_since('timing', start_time)
        self.report()

    def delete_glacier_object(self, obj):
        account, container, prefix = obj.split('/', 2)
        glacier_hidden_account = self.glacier_account_prefix + account
        objs = get_objects_by_prefix(glacier_hidden_account, container, prefix,
                                     swift_client=self.swift)

        glacier_obj = None
        for o in objs:
            name = get_glacier_objname_from_hidden_object(o)
            if name == prefix:
                glacier_obj = o
                break

        if glacier_obj is None:
            # no matching hidden object found; nothing to delete
            return

        glacier_archive_id = get_glacier_key_from_hidden_object(glacier_obj)
        self.glacier.delete_archive(glacier_archive_id)
        self.swift.delete_object(glacier_hidden_account, container,
                                 glacier_obj)

    def delete_actual_object(self, obj):
        """
        Deletes the end-user object indicated by the actual object name given
        '<account>/<container>/<object>' if and only if the X-Delete-At value
        of the object is exactly the timestamp given.

        :param obj: The name of the end-user object to delete:
                           '<account>/<container>/<object>'
        """
        path = '/v1/' + urllib.quote(obj.lstrip('/'))
        self.swift.make_request('DELETE', path,
                                {}, (2, HTTP_NOT_FOUND))
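

# A minimal, self-contained sketch (not part of the example above) of the
# hidden-container naming scheme the expirer relies on: queue containers are
# named by a Unix timestamp, so an entry becomes due once that time has
# passed. `is_entry_due` is a hypothetical helper for illustration only.
import time

def is_entry_due(hidden_container, now=None):
    """Return True once the hidden container's timestamp has passed."""
    now = now if now is not None else time.time()
    return int(hidden_container) <= int(now)

# e.g. is_entry_due('1400000000') is True once time.time() exceeds 1400000000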
Example #46
class ContainerReconciler(Daemon):
    """
    Move objects that are in the wrong storage policy.
    """

    def __init__(self, conf):
        self.conf = conf
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
        self.interval = int(conf.get('interval', 30))
        conf_path = conf.get('__file__') or \
            '/etc/swift/container-reconciler.conf'
        self.logger = get_logger(conf, log_route='container-reconciler')
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Container Reconciler',
                                    request_tries)
        self.stats = defaultdict(int)
        self.last_stat_time = time.time()

    def stats_log(self, metric, msg, *args, **kwargs):
        """
        Update stats tracking for metric and emit log message.
        """
        level = kwargs.pop('level', logging.DEBUG)
        log_message = '%s: ' % metric + msg
        self.logger.log(level, log_message, *args, **kwargs)
        self.stats[metric] += 1

    def log_stats(self, force=False):
        """
        Dump stats to logger; no-op when stats have already been
        logged in the last minute.
        """
        now = time.time()
        should_log = force or (now - self.last_stat_time > 60)
        if should_log:
            self.last_stat_time = now
            self.logger.info('Reconciler Stats: %r', dict(**self.stats))

    def pop_queue(self, container, obj, q_ts, q_record):
        """
        Issue a delete object request to the container for the misplaced
        object queue entry.

        :param container: the misplaced objects container
        :param obj: the name of the misplaced object
        :param q_ts: the timestamp of the misplaced object
        :param q_record: the timestamp of the queue entry

        N.B. q_ts will normally be the same time as q_record except when
        an object was manually re-enqueued.
        """
        q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)
        x_timestamp = slightly_later_timestamp(max(q_record, q_ts))
        self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',
                       q_path, q_ts, x_timestamp)
        headers = {'X-Timestamp': x_timestamp}
        direct_delete_container_entry(
            self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,
            container, obj, headers=headers)

    def throw_tombstones(self, account, container, obj, timestamp,
                         policy_index, path):
        """
        Issue a delete object request to the given storage_policy.

        :param account: the account name
        :param container: the container name
        :param obj: the object name
        :param timestamp: the timestamp of the object to delete
        :param policy_index: the policy index to direct the request
        :param path: the path to be used for logging
        """
        x_timestamp = slightly_later_timestamp(timestamp)
        self.stats_log('cleanup_attempt', '%r (%f) from policy_index '
                       '%s (%s) will be deleted',
                       path, timestamp, policy_index, x_timestamp)
        headers = {
            'X-Timestamp': x_timestamp,
            'X-Backend-Storage-Policy-Index': policy_index,
        }
        success = False
        try:
            self.swift.delete_object(account, container, obj,
                                     acceptable_statuses=(2, 404),
                                     headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('cleanup_failed', '%r (%f) was not cleaned up '
                           'in storage_policy %s (%s)', path, timestamp,
                           policy_index, err)
        else:
            success = True
            self.stats_log('cleanup_success', '%r (%f) was successfully '
                           'removed from policy_index %s', path, timestamp,
                           policy_index)
        return success

    def _reconcile_object(self, account, container, obj, q_policy_index, q_ts,
                          q_op, path, **kwargs):
        """
        Perform object reconciliation.

        :param account: the account name of the misplaced object
        :param container: the container name of the misplaced object
        :param obj: the object name
        :param q_policy_index: the policy index of the source indicated by the
                               queue entry.
        :param q_ts: the timestamp of the misplaced object
        :param q_op: the operation of the misplaced request
        :param path: the full path of the misplaced object for logging

        :returns: True to indicate the request is fully processed
                  successfully, otherwise False.
        """
        container_policy_index = direct_get_container_policy_index(
            self.swift.container_ring, account, container)
        if container_policy_index is None:
            self.stats_log('unavailable_container', '%r (%f) unable to '
                           'determine the destination policy_index',
                           path, q_ts)
            return False
        if container_policy_index == q_policy_index:
            self.stats_log('noop_object', '%r (%f) container policy_index '
                           '%s matches queue policy index %s', path, q_ts,
                           container_policy_index, q_policy_index)
            return True

        # check if object exists in the destination already
        self.logger.debug('checking for %r (%f) in destination '
                          'policy_index %s', path, q_ts,
                          container_policy_index)
        headers = {
            'X-Backend-Storage-Policy-Index': container_policy_index}
        dest_obj = self.swift.get_object_metadata(account, container, obj,
                                                  headers=headers,
                                                  acceptable_statuses=(2, 4))
        dest_ts = Timestamp(dest_obj.get('x-backend-timestamp', 0))
        if dest_ts >= q_ts:
            self.stats_log('found_object', '%r (%f) in policy_index %s '
                           'is newer than queue (%f)', path, dest_ts,
                           container_policy_index, q_ts)
            return self.throw_tombstones(account, container, obj, q_ts,
                                         q_policy_index, path)

        # object is misplaced
        self.stats_log('misplaced_object', '%r (%f) in policy_index %s '
                       'should be in policy_index %s', path, q_ts,
                       q_policy_index, container_policy_index)

        # fetch object from the source location
        self.logger.debug('fetching %r (%f) from storage policy %s', path,
                          q_ts, q_policy_index)
        headers = {
            'X-Backend-Storage-Policy-Index': q_policy_index}
        try:
            source_obj_status, source_obj_info, source_obj_iter = \
                self.swift.get_object(account, container, obj,
                                      headers=headers,
                                      acceptable_statuses=(2, 4))
        except UnexpectedResponse as err:
            source_obj_status = err.resp.status_int
            source_obj_info = {}
            source_obj_iter = None

        source_ts = Timestamp(source_obj_info.get('x-backend-timestamp', 0))
        if source_obj_status == 404 and q_op == 'DELETE':
            return self.ensure_tombstone_in_right_location(
                q_policy_index, account, container, obj, q_ts, path,
                container_policy_index, source_ts)
        else:
            return self.ensure_object_in_right_location(
                q_policy_index, account, container, obj, q_ts, path,
                container_policy_index, source_ts, source_obj_status,
                source_obj_info, source_obj_iter)

    def ensure_object_in_right_location(self, q_policy_index, account,
                                        container, obj, q_ts, path,
                                        container_policy_index, source_ts,
                                        source_obj_status, source_obj_info,
                                        source_obj_iter, **kwargs):
        """
        Validate source object will satisfy the misplaced object queue entry
        and move to destination.

        :param q_policy_index: the policy_index for the source object
        :param account: the account name of the misplaced object
        :param container: the container name of the misplaced object
        :param obj: the name of the misplaced object
        :param q_ts: the timestamp of the misplaced object
        :param path: the full path of the misplaced object for logging
        :param container_policy_index: the policy_index of the destination
        :param source_ts: the timestamp of the source object
        :param source_obj_status: the HTTP status of the source object request
        :param source_obj_info: the HTTP headers of the source object request
        :param source_obj_iter: the body iter of the source object request
        """
        if source_obj_status // 100 != 2 or source_ts < q_ts:
            if q_ts < time.time() - self.reclaim_age:
                # it's old and there are no tombstones or anything; give up
                self.stats_log('lost_source', '%r (%s) was not available in '
                               'policy_index %s and has expired', path,
                               q_ts.internal, q_policy_index,
                               level=logging.CRITICAL)
                return True
            # the source object is unavailable or older than the queue
            # entry; a version that will satisfy the queue entry hopefully
            # exists somewhere in the cluster, so wait and try again
            self.stats_log('unavailable_source', '%r (%s) in '
                           'policy_index %s responded %s (%s)', path,
                           q_ts.internal, q_policy_index, source_obj_status,
                           source_ts.internal, level=logging.WARNING)
            return False

        # optimistically move any source with a timestamp >= q_ts
        ts = max(Timestamp(source_ts), q_ts)
        # move the object
        put_timestamp = slightly_later_timestamp(ts, offset=2)
        self.stats_log('copy_attempt', '%r (%f) in policy_index %s will be '
                       'moved to policy_index %s (%s)', path, source_ts,
                       q_policy_index, container_policy_index, put_timestamp)
        headers = source_obj_info.copy()
        headers['X-Backend-Storage-Policy-Index'] = container_policy_index
        headers['X-Timestamp'] = put_timestamp

        try:
            self.swift.upload_object(
                FileLikeIter(source_obj_iter), account, container, obj,
                headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('copy_failed', 'upload %r (%f) from '
                           'policy_index %s to policy_index %s '
                           'returned %s', path, source_ts, q_policy_index,
                           container_policy_index, err, level=logging.WARNING)
            return False
        except:  # noqa
            self.stats_log('unhandled_error', 'unable to upload %r (%f) '
                           'from policy_index %s to policy_index %s ', path,
                           source_ts, q_policy_index, container_policy_index,
                           level=logging.ERROR, exc_info=True)
            return False

        self.stats_log('copy_success', '%r (%f) moved from policy_index %s '
                       'to policy_index %s (%s)', path, source_ts,
                       q_policy_index, container_policy_index, put_timestamp)

        return self.throw_tombstones(account, container, obj, q_ts,
                                     q_policy_index, path)

    def ensure_tombstone_in_right_location(self, q_policy_index, account,
                                           container, obj, q_ts, path,
                                           container_policy_index, source_ts,
                                           **kwargs):
        """
        Issue a DELETE request against the destination to match the
        misplaced DELETE against the source.
        """
        delete_timestamp = slightly_later_timestamp(q_ts, offset=2)
        self.stats_log('delete_attempt', '%r (%f) in policy_index %s '
                       'will be deleted from policy_index %s (%s)', path,
                       source_ts, q_policy_index, container_policy_index,
                       delete_timestamp)
        headers = {
            'X-Backend-Storage-Policy-Index': container_policy_index,
            'X-Timestamp': delete_timestamp,
        }
        try:
            self.swift.delete_object(account, container, obj,
                                     headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('delete_failed', 'delete %r (%f) from '
                           'policy_index %s (%s) returned %s', path,
                           source_ts, container_policy_index,
                           delete_timestamp, err, level=logging.WARNING)
            return False
        except:  # noqa
            self.stats_log('unhandled_error', 'unable to delete %r (%f) '
                           'from policy_index %s (%s)', path, source_ts,
                           container_policy_index, delete_timestamp,
                           level=logging.ERROR, exc_info=True)
            return False

        self.stats_log('delete_success', '%r (%f) deleted from '
                       'policy_index %s (%s)', path, source_ts,
                       container_policy_index, delete_timestamp,
                       level=logging.INFO)

        return self.throw_tombstones(account, container, obj, q_ts,
                                     q_policy_index, path)

    def reconcile_object(self, info):
        """
        Process a possibly misplaced object write request.  Determine correct
        destination storage policy by checking with primary containers.  Check
        source and destination, copying or deleting into destination and
        cleaning up the source as needed.

        This method wraps _reconcile_object for exception handling.

        :param info: a queue entry dict

        :returns: True to indicate the request is fully processed
                  successfully, otherwise False.
        """
        self.logger.debug('checking placement for %r (%f) '
                          'in policy_index %s', info['path'],
                          info['q_ts'], info['q_policy_index'])
        success = False
        try:
            success = self._reconcile_object(**info)
        except:  # noqa
            self.logger.exception('Unhandled Exception trying to '
                                  'reconcile %r (%f) in policy_index %s',
                                  info['path'], info['q_ts'],
                                  info['q_policy_index'])
        if success:
            metric = 'success'
            msg = 'was handled successfully'
        else:
            metric = 'retry'
            msg = 'must be retried'
        msg = '%(path)r (%(q_ts)f) in policy_index %(q_policy_index)s ' + msg
        self.stats_log(metric, msg, info, level=logging.INFO)
        self.log_stats()
        return success

    def _iter_containers(self):
        """
        Generate a list of containers to process.
        """
        # hit most recent container first instead of waiting on the updaters
        current_container = get_reconciler_container_name(time.time())
        yield current_container
        container_gen = self.swift.iter_containers(MISPLACED_OBJECTS_ACCOUNT)
        self.logger.debug('looking for containers in %s',
                          MISPLACED_OBJECTS_ACCOUNT)
        while True:
            one_page = None
            try:
                one_page = list(itertools.islice(
                    container_gen, constraints.CONTAINER_LISTING_LIMIT))
            except UnexpectedResponse as err:
                self.logger.error('Error listing containers in '
                                  'account %s (%s)',
                                  MISPLACED_OBJECTS_ACCOUNT, err)

            if not one_page:
                # don't generally expect more than one page
                break
            # reversed order since we expect older containers to be empty
            for c in reversed(one_page):
                # encoding here is defensive
                container = c['name'].encode('utf8')
                if container == current_container:
                    continue  # we've already hit this one this pass
                yield container

    def _iter_objects(self, container):
        """
        Generate a list of objects to process.

        :param container: the name of the container to process

        If the given container is empty and older than reclaim_age this
        processor will attempt to reap it.
        """
        self.logger.debug('looking for objects in %s', container)
        found_obj = False
        try:
            for raw_obj in self.swift.iter_objects(
                    MISPLACED_OBJECTS_ACCOUNT, container):
                found_obj = True
                yield raw_obj
        except UnexpectedResponse as err:
            self.logger.error('Error listing objects in container %s (%s)',
                              container, err)
        if float(container) < time.time() - self.reclaim_age and \
                not found_obj:
            # Try to delete old empty containers so the queue doesn't
            # grow without bound. It's ok if there's a conflict.
            self.swift.delete_container(
                MISPLACED_OBJECTS_ACCOUNT, container,
                acceptable_statuses=(2, 404, 409, 412))
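
    # Note: queue containers are named by timestamp (via
    # get_reconciler_container_name above), which is why float(container)
    # can be compared directly against time.time() - self.reclaim_age when
    # deciding whether an empty container is old enough to reap.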

    def reconcile(self):
        """
        Main entry point for processing misplaced objects.

        Iterate over all queue entries and delegate to reconcile_object.
        """
        self.logger.debug('pulling items from the queue')
        for container in self._iter_containers():
            for raw_obj in self._iter_objects(container):
                try:
                    obj_info = parse_raw_obj(raw_obj)
                except Exception:
                    self.stats_log('invalid_record',
                                   'invalid queue record: %r', raw_obj,
                                   level=logging.ERROR, exc_info=True)
                    continue
                finished = self.reconcile_object(obj_info)
                if finished:
                    self.pop_queue(container, raw_obj['name'],
                                   obj_info['q_ts'],
                                   obj_info['q_record'])
            self.log_stats()
            self.logger.debug('finished container %s', container)

    def run_once(self, *args, **kwargs):
        """
        Process every entry in the queue.
        """
        try:
            self.reconcile()
        except:  # noqa
            self.logger.exception('Unhandled Exception trying to reconcile')
        self.log_stats(force=True)

    def run_forever(self, *args, **kwargs):
        while True:
            self.run_once(*args, **kwargs)
            self.stats = defaultdict(int)
            self.logger.info('sleeping between intervals (%ss)', self.interval)
            time.sleep(self.interval)
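
# The reconciler above leans on slightly_later_timestamp() so that its
# cleanup DELETEs sort strictly after the misplaced write. A hedged,
# self-contained stand-in (Swift's real helper builds on its internal
# Timestamp offsets; this float nudge is for illustration only):
def slightly_later_timestamp_sketch(ts, offset=1):
    # each offset step nudges the value by 10 microseconds, enough to win a
    # last-write-wins comparison against the original timestamp
    return '%.5f' % (float(ts) + offset * 0.00001)

# slightly_later_timestamp_sketch('1400000000.00000') -> '1400000000.00001'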
Example #47
class ObjectExpirer(Daemon):
    """
    Daemon that queries the internal hidden expiring_objects_account to
    discover objects that need to be deleted.

    :param conf: The daemon configuration.
    """

    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path,
                                    'Swift Object Expirer',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or of the final
        progress if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_expiration_pass': elapsed,
                              'expired_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects expired') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon has no additional keyword args.
        """
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.expiring_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(self.expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                for o in self.swift.iter_objects(self.expiring_objects_account,
                                                 container):
                    obj = o['name']
                    timestamp, actual_obj = obj.split('-', 1)
                    timestamp = int(timestamp)
                    if timestamp > int(time()):
                        break
                    start_time = time()
                    try:
                        self.delete_actual_object(actual_obj, timestamp)
                        self.swift.delete_object(self.expiring_objects_account,
                                                 container, obj)
                        self.report_objects += 1
                        self.logger.increment('objects')
                    except (Exception, Timeout) as err:
                        self.logger.increment('errors')
                        self.logger.exception(
                            _('Exception while deleting object %s %s %s') %
                            (container, obj, str(err)))
                    self.logger.timing_since('timing', start_time)
                    self.report()
                try:
                    self.swift.delete_container(
                        self.expiring_objects_account,
                        container,
                        acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
                except (Exception, Timeout) as err:
                    self.logger.exception(
                        _('Exception while deleting container %s %s') %
                        (container, str(err)))
            self.logger.debug(_('Run end'))
            self.report(final=True)
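
# A hedged sketch of the queue-entry naming convention the run_once loop
# above depends on: each object in the hidden expiring_objects account is
# named '<delete-at-timestamp>-<account>/<container>/<object>', so a single
# split on the first '-' recovers both pieces. The helper name is
# illustrative, not part of the example.
def parse_expiring_entry(obj_name):
    timestamp, actual_obj = obj_name.split('-', 1)
    return int(timestamp), actual_obj

# parse_expiring_entry('1400000000-AUTH_test/c/o')
# -> (1400000000, 'AUTH_test/c/o')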
Example #48
class ContainerReconciler(Daemon):
    """
    Move objects that are in the wrong storage policy.
    """
    def __init__(self, conf):
        self.conf = conf
        # This option defines how long an un-processable misplaced object
        # marker will be retried before it is abandoned.  It is not coupled
        # with the tombstone reclaim age in the consistency engine.
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
        self.interval = int(conf.get('interval', 30))
        conf_path = conf.get('__file__') or \
            '/etc/swift/container-reconciler.conf'
        self.logger = get_logger(conf, log_route='container-reconciler')
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path, 'Swift Container Reconciler',
                                    request_tries)
        self.stats = defaultdict(int)
        self.last_stat_time = time.time()

    def stats_log(self, metric, msg, *args, **kwargs):
        """
        Update stats tracking for metric and emit log message.
        """
        level = kwargs.pop('level', logging.DEBUG)
        log_message = '%s: ' % metric + msg
        self.logger.log(level, log_message, *args, **kwargs)
        self.stats[metric] += 1

    def log_stats(self, force=False):
        """
        Dump stats to logger; no-op when stats have already been
        logged in the last minute.
        """
        now = time.time()
        should_log = force or (now - self.last_stat_time > 60)
        if should_log:
            self.last_stat_time = now
            self.logger.info('Reconciler Stats: %r', dict(**self.stats))

    def pop_queue(self, container, obj, q_ts, q_record):
        """
        Issue a delete object request to the container for the misplaced
        object queue entry.

        :param container: the misplaced objects container
        :param obj: the name of the misplaced object
        :param q_ts: the timestamp of the misplaced object
        :param q_record: the timestamp of the queue entry

        N.B. q_ts will normally be the same time as q_record except when
        an object was manually re-enqueued.
        """
        q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)
        x_timestamp = slightly_later_timestamp(max(q_record, q_ts))
        self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',
                       q_path, q_ts, x_timestamp)
        headers = {'X-Timestamp': x_timestamp}
        direct_delete_container_entry(self.swift.container_ring,
                                      MISPLACED_OBJECTS_ACCOUNT,
                                      container,
                                      obj,
                                      headers=headers)

    def throw_tombstones(self, account, container, obj, timestamp,
                         policy_index, path):
        """
        Issue a delete object request to the given storage_policy.

        :param account: the account name
        :param container: the container name
        :param obj: the object name
        :param timestamp: the timestamp of the object to delete
        :param policy_index: the policy index to direct the request
        :param path: the path to be used for logging
        """
        x_timestamp = slightly_later_timestamp(timestamp)
        self.stats_log('cleanup_attempt', '%r (%f) from policy_index '
                       '%s (%s) will be deleted', path, timestamp,
                       policy_index, x_timestamp)
        headers = {
            'X-Timestamp': x_timestamp,
            'X-Backend-Storage-Policy-Index': policy_index,
        }
        success = False
        try:
            self.swift.delete_object(account,
                                     container,
                                     obj,
                                     acceptable_statuses=(2, 404),
                                     headers=headers)
        except UnexpectedResponse as err:
            self.stats_log(
                'cleanup_failed', '%r (%f) was not cleaned up '
                'in storage_policy %s (%s)', path, timestamp, policy_index,
                err)
        else:
            success = True
            self.stats_log(
                'cleanup_success', '%r (%f) was successfully '
                'removed from policy_index %s', path, timestamp, policy_index)
        return success

    def _reconcile_object(self, account, container, obj, q_policy_index, q_ts,
                          q_op, path, **kwargs):
        """
        Perform object reconciliation.

        :param account: the account name of the misplaced object
        :param container: the container name of the misplaced object
        :param obj: the object name
        :param q_policy_index: the policy index of the source indicated by the
                               queue entry.
        :param q_ts: the timestamp of the misplaced object
        :param q_op: the operation of the misplaced request
        :param path: the full path of the misplaced object for logging

        :returns: True to indicate the request is fully processed
                  successfully, otherwise False.
        """
        container_policy_index = direct_get_container_policy_index(
            self.swift.container_ring, account, container)
        if container_policy_index is None:
            self.stats_log(
                'unavailable_container', '%r (%f) unable to '
                'determine the destination policy_index', path, q_ts)
            return False
        if container_policy_index == q_policy_index:
            self.stats_log(
                'noop_object', '%r (%f) container policy_index '
                '%s matches queue policy index %s', path, q_ts,
                container_policy_index, q_policy_index)
            return True

        # check if object exists in the destination already
        self.logger.debug(
            'checking for %r (%f) in destination '
            'policy_index %s', path, q_ts, container_policy_index)
        headers = {'X-Backend-Storage-Policy-Index': container_policy_index}
        dest_obj = self.swift.get_object_metadata(account,
                                                  container,
                                                  obj,
                                                  headers=headers,
                                                  acceptable_statuses=(2, 4))
        dest_ts = Timestamp(dest_obj.get('x-backend-timestamp', 0))
        if dest_ts >= q_ts:
            self.stats_log(
                'found_object', '%r (%f) in policy_index %s '
                'is newer than queue (%f)', path, dest_ts,
                container_policy_index, q_ts)
            return self.throw_tombstones(account, container, obj, q_ts,
                                         q_policy_index, path)

        # object is misplaced
        self.stats_log(
            'misplaced_object', '%r (%f) in policy_index %s '
            'should be in policy_index %s', path, q_ts, q_policy_index,
            container_policy_index)

        # fetch object from the source location
        self.logger.debug('fetching %r (%f) from storage policy %s', path,
                          q_ts, q_policy_index)
        headers = {'X-Backend-Storage-Policy-Index': q_policy_index}
        try:
            source_obj_status, source_obj_info, source_obj_iter = \
                self.swift.get_object(account, container, obj,
                                      headers=headers,
                                      acceptable_statuses=(2, 4))
        except UnexpectedResponse as err:
            source_obj_status = err.resp.status_int
            source_obj_info = {}
            source_obj_iter = None

        source_ts = Timestamp(source_obj_info.get('x-backend-timestamp', 0))
        if source_obj_status == 404 and q_op == 'DELETE':
            return self.ensure_tombstone_in_right_location(
                q_policy_index, account, container, obj, q_ts, path,
                container_policy_index, source_ts)
        else:
            return self.ensure_object_in_right_location(
                q_policy_index, account, container, obj, q_ts, path,
                container_policy_index, source_ts, source_obj_status,
                source_obj_info, source_obj_iter)

    def ensure_object_in_right_location(self, q_policy_index, account,
                                        container, obj, q_ts, path,
                                        container_policy_index, source_ts,
                                        source_obj_status, source_obj_info,
                                        source_obj_iter, **kwargs):
        """
        Validate source object will satisfy the misplaced object queue entry
        and move to destination.

        :param q_policy_index: the policy_index for the source object
        :param account: the account name of the misplaced object
        :param container: the container name of the misplaced object
        :param obj: the name of the misplaced object
        :param q_ts: the timestamp of the misplaced object
        :param path: the full path of the misplaced object for logging
        :param container_policy_index: the policy_index of the destination
        :param source_ts: the timestamp of the source object
        :param source_obj_status: the HTTP status of the source object request
        :param source_obj_info: the HTTP headers of the source object request
        :param source_obj_iter: the body iter of the source object request
        """
        if source_obj_status // 100 != 2 or source_ts < q_ts:
            if q_ts < time.time() - self.reclaim_age:
                # it's old and there are no tombstones or anything; give up
                self.stats_log('lost_source', '%r (%s) was not available in '
                               'policy_index %s and has expired',
                               path,
                               q_ts.internal,
                               q_policy_index,
                               level=logging.CRITICAL)
                return True
            # the source object is unavailable or older than the queue
            # entry; a version that will satisfy the queue entry hopefully
            # exists somewhere in the cluster, so wait and try again
            self.stats_log('unavailable_source', '%r (%s) in '
                           'policy_index %s responded %s (%s)',
                           path,
                           q_ts.internal,
                           q_policy_index,
                           source_obj_status,
                           source_ts.internal,
                           level=logging.WARNING)
            return False

        # optimistically move any source with a timestamp >= q_ts
        ts = max(Timestamp(source_ts), q_ts)
        # move the object
        put_timestamp = slightly_later_timestamp(ts, offset=2)
        self.stats_log(
            'copy_attempt', '%r (%f) in policy_index %s will be '
            'moved to policy_index %s (%s)', path, source_ts, q_policy_index,
            container_policy_index, put_timestamp)
        headers = source_obj_info.copy()
        headers['X-Backend-Storage-Policy-Index'] = container_policy_index
        headers['X-Timestamp'] = put_timestamp

        try:
            self.swift.upload_object(FileLikeIter(source_obj_iter),
                                     account,
                                     container,
                                     obj,
                                     headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('copy_failed', 'upload %r (%f) from '
                           'policy_index %s to policy_index %s '
                           'returned %s',
                           path,
                           source_ts,
                           q_policy_index,
                           container_policy_index,
                           err,
                           level=logging.WARNING)
            return False
        except:  # noqa
            self.stats_log('unhandled_error', 'unable to upload %r (%f) '
                           'from policy_index %s to policy_index %s ',
                           path,
                           source_ts,
                           q_policy_index,
                           container_policy_index,
                           level=logging.ERROR,
                           exc_info=True)
            return False

        self.stats_log(
            'copy_success', '%r (%f) moved from policy_index %s '
            'to policy_index %s (%s)', path, source_ts, q_policy_index,
            container_policy_index, put_timestamp)

        return self.throw_tombstones(account, container, obj, q_ts,
                                     q_policy_index, path)

    def ensure_tombstone_in_right_location(self, q_policy_index, account,
                                           container, obj, q_ts, path,
                                           container_policy_index, source_ts,
                                           **kwargs):
        """
        Issue a DELETE request against the destination to match the
        misplaced DELETE against the source.
        """
        delete_timestamp = slightly_later_timestamp(q_ts, offset=2)
        self.stats_log(
            'delete_attempt', '%r (%f) in policy_index %s '
            'will be deleted from policy_index %s (%s)', path, source_ts,
            q_policy_index, container_policy_index, delete_timestamp)
        headers = {
            'X-Backend-Storage-Policy-Index': container_policy_index,
            'X-Timestamp': delete_timestamp,
        }
        try:
            self.swift.delete_object(account, container, obj, headers=headers)
        except UnexpectedResponse as err:
            self.stats_log('delete_failed', 'delete %r (%f) from '
                           'policy_index %s (%s) returned %s',
                           path,
                           source_ts,
                           container_policy_index,
                           delete_timestamp,
                           err,
                           level=logging.WARNING)
            return False
        except:  # noqa
            self.stats_log('unhandled_error', 'unable to delete %r (%f) '
                           'from policy_index %s (%s)',
                           path,
                           source_ts,
                           container_policy_index,
                           delete_timestamp,
                           level=logging.ERROR,
                           exc_info=True)
            return False

        self.stats_log('delete_success', '%r (%f) deleted from '
                       'policy_index %s (%s)',
                       path,
                       source_ts,
                       container_policy_index,
                       delete_timestamp,
                       level=logging.INFO)

        return self.throw_tombstones(account, container, obj, q_ts,
                                     q_policy_index, path)

    def reconcile_object(self, info):
        """
        Process a possibly misplaced object write request.  Determine correct
        destination storage policy by checking with primary containers.  Check
        source and destination, copying or deleting into destination and
        cleaning up the source as needed.

        This method wraps _reconcile_object for exception handling.

        :param info: a queue entry dict

        :returns: True to indicate the request is fully processed
                  successfully, otherwise False.
        """
        self.logger.debug(
            'checking placement for %r (%f) '
            'in policy_index %s', info['path'], info['q_ts'],
            info['q_policy_index'])
        success = False
        try:
            success = self._reconcile_object(**info)
        except:  # noqa
            self.logger.exception(
                'Unhandled Exception trying to '
                'reconcile %r (%f) in policy_index %s', info['path'],
                info['q_ts'], info['q_policy_index'])
        if success:
            metric = 'success'
            msg = 'was handled successfully'
        else:
            metric = 'retry'
            msg = 'must be retried'
        msg = '%(path)r (%(q_ts)f) in policy_index %(q_policy_index)s ' + msg
        self.stats_log(metric, msg, info, level=logging.INFO)
        self.log_stats()
        return success

    def _iter_containers(self):
        """
        Generate a list of containers to process.
        """
        # hit most recent container first instead of waiting on the updaters
        current_container = get_reconciler_container_name(time.time())
        yield current_container
        container_gen = self.swift.iter_containers(MISPLACED_OBJECTS_ACCOUNT)
        self.logger.debug('looking for containers in %s',
                          MISPLACED_OBJECTS_ACCOUNT)
        while True:
            one_page = None
            try:
                one_page = list(
                    itertools.islice(container_gen,
                                     constraints.CONTAINER_LISTING_LIMIT))
            except UnexpectedResponse as err:
                self.logger.error(
                    'Error listing containers in '
                    'account %s (%s)', MISPLACED_OBJECTS_ACCOUNT, err)

            if not one_page:
                # don't generally expect more than one page
                break
            # reversed order since we expect older containers to be empty
            for c in reversed(one_page):
                # encoding here is defensive
                container = c['name'].encode('utf8')
                if container == current_container:
                    continue  # we've already hit this one this pass
                yield container

    def _iter_objects(self, container):
        """
        Generate a list of objects to process.

        :param container: the name of the container to process

        If the given container is empty and older than reclaim_age this
        processor will attempt to reap it.
        """
        self.logger.debug('looking for objects in %s', container)
        found_obj = False
        try:
            for raw_obj in self.swift.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                                   container):
                found_obj = True
                yield raw_obj
        except UnexpectedResponse as err:
            self.logger.error('Error listing objects in container %s (%s)',
                              container, err)
        if float(container) < time.time() - self.reclaim_age and \
                not found_obj:
            # Try to delete old empty containers so the queue doesn't
            # grow without bound. It's ok if there's a conflict.
            self.swift.delete_container(MISPLACED_OBJECTS_ACCOUNT,
                                        container,
                                        acceptable_statuses=(2, 404, 409, 412))

    def reconcile(self):
        """
        Main entry point for processing misplaced objects.

        Iterate over all queue entries and delegate to reconcile_object.
        """
        self.logger.debug('pulling items from the queue')
        for container in self._iter_containers():
            for raw_obj in self._iter_objects(container):
                try:
                    obj_info = parse_raw_obj(raw_obj)
                except Exception:
                    self.stats_log('invalid_record',
                                   'invalid queue record: %r',
                                   raw_obj,
                                   level=logging.ERROR,
                                   exc_info=True)
                    continue
                finished = self.reconcile_object(obj_info)
                if finished:
                    self.pop_queue(container, raw_obj['name'],
                                   obj_info['q_ts'], obj_info['q_record'])
            self.log_stats()
            self.logger.debug('finished container %s', container)

    def run_once(self, *args, **kwargs):
        """
        Process every entry in the queue.
        """
        try:
            self.reconcile()
        except:  # noqa
            self.logger.exception('Unhandled Exception trying to reconcile')
        self.log_stats(force=True)

    def run_forever(self, *args, **kwargs):
        while True:
            self.run_once(*args, **kwargs)
            self.stats = defaultdict(int)
            self.logger.info('sleeping between intervals (%ss)', self.interval)
            time.sleep(self.interval)
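
# For reference: the entries this reconciler consumes (and that the probe
# test below asserts on) are named '<policy_index>:/<account>/<container>/<object>'
# inside timestamp-named containers of the .misplaced_objects account. A
# hypothetical parser mirroring what parse_raw_obj must recover:
def parse_misplaced_name(name):
    policy_index, path = name.split(':', 1)
    account, container, obj = path.lstrip('/').split('/', 2)
    return int(policy_index), account, container, obj

# parse_misplaced_name('1:/AUTH_test/c/o') -> (1, 'AUTH_test', 'c', 'o')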
Example #49
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(list(POLICIES))
        new_policy = random.choice([p for p in POLICIES if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses if int(old_policy)
            == int(metadata['X-Backend-Storage-Policy-Index'])
        ]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object()
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        client = InternalClient(conf_file, 'probe-test', 3)
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        get_to_final_state()

        # verify entry in the queue
        client = InternalClient(conf_file, 'probe-test', 3)
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        get_to_final_state()
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)
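
# The test above builds its InternalClient straight from the reconciler's
# own conf file; the same pattern works for ad-hoc inspection of the
# misplaced-objects queue (conf path assumed; Python 2 print to match the
# rest of this document):
from swift.common.internal_client import InternalClient

client = InternalClient('/etc/swift/container-reconciler.conf',
                        'probe-test', 3)
for container in client.iter_containers('.misplaced_objects'):
    for obj in client.iter_objects('.misplaced_objects', container['name']):
        print obj['name']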
Example #50
    def __init__(self, conf, container_ring=None, logger=None):
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = logger or get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
        #: it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: ContainerSyncCluster instance for validating sync-to values.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(
                conf.get('swift_dir', '/etc/swift'),
                'container-sync-realms.conf'),
            self.logger)
        #: The list of hosts we're allowed to send syncs to. This can be
        #: overridden by data in self.realms_conf
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        self.http_proxies = [
            a.strip()
            for a in conf.get('sync_proxy', '').split(',')
            if a.strip()]
        #: ContainerSyncStore instance for iterating over synced containers
        self.sync_store = ContainerSyncStore(self.devices,
                                             self.logger,
                                             self.mount_check)
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers whose sync has been turned off, but
        #: are not yet cleared from the sync store.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0
        #: Time of last stats report.
        self.reported = time()
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(self.swift_dir,
                                                     ring_name='container')
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        self._myips = whataremyips(bind_ip)
        self._myport = int(conf.get('bind_port', 6001))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self.conn_timeout = float(conf.get('conn_timeout', 5))
        request_tries = int(conf.get('request_tries') or 3)

        internal_client_conf_path = conf.get('internal_client_conf_path')
        if not internal_client_conf_path:
            self.logger.warning(
                _('Configuration option internal_client_conf_path not '
                  'defined. Using default configuration. See '
                  'internal-client.conf-sample for options'))
            internal_client_conf = ConfigString(ic_conf_body)
        else:
            internal_client_conf = internal_client_conf_path
        try:
            self.swift = InternalClient(
                internal_client_conf, 'Swift Container Sync', request_tries)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise SystemExit(
                _('Unable to load internal client from config: %r (%s)') %
                (internal_client_conf_path, err))
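
    # The fallback above lets the daemon run with no conf file on disk by
    # wrapping the bundled sample config in a ConfigString. Sketched
    # standalone (helper name is hypothetical; ConfigString and ic_conf_body
    # come from the example's own imports):
    #
    #   def pick_internal_client_conf(conf, ic_conf_body):
    #       path = conf.get('internal_client_conf_path')
    #       if not path:
    #           return ConfigString(ic_conf_body)
    #       return path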
Example #51
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice([p for p in ENABLED_POLICIES
                                    if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        self.get_to_final_state()

        # verify entry in the queue
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        self.get_to_final_state()
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)

        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(int(new_policy))
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
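
The queue entries asserted above follow a '<policy_index>:/<account>/<container>/<object>' naming scheme. A small sketch of splitting such an entry back apart; parse_queue_entry is a hypothetical helper for illustration, not part of the reconciler API:

def parse_queue_entry(name):
    # '<policy_index>:/<account>/<container>/<object>'
    policy_index, path = name.split(':', 1)
    account, container, obj = path.lstrip('/').split('/', 2)
    return int(policy_index), account, container, obj

assert parse_queue_entry('1:/AUTH_test/c/o') == (1, 'AUTH_test', 'c', 'o')
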
Example #52
class ObjectExpirer(Daemon):
    """
    Daemon that queries the internal hidden expiring_objects_account to
    discover objects that need to be deleted.

    :param conf: The daemon configuration.
    """
    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='object-expirer')
        self.interval = int(conf.get('interval') or 300)
        self.expiring_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            'expiring_objects'
        conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path, 'Swift Object Expirer',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final progress
        if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(
                _('Pass completed in %ds; %d objects expired') %
                (elapsed, self.report_objects))
            dump_recon_cache(
                {
                    'object_expiration_pass': elapsed,
                    'expired_last_pass': self.report_objects
                }, self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(
                _('Pass so far %ds; %d objects expired') %
                (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.expiring_objects_account)
            self.logger.info(
                _('Pass beginning; %s possible containers; %s '
                  'possible objects') % (containers, objects))
            for c in self.swift.iter_containers(self.expiring_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self.expiring_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' %
                                        (container, obj)).hexdigest(), 16)
                        if obj_process % processes != process:
                            continue
                    timestamp, actual_obj = obj.split('-', 1)
                    timestamp = int(timestamp)
                    if timestamp > int(time()):
                        break
                    pool.spawn_n(self.delete_object, actual_obj, timestamp,
                                 container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(
                        self.expiring_objects_account,
                        container,
                        acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
                except (Exception, Timeout) as err:
                    self.logger.exception(
                        _('Exception while deleting container %s %s') %
                        (container, str(err)))
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            self.logger.exception(_('Unhandled exception'))

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to expire.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                self.logger.exception(_('Unhandled exception'))
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.

        Otherwise, return processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods.  They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes

        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process

        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')

        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')

        if processes and process >= processes:
            raise ValueError('process must be less than processes')

        return processes, process

    def delete_object(self, actual_obj, timestamp, container, obj):
        start_time = time()
        try:
            self.delete_actual_object(actual_obj, timestamp)
            self.swift.delete_object(self.expiring_objects_account, container,
                                     obj)
            self.report_objects += 1
            self.logger.increment('objects')
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            self.logger.exception(
                _('Exception while deleting object %s %s %s') %
                (container, obj, str(err)))
        self.logger.timing_since('timing', start_time)
        self.report()

    def delete_actual_object(self, actual_obj, timestamp):
        """
        Deletes the end-user object indicated by the actual object name given
        '<account>/<container>/<object>' if and only if the X-Delete-At value
        of the object is exactly the timestamp given.

        :param actual_obj: The name of the end-user object to delete:
                           '<account>/<container>/<object>'
        :param timestamp: The timestamp the X-Delete-At value must match to
                          perform the actual delete.
        """
        path = '/v1/' + urllib.quote(actual_obj.lstrip('/'))
        self.swift.make_request('DELETE', path,
                                {'X-If-Delete-At': str(timestamp)},
                                (2, HTTP_NOT_FOUND, HTTP_PRECONDITION_FAILED))
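
run_once above shards the queue across processes by hashing each entry. A minimal sketch of that sharding rule with illustrative values; belongs_to is a hypothetical helper:

import hashlib

def belongs_to(container, obj, process, processes):
    # Same md5-mod rule as run_once: hash the queue entry, mod the process
    # count, and compare against this process's shard index.
    digest = hashlib.md5(('%s/%s' % (container, obj)).encode('utf8')).hexdigest()
    return int(digest, 16) % processes == process

# Every queue entry lands in exactly one of the four shards.
assert sum(belongs_to('1500000000', '1499999999-AUTH_test/c/o', p, 4)
           for p in range(4)) == 1
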
Example #53
class TestObjectExpirer(ReplProbeTest):

    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertTrue('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertTrue('x-backend-timestamp' in metadata)
                self.assertTrue(Timestamp(metadata['x-backend-timestamp']) >
                                create_timestamp)

    def test_expirer_object_should_not_be_expired(self):
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        #   < T(expirer_executed)
        # The recreated obj should appear in any split-brain case

        # T(obj_created)
        first_created_at = time.time()
        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        delete_at = int(time.time() + 1.5)
        # T(obj_recreated)
        recreated_at = time.time() + 2.0
        # T(expirer_executed) - 'now'
        sleep_for_expirer = 2.01

        obj_brain.put_container(int(self.policy))
        obj_brain.put_object(
            headers={'X-Delete-At': delete_at,
                     'X-Timestamp': Timestamp(first_created_at).internal})

        # some object servers stopped
        obj_brain.stop_primary_half()
        obj_brain.put_object(
            headers={'X-Timestamp': Timestamp(recreated_at).internal,
                     'X-Object-Meta-Expired': 'False'})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # some object servers recovered
        obj_brain.start_primary_half()
        # sleep to make sure expirer runs at the time after obj is recreated
        time.sleep(sleep_for_expirer)
        self.expirer.once()
        # inconsistent state of objects is recovered
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name)
        self.assertIn('x-object-meta-expired', metadata)
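
The expirer walks a hidden account whose containers are named by integer timestamps and whose objects encode the target as '<delete_at>-<account>/<container>/<object>', which is what the obj.split('-', 1) in ObjectExpirer.run_once above recovers. A short sketch of that naming; build_queue_name is a hypothetical helper for illustration:

def build_queue_name(delete_at, account, container, obj):
    # '<delete_at>-<account>/<container>/<object>', matching the
    # split('-', 1) in ObjectExpirer.run_once above.
    return '%d-%s/%s/%s' % (delete_at, account, container, obj)

name = build_queue_name(1500000000, 'AUTH_test', 'c', 'o')
timestamp, actual_obj = name.split('-', 1)
assert (int(timestamp), actual_obj) == (1500000000, 'AUTH_test/c/o')
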
Example #54
    def __init__(self, conf_file, account='AUTH_test'):
        self.swift = InternalClient(conf_file, 'probe-test', 3)
        self.account = account
class ObjectTransitor(Daemon):
    def __init__(self, conf):
        super(ObjectTransitor, self).__init__(conf)
        self.conf = conf
        self.logger = get_logger(conf, log_route='s3-object-transitor')
        self.logger.set_statsd_prefix('s3-object-transitor')
        self.interval = int(conf.get('interval') or 300)
        self.s3_tr_objects_account = \
            (conf.get('auto_create_account_prefix') or '.') + \
            (conf.get('expiring_objects_account_name') or
             's3_transitioning_objects')
        conf_path = conf.get('__file__') or \
            '/etc/swift/s3-object-transitor.conf'
        request_tries = int(conf.get('request_tries') or 3)
        self.swift = InternalClient(conf_path, 'Swift Object Transitor',
                                    request_tries)
        self.report_interval = int(conf.get('report_interval') or 300)
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.rcache = join(self.recon_cache_path, 'object.recon')
        self.concurrency = int(conf.get('concurrency', 1))
        if self.concurrency < 1:
            raise ValueError("concurrency must be set to at least 1")
        self.processes = int(self.conf.get('processes', 0))
        self.process = int(self.conf.get('process', 0))
        self.client = Client(self.conf.get('sentry_sdn', ''))

    def report(self, final=False):
        """
        Emits a log line report of the progress so far, or the final progress
        if final=True.

        :param final: Set to True for the last report once the expiration pass
                      has completed.
        """
        if final:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass completed in %ds; %d objects '
                               'transitioned') %
                             (elapsed, self.report_objects))
            dump_recon_cache({'object_transition_pass': elapsed,
                              'transitioned_last_pass': self.report_objects},
                             self.rcache, self.logger)
        elif time() - self.report_last_time >= self.report_interval:
            elapsed = time() - self.report_first_time
            self.logger.info(_('Pass so far %ds; %d objects transitioned') %
                             (elapsed, self.report_objects))
            self.report_last_time = time()

    def run_once(self, *args, **kwargs):
        """
        Executes a single pass, looking for objects to transition.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon accepts processes and process keyword args.
                       These will override the values from the config file if
                       provided.
        """
        processes, process = self.get_process_values(kwargs)
        pool = GreenPool(self.concurrency)
        containers_to_delete = []
        self.report_first_time = self.report_last_time = time()
        self.report_objects = 0
        try:
            self.logger.debug(_('Run begin'))
            containers, objects = \
                self.swift.get_account_info(self.s3_tr_objects_account)
            self.logger.info(_('Pass beginning; %s possible containers; %s '
                               'possible objects') % (containers, objects))

            for c in self.swift.iter_containers(self.s3_tr_objects_account):
                container = c['name']
                timestamp = int(container)
                if timestamp > int(time()):
                    break
                containers_to_delete.append(container)
                for o in self.swift.iter_objects(self.s3_tr_objects_account,
                                                 container):
                    obj = o['name'].encode('utf8')
                    if processes > 0:
                        obj_process = int(
                            hashlib.md5('%s/%s' % (container, obj)).
                            hexdigest(), 16)
                        if obj_process % processes != process:
                            continue

                    pool.spawn_n(self.transition_object, container, obj)
            pool.waitall()
            for container in containers_to_delete:
                try:
                    self.swift.delete_container(self.s3_tr_objects_account,
                                                container, (2, 4))
                except (Exception, Timeout) as err:
                    report_exception(self.logger,
                                     _('Exception while deleting container %s %s') %
                                     (container, str(err)), self.client)
            self.logger.debug(_('Run end'))
            self.report(final=True)
        except (Exception, Timeout):
            report_exception(self.logger, _('Unhandled exception'), self.client)

    def run_forever(self, *args, **kwargs):
        """
        Executes passes forever, looking for objects to transition.

        :param args: Extra args to fulfill the Daemon interface; this daemon
                     has no additional args.
        :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                       daemon has no additional keyword args.
        """
        sleep(random() * self.interval)
        while True:
            begin = time()
            try:
                self.run_once(*args, **kwargs)
            except (Exception, Timeout):
                report_exception(self.logger, _('Unhandled exception'), self.client)
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(random() * (self.interval - elapsed))

    def get_process_values(self, kwargs):
        """
        Gets the processes, process from the kwargs if those values exist.

        Otherwise, return processes, process set in the config file.

        :param kwargs: Keyword args passed into the run_forever(), run_once()
                       methods.  They have values specified on the command
                       line when the daemon is run.
        """
        if kwargs.get('processes') is not None:
            processes = int(kwargs['processes'])
        else:
            processes = self.processes

        if kwargs.get('process') is not None:
            process = int(kwargs['process'])
        else:
            process = self.process

        if process < 0:
            raise ValueError(
                'process must be an integer greater than or equal to 0')

        if processes < 0:
            raise ValueError(
                'processes must be an integer greater than or equal to 0')

        if processes and process >= processes:
            raise ValueError(
                'process must be less than processes')

        return processes, process

    def transition_object(self, container, obj):
        start_time = time()
        try:
            obj_account, obj_container, obj_object = obj.split('/', 2)

            lifecycle = Lifecycle(obj_account, obj_container, obj_object,
                                  swift_client=self.swift)

            if is_success(lifecycle.object.status):
                object_header = lifecycle.object.headers
                object_rule = lifecycle.get_object_rule_by_action(
                    'Transition')
                last_modified = object_header['Last-Modified']
                last_modified = gmt_to_timestamp(last_modified)

                validation_flg = lifecycle.object_lifecycle_validation()
                if (validation_flg == LIFECYCLE_OK) or \
                        (validation_flg == DISABLED_EXPIRATION):
                    times = calc_when_actions_do(object_rule, last_modified)
                    actual_expire_time = int(times['Transition'])
                    if actual_expire_time == int(container):
                        self.request_transition(obj)

                    self.swift.delete_object(self.s3_tr_objects_account,
                                             container, obj)
        except (Exception, Timeout) as err:
            self.logger.increment('errors')
            report_exception(self.logger,
                             _('Exception while transitioning object %s %s %s') %
                             (container, obj, str(err)), self.client)
        self.logger.timing_since('timing', start_time)
        self.report()

    def request_transition(self, actual_obj):
        path = '/v1/' + urllib.quote(actual_obj.lstrip('/'))
        headers = {GLACIER_FLAG_META: True,
                   'X-S3-Object-Transition': True}
        resp = self.swift.make_request('POST', path, headers, (2, 5))

        if resp.status_int == 500:
            raise Exception(resp.body)
        self.report_objects += 1
        self.logger.increment('objects')
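
Finally, a brief usage sketch for the transitor daemon above, assuming a reachable Swift cluster and the default /etc/swift/s3-object-transitor.conf in place; the conf values below are illustrative:

# Illustrative only: ObjectTransitor falls back to the conf file path above
# when the conf dict carries no '__file__' entry.
conf = {'interval': '300', 'concurrency': '2', 'request_tries': '3'}
daemon = ObjectTransitor(conf)
daemon.run_once()                          # one full transition pass
daemon.run_once(processes=4, process=1)    # or handle only shard 1 of 4
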