Example #1
0
    def test_rebuild_old_chunk(self):
        """Rebuild a chunk stored in the old (pre-fullpath) on-disk format.

        Downgrades every chunk of the object to the legacy layout, deletes
        one of them, runs the rebuilder on it, then checks that the rebuilt
        chunk has the same position, size, hash and data, and that its
        headers have been upgraded to the current format.
        """
        # Downgrade all chunks to the legacy on-disk layout.
        for c in self.chunks:
            convert_to_old_chunk(
                self._chunk_path(c), self.account, self.container, self.path,
                self.version, self.content_id)

        # Pick a random chunk, keep a copy of its headers and data, then
        # remove it from disk to simulate a lost chunk.
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_headers, chunk_stream = self.blob_client.chunk_get(
            chunk['url'], check_headers=False)
        os.remove(self._chunk_path(chunk))
        chunks_kept = list(self.chunks)
        chunks_kept.remove(chunk)

        conf = self.conf.copy()
        # The test platform may not have enough rawx services to pick a
        # different one for the rebuilt chunk.
        conf['allow_same_rawx'] = True
        rebuilder = BlobRebuilder(conf, service_id=chunk_volume)
        rebuilder_worker = rebuilder.create_worker(None, None)
        rebuilder_worker._process_item(
            (self.ns, self.cid, self.content_id, chunk_id))

        _, new_chunks = self.api.object_locate(
            self.account, self.container, self.path)

        self.assertEqual(len(new_chunks), len(chunks_kept) + 1)
        # Exactly one chunk must be new: the rebuilt one.
        url_kept = [c['url'] for c in chunks_kept]
        new_chunk = None
        for c in new_chunks:
            if c['url'] not in url_kept:
                self.assertIsNone(new_chunk)
                new_chunk = c

        self.assertNotEqual(chunk['real_url'], new_chunk['real_url'])
        self.assertNotEqual(chunk['url'], new_chunk['url'])
        self.assertEqual(chunk['pos'], new_chunk['pos'])
        self.assertEqual(chunk['size'], new_chunk['size'])
        self.assertEqual(chunk['hash'], new_chunk['hash'])

        # The rebuilt chunk must hold the exact same data.
        new_chunk_headers, new_chunk_stream = self.blob_client.chunk_get(
            new_chunk['url'])
        chunk_data = b''.join(chunk_stream)
        new_chunk_data = b''.join(new_chunk_stream)
        self.assertEqual(chunk_data, new_chunk_data)
        # The rebuilt chunk must carry the new-style fullpath header.
        fullpath = encode_fullpath(self.account, self.container, self.path,
                                   self.version, self.content_id)
        self.assertEqual(fullpath, new_chunk_headers['full_path'])
        del new_chunk_headers['full_path']
        self.assertNotEqual(chunk_headers['chunk_id'],
                            new_chunk_headers['chunk_id'])
        new_chunk_id = new_chunk['url'].split('/')[3]
        self.assertEqual(new_chunk_id, new_chunk_headers['chunk_id'])
        del chunk_headers['chunk_id']
        del new_chunk_headers['chunk_id']
        self.assertEqual(OIO_VERSION, new_chunk_headers['oio_version'])
        del chunk_headers['oio_version']
        del new_chunk_headers['oio_version']
        # All remaining headers must be unchanged by the rebuild.
        self.assertEqual(chunk_headers, new_chunk_headers)
Example #2
0
    def _take_action(self, parsed_args):
        """Configure and run the rebuilder, yielding one row per item."""
        if not self.distributed:
            # These options only make sense for a local rebuild.
            self.tool_conf['dry_run'] = parsed_args.dry_run
            self.tool_conf['try_chunk_delete'] = \
                parsed_args.delete_faulty_chunks

        self.rebuilder = BlobRebuilder(self.tool_conf,
                                       input_file=parsed_args.input_file,
                                       logger=self.logger)
        if self.distributed:
            self.rebuilder.prepare_distributed_dispatcher()
        else:
            self.rebuilder.prepare_local_dispatcher()

        for item, _, error in self.rebuilder.run():
            status = 'OK' if error is None else 'error'
            yield (self.rebuilder.string_from_item(item), status, error)
Example #3
0
 def send_chunk_job(self, target, irreparable=False):
     """
     Send a "content broken" event, to trigger the
     reconstruction of the chunk.
     """
     item = (self.api.namespace, target.cid, target.content_id,
             target.chunk)
     event = BlobRebuilder.task_event_from_item(item)
     if irreparable:
         event['data']['irreparable'] = irreparable
     self.error_sender.send_job(json.dumps(event))
     self.error_sender.job_done()  # Don't expect any response
Example #4
0
class RawxRebuildCommand(SingleServiceCommandMixin, ServiceRebuildCommand):
    """Rebuild the chunks that were hosted on a specific rawx service."""

    tool_class = BlobRebuilder
    columns = ('Chunk', 'Status', 'Errors')

    def get_parser(self, prog_name):
        parser = super(RawxRebuildCommand, self).get_parser(prog_name)
        SingleServiceCommandMixin.patch_parser(self, parser)

        # Options shared by local and distributed modes.
        parser.add_argument(
            '--rdir-fetch-limit',
            type=int,
            help='Maximum number of entries returned in each rdir response. '
            '(default=%d)' % self.tool_class.DEFAULT_RDIR_FETCH_LIMIT)

        # Options available only for a local rebuild.
        if not self.distributed:
            parser.add_argument('--dry-run',
                                action='store_true',
                                help='Display actions but do nothing. '
                                '(default=%s)' %
                                self.tool_class.DEFAULT_DRY_RUN)
            parser.add_argument(
                '--delete-faulty-chunks',
                action='store_true',
                help='Try to delete faulty chunks after they have been '
                'rebuilt elsewhere. This option is useful if the chunks '
                'you are rebuilding are not actually missing but are '
                'corrupted. '
                '(default=%s)' % self.tool_class.DEFAULT_TRY_CHUNK_DELETE)

        return parser

    def _take_action(self, parsed_args):
        """Configure and run the rebuilder, yielding one row per item."""
        # Options shared by local and distributed modes.
        self.tool_conf['rdir_fetch_limit'] = parsed_args.rdir_fetch_limit
        if not self.distributed:
            # These options only make sense for a local rebuild.
            self.tool_conf['dry_run'] = parsed_args.dry_run
            self.tool_conf['try_chunk_delete'] = \
                parsed_args.delete_faulty_chunks

        self.rebuilder = BlobRebuilder(self.tool_conf,
                                       service_id=parsed_args.service,
                                       logger=self.logger)
        if self.distributed:
            self.rebuilder.prepare_distributed_dispatcher()
        else:
            self.rebuilder.prepare_local_dispatcher()

        for item, _, error in self.rebuilder.run():
            status = 'OK' if error is None else 'error'
            yield (self.rebuilder.string_from_item(item), status, error)

    def take_action(self, parsed_args):
        SingleServiceCommandMixin.check_and_load_parsed_args(
            self, self.app, parsed_args)
        return super(RawxRebuildCommand, self).take_action(parsed_args)
Example #5
0
class ChunkRebuildCommand(ItemRebuildCommand):
    """Rebuild a list of chunks read from an input file."""

    tool_class = BlobRebuilder
    columns = ('Chunk', 'Status', 'Errors')

    def get_parser(self, prog_name):
        parser = super(ChunkRebuildCommand, self).get_parser(prog_name)

        parser.add_argument(
            '--input-file',
            help='Read chunks from this file instead of rdir. '
            'Each line should be formatted like '
            '"container_id|content_id|short_chunk_id_or_position".')

        # Options available only for a local rebuild.
        if not self.distributed:
            parser.add_argument('--dry-run',
                                action='store_true',
                                help='Display actions but do nothing. '
                                '(default=%s)' %
                                self.tool_class.DEFAULT_DRY_RUN)
            parser.add_argument(
                '--delete-faulty-chunks',
                action='store_true',
                help='Try to delete faulty chunks after they have been '
                'rebuilt elsewhere. This option is useful if the chunks '
                'you are rebuilding are not actually missing but are '
                'corrupted. '
                '(default=%s)' % self.tool_class.DEFAULT_TRY_CHUNK_DELETE)

        return parser

    def _take_action(self, parsed_args):
        """Configure and run the rebuilder, yielding one row per item."""
        if not self.distributed:
            # These options only make sense for a local rebuild.
            self.tool_conf['dry_run'] = parsed_args.dry_run
            self.tool_conf['try_chunk_delete'] = \
                parsed_args.delete_faulty_chunks

        self.rebuilder = BlobRebuilder(self.tool_conf,
                                       input_file=parsed_args.input_file,
                                       logger=self.logger)
        if self.distributed:
            self.rebuilder.prepare_distributed_dispatcher()
        else:
            self.rebuilder.prepare_local_dispatcher()

        for item, _, error in self.rebuilder.run():
            status = 'OK' if error is None else 'error'
            yield (self.rebuilder.string_from_item(item), status, error)

    def take_action(self, parsed_args):
        # An input file is mandatory for this command.
        if not parsed_args.input_file:
            raise ValueError('Missing input file')
        return super(ChunkRebuildCommand, self).take_action(parsed_args)
Example #6
0
    def test_rebuild_old_chunk(self):
        """Rebuild a chunk stored in the old (pre-fullpath) on-disk format.

        Downgrades every chunk of the object to the legacy layout, deletes
        one of them, runs the rebuilder on it, then checks that the rebuilt
        chunk has the same position, size, hash and data, and that its
        headers have been upgraded to the current format.
        """
        # Downgrade all chunks to the legacy on-disk layout.
        for c in self.chunks:
            convert_to_old_chunk(self._chunk_path(c), self.account,
                                 self.container, self.path, self.version,
                                 self.content_id)

        # Pick a random chunk, keep a copy of its headers and data, then
        # remove it from disk to simulate a lost chunk.
        chunk = random.choice(self.chunks)
        chunk_volume = chunk['url'].split('/')[2]
        chunk_id = chunk['url'].split('/')[3]
        chunk_headers, chunk_stream = self.blob_client.chunk_get(
            chunk['url'], check_headers=False)
        os.remove(self._chunk_path(chunk))
        chunks_kept = list(self.chunks)
        chunks_kept.remove(chunk)

        conf = self.conf.copy()
        # The test platform may not have enough rawx services to pick a
        # different one for the rebuilt chunk.
        conf['allow_same_rawx'] = True
        rebuilder = BlobRebuilder(conf, service_id=chunk_volume)
        rebuilder_worker = rebuilder.create_worker(None, None)
        rebuilder_worker._process_item(
            (self.ns, self.cid, self.content_id, chunk_id))

        _, new_chunks = self.api.object_locate(self.account, self.container,
                                               self.path)

        self.assertEqual(len(new_chunks), len(chunks_kept) + 1)
        # Exactly one chunk must be new: the rebuilt one.
        url_kept = [c['url'] for c in chunks_kept]
        new_chunk = None
        for c in new_chunks:
            if c['url'] not in url_kept:
                self.assertIsNone(new_chunk)
                new_chunk = c

        # Cannot check if the URL is different: it may be the same since we
        # generate predictible chunk IDs.
        # self.assertNotEqual(chunk['real_url'], new_chunk['real_url'])
        # self.assertNotEqual(chunk['url'], new_chunk['url'])
        self.assertEqual(chunk['pos'], new_chunk['pos'])
        self.assertEqual(chunk['size'], new_chunk['size'])
        self.assertEqual(chunk['hash'], new_chunk['hash'])

        # The rebuilt chunk must hold the exact same data.
        new_chunk_headers, new_chunk_stream = self.blob_client.chunk_get(
            new_chunk['url'])
        chunk_data = b''.join(chunk_stream)
        new_chunk_data = b''.join(new_chunk_stream)
        self.assertEqual(chunk_data, new_chunk_data)
        # The rebuilt chunk must carry the new-style fullpath header.
        fullpath = encode_fullpath(self.account, self.container, self.path,
                                   self.version, self.content_id)
        self.assertEqual(fullpath, new_chunk_headers['full_path'])
        del new_chunk_headers['full_path']
        # Since we generate predictible chunk IDs, they can be equal
        # self.assertNotEqual(chunk_headers['chunk_id'],
        #                     new_chunk_headers['chunk_id'])
        # We could compare the modification time of the chunks,
        # but unfortunately they have a 1s resolution...
        # self.assertNotEqual(chunk_headers['chunk_mtime'],
        #                     new_chunk_headers['chunk_mtime'])
        new_chunk_id = new_chunk['url'].split('/')[3]
        self.assertEqual(new_chunk_id, new_chunk_headers['chunk_id'])
        del chunk_headers['chunk_id']
        del new_chunk_headers['chunk_id']
        self.assertEqual(OIO_VERSION, new_chunk_headers['oio_version'])
        del chunk_headers['oio_version']
        del new_chunk_headers['oio_version']
        del chunk_headers['chunk_mtime']
        del new_chunk_headers['chunk_mtime']
        # All remaining headers must be unchanged by the rebuild.
        self.assertEqual(chunk_headers, new_chunk_headers)