def test_replay_content(kafka_server, kafka_prefix, kafka_consumer_group):
    objstorage1 = get_objstorage(cls="memory")
    objstorage2 = get_objstorage(cls="memory")

    writer = get_journal_writer(
        cls="kafka",
        brokers=[kafka_server],
        client_id="kafka_writer",
        prefix=kafka_prefix,
        anonymize=False,
    )

    for content in CONTENTS:
        objstorage1.add(content.data)
        writer.write_addition("content", content)

    replayer = JournalClient(
        brokers=kafka_server,
        group_id=kafka_consumer_group,
        prefix=kafka_prefix,
        stop_on_eof=True,
        # stop_after_objects=len(objects),
    )

    worker_fn = functools.partial(process_replay_objects_content,
                                  src=objstorage1,
                                  dst=objstorage2)
    replayer.process(worker_fn)
    # only contents with status "visible" will be copied to objstorage2
    expected_objstorage_state = {
        c.sha1: c.data
        for c in CONTENTS if c.status == "visible"
    }

    assert expected_objstorage_state == objstorage2.state

def test_random_generator_objstorage():
    sto = get_objstorage("random", {})
    assert sto

    blobs = [sto.get(None) for i in range(100)]
    lengths = [len(x) for x in blobs]
    assert max(lengths) <= 55056238
Example #3
def storage(request, postgresql):
    marker = request.node.get_closest_marker("shard_max_size")
    if marker is None:
        shard_max_size = 1024
    else:
        shard_max_size = marker.args[0]
    dsn = (
        f"postgres://{postgresql.info.user}"
        f":@{postgresql.info.host}:{postgresql.info.port}"
    )
    storage = get_objstorage(
        cls="winery",
        base_dsn=dsn,
        shard_dsn=dsn,
        shard_max_size=shard_max_size,
        throttle_write=200 * 1024 * 1024,
        throttle_read=100 * 1024 * 1024,
    )
    yield storage
    storage.winery.uninit()
    #
    # pytest-postgresql will not remove databases that it did not
    # create between tests (only at the very end).
    #
    d = DatabaseAdmin(dsn)
    for database in d.list_databases():
        if database != postgresql.info.dbname and database != "tests_tmpl":
            DatabaseAdmin(dsn, database).drop_database()
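
The fixture above reads an optional shard_max_size marker from the requesting
test, falling back to a 1024-byte shard size. A minimal usage sketch, assuming
pytest and the fixture above (the test name and size are hypothetical):

import pytest

@pytest.mark.shard_max_size(10 * 1024 * 1024)
def test_winery_with_larger_shards(storage):
    # The fixture picked up marker.args[0], so shards roll over at 10 MiB
    # instead of the 1024-byte default.
    obj_id = storage.add(b"some content")
    assert storage.get(obj_id) == b"some content"
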
 def rw(self):
     self.storage = get_objstorage(
         cls="winery",
         base_dsn=self.args["base_dsn"],
         shard_dsn=self.args["shard_dsn"],
         shard_max_size=self.args["shard_max_size"],
         throttle_read=self.args["throttle_read"],
         throttle_write=self.args["throttle_write"],
         output_dir=self.args.get("output_dir"),
     )
     self.payloads_define()
     random_content = open("/dev/urandom", "rb")
     logger.info(f"Worker(rw, {os.getpid()}): start")
     start = time.time()
     count = 0
     while len(self.storage.winery.packers) == 0:
         content = random_content.read(random.choice(self.payloads))
         obj_id = self.storage.add(content=content)
         if self.stats.stats_active:
             self.stats.stats_write(obj_id, content)
         count += 1
     logger.info(f"Worker(rw, {os.getpid()}): packing {count} objects")
     packer = self.storage.winery.packers[0]
     packer.join()
     assert packer.exitcode == 0
     elapsed = time.time() - start
     logger.info(f"Worker(rw, {os.getpid()}): finished ({elapsed:.2f}s)")

 def _ro(self):
     self.storage = get_objstorage(
         cls="winery",
         readonly=True,
         base_dsn=self.args["base_dsn"],
         shard_dsn=self.args["shard_dsn"],
         shard_max_size=self.args["shard_max_size"],
         throttle_read=self.args["throttle_read"],
         throttle_write=self.args["throttle_write"],
         output_dir=self.args.get("output_dir"),
     )
     with self.storage.winery.base.db.cursor() as c:
         while True:
             c.execute(
                 "SELECT signature FROM signature2shard WHERE inflight = FALSE "
                 "ORDER BY random() LIMIT %s",
                 (self.args["ro_worker_max_request"], ),
             )
             if c.rowcount > 0:
                 break
             logger.info(f"Worker(ro, {os.getpid()}): empty, waiting")
             time.sleep(1)
         logger.info(
             f"Worker(ro, {os.getpid()}): requesting {c.rowcount} objects")
         start = time.time()
         for row in c:
             obj_id = row[0].tobytes()
             content = self.storage.get(obj_id)
             assert content is not None
             if self.stats.stats_active:
                 self.stats.stats_read(obj_id, content)
         elapsed = time.time() - start
         logger.info(
             f"Worker(ro, {os.getpid()}): finished ({elapsed:.2f}s)")
Example #6
 def setUp(self):
     super().setUp()
     self.base_dir = tempfile.mkdtemp()
     os.mkdir(os.path.join(self.base_dir, "root1"))
     os.mkdir(os.path.join(self.base_dir, "root2"))
     storage_config = {
         "cls": "striping",
         "args": {
             "objstorages": [
                 {
                     "cls": "pathslicing",
                     "args": {
                         "root": os.path.join(self.base_dir, "root1"),
                         "slicing": "0:2",
                         "allow_delete": True,
                     },
                 },
                 {
                     "cls": "pathslicing",
                     "args": {
                         "root": os.path.join(self.base_dir, "root2"),
                         "slicing": "0:2",
                         "allow_delete": True,
                     },
                 },
             ]
         },
     }
     self.storage = get_objstorage(**storage_config)
Example #7
 def filter_storage(self, sconf):
     return get_objstorage(
         "filtered",
         {
             "storage_conf": sconf,
             "filters_conf": [id_prefix(self.prefix)]
         },
     )

def test_random_generator_objstorage_list_content():
    sto = get_objstorage("random", {"total": 100})
    assert isinstance(sto.list_content(), Iterator)

    assert list(sto.list_content()) == [b"%d" % i for i in range(1, 101)]
    assert list(
        sto.list_content(limit=10)) == [b"%d" % i for i in range(1, 11)]
    assert list(sto.list_content(
        last_obj_id=b"10", limit=10)) == [b"%d" % i for i in range(11, 21)]
Example #9
def fsck(ctx):
    """Check the objstorage is not corrupted."""
    from swh.objstorage.factory import get_objstorage

    objstorage = get_objstorage(**ctx.obj["config"]["objstorage"])
    for obj_id in objstorage:
        try:
            objstorage.check(obj_id)
        except objstorage.Error as err:
            logging.error(err)
Example #10
 def test_pathslicing_objstorage(self):
     conf = {
         "cls": "pathslicing",
         "args": {
             "root": self.path,
             "slicing": "0:2/0:5"
         }
     }
     st = get_objstorage(**conf)
     self.assertTrue(isinstance(st, PathSlicingObjStorage))
Example #11
def obj_storage(swh_indexer_config):
    """An instance of in-memory objstorage that gets injected into all indexers
    classes.

    """
    objstorage = get_objstorage(**swh_indexer_config["objstorage"])
    fill_obj_storage(objstorage)
    with patch.dict(
        "swh.objstorage.factory._STORAGE_CLASSES", {"memory": lambda: objstorage}
    ):
        yield objstorage

 def setUp(self):
     super().setUp()
     self.slicing = "0:2/2:4/4:6"
     self.tmpdir = tempfile.mkdtemp()
     self.storage = get_objstorage(
         "pathslicing",
         {
             "root": self.tmpdir,
             "slicing": self.slicing,
             "compression": self.compression,
         },
     )
Example #13
    def __init__(self) -> None:
        self.config = load_from_envvar(DEFAULT_CONFIG)
        self.storage = get_storage(**self.config["storage"])
        self.objstorage = get_objstorage(**self.config["objstorage"])
        self.compute_checksums = self.config["compute_checksums"]
        self.recompute_checksums = self.config["recompute_checksums"]
        self.batch_size_retrieve_content = self.config[
            "batch_size_retrieve_content"]
        self.batch_size_update = self.config["batch_size_update"]
        self.log = logging.getLogger("swh.indexer.rehash")

        if not self.compute_checksums:
            raise ValueError("Checksums list should not be empty.")

def build_objstorage():
    """Build an HTTPReadOnlyObjStorage suitable for tests

    This instantiates 2 ObjStorages: one HTTPReadOnlyObjStorage (the "front"
    one under test) and one InMemoryObjStorage (which actually stores the
    test content), and installs a requests-mock fixture to route HTTP
    requests from the HTTPReadOnlyObjStorage to the InMemoryObjStorage.

    Also fills the backend storage with 100 objects.
    """
    sto_back = get_objstorage(cls="memory")
    objids = []
    for i in range(100):
        objids.append(sto_back.add(f"some content {i}".encode()))

    url = "http://127.0.0.1/content/"
    sto_front = get_objstorage(cls="http", url=url)
    mock = fixture.Fixture()
    mock.setUp()

    def get_cb(request, context):
        dirname, basename = request.path.rsplit("/", 1)
        objid = bytes.fromhex(basename)
        if dirname == "/content" and objid in sto_back:
            return sto_back.get(objid)
        context.status_code = 404

    def head_cb(request, context):
        dirname, basename = request.path.rsplit("/", 1)
        objid = bytes.fromhex(basename)
        if dirname != "/content" or objid not in sto_back:
            context.status_code = 404
            return b"Not Found"
        return b"Found"

    mock.register_uri(requests_mock.GET, requests_mock.ANY, content=get_cb)
    mock.register_uri(requests_mock.HEAD, requests_mock.ANY, content=head_cb)

    return sto_front, sto_back, objids
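
A minimal usage sketch for the helper above (the test name is hypothetical; it
assumes the HTTP "front" objstorage proxies reads to the mocked in-memory
backend):

def test_http_objstorage_roundtrip():
    sto_front, sto_back, objids = build_objstorage()
    for obj_id in objids:
        # every object stored in the backend should be readable through the
        # HTTP read-only front-end
        assert sto_front.get(obj_id) == sto_back.get(obj_id)
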
Example #15
 def setUp(self):
     super().setUp()
     self.tmpdir = tempfile.mkdtemp()
     pstorage = {
         "cls": "pathslicing",
         "root": self.tmpdir,
         "slicing": "0:5",
     }
     base_storage = get_objstorage(**pstorage)
     base_storage.id = compute_hash
     self.storage = get_objstorage("filtered",
                                   storage_conf=pstorage,
                                   filters_conf=[read_only()])
     self.valid_content = b"pre-existing content"
     self.invalid_content = b"invalid_content"
     self.true_invalid_content = b"Anything that is not correct"
     self.absent_content = b"non-existent content"
     # Create a valid content.
     self.valid_id = base_storage.add(self.valid_content)
     # Create an invalid id and add a content with it.
     self.invalid_id = base_storage.id(self.true_invalid_content)
     base_storage.add(self.invalid_content, obj_id=self.invalid_id)
     # Compute an id for a non-existing content.
     self.absent_id = base_storage.id(self.absent_content)
Example #16
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.config = {
            "objstorage": {
                "cls": "pathslicing",
                "root": self.tmpdir,
                "slicing": "0:1/0:5",
                "allow_delete": True,
            },
            "client_max_size": 8 * 1024 * 1024,
        }

        self.app = app
        super().setUp()
        self.storage = get_objstorage("remote", {"url": self.url()})
Example #17
def test_bwcompat_args(monkeypatch):
    monkeypatch.setattr(
        swh.objstorage.backends.azure,
        "ContainerClient",
        get_MockContainerClient(),
    )

    with pytest.deprecated_call():
        objs = get_objstorage(
            "azure",
            {
                "account_name": "account_name",
                "api_secret_key": base64.b64encode(b"account_key"),
                "container_name": "container_name",
            },
        )

    assert objs is not None
Example #18
def import_directories(ctx, directory):
    """Import a local directory in an existing objstorage."""
    from swh.objstorage.factory import get_objstorage

    objstorage = get_objstorage(**ctx.obj["config"]["objstorage"])
    nobj = 0
    volume = 0
    t0 = time.time()
    for dirname in directory:
        for root, _dirs, files in os.walk(dirname):
            for name in files:
                path = os.path.join(root, name)
                with open(path, "rb") as f:
                    objstorage.add(f.read())
                    volume += os.stat(path).st_size
                    nobj += 1
    click.echo("Imported %d files for a volume of %s bytes in %d seconds" %
               (nobj, volume, time.time() - t0))
Example #19
def test_bwcompat_args_prefixed(monkeypatch):
    monkeypatch.setattr(
        swh.objstorage.backends.azure,
        "ContainerClient",
        get_MockContainerClient(),
    )

    accounts = {
        prefix: {
            "account_name": f"account_name{prefix}",
            "api_secret_key": base64.b64encode(b"account_key"),
            "container_name": "container_name",
        }
        for prefix in "0123456789abcdef"
    }

    with pytest.deprecated_call():
        objs = get_objstorage("azure-prefixed", {"accounts": accounts})

    assert objs is not None
Example #20
    def setUp(self):
        super().setUp()
        self.ContainerClient = get_MockContainerClient()
        patcher = patch("swh.objstorage.backends.azure.ContainerClient",
                        self.ContainerClient)
        patcher.start()
        self.addCleanup(patcher.stop)

        patcher = patch("swh.objstorage.backends.azure.AsyncContainerClient",
                        self.ContainerClient)
        patcher.start()
        self.addCleanup(patcher.stop)

        self.accounts = {}
        for prefix in "0123456789abcdef":
            self.accounts[
                prefix] = "https://bogus-container-url.example/" + prefix

        self.storage = get_objstorage("azure-prefixed",
                                      {"accounts": self.accounts})
Example #21
    def setUp(self):
        super().setUp()
        ContainerClient = get_MockContainerClient()
        patcher = patch("swh.objstorage.backends.azure.ContainerClient",
                        ContainerClient)
        patcher.start()
        self.addCleanup(patcher.stop)

        patcher = patch("swh.objstorage.backends.azure.AsyncContainerClient",
                        ContainerClient)
        patcher.start()
        self.addCleanup(patcher.stop)

        self.storage = get_objstorage(
            "azure",
            {
                "container_url": "https://bogus-container-url.example",
                "compression": self.compression,
            },
        )
Example #22
    def prepare(self) -> None:
        """Prepare the indexer's needed runtime configuration.
           Without this step, the indexer cannot possibly run.

        """
        config_storage = self.config.get("storage")
        if config_storage:
            self.storage = get_storage(**config_storage)

        self.objstorage = get_objstorage(**self.config["objstorage"])

        idx_storage = self.config[INDEXER_CFG_KEY]
        self.idx_storage = get_indexer_storage(**idx_storage)

        _log = logging.getLogger("requests.packages.urllib3.connectionpool")
        _log.setLevel(logging.WARN)
        self.log = logging.getLogger("swh.indexer")

        if self.USE_TOOLS:
            self.tools = list(self.register_tools(self.config.get("tools",
                                                                  [])))
        self.results = []
Example #23
def test_replay_statsd(kafka_server, kafka_prefix, kafka_consumer_group,
                       statsd):
    objstorage1 = get_objstorage(cls="memory")
    objstorage2 = get_objstorage(cls="memory")

    writer = get_journal_writer(
        cls="kafka",
        brokers=[kafka_server],
        client_id="kafka_writer",
        prefix=kafka_prefix,
        anonymize=False,
    )

    # Fill the source objstorage with a bunch of content objects. In the end,
    # there should be 2 content objects for each possible replaying decision
    # (i.e. skipped, excluded, in_dst, not_in_src, failed and copied):
    # contents[0:2] are properly copied
    # contents[2:4] are excluded
    # contents[4:6] are in dst
    # contents[6:8] are hidden
    contents = [
        Content.from_data(f"foo{i}".encode(),
                          status="hidden" if 6 <= i < 8 else "visible")
        for i in range(8)
    ]

    for content in contents:
        objstorage1.add(content.data)
        writer.write_addition("content", content)
    excluded = [c.sha1 for c in contents[2:4]]

    def exclude_fn(cnt_d):
        return cnt_d["sha1"] in excluded

    for content in contents[4:6]:
        objstorage2.add(content.data)

    replayer = JournalClient(
        brokers=kafka_server,
        group_id=kafka_consumer_group,
        prefix=kafka_prefix,
        stop_on_eof=True,
        # stop_after_objects=len(objects),
    )

    worker_fn = functools.partial(
        process_replay_objects_content,
        src=objstorage1,
        dst=objstorage2,
        exclude_fn=exclude_fn,
    )
    replayer.process(worker_fn)

    # We cannot expect any order from replayed objects, so statsd reports
    # won't be sorted according to contents; we just count the expected
    # occurrences of each statsd message.
    prefix = "swh_content_replayer"
    expected_reports = {
        # 4 because 2 for the copied objects + 2 for the in_dst ones
        f"^{prefix}_retries_total:1[|]c[|]#attempt:1,operation:obj_in_objstorage$":
        4,
        f"^{prefix}_retries_total:1[|]c[|]#attempt:1,operation:get_object$":
        2,
        f"^{prefix}_retries_total:1[|]c[|]#attempt:1,operation:put_object$":
        2,
        f"^{prefix}_duration_seconds:[0-9]+[.][0-9]+[|]ms[|]#request:get$":
        2,
        f"^{prefix}_duration_seconds:[0-9]+[.][0-9]+[|]ms[|]#request:put$":
        2,
        f"^{prefix}_bytes:4[|]c$":
        2,
    }
    decisions = ("copied", "skipped", "excluded", "in_dst", "not_in_src",
                 "failed")
    decision_re = (
        "^swh_content_replayer_operations_total:1[|]c[|]#decision:(?P<decision>"
        + "|".join(decisions) + ")(?P<extras>,.+)?$")

    operations = dict.fromkeys(decisions, 0)
    reports = dict.fromkeys(expected_reports, 0)

    for report in (r.decode() for r in statsd.socket.payloads):
        m = re.match(decision_re, report)
        if m:
            operations[m.group("decision")] += 1
        else:
            for expected in expected_reports:
                m = re.match(expected, report)
                if m:
                    reports[expected] += 1

    assert reports == expected_reports

    assert operations["skipped"] == 2
    assert operations["excluded"] == 2
    assert operations["in_dst"] == 2
    assert operations["copied"] == 2
    # TODO:
    assert operations["not_in_src"] == 0
    assert operations["failed"] == 0
Example #24
 def __init__(self, **objstorage):
     self.objstorage = get_objstorage(**objstorage)
Example #25
    def test_prefixedazure_instantiation_inconsistent_prefixes(self):
        self.accounts["00"] = self.accounts["0"]

        with self.assertRaisesRegex(ValueError, "Inconsistent prefixes"):
            get_objstorage("azure-prefixed", {"accounts": self.accounts})
Example #26
    def test_prefixedazure_instantiation_missing_prefixes(self):
        del self.accounts["d"]
        del self.accounts["e"]

        with self.assertRaisesRegex(ValueError, "Missing prefixes"):
            get_objstorage("azure-prefixed", {"accounts": self.accounts})

 def setUp(self):
     super().setUp()
     self.storage = get_objstorage(cls="memory", args={})
Example #28
def content_replay(ctx, stop_after_objects, exclude_sha1_file, check_dst, concurrency):
    """Fill a destination Object Storage using a journal stream.

    This is typically used for a mirror configuration, by reading a Journal
    and retrieving objects from an existing source ObjStorage.

    There can be several 'replayers' filling a given ObjStorage as long as they
    use the same ``group-id``. You can use the ``KAFKA_GROUP_INSTANCE_ID``
    environment variable to use KIP-345 static group membership.

    This service retrieves object ids to copy from the 'content' topic. It
    will only copy an object's content if the object's description in the
    kafka message has status:visible set.

    ``--exclude-sha1-file`` may be used to exclude some hashes to speed-up the
    replay in case many of the contents are already in the destination
    objstorage. It must contain a concatenation of all (sha1) hashes,
    and it must be sorted.
    This file will not be fully loaded into memory at any given time,
    so it can be arbitrarily large.

    ``--check-dst`` sets whether the replayer should check in the destination
    ObjStorage before copying an object. You can turn that off if you know
    you're copying to an empty ObjStorage.

    ``--concurrency N`` sets the number of threads in charge of copying blob
    objects from the source objstorage to the destination one. Using a large
    concurrency value makes sense if both the source and destination
    objstorages support highly parallel workloads. Make sure not to set the
    ``batch_size`` configuration option too low for the concurrency to be
    actually useful (each batch of kafka messages is dispatched among the
    threads).

    The expected configuration file should have 3 sections:

    - objstorage: the source object storage from which to retrieve objects to
      copy; this objstorage can (and should) be a read-only objstorage,

      https://docs.softwareheritage.org/devel/apidoc/swh.objstorage.html

    - objstorage_dst: the destination objstorage in which objects will be
      written into,

    - journal_client: the configuration of the kafka journal from which the
      `content` topic will be consumed to get the list of content objects to
      copy from the source objstorage to the destination one.

      https://docs.softwareheritage.org/devel/apidoc/swh.journal.client.html

    In addition to these 3 mandatory sections, an optional 'replayer' section
    can be provided with an 'error_reporter' config entry allowing one to
    specify Redis connection parameters that will be used to report objects
    that could not be copied, e.g.::

      objstorage:
        [...]
      objstorage_dst:
        [...]
      journal_client:
        [...]
      replayer:
        error_reporter:
          host: redis.local
          port: 6379
          db: 1

    """
    import functools
    import mmap

    from swh.journal.client import get_journal_client
    from swh.model.model import SHA1_SIZE
    from swh.objstorage.factory import get_objstorage
    from swh.objstorage.replayer.replay import (
        is_hash_in_bytearray,
        process_replay_objects_content,
    )

    conf = ctx.obj["config"]
    try:
        objstorage_src = get_objstorage(**conf.pop("objstorage"))
    except KeyError:
        ctx.fail("You must have a source objstorage configured in " "your config file.")
    try:
        objstorage_dst = get_objstorage(**conf.pop("objstorage_dst"))
    except KeyError:
        ctx.fail(
            "You must have a destination objstorage configured " "in your config file."
        )

    if exclude_sha1_file:
        map_ = mmap.mmap(exclude_sha1_file.fileno(), 0, prot=mmap.PROT_READ)
        if map_.size() % SHA1_SIZE != 0:
            ctx.fail(
                "--exclude-sha1 must link to a file whose size is an "
                "exact multiple of %d bytes." % SHA1_SIZE
            )
        nb_excluded_hashes = int(map_.size() / SHA1_SIZE)

        def exclude_fn(obj):
            return is_hash_in_bytearray(obj["sha1"], map_, nb_excluded_hashes)

    else:
        exclude_fn = None

    journal_cfg = conf.pop("journal_client")
    replayer_cfg = conf.pop("replayer", {})
    if "error_reporter" in replayer_cfg:
        from redis import Redis

        from swh.objstorage.replayer import replay

        replay.REPORTER = Redis(**replayer_cfg.get("error_reporter")).set

    client = get_journal_client(
        **journal_cfg, stop_after_objects=stop_after_objects, object_types=("content",),
    )
    worker_fn = functools.partial(
        process_replay_objects_content,
        src=objstorage_src,
        dst=objstorage_dst,
        exclude_fn=exclude_fn,
        check_dst=check_dst,
        concurrency=concurrency,
    )

    if notify:
        notify("READY=1")

    try:
        client.process(worker_fn)
    except KeyboardInterrupt:
        ctx.exit(0)
    else:
        print("Done.")
    finally:
        if notify:
            notify("STOPPING=1")
        client.close()
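
The --exclude-sha1-file format described above (a sorted concatenation of raw
sha1 digests) can be produced with a short helper; a sketch, assuming a list of
hex-encoded sha1 strings (the helper name is hypothetical):

def write_exclude_sha1_file(hex_sha1s, path):
    # sort the 20-byte digests and concatenate them, the layout that
    # is_hash_in_bytearray binary-searches over the mmap'ed file
    digests = sorted(bytes.fromhex(h) for h in hex_sha1s)
    with open(path, "wb") as f:
        f.write(b"".join(digests))
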
def test_random_generator_objstorage_total():
    sto = get_objstorage("random", {"total": 5})
    assert len([x for x in sto]) == 5

def test_random_generator_objstorage_size():
    sto = get_objstorage("random", {"filesize": 10})
    for i in range(10):
        assert len(sto.get(None)) == 10