コード例 #1
0
    def setUpClass(cls, num_replicas=3):
        """Start a benchmark partition, a set of frontends, and a test client.

        Also prepares boundary-case backup ids, random backup data/pin, and
        the benchmark counters used by the tests.
        """
        super().setUpClass()

        cls.enclave_name = "test"
        cls.partition = Partition(
            cls.ca,
            replicas=num_replicas,
            config_file="replica.benchmark.yml",
            debug=False)
        gprint("Started service %s" % cls.partition.service_id)
        gprint("Started partition %s" % cls.partition.get_spec())
        gprint()

        # One frontend per even port, starting at 1337.
        frontend_count = 1
        ports = range(1337, 1337 + 2 * frontend_count, 2)
        cls.frontends = [cls.start_frontend(port) for port in ports]

        cls.client = KbupdClient(cls.frontends[0], cls.enclave_name,
                                 cls.partition.service_id)

        # Edge-case ids (min, max, the two midpoints) plus six random ids.
        edge_ids = [0, 2**256 - 1, (2**256 - 1) // 2, (2**256 - 1) // 2 + 1]
        random_picks = [random.randint(0, 2**256 - 1) for _ in range(6)]
        cls.backup_ids = tuple(backup_id_to_str(i)
                               for i in edge_ids + random_picks)
        cls.backup_data = random_id(BACKUP_DATA_LENGTH)
        cls.backup_pin = random_id(32)

        cls.request_count = 10000
        cls.backup_count = 0
コード例 #2
0
def send_valid_requests(test):
    """Exercise the full backup/restore/delete lifecycle for every backup id.

    For each id in ``test.backup_ids`` the sequence asserts the expected
    status string for: valid_from rejection, backup+restore round trip,
    pin mismatch, token reuse/mismatch, tries exhaustion, and deletion.
    Afterwards it re-checks backup/restore for every backup data length.
    The order of requests matters: tokens returned by one request are fed
    into the next.
    """
    client = test.client

    for backup_id in test.backup_ids:
        # test backup valid_from checking
        client.request(r"status=NotYetValid", "backup",
                           backup_id, test.backup_pin, test.backup_data, 2, valid_from=2**64-1)
        # test backup and restore
        client.request(r"status=Ok", "backup",
                           backup_id, test.backup_pin, test.backup_data, 2)
        result = client.request(r"status=Ok", "restore",
                                    backup_id, test.backup_pin)
        test.assertEqual(result.get("data"), test.backup_data)
        token = result["token"]
        # test pin mismatch
        client.request(r"status=PinMismatch", "restore",
                           backup_id, random_id(32), token=token)
        # test restore with token reuse
        client.request(r"status=TokenMismatch", "restore",
                           backup_id, test.backup_pin, token=token)
        # test restore with random token
        client.request(r"status=Missing", "restore",
                           backup_id, test.backup_pin, token=random_id(32))
        # test restore with creation_token reuse
        # (first half of the old token kept, second half randomized)
        result = client.request(r"status=TokenMismatch", "restore",
                                    backup_id, test.backup_pin, token=token[:32] + random_id(16))
        token = result["token"]
        # test restore valid_from checking
        client.request(r"status=NotYetValid", "restore",
                           backup_id, test.backup_pin, token=token, valid_from=2**64-1)
        # test restore after above tries decrement
        result = client.request(r"status=Ok", "restore",
                                    backup_id, test.backup_pin, token=token)
        test.assertEqual(result.get("data"), test.backup_data)
        # test restore token mismatch
        client.request(r"status=TokenMismatch", "restore",
                           backup_id, test.backup_pin, token=token)
        # test deletion on tries=0
        client.request(r"status=Missing", "restore",
                           backup_id, random_id(32))
        # test deletion persistence
        client.request(r"status=Missing", "restore",
                           backup_id, test.backup_pin)
        client.request(r"", "delete", backup_id)

    # test with different backup data lengths
    # (backup_data is hex: two characters per byte, hence the * 2 slice)
    for backup_data_length in range(BACKUP_DATA_LENGTH):
        backup_id = test.backup_ids[0]
        backup_data = test.backup_data[:backup_data_length * 2]
        # test backup and restore
        client.request(r"status=Ok", "backup",
                           backup_id, test.backup_pin, backup_data, 1)
        result = client.request(r"status=Ok", "restore",
                                    backup_id, test.backup_pin)
        test.assertEqual(result.get("data"), backup_data)
        client.request(r"", "delete", backup_id)
コード例 #3
0
    def setUpClass(cls):
        """Start two partitions via a live split, then one frontend and a client.

        The second partition is produced by splitting the first; the
        pause/resume calls below deliberately exercise the split state
        machine before both sides complete the partitioning handshake.
        """
        super().setUpClass()

        cls.partitions = []
        cls.enclave_name = "test"

        # First (source) partition.
        partition = Partition(cls.ca)
        cls.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started partition %s" % partition.get_spec())
        gprint()

        # Split off and start the second (destination) partition, then
        # drive the source through resume/pause/resume before waiting for
        # both sides to finish.  The order of these calls is significant.
        partition = partition.split_partition()
        partition.start_partition()
        cls.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started 2nd partition %s" % partition.get_spec())
        gprint()
        cls.partitions[0].wait_partition_started_source()
        cls.partitions[0].resume_partition()
        cls.partitions[0].pause_partition()
        cls.partitions[0].resume_partition()
        cls.partitions[0].wait_partition_source()
        partition.wait_partition_destination()
        partition.finish_partition()
        cls.partitions[0].finish_partition()

        # Frontend serving both partitions (specs joined by ';').
        cls.frontend = Kbupd(1337, "frontend", cls.ca, "--enclave-name",
                             cls.enclave_name, "--max-backup-data-length",
                             str(BACKUP_DATA_LENGTH), "--partitions",
                             ';'.join([p.get_spec() for p in cls.partitions]))
        gprint("Started frontend %s" % cls.frontend.node_id)
        gprint()

        # Edge-case ids (min, max, midpoints) plus six random ids.
        cls.backup_ids = (
            backup_id_to_str(0),
            backup_id_to_str(2**256 - 1),
            backup_id_to_str((2**256 - 1) // 2),
            backup_id_to_str((2**256 - 1) // 2 + 1),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
        )
        cls.backup_data = random_id(BACKUP_DATA_LENGTH)
        cls.backup_pin = random_id(32)

        cls.client = KbupdClient(cls.frontend, cls.enclave_name,
                                 cls.partitions[0].service_id)
コード例 #4
0
    def setUpClass(cls):
        """Start a storage-limited benchmark partition, one frontend, and a client.

        Prepares boundary-case backup ids plus random backup data and pin.
        """
        super().setUpClass()

        cls.enclave_name = "test"
        cls.partition = Partition(
            cls.ca,
            replicas=3,
            config_file="replica.benchmark.yml",
            debug=False,
            storage_size=10)
        gprint("Started service %s" % cls.partition.service_id)
        gprint("Started partition %s" % cls.partition.get_spec())
        gprint()

        cls.frontend = Kbupd(
            1337, "frontend", cls.ca,
            "--enclave-name", cls.enclave_name,
            "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
            "--partitions", cls.partition.get_spec())

        gprint("Started frontend %s" % cls.frontend.node_id)
        gprint()

        cls.client = KbupdClient(cls.frontend, cls.enclave_name,
                                 cls.partition.service_id)

        # Edge-case ids (min, max, the two midpoints) plus six random ids.
        fixed_ids = [0, 2**256 - 1, (2**256 - 1) // 2, (2**256 - 1) // 2 + 1]
        random_ids = [random.randint(0, 2**256 - 1) for _ in range(6)]
        cls.backup_ids = tuple(backup_id_to_str(i)
                               for i in fixed_ids + random_ids)
        cls.backup_data = backup_id_to_str(random.randint(0, 2**256 - 1))
        cls.backup_pin = random_id(32)
コード例 #5
0
def prepare_input_fifo(size, content_func=random_bytes):
    """
    Create temporary FIFO with content generated by `content_func`

    Returns a ``(file_name, fifo_name)`` pair.  The regular file keeps a
    copy of the generated data so callers can compare it against an output
    file later; a background thread streams it into the FIFO.  Neither
    path is cleaned up here — callers unlink both.
    """

    # Write data first to a file and then from there to FIFO
    # (.. so that we can compare contents with the output file later)

    f = tempfile.NamedTemporaryFile(delete=False)  # delete=False: caller unlinks
    f.write(content_func(size))
    f.close()
    f_fn = f.name

    # FIFO gets a random name in the system temp directory.
    fifo_fn = os.path.join(tempfile.gettempdir(), random_id())
    os.mkfifo(fifo_fn)

    def writer():
        # Feed the file into the FIFO; open() blocks until a reader appears.
        # NOTE(review): text-mode "r"/"w" with binary content — assumes a
        # POSIX platform where text mode is a no-op; confirm.
        with open(f_fn, "r") as f_fd:
            with open(fifo_fn, "w") as fifo_fd:
                for line in f_fd:
                    fifo_fd.write(line)

    threading.Thread(target=writer).start()

    print "Input file: fifo=", fifo_fn, "file=", f_fn
    return f_fn, fifo_fn
コード例 #6
0
def put_and_compare_file(size, content_func):
    """
    Round-trip a generated file through the CLI and verify it.

    Writes `size` bytes produced by `content_func` to a temp file, PUTs it
    under a random object name, GETs it back into a second temp file, and
    checks both files match.  Both temp files are removed afterwards.
    """

    obj = random_id()
    in_file = prepare_input_file(size, content_func)
    out_file = prepare_output_file()

    put_rc = call(["./veintidos.py",
                   "--pool", POOL_NAME,
                   "put", obj,
                   in_file])
    eq(0, put_rc)

    get_rc = call(["./veintidos.py",
                   "--pool", POOL_NAME,
                   "get", obj,
                   out_file])
    eq(0, get_rc)

    eq_file(in_file, out_file)

    os.unlink(in_file)
    os.unlink(out_file)
コード例 #7
0
def prepare_input_fifo(size, content_func=random_bytes):
    """
    Create temporary FIFO with content generated by `content_func`

    Returns ``(file_name, fifo_name)``.  The regular file holds a copy of
    the data for later comparison; a background thread feeds it into the
    FIFO.  Callers are responsible for unlinking both paths.
    """

    # Write data first to a file and then from there to FIFO
    # (.. so that we can compare contents with the output file later)

    f = tempfile.NamedTemporaryFile(delete=False)  # delete=False: caller unlinks
    f.write(content_func(size))
    f.close()
    f_fn = f.name

    # FIFO gets a random name in the system temp directory.
    fifo_fn = os.path.join(tempfile.gettempdir(), random_id())
    os.mkfifo(fifo_fn)

    def writer():
        # Feed the file into the FIFO; open() blocks until a reader appears.
        # NOTE(review): text-mode "r"/"w" with binary content — assumes a
        # POSIX platform where text mode is a no-op; confirm.
        with open(f_fn, "r") as f_fd:
            with open(fifo_fn, "w") as fifo_fd:
                for line in f_fd:
                    fifo_fd.write(line)

    threading.Thread(target=writer).start()

    print "Input file: fifo=", fifo_fn, "file=", f_fn
    return f_fn, fifo_fn
コード例 #8
0
    def __init__(self, name, **kwargs):
        """Create a named run; u.validate_run_name raises on a bad name."""
        self.name = name
        u.validate_run_name(name)

        self.logdir_ = None  # populated later by setup_logdir()
        self.kwargs = kwargs
        self.jobs = []
        # Random suffix avoids colliding with a previous run's placement group.
        self.placement_group_name = "{}-{}".format(self.name, u.random_id())
コード例 #9
0
def setup_module():
    """Connect to the cluster and create a uniquely named test pool."""
    global rados, POOL_NAME

    rados = Rados(conffile='')
    rados.connect()

    # Random pool name keeps concurrent test runs from clashing.
    POOL_NAME = random_id()
    rados.create_pool(POOL_NAME)
コード例 #10
0
    def test_00_valid_requests(self):
        """Unknown ids must restore as Missing; then run the shared valid-request suite."""
        client = self.client

        # Nothing has been backed up yet, so every restore must miss.
        for bid in self.backup_ids:
            client.request(r"status=Missing", "restore",
                           bid, self.backup_pin, token=random_id(32))

        send_valid_requests(self)
コード例 #11
0
def setup_module():
    """Open a cluster connection and create a fresh, randomly named pool."""
    global rados
    global POOL_NAME

    rados = Rados(conffile='')
    rados.connect()

    POOL_NAME = random_id()
    rados.create_pool(POOL_NAME)
コード例 #12
0
def setup_module():
    """Connect to the cluster, create a random-named pool, and open an ioctx on it."""
    global rados, pool_name, ioctx

    rados = Rados(conffile='')
    rados.connect()

    # Random pool name keeps concurrent test runs from clashing.
    pool_name = random_id()
    rados.create_pool(pool_name)

    ioctx = rados.open_ioctx(pool_name)
コード例 #13
0
def test_chunker_versions():
    """
    Test: versions / head_version returns version of last write_full. Single write_full
    """
    chunker = Chunker(CAS(ioctx_cas), ioctx_index)

    obj_name = random_id()
    payload = StringIO(random_bytes(10 * 1024**1))

    version = chunker.write_full(obj_name, payload)

    # Exactly one version exists, and it is both head and first.
    eq(len(chunker.versions(obj_name)), 1)
    eq(version, chunker.head_version(obj_name))
    eq(version, chunker.versions(obj_name)[0])
コード例 #14
0
def test_chunker_put_get_single():
    """
    Test: read(write(x)) = x for x filling only a single chunk
    """
    chunker = Chunker(CAS(ioctx_cas), ioctx_index)

    obj_name = random_id()
    src = StringIO(random_bytes(42))  # 42 bytes: well under one chunk

    version = chunker.write_full(obj_name, src)

    sink = StringIO()
    chunker.read_full(obj_name, sink, version)

    eq_buffer(src.getvalue(), sink.getvalue())
コード例 #15
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_partial_read_past_size():
    """
    Test: partial reads past *file* size
    """

    chunker = Chunker(CAS(ioctx_cas), ioctx_index)

    obj_name = random_id()
    size = chunker.chunk_size
    version = chunker.write_full(obj_name, StringIO("\x00" * size))

    # Reading a full chunk starting exactly at EOF must yield nothing.
    buf = chunker.read(obj_name, size, size, version)

    eq_buffer(buf, "")
コード例 #16
0
def test_chunker_put_get_multiple_fraction():
    """
    Test: read(write(x)) = x for x spread over multiple chunks. With partially filled chunks
    """
    chunker = Chunker(CAS(ioctx_cas), ioctx_index)

    obj_name = random_id()
    # 1.5 chunks: one full chunk plus one half-filled chunk.
    src = StringIO(random_bytes(int(chunker.chunk_size * 1.5)))

    version = chunker.write_full(obj_name, src)

    sink = StringIO()
    chunker.read_full(obj_name, sink, version)

    eq_buffer(src.getvalue(), sink.getvalue())
コード例 #17
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_versions():
    """
    Test: versions / head_version returns version of last write_full. Single write_full
    """
    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    obj_name = random_id()
    data_in = StringIO(random_bytes(10 * 1024))  # 10 KiB payload

    version = chunker.write_full(obj_name, data_in)

    # One write => one version, which is both the head and the first entry.
    eq(len(chunker.versions(obj_name)), 1)
    eq(version, chunker.head_version(obj_name))
    eq(version, chunker.versions(obj_name)[0])
コード例 #18
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_put_get_multiple_fraction():
    """
    Test: read(write(x)) = x for x spread over multiple chunks. With partially filled chunks
    """
    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    # One and a half chunks, so the final chunk is only partially filled.
    payload = random_bytes(int(chunker.chunk_size * 1.5))
    obj_name = random_id()

    version = chunker.write_full(obj_name, StringIO(payload))

    data_out = StringIO()
    chunker.read_full(obj_name, data_out, version)

    eq_buffer(payload, data_out.getvalue())
コード例 #19
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_put_get_single():
    """
    Test: read(write(x)) = x for x filling only a single chunk
    """
    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    payload = random_bytes(42)  # small enough for a single chunk
    obj_name = random_id()

    version = chunker.write_full(obj_name, StringIO(payload))

    data_out = StringIO()
    chunker.read_full(obj_name, data_out, version)

    eq_buffer(payload, data_out.getvalue())
コード例 #20
0
def test_chunker_partial_read_past_size():
    """
    Test: partial reads past *file* size
    """

    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    obj_name = random_id()
    chunk = chunker.chunk_size
    version = chunker.write_full(obj_name, StringIO("\x00" * chunk))

    # A chunk-sized read starting at EOF must come back empty.
    eq_buffer(chunker.read(obj_name, chunk, chunk, version), "")
コード例 #21
0
def put_and_compare_file(size, content_func):
    """
    Create file with `size` and content generated by `content_func`.
    Use CLI to PUT and GET that file. Compare afterwards
    """

    obj = random_id()
    in_file = prepare_input_file(size, content_func)
    out_file = prepare_output_file()

    # PUT then GET through the CLI; both must exit 0.
    for action, path in (("put", in_file), ("get", out_file)):
        rc = call(["./veintidos.py", "--pool", POOL_NAME, action, obj, path])
        eq(0, rc)

    eq_file(in_file, out_file)

    os.unlink(in_file)
    os.unlink(out_file)
コード例 #22
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_partial_read():
    """
    Test: partial reads using chunker.read with different input and weird extents

    Writes two chunks (one of 0x00 bytes, one of 0xFF bytes) and reads a
    chunk-sized window straddling the chunk boundary; the result must be
    half of each chunk.
    """
    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    data_in = StringIO("\x00" * chunker.chunk_size +
                       "\xFF" * chunker.chunk_size)
    obj_name = random_id()

    version = chunker.write_full(obj_name, data_in)

    # Floor division: plain `/` yields a float under Python 3, which would
    # break both the read offset and the string repetition below.
    middle = chunker.chunk_size // 2

    buf = chunker.read(obj_name, chunker.chunk_size, middle, version)

    eq(len(buf), chunker.chunk_size)
    eq_buffer("\x00" * middle + "\xFF" * middle, buf)
コード例 #23
0
def test_chunker_partial_read():
    """
    Test: partial reads using chunker.read with different input and weird extents

    Writes a 0x00 chunk followed by a 0xFF chunk and reads a chunk-sized
    window centered on the boundary, expecting half of each.
    """
    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    data_in = StringIO("\x00" * chunker.chunk_size +
                       "\xFF" * chunker.chunk_size)
    obj_name = random_id()

    version = chunker.write_full(obj_name, data_in)

    # Floor division: plain `/` is float division under Python 3, which
    # would break the read offset and the string repetitions below.
    middle = chunker.chunk_size // 2

    buf = chunker.read(obj_name, chunker.chunk_size, middle, version)

    eq(len(buf), chunker.chunk_size)
    eq_buffer("\x00" * middle + "\xFF" * middle, buf)
コード例 #24
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_no_litter():
    """
    Test: Write and immediate remove should not leave any object behind
    """

    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    # Four chunks' worth of data so several CAS objects are created.
    data_in = StringIO(random_bytes(chunker.chunk_size*4))
    obj_name = random_id()

    chunker.write_full(obj_name, data_in)
    chunker.remove_all_versions(obj_name)

    # Both pools must be empty again after the remove.
    cas_objs = [x.key for x in ioctx_cas.list_objects()]
    index_objs = [x.key for x in ioctx_index.list_objects()]

    print "CAS objects left:", cas_objs
    print "Index objects left:", index_objs

    eq(len(cas_objs), 0)
    eq(len(index_objs), 0)
コード例 #25
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_multiple_versions():
    """
    Test: versions / head_version return version of last write_full. Multiple write_full
    """
    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    payload = random_bytes(42)
    obj_name = random_id()

    # Write the same payload five times; each write yields a new version.
    versions = tuple(chunker.write_full(obj_name, StringIO(payload))
                     for _ in range(5))

    eq(len(versions), len(chunker.versions(obj_name)))

    # The last write is the head; the first write is the oldest entry.
    eq(versions[-1], chunker.head_version(obj_name))
    eq(versions[0], chunker.versions(obj_name)[0])
コード例 #26
0
def test_chunker_no_litter():
    """
    Test: Write and immediate remove should not leave any object behind
    """

    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    # Four chunks' worth of data so several CAS objects are created.
    data_in = StringIO(random_bytes(chunker.chunk_size * 4))
    obj_name = random_id()

    chunker.write_full(obj_name, data_in)
    chunker.remove_all_versions(obj_name)

    # Both pools must be empty again after the remove.
    cas_objs = [x.key for x in ioctx_cas.list_objects()]
    index_objs = [x.key for x in ioctx_index.list_objects()]

    print "CAS objects left:", cas_objs
    print "Index objects left:", index_objs

    eq(len(cas_objs), 0)
    eq(len(index_objs), 0)
コード例 #27
0
def test_chunker_multiple_versions():
    """
    Test: versions / head_version return version of last write_full. Multiple write_full
    """
    chunker = Chunker(CAS(ioctx_cas), ioctx_index)

    obj_name = random_id()
    payload = random_bytes(42)

    # Five writes of the same payload => five distinct versions.
    versions = []
    for _ in range(5):
        versions.append(chunker.write_full(obj_name, StringIO(payload)))
    versions = tuple(versions)

    eq(len(versions), len(chunker.versions(obj_name)))

    eq(versions[-1], chunker.head_version(obj_name))
    eq(versions[0], chunker.versions(obj_name)[0])
コード例 #28
0
ファイル: test_cas.py プロジェクト: irq0/veintidos
def test_chunker_remove():
    """
    Test: remove actually removes

    - `remove_version(write_full)`: No versions, but index object
    - `write_full, write_full, remove_all_versions`: Index object gone
    """

    cas = CAS(ioctx_cas)
    chunker = Chunker(cas, ioctx_index)

    payload = random_bytes(42)
    obj_name = random_id()

    # Removing the only version leaves the index object with no head.
    only_version = chunker.write_full(obj_name, StringIO(payload))
    chunker.remove_version(obj_name, only_version)
    eq(chunker.head_version(obj_name), None)

    # Removing all versions removes the index object itself.
    for _ in range(2):
        chunker.write_full(obj_name, StringIO(payload))
    chunker.remove_all_versions(obj_name)

    assert_raises(ObjectNotFound, chunker.head_version, obj_name)
コード例 #29
0
def test_chunker_remove():
    """
    Test: remove actually removes

    - `remove_version(write_full)`: No versions, but index object
    - `write_full, write_full, remove_all_versions`: Index object gone
    """

    chunker = Chunker(CAS(ioctx_cas), ioctx_index)

    obj_name = random_id()
    payload = random_bytes(42)

    # Case 1: drop the single version — index stays, head becomes None.
    first = chunker.write_full(obj_name, StringIO(payload))
    chunker.remove_version(obj_name, first)
    eq(chunker.head_version(obj_name), None)

    # Case 2: two writes then remove_all_versions — index object is gone.
    chunker.write_full(obj_name, StringIO(payload))
    chunker.write_full(obj_name, StringIO(payload))
    chunker.remove_all_versions(obj_name)
    assert_raises(ObjectNotFound, chunker.head_version, obj_name)
コード例 #30
0
    def test_00_invalid_requests(self):
        """Malformed requests must be rejected with a control error.

        Covers bad backup ids (empty / 31 bytes / 33 bytes), empty or
        over-length data, pins, tokens, and bad service ids, for both
        backup and restore requests.
        """
        client = self.client
        service_id = self.partitions[0].service_id

        # Every malformed request is expected to trip this control error.
        bad_input = r"ControlErrorSignal"
        # backup: empty backup id
        client.request(bad_input,
                       "backup",
                       "",
                       self.backup_pin,
                       self.backup_data,
                       1,
                       token=random_id(32))
        # backup: 31-byte (too short) backup id
        client.request(bad_input,
                       "backup",
                       random_id(31),
                       self.backup_pin,
                       self.backup_data,
                       1,
                       token=random_id(32))
        # backup: 33-byte (too long) backup id
        client.request(bad_input,
                       "backup",
                       random_id(33),
                       self.backup_pin,
                       self.backup_data,
                       1,
                       token=random_id(32))
        for backup_id in self.backup_ids:
            # backup: empty backup data
            client.request(bad_input,
                           "backup",
                           backup_id,
                           self.backup_pin,
                           "",
                           1,
                           token=random_id(32))
            # backup: 33-byte backup data (invalid length)
            client.request(bad_input,
                           "backup",
                           backup_id,
                           self.backup_pin,
                           random_id(33),
                           1,
                           token=random_id(32))
            # backup: empty pin
            client.request(bad_input,
                           "backup",
                           backup_id,
                           "",
                           self.backup_data,
                           1,
                           token=random_id(32))
            # backup: 33-byte (too long) pin
            client.request(bad_input,
                           "backup",
                           backup_id,
                           random_id(33),
                           self.backup_data,
                           1,
                           token=random_id(32))
            # backup: empty service id
            client.request(bad_input,
                           "backup",
                           backup_id,
                           self.backup_pin,
                           self.backup_data,
                           1,
                           service_id="",
                           token=random_id(32))
            # restore: empty backup id
            client.request(bad_input,
                           "restore",
                           "",
                           self.backup_pin,
                           token=random_id(32))
            # restore: 31-byte (too short) backup id
            client.request(bad_input,
                           "restore",
                           random_id(31),
                           self.backup_pin,
                           token=random_id(32))
            # restore: 33-byte (too long) backup id
            client.request(bad_input,
                           "restore",
                           random_id(33),
                           self.backup_pin,
                           token=random_id(32))
            # restore: empty pin
            client.request(bad_input,
                           "restore",
                           backup_id,
                           "",
                           token=random_id(32))
            # restore: 33-byte (too long) pin
            client.request(bad_input,
                           "restore",
                           backup_id,
                           random_id(33),
                           token=random_id(32))
            # restore: empty service id
            client.request(bad_input,
                           "restore",
                           backup_id,
                           self.backup_pin,
                           service_id="",
                           token=random_id(32))
            # backup: truncated service id
            client.request(bad_input,
                           "backup",
                           backup_id,
                           self.backup_pin,
                           self.backup_data,
                           1,
                           service_id=service_id[:62],
                           token=random_id(32))
            # backup: over-long service id
            client.request(bad_input,
                           "backup",
                           backup_id,
                           self.backup_pin,
                           self.backup_data,
                           1,
                           service_id=service_id + "00",
                           token=random_id(32))
            # backup: empty token
            client.request(bad_input,
                           "backup",
                           backup_id,
                           self.backup_pin,
                           self.backup_data,
                           1,
                           token="")
            # backup: 33-byte (too long) token
            client.request(bad_input,
                           "backup",
                           backup_id,
                           self.backup_pin,
                           self.backup_data,
                           1,
                           token=random_id(33))
            # restore: empty token
            client.request(bad_input,
                           "restore",
                           backup_id,
                           self.backup_pin,
                           token="")
            # restore: 33-byte (too long) token
            client.request(bad_input,
                           "restore",
                           backup_id,
                           self.backup_pin,
                           token=random_id(33))
            # backup with a well-formed but unknown service id is canceled
            # by the enclave rather than rejected as malformed.
            client.request(r"request canceled by enclave",
                           "backup",
                           backup_id,
                           self.backup_pin,
                           self.backup_data,
                           1,
                           service_id=random_id(32),
                           token=random_id(32))
コード例 #31
0
 def pubmsg(conn, event):
     """Forward an IRC channel message onto the internal event bus.

     Wraps the IRC event's author, joined argument text, and target
     channel into an eventbus.Message and pushes it asynchronously.
     `conn` is unused here; it matches the IRC handler signature.
     """
     msg = eventbus.Message(
         eventbus.AuthorInfo(event.source.nick, str(event.source),
                             None), [" ".join(event.arguments)],
         (util.config["irc"]["name"], event.target), util.random_id(), [])
     asyncio.create_task(eventbus.push(msg))
コード例 #32
0
def send_valid_requests(test):
    """Run every backup id through the backup/restore protocol lifecycle.

    For each id in ``test.backup_ids`` the sequence asserts the expected
    status for: valid_from rejection, backup+restore, pin mismatch, token
    reuse/mismatch, tries exhaustion, and deletion persistence.  Request
    order matters — tokens returned by one request feed the next.
    """
    client = test.client

    for backup_id in test.backup_ids:
        # test backup valid_from checking
        client.request(r"status=NotYetValid",
                       "backup",
                       backup_id,
                       test.backup_pin,
                       test.backup_data,
                       2,
                       valid_from=2**64 - 1)
        # test backup and restore
        client.request(r"status=Ok", "backup", backup_id, test.backup_pin,
                       test.backup_data, 2)
        token = client.request(r"status=Ok", "restore", backup_id,
                               test.backup_pin)
        # test pin mismatch
        client.request(r"status=PinMismatch",
                       "restore",
                       backup_id,
                       random_id(32),
                       token=token)
        # test restore with token reuse
        client.request(r"status=TokenMismatch",
                       "restore",
                       backup_id,
                       test.backup_pin,
                       token=token)
        # test restore with random token
        client.request(r"status=Missing",
                       "restore",
                       backup_id,
                       test.backup_pin,
                       token=random_id(32))
        # test restore with creation_token reuse
        # (first half of the old token kept, second half randomized)
        token = client.request(r"status=TokenMismatch",
                               "restore",
                               backup_id,
                               test.backup_pin,
                               token=token[:32] + random_id(16))
        # test restore valid_from checking
        client.request(r"status=NotYetValid",
                       "restore",
                       backup_id,
                       test.backup_pin,
                       token=token,
                       valid_from=2**64 - 1)
        # test restore after above tries decrement
        client.request(r"status=Ok",
                       "restore",
                       backup_id,
                       test.backup_pin,
                       token=token)
        # test restore token mismatch
        client.request(r"status=TokenMismatch",
                       "restore",
                       backup_id,
                       test.backup_pin,
                       token=token)
        # test deletion on tries=0
        client.request(r"status=Missing", "restore", backup_id, random_id(32))
        # test deletion persistence
        client.request(r"status=Missing", "restore", backup_id,
                       test.backup_pin)
コード例 #33
0
def create_job(run, job_name, num_tasks, env_name):
    """Create a job on *run* and block until it is ready.

    Args:
        run: run object providing ``make_job`` (ncluster-style API).
        job_name: name for the new job.
        num_tasks: number of tasks (instances) the job spans.
        env_name: conda environment to activate on each task.

    Returns:
        The ready job object.

    Relies on the module-level ``args`` namespace, ``launch_utils_lib``,
    ``u`` and ``DATA_ROOT``.
    """

    install_script = ''
    if args.install_script:
        with open(args.install_script, 'r') as f:
            install_script = f.read()

    ebs = launch_utils_lib.get_ebs_settings(
        use_iops=(args.attach_volume is None))

    # Pick the placement group name. BUG FIX: the original fell through to
    # an unconditional `if args.use_placement_group / else` after handling
    # the deprecated --placement_group flag, so an explicitly requested
    # placement group was silently clobbered with '' whenever
    # --use-placement-group was unset. The elif/else chain makes the three
    # cases mutually exclusive.
    if args.placement_group:
        print(
            "Warning, placement_group is deprecated, use --use-placement-group 1 for automatically picked placement group (same as run name)."
        )
        placement_group_name = args.placement_group
    elif args.use_placement_group:
        # use run+randomly generated names
        # add randomness to avoid reusing placement groups from previous run of
        # same name, which could've used different availability zone (illegal)
        placement_group_name = args.name + '-' + u.random_id()
    else:
        placement_group_name = ''

    job = run.make_job(job_name,
                       num_tasks=num_tasks,
                       ebs=ebs,
                       instance_type=args.instance_type,
                       install_script=install_script,
                       placement_group=placement_group_name,
                       use_spot=args.spot)
    job.wait_until_ready()
    print(job.connect_instructions)

    job.run_async_join('killall python || echo failed')  # kill previous run

    # mount_volume hardcoded to use data now
    # TODO: this should be global setting/constant instead
    assert DATA_ROOT.endswith('/data')
    if args.attach_volume:
        launch_utils_lib.mount_volume_data(job,
                                           tag=args.attach_volume,
                                           offset=args.volume_offset)

    if not args.use_local_conda:
        job.run_async_join(f'source activate {env_name}')
    else:
        # enable conda command
        job.run_async_join('. /home/ubuntu/anaconda3/etc/profile.d/conda.sh')
        job.run_async_join(
            f'conda activate {DATA_ROOT}/anaconda3/envs/{env_name}')

    # job.run_async_join('source activate pytorch_source', ignore_errors=True) # currently a bug in latest pytorch
    job.run_async_join(
        'ulimit -n 9000')  # to prevent tcp too many files open error

    # upload training sources
    for source_path in ('training/resnet.py',
                        'training/fp16util.py',
                        'training/autoaugment.py',
                        'training/dataloader.py',
                        'training/dataloader_performance.py',
                        'training/train_imagenet_nv.py',
                        'training/experimental_utils.py'):
        job.upload_async(source_path)

    # Sometimes get SSH session not active or "connection reset by peer"
    # bad internet?

    # Run the (slow, up to 1h) NVIDIA environment setup only if any task
    # has not already completed it (marker file from a previous run).
    setup_complete = [
        t.file_exists('/tmp/nv_setup_complete') for t in job.tasks
    ]
    if not all(setup_complete):
        job.upload_async('setup/setup_env_nv.sh')
        job.run_async_join('chmod +x setup_env_nv.sh')
        job.run_async_join('bash setup_env_nv.sh',
                           max_wait_sec=60 * 60,
                           check_interval=5)

    return job
コード例 #34
0
        for k, v in r.items():
            ans[k] += v
    with open(out, "a") as f:
        for k, v in ans.items():
            print("{} : {}".format(keys[k], v / runs), file=f)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run multiple simulations \
        remotely')

    parser.add_argument(
        "-n",
        "--name",
        type=str,
        default=random_id(),
        help="The name of the folder in which the results will be saved.")
    parser.add_argument("-r",
                        "--runs",
                        type=int,
                        default=0,
                        help="Number of times that the job runs.")
    parser.add_argument(
        "-notify",
        type=int,
        default=0,
        help="The PID of process to be notified when all jobs were dispached.")
    parser.add_argument("-k",
                        "--kill",
                        nargs='?',
                        action="store",
コード例 #35
0
    def do_test_partitioning(self, update_specs, move):
        """Exercise repartitioning (split or move) of the backup key range.

        Creates backups, splits or moves the last partition while verifying
        that restores succeed mid-migration, then finishes the migration and
        re-verifies data integrity and backup counts.

        Args:
            update_specs: if truthy, finish the partition hand-off (and
                restart the frontend with updated specs) BEFORE the final
                round of restore requests; otherwise finish it after.
            move: if truthy, move the whole partition instead of splitting
                its key range in half.
        """
        client = self.client

        # Seed the service with a batch of anonymous backups so the
        # per-partition counts below are meaningful.
        client.request(r"status=Ok", "backup", count=100)

        backup_ids = list(self.backup_ids)
        if not move:
            # For a split, also place IDs straddling the new split boundary
            # (quarter point of the key space) to exercise the edge.
            backup_ids.append(backup_id_to_str((2**256-1)//4))
            backup_ids.append(backup_id_to_str((2**256-1)//4+1))

        # Back up each known ID with 2 restore tries.
        for backup_id in backup_ids:
            client.request(r"status=Ok", "backup",
                               backup_id, self.backup_pin, self.backup_data, 2)

        # Snapshot totals before repartitioning so we can assert nothing
        # is lost (or exactly the expected amount is consumed) afterwards.
        pre_partition_count = sum([ i.get_backup_count() for i in self.partitions ])
        self.assertIsNotNone(pre_partition_count)
        pre_partition_specs =  [ p.get_spec() for p in self.partitions ]

        # Start the new partition as either a move target or a split of the
        # last existing partition.
        if move:
            partition = self.partitions[len(self.partitions)-1].move_partition()
        else:
            partition = self.partitions[len(self.partitions)-1].split_partition()
        self.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started 3rd partition %s" % partition.get_spec())
        gprint()

        # New partition has no assigned range yet during migration.
        partition_specs = pre_partition_specs + [partition.get_spec_no_range()]

        # Restart the frontend so it learns the in-migration topology.
        KbupdTestCase.frontend.kill()
        KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                    "--enclave-name", self.enclave_name,
                                    "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                    "--partitions", ';'.join(partition_specs),
                                    append_log=True)
        gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
        gprint()

        partition.start_partition()

        # Wait until the source partition (now second-to-last) has begun
        # transferring data to the new one.
        self.partitions[len(self.partitions)-2].wait_partition_started_source()

        # Restores must keep working while the migration is in progress.
        for backup_id in backup_ids:
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

        self.partitions[len(self.partitions)-2].resume_partition()

        if update_specs:
            # Finish the hand-off first, then verify restores afterwards.
            self.partitions[len(self.partitions)-2].wait_partition_source()
            partition.wait_partition_destination()
            partition.finish_partition()
            self.partitions[len(self.partitions)-2].finish_partition()
            # No backups may be lost by the migration itself.
            self.assertEqual(pre_partition_count, sum([ i.get_backup_count() for i in self.partitions ]))
            if move:
                # A move leaves the old source partition empty; retire it.
                self.partitions[len(self.partitions)-2].kill()
                del(self.partitions[len(self.partitions)-2])

            # Restart the frontend with the final (post-migration) specs.
            KbupdTestCase.frontend.kill()
            KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                        "--enclave-name", self.enclave_name,
                                        "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                        "--partitions", ';'.join([ p.get_spec() for p in self.partitions ]),
                                        append_log = True)
            gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
            gprint()

        for backup_id in backup_ids:
            # First restore (of 2 tries) succeeds with the right PIN.
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

            # Second try with a wrong PIN consumes the last try...
            result = client.request(r"status=PinMismatch", "restore",
                                        backup_id, random_id(32))
            token = result["token"]

            # ...so the backup is deleted and further restores fail.
            client.request(r"status=Missing", "restore",
                               backup_id, random_id(32), token=token)
            client.request(r"status=Missing", "restore",
                               backup_id, self.backup_pin, token=token)

        if not update_specs:
            # Finish the hand-off only now, after the restores above have
            # consumed (deleted) every known backup ID.
            self.partitions[len(self.partitions)-2].wait_partition_source()
            partition.wait_partition_destination()
            partition.finish_partition()
            self.partitions[len(self.partitions)-2].finish_partition()
            # Totals drop by exactly the backups deleted via exhausted tries.
            self.assertEqual(pre_partition_count - len(backup_ids),
                             sum([ i.get_backup_count() for i in self.partitions ]))
            if move:
                self.partitions[len(self.partitions)-2].kill()
                del(self.partitions[len(self.partitions)-2])
            KbupdTestCase.frontend.kill()
            KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                               "--enclave-name", self.enclave_name,
                                               "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                               "--partitions", ';'.join([ p.get_spec() for p in self.partitions ]),
                                               append_log = True)
            gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
            gprint()
コード例 #36
0
    def test_00_invalid_requests(self):
        """Verify malformed backup/restore requests are rejected.

        Each request carries an out-of-spec field (wrong-length backup ID,
        pin, service ID, or token) and must be rejected by the enclave with
        a ControlErrorSignal rather than processed; a syntactically valid
        request for the wrong service ID must be canceled instead.
        """
        client = self.client
        service_id = self.partitions[0].service_id

        # Expected response pattern for structurally invalid requests.
        bad_input = r"ControlErrorSignal"
        # test backup with empty backup ID
        client.request(bad_input, "backup",
                           "", self.backup_pin, self.backup_data, 1, token=random_id(32))
        # test backup with too-short backup ID
        client.request(bad_input, "backup",
                           random_id(31), self.backup_pin, self.backup_data, 1,
                           token=random_id(32))
        # test backup with too-long backup ID
        client.request(bad_input, "backup",
                           random_id(33), self.backup_pin, self.backup_data, 1,
                           token=random_id(32))
        for backup_id in self.backup_ids:
            # test backup with too-long data
            client.request(bad_input, "backup",
                               backup_id, self.backup_pin, random_id(BACKUP_DATA_LENGTH + 1), 1,
                               token=random_id(32))
            # test backup with empty pin
            client.request(bad_input, "backup",
                               backup_id, "", self.backup_data, 1,token=random_id(32))
            # test backup with too-long pin
            client.request(bad_input, "backup",
                               backup_id, random_id(33), self.backup_data, 1,
                               token=random_id(32))
            # test backup with empty Service ID
            client.request(bad_input, "backup",
                               backup_id, self.backup_pin, self.backup_data, 1,
                               service_id="", token=random_id(32))
            # test restore with empty Backup ID
            client.request(bad_input, "restore", "", self.backup_pin, token=random_id(32))
            # test restore with too-short Backup ID
            client.request(bad_input, "restore", random_id(31), self.backup_pin,
                               token=random_id(32))
            # test restore with too-long Backup ID
            client.request(bad_input, "restore", random_id(33), self.backup_pin,
                               token=random_id(32))
            # test restore with empty pin
            client.request(bad_input, "restore", backup_id, "", token=random_id(32))
            # test restore with too-long pin
            client.request(bad_input, "restore", backup_id, random_id(33),
                               token=random_id(32))
            # test restore with empty Service ID
            client.request(bad_input, "restore",
                               backup_id, self.backup_pin, service_id="",
                               token=random_id(32))
            # test backup with too-short Service ID
            client.request(bad_input, "backup",
                               backup_id, self.backup_pin, self.backup_data, 1,
                               service_id=service_id[:62],
                               token=random_id(32))
            # test backup with too-long Service ID
            client.request(bad_input, "backup",
                               backup_id, self.backup_pin, self.backup_data, 1,
                               service_id=service_id + "00",
                               token=random_id(32))
            # test backup with empty token
            client.request(bad_input, "backup",
                               backup_id, self.backup_pin, self.backup_data, 1,
                               token="")
            # test backup with too-long token
            client.request(bad_input, "backup",
                               backup_id, self.backup_pin, self.backup_data, 1,
                               token=random_id(33))
            # test restore with empty token
            client.request(bad_input, "restore", backup_id, self.backup_pin,
                               token="")
            # test restore with too-long token
            client.request(bad_input, "restore", backup_id, self.backup_pin,
                               token=random_id(33))
            # test backup with wrong (well-formed but unknown) Service ID:
            # the request is valid in shape, so it is canceled, not errored
            client.request(r"request canceled by enclave", "backup",
                               backup_id, self.backup_pin, self.backup_data, 1,
                               service_id=random_id(32), token=random_id(32))