Example 1
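    # Fixture: brings up a three-replica partition (storage_size=10) and one
    # frontend on port 1337, then fixes boundary and random 256-bit backup IDs.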
    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        cls.enclave_name = "test"
        cls.partition = Partition(cls.ca,
                                  replicas=3,
                                  config_file="replica.benchmark.yml",
                                  debug=False,
                                  storage_size=10)
        gprint("Started service %s" % cls.partition.service_id)
        gprint("Started partition %s" % cls.partition.get_spec())
        gprint()

        cls.frontend = Kbupd(1337, "frontend", cls.ca,
                             "--enclave-name", cls.enclave_name,
                             "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                             "--partitions", cls.partition.get_spec())

        gprint("Started frontend %s" % cls.frontend.node_id)
        gprint()

        cls.client = KbupdClient(cls.frontend, cls.enclave_name, cls.partition.service_id)

        cls.backup_ids = (backup_id_to_str(0),
                          backup_id_to_str(2**256-1),
                          backup_id_to_str((2**256-1)//2),
                          backup_id_to_str((2**256-1)//2+1),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)))
        cls.backup_data = backup_id_to_str(random.randint(0, 2**256-1))
        cls.backup_pin  = random_id(32)
Example 2
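    # Throughput benchmark: fires batches of backup requests through every
    # frontend via kbupctl until max_time elapses, printing a running rate.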
    def test_00_benchmark(self, max_time=30):
        cls = self.__class__

        cmd = [
            "client", "--enclave-name", self.enclave_name, "backup",
            "--request-count",
            str(self.request_count)
        ]
        backup_count = 0
        start_time = time.time()
        elapsed_time = 0
        while elapsed_time < max_time:
            kbupctls = []
            for frontend in self.frontends:
                self.client.log.write("CMD: " + " ".join(cmd) + "\n")
                self.client.log.flush()
                kbupctls.append(frontend.kbupctl_async(*cmd))
            for kbupctl in kbupctls:
                kbupctl_res = kbupctl.wait()
                if kbupctl_res != 0:
                    eprint()
                    eprint("TEST '%s' FAILED, returned %d" %
                           (" ".join(cmd), kbupctl_res))
                    raise Exception("Test failed")
            elapsed_time = time.time() - start_time
            backup_count += self.request_count * len(self.frontends)
            gprint("performed %7d backups in %6.03fs (%4d/s)" %
                   (backup_count, elapsed_time, backup_count / elapsed_time))
        cls.backup_count += backup_count
        gprint()
Example 3
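    # Transfer hook: kills a follower in the destination partition, then starts
    # the _cause_havoc thread to keep killing replicas during the move.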
    def _about_to_xfer(self):
        leader, followers = self.new_partition.get_replicas()
        # Kill follower
        self.new_partition.kill(followers[0])
        gprint("Killed dest partition follower", flush=True)

        self.thread = threading.Thread(target=self._cause_havoc, name='havoc')
        self.thread.start()
Example 4
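    # Starts a single frontend against the class-wide partition and returns it.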
    @classmethod
    def start_frontend(cls, port):
        frontend = Kbupd(port, "frontend", cls.ca,
                         "--enclave-name", cls.enclave_name,
                         "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                         "--partitions", cls.partition.get_spec(),
                         config_file="frontend.benchmark.yml",
                         debug=False)
        gprint("Started frontend %s" % frontend.node_id)
        gprint()
        return frontend
Example 5
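    # Variant of start_frontend that omits --max-backup-data-length.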
    @classmethod
    def start_frontend(cls, port):
        frontend = Kbupd(port,
                         "frontend",
                         cls.ca,
                         "--enclave-name",
                         cls.enclave_name,
                         "--partitions",
                         cls.partition.get_spec(),
                         config_file="frontend.benchmark.yml",
                         debug=False)
        gprint("Started frontend %s" % frontend.node_id)
        gprint()
        return frontend
Example 6
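    # Benchmark fixture: boots a partition with num_replicas replicas and a
    # pool of frontends on every other port from 1337, plus shared test data.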
    @classmethod
    def setUpClass(cls, num_replicas=3):
        super().setUpClass()

        cls.enclave_name = "test"
        cls.partition = Partition(cls.ca,
                                  replicas=num_replicas,
                                  config_file="replica.benchmark.yml",
                                  debug=False)
        gprint("Started service %s" % cls.partition.service_id)
        gprint("Started partition %s" % cls.partition.get_spec())
        gprint()

        cls.frontends = []
        frontend_count = 1
        for port in range(1337, 1337 + 2 * frontend_count, 2):
            cls.frontends.append(cls.start_frontend(port))

        cls.client = KbupdClient(cls.frontends[0], cls.enclave_name, cls.partition.service_id)

        cls.backup_ids = (backup_id_to_str(0),
                          backup_id_to_str(2**256-1),
                          backup_id_to_str((2**256-1)//2),
                          backup_id_to_str((2**256-1)//2+1),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)))
        cls.backup_data = random_id(BACKUP_DATA_LENGTH)
        cls.backup_pin  = random_id(32)

        cls.request_count = 10000
        cls.backup_count  = 0
Example 7
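    # Havoc thread: on a rough five-second cadence, kills the destination
    # partition's leader, then a source follower, then the source leader.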
    def _cause_havoc(self):
        cls = self.__class__

        time.sleep(5)

        start = time.monotonic()
        while True:
            leader, followers = self.new_partition.get_replicas()
            if leader:
                break
            time.sleep(1)

        # Kill leader
        self.new_partition.kill(leader)
        gprint("Killed dest partition leader", flush=True)

        now = time.monotonic()
        if now < start + 5:
            time.sleep(5 - (now - start))

        leader, followers = cls.partition.get_replicas()

        # Kill follower
        cls.partition.kill(followers[0])
        gprint("Killed source partition follower", flush=True)

        now = time.monotonic()  # refresh; the sleep and kill above took time
        if now < start + 10:
            time.sleep(10 - (now - start))

        while not leader:
            leader, followers = cls.partition.get_replicas()
            time.sleep(1)

        # Kill leader
        cls.partition.kill(leader)
        gprint("Killed source partition leader", flush=True)
Example 8
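    # Spawns `replicas` kbupd replica processes, each in its own net_cls
    # cgroup, waits for them to agree on a group ID, and blocks until the
    # raft logs report an elected leader.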
    def __init__(self,
                 ca,
                 first_id=backup_id_to_str(0),
                 last_id=backup_id_to_str(2**256 - 1),
                 source_partition=None,
                 replicas=3,
                 config_file=None,
                 debug=True,
                 storage_size=None):
        self.ca = ca
        self.first_id = first_id
        self.last_id = last_id
        self.config_file = config_file
        self.debug = debug
        self.peers = []

        source_nodes_cmd = []
        if source_partition:
            source_nodes_cmd.append("--firstid")
            source_nodes_cmd.append(first_id)
            source_nodes_cmd.append("--lastid")
            source_nodes_cmd.append(last_id)
            source_nodes_cmd.append("--source-nodes")
            source_nodes_cmd.append(source_partition.peer_addrs)

        storage_size_cmd = []
        if storage_size:
            storage_size_cmd.append("--storage-size")
            storage_size_cmd.append(str(storage_size))

        replica_ip_ports = ','.join([
            "127.0.0.1:%s" % (Partition.port + 1 + replica_num * 2)
            for replica_num in range(replicas)
        ])

        for num in range(replicas):
            net_cls_group = 'kbupd-%s' % num
            net_cls_id = (num * 2) + 31337
            create_cgroup(net_cls_group, net_cls_id)
            replica = Kbupd(Partition.port,
                            "replica",
                            self.ca,
                            "--replicas",
                            replica_ip_ports,
                            *source_nodes_cmd,
                            *storage_size_cmd,
                            config_file=config_file,
                            debug=debug,
                            net_cls_group=net_cls_group,
                            net_cls_id=net_cls_id)
            gprint("Started replica %s: %s (%s)" %
                   (num, replica.node_id, replica.enclave_lib))
            self.peers.append(replica)
            Partition.port += 2

        self.peers = sorted(self.peers,
                            key=lambda peer: peer.node_id,
                            reverse=True)
        self.peer_addrs = ','.join(
            "127.0.0.1:%s" % p.peer_port for p in self.peers)

        # Destination replicas don't have a service ID yet.
        for peer in self.peers:
            while True:
                peer.refresh_info()
                if hasattr(peer, "group_id"):
                    if hasattr(self, "group_id"):
                        assert self.group_id == peer.group_id
                    else:
                        self.group_id = peer.group_id
                        self.service_id = getattr(peer, "service_id",
                                                  "<no_service_id>")
                    break
                time.sleep(0.1)

        while not self.grep_logs(r"raft.*=== became leader at"):
            time.sleep(0.1)

        Partition.partitions.append(self)
Example 9
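    # Fixture that splits an initial partition in two, exercising the
    # pause/resume path mid-transfer, before starting a frontend over both.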
    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        cls.partitions = []
        cls.enclave_name = "test"

        partition = Partition(cls.ca)
        cls.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started partition %s" % partition.get_spec())
        gprint()

        partition = partition.split_partition()
        partition.start_partition()
        cls.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started 2nd partition %s" % partition.get_spec())
        gprint()
        cls.partitions[0].wait_partition_started_source()
        cls.partitions[0].resume_partition()
        cls.partitions[0].pause_partition()
        cls.partitions[0].resume_partition()
        cls.partitions[0].wait_partition_source()
        partition.wait_partition_destination()
        partition.finish_partition()
        cls.partitions[0].finish_partition()

        cls.frontend = Kbupd(1337, "frontend", cls.ca,
                             "--enclave-name", cls.enclave_name,
                             "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                             "--partitions", ';'.join(p.get_spec() for p in cls.partitions))
        gprint("Started frontend %s" % cls.frontend.node_id)
        gprint()

        cls.backup_ids = (backup_id_to_str(0),
                          backup_id_to_str(2**256-1),
                          backup_id_to_str((2**256-1)//2),
                          backup_id_to_str((2**256-1)//2+1),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)),
                          backup_id_to_str(random.randint(0, 2**256-1)))
        cls.backup_data = random_id(BACKUP_DATA_LENGTH)
        cls.backup_pin = random_id(32)

        cls.client = KbupdClient(cls.frontend, cls.enclave_name,
                                 cls.partitions[0].service_id)
Example 10
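    # Creates roughly num_keys backups, moves the partition to fresh replicas
    # while the havoc thread kills nodes, then verifies every known backup
    # still restores and the backup count survived the transfer.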
    def test_10_partition_transfer(self, num_keys=10**6):
        cls = self.__class__

        for backup_id in self.backup_ids:
            self.client.request(r"status=Ok", "backup", backup_id, self.backup_pin, self.backup_data, 1)
            cls.backup_count += 1

        backup_count  = 0
        start_time    = time.time()
        elapsed_time  = 0
        request_count = 50000
        cmd = ["client", "--enclave-name", self.enclave_name, "create",
               "--request-count", str(request_count), "--max-parallel", "5000"]
        while backup_count < num_keys:
            kbupctls = []
            for frontend in self.frontends:
                self.client.log.write("CMD: " + " ".join(cmd) + "\n")
                self.client.log.flush()
                kbupctls.append(frontend.kbupctl_async(*cmd))
            for kbupctl in kbupctls:
                kbupctl_res = kbupctl.wait()
                if kbupctl_res != 0:
                    eprint()
                    eprint("TEST '%s' FAILED, returned %d" % (" ".join(cmd), kbupctl_res))
                    raise Exception("Test failed")
            elapsed_time = time.time() - start_time
            backup_count += request_count * len(self.frontends)
            gprint("created %7d backups in %6.03fs (%4d/s)" % (backup_count, elapsed_time, backup_count / elapsed_time))
        cls.backup_count += backup_count

        self.assertEqual(self.partition.get_backup_count(), cls.backup_count)

        for frontend in cls.frontends:
            frontend.kill()

        self.new_partition = self.partition.move_partition()

        gprint("Started service %s" % self.new_partition.service_id)
        gprint("Started 2nd partition %s" % self.new_partition.get_spec())
        gprint()

        self.new_partition.start_partition()
        self.partition.wait_partition_started_source()
        self.partition.resume_partition()
        self._about_to_xfer()
        start_time = time.time()

        self.partition.wait_partition_source()
        self.new_partition.wait_partition_destination()
        elapsed_time = time.time() - start_time

        gprint("transferred %d backups in %.03fs (%4d/s)" % (self.backup_count, elapsed_time, self.backup_count / elapsed_time))
        gprint()

        self.new_partition.finish_partition()
        self.partition.finish_partition()
        self.partition.kill()
        cls.partition = self.new_partition
        del self.new_partition

        new_frontends = []
        for frontend in cls.frontends:
            new_frontends.append(self.start_frontend(frontend.control_port))
        cls.frontends = new_frontends

        for backup_id in self.backup_ids:
            result = self.client.request(r"status=Ok", "restore", backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

        self.assertEqual(self.partition.get_backup_count(), cls.backup_count)
Example 11
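    # Core partitioning test: records backups, splits or moves the last
    # partition, and checks restore/PinMismatch/Missing behavior, updating
    # the frontend's partition specs before or after the transfer completes.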
    def do_test_partitioning(self, update_specs, move):
        client = self.client

        client.request(r"status=Ok", "backup", count=100)

        backup_ids = list(self.backup_ids)
        if not move:
            backup_ids.append(backup_id_to_str((2**256-1)//4))
            backup_ids.append(backup_id_to_str((2**256-1)//4+1))

        for backup_id in backup_ids:
            client.request(r"status=Ok", "backup",
                               backup_id, self.backup_pin, self.backup_data, 2)

        pre_partition_count = sum(i.get_backup_count() for i in self.partitions)
        self.assertIsNotNone(pre_partition_count)
        pre_partition_specs = [p.get_spec() for p in self.partitions]

        if move:
            partition = self.partitions[-1].move_partition()
        else:
            partition = self.partitions[-1].split_partition()
        self.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started 3rd partition %s" % partition.get_spec())
        gprint()

        partition_specs = pre_partition_specs + [partition.get_spec_no_range()]

        KbupdTestCase.frontend.kill()
        KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                       "--enclave-name", self.enclave_name,
                                       "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                       "--partitions", ';'.join(partition_specs),
                                       append_log=True)
        gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
        gprint()

        partition.start_partition()

        self.partitions[-2].wait_partition_started_source()

        for backup_id in backup_ids:
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

        self.partitions[-2].resume_partition()

        if update_specs:
            self.partitions[-2].wait_partition_source()
            partition.wait_partition_destination()
            partition.finish_partition()
            self.partitions[-2].finish_partition()
            self.assertEqual(pre_partition_count,
                             sum(i.get_backup_count() for i in self.partitions))
            if move:
                self.partitions[-2].kill()
                del self.partitions[-2]

            KbupdTestCase.frontend.kill()
            KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                           "--enclave-name", self.enclave_name,
                                           "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                           "--partitions", ';'.join(p.get_spec() for p in self.partitions),
                                           append_log=True)
            gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
            gprint()

        for backup_id in backup_ids:
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

            result = client.request(r"status=PinMismatch", "restore",
                                        backup_id, random_id(32))
            token = result["token"]

            client.request(r"status=Missing", "restore",
                               backup_id, random_id(32), token=token)
            client.request(r"status=Missing", "restore",
                               backup_id, self.backup_pin, token=token)

        if not update_specs:
            self.partitions[-2].wait_partition_source()
            partition.wait_partition_destination()
            partition.finish_partition()
            self.partitions[-2].finish_partition()
            self.assertEqual(pre_partition_count - len(backup_ids),
                             sum(i.get_backup_count() for i in self.partitions))
            if move:
                self.partitions[-2].kill()
                del self.partitions[-2]
            KbupdTestCase.frontend.kill()
            KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                           "--enclave-name", self.enclave_name,
                                           "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                           "--partitions", ';'.join(p.get_spec() for p in self.partitions),
                                           append_log=True)
            gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
            gprint()