    def test_30_leader_change(self):
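        """Back up and restore a set of boundary and random backup IDs, kill the
        leading replica of each partition, and verify that every backup can
        still be restored after a new leader is elected."""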
        client = self.client
        backup_ids = list(self.backup_ids)
        backup_ids.append(backup_id_to_str((2**256-1)//4))
        backup_ids.append(backup_id_to_str((2**256-1)//4+1))
        backup_ids.append(backup_id_to_str(random.randint(0, 2**256-1)))
        backup_ids.append(backup_id_to_str(random.randint(0, 2**256-1)))

        for backup_id in backup_ids:
            client.request(r"status=Ok", "backup",
                               backup_id, self.backup_pin, self.backup_data, 1)
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

        for partition in self.partitions:
            while True:
                leader = partition.get_replicas()[0]
                if leader:
                    break
                time.sleep(1)
            leader.kill()

        for backup_id in backup_ids:
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)
    @classmethod
    def setUpClass(cls, num_replicas=3):
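        """Bring up a single partition of num_replicas replicas using the
        benchmark replica config, start one frontend and a client, and choose a
        fixed set of boundary and random backup IDs plus random backup data and
        a PIN for the tests to use."""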
        super().setUpClass()

        cls.enclave_name = "test"
        cls.partition = Partition(cls.ca,
                                  replicas=num_replicas,
                                  config_file="replica.benchmark.yml",
                                  debug=False)
        gprint("Started service %s" % cls.partition.service_id)
        gprint("Started partition %s" % cls.partition.get_spec())
        gprint()

        cls.frontends = []
        frontend_count = 1
        for port in range(1337, 1337 + 2 * frontend_count, 2):
            cls.frontends.append(cls.start_frontend(port))

        cls.client = KbupdClient(cls.frontends[0], cls.enclave_name, cls.partition.service_id)

        cls.backup_ids = (backup_id_to_str(0),
                          backup_id_to_str(2**256 - 1),
                          backup_id_to_str((2**256 - 1) // 2),
                          backup_id_to_str((2**256 - 1) // 2 + 1),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)))
        cls.backup_data = random_id(BACKUP_DATA_LENGTH)
        cls.backup_pin  = random_id(32)

        cls.request_count = 10000
        cls.backup_count  = 0
    @classmethod
    def setUpClass(cls):
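        """Start one partition, split it into a second partition, drive the
        resulting data transfer to completion, and start a frontend that spans
        both partitions."""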
        super().setUpClass()

        cls.partitions = []
        cls.enclave_name = "test"

        partition = Partition(cls.ca)
        cls.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started partition %s" % partition.get_spec())
        gprint()

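        # Split the first partition into a second one and drive the transfer to
        # completion, exercising pause/resume of the source partition on the way.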
        partition = partition.split_partition()
        partition.start_partition()
        cls.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started 2nd partition %s" % partition.get_spec())
        gprint()
        cls.partitions[0].wait_partition_started_source()
        cls.partitions[0].resume_partition()
        cls.partitions[0].pause_partition()
        cls.partitions[0].resume_partition()
        cls.partitions[0].wait_partition_source()
        partition.wait_partition_destination()
        partition.finish_partition()
        cls.partitions[0].finish_partition()

        cls.frontend = Kbupd(1337, "frontend", cls.ca, "--enclave-name",
                             cls.enclave_name, "--max-backup-data-length",
                             str(BACKUP_DATA_LENGTH), "--partitions",
                             ';'.join([p.get_spec() for p in cls.partitions]))
        gprint("Started frontend %s" % cls.frontend.node_id)
        gprint()

        cls.backup_ids = (
            backup_id_to_str(0),
            backup_id_to_str(2**256 - 1),
            backup_id_to_str((2**256 - 1) // 2),
            backup_id_to_str((2**256 - 1) // 2 + 1),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
            backup_id_to_str(random.randint(0, 2**256 - 1)),
        )
        cls.backup_data = random_id(BACKUP_DATA_LENGTH)
        cls.backup_pin = random_id(32)

        cls.client = KbupdClient(cls.frontend, cls.enclave_name,
                                 cls.partitions[0].service_id)
    def split_partition(self):
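        """Split this partition's backup-ID range at its midpoint: a new
        destination partition is created for the lower half and this partition
        keeps the upper half."""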
        last_id = str_to_backup_id(self.last_id)
        first_id = str_to_backup_id(self.first_id)
        split = first_id + (last_id - first_id) // 2
        new_partition = Partition(self.ca,
                                  self.first_id,
                                  backup_id_to_str(split),
                                  source_partition=self,
                                  replicas=len(self.peers),
                                  config_file=self.config_file,
                                  debug=self.debug)
        self.first_id = backup_id_to_str(split + 1)

        return new_partition
    def test_10_storage_size(self):
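        """Fill the partition's limited storage with backups, check that further
        backup requests are canceled by the enclave, and verify that deleting
        entries frees space for new backups that can then be restored."""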
        client = self.client

        for iteration in range(0, 2):
            with self.subTest(iteration=iteration):
                send_valid_requests(self)
                for backup_id in self.backup_ids:
                    client.request(r"status=Ok", "backup",
                                       backup_id, self.backup_pin, self.backup_data)

        backup_ids_2 = []

        backup_id = backup_id_to_str(random.randint(0, 2**256-1))
        client.request(r"request canceled by enclave", "backup",
                           backup_id, self.backup_pin, self.backup_data)

        client.request(r"", "delete", self.backup_ids[0])

        client.request(r"status=Ok", "backup",
                           backup_id, self.backup_pin, self.backup_data)
        result = client.request(r"status=Ok", "restore",
                                    backup_id, self.backup_pin)
        self.assertEqual(result.get("data"), self.backup_data)
        backup_ids_2.append(backup_id)

        backup_id = backup_id_to_str(random.randint(0, 2**256-1))
        client.request(r"request canceled by enclave", "backup",
                           backup_id, self.backup_pin, self.backup_data)

        for backup_id in self.backup_ids:
            client.request(r"", "delete", backup_id)

        for iteration in range(0, 9):
            with self.subTest(iteration=iteration):
                backup_id = backup_id_to_str(random.randint(0, 2**256-1))
                client.request(r"status=Ok", "backup",
                                   backup_id, self.backup_pin, self.backup_data)
                backup_ids_2.append(backup_id)

        backup_id = backup_id_to_str(random.randint(0, 2**256-1))
        client.request(r"request canceled by enclave", "backup",
                           backup_id, self.backup_pin, self.backup_data)

        for backup_id in backup_ids_2:
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)
    def test_10_reconnect(self):
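        """Store a backup, disconnect and reconnect every replica from every
        other replica (including itself), and verify the backup can still be
        restored."""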
        client = self.client
        backup_id = backup_id_to_str(random.randint(0, 2**256 - 1))

        client.request(r"status=Ok", "backup", backup_id, self.backup_pin,
                       self.backup_data, 1)
        client.request(r"status=Ok", "restore", backup_id, self.backup_pin)

        for partition in self.partitions:
            for peer in partition.peers:
                for other_partition in self.partitions:
                    for other_peer in other_partition.peers:
                        peer.disconnect_peer(other_peer.node_id)
                        peer.reconnect_peer(
                            other_peer.node_id,
                            "127.0.0.1:%s" % other_peer.peer_port)

        client.request(r"status=Ok", "restore", backup_id, self.backup_pin)
    def __init__(self,
                 ca,
                 first_id=backup_id_to_str(0),
                 last_id=backup_id_to_str(2**256 - 1),
                 source_partition=None,
                 replicas=3,
                 config_file=None,
                 debug=True,
                 storage_size=None):
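        """Start `replicas` kbupd replica processes serving the backup-ID range
        [first_id, last_id], optionally as the destination of a split/move from
        source_partition, then wait for them to agree on a group ID and elect a
        raft leader before registering the partition."""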
        self.ca = ca
        self.first_id = first_id
        self.last_id = last_id
        self.config_file = config_file
        self.debug = debug
        self.peers = []

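        # As a split/move destination, pass the target ID range and the source
        # partition's replica addresses on the command line.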
        source_nodes_cmd = []
        if source_partition:
            source_nodes_cmd.append("--firstid")
            source_nodes_cmd.append(first_id)
            source_nodes_cmd.append("--lastid")
            source_nodes_cmd.append(last_id)
            source_nodes_cmd.append("--source-nodes")
            source_nodes_cmd.append(source_partition.peer_addrs)

        storage_size_cmd = []
        if storage_size:
            storage_size_cmd.append("--storage-size")
            storage_size_cmd.append(str(storage_size))

        replica_ip_ports = ','.join([
            "127.0.0.1:%s" % (Partition.port + 1 + replica_num * 2)
            for replica_num in range(replicas)
        ])

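        # Each replica runs in its own net_cls cgroup so its network traffic can
        # be classified per replica.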
        for num in range(replicas):
            net_cls_group = 'kbupd-%s' % num
            net_cls_id = (num * 2) + 31337
            create_cgroup(net_cls_group, net_cls_id)
            replica = Kbupd(Partition.port,
                            "replica",
                            self.ca,
                            "--replicas",
                            replica_ip_ports,
                            config_file=config_file,
                            debug=debug,
                            net_cls_group=net_cls_group,
                            net_cls_id=net_cls_id,
                            *source_nodes_cmd,
                            *storage_size_cmd)
            gprint("Started replica %s: %s (%s)" %
                   (num, replica.node_id, replica.enclave_lib))
            self.peers.append(replica)
            Partition.port += 2

        self.peers = sorted(self.peers,
                            key=lambda peer: peer.node_id,
                            reverse=True)
        self.peer_addrs = ','.join(
            ["127.0.0.1:%s" % (p.peer_port) for p in self.peers])

        # Destination replicas don't have a service ID yet.
        for peer in self.peers:
            while True:
                peer.refresh_info()
                if hasattr(peer, "group_id"):
                    if hasattr(self, "group_id"):
                        assert (self.group_id == peer.group_id)
                    else:
                        self.group_id = peer.group_id
                        self.service_id = getattr(peer, "service_id",
                                                  "<no_service_id>")
                    break
                time.sleep(0.1)

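        # Wait until the replica logs report that a raft leader has been elected.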
        while len(self.grep_logs(r"raft.*=== became leader at")) <= 0:
            time.sleep(0.1)

        Partition.partitions.append(self)
    @classmethod
    def setUpClass(cls):
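        """Bring up a single three-replica partition with the benchmark config
        and a storage size of 10, plus one frontend and a client."""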
        super().setUpClass()

        cls.enclave_name = "test"
        cls.partition = Partition(cls.ca,
                                  replicas=3,
                                  config_file="replica.benchmark.yml",
                                  debug=False,
                                  storage_size=10)
        gprint("Started service %s" % cls.partition.service_id)
        gprint("Started partition %s" % cls.partition.get_spec())
        gprint()

        cls.frontend = Kbupd(1337, "frontend", cls.ca,
                                 "--enclave-name", cls.enclave_name,
                                 "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                 "--partitions", cls.partition.get_spec())

        gprint("Started frontend %s" % cls.frontend.node_id)
        gprint()

        cls.client = KbupdClient(cls.frontend, cls.enclave_name, cls.partition.service_id)

        cls.backup_ids = (backup_id_to_str(0),
                          backup_id_to_str(2**256 - 1),
                          backup_id_to_str((2**256 - 1) // 2),
                          backup_id_to_str((2**256 - 1) // 2 + 1),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)),
                          backup_id_to_str(random.randint(0, 2**256 - 1)))
        cls.backup_data = backup_id_to_str(random.randint(0, 2**256-1))
        cls.backup_pin  = random_id(32)
    def do_test_partitioning(self, update_specs, move):
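        """Split or move the last partition while backups exist, restart the
        frontend with updated partition specs either before (update_specs) or
        after the transfer finishes, and verify restores, PIN mismatches and
        backup-count accounting throughout."""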
        client = self.client

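        # Pre-populate the service with 100 random backups.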
        client.request(r"status=Ok", "backup", count=100)

        backup_ids = list(self.backup_ids)
        if not move:
            backup_ids.append(backup_id_to_str((2**256-1)//4))
            backup_ids.append(backup_id_to_str((2**256-1)//4+1))

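        # Store each tracked backup ID so it can be restored across the
        # re-partitioning below.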
        for backup_id in backup_ids:
            client.request(r"status=Ok", "backup",
                               backup_id, self.backup_pin, self.backup_data, 2)

        pre_partition_count = sum([i.get_backup_count() for i in self.partitions])
        self.assertIsNotNone(pre_partition_count)
        pre_partition_specs = [p.get_spec() for p in self.partitions]

        if move:
            partition = self.partitions[-1].move_partition()
        else:
            partition = self.partitions[-1].split_partition()
        self.partitions.append(partition)
        gprint("Started service %s" % partition.service_id)
        gprint("Started 3rd partition %s" % partition.get_spec())
        gprint()

        partition_specs = pre_partition_specs + [partition.get_spec_no_range()]

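        # Restart the frontend so it also routes to the new partition, which has
        # no fixed ID range yet.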
        KbupdTestCase.frontend.kill()
        KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                    "--enclave-name", self.enclave_name,
                                    "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                    "--partitions", ';'.join(partition_specs),
                                    append_log=True)
        gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
        gprint()

        partition.start_partition()

        self.partitions[-2].wait_partition_started_source()

        for backup_id in backup_ids:
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

        self.partitions[-2].resume_partition()

        if update_specs:
            self.partitions[-2].wait_partition_source()
            partition.wait_partition_destination()
            partition.finish_partition()
            self.partitions[-2].finish_partition()
            self.assertEqual(pre_partition_count,
                             sum([i.get_backup_count() for i in self.partitions]))
            if move:
                self.partitions[-2].kill()
                del self.partitions[-2]

            KbupdTestCase.frontend.kill()
            KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                        "--enclave-name", self.enclave_name,
                                        "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                        "--partitions", ';'.join([ p.get_spec() for p in self.partitions ]),
                                        append_log = True)
            gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
            gprint()

        for backup_id in backup_ids:
            result = client.request(r"status=Ok", "restore",
                                        backup_id, self.backup_pin)
            self.assertEqual(result.get("data"), self.backup_data)

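            # Check the guess-limit behaviour: a wrong PIN is rejected with a
            # token, and further attempts with that token report the backup as
            # Missing, even with the correct PIN.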
            result = client.request(r"status=PinMismatch", "restore",
                                        backup_id, random_id(32))
            token = result["token"]

            client.request(r"status=Missing", "restore",
                               backup_id, random_id(32), token=token)
            client.request(r"status=Missing", "restore",
                               backup_id, self.backup_pin, token=token)

        if not update_specs:
            self.partitions[-2].wait_partition_source()
            partition.wait_partition_destination()
            partition.finish_partition()
            self.partitions[-2].finish_partition()
            self.assertEqual(pre_partition_count - len(backup_ids),
                             sum([i.get_backup_count() for i in self.partitions]))
            if move:
                self.partitions[-2].kill()
                del self.partitions[-2]
            KbupdTestCase.frontend.kill()
            KbupdTestCase.frontend = Kbupd(1337, "frontend", self.ca,
                                               "--enclave-name", self.enclave_name,
                                               "--max-backup-data-length", str(BACKUP_DATA_LENGTH),
                                               "--partitions", ';'.join([ p.get_spec() for p in self.partitions ]),
                                               append_log = True)
            gprint("Started frontend %s" % KbupdTestCase.frontend.node_id)
            gprint()