Example #1
    def config_fmke_pop(self, kube_namespace):
        logger.info('---------------------------')
        logger.info('4. Starting populating data')
        fmke_k8s_dir = self.configs['exp_env']['fmke_yaml_path']

        logger.debug('Modify the populate_data template file')
        configurator = k8s_resources_configurator()
        fmke_list = configurator.get_k8s_resources(resource='pod',
                                                   label_selectors='app=fmke',
                                                   kube_namespace=kube_namespace)
        logger.info('fmke_list: %s' % len(fmke_list.items))
        fmke_IPs = list()
        for cluster in self.configs['exp_env']['clusters']:
            for fmke in fmke_list.items:
                if cluster in fmke.metadata.name:
                    fmke_IPs.append('fmke@%s' % fmke.status.pod_ip)
        with open(os.path.join(fmke_k8s_dir, 'populate_data.yaml.template')) as f:
            doc = yaml.safe_load(f)
        doc['metadata']['name'] = 'populate-data-without-prescriptions'
        doc['spec']['template']['spec']['containers'][0]['args'] = ['-f -d small --noprescriptions'] + fmke_IPs
        with open(os.path.join(fmke_k8s_dir, 'populate_data.yaml'), 'w') as f:
            yaml.safe_dump(doc, f)

        logger.info("Populating the FMKe benchmark data")
        logger.debug("Init configurator: k8s_resources_configurator")
        configurator = k8s_resources_configurator()
        configurator.deploy_k8s_resources(files=[os.path.join(fmke_k8s_dir, 'populate_data.yaml')],
                                          namespace=kube_namespace)

        logger.info('Waiting for populating data')
        configurator.wait_k8s_resources(resource='job',
                                        label_selectors="app=fmke_pop",
                                        kube_namespace=kube_namespace)

        logger.debug('Modify the populate_data file to populate prescriptions')
        with open(os.path.join(fmke_k8s_dir, 'populate_data.yaml.template')) as f:
            doc = yaml.safe_load(f)
        doc['metadata']['name'] = 'populate-data-with-onlyprescriptions'
        doc['spec']['template']['spec']['containers'][0]['args'] = ['-f -d small --onlyprescriptions -p 1'] + fmke_IPs
        with open(os.path.join(fmke_k8s_dir, 'populate_data.yaml'), 'w') as f:
            yaml.safe_dump(doc, f)

        logger.info("Populating the FMKe benchmark data with prescriptions")
        configurator.deploy_k8s_resources(files=[os.path.join(fmke_k8s_dir, 'populate_data.yaml')],
                                          namespace=kube_namespace)

        logger.info('Waiting for populating data')
        configurator.wait_k8s_resources(resource='job',
                                        label_selectors="app=fmke_pop",
                                        timeout=90,
                                        kube_namespace=kube_namespace)
        logger.info('Finish populating data')
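The method above rewrites two fields of populate_data.yaml.template before deploying it as a Kubernetes Job. Below is a minimal sketch (an assumption, not the actual template from the repository) of the dict shape that yaml.safe_load must return for the key paths used above to resolve:

# Assumed minimal structure of populate_data.yaml.template after yaml.safe_load();
# only the fields touched by config_fmke_pop are shown, values are placeholders.
populate_template = {
    'apiVersion': 'batch/v1',
    'kind': 'Job',
    'metadata': {'name': 'populate-data'},  # overwritten with 'populate-data-without-prescriptions'
    'spec': {
        'template': {
            'spec': {
                'containers': [{
                    'name': 'fmke-populator',   # placeholder container name
                    'image': 'fmke/populator',  # placeholder image
                    'args': [],                 # overwritten with ['-f -d small --noprescriptions'] + fmke_IPs
                }],
                'restartPolicy': 'Never',
            },
        },
    },
}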
Example #2
    def config_fmke(self, kube_master, kube_namespace):
        logger.info('------------------------------------')
        logger.info('3. Starting deploying FMKe benchmark')
        fmke_k8s_dir = self.configs['exp_env']['fmke_yaml_path']

        logger.debug('Delete old deployment files')
        for filename in os.listdir(fmke_k8s_dir):
            if '.template' not in filename:
                try:
                    os.remove(os.path.join(fmke_k8s_dir, filename))
                except OSError:
                    logger.debug("Error while deleting file")

        logger.debug('Create headless service file')
        file1 = os.path.join(fmke_k8s_dir, 'headlessService.yaml.template')
        file2 = os.path.join(fmke_k8s_dir, 'headlessService.yaml')
        shutil.copyfile(file1, file2)

        logger.debug('Create FMKe statefulSet files for each DC')
        file_path = os.path.join(fmke_k8s_dir, 'statefulSet_fmke.yaml.template')
        with open(file_path) as f:
            doc = yaml.safe_load(f)

        configurator = k8s_resources_configurator()
        service_list = configurator.get_k8s_resources(resource='service',
                                                      label_selectors='app=antidote,type=exposer-service',
                                                      kube_namespace=kube_namespace)

        for cluster in self.configs['exp_env']['clusters']:
            # Get IP of antidote DC exposer service for each cluster
            for service in service_list.items:
                if cluster in service.metadata.name:
                    ip = service.spec.cluster_ip
            doc['spec']['replicas'] = self.configs['exp_env']['n_fmke_app_per_dc']
            doc['metadata']['name'] = 'fmke-%s' % cluster
            doc['spec']['template']['spec']['containers'][0]['env'] = [
                {'name': 'DATABASE_ADDRESSES', 'value': ip}]
            doc['spec']['template']['spec']['nodeSelector'] = {'service_g5k': 'fmke', 'cluster_g5k': '%s' % cluster}
            file_path = os.path.join(fmke_k8s_dir, 'statefulSet_fmke_%s.yaml' % cluster)
            with open(file_path, 'w') as f:
                yaml.safe_dump(doc, f)

        logger.info("Starting FMKe instances on each Antidote DC")
        logger.debug("Init configurator: k8s_resources_configurator")
        configurator = k8s_resources_configurator()
        configurator.deploy_k8s_resources(path=fmke_k8s_dir, namespace=kube_namespace)

        logger.info('Waiting until all fmke app servers are up')
        configurator.wait_k8s_resources(resource='pod',
                                        label_selectors="app=fmke",
                                        kube_namespace=kube_namespace)
        logger.info('Finish deploying FMKe benchmark')
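For illustration, with a hypothetical cluster named 'rennes' whose Antidote exposer service has cluster IP 10.96.12.34 and n_fmke_app_per_dc set to 2, the statefulSet written by the loop above would carry the following fragments (a sketch, not output captured from a real run):

# Hypothetical values; the real ones come from the exposer service and the experiment config.
replicas = 2
env_fragment = [{'name': 'DATABASE_ADDRESSES', 'value': '10.96.12.34'}]
node_selector = {'service_g5k': 'fmke', 'cluster_g5k': 'rennes'}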
Example #3
    def _setup_g5k_kube_volumes(self, kube_workers, n_pv=3):

        logger.info("Setting volumes on %s kubernetes workers" %
                    len(kube_workers))
        cmd = """umount /dev/sda5;
                 mount -t ext4 /dev/sda5 /tmp"""
        execute_cmd(cmd, kube_workers)
        logger.debug(
            "Create n_pv partitions on the physical disk so that a PV can be shared"
        )
        cmd = """for i in $(seq 1 %s); do
                     mkdir -p /tmp/pv/vol${i}
                     mkdir -p /mnt/disks/vol${i}
                     mount --bind /tmp/pv/vol${i} /mnt/disks/vol${i}
                 done""" % n_pv
        execute_cmd(cmd, kube_workers)

        logger.info("Creating local persistance volumes on Kubernetes cluster")
        logger.debug("Init configurator: k8s_resources_configurator")
        configurator = k8s_resources_configurator()
        antidote_k8s_dir = self.configs["exp_env"]["antidote_yaml_path"]
        deploy_files = [
            os.path.join(antidote_k8s_dir, "local_persistentvolume.yaml"),
            os.path.join(antidote_k8s_dir, "storageClass.yaml"),
        ]
        configurator.deploy_k8s_resources(files=deploy_files)

        logger.info("Waiting for setting local persistance volumes")
        configurator.wait_k8s_resources(
            resource="pod",
            label_selectors=
            "app.kubernetes.io/instance=local-volume-provisioner",
        )
Example #4
 def _set_kube_workers_label(self, kube_workers):
     logger.info("Set labels for all kubernetes workers")
     configurator = k8s_resources_configurator()
     for host in kube_workers:
         cluster = host.split("-")[0]
         labels = "cluster_g5k=%s,service_g5k=antidote" % cluster
         configurator.set_labels_node(host, labels)
Example #5
 def _set_kube_workers_label(self, kube_workers):
     logger.info('Set labels for all kubernetes workers')
     configurator = k8s_resources_configurator()
     for host in kube_workers:
         cluster = host.split('-')[0]
         labels = 'cluster=%s,service=antidote' % cluster
         configurator.set_labels_node(host, labels)
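The node labels written by the two variants above (the label keys differ between them) are what the generated manifests later match through a nodeSelector, as in the Antidote statefulSet built in config_antidote further down the page. A minimal sketch with a hypothetical host name:

# Labels applied to a hypothetical worker 'ecotype-5':
#   Example #4: cluster_g5k=ecotype,service_g5k=antidote
#   Example #5: cluster=ecotype,service=antidote
# Pod spec fragment that pins a pod to such a worker:
node_selector = {'cluster_g5k': 'ecotype', 'service_g5k': 'antidote'}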
Example #6
    def _setup_g5k_kube_volumes(self, kube_workers, n_pv=3):

        logger.info('Setting volumes on %s kubernetes workers' %
                    len(kube_workers))
        cmd = '''umount /dev/sda5;
                 mount -t ext4 /dev/sda5 /tmp'''
        execute_cmd(cmd, kube_workers)
        logger.debug(
            'Create n_pv partitions on the physical disk so that a PV can be shared'
        )
        cmd = '''for i in $(seq 1 %s); do
                     mkdir -p /tmp/pv/vol${i}
                     mkdir -p /mnt/disks/vol${i}
                     mount --bind /tmp/pv/vol${i} /mnt/disks/vol${i}
                 done''' % n_pv
        execute_cmd(cmd, kube_workers)

        logger.info('Creating local persistence volumes on Kubernetes cluster')
        logger.debug('Init configurator: k8s_resources_configurator')
        configurator = k8s_resources_configurator()
        antidote_k8s_dir = self.configs['exp_env']['antidotedb_yaml_path']
        deploy_files = [
            os.path.join(antidote_k8s_dir, 'local_persistentvolume.yaml'),
            os.path.join(antidote_k8s_dir, 'storageClass.yaml'),
        ]
        configurator.deploy_k8s_resources(files=deploy_files)

        logger.info('Waiting for local persistence volumes to be set up')
        configurator.wait_k8s_resources(
            resource='pod',
            label_selectors=
            'app.kubernetes.io/instance=local-volume-provisioner',
        )
Example #7
 def clean_exp_env(self, kube_namespace):
     logger.info('1. Cleaning the experiment environment')
     logger.info('Deleting all k8s running resources from the previous run in namespace "%s"' % kube_namespace)
     logger.debug('Delete namespace "%s" to delete all the running resources, then create it again' % kube_namespace)
     configurator = k8s_resources_configurator()
     configurator.delete_namespace(kube_namespace)
     configurator.create_namespace(kube_namespace)
Example #8
    def clean_exp_env(self, kube_namespace, n_fmke_client_per_dc):
        logger.info('1. Cleaning the experiment environment')
        logger.info(
            'Deleting all k8s running resources from the previous run in namespace "%s"'
            % kube_namespace)
        logger.debug(
            'Delete namespace "%s" to delete all the running resources, then create it again'
            % kube_namespace)
        configurator = k8s_resources_configurator()
        configurator.delete_namespace(kube_namespace)
        configurator.create_namespace(kube_namespace)

        if n_fmke_client_per_dc > 0:
            logger.info(
                'Delete all files in /tmp/results folder on fmke_client nodes')
            fmke_nodes_info = configurator.get_k8s_resources(
                resource='node',
                label_selectors='service=fmke',
                kube_namespace=kube_namespace)
            fmke_nodes = [
                r.metadata.annotations['flannel.alpha.coreos.com/public-ip']
                for r in fmke_nodes_info.items
            ]
            cmd = 'rm -rf /tmp/results && mkdir -p /tmp/results'
            execute_cmd(cmd, fmke_nodes)
Example #9
    def connect_antidote_DCs(self, antidote_services_ips, kube_config,
                             kube_namespace):
        antidote_k8s_dir = self.args.yaml_path
        logger.debug(
            'Creating connectDCs_antidote.yaml to connect all Antidote DCs')
        file_path = os.path.join(antidote_k8s_dir, 'connectDCs.yaml.template')
        with open(file_path) as f:
            doc = yaml.safe_load(f)
        doc['spec']['template']['spec']['containers'][0]['args'] = [
            '--connectDcs'
        ] + antidote_services_ips
        file_path = os.path.join(antidote_k8s_dir, 'connectDCs_antidote.yaml')
        with open(file_path, 'w') as f:
            yaml.safe_dump(doc, f)

        logger.info("Connecting all Antidote DCs into a cluster")
        configurator = k8s_resources_configurator()
        configurator.deploy_k8s_resources(kube_config=kube_config,
                                          files=[file_path],
                                          namespace=kube_namespace)

        logger.info('Waiting until all Antidote DCs are connected')
        configurator.wait_k8s_resources(resource='job',
                                        label_selectors='app=antidote',
                                        kube_config=kube_config,
                                        kube_namespace=kube_namespace)
Example #10
    def _setup_g5k_kube_volumes(self, kube_workers):
        logger.info("Setting volumes on %s kubernetes workers" %
                    len(kube_workers))
        N_PV = 3
        cmd = '''umount /dev/sda5;
                 mount -t ext4 /dev/sda5 /tmp'''
        execute_cmd(cmd, kube_workers)
        cmd = '''for i in $(seq 1 %s); do
                     mkdir -p /tmp/pv/vol${i}
                     mkdir -p /mnt/disks/vol${i}
                     mount --bind /tmp/pv/vol${i} /mnt/disks/vol${i}
                 done''' % N_PV
        execute_cmd(cmd, kube_workers)

        logger.info("Creating local persistance volumes on Kubernetes workers")
        logger.debug("Init configurator: k8s_resources_configurator")
        configurator = k8s_resources_configurator()
        antidote_k8s_dir = self.args.yaml_path
        deploy_files = [
            os.path.join(antidote_k8s_dir, 'local_persistentvolume.yaml'),
            os.path.join(antidote_k8s_dir, 'storageClass.yaml')
        ]
        configurator.deploy_k8s_resources(files=deploy_files)

        logger.info('Waiting for local persistence volumes to be set up')
        configurator.wait_k8s_resources(
            resource='pod',
            label_selectors=
            "app.kubernetes.io/instance=local-volume-provisioner")
Example #11
 def _set_kube_workers_label(self, kube_master):
     configurator = k8s_resources_configurator()
     for node in self.nodes:
         if node['ipAddresses'][0]['ip'] == kube_master:
             pass
         else:
             r = configurator.set_labels_node(nodename=node['name'],
                                              labels='cluster=%s,service=antidote' % node['region'].lower())
             if r is None:
                 logger.info('Cannot Set labels for kubernetes workers')
                 exit()
Example #12
    def run_workflow(self, kube_namespace, kube_master, comb, sweeper):

        comb_ok = False

        try:
            logger.info('=======================================')
            logger.info('Performing combination: ' + slugify(comb))

            self.clean_exp_env(kube_namespace)
            self.deploy_antidote(kube_namespace, comb)

            logger.debug(
                'Getting hosts and IP of antidoteDB instances on their nodes')
            antidote_ips = dict()
            configurator = k8s_resources_configurator()
            pod_list = configurator.get_k8s_resources(
                resource='pod',
                label_selectors='app=antidote',
                kube_namespace=kube_namespace)
            for pod in pod_list.items:
                node_ip = pod.status.host_ip
                if node_ip not in antidote_ips:
                    antidote_ips[node_ip] = list()
                antidote_ips[node_ip].append(pod.status.pod_ip)
            antidote_hosts = list(antidote_ips.keys())
            elmerfs_hosts = antidote_hosts

            is_elmerfs = self.deploy_elmerfs(kube_master, kube_namespace,
                                             elmerfs_hosts, antidote_ips)
            if is_elmerfs:
                if self.args.monitoring:
                    self.deploy_monitoring(kube_master, kube_namespace)
                is_finished, hosts = self.run_benchmark(comb, elmerfs_hosts)

                if is_finished:
                    comb_ok = True
                    self.save_results(comb, hosts)
            else:
                raise CancelException('Cannot deploy elmerfs')
        except (ExecuteCommandException, CancelException) as e:
            logger.error('Combination exception: %s' % e)
            comb_ok = False
        finally:
            if comb_ok:
                sweeper.done(comb)
                logger.info('Finish combination: %s' % slugify(comb))
            else:
                sweeper.cancel(comb)
                logger.warning(slugify(comb) + ' is canceled')
            logger.info('%s combinations remaining\n' %
                        len(sweeper.get_remaining()))
        return sweeper
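run_workflow receives and returns the sweeper so that it can be driven from an outer loop over parameter combinations. Below is a minimal driver sketch, assuming an execo_engine-style ParamSweeper; the parameter space, namespace, and the experiment object are placeholders, not taken from the original script:

from execo_engine import ParamSweeper, sweep

def drive_experiment(exp, kube_master, kube_namespace='elmerfs-exp'):
    # Hypothetical parameter space; the real one is built from the experiment config.
    sweeper = ParamSweeper(persistence_dir='sweeps',
                           sweeps=sweep({'n_nodes_per_dc': [3, 6],
                                         'iteration': [1, 2, 3]}))
    comb = sweeper.get_next()
    while comb is not None:
        sweeper = exp.run_workflow(kube_namespace=kube_namespace,
                                   kube_master=kube_master,
                                   comb=comb,
                                   sweeper=sweeper)
        comb = sweeper.get_next()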
Example #13
    def clean_k8s_resources(self, kube_namespace):
        logger.info('1. Deleting all k8s resource from the previous run in namespace "%s"' % kube_namespace)
        logger.debug('Delete namespace "%s" to delete all the resources, then create it again' % kube_namespace)
        configurator = k8s_resources_configurator()
        configurator.delete_namespace(kube_namespace)
        configurator.create_namespace(kube_namespace)

        logger.debug('Delete all files in /tmp/results folder on fmke_client nodes')
        results_nodes = configurator.get_k8s_resources_name(resource='node',
                                                            label_selectors='service_g5k=fmke_client',
                                                            kube_namespace=kube_namespace)
        cmd = 'rm -rf /tmp/results && mkdir -p /tmp/results'
        execute_cmd(cmd, results_nodes)
Example #14
    def setup_k8s_env(self, kube_master, kube_namespace, kube_workers):
        self._get_credential(kube_master)

        logger.info('Create k8s namespace "%s" for this experiment' % kube_namespace)
        configurator = k8s_resources_configurator()
        configurator.create_namespace(namespace=kube_namespace)

        logger.info('Set labels for all kubernetes workers')
        self._set_kube_workers_label(kube_master)

        self._setup_ovh_kube_volumes(kube_workers, n_pv=3)

        logger.info('Finish deploying the Kubernetes cluster')
Example #15
    def save_results(self, comb):
        logger.info("----------------------------------")
        logger.info("6. Starting dowloading the results")

        configurator = k8s_resources_configurator()
        results_nodes = configurator.get_k8s_resources_name(resource='node',
                                                            label_selectors='service_g5k=fmke_client')

        get_results(comb=comb,
                    hosts=results_nodes,
                    remote_result_files=['/tmp/results/'],
                    local_result_dir=self.configs['exp_env']['results_dir'])

        logger.info("Finish dowloading the results")
Example #16
 def _set_kube_workers_label(self, kube_master):
     configurator = k8s_resources_configurator()
     for node in self.nodes:
         if node['ipAddresses'][0]['ip'] == kube_master:
             pass
         else:
             configurator.set_labels_node(nodename=node['name'],
                                          labels='cluster=%s' %
                                          node['region'].lower())
             if node['id'] in self.data_node_ids:
                 configurator.set_labels_node(nodename=node['name'],
                                              labels='service=antidote')
             else:
                 configurator.set_labels_node(nodename=node['name'],
                                              labels='service=fmke')
Example #17
    def config_host(self, kube_master, kube_namespace):
        logger.debug("Init configurator: kubernetes_configurator")
        configurator = kubernetes_configurator(hosts=self.hosts, kube_master=kube_master)
        _, kube_workers = configurator.deploy_kubernetes_cluster()

        self._get_credential(kube_master)

        logger.info('Create k8s namespace "%s" for this experiment' % kube_namespace)
        configurator = k8s_resources_configurator()
        configurator.create_namespace(namespace=kube_namespace)

        self._setup_g5k_kube_volumes(kube_workers, n_pv=3)

        logger.info('Set labels for all kubernetes workers')
        self._set_kube_workers_label(kube_workers)

        logger.info("Finish deploying the Kubernetes cluster")
Example #18
    def run_exp_workflow(self, kube_namespace, comb, kube_master, sweeper):
        comb_ok = False
        try:
            logger.info('=======================================')
            logger.info('Performing combination: ' + slugify(comb))

            self.clean_exp_env(kube_namespace, comb['n_fmke_client_per_dc'])
            self.deploy_antidote(kube_namespace, comb)
            if self.args.monitoring:
                prometheus_url, _ = self.deploy_monitoring(
                    kube_master, kube_namespace)
            self.deploy_fmke_app(kube_namespace, comb)
            pop_result = self.deploy_fmke_pop(kube_namespace, comb)
            pop_errors = None
            if self.args.monitoring:
                pop_errors = self.get_prometheus_metric(
                    'antidote_error_count', prometheus_url)
                pop_errors = sum(pop_errors.values())
                logger.info('Total ops errors of population: %s' % pop_errors)
            if comb['n_fmke_client_per_dc'] > 0:
                self.deploy_fmke_client(kube_namespace, comb)
                configurator = k8s_resources_configurator()
                fmke_nodes_info = configurator.get_k8s_resources(
                    resource='node', label_selectors='service=fmke')
                fmke_nodes = [
                    r.metadata.
                    annotations['flannel.alpha.coreos.com/public-ip']
                    for r in fmke_nodes_info.items
                ]
                self.save_results(comb, pop_result, pop_errors, fmke_nodes)
            else:
                self.save_results(comb, pop_result, pop_errors)
            comb_ok = True
        except (ExecuteCommandException, CancelException) as e:
            logger.error('Combination exception: %s' % e)
            comb_ok = False
        finally:
            if comb_ok:
                sweeper.done(comb)
                logger.info('Finish combination: %s' % slugify(comb))
            else:
                sweeper.cancel(comb)
                logger.warning(slugify(comb) + ' is canceled')
            logger.info('%s combinations remaining\n' %
                        len(sweeper.get_remaining()))
        return sweeper
Example #19
    def config_host(self, kube_master_site, kube_namespace):
        kube_master = self.args.kube_master

        if self.args.kube_master is None:
            antidote_hosts = list()
            for cluster in self.configs['clusters']:
                cluster_name = cluster['cluster']
                if cluster_name == self.configs['exp_env']['kube_master_site']:
                    antidote_hosts += [
                        host for host in self.hosts
                        if host.startswith(cluster_name)
                    ][0:cluster['n_nodes'] + 1]
                else:
                    antidote_hosts += [
                        host for host in self.hosts
                        if host.startswith(cluster_name)
                    ][0:cluster['n_nodes']]

            for host in antidote_hosts:
                if host.startswith(kube_master_site):
                    kube_master = host
                    break

            kube_workers = self.config_kube(kube_master, antidote_hosts,
                                            kube_namespace)
        else:
            logger.info('Kubernetes master: %s' % kube_master)
            self._get_credential(kube_master)

            configurator = k8s_resources_configurator()
            deployed_hosts = configurator.get_k8s_resources(resource='node')
            kube_workers = [
                host.metadata.name for host in deployed_hosts.items
            ]
            kube_workers.remove(kube_master)

        logger.info('Installing elmerfs dependencies')
        configurator = packages_configurator()
        configurator.install_packages(['libfuse2', 'wget', 'jq'], kube_workers)
        # Create mount point on elmerfs hosts
        cmd = 'mkdir -p /tmp/dc-$(hostname)'
        execute_cmd(cmd, kube_workers)

        # Installing filebench for running the experiments
        logger.info('Installing Filebench')
        self.install_filebench(kube_workers)
Example #20
    def _set_kube_workers_label(self, kube_workers):
        configurator = k8s_resources_configurator()
        clusters = dict()
        for host in kube_workers:
            cluster = host.split('-')[0]
            clusters[cluster] = [host] + clusters.get(cluster, list())
            configurator.set_labels_node(nodename=host,
                                         labels='cluster=%s' % cluster)

        n_fmke_per_dc = max(self.normalized_parameters['n_fmke_client_per_dc'])
        n_antidotedb_per_dc = max(self.normalized_parameters['n_nodes_per_dc'])

        for cluster, list_of_hosts in clusters.items():
            for n, service_name in [(n_antidotedb_per_dc, 'antidote'),
                                    (n_fmke_per_dc, 'fmke')]:
                for host in list_of_hosts[0:n]:
                    configurator.set_labels_node(nodename=host,
                                                 labels='service=%s' %
                                                 service_name)
                list_of_hosts = list_of_hosts[n:]
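The slicing at the end of the inner loop is what splits each cluster's workers between the two services: the first n_antidotedb_per_dc hosts get service=antidote, the next n_fmke_per_dc hosts get service=fmke. A small worked example with hypothetical host names and counts:

# Hypothetical values: n_antidotedb_per_dc = 2, n_fmke_per_dc = 1
list_of_hosts = ['ecotype-1', 'ecotype-2', 'ecotype-3']
for n, service_name in [(2, 'antidote'), (1, 'fmke')]:
    for host in list_of_hosts[0:n]:
        print('%s -> service=%s' % (host, service_name))
    list_of_hosts = list_of_hosts[n:]
# ecotype-1 -> service=antidote
# ecotype-2 -> service=antidote
# ecotype-3 -> service=fmke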
Example #21
    def clean_exp_env(self, kube_namespace, n_fmke_client_per_dc):
        logger.info('1. Cleaning the experiment environment')
        logger.info(
            'Deleting all k8s running resources from the previous run in namespace "%s"'
            % kube_namespace)
        logger.debug(
            'Delete namespace "%s" to delete all the running resources, then create it again'
            % kube_namespace)
        configurator = k8s_resources_configurator()
        configurator.delete_namespace(kube_namespace)
        configurator.create_namespace(kube_namespace)

        if n_fmke_client_per_dc > 0:
            logger.info(
                'Delete old result files in /tmp/results on FMKe client nodes')
            fmke_nodes = configurator.get_k8s_resources_name(
                resource='node',
                label_selectors='service=fmke',
                kube_namespace=kube_namespace)
            cmd = 'rm -rf /tmp/results && mkdir -p /tmp/results'
            execute_cmd(cmd, fmke_nodes)
Example #22
    def config_host(self, kube_master_site, kube_namespace):
        kube_master = self.args.kube_master

        if self.args.kube_master is None:
            antidote_hosts = list()
            for cluster in self.configs["exp_env"]["antidote_clusters"]:
                cluster_name = cluster["cluster"]
                if cluster_name == self.configs["exp_env"]["kube_master_site"]:
                    antidote_hosts += [
                        host for host in self.hosts
                        if host.startswith(cluster_name)
                    ][0:cluster["n_antidotedb_per_dc"] + 1]
                else:
                    antidote_hosts += [
                        host for host in self.hosts
                        if host.startswith(cluster_name)
                    ][0:cluster["n_antidotedb_per_dc"]]

            for host in antidote_hosts:
                if host.startswith(kube_master_site):
                    kube_master = host
                    break
            elmerfs_hosts = antidote_hosts
            elmerfs_hosts.remove(kube_master)

            self.config_kube(kube_master, antidote_hosts, kube_namespace)
        else:
            logger.info("Kubernetes master: %s" % kube_master)
            self._get_credential(kube_master)

            configurator = k8s_resources_configurator()
            antidote_hosts = configurator.get_k8s_resources_name(
                resource="node", label_selectors="service_g5k=antidote")

            elmerfs_hosts = antidote_hosts

        self.install_benchsoftware(elmerfs_hosts)

        return elmerfs_hosts
Example #23
    def config_kube(self, kube_master, antidote_hosts, kube_namespace):
        logger.info("Starting configuring a Kubernetes cluster")
        logger.debug("Init configurator: kubernetes_configurator")
        configurator = kubernetes_configurator(hosts=self.hosts,
                                               kube_master=kube_master)
        configurator.deploy_kubernetes_cluster()

        self._get_credential(kube_master)

        logger.info('Create k8s namespace "%s" for this experiment' %
                    kube_namespace)
        logger.debug("Init configurator: k8s_resources_configurator")
        configurator = k8s_resources_configurator()
        configurator.create_namespace(kube_namespace)

        kube_workers = [host for host in antidote_hosts if host != kube_master]

        self._setup_g5k_kube_volumes(kube_workers, n_pv=3)

        self._set_kube_workers_label(kube_workers)

        logger.info("Finish configuring the Kubernetes cluster\n")
Example #24
    def config_host(self, kube_master_site, kube_namespace):
        kube_master = self.args.kube_master

        if self.args.kube_master is None:
            antidote_hosts = list()
            for cluster in self.configs['exp_env']['antidote_clusters']:
                if cluster == self.configs['exp_env']['kube_master_site']:
                    antidote_hosts += [
                        host for host in self.hosts if host.startswith(cluster)
                    ][0:self.configs['exp_env']['n_antidotedb_per_dc'] + 1]
                else:
                    antidote_hosts += [
                        host for host in self.hosts if host.startswith(cluster)
                    ][0:self.configs['exp_env']['n_antidotedb_per_dc']]

            for host in antidote_hosts:
                if host.startswith(kube_master_site):
                    kube_master = host
                    break
            elmerfs_hosts = antidote_hosts
            elmerfs_hosts.remove(kube_master)

            self.config_kube(kube_master, antidote_hosts, kube_namespace)

        else:
            logger.info('Kubernetes master: %s' % kube_master)
            self._get_credential(kube_master)

            configurator = k8s_resources_configurator()
            antidote_hosts = configurator.get_k8s_resources_name(
                resource='node', label_selectors='service_g5k=antidote')
            elmerfs_hosts = antidote_hosts

        self.deploy_antidote(kube_namespace)
        self.deploy_elmerfs(kube_master, kube_namespace, elmerfs_hosts)

        if self.args.monitoring is not None:
            self.deploy_monitoring(kube_master, kube_namespace)
Example #25
    def config_host(self, kube_namespace):
        logger.info("Starting configuring an AntidoteDB cluster")
        antidote_services_ips = list()
        for cluster in self.clusters:
            kube_config = self._get_credential(cluster)

            logger.info('Creating namespace "%s" to deploy Antidote DC' %
                        kube_namespace)
            configurator = k8s_resources_configurator()
            configurator.create_namespace(namespace=kube_namespace,
                                          kube_config=kube_config)

            self.config_antidote(kube_config, cluster, kube_namespace)

            service_name = 'antidote-exposer-%s' % cluster.name
            antidote_services_ips.append(
                '%s:8087' % configurator.get_k8s_endpoint_ip(
                    service_name, kube_config, kube_namespace))

        logger.info(
            "Starting connecting all AntidoteDB DCs to create a cluster")
        self.connect_antidote_DCs(antidote_services_ips, kube_config,
                                  kube_namespace)
        logger.info("Finish configuring an AntidoteDB cluster\n")
Example #26
    def perform_combination(self, kube_namespace, concurrent_clients):
        logger.info('-----------------------------------------------------------------')
        logger.info('5. Starting deploying fmke client to stress the Antidote database')
        fmke_client_k8s_dir = self.configs['exp_env']['fmke_yaml_path']

        logger.debug('Delete old k8s yaml files if they exist')
        for filename in os.listdir(fmke_client_k8s_dir):
            if filename.startswith('create_fmke_client_') or filename.startswith('fmke_client_'):
                if '.template' not in filename:
                    try:
                        os.remove(os.path.join(fmke_client_k8s_dir, filename))
                    except OSError:
                        logger.debug("Error while deleting file")

        logger.debug('Create fmke_client folder on each fmke_client node')
        configurator = k8s_resources_configurator()
        exp_nodes = configurator.get_k8s_resources_name(resource='node',
                                                        label_selectors='service_g5k=fmke_client')
        cmd = 'mkdir -p /tmp/fmke_client'
        execute_cmd(cmd, exp_nodes)

        logger.debug('Create fmke_client config files to stress database for each Antidote DC')
        file_path = os.path.join(fmke_client_k8s_dir, 'fmke_client.config.template')

        fmke_list = configurator.get_k8s_resources(resource='pod',
                                                   label_selectors='app=fmke',
                                                   kube_namespace=kube_namespace)
        for cluster in self.configs['exp_env']['clusters']:
            fmke_IPs = list()
            for fmke in fmke_list.items:
                if cluster in fmke.metadata.name:
                    fmke_IPs.append(fmke.status.pod_ip)
            fmke_ports = [9090 for i in range(0, len(fmke_IPs))]
            # Modify fmke_client config files with new values
            with open(file_path) as f:
                doc = f.read().replace('["127.0.0.1"]', '%s' % fmke_IPs)
                doc = doc.replace("[9090]", '%s' % fmke_ports)
                doc = doc.replace("{concurrent, 16}.", '{concurrent, %s}.' % concurrent_clients)
                doc = doc.replace("'", '"')
            file_path2 = os.path.join(fmke_client_k8s_dir, 'fmke_client_%s.config' % cluster)
            with open(file_path2, 'w') as f:
                f.write(doc)
            logger.debug('Upload fmke_client config files to kube_master to be used by kubectl to run fmke_client pods')
            getput_file(hosts=exp_nodes, file_paths=[file_path2], dest_location='/tmp/fmke_client/', action='put')

        logger.debug('Create create_fmke_client.yaml files to run job stress for each Antidote DC')
        file_path = os.path.join(fmke_client_k8s_dir, 'create_fmke_client.yaml.template')
        with open(file_path) as f:
            doc = yaml.safe_load(f)
        fmke_client_files = list()
        for cluster in self.configs['exp_env']['clusters']:
            doc['spec']['parallelism'] = self.configs['exp_env']['n_fmke_client_per_dc']
            doc['spec']['completions'] = self.configs['exp_env']['n_fmke_client_per_dc']
            doc['metadata']['name'] = 'fmke-client-%s' % cluster
            doc['spec']['template']['spec']['containers'][0]['lifecycle']['postStart']['exec']['command'] = [
                "cp", "/cluster_node/fmke_client_%s.config" % cluster, "/fmke_client/fmke_client.config"]
            doc['spec']['template']['spec']['nodeSelector'] = {
                'service_g5k': 'fmke_client', 'cluster_g5k': '%s' % cluster}
            file_path = os.path.join(fmke_client_k8s_dir, 'create_fmke_client_%s.yaml' % cluster)
            with open(file_path, 'w') as f:
                yaml.safe_dump(doc, f)
            fmke_client_files.append(file_path)

        logger.info("Running fmke client instances on each DC")
        logger.debug("Init configurator: k8s_resources_configurator")
        configurator = k8s_resources_configurator()
        configurator.deploy_k8s_resources(files=fmke_client_files, namespace=kube_namespace)

        t = '0'
        with open(os.path.join(fmke_client_k8s_dir, 'fmke_client.config.template')) as search:
            for line in search:
                line = line.rstrip()  # remove '\n' at end of line
                if "{duration" in line:
                    t = line.split(',')[1].split('}')[0].strip()
        timeout = (int(t) + 5)*60

        logger.info("Stressing database in %s minutes ....." % t)
        configurator.wait_k8s_resources(resource='job',
                                        label_selectors="app=fmke-client",
                                        timeout=timeout,
                                        kube_namespace=kube_namespace)
        logger.info("Finish stressing Antidote database")
Example #27
    def deploy_fmke_client(self, fmke_yaml_path, test_duration, concurrent_clients, n_total_fmke_clients, workload=None, kube_namespace='default'):
        """Deploy FMKe client on the given K8s cluster

        Parameters
        ----------
        fmke_yaml_path: str
            a path to the K8s yaml deployment files
        test_duration: int
            the duration to perform the workload
        concurrent_clients: int
            the number of concurrent clients
        n_total_fmke_clients: int
            the total number of clients to be deployed on the system
        workload: dict
            the workload ratio of FMKe benchmark
        kube_namespace: str
            the name of K8s namespace
        """
        logger.debug('Delete old k8s yaml files if they exist')
        for filename in os.listdir(fmke_yaml_path):
            if filename.startswith('create_fmke_client_') or filename.startswith('fmke_client_'):
                if '.template' not in filename:
                    try:
                        os.remove(os.path.join(fmke_yaml_path, filename))
                    except OSError:
                        logger.debug('Error while deleting file')
        if workload:
            logger.debug('Create the new workload ratio')
            new_workload = ',\n'.join(['  {%s, %s}' % (key, val) for key, val in workload.items()])
            operations = '{operations,[\n%s\n]}.' % new_workload

        logger.debug('Init configurator: k8s_resources_configurator')
        configurator = k8s_resources_configurator()
        fmke_list = configurator.get_k8s_resources(resource='pod',
                                                   label_selectors='app=fmke',
                                                   kube_namespace=kube_namespace)

        fmke_client_files = list()
        config_file_path = os.path.join(fmke_yaml_path, 'fmke_client.config.template')
        create_file_path = os.path.join(fmke_yaml_path, 'create_fmke_client.yaml.template')
        for fmke in fmke_list.items:
            node = fmke.spec.node_name.split('.')[0]
            # Modify fmke_client config files with new values
            logger.debug('Create fmke_client config files to stress database for each AntidoteDB DC')
            with open(config_file_path) as f:
                doc = f.read()
                doc = doc.replace('127.0.0.1', '%s' % fmke.status.pod_ip)
                doc = doc.replace('{concurrent, 16}.', '{concurrent, %s}.' % concurrent_clients)
                doc = doc.replace('{duration, 3}.', '{duration, %s}.' % test_duration)
                doc = doc.replace("'", '"')
                if workload:
                    doc = re.sub(r'{operations.*', operations, doc, flags=re.S)
            file_path = os.path.join(fmke_yaml_path, 'fmke_client_%s.config' % node)
            with open(file_path, 'w') as f:
                f.write(doc)
            logger.debug('Create fmke_client folder on each fmke_client node')
            cmd = 'mkdir -p /tmp/fmke_client'
            execute_cmd(cmd, fmke.status.host_ip)
            logger.debug('Upload fmke_client config files to kube_master to be used by kubectl to run fmke_client pods')
            getput_file(hosts=fmke.status.host_ip, file_paths=[file_path], dest_location='/tmp/fmke_client/', action='put')


            logger.debug('Create create_fmke_client.yaml files to deploy one FMKe client')
            with open(create_file_path) as f:
                doc = yaml.safe_load(f)            
            doc['metadata']['name'] = 'fmke-client-%s' % node
            doc['spec']['template']['spec']['containers'][0]['lifecycle']['postStart']['exec']['command'] = [
                'cp', '/cluster_node/fmke_client_%s.config' % node, '/fmke_client/fmke_client.config']
            doc['spec']['template']['spec']['nodeSelector'] = {
                'service': 'fmke', 'kubernetes.io/hostname': '%s' % fmke.spec.node_name}
            file_path = os.path.join(fmke_yaml_path, 'create_fmke_client_%s.yaml' % node)
            with open(file_path, 'w') as f:
                yaml.safe_dump(doc, f)
            fmke_client_files.append(file_path)

        logger.info('Starting FMKe client instances on each AntidoteDB DC')
        configurator.deploy_k8s_resources(files=fmke_client_files, namespace=kube_namespace)
        sleep(20)
        logger.info('Checking whether the requested number of FMKe client instances are running')
        fmke_client_list = configurator.get_k8s_resources_name(resource='pod',
                                                            label_selectors='app=fmke-client',
                                                            kube_namespace=kube_namespace)
        if len(fmke_client_list) != n_total_fmke_clients:
            logger.info('n_fmke_client = %s, n_deployed_fmke_client = %s' %(n_total_fmke_clients, len(fmke_client_list)))
            raise CancelException('Cannot deploy enough FMKe_client')

        logger.info('Stressing the database for %s minutes .....' % test_duration)
        deploy_ok = configurator.wait_k8s_resources(resource='job',
                                        label_selectors='app=fmke-client',
                                        timeout=(test_duration + 5)*60,
                                        kube_namespace=kube_namespace)
        if not deploy_ok:
            logger.error('Timed out waiting for all FMKe client instances to complete')
            raise CancelException('Timed out waiting for all FMKe client instances to complete')

        logger.info('Finish stressing AntidoteDB cluster')
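A hypothetical invocation consistent with the docstring above; the experiment instance, path, namespace, and workload keys are placeholders (the workload keys must match the operation names used in fmke_client.config.template):

experiment.deploy_fmke_client(fmke_yaml_path='/home/user/fmke_yaml',
                              test_duration=10,                   # minutes
                              concurrent_clients=32,
                              n_total_fmke_clients=6,             # e.g. 2 clients x 3 DCs
                              workload={'get_pharmacy_prescriptions': 27,
                                        'create_prescription': 8},
                              kube_namespace='fmke-exp')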
Example #28
    def config_antidote(self, kube_namespace):
        logger.info('Starting deploying Antidote cluster')
        antidote_k8s_dir = self.args.yaml_path

        logger.info('Deleting all old resources in namespace %s' %
                    kube_namespace)
        configurator = k8s_resources_configurator()
        configurator.delete_namespace(namespace=kube_namespace)
        configurator.create_namespace(namespace=kube_namespace)

        logger.debug('Delete old deployment files if they exist')
        for filename in os.listdir(antidote_k8s_dir):
            if filename.startswith('createDC_') or filename.startswith(
                    'statefulSet_') or filename.startswith(
                        'exposer-service_') or filename.startswith(
                            'connectDCs_antidote'):
                if '.template' not in filename:
                    try:
                        os.remove(os.path.join(antidote_k8s_dir, filename))
                    except OSError:
                        logger.debug("Error while deleting file")

        deploy_files = [os.path.join(antidote_k8s_dir, 'headlessService.yaml')]
        logger.debug('Modify the statefulSet file')
        file_path = os.path.join(antidote_k8s_dir, 'statefulSet.yaml.template')
        with open(file_path) as f:
            doc = yaml.safe_load(f)
        for cluster in self.configs['clusters']:
            doc['spec']['replicas'] = cluster['n_nodes']
            doc['metadata']['name'] = 'antidote-%s' % cluster['cluster']
            doc['spec']['template']['spec']['nodeSelector'] = {
                'service_g5k': 'antidote',
                'cluster_g5k': '%s' % cluster['cluster']
            }
            file_path = os.path.join(
                antidote_k8s_dir, 'statefulSet_%s.yaml' % cluster['cluster'])
            with open(file_path, 'w') as f:
                yaml.safe_dump(doc, f)
            deploy_files.append(file_path)

        logger.info("Starting AntidoteDB instances")
        logger.debug("Init configurator: k8s_resources_configurator")
        configurator = k8s_resources_configurator()
        configurator.deploy_k8s_resources(files=deploy_files,
                                          namespace=kube_namespace)

        logger.info('Waiting until all Antidote instances are up')
        configurator.wait_k8s_resources(resource='pod',
                                        label_selectors='app=antidote',
                                        kube_namespace=kube_namespace)

        logger.debug('Creating createDc.yaml file for each Antidote DC')
        dcs = dict()
        for cluster in self.configs['clusters']:
            dcs[cluster['cluster']] = list()
        antidote_list = configurator.get_k8s_resources_name(
            resource='pod',
            label_selectors='app=antidote',
            kube_namespace=kube_namespace)
        for antidote in antidote_list:
            cluster = antidote.split('-')[1].strip()
            dcs[cluster].append(antidote.strip())

        file_path = os.path.join(antidote_k8s_dir, 'createDC.yaml.template')
        with open(file_path) as f:
            doc = yaml.safe_load(f)

        antidote_masters = list()
        createdc_files = list()
        for cluster, pods in dcs.items():
            doc['spec']['template']['spec']['containers'][0]['args'] = [
                '--createDc', '%s.antidote:8087' % pods[0]
            ] + ['antidote@%s.antidote' % pod for pod in pods]
            doc['metadata']['name'] = 'createdc-%s' % cluster
            antidote_masters.append('%s.antidote:8087' % pods[0])
            file_path = os.path.join(antidote_k8s_dir,
                                     'createDC_%s.yaml' % cluster)
            with open(file_path, 'w') as f:
                yaml.safe_dump(doc, f)
            createdc_files.append(file_path)

        logger.debug('Creating exposer-service.yaml files')
        file_path = os.path.join(antidote_k8s_dir,
                                 'exposer-service.yaml.template')
        with open(file_path) as f:
            doc = yaml.safe_load(f)
        for cluster, pods in dcs.items():
            doc['spec']['selector'][
                'statefulset.kubernetes.io/pod-name'] = pods[0]
            doc['metadata']['name'] = 'antidote-exposer-%s' % cluster
            file_path = os.path.join(antidote_k8s_dir,
                                     'exposer-service_%s.yaml' % cluster)
            with open(file_path, 'w') as f:
                yaml.safe_dump(doc, f)
                createdc_files.append(file_path)

        logger.info("Creating Antidote DCs and exposing services")
        configurator.deploy_k8s_resources(files=createdc_files,
                                          namespace=kube_namespace)

        logger.info('Waiting until all antidote DCs are created')
        configurator.wait_k8s_resources(resource='job',
                                        label_selectors='app=antidote',
                                        kube_namespace=kube_namespace)

        logger.debug(
            'Creating connectDCs_antidote.yaml to connect all Antidote DCs')
        file_path = os.path.join(antidote_k8s_dir, 'connectDCs.yaml.template')
        with open(file_path) as f:
            doc = yaml.safe_load(f)
        doc['spec']['template']['spec']['containers'][0]['args'] = [
            '--connectDcs'
        ] + antidote_masters
        file_path = os.path.join(antidote_k8s_dir, 'connectDCs_antidote.yaml')
        with open(file_path, 'w') as f:
            yaml.safe_dump(doc, f)

        logger.info("Connecting all Antidote DCs into a cluster")
        configurator.deploy_k8s_resources(files=[file_path],
                                          namespace=kube_namespace)

        logger.info('Waiting until all Antidote DCs are connected')
        configurator.wait_k8s_resources(resource='job',
                                        label_selectors='app=antidote',
                                        kube_namespace=kube_namespace)

        logger.info('Finish deploying an Antidote cluster')
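For a hypothetical DC whose pods are antidote-rennes-0 and antidote-rennes-1, the createDC job written by the loop above would carry the following args, and the exposer service would select antidote-rennes-0:

args = ['--createDc', 'antidote-rennes-0.antidote:8087',
        'antidote@antidote-rennes-0.antidote',
        'antidote@antidote-rennes-1.antidote']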
Example #29
    def deploy_fmke_pop(self, fmke_yaml_path, dataset, n_fmke_pop_process, clusters, stabilizing_time, timeout=600, kube_namespace='default'):
        """Deploy FMKe populator on the given K8s cluster to generate the data in to an AntidoteDB cluster

        Parameters
        ----------
        fmke_yaml_path: str
            a path to the K8s yaml deployment files
        dataset: str
            name of the dataset of FMKe benchmark to generate into the database
        n_fmke_pop_process: int
            the number of FMKe populator processes running simultaneously
        clusters: list
            list of cluster name
        stabilizing_time: int
            the time (in minutes) to wait for the synchronization and replication process between datacenters
        timeout: int
            the time to wait for each data populating job to complete
        kube_namespace: str
            the name of K8s namespace
        """
        logger.debug('Modify the populate_data template file')
        configurator = k8s_resources_configurator()
        fmke_list = configurator.get_k8s_resources(resource='pod',
                                                   label_selectors='app=fmke',
                                                   kube_namespace=kube_namespace)
        fmke_IPs = list()
        for cluster in clusters:
            for fmke in fmke_list.items:
                if cluster.lower() in fmke.metadata.name:
                    fmke_IPs.append('fmke@%s' % fmke.status.pod_ip)
        with open(os.path.join(fmke_yaml_path, 'populate_data.yaml.template')) as f:
            doc = yaml.safe_load(f)
        doc['metadata']['name'] = 'populate-data-without-prescriptions'
        doc['spec']['template']['spec']['containers'][0]['args'] = ['-f -d %s --noprescriptions -p %s' %
                                                                    (dataset, n_fmke_pop_process)] + fmke_IPs
        with open(os.path.join(fmke_yaml_path, 'populate_data.yaml'), 'w') as f:
            yaml.safe_dump(doc, f)

        logger.info('Populating the FMKe benchmark data without prescriptions')
        logger.debug('Init configurator: k8s_resources_configurator')
        configurator = k8s_resources_configurator()
        configurator.deploy_k8s_resources(files=[os.path.join(fmke_yaml_path, 'populate_data.yaml')],
                                          namespace=kube_namespace)

        logger.info('Waiting for populating data without prescriptions')
        deploy_ok = configurator.wait_k8s_resources(resource='job',
                                                    label_selectors='app=fmke_pop',
                                                    timeout=timeout,
                                                    kube_namespace=kube_namespace)
        if not deploy_ok:
            raise CancelException('Cannot wait until finishing populating data')

        logger.info('Checking whether the populating process finished successfully')
        fmke_pop_pods = configurator.get_k8s_resources_name(resource='pod',
                                                            label_selectors='job-name=populate-data-without-prescriptions',
                                                            kube_namespace=kube_namespace)
        pop_result = dict()
        if len(fmke_pop_pods) > 0:
            logger.debug('FMKe pod name: %s' % fmke_pop_pods[0])
            log = configurator.get_k8s_pod_log(pod_name=fmke_pop_pods[0], kube_namespace=kube_namespace)
            last_line = log.strip().split('\n')[-1]
            logger.info('Last line of log: %s' % last_line)
            if 'Populated' in last_line and 'entities in' in last_line and 'avg' in last_line:
                result = log.strip().split('\n')[-1].split(' ')
                if len(result) == 8:
                    pop_result = result[4] + '\n' + result[6]
                if len(result) == 9:
                    pop_result = result[4] + '\n' + result[7]
                t = stabilizing_time
                logger.info('Waiting %s minutes for the replication and key distribution mechanisms between DCs' % t)
                sleep(t*60)
            else:
                raise CancelException('Populating process ERROR')
            logger.debug('FMKe populator result: \n%s' % pop_result)

        logger.debug('Modify the populate_data file to populate prescriptions')
        with open(os.path.join(fmke_yaml_path, 'populate_data.yaml.template')) as f:
            doc = yaml.safe_load(f)
        doc['metadata']['name'] = 'populate-data-with-onlyprescriptions'
        doc['spec']['template']['spec']['containers'][0]['args'] = [
            '-f --onlyprescriptions -p 1'] + fmke_IPs
        with open(os.path.join(fmke_yaml_path, 'populate_data.yaml'), 'w') as f:
            yaml.safe_dump(doc, f)

        logger.info('Populating the FMKe benchmark data with prescriptions')
        configurator.deploy_k8s_resources(files=[os.path.join(fmke_yaml_path, 'populate_data.yaml')],
                                          namespace=kube_namespace)

        logger.info('Waiting for populating data with prescriptions')
        configurator.wait_k8s_resources(resource='job',
                                        label_selectors='app=fmke_pop',
                                        timeout=timeout,
                                        kube_namespace=kube_namespace)
        logger.info('Checking whether the populating process finished successfully')
        fmke_pop_pods = configurator.get_k8s_resources_name(resource='pod',
                                                            label_selectors='job-name=populate-data-with-onlyprescriptions',
                                                            kube_namespace=kube_namespace)
        if len(fmke_pop_pods) > 0:
            logger.info('FMKe pod: %s' % fmke_pop_pods[0])
            log = configurator.get_k8s_pod_log(
                pod_name=fmke_pop_pods[0], kube_namespace=kube_namespace)
            last_line = log.strip().split('\n')[-1]
            logger.info('Last line of log: %s' % last_line)
            if 'Populated' not in last_line:
                raise CancelException('Populating process ERROR')
            t = stabilizing_time
            logger.info('Waiting %s minutes for the replication and key distribution mechanisms between DCs' % t)
            sleep(t*60)
        logger.info('Finish populating data')

        return pop_result
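A hypothetical call matching the docstring above; the benchmark instance, path, cluster names, and namespace are placeholders:

pop_result = benchmark.deploy_fmke_pop(fmke_yaml_path='/home/user/fmke_yaml',
                                       dataset='small',
                                       n_fmke_pop_process=100,
                                       clusters=['rennes', 'nantes'],
                                       stabilizing_time=10,   # minutes
                                       timeout=600,           # default value
                                       kube_namespace='fmke-exp')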
        
Example #30
 def _set_kube_workers_label(self, kube_workers):
     configurator = k8s_resources_configurator()
     for host in kube_workers:
         cluster = host.split('-')[0]
         labels = 'cluster_g5k=%s,service_g5k=antidote' % cluster
         configurator.set_labels_node(nodename=host, labels=labels)