def ycsb_run(self, iterations, res_dir, workloads, recordcount,
              threadcount, fieldlength, target):
     """
     Run a series of workloads for a number of iterations and save the results in a directory
     :param iterations: the number of times to execute each workload
     :param res_dir: the directory in which to store the results
     :param workloads: the different workloads that we want to execute
     :param recordcount: the number of records that will be inserted
     :param threadcount: the number of threads for each client
     :param fieldlength: the length in bytes of each field of the inserted records
     :param target: the target throughput (operations per second) for each client
     """
     # the client nodes that will run the YCSB workload
     ycsb_clients = self.cassandra_ycsb.install_nodes
     # create a directory for the results (mkdir -p also creates "results" and is idempotent)
     general_util.Remote(cmd="mkdir -p results/" + res_dir,
                         hosts=ycsb_clients).run()
     for workload in workloads:
         self.cassandra_ycsb.load_workload(from_node=ycsb_clients,
                                           workload=workload,
                                           recordcount=recordcount,
                                           threadcount=threadcount,
                                           fieldlength=fieldlength)
         for i in range(iterations):
             self.cassandra_ycsb.run_workload(iteration=i,
                                              res_dir="results/" + res_dir,
                                              from_node=ycsb_clients,
                                              workload=workload,
                                              recordcount=recordcount,
                                              threadcount=threadcount,
                                              fieldlength=fieldlength,
                                              target=target)
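 # A hypothetical invocation of ycsb_run (names and values are illustrative, not
 # taken from the repository): execute the standard YCSB core workloads A and B
 # five times each over 100000 preloaded records, with 10 client threads,
 # 100-byte fields and a target of 1000 operations per second per client.
 #
 # experiment.ycsb_run(iterations=5,
 #                     res_dir="ycsb_baseline",
 #                     workloads=["workloada", "workloadb"],
 #                     recordcount=100000,
 #                     threadcount=10,
 #                     fieldlength=100,
 #                     target=1000)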
 def save_results(self):
     logs_from_images = [
         'ches/kafka', 'alvarobrandon/spark-worker',
         'alvarobrandon/spark-master', 'uhopper/hadoop-datanode:2.8.1',
         'uhopper/hadoop-namenode:2.8.1', 'zookeeper',
         'mesosphere/marathon-lb:v1.11.1', 'alvarobrandon/spark-bench',
         'alvarobrandon/ycsb', 'cassandra'
     ]
     # NOTE: the Mesos task IDs could also be extracted from the Marathon API; here we
     # find the containers on each agent through docker ps and collect their logs
     for agent in self.private_agents:
         for image in logs_from_images:
             p = general_util.SshProcess(
                 'sudo docker ps -f "ancestor={0}" -q -a'.format(image),
                 host=agent).run()
             image_dir = image.replace('/', '_')
             for containerid in p.stdout.split('\r\n')[:-1]:
                 if image == 'alvarobrandon/spark-worker':
                     print(containerid, image_dir)
                 p = general_util.SshProcess(
                     'mkdir /home/vagrant/{0}_{1}_logs'.format(
                         image_dir, containerid),
                     host=agent).run()
                 p = general_util.SshProcess(
                     'sudo docker logs {1} >> /home/vagrant/{0}_{1}_logs/stdout_{0}_{1}.out 2>&1'
                     .format(image_dir, containerid),
                     host=agent).run()
                 if image == 'ches/kafka':  # kafka writes extra logs inside the container; copy them out too
                     p = general_util.SshProcess(
                         'sudo docker cp {1}:/kafka/logs /home/vagrant/{0}_{1}_logs/'
                         .format(image_dir, containerid),
                         host=agent).run()
                 if image == 'alvarobrandon/spark-worker':  # spark workers keep per-application logs under /spark/work; copy them out too
                     p = general_util.SshProcess(
                         'sudo docker cp {1}:/spark/work/ /home/vagrant/{0}_{1}_logs/'
                         .format(image_dir, containerid),
                         host=agent).run()
     RcaVagrantExperiment.save_results(self)
     # remove the spark-bench jars first since we don't want to fetch them with the results
     general_util.Remote(
         hosts=self.private_agents,
         cmd=
         "sudo rm -f /home/vagrant/*/work/*/*/spark-bench-2.1.1_0.3.0-RELEASE.jar"
     ).run()
     # drop the compressed .scrap captures so they are not included in the /home/vagrant fetch below
     general_util.Remote(hosts=self.private_agents,
                         cmd="sudo rm -f /home/vagrant/*.scrap.gz").run()
     general_util.Get(hosts=self.private_agents,
                      remote_files=["/home/vagrant/"],
                      local_location=self.results_directory).run()
 def load_ycsb_cassandra(self, list_cassandra_nodes, workload):
     general_util.Remote(
         cmd=
         'sudo docker run alvarobrandon/ycsb load cassandra-cql -P ycsb-0.12.0/workloads/{0} -p hosts="{1}" -p recordcount="100000"'
         .format(workload, ",".join(list_cassandra_nodes)),
         hosts=list_cassandra_nodes[0],
         process_args={
             'stdout_handlers': [sys.stdout],
             'stderr_handlers': [sys.stderr]
         }).run()
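 # A hedged companion sketch, not taken from the repository: the matching YCSB
 # *run* phase for the load above. The flags mirror load_ycsb_cassandra; the
 # operationcount value and the assumption that the alvarobrandon/ycsb image
 # accepts "run" with the same workload path are illustrative guesses.
 def run_ycsb_cassandra(self, list_cassandra_nodes, workload, operationcount=100000):
     general_util.Remote(
         cmd=
         'sudo docker run alvarobrandon/ycsb run cassandra-cql -P ycsb-0.12.0/workloads/{0} -p hosts="{1}" -p operationcount="{2}"'
         .format(workload, ",".join(list_cassandra_nodes), operationcount),
         hosts=list_cassandra_nodes[0],
         process_args={
             'stdout_handlers': [sys.stdout],
             'stderr_handlers': [sys.stderr]
         }).run()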
 def load_cassandra_database(self, cassandra_node):
     general_util.Put(local_files=["aux_utilities/ycsb_init.cql"],
                      hosts=cassandra_node).run()
     general_util.Remote(
         cmd=
         "sudo docker run -v /home/vagrant:/home/vagrant cassandra:3.10 cqlsh -f /home/vagrant/ycsb_init.cql {0}"
         .format(cassandra_node),
         hosts=cassandra_node,
         process_args={
             'stdout_handlers': [sys.stdout],
             'stderr_handlers': [sys.stderr]
         }).run()
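 # ycsb_init.cql itself does not appear in this listing. As a rough guess at its
 # contents (an assumption, not the repository's actual file), the YCSB
 # cassandra-cql binding documents a schema along these lines:
 #     CREATE KEYSPACE ycsb WITH REPLICATION =
 #         {'class': 'SimpleStrategy', 'replication_factor': 1};
 #     USE ycsb;
 #     CREATE TABLE usertable (y_id varchar PRIMARY KEY,
 #         field0 varchar, field1 varchar, field2 varchar, field3 varchar,
 #         field4 varchar, field5 varchar, field6 varchar, field7 varchar,
 #         field8 varchar, field9 varchar);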
 def save_results(self):
     monitoring_util.stop_sysdig(self.nodes)
     VagrantExperiment.save_results(self)
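     # {{{host}}} is presumably expanded to each node's own hostname by the
     # remote-execution layer, so every node compresses and then ships back only
     # its own .scrap capture files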
     general_util.Remote(cmd="gzip {{{host}}}.scrap*",
                         hosts=self.nodes).run()
     general_util.Get(hosts=self.nodes,
                      remote_files=["{{{host}}}.scrap.gz"],
                      local_location=self.results_directory).run()
     self.experiment_log.to_csv("{0}/experiment_log.csv".format(
         self.results_directory),
                                index=False)
     self.experiment_log.to_pickle("{0}/experiment_log.pickle".format(
         self.results_directory))
 def build_regions(self, proportions, central_region):
     """
     The vagrant experiment has the ability to split its private agents into regions
     :param proportions: the proportions in which we want to split the private agents
     :param central_region: the machines that are going to act as the central region. It will normally hold
     the Marathon user service and all the centralised cloud services
     """
     # We build the regions without considering the central region
     self.regions = general_util.divide_nodes_into_regions(
         proportions, list(self.private_agents.difference(central_region)))
     self.central_region = central_region
     # we now include the central region
     self.regions.append(central_region)
     for i in xrange(len(self.regions)):
         if i == (len(self.regions) - 1):
             region_name = "regioncentral"
         else:
             region_name = "region" + str(i)
         with open("mesos-slave-common", "w") as f:
             f.write("MESOS_ATTRIBUTES=region:{0}".format(region_name))
         # it's not possible to ssh as root into a guest VM. We will copy the file to home and then sudo cp
         general_util.Put(hosts=self.regions[i],
                          local_files=["mesos-slave-common"],
                          remote_location="/home/vagrant").run()
         general_util.Remote(
             "sudo cp /home/vagrant/mesos-slave-common /var/lib/dcos/mesos-slave-common",
             hosts=self.regions[i]).run()
         # we reinitialise the slaves for the attributes to be taken into account
         general_util.Remote("sudo systemctl stop dcos-mesos-slave",
                             hosts=self.regions[i]).run()
         general_util.Remote(
             "sudo rm -f /var/lib/mesos/slave/meta/slaves/latest",
             hosts=self.regions[i]).run()
         general_util.Remote("sudo systemctl start dcos-mesos-slave",
                             hosts=self.regions[i]).run()
         print "The region {0} has the nodes: {1}".format(
             region_name, self.regions[i])
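 # A minimal sketch of how a proportional split such as
 # general_util.divide_nodes_into_regions might behave (an illustrative
 # assumption, not the real implementation): given proportions like [0.5, 0.5]
 # and a list of nodes, it returns one set of nodes per proportion.
 def divide_nodes_into_regions_sketch(proportions, nodes):
     regions = []
     start = 0
     for proportion in proportions:
         size = int(round(proportion * len(nodes)))
         regions.append(set(nodes[start:start + size]))
         start += size
     # any nodes left over because of rounding go into the last region
     regions[-1].update(nodes[start:])
     return regions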
 def install_cassandra_dcos(self, ncassandra, nseeds):
     master = list(self.masters)[0]
     # print "Execute this command in the machine {0}: dcos package install --yes --options=cassandra-config.json cassandra" \
     #     .format(master_name)
     # print "And this: dcos package install cassandra --cli"
     # raw_input("After executing press enter: ")
     self.cassandra_nodes = dcos_util.install_cassandra(
         masternode=master, ncassandra=ncassandra, nseeds=nseeds)
     general_util.Put(local_files=["aux_utilities/ycsb_init.cql"],
                      hosts=self.cassandra_nodes).run()
     general_util.Remote(
         cmd=
         "sudo docker run -v /home/vagrant:/home/vagrant cassandra:3.10 cqlsh -f /home/vagrant/ycsb_init.cql {{{host}}}",
         hosts=list(self.cassandra_nodes)[0],
         process_args={
             'stdout_handlers': [sys.stdout],
             'stderr_handlers': [sys.stderr]
         }).run()
 def install_cassandra(self, ncassandra, nseeds):
     """
     install cassandra natively on the nodes of the central region
     :param ncassandra: the number of cassandra nodes (unused by the current implementation,
     which deploys Cassandra on every node of the central region)
     :param nseeds: the number of seed nodes
     """
     # sample = int(ncassandra)/self.regions.__len__()
     # cassandra_nodes = []
     # for region in self.regions:
     #     cassandra_nodes.extend(list(region)[:sample])
     self.cassandra_nodes = cassandra_util.install_cassandra(
         nodes=set(self.central_region),
         nseeds=int(nseeds),
         dc_name="central_test")
     general_util.Put(local_files=["aux_utilities/ycsb_init.cql"],
                      hosts=self.cassandra_nodes).run()
     general_util.Remote(
         cmd=
         "sudo docker run -v /home/vagrant:/home/vagrant cassandra:3.10 cqlsh -f /home/vagrant/ycsb_init.cql {{{host}}}",
         hosts=list(self.cassandra_nodes)[0],
         process_args={
             'stdout_handlers': [sys.stdout],
             'stderr_handlers': [sys.stderr]
         }).run()
 def check_resilience(
         self):  # TODO: could the region be passed in as a parameter here?
     results = []  # all of the resilience measurements will be collected here
     curl_node = list(self.masters)[0]
     p = general_util.SshProcess(
         'curl "http://leader.mesos/service/marathon-user/v2/tasks"',
         host=curl_node).run()
     d = json.loads(p.stdout)
     fmone_tasks = filter(
         lambda task: task['appId'] ==
         u'/fmonmongorpipe2/fmondocker2/fmoneagentdockerregion2',
         d.get('tasks'))
     kill_host = fmone_tasks[0].get('host')
     general_util.Remote('sudo docker rm -f $(sudo docker ps -a -q)',
                         hosts=kill_host,
                         process_args={
                             "nolog_exit_code": True
                         }).run()
     time1 = time()
     sleep(20)  # give the fmone agent some time to be rescheduled and running again
     p = general_util.SshProcess(
         'curl "http://leader.mesos/service/marathon-user/v2/tasks"',
         host=curl_node).run()
     d = json.loads(p.stdout)
     killed_host = filter(lambda task: (task['host'] == kill_host),
                          d.get('tasks'))
     start_end = [(task.get('stagedAt'), task.get('startedAt'))
                  for task in killed_host]
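     # startedAt is reported as a UTC timestamp while time1 came from the local
     # clock; the 7200-second correction below presumably compensates for a
     # UTC+2 local timezone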
     time_differences = map(
         lambda pair:
         (mktime(strptime(pair[1][:-5], '%Y-%m-%dT%H:%M:%S'))) -
         (time1 - 7200), start_end)
     print "The mean time to recover for a Fmone agent is: {0} and its variance {1}"\
                 .format(mean(time_differences), std(time_differences))
     results.append(mean(time_differences))
     mongo_tasks = filter(
         lambda task: task['appId'] ==
         u'/fmonmongorpipe2/mongor2/mongoregion2', d.get('tasks'))
     kill_host = mongo_tasks[0].get('host')
     general_util.Remote('sudo docker rm -f $(sudo docker ps -a -q)',
                         hosts=kill_host,
                         process_args={
                             "nolog_exit_code": True
                         }).run()
     time1 = time()
     sleep(60)  # give all the fmone agents some time to be up and running again
     p = general_util.SshProcess(
         'curl "http://leader.mesos/service/marathon-user/v2/tasks"',
         host=curl_node).run()
     d = json.loads(p.stdout)
     fmone_tasks = filter(
         lambda task: task['appId'] ==
         u'/fmonmongorpipe2/fmondocker2/fmoneagentdockerregion2',
         d.get('tasks'))
     df = pd.DataFrame(fmone_tasks)
     df['startedAt'] = pd.to_datetime(df['startedAt'])
     last_started = (df.sort_values(
         'startedAt',
         ascending=False).head(1)['startedAt'].values[0].astype('uint64') /
                     1e9)
     print "The mean time to recover a Fmone pipeline is: {0}".format(
         last_started - time1)
     results.append(last_started - time1)
     general_util.Remote('sudo docker rm -f $(sudo docker ps -a -q)',
                         hosts=self.private_agents,
                         process_args={
                             "nolog_exit_code": True
                         }).run()
     time1 = time()
     sleep(260)
     p = general_util.SshProcess(
         'curl "http://leader.mesos/service/marathon-user/v2/tasks"',
         host=curl_node).run()
     d = json.loads(p.stdout)
     fmone_tasks = filter(
         lambda task: task['appId'] ==
         u'/fmonmongorpipe2/fmondocker2/fmoneagentdockerregion2',
         d.get('tasks'))
     df = pd.DataFrame(fmone_tasks)
     df['startedAt'] = pd.to_datetime(df['startedAt'])
     last_started = (df.sort_values(
         'startedAt',
         ascending=False).head(1)['startedAt'].values[0].astype('uint64') /
                     1e9)
     print "The mean time to recover from a general failure is: {0}".format(
         last_started - time1)
     results.append(last_started - time1)
     return results
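 # For context (not part of the repository): the Marathon /v2/tasks endpoint
 # queried above returns JSON roughly shaped like the example below, which is
 # why check_resilience filters on task['appId'] / task['host'] and reads the
 # 'stagedAt' / 'startedAt' timestamps. All field values here are made up.
 #
 # {
 #     "tasks": [
 #         {
 #             "id": "fmonmongorpipe2_fmondocker2_fmoneagentdockerregion2.0a1b2c3d",
 #             "appId": "/fmonmongorpipe2/fmondocker2/fmoneagentdockerregion2",
 #             "host": "192.168.0.21",
 #             "stagedAt": "2018-03-01T10:15:02.000Z",
 #             "startedAt": "2018-03-01T10:15:20.000Z"
 #         }
 #     ]
 # }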