Example #1
 def post(self):
     try:
         cluster_name = self.get_argument("cluster_name", "")
         if cluster_name == "":
             return self.render('error.html', error_msg="Cluster name is empty!")
         conn = utils.get_ec2_conn(self)
         (master_nodes, slave_nodes, zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
         if len(master_nodes) > 0:
             return self.render('error.html', error_msg="Cluster name already exists!")
         num_slave = self.get_argument("num_slave", "2")
         key_pair = self.get_argument("key_pair", "")
         instance_type = self.get_argument("instance_type", "m1.small")
         master_instance_type = self.get_argument("master_instance_type", "m1.small")
         zone = self.get_argument("zone", "us-east-1e")
         ebs_vol_size = self.get_argument("ebs_vol_size", "10")
         swap = self.get_argument("swap", "1024")
         cluster_type = self.get_argument("cluster_type", "mesos")
         (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
         os.environ['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY
         os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_KEY
         key_pair_file = os.getcwd() + "/keys/" + key_pair + ".pem"
         sys.argv = ["spark_ec2.py", "-s", num_slave, "-u", "root", "-k", key_pair, "-i", key_pair_file, "-t", instance_type, "-m", master_instance_type, "-r", "us-east-1", "-z" , zone, "--ebs-vol-size=" + ebs_vol_size, "--swap=" + swap, "--cluster-type=" + cluster_type, "launch", cluster_name]
         t = Thread(target=spark_ec2.main, args=())
         t.daemon = True
         t.start()
         self.render('notice.html', identity_file=key_pair_file)
     except Exception as e:
         print >> stderr, e
         self.render('error.html', error_msg=str(e))
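
Note: these handlers read like Tornado request handlers (self.get_argument, self.render, and self.redirect all match Tornado's RequestHandler API). A minimal sketch of the boilerplate the snippet assumes, where the handler class name and route are hypothetical and only the post() body comes from the example:

    # Hedged sketch: wiring a handler like the one above into a Tornado app.
    # "LaunchHandler" and the "/launch" route are hypothetical names.
    import tornado.ioloop
    import tornado.web

    class LaunchHandler(tornado.web.RequestHandler):
        def post(self):
            pass  # body as in Example #1 above

    def make_app():
        return tornado.web.Application([(r"/launch", LaunchHandler)])

    if __name__ == "__main__":
        make_app().listen(8888)
        tornado.ioloop.IOLoop.current().start()
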
Example #2
 def post(self):
     try:
         cluster_name = self.get_argument("cluster_name", "")
         if cluster_name == "":
             return self.render("error.html", error_msg="Cluster name is empty!")
         conn = utils.get_ec2_conn(self)
         (master_nodes, slave_nodes, zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
         if len(master_nodes) > 0:
             return self.render("error.html", error_msg="Cluster name is already existed!")
         num_slave = self.get_argument("num_slave", "2")
         key_pair = self.get_argument("key_pair", "")
         instance_type = self.get_argument("instance_type", "m1.small")
         master_instance_type = self.get_argument("master_instance_type", "m1.small")
         zone = self.get_argument("zone", "us-east-1e")
         ebs_vol_size = self.get_argument("ebs_vol_size", "10")
         swap = self.get_argument("swap", "1024")
         cluster_type = self.get_argument("cluster_type", "mesos")
         (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
         os.environ["AWS_ACCESS_KEY_ID"] = AWS_ACCESS_KEY
         os.environ["AWS_SECRET_ACCESS_KEY"] = AWS_SECRET_KEY
         key_pair_file = os.getcwd() + "/keys/" + key_pair + ".pem"
         sys.argv = [
             "spark_ec2.py",
             "-s",
             num_slave,
             "-u",
             "root",
             "-k",
             key_pair,
             "-i",
             key_pair_file,
             "-t",
             instance_type,
             "-m",
             master_instance_type,
             "-r",
             "us-east-1",
             "-z",
             zone,
             "--ebs-vol-size=" + ebs_vol_size,
             "--swap=" + swap,
             "--cluster-type=" + cluster_type,
             "launch",
             cluster_name,
         ]
         t = Thread(target=spark_ec2.main, args=())
         t.daemon = True
         t.start()
         self.render("notice.html", identity_file=key_pair_file)
     except Exception as e:
         print >> stderr, e
         self.render("error.html", error_msg=str(e))
Example #3
    def post(self):
        try:
            cluster_name = self.get_argument("cluster_name", "")
            if cluster_name == "":
                return self.render('error.html',
                                   error_msg="Cluster name is empty!")
            conn = utils.get_ec2_conn(self)
            (master_nodes, slave_nodes,
             zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
            if len(master_nodes) > 0:
                return self.render(
                    'error.html', error_msg="Cluster name already exists!")
            num_slave = self.get_argument("num_slave", "2")
            key_pair = self.get_argument("key_pair", "")
            instance_type = self.get_argument("instance_type", "m3.large")
            #master_instance_type                = self.get_argument("master_instance_type", "m1.small")
            #zone                                = self.get_argument("zone", "us-east-1e")
            ebs_vol_size = self.get_argument("ebs_vol_size", "10")
            #swap                                = self.get_argument("swap", "1024")
            cluster_type = self.get_argument("cluster_type", "mesos")
            elastic_ip = self.get_argument("elastic_ip", "")
            (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
            os.environ['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY
            os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_KEY
            key_pair_file = os.getcwd() + "/keys/" + key_pair + ".pem"

            command = [
                installer_dir + "launch-cluster.sh",
                cluster_name,
                num_slave,
                "--elastic-ip",
                elastic_ip,
                "--ssh-key",
                key_pair,
                "--type",
                instance_type,
                #"--zone", zone,
                "--ebs",
                ebs_vol_size
            ]
            print("Running : " + ' '.join(command))

            subprocess.Popen(command)

            #save the (cluster_name, elastic_ip) to file
            utils.set_elastic_ip(cluster_name, elastic_ip)

            time.sleep(10)
            self.redirect("/")
        except Exception as e:
            print >> stderr, e
            self.render('error.html', error_msg=str(e))
Example #4
    def post(self):
        try:
            cluster_name = self.get_argument("cluster_name", "")
            if cluster_name == "":
                return self.render('error.html', error_msg="Cluster name is empty!")
            conn = utils.get_ec2_conn(self)
            (master_nodes, slave_nodes, zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
            if len(master_nodes) > 0:
                return self.render('error.html', error_msg="Cluster name already exists!")
            num_slave                           = self.get_argument("num_slave", "2")
            key_pair                            = self.get_argument("key_pair", "")
            instance_type                       = self.get_argument("instance_type", "m3.large")
            #master_instance_type                = self.get_argument("master_instance_type", "m1.small")
            #zone                                = self.get_argument("zone", "us-east-1e")
            ebs_vol_size                        = self.get_argument("ebs_vol_size", "10")
            #swap                                = self.get_argument("swap", "1024")
            cluster_type                        = self.get_argument("cluster_type", "mesos")
            elastic_ip                          = self.get_argument("elastic_ip", "")
            (AWS_ACCESS_KEY, AWS_SECRET_KEY)    = utils.get_aws_credentials()
            os.environ['AWS_ACCESS_KEY_ID']     = AWS_ACCESS_KEY
            os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_KEY
            key_pair_file = os.getcwd() + "/keys/" + key_pair + ".pem"

            command = [installer_dir+"launch-cluster.sh", 
              cluster_name, 
              num_slave, 
              "--elastic-ip", elastic_ip, 
              "--ssh-key", key_pair,
              "--type", instance_type,
              #"--zone", zone, 
              "--ebs", ebs_vol_size
              ]
            print ("Running : " + ' '.join(command))
            
            subprocess.Popen(command)

            #save the (cluster_name, elastic_ip) to file
            utils.set_elastic_ip(cluster_name, elastic_ip)

            time.sleep(10)
            self.redirect("/")
        except Exception as e:
            print >> stderr, e
            self.render('error.html', error_msg=str(e))
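
Note: Examples #3 and #4 persist the (cluster_name, elastic_ip) pair through utils.set_elastic_ip, and Examples #9 and #10 later read and remove it with utils.get_elastic_ip and utils.delete_elastic_ip. Those helpers are not shown anywhere in this section; a plausible sketch, assuming a small JSON file as the store (the file name is hypothetical):

    # Hedged sketch of the utils elastic-ip helpers; the real implementation
    # is not shown in these examples.
    import json
    import os

    ELASTIC_IP_FILE = "elastic_ips.json"  # hypothetical location

    def _load_elastic_ips():
        if not os.path.exists(ELASTIC_IP_FILE):
            return {}
        with open(ELASTIC_IP_FILE) as f:
            return json.load(f)

    def _save_elastic_ips(data):
        with open(ELASTIC_IP_FILE, "w") as f:
            json.dump(data, f)

    def set_elastic_ip(cluster_name, elastic_ip):
        data = _load_elastic_ips()
        data[cluster_name] = elastic_ip
        _save_elastic_ips(data)

    def get_elastic_ip(cluster_name):
        return _load_elastic_ips().get(cluster_name, "")

    def delete_elastic_ip(cluster_name):
        data = _load_elastic_ips()
        data.pop(cluster_name, None)
        _save_elastic_ips(data)
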
Example #5
    def get(self):
        try:
            cluster_name = self.get_argument("cluster_name", "")
            dns = self.get_argument("dns", "")
            service = self.get_argument("service", "")
            action = self.get_argument("action", "")
            key_pair = self.get_argument("key_pair", "")
            key_pair_file = os.getcwd() + "/keys/" + key_pair + ".pem"

            # Execute action
            if service == "mesos":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "spark-ec2/mesos/start-mesos")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "spark-ec2/mesos/stop-mesos")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "spark-ec2/mesos/stop-mesos && spark-ec2/mesos/start-mesos")
            elif service == "shark":
                if action == "start":
                    command = (
                        "rsync --ignore-existing -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' "
                        + "'%s/' 'root@%s:/root/shark-0.2/conf'"
                    ) % (key_pair_file, "deploy.shark", dns)
                    subprocess.check_call(command, shell=True)
                    yield async_ssh(
                        key_pair_file, dns, "nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &"
                    )
                    time.sleep(2)  # Wait for Shark to restart
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "ps ax|grep shark.SharkServer|awk '{print $1}'|xargs kill")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file,
                        dns,
                        "ps ax|grep shark.SharkServer|awk '{print $1}'|xargs kill && nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &",
                    )
                    time.sleep(2)  # Wait for Shark to restart
            elif service == "ganglia":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad start && /etc/init.d/httpd start")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad stop && /etc/init.d/httpd stop")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad restart && /etc/init.d/httpd restart")
            elif service == "ephemeral_hdfs":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/start-dfs.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/stop-dfs.sh")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file, dns, "~/ephemeral-hdfs/bin/stop-dfs.sh && ~/ephemeral-hdfs/bin/start-dfs.sh"
                    )
            elif service == "persistent_hdfs":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "~/persistent-hdfs/bin/start-dfs.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "~/persistent-hdfs/bin/stop-dfs.sh")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file, dns, "~/persistent-hdfs/bin/stop-dfs.sh && ~/persistent-hdfs/bin/start-dfs.sh"
                    )
            elif service == "hadoop_mapreduce":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/start-mapred.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/stop-mapred.sh")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file,
                        dns,
                        "~/ephemeral-hdfs/bin/stop-mapred.sh && ~/ephemeral-hdfs/bin/start-mapred.sh",
                    )
            elif service == "cluster":
                if action == "start":
                    (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
                    os.environ["AWS_ACCESS_KEY_ID"] = AWS_ACCESS_KEY
                    os.environ["AWS_SECRET_ACCESS_KEY"] = AWS_SECRET_KEY
                    sys.argv = [
                        "spark_ec2.py",
                        "-u",
                        "root",
                        "-k",
                        key_pair,
                        "-i",
                        key_pair_file,
                        "start",
                        cluster_name,
                    ]
                    t = Thread(target=spark_ec2.main, args=())
                    t.daemon = True
                    t.start()
                    self.render("notice.html", identity_file=key_pair_file)
                    return
                elif action == "stop":
                    conn = utils.get_ec2_conn(self)
                    (master_nodes, slave_nodes, zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
                    for inst in master_nodes:
                        if inst.state not in ["shutting-down", "terminated"]:
                            inst.stop()
                    print "Stopping slaves..."
                    for inst in slave_nodes:
                        if inst.state not in ["shutting-down", "terminated"]:
                            inst.stop()
                    if zoo_nodes != []:
                        print "Stopping zoo..."
                        for inst in zoo_nodes:
                            if inst.state not in ["shutting-down", "terminated"]:
                                inst.stop()
                    time.sleep(1)
                    self.redirect("/")
                    return
                elif action == "terminate":
                    conn = utils.get_ec2_conn(self)
                    (master_nodes, slave_nodes, zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
                    for inst in master_nodes:
                        inst.terminate()
                    for inst in slave_nodes:
                        inst.terminate()
                    if zoo_nodes != []:
                        for inst in zoo_nodes:
                            inst.terminate()
                    time.sleep(1)
                    self.redirect("/")
                    return
            time.sleep(1)
            self.redirect("/cluster/" + cluster_name)
        except Exception as e:
            # print >> stderr, (e)
            self.render("error.html", error_msg=str(e))
Example #6
 def get(self):
     (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
     self.render("settings.html", AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY, error_code=-1)
Example #7
 def get(self):
     (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
     self.render('settings.html', AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY, error_code=-1)
Example #8
 def get(self):
     try:
         cluster_name = self.get_argument("cluster_name", "")
         dns = self.get_argument("dns", "")
         service = self.get_argument("service", "")
         action = self.get_argument("action", "")
         key_pair = self.get_argument("key_pair", "")
         key_pair_file = os.getcwd() + "/keys/" + key_pair + ".pem"
         
         # Execute action
         if service == "mesos":
             if action == "start":
                 yield async_ssh(key_pair_file, dns, "spark-ec2/mesos/start-mesos")
             elif action == "stop":
                 yield async_ssh(key_pair_file, dns, "spark-ec2/mesos/stop-mesos")
             elif action == "restart":
                 yield async_ssh(key_pair_file, dns, "spark-ec2/mesos/stop-mesos && spark-ec2/mesos/start-mesos")
         elif service == "shark":
             if action == "start":
                 command = (("rsync --ignore-existing -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " + 
                             "'%s/' 'root@%s:/root/shark-0.2/conf'") % (key_pair_file, 'deploy.shark', dns))
                 subprocess.check_call(command, shell=True)
                 yield async_ssh(key_pair_file, dns, "nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &")
                 time.sleep(2)  # Wait for Shark to restart
             elif action == "stop":
                 yield async_ssh(key_pair_file, dns, "ps ax|grep shark.SharkServer|awk \"{print $1}\"|xargs kill")
             elif action == "restart":
                 yield async_ssh(key_pair_file, dns, "ps ax|grep shark.SharkServer|awk '{print $1}'|xargs kill && nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &")
                 time.sleep(2)  # Wait for Shark to restart
         elif service == "ganglia":
             if action == "start":
                 yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad start && /etc/init.d/httpd start")
             elif action == "stop":
                 yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad stop && /etc/init.d/httpd stop")
             elif action == "restart":
                 yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad restart && /etc/init.d/httpd restart")
         elif service == "ephemeral_hdfs":
             if action == "start":
                 yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/start-dfs.sh")
             elif action == "stop":
                 yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/stop-dfs.sh")
             elif action == "restart":
                 yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/stop-dfs.sh && ~/ephemeral-hdfs/bin/start-dfs.sh")
         elif service == "persistent_hdfs":
             if action == "start":
                 yield async_ssh(key_pair_file, dns, "~/persistent-hdfs/bin/start-dfs.sh")
             elif action == "stop":
                 yield async_ssh(key_pair_file, dns, "~/persistent-hdfs/bin/stop-dfs.sh")
             elif action == "restart":
                 yield async_ssh(key_pair_file, dns, "~/persistent-hdfs/bin/stop-dfs.sh && ~/persistent-hdfs/bin/start-dfs.sh")
         elif service == "hadoop_mapreduce":
             if action == "start":
                 yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/start-mapred.sh")
             elif action == "stop":
                 yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/stop-mapred.sh")
             elif action == "restart":
                 yield async_ssh(key_pair_file, dns, "~/ephemeral-hdfs/bin/stop-mapred.sh && ~/ephemeral-hdfs/bin/start-mapred.sh")
         elif service == "cluster":
             if action == "start":
                 (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
                 os.environ['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY
                 os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_KEY
                 sys.argv = ["spark_ec2.py", "-u", "root", "-k", key_pair, "-i", key_pair_file, "start", cluster_name]
                 t = Thread(target=spark_ec2.main, args=())
                 t.daemon = True
                 t.start()
                 self.render('notice.html', identity_file=key_pair_file)
                 return
             elif action == "stop":
                 conn = utils.get_ec2_conn(self)
                 (master_nodes, slave_nodes, zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
                 for inst in master_nodes:
                     if inst.state not in ["shutting-down", "terminated"]:
                       inst.stop()
                 print "Stopping slaves..."
                 for inst in slave_nodes:
                     if inst.state not in ["shutting-down", "terminated"]:
                       inst.stop()
                 if zoo_nodes != []:
                     print "Stopping zoo..."
                     for inst in zoo_nodes:
                       if inst.state not in ["shutting-down", "terminated"]:
                         inst.stop()
                 time.sleep(1)
                 self.redirect("/")
                 return
             elif action == "terminate":
                 conn = utils.get_ec2_conn(self)
                 (master_nodes, slave_nodes, zoo_nodes) = utils.get_existing_cluster(conn, cluster_name)
                 for inst in master_nodes:
                     inst.terminate()
                 for inst in slave_nodes:
                     inst.terminate()
                 if zoo_nodes != []:
                     for inst in zoo_nodes:
                       inst.terminate()
                 time.sleep(1)
                 self.redirect("/")
                 return
         time.sleep(1)
         self.redirect("/cluster/" + cluster_name)
     except Exception as e:
         # print >> stderr, (e)
         self.render('error.html', error_msg=str(e))
Example #9
    def get(self):
        try:
            cluster_name    = self.get_argument("cluster_name")
            dns             = self.get_argument("dns")
            service         = self.get_argument("service")
            action          = self.get_argument("action")
            key_pair        = self.get_argument("key_pair")
            key_pair_file   = os.getcwd() + "/keys/" + key_pair + ".pem"
            
            # Execute action
            if service == "mesos":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "/root/spark-ec2/mesos/start-mesos")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "/root/spark-ec2/mesos/stop-mesos")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "/root/spark-ec2/mesos/stop-mesos && /root/spark-ec2/mesos/start-mesos")
            elif service == "shark":
                if action == "start":
                    command = (("rsync --ignore-existing -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " + 
                                "'%s/' 'root@%s:/root/shark-0.2/conf'") % (key_pair_file, 'deploy.shark', dns))
                    subprocess.check_call(command, shell=True)
                    yield async_ssh(key_pair_file, dns, "nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &")
                    time.sleep(2)  # Wait for Shark to restart
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "ps ax|grep shark.SharkServer|awk \"{print $1}\"|xargs kill")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "ps ax|grep shark.SharkServer|awk '{print $1}'|xargs kill && nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &")
                    time.sleep(2)  # Wait for Shark to restart
            elif service == "ganglia":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad start && /etc/init.d/httpd start")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad stop && /etc/init.d/httpd stop")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "/etc/init.d/gmetad restart && /etc/init.d/httpd restart")
            elif service == "pa":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "pssh -v -h /root/spark-ec2/slaves -l root '/root/BigR/server/exe/start-rserve.sh' && /root/BigR/server/exe/start-pa-server.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "/root/BigR/server/exe/stop-pa-server.sh")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "/root/BigR/server/exe/stop-pa-server.sh && pssh -v -h /root/spark-ec2/slaves -l root '/root/BigR/server/exe/start-rserve.sh' && /root/BigR/server/exe/start-pa-server.sh")
            elif service == "pi":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "/root/pInsights/run-pInsights-server.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "pkill -f ipython")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "/root/pInsights/run-pInsights-server.sh")
            elif service == "ephemeral_hdfs":
                if action == "start":
                    yield async_ssh(key_pair_file, dns, "/root/ephemeral-hdfs/bin/start-dfs.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "/root/ephemeral-hdfs/bin/stop-dfs.sh")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns, "/root/ephemeral-hdfs/bin/stop-dfs.sh && /root/ephemeral-hdfs/bin/start-dfs.sh")
            elif service == "cluster":
                if action == "start":
                    (AWS_ACCESS_KEY, AWS_SECRET_KEY) = utils.get_aws_credentials()
                    os.environ['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY
                    os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_KEY
                    # get the elastic-ip associated with cluster_name
                    elastic_ip = utils.get_elastic_ip(cluster_name)
                    command = [installer_dir+"start-cluster.sh", 
                      cluster_name, 
                      "--elastic-ip", elastic_ip]
                    print ("Running : " + ' '.join(command))
                    subprocess.Popen(command)
                    time.sleep(5)
                    self.redirect("/")
                    return
                elif action == "stop":
                    command = [installer_dir+"stop-cluster.sh", cluster_name]
                    print ("Running : " + ' '.join(command))
                    subprocess.Popen(command)
                    time.sleep(3)
                    self.redirect("/")
                    return
                elif action == "terminate": 
                    command = [installer_dir+"terminate-cluster.sh", cluster_name]
                    print ("Running : " + ' '.join(command))
                    subprocess.Popen(command)

                    # delete the elastic-ip associated with cluster_name
                    utils.delete_elastic_ip(cluster_name)

                    time.sleep(3)
                    self.redirect("/")
                    return
            time.sleep(1)
            self.redirect("/cluster/" + cluster_name)
        except Exception as e:
            # print >> stderr, (e)
            self.render('error.html', error_msg=str(e))
Example #10
    def get(self):
        try:
            cluster_name = self.get_argument("cluster_name")
            dns = self.get_argument("dns")
            service = self.get_argument("service")
            action = self.get_argument("action")
            key_pair = self.get_argument("key_pair")
            key_pair_file = os.getcwd() + "/keys/" + key_pair + ".pem"

            # Execute action
            if service == "mesos":
                if action == "start":
                    yield async_ssh(key_pair_file, dns,
                                    "/root/spark-ec2/mesos/start-mesos")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns,
                                    "/root/spark-ec2/mesos/stop-mesos")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file, dns,
                        "/root/spark-ec2/mesos/stop-mesos && /root/spark-ec2/mesos/start-mesos"
                    )
            elif service == "shark":
                if action == "start":
                    command = ((
                        "rsync --ignore-existing -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' "
                        + "'%s/' 'root@%s:/root/shark-0.2/conf'") %
                               (key_pair_file, 'deploy.shark', dns))
                    subprocess.check_call(command, shell=True)
                    yield async_ssh(
                        key_pair_file, dns,
                        "nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &"
                    )
                    time.sleep(2)  # Wait for Shark to restart
                elif action == "stop":
                    yield async_ssh(
                        key_pair_file, dns,
                        "ps ax|grep shark.SharkServer|awk \"{print $1}\"|xargs kill"
                    )
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file, dns,
                        "ps ax|grep shark.SharkServer|awk '{print $1}'|xargs kill && nohup ~/shark-0.2/bin/shark --service sharkserver >/dev/null &"
                    )
                    time.sleep(2)  # Wait for Shark to restart
            elif service == "ganglia":
                if action == "start":
                    yield async_ssh(
                        key_pair_file, dns,
                        "/etc/init.d/gmetad start && /etc/init.d/httpd start")
                elif action == "stop":
                    yield async_ssh(
                        key_pair_file, dns,
                        "/etc/init.d/gmetad stop && /etc/init.d/httpd stop")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file, dns,
                        "/etc/init.d/gmetad restart && /etc/init.d/httpd restart"
                    )
            elif service == "pa":
                if action == "start":
                    yield async_ssh(
                        key_pair_file, dns,
                        "pssh -v -h /root/spark-ec2/slaves -l root '/root/BigR/server/exe/start-rserve.sh' && /root/BigR/server/exe/start-pa-server.sh"
                    )
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns,
                                    "/root/BigR/server/exe/stop-pa-server.sh")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file, dns,
                        "/root/BigR/server/exe/stop-pa-server.sh && pssh -v -h /root/spark-ec2/slaves -l root '/root/BigR/server/exe/start-rserve.sh' && /root/BigR/server/exe/start-pa-server.sh"
                    )
            elif service == "pi":
                if action == "start":
                    yield async_ssh(key_pair_file, dns,
                                    "/root/pInsights/run-pInsights-server.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns, "pkill -f ipython")
                elif action == "restart":
                    yield async_ssh(key_pair_file, dns,
                                    "/root/pInsights/run-pInsights-server.sh")
            elif service == "ephemeral_hdfs":
                if action == "start":
                    yield async_ssh(key_pair_file, dns,
                                    "/root/ephemeral-hdfs/bin/start-dfs.sh")
                elif action == "stop":
                    yield async_ssh(key_pair_file, dns,
                                    "/root/ephemeral-hdfs/bin/stop-dfs.sh")
                elif action == "restart":
                    yield async_ssh(
                        key_pair_file, dns,
                        "/root/ephemeral-hdfs/bin/stop-dfs.sh && /root/ephemeral-hdfs/bin/start-dfs.sh"
                    )
            elif service == "cluster":
                if action == "start":
                    (AWS_ACCESS_KEY,
                     AWS_SECRET_KEY) = utils.get_aws_credentials()
                    os.environ['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY
                    os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_KEY
                    # get the elastic-ip associated with cluster_name
                    elastic_ip = utils.get_elastic_ip(cluster_name)
                    command = [
                        installer_dir + "start-cluster.sh", cluster_name,
                        "--elastic-ip", elastic_ip
                    ]
                    print("Running : " + ' '.join(command))
                    subprocess.Popen(command)
                    time.sleep(5)
                    self.redirect("/")
                    return
                elif action == "stop":
                    command = [installer_dir + "stop-cluster.sh", cluster_name]
                    print("Running : " + ' '.join(command))
                    subprocess.Popen(command)
                    time.sleep(3)
                    self.redirect("/")
                    return
                elif action == "terminate":
                    command = [
                        installer_dir + "terminate-cluster.sh", cluster_name
                    ]
                    print("Running : " + ' '.join(command))
                    subprocess.Popen(command)

                    # delete the elastic-ip associated with cluster_name
                    utils.delete_elastic_ip(cluster_name)

                    time.sleep(3)
                    self.redirect("/")
                    return
            time.sleep(1)
            self.redirect("/cluster/" + cluster_name)
        except Exception as e:
            # print >> stderr, (e)
            self.render('error.html', error_msg=str(e))
Example #11
 def establish_s3_connection(self):
     """Get a boto connection for s3 operations using credentials from the ini file"""
     if self.s3conn is None:
         self.s3conn = boto.s3.connection.S3Connection(**utils.get_aws_credentials())
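
Note: the ** unpacking here implies that utils.get_aws_credentials() returns a dict keyed by boto's parameter names (aws_access_key_id, aws_secret_access_key), unlike Examples #1 through #10, where the same call returns a two-element tuple. A sketch of the dict-returning variant, assuming credentials live in an ini file as the docstring says (file name and key names are hypothetical):

    # Hedged sketch: a dict-returning get_aws_credentials() compatible with
    # S3Connection(**...). The ini path and option names are hypothetical.
    import ConfigParser  # configparser on Python 3

    def get_aws_credentials(ini_path="aws.ini"):
        config = ConfigParser.ConfigParser()
        config.read(ini_path)
        return {
            "aws_access_key_id": config.get("aws", "access_key"),
            "aws_secret_access_key": config.get("aws", "secret_key"),
        }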