# Hard-coded cluster defaults that are not exposed as command-line flags.
# NOTE(review): this whole section presumably runs under an
# `if action == "launch":` guard earlier in the file — confirm; the
# original indentation was lost.
opts.spot_price = None               # on-demand instances, no spot bidding
opts.master_instance_type = ""       # empty -> master uses the worker type, presumably; verify in launch_cluster
opts.wait = 160                      # seconds to wait for instances to come up
opts.hadoop_major_version = "1"
opts.ganglia = True                  # install the Ganglia monitoring web UI
opts.spark_version = "1.0.1"
opts.swap = 1024                     # swap size — assumed MB; TODO confirm against launch_cluster
opts.worker_instances = 1
opts.master_opts = ""

# Reuse an already-provisioned cluster on --resume; otherwise launch fresh
# instances and wait for them to reach a running state before configuring.
if opts.resume:
    (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
    (master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
    wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
# Deploy config/SSH keys and start services on all nodes (True -> deploy key).
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
master = master_nodes[0].public_dns_name
# Install the thunder package itself on the master node.
install_thunder(master, opts)

print "\n\n"
print "-------------------------------"
print "Cluster successfully launched!"
print "Go to http://%s:8080 to see the web UI for your cluster" % master
print "-------------------------------"
print "\n"

# For every action other than "launch", look up the existing cluster's
# nodes so the handlers below can reach the master.
if action != "launch":
    conn = ec2.connect_to_region(opts.region)
    (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
    master = master_nodes[0].public_dns_name
# NOTE(review): this block largely duplicates the defaults/launch sequence
# above but pins an older Spark version ("0.9.0" vs "1.0.1") and omits the
# worker_instances / master_opts defaults and the success banner — it looks
# like leftover code from a merge; confirm whether it is reachable at all.
opts.spot_price = None               # on-demand instances, no spot bidding
opts.master_instance_type = ""       # empty -> presumably same type as workers
opts.wait = 160                      # seconds to wait for instances to come up
opts.hadoop_major_version = "1"
opts.ganglia = True                  # install the Ganglia monitoring web UI
opts.spark_version = "0.9.0"
opts.swap = 1024                     # swap size — assumed MB; TODO confirm

# Reuse an already-provisioned cluster on --resume; otherwise launch fresh
# instances and wait for them before configuring.
if opts.resume:
    (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
    (master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
    wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
master = master_nodes[0].public_dns_name
install_thunder(master, opts)

# For non-launch actions, look up the running cluster and its master node.
if action != "launch":
    conn = ec2.connect_to_region(opts.region)
    (master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
    master = master_nodes[0].public_dns_name

# Login to the cluster
if action == "login":
    print "Logging into master " + master + "..."
    proxy_opt = []
    # NOTE(review): call is truncated in this view — the ssh invocation
    # continues past the end of the visible source.
    subprocess.check_call(