def main():
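    """Spin up an ephemeral cluster through Cloudera Director, run a Spark
    job from the gateway node, then tear the cluster down."""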
    parser = argparse.ArgumentParser(prog='ephemeral-spark-submit.py')
    parser.add_argument(
        '--admin-username',
        default="admin",
        help='Name of a user with administrative access (defaults to %(default)s)')
    parser.add_argument(
        '--admin-password',
        default="admin",
        help='Password for the administrative user (defaults to %(default)s)')
    parser.add_argument(
        '--server',
        default="http://localhost:7189",
        help="Cloudera Director server URL (defaults to %(default)s)")
    parser.add_argument(
        '--cm',
        help="The name of the Cloudera Manager server to use in Director")
    parser.add_argument('--environment',
                        help="The name of the Environment to use in Director")
    parser.add_argument(
        '--jar',
        help="JAR for the Spark job to run on the ephemeral cluster")
    parser.add_argument('--jarclass', help="The --class flag for spark-submit")
    parser.add_argument('--args', help="Arguments passed to the JAR")
    parser.add_argument('--script',
                        help="Script to run on the gateway after the Spark job")
    parser.add_argument('config_file',
                        help="Cluster configuration file (.ini)")
    args = parser.parse_args()

    if not isfile(args.config_file):
        print 'Error: "%s" not found or not a file' % args.config_file
        return -1

    config = ConfigParser.SafeConfigParser()
    config.read(args.config_file)
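    # The parsed .ini is handed to the cluster helpers below, which use it for
    # cluster creation and for the SSH/HDFS steps on the gateway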

    # Create an authenticated Director client
    client = cluster.get_authenticated_client(args)

    # Create the cluster and wait until it is ready
    cluster_name = cluster.create_cluster(client, args.environment, args.cm,
                                          config)
    print 'Waiting for the cluster to be ready. Check the web interface for details.'
    cluster.wait_for_cluster(client, args.environment, args.cm, cluster_name)
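    # Log in to the Director server directly to inspect the new cluster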
    client = ApiClient(args.server)
    AuthenticationApi(client).login(
        Login(username=args.admin_username, password=args.admin_password))
    clusters = ClustersApi(client)
    eph_cluster = clusters.get(args.environment, args.cm, cluster_name)
    instances = eph_cluster.instances
    # Find the gateway node and grab its public DNS name
    gateway = None
    for instance in instances:
        if str(instance.virtualInstance.template.name) == 'gateway':
            gateway = instance
            break
    if gateway is None:
        print 'Error: no gateway instance found in cluster "%s"' % cluster_name
        return -1
    gateway = gateway.properties['publicDnsName']
    print 'The gateway URL is: %s' % gateway

    # Copy the JAR to the gateway
    copy_jar(args.jar, gateway, config)
    # Copy the post-job script to the gateway
    copy_script(args.script, gateway, config)
    # Create a directory in HDFS with the correct permissions
    configure_hdfs(gateway, config)
    # Submit the Spark job
    execute_spark(args.jar, args.jarclass, args.args, gateway, config)
    # Run the post-job script
    execute_script(args.script, gateway, config)
    # Destroy the cluster
    print "Job complete, terminating the cluster"
    clusters.delete(args.environment, args.cm, cluster_name)

    return 0
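
# Entry point sketch: assumes `sys` is imported at the top of this file and
# that the script does not already define its own __main__ guard.
if __name__ == '__main__':
    sys.exit(main())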