Example #1
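Note: the examples on this page assume a shared module-level context along these lines; the import paths for ClusterUtils and params are guesses from how the code uses them, and each script presumably ends with the usual entry-point guard.

import getopt
import subprocess
import sys

# Hypothetical imports: ClusterUtils wraps the Databricks REST API, and
# `params` carries the parsed command-line arguments (workspace, token,
# clusterid, private_key_file, ...).
from clusterutils import ClusterUtils
import params

# ... main() as defined in each example ...

if __name__ == '__main__':
    main()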
def main():
    master_addr = ClusterUtils.cluster_get_master_addr(params.workspace,
                                                       params.clusterid,
                                                       params.token)
    if master_addr is None:
        print("Error, didn't get master address")
        sys.exit(1)
    print("Master node address is: %s" % master_addr)

    print("Copying script")
    rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (
        params.private_key_file, params.local_script, master_addr,
        params.script_dest)
    print("rsync command: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)

    print("Copying source")
    rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (
        params.private_key_file, params.source_tgz, master_addr,
        params.tgz_dest)
    print("rsync command: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)

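    # Wrap the pipeline in `bash -c` because PIPESTATUS is a bash feature:
    # `tee` captures the build output locally, while PIPESTATUS[0] keeps
    # ssh's exit status so check_call still raises on a failed build.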
    ssh_command = "bash -c 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s -p 2200 -i %s %s %s %s %s 2>&1 | tee buildout; if [ `echo ${PIPESTATUS[0]}` -ne 0 ]; then false; else true; fi'" % (
        master_addr, params.private_key_file, params.script_dest,
        params.tgz_dest, params.base_spark_pom_version, params.build_profiles)
    print("ssh command: %s" % ssh_command)
    subprocess.check_call(ssh_command, shell=True)

    print("Copying built tarball back")
    rsync_command = "rsync  -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" ubuntu@%s:/home/ubuntu/spark-rapids-built.tgz ./" % (
        params.private_key_file, master_addr)
    print("rsync command to get built tarball: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)
Example #2
def main():
    workspace = 'https://dbc-9ff9942e-a9c4.cloud.databricks.com'
    token = ''
    clusterid = '0617-140138-umiak14'
    delete = False

    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'hs:t:c:d',
            ['workspace=', 'token=', 'clusterid=', 'delete'])
    except getopt.GetoptError:
        print('shutdown.py -s <workspace> -t <token> -c <clusterid> [-d]')
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print('shutdown.py -s <workspace> -t <token> -c <clusterid> [-d]')
            sys.exit()
        elif opt in ('-s', '--workspace'):
            workspace = arg
        elif opt in ('-t', '--token'):
            token = arg
        elif opt in ('-c', '--clusterid'):
            clusterid = arg
        elif opt in ('-d', '--delete'):
            delete = True

    print('-s is ' + workspace)
    print('-c is ' + clusterid)

    if not clusterid:
        print("You must specify clusterid!")
        sys.exit(1)

    if not token:
        print("You must specify token!")
        sys.exit(1)

    if delete:
        ClusterUtils.delete_cluster(workspace, clusterid, token)
    else:
        ClusterUtils.terminate_cluster(workspace, clusterid, token)
Example #3
def main():
    """Define main function."""
    master_addr = ClusterUtils.cluster_get_master_addr(params.workspace, params.clusterid, params.token)
    if master_addr is None:
        print("Error, didn't get master address")
        sys.exit(1)
    print("Master node address is: %s" % master_addr)

    print("Copying script")
    rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\"" \
        " %s ubuntu@%s:%s" % (params.private_key_file, params.local_script, master_addr, params.script_dest)
    print("rsync command: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)

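    # Set environment variables inline on the remote side and run the test
    # script there; `tee` mirrors output into testout while PIPESTATUS[0]
    # propagates the script's real exit code past the pipe.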
    ssh_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s -p 2200 -i %s " \
        "'LOCAL_JAR_PATH=%s SPARK_CONF=%s BASE_SPARK_VER=%s bash %s %s 2>&1 | tee testout; " \
        "if [ ${PIPESTATUS[0]} -ne 0 ]; then false; else true; fi'" % \
        (master_addr, params.private_key_file, params.jar_path, params.spark_conf, params.base_spark_pom_version,
         params.script_dest, ' '.join(params.script_args))
    print("ssh command: %s" % ssh_command)
    subprocess.check_call(ssh_command, shell=True)
Example #4
def main():
    master_addr = ClusterUtils.cluster_get_master_addr(params.workspace,
                                                       params.clusterid,
                                                       params.token)
    if master_addr is None:
        print("Error, didn't get master address")
        sys.exit(1)
    print("Master node address is: %s" % master_addr)

    print("Copying script")
    rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (
        params.private_key_file, params.local_script, master_addr,
        params.script_dest)
    print("rsync command: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)

    ssh_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s -p 2200 -i %s %s %s 2>&1 | tee testout; if [ `echo ${PIPESTATUS[0]}` -ne 0 ]; then false; else true; fi" % (
        master_addr, params.private_key_file, params.script_dest,
        params.jar_path)
    print("ssh command: %s" % ssh_command)
    subprocess.check_call(ssh_command, shell=True)
Example #5
def main():
    workspace = 'https://dbc-9ff9942e-a9c4.cloud.databricks.com'
    token = ''
    sshkey = ''
    cluster_name = 'CI-GPU-databricks-0.4.0-SNAPSHOT'
    idletime = 240
    runtime = '7.0.x-gpu-ml-scala2.12'
    num_workers = 1
    worker_type = 'g4dn.xlarge'
    driver_type = 'g4dn.xlarge'

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hw:t:k:n:i:r:o:d:e:', [
            'workspace=', 'token=', 'sshkey=', 'clustername=', 'idletime=',
            'runtime=', 'workertype=', 'drivertype=', 'numworkers='
        ])
    except getopt.GetoptError:
        print(
            'create.py -w <workspace> -t <token> -k <sshkey> -n <clustername> -i <idletime> -r <runtime> -o <workernodetype> -d <drivernodetype> -e <numworkers>',
            file=sys.stderr)
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print(
                'create.py -w <workspace> -t <token> -k <sshkey> -n <clustername> -i <idletime> -r <runtime> -o <workernodetype> -d <drivernodetype> -e <numworkers>',
                file=sys.stderr)
            sys.exit()
        elif opt in ('-w', '--workspace'):
            workspace = arg
        elif opt in ('-t', '--token'):
            token = arg
        elif opt in ('-k', '--sshkey'):
            sshkey = arg
        elif opt in ('-n', '--clustername'):
            cluster_name = arg
        elif opt in ('-i', '--idletime'):
            idletime = arg
        elif opt in ('-r', '--runtime'):
            runtime = arg
        elif opt in ('-o', '--workertype'):
            worker_type = arg
        elif opt in ('-d', '--drivertype'):
            driver_type = arg
        elif opt in ('-e', '--numworkers'):
            num_workers = arg

    print('-w is ' + workspace, file=sys.stderr)
    print('-k is ' + sshkey, file=sys.stderr)
    print('-n is ' + cluster_name, file=sys.stderr)
    print('-i is ' + str(idletime), file=sys.stderr)
    print('-r is ' + runtime, file=sys.stderr)
    print('-o is ' + worker_type, file=sys.stderr)
    print('-d is ' + driver_type, file=sys.stderr)
    print('-e is ' + str(num_workers), file=sys.stderr)

    if not sshkey:
        print("You must specify an sshkey!", file=sys.stderr)
        sys.exit(2)

    if not token:
        print("You must specify an token!", file=sys.stderr)
        sys.exit(2)

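    # Build the cluster-creation JSON from the parsed arguments, create the
    # cluster, then block until it reports a running state.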
    templ = ClusterUtils.generate_create_templ(sshkey,
                                               cluster_name,
                                               runtime,
                                               idletime,
                                               num_workers,
                                               driver_type,
                                               worker_type,
                                               printLoc=sys.stderr)
    clusterid = ClusterUtils.create_cluster(workspace,
                                            templ,
                                            token,
                                            printLoc=sys.stderr)
    ClusterUtils.wait_for_cluster_start(workspace,
                                        clusterid,
                                        token,
                                        printLoc=sys.stderr)

    # only print the clusterid to stdout so a calling script can get it easily
    print(clusterid, file=sys.stdout)
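
Because every diagnostic message above goes to stderr, a wrapper can capture just the cluster id from stdout. A minimal sketch of such a caller, assuming the example is saved as create.py:

import subprocess

def start_cluster(token, sshkey):
    # create.py prints diagnostics to stderr, so stdout holds only the id
    out = subprocess.check_output(
        ['python', 'create.py', '-t', token, '-k', sshkey], text=True)
    return out.strip()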
Example #6
def main():
  workspace = 'https://dbc-9ff9942e-a9c4.cloud.databricks.com'
  token = ''
  private_key_file = "~/.ssh/id_rsa"
  local_script = 'build.sh'
  script_dest = '/home/ubuntu/build.sh'
  source_tgz = 'spark-rapids-ci.tgz'
  tgz_dest = '/home/ubuntu/spark-rapids-ci.tgz'
  ci_rapids_jar = 'rapids-4-spark_2.12-0.1-SNAPSHOT-ci.jar'
  # the plugin version to use for the jar we build against databricks
  db_version = '0.3.0-SNAPSHOT'
  scala_version = '2.12'
  spark_version = '3.0.0'
  cudf_version = '0.17-SNAPSHOT'
  cuda_version = 'cuda10-1'
  ci_cudf_jar = 'cudf-0.14-cuda10-1.jar'
  base_spark_pom_version = '3.0.0'
  clusterid = ''

  try:
      opts, args = getopt.getopt(
          sys.argv[1:], 'hw:t:c:p:l:d:z:j:b:k:a:f:u:m:v:',
          ['workspace=', 'token=', 'clusterid=', 'private=', 'localscript=',
           'dest=', 'sparktgz=', 'cirapidsjar=', 'databricksversion=',
           'sparkversion=', 'scalaversion=', 'cudfversion=', 'cudaversion=',
           'cicudfjar=', 'basesparkpomversion='])
  except getopt.GetoptError:
      print(
          'run-tests.py -w <workspace> -t <token> -c <clusterid> -p <privatekeyfile> -l <localscript> -d <scriptdestination> -z <sparktgz> -j <cirapidsjar> -b <databricksversion> -k <sparkversion> -a <scalaversion> -f <cudfversion> -u <cudaversion> -m <cicudfjar> -v <basesparkpomversion>')
      sys.exit(2)

  for opt, arg in opts:
      if opt == '-h':
          print(
              'run-tests.py -w <workspace> -t <token> -c <clusterid> -p <privatekeyfile> -l <localscript> -d <scriptdestination> -z <sparktgz> -j <cirapidsjar> -b <databricksversion> -k <sparkversion> -a <scalaversion> -f <cudfversion> -u <cudaversion> -m <cicudfjar> -v <basesparkpomversion>')
          sys.exit()
      elif opt in ('-w', '--workspace'):
          workspace = arg
      elif opt in ('-t', '--token'):
          token = arg
      elif opt in ('-c', '--clusterid'):
          clusterid = arg
      elif opt in ('-p', '--private'):
          private_key_file = arg
      elif opt in ('-l', '--localscript'):
          local_script = arg
      elif opt in ('-d', '--dest'):
          script_dest = arg
      elif opt in ('-z', '--sparktgz'):
          source_tgz = arg
      elif opt in ('-j', '--cirapidsjar'):
          ci_rapids_jar = arg
      elif opt in ('-b', '--databricksversion'):
          db_version = arg
      elif opt in ('-k', '--sparkversion'):
          spark_version = arg
      elif opt in ('-a', '--scalaversion'):
          scala_version = arg
      elif opt in ('-f', '--cudfversion'):
          cudf_version = arg
      elif opt in ('-u', '--cudaversion'):
          cuda_version = arg
      elif opt in ('-m', '--cicudfjar'):
          ci_cudf_jar = arg
      elif opt in ('-v', '--basesparkpomversion'):
          base_spark_pom_version = arg

  print('-w is ' + workspace)
  print('-c is ' + clusterid)
  print('-p is ' + private_key_file)
  print('-l is ' + local_script)
  print('-d is ' + script_dest)
  print('-z is ' + source_tgz)
  print('-j is ' + ci_rapids_jar)
  print('-b is ' + db_version)
  print('-k is ' + spark_version)
  print('-a is ' + scala_version)
  print('-f is ' + cudf_version)
  print('-u is ' + cuda_version)
  print('-m is ' + ci_cudf_jar)
  print('-v is ' + base_spark_pom_version)

  master_addr = ClusterUtils.cluster_get_master_addr(workspace, clusterid, token)
  if master_addr is None:
      print("Error, didn't get master address")
      sys.exit(1)
  print("Master node address is: %s" % master_addr)
  print("Copying script")
  rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (private_key_file, local_script, master_addr, script_dest)
  print("rsync command: %s" % rsync_command)
  subprocess.check_call(rsync_command, shell=True)

  print("Copying source")
  rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (private_key_file, source_tgz, master_addr, tgz_dest)
  print("rsync command: %s" % rsync_command)
  subprocess.check_call(rsync_command, shell=True)

  ssh_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s -p 2200 -i %s %s %s %s %s %s %s %s %s %s %s 2>&1 | tee buildout; if [ `echo ${PIPESTATUS[0]}` -ne 0 ]; then false; else true; fi" % (master_addr, private_key_file, script_dest, tgz_dest, db_version, scala_version, ci_rapids_jar, spark_version, cudf_version, cuda_version, ci_cudf_jar, base_spark_pom_version)
  print("ssh command: %s" % ssh_command)
  subprocess.check_call(ssh_command, shell = True)

  print("Copying built tarball back")
  rsync_command = "rsync  -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" ubuntu@%s:/home/ubuntu/spark-rapids-built.tgz ./" % (private_key_file, master_addr)
  print("rsync command to get built tarball: %s" % rsync_command)
  subprocess.check_call(rsync_command, shell=True)
Example #7
def main():
    workspace = 'https://dbc-9ff9942e-a9c4.cloud.databricks.com'
    token = ''
    private_key_file = "~/.ssh/id_rsa"
    local_script = 'build.sh'
    script_dest = '/home/ubuntu/build.sh'
    source_tgz = 'spark-rapids-ci.tgz'
    tgz_dest = '/home/ubuntu/spark-rapids-ci.tgz'
    base_spark_pom_version = '3.0.0'
    clusterid = ''
    build_profiles = 'databricks,!snapshot-shims'

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hw:t:c:p:l:d:z:m:v:b:', [
            'workspace=', 'token=', 'clusterid=', 'private=', 'localscript=',
            'dest=', 'sparktgz=', 'basesparkpomversion=', 'buildprofiles='
        ])
    except getopt.GetoptError:
        print(
            'run-tests.py -w <workspace> -t <token> -c <clusterid> -p <privatekeyfile> -l <localscript> -d <scriptdestination> -z <sparktgz> -v <basesparkpomversion> -b <buildprofiles>'
        )
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print(
                'run-tests.py -w <workspace> -t <token> -c <clusterid> -p <privatekeyfile> -l <localscript> -d <scriptdestination> -z <sparktgz> -v <basesparkpomversion> -b <buildprofiles>'
            )
            sys.exit()
        elif opt in ('-w', '--workspace'):
            workspace = arg
        elif opt in ('-t', '--token'):
            token = arg
        elif opt in ('-c', '--clusterid'):
            clusterid = arg
        elif opt in ('-p', '--private'):
            private_key_file = arg
        elif opt in ('-l', '--localscript'):
            local_script = arg
        elif opt in ('-d', '--dest'):
            script_dest = arg
        elif opt in ('-z', '--sparktgz'):
            source_tgz = arg
        elif opt in ('-v', '--basesparkpomversion'):
            base_spark_pom_version = arg
        elif opt in ('-b', '--buildprofiles'):
            build_profiles = arg

    print('-w is ' + workspace)
    print('-c is ' + clusterid)
    print('-p is ' + private_key_file)
    print('-l is ' + local_script)
    print('-d is ' + script_dest)
    print('-z is ' + source_tgz)
    print('-v is ' + base_spark_pom_version)
    print('-b is ' + build_profiles)

    master_addr = ClusterUtils.cluster_get_master_addr(workspace, clusterid,
                                                       token)
    if master_addr is None:
        print("Error, didn't get master address")
        sys.exit(1)
    print("Master node address is: %s" % master_addr)
    print("Copying script")
    rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (
        private_key_file, local_script, master_addr, script_dest)
    print("rsync command: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)

    print("Copying source")
    rsync_command = "rsync -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" %s ubuntu@%s:%s" % (
        private_key_file, source_tgz, master_addr, tgz_dest)
    print("rsync command: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)

    ssh_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@%s -p 2200 -i %s %s %s %s %s 2>&1 | tee buildout; if [ `echo ${PIPESTATUS[0]}` -ne 0 ]; then false; else true; fi" % (
        master_addr, private_key_file, script_dest, tgz_dest,
        base_spark_pom_version, build_profiles)
    print("ssh command: %s" % ssh_command)
    subprocess.check_call(ssh_command, shell=True)

    print("Copying built tarball back")
    rsync_command = "rsync  -I -Pave \"ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2200 -i %s\" ubuntu@%s:/home/ubuntu/spark-rapids-built.tgz ./" % (
        private_key_file, master_addr)
    print("rsync command to get built tarball: %s" % rsync_command)
    subprocess.check_call(rsync_command, shell=True)
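
All of these examples interpolate paths straight into a shell string, which breaks on spaces and invites quoting bugs. A minimal sketch of the same rsync step built as an argument list instead of shell=True (function and parameter names are illustrative):

import subprocess

def rsync_to_master(private_key_file, local_path, master_addr, dest_path):
    # No local shell involved: rsync gets the ssh options as the single
    # argument to -e, and the file paths are passed through untouched.
    # (rsync still splits the -e value on spaces, so the key path itself
    # must not contain spaces.)
    ssh_opts = ('ssh -o StrictHostKeyChecking=no '
                '-o UserKnownHostsFile=/dev/null -p 2200 -i ' + private_key_file)
    subprocess.check_call([
        'rsync', '-I', '-Pave', ssh_opts,
        local_path, 'ubuntu@%s:%s' % (master_addr, dest_path),
    ])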