Example #1
    def _wrapper_fun(iter):
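        """Run one differential-evolution trial on this executor.

        Spark hands each executor a partition iterator holding a single
        element: this executor's index, which selects the hyperparameter
        combination to evaluate with map_fun.
        """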

        for i in iter:
            executor_num = i

        tb_pid = 0
        tb_hdfs_path = ''
        hdfs_exec_logdir = ''

        t = threading.Thread(target=devices.print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()

        global local_logdir_bool

        try:
            #Arguments
            if args_dict:
                argcount = six.get_function_code(map_fun).co_argcount
                names = six.get_function_code(map_fun).co_varnames

                args = []
                argIndex = 0
                param_string = ''
                while argcount > 0:
                    #Get args for executor and run function
                    param_name = names[argIndex]
                    param_val = args_dict[param_name][executor_num]
                    param_string += str(param_name) + '=' + str(param_val) + '.'
                    args.append(param_val)
                    argcount -= 1
                    argIndex += 1
                param_string = param_string[:-1]

                val = _get_metric(param_string, app_id, generation_id, run_id)
                hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs.create_directories(
                    app_id,
                    run_id,
                    param_string,
                    'differential_evolution',
                    sub_type='generation.' + str(generation_id))
                pydoop.hdfs.dump('',
                                 os.environ['EXEC_LOGFILE'],
                                 user=hopshdfs.project_user())
                hopshdfs.init_logger()
                tb_hdfs_path, tb_pid = tensorboard.register(
                    hdfs_exec_logdir,
                    hdfs_appid_logdir,
                    executor_num,
                    local_logdir=local_logdir_bool)
                gpu_str = '\nChecking for GPUs in the environment' + devices.get_gpu_info()
                hopshdfs.log(gpu_str)
                print(gpu_str)
                print(
                    '-------------------------------------------------------')
                print('Started running task ' + param_string + '\n')
                if val:
                    print('Reading returned metric from previous run: ' +
                          str(val))
                hopshdfs.log('Started running task ' + param_string)
                task_start = datetime.datetime.now()
                if not val:
                    val = map_fun(*args)
                task_end = datetime.datetime.now()
                time_str = ('Finished task ' + param_string + ' - took ' +
                            util.time_diff(task_start, task_end))
                print('\n' + time_str)
                hopshdfs.log(time_str)
                try:
                    int(val)  # validate that the returned metric is numeric
                except (ValueError, TypeError):
                    raise ValueError(
                        'Your function needs to return a metric (number) which should be maximized or minimized'
                    )

                metric_file = hdfs_exec_logdir + '/metric'
                fs_handle = hopshdfs.get_fs()
                # pydoop's open_file signature differs across versions:
                # newer versions take mode=, older ones take flags=.
                try:
                    fd = fs_handle.open_file(metric_file, mode='w')
                except TypeError:
                    fd = fs_handle.open_file(metric_file, flags='w')

                fd.write(str(float(val)).encode())
                fd.flush()
                fd.close()
                print('Returning metric ' + str(val))
                print(
                    '-------------------------------------------------------')
        except:
            #Always do cleanup
            if tb_hdfs_path:
                _cleanup(tb_hdfs_path)
            if devices.get_num_gpus() > 0:
                t.do_run = False
                t.join()
            raise
        finally:
            if local_logdir_bool:
                local_tb = tensorboard.local_logdir_path
                util.store_local_tensorboard(local_tb, hdfs_exec_logdir)

        hopshdfs.log('Finished running')
        if tb_hdfs_path:
            _cleanup(tb_hdfs_path)
        if devices.get_num_gpus() > 0:
            t.do_run = False
            t.join()
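
The wrapper above is not called directly; the surrounding launcher (not shown
in this excerpt) typically parallelizes one index per executor and maps the
wrapper over the resulting partitions. A minimal dispatch sketch, where `sc`
and `num_executors` are assumed names rather than part of the excerpt:

    # Hypothetical dispatch: one partition per executor, each holding its index.
    node_rdd = sc.parallelize(range(num_executors), num_executors)
    node_rdd.foreachPartition(_wrapper_fun)

This is also why the `for i in iter` loop at the top of the wrapper works:
each partition contains exactly one element, the executor's index.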
Example #2
    def _wrapper_fun(iter):
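        """Run the Horovod allreduce job on this executor.

        Downloads the notebook from HDFS, converts it to a Python script
        with nbconvert, and launches it under mpirun; executor 0 also
        registers TensorBoard.
        """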

        for i in iter:
            executor_num = i

        hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs.create_directories(
            app_id, run_id, None, 'horovod')

        tb_pid = 0
        tb_hdfs_path = ''

        pydoop.hdfs.dump('',
                         os.environ['EXEC_LOGFILE'],
                         user=hopshdfs.project_user())
        hopshdfs.init_logger()
        hopshdfs.log('Starting Spark executor with arguments')
        if executor_num == 0:
            tb_hdfs_path, tb_pid = tensorboard.register(
                hdfs_exec_logdir,
                hdfs_appid_logdir,
                0,
                local_logdir=local_logdir)

        gpu_str = '\n\nChecking for GPUs in the environment\n' + devices.get_gpu_info()
        hopshdfs.log(gpu_str)
        print(gpu_str)

        #1. Download notebook file
        fs_handle = hopshdfs.get_fs()

        # pydoop's open_file signature differs across versions:
        # older versions take flags=, newer ones take mode=.
        try:
            fd = fs_handle.open_file(nb_path, flags='r')
        except TypeError:
            fd = fs_handle.open_file(nb_path, mode='r')

        notebook = ''
        for line in fd:
            notebook += line

        path, filename = os.path.split(nb_path)
        with open(filename, 'w+') as f_nb:
            f_nb.write(notebook)

        # 2. Convert notebook to py file
        jupyter_runnable = os.path.abspath(
            os.path.join(os.environ['PYSPARK_PYTHON'], os.pardir)) + '/jupyter'
        conversion_cmd = jupyter_runnable + ' nbconvert --to python ' + filename
        conversion = subprocess.Popen(conversion_cmd,
                                      shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
        # communicate() waits for the process; calling wait() first can
        # deadlock once the stdout/stderr pipes fill up.
        stdout, stderr = conversion.communicate()
        print(stdout)
        print(stderr)

        # 3. Make py file runnable
        py_runnable = os.getcwd() + '/' + filename.split('.')[0] + '.py'
        st = os.stat(py_runnable)
        os.chmod(py_runnable, st.st_mode | stat.S_IEXEC)

        t_gpus = threading.Thread(
            target=devices.print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t_gpus.start()

        mpi_logfile_path = os.getcwd() + '/mpirun.log'
        if os.path.exists(mpi_logfile_path):
            os.remove(mpi_logfile_path)

        mpi_logfile = open(mpi_logfile_path, 'w')

        # 4. Run allreduce
        mpi_np = os.environ['MPI_NP']
        mpi_cmd = 'HOROVOD_TIMELINE=' + tensorboard.logdir() + '/timeline.json' + \
                  ' TENSORBOARD_LOGDIR=' + tensorboard.logdir() + \
                  ' mpirun -np ' + str(mpi_np) + \
                  ' -bind-to none -map-by slot ' + \
                  ' -x HOROVOD_TIMELINE ' + \
                  ' -x TENSORBOARD_LOGDIR ' + \
                  ' -x NCCL_DEBUG=INFO ' + \
                  os.environ['PYSPARK_PYTHON'] + ' ' + py_runnable
        mpi = subprocess.Popen(mpi_cmd,
                               shell=True,
                               stdout=mpi_logfile,
                               stderr=mpi_logfile,
                               preexec_fn=util.on_executor_exit('SIGTERM'))

        t_log = threading.Thread(target=print_log)
        t_log.start()

        mpi.wait()

        if devices.get_num_gpus() > 0:
            t_gpus.do_run = False
            t_gpus.join()

        return_code = mpi.returncode

        if local_logdir:
            local_tb = tensorboard.local_logdir_path
            pydoop.hdfs.put(local_tb, hdfs_exec_logdir)

        if return_code != 0:
            cleanup(tb_hdfs_path)
            t_log.do_run = False
            t_log.join()
            raise Exception('mpirun FAILED, look in the logs for the error')

        cleanup(tb_hdfs_path)
        t_log.do_run = False
        t_log.join()

        hopshdfs.kill_logger()
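
`print_log`, used for the log-tailing thread above, is not part of this
excerpt. A hypothetical sketch of such a helper, reusing the cooperative
`do_run` stop flag that the wrapper sets on its threads:

    import time

    def print_log():
        # Hypothetical: tail mpirun.log until the owner clears our do_run flag.
        t = threading.current_thread()
        with open(os.getcwd() + '/mpirun.log', 'r') as f:
            while getattr(t, 'do_run', True):
                line = f.readline()
                if line:
                    print(line, end='')
                else:
                    time.sleep(1)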
Example #3
    def _wrapper_fun(iter):
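        """Run one grid-search task on this executor.

        The partition iterator carries this executor's index, which selects
        the hyperparameter combination passed to map_fun.
        """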

        for i in iter:
            executor_num = i

        tb_hdfs_path = ''
        hdfs_exec_logdir = ''

        t = threading.Thread(target=devices.print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()

        try:
            #Arguments
            if args_dict:
                argcount = six.get_function_code(map_fun).co_argcount
                names = six.get_function_code(map_fun).co_varnames

                args = []
                argIndex = 0
                param_string = ''
                while argcount > 0:
                    #Get args for executor and run function
                    param_name = names[argIndex]
                    param_val = args_dict[param_name][executor_num]
                    param_string += str(param_name) + '=' + str(param_val) + '.'
                    args.append(param_val)
                    argcount -= 1
                    argIndex += 1
                param_string = param_string[:-1]
                hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs.create_directories(
                    app_id, run_id, param_string, 'grid_search')
                pydoop.hdfs.dump('',
                                 os.environ['EXEC_LOGFILE'],
                                 user=hopshdfs.project_user())
                hopshdfs.init_logger()
                tb_hdfs_path, tb_pid = tensorboard.register(
                    hdfs_exec_logdir,
                    hdfs_appid_logdir,
                    executor_num,
                    local_logdir=local_logdir)

                gpu_str = '\nChecking for GPUs in the environment' + devices.get_gpu_info()
                hopshdfs.log(gpu_str)
                print(gpu_str)
                print(
                    '-------------------------------------------------------')
                print('Started running task ' + param_string + '\n')
                hopshdfs.log('Started running task ' + param_string)
                task_start = datetime.datetime.now()
                retval = map_fun(*args)
                task_end = datetime.datetime.now()
                _handle_return(retval, hdfs_exec_logdir)
                time_str = ('Finished task ' + param_string + ' - took ' +
                            util.time_diff(task_start, task_end))
                print('\n' + time_str)
                print(
                    '-------------------------------------------------------')
                hopshdfs.log(time_str)
        except:
            #Always do cleanup
            _cleanup(tb_hdfs_path)
            if devices.get_num_gpus() > 0:
                t.do_run = False
                t.join()
            raise
        finally:
            if local_logdir:
                local_tb = tensorboard.local_logdir_path
                util.store_local_tensorboard(local_tb, hdfs_exec_logdir)

        _cleanup(tb_hdfs_path)
        if devices.get_num_gpus() > 0:
            t.do_run = False
            t.join()
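
`_handle_return`, called above, is not included in this excerpt. A minimal
sketch of what it plausibly does, modeled on the metric handling in
Example #1 (the validation and the pydoop fallback mirror that code; the
real implementation may differ):

    def _handle_return(retval, hdfs_exec_logdir):
        # Validate that the user function returned a numeric metric.
        try:
            int(retval)
        except (ValueError, TypeError):
            raise ValueError('Your function needs to return a metric '
                             '(number) which should be maximized or minimized')

        # Persist the metric next to the executor's logs, as in Example #1.
        fs_handle = hopshdfs.get_fs()
        metric_file = hdfs_exec_logdir + '/metric'
        try:
            fd = fs_handle.open_file(metric_file, mode='w')
        except TypeError:
            fd = fs_handle.open_file(metric_file, flags='w')
        fd.write(str(float(retval)).encode())
        fd.flush()
        fd.close()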
Example #4
def begin(spark,
          name='no-name',
          local_logdir=False,
          versioned_resources=None,
          description=None):
    """ Start an experiment

    Args:
      :spark_session: SparkSession object
      :name: (optional) name of the job
    """
    global running
    if running:
        raise RuntimeError(
            "An experiment is currently running. Please call experiment.stop() to stop it."
        )

    try:
        global app_id
        global experiment_json
        global elastic_id
        global run_id
        global driver_tensorboard_hdfs_path

        running = True

        sc = spark.sparkContext
        app_id = str(sc.applicationId)

        run_id = run_id + 1

        versioned_path = util.version_resources(versioned_resources,
                                                get_logdir(app_id))

        experiment_json = util.populate_experiment(sc, name,
                                                   'experiment', 'begin',
                                                   get_logdir(app_id), None,
                                                   versioned_path, description)

        util.put_elastic(hopshdfs.project_name(), app_id, elastic_id,
                         experiment_json)

        hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs.create_directories(
            app_id, run_id, None, 'begin')

        pydoop.hdfs.dump('',
                         os.environ['EXEC_LOGFILE'],
                         user=hopshdfs.project_user())

        hopshdfs.init_logger()

        driver_tensorboard_hdfs_path, _ = tensorboard.register(
            hdfs_exec_logdir,
            hdfs_appid_logdir,
            0,
            local_logdir=local_logdir,
            tensorboard_driver=True)
    except:
        exception_handler()
        raise

    return
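
A usage sketch for `begin`. The `experiment` module name is inferred from the
RuntimeError message above, which refers to `experiment.stop()`; treat the
import path as an assumption:

    from pyspark.sql import SparkSession
    from hops import experiment

    spark = SparkSession.builder.getOrCreate()

    # Start a named run; logs and TensorBoard events go to the HDFS
    # directories created by create_directories above.
    experiment.begin(spark, name='mnist-demo', local_logdir=True)
    # ... training code that writes to tensorboard.logdir() ...
    experiment.stop()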