Example #1
def get_sub_images_from_prediction_results(para_file, polygons_shp,
                                           image_folder_or_path, image_pattern,
                                           saved_dir):

    class_names = parameters.get_string_list_parameters(
        para_file, 'object_names')

    dstnodata = parameters.get_digit_parameters(para_file, 'dst_nodata', 'int')
    bufferSize = parameters.get_digit_parameters(para_file, 'buffer_size',
                                                 'int')
    rectangle_ext = parameters.get_string_parameters_None_if_absence(
        para_file, 'b_use_rectangle')
    if rectangle_ext is not None:
        b_rectangle = True
    else:
        b_rectangle = False

    process_num = parameters.get_digit_parameters(para_file, 'process_num',
                                                  'int')

    get_sub_images_pixel_json_files(polygons_shp, image_folder_or_path,
                                    image_pattern, class_names, bufferSize,
                                    dstnodata, saved_dir, b_rectangle,
                                    process_num)

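# A hypothetical invocation of the function above; the parameter file name,
# shapefile, image folder, pattern, and output folder are illustrative
# placeholders, not values taken from this project.
get_sub_images_from_prediction_results('main_para.ini',
                                       'mapped_polygons.shp',
                                       'inference_images',
                                       '*.tif',
                                       'sub_images_from_prediction')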
Example #2
def train_evaluation_deeplab_separate(WORK_DIR, deeplab_dir, expr_name,
                                      para_file, network_setting_ini, gpu_num):
    '''
    In "train_evaluation_deeplab", training runs, stops for evaluation, then resumes, which disturbs the learning rate schedule and worsens the results.
    So in this function, we start two processes: one for training and another for evaluation (run on CPU).
    '''
    # prepare training folder
    EXP_FOLDER = expr_name
    INIT_FOLDER = os.path.join(WORK_DIR, EXP_FOLDER, 'init_models')
    TRAIN_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'train')
    EVAL_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'eval')
    VIS_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'vis')
    EXPORT_DIR = os.path.join(WORK_DIR, EXP_FOLDER, 'export')

    io_function.mkdir(INIT_FOLDER)
    io_function.mkdir(TRAIN_LOGDIR)
    io_function.mkdir(EVAL_LOGDIR)
    io_function.mkdir(VIS_LOGDIR)
    io_function.mkdir(EXPORT_DIR)

    # prepare the tensorflow checkpoint (pretrained model) for training
    pre_trained_dir = parameters.get_directory_None_if_absence(
        network_setting_ini, 'pre_trained_model_folder')
    pre_trained_tar = parameters.get_string_parameters(network_setting_ini,
                                                       'TF_INIT_CKPT')
    pre_trained_path = os.path.join(pre_trained_dir, pre_trained_tar)
    if os.path.isfile(pre_trained_path) is False:
        print('pre-trained model: %s does not exist, trying to download' %
              pre_trained_path)
        # try to download the file
        pre_trained_url = parameters.get_string_parameters_None_if_absence(
            network_setting_ini, 'pre_trained_model_url')
        res = os.system('wget %s ' % pre_trained_url)
        if res != 0:
            sys.exit(1)
        io_function.movefiletodir(pre_trained_tar, pre_trained_dir)

    # unpack pre-trained model to INIT_FOLDER
    os.chdir(INIT_FOLDER)
    res = os.system('tar -xf %s' % pre_trained_path)
    if res != 0:
        raise IOError('failed to unpack %s' % pre_trained_path)
    os.chdir(WORK_DIR)

    dataset_dir = os.path.join(WORK_DIR, 'tfrecord')
    batch_size = parameters.get_digit_parameters(network_setting_ini,
                                                 'batch_size', 'int')
    # maximum iteration number
    iteration_num = parameters.get_digit_parameters(network_setting_ini,
                                                    'iteration_num', 'int')
    base_learning_rate = parameters.get_digit_parameters(
        network_setting_ini, 'base_learning_rate', 'float')

    train_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_output_stride', 'int')
    train_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_atrous_rates1', 'int')
    train_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_atrous_rates2', 'int')
    train_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_atrous_rates3', 'int')

    inf_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_output_stride', 'int')
    inf_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates1', 'int')
    inf_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates2', 'int')
    inf_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates3', 'int')

    # depth_multiplier default is 1.0.
    depth_multiplier = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'depth_multiplier', 'float')

    decoder_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'decoder_output_stride', 'int')
    aspp_convs_filters = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'aspp_convs_filters', 'int')

    train_script = os.path.join(deeplab_dir, 'train.py')
    train_split = os.path.splitext(
        parameters.get_string_parameters(para_file,
                                         'training_sample_list_txt'))[0]
    model_variant = parameters.get_string_parameters(network_setting_ini,
                                                     'model_variant')
    checkpoint = parameters.get_string_parameters(network_setting_ini,
                                                  'tf_initial_checkpoint')
    init_checkpoint_files = io_function.get_file_list_by_pattern(
        INIT_FOLDER, checkpoint + '*')
    if len(init_checkpoint_files) < 1:
        raise IOError('No initial checkpoint in %s with pattern: %s' %
                      (INIT_FOLDER, checkpoint))
    init_checkpoint = os.path.join(INIT_FOLDER, checkpoint)
    b_early_stopping = parameters.get_bool_parameters(para_file,
                                                      'b_early_stopping')
    b_initialize_last_layer = parameters.get_bool_parameters(
        para_file, 'b_initialize_last_layer')

    dataset = parameters.get_string_parameters(para_file, 'dataset_name')
    num_classes_noBG = parameters.get_digit_parameters_None_if_absence(
        para_file, 'NUM_CLASSES_noBG', 'int')
    assert num_classes_noBG is not None
    if b_initialize_last_layer is True:
        if pre_trained_tar in pre_trained_tar_21_classes:
            print(
                'warning, pretrained model %s is trained with 21 classes, set num_of_classes to 21'
                % pre_trained_tar)
            num_classes_noBG = 20
        if pre_trained_tar in pre_trained_tar_19_classes:
            print(
                'warning, pretrained model %s is trained with 19 classes, set num_of_classes to 19'
                % pre_trained_tar)
            num_classes_noBG = 18
    num_of_classes = num_classes_noBG + 1

    image_crop_size = parameters.get_string_list_parameters(
        para_file, 'image_crop_size')
    if len(image_crop_size) != 2 or image_crop_size[0].isdigit() is False \
            or image_crop_size[1].isdigit() is False:
        raise ValueError('image_crop_size should be height,width')
    crop_size_str = ','.join(image_crop_size)

    # the validation interval (epoch) is not used here; evaluation is triggered whenever a new checkpoint is saved
    # validation_interval = parameters.get_digit_parameters_None_if_absence(para_file,'validation_interval','int')

    train_count, val_count = get_train_val_sample_count(WORK_DIR, para_file)
    iter_per_epoch = math.ceil(train_count / batch_size)
    total_epoches = math.ceil(iteration_num / iter_per_epoch)
    already_trained_iteration = get_trained_iteration(TRAIN_LOGDIR)
    if already_trained_iteration >= iteration_num:
        basic.outputlogMessage('Training already ran %d iterations, skip' %
                               already_trained_iteration)
        return True

    save_interval_secs = 1200  # default is 1200 seconds for saving the model
    save_summaries_secs = 600  # default is 600 seconds for saving summaries
    eval_interval_secs = save_interval_secs  # evaluate at the model-saving interval; if no new model has been saved, there is nothing new to evaluate

    train_process = Process(
        target=train_deeplab,
        args=(train_script, dataset, train_split, num_of_classes,
              base_learning_rate, model_variant, init_checkpoint, TRAIN_LOGDIR,
              dataset_dir, gpu_num, train_atrous_rates1, train_atrous_rates2,
              train_atrous_rates3, train_output_stride, crop_size_str,
              batch_size, iteration_num, depth_multiplier,
              decoder_output_stride, aspp_convs_filters,
              b_initialize_last_layer))
    train_process.start()
    time.sleep(60)  # give the training process time to start
    if train_process.exitcode is not None and train_process.exitcode != 0:
        sys.exit(1)

    # eval_process.start()
    # time.sleep(10)  # wait
    # if eval_process.exitcode is not None and eval_process.exitcode != 0:
    #     sys.exit(1)

    while True:

        # only run evaluation when there is new trained model
        already_trained_iteration = get_trained_iteration(TRAIN_LOGDIR)
        miou_dict = get_miou_list_class_all(EVAL_LOGDIR, num_of_classes)
        basic.outputlogMessage(
            'Already trained iteration: %d, latest evaluation at step %d' %
            (already_trained_iteration, miou_dict['step'][-1]))
        if already_trained_iteration > miou_dict['step'][-1]:

            # run evaluation and wait until it finished
            gpuid = ""  # set gpuid to empty string, making evaluation run on CPU
            evl_script = os.path.join(deeplab_dir, 'eval.py')
            evl_split = os.path.splitext(
                parameters.get_string_parameters(
                    para_file, 'validation_sample_list_txt'))[0]
            # max_eva_number = -1  # run as many evaluation as possible, --eval_interval_secs (default is 300 seconds)
            max_eva_number = 1  # run only once per pass; the outer while loop drives repeated evaluations
            eval_process = Process(
                target=evaluation_deeplab,
                args=(evl_script, dataset, evl_split, num_of_classes,
                      model_variant, inf_atrous_rates1, inf_atrous_rates2,
                      inf_atrous_rates3, inf_output_stride, TRAIN_LOGDIR,
                      EVAL_LOGDIR, dataset_dir, crop_size_str, max_eva_number,
                      depth_multiplier, decoder_output_stride,
                      aspp_convs_filters, gpuid, eval_interval_secs))
            # create the Process inside the while loop to avoid "AssertionError: cannot start a process twice"
            eval_process.start()
            while eval_process.is_alive():
                time.sleep(5)

        # check whether early stopping is needed
        if b_early_stopping:
            print(datetime.now(), 'check early stopping')
            miou_dict = get_miou_list_class_all(EVAL_LOGDIR, num_of_classes)
            if 'overall' in miou_dict.keys() and len(
                    miou_dict['overall']) >= 5:
                # if the last five miou did not improve, then stop training
                if np.all(np.diff(miou_dict['overall'][-5:]) < 0.005):
                    # an improvement below 0.005 (0.5%) counts as no improvement
                    basic.outputlogMessage(
                        'early stopping: stop training because overall miou did not improve in the last five evaluations'
                    )
                    output_early_stopping_message(TRAIN_LOGDIR)

                    # train_process.kill()    # this one seems not working
                    # subprocess pid different from ps output
                    # https://stackoverflow.com/questions/4444141/subprocess-pid-different-from-ps-output
                    # os.system('kill ' + str(train_process.pid)) # still not working.  train_process.pid is not the one output by ps -aux

                    # train_process.terminate()   # Note that descendant processes of the process will not be terminated
                    # train_process.join()        # Wait until child process terminates

                    with open('train_py_pid.txt', 'r') as f_obj:
                        lines = f_obj.readlines()
                        train_pid = int(lines[0].strip())
                        os.system('kill ' + str(train_pid))
                        basic.outputlogMessage(
                            'killed training process with pid: %d' % train_pid)

                    break  # this breaks the while loop, so some newly saved models may not get evaluated

        # if the evaluation step lags behind the saved model iteration, run another evaluation immediately
        already_trained_iteration = get_trained_iteration(TRAIN_LOGDIR)
        miou_dict = get_miou_list_class_all(EVAL_LOGDIR, num_of_classes)
        if already_trained_iteration > miou_dict['step'][-1]:
            continue

        # if finished training
        if train_process.is_alive() is False:
            break
        # # if eval_process exit, then quit training as well
        # if eval_process.is_alive() is False and train_process.is_alive():
        #     train_process.kill()
        #     break
        time.sleep(eval_interval_secs)  # wait for next evaluation

    # save loss value to disk
    get_loss_learning_rate_list(TRAIN_LOGDIR)
    # get miou again
    miou_dict = get_miou_list_class_all(EVAL_LOGDIR, num_of_classes)

    # eval_process did not exit as expected, kill it again.
    # os.system('kill ' + str(eval_process.pid))

    # get iou and backup
    iou_path = os.path.join(EVAL_LOGDIR, 'miou.txt')
    loss_path = os.path.join(TRAIN_LOGDIR, 'loss_learning_rate.txt')
    patch_info = os.path.join(WORK_DIR, 'sub_images_patches_info.txt')

    # backup miou and training_loss & learning rate
    test_id = os.path.basename(WORK_DIR) + '_' + expr_name
    backup_dir = os.path.join(WORK_DIR, 'result_backup')
    if os.path.isdir(backup_dir) is False:
        io_function.mkdir(backup_dir)
    new_iou_name = os.path.join(backup_dir,
                                test_id + '_' + os.path.basename(iou_path))
    io_function.copy_file_to_dst(iou_path, new_iou_name, overwrite=True)

    loss_new_name = os.path.join(backup_dir,
                                 test_id + '_' + os.path.basename(loss_path))
    io_function.copy_file_to_dst(loss_path, loss_new_name, overwrite=True)

    new_patch_info = os.path.join(backup_dir,
                                  test_id + '_' + os.path.basename(patch_info))
    io_function.copy_file_to_dst(patch_info, new_patch_info, overwrite=True)

    # plot mIOU, loss, and learning rate curves, and back them up
    miou_curve_path = plot_miou_loss_curve.plot_miou_loss_main(
        iou_path,
        train_count=train_count,
        val_count=val_count,
        batch_size=batch_size)
    loss_curve_path = plot_miou_loss_curve.plot_miou_loss_main(
        loss_path,
        train_count=train_count,
        val_count=val_count,
        batch_size=batch_size)
    miou_curve_bakname = os.path.join(
        backup_dir, test_id + '_' + os.path.basename(miou_curve_path))
    io_function.copy_file_to_dst(miou_curve_path,
                                 miou_curve_bakname,
                                 overwrite=True)
    loss_curve_bakname = os.path.join(
        backup_dir, test_id + '_' + os.path.basename(loss_curve_path))
    io_function.copy_file_to_dst(loss_curve_path,
                                 loss_curve_bakname,
                                 overwrite=True)
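
# A minimal, self-contained sketch of the early-stopping rule used above:
# training stops once none of the last five overall mIOU values improved by
# at least 0.005 (0.5%). The function name and defaults here are
# illustrative, not part of the project's API.
import numpy as np

def miou_stopped_improving(miou_values, min_improve=0.005, window=5):
    # too few evaluations so far: keep training
    if len(miou_values) < window:
        return False
    # np.diff gives the change between consecutive evaluations
    return bool(np.all(np.diff(miou_values[-window:]) < min_improve))

# a plateau over the last five evaluations triggers the stop
print(miou_stopped_improving([0.60, 0.70, 0.75, 0.752, 0.753, 0.754, 0.754]))  # True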
Example #3
def train_evaluation_deeplab(WORK_DIR, deeplab_dir, expr_name, para_file,
                             network_setting_ini, gpu_num):

    # prepare training folder
    EXP_FOLDER = expr_name
    INIT_FOLDER = os.path.join(WORK_DIR, EXP_FOLDER, 'init_models')
    TRAIN_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'train')
    EVAL_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'eval')
    VIS_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'vis')
    EXPORT_DIR = os.path.join(WORK_DIR, EXP_FOLDER, 'export')

    io_function.mkdir(INIT_FOLDER)
    io_function.mkdir(TRAIN_LOGDIR)
    io_function.mkdir(EVAL_LOGDIR)
    io_function.mkdir(VIS_LOGDIR)
    io_function.mkdir(EXPORT_DIR)

    # prepare the tensorflow checkpoint (pretrained model) for training
    pre_trained_dir = parameters.get_directory_None_if_absence(
        network_setting_ini, 'pre_trained_model_folder')
    pre_trained_tar = parameters.get_string_parameters(network_setting_ini,
                                                       'TF_INIT_CKPT')
    pre_trained_path = os.path.join(pre_trained_dir, pre_trained_tar)
    if os.path.isfile(pre_trained_path) is False:
        print('pre-trained model: %s does not exist, trying to download' %
              pre_trained_path)
        # try to download the file
        pre_trained_url = parameters.get_string_parameters_None_if_absence(
            network_setting_ini, 'pre_trained_model_url')
        res = os.system('wget %s ' % pre_trained_url)
        if res != 0:
            sys.exit(1)
        io_function.movefiletodir(pre_trained_tar, pre_trained_dir)

    # unpack pre-trained model to INIT_FOLDER
    os.chdir(INIT_FOLDER)
    res = os.system('tar -xf %s' % pre_trained_path)
    if res != 0:
        raise IOError('failed to unpack %s' % pre_trained_path)
    os.chdir(WORK_DIR)

    dataset_dir = os.path.join(WORK_DIR, 'tfrecord')
    batch_size = parameters.get_digit_parameters(network_setting_ini,
                                                 'batch_size', 'int')
    # maximum iteration number
    iteration_num = parameters.get_digit_parameters(network_setting_ini,
                                                    'iteration_num', 'int')
    base_learning_rate = parameters.get_digit_parameters(
        network_setting_ini, 'base_learning_rate', 'float')

    train_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_output_stride', 'int')
    train_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_atrous_rates1', 'int')
    train_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_atrous_rates2', 'int')
    train_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'train_atrous_rates3', 'int')

    inf_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_output_stride', 'int')
    inf_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates1', 'int')
    inf_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates2', 'int')
    inf_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates3', 'int')

    # depth_multiplier default is 1.0.
    depth_multiplier = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'depth_multiplier', 'float')

    decoder_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'decoder_output_stride', 'int')
    aspp_convs_filters = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'aspp_convs_filters', 'int')

    train_script = os.path.join(deeplab_dir, 'train.py')
    train_split = os.path.splitext(
        parameters.get_string_parameters(para_file,
                                         'training_sample_list_txt'))[0]
    model_variant = parameters.get_string_parameters(network_setting_ini,
                                                     'model_variant')
    checkpoint = parameters.get_string_parameters(network_setting_ini,
                                                  'tf_initial_checkpoint')
    init_checkpoint_files = io_function.get_file_list_by_pattern(
        INIT_FOLDER, checkpoint + '*')
    if len(init_checkpoint_files) < 1:
        raise IOError('No initial checkpoint in %s with pattern: %s' %
                      (INIT_FOLDER, checkpoint))
    init_checkpoint = os.path.join(INIT_FOLDER, checkpoint)
    b_early_stopping = parameters.get_bool_parameters(para_file,
                                                      'b_early_stopping')
    b_initialize_last_layer = parameters.get_bool_parameters(
        para_file, 'b_initialize_last_layer')

    dataset = parameters.get_string_parameters(para_file, 'dataset_name')
    num_classes_noBG = parameters.get_digit_parameters_None_if_absence(
        para_file, 'NUM_CLASSES_noBG', 'int')
    assert num_classes_noBG is not None
    if b_initialize_last_layer is True:
        if pre_trained_tar in pre_trained_tar_21_classes:
            print(
                'warning, pretrained model %s is trained with 21 classes, set num_of_classes to 21'
                % pre_trained_tar)
            num_classes_noBG = 20
        if pre_trained_tar in pre_trained_tar_19_classes:
            print(
                'warning, pretrained model %s is trained with 19 classes, set num_of_classes to 19'
                % pre_trained_tar)
            num_classes_noBG = 18
    num_of_classes = num_classes_noBG + 1

    image_crop_size = parameters.get_string_list_parameters(
        para_file, 'image_crop_size')
    if len(image_crop_size) != 2 or image_crop_size[0].isdigit() is False \
            or image_crop_size[1].isdigit() is False:
        raise ValueError('image_crop_size should be height,width')
    crop_size_str = ','.join(image_crop_size)

    evl_script = os.path.join(deeplab_dir, 'eval.py')
    evl_split = os.path.splitext(
        parameters.get_string_parameters(para_file,
                                         'validation_sample_list_txt'))[0]
    max_eva_number = 1

    # validation interval (epoch)
    validation_interval = parameters.get_digit_parameters_None_if_absence(
        para_file, 'validation_interval', 'int')
    train_count, val_count = get_train_val_sample_count(WORK_DIR, para_file)
    iter_per_epoch = math.ceil(train_count / batch_size)
    total_epoches = math.ceil(iteration_num / iter_per_epoch)
    already_trained_iteration = get_trained_iteration(TRAIN_LOGDIR)
    if already_trained_iteration >= iteration_num:
        basic.outputlogMessage('Training already ran %d iterations, skip' %
                               already_trained_iteration)
        return True
    if validation_interval is None:
        basic.outputlogMessage(
            'validation_interval is not set, so training to %d iterations, then evaluating at the end'
            % iteration_num)
        # run training
        train_deeplab(train_script, dataset, train_split, num_of_classes,
                      base_learning_rate, model_variant, init_checkpoint,
                      TRAIN_LOGDIR, dataset_dir, gpu_num, train_atrous_rates1,
                      train_atrous_rates2, train_atrous_rates3,
                      train_output_stride, crop_size_str, batch_size,
                      iteration_num, depth_multiplier, decoder_output_stride,
                      aspp_convs_filters, b_initialize_last_layer)

        # run evaluation
        evaluation_deeplab(evl_script, dataset, evl_split, num_of_classes,
                           model_variant, inf_atrous_rates1, inf_atrous_rates2,
                           inf_atrous_rates3, inf_output_stride, TRAIN_LOGDIR,
                           EVAL_LOGDIR, dataset_dir, crop_size_str,
                           max_eva_number, depth_multiplier,
                           decoder_output_stride, aspp_convs_filters)
        miou_dict = get_miou_list_class_all(EVAL_LOGDIR, num_of_classes)
        get_loss_learning_rate_list(TRAIN_LOGDIR)
    else:
        basic.outputlogMessage(
            'training to the maximum iteration of %d, and evaluating every %d epoch(s)'
            % (iteration_num, validation_interval))
        for epoch in range(validation_interval,
                           total_epoches + validation_interval,
                           validation_interval):

            to_iter_num = min(epoch * iter_per_epoch, iteration_num)
            if to_iter_num <= already_trained_iteration:
                continue
            basic.outputlogMessage(
                'training and evaluating to %d epochs (to iteration: %d)' %
                (epoch, to_iter_num))

            # run training
            train_deeplab(train_script, dataset, train_split, num_of_classes,
                          base_learning_rate, model_variant, init_checkpoint,
                          TRAIN_LOGDIR, dataset_dir, gpu_num,
                          train_atrous_rates1, train_atrous_rates2,
                          train_atrous_rates3, train_output_stride,
                          crop_size_str, batch_size, to_iter_num,
                          depth_multiplier, decoder_output_stride,
                          aspp_convs_filters, b_initialize_last_layer)

            # run evaluation
            evaluation_deeplab(evl_script, dataset, evl_split, num_of_classes,
                               model_variant, inf_atrous_rates1,
                               inf_atrous_rates2, inf_atrous_rates3,
                               inf_output_stride, TRAIN_LOGDIR, EVAL_LOGDIR,
                               dataset_dir, crop_size_str, max_eva_number,
                               depth_multiplier, decoder_output_stride,
                               aspp_convs_filters)

            # get miou
            miou_dict = get_miou_list_class_all(EVAL_LOGDIR, num_of_classes)
            # save loss value to disk
            get_loss_learning_rate_list(TRAIN_LOGDIR)
            # check whether early stopping is needed
            if b_early_stopping:
                if len(miou_dict['overall']) >= 5:
                    # if the last five miou did not improve, then stop training
                    if np.all(np.diff(miou_dict['overall'][-5:]) < 0.005):
                        # an improvement below 0.005 (0.5%) counts as no improvement
                        basic.outputlogMessage(
                            'early stopping: stop training because overall miou did not improve in the last five evaluations'
                        )
                        output_early_stopping_message(TRAIN_LOGDIR)
                        break

    # plot mIOU, loss, and learning rate curves
    iou_path = os.path.join(EVAL_LOGDIR, 'miou.txt')
    loss_path = os.path.join(TRAIN_LOGDIR, 'loss_learning_rate.txt')
    miou_curve_path = plot_miou_loss_curve.plot_miou_loss_main(
        iou_path,
        train_count=train_count,
        val_count=val_count,
        batch_size=batch_size)
    loss_curve_path = plot_miou_loss_curve.plot_miou_loss_main(
        loss_path,
        train_count=train_count,
        val_count=val_count,
        batch_size=batch_size)

    # backup miou and training_loss & learning rate
    test_id = os.path.basename(WORK_DIR) + '_' + expr_name
    backup_dir = os.path.join(WORK_DIR, 'result_backup')
    if os.path.isdir(backup_dir) is False:
        io_function.mkdir(backup_dir)

    new_iou_name = os.path.join(backup_dir,
                                test_id + '_' + os.path.basename(iou_path))
    io_function.copy_file_to_dst(iou_path, new_iou_name, overwrite=True)
    miou_curve_bakname = os.path.join(
        backup_dir, test_id + '_' + os.path.basename(miou_curve_path))
    io_function.copy_file_to_dst(miou_curve_path,
                                 miou_curve_bakname,
                                 overwrite=True)

    loss_new_name = os.path.join(backup_dir,
                                 test_id + '_' + os.path.basename(loss_path))
    io_function.copy_file_to_dst(loss_path, loss_new_name, overwrite=True)
    loss_curve_bakname = os.path.join(
        backup_dir, test_id + '_' + os.path.basename(loss_curve_path))
    io_function.copy_file_to_dst(loss_curve_path,
                                 loss_curve_bakname,
                                 overwrite=True)
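
# A worked example of the epoch/iteration bookkeeping above, with
# illustrative counts (not from a real run): 1000 training samples,
# batch size 8, 30000 maximum iterations, validation every 20 epochs.
import math

train_count, batch_size = 1000, 8
iteration_num, validation_interval = 30000, 20
iter_per_epoch = math.ceil(train_count / batch_size)       # 125
total_epoches = math.ceil(iteration_num / iter_per_epoch)  # 240
for epoch in range(validation_interval,
                   total_epoches + validation_interval,
                   validation_interval):
    # epoch 20 trains to iteration 2500, epoch 40 to 5000, ..., 240 to 30000
    to_iter_num = min(epoch * iter_per_epoch, iteration_num)
    print(epoch, to_iter_num)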
Example #4
def postProcess(para_file, inf_post_note, b_skip_getshp=False, test_id=None):
    # test_id relates the post-processing results to a specific training run

    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in the current folder: %s' %
                      (para_file, os.getcwd()))

    # the test string in 'exe.sh'
    test_note = inf_post_note

    WORK_DIR = os.getcwd()

    SECONDS = time.time()

    expr_name = parameters.get_string_parameters(para_file, 'expr_name')
    network_setting_ini = parameters.get_string_parameters(
        para_file, 'network_setting_ini')

    inf_dir = parameters.get_directory(para_file, 'inf_output_dir')
    if test_id is None:
        test_id = os.path.basename(WORK_DIR) + '_' + expr_name

    # get name of inference areas
    multi_inf_regions = parameters.get_string_list_parameters(
        para_file, 'inference_regions')

    # run post-processing in parallel
    # max_parallel_postProc_task = 8

    backup_dir = os.path.join(WORK_DIR, 'result_backup')
    io_function.mkdir(backup_dir)

    # loop over each inference region
    sub_tasks = []
    same_area_time_inis = group_same_area_time_observations(multi_inf_regions)
    region_eva_reports = {}
    for key in same_area_time_inis.keys():
        multi_observations = same_area_time_inis[key]
        area_name = parameters.get_string_parameters(
            multi_observations[0],
            'area_name')  # they have the same name and time
        area_time = parameters.get_string_parameters(multi_observations[0],
                                                     'area_time')
        merged_shp_list = []
        map_raster_list_2d = [None] * len(multi_observations)
        for area_idx, area_ini in enumerate(multi_observations):
            area_remark = parameters.get_string_parameters(
                area_ini, 'area_remark')
            area_save_dir, shp_pre, _ = get_observation_save_dir_shp_pre(
                inf_dir, area_name, area_time, area_remark, test_id)

            # get image list
            inf_image_dir = parameters.get_directory(area_ini, 'inf_image_dir')
            # it is OK to pass a plain file name as the pattern to the functions below to get the file list
            inf_image_or_pattern = parameters.get_string_parameters(
                area_ini, 'inf_image_or_pattern')
            inf_img_list = io_function.get_file_list_by_pattern(
                inf_image_dir, inf_image_or_pattern)
            img_count = len(inf_img_list)
            if img_count < 1:
                raise ValueError(
                    'No image for inference, please check inf_image_dir and inf_image_or_pattern in %s'
                    % area_ini)

            merged_shp = os.path.join(WORK_DIR, area_save_dir,
                                      shp_pre + '.shp')
            if b_skip_getshp:
                pass
            else:
                # post-process the images one by one
                result_shp_list = []
                map_raster_list = []
                for img_idx, img_path in enumerate(inf_img_list):
                    out_shp, out_raster = inf_results_to_shapefile(
                        WORK_DIR, img_idx, area_save_dir, test_id)
                    if out_shp is None or out_raster is None:
                        continue
                    result_shp_list.append(os.path.join(WORK_DIR, out_shp))
                    map_raster_list.append(out_raster)
                # merge shapefiles
                if merge_shape_files(result_shp_list, merged_shp) is False:
                    continue
                map_raster_list_2d[area_idx] = map_raster_list

            merged_shp_list.append(merged_shp)

        if b_skip_getshp is False:
            # add occurrence (across observations) to each polygon
            get_occurence_for_multi_observation(merged_shp_list)

        for area_idx, area_ini in enumerate(multi_observations):
            area_remark = parameters.get_string_parameters(
                area_ini, 'area_remark')
            area_save_dir, shp_pre, area_remark_time = get_observation_save_dir_shp_pre(
                inf_dir, area_name, area_time, area_remark, test_id)

            merged_shp = os.path.join(WORK_DIR, area_save_dir,
                                      shp_pre + '.shp')
            if os.path.isfile(merged_shp) is False:
                print('Warning, %s does not exist, skip' % merged_shp)
                continue

            # add attributes to shapefile
            # add_attributes_script = os.path.join(code_dir,'datasets', 'get_polygon_attributes.py')
            shp_attributes = os.path.join(WORK_DIR, area_save_dir,
                                          shp_pre + '_post_NOrm.shp')
            # add_polygon_attributes(add_attributes_script,merged_shp, shp_attributes, para_file, area_ini )
            add_polygon_attributes(merged_shp, shp_attributes, para_file,
                                   area_ini)

            # remove polygons
            # rm_polygon_script = os.path.join(code_dir,'datasets', 'remove_mappedPolygons.py')
            shp_post = os.path.join(WORK_DIR, area_save_dir,
                                    shp_pre + '_post.shp')
            # remove_polygons(rm_polygon_script,shp_attributes, shp_post, para_file)
            remove_polygons_main(shp_attributes, shp_post, para_file)

            # evaluate the mapping results
            # eval_shp_script = os.path.join(code_dir,'datasets', 'evaluation_result.py')
            out_report = os.path.join(WORK_DIR, area_save_dir,
                                      shp_pre + '_evaluation_report.txt')
            # evaluation_polygons(eval_shp_script, shp_post, para_file, area_ini,out_report)
            evaluation_polygons(shp_post, para_file, area_ini, out_report)

            ##### copy and backup files ######
            # copy files to result_backup
            if len(test_note) > 0:
                backup_dir_area = os.path.join(
                    backup_dir, area_name + '_' + area_remark_time + '_' +
                    test_id + '_' + test_note)
            else:
                backup_dir_area = os.path.join(
                    backup_dir,
                    area_name + '_' + area_remark_time + '_' + test_id)
            io_function.mkdir(backup_dir_area)
            if len(test_note) > 0:
                bak_merged_shp = os.path.join(
                    backup_dir_area, '_'.join([shp_pre, test_note]) + '.shp')
                bak_post_shp = os.path.join(
                    backup_dir_area,
                    '_'.join([shp_pre, 'post', test_note]) + '.shp')
                bak_eva_report = os.path.join(
                    backup_dir_area,
                    '_'.join([shp_pre, 'eva_report', test_note]) + '.txt')
                bak_area_ini = os.path.join(
                    backup_dir_area,
                    '_'.join([shp_pre, 'region', test_note]) + '.ini')
            else:
                bak_merged_shp = os.path.join(backup_dir_area,
                                              '_'.join([shp_pre]) + '.shp')
                bak_post_shp = os.path.join(
                    backup_dir_area, '_'.join([shp_pre, 'post']) + '.shp')
                bak_eva_report = os.path.join(
                    backup_dir_area,
                    '_'.join([shp_pre, 'eva_report']) + '.txt')
                bak_area_ini = os.path.join(
                    backup_dir_area, '_'.join([shp_pre, 'region']) + '.ini')

            io_function.copy_shape_file(merged_shp, bak_merged_shp)
            io_function.copy_shape_file(shp_post, bak_post_shp)
            if os.path.isfile(out_report):
                io_function.copy_file_to_dst(out_report,
                                             bak_eva_report,
                                             overwrite=True)
            io_function.copy_file_to_dst(area_ini,
                                         bak_area_ini,
                                         overwrite=True)

            # copy map raster
            b_backup_map_raster = parameters.get_bool_parameters_None_if_absence(
                area_ini, 'b_backup_map_raster')
            if b_backup_map_raster is True:
                if map_raster_list_2d[area_idx] is not None:
                    for map_tif in map_raster_list_2d[area_idx]:
                        bak_map_tif = os.path.join(backup_dir_area,
                                                   os.path.basename(map_tif))
                        io_function.copy_file_to_dst(map_tif,
                                                     bak_map_tif,
                                                     overwrite=True)

            region_eva_reports[shp_pre] = bak_eva_report

    if len(test_note) > 0:
        bak_para_ini = os.path.join(
            backup_dir, '_'.join([test_id, 'para', test_note]) + '.ini')
        bak_network_ini = os.path.join(
            backup_dir, '_'.join([test_id, 'network', test_note]) + '.ini')
        bak_time_cost = os.path.join(
            backup_dir, '_'.join([test_id, 'time_cost', test_note]) + '.txt')
    else:
        bak_para_ini = os.path.join(backup_dir,
                                    '_'.join([test_id, 'para']) + '.ini')
        bak_network_ini = os.path.join(backup_dir,
                                       '_'.join([test_id, 'network']) + '.ini')
        bak_time_cost = os.path.join(backup_dir,
                                     '_'.join([test_id, 'time_cost']) + '.txt')
    io_function.copy_file_to_dst(para_file, bak_para_ini)
    io_function.copy_file_to_dst(network_setting_ini, bak_network_ini)
    if os.path.isfile('time_cost.txt'):
        io_function.copy_file_to_dst('time_cost.txt', bak_time_cost)

    # output the evaluation report to screen
    for key in region_eva_reports.keys():
        report = region_eva_reports[key]
        if os.path.isfile(report) is False:
            continue
        print('evaluation report for %s:' % key)
        os.system('head -n 7 %s' % report)

    # output evaluation report to table
    if len(test_note) > 0:
        out_table = os.path.join(
            backup_dir,
            '_'.join([test_id, 'accuracy_table', test_note]) + '.xlsx')
    else:
        out_table = os.path.join(
            backup_dir, '_'.join([test_id, 'accuracy_table']) + '.xlsx')
    eva_reports = [
        region_eva_reports[key] for key in region_eva_reports
        if os.path.isfile(region_eva_reports[key])
    ]
    eva_report_to_tables.eva_reports_to_table(eva_reports, out_table)

    duration = time.time() - SECONDS
    os.system(
        'echo "$(date): time cost of post-processing: %.2f seconds">>time_cost.txt'
        % duration)
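
# An illustration of the backup naming convention used above, with made-up
# values for test_id, shp_pre, and test_note:
test_id, shp_pre, test_note = 'myArea_exp1', 'I0_polygons', 'note1'
print('_'.join([test_id, 'para', test_note]) + '.ini')        # myArea_exp1_para_note1.ini
print('_'.join([shp_pre, 'post', test_note]) + '.shp')        # I0_polygons_post_note1.shp
print('_'.join([shp_pre, 'eva_report', test_note]) + '.txt')  # I0_polygons_eva_report_note1.txt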
Example #5
def main(options, args):

    print(
        "%s : prediction using the trained model (run in parallel if using multiple GPUs)"
        % os.path.basename(sys.argv[0]))
    machine_name = os.uname()[1]
    start_time = datetime.datetime.now()

    para_file = args[0]
    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in the current folder: %s' %
                      (para_file, os.getcwd()))

    basic.setlogfile('parallel_predict_Log.txt')

    deeplab_inf_script = os.path.join(code_dir, 'deeplabBased',
                                      'deeplab_inference.py')
    network_setting_ini = parameters.get_string_parameters(
        para_file, 'network_setting_ini')

    global tf1x_python
    tf1x_python = parameters.get_file_path_parameters(network_setting_ini,
                                                      'tf1x_python')

    trained_model = options.trained_model

    outdir = parameters.get_directory(para_file, 'inf_output_dir')

    # previous results are not removed here; let the user remove this folder manually or in exe.sh
    io_function.mkdir(outdir)

    # get name of inference areas
    multi_inf_regions = parameters.get_string_list_parameters(
        para_file, 'inference_regions')

    # max_parallel_inf_task = parameters.get_digit_parameters(para_file,'max_parallel_inf_task','int')

    b_use_multiGPUs = parameters.get_bool_parameters(para_file,
                                                     'b_use_multiGPUs')

    # loop over each inference region
    sub_tasks = []
    for area_idx, area_ini in enumerate(multi_inf_regions):

        area_name = parameters.get_string_parameters(area_ini, 'area_name')
        area_remark = parameters.get_string_parameters(area_ini, 'area_remark')
        area_time = parameters.get_string_parameters(area_ini, 'area_time')

        inf_image_dir = parameters.get_directory(area_ini, 'inf_image_dir')

        # it is OK to pass a plain file name as the pattern to the functions below to get the file list
        inf_image_or_pattern = parameters.get_string_parameters(
            area_ini, 'inf_image_or_pattern')

        inf_img_list = io_function.get_file_list_by_pattern(
            inf_image_dir, inf_image_or_pattern)
        img_count = len(inf_img_list)
        if img_count < 1:
            raise ValueError(
                'No image for inference, please check inf_image_dir and inf_image_or_pattern in %s'
                % area_ini)

        area_save_dir = os.path.join(
            outdir, area_name + '_' + area_remark + '_' + area_time)
        io_function.mkdir(area_save_dir)

        # parallel inference images for this area
        CUDA_VISIBLE_DEVICES = []
        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
            CUDA_VISIBLE_DEVICES = [
                int(item.strip())
                for item in os.environ['CUDA_VISIBLE_DEVICES'].split(',')
            ]
        idx = 0
        while idx < img_count:

            if b_use_multiGPUs:
                # get available GPUs  # https://github.com/anderskm/gputil
                deviceIDs = GPUtil.getAvailable(order='first',
                                                limit=100,
                                                maxLoad=0.5,
                                                maxMemory=0.5,
                                                includeNan=False,
                                                excludeID=[],
                                                excludeUUID=[])
                # only use the ones listed in CUDA_VISIBLE_DEVICES
                if len(CUDA_VISIBLE_DEVICES) > 0:
                    deviceIDs = [
                        item for item in deviceIDs
                        if item in CUDA_VISIBLE_DEVICES
                    ]
                    basic.outputlogMessage('on ' + machine_name +
                                           ', available GPUs:' +
                                           str(deviceIDs) +
                                           ', among visible ones:' +
                                           str(CUDA_VISIBLE_DEVICES))
                else:
                    basic.outputlogMessage('on ' + machine_name +
                                           ', available GPUs:' +
                                           str(deviceIDs))

                if len(deviceIDs) < 1:
                    # wait one minute, then check the available GPUs again
                    time.sleep(60)
                    continue
                # use the first available visible GPU
                gpuid = deviceIDs[0]
                basic.outputlogMessage(
                    '%d: predict image %s on GPU %d of %s' %
                    (idx, inf_img_list[idx], gpuid, machine_name))
            else:
                gpuid = None
                basic.outputlogMessage('%d: predict image %s on %s' %
                                       (idx, inf_img_list[idx], machine_name))

            # run inference
            img_save_dir = os.path.join(area_save_dir, 'I%d' % idx)
            inf_list_file = os.path.join(area_save_dir, '%d.txt' % idx)

            # if it already exists, then skip
            if os.path.isdir(img_save_dir) and is_file_exist_in_folder(
                    img_save_dir):
                basic.outputlogMessage(
                    'folder of %dth image (%s) already exists, '
                    'it has been predicted or is being predicted' %
                    (idx, inf_img_list[idx]))
                idx += 1
                continue

            with open(inf_list_file, 'w') as inf_obj:
                inf_obj.writelines(inf_img_list[idx] + '\n')

            sub_process = Process(target=predict_one_image_deeplab,
                                  args=(deeplab_inf_script, para_file,
                                        network_setting_ini, img_save_dir,
                                        inf_list_file, gpuid, trained_model))
            sub_process.start()
            sub_tasks.append(sub_process)

            if b_use_multiGPUs is False:
                # wait until previous one finished
                while sub_process.is_alive():
                    time.sleep(5)

            idx += 1

            # wait until predicted image patches exist, or until 20 minutes have elapsed
            time0 = time.time()
            elapsed_time = time.time() - time0
            while elapsed_time < 20 * 60:
                elapsed_time = time.time() - time0
                file_exist = is_file_exist_in_folder(img_save_dir)
                if file_exist is True or sub_process.is_alive() is False:
                    break
                else:
                    time.sleep(5)

            if sub_process.exitcode is not None and sub_process.exitcode != 0:
                sys.exit(1)

            # if 'chpc' in machine_name:
            #     time.sleep(60)  # wait 60 second on ITSC services
            # else:
            #     time.sleep(10)

    # wait until all the tasks have finished
    while b_all_task_finish(sub_tasks) is False:
        basic.outputlogMessage('wait all tasks to finish')
        time.sleep(60)

    end_time = datetime.datetime.now()

    diff_time = end_time - start_time
    out_str = "%s: time cost of total parallel inference on %s: %d seconds" % (
        str(end_time), machine_name, diff_time.seconds)
    basic.outputlogMessage(out_str)
    with open("time_cost.txt", 'a') as t_obj:
        t_obj.writelines(out_str + '\n')
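
# A minimal sketch of the GPU-selection loop above, assuming the GPUtil
# package is installed; it polls until a GPU that is also listed in
# CUDA_VISIBLE_DEVICES drops below 50% load and 50% memory use. The function
# name is illustrative.
import os
import time
import GPUtil

def wait_for_free_gpu(poll_seconds=60):
    visible = [int(i) for i in
               os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',') if i.strip()]
    while True:
        deviceIDs = GPUtil.getAvailable(order='first', limit=100,
                                        maxLoad=0.5, maxMemory=0.5)
        if len(visible) > 0:
            deviceIDs = [item for item in deviceIDs if item in visible]
        if len(deviceIDs) > 0:
            return deviceIDs[0]  # the first available visible GPU
        time.sleep(poll_seconds)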
Example #6
def get_sub_images_multi_regions(para_file):

    print(
        "extract sub-images and sub-labels for a given shape file (training polygons)"
    )

    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in the current folder: %s' %
                      (para_file, os.getcwd()))

    get_subImage_script = os.path.join(code_dir, 'datasets',
                                       'get_subImages.py')
    SECONDS = time.time()

    # get name of training areas
    multi_training_regions = parameters.get_string_list_parameters_None_if_absence(
        para_file, 'training_regions')
    if multi_training_regions is None or len(multi_training_regions) < 1:
        raise ValueError('No training area is set in %s' % para_file)

    # multi_training_files = parameters.get_string_parameters_None_if_absence(para_file, 'multi_training_files')

    dstnodata = parameters.get_string_parameters(para_file, 'dst_nodata')
    buffersize = parameters.get_string_parameters(para_file, 'buffer_size')
    rectangle_ext = parameters.get_string_parameters(para_file,
                                                     'b_use_rectangle')
    process_num = parameters.get_digit_parameters(para_file, 'process_num',
                                                  'int')

    b_no_label_image = parameters.get_bool_parameters_None_if_absence(
        para_file, 'b_no_label_image')

    if os.path.isfile('sub_images_labels_list.txt'):
        io_function.delete_file_or_dir('sub_images_labels_list.txt')

    subImage_dir = parameters.get_string_parameters_None_if_absence(
        para_file, 'input_train_dir')
    subLabel_dir = parameters.get_string_parameters_None_if_absence(
        para_file, 'input_label_dir')

    # loop over each training region
    for idx, area_ini in enumerate(multi_training_regions):

        input_image_dir = parameters.get_directory_None_if_absence(
            area_ini, 'input_image_dir')

        # it is OK to pass a plain file name as the pattern to the functions below to get the file list
        input_image_or_pattern = parameters.get_string_parameters(
            area_ini, 'input_image_or_pattern')

        b_sub_images_json = parameters.get_bool_parameters(
            area_ini, 'b_sub_images_json')
        if b_sub_images_json is True:
            # copy sub-images, then convert JSON files to label images.
            object_names = parameters.get_string_list_parameters(
                para_file, 'object_names')
            get_subImages_json.get_subimages_label_josn(
                input_image_dir,
                input_image_or_pattern,
                subImage_dir,
                subLabel_dir,
                object_names,
                b_no_label_image=b_no_label_image,
                process_num=process_num)

        else:

            all_train_shp = parameters.get_file_path_parameters_None_if_absence(
                area_ini, 'training_polygons')
            train_shp = parameters.get_string_parameters(
                area_ini, 'training_polygons_sub')

            # get subImage and subLabel for one set of training polygons
            print(
                'extract training data from image folder (%s) and polygons (%s)'
                % (input_image_dir, train_shp))
            if b_no_label_image is True:
                get_subImage_one_shp(get_subImage_script,
                                     all_train_shp,
                                     buffersize,
                                     dstnodata,
                                     rectangle_ext,
                                     train_shp,
                                     input_image_dir,
                                     file_pattern=input_image_or_pattern,
                                     process_num=process_num)
            else:
                get_subImage_subLabel_one_shp(
                    get_subImage_script,
                    all_train_shp,
                    buffersize,
                    dstnodata,
                    rectangle_ext,
                    train_shp,
                    input_image_dir,
                    file_pattern=input_image_or_pattern,
                    process_num=process_num)

    # check for black sub-images, or sub-images that are mostly black (nodata)
    new_sub_image_label_list = []
    delete_sub_image_label_list = []
    subImage_dir_delete = subImage_dir + '_delete'
    subLabel_dir_delete = subLabel_dir + '_delete'
    io_function.mkdir(subImage_dir_delete)
    if b_no_label_image is None or b_no_label_image is False:
        io_function.mkdir(subLabel_dir_delete)
    get_valid_percent_entropy.plot_valid_entropy(subImage_dir)
    with open('sub_images_labels_list.txt', 'r') as f_obj:
        lines = f_obj.readlines()
        for line in lines:
            image_path, label_path = line.strip().split(':')
            # valid_per = raster_io.get_valid_pixel_percentage(image_path)
            valid_per, entropy = raster_io.get_valid_percent_shannon_entropy(
                image_path)  # base=10
            if valid_per > 60 and entropy >= 0.5:
                new_sub_image_label_list.append(line)
            else:
                delete_sub_image_label_list.append(line)
                io_function.movefiletodir(image_path, subImage_dir_delete)
                if os.path.isfile(label_path):
                    io_function.movefiletodir(label_path, subLabel_dir_delete)
    if len(delete_sub_image_label_list) > 0:
        with open('sub_images_labels_list.txt', 'w') as f_obj:
            for line in new_sub_image_label_list:
                f_obj.writelines(line)

    # check whether they have the same number of subImages and subLabels
    if b_no_label_image is None or b_no_label_image is False:
        sub_image_list = io_function.get_file_list_by_pattern(
            subImage_dir, '*.tif')
        sub_label_list = io_function.get_file_list_by_pattern(
            subLabel_dir, '*.tif')
        if len(sub_image_list) != len(sub_label_list):
            raise ValueError(
                'the count of subImage (%d) and subLabel (%d) is different' %
                (len(sub_image_list), len(sub_label_list)))

    # save brief information of sub-images
    height_list = []
    width_list = []
    band_count = 0
    dtype = 'unknown'
    for line in new_sub_image_label_list:
        image_path, label_path = line.strip().split(':')
        height, width, band_count, dtype = raster_io.get_height_width_bandnum_dtype(
            image_path)
        height_list.append(height)
        width_list.append(width)
    # save info to file; if it already exists, it will be overwritten
    img_count = len(new_sub_image_label_list)
    with open('sub_images_patches_info.txt', 'w') as f_obj:
        f_obj.writelines('information of sub-images: \n')
        f_obj.writelines('number of sub-images : %d \n' % img_count)
        f_obj.writelines('band count : %d \n' % band_count)
        f_obj.writelines('data type : %s \n' % dtype)
        f_obj.writelines('maximum width and height: %d, %d \n' %
                         (max(width_list), max(height_list)))
        f_obj.writelines('minimum width and height: %d, %d \n' %
                         (min(width_list), min(height_list)))
        f_obj.writelines(
            'mean width and height: %.2f, %.2f \n\n' %
            (sum(width_list) / img_count, sum(height_list) / img_count))

    duration = time.time() - SECONDS
    os.system(
        'echo "$(date): time cost of getting sub images and labels: %.2f seconds">>time_cost.txt'
        % duration)
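
# A minimal sketch of the screening rule above: a sub-image is kept only when
# more than 60% of its pixels are valid (not nodata) and its Shannon entropy
# (base 10) is at least 0.5. This is an illustrative re-implementation, not
# the raster_io version used by the pipeline.
import numpy as np

def valid_percent_shannon_entropy(img, nodata=0):
    valid = img != nodata
    valid_per = 100.0 * valid.sum() / img.size
    _, counts = np.unique(img[valid], return_counts=True)
    prob = counts / counts.sum()
    entropy = float(-(prob * np.log10(prob)).sum())
    return valid_per, entropy

img = np.random.randint(0, 255, (64, 64)).astype(np.uint8)
valid_per, entropy = valid_percent_shannon_entropy(img)
keep = valid_per > 60 and entropy >= 0.5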
Example #7
def calculate_polygon_topography(polygons_shp,
                                 para_file,
                                 dem_files,
                                 slope_files,
                                 aspect_files=None,
                                 dem_diffs=None):
    """
    calculate the topography information such elevation and slope of each polygon
    Args:
        polygons_shp: input shapfe file
        dem_files: DEM raster file or tiles, should have the same projection of shapefile
        slope_files: slope raster file or tiles  (can be drived from dem file by using QGIS or ArcGIS)
        aspect_files: aspect raster file or tiles (can be drived from dem file by using QGIS or ArcGIS)

    Returns: True if successful, False Otherwise
    """
    if io_function.is_file_exist(polygons_shp) is False:
        return False
    operation_obj = shape_opeation()

    ## calculate the topography information from the buffer area

    # the para file was set in parameters.set_saved_parafile_path(options.para_file)
    b_use_buffer_area = parameters.get_bool_parameters(
        para_file, 'b_topo_use_buffer_area')

    if b_use_buffer_area is True:

        b_buffer_size = 5  # buffer size in meters (the same unit as the shapefile)

        basic.outputlogMessage(
            "info: calculate the topography information from the buffer area")
        buffer_polygon_shp = io_function.get_name_by_adding_tail(
            polygons_shp, 'buffer')
        # if os.path.isfile(buffer_polygon_shp) is False:
        if vector_features.get_buffer_polygons(
                polygons_shp, buffer_polygon_shp, b_buffer_size) is False:
            basic.outputlogMessage(
                "error, failed in producing the buffer_polygon_shp")
            return False
        # else:
        #     basic.outputlogMessage("warning, buffer_polygon_shp already exist, skip producing it")
        # replace the polygon shape file
        polygons_shp_backup = polygons_shp
        polygons_shp = buffer_polygon_shp
    else:
        basic.outputlogMessage(
            "info: calculate the topography information from the inside of each polygon"
        )

    # all_touched: bool, optional
    #     Whether to include every raster cell touched by a geometry, or only
    #     those having a center point within the polygon.
    #     defaults to `False`
    #   Since the DEM is usually coarser, we set all_touched = True
    all_touched = True
    process_num = 4

    # #DEM
    if dem_files is not None:
        stats_list = ['min', 'max', 'mean', 'median',
                      'std']  #['min', 'max', 'mean', 'count','median','std']
        # if operation_obj.add_fields_from_raster(polygons_shp, dem_file, "dem", band=1,stats_list=stats_list,all_touched=all_touched) is False:
        #     return False
        if zonal_stats_multiRasters(polygons_shp,
                                    dem_files,
                                    stats=stats_list,
                                    prefix='dem',
                                    band=1,
                                    all_touched=all_touched,
                                    process_num=process_num) is False:
            return False
    else:
        basic.outputlogMessage(
            "warning, DEM file not exist, skip the calculation of DEM information"
        )

    # #slope
    if slope_files is not None:
        stats_list = ['min', 'max', 'mean', 'median', 'std']
        if zonal_stats_multiRasters(polygons_shp,
                                    slope_files,
                                    stats=stats_list,
                                    prefix='slo',
                                    band=1,
                                    all_touched=all_touched,
                                    process_num=process_num) is False:
            return False
    else:
        basic.outputlogMessage(
            "warning, slope file not exist, skip the calculation of slope information"
        )

    # #aspect
    if aspect_files is not None:
        stats_list = ['min', 'max', 'mean', 'std']
        if zonal_stats_multiRasters(polygons_shp,
                                    aspect_files,
                                    stats=stats_list,
                                    prefix='asp',
                                    band=1,
                                    all_touched=all_touched,
                                    process_num=process_num) is False:
            return False
    else:
        basic.outputlogMessage(
            'warning, aspect file does not exist, ignore adding aspect information')

    # elevation difference
    if dem_diffs is not None:
        stats_list = ['min', 'max', 'mean', 'median', 'std', 'area']
        # only count the pixels within this range when computing statistics
        dem_diff_range_str = parameters.get_string_list_parameters(
            para_file, 'dem_difference_range')
        dem_diff_range = [
            None if item.upper() == 'NONE' else float(item)
            for item in dem_diff_range_str
        ]
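        # for example (an assumed setting): dem_difference_range = -10,10 parses to
        # [-10.0, 10.0], while NONE,10 parses to [None, 10.0] (no lower bound)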

        # expand the polygon when doing dem difference statistics
        buffer_size_dem_diff = parameters.get_digit_parameters(
            para_file, 'buffer_size_dem_diff', 'float')

        if zonal_stats_multiRasters(polygons_shp,
                                    dem_diffs,
                                    stats=stats_list,
                                    prefix='demD',
                                    band=1,
                                    all_touched=all_touched,
                                    process_num=process_num,
                                    range=dem_diff_range,
                                    buffer=buffer_size_dem_diff) is False:
            return False
    else:
        basic.outputlogMessage(
            'warning, dem difference file does not exist, ignore adding dem diff information'
        )

    # # hillshade

    # copy the topography information
    if b_use_buffer_area is True:
        operation_obj.add_fields_shape(polygons_shp_backup, buffer_polygon_shp,
                                       polygons_shp_backup)

    return True
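
A minimal usage sketch (the shapefile name, folder names, and parameter file below are assumptions, not from the original project); it assumes the rasters share the shapefile's projection:

# hypothetical example: collect DEM and slope tiles, then attach their zonal
# statistics to the mapped polygons
dem_tiles = io_function.get_file_list_by_pattern('dem_tiles', '*.tif')
slope_tiles = io_function.get_file_list_by_pattern('slope_tiles', '*.tif')
calculate_polygon_topography('mapped_polygons.shp', 'main_para.ini',
                             dem_tiles, slope_tiles)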
Example #8
def main(options, args):

    print("%s : export the frozen inference graph" %
          os.path.basename(sys.argv[0]))

    para_file = args[0]
    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in the current folder: %s' %
                      (para_file, os.getcwd()))

    network_setting_ini = parameters.get_string_parameters(
        para_file, 'network_setting_ini')
    tf_research_dir = parameters.get_directory_None_if_absence(
        network_setting_ini, 'tf_research_dir')
    print(tf_research_dir)
    if tf_research_dir is None:
        raise ValueError('tf_research_dir is not in %s' % para_file)
    if os.path.isdir(tf_research_dir) is False:
        raise ValueError('%s does not exist' % tf_research_dir)
    if os.getenv('PYTHONPATH'):
        os.environ['PYTHONPATH'] = os.getenv(
            'PYTHONPATH') + ':' + tf_research_dir + ':' + os.path.join(
                tf_research_dir, 'slim')
    else:
        os.environ['PYTHONPATH'] = tf_research_dir + ':' + os.path.join(
            tf_research_dir, 'slim')
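    # e.g., with a hypothetical tf_research_dir of /home/user/models/research,
    # PYTHONPATH now ends with '/home/user/models/research:/home/user/models/research/slim'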

    global tf1x_python
    tf1x_python = parameters.get_file_path_parameters(network_setting_ini,
                                                      'tf1x_python')

    deeplab_dir = os.path.join(tf_research_dir, 'deeplab')
    WORK_DIR = os.getcwd()

    expr_name = parameters.get_string_parameters(para_file, 'expr_name')

    EXP_FOLDER = expr_name
    TRAIN_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'train')
    EXPORT_DIR = os.path.join(WORK_DIR, EXP_FOLDER, 'export')

    inf_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_output_stride', 'int')
    inf_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates1', 'int')
    inf_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates2', 'int')
    inf_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates3', 'int')

    # depth_multiplier default is 1.0.
    depth_multiplier = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'depth_multiplier', 'float')

    decoder_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'decoder_output_stride', 'int')
    aspp_convs_filters = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'aspp_convs_filters', 'int')

    model_variant = parameters.get_string_parameters(network_setting_ini,
                                                     'model_variant')
    num_classes_noBG = parameters.get_digit_parameters_None_if_absence(
        para_file, 'NUM_CLASSES_noBG', 'int')
    assert num_classes_noBG is not None
    b_initialize_last_layer = parameters.get_bool_parameters(
        para_file, 'b_initialize_last_layer')
    if b_initialize_last_layer is False:
        pre_trained_tar = parameters.get_string_parameters(
            network_setting_ini, 'TF_INIT_CKPT')
        if pre_trained_tar in pre_trained_tar_21_classes:
            print(
                'warning, pretrained model %s is trained with 21 classes, set num_of_classes to 21'
                % pre_trained_tar)
            num_classes_noBG = 20
        if pre_trained_tar in pre_trained_tar_19_classes:
            print(
                'warning, pretrained model %s is trained with 19 classes, set num_of_classes to 19'
                % pre_trained_tar)
            num_classes_noBG = 18
    num_of_classes = num_classes_noBG + 1

    image_crop_size = parameters.get_string_list_parameters(
        para_file, 'image_crop_size')
    if len(image_crop_size) != 2 or not image_crop_size[0].isdigit() \
            or not image_crop_size[1].isdigit():
        raise ValueError('image_crop_size should be height,width (two integers)')

    iteration_num = get_trained_iteration(TRAIN_LOGDIR)

    multi_scale = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'export_multi_scale', 'int')

    export_script = os.path.join(deeplab_dir, 'export_model.py')
    CKPT_PATH = os.path.join(TRAIN_LOGDIR, 'model.ckpt-%s' % iteration_num)

    EXPORT_PATH = os.path.join(EXPORT_DIR,
                               'frozen_inference_graph_%s.pb' % iteration_num)
    if os.path.isfile(EXPORT_PATH):
        basic.outputlogMessage('%s exists, skipping exporting models' %
                               EXPORT_PATH)
        return
    export_graph(export_script, CKPT_PATH, EXPORT_PATH, model_variant,
                 num_of_classes, inf_atrous_rates1, inf_atrous_rates2,
                 inf_atrous_rates3, inf_output_stride, image_crop_size[0],
                 image_crop_size[1], multi_scale, depth_multiplier,
                 decoder_output_stride, aspp_convs_filters)
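
A hedged invocation sketch: per the body shown, only args[0] is read (options is unused), and the parameter file name is an assumption:

main(None, ['main_para.ini'])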
Example #9
def run_evaluation(WORK_DIR,
                   deeplab_dir,
                   expr_name,
                   para_file,
                   network_setting_ini,
                   gpu_num,
                   train_dir=None):

    EXP_FOLDER = expr_name
    if train_dir is None:
        TRAIN_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'train')
    else:
        TRAIN_LOGDIR = train_dir
    EVAL_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'eval')
    dataset_dir = os.path.join(WORK_DIR, 'tfrecord')

    inf_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_output_stride', 'int')
    inf_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates1', 'int')
    inf_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates2', 'int')
    inf_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'inf_atrous_rates3', 'int')

    b_initialize_last_layer = parameters.get_bool_parameters(
        para_file, 'b_initialize_last_layer')
    pre_trained_tar = parameters.get_string_parameters(network_setting_ini,
                                                       'TF_INIT_CKPT')

    # depth_multiplier default is 1.0.
    depth_multiplier = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'depth_multiplier', 'float')

    decoder_output_stride = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'decoder_output_stride', 'int')
    aspp_convs_filters = parameters.get_digit_parameters_None_if_absence(
        network_setting_ini, 'aspp_convs_filters', 'int')
    model_variant = parameters.get_string_parameters(network_setting_ini,
                                                     'model_variant')

    dataset = parameters.get_string_parameters(para_file, 'dataset_name')
    num_classes_noBG = parameters.get_digit_parameters_None_if_absence(
        para_file, 'NUM_CLASSES_noBG', 'int')
    assert num_classes_noBG is not None
    if b_initialize_last_layer is True:
        if pre_trained_tar in pre_trained_tar_21_classes:
            print(
                'warning, pretrained model %s is trained with 21 classes, set num_of_classes to 21'
                % pre_trained_tar)
            num_classes_noBG = 20
        if pre_trained_tar in pre_trained_tar_19_classes:
            print(
                'warning, pretrained model %s is trained with 19 classes, set num_of_classes to 19'
                % pre_trained_tar)
            num_classes_noBG = 18
    num_of_classes = num_classes_noBG + 1

    image_crop_size = parameters.get_string_list_parameters(
        para_file, 'image_crop_size')
    if len(image_crop_size) != 2 or not image_crop_size[0].isdigit() \
            or not image_crop_size[1].isdigit():
        raise ValueError('image_crop_size should be height,width (two integers)')
    crop_size_str = ','.join(image_crop_size)

    evl_script = os.path.join(deeplab_dir, 'eval.py')
    evl_split = os.path.splitext(
        parameters.get_string_parameters(para_file,
                                         'validation_sample_list_txt'))[0]
    max_eva_number = 1
    eval_interval_secs = 300

    # gpuid = ''      # do not use GPUs

    evaluation_deeplab(evl_script,
                       dataset,
                       evl_split,
                       num_of_classes,
                       model_variant,
                       inf_atrous_rates1,
                       inf_atrous_rates2,
                       inf_atrous_rates3,
                       inf_output_stride,
                       TRAIN_LOGDIR,
                       EVAL_LOGDIR,
                       dataset_dir,
                       crop_size_str,
                       max_eva_number,
                       depth_multiplier,
                       decoder_output_stride,
                       aspp_convs_filters,
                       eval_interval_secs=eval_interval_secs)

    # get miou again
    miou_dict = get_miou_list_class_all(EVAL_LOGDIR, num_of_classes)
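
A minimal invocation sketch for run_evaluation; the experiment name, parameter file names, and deeplab path are assumptions:

WORK_DIR = os.getcwd()
deeplab_dir = '/home/user/models/research/deeplab'  # hypothetical path
run_evaluation(WORK_DIR, deeplab_dir, 'exp1', 'main_para.ini',
               'deeplabv3plus_xception65.ini', 1)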
Example #10
def image_label_to_yolo_format(para_file):

    print("Image labels (semantic segmentation) to YOLO object detection")

    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in the current folder: %s' %
                      (para_file, os.getcwd()))

    img_ext = parameters.get_string_parameters_None_if_absence(
        para_file, 'split_image_format')
    proc_num = parameters.get_digit_parameters(para_file, 'process_num', 'int')

    SECONDS = time.time()

    # get image and label path
    image_list = []
    label_list = []
    with open(os.path.join('list', 'trainval.txt'), 'r') as f_obj:
        lines = [item.strip() for item in f_obj.readlines()]
        for line in lines:
            image_list.append(os.path.join('split_images', line + img_ext))
            label_list.append(os.path.join('split_labels', line + img_ext))

    num_classes_noBG = parameters.get_digit_parameters_None_if_absence(
        para_file, 'NUM_CLASSES_noBG', 'int')
    b_ignore_edge_objects = parameters.get_bool_parameters_None_if_absence(
        para_file, 'b_ignore_edge_objects')
    if b_ignore_edge_objects is None:
        b_ignore_edge_objects = False

    # get boxes
    total_count = len(image_list)
    for idx, (img, label) in enumerate(zip(image_list, label_list)):
        get_yolo_boxes_one_img(idx,
                               total_count,
                               img,
                               label,
                               num_classes_noBG,
                               rm_edge_obj=b_ignore_edge_objects)
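    # note: get_yolo_boxes_one_img is expected to write Darknet-style label files,
    # typically one '<class> <x_center> <y_center> <width> <height>' line per object,
    # with all coordinates normalized to [0, 1]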

    # write obj.data file
    train_sample_txt = parameters.get_string_parameters(
        para_file, 'training_sample_list_txt')
    val_sample_txt = parameters.get_string_parameters(
        para_file, 'validation_sample_list_txt')
    train_img_list = get_image_list('list', train_sample_txt, 'split_images',
                                    img_ext)
    val_img_list = get_image_list('list', val_sample_txt, 'split_images',
                                  img_ext)

    expr_name = parameters.get_string_parameters(para_file, 'expr_name')
    object_names = parameters.get_string_list_parameters(
        para_file, 'object_names')
    io_function.mkdir('data')
    io_function.mkdir(expr_name)

    with open(os.path.join('data', 'obj.data'), 'w') as f_obj:
        f_obj.writelines('classes = %d' % num_classes_noBG + '\n')

        train_txt = os.path.join('data', 'train.txt')
        io_function.save_list_to_txt(train_txt, train_img_list)
        f_obj.writelines('train = %s' % train_txt + '\n')

        val_txt = os.path.join('data', 'val.txt')
        io_function.save_list_to_txt(val_txt, val_img_list)
        f_obj.writelines('valid = %s' % val_txt + '\n')

        obj_name_txt = os.path.join('data', 'obj.names')
        io_function.save_list_to_txt(obj_name_txt, object_names)
        f_obj.writelines('names = %s' % obj_name_txt + '\n')

        f_obj.writelines('backup = %s' % expr_name + '\n')

    duration = time.time() - SECONDS
    os.system(
        'echo "$(date): time cost of converting to yolo format: %.2f seconds">>time_cost.txt'
        % duration)

    pass
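
For reference, the data/obj.data file written above follows the Darknet convention; with one class and an experiment named exp1 (assumed values), it would contain:

classes = 1
train = data/train.txt
valid = data/val.txt
names = data/obj.names
backup = exp1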
Example #11
def mmseg_parallel_predict_main(para_file, trained_model):

    print(
        "MMSegmentation prediction using the trained model (run in parallel if multiple GPUs are used)"
    )
    machine_name = os.uname()[1]
    start_time = datetime.now()

    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in the current folder: %s' %
                      (para_file, os.getcwd()))

    expr_name = parameters.get_string_parameters(para_file, 'expr_name')
    # network_ini = parameters.get_string_parameters(para_file, 'network_setting_ini')
    # mmseg_repo_dir = parameters.get_directory(network_ini, 'mmseg_repo_dir')
    # mmseg_code_dir = osp.join(mmseg_repo_dir,'mmseg')

    # if os.path.isdir(mmseg_code_dir) is False:
    #     raise ValueError('%s does not exist' % mmseg_code_dir)

    # # set PYTHONPATH to use my modified version of mmseg
    # if os.getenv('PYTHONPATH'):
    #     os.environ['PYTHONPATH'] = os.getenv('PYTHONPATH') + ':' + mmseg_code_dir
    # else:
    #     os.environ['PYTHONPATH'] = mmseg_code_dir
    # print('\nPYTHONPATH is: ',os.getenv('PYTHONPATH'))

    if trained_model is None:
        trained_model = os.path.join(expr_name, 'latest.pth')

    outdir = parameters.get_directory(para_file, 'inf_output_dir')
    # remove previous results (let the user remove this folder manually or via the exe.sh script)
    io_function.mkdir(outdir)

    # get name of inference areas
    multi_inf_regions = parameters.get_string_list_parameters(
        para_file, 'inference_regions')
    b_use_multiGPUs = parameters.get_bool_parameters(para_file,
                                                     'b_use_multiGPUs')

    # loop each inference regions
    sub_tasks = []
    for area_idx, area_ini in enumerate(multi_inf_regions):

        area_name = parameters.get_string_parameters(area_ini, 'area_name')
        area_remark = parameters.get_string_parameters(area_ini, 'area_remark')
        area_time = parameters.get_string_parameters(area_ini, 'area_time')

        inf_image_dir = parameters.get_directory(area_ini, 'inf_image_dir')

        # it is OK to treat a file name as a pattern and pass it to the following functions to get the file list
        inf_image_or_pattern = parameters.get_string_parameters(
            area_ini, 'inf_image_or_pattern')

        inf_img_list = io_function.get_file_list_by_pattern(
            inf_image_dir, inf_image_or_pattern)
        img_count = len(inf_img_list)
        if img_count < 1:
            raise ValueError(
                'No image for inference, please check inf_image_dir and inf_image_or_pattern in %s'
                % area_ini)

        area_save_dir = os.path.join(
            outdir, area_name + '_' + area_remark + '_' + area_time)
        io_function.mkdir(area_save_dir)

        # parallel inference images for this area
        CUDA_VISIBLE_DEVICES = []
        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
            CUDA_VISIBLE_DEVICES = [
                int(item.strip())
                for item in os.environ['CUDA_VISIBLE_DEVICES'].split(',')
            ]
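        # e.g., CUDA_VISIBLE_DEVICES='0,2' (an assumed setting) parses to [0, 2]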
        idx = 0
        while idx < img_count:

            if b_use_multiGPUs:
                # get available GPUs  # https://github.com/anderskm/gputil
                # memory: orders the available GPU device ids by ascending memory usage
                deviceIDs = GPUtil.getAvailable(order='memory',
                                                limit=100,
                                                maxLoad=0.5,
                                                maxMemory=0.5,
                                                includeNan=False,
                                                excludeID=[],
                                                excludeUUID=[])
                # only use the one in CUDA_VISIBLE_DEVICES
                if len(CUDA_VISIBLE_DEVICES) > 0:
                    deviceIDs = [
                        item for item in deviceIDs
                        if item in CUDA_VISIBLE_DEVICES
                    ]
                    basic.outputlogMessage('on ' + machine_name +
                                           ', available GPUs:' +
                                           str(deviceIDs) +
                                           ', among visible ones:' +
                                           str(CUDA_VISIBLE_DEVICES))
                else:
                    basic.outputlogMessage('on ' + machine_name +
                                           ', available GPUs:' +
                                           str(deviceIDs))

                if len(deviceIDs) < 1:
                    # wait 60 seconds (mmseg needs a longer time to load models),
                    # then check the available GPUs again
                    time.sleep(60)
                    continue
                # use only the first available visible GPU
                gpuid = deviceIDs[0]
                basic.outputlogMessage(
                    '%d: predict image %s on GPU %d of %s' %
                    (idx, inf_img_list[idx], gpuid, machine_name))
            else:
                gpuid = None
                basic.outputlogMessage('%d: predict image %s on %s' %
                                       (idx, inf_img_list[idx], machine_name))

            # run inference
            img_save_dir = os.path.join(area_save_dir, 'I%d' % idx)
            inf_list_file = os.path.join(area_save_dir, '%d.txt' % idx)

            done_indicator = '%s_done' % inf_list_file
            if os.path.isfile(done_indicator):
                basic.outputlogMessage('warning, %s exists, skip prediction' %
                                       done_indicator)
                idx += 1
                continue

            # if it already exist, then skip
            if os.path.isdir(img_save_dir) and is_file_exist_in_folder(
                    img_save_dir):
                basic.outputlogMessage(
                    'folder of the %dth image (%s) already exists, '
                    'it has been predicted or is being predicted' %
                    (idx, inf_img_list[idx]))
                idx += 1
                continue

            with open(inf_list_file, 'w') as inf_obj:
                inf_obj.writelines(inf_img_list[idx] + '\n')

            sub_process = Process(target=predict_one_image_mmseg,
                                  args=(para_file, inf_img_list[idx],
                                        img_save_dir, inf_list_file, gpuid,
                                        trained_model))
            sub_process.start()
            sub_tasks.append(sub_process)

            if b_use_multiGPUs is False:
                # wait until previous one finished
                while sub_process.is_alive():
                    time.sleep(1)

            idx += 1

            # wait until predicted image patches exist or exceed 20 minutes
            time0 = time.time()
            elapsed_time = time.time() - time0
            while elapsed_time < 20 * 60:
                elapsed_time = time.time() - time0
                file_exist = os.path.isdir(
                    img_save_dir) and is_file_exist_in_folder(img_save_dir)
                if file_exist is True or sub_process.is_alive() is False:
                    break
                else:
                    time.sleep(1)

            if sub_process.exitcode is not None and sub_process.exitcode != 0:
                sys.exit(1)

            basic.close_remove_completed_process(sub_tasks)
            # if 'chpc' in machine_name:
            #     time.sleep(60)  # wait 60 second on ITSC services
            # else:
            #     time.sleep(10)

    # check all the tasks already finished
    wait_all_finish = 0
    while basic.b_all_process_finish(sub_tasks) is False:
        if wait_all_finish % 100 == 0:
            basic.outputlogMessage('wait all tasks to finish')
        time.sleep(1)
        wait_all_finish += 1

    basic.close_remove_completed_process(sub_tasks)
    end_time = datetime.now()

    diff_time = end_time - start_time
    out_str = "%s: time cost of total parallel inference on %s: %d seconds" % (
        str(end_time), machine_name, diff_time.seconds)
    basic.outputlogMessage(out_str)
    with open("time_cost.txt", 'a') as t_obj:
        t_obj.writelines(out_str + '\n')
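
A hedged usage sketch; the parameter file name is an assumption, and passing None for the model falls back to <expr_name>/latest.pth, as handled above:

mmseg_parallel_predict_main('main_para.ini', None)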
Example #12
def image_translate_train_generate_main(para_file, gpu_num):
    '''
    apply a GAN to translate images from the source domain to the target domain

    the existing sub-images (with sub-labels) are images in the source domain;
    the images for inference have no training data, so each of them can be
    considered as being in the target domain
    '''
    print(datetime.now(), "image translation (train and generate) using GAN")

    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in the current folder: %s' %
                      (para_file, os.getcwd()))

    gan_para_file = parameters.get_string_parameters_None_if_absence(
        para_file, 'regions_n_setting_image_translation_ini')
    if gan_para_file is None:
        print(
            'regions_n_setting_image_translation_ini is not set, skip image translation using GAN'
        )
        return None
    gan_para_file = os.path.abspath(
        gan_para_file)  # convert to an absolute path, because the working folder changes later
    training_regions = parameters.get_string_list_parameters(
        para_file, 'training_regions')

    machine_name = os.uname()[1]
    SECONDS = time.time()

    # get the regions (equal to, or a subset of, the inference regions) that need image translation
    multi_gan_regions = parameters.get_string_list_parameters(
        gan_para_file, 'regions_need_image_translation')
    multi_gan_source_regions = parameters.get_string_list_parameters(
        gan_para_file, 'source_domain_regions')
    # check target domain
    if len(multi_gan_source_regions) != len(multi_gan_regions):
        raise ValueError(
            'the numbers of source and target domain regions are different')
    if set(multi_gan_source_regions).issubset(training_regions) is False:
        raise ValueError(
            'the source domain regions are not a subset of the training regions')
    for area_idx, (area_gan_ini, area_src_ini) in enumerate(
            zip(multi_gan_regions, multi_gan_source_regions)):
        basic.outputlogMessage('%d: source and target area: %s vs %s' %
                               (area_idx, area_src_ini, area_gan_ini))

    gan_working_dir = parameters.get_string_parameters(gan_para_file,
                                                       'working_root')
    # gan_dir_pre_name = parameters.get_string_parameters(gan_para_file, 'gan_dir_pre_name')
    # use the GAN model name as the gan_dir_pre_name
    gan_model = parameters.get_string_parameters(gan_para_file, 'gan_model')
    gan_dir_pre_name = gan_model

    # loop each regions need image translation
    sub_tasks = []
    for area_idx, (area_gan_ini, area_src_ini) in enumerate(
            zip(multi_gan_regions, multi_gan_source_regions)):

        area_ini = os.path.abspath(area_gan_ini)
        area_src_ini = os.path.abspath(area_src_ini)
        area_name = parameters.get_string_parameters(area_ini, 'area_name')
        area_remark = parameters.get_string_parameters(area_ini, 'area_remark')
        area_time = parameters.get_string_parameters(area_ini, 'area_time')

        inf_image_dir = parameters.get_directory(area_ini, 'inf_image_dir')

        # it is OK to treat a file name as a pattern and pass it to the following functions to get the file list
        inf_image_or_pattern = parameters.get_string_parameters(
            area_ini, 'inf_image_or_pattern')

        inf_img_list = io_function.get_file_list_by_pattern(
            inf_image_dir, inf_image_or_pattern)
        img_count = len(inf_img_list)
        if img_count < 1:
            raise ValueError(
                'No image for image translation, please check inf_image_dir and inf_image_or_pattern in %s'
                % area_ini)

        gan_project_save_dir = get_gan_project_save_dir(
            gan_working_dir, gan_dir_pre_name, area_name, area_remark,
            area_time, area_src_ini)

        if os.path.isdir(gan_project_save_dir):
            if generate_image_exists(gan_project_save_dir) is True:
                basic.outputlogMessage(
                    'generated new images (generate.txt_done) already exist for %s, skip'
                    % gan_project_save_dir)
                continue
        else:
            io_function.mkdir(gan_project_save_dir)

        # parallel run image translation for this area
        CUDA_VISIBLE_DEVICES = []
        if 'CUDA_VISIBLE_DEVICES' in os.environ.keys():
            CUDA_VISIBLE_DEVICES = [
                int(item.strip())
                for item in os.environ['CUDA_VISIBLE_DEVICES'].split(',')
            ]

        # get a valid GPU
        gpuids = []
        while len(gpuids) < 1:
            # get available GPUs  # https://github.com/anderskm/gputil
            deviceIDs = GPUtil.getAvailable(order='first',
                                            limit=100,
                                            maxLoad=0.5,
                                            maxMemory=0.5,
                                            includeNan=False,
                                            excludeID=[],
                                            excludeUUID=[])
            # only use the one in CUDA_VISIBLE_DEVICES
            if len(CUDA_VISIBLE_DEVICES) > 0:
                deviceIDs = [
                    item for item in deviceIDs if item in CUDA_VISIBLE_DEVICES
                ]
                basic.outputlogMessage('on ' + machine_name +
                                       ', available GPUs:' + str(deviceIDs) +
                                       ', among visible ones:' +
                                       str(CUDA_VISIBLE_DEVICES))
            else:
                basic.outputlogMessage('on ' + machine_name +
                                       ', available GPUs:' + str(deviceIDs))

            if len(deviceIDs) < 1:
                print(datetime.now(),
                      'No available GPUs, will check again in 60 seconds')
                time.sleep(
                    60)  # wait one minute, then check the available GPUs again
                continue
            # use only the first available visible GPU
            gpuids.append(deviceIDs[0])
            basic.outputlogMessage(
                '%d: image translation for %s on GPU %s of %s' %
                (area_idx, area_ini, str(gpuids), machine_name))

        # run image translation
        # PyTorch treats the first GPU in CUDA_VISIBLE_DEVICES as device 0, so we need to re-index the GPU ids
        if len(CUDA_VISIBLE_DEVICES) > 0:
            gpuids = [CUDA_VISIBLE_DEVICES.index(id) for id in gpuids]
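            # e.g., with CUDA_VISIBLE_DEVICES=[2, 3] and gpuids=[3], gpuids becomes [1]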

        sub_process = Process(target=image_translate_train_generate_one_domain,
                              args=(gan_project_save_dir, gan_para_file,
                                    area_src_ini, area_ini, gpuids,
                                    inf_img_list))

        sub_process.start()
        sub_tasks.append(sub_process)

        # wait until the image translation has started or 20 minutes have passed
        time0 = time.time()
        elapsed_time = time.time() - time0
        while elapsed_time < 20 * 60:
            elapsed_time = time.time() - time0
            if CUT_gan_is_ready_to_train(
                    gan_project_save_dir) is True or sub_process.is_alive(
                    ) is False:
                break
            else:
                time.sleep(5)

        time.sleep(10)  # wait, allowing time for the GAN process to start and to fail early if it has problems

        if sub_process.exitcode is not None and sub_process.exitcode != 0:
            sys.exit(1)

        basic.close_remove_completed_process(sub_tasks)

    # check all the tasks already finished
    while basic.b_all_process_finish(sub_tasks) is False:
        basic.outputlogMessage('wait all tasks to finish')
        time.sleep(60)
    basic.check_exitcode_of_process(sub_tasks)
    basic.close_remove_completed_process(sub_tasks)

    save_image_dir = parameters.get_string_parameters(para_file,
                                                      'input_train_dir')
    save_label_dir = parameters.get_string_parameters(para_file,
                                                      'input_label_dir')
    merge_subImages_from_gan(multi_gan_source_regions, multi_gan_regions,
                             gan_working_dir, gan_dir_pre_name, save_image_dir,
                             save_label_dir)

    duration = time.time() - SECONDS
    os.system(
        'echo "$(date): time cost of translating sub images to target domains: %.2f seconds">>time_cost.txt'
        % duration)
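
A minimal invocation sketch (the parameter file name and GPU count are assumptions):

image_translate_train_generate_main('main_para.ini', 1)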