# Reconstructed header for this excerpt (assumption): iterate over the
# dataset splits; read_imgs_data and GetEnvVar are helpers from the
# source repository.
import copy
import csv
import os

from PIL import Image, ImageDraw


def main(Params):
    for set_name in Params['sets']:
        data_path = os.path.join(Params['data_main_path'], set_name)
        annotations_file_name = os.path.join(
            data_path, 'BL_' + set_name.title() + '_leaf_location.csv')
        with open(annotations_file_name, 'r', newline='') as file:
            image_data_leaf_location = read_imgs_data(
                csv.reader(file, delimiter=','))
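        # read_imgs_data (source-repo helper) is assumed to map each image
        # file name to a list of annotated centre points with 'x'/'y' keys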
        for image_data in image_data_leaf_location:
            im_original = Image.open(os.path.join(data_path, image_data))
            im_drawing = copy.deepcopy(im_original)
            draw_mask = ImageDraw.Draw(im_drawing)
            for point in image_data_leaf_location[image_data]:
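                # draw a 12x12-pixel red dot centred on the annotated point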
                x_c, y_c = int(point['x']), int(point['y'])
                draw_mask.ellipse([x_c - 6, y_c - 6, x_c + 6, y_c + 6],
                                  fill='red')
            # im_drawing.show()
            new_file_name = os.path.join(Params['drawing_path'], image_data)
            im_drawing.save(new_file_name)


if __name__ == "__main__":
    Params = {}
    Params['data_main_path'] = os.path.join(GetEnvVar("DatasetsPath"),
                                            "Counting Datasets",
                                            "Banana_leaves", "BL")
    Params['sets'] = ['train', 'test', 'val']
    Params['drawing_path'] = os.path.join(Params['data_main_path'],
                                          "leaf_center_drawing")
    os.makedirs(Params['drawing_path'], exist_ok=True)
    main(Params)
Example #2
    # store final result too
    model.save(os.path.join(args.snapshot_path, 'resnet50_final.h5'))


# imports needed by this excerpt (the top of the file is truncated above)
import os
import sys

if __name__ == '__main__':
    # parse command-line arguments; parse_args and GetEnvVar are helpers
    # from the source repository
    args = parse_args(sys.argv[1:])

    args.pipe = 'reg'
    # path to the data (organized like the folder layout below)
    args.dataset = 'BL'
    args.data_path = os.path.join(GetEnvVar('DatasetsPath'),
                                  'Phenotyping Datasets', 'Plant phenotyping',
                                  'data_2', 'CVPPP2017_LCC_training',
                                  'training', args.dataset)

    if args.dataset == 'BL':
        args.data_path = os.path.join(GetEnvVar('DatasetsPath'),
                                      'Counting Datasets', 'Banana_leaves',
                                      args.dataset)

        args.train_csv_leaf_number_file = os.path.join(
            args.data_path, 'train', args.dataset + '_Train.csv')
        args.train_csv_leaf_location_file = os.path.join(
            args.data_path, 'train', args.dataset + '_Train_leaf_location.csv')
        args.val_csv_leaf_number_file = os.path.join(args.data_path, 'val',
                                                     args.dataset + '_Val.csv')
Example #3

# imports needed by this excerpt; train, train_reg, GetEnvVar, get_paths_dict
# and get_data are modules/helpers from the source repository
import os
import random


def main(args=None):
    # NOTE: args must be an already-parsed argument namespace; the None
    # default is kept from the source but is not handled here.

    random.seed(50)

    args.random_transform = True

    args.pipe = 'reg'  # 'reg' or 'keyPfinder'

    args.exp_num = 141013

    args.early_stopping_indicator = "AbsCountDiff"
    args.epochs = 2
    args.gpu = '0'

    args.exp_name = 'hyper_1'

    args.early_stopping_patience = 1

    # multi-GPU settings
    args.multi_gpu = False
    args.multi_gpu_force = False

    if args.pipe == 'reg':
        args.option = 0
        args.calc_det_performance = False
    elif args.pipe == 'keyPfinder':
        args.option = 10
        args.calc_det_performance = True
    else:
        print("Choose a relevant pipe - keyPfinder or reg")
        return

    all_data_path = os.path.join(GetEnvVar('DatasetsPath'),
                                 'Phenotyping Datasets', 'Plant phenotyping',
                                 'data_2', 'CVPPP2017_LCC_training',
                                 'training')

    Ac_files_path = os.path.join(all_data_path, 'Ac')

    args.snapshot_path = os.path.join(GetEnvVar('ModelsPath'),
                                      'LCC_Models_senepshots', args.pipe,
                                      'exp_' + str(args.exp_num))
    args.tensorboard_dir = os.path.join(GetEnvVar('ExpResultsPath'),
                                        'LCC_exp_res', args.pipe, 'log_dir',
                                        'exp_' + str(args.exp_num))

    args.save_path = os.path.join(GetEnvVar('ExpResultsPath'), 'LCC_exp_res',
                                  args.pipe, "results",
                                  'exp_' + str(args.exp_num))

    files_paths = get_paths_dict(Ac_files_path, args.exp_num)

    get_data(files_paths, all_data_path)

    train_count_file = files_paths['train_count_file']
    train_centers_file = files_paths['train_centers_file']

    val_count_file = files_paths['val_count_file']
    val_centers_file = files_paths['val_centers_file']

    args.train_csv_leaf_number_file = train_count_file
    args.train_csv_leaf_location_file = train_centers_file

    args.val_csv_leaf_number_file = val_count_file
    args.val_csv_leaf_location_file = val_centers_file

    # Train the model on the current split
    print('Start training on Ac')
    if args.pipe == 'keyPfinder':
        train.main(args)

    elif args.pipe == 'reg':
        train_reg.main(args)

    print("Done")
Example #4

# imports needed by this excerpt; data_split, split_to_sets,
# create_csv_files_for_fold, train, train_reg, evaluate_LCC and GetEnvVar
# come from the source repository
import csv
import os
import random

import numpy as np


def main(args=None):

    random.seed(50)
    np.random.seed(0)
    args.pipe = 'reg'  #'reg' or 'keyPfinder'

    args.random_transform = True

    # reg options:
    # reg_baseline_c5_dubreshko
    # reg_baseline_c5
    # reg_fpn_p3
    # reg_fpn_p3_p7_avg
    # reg_fpn_p3_p7_mle
    # reg_fpn_p3_p7_min_sig
    # reg_fpn_p3_p7_mle_L1
    # reg_fpn_p3_p7_min_sig_L1
    # keyPfinder options:
    # detection_option_20

    args.exp_name = 'reg_baseline_c5_dubreshko'

    args.lr = 1e-5
    args.reduce_lr = True
    args.reduceLR_patience = 5
    args.reduceLR_factor = 0.05

    args.early_stopping_indicator = "AbsCountDiff"
    args.early_stopping_patience = 50

    args.step_multi = 5

    args.multi_gpu = False
    args.multi_gpu_force = False

    if args.pipe == 'reg':
        args.option = args.exp_name
        args.calc_det_performance = False
        args.do_dropout = False

    elif args.pipe == 'keyPfinder':
        # key point detection options:
        # 10 - best option, as in the paper
        # 20 - reducing size GT Gaussian maps for the sub-model
        args.option = 20

        # detection performance is computed with the PCK metric; see the
        # paper for more information
        args.calc_det_performance = False

    else:
        print("Choose a relevant pipe - keyPfinder or reg")
        return

    args.save_res_path = os.path.join(
        GetEnvVar('ExpResultsPath'), 'Counting_Agri', args.pipe, "results",
        'results_' + args.pipe + '_exp_' + args.exp_name + '_' +
        str(args.exp_num) + ".csv")

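    # per-dataset image counts; used to weight each dataset's mean count
    # agreement in the (commented-out) aggregation at the end of main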
    images_num = {}
    images_num['A1'] = 128
    images_num['A2'] = 31
    images_num['A3'] = 27
    images_num['A4'] = 624
    images_num['BL'] = 1016

    # chosen_datasets = ['A1', 'A2', 'A3', 'A4']
    chosen_datasets = ['BL']

    num_of_CV = args.num_of_CV
    agreement_per_ds = {}
    total_num_of_images = 0
    total_mean_agreement = 0

    for ds in chosen_datasets:

        total_num_of_images += images_num[ds]

        if ds in ('A1', 'A2', 'A3', 'A4'):
            args.data_path = os.path.join(GetEnvVar('DatasetsPath'),
                                          'Counting Datasets',
                                          'CVPPP2017_LCC_training', 'training',
                                          ds)
        elif ds == 'BL':
            args.data_path = os.path.join(GetEnvVar('DatasetsPath'),
                                          'Counting Datasets', 'Banana_leaves',
                                          ds)
        #
        # stats = {}
        # stats[ds] ={}
        #
        # stats[ds]['CountDiff'] = []
        # stats[ds]['AbsCountDiff'] = []
        # stats[ds]['CountAgreement'] = []
        # stats[ds]['MSE'] = []
        # stats[ds]['R_2'] = []
        # stats[ds]['ap'] = []

        All_Splitted_Data = data_split(args.data_path, ds)

        print('Working on dataset:', ds)

        if not os.path.isfile(args.save_res_path):
            with open(args.save_res_path, 'w', newline='') as csvfile:
                writer = csv.writer(csvfile)
                # header matches the per-fold result row written below
                writer.writerow([
                    "Exp", "Augmentation", "dataset", "dic", "AbsDic",
                    "CountAgreement", "MSE", "R_2", "ap", "epochs"
                ])

        # cross-validation: each iteration uses one fold for testing, one for
        # validation and the remaining two for training (num_of_CV == 4)
        for cv_fold in range(1, num_of_CV + 1):

            saving_path_name = os.path.join('exp_' + str(args.exp_num),
                                            'cv_' + str(cv_fold))

            args.snapshot_path = os.path.join(GetEnvVar('ModelsPath'),
                                              'Counting_Models_snapshots',
                                              args.pipe, saving_path_name)

            args.model = os.path.join(args.snapshot_path, 'resnet50_csv.h5')

            args.save_path = os.path.join(GetEnvVar('ExpResultsPath'),
                                          'Counting_Agri', args.pipe,
                                          "results", saving_path_name)
            args.tensorboard_dir = os.path.join(GetEnvVar('ExpResultsPath'),
                                                'Counting_Agri', args.pipe,
                                                'log_dir', saving_path_name)

            # rotate the folds so that every fold serves once as the test set
            Test_fold_num = cv_fold % num_of_CV + 1
            Val_fold_num = (cv_fold + 1) % num_of_CV + 1
            Train_fold_num = [(cv_fold + 2) % num_of_CV + 1,
                              (cv_fold + 3) % num_of_CV + 1]
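            # e.g. with num_of_CV == 4 and cv_fold == 1 this gives
            # Test_fold_num == 2, Val_fold_num == 3, Train_fold_num == [4, 1]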

            current_data_dict = split_to_sets(All_Splitted_Data, Test_fold_num,
                                              Val_fold_num, Train_fold_num)
            csv_file_names = create_csv_files_for_fold(args.data_path, ds,
                                                       cv_fold, args.exp_num,
                                                       current_data_dict)

            args.train_csv_leaf_number_file = csv_file_names[
                'train_count_file']
            args.train_csv_leaf_location_file = csv_file_names[
                'train_centers_files']

            args.val_csv_leaf_number_file = csv_file_names['val_count_file']
            args.val_csv_leaf_location_file = csv_file_names[
                'val_centers_files']

            # Train the model based on current split
            if args.pipe == 'keyPfinder':
                history = train.main(args)
            elif args.pipe == 'reg':
                history = train_reg.main(args)

            # Test the model

            # update args for evaluation
            args.val_csv_leaf_number_file = csv_file_names['test_count_file']
            args.val_csv_leaf_location_file = csv_file_names[
                'test_centers_files']

            if args.calc_det_performance:
                CountDiff, AbsCountDiff, CountAgreement, MSE, R_2, ap = evaluate_LCC.main(
                    args)
                ap = str(round(ap, 3))

            else:
                CountDiff, AbsCountDiff, CountAgreement, MSE, R_2 = evaluate_LCC.main(
                    args)
                ap = 'not available'

            with open(args.save_res_path, 'a', newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow([
                    str(args.exp_num),
                    str(args.random_transform), ds,
                    str(round(CountDiff, 3)),
                    str(round(AbsCountDiff, 3)),
                    str(round(CountAgreement, 3)),
                    str(round(MSE, 3)),
                    str(round(R_2, 3)),
                    str(ap),
                    str(args.epochs)
                ])

            # print('Result of cv_',str(cv_fold),'-', 'testing ', ds)
            # print('CountDiff:',CountDiff, 'AbsCountDiff:', AbsCountDiff, 'CountAgreement', CountAgreement, 'MSE:', MSE)
            #

            # stats[ds]['CountDiff'].append(CountDiff)
            # stats[ds]['AbsCountDiff'].append(AbsCountDiff)
            # stats[ds]['CountAgreement'].append(CountAgreement)
            # stats[ds]['MSE'].append(MSE)
            # stats[ds]['R_2'].append(R_2)
            # stats[ds]['ap'].append(ap)

            # Delete current temp csv files
            for file in csv_file_names:
                if os.path.isfile(csv_file_names[file]):
                    os.remove(csv_file_names[file])

            # args.nd is configured elsewhere in the source; this stops the
            # cross-validation after a single fold
            if args.nd[3] == 1:
                break

        args.exp_num += 1
        # get mean and std errors, and save to results file

        # if not os.path.isfile(args.save_res_path) :
        #     with open(args.save_res_path, 'w', newline='') as csvfile:
        #         writer = csv.writer(csvfile)
        #         writer.writerow(["Exp", "Augmantation", "Train_set", "Test_set", "mean_Dic", "std_Dic",
        #                          "mean_AbsDic", "std_AbsDic", "mean_Agreement", "std_Agreement",
        #                          "mean_MSE", "std_MSE", 'mean_R_2', "std_R_2", "mean_ap", "std_ap",
        #                          "all_dic", "all_AbsDic", "all_CountAgreement", "all_MSE",
        #                          "all_R_2", "all_ap"])

    #     mean_CountAgreement = get_aggregated_results_withHyper(args, stats, ds)
    #     agreement_per_ds[ds] = mean_CountAgreement
    #
    # # get weighted average of count agreement
    # for ds in chosen_datasets:
    #     total_mean_agreement += agreement_per_ds[ds]*(images_num[ds]/total_num_of_images)
    #
    # print('total_mean_agreement:', total_mean_agreement)

    print("Done")

    return history
Example #5
    return new_csv_file_data

# imports needed by this excerpt; find_images_names, create_data_to_write
# and GetEnvVar are helpers from the source repository
import csv
import os


def write_to_csv(new_csv_file_name, new_csv_file_data):
    with open(new_csv_file_name, 'w', newline='') as csvfile:
        wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
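        # QUOTE_ALL wraps every field of the output CSV in quotes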
        for row in new_csv_file_data:
            wr.writerow([row[0], row[1], row[2]])

def main(Ai_data_path, dataset_name):

    print('creating leaf center csv on dataset {}'.format(dataset_name))

    # find images names
    csv_file_name = os.path.join(Ai_data_path, dataset_name + '.csv')
    masks_center_names = find_images_names(csv_file_name)
    centers_file_path = os.path.join(Ai_data_path, dataset_name + "_leaf_location.csv")

    new_csv_file_data = create_data_to_write(Ai_data_path, masks_center_names, dataset_name)

    # write the data
    write_to_csv(centers_file_path, new_csv_file_data)

if __name__ == "__main__":

    dataset_name = "A4"
    plant_phen_path = os.path.join(GetEnvVar('DatasetsPath'), 'Phenotyping Datasets', 'Plant phenotyping')
    data_path = os.path.join(plant_phen_path, 'CVPPP2017_LCC_training', 'training')
    Ai_data_path = os.path.join(data_path, dataset_name)

    main(Ai_data_path, dataset_name)
Example #6
        'reg_fpn_p3_p7_min_sig'
        '''
        args.option = 'reg_fpn_p3_p7_mle'
        args.calc_det_performance = False
    elif args.pipe == 'keyPfinder':
        args.option = 20
        args.calc_det_performance = False
    else:
        print("Choose a relevant pipe - keyPfinder or reg")
        sys.exit()

    args.dataset_type = 'csv'
    random.seed(10)

    if args.dataset == 'BL':
        args.data_path = os.path.join(GetEnvVar('DatasetsPath'),
                                      'Counting Datasets', 'Banana_leaves',
                                      args.dataset)

        args.save_path = os.path.join(GetEnvVar('ExpResultsPath'),
                                      'BL_exp_res', args.pipe, "results",
                                      'exp_' + str(args.exp_num))

        args.val_csv_leaf_number_file = os.path.join(
            args.data_path, 'test', args.dataset + '_Test.csv')
        args.val_csv_leaf_location_file = os.path.join(
            args.data_path, 'test', args.dataset + '_Test_leaf_location.csv')
    else:
        args.data_path = os.path.join(GetEnvVar('DatasetsPath'),
                                      'Counting Datasets',
                                      'CVPPP2017_LCC_training', 'training',