def main(path_results, dataset_name, model_num, score, iou, num_annotation_file=1, plot=True, full=True, best_weights=True):
    """Predict + evaluate one trained model and plot its per-video / per-class resume.

    Args:
        path_results: root folder where per-dataset result folders live.
        dataset_name: dataset sub-folder name (e.g. 'adl').
        model_num: numeric id of the trained model to evaluate.
        score: detection score threshold (also embedded in the cached preds filename).
        iou: NMS IoU threshold (also embedded in the cached preds filename).
        num_annotation_file: index into train_params['path_annotations']
            (presumably 0=train, 1=val — TODO confirm against training code).
        plot: forwarded to plot_prediction_resume.
        full: forwarded to get_full_evaluation.
        best_weights: evaluate the best-weights checkpoint instead of stage-2 weights.

    Returns:
        (model_folder, class_names, videos, occurrences, eval_stats, train_params,
         (train_loss, val_loss, train_diff))
    """
    model_folder = train_utils.get_model_path(path_results, dataset_name, model_num)
    # Context manager closes the handle deterministically
    # (original json.load(open(...)) leaked the file handle).
    with open(model_folder + 'train_params.json', 'r') as f:
        train_params = json.load(f)
    class_names = ktrain.get_classes(train_params['path_classes'])

    annotations_file = train_params['path_annotations'][num_annotation_file]
    # For ADL annotation files, strip the optional size suffix from the path.
    size_suffix = train_params.get('size_suffix', '')
    if 'adl' in annotations_file and size_suffix != '':
        annotations_file = annotations_file.replace(size_suffix, '')
    print(' * Exploring:', annotations_file)

    # Cached predictions live next to the model; stage-2 weights get a
    # distinct 'stage2' prefix so both checkpoints can be cached side by side.
    ann_stem = annotations_file.split('/')[-1][:-4]  # basename without extension
    if best_weights:
        preds_filename = '{}preds_{}_score{}_iou{}.json'.format(model_folder, ann_stem, score, iou)
    else:
        preds_filename = '{}preds_stage2_{}_score{}_iou{}.json'.format(model_folder, ann_stem, score, iou)
    print('=' * 80)

    train_diff, train_loss, val_loss = get_train_resume(model_folder)
    # Runs the model (or reuses the cache) and stores predictions at preds_filename.
    _ = prediction_utils.predict_and_store_from_annotations(model_folder, train_params, annotations_file,
                                                            preds_filename, score, iou,
                                                            best_weights=best_weights)
    eval_stats, videos = get_full_evaluation(annotations_file, preds_filename, class_names, full)

    plot_prediction_resume(eval_stats, videos, class_names, 'video', annotations_file, model_num, plot)
    occurrences = plot_prediction_resume(eval_stats, videos, class_names, 'class', annotations_file,
                                         model_num, plot)
    return model_folder, class_names, videos, occurrences, eval_stats, train_params, (train_loss, val_loss, train_diff)
# %% Compare the number of per-frame detections across several trained models.
path_results = '/mnt/hdd/egocentric_results/'
dataset_name = 'adl'
path_dataset = '/home/asabater/projects/ADL_dataset/'

# Annotation index and score used to locate the cached stage-2 prediction files.
num_ann, score = 1, 0
min_score = 0.3                                  # keep detections at/above this confidence
model_nums = {45: '320', 56: '416', 44: '608'}   # model id -> input-resolution label

preds_num = {}
for model_num, label in model_nums.items():
    model_folder = train_utils.get_model_path(path_results, dataset_name, model_num)
    # Context managers close the handles (json.load(open(...)) leaked them).
    with open(model_folder + 'train_params.json', 'r') as f:
        train_params = json.load(f)
    annotations_file = train_params['path_annotations'][num_ann]
    preds_filename = '{}preds_stage2_{}_score{}_iou{}.json'.format(
            model_folder, annotations_file.split('/')[-1][:-4], score, 0.5)
    with open(preds_filename, 'r') as f:
        preds = json.load(f)
    preds = [p for p in preds if p['score'] >= min_score]
    preds_num[label] = pd.DataFrame(preds)

# %%
# Detections per frame for each model: one single-column DataFrame per label,
# indexed by image_id.
preds_count = {k: pd.DataFrame(v.groupby('image_id').size(), columns=[k])
               for k, v in preds_num.items()}
def main():
    # Visual sanity-check of stored predictions: draws the predicted boxes of one
    # model on top of the dataset frames with OpenCV, one frame every 200 ms.
    # NOTE(review): relies on module-level names (json, colorsys, random, cv2)
    # imported outside this chunk — confirm at the top of the file.
    import os
    os.chdir(os.getcwd() + '/..')   # run relative to the project root

    # %%
    # keras_yolo3 is vendored; its sub-packages must be on sys.path BEFORE the
    # import below — keep this statement order.
    import train_utils
    import pandas as pd
    import sys
    sys.path.append('keras_yolo3/')
    sys.path.append('keras_yolo3/yolo3/')
    import keras_yolo3.train as ktrain

    # (historic experiment configs kept for reference)
    ##path_dataset = '/mnt/hdd/datasets/VOC/'
    #path_results = '/mnt/hdd/egocentric_results/'
    ##path_classes = './dataset_scripts/coco/coco_classes.txt'
    #path_dataset = '/mnt/hdd/datasets/coco/'
    #path_classes = './dataset_scripts/adl/adl_classes_v3_8.txt'
    #
    ##gt_filename = '/home/asabater/projects/Egocentric-object-detection/dataset_scripts/coco/annotations_coco_val_coco.json'
    ##gt = json.load(open(gt_filename, 'r'))['annotations']
    #
    #score = 0.4
    #gt_filename = path_results + 'default/voc_yolo_model_0/preds_annotations_coco_val_score{}_iou.json'.format(score)
    ##gt_filename = path_results + 'adl/0414_2252_model_20/preds_annotations_adl_val_v3_8_r_fd10_fsn1_score0_iou0.5.json'
    ##gt_filename = path_results + 'adl/0414_2252_model_20/preds_annotations_adl_train_v3_8_r_fd10_fsn1_score5e-05_iou0.5.json'
    #gt = json.load(open(gt_filename, 'r'))
    #path_classes = './dataset_scripts/voc/voc_classes.txt'
    #path_dataset = '/mnt/hdd/datasets/VOC/'
    #score = 0.4
    #gt_filename = path_results + 'voc/0326_1706_model_0/preds_annotations_voc_val_score{}_iou.json'.format(score)
    #gt = json.load(open(gt_filename, 'r'))

    path_results = '/mnt/hdd/egocentric_results/'
    dataset_name = 'adl'
    path_dataset = '/home/asabater/projects/ADL_dataset/'

    # Load the cached stage-2 predictions of model 62 (score 0, IoU 0.5).
    model_num = 62
    model_folder = train_utils.get_model_path(path_results, dataset_name, model_num)
    train_params = json.load(open(model_folder + 'train_params.json', 'r'))
    annotations_file = train_params['path_annotations'][1]
    preds_filename = '{}preds_stage2_{}_score{}_iou{}.json'.format( model_folder, annotations_file.split('/')[-1][:-4], 0, 0.5)
    gt = json.load(open(preds_filename, 'r'))

    # %%
    # Keep confident detections and regroup them per frame:
    # one dict per image_id with all its boxes and category ids.
    preds = pd.DataFrame([p for p in gt if p['score'] >= 0.3])
    preds = [{ 'image_id': i, 'bboxes': g.bbox.tolist(), 'category_id': g.category_id.tolist() } for i, g in preds.groupby('image_id')]

    # %%
    from playground.test_annotations import print_annotations
    class_names = ktrain.get_classes(train_params['path_classes'])
    # One distinct color per class: spread hues over HSV, then scale to 0-255 RGB.
    hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list( map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
    # Start playback at a random frame.
    preds = preds[random.randint(0, len(preds)):]
    for pred in preds:
        # Annotation-line format: '<img_path> x_min,y_min,x_max,y_max,class ...'
        sample = '{}{}.jpg'.format(path_dataset, pred['image_id'])
        for bbox, cat in zip(pred['bboxes'], pred['category_id']):
            # bbox is presumably [x, y, w, h] (COCO style) — converted to corners here.
            sample += ' {},{},{},{},{}'.format(bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3], cat)
        print(sample)
        pass
        # result = print_annotations(sample, 0.7, class_names)
        result = print_annotations(sample, class_names, colors, perc=0.7)
        cv2.imshow("result", result)
        # Show each frame for 200 ms; press 'q' to quit the preview loop.
        if cv2.waitKey(200) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
def main_v2(path_results, dataset_name, model_num, input_shape=None, eval_train=True, full=False):
    """Evaluate a model's best-weights vs stage-2 checkpoints and report the winner.

    Predicts/evaluates the validation annotations with both checkpoints, picks the
    one with the higher val mAP@50, optionally evaluates the train annotations with
    it, builds an excel-style resume (copied to the clipboard) and prints
    mAP@50 / R10 summaries.

    Args:
        path_results: root folder of results.
        dataset_name: dataset sub-folder name.
        model_num: numeric id of the trained model.
        input_shape: optional override for train_params['input_shape'].
        eval_train: also predict/evaluate the training annotations.
        full: forwarded to get_full_evaluation.

    Returns:
        (eval_stats_train, eval_stats_val, videos, class_names);
        eval_stats_train is None when eval_train is False.
    """
    model_folder = train_utils.get_model_path(path_results, dataset_name, model_num)
    # Context manager closes the handle (json.load(open(...)) leaked it).
    with open(model_folder + 'train_params.json', 'r') as f:
        train_params = json.load(f)
    class_names = ktrain.get_classes(train_params['path_classes'])
    annotation_files = train_params['path_annotations']

    if input_shape is not None: train_params['input_shape'] = input_shape

    # Pred and eval val, best weights
    _, preds_filename_val_bw = prediction_utils.predict_and_store_from_annotations(
            model_folder, train_params, annotation_files[1], model_folder,
            image_size=train_params['input_shape'], score=train_params.get('eval_val_score'),
            nms_iou=0.5, best_weights=True, raw_eval='def')
    eval_stats_val_bw, videos = get_full_evaluation(annotation_files[1], preds_filename_val_bw, class_names, full)

    # Pred and eval val, stage-2 weights
    _, preds_filename_val_st2 = prediction_utils.predict_and_store_from_annotations(
            model_folder, train_params, annotation_files[1], model_folder,
            train_params['input_shape'], score=train_params.get('eval_val_score'),
            nms_iou=0.5, best_weights=False, raw_eval='def')
    eval_stats_val_st2, videos = get_full_evaluation(annotation_files[1], preds_filename_val_st2, class_names, full)

    print('='*80)
    # BUGFIX: second placeholder was '{:2f}' (field width 2, default 6-digit
    # precision); '{:.2f}' gives the intended two decimal places.
    print('mAP@50 | val_bw: {:.2f} | val_st2: {:.2f}'.format(eval_stats_val_bw['total'][1]*100, eval_stats_val_st2['total'][1]*100))
    print('='*80)

    # Pred and eval train with whichever checkpoint scored best on val.
    best_weights = eval_stats_val_bw['total'][1] > eval_stats_val_st2['total'][1]
    if eval_train:
        _, preds_filename_train = prediction_utils.predict_and_store_from_annotations(
                model_folder, train_params, annotation_files[0], model_folder,
                train_params['input_shape'], score=train_params.get('eval_train_score'),
                nms_iou=0.5, best_weights=best_weights, raw_eval='def')
        eval_stats_train, videos = get_full_evaluation(annotation_files[0], preds_filename_train, class_names, full)
    else:
        eval_stats_train = None

    train_diff, train_loss, val_loss = get_train_resume(model_folder)
    eval_stats_val = eval_stats_val_bw if best_weights else eval_stats_val_st2
    full_resume = get_excel_resume_full(model_folder, train_params, train_loss, val_loss,
                                        eval_stats_train, eval_stats_val, train_diff, best_weights)

    print('='*80)
    print(full_resume)
    pyperclip.copy(full_resume)     # resume goes straight to the clipboard for pasting
    print('='*80)

    # Index 1 -> mAP@50, index 7 -> R10 in the 'total' stats row
    # (per how they are printed here — TODO confirm in get_full_evaluation).
    if eval_train:
        print('mAP@50 | val_bw: {:.4f} | val_st2: {:.4f} | train: {:.4f}'.format(
                eval_stats_val_bw['total'][1]*100, eval_stats_val_st2['total'][1]*100,
                eval_stats_train['total'][1]*100
            ))
        print('R10 | val_bw: {:.4f} | val_st2: {:.4f} | train: {:.4f}'.format(
                eval_stats_val_bw['total'][7]*100, eval_stats_val_st2['total'][7]*100,
                eval_stats_train['total'][7]*100
            ))
    else:
        print('mAP@50 | val_bw: {:.4f} | val_st2: {:.4f}'.format(
                eval_stats_val_bw['total'][1]*100, eval_stats_val_st2['total'][1]*100
            ))
        print('R10 | val_bw: {:.4f} | val_st2: {:.4f}'.format(
                eval_stats_val_bw['total'][7]*100, eval_stats_val_st2['total'][7]*100
            ))
    print('='*80)

    return eval_stats_train, eval_stats_val, videos, class_names
model_path = 'base_models/yolov3-openimages.h5' classes_path = 'base_models/openimages_classes.txt' elif model == 'voc': # path_dataset = '/mnt/hdd/datasets/adl_dataset/ADL_frames/' input_shape = (416, 416) # path_dataset = '/mnt/hdd/datasets/VOC/' # path_annotations = ['./dataset_scripts/voc/annotations_voc_train.txt', # './dataset_scripts/voc/annotations_voc_val.txt'] ## path_annotations = ['/home/asabater/projects/ADL_dataset/annotations_adl_train.txt', ## '/home/asabater/projects/ADL_dataset/annotations_adl_val.txt'] # path_classes = './dataset_scripts/voc/voc_classes.txt' # anchors_path = 'base_models/yolo_anchors.txt' model_num = 0 model_folder = train_utils.get_model_path('/mnt/hdd/egocentric_results/', 'voc', model_num) train_params = json.load(open(model_folder + 'train_params.json', 'r')) # model_path = model_folder + 'weights/trained_weights_stage_1.h5' classes_path = train_params['path_classes'] anchors_path = train_params['path_anchors'] model_image_size = train_params['input_shape'] path_base = '/mnt/hdd/datasets/VOC/' elif model == 'adl': # anchors_path = 'base_models/yolo_anchors.txt' model_num = 16 model_folder = train_utils.get_model_path('/mnt/hdd/egocentric_results/', 'adl', model_num) train_params = json.load(open(model_folder + 'train_params.json', 'r')) # model_path = model_folder + 'weights/trained_weights_stage_1.h5' classes_path = train_params['path_classes']
model_nums, dataset_name, score = {64: 'original', 66: 'v2', 62: 'v3'}, 'adl', 0 #model_nums, dataset_name, score = {56:'v2 old', 52: 'v2', 60: 'v2 | xy: 5, wh: 5, conf_obj: 2.3'}, 'adl', 0 #model_nums, dataset_name, score = {57:'v3 old', 53: 'v3 new'}, 'adl', 0 #model_nums, dataset_name, score = {0: 'cv1_17', 1: 'cv2_18'}, 'kitchen', 0.005 models = {} preds = {} for model_num, _ in model_nums.items(): preds_file = '/'.join(video_file.split('/')[:-1]) + '/predictions/' + \ video_file.split('/')[-1][:-4] + \ '_model{}_ms{}'.format(model_num, score) + '.pckl' model_folder = train_utils.get_model_path('/mnt/hdd/egocentric_results/', dataset_name, model_num) train_params = json.load(open(model_folder + 'train_params.json', 'r')) classes_path = train_params['path_classes'] anchors_path = train_params['path_anchors'] model_image_size = train_params['input_shape'] model_path = get_best_weights(model_folder, train_params, score=score) model = EYOLO( model_image_size = (416, 416), model_path = model_path, anchors_path = anchors_path, classes_path = classes_path, score = score, iou = 0.5, # gpu_num = 2 td_len = train_params['td_len'],