import json

from keras.callbacks import (TensorBoard, ModelCheckpoint, ReduceLROnPlateau,
                             EarlyStopping)


def initialize_model(path_classes, path_anchors, path_model, input_shape,
                     freeze_body, path_weights, td_len, mode, spp, loss_percs,
                     **kwargs):
    """Create the YOLO model, store its architecture as JSON and return it
    together with the training callbacks."""

    class_names = ktrain.get_classes(path_classes)
    num_classes = len(class_names)
    anchors = ktrain.get_anchors(path_anchors)

    # Create model
    title = 'Create Keras model'
    print('{} {} {}'.format('=' * print_indnt, title,
                            '=' * (print_line - 2 - len(title) - print_indnt)))
    model = create_model(input_shape, anchors, num_classes,
                         freeze_body=freeze_body, weights_path=path_weights,
                         td_len=td_len, mode=mode, spp=spp,
                         loss_percs=loss_percs)

    # Store model architecture
    model_architecture = model.to_json()
    with open(path_model + 'architecture.json', 'w') as f:
        json.dump(model_architecture, f)
    print('Model architecture stored as json')
    print('=' * print_line)

    # Training callbacks: TensorBoard logging, best-weights checkpointing,
    # LR reduction on train/val loss plateaus, and early stopping
    callbacks = {
        'logging': TensorBoard(log_dir=path_model),
        'checkpoint': ModelCheckpoint(
            path_model + 'weights/' +
            'ep{epoch:03d}-loss{loss:.5f}-val_loss{val_loss:.5f}.h5',
            monitor='val_loss', save_weights_only=True, save_best_only=True,
            period=1),
        'reduce_lr_1': ReduceLROnPlateau(monitor='loss', min_delta=0.5,
                                         factor=0.1, patience=4, verbose=1),
        'reduce_lr_2': ReduceLROnPlateau(monitor='val_loss', min_delta=0,
                                         factor=0.1, patience=4, verbose=1),
        'early_stopping': EarlyStopping(monitor='val_loss', min_delta=0,
                                        patience=10, verbose=1),
    }

    log('MODEL CREATED')
    return model, callbacks, anchors, num_classes
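# A hypothetical usage sketch of initialize_model(). Every path and
# hyperparameter below is an illustrative assumption, not a value taken
# from this repository.
if __name__ == '__main__':
    model, callbacks, anchors, num_classes = initialize_model(
        path_classes='./dataset_scripts/adl/adl_classes.txt',      # assumed
        path_anchors='./keras_yolo3/model_data/yolo_anchors.txt',  # assumed
        path_model='/mnt/hdd/egocentric_results/adl/model_test/',  # assumed
        input_shape=(416, 416),
        freeze_body=2,  # assumed: freeze all but the 3 output layers
        path_weights='keras_yolo3/model_data/yolo_weights.h5',     # assumed
        td_len=None, mode=None, spp=False,  # assumed defaults
        loss_percs={})
    print('{} classes, {} anchors'.format(num_classes, len(anchors)))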
def main(path_results, dataset_name, model_num, score, iou,
         num_annotation_file=1, plot=True, full=True, best_weights=True):

    model_folder = train_utils.get_model_path(path_results, dataset_name, model_num)
    train_params = json.load(open(model_folder + 'train_params.json', 'r'))
    class_names = ktrain.get_classes(train_params['path_classes'])

    # train_params['path_annotations'][1] = train_params['path_annotations'][1].replace('_sk10', '_sk20')
    annotations_file = train_params['path_annotations'][num_annotation_file]
    if 'adl' in annotations_file and train_params.get('size_suffix', '') != '':
        annotations_file = annotations_file.replace(train_params.get('size_suffix', ''), '')
    # annotations_file = annotations_file.replace('.txt', '_pr416.txt')
    print(' * Exploring:', annotations_file)

    # Best-weights and stage-2 evaluations are stored under different file names
    if best_weights:
        preds_filename = '{}preds_{}_score{}_iou{}.json'.format(
            model_folder, annotations_file.split('/')[-1][:-4], score, iou)
        eval_filename = '{}stats_{}_score{}_iou{}.json'.format(
            model_folder, annotations_file.split('/')[-1][:-4], score, iou)
    else:
        preds_filename = '{}preds_stage2_{}_score{}_iou{}.json'.format(
            model_folder, annotations_file.split('/')[-1][:-4], score, iou)
        eval_filename = '{}stats_stage2_{}_score{}_iou{}.json'.format(
            model_folder, annotations_file.split('/')[-1][:-4], score, iou)
    # print(preds_filename)
    print('=' * 80)

    train_diff, train_loss, val_loss = get_train_resume(model_folder)
    _ = prediction_utils.predict_and_store_from_annotations(
        model_folder, train_params, annotations_file, preds_filename,
        score, iou, best_weights=best_weights)

    eval_stats, videos = get_full_evaluation(annotations_file, preds_filename,
                                             class_names, full)
    # resume = get_excel_resume(model_folder, train_params, train_loss, val_loss,
    #                           eval_stats, train_diff, fps, score, iou)

    plot_prediction_resume(eval_stats, videos, class_names, 'video',
                           annotations_file, model_num, plot)
    occurrences = plot_prediction_resume(eval_stats, videos, class_names,
                                         'class', annotations_file, model_num, plot)
    # print(resume)

    return (model_folder, class_names, videos, occurrences, eval_stats,
            train_params, (train_loss, val_loss, train_diff))
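# Hypothetical call to the evaluation entry point above. model_num, score
# and iou are illustrative assumptions (the values mirror those appearing
# elsewhere in these scripts).
if __name__ == '__main__':
    (model_folder, class_names, videos, occurrences, eval_stats,
     train_params, losses) = main('/mnt/hdd/egocentric_results/', 'adl', 62,
                                  score=0.005, iou=0.5,
                                  num_annotation_file=1, plot=True,
                                  full=True, best_weights=True)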
    # (enclosing function definition omitted)
    print('=' * 80)
    print(preds_num[label][preds_num[label]['image_id'] == image_id][['bbox', 'score']])
    boxes = preds_num[label][preds_num[label]['image_id'] == image_id]
    # boxes = boxes.apply(lambda r: ','.join([str(b) for b in r['bbox']]) + ',' + str(r['category_id']), axis=1).tolist()

    sample = '{}{}.jpg'.format(path_dataset, image_id)
    for i, r in boxes.iterrows():
        bbox = r['bbox']
        # COCO boxes are [x, y, w, h]; append as x_min,y_min,x_max,y_max,class
        sample += ' {},{},{},{},{}'.format(bbox[0], bbox[1],
                                           bbox[0] + bbox[2],
                                           bbox[1] + bbox[3],
                                           r['category_id'])

    result = print_annotations(sample, class_names, colors, perc=0.4)
    # result = print_annotations(sample, perc=0.4, class_names=class_names)
    results.append(result)

    return results


class_names = ktrain.get_classes(train_params['path_classes'])
# One evenly spaced HSV hue per class, converted to 0-255 RGB tuples
hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))

# Keep only images with a non-zero prediction count in the '320' column
preds_count = preds_count[preds_count['320'] != 0]
# image_id = preds_count.index[1]
for image_id in preds_count.index:
    print('=' * 80)
    print('=' * 80)
    print(image_id)
def main():

    import os
    os.chdir(os.getcwd() + '/..')

    # %%

    import json
    import colorsys
    import random

    import cv2
    import pandas as pd

    import sys
    sys.path.append('keras_yolo3/')
    sys.path.append('keras_yolo3/yolo3/')
    import train_utils
    import keras_yolo3.train as ktrain

    ##path_dataset = '/mnt/hdd/datasets/VOC/'
    #path_results = '/mnt/hdd/egocentric_results/'
    ##path_classes = './dataset_scripts/coco/coco_classes.txt'
    #path_dataset = '/mnt/hdd/datasets/coco/'
    #path_classes = './dataset_scripts/adl/adl_classes_v3_8.txt'
    #
    ##gt_filename = '/home/asabater/projects/Egocentric-object-detection/dataset_scripts/coco/annotations_coco_val_coco.json'
    ##gt = json.load(open(gt_filename, 'r'))['annotations']
    #
    #score = 0.4
    #gt_filename = path_results + 'default/voc_yolo_model_0/preds_annotations_coco_val_score{}_iou.json'.format(score)
    ##gt_filename = path_results + 'adl/0414_2252_model_20/preds_annotations_adl_val_v3_8_r_fd10_fsn1_score0_iou0.5.json'
    ##gt_filename = path_results + 'adl/0414_2252_model_20/preds_annotations_adl_train_v3_8_r_fd10_fsn1_score5e-05_iou0.5.json'
    #gt = json.load(open(gt_filename, 'r'))

    #path_classes = './dataset_scripts/voc/voc_classes.txt'
    #path_dataset = '/mnt/hdd/datasets/VOC/'
    #score = 0.4
    #gt_filename = path_results + 'voc/0326_1706_model_0/preds_annotations_voc_val_score{}_iou.json'.format(score)
    #gt = json.load(open(gt_filename, 'r'))

    path_results = '/mnt/hdd/egocentric_results/'
    dataset_name = 'adl'
    path_dataset = '/home/asabater/projects/ADL_dataset/'
    model_num = 62

    model_folder = train_utils.get_model_path(path_results, dataset_name, model_num)
    train_params = json.load(open(model_folder + 'train_params.json', 'r'))
    annotations_file = train_params['path_annotations'][1]
    preds_filename = '{}preds_stage2_{}_score{}_iou{}.json'.format(
        model_folder, annotations_file.split('/')[-1][:-4], 0, 0.5)
    gt = json.load(open(preds_filename, 'r'))

    # %%

    # Keep detections above a confidence threshold and group them by image
    preds = pd.DataFrame([p for p in gt if p['score'] >= 0.3])
    preds = [{'image_id': i,
              'bboxes': g.bbox.tolist(),
              'category_id': g.category_id.tolist()}
             for i, g in preds.groupby('image_id')]

    # %%

    from playground.test_annotations import print_annotations

    class_names = ktrain.get_classes(train_params['path_classes'])
    # One evenly spaced HSV hue per class, converted to 0-255 RGB tuples
    # (a factored-out version of this snippet is sketched after this function)
    hsv_tuples = [(x / len(class_names), 1., 1.) for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))

    # Start the visualization at a random image
    # (randint is inclusive on both ends, so len(preds) - 1 avoids an empty slice)
    preds = preds[random.randint(0, len(preds) - 1):]
    for pred in preds:
        sample = '{}{}.jpg'.format(path_dataset, pred['image_id'])
        for bbox, cat in zip(pred['bboxes'], pred['category_id']):
            # COCO boxes are [x, y, w, h]; append as x_min,y_min,x_max,y_max,class
            sample += ' {},{},{},{},{}'.format(bbox[0], bbox[1],
                                               bbox[0] + bbox[2],
                                               bbox[1] + bbox[3], cat)
        print(sample)
        # result = print_annotations(sample, 0.7, class_names)
        result = print_annotations(sample, class_names, colors, perc=0.7)
        cv2.imshow('result', result)
        # Show each frame for 200 ms; press 'q' to stop
        if cv2.waitKey(200) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
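# The HSV palette snippet appears verbatim in several of these scripts; below
# is a minimal factored-out sketch (a hypothetical helper, not part of the
# original code) that yields one visually distinct 0-255 RGB tuple per class.
import colorsys

def class_colors(num_classes):
    # Evenly spaced hues at full saturation and value
    hsv_tuples = [(x / num_classes, 1., 1.) for x in range(num_classes)]
    rgb = [colorsys.hsv_to_rgb(*h) for h in hsv_tuples]
    return [(int(r * 255), int(g * 255), int(b * 255)) for r, g, b in rgb]

# Usage: colors = class_colors(len(class_names))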
from dataset_scripts.annotations_to_coco import annotations_to_coco
from dataset_scripts.generate_custom_anchors import EYOLO_Kmeans

remove_empty_frames = True
cluster_number = 9
dataset_version = ''

path_annotations = [
    './dataset_scripts/kitchen/annotations_kitchen_train{}.txt'.format(dataset_version),
    './dataset_scripts/kitchen/annotations_kitchen_val{}.txt'.format(dataset_version),
]
path_classes = './dataset_scripts/kitchen/kitchen_classes{}.txt'.format(dataset_version)
class_names = ktrain.get_classes(path_classes)

# Groups of fine-grained kitchen labels merged into a single class
merge_classes = [
    (['spoon', 'knife', 'fork', 'cutlery', 'ladle', 'utensil', 'spatula',
      'tongs'], 'cutlery'),
    (['tap'], 'tap'),
    (['plate', 'bowl'], 'plate/bowl'),
    (['fridge', 'freezer'], 'fridge/freezer'),
    (['pan', 'pot', 'cooker:slow'], 'pan/pot'),
    (['salt', 'oil', 'vinegar'], 'salt/oil/vinegar'),
    (['cup', 'glass'], 'cup/glass'),
    (['bottle', 'jar', 'jug', 'liquid', 'liquid:washing', 'drink', 'beer',
      'coke', 'can', 'soap'
def main_v2(path_results, dataset_name, model_num, input_shape=None,
            eval_train=True, full=False):

    model_folder = train_utils.get_model_path(path_results, dataset_name, model_num)
    train_params = json.load(open(model_folder + 'train_params.json', 'r'))
    class_names = ktrain.get_classes(train_params['path_classes'])
    annotation_files = train_params['path_annotations']
    # annotation_files[1] = annotation_files[1].replace('_sk20', '_sk3')
    # train_params['eval_val_score'] = 0.05

    if input_shape is not None:
        train_params['input_shape'] = input_shape
    # train_params['eval_val_score'] = 0.005
    # train_params['eval_train_score'] = 0.005

    # Predict and evaluate the validation set with the best (lowest val_loss) weights
    _, preds_filename_val_bw = prediction_utils.predict_and_store_from_annotations(
        model_folder, train_params, annotation_files[1], model_folder,
        image_size=train_params['input_shape'],
        score=train_params.get('eval_val_score'), nms_iou=0.5,
        best_weights=True, raw_eval='def')
    eval_stats_val_bw, videos = get_full_evaluation(
        annotation_files[1], preds_filename_val_bw, class_names, full)

    # Predict and evaluate the validation set with the stage-2 weights
    _, preds_filename_val_st2 = prediction_utils.predict_and_store_from_annotations(
        model_folder, train_params, annotation_files[1], model_folder,
        image_size=train_params['input_shape'],
        score=train_params.get('eval_val_score'), nms_iou=0.5,
        best_weights=False, raw_eval='def')
    eval_stats_val_st2, videos = get_full_evaluation(
        annotation_files[1], preds_filename_val_st2, class_names, full)

    print('=' * 80)
    print('mAP@50 | val_bw: {:.2f} | val_st2: {:.2f}'.format(
        eval_stats_val_bw['total'][1] * 100, eval_stats_val_st2['total'][1] * 100))
    print('=' * 80)

    # Evaluate the training set with whichever weights scored best on validation
    best_weights = eval_stats_val_bw['total'][1] > eval_stats_val_st2['total'][1]
    if eval_train:
        _, preds_filename_train = prediction_utils.predict_and_store_from_annotations(
            model_folder, train_params, annotation_files[0], model_folder,
            image_size=train_params['input_shape'],
            score=train_params.get('eval_train_score'), nms_iou=0.5,
            best_weights=best_weights, raw_eval='def')
        eval_stats_train, videos = get_full_evaluation(
            annotation_files[0], preds_filename_train, class_names, full)
    else:
        eval_stats_train = None

    train_diff, train_loss, val_loss = get_train_resume(model_folder)
    eval_stats_val = eval_stats_val_bw if best_weights else eval_stats_val_st2
    full_resume = get_excel_resume_full(model_folder, train_params, train_loss,
                                        val_loss, eval_stats_train, eval_stats_val,
                                        train_diff, best_weights)

    print('=' * 80)
    print(full_resume)
    pyperclip.copy(full_resume)
    print('=' * 80)

    if eval_train:
        print('mAP@50 | val_bw: {:.4f} | val_st2: {:.4f} | train: {:.4f}'.format(
            eval_stats_val_bw['total'][1] * 100,
            eval_stats_val_st2['total'][1] * 100,
            eval_stats_train['total'][1] * 100))
        print('R10    | val_bw: {:.4f} | val_st2: {:.4f} | train: {:.4f}'.format(
            eval_stats_val_bw['total'][7] * 100,
            eval_stats_val_st2['total'][7] * 100,
            eval_stats_train['total'][7] * 100))
    else:
        print('mAP@50 | val_bw: {:.4f} | val_st2: {:.4f}'.format(
            eval_stats_val_bw['total'][1] * 100,
            eval_stats_val_st2['total'][1] * 100))
        print('R10    | val_bw: {:.4f} | val_st2: {:.4f}'.format(
            eval_stats_val_bw['total'][7] * 100,
            eval_stats_val_st2['total'][7] * 100))
    print('=' * 80)

    # return full_resume
    return eval_stats_train, eval_stats_val, videos, class_names
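# Hypothetical invocation of main_v2(); the model number and input shape are
# illustrative assumptions.
if __name__ == '__main__':
    eval_stats_train, eval_stats_val, videos, class_names = main_v2(
        '/mnt/hdd/egocentric_results/', 'adl', 62,
        input_shape=(416, 416), eval_train=True, full=False)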