import json

import numpy as np

# parse_annotation_xml, parse_annotation_csv, import_feature_extractor,
# run_kmeans, avg_IOU and print_anchors are assumed to be provided by the
# repository's own modules.


def main(args):
    config_path = args.conf
    num_anchors = args.anchors

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if config['parser_annotation_type'] == 'xml':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_xml(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'],
            config['model']['labels'])
    elif config['parser_annotation_type'] == 'csv':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_csv(
            config['train']['train_csv_file'],
            config['model']['labels'],
            config['train']['train_csv_base_path'])
    else:
        raise ValueError(
            "'parser_annotation_type' must be 'xml' or 'csv', not {}.".format(
                config['parser_annotation_type']))

    input_size = (config['model']['input_size_h'],
                  config['model']['input_size_w'],
                  3)
    feature_extractor = import_feature_extractor(config['model']['backend'],
                                                 input_size)

    # size of one grid cell in pixels, derived from the backend's output shape
    grid_w = config['model']['input_size_w'] / feature_extractor.get_output_shape()[1]
    grid_h = config['model']['input_size_h'] / feature_extractor.get_output_shape()[0]

    # run k-means on the box dimensions, expressed in grid-cell units
    annotation_dims = []
    for image in train_imgs:
        cell_w = image['width'] / grid_w
        cell_h = image['height'] / grid_h
        for obj in image['object']:
            relative_w = (float(obj['xmax']) - float(obj['xmin'])) / cell_w
            relative_h = (float(obj['ymax']) - float(obj['ymin'])) / cell_h
            annotation_dims.append(tuple(map(float, (relative_w, relative_h))))
    annotation_dims = np.array(annotation_dims)
    centroids = run_kmeans(annotation_dims, num_anchors)

    # report the anchors and their average IOU with the annotations
    print('\naverage IOU for', num_anchors, 'anchors:',
          '%0.2f' % avg_IOU(annotation_dims, centroids))
    print_anchors(centroids)
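
# A minimal command-line driver for the anchor-generation script above. The
# flag names (-c/--conf, -a/--anchors) are an assumption; main() only
# requires that the parsed args object expose `conf` and `anchors`.
if __name__ == '__main__':
    import argparse

    argparser = argparse.ArgumentParser(
        description='Run k-means on the training annotations to generate anchor boxes')
    argparser.add_argument('-c', '--conf',
                           help='path to the configuration file (config.json)')
    argparser.add_argument('-a', '--anchors', type=int, default=5,
                           help='number of anchors to generate')
    main(argparser.parse_args())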
import json
import os

import cv2
import numpy as np
from tqdm import tqdm

# parse_annotation_xml and parse_annotation_csv are assumed to come from the
# repository's own modules.


def _main_(args):
    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    ###############################
    #   Parse the annotations
    ###############################
    if config['parser_annotation_type'] == 'xml':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_xml(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'],
            config['model']['labels'])

        # parse annotations of the validation set, if any, otherwise split the training set
        if os.path.exists(config['valid']['valid_annot_folder']):
            valid_imgs, valid_labels = parse_annotation_xml(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
            split = False
        else:
            split = True
    elif config['parser_annotation_type'] == 'csv':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_csv(
            config['train']['train_csv_file'],
            config['model']['labels'],
            config['train']['train_csv_base_path'])

        # parse annotations of the validation set, if any, otherwise split the training set
        if os.path.exists(config['valid']['valid_csv_file']):
            valid_imgs, valid_labels = parse_annotation_csv(
                config['valid']['valid_csv_file'],
                config['model']['labels'],
                config['valid']['valid_csv_base_path'])
            split = False
        else:
            split = True
    else:
        raise ValueError(
            "'parser_annotation_type' must be 'xml' or 'csv', not {}.".format(
                config['parser_annotation_type']))

    if split:
        # hold out 20% of the training set for validation
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print('Some labels have no annotations! '
                  'Please revise the list of labels in the config.json file!')
            return
    else:
        print('No labels are provided. Saving ROIs for all seen labels.')
        config['model']['labels'] = train_labels.keys()
        with open("labels.json", 'w') as outfile:
            json.dump({"labels": list(train_labels.keys())}, outfile)

    if os.path.isdir("./roi_dataset"):
        print("roi_dataset already exists, please move or delete it first.")
        return
    else:
        os.mkdir("roi_dataset")
        os.mkdir("roi_dataset/train")
        os.mkdir("roi_dataset/val")

    # crop every annotated box out of its image and save it as
    # roi_dataset/<split>/<label>/<label>_<image>_<index>.jpg
    all_imgs = [train_imgs, valid_imgs]
    for j, folder_name in enumerate(["train", "val"]):
        print("generating", folder_name)
        for img in tqdm(all_imgs[j]):
            image = cv2.imread(img['filename'])
            for i, obj in enumerate(img['object']):
                xmin = obj['xmin']
                ymin = obj['ymin']
                xmax = obj['xmax']
                ymax = obj['ymax']
                name = obj['name']
                if not os.path.isdir("roi_dataset/{}/{}".format(folder_name, name)):
                    os.mkdir("roi_dataset/{}/{}".format(folder_name, name))
                roi = image[ymin:ymax, xmin:xmax]
                base_name = os.path.basename(img['filename'])
                base_name, ext = os.path.splitext(base_name)
                cv2.imwrite("roi_dataset/{}/{}/{}_{}_{}.jpg".format(
                    folder_name, name, name, base_name, i), roi)
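
# For reference, the scripts above all index the parsed annotations the same
# way, which implies the structure sketched below. This is an illustrative
# example, not output of the actual parsers: the field names are taken from
# the accesses above (image['width'], obj['xmax'], ...); the values are made up.
example_parsed_image = {
    'filename': '/data/images/000001.jpg',  # path to the image on disk
    'width': 640,                           # image width in pixels
    'height': 480,                          # image height in pixels
    'object': [                             # one entry per annotated box
        {'name': 'car',                     # class label
         'xmin': 48, 'ymin': 240,           # top-left corner
         'xmax': 195, 'ymax': 371},         # bottom-right corner
    ],
}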
import json
import os

import keras
import numpy as np

# parse_annotation_xml, parse_annotation_csv, get_session, create_backup and
# YOLO are assumed to come from the repository's own modules.


def _main_(args):
    config_path = args.conf

    keras.backend.tensorflow_backend.set_session(get_session())

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if config['backup']['create_backup']:
        config = create_backup(config)

    ###############################
    #   Parse the annotations
    ###############################
    if config['parser_annotation_type'] == 'xml':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_xml(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'],
            config['model']['labels'])

        # parse annotations of the validation set, if any, otherwise split the training set
        if os.path.exists(config['valid']['valid_annot_folder']):
            valid_imgs, valid_labels = parse_annotation_xml(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
            split = False
        else:
            split = True
    elif config['parser_annotation_type'] == 'csv':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_csv(
            config['train']['train_csv_file'],
            config['model']['labels'],
            config['train']['train_csv_base_path'])

        # parse annotations of the validation set, if any, otherwise split the training set
        if os.path.exists(config['valid']['valid_csv_file']):
            valid_imgs, valid_labels = parse_annotation_csv(
                config['valid']['valid_csv_file'],
                config['model']['labels'],
                config['valid']['valid_csv_base_path'])
            split = False
        else:
            print("Validation file not found, commencing split.")
            split = True
    else:
        raise ValueError(
            "'parser_annotation_type' must be 'xml' or 'csv', not {}.".format(
                config['parser_annotation_type']))

    if split:
        # hold out 20% of the training set for validation
        train_valid_split = int(0.8 * len(train_imgs))
        np.random.shuffle(train_imgs)
        valid_imgs = train_imgs[train_valid_split:]
        train_imgs = train_imgs[:train_valid_split]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print('Some labels have no annotations! '
                  'Please revise the list of labels in the config.json file!')
            return
    else:
        print('No labels are provided. Train on all seen labels.')
        config['model']['labels'] = train_labels.keys()
        with open("labels.json", 'w') as outfile:
            json.dump({"labels": list(train_labels.keys())}, outfile)

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(backend=config['model']['backend'],
                input_size=(config['model']['input_size_h'],
                            config['model']['input_size_w']),
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gray_mode=config['model']['gray_mode'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])

    ###############################
    #   Start the training process
    ###############################
    yolo.train(train_imgs=train_imgs,
               valid_imgs=valid_imgs,
               train_times=config['train']['train_times'],
               valid_times=config['valid']['valid_times'],
               nb_epochs=config['train']['nb_epochs'],
               learning_rate=config['train']['learning_rate'],
               batch_size=config['train']['batch_size'],
               warmup_epochs=config['train']['warmup_epochs'],
               object_scale=config['train']['object_scale'],
               no_object_scale=config['train']['no_object_scale'],
               coord_scale=config['train']['coord_scale'],
               class_scale=config['train']['class_scale'],
               saved_weights_name=config['train']['saved_weights_name'],
               debug=config['train']['debug'],
               early_stop=config['train']['early_stop'],
               workers=config['train']['workers'],
               max_queue_size=config['train']['max_queue_size'],
               tb_logdir=config['train']['tensorboard_log_dir'])
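
# A sketch of the config.json consumed by the scripts above. Every key below
# appears in a config[...] lookup in the code; the values are illustrative
# placeholders, not recommended settings, and the backend name is an
# assumption.
example_config = {
    "model": {
        "backend": "Full Yolo",            # feature-extractor name (placeholder)
        "input_size_h": 416,
        "input_size_w": 416,
        "gray_mode": False,
        "anchors": [0.57, 0.67, 1.87, 2.06, 3.34, 5.47],  # placeholder anchors
        "max_box_per_image": 10,
        "labels": ["car", "person"]
    },
    "parser_annotation_type": "xml",       # or "csv"
    "train": {
        "train_annot_folder": "train/annotations/",
        "train_image_folder": "train/images/",
        "train_csv_file": "",              # used when parser_annotation_type == "csv"
        "train_csv_base_path": "",
        "pretrained_weights": "",
        "train_times": 1,
        "batch_size": 16,
        "learning_rate": 1e-4,
        "nb_epochs": 50,
        "warmup_epochs": 3,
        "workers": 4,
        "max_queue_size": 8,
        "object_scale": 5.0,
        "no_object_scale": 1.0,
        "coord_scale": 1.0,
        "class_scale": 1.0,
        "saved_weights_name": "best_weights.h5",
        "debug": False,
        "early_stop": True,
        "tensorboard_log_dir": "logs/"
    },
    "valid": {
        "valid_annot_folder": "valid/annotations/",
        "valid_image_folder": "valid/images/",
        "valid_csv_file": "",
        "valid_csv_base_path": "",
        "valid_times": 1
    },
    "backup": {
        "create_backup": False
    }
}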
import json
import os

import keras

# parse_annotation_xml, parse_annotation_csv, get_session, YOLO and
# BatchGenerator are assumed to come from the repository's own modules.


def _main_(args):
    config_path = args.conf
    weights_path = args.weights

    keras.backend.tensorflow_backend.set_session(get_session())

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if weights_path == '':
        weights_path = config['train']['pretrained_weights']

    ###############################
    #   Parse the annotations
    ###############################
    without_valid_imgs = False
    if config['parser_annotation_type'] == 'xml':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_xml(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'],
            config['model']['labels'])

        # parse annotations of the validation set, if any
        if os.path.exists(config['valid']['valid_annot_folder']):
            valid_imgs, valid_labels = parse_annotation_xml(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
        else:
            without_valid_imgs = True
    elif config['parser_annotation_type'] == 'csv':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_csv(
            config['train']['train_csv_file'],
            config['model']['labels'],
            config['train']['train_csv_base_path'])

        # parse annotations of the validation set, if any
        if os.path.exists(config['valid']['valid_csv_file']):
            valid_imgs, valid_labels = parse_annotation_csv(
                config['valid']['valid_csv_file'],
                config['model']['labels'],
                config['valid']['valid_csv_base_path'])
        else:
            without_valid_imgs = True
    else:
        raise ValueError(
            "'parser_annotation_type' must be 'xml' or 'csv', not {}.".format(
                config['parser_annotation_type']))

    # remove samples without objects in the image; iterate backwards so that
    # deleting an element does not shift the indices still to be visited
    for i in range(len(train_imgs) - 1, -1, -1):
        if len(train_imgs[i]['object']) == 0:
            del train_imgs[i]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))
        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)
        if len(overlap_labels) < len(config['model']['labels']):
            print('Some labels have no annotations! '
                  'Please revise the list of labels in the config.json file!')
            return
    else:
        print('No labels are provided. Evaluate on all seen labels.')
        config['model']['labels'] = train_labels.keys()
        with open("labels.json", 'w') as outfile:
            json.dump({"labels": list(train_labels.keys())}, outfile)

    ###############################
    #   Construct the model
    ###############################
    yolo = YOLO(backend=config['model']['backend'],
                input_size=(config['model']['input_size_h'],
                            config['model']['input_size_w']),
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gray_mode=config['model']['gray_mode'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################
    if weights_path != '':
        print("Loading pre-trained weights in", weights_path)
        yolo.load_weights(weights_path)
    elif os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])
    else:
        raise Exception("No pretrained weights found.")

    ###############################
    #   Evaluate the network
    ###############################
    print("calculating mAP for iou threshold = {}".format(args.iou))
    generator_config = {
        'IMAGE_H': yolo.input_size[0],
        'IMAGE_W': yolo.input_size[1],
        'IMAGE_C': yolo.input_size[2],
        'GRID_H': yolo.grid_h,
        'GRID_W': yolo.grid_w,
        'BOX': yolo.nb_box,
        'LABELS': yolo.labels,
        'CLASS': len(yolo.labels),
        'ANCHORS': yolo.anchors,
        'BATCH_SIZE': 4,
        'TRUE_BOX_BUFFER': yolo.max_box_per_image,
    }

    if not without_valid_imgs:
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=yolo.feature_extractor.normalize,
                                         jitter=False)
        valid_eval = YOLO.MAP_evaluation(yolo,
                                         valid_generator,
                                         iou_threshold=args.iou)
        mAP, average_precisions = valid_eval.evaluate_mAP()
        for label, average_precision in average_precisions.items():
            print(yolo.labels[label], '{:.4f}'.format(average_precision))
        print('validation dataset mAP: {:.4f}\n'.format(mAP))

    train_generator = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=yolo.feature_extractor.normalize,
                                     jitter=False)
    train_eval = YOLO.MAP_evaluation(yolo,
                                     train_generator,
                                     iou_threshold=args.iou)
    mAP, average_precisions = train_eval.evaluate_mAP()
    for label, average_precision in average_precisions.items():
        print(yolo.labels[label], '{:.4f}'.format(average_precision))
    print('training dataset mAP: {:.4f}'.format(mAP))
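
# A minimal command-line driver for the evaluation script above, following
# the same pattern as the other scripts. Flag names are an assumption; the
# function only requires args.conf, args.weights and args.iou.
if __name__ == '__main__':
    import argparse

    argparser = argparse.ArgumentParser(
        description='Evaluate mAP of a trained YOLO model on the training and validation sets')
    argparser.add_argument('-c', '--conf',
                           help='path to the configuration file (config.json)')
    argparser.add_argument('-w', '--weights', default='',
                           help='path to trained weights (falls back to train.pretrained_weights)')
    argparser.add_argument('-i', '--iou', type=float, default=0.5,
                           help='IOU threshold used for the mAP computation')
    _main_(argparser.parse_args())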