def main(args=None):
    """Evaluate a zero-shot RetinaNet detector and report seen/unseen mAP and recall.

    Builds a ResNet50-backed RetinaNet with 65 (seen) classes, loads the
    weights given on the command line, runs evaluation over the generator,
    and prints per-class APs plus seen / unseen / harmonic-mean summaries.

    Args:
        args: Optional list of command-line argument strings; defaults to
            ``sys.argv[1:]``.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # create the generator
    generator = create_generator(args)

    # build the network and load the trained weights
    print('Loading model, this may take a second...')
    inputs = keras.layers.Input(shape=(None, None, 3))
    resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
    model = retinanet.retinanet_bbox(inputs=inputs, num_classes=65, backbone=resnet)
    model.load_weights(args.model)

    # print model summary
    print(model.summary())

    # start evaluation
    average_precisions, average_recalls = evaluate(
        generator,
        model,
        iou_threshold=args.iou_threshold,
        score_threshold=args.score_threshold,
        max_detections=args.max_detections,
        top=args.top,
        wordvector=args.wordvector,
        save_path=args.save_path
    )

    # print per-class evaluation
    for label, average_precision in average_precisions.items():
        print(generator.label_to_name(label), '{:.4f}'.format(average_precision))

    # Seen/unseen split of the label space — assumes labels 0..64 are seen
    # and 65..79 are unseen (TODO confirm against the generator's mapping).
    num_seen, num_unseen = 65, 15

    # Sort by label id before splitting. The original sliced
    # ``dict.values()[:65]`` directly, which depends on dict ordering and is
    # a TypeError on Python 3 (dict_values is not subscriptable).
    aps = [average_precisions[k] for k in sorted(average_precisions)]
    recalls = [average_recalls[k] for k in sorted(average_recalls)]

    seen_map = sum(aps[:num_seen]) / num_seen
    unseen_map = sum(aps[num_seen:]) / num_unseen
    seen_re = sum(recalls[:num_seen]) / num_seen
    unseen_re = sum(recalls[num_seen:]) / num_unseen

    def _harmonic_mean(a, b):
        # Guard the original's ZeroDivisionError when both metrics are 0.
        return (2 * a * b) / (a + b) if (a + b) else 0.0

    print('Seen mAP: {:.4f} Unseen mAP: {:.4f} HM mAP: {:.4f}'.format(
        seen_map, unseen_map, _harmonic_mean(seen_map, unseen_map)))
    print('Seen Rec: {:.4f} Unseen Rec: {:.4f} HM Rec: {:.4f}'.format(
        seen_re, unseen_re, _harmonic_mean(seen_re, unseen_re)))
def main(args=None):
    """Run a zero-shot RetinaNet detector and cache its raw detections.

    Builds a ResNet50-backed RetinaNet with 72 classes, loads the weights
    given on the command line, runs `evaluate` over the generator, and — if
    no cache exists yet — writes the detections to ``detection.h5``.

    Args:
        args: Optional list of command-line argument strings; defaults to
            ``sys.argv[1:]``.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # create the generator
    generator = create_generator(args)

    # build the network and load the trained weights
    print('Loading model, this may take a second...')
    inputs = keras.layers.Input(shape=(None, None, 3))
    resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
    model = retinanet.retinanet_bbox(inputs=inputs, num_classes=72, backbone=resnet)
    model.load_weights(args.model)
    print("args_model:{}".format(args.model))

    # print model summary
    print(model.summary())

    # run inference over the whole generator
    all_detections_with_labels = evaluate(
        generator,
        model,
        iou_threshold=args.iou_threshold,
        score_threshold=args.score_threshold,
        max_detections=args.max_detections,
        top=args.top,
        wordvector=args.wordvector,
        save_path=args.save_path
    )

    # Cache the detections so later runs can skip inference.
    # Fixed: the original opened h5py.File() with no mode — mode-less open is
    # deprecated and defaults to read-only in recent h5py, so the write below
    # would fail. 'w' is safe here because the file is known not to exist.
    if not os.path.exists('detection.h5'):
        with h5py.File('detection.h5', 'w') as f:
            f['dets_labels'] = all_detections_with_labels
def main(args=None):
    """Train a vocabulary/word2vec RetinaNet model.

    Creates the data generators, builds (or resumes) the training model,
    warm-starts it from a local checkpoint, and runs ``fit_generator``.

    Args:
        args: Optional list of command-line argument strings; defaults to
            ``sys.argv[1:]``.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the generators
    train_generator, validation_generator = create_generators(args)

    # import the backbone-specific model builders
    if 'resnet' in args.backbone:
        from keras_retinanet.models import retinanet
        from ..models.resnet_vocab_w2v import custom_objects
        from keras_retinanet.models import retinanet_vocab_w2v
        import keras_resnet.models
    elif 'mobilenet' in args.backbone:
        from ..models.mobilenet import mobilenet_retinanet as retinanet, custom_objects, download_imagenet
    else:
        raise NotImplementedError('Backbone \'{}\' not implemented.'.format(args.backbone))

    # create the model
    if args.snapshot is not None:
        # Resume from a full saved model (weights + optimizer state).
        print('Loading model, this may take a second...')
        model = keras.models.load_model(args.snapshot, custom_objects=custom_objects)
        training_model = model
    else:
        # Fresh 72-class model on a ResNet50 backbone.
        inputs = keras.layers.Input(shape=(None, None, 3))
        resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
        training_model = retinanet_vocab_w2v.retinanet_bbox(inputs=inputs, num_classes=72, backbone=resnet)

        # compile model: smooth-L1 for box regression, polar loss for classification
        training_model.compile(
            loss={
                'regression': losses.smooth_l1(),
                'classification': losses.polar()
            },
            optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
        )

        # Warm-start from a previously trained checkpoint.
        # (Removed: a commented-out layer-by-layer weight-copy block and a
        # stray Python 2 ``print i`` left over from it — that statement was a
        # SyntaxError on Python 3 and referenced an undefined name on Python 2.
        # Also fixed ``print 'Pre-trained load done'`` to the print() function
        # used everywhere else in this file.)
        training_model.load_weights('Model/resnet50_csv_30.h5')
        print('yes')
        print('Pre-trained load done')

    print(training_model.summary())

    # create the callbacks
    callbacks = create_callbacks(
        training_model,
        training_model,
        training_model,
        validation_generator,
        args,
    )

    # start training
    training_model.fit_generator(
        generator=train_generator,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        verbose=1,
        callbacks=callbacks,
    )
num_seen = 65 word = np.loadtxt('MSCOCO/word_w2v.txt', dtype='float32', delimiter=',') word_seen = word[:, :num_seen] word_unseen = word[:, num_seen:] wordname_lines = open('MSCOCO/cls_names_test_coco.csv').read().split("\n") class_mapping = {} for idx in range(int(len(wordname_lines)) - 1): class_mapping[idx] = wordname_lines[idx].split(',')[0] inputs = keras.layers.Input(shape=(None, None, 3)) resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True) model = retinanet.retinanet_bbox(inputs=inputs, num_classes=num_seen, backbone=resnet) model.load_weights('Model/resnet50_polar_loss.h5') lines = open('sample_input.txt').read().split("\n") num_rois = 100 visualise = False # False True detect_type = 'gzsd' # gzsd or zsd or seen_detection seen_threshold = .4 unseen_threshold = .2 for idx in range(int(len(lines)) - 1): aline = lines[idx].split(" ") im_id = aline[1] filepath = aline[0]