Example #1
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras.applications.vgg16 import VGG16
# `nn` (the project's RPN/classifier layer definitions), the config object `C` and
# `classes_count` are defined elsewhere in the project and are not part of this snippet.

def getModel(C):
	if K.image_dim_ordering() == 'th':
		input_shape_img = (3, None, None)
	else:
		input_shape_img = (None, None, 3)
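	# 'th' (channels-first) expects (3, H, W); 'tf' (channels-last) expects (H, W, 3).
	# Height and width are left as None so the network accepts variable-sized images.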


	img_input = Input(shape=input_shape_img)
	roi_input = Input(shape=(C.num_rois, 4))

	base_model = VGG16(input_tensor = img_input, weights='imagenet', include_top=False) 

	# define the RPN, built on the base layers
	num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
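	# e.g. 3 scales x 3 ratios would give 9 anchors per feature-map position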
	rpn = nn.rpn(base_model.layers[-2].output, num_anchors)


	b1_feat = base_model.get_layer('block1_pool').output
	b2_feat = base_model.get_layer('block2_pool').output
	b4_feat = base_model.get_layer('block4_pool').output

	# Fuse features from the 1st, 2nd and 4th conv blocks
	# classifier output: ([face_pred,pose_pred, gender_pred, viz_pred, landmark_pred, regr_pred])

	classifier = nn.classifier(b1_feat, b2_feat, b4_feat, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)
	model_classifier = Model([img_input,roi_input],classifier)


	model_rpn = Model(base_model.input, rpn[:2])

	# Standalone Input placeholders for the three VGG16 feature maps (64/128/512
	# channels), so the classifier head can also run on precomputed features.
	feat_1_inp = Input(shape=(None, None, 64))
	feat_2_inp = Input(shape=(None, None, 128))
	feat_4_inp = Input(shape=(None, None, 512))
	classifier_only  = nn.classifier(feat_1_inp, feat_2_inp, feat_4_inp, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)

	model_classifier_only = Model([feat_1_inp, feat_2_inp, feat_4_inp, roi_input], classifier_only)

	# assumed return (the original snippet does not show one): hand back all three models
	return model_rpn, model_classifier, model_classifier_only
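
# Usage sketch (not part of the original example), assuming getModel returns the three
# models built above. The 'mse' losses are placeholders only; the project defines its
# own RPN and multi-task classifier losses elsewhere.
model_rpn, model_classifier, model_classifier_only = getModel(C)
model_rpn.compile(optimizer='adam', loss='mse')
model_classifier.compile(optimizer='adam', loss='mse')
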
# This fragment reuses img_input, roi_input and the VGG16 base_model set up as above.
# for layer in base_model.layers:
#     layer.trainable = False
b1_feat = base_model.get_layer('block1_conv2').output
b2_feat = base_model.get_layer('block2_pool').output
b4_feat = base_model.get_layer('block4_pool').output

# define the RPN, built on the base layers

num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(b1_feat, b2_feat, b4_feat, num_anchors)

# Fuse features from the 1st, 2nd and 4th conv blocks
classifier = nn.classifier(b1_feat,
                           b2_feat,
                           b4_feat,
                           roi_input,
                           C.num_rois,
                           nb_classes=len(classes_count),
                           trainable=True)
# classifier = [face_out,pose_out, gender_out, viz_out, landmark_out, regr_out]

model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)

# bp()

# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
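
# Sketch (not in the original): since model_rpn, model_classifier and model_all share
# their layers, saving model_all and re-loading by name restores both sub-models.
# The file name is illustrative only.
# model_all.save_weights('model_all.hdf5')
# model_rpn.load_weights('model_all.hdf5', by_name=True)
# model_classifier.load_weights('model_all.hdf5', by_name=True)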

try:
    # print('loading weights from {}'.format(C.base_net_weights))
    # assumed completion (the original example is cut off here): load the pretrained
    # base-net weights into both sub-models by name
    model_rpn.load_weights(C.base_net_weights, by_name=True)
    model_classifier.load_weights(C.base_net_weights, by_name=True)
except Exception:
    print('Could not load pretrained weights from {}'.format(C.base_net_weights))
Example #3
from keras.layers import Input
from keras.models import Model
from keras.applications.vgg16 import VGG16

# define the base network (VGG16 here; could be ResNet, Inception, etc.)
# shared_layers = nn.nn_base(img_input, trainable=True)

# img_input and roi_input are not defined in this snippet; these are the typical
# channels-last definitions used with this kind of model:
img_input = Input(shape=(None, None, 3))
roi_input = Input(shape=(C.num_rois, 4))

base_model = VGG16(input_tensor=img_input, weights='imagenet', include_top=False)
b1_feat = base_model.get_layer('block1_conv2').output
b2_feat = base_model.get_layer('block2_pool').output
b4_feat = base_model.get_layer('block4_pool').output


# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn( b1_feat, b2_feat, b4_feat, num_anchors)
model_rpn = Model(base_model.input, rpn )

# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
# model_all = Model([img_input, roi_input], rpn[:2] + classifier)
# b1_inp / b2_inp / b4_inp are standalone placeholders for the three VGG16 feature maps
# above (64, 128 and 512 channels); they are not defined in the original snippet.
b1_inp = Input(shape=(None, None, 64))
b2_inp = Input(shape=(None, None, 128))
b4_inp = Input(shape=(None, None, 512))
classifier_only = nn.classifier(b1_inp, b2_inp, b4_inp, roi_input, C.num_rois, nb_classes=2, trainable=True)
model_classifier_only = Model([b1_inp, b2_inp, b4_inp, roi_input], classifier_only)
# # classifier = [face_out,pose_out, gender_out, viz_out, landmark_out, regr_out]

try:
	print('loading rpn weights from {}'.format(C.model_path))
	model_rpn.load_weights(C.model_path, by_name=True)   #TODO: load RPN weights
	print('loading classifier weights from {}'.format(C.model_path))
	model_classifier_only.load_weights(C.model_path, by_name = True)
except:
	print('Could not load pretrained model weights. Weights can be found at {} and {}'.format(
		'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5',
		'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
	))
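
# Quick check (not in the original): the channel counts assumed above for
# b1_inp / b2_inp / b4_inp match the VGG16 feature maps they stand in for
# (block1_conv2: 64 channels, block2_pool: 128, block4_pool: 512).
for name in ('block1_conv2', 'block2_pool', 'block4_pool'):
	print(name, base_model.get_layer(name).output_shape)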