Example #1
# Keras imports used below; `nn` (network definitions), `classes_count` and the
# config object `C` are assumed to come from the surrounding project.
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras.applications.vgg16 import VGG16


def getModel(C):
	if K.image_dim_ordering() == 'th':
		input_shape_img = (3, None, None)
	else:
		input_shape_img = (None, None, 3)


	img_input = Input(shape=input_shape_img)
	roi_input = Input(shape=(C.num_rois, 4))

	base_model = VGG16(input_tensor = img_input, weights='imagenet', include_top=False) 

	# define the RPN, built on the base layers
	num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
	rpn = nn.rpn(base_model.layers[-2].output, num_anchors)


	b1_feat = base_model.get_layer('block1_pool').output
	b2_feat = base_model.get_layer('block2_pool').output
	b4_feat = base_model.get_layer('block4_pool').output

	# Fuse features from the 1st, 2nd and 4th conv blocks
	# classifier output: ([face_pred,pose_pred, gender_pred, viz_pred, landmark_pred, regr_pred])

	classifier = nn.classifier(b1_feat, b2_feat, b4_feat, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)
	model_classifier = Model([img_input,roi_input],classifier)


	model_rpn = Model(base_model.input, rpn[:2])

	feat_1_inp = Input(shape=(None, None, 64))
	feat_2_inp = Input(shape=(None, None, 128))
	feat_4_inp = Input(shape=(None, None, 512))
	classifier_only = nn.classifier(feat_1_inp, feat_2_inp, feat_4_inp, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)

	model_classifier_only = Model([feat_1_inp, feat_2_inp, feat_4_inp, roi_input], classifier_only)

	# Return assumed: the original snippet does not show one, but these three
	# models are what the rest of the pipeline needs.
	return model_rpn, model_classifier, model_classifier_only
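For context, a minimal usage sketch for the three models returned above; the losses are stand-in placeholders (the project would normally supply its own RPN and multi-task losses), and only the model wiring is taken from the snippet:

from keras.optimizers import Adam

# Build the models from Example #1 (C is the project's config object).
model_rpn, model_classifier, model_classifier_only = getModel(C)

# Placeholder losses just to make the graphs compilable; swap in the
# project's actual RPN / multi-task losses for training.
model_rpn.compile(optimizer=Adam(lr=1e-5), loss='mae')
model_classifier.compile(optimizer=Adam(lr=1e-5), loss='mae')
# model_classifier_only is typically used only for prediction, so a dummy
# loss is enough to compile it.
model_classifier_only.compile(optimizer='sgd', loss='mae')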
Example #2
# define the base network (resnet here, can be VGG, Inception, etc)
#shared_layers = nn.nn_base(img_input, trainable=True)
base_model = VGG16(input_tensor=img_input,
                   weights='imagenet',
                   include_top=False)
# for layer in base_model.layers:
#     layer.trainable = False
b1_feat = base_model.get_layer('block1_conv2').output
b2_feat = base_model.get_layer('block2_pool').output
b4_feat = base_model.get_layer('block4_pool').output

# define the RPN, built on the base layers

num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(b1_feat, b2_feat, b4_feat, num_anchors)

# Fuse features from the 1st, 2nd and 4th conv blocks
classifier = nn.classifier(b1_feat,
                           b2_feat,
                           b4_feat,
                           roi_input,
                           C.num_rois,
                           nb_classes=len(classes_count),
                           trainable=True)
# classifier = [face_out,pose_out, gender_out, viz_out, landmark_out, regr_out]

model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)

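For reference, a hedged sketch of what multi-task heads like those listed in the classifier comment above could look like, assuming nn.classifier first RoI-pools the fused features into a per-ROI tensor. The name `pooled` and all output sizes except `nb_classes` are placeholders, not taken from the project:

from keras.layers import Dense, Flatten, TimeDistributed

def multi_task_heads(pooled, nb_classes):
    # pooled: per-ROI features, shape (num_rois, pool_h, pool_w, channels)
    out = TimeDistributed(Flatten())(pooled)
    out = TimeDistributed(Dense(4096, activation='relu'))(out)

    face_out = TimeDistributed(Dense(nb_classes, activation='softmax'))(out)       # face vs. background
    pose_out = TimeDistributed(Dense(3, activation='linear'))(out)                 # placeholder: yaw, pitch, roll
    gender_out = TimeDistributed(Dense(2, activation='softmax'))(out)              # placeholder: two classes
    viz_out = TimeDistributed(Dense(21, activation='sigmoid'))(out)                # placeholder: landmark visibility
    landmark_out = TimeDistributed(Dense(42, activation='linear'))(out)            # placeholder: 21 (x, y) pairs
    regr_out = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear'))(out)  # box regression
    return [face_out, pose_out, gender_out, viz_out, landmark_out, regr_out]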
Example #3
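# input_shape_img is assumed to be set as in Example #1:
# (3, None, None) for 'th' dim ordering, (None, None, 3) otherwise.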
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(C.num_rois, 4))

# define the base network (resnet here, can be VGG, Inception, etc)
#shared_layers = nn.nn_base(img_input, trainable=True)
base_model = VGG16(input_tensor=img_input,
                   weights='imagenet',
                   include_top=False)
# base_model = nn.alexnet(input_tensor = img_input, trainable=True)
# for layer in base_model.layers:
# layer.trainable = False

# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(base_model.layers[-2].output, num_anchors)

b1_feat = base_model.get_layer('block1_pool').output
b2_feat = base_model.get_layer('block2_pool').output
b4_feat = base_model.get_layer('block4_pool').output

# Fuse features from the 1st, 2nd and 4th conv blocks
classifier = nn.classifier(b1_feat,
                           b2_feat,
                           b4_feat,
                           roi_input,
                           C.num_rois,
                           nb_classes=2,
                           trainable=True)
# classifier = [face_out,pose_out, gender_out, viz_out, landmark_out]
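The snippets above call nn.rpn without showing it. As a reference point, here is a sketch of what an RPN head over a single shared feature map usually looks like in keras-frcnn-style code (an assumption about this project's nn.rpn, not its actual implementation); returning the base feature map as a third element matches the rpn[:2] slicing used to build model_rpn:

from keras.layers import Conv2D

def rpn(base_layers, num_anchors):
    # Shared 3x3 conv over the backbone feature map.
    x = Conv2D(512, (3, 3), padding='same', activation='relu', name='rpn_conv1')(base_layers)
    # Per-anchor objectness score and per-anchor box deltas (4 values per anchor).
    x_class = Conv2D(num_anchors, (1, 1), activation='sigmoid', name='rpn_out_class')(x)
    x_regr = Conv2D(num_anchors * 4, (1, 1), activation='linear', name='rpn_out_regress')(x)
    return [x_class, x_regr, base_layers]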