Code example #1
from pycocotools.coco import COCO
from datagen import genx, genx_selected, gety	# project data generators; genx_selected and gety are assumed to live in datagen alongside genx

ishape 				= [1024, 1024, 3]
feature_map_size 	= [32, 32]
frame_mode 			= True
classes 			= ['face', 'none']
mapping 			= {0: 0}
iou_thresholds 		= [0.3, 0.5]
start_example_index	= 0
num_of_examples 	= 64
img_ids 			= None
ann_file = '../datasets/coco/annotations/instances_face.json'
img_dir = '../datasets/coco/images/face'
coco = COCO(ann_file)

if img_ids is None:
	gen = genx(
		coco=coco, 
		img_dir=img_dir, 
		classes=classes, 
		limit=[start_example_index, start_example_index+num_of_examples],
		ishape=ishape)
else:
	gen = genx_selected(
		coco=coco, 
		img_dir=img_dir, 
		img_ids=img_ids,
		ishape=ishape)

for sample_order in range(num_of_examples):

	# generate x
	x, img_id = next(gen)

	# get labels
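	# a minimal sketch of the label step, mirroring the gety call in Code example #5:
	# bbox2d holds the ground-truth boxes for img_id, presumably remapped via `mapping`
	bbox2d, _ = gety(coco, img_id, classes, frame_mode=frame_mode, mapping=mapping)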
Code example #2
    unified_roi_size=unified_roi_size,
    rpn_head_dim=rpn_head_dim,
    fc_denses=fc_denses,
    block_settings=block_settings,
    base_block_trainable=True)

# load weights
# detection_model.load_weights('{}/detection_weights.h5'.format(output_path))

if True:

    for epoch in range(1):
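        # each epoch builds a fresh generator covering the train + validation slice,
        # then runs one pass of training over the first num_of_train_examples images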

        gen = genx(coco=coco,
                   img_dir=img_dir,
                   classes=classes,
                   limit=(start_example_index, start_example_index +
                          num_of_train_examples + num_of_validation_examples),
                   ishape=ishape)

        #------------------------------
        # TRAIN
        #------------------------------

        print('{}: TRAIN {}'.format(datetime.now().time(), epoch), end='\n')

        loss = np.zeros((num_of_train_examples, 1))  # per-example loss buffer for this epoch

        for batch_idx in range(num_of_train_examples):

            # generate x
            x, img_id = next(gen)
Code example #3
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
# resnet (the backbone builder) and genx (the data generator) come from the project's own modules

resnet_settings = [[32, 32, 128], [3, [1, 1]], [4, [2, 2]], [6, [2, 2]], [3, [2, 2]]]

input_tensor = Input(shape=ishape)
C2, C3, C4, C5 = resnet(
	input_tensor=input_tensor, 
	block_settings=resnet_settings, 
	use_bias=use_bias, 
	weight_decay=weight_decay,
	trainable=trainable,
	bn_trainable=bn_trainable)
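# expose the four backbone stage outputs (C2-C5) as model outputs so their feature maps can be inspected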
model = Model(inputs=input_tensor, outputs=[C2, C3, C4, C5])

model.load_weights('{}/_weights.h5'.format(output_path), by_name=True)

gen = genx(
	anno_file_path=anno_file_path, 
	image_dir=image_dir, 
	ishape=ishape)

for _ in range(total_examples):
	x, _ = next(gen)
	prediction = model.predict_on_batch(x)
	x = x[0]

	c2_3dtensor = prediction[0][0]
	c3_3dtensor = prediction[1][0]
	c4_3dtensor = prediction[2][0]
	c5_3dtensor = prediction[3][0]
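	# collapse the channel axis to get a single 2D activation map per stage (presumably for visualisation)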

	c2_2dtensor = tf.math.reduce_mean(input_tensor=c2_3dtensor, axis=2)
	c3_2dtensor = tf.math.reduce_mean(input_tensor=c3_3dtensor, axis=2)
	c4_2dtensor = tf.math.reduce_mean(input_tensor=c4_3dtensor, axis=2)
Code example #4
import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime
# build_infer_model and genx come from the project's own modules

# turn the precomputed anchor grid into a tensor and flatten it to (N, 4) boxes
abox_4dtensor = tf.constant(value=abox4d, dtype='float32')
abox_2dtensor = tf.reshape(tensor=abox_4dtensor, shape=[-1, 4])

model = build_infer_model(ishape=ishape,
                          resnet_settings=resnet_settings,
                          k=len(asizes),
                          total_classes=total_classes,
                          abox_2dtensor=abox_2dtensor,
                          nsm_iou_threshold=nsm_iou_threshold,
                          nsm_score_threshold=nsm_score_threshold,
                          nsm_max_output_size=nsm_max_output_size)
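# the nsm_* arguments presumably configure a non-max-suppression step inside the inference
# graph: IoU/score thresholds and the maximum number of boxes to keep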
model.summary()
# model.load_weights('{}/weights.h5'.format(output_path), by_name=True)

gen = genx(anno_file_path=test_anno_file_path,
           image_dir=test_image_dir,
           ishape=ishape,
           mode='test')

for batch in range(total_test_examples):
    batchx_4dtensor, _, image_id = next(gen)
    print('{}: 1'.format(datetime.now().time()), end='\n')
    boxclz_2dtensor, valid_outputs = model.predict_on_batch(
        batchx_4dtensor)  # (h*w*k, total_classes+1+4)
    print('{}: 2'.format(datetime.now().time()), end='\n')
    image = batchx_4dtensor[0]

    _, ax = plt.subplots(figsize=(15, 7.35))
    ax.imshow(image)
    ax.set_xlabel('Image ID: {}'.format(image_id))

    for i in range(valid_outputs[0]):
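        # draw each of the valid_outputs[0] detections returned for this image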
Code example #5
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from pycocotools.coco import COCO
from utils import box2frame
from datagen import genx, gety

np.set_printoptions(threshold=np.inf, linewidth=np.inf)

ishape = [1024, 1024, 3]
frame_mode = True
mapping = {0: 0}

classes = ['face', 'none']
ann_file = '../datasets/coco/annotations/instances_face.json'
img_dir = '../datasets/coco/images/face'
coco = COCO(ann_file)

# cat_ids = coco.getCatIds(catNms=classes)
gen = genx(coco=coco, img_dir=img_dir, classes=classes, limit=[0, 100], ishape=ishape)

for i in range(100):
	# generate x
	x, img_id = next(gen)
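	# x is the generated image tensor; its ground-truth boxes are fetched separately below via gety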

	bbox2d, _ = gety(coco, img_id, classes, frame_mode=frame_mode, mapping=mapping)

	fig, ax = plt.subplots(figsize=(15, 7.35))
	ax.imshow(x/255)  # scale 0-255 pixel values into [0, 1] for imshow

	for bbox in bbox2d:
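		# convert each box to a drawable frame (x, y, width, height, presumably) and overlay it as a Rectangle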
		frame = box2frame(box=bbox, apoint=[0, 0])
		ax.add_patch(Rectangle(
			(frame[0], frame[1]), frame[2], frame[3], 
			linewidth=1,