Example #1
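        # (fragment starts mid-loop) each detection i = (center x, center y, width, height) is drawn as a green box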
        x, y, w, h = i
        cv2.rectangle(buff_img, (x - w // 2, y - h // 2),
                      (x + w // 2, y + h // 2), (0, 255, 0), 2)
    cv2.imshow('result', buff_img)
    cv2.waitKey(1)


# detection net outputs: three b* tensors and three c* tensors, consumed by crop() below

b0, b1, b2, c0, c1, c2 = netpart.model_out

# set and load session
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
M.loadSess('./modelveri_tiny/', sess)

import time


def get_coord_from_detection(img):
    # run the detection net on a single image
    buff_out = sess.run([b0, b1, b2, c0, c1, c2],
                        feed_dict={netpart.inpholder: [img]})
    bs, cs = buff_out[:3], buff_out[3:]
    # crop candidate regions; each entry of res is (cropped image, coordinates)
    res = crop(img, bs, cs)
    cropped_imgs = [k[0] for k in res]
    coords = [k[1] for k in res]

    # get score and output
    veri_output = sess.run(net_veri.output,
                           feed_dict={net_veri.inputholder: cropped_imgs})
Example #2
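		# (tail of a model builder) final 11-way fully-connected layer, returned below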
		mod.fcLayer(11)
	return mod.get_current_layer()

def build_graph():
	# a single flattened-28x28 input placeholder is shared by all three models
	img_holder = tf.placeholder(tf.float32,[None,28*28])
	last_layer = build_model(img_holder)
	last_layer_7seg = build_7seg_model(img_holder)
	last_layer_FD = build_FD_model(img_holder)
	return img_holder,last_layer,last_layer_7seg,last_layer_FD

img_holder,last_layer,last_layer_7seg,last_layer_FD = build_graph()

config = tf.ConfigProto(allow_soft_placement = True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
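# each checkpoint restores only the variables of its own scope (mnist / 7seg_detection / FD_detection)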
M.loadSess('./rune_module/model_rune/model_mnist/',sess,var_list=M.get_all_vars('mnist'))
M.loadSess('./rune_module/model_rune/model_7seg/',sess,var_list=M.get_all_vars('7seg_detection'))
M.loadSess('./rune_module/model_rune/model_flaming/',sess,var_list=M.get_all_vars('FD_detection'))

def get_pred(imgs):
	scr = sess.run(last_layer,feed_dict={img_holder:imgs})
	scr = np.argmax(scr,1)
	return scr

def get_pred_7seg(imgs):
	scr = sess.run(last_layer_7seg,feed_dict={img_holder:imgs})
	scr = np.argmax(scr,1)
	return scr 

def get_pred_flaming(imgs):
	scr = sess.run(last_layer_FD,feed_dict={img_holder:imgs})
	scr = np.argmax(scr,1)
	return scr
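
# Hypothetical usage (an assumption, not in the original): each head takes a batch of
# flattened 28x28 images matching the [None, 28*28] placeholder above, e.g.
#   digits = get_pred(batch)          # mnist head
#   segs   = get_pred_7seg(batch)     # 7-segment head
#   flames = get_pred_flaming(batch)  # flaming-detection (FD) head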
Example #3
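	# (tail of a display routine, as in Example #1)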
	cv2.waitKey(1)

# model outputs

b0,b1,b2,c0,c1,c2 = netpart.model_out    # main detection net outputs

B0,B1,C0,C1 = netpart_s.model_out    # outputs of the second (_s) detection net

# set and load session
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
v1 = M.get_all_vars('VERI') + M.get_all_vars('MSRPN_v3')
v2 = M.get_all_vars('VERI_s') + M.get_all_vars('MSRPN_v3_s')
# keep only variables not in v2 so the two checkpoints restore disjoint variable sets
v1 = [item for item in v1 if item not in v2]
M.loadSess('./modelveri_tiny/',sess,var_list=v1)
M.loadSess('./modelveri_tiny_s/',sess,var_list=v2)

import time 

def get_coord_from_detection(img):
	#t1 = time.time()
	buff_out = sess.run([b0,b1,b2,c0,c1,c2],feed_dict={netpart.inpholder:[img]})
	bs,cs = buff_out[:3],buff_out[3:]
	#t2 = time.time()
	res = crop(img,bs,cs)
	#t3 = time.time()
	cropped_imgs = [k[0] for k in res]
	coords = [k[1] for k in res]

	# get score and output
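	# (the snippet is cut off here; Example #1 shows this step as
	#  veri_output = sess.run(net_veri.output, feed_dict={net_veri.inputholder: cropped_imgs}))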