Code Example #1
    def build_loss(self, seg_layer, lab_holder):
        lab_reform = tf.expand_dims(lab_holder, -1)
        # resize the float logits to the label size instead of interpolating
        # the integer labels (cf. Code Example #6)
        seg_layer = tf.image.resize_images(seg_layer,
                                           tf.shape(lab_reform)[1:3])
        lab_reform = tf.squeeze(lab_reform, axis=-1)
        seg_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_layer,
                                                           labels=lab_reform))

        var_s = M.get_trainable_vars('SegLayer')
        var_m = M.get_trainable_vars('MergingLayer')

        train_step = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,
                                                             var_list=var_s)
        train_step2 = tf.train.AdamOptimizer(1e-5).minimize(
            seg_loss, var_list=self.net_body.var)
        train_step3 = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,
                                                              var_list=var_m)
        upds = M.get_update_ops()
        with tf.control_dependencies(upds +
                                     [train_step, train_step2, train_step3]):
            train_op = tf.no_op()

        self.loss = seg_loss
        self.train_op = train_op
Code Example #2
    def build_loss(self,seg_layer,stream_list,inst_layer,lab_holder,mask_holder,coord_holder,inst_holder):

        with tf.variable_scope('coord_loss'):
            coords_reshape = tf.constant(self.coords[:,:,::-1],tf.float32)
            coords_reshape = tf.expand_dims(coords_reshape,0)
            offset = coord_holder - tf.tile(coords_reshape,[tf.shape(coord_holder)[0],1,1,3])
            coord_loss = self.coord_loss(stream_list,offset,mask_holder)
        with tf.variable_scope('inst_loss'):
            inst_loss = tf.reduce_mean(tf.square(inst_layer - inst_holder))

        var_m = M.get_trainable_vars('inst_part/MergingLayer')
        var_c = M.get_trainable_vars('inst_part/stream')
        var_i = M.get_trainable_vars('inst_part/inst_layer')
        var_net = M.get_trainable_vars('inst_part/WideRes')

        with tf.variable_scope('overall_loss'):
            overall_loss = 5*coord_loss + inst_loss

        train_step = tf.train.AdamOptimizer(0.0001).minimize(overall_loss,var_list=var_m) # merging layer
        train_step2 = tf.train.AdamOptimizer(1e-5).minimize(overall_loss,var_list=var_net) # main body
        train_step4 = tf.train.AdamOptimizer(0.0001).minimize(10*coord_loss,var_list=var_c) # coord streams
        train_step5 = tf.train.AdamOptimizer(0.0001).minimize(inst_loss,var_list=var_i) # instance prediction
        with tf.control_dependencies([train_step,train_step2,train_step4,train_step5]):
            train_op = tf.no_op()

        self.crd_loss = coord_loss
        self.inst_loss = inst_loss
        self.train_op = train_op
Code Example #3
def build_graph():
    envholder = tf.placeholder(tf.float32, [None, 128])
    envholder2 = tf.placeholder(tf.float32, [None, 128])
    reward_holder = tf.placeholder(tf.float32, [None, 1])
    actholder = tf.placeholder(tf.float32, [None, 1])
    terminated_holder = tf.placeholder(tf.float32, [None, 1])

    #policy gradient
    #S_0 for actor_1
    a_eval = actor(envholder, 'a1')
    #S_1 for actor_1
    a_real = actor(envholder2, 'a2')

    #combine actor with env
    env_act1 = tf.concat([envholder, a_eval], axis=-1)  #S_0
    env_act2 = tf.concat([envholder2, a_real], axis=-1)  #S_1
    env_act3 = tf.concat([envholder, actholder], axis=-1)

    #DQN
    c_eval = crit(env_act1, 'c1')  #S_0
    c_real = crit(env_act2, 'c2')  #S_1
    c_real2 = crit(env_act1, 'c2', True)
    c_eval2 = crit(env_act3, 'c1', True)

    var_a1 = M.get_trainable_vars('a1')
    var_a2 = M.get_trainable_vars('a2')
    var_c1 = M.get_trainable_vars('c1')
    var_c2 = M.get_trainable_vars('c2')

    q_target = EPS * c_real * terminated_holder + reward_holder
    c_loss = tf.reduce_mean(tf.square(c_eval2 - q_target))

    a_loss = -tf.reduce_mean(c_real2)

    train_c = tf.train.RMSPropOptimizer(0.0025).minimize(c_loss,
                                                         var_list=var_c1)
    train_a = tf.train.RMSPropOptimizer(0.0005).minimize(
        a_loss, var_list=var_a1)  #maximize c_real2 , env_act1

    assign_a = soft_assign(var_a2, var_a1, 0.5)
    assign_c = soft_assign(var_c2, var_c1, 0.5)

    assign_a0 = assign(var_a2, var_a1)
    assign_c0 = assign(var_c2, var_c1)

    return [
        envholder, envholder2, reward_holder, actholder, terminated_holder
    ], a_eval, [c_loss, a_loss], [train_c,
                                  train_a], [assign_c,
                                             assign_a], [assign_c0,
                                                         assign_a0], a_eval
Code Example #4
	def build_loss(self):
		with tf.variable_scope('mask_loss'):
			lab_reform = tf.expand_dims(self.mask_holder,-1)
			mask_out = tf.image.resize_images(self.mask_out,tf.shape(lab_reform)[1:3])
			lab_reform = tf.squeeze(lab_reform,axis=-1)
			mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=mask_out,labels=lab_reform))
			train_mask0 = tf.train.AdamOptimizer(0.0001).minimize(mask_loss,var_list=M.get_trainable_vars('bg_fg/SegLayer'))
			train_mask1 = tf.train.AdamOptimizer(1e-5).minimize(mask_loss,var_list=M.get_trainable_vars('bg_fg/WideRes'))

		with tf.variable_scope('seg_loss'):
			lab_reform = tf.expand_dims(self.seg_holder,-1)
			seg_out = tf.image.resize_images(self.seg_out,tf.shape(lab_reform)[1:3])
			lab_reform = tf.squeeze(lab_reform,axis=-1)
			seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_out,labels=lab_reform))
			train_seg0 = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,var_list=M.get_trainable_vars('seg_part/SegLayer')+M.get_trainable_vars('seg_part/MergingLayer'))
			train_seg1 = tf.train.AdamOptimizer(1e-5).minimize(seg_loss,var_list=M.get_trainable_vars('seg_part/WideRes'))
Code Example #5
    def __init__(self, class_num):

        inp_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
        lab_holder = tf.placeholder(tf.int32, [None, 460, 460])
        mask_holder = tf.placeholder(tf.float32, [None, 460, 460])

        mask = tf.expand_dims(mask_holder, -1)
        c_ = tf.concat([inp_holder, mask], -1)
        merged_layer = self.merging_layer(c_)

        self.net_body = seg_main_body(merged_layer)
        seg_layer = self.segmentation_layer(self.net_body.feature_layer, 12,
                                            class_num)
        self.build_loss(seg_layer, lab_holder)

        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        M.loadSess('./savings_seg/',
                   self.sess,
                   init=True,
                   var_list=M.get_trainable_vars('seg_part/WideRes'))

        self.inp_holder = inp_holder
        self.lab_holder = lab_holder
        self.seg_layer = seg_layer
        self.mask_holder = mask_holder
Code Example #6
    def build_loss(self,seg_layer,lab_holder):
        lab_reform = tf.expand_dims(lab_holder, -1) # 460 x 460 x 1
        seg_layer = tf.image.resize_images(seg_layer, tf.shape(lab_reform)[1:3]) # 460 x 460 x 2
        lab_reform = tf.squeeze(lab_reform, axis=3) # 460 x 460
        seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_layer,labels=lab_reform))

        var_s = M.get_trainable_vars('bg_fg/SegLayer')
        var_net = M.get_trainable_vars('bg_fg/WideRes')

        train_step = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,var_list=var_s)
        train_step2 = tf.train.AdamOptimizer(1e-6).minimize(seg_loss,var_list=var_net)
        upds = M.get_update_ops()
        with tf.control_dependencies(upds+[train_step,train_step2]):
            train_op = tf.no_op()

        self.loss = seg_loss
        self.train_op = train_op
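
As a standalone shape check for the pattern above: sparse_softmax_cross_entropy_with_logits expects integer labels of shape [N, H, W] and float logits of shape [N, H, W, C], which is why the logits are resized rather than the labels. The sketch below uses dummy sizes chosen for illustration only.

import numpy as np
import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 115, 115, 2])   # coarse network output
labels = tf.placeholder(tf.int32, [None, 460, 460])        # full-resolution labels

# resize the float logits up to the label size instead of interpolating the labels
logits_up = tf.image.resize_images(logits, tf.shape(tf.expand_dims(labels, -1))[1:3])
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_up, labels=labels))

with tf.Session() as sess:
    print(sess.run(loss, feed_dict={
        logits: np.random.randn(2, 115, 115, 2).astype(np.float32),
        labels: np.random.randint(0, 2, size=(2, 460, 460)).astype(np.int32)}))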
Code Example #7
    def build_loss(self,seg_layer,lab_holder):
        lab_reform = tf.expand_dims(lab_holder,-1)
        # resize the float logits to the label size instead of interpolating the integer labels
        seg_layer = tf.image.resize_images(seg_layer,tf.shape(lab_reform)[1:3])
        lab_reform = tf.squeeze(lab_reform,axis=-1)
        seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_layer,labels=lab_reform))

        var_s = M.get_trainable_vars('SegLayer')

        self.loss = seg_loss
Code Example #8
File: train_gan_old.py  Project: wonggw/sul
def build_graph(train=True):
    with tf.name_scope('inp'):
        imgholder = tf.placeholder(tf.float32, [None, 256, 256, 3])
    with tf.name_scope('gnd'):
        gndholder = tf.placeholder(tf.float32, [None, 256, 256, 1])

    x_fake = gen(imgholder, train=train)
    d_fake = dis(x_fake, imgholder, train=train)
    d_real = dis(gndholder, imgholder, reuse=True, train=train)

    g_loss_L1 = tf.reduce_mean(tf.abs(x_fake - gndholder))
    g_loss_lg = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                labels=tf.ones_like(d_fake)))

    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real,
                                                labels=tf.ones_like(d_real)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                labels=tf.zeros_like(d_fake)))

    varg = M.get_trainable_vars('gen')
    vard = M.get_trainable_vars('dis')
    updg = M.get_update_ops('gen')
    updd = M.get_update_ops('dis')

    with tf.name_scope('optimizer'):
        train_d = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(
            d_loss_real + d_loss_fake, var_list=vard)
        train_g = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(
            g_loss_lg + g_loss_L1 * 10, var_list=varg)

    return [imgholder, gndholder], [
        g_loss_L1 + g_loss_lg, d_loss_fake + d_loss_real
    ], [updg, updd], [train_g,
                      train_d], x_fake, [g_loss_lg, d_loss_fake + d_loss_real]
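
A minimal driver sketch for the graph above, assuming a hypothetical next_batch() data source and the return order shown in build_graph; this is not the project's actual training script.

import tensorflow as tf

holders, losses, upds, trains, x_fake, monitors = build_graph(train=True)
imgholder, gndholder = holders

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        img_batch, gnd_batch = next_batch()  # hypothetical data source
        feed = {imgholder: img_batch, gndholder: gnd_batch}
        # run both optimizer steps together with the batch-norm update ops
        sess.run([trains, upds], feed_dict=feed)
        if step % 100 == 0:
            g_l, d_l = sess.run(losses, feed_dict=feed)
            print(step, g_l, d_l)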
Code Example #9
    def __init__(self):
        inp_holder = tf.placeholder(tf.float32,[None,460,460,3])
        lab_holder = tf.placeholder(tf.int32,[None,460,460])

        self.net_body = seg_main_body(inp_holder)
        seg_layer = self.segmentation_layer(self.net_body.feature_layer,12)
        self.build_loss(seg_layer,lab_holder)

        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        M.loadSess('./savings_bgfg/',self.sess,init=True,var_list=M.get_trainable_vars('bg_fg/WideRes'))

        self.inp_holder = inp_holder
        self.lab_holder = lab_holder
        self.seg_layer = seg_layer
Code Example #10
def build_graph():
    envHolder = tf.placeholder(tf.float32, [None, 4])
    actionHolder = tf.placeholder(tf.float32, [None, 2])
    scoreHolder = tf.placeholder(tf.float32, [None])

    output = main_structure(envHolder, 'output')
    # action_out equals next_score when actionHolder one-hot-selects the greedy action
    action_out = tf.reduce_sum(output * actionHolder, axis=1)
    action = tf.argmax(output, axis=1)
    next_score = tf.reduce_max(output, axis=1)

    loss = tf.reduce_mean(tf.square(scoreHolder - action_out))

    train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)

    var_o = M.get_trainable_vars('output')

    return output, envHolder, actionHolder, scoreHolder, action, action_out, next_score, train_step, loss, var_o
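
A sketch of how the target fed into scoreHolder could be formed from the returned next_score tensor; GAMMA, the batch arrays and the surrounding replay logic are assumptions, not part of the original code.

import numpy as np

GAMMA = 0.99  # assumed discount factor

def make_targets(sess, next_score, envHolder, rewards, next_states, dones):
    # bootstrap with the network's own max-Q estimate of the next state,
    # zeroing the bootstrap term for terminal transitions
    next_q = sess.run(next_score, feed_dict={envHolder: next_states})
    return rewards + GAMMA * next_q * (1.0 - dones)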
Code Example #11
    def __init__(self, class_num):
        self.size = size = 460
        self.class_num = 20

        # build placeholders
        inp_holder = tf.placeholder(tf.float32,[None,size,size,3],name='image_holder')
        seg_holder = tf.placeholder(tf.float32,[None,size,size,class_num],name='segment_holder')
        mask_holder = tf.placeholder(tf.float32,[None,size,size],name='mask_holder')
        coord_holder = tf.placeholder(tf.float32,[None,size,size,6],name='coordinate_holder')
        inst_holder = tf.placeholder(tf.float32,[None,class_num],name='instance_holder')

        # construct input (4 -> 3 with 1x1 conv)
        merged_layer = self.merging_layer(inp_holder,seg_holder,mask_holder)

        # build network
        self.get_coord(size)
        self.net_body = seg_main_body(merged_layer)

        stream_list = self.get_stream_list(self.net_body.feature_maps)
        inst_pred = self.inst_layer(self.net_body.feature_layer,stream_list[-1],class_num)
        # seg_layer and lab_holder are not defined in this class; seg_holder is
        # passed in their place (those two arguments are unused in build_loss,
        # cf. Code Example #2)
        self.build_loss(seg_holder,stream_list,inst_pred,seg_holder,mask_holder,coord_holder,inst_holder)

        # build saver and session
        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        # self.writer = tf.summary.FileWriter('./logs/',self.sess.graph)
        M.loadSess('./savings_inst_model/',self.sess,init=True,var_list=M.get_trainable_vars('inst_part/WideRes'))

        # set class variables
        # holders
        self.inp_holder = inp_holder
        self.lab_holder = seg_holder
        self.mask_holder = mask_holder
        self.coord_holder = coord_holder
        self.inst_holder = inst_holder
        # layers
        self.coord_layer = stream_list[-1]
        self.inst_layer = inst_pred
Code Example #12
		img = (img/127.5 - 1.0)
		img = cv2.resize(img,(122,144))
		M2 = np.float32([[1,0,11],[0,1,0]])
		img = cv2.warpAffine(img,M2,(144,144))
		img = img[8:136,8:136]
		data2.append(img)
	return data,data2

build_total_graph()
with tf.Session() as sess:
	print('Writing log file...')
	saver = tf.train.Saver()
	# writer = tf.summary.FileWriter('./logs/',sess.graph)
	data,data2 = get_data()
	M.loadSess('./model/',sess)
	M.loadSess(modpath='./modelres/Epoc0Iter20999.ckpt',sess=sess,var_list=M.get_trainable_vars('MainModel'))
	alltensor = [totalLoss,dis_loss,train_d,train_g,updd,updg]
	gentensor = [profHolder,x_fake,gtHolder]
	for iteration in range(100000):
		train_batch = random.sample(data,BSIZE)
		p2_batch = random.sample(data2,BSIZE)
		p_batch = [i[0] for i in train_batch]
		l_batch = [i[1] for i in train_batch]
		r_batch = [i[2] for i in train_batch]
		n_batch = [i[3] for i in train_batch]
		m_batch = [i[4] for i in train_batch]
		gt_batch = [i[5] for i in train_batch]
		z_batch = np.random.uniform(size=[BSIZE,ZDIM],low=-1.0,high=1.0)
		feeddict = {profHolder:p_batch, gtHolder:gt_batch, leHolder:l_batch, reHolder:r_batch, mthHolder:m_batch, nseHolder:n_batch, zHolder:z_batch, domainHolder:np.zeros([BSIZE])}
		g_loss, d_loss, _, _, _, _ = sess.run(alltensor, feed_dict=feeddict)
		feeddict2 = {domainHolder:np.ones([BSIZE]), profHolder:p2_batch}
Code Example #13
class network():
	def __init__(self,class_num):
		img_holder = tf.placeholder(tf.float32,[None,460,460,3])
		with tf.variable_scope('bg_fg'):
			net_bgfg = network_bg_fg(img_holder)
		with tf.variable_scope('seg_part'):
			net_seg = network_seg(class_num,img_holder,tf.nn.softmax(tf.image.resize_images(net_bgfg.seg_layer,[460,460]),1)[:,:,:,1])
		with tf.variable_scope('inst_part'):
			net_inst = network_inst(class_num,img_holder,
				tf.nn.softmax(tf.image.resize_images(net_bgfg.seg_layer,[460,460]),1)[:,:,:,1],
				tf.image.resize_images(net_seg.seg_layer,[460,460]))
		
		self.network_bg_fg = net_bgfg
		self.network_seg = net_seg
		self.network_inst = net_inst

		self.img_holder = img_holder
		self.mask_holder = net_bgfg.lab_holder
		self.seg_holder = net_seg.lab_holder
		self.coord_holder = net_inst.coord_holder
		self.inst_holder = net_inst.inst_holder

		self.mask_out = net_bgfg.seg_layer
		self.seg_out = net_seg.seg_layer
		self.inst_num_out = net_inst.inst_layer
		self.coord_out = net_inst.coord_layer

		self.build_loss()

		self.sess = tf.Session()
		M.loadSess('./savings_bgfg/',sess=self.sess,init=True,var_list=M.get_all_vars('bg_fg'))
		M.loadSess('./savings_seg/',sess=self.sess,var_list=M.get_all_vars('seg_part'))
		M.loadSess('./savings_inst/',sess=self.sess,var_list=M.get_all_vars('inst_part'))

	def build_loss(self):
		with tf.variable_scope('mask_loss'):
			lab_reform = tf.expand_dims(self.mask_holder,-1)
			mask_out = tf.image.resize_images(self.mask_out,tf.shape(lab_reform)[1:3])
			lab_reform = tf.squeeze(lab_reform,axis=-1)
			mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=mask_out,labels=lab_reform))
			train_mask0 = tf.train.AdamOptimizer(0.0001).minimize(mask_loss,var_list=M.get_trainable_vars('bg_fg/SegLayer'))
			train_mask1 = tf.train.AdamOptimizer(1e-5).minimize(mask_loss,var_list=M.get_trainable_vars('bg_fg/WideRes'))

		with tf.variable_scope('seg_loss'):
			lab_reform = tf.expand_dims(self.seg_holder,-1)
			seg_out = tf.image.resize_images(self.seg_out,tf.shape(lab_reform)[1:3])
			lab_reform = tf.squeeze(lab_reform,axis=-1)
			seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_out,labels=lab_reform))
			train_seg0 = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,var_list=M.get_trainable_vars('seg_part/SegLayer')+M.get_trainable_vars('seg_part/MergingLayer'))
			train_seg1 = tf.train.AdamOptimizer(1e-5).minimize(seg_loss,var_list=M.get_trainable_vars('seg_part/WideRes'))

		with tf.variable_scope('inst_loss'):
			train_inst0 = tf.train.AdamOptimizer(0.0001).minimize(self.network_inst.overall_loss,var_list=M.get_trainable_vars('inst_part/MergingLayer')) # merging layer
			train_inst1 = tf.train.AdamOptimizer(1e-5).minimize(self.network_inst.overall_loss,var_list=M.get_trainable_vars('inst_part/WideRes')) # main body
			train_inst2 = tf.train.AdamOptimizer(0.0001).minimize(10*self.network_inst.coord_loss,var_list=M.get_trainable_vars('inst_part/stream')) # coord streams
			train_inst3 = tf.train.AdamOptimizer(0.0001).minimize(self.network_inst.inst_loss,var_list=M.get_trainable_vars('inst_part/inst_layer')) # instance prediction

		upd_ops = M.get_update_ops()

		with tf.control_dependencies(upd_ops+[train_mask0,train_mask1,train_seg0,train_seg1,train_inst0,train_inst1,train_inst2,train_inst3]):
			train_op = tf.no_op()

		self.mask_loss = mask_loss
		self.seg_loss = seg_loss
		self.inst_loss = self.network_inst.inst_loss
		self.coord_loss = self.network_inst.coord_loss
		self.train_op = train_op
Code Example #14
		cropped = item[0]
		coord = item[1]
		lb = get_lb(coord,crds)
		lbs.append([cropped,lb])
		# x,y,w,h = 
		# cv2.rectangle(img,())
	return lbs

reader = datareader2.reader(height=240,width=320,scale_range=[0.05,1.2])
b0,b1,c0,c1 = netpart.model_out

start_time = time.time()
MAXITER = 100000
with tf.Session() as sess:
	saver = tf.train.Saver()
	M.loadSess('./model/',sess,init=True,var_list=M.get_trainable_vars('MSRPN'))
	for i in range(MAXITER):
		img,coord = reader.get_img()
		buff_out = sess.run([b0,b1,c0,c1],feed_dict={netpart.inpholder:[img]})
		bs,cs = buff_out[:2],buff_out[2:]
		lbs = crop(img,bs,cs,coord)
		train_imgs = [k[0] for k in lbs]
		train_labs = [k[1] for k in lbs]
		# for item in lbs:
		# 	cv2.imshow('ad',item[0])
		# 	print(item[1])
		# 	cv2.waitKey(0)
		ls,ac,_ = sess.run([net_veri.loss,net_veri.accuracy,net_veri.ts],
			feed_dict={net_veri.inputholder:train_imgs,net_veri.labelholder:train_labs})
		if i%10==0:
			t2 = time.time()
Code Example #15
File: gan2.py  Project: wonggw/sul
                                                       logits=disfalse))
    tf.summary.scalar('lossG', lossG)
with tf.name_scope('lossD'):
    lossD1 = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.ones(
            [BSIZE], dtype=tf.int64),
                                                       logits=distrue))
    lossD2 = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.zeros(
            [BSIZE], dtype=tf.int64),
                                                       logits=disfalse))
    lossD = 0.5 * (lossD1 + lossD2)
    tf.summary.scalar('lossD', lossD)

# run update_ops to keep the batch-norm moving statistics updated
VARD = M.get_trainable_vars('Discriminator')
VARG = M.get_trainable_vars('Generator')
UPDD = M.get_update_ops('Discriminator')
UPDG = M.get_update_ops('Generator')

with tf.name_scope('opti'):
    with tf.name_scope('optiG'):
        trainG = tf.train.AdamOptimizer(learning_rate=LR,
                                        beta1=BETA).minimize(lossG,
                                                             var_list=VARG)
    with tf.name_scope('optiD'):
        trainD = tf.train.AdamOptimizer(learning_rate=LR,
                                        beta1=BETA).minimize(lossD,
                                                             var_list=VARD)
    with tf.control_dependencies([trainG, trainD]):
        trainAll = tf.no_op(name='train')
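
The comment above mentions running the update ops for batch norm, but trainAll as written only depends on the two optimizer steps. One way to fold UPDG and UPDD into the same grouped op, assuming they are plain lists of ops, is sketched below.

with tf.control_dependencies(UPDG + UPDD + [trainG, trainD]):
    trainAll_with_updates = tf.no_op(name='train_with_updates')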
Code Example #16
File: tpgan_domain_lnet.py  Project: wonggw/sul
def build_total_graph():
    global totalLoss, dis_loss, train_d, train_g, train_e, updd, updg, upde, profHolder, gtHolder, leHolder, reHolder, mthHolder, nseHolder, domainHolder, zHolder, clsHolder, x_fake, losscollection
    with tf.name_scope('ProfileImg'):
        profHolder = tf.placeholder(tf.float32, [None, 128, 128, 3])
    with tf.name_scope('GTIMG'):
        gtHolder = tf.placeholder(tf.float32, [None, 128, 128, 3])
    with tf.name_scope('LEIMG'):
        leHolder = tf.placeholder(tf.float32, [None, 40, 40, 3])
    with tf.name_scope('REIMG'):
        reHolder = tf.placeholder(tf.float32, [None, 40, 40, 3])
    with tf.name_scope('MTHIMG'):
        mthHolder = tf.placeholder(tf.float32, [None, 32, 48, 3])
    with tf.name_scope('NSEIMG'):
        nseHolder = tf.placeholder(tf.float32, [None, 32, 40, 3])
    with tf.name_scope('Z'):
        zHolder = tf.placeholder(tf.float32, [None, ZDIM])
    with tf.name_scope('domain'):
        domainHolder = tf.placeholder(tf.int32, [None])
    with tf.name_scope('CLASS'):
        clsHolder = tf.placeholder(tf.int32, [None])

    nse = localpath_nse(nseHolder)
    mth = localpath_mth(mthHolder)
    le = localpath_le(leHolder)
    re = localpath_re(reHolder)
    fusion = fusion_locals(le, re, nse, mth)
    x_fake, domainlayer = globalpath(profHolder, zHolder, fusion)
    d_fake, c_fake = discriminator(x_fake)
    d_real, c_real = discriminator(gtHolder, reuse=True)
    f_fake = lcnn(x_fake)
    f_real = lcnn(gtHolder, reuse=True)

    with tf.name_scope('pixel_loss'):
        pix_loss = tf.reduce_mean(tf.abs(gtHolder - x_fake))
    with tf.name_scope('sym_loss'):
        x_left, x_right = tf.split(x_fake, 2, axis=2)
        sym_loss = tf.reduce_mean(tf.abs(x_left - x_right))
    with tf.name_scope('dis_loss'):
        dis_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real,
                                                    labels=tf.ones([BSIZE,
                                                                    1])))
        dis_false = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                    labels=tf.zeros([BSIZE,
                                                                     1])))
        dis_loss = dis_true + dis_false
    with tf.name_scope('gen_loss'):
        gen_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                    labels=tf.ones([BSIZE,
                                                                    1])))
    with tf.name_scope('ip_loss'):
        ip_loss = tf.reduce_mean(tf.abs(f_real - f_fake))
    with tf.name_scope('tv_loss'):
        tv_loss = tf.reduce_mean(
            tf.image.total_variation(x_fake)) / (128.0 * 128.0)
    with tf.name_scope('domain_loss'):
        domain_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=domainlayer, labels=domainHolder))
    with tf.name_scope('cls_loss'):
        cls_loss_real = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=c_real,
                                                           labels=clsHolder))
        # cls_loss_fake = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=c_fake,labels=clsHolder))

    vard = M.get_trainable_vars('dis')
    varg = M.get_trainable_vars('local_le') \
    + M.get_trainable_vars('local_re') \
    + M.get_trainable_vars('local_mth') \
    + M.get_trainable_vars('local_nse')\
    + M.get_trainable_vars('global_path')\
    + M.get_trainable_vars('fusion_node')
    updd = M.get_update_ops('dis')
    updg = M.get_update_ops('local_le') \
    + M.get_update_ops('local_re') \
    + M.get_update_ops('local_mth') \
    + M.get_update_ops('local_nse')\
    + M.get_update_ops('global_path')\
    + M.get_update_ops('fusion_node')
    upde = M.get_update_ops('global_path')[:18]
    losscollection = [
        pix_loss, sym_loss, gen_loss, ip_loss, tv_loss, domain_loss, dis_loss
    ]
    with tf.name_scope('Optimizer'):
        totalLoss = (pix_loss * 0.3 + sym_loss * 0.07 + 0.001 * gen_loss +
                     0.003 * ip_loss + 0.0001 * tv_loss + 0.1 * domain_loss)
        train_d = tf.train.AdamOptimizer(0.0001).minimize(dis_loss +
                                                          cls_loss_real,
                                                          var_list=vard)
        train_g = tf.train.AdamOptimizer(0.0001).minimize(totalLoss,
                                                          var_list=varg)
        train_e = tf.train.AdamOptimizer(0.0001).minimize(domain_loss)
Code Example #17
File: tpgan_domain_lnet.py  Project: wonggw/sul
        data2.append(img)
    print('data2 length:', len(data2))
    return data2


build_total_graph()
with tf.Session() as sess:
    print('Writing log file...')
    saver = tf.train.Saver()
    reader = data_reader()
    # writer = tf.summary.FileWriter('./logs/',sess.graph)
    # data2 = get_data2()
    M.loadSess('./model/', sess, init=True)
    M.loadSess(modpath='/home/psl/7T/chengyu/modelres/Epoc0Iter20999.ckpt',
               sess=sess,
               var_list=M.get_trainable_vars('MainModel'))
    alltensor = [totalLoss, dis_loss, train_d, train_g, updd, updg]
    gentensor = [profHolder, x_fake, gtHolder]
    for iteration in range(100000):
        train_batch = reader.get_batch(BSIZE)
        # p2_batch = random.sample(data2,BSIZE)
        p_batch = [i[0] for i in train_batch]
        l_batch = [i[1] for i in train_batch]
        r_batch = [i[2] for i in train_batch]
        n_batch = [i[3] for i in train_batch]
        m_batch = [i[4] for i in train_batch]
        gt_batch = [i[5] for i in train_batch]
        lb_batch = [i[6] for i in train_batch]
        z_batch = np.random.uniform(size=[BSIZE, ZDIM], low=-1.0, high=1.0)
        feeddict = {
            profHolder: p_batch,
Code Example #18
        lbs.append([cropped, lb])
        # x,y,w,h =
        # cv2.rectangle(img,())
    return lbs


reader = datareader2.reader(scale_range=[0.05, 1.2])
b0, b1, b2, c0, c1, c2 = netpart.model_out

start_time = time.time()
with tf.Session() as sess:
    saver = tf.train.Saver()
    M.loadSess('./model/',
               sess,
               init=True,
               var_list=M.get_trainable_vars('MSRPN'))
    for i in range(MAXITER):
        img, coord = reader.get_img()
        buff_out = sess.run([b0, b1, b2, c0, c1, c2],
                            feed_dict={netpart.inpholder: [img]})
        bs, cs = buff_out[:3], buff_out[3:]
        lbs = crop(img, bs, cs, coord)
        train_imgs = [k[0] for k in lbs]
        train_labs = [k[1] for k in lbs]
        # for item in lbs:
        # 	cv2.imshow('ad',item[0])
        # 	print(item[1])
        # 	cv2.waitKey(0)
        ls, ac, _ = sess.run([net_veri.loss, net_veri.accuracy, net_veri.ts],
                             feed_dict={
                                 net_veri.inputholder: train_imgs,