Example #1
    def build_loss(self, seg_layer, lab_holder):
        # Resize the logits to the label resolution, then drop the labels'
        # singleton channel so shapes match the sparse cross-entropy.
        lab_reform = tf.expand_dims(lab_holder, -1)
        seg_layer = tf.image.resize_images(seg_layer,
                                           tf.shape(lab_reform)[1:3])
        lab_reform = tf.squeeze(lab_reform, axis=3)
        seg_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_layer,
                                                           labels=lab_reform))

        var_s = M.get_trainable_vars('SegLayer')
        var_m = M.get_trainable_vars('MergingLayer')

        train_step = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,
                                                             var_list=var_s)
        train_step2 = tf.train.AdamOptimizer(1e-5).minimize(
            seg_loss, var_list=self.net_body.var)
        train_step3 = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,
                                                              var_list=var_m)
        upds = M.get_update_ops()
        with tf.control_dependencies(upds +
                                     [train_step, train_step2, train_step3]):
            train_op = tf.no_op()

        self.loss = seg_loss
        self.train_op = train_op
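
The pattern above, grouping several minimize ops and the batch-norm update ops behind one tf.no_op, recurs in most of these examples. A minimal self-contained sketch of the idiom using plain TF 1.x collections instead of the project's M helpers (all names below are illustrative, not from the original code):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
w = tf.Variable(tf.zeros([4, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

step_a = tf.train.AdamOptimizer(1e-4).minimize(loss)
step_b = tf.train.AdamOptimizer(1e-5).minimize(loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

# Fetching train_op forces every op in the control dependencies to run first.
with tf.control_dependencies(update_ops + [step_a, step_b]):
    train_op = tf.no_op()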
Example #2
    def __init__(self, class_num, is_training=True, mod_dir='./model/'):
        self.mod_dir = mod_dir
        with tf.variable_scope('Input'):
            self.img_holder = tf.placeholder(tf.float32, [None, 128, 128, 3])
            self.lab_holder = tf.placeholder(tf.float32, [None, class_num])
        with tf.variable_scope('Res_101_cy'):
            mod = M.Model(self.img_holder)
            mod.set_bn_training(is_training)
            # 64x64
            mod.convLayer(7,
                          64,
                          stride=2,
                          activation=M.PARAM_LRELU,
                          batch_norm=True)
            mod.res_block(256, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(256, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(256, stride=1, activation=M.PARAM_LRELU)
            # 32x32
            mod.res_block(512, stride=2, activation=M.PARAM_LRELU)
            mod.res_block(512, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(512, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(512, stride=1, activation=M.PARAM_LRELU)
            # 16x16
            # Downsample once at the start of the stage, then stay at 16x16
            # (stride=2 on all 14 blocks would shrink the map far below 1x1).
            for i in range(14):
                mod.res_block(1024,
                              stride=2 if i == 0 else 1,
                              activation=M.PARAM_LRELU)
            # 8x8
            mod.res_block(2048, stride=2, activation=M.PARAM_LRELU)
            mod.res_block(2048, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(2048, stride=1, activation=M.PARAM_LRELU)
            mod.avgpoolLayer(8)
            mod.flatten()
            #mod.fcLayer(256,nobias=True)
        self.feat = mod.get_current_layer()
        with tf.variable_scope('Classification'):
            logit_layer, eval_layer = M.enforcedClassifier(self.feat,
                                                           self.lab_holder,
                                                           dropout=1,
                                                           multi=None,
                                                           L2norm=False)
            self.accuracy = M.accuracy(eval_layer,
                                       tf.argmax(self.lab_holder, -1))

        if is_training:
            print('Building optimizer...')
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logit_layer, labels=self.lab_holder))
            with tf.control_dependencies(M.get_update_ops()):
                self.train_op = tf.train.AdamOptimizer(0.0001).minimize(
                    self.loss)

        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        M.loadSess(mod_dir, self.sess, init=True)
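
A training loop for this class could look like the sketch below; the class name, iteration count, and batch loader are assumptions, not part of the original source:

net = Network(class_num=10, is_training=True)  # hypothetical class name
for it in range(10000):
    imgs, labs = next_batch()  # hypothetical loader: (N,128,128,3) images, one-hot (N,10) labels
    loss, acc, _ = net.sess.run(
        [net.loss, net.accuracy, net.train_op],
        feed_dict={net.img_holder: imgs, net.lab_holder: labs})
    if it % 100 == 0:
        print('iter %d loss %.4f acc %.4f' % (it, loss, acc))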
Example #3
File: recon.py, Project: FUHUACANGYUE/sul
    def build_loss(self):
        with tf.variable_scope('LS_optim'):
            self.ls1 = tf.reduce_mean(
                tf.square(self.recon - self.targetholder[:, -1]) * self.A)
            self.ls = tf.reduce_mean(
                tf.square(self.recon - self.targetholder[:, -1]))
            self.A_length = tf.reduce_mean(tf.square(self.A))
            train_step = tf.train.AdamOptimizer(0.0001).minimize(
                self.ls + 0.1 * self.ls1)
            with tf.control_dependencies(M.get_update_ops() + [train_step]):
                self.train_step = tf.no_op()
Example #4
File: train_gan_old.py, Project: wonggw/sul
def build_graph(train=True):
    with tf.name_scope('inp'):
        imgholder = tf.placeholder(tf.float32, [None, 256, 256, 3])
    with tf.name_scope('gnd'):
        gndholder = tf.placeholder(tf.float32, [None, 256, 256, 1])

    x_fake = gen(imgholder, train=train)
    d_fake = dis(x_fake, imgholder, train=train)
    d_real = dis(gndholder, imgholder, reuse=True, train=train)

    g_loss_L1 = tf.reduce_mean(tf.abs(x_fake - gndholder))
    g_loss_lg = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                labels=tf.ones_like(d_fake)))

    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real,
                                                labels=tf.ones_like(d_real)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake,
                                                labels=tf.zeros_like(d_fake)))

    varg = M.get_trainable_vars('gen')
    vard = M.get_trainable_vars('dis')
    updg = M.get_update_ops('gen')
    updd = M.get_update_ops('dis')

    with tf.name_scope('optimizer'):
        train_d = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(
            d_loss_real + d_loss_fake, var_list=vard)
        train_g = tf.train.AdamOptimizer(0.001, beta1=0.5).minimize(
            g_loss_lg + g_loss_L1 * 10, var_list=varg)

    return ([imgholder, gndholder],
            [g_loss_L1 + g_loss_lg, d_loss_fake + d_loss_real],
            [updg, updd],
            [train_g, train_d],
            x_fake,
            [g_loss_lg, d_loss_fake + d_loss_real])
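
build_graph only assembles the graph, so a caller has to drive it. One plausible consumption of the returned handles (session setup and the data source are assumptions):

holders, losses, upds, trains, x_fake, scalars = build_graph(train=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for it in range(100000):
    imgs, gnds = next_batch()  # hypothetical loader
    fd = {holders[0]: imgs, holders[1]: gnds}
    sess.run([trains[1]] + upds[1], feed_dict=fd)  # discriminator step plus its BN updates
    sess.run([trains[0]] + upds[0], feed_dict=fd)  # generator step plus its BN updates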
Example #5
    def build_loss(self, seg_layer, lab_holder):
        lab_reform = tf.expand_dims(lab_holder, -1)  # 460 x 460 x 1
        seg_layer = tf.image.resize_images(
            seg_layer, tf.shape(lab_reform)[1:3])  # 460 x 460 x 2
        lab_reform = tf.squeeze(lab_reform, axis=3)  # 460 x 460
        seg_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_layer,
                                                           labels=lab_reform))

        var_s = M.get_trainable_vars('bg_fg/SegLayer')
        var_net = M.get_trainable_vars('bg_fg/WideRes')

        train_step = tf.train.AdamOptimizer(0.0001).minimize(seg_loss,
                                                             var_list=var_s)
        train_step2 = tf.train.AdamOptimizer(1e-6).minimize(seg_loss,
                                                            var_list=var_net)
        upds = M.get_update_ops()
        with tf.control_dependencies(upds + [train_step, train_step2]):
            train_op = tf.no_op()

        self.loss = seg_loss
        self.train_op = train_op
Example #6
File: gan2.py, Project: wonggw/sul
with tf.name_scope('lossD'):
    lossD1 = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.ones([BSIZE], dtype=tf.int64), logits=distrue))
    lossD2 = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.zeros([BSIZE], dtype=tf.int64), logits=disfalse))
    lossD = 0.5 * (lossD1 + lossD2)
    tf.summary.scalar('lossD', lossD)

# Run the update ops alongside training to refresh batch-norm statistics.
VARD = M.get_trainable_vars('Discriminator')
VARG = M.get_trainable_vars('Generator')
UPDD = M.get_update_ops('Discriminator')
UPDG = M.get_update_ops('Generator')

with tf.name_scope('opti'):
    with tf.name_scope('optiG'):
        trainG = tf.train.AdamOptimizer(learning_rate=LR, beta1=BETA).minimize(
            lossG, var_list=VARG)
    with tf.name_scope('optiD'):
        trainD = tf.train.AdamOptimizer(learning_rate=LR, beta1=BETA).minimize(
            lossD, var_list=VARD)
    with tf.control_dependencies([trainG, trainD]):
        trainAll = tf.no_op(name='train')
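
Note that trainAll's control dependencies cover only the two minimize ops, so the batch-norm update ops still have to be fetched alongside it, as the comment above indicates. A short usage sketch (the session setup and feed-dict helper are assumptions):

sess = tf.Session()
sess.run(tf.global_variables_initializer())
fd = make_feed_dict()  # hypothetical helper filling the batch placeholders
sess.run([trainAll] + UPDD + UPDG, feed_dict=fd)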

Example #7
def build_total_graph():
    global totalLoss, dis_loss, train_d, train_g, train_e, updd, updg, upde
    global profHolder, gtHolder, leHolder, reHolder, mthHolder, nseHolder
    global domainHolder, zHolder, clsHolder, x_fake, losscollection
    with tf.name_scope('ProfileImg'):
        profHolder = tf.placeholder(tf.float32, [None, 128, 128, 3])
    with tf.name_scope('GTIMG'):
        gtHolder = tf.placeholder(tf.float32, [None, 128, 128, 3])
    with tf.name_scope('LEIMG'):
        leHolder = tf.placeholder(tf.float32, [None, 40, 40, 3])
    with tf.name_scope('REIMG'):
        reHolder = tf.placeholder(tf.float32, [None, 40, 40, 3])
    with tf.name_scope('MTHIMG'):
        mthHolder = tf.placeholder(tf.float32, [None, 32, 48, 3])
    with tf.name_scope('NSEIMG'):
        nseHolder = tf.placeholder(tf.float32, [None, 32, 40, 3])
    with tf.name_scope('Z'):
        zHolder = tf.placeholder(tf.float32, [None, ZDIM])
    with tf.name_scope('domain'):
        domainHolder = tf.placeholder(tf.int32, [None])
    with tf.name_scope('CLASS'):
        clsHolder = tf.placeholder(tf.int32, [None])

    nse = localpath_nse(nseHolder)
    mth = localpath_mth(mthHolder)
    le = localpath_le(leHolder)
    re = localpath_re(reHolder)
    fusion = fusion_locals(le, re, nse, mth)
    x_fake, domainlayer = globalpath(profHolder, zHolder, fusion)
    d_fake, c_fake = discriminator(x_fake)
    d_real, c_real = discriminator(gtHolder, reuse=True)
    f_fake = lcnn(x_fake)
    f_real = lcnn(gtHolder, reuse=True)

    with tf.name_scope('pixel_loss'):
        pix_loss = tf.reduce_mean(tf.abs(gtHolder - x_fake))
    with tf.name_scope('sym_loss'):
        x_left, x_right = tf.split(x_fake, 2, axis=2)
        sym_loss = tf.reduce_mean(tf.abs(x_left - x_right))
    with tf.name_scope('dis_loss'):
        dis_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_real, labels=tf.ones([BSIZE, 1])))
        dis_false = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_fake, labels=tf.zeros([BSIZE, 1])))
        dis_loss = dis_true + dis_false
    with tf.name_scope('gen_loss'):
        gen_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_fake, labels=tf.ones([BSIZE, 1])))
    with tf.name_scope('ip_loss'):
        ip_loss = tf.reduce_mean(tf.abs(f_real - f_fake))
    with tf.name_scope('tv_loss'):
        tv_loss = tf.reduce_mean(
            tf.image.total_variation(x_fake)) / (128.0 * 128.0)
    with tf.name_scope('domain_loss'):
        domain_loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=domainlayer, labels=domainHolder))
    with tf.name_scope('cls_loss'):
        cls_loss_real = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(logits=c_real,
                                                           labels=clsHolder))
        # cls_loss_fake = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=c_fake,labels=clsHolder))

    vard = M.get_trainable_vars('dis')
    varg = (M.get_trainable_vars('local_le') +
            M.get_trainable_vars('local_re') +
            M.get_trainable_vars('local_mth') +
            M.get_trainable_vars('local_nse') +
            M.get_trainable_vars('global_path') +
            M.get_trainable_vars('fusion_node'))
    updd = M.get_update_ops('dis')
    updg = (M.get_update_ops('local_le') +
            M.get_update_ops('local_re') +
            M.get_update_ops('local_mth') +
            M.get_update_ops('local_nse') +
            M.get_update_ops('global_path') +
            M.get_update_ops('fusion_node'))
    upde = M.get_update_ops('global_path')[:18]
    losscollection = [
        pix_loss, sym_loss, gen_loss, ip_loss, tv_loss, domain_loss, dis_loss
    ]
    with tf.name_scope('Optimizer'):
        totalLoss = (pix_loss * 0.3 + sym_loss * 0.07 + 0.001 * gen_loss +
                     0.003 * ip_loss + 0.0001 * tv_loss + 0.1 * domain_loss)
        train_d = tf.train.AdamOptimizer(0.0001).minimize(
            dis_loss + cls_loss_real, var_list=vard)
        train_g = tf.train.AdamOptimizer(0.0001).minimize(totalLoss,
                                                          var_list=varg)
        train_e = tf.train.AdamOptimizer(0.0001).minimize(domain_loss)
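
The source file does not show the training loop, so the schedule below is only one plausible way to combine the three train ops with their update-op lists:

build_total_graph()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for it in range(100000):
    fd = make_feed_dict()  # hypothetical helper filling all nine placeholders
    sess.run([train_d] + updd, feed_dict=fd)  # discriminator step plus its BN updates
    sess.run([train_g] + updg, feed_dict=fd)  # generator paths plus their BN updates
    sess.run([train_e] + upde, feed_dict=fd)  # domain step; upde is the first 18 global_path update ops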
Example #8
class network():
    def __init__(self, class_num):
        img_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
        with tf.variable_scope('bg_fg'):
            net_bgfg = network_bg_fg(img_holder)
        # Foreground probability: softmax over channels, then take channel 1.
        fg_prob = tf.nn.softmax(
            tf.image.resize_images(net_bgfg.seg_layer, [460, 460]),
            axis=-1)[:, :, :, 1]
        with tf.variable_scope('seg_part'):
            net_seg = network_seg(class_num, img_holder, fg_prob)
        with tf.variable_scope('inst_part'):
            net_inst = network_inst(
                class_num, img_holder, fg_prob,
                tf.image.resize_images(net_seg.seg_layer, [460, 460]))

        # Keep the sub-network instances, not the classes.
        self.network_bg_fg = net_bgfg
        self.network_seg = net_seg
        self.network_inst = net_inst

        self.img_holder = img_holder
        self.mask_holder = net_bgfg.lab_holder
        self.seg_holder = net_seg.lab_holder
        self.coord_holder = net_inst.coord_holder
        self.inst_holder = net_inst.inst_holder

        self.mask_out = net_bgfg.seg_layer
        self.seg_out = net_seg.seg_layer
        self.inst_num_out = net_inst.inst_layer
        self.coord_out = net_inst.coord_layer

        self.build_loss()

        self.sess = tf.Session()
        M.loadSess('./savings_bgfg/', sess=self.sess, init=True,
                   var_list=M.get_all_vars('bg_fg'))
        M.loadSess('./savings_seg/', sess=self.sess,
                   var_list=M.get_all_vars('seg_part'))
        M.loadSess('./savings_inst/', sess=self.sess,
                   var_list=M.get_all_vars('inst_part'))

    def build_loss(self):
        with tf.variable_scope('mask_loss'):
            # Resize the logits to the label resolution (same idiom as Example #5).
            lab_reform = tf.expand_dims(self.mask_holder, -1)
            mask_out = tf.image.resize_images(self.mask_out,
                                              tf.shape(lab_reform)[1:3])
            lab_reform = tf.squeeze(lab_reform, axis=3)
            mask_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=mask_out, labels=lab_reform))
            train_mask0 = tf.train.AdamOptimizer(0.0001).minimize(
                mask_loss, var_list=M.get_trainable_vars('bg_fg/SegLayer'))
            train_mask1 = tf.train.AdamOptimizer(1e-5).minimize(
                mask_loss, var_list=M.get_trainable_vars('bg_fg/WideRes'))

        with tf.variable_scope('seg_loss'):
            lab_reform = tf.expand_dims(self.seg_holder, -1)
            seg_out = tf.image.resize_images(self.seg_out,
                                             tf.shape(lab_reform)[1:3])
            lab_reform = tf.squeeze(lab_reform, axis=3)
            seg_loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=seg_out, labels=lab_reform))
            train_seg0 = tf.train.AdamOptimizer(0.0001).minimize(
                seg_loss,
                var_list=M.get_trainable_vars('seg_part/SegLayer') +
                M.get_trainable_vars('seg_part/MergingLayer'))
            train_seg1 = tf.train.AdamOptimizer(1e-5).minimize(
                seg_loss, var_list=M.get_trainable_vars('seg_part/WideRes'))

        with tf.variable_scope('inst_loss'):
            train_inst0 = tf.train.AdamOptimizer(0.0001).minimize(
                self.network_inst.overall_loss,
                var_list=M.get_trainable_vars('inst_part/MergingLayer'))  # merging layer
            train_inst1 = tf.train.AdamOptimizer(1e-5).minimize(
                self.network_inst.overall_loss,
                var_list=M.get_trainable_vars('inst_part/WideRes'))  # main body
            train_inst2 = tf.train.AdamOptimizer(0.0001).minimize(
                10 * self.network_inst.coord_loss,
                var_list=M.get_trainable_vars('inst_part/stream'))  # coord streams
            train_inst3 = tf.train.AdamOptimizer(0.0001).minimize(
                self.network_inst.inst_loss,
                var_list=M.get_trainable_vars('inst_part/inst_layer'))  # instance prediction

        upd_ops = M.get_update_ops()

        with tf.control_dependencies(upd_ops + [
                train_mask0, train_mask1, train_seg0, train_seg1,
                train_inst0, train_inst1, train_inst2, train_inst3
        ]):
            train_op = tf.no_op()

        self.mask_loss = mask_loss
        self.seg_loss = seg_loss
        self.inst_loss = self.network_inst.inst_loss
        self.coord_loss = self.network_inst.coord_loss
        self.train_op = train_op
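
Because build_loss folds every optimizer and update op into self.train_op, one session call performs a full training step. A sketch, with the class count and data loader assumed:

net = network(class_num=21)  # class count is an assumption
for it in range(100000):
    img, mask, seg, coord, inst = next_batch()  # hypothetical loader
    fd = {net.img_holder: img, net.mask_holder: mask, net.seg_holder: seg,
          net.coord_holder: coord, net.inst_holder: inst}
    losses, _ = net.sess.run(
        [[net.mask_loss, net.seg_loss, net.inst_loss, net.coord_loss],
         net.train_op], feed_dict=fd)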
Example #9
    def update_ops(self):
        with tf.control_dependencies(M.get_update_ops()):
            self.update_bn = tf.no_op()