Code example #1
File: recon.py Project: FUHUACANGYUE/sul
    def __init__(self, step):
        with tf.variable_scope('Input_holders'):
            self.inpholder = tf.placeholder(tf.float32,
                                            [None, step, 256, 256, 3])
            self.step = step
            self.targetholder = tf.placeholder(tf.float32,
                                               [None, step, 256, 256, 3])

        with tf.variable_scope('Head'):
            inp_split = tf.unstack(self.inpholder, axis=1)
            features = []
            for i in range(len(inp_split)):
                features.append(N.conv_layers(inp_split[i], i != 0))
            features = tf.stack(features, axis=1)

        lstm_out = M.SimpleLSTM(4 * 4 * 128).apply(features)
        with tf.variable_scope('Tail'):
            feat_split = tf.unstack(lstm_out, axis=1)
            # use only the last frame for now
            feat = feat_split[-1]
            A, C = N.deconv_layers(feat)
            self.recon = A * C + (1. - A) * self.inpholder[:, -1]

        self.A = A
        self.C = C
        self.build_loss()

        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        M.loadSess('./model/', self.sess, init=True)
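
Note: none of these examples show `M.loadSess` itself. Judging only from its call sites in this listing (positional `(path, sess)`, keyword `sess=` and `modpath=`, optional `init` and `var_list`, an occasional `(sess, epoch, step)` return, plus the `M.counter` read in example #14), a minimal sketch of such a helper might look like the following. The signature and the epoch/iteration parsing are assumptions, not the actual sul implementation.

import os
import re
import tensorflow as tf

def loadSess(modelpath=None, sess=None, init=False, var_list=None, modpath=None):
    # Hypothetical reconstruction based only on the call sites above:
    # restore the newest checkpoint under `modelpath` (or the explicit
    # checkpoint file `modpath`) into `sess`, optionally initializing
    # variables first.
    saver = tf.train.Saver(var_list)
    if init:
        sess.run(tf.global_variables_initializer())
    epoch, step = 0, 0
    ckpt = modpath or (modelpath and tf.train.latest_checkpoint(modelpath))
    if ckpt:
        saver.restore(sess, ckpt)
        # Some callers save checkpoints named 'Epoc_<e>_Iter_<s>.cpkt' and
        # unpack (sess, epoch, step); recover those numbers when present.
        m = re.search(r'Epoc_?(\d+).*?Iter_?(\d+)', os.path.basename(ckpt))
        if m:
            epoch, step = int(m.group(1)), int(m.group(2))
    return sess, epoch, step
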
Code example #2
File: train_gan_old.py Project: wonggw/sul
def main():
    holders, losses, upops, trains, x_fake, lb_ls = build_graph()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        M.loadSess('./model/', sess, init=True)
        data = read_data()
        for iteration in range(10000):
            train_batch = random.sample(data, BSIZE)
            img_batch = [i[0] for i in train_batch]
            gnd_batch = [i[1] for i in train_batch]
            feeddict = {holders[0]: img_batch, holders[1]: gnd_batch}
            g_loss, d_loss, _, _, _, _, lb_g, lb_d = sess.run(
                losses + upops + trains + lb_ls, feed_dict=feeddict)
            print('Iter:', iteration, '\tLoss_g:', g_loss, '\tLb_g:', lb_g,
                  '\tLoss_d:', d_loss)
            # keep updating only the generator while lb_g exceeds lb_d + 0.7
            while lb_g > lb_d + 0.7:
                train_batch = random.sample(data, BSIZE)
                img_batch = [i[0] for i in train_batch]
                gnd_batch = [i[1] for i in train_batch]
                feeddict = {holders[0]: img_batch, holders[1]: gnd_batch}
                g_loss, d_loss, _, _, lb_g, lb_d = sess.run(
                    losses + [upops[0], trains[0]] + lb_ls, feed_dict=feeddict)
                print('Iter:', iteration, '\tLoss_g:', g_loss, '\tLb_g:', lb_g,
                      '\tLoss_d:', d_loss)
            if iteration % 100 == 0 and iteration > 0:
                saver.save(sess, './model/' + str(iteration) + '.ckpt')
            if iteration % 20 == 0:
                gen = sess.run(x_fake, feed_dict=feeddict)
                show_sample(gen, img_batch, gnd_batch, iteration)
Code example #3
File: net.py Project: FUHUACANGYUE/sul
    def __init__(self):
        with tf.variable_scope('holders'):
            self.inp_holder = tf.placeholder(tf.float32, [None, 28, 28, 1])
            self.lab_holder = tf.placeholder(tf.float32, [None, 10])

        with tf.variable_scope('mainMod'):
            mod = M.Model(self.inp_holder)
            mod.convLayer(7, 64, stride=2, activation=M.PARAM_RELU)
            mod.convLayer(5, 128, stride=2, activation=M.PARAM_RELU)
            mod.capsulization(dim=16, caps=8)
            mod.caps_conv(3, 8, 16, activation=None, usebias=False)
            mod.caps_flatten()
            mod.squash()
            mod.capsLayer(10, 8, 3, BSIZE=128)
            mod.squash()
            feat = mod.capsDown()

        with tf.variable_scope('loss'):
            length = tf.norm(feat, axis=2)
            self.length = length
            loss = self.lab_holder * tf.square(tf.maximum(
                0., 0.9 - length)) + 0.5 * (1 - self.lab_holder) * tf.square(
                    tf.maximum(0., length - 0.1))
            self.loss = tf.reduce_mean(tf.reduce_sum(loss, 1))
            self.accuracy = M.accuracy(length, tf.argmax(self.lab_holder, 1))

        with tf.variable_scope('opti'):
            self.train_op = tf.train.AdamOptimizer(0.001).minimize(self.loss)

        self.sess = tf.Session()
        M.loadSess('./model/', self.sess, init=True)
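
Note: the 'loss' scope above implements the capsule-network margin loss of Sabour et al. (2017), with $m^+ = 0.9$, $m^- = 0.1$ and $\lambda = 0.5$ down-weighting the absent classes; per class $k$:

L_k = T_k \max(0,\, m^+ - \lVert v_k \rVert)^2 + \lambda\,(1 - T_k)\,\max(0,\, \lVert v_k \rVert - m^-)^2

where $T_k$ is the one-hot entry of lab_holder and $\lVert v_k \rVert$ is the capsule length computed by tf.norm.
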
Code example #4
File: gan2.py Project: wonggw/sul
def training():
    with tf.Session() as sess:
        merged = tf.summary.merge_all()
        print('creating log file...')
        writer = tf.summary.FileWriter('./logs/', sess.graph)
        saver = tf.train.Saver()
        M.loadSess(modelpath, sess=sess)
        imgs = list(getImgs())
        print('start training...')
        for i in range(MAXITER):
            a = np.random.uniform(size=[BSIZE, ZDIM], low=-1.0, high=1.0)
            # for _ in range(3):
            # sess.run(trainG,feed_dict={z:a})
            _, mg, lsd, lsg, _, _ = sess.run(
                [trainAll, merged, lossD, lossG, UPDD, UPDG],
                feed_dict={
                    z: a,
                    imgholder: random.sample(imgs, BSIZE)
                })
            if i % 1 == 0:
                writer.add_summary(mg, i)
                print('iter:', i)
                print('lsd:', lsd)
                print('lsg:', lsg)
            if (i + 1) % 100 == 0:
                getGeneratedImg(sess, i + 1)
            if (i + 1) % 1000 == 0:
                saver.save(
                    sess, modelpath + 'Model_epoc' + str(i + 1) + 'Time' +
                    datetime.now().strftime('%Y%m%d%H%M%S') + '.ckpt')
Code example #5
    def __init__(self, class_num):

        inp_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
        lab_holder = tf.placeholder(tf.int32, [None, 460, 460])
        mask_holder = tf.placeholder(tf.float32, [None, 460, 460])

        mask = tf.expand_dims(mask_holder, -1)
        c_ = tf.concat([inp_holder, mask], -1)
        merged_layer = self.merging_layer(c_)

        self.net_body = seg_main_body(merged_layer)
        seg_layer = self.segmentation_layer(self.net_body.feature_layer, 12,
                                            class_num)
        self.build_loss(seg_layer, lab_holder)

        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        M.loadSess('./model/',
                   self.sess,
                   init=True,
                   var_list=self.net_body.var)

        self.inp_holder = inp_holder
        self.lab_holder = lab_holder
        self.seg_layer = seg_layer
        self.mask_holder = mask_holder
Code example #6
    def __init__(self, class_num, is_training=True, mod_dir='./model/'):
        self.mod_dir = mod_dir
        with tf.variable_scope('Input'):
            self.img_holder = tf.placeholder(tf.float32, [None, 128, 128, 3])
            self.lab_holder = tf.placeholder(tf.float32, [None, class_num])
        with tf.variable_scope('Res_101_cy'):
            mod = M.Model(self.img_holder)
            mod.set_bn_training(is_training)
            # 64x64
            mod.convLayer(7,
                          64,
                          stride=2,
                          activation=M.PARAM_LRELU,
                          batch_norm=True)
            mod.res_block(256, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(256, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(256, stride=1, activation=M.PARAM_LRELU)
            # 32x32
            mod.res_block(512, stride=2, activation=M.PARAM_LRELU)
            mod.res_block(512, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(512, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(512, stride=1, activation=M.PARAM_LRELU)
            # 16x16
            for i in range(14):
                # only the first block downsamples (stride 2); the rest keep
                # the 16x16 resolution
                mod.res_block(1024,
                              stride=2 if i == 0 else 1,
                              activation=M.PARAM_LRELU)
            # 8x8
            mod.res_block(2048, stride=2, activation=M.PARAM_LRELU)
            mod.res_block(2048, stride=1, activation=M.PARAM_LRELU)
            mod.res_block(2048, stride=1, activation=M.PARAM_LRELU)
            mod.avgpoolLayer(8)
            mod.flatten()
            #mod.fcLayer(256,nobias=True)
        self.feat = mod.get_current_layer()
        with tf.variable_scope('Classification'):
            logit_layer, eval_layer = M.enforcedClassifier(self.feat,
                                                           self.lab_holder,
                                                           dropout=1,
                                                           multi=None,
                                                           L2norm=False)
            self.accuracy = M.accuracy(eval_layer,
                                       tf.argmax(self.lab_holder, -1))

        if is_training:
            print('Building optimizer...')
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logit_layer, labels=self.lab_holder))
            with tf.control_dependencies(M.get_update_ops()):
                self.train_op = tf.train.AdamOptimizer(0.0001).minimize(
                    self.loss)

        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        M.loadSess(mod_dir, self.sess, init=True)
Code example #7
File: AIM.py Project: FUHUACANGYUE/sul
    def __init__(self, age_size, id_num, model_path='./aim_model/'):
        self.model_path = model_path
        self.inp_holder = tf.placeholder(tf.float32, [None, 128, 128, 3])
        # self.real_holder = tf.placeholder(tf.float32,[None,128,128,3])
        self.uni_holder = tf.placeholder(tf.float32, [None, 2, 2, 512])
        self.age_holder = tf.placeholder(tf.float32, [None, age_size])
        self.target_holder = tf.placeholder(tf.float32, [None, 128, 128, 3])
        self.id_holder = tf.placeholder(tf.float32, [None, id_num])

        # get_feature
        self.feat = N.feat_encoder(self.inp_holder)

        # retrieve tensor for adv1 and ip
        adv1, ip = N.discriminator_f(self.feat, id_num)
        adv1_uni, _ = N.discriminator_f(self.uni_holder, id_num)

        # get attention A and C
        age_expanded = self.expand(self.age_holder, self.feat)
        aged_feature = tf.concat([age_expanded, self.feat], -1)
        self.A, self.C = N.generator_att(aged_feature)

        # construct synthesized image
        self.generated = self.A * self.C + (1. - self.A) * self.inp_holder

        # retrieve tensor for adv2 and ae
        adv2, age_pred = N.discriminator(self.generated, age_size)
        adv2_real, age_pred_real = N.discriminator(self.target_holder,
                                                   age_size)

        # retrieve tensor for ai1 and ai2
        ai1 = N.age_classify_r(self.feat, age_size)
        ai2 = N.age_classify(self.feat, age_size)

        # call loss builder functions
        print('Building losses...')
        self.build_loss_mc()
        self.build_loss_adv1(adv1, adv1_uni)
        self.build_loss_ip(ip)
        self.build_loss_adv2(adv2, adv2_real)
        self.build_loss_ae(age_pred, age_pred_real)
        self.build_loss_ai1(ai1)
        self.build_loss_ai2(ai2, age_size)
        self.build_loss_A()
        self.update_ops()
        self.accuracy = M.accuracy(ip, tf.argmax(self.id_holder, -1))

        self.sess = tf.Session()
        M.loadSess(model_path, self.sess, init=True)
        self.saver = tf.train.Saver()
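
Note: examples #1, #7 and #22 all synthesize the output image with the same attention blend: with a per-pixel attention map $A$ and generated content $C$,

\hat{x} = A \odot C + (1 - A) \odot x

so the generator only paints the regions selected by $A$ and copies the rest from the input $x$.
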
Code example #8
    def __init__(self):
        inp_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
        lab_holder = tf.placeholder(tf.int32, [None, 460, 460])

        self.net_body = seg_main_body(inp_holder)
        seg_layer = self.segmentation_layer(self.net_body.feature_layer, 12)
        self.build_loss(seg_layer, lab_holder)

        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        M.loadSess('./savings_bgfg/',
                   self.sess,
                   init=True,
                   var_list=M.get_trainable_vars('bg_fg/WideRes'))

        self.inp_holder = inp_holder
        self.lab_holder = lab_holder
        self.seg_layer = seg_layer
Code example #9
    def __init__(self, class_num):
        size = self.size = 460
        self.class_num = class_num

        # build placeholders
        inp_holder = tf.placeholder(tf.float32, [None, size, size, 3],
                                    name='image_holder')
        seg_holder = tf.placeholder(tf.float32, [None, size, size, class_num],
                                    name='segment_holder')
        mask_holder = tf.placeholder(tf.float32, [None, size, size],
                                     name='mask_holder')
        coord_holder = tf.placeholder(tf.float32, [None, size, size, 6],
                                      name='coordinate_holder')
        inst_holder = tf.placeholder(tf.float32, [None, class_num],
                                     name='instance_holder')

        # construct input (4 -> 3 with 1x1 conv)
        merged_layer = self.merging_layer(inp_holder, seg_holder, mask_holder)

        # build network
        self.get_coord(size)
        self.net_body = seg_main_body(merged_layer)

        stream_list = self.get_stream_list(self.net_body.feature_maps)
        inst_pred = self.inst_layer(self.net_body.feature_layer,
                                    stream_list[-1], class_num)
        # NOTE: `seg_layer` is never defined in this snippet, and
        # `lab_holder` looks like a stale name for `seg_holder`
        self.build_loss(seg_layer, stream_list, inst_pred, seg_holder,
                        mask_holder, coord_holder, inst_holder)

        # build saver and session
        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        # self.writer = tf.summary.FileWriter('./logs/',self.sess.graph)
        M.loadSess('./model/',
                   self.sess,
                   init=True,
                   var_list=self.net_body.var)

        # set class variables
        # holders
        self.inp_holder = inp_holder
        self.seg_holder = seg_holder
        self.mask_holder = mask_holder
        self.coord_holder = coord_holder
        self.inst_holder = inst_holder
        # layers (note: this assignment shadows the inst_layer method,
        # which has already been called above)
        self.coord_layer = stream_list[-1]
        self.inst_layer = inst_pred
Code example #10
    def __init__(self):
        inp_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
        lab_holder = tf.placeholder(tf.int32, [None, 460, 460])

        self.net_body = seg_main_body(inp_holder)
        seg_layer = self.segmentation_layer(self.net_body.feature_layer, 12)
        self.build_loss(seg_layer, lab_holder)

        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        M.loadSess('./model/',
                   self.sess,
                   init=True,
                   var_list=self.net_body.var)

        self.inp_holder = inp_holder
        self.lab_holder = lab_holder
        self.seg_layer = seg_layer
Code example #11
def evaluate():
    import cv2
    # listf = open('set1list.txt')
    listf = open('1k5_set2.txt')
    imglist = []
    for i in listf:
        imglist.append(i.replace('\n', ''))
    with tf.Session() as sess:
        M.loadSess(modelpath, sess=sess, modpath=modelpath + modelname)
        res = []
        imgs = []
        print('reading images...')
        for pic in imglist:
            img = cv2.imread(pic, 1)
            img = cv2.resize(img, (122, 144))
            M2 = np.float32([[1, 0, 11], [0, 1, 0]])
            img = cv2.warpAffine(img, M2, (144, 144))
            img = cv2.flip(img, 1)
            img = np.float32(img)[8:136, 8:136]
            img = cv2.resize(img, (224, 224))
            imgs.append(img)
        splits = len(imgs) // 100
        print(splits, 'splits in total...')
        for i in range(splits - 1):
            cl = sess.run(tf.nn.softmax(evallayer),
                          feed_dict={imgholder: imgs[i * 100:i * 100 + 100]})
            print(cl.shape)
            res.append(cl)
        cl = sess.run(tf.nn.softmax(evallayer),
                      feed_dict={imgholder: imgs[(splits - 1) * 100:]})
        print(cl.shape)
        res.append(cl)
        res = np.concatenate(res, axis=0)
        print(res.shape)
        import scipy.io as sio
        sio.savemat('enf56_1.mat', {'data': res})
        fout = 'testresult.txt'
        fout = open(fout, 'w')
        for i in res:
            fout.write(str(i) + '\n')
        fout.close()
Code example #12
File: train.py Project: liuhan26/RGB2Depth_exp
def test():
    args = parser.parse_args()
    test_acc = 0
    rgb_file_txt = '/home/wtx/RGBD_dataset/eaststation/test/test_3Dgallery.txt'
    depth_file_txt = '/home/wtx/RGBD_dataset/eaststation/test/test_3Dprobe.txt'
    root_folder = '/home/wtx/RGBD_dataset/eaststation/'
    imgs, labs = concat_rgb_and_depth(root_folder, rgb_file_txt, depth_file_txt)
    img_holder = tf.placeholder(tf.float32, [None, 128, 128, 2])
    lab_holder = tf.placeholder(tf.int64, [None])
    acc = _LCNN9(img_holder, lab_holder)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    M.loadSess('../tfmodel/', sess)
    for iter in range(len(labs) // args.batch_size):
        test_acc += sum(sess.run([acc], feed_dict={
            img_holder: imgs[iter * args.batch_size:(iter + 1) * args.batch_size],
            lab_holder: labs[iter * args.batch_size:(iter + 1) * args.batch_size]}))
    sess.close()
    ave_acc = test_acc / (len(labs) // args.batch_size)
    print('The Accuracy in Test Set:' + str(ave_acc))
Code example #13
	def __init__(self, class_num):
		img_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
		with tf.variable_scope('bg_fg'):
			net_bgfg = network_bg_fg(img_holder)
		with tf.variable_scope('seg_part'):
			net_seg = network_seg(class_num, img_holder,
				tf.nn.softmax(tf.image.resize_images(net_bgfg.seg_layer, [460, 460]), 1)[:, :, :, 1])
		with tf.variable_scope('inst_part'):
			net_inst = network_inst(class_num, img_holder,
				tf.nn.softmax(tf.image.resize_images(net_bgfg.seg_layer, [460, 460]), 1)[:, :, :, 1],
				tf.image.resize_images(net_seg.seg_layer, [460, 460]))

		# bind the network instances so their holders and layers are
		# reachable below
		self.net_bgfg = net_bgfg
		self.net_seg = net_seg
		self.net_inst = net_inst

		self.img_holder = img_holder
		self.mask_holder = net_bgfg.lab_holder
		self.seg_holder = net_seg.lab_holder
		self.coord_holder = net_inst.coord_holder
		self.inst_holder = net_inst.inst_holder

		self.mask_out = net_bgfg.seg_layer
		self.seg_out = net_seg.seg_layer
		self.inst_num_out = net_inst.inst_layer
		self.coord_out = net_inst.coord_layer

		self.build_loss()

		self.sess = tf.Session()
		M.loadSess('./savings_bgfg/', sess=self.sess, init=True, var_list=M.get_all_vars('bg_fg'))
		M.loadSess('./savings_seg/', sess=self.sess, var_list=M.get_all_vars('seg_part'))
		M.loadSess('./savings_inst/', sess=self.sess, var_list=M.get_all_vars('inst_part'))
Code example #14
def training():
    merged = tf.summary.merge_all()
    data = getData()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        writer = tf.summary.FileWriter('./logs/', sess.graph)
        M.loadSess('./model/', sess=sess)
        counter = M.counter
        for i in range(1000000):
            counter += 1
            sample = random.sample(data, BSIZE)
            x_train = [i[0] for i in sample]
            y_train = [i[1] for i in sample]
            a = np.random.uniform(size=[BSIZE, ZDIM], low=-1.0, high=1.0)
            a = a / np.linalg.norm(a, axis=1, keepdims=True)
            # ge = sess.run(generated,feed_dict={z:a})
            for _ in range(5):
                sess.run(trainG, feed_dict={z: a, imgholder: x_train})
            _, _, mg, lsd, lsg, lsc = sess.run(
                [trainC, trainD, merged, lossD, lossG, lossC],
                feed_dict={
                    z: a,
                    imgholder: x_train,
                    classholder: y_train
                })
            if i % 5 == 0:
                writer.add_summary(mg, counter)
                print('iter:', i)
                print('lsd:', lsd)
                print('lsg:', lsg)
                print('lsc:', lsc)
            if (i + 1) % 100 == 0:
                getGeneratedImg(sess, i + 1)
            if (i + 1) % 1000 == 0:
                saver.save(sess,
                           './model/ModelCounter' + str(counter) + '.ckpt')
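
Note: example #14 draws the latent batch uniformly and then divides each row by its L2 norm, projecting the noise onto the unit hypersphere:

z \sim \mathcal{U}(-1, 1)^{ZDIM}, \qquad z \leftarrow z / \lVert z \rVert_2

a common trick for keeping GAN latent codes on a fixed-radius manifold.
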
Code example #15
def getSample():
    with tf.Session() as sess:
        data = getData()
        M.loadSess('./model/', sess=sess)
        for i in range(20):
            x_train = random.sample(data, 1)
            # print(x_train[0].shape)
            x_train = np.float32(x_train[0][0]).reshape([-1, 128, 128, 1])
            for j in range(8):
                # a = np.random.uniform(size=[1,ZDIM],low=-0.2,high=0.2)
                a = np.zeros([1, ZDIM], dtype=np.float32)
                genimg = sess.run(generated,
                                  feed_dict={
                                      imgholder: x_train,
                                      noise: a
                                  })
                genimg = (genimg + 1) * 127
                genimg = genimg.astype(np.uint8)
                cv2.imwrite('./sampleimg/' + str(i) + 'gen' + str(j) + '.jpg',
                            cv2.resize(genimg[0], (128, 128)))
                cv2.imwrite(
                    './sampleimg/' + str(i) + 'org.jpg',
                    cv2.resize(((x_train[0] + 1) * 127).astype(np.uint8),
                               (128, 128)))
Code example #16
File: train.py Project: liuhan26/RGB2Depth_exp
def tfreord_train(tfrecord_path):
    args = parser.parse_args()
    # rgb_file_txt = '/Volumes/Untitled/eaststation/test/test_3Dtexture.txt'
    # depth_file_txt = '/Volumes/Untitled/eaststation/test/test_3Ddepth.txt'
    # root_folder = '/Volumes/Untitled/eaststation/test/'
    images, labels = input(args.batch_size, args.batch_size, tfrecord_path)
    loss, acc = LCNN29(images, labels)
    l2_loss = tf.losses.get_regularization_loss()
    loss += l2_loss
    train_op = tf.train.MomentumOptimizer(0.00001, 0.9).minimize(loss)
    # train_op = tf.train.AdamOptimizer(0.0001).minimize(loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess, epoch, step = M.loadSess('../tfmodel_LCNN29/', sess)
        saver = tf.train.Saver()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            while not coord.should_stop():
                _, loss_value, accuracy = sess.run([train_op, loss, acc])
                step += 1
                if (args.batch_size * step) % args.samples_num == 0:
                    epoch += 1
                if step % 1 == 0:
                    print('epoch = %d  iter = %d loss = %.2f' % (epoch, step, loss_value))
                    print('accuracy = %.2f' % accuracy)
                if step % 200 == 0:
                    save_path = '../tfmodel_LCNN29/Epoc_' + str(epoch) + '_' + 'Iter_' + str(step) + '.cpkt'
                    saver.save(sess, save_path)
                    save_path2 = save_path + '.meta'
                    save_path3 = save_path + '.index'
                    save_path4 = save_path + '.data-00000-of-00001'
                    save_path5 = '../tfmodel_LCNN29/checkpoint'

                    shutil.copy(save_path2, save_path2.replace('../tfmodel_LCNN29/', '../backup_LCNN29/'))
                    shutil.copy(save_path3, save_path3.replace('../tfmodel_LCNN29/', '../backup_LCNN29/'))
                    shutil.copy(save_path4, save_path4.replace('../tfmodel_LCNN29/', '../backup_LCNN29/'))
                    shutil.copy(save_path5, save_path5.replace('../tfmodel_LCNN29/', '../backup_LCNN29/'))

        except tf.errors.OutOfRangeError:
            print('Done training for %d steps' % (step))
        finally:
            coord.request_stop()
        coord.join(threads)
Code example #17
    def __init__(self, class_num):
        img_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
        with tf.variable_scope('bg_fg'):
            net_bgfg = network_bg_fg(img_holder)
        with tf.variable_scope('seg_part'):
            bg_fg_upsample = tf.nn.softmax(
                tf.image.resize_images(net_bgfg.seg_layer,
                                       img_holder.get_shape().as_list()[1:3]),
                1)[:, :, :, 1]
            net_seg = network_seg(img_holder, class_num, bg_fg_upsample)
        with tf.variable_scope('inst_part'):
            net_inst = network_inst(
                img_holder, class_num,
                tf.nn.softmax(
                    tf.image.resize_images(net_bgfg.seg_layer,
                                           tf.shape(img_holder)[1:3]),
                    1)[:, :, :, 1],
                tf.image.resize_images(net_seg.seg_layer,
                                       tf.shape(img_holder)[1:3]))

        self.net_bgfg = net_bgfg
        self.net_seg = net_seg
        self.net_inst = net_inst

        self.img_holder = img_holder

        self.mask_out = tf.image.resize_images(net_bgfg.seg_layer,
                                               tf.shape(img_holder)[1:3])
        self.seg_out = tf.image.resize_images(net_seg.seg_layer,
                                              tf.shape(img_holder)[1:3])
        self.inst_num_out = net_inst.inst_layer
        self.coord_out = net_inst.coord_layer

        self.sess = tf.Session()
        M.loadSess('./savings_bgfg/',
                   sess=self.sess,
                   init=True,
                   var_list=M.get_all_vars('bg_fg'))
        M.loadSess('./savings_seg/',
                   sess=self.sess,
                   var_list=M.get_all_vars('seg_part'))
        M.loadSess('./savings_inst/',
                   sess=self.sess,
                   var_list=M.get_all_vars('inst_part'))
Code example #18
    def load(self, path):
        M.loadSess(sess=self.sess, modpath=path)
Code example #19
    mod.conv_layer(3, 128 * 2, activation=1)

    mod.sum(d)
    mod.conv_layer(1, 256, activation=1)
    mod.conv_layer(3, 256, activation=1)
    mod.maxpooling_layer(2, 2)  #pool4

    mod.flatten()
    mod.fcnn_layer(512)
    feature_layer = mod.get_current_layer()[0]

    return feature_layer, img_holder


with tf.variable_scope('LCNN29'):
    feature_layer, img_holder = LCNN29()

#
sess = tf.Session()
model_path = './model/Epoc_49_Iter_663.cpkt'
M.loadSess(model_path, sess)


def eval(img):
    res = sess.run(feature_layer, feed_dict={img_holder: img})
    return res


def __exit__():
    sess.close()
Code example #20
    block(mod, 128)
    block(mod, 128)
    block(mod, 128)
    block(mod, 128)
    mod.convLayer(1, 256, activation=M.PARAM_MFM)
    mod.convLayer(3, 256, activation=M.PARAM_MFM)
    mod.maxpoolLayer(2)
    mod.flatten()
    mod.fcLayer(512)
    featurelayer = mod.get_current_layer()
    # with tf.variable_scope('enforced_layer'):
    # 	classlayer,evallayer = M.enforcedClassfier(featurelayer,512,labholder,BSIZE,CLASS,dropout=1,enforced=True)
    return imgholder, featurelayer


with tf.variable_scope('MainModel'):
    imgholder, featurelayer = res_18()

sess = tf.Session()
M.loadSess('./model2/', sess=sess, modpath='./modelres/Epoc0Iter20999.ckpt')
# M.loadSess('./model2/',sess=sess,modpath='./model2/Epoc2Iter29099.ckpt')


def eval(img):
    res = sess.run(featurelayer, feed_dict={imgholder: img})
    return res


def __exit__():
    sess.close()
Code example #21
		cropped = item[0]
		coord = item[1]
		lb = get_lb(coord,crds)
		lbs.append([cropped,lb])
		# x,y,w,h = 
		# cv2.rectangle(img,())
	return lbs

reader = datareader2.reader(height=240,width=320,scale_range=[0.05,1.2])
b0,b1,c0,c1 = netpart.model_out

start_time = time.time()
MAXITER = 100000
with tf.Session() as sess:
	saver = tf.train.Saver()
	M.loadSess('./model/',sess,init=True,var_list=M.get_trainable_vars('MSRPN'))
	for i in range(MAXITER):
		img,coord = reader.get_img()
		buff_out = sess.run([b0,b1,c0,c1],feed_dict={netpart.inpholder:[img]})
		bs,cs = buff_out[:2],buff_out[2:]
		lbs = crop(img,bs,cs,coord)
		train_imgs = [k[0] for k in lbs]
		train_labs = [k[1] for k in lbs]
		# for item in lbs:
		# 	cv2.imshow('ad',item[0])
		# 	print(item[1])
		# 	cv2.waitKey(0)
		ls,ac,_ = sess.run([net_veri.loss,net_veri.accuracy,net_veri.ts],
			feed_dict={net_veri.inputholder:train_imgs,net_veri.labelholder:train_labs})
		if i%10==0:
			t2 = time.time()
Code example #22
    def __init__(self, model_path='./aim_model_gen/'):
        self.model_path = model_path
        self.inp_holder = tf.placeholder(tf.float32, [None, 128, 128, 3])
        self.age_holder = tf.placeholder(tf.float32, [None, 1])
        self.age_holder2 = tf.placeholder(tf.float32, [None, 1])

        # get attention A and C
        age_expanded = self.expand(self.age_holder, self.inp_holder)
        aged_feature = tf.concat([age_expanded, self.inp_holder], -1)
        A, C = N.generator_att(aged_feature)
        # construct synthesized image
        generated = A * C + (1. - A) * self.inp_holder

        # get attention A2 and C2
        age_expanded2 = self.expand(self.age_holder2, generated)
        aged_feature2 = tf.concat([age_expanded2, generated], -1)
        A2, C2 = N.generator_att(aged_feature2)
        generated2 = A2 * C2 + (1. - A2) * generated

        # retrieve tensor for adv2 and ae
        adv2, age_pred = N.discriminator(generated)
        adv2_real, age_pred_real = N.discriminator(self.inp_holder)

        adv2_2, age_pred2 = N.discriminator(generated2)

        feat = N.feat_encoder(self.inp_holder)
        feat1 = N.feat_encoder(generated)
        feat2 = N.feat_encoder(generated2)

        self.feat_loss = tf.reduce_mean(
            tf.square(feat - feat1) + tf.square(feat - feat2))
        self.train_feat = tf.train.AdamOptimizer(0.00001).minimize(
            self.feat_loss, var_list=M.get_all_vars('gen_att'))

        # get gradient penalty

        # gamma1 = tf.random_uniform([],0.0,1.0)
        # interp1 = gamma1 * generated + (1. - gamma1) * self.inp_holder
        # interp1_y, _ = N.discriminator(interp1, 7)
        # grad_p1 = tf.gradients(interp1_y, interp1)[0]
        # grad_p1 = tf.sqrt(tf.reduce_sum(tf.square(grad_p1),axis=[1,2,3]))
        # grad_p1 = tf.reduce_mean(tf.square(grad_p1 - 1.) * 10.)

        # gamma2 = tf.random_uniform([],0.0,1.0)
        # interp2 = gamma2 * generated + (1. - gamma2) * self.inp_holder
        # interp2_y, _ = N.discriminator(interp2, 7)
        # grad_p2 = tf.gradients(interp2_y, interp2)[0]
        # grad_p2 = tf.sqrt(tf.reduce_sum(tf.square(grad_p2),axis=[1,2,3]))
        # grad_p2 = tf.reduce_mean(tf.square(grad_p2 - 1.) * 10.)

        grad_p1 = grad_p2 = 0.

        # call loss builder functions
        self.mc_loss, self.train_mc = self.build_loss_mc(
            generated2, self.inp_holder)
        self.adv2_loss_d1, self.adv2_loss_g1, self.train_adv2_1 = self.build_loss_adv2(
            adv2, adv2_real, grad_p1)
        self.adv2_loss_d2, self.adv2_loss_g2, self.train_adv2_2 = self.build_loss_adv2(
            adv2_2, adv2_real, grad_p2)
        self.age_cls_loss_dis, self.train_ae_dis = self.build_loss_ae_dis(
            age_pred_real, self.age_holder2)
        self.age_cls_loss_gen, self.train_ae_gen = self.build_loss_ae_gen(
            age_pred, self.age_holder)
        self.age_cls_loss_gen2, self.train_ae_gen2 = self.build_loss_ae_gen(
            age_pred2, self.age_holder2)
        self.loss_A, self.train_A = self.build_loss_A(A)
        self.loss_A2, self.train_A2 = self.build_loss_A(A2)
        self.update_ops()
        self.accuracy = M.accuracy(age_pred_real,
                                   tf.argmax(self.age_holder2, -1))
        self.A1_l, self.A2_l = tf.reduce_mean(tf.square(A)), tf.reduce_mean(
            tf.square(A2))

        self.generated = generated
        self.A, self.C = A, C

        self.sess = tf.Session()
        M.loadSess(model_path, self.sess, init=True)
        M.loadSess('./aim_model/',
                   self.sess,
                   var_list=M.get_all_vars('encoder'))
        self.saver = tf.train.Saver()
Code example #23
File: train.py Project: liuhan26/RGB2Depth_exp
def placeholder_train(imgs, labels):
    args = parser.parse_args()
    with tf.name_scope('img_holder'):
        img_holder = tf.placeholder(tf.float32, [args.batch_size, 128, 128, 1])
    with tf.name_scope('lab_holder'):
        lab_holder = tf.placeholder(tf.int64, [args.batch_size])
    val_imgs, val_labs = test_list()
    loss, acc = LCNN9(img_holder, lab_holder)
    # l2_loss = tf.losses.get_regularization_loss()
    # l2_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    # total_loss = loss + l2_loss
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(learning_rate=args.lr, global_step=global_step,
                                               decay_steps=20 * args.samples_num / args.batch_size,
                                               # decay_steps = 5000
                                               decay_rate=0.6, staircase=True)
    train_op = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess, epoch, step = M.loadSess('../tfmodel/', sess)
        saver = tf.train.Saver()
        for i in range(args.epoch):
            for j in range(args.samples_num // args.batch_size):
                images = imgs[j * args.batch_size:(j + 1) * args.batch_size]
                labs = labels[j * args.batch_size:(j + 1) * args.batch_size]
                _, loss_value, lr, accuracy = sess.run([train_op, loss, learning_rate, acc],
                                                           feed_dict={img_holder: images, lab_holder: labs})
                step += 1
                print('epoch = %d  iter = %d  loss = %.2f  learning_rate = %.6f' %
                      (epoch, step, loss_value, lr))
                print('accuracy = %.2f' % accuracy)

                if step % 2000 == 0:
                    save_path = '../tfmodel/Epoc_' + str(epoch) + '_' + 'Iter_' + str(step) + '.cpkt'
                    saver.save(sess, save_path)
                    save_path2 = save_path + '.meta'
                    save_path3 = save_path + '.index'
                    save_path4 = save_path + '.data-00000-of-00001'
                    save_path5 = '../tfmodel/checkpoint'

                    shutil.copy(save_path2, save_path2.replace('../tfmodel/', '../backup/'))
                    shutil.copy(save_path3, save_path3.replace('../tfmodel/', '../backup/'))
                    shutil.copy(save_path4, save_path4.replace('../tfmodel/', '../backup/'))
                    shutil.copy(save_path5, save_path5.replace('../tfmodel/', '../backup/'))
                    val_acc = 0
                    for it in range(len(val_labs) // args.batch_size):
                        val_acc += sum(sess.run([acc], feed_dict={
                            img_holder: val_imgs[it * args.batch_size:(it + 1) * args.batch_size],
                            lab_holder: val_labs[it * args.batch_size:(it + 1) * args.batch_size]}))
                    val_acc = val_acc / (len(val_labs) // args.batch_size)
                    print('The Accuracy in Val Set:' + str(val_acc))
                    # test_acc = 0
                    #rgb_file_txt = '/home/wtx/RGBD_dataset/eaststation/test/test_3Dgallery.txt'
                    #depth_file_txt = '/home/wtx/RGBD_dataset/eaststation/test/test_3Dprobe.txt'
                    #root_folder = '/home/wtx/RGBD_dataset/eaststation/'
                    #test_imgs, test_labs = concat_rgb_and_depth(root_folder, rgb_file_txt, depth_file_txt)
                    #for iter in range(len(test_labs) // args.batch_size):
                    #   test_acc += sum(sess.run([acc], feed_dict={
                    #        img_holder: test_imgs[iter * args.batch_size:(iter + 1) * args.batch_size],
                    #        lab_holder: test_labs[iter * args.batch_size:(iter + 1) * args.batch_size]}))
                    #ave_acc = test_acc / (len(test_labs) // args.batch_size)
                    #print('The Accuracy in Test Set:' + str(ave_acc))
            epoch += 1
Code example #24
File: tpgan_domain_lnet.py Project: wonggw/sul
        M2 = np.float32([[1, 0, 11], [0, 1, 0]])
        img = cv2.warpAffine(img, M2, (144, 144))
        img = img[8:136, 8:136]
        data2.append(img)
    print('data2 length:', len(data2))
    return data2


build_total_graph()
with tf.Session() as sess:
    print('Writing log file...')
    saver = tf.train.Saver()
    reader = data_reader()
    # writer = tf.summary.FileWriter('./logs/',sess.graph)
    # data2 = get_data2()
    M.loadSess('./model/', sess, init=True)
    M.loadSess(modpath='/home/psl/7T/chengyu/modelres/Epoc0Iter20999.ckpt',
               sess=sess,
               var_list=M.get_trainable_vars('MainModel'))
    alltensor = [totalLoss, dis_loss, train_d, train_g, updd, updg]
    gentensor = [profHolder, x_fake, gtHolder]
    for iteration in range(100000):
        train_batch = reader.get_batch(BSIZE)
        # p2_batch = random.sample(data2,BSIZE)
        p_batch = [i[0] for i in train_batch]
        l_batch = [i[1] for i in train_batch]
        r_batch = [i[2] for i in train_batch]
        n_batch = [i[3] for i in train_batch]
        m_batch = [i[4] for i in train_batch]
        gt_batch = [i[5] for i in train_batch]
        lb_batch = [i[6] for i in train_batch]
Code example #25
    coords = list(coords)
    if coords:
        for i in range(len(coords)):
            x, y, w, h, _, cat = coords[i]
            cv2.rectangle(buff_img, (x - w // 2, y - h // 2),
                          (x + w // 2, y + h // 2), (0, 255, 0), 2)
            cv2.putText(buff_img, str(cat), (x, y),
                        cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 3, (0, 0, 255), 6)
    cv2.imshow(name, buff_img)
    cv2.waitKey(1)


i = 0

sess = tf.Session()
M.loadSess('./model/', sess, init=False)


def get_coord_from_detection(img, name="image"):

    b, c, cat = sess.run([netpart.bias, netpart.conf, netpart.cat],
                         feed_dict={netpart.inpholder: [img]})
    res_bias, res_conf = get_img_coord(img, c, b, cat, 64)
    if not res_conf:
        result_coords = []
    elif len(res_conf) == 1:
        result_coords = res_bias
    else:
        result_coords = non_max_sup(res_bias, res_conf)
    draw(img, result_coords, name)
    return result_coords
Code example #26
File: tpgan.py Project: wonggw/sul
        nseimg = (nseimg / 127.5 - 1.0)
        gtimg = (gtimg / 127.5 - 1.0)
        data.append([faceimg, leimg, reimg, nseimg, mthimg, gtimg])
    print(gtimg.max(), gtimg.min())
    print(faceimg.max(), faceimg.min())
    print('Data length:', len(data))
    return data


build_total_graph()
with tf.Session() as sess:
    print('Writing log file...')
    saver = tf.train.Saver()
    # writer = tf.summary.FileWriter('./logs/',sess.graph)
    data = get_data()
    M.loadSess('./model/', sess)
    M.loadSess(modpath='./modelres/Epoc0Iter20999.ckpt',
               sess=sess,
               var_list=M.get_trainable_vars('MainModel'))
    alltensor = [totalLoss, dis_loss, train_d, train_g, updd, updg]
    gentensor = [profHolder, x_fake, gtHolder]
    for iteration in range(100000):
        train_batch = random.sample(data, BSIZE)
        p_batch = [i[0] for i in train_batch]
        l_batch = [i[1] for i in train_batch]
        r_batch = [i[2] for i in train_batch]
        n_batch = [i[3] for i in train_batch]
        m_batch = [i[4] for i in train_batch]
        gt_batch = [i[5] for i in train_batch]
        z_batch = np.random.uniform(size=[BSIZE, ZDIM], low=-1.0, high=1.0)
        feeddict = {
Code example #27
File: train_c.py Project: wonggw/Robotics_MA4825
        j = aaa % col
        x = int(b[i][j][0]) + j * multip + multip // 2
        y = int(b[i][j][1]) + i * multip + multip // 2
        w = int(b[i][j][2])
        h = int(b[i][j][3])
        cv2.rectangle(img, (x - w // 2, y - h // 2), (x + w // 2, y + h // 2),
                      (0, 255, 0), 2)
    cv2.imshow(name, img)
    cv2.waitKey(1)


n_minibatches = 4
i = 0
with tf.Session() as sess:
    saver = tf.train.Saver()
    M.loadSess('drive/Colab/hand_gesture/model/', sess, init=False)
    while True:
        i += 1

        for j in range(n_minibatches):
            img, train_dic = reader.get_img()
            feed_dict_var = {
                netpart.inpholder: [img],
                netpart.b_labholder: [train_dic[1]],
                netpart.c_labholder: [train_dic[0]],
                netpart.cat_labholder: [train_dic[2]]
            }
            if j == 0:
                sess.run(netpart.zero_ops, feed_dict=feed_dict_var)

            sess.run(netpart.accum_ops, feed_dict=feed_dict_var)
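
Note: example #27 runs netpart.zero_ops once per outer step and netpart.accum_ops over n_minibatches, the standard TF1 gradient-accumulation pattern (an apply step presumably follows outside the snippet). A minimal sketch of how such ops are commonly built; the helper below is illustrative, not sul's actual code:

import tensorflow as tf

def make_accum_ops(loss, opt):
    # Accumulate gradients over several minibatches, then apply them once:
    # zero the buffers, add each minibatch's gradients, apply the sums.
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    gv = [(g, v) for g, v in zip(grads, tvars) if g is not None]
    accums = [tf.Variable(tf.zeros_like(v), trainable=False) for _, v in gv]
    zero_ops = [a.assign(tf.zeros_like(a)) for a in accums]
    accum_ops = [a.assign_add(g) for a, (g, _) in zip(accums, gv)]
    apply_op = opt.apply_gradients([(a, v) for a, (_, v) in zip(accums, gv)])
    return zero_ops, accum_ops, apply_op

In example #27, the three results would correspond to netpart.zero_ops, netpart.accum_ops, and an apply op run once per outer iteration.
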
Code example #28
with tf.name_scope('Optimizer'):
	train = tf.train.AdamOptimizer(0.00001).minimize(loss)
	# print (train)

# 
model_path = 'model/'
log_path = 'log/'
list_train = 'hd5_list_train.txt'
list_val = 'hd5_list_val.txt'
f_log = open('log/log.txt', 'a+')

with tf.Session() as sess:

	writer = tf.summary.FileWriter(log_path, sess.graph)
	sess, epoc, iters = M.loadSess("model/", sess)
	saver = tf.train.Saver()

	reader = hd5_reader(list_train, list_val, BSIZE, BSIZE)
	ITERS = reader.train_epoc
	count = 0
	for j in range(EPOC):
		for i in range(ITERS):
			x_train, y_train_ = reader.train_nextbatch()
			global BSIZE
			BSIZE = reader.train_bsize
			# print ('BSIZE:', BSIZE)
			y_train = np.zeros([BSIZE,CLASS],dtype=np.int64)
			for index in range(BSIZE):
				y_train[index][y_train_[index]] = 1
Code example #29
File: basicLSTM.py Project: FUHUACANGYUE/sul
    out, cell = lstm.apply(a_split[i], out, cell)
    out_decoded = mod(out)
    out_split.append(out_decoded)

out = tf.stack(out_split, 1)  # shape [None, 6, 1], matching label_holder

label_holder = tf.placeholder(tf.float32, [None, 6, 1])

# loss = tf.reduce_mean(tf.square(label_holder - out))
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=out, labels=label_holder))

train_op = tf.train.AdamOptimizer(0.01).minimize(loss)

with tf.Session() as sess:
    M.loadSess('./model/', sess, init=True)
    for i in range(10000):
        batch_x, batch_y = getInputVector()
        mask = np.ones([1, 6, 1])

        ls, _ = sess.run([loss, train_op],
                         feed_dict={
                             a: [batch_x],
                             label_holder: [batch_y]
                         })
        if i % 1000 == 0:
            print(ls)
    test_x, test_y = getInputVector()
    o = sess.run(out, feed_dict={a: [test_x]})
    print(test_x)
    print(test_y)
Code example #30
	loss, acc, img_holder, lab_holder = LCNN29()

with tf.name_scope('Optimizer'):
	train = tf.train.AdamOptimizer(0.00001).minimize(loss)
	# print (train)

# 
model_path = './model/'
log_path = './log/'
list_train = 'hd5_list_train.txt'
list_val = 'hd5_list_val.txt'
f_log = open('./log/log.txt', 'a+')
with tf.Session() as sess:

	writer = tf.summary.FileWriter(log_path, sess.graph)
	sess, epoc, iters = M.loadSess(model_path, sess)
	saver = tf.train.Saver()

	reader = hd5_reader(list_train, list_val, BSIZE, BSIZE)
	ITERS = reader.train_epoc
	count = 0
	for j in range(EPOC):
		for i in range(ITERS):
			x_train, y_train_ = reader.train_nextbatch()
			global BSIZE
			BSIZE = reader.train_bsize
			# print ('BSIZE:', BSIZE)
			y_train = np.zeros([BSIZE,CLASS],dtype=np.int64)
			for index in range(BSIZE):
				y_train[index][y_train_[index]] = 1