def train_feature_generator(self):
    """Adversarially train the feature generator (sampler) on MNIST.

    Restores the feature extractor pretrained in Step 0, then alternates
    discriminator / generator updates until `self.train_feature_generator_iter`
    steps have run, logging summaries every 50 steps and checkpointing the
    sampler to `self.pretrained_feature_generator` every 5000 steps.
    """
    # Indentation normalized to spaces: the original mixed tabs and
    # 8-space indents, which is a TabError under Python 3.
    print('Training sampler.')

    images, labels = self.load_mnist(self.mnist_dir, split='train')
    labels = utils.one_hot(labels, 10)

    # build a graph
    model = self.model
    model.build_model()

    batch_size = self.batch_size
    noise_dim = 100
    epochs = 5000  # only used in the progress denominator of the log line

    with tf.Session(config=self.config) as sess:

        # initialize variables
        tf.global_variables_initializer().run()

        # restore feature extractor trained on Step 0
        print('Loading pretrained feature extractor.')
        variables_to_restore = slim.get_model_variables(scope='feature_extractor')
        restorer = tf.train.Saver(variables_to_restore)
        restorer.restore(sess, self.pretrained_feature_extractor)
        print('Loaded')

        summary_writer = tf.summary.FileWriter(logdir=self.log_dir, graph=tf.get_default_graph())
        saver = tf.train.Saver()

        t = 0

        while t <= self.train_feature_generator_iter:

            # zip() drops the final partial batch on purpose: every feed is
            # exactly batch_size samples.
            for start, end in zip(range(0, len(images), batch_size),
                                  range(batch_size, len(images), batch_size)):

                t += 1

                Z_samples = utils.sample_Z(batch_size, noise_dim, 'uniform')

                feed_dict = {model.noise: Z_samples,
                             model.images: images[start:end],
                             model.labels: labels[start:end]}

                # Fetch both discriminator outputs in a single run instead of
                # two separate graph executions.
                avg_D_fake, avg_D_real = sess.run(
                    [model.logits_fake, model.logits_real], feed_dict)

                sess.run(model.d_train_op, feed_dict)
                sess.run(model.g_train_op, feed_dict)

                if (t + 1) % 50 == 0:
                    summary, dl, gl = sess.run(
                        [model.summary_op, model.d_loss, model.g_loss], feed_dict)
                    summary_writer.add_summary(summary, t)
                    print('Step: [%d/%d] d_loss: %.6f g_loss: %.6f avg_D_fake: %.2f avg_D_real: %.2f '
                          % (t + 1, int(epochs * len(images) / batch_size), dl, gl,
                             avg_D_fake.mean(), avg_D_real.mean()))

                if (t + 1) % 5000 == 0:
                    saver.save(sess, self.pretrained_feature_generator)
# Example #2
    def eval_dsn(self):
        """Restore the trained generator + sampler, draw 1000 conditional
        samples, and dump each one as an image under ./sample/<label>/.
        """
        # build model
        model = self.model
        model.build_model()

        # load svhn dataset
        source_images, source_labels = self.load_svhn(self.src_dir)
        # NOTE(review): every label is forced to class 2 before one-hot
        # encoding, so all 1000 samples below belong to a single class --
        # confirm this is intentional.
        source_labels[:] = 2

        with tf.Session(config=self.config) as sess:

            print('Loading pretrained G.')
            variables_to_restore = slim.get_model_variables(scope='generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.test_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            # train model for source domain S
            src_labels = utils.one_hot(source_labels[:1000], 10)
            src_noise = utils.sample_Z(1000, 100, 'uniform')

            feed_dict = {
                model.src_noise: src_noise,
                model.src_labels: src_labels
            }

            samples = sess.run(model.sampled_images, feed_dict)

            for i in range(1000):

                print str(i) + '/' + str(len(samples)), np.argmax(
                    src_labels[i])
                plt.imshow(np.squeeze(samples[i]), cmap='gray')
                # NOTE(review): the imsave filename has no extension; recent
                # matplotlib versions need one to infer the output format.
                plt.imsave('./sample/' + str(np.argmax(src_labels[i])) + '/' +
                           str(i) + '_' + str(np.argmax(src_labels[i])),
                           np.squeeze(samples[i]),
                           cmap='gray')
    def features_to_pkl(self, seq_2_names=['...'], train_stage='dsn'):
        """Extract VGG16 features for the source sequence and every target
        sequence, run the trained feature generator once, and pickle
        (source_features, target_features, fzy) into the experiment dir.

        Args:
            seq_2_names: iterable of target SYNTHIA sequence names.
            train_stage: tag used both when extracting features and in the
                output pickle filename.
        """
        source_images, _ = load_synthia(self.seq_name, no_elements=900)

        source_features = self.extract_VGG16_features(source_images,
                                                      train_stage=train_stage)
        tf.reset_default_graph()

        target_features = dict()

        for s in seq_2_names:
            target_images, _ = load_synthia(s, no_elements=900)
            target_features[s] = self.extract_VGG16_features(
                target_images, train_stage=train_stage)
            tf.reset_default_graph()

        self.build_model(mode='train_feature_generator')

        with tf.Session() as sess:
            # initialize G and D
            tf.global_variables_initializer().run()

            print('Loading feature generator.')
            variables_to_restore = slim.get_model_variables(
                scope='feature_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.exp_dir + '/model/sampler')

            n_samples = 900
            noise = utils.sample_Z(n_samples, 100, 'uniform')

            # NOTE(review): only source_features[1:2] is fed, so fzy is the
            # generator output for a single source feature -- confirm.
            feed_dict = {self.noise: noise, self.fx: source_features[1:2]}

            fzy = sess.run([self.fzy], feed_dict)

            # HIGHEST_PROTOCOL is a binary protocol, so the file must be
            # opened in binary mode; text mode ('w') corrupts the stream on
            # platforms with newline translation.
            with open(self.exp_dir + '/features_' + train_stage + '.pkl',
                      'wb') as f:
                cPickle.dump((source_features, target_features, fzy), f,
                             cPickle.HIGHEST_PROTOCOL)
# Example #4
def train(args, model, sess):
    """Run the GAN training loop.

    Per step: `d_iters` discriminator updates, one generator update, and a
    summary write.  Losses are sampled every 100 steps for logging; a
    checkpoint is written every `args.saving_cycle` epochs.
    """
    summary_writer = tf.summary.FileWriter(model.log_dir, sess.graph)

    # load data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # load previous model
    model.load(sess, args.model_dir)

    steps_per_epoch = mnist.train.labels.shape[0] // args.batch_size

    for epoch in range(args.epoch):

        epoch_loss_d = []
        epoch_loss_g = []

        for step in range(steps_per_epoch):

            # train discriminator for d_iters times first
            d_iters = 5
            for _ in range(d_iters):
                bx, _ = mnist.train.next_batch(args.batch_size)
                bx = np.reshape(bx, [-1, img['w'], img['h'], img['c']])
                bz = sample_Z(args.batch_size, args.z_dim, args.sampler, args.num_classes, args.n_cat)
                sess.run(model.d_train_op, feed_dict={model.x: bx, model.z: bz})

            # train generator (reuses the last discriminator image batch bx)
            bz = sample_Z(args.batch_size, args.z_dim, args.sampler, args.num_classes, args.n_cat)
            sess.run(model.g_train_op, feed_dict={model.x: bx, model.z: bz})

            # for tensorboard
            summary, global_step = sess.run([model.summary_op, model.global_step],
                                            feed_dict={model.x: bx,
                                                       model.z: bz})
            summary_writer.add_summary(summary, global_step)

            if step % 100 == 0:
                bx, _ = mnist.train.next_batch(args.batch_size)
                bx = np.reshape(bx, [-1, img['w'], img['h'], img['c']])
                bz = sample_Z(args.batch_size, args.z_dim, args.sampler, args.num_classes, args.n_cat)

                d_loss = sess.run(
                    model.d_loss, feed_dict={model.x: bx, model.z: bz}
                )
                g_loss = sess.run(
                    model.g_loss, feed_dict={model.z: bz}
                )

                # Record losses only when freshly evaluated.  The original
                # appended the stale d_loss/g_loss on *every* step, so the
                # epoch means were dominated by repeated old samples.
                epoch_loss_d.append(d_loss)
                epoch_loss_g.append(g_loss)

                print('Epoch[{}/{}] Step[{}/{}] g_loss:{:.4f}, d_loss:{:.4f}'.format(epoch, args.epoch, step,
                                                                                     steps_per_epoch, g_loss,
                                                                                     d_loss))

        # Guard against an epoch in which no loss sample was taken
        # (e.g. steps_per_epoch == 0).
        if epoch_loss_d:
            mean_loss_d = sum(epoch_loss_d) / len(epoch_loss_d)
            mean_loss_g = sum(epoch_loss_g) / len(epoch_loss_g)

            print('Epoch:', '%04d' % epoch,
                  'G loss: {:.4}'.format(mean_loss_g),
                  'D loss: {:.4}'.format(mean_loss_d))

            # 'log_file' instead of 'file': don't shadow the builtin.
            with open(model.log_dir + "/training_loss.txt", "a+") as log_file:
                log_file.write("Epoch: %d\t LossD: %f\t LossG: %f\n" % (epoch, mean_loss_d, mean_loss_g))

        if epoch % args.saving_cycle == 0:
            model.save(sess, args.model_dir, global_step)
# Example #5
    def features(self):
        """Sample features from the trained sampler for random labels, then
        report (a) how many distinct binarized feature patterns occur and
        (b) how often the inferred label matches the sampled label.
        """
        # Indentation normalized to spaces (original mixed tabs); dead
        # commented-out code paths and the unused `split` local removed.

        # build a graph
        model = self.model
        model.build_model()

        with tf.Session(config=self.config) as sess:
            tf.global_variables_initializer().run()

            # Load pretrained feature extractor and sampler.
            print('Loading pretrained model.')
            variables_to_restore = slim.get_model_variables(scope='vgg_16')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            print('Loading pretrained sampler.')
            variables_to_restore = slim.get_model_variables(scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            no_items = 1000000

            # np.float was removed in NumPy 1.24; the builtin float is the
            # same (float64) dtype for np.zeros.
            features = np.zeros((no_items, model.hidden_repr_size), dtype=float)
            inf_labels = np.zeros((no_items,))

            noise = utils.sample_Z(no_items, 100, 'uniform')
            labels = utils.one_hot(npr.randint(19, size=no_items), 19)

            i = 0
            # Evaluate the sampler in 100 chunks to bound per-run memory.
            for n, l in zip(np.array_split(noise, 100),
                            np.array_split(labels, 100)):
                _feat_, inferred_labels = sess.run(
                    fetches=[model.fzy, model.inferred_labels],
                    feed_dict={model.noise: n, model.labels: l})
                features[i:i + len(n)] = np.squeeze(_feat_)
                inf_labels[i:i + len(n)] = inferred_labels
                i += len(n)

        # Binarize activations to +-1 and count distinct row patterns via a
        # void view (rows compared as raw bytes).
        features[features > 0] = 1
        features[features < 0] = -1
        tmpUnique = np.unique(features.view(np.dtype((np.void, features.dtype.itemsize * features.shape[1]))), return_counts=True)
        uniques = tmpUnique[0].view(features.dtype).reshape(-1, features.shape[1])
        print(uniques.shape)

        # How many sampled features the model classified as their own label.
        print(len(np.where(inf_labels == np.argmax(labels, 1))[0]))

        print('break')
    def train_feature_generator(self):
        """Adversarially train the feature generator against VGG16 features
        extracted from the source SYNTHIA sequence, checkpointing the
        generator every 5000 steps and logging losses every 200 steps."""

        epochs = 10000
        batch_size = 64
        noise_dim = 100

        summary_string_writer = tf.summary.FileWriter(self.log_dir)

        # NOTE(review): this CPU-only config is created but never passed to
        # tf.Session() below -- confirm whether the session should use it.
        config = tf.ConfigProto(device_count={'GPU': 0})

        source_images, source_annotations = load_synthia(self.seq_name,
                                                         no_elements=900)

        source_features = self.extract_VGG16_features(source_images)

        self.build_model(mode='train_feature_generator')

        with tf.Session() as sess:

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver(
                slim.get_model_variables(scope='feature_generator'))

            tf.global_variables_initializer().run()

            t = 0

            for i in range(epochs):

                #~ print 'Epoch',str(i), '.....................................................................'

                # zip() drops the final partial batch: every feed is exactly
                # batch_size feature vectors.
                for start, end in zip(
                        range(0, len(source_images), batch_size),
                        range(batch_size, len(source_images), batch_size)):

                    # Checkpoint every 5000 steps (including t == 0, i.e.
                    # before any update has run).
                    if t % 5000 == 0:
                        saver.save(sess,
                                   os.path.join(self.exp_dir, 'model/sampler'))

                    t += 1

                    Z_samples = utils.sample_Z(batch_size, noise_dim,
                                               'uniform')

                    feed_dict = {
                        self.noise: Z_samples,
                        self.fx: source_features[start:end]
                    }

                    avg_D_fake = sess.run(self.logits_fake, feed_dict)
                    avg_D_real = sess.run(self.logits_real, feed_dict)

                    sess.run(self.d_train_op, feed_dict)
                    sess.run(self.g_train_op, feed_dict)

                    if (t + 1) % 200 == 0:
                        summary, dl, gl = sess.run(
                            [self.summary_op, self.d_loss, self.g_loss],
                            feed_dict)
                        summary_writer.add_summary(summary, t)
                        print ('Step: [%d/%d] d_loss: [%.6f] g_loss: [%.6f]' \
                            %(t+1, int(epochs*len(source_images) /batch_size), dl, gl))
                        print 'avg_D_fake', str(
                            avg_D_fake.mean()), 'avg_D_real', str(
                                avg_D_real.mean())
# Example #7
    def train_sampler(self):
        """Train the sampler GAN on Office source features.

        Two sessions run in sequence: the first (forced onto CPU) restores
        the pretrained feature extractor and computes `source_fx`, the latent
        representation of every source image; the second restores the
        extractor again and adversarially trains the sampler on
        (noise, label, fx) mini-batches, logging every 250 steps and
        checkpointing every 1000 steps.
        """
        print 'Training sampler.'

        source_images, source_labels = self.load_office(split=self.src_dir)
        source_labels = utils.one_hot(source_labels.astype(int), 31)

        # build a graph
        model = self.model
        model.build_model()

        # make directory if not exists
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        batch_size = self.batch_size
        noise_dim = 100
        epochs = 500000

        # Pass 1 (CPU only): compute fx for all source images.
        with tf.Session(config=tf.ConfigProto(
                device_count={'GPU': 0})) as sess:
            # initialize G and D
            tf.global_variables_initializer().run()
            # restore variables of F

            print('Computing latent representation.')
            # Do not change next two lines. Necessary because slim.get_model_variables(scope='blablabla') works only for model built with slim.
            variables_to_restore = tf.global_variables()
            variables_to_restore = [
                v for v in variables_to_restore if np.all([
                    s not in str(v.name) for s in [
                        'encoder', 'sampler_generator', 'disc_e',
                        'source_train_op'
                    ]
                ])
            ]
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)
            # NOTE(review): noise and fx receive dummy values here;
            # presumably model.dummy_fx depends only on model.images --
            # confirm against the model definition.
            feed_dict = {
                model.noise: utils.sample_Z(1, noise_dim, 'uniform'),
                model.images: source_images,
                model.labels: source_labels,
                model.fx: np.ones((1, 128))
            }
            source_fx = sess.run(model.dummy_fx, feed_dict)

        # Pass 2: adversarial training of the sampler.
        with tf.Session(config=self.config) as sess:
            # initialize G and D
            tf.global_variables_initializer().run()
            # restore variables of F

            print('Loading pretrained model.')
            # Do not change next two lines. Necessary because slim.get_model_variables(scope='blablabla') works only for model built with slim.
            variables_to_restore = tf.global_variables()
            variables_to_restore = [
                v for v in variables_to_restore if np.all([
                    s not in str(v.name) for s in [
                        'encoder', 'sampler_generator', 'disc_e',
                        'source_train_op'
                    ]
                ])
            ]
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            #~ print ('Loading pretrained encoder disc.')
            #~ variables_to_restore = slim.get_model_variables(scope='disc_e')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.pretrained_sampler)

            #~ print ('Loading sample generator.')
            #~ variables_to_restore = slim.get_model_variables(scope='sampler_generator')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.pretrained_sampler)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            #~ feed_dict = {model.images: source_images[:10000]}
            #~ fx = sess.run(model.fx, feed_dict)

            t = 0

            for i in range(epochs):

                #~ print 'Epoch',str(i)

                # zip() drops the final partial batch: every feed is exactly
                # batch_size samples.
                for start, end in zip(
                        range(0, len(source_images), batch_size),
                        range(batch_size, len(source_images), batch_size)):

                    t += 1

                    Z_samples = utils.sample_Z(batch_size, noise_dim,
                                               'uniform')

                    # Only a single real image is fed; the GAN trains on the
                    # precomputed source_fx features.
                    feed_dict = {
                        model.noise: Z_samples,
                        model.images: source_images[0:1],
                        model.labels: source_labels[start:end],
                        model.fx: source_fx[start:end]
                    }

                    avg_D_fake = sess.run(model.logits_fake, feed_dict)
                    avg_D_real = sess.run(model.logits_real, feed_dict)

                    sess.run(model.d_train_op, feed_dict)
                    sess.run(model.g_train_op, feed_dict)

                    if (t + 1) % 250 == 0:
                        summary, dl, gl = sess.run(
                            [model.summary_op, model.d_loss, model.g_loss],
                            feed_dict)
                        summary_writer.add_summary(summary, t)
                        print ('Step: [%d/%d] d_loss: [%.6f] g_loss: [%.6f]' \
                            %(t+1, int(epochs*len(source_images) /batch_size), dl, gl))
                        print 'avg_D_fake', str(
                            avg_D_fake.mean()), 'avg_D_real', str(
                                avg_D_real.mean())

                    if (t + 1) % 1000 == 0:
                        saver.save(
                            sess, os.path.join(self.model_save_path,
                                               'sampler'))
# Example #8
    def train_end_to_end(self):
        """Fine-tune encoder + sampler end-to-end on MNIST.

        Restores the pretrained encoder and both sampler networks, then
        optimizes `model.train_op_features` over mini-batches, logging train
        losses and test accuracy every 100 steps and checkpointing to
        '<model_save_path>/end_to_end' every 1000 steps.
        """
        print 'Training sampler.'

        images, labels = self.load_mnist(self.mnist_dir, split='train')
        images_test, labels_test = self.load_mnist(self.mnist_dir,
                                                   split='test')

        # build a graph
        model = self.model
        model.build_model()

        # make directory if not exists
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        batch_size = self.batch_size
        noise_dim = 100
        epochs = 5000

        with tf.Session(config=self.config) as sess:
            tf.global_variables_initializer().run()

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            t = 0

            print('Loading pretrained model.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            print('Loading sampler - generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            print('Loading sampler - discriminator.')
            variables_to_restore = slim.get_model_variables(scope='disc_e')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            # 60 labels: each digit 0-9 repeated 6 times, fed every step so
            # the sampler sees a class-balanced label batch.
            balanced_labels = np.repeat(
                np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 6)

            for i in range(epochs):

                print 'Epoch', str(i)

                # zip() drops the final partial batch: every feed is exactly
                # batch_size samples.
                for start, end in zip(
                        range(0, len(images), batch_size),
                        range(batch_size, len(images), batch_size)):

                    t += 1

                    Z_samples = utils.sample_Z(batch_size, noise_dim,
                                               'uniform')

                    feed_dict = {
                        model.noise: Z_samples,
                        model.balanced_labels: balanced_labels,
                        model.images: images[start:end],
                        model.labels: labels[start:end]
                    }

                    avg_D_fake = sess.run(model.logits_fake, feed_dict)
                    avg_D_real = sess.run(model.logits_real, feed_dict)

                    # Only the feature loss is optimized; the image and GAN
                    # ops are left disabled below.
                    #~ sess.run(model.train_op_images, feed_dict)
                    sess.run(model.train_op_features, feed_dict)
                    #~ sess.run(model.train_op_d, feed_dict)
                    #~ sess.run(model.train_op_g, feed_dict)

                    if (t + 1) % 100 == 0:
                        summary, loss_d, loss_g, loss_images, loss_features = sess.run(
                            [
                                model.summary_op, model.d_loss, model.g_loss,
                                model.loss_images, model.loss_features
                            ], feed_dict)
                        # Reuse the feed_dict but swap in a 2000-sample test
                        # slice to measure test accuracy.
                        feed_dict[model.images] = images_test[:2000]
                        feed_dict[model.labels] = labels_test[:2000]
                        test_acc = sess.run([model.accuracy_images], feed_dict)
                        summary_writer.add_summary(summary, t)
                        print ('Step: [%d/%d] img_loss: %.3f feat_loss: %.3f d_loss: %.3f g_loss: %.3f avg_D_fake: %.2f avg_D_real: %.2f accuracy: %.6f ' \
                            %(t+1, int(epochs*len(images) /batch_size), loss_images, loss_features, loss_d, loss_g, avg_D_fake.mean(), avg_D_real.mean(), test_acc[0]))

                    if (t + 1) % 1000 == 0:
                        saver.save(
                            sess,
                            os.path.join(self.model_save_path, 'end_to_end'))
# Example #9
    def check_TSNE(self):
        """Visualize encoder / sampler features with a 2-D t-SNE embedding.

        Command-line driven:
          sys.argv[1] selects which checkpoint to restore
            ('test' | 'pretrain' | 'convdeconv');
          sys.argv[2] selects what to embed and plot
            ('1' source fx only, '2' fzy + source fx,
             '3' fzy + source fx + target fx, '4' conv-deconv h_repr).
        """
        target_images, target_labels = self.load_mnist(self.mnist_dir,
                                                       split='train')
        #~ usps_images, usps_labels = self.load_usps(self.usps_dir)
        source_images, source_labels = self.load_svhn(self.svhn_dir,
                                                      split='train')

        # build a graph
        model = self.model
        model.build_model()

        # make directory if not exists
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        # Force CPU execution for this evaluation.
        self.config = tf.ConfigProto(device_count={'GPU': 0})

        with tf.Session(config=self.config) as sess:
            # initialize G and D
            tf.global_variables_initializer().run()

            if sys.argv[1] == 'test':
                print('Loading test model.')
                variables_to_restore = slim.get_model_variables(
                    scope='encoder')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.test_model)

            elif sys.argv[1] == 'pretrain':
                print('Loading pretrained model.')
                variables_to_restore = slim.get_model_variables(
                    scope='encoder')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_model)

            elif sys.argv[1] == 'convdeconv':
                print('Loading convdeconv model.')
                variables_to_restore = slim.get_model_variables(
                    scope='conv_deconv')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.convdeconv_model)

            else:
                raise NameError('Unrecognized mode.')

            n_samples = 2000
            src_labels = utils.one_hot(source_labels[:n_samples], 10)
            trg_labels = utils.one_hot(target_labels[:n_samples], 10)
            src_noise = utils.sample_Z(n_samples, 100, 'uniform')

            if sys.argv[1] == 'convdeconv':

                feed_dict = {
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.src_images: source_images,
                    model.trg_images: target_images[:n_samples]
                }
                h_repr = sess.run(model.h_repr, feed_dict)

            else:

                print('Loading sampler.')
                variables_to_restore = slim.get_model_variables(
                    scope='sampler_generator')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_sampler)

                feed_dict = {
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.src_images: source_images[:n_samples],
                    model.trg_images: target_images[:n_samples]
                }

                fzy, fx_src, fx_trg = sess.run(
                    [model.fzy, model.fx_src, model.fx_trg], feed_dict)

                # Collapse one-hot labels to class indices for coloring.
                src_labels = np.argmax(src_labels, 1)
                trg_labels = np.argmax(trg_labels, 1)

            print 'Computing T-SNE.'

            # NOTE: `model` is rebound from the TF model to the TSNE
            # estimator from here on.
            model = TSNE(n_components=2, random_state=0)

            if sys.argv[2] == '1':
                TSNE_hA = model.fit_transform(np.vstack((fx_src)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples)))),
                            s=3,
                            cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((src_labels)),
                            s=3,
                            cmap=mpl.cm.jet)

            elif sys.argv[2] == '2':
                TSNE_hA = model.fit_transform(np.vstack((fzy, fx_src)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples, )), 2 * np.ones(
                                (n_samples, )))),
                            s=3,
                            cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((src_labels, src_labels)),
                            s=3,
                            cmap=mpl.cm.jet)

            elif sys.argv[2] == '3':
                TSNE_hA = model.fit_transform(np.vstack((fzy, fx_src, fx_trg)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((
                                src_labels,
                                src_labels,
                                trg_labels,
                            )),
                            s=5,
                            cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples, )), 2 * np.ones(
                                (n_samples, )), 3 * np.ones((n_samples, )))),
                            s=5,
                            cmap=mpl.cm.jet)

            elif sys.argv[2] == '4':
                # Only reachable in 'convdeconv' mode, where trg_labels is
                # still one-hot (argmax here) and h_repr was computed above.
                TSNE_hA = model.fit_transform(h_repr)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.argmax(trg_labels, 1),
                            s=3,
                            cmap=mpl.cm.jet)

            plt.legend()
            plt.show()
# Example #10
    def eval_dsn(self):
        """Probe the sampler's feature diversity on USPS.

        Restores the pretrained encoder and sample generator, computes
        sampled (fzy) and real (fx_src) features, binarizes their signs and
        counts distinct activation patterns, then keeps resampling forever to
        watch the count of unique fzy patterns saturate.

        NOTE(review): the final `while (True)` loop has no break, so the
        trailing `print 'break'` is unreachable — this appears to be a
        run-until-killed diagnostic.
        """
        # build model
        model = self.model
        model.build_model()

        # Force CPU execution for this evaluation.
        self.config = tf.ConfigProto(device_count={'GPU': 0})

        with tf.Session(config=self.config) as sess:

            #~ print ('Loading pretrained G.')
            #~ variables_to_restore = slim.get_model_variables(scope='generator')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.test_model)

            print('Loading pretrained E.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            #~ source_images, source_labels = self.load_mnist(self.mnist_dir, split='train')
            #~ source_images = source_images[:2000]
            #~ source_labels = source_labels[:2000]

            source_images, source_labels = self.load_usps(self.usps_dir)
            source_images = source_images[:1800]
            source_labels = source_labels[:1800]

            # Tile the 1800 samples 50x (90000 total) along axis 0.
            source_images = np.repeat(source_images, 50, 0)
            source_labels = np.repeat(source_labels, 50, 0)

            #~ for n in range(0,10):

            #~ print n

            #~ no_gen = 10000

            #~ source_labels = n * np.ones((no_gen,),dtype=int)

            #~ # train model for source domain S
            #~ src_labels = utils.one_hot(source_labels[:no_gen],10)
            #~ src_noise = utils.sample_Z(no_gen,100,'uniform')

            #~ feed_dict = {model.src_noise: src_noise, model.src_labels: src_labels}

            #~ samples, samples_logits = sess.run([model.sampled_images, model.sampled_images_logits], feed_dict)
            #~ samples_logits = samples_logits[:,n]
            #~ samples = samples[samples_logits>8.]
            #~ samples_logits = samples_logits[samples_logits>8.]

            #~ for i in range(len(samples_logits)):

            ## print str(i)+'/'+str(len(samples_logits))-

            #~ plt.imshow(np.squeeze(samples[i]), cmap='gray')
            #~ plt.imsave('./sample/'+str(np.argmax(src_labels[i]))+'/'+str(i)+'_'+str(np.argmax(src_labels[i]))+'_'+str(samples_logits[i]),np.squeeze(samples[i]), cmap='gray')

            #~ print str(i)+'/'+str(len(samples)), np.argmax(src_labels[i])

            no_gen = len(source_images)

            print 'Number of samples:', no_gen

            # train model for source domain S
            src_images = source_images[:2]
            src_labels = utils.one_hot(source_labels[:no_gen], 10)
            src_noise = utils.sample_Z(no_gen, 100, 'uniform')

            feed_dict = {
                model.src_noise: src_noise,
                model.src_labels: src_labels,
                model.src_images: src_images
            }

            fzy, fx_src, fzy_labels = sess.run(
                [model.fzy, model.fx_src, model.fzy_labels], feed_dict)

            # Binarize the activation signs into 0/1 patterns.
            fzy_states = (fzy > 0.).astype(int)
            fx_src_states = (fx_src > 0.).astype(int)

            # Count distinct rows via a void view (rows compared as bytes).
            tmpUnique = np.unique(fzy_states.view(
                np.dtype((np.void,
                          fzy_states.dtype.itemsize * fzy_states.shape[1]))),
                                  return_counts=True)
            fzy_states_unique = tmpUnique[0].view(fzy_states.dtype).reshape(
                -1, fzy_states.shape[1])
            print 'fzy:', fzy_states_unique.shape

            tmpUnique = np.unique(fx_src_states.view(
                np.dtype(
                    (np.void,
                     fx_src_states.dtype.itemsize * fx_src_states.shape[1]))),
                                  return_counts=True)
            fx_src_states_unique = tmpUnique[0].view(
                fx_src_states.dtype).reshape(-1, fx_src_states.shape[1])
            print 'fx_src:', fx_src_states_unique.shape

            # Fraction of sampled features classified as their sampled label
            # (Python 2 print statement; the expression would not run on 3).
            print(np.argmax(src_labels, 1) == fzy_labels).astype(int).mean()

            # Keep resampling and merging with the unique patterns seen so
            # far; the printed count shows how the pattern set grows.
            while (True):

                src_images = source_images[:2]
                src_labels = utils.one_hot(source_labels[:no_gen], 10)
                src_noise = utils.sample_Z(no_gen, 100, 'uniform')

                feed_dict = {
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.src_images: src_images
                }

                fzy, fx_src = sess.run([model.fzy, model.fx_src], feed_dict)

                fzy_states = (fzy > 0.).astype(int)
                fx_src_states = (fx_src > 0.).astype(int)

                fzy_states = np.vstack((fzy_states, fzy_states_unique))
                fx_src_states = np.vstack(
                    (fx_src_states, fx_src_states_unique))

                tmpUnique = np.unique(fzy_states.view(
                    np.dtype(
                        (np.void,
                         fzy_states.dtype.itemsize * fzy_states.shape[1]))),
                                      return_counts=True)
                fzy_states_unique = tmpUnique[0].view(
                    fzy_states.dtype).reshape(-1, fzy_states.shape[1])
                print 'fzy:', fzy_states_unique.shape

            print 'break'
# Example (예제) #11
# 0
    def train_dsn(self):

        print 'Training DSN.'

        if self.protocol == 'svhn_mnist':
            source_images, source_labels = self.load_svhn(self.svhn_dir,
                                                          split='train')
            target_images, target_labels = self.load_mnist(self.mnist_dir,
                                                           split='train')

        if self.protocol == 'mnist_mnist_m':
            source_images, source_labels = self.load_mnist(self.mnist_dir,
                                                           split='train')
            target_images, target_labels = self.load_mnist_m(self.mnist_m_dir,
                                                             split='train')

        elif self.protocol == 'syn_svhn':
            source_images, source_labels = self.load_syn(self.syn_dir,
                                                         split='train')
            target_images, target_labels = self.load_svhn(self.svhn_dir,
                                                          split='train')

        elif self.protocol == 'mnist_usps':
            source_images, source_labels = self.load_mnist(self.mnist_dir,
                                                           split='train')
            target_images, target_labels = self.load_usps(self.usps_dir)

        elif self.protocol == 'usps_mnist':
            target_images, target_labels = self.load_mnist(self.mnist_dir,
                                                           split='train')
            source_images, source_labels = self.load_usps(self.usps_dir)

        elif self.protocol == 'amazon_reviews':
            source_images, source_labels, target_images, target_labels, _, _ = self.load_amazon_reviews(
                self.amazon_dir)

        algorithm = str(sys.argv[1])

        # build a graph
        model = self.model
        model.build_model(algorithm)

        label_gen = utils.one_hot(
            np.array([
                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3,
                3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 0, 0,
                0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7,
                7, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9
            ]), 10)
        label_gen = np.matlib.repmat(label_gen, 5, 1)

        self.config = tf.ConfigProto(device_count={'GPU': 0})

        with tf.Session(config=self.config) as sess:

            tf.global_variables_initializer().run()

            print('Loading pretrained encoder.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            print 'Extracting source features'

            source_features = sess.run(model.orig_src_fx,
                                       feed_dict={
                                           model.src_features:
                                           np.zeros((1, 128)),
                                           model.src_images:
                                           source_images,
                                           model.src_noise:
                                           np.zeros((1, 100)),
                                           model.src_labels:
                                           utils.one_hot(source_labels, 10),
                                           model.trg_images:
                                           target_images[0:1],
                                           model.labels_gen:
                                           label_gen
                                       })

        tf.reset_default_graph()

        # build a graph
        model = self.model
        model.build_model(algorithm)

        with tf.Session() as sess:

            # initialize G and D
            tf.global_variables_initializer().run()
            # restore variables of F

            print('Loading pretrained encoder.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            #~ print ('Loading pretrained discriminator.')
            #~ variables_to_restore = slim.get_model_variables(scope='disc_e')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.test_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print('Start training.')
            trg_count = 0
            t = 0

            context = zmq.Context()
            socket = context.socket(zmq.DEALER)
            socket.connect('tcp://localhost:5560')

            for step in range(40001):

                trg_count += 1
                t += 1

                i = step % int(source_images.shape[0] / self.batch_size)
                j = step % int(target_images.shape[0] / self.batch_size)

                src_images = source_images[i * self.batch_size:(i + 1) *
                                           self.batch_size]
                src_labels = utils.one_hot(
                    source_labels[i * self.batch_size:(i + 1) *
                                  self.batch_size], 10)
                src_labels_int = source_labels[i * self.batch_size:(i + 1) *
                                               self.batch_size]
                src_noise = utils.sample_Z(self.batch_size, 100, 'uniform')
                trg_images = target_images[j * self.batch_size:(j + 1) *
                                           self.batch_size]
                src_features = source_features[i * self.batch_size:(i + 1) *
                                               self.batch_size]

                feed_dict = {
                    model.src_features: src_features,
                    model.src_images: src_images,
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.trg_images: trg_images,
                    model.labels_gen: label_gen
                }

                sess.run(model.E_train_op, feed_dict)
                sess.run(model.DE_train_op, feed_dict)

                #~ if step%1==0:
                #~ sess.run(model.G_train_op, feed_dict)
                #~ sess.run(model.DG_train_op, feed_dict)

                #~ sess.run(model.const_train_op, feed_dict)

                logits_E_real, logits_E_fake, logits_G_real, logits_G_fake = sess.run(
                    [
                        model.logits_E_real, model.logits_E_fake,
                        model.logits_G_real, model.logits_G_fake
                    ], feed_dict)

                if (step) % 1000 == 0:

                    summary, E, DE, G, DG, cnst = sess.run([
                        model.summary_op, model.E_loss, model.DE_loss,
                        model.G_loss, model.DG_loss, model.const_loss
                    ], feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d/%d] E: [%.6f] DE: [%.6f] G: [%.6f] DG: [%.6f] Const: [%.6f] E_real: [%.2f] E_fake: [%.2f] G_real: [%.2f] G_fake: [%.2f]' \
                        %(step+1, self.train_iter, E, DE, G, DG, cnst,logits_E_real.mean(),logits_E_fake.mean(),logits_G_real.mean(),logits_G_fake.mean()))

                if (step) % 1000 == 0:
                    saver.save(sess, os.path.join(self.model_save_path, 'dtn'))
                    print 'Sending...'
                    socket.send_string(algorithm)
# Example (예제) #12
# 0
    def train_dsn(self):

        print 'Training DSN.'

        if self.protocol == 'svhn_mnist':
            source_images, source_labels = self.load_svhn(self.svhn_dir,
                                                          split='train')
            target_images, target_labels = self.load_mnist(self.mnist_dir,
                                                           split='train')

        algorithm = str(sys.argv[1])

        # build a graph
        model = self.model
        model.build_model(algorithm)

        source_features = np.zeros((len(source_images), 128))

        #~ self.config = tf.ConfigProto(device_count = {'GPU': 0})

        with tf.Session(config=self.config) as sess:

            tf.global_variables_initializer().run()

            print('Loading pretrained encoder.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            print 'Extracting source features'

            for indices, source_images_batch in zip(
                    np.array_split(np.arange(len(source_images)), 20),
                    np.array_split(source_images, 20)):
                print indices[0]
                source_features[indices] = sess.run(model.orig_src_fx,
                                                    feed_dict={
                                                        model.src_features:
                                                        np.zeros((1, 128)),
                                                        model.src_images:
                                                        source_images_batch,
                                                        model.src_noise:
                                                        np.zeros((1, 100)),
                                                        model.src_labels:
                                                        utils.one_hot(
                                                            source_labels, 10),
                                                        model.trg_images:
                                                        target_images[0:1]
                                                    })

        tf.reset_default_graph()

        # build a graph
        model = self.model
        model.build_model(algorithm)

        with tf.Session(config=self.config) as sess:

            # initialize G and D
            tf.global_variables_initializer().run()
            # restore variables of F

            print('Loading pretrained encoder.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            #~ print ('Loading pretrained discriminator.')
            #~ variables_to_restore = slim.get_model_variables(scope='disc_e')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.test_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print('Start training.')
            trg_count = 0
            t = 0

            context = zmq.Context()
            socket = context.socket(zmq.DEALER)

            if sys.argv[1] == 'adda':
                socket.connect('tcp://localhost:5560')
            elif sys.argv[1] == 'adda_di':
                socket.connect('tcp://localhost:5660')
            if sys.argv[1] == 'fa':
                socket.connect('tcp://localhost:5760')

            for step in range(40001):

                trg_count += 1
                t += 1

                i = step % int(source_images.shape[0] / self.batch_size)
                j = step % int(target_images.shape[0] / self.batch_size)

                src_images = source_images[i * self.batch_size:(i + 1) *
                                           self.batch_size]
                src_labels = utils.one_hot(
                    source_labels[i * self.batch_size:(i + 1) *
                                  self.batch_size], 10)
                src_labels_int = source_labels[i * self.batch_size:(i + 1) *
                                               self.batch_size]
                src_noise = utils.sample_Z(self.batch_size, 100, 'uniform')
                trg_images = target_images[j * self.batch_size:(j + 1) *
                                           self.batch_size]
                src_features = source_features[i * self.batch_size:(i + 1) *
                                               self.batch_size]

                feed_dict = {
                    model.src_features: src_features,
                    model.src_images: src_images,
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.trg_images: trg_images
                }

                sess.run(model.E_train_op, feed_dict)
                sess.run(model.DE_train_op, feed_dict)

                logits_E_real, logits_E_fake = sess.run(
                    [model.logits_E_real, model.logits_E_fake], feed_dict)

                if (step) % 1000 == 0:

                    summary, E, DE = sess.run(
                        [model.summary_op, model.E_loss, model.DE_loss],
                        feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d/%d] E: [%.6f] DE: [%.6f] E_real: [%.2f] E_fake: [%.2f]' \
                        %(step+1, self.train_iter, E, DE, logits_E_real.mean(),logits_E_fake.mean()))

                if (step) % 1000 == 0:
                    print 'Saving...'
                    saver.save(sess, self.test_model)
                    print 'Sending...'
                    socket.send_string(algorithm)
# Example (예제) #13
# 0
    def train_DIFA(self):

        print 'Adapt with DIFA'

        # build a graph
        model = self.model
        model.build_model()

        source_images, source_labels = self.load_svhn(self.svhn_dir,
                                                      split='train')
        target_images, _ = self.load_mnist(self.mnist_dir, split='train')
        target_test_images, target_test_labels = self.load_mnist(
            self.mnist_dir, split='test')

        with tf.Session(config=self.config) as sess:

            # Initialize weights
            tf.global_variables_initializer().run()

            print('Loading pretrained encoder.')
            variables_to_restore = slim.get_model_variables(
                scope='feature_extractor')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_feature_extractor)

            print('Loading pretrained S.')
            variables_to_restore = slim.get_model_variables(
                scope='feature_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_feature_generator)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print('Start training.')

            for step in range(self.train_DIFA_iters):

                i = step % int(source_images.shape[0] / self.batch_size)
                j = step % int(target_images.shape[0] / self.batch_size)

                source_images_batch = source_images[i *
                                                    self.batch_size:(i + 1) *
                                                    self.batch_size]
                target_images_batch = target_images[j *
                                                    self.batch_size:(j + 1) *
                                                    self.batch_size]
                labels_batch = utils.one_hot(
                    source_labels[i * self.batch_size:(i + 1) *
                                  self.batch_size], 10)
                noise = utils.sample_Z(self.batch_size, 100, 'uniform')

                feed_dict = {
                    model.src_images: source_images_batch,
                    model.trg_images: target_images_batch,
                    model.noise: noise,
                    model.labels: labels_batch
                }

                sess.run(model.e_train_op, feed_dict)
                sess.run(model.d_train_op, feed_dict)

                if (step + 1) % 50 == 0:

                    logits_real, logits_fake = sess.run(
                        [model.logits_real, model.logits_fake], feed_dict)

                    summary, e_loss, d_loss = sess.run(
                        [model.summary_op, model.e_loss, model.d_loss],
                        feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d/%d] e_loss: [%.6f] d_loss: [%.6f] e_real: [%.2f] e_fake: [%.2f]' \
                        %(step+1, self.train_DIFA_iters, e_loss, d_loss, logits_real.mean(),logits_fake.mean()))

                    print 'Evaluating.'
                    target_test_acc = 0.

                    for target_test_labels_batch, target_test_images_batch in zip(
                            np.array_split(target_test_labels, 100),
                            np.array_split(target_test_images, 100)):
                        feed_dict[
                            self.model.trg_images] = target_test_images_batch
                        feed_dict[self.model.
                                  trg_labels_gt] = target_test_labels_batch
                        target_test_acc_tmp = sess.run(model.trg_accuracy,
                                                       feed_dict)
                        target_test_acc += target_test_acc_tmp / 100.

                    print 'target test accuracy: [%.3f]' % (target_test_acc)

            print 'Saving.'
            saver.save(sess, self.DIFA_feature_extractor)
    def train_domain_invariant_encoder(self, seq_2_name):
        """Adversarially adapt the encoder from ``self.seq_name`` to
        ``seq_2_name`` on SYNTHIA sequences.

        Restores VGG weights plus the pretrained fc6/fc7 segmentation
        weights and the feature generator, then alternates encoder (E)
        and encoder-discriminator (DE) updates for 100000 steps, logging
        every 100 steps. No checkpoint is saved (the save block below is
        commented out).
        """

        print 'Adapting from ' + self.seq_name + ' to ' + seq_2_name

        # NOTE(review): epochs and noise_dim are never used below;
        # batch_size shadows any self.batch_size for this method only.
        epochs = 1000000
        batch_size = 8
        noise_dim = 100

        self.build_model('train_domain_invariant_encoder')

        # NOTE(review): this writer is never written to; a second
        # FileWriter (created below with the graph) is the one used.
        summary_string_writer = tf.summary.FileWriter(self.log_dir)

        # NOTE(review): this CPU-only config is built but NOT passed to
        # tf.Session() below — confirm whether CPU-only was intended.
        config = tf.ConfigProto(device_count={'GPU': 0})

        with tf.Session() as sess:

            print 'Loading weights.'

            # Run the initializers.
            sess.run(tf.global_variables_initializer())
            self.read_vgg_weights_except_fc8_func(sess)
            sess.run(self.vgg_fc8_weights_initializer)
            # Restore only fc6/fc7 variables from the trained
            # segmentation checkpoint.
            variables_to_restore = [
                i for i in slim.get_model_variables()
                if ('fc6' in i.name) or ('fc7' in i.name)
            ]
            restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, os.path.join(self.exp_dir,'model/segm_model'))
            restorer.restore(sess, self.exp_dir + '/model/segm_model')

            # Restore the pretrained feature generator (sampler).
            variables_to_restore = slim.get_model_variables(
                scope='feature_generator')
            restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, os.path.join(self.exp_dir+'model/sampler'))
            restorer.restore(sess, self.exp_dir + '/model/sampler')

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            # Saver over encoder variables only; only referenced by the
            # commented-out periodic save in the loop below.
            saver = tf.train.Saver(self.E_vars)

            source_images, source_annotations = load_synthia(self.seq_name,
                                                             no_elements=900)
            target_images, target_annotations = load_synthia(seq_2_name,
                                                             no_elements=900)

            print('Start training.')
            # trg_count and t are only used by the commented-out save
            # block inside the loop.
            trg_count = 0
            t = 0

            for step in range(100000):

                #~ if t % 1000 == 0:
                #~ print 'Saving model.'
                #~ saver.save(sess, self.exp_dir+'/model/di_encoder_new')

                trg_count += 1
                t += 1

                # Cycle through source/target mini-batches.
                i = step % int(source_images.shape[0] / batch_size)
                j = step % int(target_images.shape[0] / batch_size)

                src_images = source_images[i * batch_size:(i + 1) * batch_size]
                trg_images = target_images[j * batch_size:(j + 1) * batch_size]
                noise = utils.sample_Z(batch_size, 100, 'uniform')

                feed_dict = {
                    self.src_images: src_images,
                    self.trg_images: trg_images,
                    self.noise: noise,
                    self.is_training: True
                }

                # Alternate encoder and discriminator updates.
                sess.run(self.E_train_op, feed_dict)
                sess.run(self.DE_train_op, feed_dict)

                # NOTE(review): fetched every step but only used in the
                # %100 logging below — could be moved inside the branch.
                logits_E_real, logits_E_fake = sess.run(
                    [self.logits_E_real, self.logits_E_fake], feed_dict)

                if (step + 1) % 100 == 0:

                    summary, E, DE = sess.run(
                        [self.summary_op, self.E_loss, self.DE_loss],
                        feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d] E: [%.3f] DE: [%.3f] E_real: [%.2f] E_fake: [%.2f]' \
                        %(step+1, E, DE, logits_E_real.mean(),logits_E_fake.mean()))
# Example (예제) #15
# 0
    def train_dsn(self):
        """Train the generator (G) and its discriminator (DG) (DSN step).

        Generator-only variant for SVHN->MNIST: the encoder/E-discriminator
        updates are commented out. Per step, G is updated once, DG every
        15th step, and the consistency op three times. Logs every 10
        steps and checkpoints every 500.
        """

        source_images, source_labels = self.load_svhn(self.svhn_dir,
                                                      split='train')
        target_images, target_labels = self.load_mnist(self.mnist_dir,
                                                       split='train')

        # build a graph
        model = self.model
        model.build_model()

        # make directory if not exists
        # (the log dir is wiped so each run starts with fresh summaries)
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        with tf.Session(config=self.config) as sess:

            # initialize G and D
            tf.global_variables_initializer().run()
            # restore variables of F

            print('Loading pretrained encoder.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.test_model)

            #~ print ('Loading pretrained encoder disc.')
            #~ variables_to_restore = slim.get_model_variables(scope='disc_e')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.pretrained_sampler)

            #~ print ('Loading pretrained G.')
            #~ variables_to_restore = slim.get_model_variables(scope='generator')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.test_model)

            #~ print ('Loading pretrained D_g.')
            #~ variables_to_restore = slim.get_model_variables(scope='disc_g')
            #~ restorer = tf.train.Saver(variables_to_restore)
            #~ restorer.restore(sess, self.test_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print('Start training.')
            # NOTE(review): trg_count and t are incremented but never read.
            trg_count = 0
            t = 0

            # Batch size is overridden for this training run.
            self.batch_size = 64

            # Fixed class-label pattern (64 entries) fed to the generator.
            label_gen = utils.one_hot(
                np.array([
                    0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6,
                    6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3,
                    3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9,
                    9, 9, 9, 9
                ]), 10)

            for step in range(10000000):

                trg_count += 1
                t += 1

                # Cycle through source/target mini-batches.
                i = step % int(source_images.shape[0] / self.batch_size)
                j = step % int(target_images.shape[0] / self.batch_size)

                src_images = source_images[i * self.batch_size:(i + 1) *
                                           self.batch_size]
                src_labels = utils.one_hot(
                    source_labels[i * self.batch_size:(i + 1) *
                                  self.batch_size], 10)
                # NOTE(review): src_labels_int is never used below.
                src_labels_int = source_labels[i * self.batch_size:(i + 1) *
                                               self.batch_size]
                src_noise = utils.sample_Z(self.batch_size, 100, 'uniform')
                trg_images = target_images[j * self.batch_size:(j + 1) *
                                           self.batch_size]

                feed_dict = {
                    model.src_images: src_images,
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.trg_images: trg_images,
                    model.labels_gen: label_gen
                }

                #~ sess.run(model.E_train_op, feed_dict)
                #~ sess.run(model.DE_train_op, feed_dict)

                # G every step; DG only every 15th step (slower
                # discriminator schedule); consistency op three times.
                sess.run(model.G_train_op, feed_dict)
                if step % 15 == 0:
                    sess.run(model.DG_train_op, feed_dict)
                sess.run(model.const_train_op, feed_dict)
                sess.run(model.const_train_op, feed_dict)
                sess.run(model.const_train_op, feed_dict)

                # NOTE(review): fetched every step but only used in the
                # %10 logging below — could be moved inside the branch.
                logits_E_real, logits_E_fake, logits_G_real, logits_G_fake = sess.run(
                    [
                        model.logits_E_real, model.logits_E_fake,
                        model.logits_G_real, model.logits_G_fake
                    ], feed_dict)

                if (step + 1) % 10 == 0:

                    summary, E, DE, G, DG, cnst = sess.run([
                        model.summary_op, model.E_loss, model.DE_loss,
                        model.G_loss, model.DG_loss, model.const_loss
                    ], feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d/%d] E: [%.6f] DE: [%.6f] G: [%.6f] DG: [%.6f] Const: [%.6f] E_real: [%.2f] E_fake: [%.2f] G_real: [%.2f] G_fake: [%.2f]' \
                        %(step+1, self.train_iter, E, DE, G, DG, cnst,logits_E_real.mean(),logits_E_fake.mean(),logits_G_real.mean(),logits_G_fake.mean()))

                if (step + 1) % 500 == 0:
                    saver.save(sess, os.path.join(self.model_save_path, 'dtn'))
# Example (예제) #16
# 0
    def train_feature_generator(self):

        print 'Training sampler.'

        # images, labels = self.load_svhn(self.svhn_dir, split='train')

        classes, images, Xs_test, labels, Ys_test = self.getprotocol(Xs)

        labels = utils.one_hot(labels, 11)

        # build a graph
        model = self.model
        model.build_model()

        noise_dim = 100
        epochs = 5000

        with tf.Session(config=self.config) as sess:

            # initialize variables
            tf.global_variables_initializer().run()

            # restore feature extractor trained on Step 0
            print('Loading pretrained feature extractor.')
            variables_to_restore = slim.get_model_variables(
                scope='feature_extractor')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_feature_extractor)
            print 'Loaded'

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            for step in range(self.train_feature_generator_iters):

                i = step % int(images.shape[0] / self.batch_size)

                images_batch = images[i * self.batch_size:(i + 1) *
                                      self.batch_size]
                labels_batch = labels[i * self.batch_size:(i + 1) *
                                      self.batch_size]
                # noise = utils.sample_Z(self.batch_size, noise_dim, 'uniform')
                noise = utils.sample_Z(self.batch_size, noise_dim, 'gaussian')

                feed_dict = {
                    model.noise: noise,
                    model.images: images_batch,
                    model.labels: labels_batch
                }

                avg_D_fake = sess.run(model.logits_fake, feed_dict)
                avg_D_real = sess.run(model.logits_real, feed_dict)

                sess.run(model.d_train_op, feed_dict)
                sess.run(model.g_train_op, feed_dict)

                if (step + 1) % 1000 == 0:
                    summary, dl, gl = sess.run(
                        [model.summary_op, model.d_loss, model.g_loss],
                        feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d/%d] d_loss: %.6f g_loss: %.6f avg_d_fake: %.2f avg_d_real: %.2f ' \
                           % (
                           step + 1, self.train_feature_generator_iters, dl, gl, avg_D_fake.mean(), avg_D_real.mean()))

            print 'Saving.'
            saver.save(sess, self.pretrained_feature_generator)
# Example (예제) #17
# 0
    def eval_dsn(self, name='Exp2'):
        """Generate and save high-confidence class-conditional samples.

        For each of the 10 classes, draws `no_gen` latent codes, runs the
        pretrained sampler/generator, keeps only images whose classifier
        logit for the requested class exceeds 8.0, and writes them as PNGs
        under ./sample/<class>/.
        """
        # build model
        model = self.model
        model.build_model()

        # Evaluation runs on CPU only.
        self.config = tf.ConfigProto(device_count={'GPU': 0})

        with tf.Session(config=self.config) as sess:

            print('Loading pretrained G.')
            variables_to_restore = slim.get_model_variables(scope='generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.test_model)

            print('Loading pretrained E.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.test_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            source_images, source_labels = self.load_svhn(self.svhn_dir)

            # Fixed seed keeps the generated sample set reproducible.
            npr.seed(190)

            for n in range(10):

                print(n)

                no_gen = 5000

                # Every requested label is the current class n.
                source_labels = n * np.ones((no_gen, ), dtype=int)

                src_labels = utils.one_hot(source_labels[:no_gen], 10)
                src_noise = utils.sample_Z(no_gen, 100, 'uniform')

                feed_dict = {
                    model.src_noise: src_noise,
                    model.src_labels: src_labels
                }

                samples, samples_logits = sess.run(
                    [model.sampled_images, model.sampled_images_logits],
                    feed_dict)
                # Keep only samples the classifier is confident about
                # (logit for class n greater than 8).
                samples_logits = samples_logits[:, n]
                keep = samples_logits > 8.
                samples = samples[keep]
                samples_logits = samples_logits[keep]

                for i in range(len(samples_logits)):
                    # NOTE(review): assumes ./sample/<class>/ already exists;
                    # the mkdir fallback was commented out upstream -- confirm.
                    imsave(
                        './sample/' + str(np.argmax(src_labels[i])) + '/' +
                        name + '_' + str(i) + '_' +
                        str(np.argmax(src_labels[i])) +
                        '_' + str(samples_logits[i]) + '.png',
                        np.squeeze(samples[i]))

                # Guard the progress report: `i` is undefined when no sample
                # survived the threshold (the original crashed with a
                # NameError here in that case).
                if len(samples_logits) > 0:
                    print(str(i) + '/' + str(len(samples)) + ' ' +
                          str(np.argmax(src_labels[i])))
예제 #18
0
    def train_DIFA(self):
        """Adapt the feature extractor with DIFA-style adversarial training.

        Restores the pretrained feature extractor and feature generator,
        evaluates source/target accuracy before adaptation (with a confusion
        matrix plot), then alternates encoder and discriminator updates for
        `self.train_DIFA_iters` steps, periodically re-evaluating, saving
        confusion-matrix plots, and checkpointing the adapted extractor.
        """

        print 'Adapt with DIFA'

        # build a graph
        model = self.model
        model.build_model()

        # source_images, source_labels = self.load_svhn(self.svhn_dir, split='train')
        # target_images, _ = self.load_mnist(self.mnist_dir, split='train')    # labels are not needed for training
        # target_test_images, target_test_labels = self.load_mnist(self.mnist_dir, split='test')

        ########### protocol data
        # Xs = cPickle.load(open("protocal18m.dat", 'rb'))
        # NOTE(review): `Xs` and `Xt` are undefined here because the loads
        # above are commented out -- as written this method raises NameError;
        # restore the data loading before calling it.
        classes_s, source_images, Xs_test, source_labels, Ys_test = self.getprotocol(
            Xs)

        classes_t, target_images, target_test_images, Yt_train, target_test_labels = self.getprotocol(
            Xt)

        with tf.Session(config=self.config) as sess:
            # Initialize weights
            tf.global_variables_initializer().run()

            print('Loading pretrained encoder.')
            variables_to_restore = slim.get_model_variables(
                scope='feature_extractor')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_feature_extractor)

            print('Loading pretrained S.')
            variables_to_restore = slim.get_model_variables(
                scope='feature_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_feature_generator)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print('Start training.')

            # Evaluate on the target domain first (pre-adaptation baseline).
            print 'Evaluating.'
            target_test_acc = 0.
            source_test_acc = 0.
            # Evaluate source-domain data
            # for source_test_labels_batch, source_test_images_batch in zip(
            #         np.array_split(Ys_test, 5), np.array_split(Xs_test, 5)):
            #     source_test_acc_tmp = sess.run(model.trg_accuracy,
            #                                    feed_dict={model.trg_images: source_test_images_batch,
            #                                               model.trg_labels_gt: source_test_labels_batch})
            #     source_test_acc += source_test_acc_tmp / 5.

            # [::4] / [::3] subsample the test sets to keep evaluation cheap.
            source_test_acc = sess.run(model.trg_accuracy,
                                       feed_dict={
                                           model.trg_images: Xs_test[::4],
                                           model.trg_labels_gt: Ys_test[::4]
                                       })

            print 'source test accuracy: [%.3f]' % (source_test_acc)

            # for target_test_labels_batch, target_test_images_batch in zip(
            #         np.array_split(target_test_labels, 5), np.array_split(target_test_images, 5)):
            #     target_test_acc_tmp = sess.run(model.trg_accuracy,
            #                                    feed_dict={model.trg_images: target_test_images_batch,
            #                                               model.trg_labels_gt: target_test_labels_batch})
            #     target_test_acc += target_test_acc_tmp / 5.

            target_test_acc = sess.run(model.trg_accuracy,
                                       feed_dict={
                                           model.trg_images:
                                           target_test_images[::3],
                                           model.trg_labels_gt:
                                           target_test_labels[::3]
                                       })
            print 'target test accuracy: [%.3f]' % (target_test_acc)

            # Plot the confusion matrix (pre-adaptation snapshot).
            pred_t = sess.run(model.trg_labels,
                              feed_dict={
                                  model.trg_images:
                                  target_test_images[1000:3000],
                                  model.trg_labels_gt:
                                  target_test_labels[1000:3000]
                              })
            y_true = utils.one_hot(target_test_labels[1000:3000], 11)
            confusion(classes_t, pred_t, y_true)
            plt.savefig("noadapta.png")
            plt.close('all')
            # plt.show()

            noise_dim = 100
            p = 0  # counter used to number the saved confusion-matrix plots
            test_acc = []  # target-accuracy history, dumped at the end
            for step in range(self.train_DIFA_iters):

                # Cyclic batch indices over each domain.
                i = step % int(source_images.shape[0] / self.batch_size)
                j = step % int(target_images.shape[0] / self.batch_size)

                source_images_batch = source_images[i *
                                                    self.batch_size:(i + 1) *
                                                    self.batch_size]
                target_images_batch = target_images[j *
                                                    self.batch_size:(j + 1) *
                                                    self.batch_size]
                labels_batch = utils.one_hot(
                    source_labels[i * self.batch_size:(i + 1) *
                                  self.batch_size], 11)
                # noise = utils.sample_Z(self.batch_size, noise_dim, 'uniform')
                noise = utils.sample_Z(self.batch_size, noise_dim, 'gaussian')

                feed_dict = {
                    model.src_images: source_images_batch,
                    model.trg_images: target_images_batch,
                    model.noise: noise,
                    model.labels: labels_batch
                }

                # Alternate encoder and discriminator updates.
                sess.run(model.e_train_op, feed_dict)
                sess.run(model.d_train_op, feed_dict)

                logits_real, logits_fake = sess.run(
                    [model.logits_real, model.logits_fake], feed_dict)

                if (step + 1) % 800 == 0:

                    summary, e_loss, d_loss = sess.run(
                        [model.summary_op, model.e_loss, model.d_loss],
                        feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d/%d] e_loss: [%.6f] d_loss: [%.6f] e_real: [%.2f] e_fake: [%.2f]' \
                           % (step + 1, self.train_DIFA_iters, e_loss, d_loss, logits_real.mean(), logits_fake.mean()))

                    print 'Evaluating.'
                    target_test_acc = 0.

                    # for target_test_labels_batch, target_test_images_batch in zip(
                    #         np.array_split(target_test_labels, 10), np.array_split(target_test_images, 10)):
                    #     feed_dict[self.model.trg_images] = target_test_images_batch
                    #     feed_dict[self.model.trg_labels_gt] = target_test_labels_batch
                    #     target_test_acc_tmp = sess.run(model.trg_accuracy, feed_dict)
                    #     target_test_acc += target_test_acc_tmp / 10.

                    target_test_acc = sess.run(model.trg_accuracy,
                                               feed_dict={
                                                   model.trg_images:
                                                   target_test_images[::3],
                                                   model.trg_labels_gt:
                                                   target_test_labels[::3]
                                               })

                    source_test_acc = sess.run(model.trg_accuracy,
                                               feed_dict={
                                                   model.trg_images:
                                                   Xs_test[::4],
                                                   model.trg_labels_gt:
                                                   Ys_test[::4]
                                               })
                    print 'source test accuracy: [%.3f]' % (source_test_acc)

                    pred_t = sess.run(model.trg_labels,
                                      feed_dict={
                                          model.trg_images:
                                          target_test_images[1000:3000],
                                          model.trg_labels_gt:
                                          target_test_labels[1000:3000]
                                      })
                    y_true = utils.one_hot(target_test_labels[1000:3000], 11)
                    confusion(classes_t, pred_t, y_true)
                    p = p + 1
                    plt.savefig("pics/adapta" + str(p) + ".png")
                    plt.close('all')
                    print 'target test accuracy: [%.3f]' % (target_test_acc)
                    test_acc.append(target_test_acc)

            np.savetxt("resultacc.txt", np.array(test_acc))

            print 'Saving.'
            saver.save(sess, self.DIFA_feature_extractor)
예제 #19
0
    def train_sampler(self):
        """Train the feature sampler (conditional GAN) on MNIST.

        Restores the pretrained encoder, then alternates discriminator and
        generator updates, logging losses every 100 steps and checkpointing
        the sampler every 1000 steps to `self.model_save_path`.
        """
        print('Training sampler.')

        images, labels = self.load_mnist(self.mnist_dir, split='train')
        labels = utils.one_hot(labels, 10)

        # build a graph
        model = self.model
        model.build_model()

        # start with a clean log directory
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        batch_size = self.batch_size
        noise_dim = 100
        epochs = 5000

        with tf.Session(config=self.config) as sess:
            # initialize G and D
            tf.global_variables_initializer().run()
            # restore variables of the pretrained encoder F
            print('Loading pretrained model.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            t = 0  # global step across all epochs

            for i in range(epochs):

                for start, end in zip(
                        range(0, len(images), batch_size),
                        range(batch_size, len(images), batch_size)):

                    t += 1

                    Z_samples = utils.sample_Z(batch_size, noise_dim,
                                               'uniform')

                    feed_dict = {
                        model.noise: Z_samples,
                        model.images: images[start:end],
                        model.labels: labels[start:end]
                    }

                    # One forward pass fetches both monitoring tensors
                    # (the original ran the graph twice for this).
                    avg_D_fake, avg_D_real = sess.run(
                        [model.logits_fake, model.logits_real], feed_dict)

                    sess.run(model.d_train_op, feed_dict)
                    sess.run(model.g_train_op, feed_dict)

                    if (t + 1) % 100 == 0:
                        summary, dl, gl = sess.run(
                            [model.summary_op, model.d_loss, model.g_loss],
                            feed_dict)
                        summary_writer.add_summary(summary, t)
                        print ('Step: [%d/%d] d_loss: %.6f g_loss: %.6f avg_D_fake: %.2f avg_D_real: %.2f ' \
                            %(t+1, int(epochs*len(images) /batch_size), dl, gl, avg_D_fake.mean(), avg_D_real.mean()))

                    if (t + 1) % 1000 == 0:
                        saver.save(
                            sess, os.path.join(self.model_save_path,
                                               'sampler'))
예제 #20
0
    def train_sampler(self):
        """Train the feature sampler (GAN) on NYUD source features.

        First pre-computes the latent representation of the whole source
        split with the pretrained VGG-16 encoder (batchwise, cached in
        `source_fx`), then trains D and G on those cached features.

        Fixes vs. original: the body mixed tabs and spaces (a TabError
        under Python 3), and ran two separate forward passes just to read
        the discriminator logits.
        """
        print('Training sampler.')

        source_images, source_labels = self.load_NYUD(split='source')
        source_labels = utils.one_hot(source_labels.astype(int),
                                      self.no_classes)

        # build a graph
        model = self.model
        model.build_model()

        # start with a clean log directory
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        batch_size = self.batch_size
        noise_dim = model.noise_dim
        epochs = 500000

        # Compute the latent representation of the source split.
        with tf.Session(config=self.config) as sess:

            print('Computing latent representation.')
            tf.global_variables_initializer().run()
            variables_to_restore = slim.get_model_variables(scope='vgg_16')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            # Must do it batchwise (40 roughly equal splits).
            source_fx = np.empty((0, model.hidden_repr_size))
            for spl_im, spl_lab in zip(np.array_split(source_images, 40),
                                       np.array_split(source_labels, 40)):
                # noise/fx entries are dummies; only images/labels matter
                # for the dummy_fx fetch.
                feed_dict = {
                    model.noise: utils.sample_Z(1, noise_dim, 'uniform'),
                    model.images: spl_im,
                    model.labels: spl_lab,
                    model.fx: np.ones((1, model.hidden_repr_size))
                }
                s_fx = sess.run(model.dummy_fx, feed_dict)
                source_fx = np.vstack((source_fx, np.squeeze(s_fx)))
            assert source_fx.shape == (source_images.shape[0],
                                       model.hidden_repr_size)

        with tf.Session(config=self.config) as sess:
            # initialize G and D
            tf.global_variables_initializer().run()

            print('Loading pretrained model.')
            variables_to_restore = slim.get_model_variables(scope='vgg_16')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            t = 0  # global step across all epochs

            for i in range(epochs):

                # Reshuffle labels and cached features jointly every epoch.
                src_rand = np.random.permutation(source_images.shape[0])
                source_labels, source_fx = (source_labels[src_rand],
                                            source_fx[src_rand])

                for start, end in zip(
                        range(0, len(source_images), batch_size),
                        range(batch_size, len(source_images), batch_size)):

                    t += 1

                    Z_samples = utils.sample_Z(batch_size, noise_dim,
                                               'uniform')

                    # The images entry is a one-element dummy slice: the GAN
                    # trains on the cached features fed through model.fx.
                    feed_dict = {
                        model.noise: Z_samples,
                        model.images: source_images[0:1],
                        model.labels: source_labels[start:end],
                        model.fx: source_fx[start:end]
                    }

                    # One forward pass fetches both monitoring tensors.
                    avg_D_fake, avg_D_real = sess.run(
                        [model.logits_fake, model.logits_real], feed_dict)

                    sess.run(model.d_train_op, feed_dict)
                    sess.run(model.g_train_op, feed_dict)

                    if (t + 1) % 250 == 0:
                        summary, dl, gl = sess.run(
                            [model.summary_op, model.d_loss, model.g_loss],
                            feed_dict)
                        summary_writer.add_summary(summary, t)
                        print ('Step: [%d/%d] g_loss: [%.6f] d_loss: [%.6f]' \
                               %(t+1, int(epochs*len(source_images) /batch_size), gl, dl))
                        print('\t avg_D_fake %s avg_D_real %s'
                              % (avg_D_fake.mean(), avg_D_real.mean()))

                    if (t + 1) % 5000 == 0:
                        saver.save(sess, os.path.join(self.model_save_path,
                                                      'sampler'))
예제 #21
0
    def check_TSNE(self):
        """Embed encoder features with t-SNE and plot them.

        sys.argv[1] chooses the checkpoint to restore ('test' or
        'pretrain'); sys.argv[2] chooses what to embed: '1' real features
        only, '2' sampled features stacked with real features.
        """
        images, labels = self.load_mnist(self.mnist_dir, split='train')

        model = self.model
        model.build_model()

        # Force the visualization pass onto the CPU.
        self.config = tf.ConfigProto(device_count={'GPU': 0})

        with tf.Session(config=self.config) as sess:
            tf.global_variables_initializer().run()

            mode = sys.argv[1]
            if mode == 'test':
                print('Loading test model.')
                enc_vars = slim.get_model_variables(scope='encoder')
                tf.train.Saver(enc_vars).restore(sess, self.test_model)
            elif mode == 'pretrain':
                print('Loading pretrained model.')
                enc_vars = slim.get_model_variables(scope='encoder')
                tf.train.Saver(enc_vars).restore(sess, self.pretrained_model)
            else:
                raise NameError('Unrecognized mode.')

            n_samples = 2000
            labels = utils.one_hot(labels[:n_samples], 10)
            noise = utils.sample_Z(n_samples, 100, 'uniform')

            print('Loading sampler.')
            smp_vars = slim.get_model_variables(scope='sampler_generator')
            tf.train.Saver(smp_vars).restore(sess, self.pretrained_sampler)

            feed_dict = {
                model.noise: noise,
                model.labels: labels,
                model.images: images[:n_samples]
            }

            # fzy: sampled features, fx: real encoder features.
            fzy, fx = sess.run([model.fzy, model.fx], feed_dict)

            labels = np.argmax(labels, 1)

            print('Computing T-SNE.')

            tsne = TSNE(n_components=2, random_state=0)

            variant = sys.argv[2]
            if variant == '1':
                embedded = tsne.fit_transform(fx)
                f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
                for axis in (ax1, ax2):
                    axis.set_facecolor('white')
                ax1.scatter(embedded[:, 0], embedded[:, 1],
                            c=np.ones((n_samples)), s=3,
                            cmap=mpl.cm.jet, alpha=0.5)
                ax2.scatter(embedded[:, 0], embedded[:, 1],
                            c=labels, s=3, cmap=mpl.cm.jet, alpha=0.5)

            elif variant == '2':
                embedded = tsne.fit_transform(np.vstack((fzy, fx)))
                f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
                for axis in (ax1, ax2):
                    axis.set_facecolor('white')
                ax1.scatter(embedded[:, 0], embedded[:, 1],
                            c=np.hstack((np.ones((n_samples, )),
                                         2 * np.ones((n_samples, )))),
                            s=3, cmap=mpl.cm.jet, alpha=0.5)
                ax2.scatter(embedded[:, 0], embedded[:, 1],
                            c=np.hstack((labels, labels)),
                            s=3, cmap=mpl.cm.jet, alpha=0.5)

            plt.legend()
            plt.show()
예제 #22
0
    def train_dsn(self):
        """Adversarially adapt the encoder on NYUD (source -> target).

        Alternates encoder (E) and discriminator (DE) updates, reports
        losses every 10 steps, evaluates target accuracy batchwise every 20
        steps, appends it to a pickled history file, and checkpoints the
        model.

        Fixes vs. original: the body mixed tabs and spaces (a TabError
        under Python 3), used the removed `file` builtin, and pickled
        binary data through a text-mode handle.
        """
        source_images, source_labels = self.load_NYUD(split='source')
        target_images, target_labels = self.load_NYUD(split='target')

        # build a graph
        model = self.model
        model.build_model()

        # start with a clean log directory
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:

            # initialize G and D
            tf.global_variables_initializer().run()

            print('Loading Encoder.')
            variables_to_restore = slim.get_model_variables(scope='vgg_16')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            summary_writer = tf.summary.FileWriter(
                logdir=self.log_dir, graph=tf.get_default_graph())
            saver = tf.train.Saver()

            print('Start training.')

            accTeSet = []  # per-evaluation target accuracy history
            noise_dim = model.noise_dim

            for step in range(10000000):

                # Cyclic batch indices over each domain.
                i = step % int(source_images.shape[0] / self.batch_size)
                j = step % int(target_images.shape[0] / self.batch_size)

                src_images = source_images[i * self.batch_size:
                                           (i + 1) * self.batch_size]
                src_labels = utils.one_hot(
                    source_labels[i * self.batch_size:
                                  (i + 1) * self.batch_size].astype(int),
                    model.no_classes)
                src_noise = utils.sample_Z(self.batch_size, noise_dim,
                                           'uniform')
                trg_images = target_images[j * self.batch_size:
                                           (j + 1) * self.batch_size]

                feed_dict = {
                    model.src_images: src_images,
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.trg_images: trg_images
                }

                sess.run(model.E_train_op, feed_dict)
                sess.run(model.DE_train_op, feed_dict)

                if (step + 1) % 10 == 0:
                    logits_E_real, logits_E_fake = sess.run(
                        [model.logits_E_real, model.logits_E_fake], feed_dict)
                    summary, E, DE = sess.run(
                        [model.summary_op, model.E_loss, model.DE_loss],
                        feed_dict)
                    summary_writer.add_summary(summary, step)
                    print ('Step: [%d/%d] E: [%.6f] DE: [%.6f] E_real: [%.2f] E_fake: [%.2f]' \
                           %(step+1, self.train_iter, E, DE,logits_E_real.mean(),logits_E_fake.mean()))

                if (step + 1) % 20 == 0:
                    trg_acc = 0.
                    for trg_im, trg_lab in zip(
                            np.array_split(target_images, 40),
                            np.array_split(target_labels, 40)):
                        feed_dict = {
                            model.src_images: src_images[0:2],   # dummy
                            model.src_labels: src_labels[0:2],   # dummy
                            model.trg_images: trg_im,
                            model.target_labels: trg_lab
                        }
                        trg_acc_ = sess.run(fetches=model.trg_accuracy,
                                            feed_dict=feed_dict)
                        # weighted average: the last split is smaller
                        trg_acc += (trg_acc_ * len(trg_lab))
                    print ('trg acc [%.4f]' % (trg_acc / len(target_labels)))
                    accTeSet.append(trg_acc / len(target_labels))
                    # binary mode is required for cPickle.HIGHEST_PROTOCOL
                    with open(model.mode + '_test_accuracies.pkl', 'wb') as f:
                        cPickle.dump(accTeSet, f,
                                     protocol=cPickle.HIGHEST_PROTOCOL)
                    saver.save(sess, os.path.join(self.model_save_path, 'dtn'))
예제 #23
0
    def train_dsn(self):
        """Adversarially adapt the encoder on the Office dataset.

        Restores the full pretrained graph plus the sample generator, then
        alternates encoder (E) and discriminator (DE) updates on GPU 1,
        logging losses every 10 steps and checkpointing every 20 steps.

        Cleanup vs. original: removed the unused locals `G_loss`,
        `DG_loss`, `trg_count` and `t`.
        """
        source_images, source_labels = self.load_office(split=self.src_dir)
        target_images, target_labels = self.load_office(split=self.trg_dir)

        # build a graph
        model = self.model
        model.build_model()

        # start with a clean log directory
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            with tf.device('/gpu:1'):

                # initialize G and D
                tf.global_variables_initializer().run()
                # restore variables of F

                print('Loading pretrained model.')
                # Do not change next two lines. Necessary because slim.get_model_variables(scope='blablabla') works only for model built with slim.
                variables_to_restore = tf.global_variables()
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_model)

                print('Loading sample generator.')
                variables_to_restore = slim.get_model_variables(
                    scope='sampler_generator')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_sampler)

                summary_writer = tf.summary.FileWriter(
                    logdir=self.log_dir, graph=tf.get_default_graph())
                saver = tf.train.Saver()

                print('Start training.')

                noise_dim = 100

                for step in range(10000000):

                    # Cyclic batch indices over each domain.
                    i = step % int(source_images.shape[0] / self.batch_size)
                    j = step % int(target_images.shape[0] / self.batch_size)

                    src_images = source_images[i * self.batch_size:(i + 1) *
                                               self.batch_size]
                    src_labels = utils.one_hot(
                        source_labels[i * self.batch_size:(i + 1) *
                                      self.batch_size].astype(int), 31)
                    src_noise = utils.sample_Z(self.batch_size, 100, 'uniform')
                    trg_images = target_images[j * self.batch_size:(j + 1) *
                                               self.batch_size]

                    feed_dict = {
                        model.src_images: src_images,
                        model.src_noise: src_noise,
                        model.src_labels: src_labels,
                        model.trg_images: trg_images
                    }

                    sess.run(model.E_train_op, feed_dict)
                    sess.run(model.DE_train_op, feed_dict)

                    if (step + 1) % 10 == 0:
                        logits_E_real, logits_E_fake = sess.run(
                            [model.logits_E_real, model.logits_E_fake],
                            feed_dict)
                        summary, E, DE = sess.run(
                            [model.summary_op, model.E_loss, model.DE_loss],
                            feed_dict)
                        summary_writer.add_summary(summary, step)
                        print ('Step: [%d/%d] E: [%.6f] DE: [%.6f] E_real: [%.2f] E_fake: [%.2f]' \
                            %(step+1, self.train_iter, E, DE,logits_E_real.mean(),logits_E_fake.mean()))

                    if (step + 1) % 20 == 0:
                        saver.save(sess,
                                   os.path.join(self.model_save_path, 'dtn'))
예제 #24
0
    def check_TSNE(self):
        """Embed NYUD features with t-SNE and plot them.

        sys.argv[1] selects the checkpoint to restore ('test' or
        'pretrain'); sys.argv[2] selects what to embed: '1' source features
        only, '2' sampled + source features, '3' sampled + source + target
        features.

        Fix vs. original: the body mixed tabs and spaces, which is a
        TabError under Python 3.
        """
        source_images, source_labels = self.load_NYUD(split='source')
        target_images, target_labels = self.load_NYUD(split='target')

        # build a graph
        model = self.model
        model.build_model()

        # start with a clean log directory
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        with tf.Session(config=self.config) as sess:
            # initialize G and D
            tf.global_variables_initializer().run()

            # The sampler is only needed when sampled features are embedded.
            if sys.argv[2] in ['2', '3']:
                print('Loading sampler.')
                variables_to_restore = slim.get_model_variables(
                    scope='sampler_generator')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_sampler)

            if sys.argv[1] == 'test':
                print('Loading test model.')
                variables_to_restore = tf.global_variables()
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.test_model)

            elif sys.argv[1] == 'pretrain':
                print('Loading pretrained model.')
                variables_to_restore = slim.get_model_variables(scope='vgg_16')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_model)

            else:
                raise NameError('Unrecognized mode.')

            # Some target samples are discarded so both splits line up.
            n_samples = len(source_labels)
            target_images = target_images[:n_samples]
            target_labels = target_labels[:n_samples]
            assert len(target_labels) == len(source_labels)

            src_labels = utils.one_hot(source_labels.astype(int),
                                       self.no_classes)
            trg_labels = utils.one_hot(target_labels.astype(int),
                                       self.no_classes)

            src_noise = utils.sample_Z(n_samples, model.noise_dim, 'uniform')

            fzy = np.empty((0, model.hidden_repr_size))
            fx_src = np.empty((0, model.hidden_repr_size))
            fx_trg = np.empty((0, model.hidden_repr_size))

            # Extract features batchwise (40 roughly equal splits).
            for src_im, src_lab, trg_im, trg_lab, src_n in zip(
                    np.array_split(source_images, 40),
                    np.array_split(src_labels, 40),
                    np.array_split(target_images, 40),
                    np.array_split(trg_labels, 40),
                    np.array_split(src_noise, 40)):

                feed_dict = {
                    model.src_noise: src_n,
                    model.src_labels: src_lab,
                    model.src_images: src_im,
                    model.trg_images: trg_im
                }

                fzy_, fx_src_, fx_trg_ = sess.run(
                    [model.fzy, model.fx_src, model.fx_trg], feed_dict)

                fzy = np.vstack((fzy, fzy_))
                fx_src = np.vstack((fx_src, np.squeeze(fx_src_)))
                fx_trg = np.vstack((fx_trg, np.squeeze(fx_trg_)))

            src_labels = np.argmax(src_labels, 1)
            trg_labels = np.argmax(trg_labels[:n_samples], 1)

            assert len(src_labels) == len(fx_src)
            assert len(trg_labels) == len(fx_trg)

            print('Computing T-SNE.')

            model = TSNE(n_components=2, random_state=0)

            if sys.argv[2] == '1':
                TSNE_hA = model.fit_transform(np.squeeze(fx_src))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0], TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples)))), s=3,
                            cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0], TSNE_hA[:, 1],
                            c=np.hstack((src_labels)), s=3, cmap=mpl.cm.jet)

            elif sys.argv[2] == '2':
                TSNE_hA = model.fit_transform(np.vstack((fzy, fx_src)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0], TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples,)),
                                         2 * np.ones((n_samples,)))),
                            s=3, cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0], TSNE_hA[:, 1],
                            c=np.hstack((src_labels, src_labels)),
                            s=3, cmap=mpl.cm.jet)

            elif sys.argv[2] == '3':
                TSNE_hA = model.fit_transform(np.vstack((fzy, fx_src,
                                                         fx_trg)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0], TSNE_hA[:, 1],
                            c=np.hstack((src_labels, src_labels,
                                         trg_labels, )),
                            s=3, cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0], TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples,)),
                                         2 * np.ones((n_samples,)),
                                         3 * np.ones((n_samples,)))),
                            s=3, cmap=mpl.cm.jet)

            plt.show()
예제 #25
0
    def check_TSNE(self):
        """Visualize learned feature spaces with 2-D t-SNE scatter plots.

        Command-line driven: sys.argv[1] selects which checkpoint to
        restore ('test', 'pretrain', or 'convdeconv'; anything else raises
        NameError), and sys.argv[2] ('1'-'4') selects which feature sets
        are projected and plotted.

        Side effects: deletes and recreates self.log_dir, restores weights
        into a fresh TF session, and shows matplotlib figures. Returns
        nothing.
        """
        source_images, source_labels = self.load_office(split=self.src_dir)
        target_images, target_labels = self.load_office(split=self.trg_dir)

        # build a graph
        model = self.model
        model.build_model()

        # make directory if not exists (wipes any previous log contents)
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)

        #~ self.config = tf.ConfigProto(device_count = {'GPU': 0})

        with tf.Session(config=self.config) as sess:
            # initialize G and D
            tf.global_variables_initializer().run()

            if sys.argv[1] == 'test':
                print('Loading test model.')
                # Do not change next two lines. Necessary because slim.get_model_variables(scope='blablabla') works only for model built with slim.
                # Restore every variable EXCEPT those in the encoder,
                # sampler, discriminator, or source-train-op scopes.
                variables_to_restore = tf.global_variables()
                variables_to_restore = [
                    v for v in variables_to_restore if np.all([
                        s not in str(v.name) for s in [
                            'encoder', 'sampler_generator', 'disc_e',
                            'source_train_op'
                        ]
                    ])
                ]
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.test_model)
            elif sys.argv[1] == 'pretrain':
                print('Loading pretrained model.')
                # Do not change next two lines. Necessary because slim.get_model_variables(scope='blablabla') works only for model built with slim.
                # Same scope filtering as the 'test' branch, but restoring
                # from the pretrained checkpoint instead.
                variables_to_restore = tf.global_variables()
                variables_to_restore = [
                    v for v in variables_to_restore if np.all([
                        s not in str(v.name) for s in [
                            'encoder', 'sampler_generator', 'disc_e',
                            'source_train_op'
                        ]
                    ])
                ]
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_model)

            elif sys.argv[1] == 'convdeconv':
                print('Loading convdeconv model.')
                variables_to_restore = slim.get_model_variables(
                    scope='conv_deconv')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.convdeconv_model)

            else:
                raise NameError('Unrecognized mode.')

            # Use a fixed subset of 400 samples for the visualization.
            # 31 = number of classes (presumably Office-31 -- verify
            # against self.load_office).
            n_samples = 400
            src_labels = utils.one_hot(source_labels[:n_samples].astype(int),
                                       31)
            trg_labels = utils.one_hot(target_labels[:n_samples].astype(int),
                                       31)
            src_noise = utils.sample_Z(n_samples, 100, 'uniform')

            if sys.argv[1] == 'convdeconv':

                # NOTE(review): src_images is fed the FULL source set here
                # while trg_images is sliced to n_samples -- confirm this
                # asymmetry is intentional.
                feed_dict = {
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.src_images: source_images,
                    model.trg_images: target_images[:n_samples]
                }
                h_repr = sess.run(model.h_repr, feed_dict)

            else:

                print('Loading sampler.')
                variables_to_restore = slim.get_model_variables(
                    scope='sampler_generator')
                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_sampler)

                feed_dict = {
                    model.src_noise: src_noise,
                    model.src_labels: src_labels,
                    model.src_images: source_images[:n_samples],
                    model.trg_images: target_images[:n_samples]
                }

                # fzy: features of sampled (generated) embeddings;
                # fx_src / fx_trg: extracted source / target features.
                fzy, fx_src, fx_trg = sess.run(
                    [model.fzy, model.fx_src, model.fx_trg], feed_dict)

                # Convert one-hot back to class indices for coloring.
                src_labels = np.argmax(src_labels, 1)
                trg_labels = np.argmax(trg_labels, 1)

            print 'Computing T-SNE.'

            # NOTE: rebinds `model` -- from here on it is the sklearn TSNE
            # estimator, not the TF model above.
            model = TSNE(n_components=2, random_state=0)

            # NOTE(review): modes '1'-'3' use fzy/fx_src/fx_trg (defined
            # only on the non-convdeconv path) and mode '4' uses h_repr
            # (convdeconv path only); mismatched argv[1]/argv[2] combos
            # raise NameError.
            if sys.argv[2] == '1':
                # Source features only; figure 2 colored uniformly,
                # figure 3 colored by class label.
                TSNE_hA = model.fit_transform(np.vstack((fx_src)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples)))),
                            s=3,
                            cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((src_labels)),
                            s=3,
                            cmap=mpl.cm.jet)

            elif sys.argv[2] == '2':
                # Sampled embeddings vs. real source features; figure 2
                # colored by domain (1 vs 2), figure 3 by class label.
                TSNE_hA = model.fit_transform(np.vstack((fzy, fx_src)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples, )), 2 * np.ones(
                                (n_samples, )))),
                            s=3,
                            cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((src_labels, src_labels)),
                            s=3,
                            cmap=mpl.cm.jet)

            elif sys.argv[2] == '3':
                # Sampled + source + target features; figure 2 colored by
                # class label, figure 3 by origin (1/2/3).
                TSNE_hA = model.fit_transform(np.vstack((fzy, fx_src, fx_trg)))
                plt.figure(2)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((
                                src_labels,
                                src_labels,
                                trg_labels,
                            )),
                            s=3,
                            cmap=mpl.cm.jet)
                plt.figure(3)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.hstack((np.ones((n_samples, )), 2 * np.ones(
                                (n_samples, )), 3 * np.ones((n_samples, )))),
                            s=3,
                            cmap=mpl.cm.jet)

            elif sys.argv[2] == '4':
                # Conv-deconv hidden representation, colored by target
                # class label (trg_labels is still one-hot on this path).
                TSNE_hA = model.fit_transform(h_repr)
                plt.scatter(TSNE_hA[:, 0],
                            TSNE_hA[:, 1],
                            c=np.argmax(trg_labels, 1),
                            s=3,
                            cmap=mpl.cm.jet)

            plt.show()
예제 #26
0
    def eval_dsn(self):
        """Sample class-conditional images from the trained generator.

        Restores the generator, encoder, and sampler checkpoints, then for
        each digit class 0-9 draws 5000 noise samples, keeps only those
        whose class logit exceeds 8 (high-confidence samples), and saves
        them as grayscale images under ./sample/<digit>/.

        Forces CPU-only execution. Returns nothing.
        """
        # build model
        model = self.model
        model.build_model()

        # Run evaluation on CPU only.
        self.config = tf.ConfigProto(device_count={'GPU': 0})

        with tf.Session(config=self.config) as sess:

            print('Loading pretrained G.')
            variables_to_restore = slim.get_model_variables(scope='generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.test_model)

            print('Loading pretrained E.')
            variables_to_restore = slim.get_model_variables(scope='encoder')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.test_model)

            print('Loading sample generator.')
            variables_to_restore = slim.get_model_variables(
                scope='sampler_generator')
            restorer = tf.train.Saver(variables_to_restore)
            restorer.restore(sess, self.pretrained_sampler)

            # NOTE(review): source_images is never used afterwards and
            # source_labels is overwritten inside the loop below -- this
            # load looks redundant; confirm before removing.
            if self.protocol == 'svhn_mnist':
                source_images, source_labels = self.load_svhn(self.svhn_dir)
            elif self.protocol == 'mnist_usps':
                source_images, source_labels = self.load_mnist(self.mnist_dir)

            # Generate samples for each of the 10 digit classes.
            for n in range(10):

                print n

                no_gen = 5000

                # All no_gen samples are conditioned on class n.
                source_labels = n * np.ones((no_gen, ), dtype=int)

                # train model for source domain S
                src_labels = utils.one_hot(source_labels[:no_gen], 10)
                src_noise = utils.sample_Z(no_gen, 100, 'uniform')

                feed_dict = {
                    model.src_noise: src_noise,
                    model.src_labels: src_labels
                }

                samples, samples_logits = sess.run(
                    [model.sampled_images, model.sampled_images_logits],
                    feed_dict)
                # Keep the logit for the conditioned class only, then
                # filter to confident samples (logit > 8). Samples are
                # masked before the logits so both use the same mask.
                samples_logits = samples_logits[:, n]
                samples = samples[samples_logits > 8.]
                samples_logits = samples_logits[samples_logits > 8.]

                # Save each surviving sample as
                # ./sample/<class>/<idx>_<class>_<logit> (grayscale).
                for i in range(len(samples_logits)):

                    #~ print str(i)+'/'+str(len(samples_logits))-

                    plt.imshow(np.squeeze(samples[i]), cmap='gray')
                    plt.imsave('./sample/' + str(np.argmax(src_labels[i])) +
                               '/' + str(i) + '_' +
                               str(np.argmax(src_labels[i])) + '_' +
                               str(samples_logits[i]),
                               np.squeeze(samples[i]),
                               cmap='gray')

                # NOTE(review): `i` leaks from the loop above -- if no
                # sample passes the threshold on the first class this
                # raises NameError, and otherwise prints a stale index.
                print str(i) + '/' + str(len(samples)), np.argmax(
                    src_labels[i])