# Code example #1 (score: 0)
def train_cvae(X_source, X_aux_list, X_target, X_source_t, X_aux_list_t,
               X_target_t):
    """Train a conditional VAE on the stacked source + auxiliary domains,
    then generate samples conditioned on random domains for the target set.

    Parameters
    ----------
    X_source : ndarray, shape (n_src, 58)
        Source-domain inputs.
    X_aux_list : list of ndarray, each shape (n_i, 58)
        Auxiliary-domain inputs.
    X_target : ndarray, shape (n_tgt, 58)
        Target-domain inputs to run through the trained model.
    X_source_t, X_aux_list_t, X_target_t
        Reconstruction targets matching the corresponding inputs row-for-row.

    Side effects: builds and trains a TF1 graph in a fresh session and writes
    the generated samples to 'abc.txt'.  Relies on the module-level CVAE
    class and the BATCH / EPOCH constants.
    """
    cvae = CVAE(input_shape=58, hidden_layers=1, dims=[45, 30])

    # TF1-style feedable placeholders; the dataset pipeline shuffles and
    # batches (input, condition, label) triples and repeats indefinitely.
    input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 58))
    cond_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 4))
    label_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 58))
    train_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=())

    train_ds = tf.data.Dataset.from_tensor_slices(
        (input_tensor, cond_tensor,
         label_tensor)).shuffle(100000).batch(BATCH).repeat()

    # compat.v1 helper: Dataset.make_initializable_iterator was removed in TF2.
    iterator = tf.compat.v1.data.make_initializable_iterator(train_ds)
    dat, c, lab = iterator.get_next()

    cvae.init_model(dat, c, lab, train_tensor)
    model = cvae.model_
    loss = cvae.loss_

    # Consistent with the compat.v1 usage above (tf.train.AdamOptimizer no
    # longer exists in TF2).
    train_op = tf.compat.v1.train.AdamOptimizer().minimize(loss)

    # Stack source on top of the auxiliary domains and assign integer domain
    # labels: source gets k = len(X_aux_list) + 1, then k-1 .. 1 for the
    # auxiliary domains in order.
    X_train = np.vstack([X_source] + X_aux_list)
    X_train_t = np.vstack([X_source_t] + X_aux_list_t)
    k = len(X_aux_list) + 1
    C_train = [k] * X_source.shape[0]
    for i in range(len(X_aux_list)):
        k -= 1
        C_train += [k] * X_aux_list[i].shape[0]

    print(len(C_train))
    print(X_train.shape)

    # One-hot encode the 1-based domain labels (subtract 1 to index rows).
    C_train = np.array(C_train)
    C_train = np.eye(len(X_aux_list) + 1)[C_train - 1]

    with tf.compat.v1.Session() as sess:

        sess.run(tf.compat.v1.global_variables_initializer())
        sess.run(iterator.initializer,
                 feed_dict={
                     input_tensor: X_train,
                     cond_tensor: C_train,
                     label_tensor: X_train_t,
                     train_tensor: 1
                 })
        # BUG FIX: steps per epoch must come from the sample count
        # (shape[0]), not the feature count (shape[1] == 58).
        steps_per_epoch = X_train.shape[0] // BATCH
        for i in range(EPOCH):
            tot_loss = 0
            for _ in range(steps_per_epoch):
                _, loss_value = sess.run([train_op, loss])
                tot_loss += loss_value
            print("Iter: {}, Loss: {:.4f}".format(i,
                                                  tot_loss / steps_per_epoch))

        # Random domain condition per target row.  np.random.randint replaces
        # the removed np.random.random_integers (its upper bound is exclusive,
        # so len(X_aux_list) + 1 reproduces the old inclusive 0..3 range and
        # generalizes it to the actual domain count used by np.eye below).
        yy = np.random.randint(0, len(X_aux_list) + 1, X_target.shape[0])
        print(yy)
        yy = np.eye(len(X_aux_list) + 1)[yy]

        generated = []
        sess.run(iterator.initializer,
                 feed_dict={
                     input_tensor: X_target,
                     cond_tensor: yy,
                     label_tensor: X_target_t,
                     train_tensor: 1
                 })
        # Draw 2000 batches of model outputs and collect every sample row.
        for i in range(2000):
            for img in sess.run(model['op_tensor']):
                generated.append(np.array(img))

        np.savetxt('abc.txt', np.vstack(generated))
# Code example #2 (score: 0)
print(Y_te.shape)

# TF1 feedable placeholders for MNIST-sized data: 784 pixels in/out, 10-class
# one-hot condition, plus a scalar train-mode flag.
input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 784))
cond_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 10))
label_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=(None, 784))
train_tensor = tf.compat.v1.placeholder(dtype=tf.float32, shape=())

# Shuffle the full 60k training set, batch, and repeat indefinitely.
train_ds = tf.data.Dataset.from_tensor_slices(
    (input_tensor, cond_tensor, label_tensor)).shuffle(60000).batch(BATCH).repeat()

# compat.v1 helper: Dataset.make_initializable_iterator was removed in TF2.
iterator = tf.compat.v1.data.make_initializable_iterator(train_ds)
dat, c, lab = iterator.get_next()

cvae = CVAE(784, hidden_layers=1, dims=[196, 100])
cvae.init_model(dat, c, lab, train_tensor)
model = cvae.model_

loss = cvae.loss_

# Consistent with the compat.v1 graph API used above (tf.train.AdamOptimizer
# no longer exists in TF2).
train_op = tf.compat.v1.train.AdamOptimizer().minimize(loss)
# Training loop: initialize variables, feed the full arrays into the
# reinitializable dataset pipeline (train_tensor=1 marks training mode),
# and run EPOCH passes of Adam updates, accumulating the batch losses.
with tf.Session() as sess:

	sess.run(tf.compat.v1.global_variables_initializer())
	sess.run(iterator.initializer, feed_dict={input_tensor: X_tr, cond_tensor: Y_tr, label_tensor: X_tr, train_tensor: 1})
	for i in range(EPOCH):
		tot_loss = 0
		# NOTE(review): X_tr.shape[1] is the feature count (784 per the
		# placeholder above); steps-per-epoch was almost certainly meant to be
		# X_tr.shape[0] // BATCH (sample count) — confirm before relying on it.
		for _ in range(X_tr.shape[1]//BATCH):
			# Iterator supplies the batches, so no feed_dict is needed here.
			_, loss_value = sess.run([train_op, loss])
								#feed_dict={input_tensor: X_tr, cond_tensor: Y_tr, label_tensor: X_tr, train_tensor: 1})
			tot_loss += loss_value