예제 #1
0
def save_grads(path):
    """Evaluate every generator and discriminator gradient and save to *path*.

    All gradient tensors are fetched in a single ``sess.run`` call (so they
    are consistent with one forward/backward pass over the same feed) and
    written to a ``tensors_saver.Saver`` archive in fetch order: generator
    weight/bias gradients for layers 0-4 first, then the discriminator's.

    Relies on module-level names: ``sess``, the ``g_dw*/g_db*/d_dw*/d_db*``
    gradient tensors, the placeholders ``X``/``z``, and the cached batch
    ``data_x``/``data_z``.
    """
    grads = sess.run(
        [
            g_dw0, g_db0, g_dw1, g_db1, g_dw2, g_db2, g_dw3, g_db3, g_dw4,
            g_db4, d_dw0, d_db0, d_dw1, d_db1, d_dw2, d_db2, d_dw3, d_db3,
            d_dw4, d_db4
        ],
        feed_dict={
            X: data_x,
            z: data_z
        })
    wi = tensors_saver.Saver(path)
    # sess.run returns the fetched values in fetch order, so iterating the
    # result list preserves the original add() sequence exactly.
    for grad in grads:
        wi.add(grad)
    wi.save()
예제 #2
0
    def run(self):
        """Execute the command script, then dump recorded inputs and outputs.

        Each line of the file at ``self.cmdp`` is a comma-separated argument
        list; the first field names the handler, so ``"foo,1,2"`` dispatches
        to ``self.cmd_foo(['foo', '1', '2'])``.  Afterwards the tensors named
        in ``self.input_list``/``self.output_list`` are written to the
        ``self.inp``/``self.outp`` archives.
        """
        with open(self.cmdp, 'r') as f:
            # Iterate the file lazily instead of materializing readlines().
            for line in f:
                args = [field.strip() for field in line.strip().split(',')]
                getattr(self, 'cmd_' + args[0])(args)

        in_saver = tensors_saver.Saver(self.inp)
        for name in self.input_list:
            in_saver.add(self.inputs[name])
        in_saver.save()

        out_saver = tensors_saver.Saver(self.outp)
        for name in self.output_list:
            out_saver.add(self.outputs[name])
        out_saver.save()
예제 #3
0
def save_net(path):
    """Save the first ten generator and discriminator weights to *path*.

    Fetches all twenty tensors in one ``sess.run`` instead of twenty separate
    graph executions, then writes them in the original order (generator
    weights 0-9 first, then discriminator weights 0-9).

    Relies on module-level ``sess``, ``g_weights``, ``d1_weights`` and the
    ``tensors_saver`` module.
    """
    wi = tensors_saver.Saver(path)
    # Build the fetch list by explicit integer indexing (only requires that
    # g_weights/d1_weights are subscriptable with 0..9, as the original did).
    fetches = [g_weights[i] for i in range(10)]
    fetches += [d1_weights[i] for i in range(10)]
    for value in sess.run(fetches):
        wi.add(value)
    wi.save()
예제 #4
0
            # Scatter each (inCh, oCh) entry of x0 into `res`: row = output
            # channel, column = flattened (inCh, hIndex, wIndex) position on
            # a 63x63 grid.  NOTE(review): hIndex/wIndex appear to be loop
            # variables of enclosing loops not visible in this chunk — the
            # 63x63 extent is assumed from the stride arithmetic; confirm
            # against the full function.
            for inCh in range(64):
                for oCh in range(64):
                    res[oCh,
                        inCh * 63 * 63 + hIndex * 63 + wIndex] = x0[hIndex,
                                                                    wIndex,
                                                                    inCh, oCh]
    return res


# Random test tensors: x is an NHWC activation batch, dx a 64x64x64x3 array
# whose role depends on the im2col helper defined above this chunk.
x = np.random.randn(BATCH, 32, 32, 64).astype(np.float32)
dx = np.random.randn(64, 64, 64, 3).astype(np.float32)

# Express the convolution as a plain matrix product using the helpers
# defined elsewhere in this file (padd_ker / im2col / kercol — presumably
# padding and column-flattening transforms; verify against their bodies).
y = padd_ker(x)
dy = im2col(dx)
yker = kercol(y)
conv = yker @ dy

# Quick sanity output: result shape and one spot-checked element.
print(conv.shape)
print("Rand val[481] = {}".format(conv[6, 31]))

# Record the intermediates through the module-level default saver ...
tensors_saver.add(y)
tensors_saver.add(dy)
tensors_saver.add(yker)
tensors_saver.add(conv)
tensors_saver.save()

# ... and the raw inputs to a separate archive at WEIGHTS_PATH.
data = tensors_saver.Saver(WEIGHTS_PATH)
data.add(x)
data.add(dx)
data.save()
예제 #5
0
import os
import sys

import numpy as np
import tensorflow as tf
import gen_mnist
import tensors_saver

BATCH = 64   # rows of the noise-like matrix built below
Z_DIM = 100  # latent dimension (columns)

# CLI contract: argv[1] = weights archive path, argv[2] = output path for the
# module-level default saver, argv[3] = input .npz dataset path.
WEIGHTS_PATH = sys.argv[1]
tensors_saver.set_out_path(sys.argv[2])
new_weights = tensors_saver.Saver(WEIGHTS_PATH)

DATA_PATH = sys.argv[3]
celeba = np.load(DATA_PATH)
imgs = celeba['obj_000000']

# Repurpose the first BATCH*Z_DIM values of the stored array as a
# deterministic (BATCH, Z_DIM) matrix — presumably a reproducible stand-in
# for random z noise; confirm against how `imgs` is fed below.
imgs = np.ravel(imgs)
imgs = imgs[:BATCH * Z_DIM]
imgs = imgs.reshape(BATCH, Z_DIM)


def generator(X, reuse=False):
    """Generator network (definition truncated in this chunk).

    Args:
        X: input tensor fed to the first dense layer.
        reuse: forwarded to ``tf.variable_scope`` so a second call can share
            the already-created variables.
    """

    # Small-stddev normal init, the usual DCGAN choice.
    w_init = tf.random_normal_initializer(stddev=0.02)

    with tf.variable_scope('generator', reuse=reuse):
        # First layer projects X to 512*4*4 units; the call (and the rest of
        # the network) continues beyond this chunk.
        l0 = tf.layers.dense(X,
                             512 * 4 * 4,
예제 #6
0
    # (fragment: the start of this save function — the Saver construction,
    # the generator-weight adds, and d1_weights[0] — lies outside this view)
    wi.add(sess.run(d1_weights[1]))
    wi.add(sess.run(d1_weights[2]))
    wi.add(sess.run(d1_weights[3]))
    wi.add(sess.run(d1_weights[4]))
    wi.add(sess.run(d1_weights[5]))
    wi.add(sess.run(d1_weights[6]))
    wi.add(sess.run(d1_weights[7]))
    wi.add(sess.run(d1_weights[8]))
    wi.add(sess.run(d1_weights[9]))
    wi.save()


# Fixed noise for periodic sampling; persisted once so separate runs can be
# compared against the same seed.
sample_seed = np.random.normal(loc=0.0, scale=1.0,
                               size=(SAMPLE_SIZE, Z_DIM)).astype(np.float32)

sse = tensors_saver.Saver("noze.npz")
sse.add(sample_seed)
sse.save()

# Training
i = 0
for epoch in range(EPOCHS):
    random.shuffle(data_files)
    # Integer division drops the ragged final batch.
    batch_idxs = len(data_files) // BATCH_SIZE

    for idx in range(0, batch_idxs):
        batch_files = data_files[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
        batch = [utils.get_image(f) for f in batch_files]
        batch_images = np.array(batch).astype(np.float32)
        # Fresh Gaussian noise per step (statement truncated in this chunk).
        batch_z = np.random.normal(loc=0.0,
                                   scale=1.0,
예제 #7
0
# Per-variable discriminator gradients (layers 1-4; even indices of
# d1_weights hold weights, odd indices biases — the layer-0 pair is defined
# above this chunk).
d_db1 = tf.gradients(d_loss, d1_weights[3])[0]
d_dw2 = tf.gradients(d_loss, d1_weights[4])[0]
d_db2 = tf.gradients(d_loss, d1_weights[5])[0]
d_dw3 = tf.gradients(d_loss, d1_weights[6])[0]
d_db3 = tf.gradients(d_loss, d1_weights[7])[0]
d_dw4 = tf.gradients(d_loss, d1_weights[8])[0]
d_db4 = tf.gradients(d_loss, d1_weights[9])[0]

# Build one fixed batch: the first BATCH_SIZE normalized CelebA images plus
# one Gaussian noise batch, reused for every gradient dump.
data_files = glob.glob(os.path.join("../celeba_norm", "*.jpg"))
data_x = data_files[:BATCH_SIZE]
data_x = [utils.get_image(f) for f in data_x]
data_x = np.array(data_x).astype(np.float32)
data_z = np.random.normal(loc=0.0, scale=1.0,
                          size=(BATCH_SIZE, Z_DIM)).astype(np.float32)

# Persist the exact inputs so the dumped gradients are reproducible.
inputt = tensors_saver.Saver('input.npz')
inputt.add(data_x)
inputt.add(data_z)
inputt.save()


def save_grads(path):
    """Evaluate all generator/discriminator gradients and save them to *path*.

    Fetches every gradient tensor in a single ``sess.run`` call, feeding the
    fixed ``data_x``/``data_z`` batch prepared above.  (Function body is
    truncated at the end of this chunk.)
    """
    gw0, gb0, gw1, gb1, gw2, gb2, gw3, gb3, gw4, gb4, dw0, db0, dw1, db1, dw2, db2, dw3, db3, dw4, db4 = sess.run(
        [
            g_dw0, g_db0, g_dw1, g_db1, g_dw2, g_db2, g_dw3, g_db3, g_dw4,
            g_db4, d_dw0, d_db0, d_dw1, d_db1, d_dw2, d_db2, d_dw3, d_db3,
            d_dw4, d_db4
        ],
        feed_dict={
            X: data_x,
            z: data_z