import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

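# Broadcast-add a (1, 3) row vector to an (8000, 3) random matrix and save
# the result as float32.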
np.random.seed(3531354)
a = np.random.rand(8000, 3)

b = np.array([
    [1., 5., 3.],
])

a_node = tf.Variable(a, dtype=tf.float32)
b_node = tf.Variable(b, dtype=tf.float32)
res_node = a_node + b_node
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_res = sess.run(res_node)
tensors_saver.add(tf_res.astype(np.float32))
Example #2
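# Fragment: assumes placeholders X and y, a network output l2, the training
# arrays X_train / y_train, and the weights / new_weights caching objects are
# defined above (not shown in this listing).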
loss = tf.losses.mean_squared_error(y, l2)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

data = {
    X: X_train[0:20],
    y: y_train[0:20]
}
data2 = {
    X: X_train[20:50],
    y: y_train[20:50]
}
data3 = {
    X: X_train[50:80],
    y: y_train[50:80]
}

tensors_saver.add(sess.run(l2, feed_dict=data))
tensors_saver.add(sess.run(loss, feed_dict=data))

tensors_saver.add(sess.run(l2, feed_dict=data2))
tensors_saver.add(sess.run(loss, feed_dict=data2))

tensors_saver.add(sess.run(l2, feed_dict=data3))
tensors_saver.add(sess.run(loss, feed_dict=data3))

if weights is None:
    new_weights.save()
Example #3
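# Apply tf.nn.leaky_relu to a fixed feature row and save the result as
# float32.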
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

features = np.array(
    [[0.1, 1.2, -4.3, 4.1, -0.2, 7.3, 0.06, 2.01, 0.23, 5.6, 2.3, 1.18]])

y_hat_node = tf.nn.leaky_relu(features)
sess = tf.Session()
tf_y_hat = sess.run(y_hat_node)
tensors_saver.add(tf_y_hat.astype(np.float32))
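Example #4

# Scaled sum c1 * a + c2 * b of two (8000, 2) random matrices (re-seeding with
# the same value makes a and b identical); the result is saved as float32.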
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

np.random.seed(3531354)
a = np.random.rand(8000, 2)

np.random.seed(3531354)
b = np.random.rand(8000, 2)

c1 = 5.7
c2 = 3.9

a_node = tf.Variable(a, dtype=tf.float32)
b_node = tf.Variable(b, dtype=tf.float32)

res_node = c1 * a_node + c2 * b_node

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_res = sess.run(res_node).astype(np.float32)
tensors_saver.add(tf_res)
Example #5
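# Fragment: the top of this example is missing; it presumably defines the RNG
# state array s, a seed() helper, the first half of next_u64() (which computes
# the temporary t), and imports numpy as np.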
    s[0] = s[1]
    s[1] = s[2]
    s[2] = s[3]
    s[3] = s[3] ^ (s[3] >> np.uint64(19)) ^ t ^ (t >> np.uint64(8))
    return s[3]


def next_f32():
    x = np.float32(next_u64())
    div = np.float32(0xFFFFFFFFFFFFFFFF)
    return x / div


def np_f32(shape):

    res = np.empty(shape).astype(np.float32)
    res2 = res.reshape(res.size)
    for i in range(res2.size):
        res2[i] = next_f32()
    return res


import tensors_saver

if __name__ == '__main__':

    seed(234)

    tensors_saver.set_out_path('./out.npz')
    tensors_saver.add(np_f32((145, 18, 12, 34)))
Example #6
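# Assumed preamble, matching the other examples in this listing (the original
# fragment starts without it).
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

# Dense layer y_hat = x @ w + b with an MSE loss; the gradients w.r.t. x, w,
# b, and y_hat are saved as float32.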
x = np.array([[1., 2, 4], [4.1, 0.5, 7], [2, 2, 8], [5, 2.3, 1.1]])

w = np.array([[1., 5.], [2., 4], [3, 8]])

b = np.array([0.5, -4.6])

y = np.array([[0.1, 1.2], [4.1, 0.2], [0.06, 2.01], [5.6, 2.3]])

x_node = tf.Variable(x, dtype=tf.float32)
w_node = tf.Variable(w, dtype=tf.float32)
b_node = tf.Variable(b, dtype=tf.float32)
y_hat_node = tf.matmul(x_node, w_node) + b_node
y_node = tf.Variable(y, dtype=tf.float32)
loss_node = tf.losses.mean_squared_error(y_node, y_hat_node)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

dx_node, dw_node, db_node, dy_hat_node = tf.gradients(
    loss_node, [x_node, w_node, b_node, y_hat_node])

tf_dx = sess.run(dx_node)
tf_dw = sess.run(dw_node)
tf_db = sess.run(db_node)
tf_dy_hat = sess.run(dy_hat_node)

tensors_saver.add(tf_dx.astype(np.float32))
tensors_saver.add(tf_dw.astype(np.float32))
tensors_saver.add(tf_db.astype(np.float32))
tensors_saver.add(tf_dy_hat.astype(np.float32))
Example #7
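# Fragment: assumes a GAN graph built above (placeholder z, input batch imgs,
# generator nodes g_nodes / g_logits / g_out, generator weights g_weights, and
# discriminator nodes d1_nodes / d1_logits), plus the usual preamble.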
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d1_logits),
                                            logits=d1_logits))
d1_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d1_logits),
                                            logits=d1_logits))

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

l0_tf, l1_tf, l2_tf, l3_tf, logits_tf, out_tf, loss_tf = sess.run(
    [g_nodes[0], g_nodes[1], g_nodes[2], g_nodes[3], g_logits, g_out, g_loss],
    feed_dict={z: imgs})

tensors_saver.add(l0_tf)
tensors_saver.add(l1_tf)
tensors_saver.add(l2_tf)
tensors_saver.add(l3_tf)
tensors_saver.add(logits_tf)
tensors_saver.add(out_tf)

tensors_saver.add(sess.run(d1_nodes[0], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_nodes[1], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_nodes[2], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_nodes[3], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_logits, feed_dict={z: imgs}))

tensors_saver.add(loss_tf)

g_dw0 = tf.gradients(g_loss, g_weights[0])[0]
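Example #8

# Fragment: the opening `if` of this block (presumably a check on
# WEIGHTS_PATH), the parameters w1 / b1 / w2 / b2, loss, train_op, and the
# feed dict `data` are defined above (not shown). Cached weights are either
# restored or written to WEIGHTS_PATH, then one training step is run and the
# updated weights are saved.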
    print('get old weights')
    weights = [x for _, x in np.load(WEIGHTS_PATH).items()]
    sess.run(tf.assign(w1, weights[0]))
    sess.run(tf.assign(b1, weights[1]))
    sess.run(tf.assign(w2, weights[2]))
    sess.run(tf.assign(b2, weights[3]))
else:
    print('save new weights')
    weights = tensors_saver.Saver(WEIGHTS_PATH)
    weights.add(sess.run(w1))
    weights.add(sess.run(b1))
    weights.add(sess.run(w2))
    weights.add(sess.run(b2))
    weights.save()


sess.run(train_op, feed_dict=data)

tensors_saver.add(sess.run(w1))
tensors_saver.add(sess.run(b1))
tensors_saver.add(sess.run(w2))
tensors_saver.add(sess.run(b2))

print(sess.run(loss, feed_dict=data))

#print(w1.get_shape())
#print(b1.get_shape())
#print(w2.get_shape())
#print(b2.get_shape())

Example #9
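# Strided (2x2) SAME convolution on random inputs; the output is saved, and
# the inputs are written to WEIGHTS_PATH.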
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

WEIGHTS_PATH = sys.argv[1]
tensors_saver.set_out_path(sys.argv[2])

BATCH = 64

x = np.random.randn(BATCH, 16, 16, 128).astype(np.float32)
w = np.random.randn(5, 5, 128, 256).astype(np.float32)

x_node = tf.Variable(x)
w_node = tf.Variable(w)
y_node = tf.nn.conv2d(x_node, w_node, [1, 2, 2, 1], 'SAME')

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

y = sess.run(y_node)
tensors_saver.add(y)

data = tensors_saver.Saver(WEIGHTS_PATH)
data.add(x)
data.add(w)
data.save()
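Example #10

# Assumed preamble, matching the other examples in this listing (the original
# fragment starts without it).
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

# NHWC bias_add followed by an MSE loss against Y; the gradients w.r.t. the
# biased output, the bias, and Y_HAT are saved.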
Y_HAT = np.array([[[[9., 13.], [2., 12.]], [[-8., 9.], [3., -9.]]],
                  [[[11., 3.], [0., -22.]], [[3., 9.], [18., -8.]]]])

bias = np.array([1, 2])

Y = np.array([[[[7., 11.], [-1., 4.]], [[-6., 3.], [5., 4.]]],
              [[[15., 4.], [-2., -22.]], [[0., 9.], [22., -6.]]]])

y_hat_node = tf.Variable(Y_HAT, dtype=tf.float32)
y_node = tf.Variable(Y, dtype=tf.float32)
bias_node = tf.Variable(bias, dtype=tf.float32)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
res = tf.nn.bias_add(y_hat_node, bias_node, data_format='NHWC')
res_tf = sess.run(res)
mse_node = tf.losses.mean_squared_error(labels=y_node, predictions=res)
mse_val = sess.run(mse_node)

db_node, dy_hat_node, d_res = tf.gradients(mse_node,
                                           [bias_node, y_hat_node, res])
db_tf = sess.run(db_node)
dy_hat_tf = sess.run(dy_hat_node)
d_res_tf = sess.run(d_res)

tensors_saver.add(d_res_tf)
tensors_saver.add(db_tf)
tensors_saver.add(dy_hat_tf)
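Example #11

# Fragment: cut off inside the kernel literal; the opening of the kernel
# definition, the `input` tensor, and the usual preamble are defined above
# (not shown).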
            [1, 0]
        ],
        [
            [-1, -1],
            [1, 0],
            [0, 1]
        ]
    ], [
        [
            [0, -1],
            [0, 0],
            [1, -1]
        ],
        [
            [1, 1],
            [-1, 0],
            [1, -1]
        ],
        [
            [-1, 0],
            [-1, 0],
            [-1, 1]
        ]
    ]
], dtype=tf.float32, name='kernel')

res = tf.nn.conv2d(input, kernel, [1, 2, 2, 1], "SAME")
sess = tf.Session()
conv = sess.run(res)
tensors_saver.add(conv)
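Example #12

# Fragment: cut off inside an array literal (apparently the target Y); the
# `input` and `w` arrays and the usual preamble are defined above (not shown).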
                [0., -1.], [1., -2.], [0., 0.]],
               [[1., 1.], [5., -1.], [-15., -1.], [-7., 15.], [-1., 1.],
                [3., 1.], [9., -9.], [10., -9.]],
               [[5., -4.], [6., -6.], [0., -8.], [-15., 8.], [4., -4.],
                [3., -4.], [1., -1.], [10., -1.]]]])

input_node = tf.Variable(input, dtype=tf.float32)
w_node = tf.Variable(w, dtype=tf.float32)
y_node = tf.Variable(Y, dtype=tf.float32)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

y_hat_node = tf.nn.conv2d_transpose(input_node, w_node, [2, 8, 8, 2],
                                    [1, 2, 2, 1], "VALID")
y_hat_tf = sess.run(y_hat_node)
mse_node = tf.losses.mean_squared_error(labels=y_node, predictions=y_hat_node)
mse_val = sess.run(mse_node)

dx_node, dw_node, dy_hat_node = tf.gradients(mse_node,
                                             [input_node, w_node, y_hat_node])
dx_tf = sess.run(dx_node)
dw_tf = sess.run(dw_node)
dy_hat_tf = sess.run(dy_hat_node)

tensors_saver.add(dx_tf.astype(np.float32))
tensors_saver.add(dw_tf.astype(np.float32))
tensors_saver.add(dy_hat_tf.astype(np.float32))
tensors_saver.add(y_hat_tf.astype(np.float32))
Example #13
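# Load the first 10 images from the celebA directory, resize them to 64x64,
# scale to [0, 1], and save the resulting batch.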
import os
import sys
import numpy as np
import tensors_saver
from PIL import Image

if os.path.isfile(sys.argv[2]): sys.exit(0)

tensors_saver.set_out_path(sys.argv[2])
SRC_DIR = sys.argv[1]
CELEB_DIR = os.path.join(SRC_DIR, 'celebA')


def get_file(path):
    im = Image.open(os.path.join(CELEB_DIR, path))
    im = im.resize((64, 64), Image.ANTIALIAS)
    im = np.array(im)
    return im


files = os.listdir(CELEB_DIR)[:10]
data = np.array([get_file(x) for x in files]).astype(np.float32) / 255
tensors_saver.add(data)
Example #14
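# Count how many rows of y_hat put their largest value in the position marked
# by the one-hot labels y.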
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

y_hat = np.array([[0.1, 0.2, 0.7], [0.8, .1, .1], [0.1, 0.3, 0.6],
                  [.6, .2, .2], [.1, .1, .8], [.2, .3, .5], [.7, .1, .2],
                  [.4, .3, .3], [.2, .1, .7], [.8, .1, .1]])

y = np.array([[0., 1, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 1, 0],
              [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])

y_node = tf.Variable(y, dtype=tf.float32)
y_hat_node = tf.Variable(y_hat, dtype=tf.float32)

acc_node = tf.reduce_sum(
    tf.cast(tf.equal(tf.argmax(y_node, 1), tf.argmax(y_hat_node, 1)),
            tf.float32))

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_acc = sess.run(acc_node).astype(np.float32)
tensors_saver.add(tf_acc)
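Example #15

# Mean sigmoid cross-entropy between two identical random tensors (the seed is
# reset before each draw); the loss and d(loss)/dx are saved.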
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

np.random.seed(3531354)
x = np.random.rand(8000, 3)

np.random.seed(3531354)
y = np.random.rand(8000, 3)

x_node = tf.Variable(x, dtype=tf.float32)
y_node = tf.Variable(y, dtype=tf.float32)
cross_node = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_node,
                                                     logits=x_node)
loss_node = tf.reduce_mean(cross_node)
dx_node = tf.gradients(loss_node, x_node)[0]

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_loss = sess.run(loss_node)
tf_dx = sess.run(dx_node)
tensors_saver.add(tf_loss)
tensors_saver.add(tf_dx)
Example #16
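# tanh activation followed by an MSE loss; the activation, the loss, and the
# gradients w.r.t. x and y_hat are saved.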
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

x = np.array([[0.1, 0.2, 0.7], [0.8, .1, .1], [0.1, 0.3, 0.6], [.6, .2, .2]])

y = np.array([[0.1, 1.2, 4.3], [4.1, 0.2, 7.3], [0.06, 2.01, 0.23],
              [5.6, 2.3, 1.18]])

x_node = tf.Variable(x, dtype=tf.float32)
y_node = tf.Variable(y, dtype=tf.float32)
y_hat_node = tf.tanh(x_node)
loss_node = tf.losses.mean_squared_error(y_node, y_hat_node)
dx_node, dy_hat_node = tf.gradients(loss_node, [x_node, y_hat_node])

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_y_hat = sess.run(y_hat_node)
tf_loss = sess.run(loss_node)
tf_dx = sess.run(dx_node)
tf_dy_hat = sess.run(dy_hat_node)
tensors_saver.add(tf_y_hat)
tensors_saver.add(tf_loss)
tensors_saver.add(tf_dx)
tensors_saver.add(tf_dy_hat)
Example #17
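# Column-wise sum (reduction over axis 0) of a fixed 3x2 matrix.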
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

x = np.array([[1., 2.], [3., 4.], [5., 6.]])

x_node = tf.Variable(x, dtype=tf.float32)
y_node = tf.reduce_sum(x_node, axis=0)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_y = sess.run(y_node)
tensors_saver.add(tf_y)
Example #18
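# Fragment: assumes a discriminator network built above (placeholder X, input
# batch imgs, layers l1..l4, and parameters w0..w4 / b0..b4), plus the usual
# preamble. The gradient nodes at the end are built but not evaluated here.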
logits = l4
#prob = tf.nn.sigmoid(l4)

loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                            labels=tf.ones_like(logits)))

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

l1_tf, l3_tf, logits_tf, loss_tf = sess.run([l1, l3, logits, loss],
                                            feed_dict={X: imgs})

tensors_saver.add(l1_tf)
tensors_saver.add(l3_tf)
tensors_saver.add(logits_tf)
tensors_saver.add(loss_tf)

dw0 = tf.gradients(loss, w0)[0]
db0 = tf.gradients(loss, b0)[0]
dw1 = tf.gradients(loss, w1)[0]
db1 = tf.gradients(loss, b1)[0]
dw2 = tf.gradients(loss, w2)[0]
db2 = tf.gradients(loss, b2)[0]
dw3 = tf.gradients(loss, w3)[0]
db3 = tf.gradients(loss, b3)[0]
dw4 = tf.gradients(loss, w4)[0]
db4 = tf.gradients(loss, b4)[0]
Example #19
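# Default (NHWC) bias_add of a length-2 bias on a fixed 2x2x2x2 tensor.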
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

out = np.array([[[[14., 12.], [7., -6.]], [[-5., -5.], [9., -11.]]],
                [[[19., -1.], [-2., 2.]], [[7., 11.], [12., -11.]]]])

bias = np.array([2., -1.])

out_node = tf.Variable(out, dtype=tf.float32)
bias_node = tf.Variable(bias, dtype=tf.float32)
res = tf.nn.bias_add(out_node, bias_node)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
res_tf = sess.run(res)
tensors_saver.add(res_tf)
Example #20
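# Mean softmax cross-entropy loss and its gradient w.r.t. the logits x (x and
# y are identical because the seed is reset); both are saved as float32.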
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

np.random.seed(3531354)
x = np.random.rand(8000, 3)

np.random.seed(3531354)
y = np.random.rand(8000, 3)

x_node = tf.Variable(x)
y_node = tf.Variable(y)
loss_node = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_node, logits=x_node))
dx_node = tf.gradients(loss_node, [x_node])[0]

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_loss = sess.run(loss_node)
tf_dx = sess.run(dx_node)
tensors_saver.add(tf_loss.astype(np.float32))
tensors_saver.add(tf_dx.astype(np.float32))
Example #21
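# Row-wise softmax of a fixed logits matrix.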
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

tensors_saver.set_out_path(sys.argv[1])

logits = np.array([[0.1, 1.2, 4.3], [4.1, 0.2, 7.3], [0.06, 2.01, 0.23],
                   [5.6, 2.3, 1.18]])

logits_node = tf.Variable(logits, dtype=tf.float32)
y_hat_node = tf.nn.softmax(logits_node)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

tf_y_hat = sess.run(y_hat_node)
tensors_saver.add(tf_y_hat)
Example #22
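# SAME conv2d_transpose (stride 2) on random inputs; the output is saved, and
# the inputs are written to WEIGHTS_PATH.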
import os
import sys

import numpy as np
import tensorflow as tf
import tensors_saver

WEIGHTS_PATH = sys.argv[1]
tensors_saver.set_out_path(sys.argv[2])

BATCH = 64

x = np.random.randn(BATCH, 4, 4, 512).astype(np.float32)
w = np.random.randn(5, 5, 256, 512).astype(np.float32)
y = np.random.randn(BATCH, 8, 8, 256).astype(np.float32)

x_node = tf.Variable(x)
w_node = tf.Variable(w)
y_node = tf.Variable(y)
yh_node = tf.nn.conv2d_transpose(x_node, w_node, y.shape, [1, 2, 2, 1], 'SAME')

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

res = sess.run(yh_node)
tensors_saver.add(res)

data = tensors_saver.Saver(WEIGHTS_PATH)
data.add(x)
data.add(w)
data.save()
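Example #23

# Fragment: cut off inside one of the helper functions; padd_ker, im2col,
# kercol, BATCH, WEIGHTS_PATH, and the usual preamble are defined above (not
# shown). The intermediate tensors and the product yker @ dy are saved, and
# x and dx are written to WEIGHTS_PATH.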
            for inCh in range(64):
                for oCh in range(64):
                    res[oCh,
                        inCh * 63 * 63 + hIndex * 63 + wIndex] = x0[hIndex,
                                                                    wIndex,
                                                                    inCh, oCh]
    return res


x = np.random.randn(BATCH, 32, 32, 64).astype(np.float32)
dx = np.random.randn(64, 64, 64, 3).astype(np.float32)

y = padd_ker(x)
dy = im2col(dx)
yker = kercol(y)
conv = yker @ dy

print(conv.shape)
print("Rand val[481] = {}".format(conv[6, 31]))

tensors_saver.add(y)
tensors_saver.add(dy)
tensors_saver.add(yker)
tensors_saver.add(conv)
tensors_saver.save()

data = tensors_saver.Saver(WEIGHTS_PATH)
data.add(x)
data.add(dx)
data.save()
Example #24
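# Fragment: same missing context as Example #7 (placeholder z, input batch
# imgs, generator nodes g_nodes / g_logits / g_out, and discriminator nodes
# d1_nodes / d1_logits), plus the usual preamble.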
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d1_logits),
                                            logits=d1_logits))
d1_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d1_logits),
                                            logits=d1_logits))

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

l0_tf, l1_tf, l2_tf, l3_tf, logits_tf, out_tf, loss_tf = sess.run(
    [g_nodes[0], g_nodes[1], g_nodes[2], g_nodes[3], g_logits, g_out, g_loss],
    feed_dict={z: imgs})

tensors_saver.add(l0_tf)
tensors_saver.add(l1_tf)
tensors_saver.add(l2_tf)
tensors_saver.add(l3_tf)
tensors_saver.add(logits_tf)
tensors_saver.add(out_tf)

tensors_saver.add(sess.run(d1_nodes[0], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_nodes[1], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_nodes[2], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_nodes[3], feed_dict={z: imgs}))
tensors_saver.add(sess.run(d1_logits, feed_dict={z: imgs}))

tensors_saver.add(loss_tf)