Example #1
    def run(self, x):
        """
        Runs the model for a batch of examples.

        Inputs:
            x: a node with shape (batch_size x 1)
        Returns:
            A node with shape (batch_size x 1) containing predicted y-values
        """
        # Two hidden layers with ReLU activations, then a linear output layer.
        x = nn.relu(nn.add_bias(nn.matmul(x, self.w0), self.b0))
        x = nn.relu(nn.add_bias(nn.matmul(x, self.w1), self.b1))
        x = nn.add_bias(nn.matmul(x, self.w2), self.b2)
        return x
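For orientation, here is the same forward pass written in plain NumPy. This is a sketch assuming `nn.matmul`, `nn.add_bias`, and `nn.relu` behave like their obvious NumPy counterparts; in the real library these calls build graph nodes, not arrays.

import numpy as np

def run_numpy(x, w0, b0, w1, b1, w2, b2):
    # Hypothetical mirror of run() above: x is (batch_size, 1) and each
    # wi/bi is a plain ndarray instead of an nn parameter node.
    x = np.maximum(x @ w0 + b0, 0)  # relu(add_bias(matmul(...)))
    x = np.maximum(x @ w1 + b1, 0)
    return x @ w2 + b2              # linear output, no activation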
Example #2
import numpy as np

def test_relu():
    x = np.arange(-2, 2, 0.5)  # [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5]
    print(x)
    expected = [0, 0, 0, 0, 0, 0.5, 1, 1.5]

    out = nn.relu(x)
    print(out, expected)
    assert np.all(out == expected)
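A one-line definition that would satisfy this test (a sketch; the library's actual `nn.relu` may differ):

import numpy as np

def relu(x):
    # Elementwise max(x, 0); broadcasting handles any input shape.
    return np.maximum(x, 0)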
Example #3
    def run(self, x):
        """
        Runs the model for a batch of examples.

        Your model should predict a node with shape (batch_size x 10),
        containing scores. Higher scores correspond to greater probability of
        the image belonging to a particular class.

        Inputs:
            x: a node with shape (batch_size x 784)
        Output:
            A node with shape (batch_size x 10) containing predicted scores
                (also called logits)
        """
        # Same two-hidden-layer MLP shape as Example #1, but the final linear
        # layer emits 10 un-normalized class scores (logits).
        x = nn.relu(nn.add_bias(nn.matmul(x, self.w0), self.b0))
        x = nn.relu(nn.add_bias(nn.matmul(x, self.w1), self.b1))
        x = nn.add_bias(nn.matmul(x, self.w2), self.b2)
        return x
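The docstring ties higher scores to higher class probability; to turn the logits into actual probabilities one would normally apply a softmax, as in this minimal sketch (the `nn` library may well expose its own node for this):

import numpy as np

def softmax(logits):
    # Subtract the row-wise max before exponentiating for numerical stability;
    # this does not change the result because softmax is shift-invariant.
    z = logits - logits.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)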
Example #4
def binary_cross_entropy(x, y):
    # Numerically stable binary cross-entropy on logits x against targets y:
    # equivalent to -[y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x))], with the
    # relu(-x) term factored out so neither exp() can overflow.
    max_val = nn.relu(-x)
    loss = x - dy.cmult(x, y) + max_val + dy.log(dy.exp(-max_val) + dy.exp(-x - max_val))
    return nn.mean(loss)
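The `max_val` term is the log-sum-exp trick: since x + relu(-x) = max(x, 0) and log(exp(-max_val) + exp(-x - max_val)) = -max_val + log(1 + exp(-x)), the whole expression reduces to x - x*y + log(1 + exp(-x)), which is exactly -[y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x))] but cannot overflow exp() for large |x|. A quick NumPy check of the equivalence on moderate logits, where the naive form is still finite:

import numpy as np

x = np.array([-5.0, -0.5, 0.0, 2.0, 5.0])  # logits
y = np.array([0.0, 1.0, 1.0, 0.0, 1.0])    # binary targets

max_val = np.maximum(-x, 0)
stable = x - x * y + max_val + np.log(np.exp(-max_val) + np.exp(-x - max_val))

sig = 1 / (1 + np.exp(-x))
naive = -(y * np.log(sig) + (1 - y) * np.log(1 - sig))

assert np.allclose(stable, naive)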
Example #5
File: model.py  Project: ty359/AI-RENJU
  def __init__(self):

    self.train_rate = .1

    self.chess = tf.placeholder(shape=[rules.N, rules.N], dtype=np.float32, name='chess')

    self.tar_choise = tf.placeholder(shape=[rules.N, rules.N], dtype=np.float32, name='choise')

    with tf.variable_scope('model'):

      # Pad the N x N board to (N+1) x (N+1) so four rounds of 2x2 pooling divide evenly.
      x = tf.pad(self.chess, [[0, 1], [0, 1]])
      x = tf.reshape(x, [1, rules.N + 1, rules.N + 1, 1])

      # Encoder: four conv blocks, each ending with 2x2 pooling and ReLU.
      with tf.variable_scope('layer1'):
        x = nn.conv(x, [3, 3], 4, name='conv1')
        x = nn.conv(x, [3, 3], 8, name='conv2')
        x = nn.pool(x, [2, 2])
        x = nn.relu(x)

      with tf.variable_scope('layer2'):
        x = nn.conv(x, [3, 3], 16, name='conv1')
        x = nn.conv(x, [3, 3], 32, name='conv2')
        x = nn.pool(x, [2, 2])
        x = nn.relu(x)

      with tf.variable_scope('layer3'):
        x = nn.conv(x, [3, 3], 64, name='conv1')
        x = nn.conv(x, [3, 3], 128, name='conv2')
        x = nn.pool(x, [2, 2])
        x = nn.relu(x)

      with tf.variable_scope('layer4'):
        x = nn.conv(x, [3, 3], 256, name='conv1')
        x = nn.conv(x, [3, 3], 512, name='conv2')
        x = nn.pool(x, [2, 2])
        x = nn.relu(x)


      # Decoder: mirror the encoder, unpooling back up to the padded size.
      with tf.variable_scope('layer-4'):
        x = nn.unpool(x)
        x = nn.conv(x, [3, 3], 256, name='conv1')
        x = nn.conv(x, [3, 3], 128, name='conv2')
        x = nn.relu(x)

      with tf.variable_scope('layer-3'):
        x = nn.unpool(x)
        x = nn.conv(x, [3, 3], 64, name='conv1')
        x = nn.conv(x, [3, 3], 32, name='conv2')
        x = nn.relu(x)

      with tf.variable_scope('layer-2'):
        x = nn.unpool(x)
        x = nn.conv(x, [3, 3], 16, name='conv1')
        x = nn.conv(x, [3, 3], 8, name='conv2')
        x = nn.relu(x)

      with tf.variable_scope('layer-1'):
        x = nn.unpool(x)
        x = nn.conv(x, [3, 3], 4, name='conv1')
        x = nn.conv(x, [3, 3], 1, name='conv2')
        x = nn.relu(x)

      x = tf.reshape(x, [rules.N + 1, rules.N + 1])
      x = x[0:rules.N, 0:rules.N]  # crop the padding back off
      
    self.choise = x
      
    self.loss = tf.nn.l2_loss(self.choise - self.tar_choise)
      
    self.opt = tf.train.AdadeltaOptimizer(self.train_rate).minimize(self.loss)

    self.sess = tf.InteractiveSession()

    self.saver = tf.train.Saver(max_to_keep=25)

    self.initer = tf.global_variables_initializer()
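A sketch of how this graph might be driven, assuming the `__init__` above belongs to a class called `Model` here; `load_position` and the checkpoint path are hypothetical stand-ins, not part of the original project:

m = Model()
m.sess.run(m.initer)
board, target = load_position()  # hypothetical: two N x N float32 arrays
_, l = m.sess.run([m.opt, m.loss],
                  feed_dict={m.chess: board, m.tar_choise: target})
m.saver.save(m.sess, 'checkpoints/renju')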
Example #6
import numpy as np

def predict(X, params):
    # Three-layer MLP: two hidden layers with leaky ReLU (slope 0.1),
    # then a linear output layer.
    h = X
    h = relu(np.dot(params["W"][0], h.T).T + params["b"][0], alpha=0.1)
    h = relu(np.dot(params["W"][1], h.T).T + params["b"][1], alpha=0.1)
    h = np.dot(params["W"][2], h.T).T + params["b"][2]
    return h
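The `alpha` keyword suggests `relu` here is a leaky ReLU. A helper matching that signature might look like the following (an assumption; the actual definition is not shown in the snippet):

import numpy as np

def relu(x, alpha=0.0):
    # Leaky ReLU: pass positive values through, scale negatives by alpha.
    return np.where(x > 0, x, alpha * x)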
Example #7
import os
import numpy as np

# Y = [y/np.sum(y) for y in Y]

# Persist a run counter between invocations; note that savetxt/loadtxt store
# plain text despite the .npy extension.
if os.path.exists('index_nn.npy'):
    index_nn = int(np.loadtxt('index_nn.npy')) + 1
else:
    index_nn = 10
np.savetxt('index_nn.npy', [index_nn])

#W = tl.load_Q('data/W'+str(index_nn-1))
# input = X
# output = Y

# Three 9-to-9 linear layers with ReLU activations between them.
l1 = nn.linear(9, 9)
l1.grad_zero()
l1.init_param()
r1 = nn.relu()

l2 = nn.linear(9, 9)
l2.grad_zero()
l2.init_param()
r2 = nn.relu()

l3 = nn.linear(9, 9)
l3.grad_zero()
l3.init_param()

s = nn.softmax()
loss = nn.mse()


def model(x, y):
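The snippet is truncated here. Judging by the layers defined above and the forward pattern visible in Example #9, the body plausibly chains the three linear layers; the `s.forward` and `loss.forward` call signatures below are guesses:

    x = l1.forward(x)
    x = r1.forward(x)
    x = l2.forward(x)
    x = r2.forward(x)
    x = l3.forward(x)
    p = s.forward(x)               # softmax over the 9 outputs
    return p, loss.forward(p, y)   # hypothetical mse.forward(pred, target)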
Example #8
import os
import sys
import pm
import numpy as np
import matplotlib.pyplot as plt

sys.path.append(os.path.abspath('home/navdeep/RLC/Robot-Learning-and-Control'))


x = np.random.randint(0,3,(1,5,5))
y = np.where(x>0,0,1)
y = y.reshape([25])
#  Architecture
conv1 = nn.convolve3d(shape=(10,1,3,3), mode='same')
add1 = nn.add()
relu1 = nn.relu()
conv2 = nn.convolve3d(shape=(1,10,3,3), mode='same')
add2 = nn.add()
lin = nn.linear((25, 25))
sigmoid = nn.sigmoid()
mse = nn.mse()
print('Architecture loaded')

# weights init


layer = [conv1, add1, relu1, conv2, add2, lin, sigmoid, mse]

# compute graph
def model(x, y, update=True):
    x = conv1.forward(x)
Example #9
import pm
import numpy as np
import pickle
import os
import matplotlib.pyplot as plt

# 10,000 points drawn uniformly from [-1.5, 1.5]^2, one-hot labeled by
# whether they fall outside the unit circle.
X = np.random.rand(10000, 2) * 3 - 1.5
Y = np.array([np.where(np.sum(x * x) > 1, [0, 1], [1, 0]) for x in X])

# input = X
# output = Y

l1 = nn.linear(2, 9)
l1.grad_zero()
l1.init_param()
r1 = nn.relu()

l2 = nn.linear(9, 2)
l2.grad_zero()
l2.init_param()

s = nn.softmax()
loss = nn.cre()  # presumably cross-entropy


def model(x, y):
    x = np.array(x)
    y = np.array(y)

    x = l1.forward(x)
    x = r1.forward(x)