Example 1
def nn():
    return Sequential([
        Linear(input_dim=784,
               output_dim=1296,
               act='relu',
               batch_size=FLAGS.batch_size,
               keep_prob=0.8),
        Linear(1296, act='relu'),
        Linear(1296, act='relu'),
        Linear(10, act='relu'),
        # Softmax()
    ])
Example 2
def main():

    from numpy.random import random
    from numpy.random import randint

    from iutils.render import Render
    from modules.linear import Linear

    render = Render(SIZE, BACK, FRONT)
    render.clear_canvas()

    nsteps = 500
    height = 1.0

    for i in range(20):

        start = random(size=(1, 2))
        start_w = 0
        grains = randint(20, 150)
        scale = 0.005 + random() * 0.02
        L = Linear(SIZE, height, start, start_w)
        L.steps(nsteps, scale=scale)
        show(render, L, grains)

    render.write_to_png('./linear.png')
Example 3
    def __init__(self, input_size, lstm_size, output_size, full_grad=False):
        super(LegoModel, self).__init__()
        self.input_size = input_size
        self.lstm_size = lstm_size
        self.output_size = output_size
        self.full_grad = full_grad

        self.lstm = LSTM(input_size, lstm_size)
        self.out = Linear(lstm_size, output_size)
        # self.f = Sigmoid()
        self.f = Softmax()
Example 4
    def __init__(self, input_size, embedding_size, lstm_size, output_size):
        super(LanguageModel, self).__init__()
        self.input_size = input_size
        self.embedding_size = embedding_size
        self.lstm_size = lstm_size
        self.output_size = output_size

        self.emb = Embedding(input_size, embedding_size)
        self.lstm = LSTM(embedding_size, lstm_size)
        self.out = Linear(lstm_size, output_size)

        self.h_0, self.c_0 = None, None
Example 5
def layers(x):
    # Define the layers of your network here

    return Sequential([
        Linear(input_dim=784,
               output_dim=1296,
               act='relu',
               batch_size=FLAGS.batch_size),
        Linear(1296, act='relu'),
        Linear(1296, act='relu'),
        Linear(10),
        Softmax()
    ])
Example 6
class LegoModel(Module):
    def __init__(self, input_size, lstm_size, output_size, full_grad=False):
        super(LegoModel, self).__init__()
        self.input_size = input_size
        self.lstm_size = lstm_size
        self.output_size = output_size
        self.full_grad = full_grad

        self.lstm = LSTM(input_size, lstm_size)
        self.out = Linear(lstm_size, output_size)
        # self.f = Sigmoid()
        self.f = Softmax()

    def forward(self, X):
        # X is expected to have shape (seq_len, batch_size, input_size)
        seq_len = X.shape[0]
        batch_size = X.shape[1]

        # Random initial hidden and cell states for the LSTM
        h_0 = np.random.randn(batch_size, self.lstm_size).astype(np.float32)
        c_0 = np.random.randn(batch_size, self.lstm_size).astype(np.float32)

        lstm_out, _ = self.lstm(X, (h_0, c_0))

        if self.full_grad:
            # Project the output at every timestep
            out = self.out(lstm_out)
        else:
            # Project only the last timestep's output
            out = self.out(lstm_out[-1])

        return out

    def backward(self, dLdOut):
        # dLdIn = self.f.backward(dLdOut)
        # dLdIn = self.out.backward(dLdIn)
        dLdIn = self.out.backward(dLdOut)
        dLdIn = self.lstm.backward(dLdIn)
        return dLdIn
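
A minimal usage sketch for the model above (not part of the original example: it assumes LegoModel and its LSTM/Linear/Softmax dependencies are importable, and that X is laid out as (seq_len, batch_size, input_size), matching the indexing in forward()):

import numpy as np

seq_len, batch_size = 10, 4
input_size, lstm_size, output_size = 8, 16, 3
model = LegoModel(input_size, lstm_size, output_size)

X = np.random.randn(seq_len, batch_size, input_size).astype(np.float32)
out = model.forward(X)          # last-timestep logits, shape (batch_size, output_size)

dLdOut = np.ones_like(out)      # placeholder upstream gradient
dLdIn = model.backward(dLdOut)  # gradient with respect to the LSTM inputs
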
Example 7
def nn():
    return Sequential([
        Linear(input_dim=166,
               output_dim=256,
               act='relu',
               batch_size=FLAGS.batch_size),
        Linear(256, act='relu'),
        Linear(128, act='relu'),
        Linear(64, act='relu'),
        Linear(64, act='relu'),
        Linear(32, act='relu'),
        Linear(16, act='relu'),
        Linear(8, act='relu'),
        Linear(3, act='relu'),
        Softmax()
    ])
Example 8
def discriminator():
    return Sequential([
        Convolution(input_depth=1,
                    output_depth=32,
                    act='tanh',
                    batch_size=FLAGS.batch_size,
                    input_dim=28),
        MaxPool(),
        Convolution(output_depth=64, act='tanh'),
        MaxPool(),
        Linear(1)
    ])
Example 9
class LanguageModel(Module):
    def __init__(self, input_size, embedding_size, lstm_size, output_size):
        super(LanguageModel, self).__init__()
        self.input_size = input_size
        self.embedding_size = embedding_size
        self.lstm_size = lstm_size
        self.output_size = output_size

        self.emb = Embedding(input_size, embedding_size)
        self.lstm = LSTM(embedding_size, lstm_size)
        self.out = Linear(lstm_size, output_size)

        self.h_0, self.c_0 = None, None

    def reset_hidden(self, batch_size):
        # Draw fresh random initial hidden and cell states for the LSTM
        self.h_0 = np.random.randn(batch_size,
                                   self.lstm_size).astype(np.float32)
        self.c_0 = np.random.randn(batch_size,
                                   self.lstm_size).astype(np.float32)

    def forward(self, X):
        seq_len = X.shape[0]
        batch_size = X.shape[1]

        E = self.emb(X)

        if self.h_0 is None:
            self.reset_hidden(batch_size)

        lstm_out, _ = self.lstm(E, (self.h_0, self.c_0))

        out = self.out(lstm_out)

        return out

    def backward(self, dLdOut):
        # dLdIn = self.f.backward(dLdOut)
        # dLdIn = self.out.backward(dLdIn)
        dLdIn = self.out.backward(dLdOut)
        dLdIn = self.lstm.backward(dLdIn)
        dLdIn = np.stack(dLdIn, 0)
        self.emb.backward(dLdIn)
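
A similar hypothetical sketch for the language model (assuming integer token ids in X with shape (seq_len, batch_size), that Embedding/LSTM/Linear are importable, and that output_size equals the vocabulary size; reset_hidden is called explicitly so the batch size is known before forward()):

import numpy as np

vocab_size, embedding_size, lstm_size = 100, 32, 64
seq_len, batch_size = 20, 8

lm = LanguageModel(vocab_size, embedding_size, lstm_size, vocab_size)
lm.reset_hidden(batch_size)

X = np.random.randint(0, vocab_size, size=(seq_len, batch_size))
out = lm.forward(X)  # per-timestep outputs from the Linear head
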
Example 10
def generator():
    #pdb.set_trace()
    return Sequential([
        Linear(input_dim=1024,
               output_dim=7 * 7 * 128,
               act='tanh',
               batch_size=FLAGS.batch_size),
        Convolution(input_dim=7, input_depth=128, output_depth=32,
                    act='tanh'),  #4x4
        Upconvolution(output_depth=128, kernel_size=3),  #8x8
        Upconvolution(output_depth=256,
                      kernel_size=5,
                      stride_size=1,
                      act='tanh',
                      pad='VALID'),  #12x12
        Upconvolution(output_depth=32, kernel_size=3, act='tanh'),  #24X24 
        Upconvolution(output_depth=1,
                      kernel_size=5,
                      stride_size=1,
                      act='tanh',
                      pad='VALID'),  #28X28
    ])
    if train:
        xs, ys = mnist.train.next_batch(batch_size)
    else:
        xs, ys = mnist.test.next_batch(batch_size)
    return (2 * xs) - 1, ys


mnist = input_data.read_data_sets('data', one_hot=True)

with tf.Session() as sess:

    # GRAPH
    net = Sequential([
        Linear(input_dim=784,
               output_dim=1200,
               act='relu',
               batch_size=batch_size,
               keep_prob=dropout),
        Linear(500, act='relu', keep_prob=dropout),
        Linear(10, act='linear', keep_prob=dropout),
        Softmax()
    ])

    x = tf.placeholder(tf.float32, [batch_size, 784], name='x-input')
    y_labels = tf.placeholder(tf.float32, [batch_size, 10], name='y-input')

    y_pred = net.forward(x)

    correct_prediction = tf.equal(tf.argmax(y_labels, axis=1),
                                  tf.argmax(y_pred, axis=1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
Example 12
def main():
    global EPOCHS, BATCH_SIZE, LEARNING_RATE
    # train_X, test_X, train_y, test_y = get_iris_data()

    # Saver
    name = ""

    print("Train? (y for train, n for test)")
    choice = input()
    train_flag = True
    if choice == 'n' or choice == 'N':
        df = pd.read_csv("data/out-test.csv")
        BATCH_SIZE = df.shape[0]
        EPOCHS = 1
        train_flag = False
        name = input("Enter model file name: ")
    else:
        df = pd.read_csv("data/out-train.csv")

    # Use every column except the one at index 1 as features
    cols = df.columns.values
    cols = np.delete(cols, [1])
    train_X = df.loc[:, cols].values

    train_y = df["decile_score"].values
    y_train_ = train_y
    train_y = keras.utils.np_utils.to_categorical(train_y)



    print(train_X.shape)
    print(train_y.shape)
    # exit()
    # Layer's sizes
    x_size = train_X.shape[1]   # Number of input features
    # h_size_1 = 256                                # Number of hidden nodes
    # h_size_2 = 256                                # Number of hidden nodes
    # h_size_3 = 128                                # Number of hidden nodes
    # h_size_4 = 64                                  # Number of hidden nodes
    # h_size_5 = 64                                  # Number of hidden nodes
    # h_size_6 = 32                                  # Number of hidden nodes
    # h_size_7 = 16                                  # Number of hidden nodes
    # h_size_8 = 8                                  # Number of hidden nodes
    y_size = train_y.shape[1]   # Number of output classes

    # Symbols
    X = tf.placeholder("float", shape=[None, x_size])
    y = tf.placeholder("float", shape=[None, y_size])

    net = Sequential([
        Linear(input_dim=166, output_dim=256, act='relu', batch_size=BATCH_SIZE),
        Linear(256, act='relu'),
        Linear(128, act='relu'),
        Linear(64, act='relu'),
        Linear(64, act='relu'),
        Linear(32, act='relu'),
        Linear(16, act='relu'),
        Linear(8, act='relu'),
        Linear(3, act='relu'),
        Softmax()
    ])

    output = net.forward(tf.convert_to_tensor(X))

    trainer = net.fit(output, y, loss='softmax_crossentropy', optimizer='adam', opt_params=[LEARNING_RATE])
Example 13
    return samples, labels


if __name__ == "__main__":
    # Compare our implementation with PyTorch in terms of results
    sanity_check = False
    if sanity_check:
        check()

    # Hyper Parameters
    num_epochs = 60
    learning_rate = 1.e-5
    weight_decay = 0.0

    # Network with two input units, two output units and three hidden layers of 25 units
    model = Sequential(Linear(2, 25), ReLU(), Linear(25, 25), ReLU(),
                       Linear(25, 25), ReLU(), Linear(25, 2))

    # Data generation
    data, labels = generate_data()

    # Model training
    optimizer = SGD(model.param(), lr=learning_rate, weight_decay=weight_decay)

    for epoch in range(num_epochs):
        inputs = FloatTensor(data)
        targets = FloatTensor(labels).view(len(labels), -1)

        optimizer.zero_grad()
        outputs = model(inputs)
        criterion = MSELoss(outputs, targets)
        loss = criterion()
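
        # Hypothetical continuation (not in the excerpt): propagate the loss
        # gradient through the network and apply one SGD update, assuming the
        # loss object and the Sequential container expose backward() like the
        # other modules in these examples and SGD exposes step() as in Example 14.
        model.backward(criterion.backward())
        optimizer.step()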
Example 14
def check():
  '''
  Compare our implementation with PyTorch in terms of results.
  '''

  #Threshold for numerical errors when comparing float tensors.
  threshold = 1.e-5


  import torch
  from torch import nn
  from torch.autograd import Variable
  from torch import autograd
  from modules.activation import ReLU, Tanh
  from modules.linear import Linear
  from modules.loss import MSELoss, MAELoss
  from modules.container import Sequential
  from modules.optimizer import SGD


  print('Asserting ReLU forward and backward phase. ', end='')
  a = torch.randn(3,2) - 0.5
  b = torch.randn(3,2)
  va = Variable(a, requires_grad=True)

  #forward
  relu_forward = nn.ReLU()(va)
  my_relu = ReLU()
  my_relu_forward = my_relu(a)

  #backward
  relu_backward = autograd.grad(outputs=relu_forward, inputs=va, grad_outputs=b)[0]
  my_relu_backward = my_relu.backward(b)

  assert(torch.sum(relu_forward.data - my_relu_forward) < threshold)
  assert(torch.sum(relu_backward.data - my_relu_backward) < threshold)
  print('DONE')

  print('Asserting Tanh forward and backward phase. ', end='')
  a = torch.randn(3,2) - 0.5
  b = torch.randn(3,2)
  va = Variable(a, requires_grad=True)

  #forward
  tanh_forward = nn.Tanh()(va)
  my_tanh = Tanh()
  my_tanh_forward = my_tanh(a)

  #backward
  tanh_backward = autograd.grad(outputs=tanh_forward, inputs=va, grad_outputs=b)[0]
  my_tanh_backward = my_tanh.backward(b)

  assert(torch.sum(tanh_forward.data - my_tanh_forward) < threshold)
  assert(torch.sum(tanh_backward.data - my_tanh_backward) < threshold)
  print('DONE')


  print('Asserting Linear forward and backward phase. ', end='')
  a = torch.randn(3,2) - 0.5
  va = Variable(a, requires_grad=True)
  c = torch.rand(3,4)
  w = torch.rand(4,2)
  b = torch.rand(4)

  #forward
  linear = nn.Linear(2,4)
  linear.weight.data = w.clone()
  linear.bias.data = b.clone()
  linear_forward = linear(va)
  my_linear = Linear(2,4)
  my_linear.params[0]['value'] = w
  my_linear.params[1]['value'] = b
  my_linear_forward = my_linear(a)

  #backward
  linear_backward = autograd.grad(outputs=linear_forward, inputs=va, grad_outputs=c, only_inputs=False)[0]
  my_linear_backward = my_linear.backward(c)

  assert(torch.sum(linear.weight.data - my_linear.params[0]['value']) < threshold)
  assert(torch.sum(linear.bias.data - my_linear.params[1]['value']) < threshold)
  assert(torch.sum(linear.weight.grad.data - my_linear.params[0]['grad']) < threshold)
  assert(torch.sum(linear.bias.grad.data - my_linear.params[1]['grad']) < threshold)
  assert(torch.sum(linear_forward.data - my_linear_forward) < threshold)
  assert(torch.sum(linear_backward.data - my_linear_backward) < threshold)
  print('DONE')

  print('Asserting MSE forward and backward phase. ', end='')
  y = torch.rand(1000)
  y_ = torch.rand(1000) - 0.5
  vy = Variable(y, requires_grad=True)
  vy_ = Variable(y_, requires_grad=False)
  c = torch.rand(1)

  #forward
  mse = nn.MSELoss()
  loss_forward = mse(vy, vy_)

  my_mse = MSELoss(y, y_)
  my_loss_forward = my_mse()

  #backward
  loss_backward = autograd.grad(outputs=loss_forward, inputs=vy, grad_outputs=c, only_inputs=False)[0]
  my_loss_backward = my_mse.backward()

  assert(torch.sum(loss_forward.data - my_loss_forward) < threshold)
  assert(torch.sum(loss_backward.data - my_loss_backward) < threshold)
  print('DONE')


  print('Asserting MAE forward and backward phase. ', end='')
  y = torch.rand(1000)
  y_ = torch.rand(1000) - 0.5
  vy = Variable(y, requires_grad=True)
  vy_ = Variable(y_, requires_grad=False)
  c = torch.rand(1)

  #forward
  mae = nn.L1Loss()
  loss_forward = mae(vy, vy_)

  my_mae = MAELoss(y, y_)
  my_loss_forward = my_mae()

  #backward
  loss_backward = autograd.grad(outputs=loss_forward, inputs=vy, grad_outputs=c, only_inputs=False)[0]
  my_loss_backward = my_mae.backward()

  assert(torch.sum(loss_forward.data - my_loss_forward) < threshold)
  assert(torch.sum(loss_backward.data - my_loss_backward) < threshold)
  print('DONE')


  print('Asserting Sequential Container. ', end='')
  y = torch.rand(1000)
  vy = Variable(y, requires_grad=True)

  model = nn.Sequential(
            nn.ReLU(),
            nn.Tanh()
          )

  my_model = Sequential(
            ReLU(),
            Tanh()
          )

  y_ = model(vy)
  my_y_ = my_model(y)

  assert(torch.sum(y_.data - my_y_) < threshold)
  print('DONE')


  print('Asserting SGD step parameter update. ', end='')

  learning_rate = 1.e-5
  weight_decay = 0.1

  w = torch.rand(4,2)
  b = torch.rand(4)
  vw = Variable(w.clone(), requires_grad=True)
  vb = Variable(b.clone(), requires_grad=True)


  sgd = torch.optim.SGD([vw,vb], lr=learning_rate, weight_decay=weight_decay) 
  my_sgd = SGD([{'value':w, 'grad':w}, {'value':b, 'grad':b}], lr=learning_rate, weight_decay=weight_decay) 

  sgd.zero_grad()
  my_sgd.zero_grad()

  sgd.step()
  my_sgd.step()

  assert(torch.sum(w - vw.data) < threshold)
  assert(torch.sum(b - vb.data) < threshold)
  print('DONE')