Example #1
0
 def __init__(self, v, h, init=True):
     """
     Store the layer sizes and optionally allocate the initial weights.

     We currently support only CD1.

     :param v: size of the first weight dimension (visible units, per the
         surrounding usage — confirm against std_mat's contract)
     :param h: size of the second weight dimension (hidden units)
     :param init: when True, build the initial weight list; pass False to
         skip allocation (e.g. when weights are loaded from elsewhere).
     """
     self.v = v; self.h = h
     # NOTE(review): when init is False, self.w is never assigned here —
     # presumably set later by the caller; confirm before relying on it.
     if init:
         self.w = [bias_mat(std_mat(v,h))]
Example #2
0
 def __init__(self, v, h, init=True):
     """
     Record the layer sizes and, unless told otherwise, build the
     initial weight list. We currently support only CD1.
     """
     self.v, self.h = v, h
     if init:
         self.w = [bias_mat(std_mat(v, h))]
Example #3
0
from mats.bias_mat import bias_mat
from mats.std_mat import std_mat

import data.bouncing_balls as b

res = 20     # frame resolution: each frame is res x res pixels
n_balls = 3  # balls per sequence
T = 100      # time steps per sequence


# A def rather than an assigned lambda (PEP 8), and it passes the named
# constants instead of re-hard-coding 3 and 100 — so editing n_balls / T
# above actually takes effect.
def dat():
    """Return one bouncing-ball training sequence from the generator."""
    return b.bounce_vec(res, n=n_balls, T=T)


v = res**2  # visible dimension: one unit per pixel
h = 100     # number of hidden units

LR = .02  # learning rate

W = .02 * bias_mat(std_mat(v, h))  # small initial weights (bias_mat wraps std_mat)
n_cd = 5  # CD-n: Gibbs steps per contrastive-divergence gradient estimate
def grad(W, x):
    """CD-n gradient estimate for weights W on batch x (delegates to rbm)."""
    result = rbm.rbm_grad_cd(W, x, n_cd)
    return result

def loss(W, x):
    """Validation loss: element [1] of the CD-n gradient routine's output."""
    grad_and_loss = rbm.rbm_grad_cd(W, x, n_cd)
    return grad_and_loss[1]

from pylab import rand


from trainers.std_trainer3 import std_trainer

t = std_trainer(name='r1',
                path='rbm/r_data',
                W = W,
Example #4
0
from p8.rnn_trbm.rnn_trbm import rnn_trbm
from p8.rnn_trbm.trbm import trbm

res = 30     # frame resolution: each frame is res x res pixels
n_balls = 3  # balls per sequence
T = 100      # time steps per sequence

# A def rather than an assigned lambda (PEP 8), and it uses the named
# constants so changing n_balls / T above actually takes effect.
def dat():
    """Return one bouncing-ball training sequence from the generator."""
    return b.bounce_vec(res, n=n_balls, T=T)


v = res ** 2  # visible dimension: one unit per pixel of a res x res frame
h = 400       # number of hidden units

NUM_ITER = 10**5  # total number of training iterations

def LR(x):
    """Learning rate decayed linearly from .01 at x=0 down to 0 at x=NUM_ITER."""
    progress = float(x) / NUM_ITER
    return .01 * (1 - progress)

VH = bias_mat(std_mat(v, h))  # visible-to-hidden weights (via bias_mat/std_mat)
HH = bias_mat(std_mat(h, h))  # hidden-to-hidden (temporal) weights

CD_n = 5  # contrastive-divergence Gibbs steps
# Wrap in int(): on Python 3 input() returns a string, which would break
# the list indexing below; int() preserves the Python 2 behaviour too.
choice = int(input('press 0 for RNN-TRBM, 1 for plain TRBM\n...'))
W0 = .005 * rnn_trbm(VH, HH, CD_n=CD_n)
W1 = .005 * trbm(VH, HH, CD_n=CD_n)
W = [W0, W1][choice]  # select the model the user asked for

name = 'r_balls_' + ['rnn-trbm', 'plain-trbm'][choice]

def grad(W, x):
    """Adapter: forward to the model object's own grad method."""
    return W.grad(x)
def loss(W, x):
    """Adapter: the loss is element [1] of the model's grad output."""
    return W.grad(x)[1]


from trainers.std_trainer3 import std_trainer
Example #5
0
res = 30     # frame resolution: each frame is res x res pixels
n_balls = 3  # balls per sequence
T = 100      # time steps per sequence


# A def rather than an assigned lambda (PEP 8), and it uses the named
# constants so editing n_balls / T above actually takes effect.
def dat():
    """Return one bouncing-ball training sequence from the generator."""
    return b.bounce_vec(res, n=n_balls, T=T)


v = res**2  # visible dimension: one unit per pixel
h = 400     # number of hidden units

NUM_ITER = 10**5  # total number of training iterations


def LR(x):
    """Learning rate decayed linearly from .01 at x=0 down to 0 at x=NUM_ITER."""
    frac_done = float(x) / NUM_ITER
    return .01 * (1 - frac_done)


VH = bias_mat(std_mat(v, h))  # visible-to-hidden weights (via bias_mat/std_mat)
HH = bias_mat(std_mat(h, h))  # hidden-to-hidden (temporal) weights

CD_n = 5  # contrastive-divergence Gibbs steps
# Wrap in int(): on Python 3 input() returns a string, which would break
# the list indexing below; int() preserves the Python 2 behaviour too.
choice = int(input('press 0 for RNN-TRBM, 1 for plain TRBM\n...'))
W0 = .005 * rnn_trbm(VH, HH, CD_n=CD_n)
W1 = .005 * trbm(VH, HH, CD_n=CD_n)
W = [W0, W1][choice]  # select the model the user asked for

name = 'r_balls_' + ['rnn-trbm', 'plain-trbm'][choice]


def grad(W, x):
    """Adapter: delegate the gradient computation to the model object itself."""
    value = W.grad(x)
    return value

Example #6
0
import rbm

from mats.bias_mat import bias_mat
from mats.std_mat import std_mat

v=4             # visible units (tiny: the exact gradient below is feasible)
batch_size=20   # samples per random training batch
# initial weights for a v-visible, 5-hidden model (bias_mat wraps std_mat)
W = bias_mat(std_mat(v,5))

def grad(W, x):
    """Exact RBM gradient for weights W on batch x (delegates to rbm)."""
    result = rbm.rbm_grad_exact(W, x)
    return result

def loss(W, x):
    """Validation loss: element [1] of the exact-gradient routine's output."""
    grad_and_loss = rbm.rbm_grad_exact(W, x)
    return grad_and_loss[1]

from pylab import rand
def data_fn():
    """Fresh random binary batch of shape (batch_size, v), dtype 'f' (float32)."""
    coin_flips = rand(batch_size, v) < .5
    return coin_flips.astype('f')

from trainers.std_trainer3 import std_trainer

# Wire everything into the generic trainer. NOTE(review): from the kwarg
# names it presumably draws batches via data_fn, applies grad/loss to the
# current W, and runs num_iter iterations — confirm against std_trainer3.
t = std_trainer(name='r1',
                path='rbm/r_data',
                W = W,
                unnorm_grad_fn = grad,
                unnorm_valid_loss = loss,
                data_fn = data_fn,
                valid_data_fn = data_fn,
                num_iter = 1000)

Example #7
0
from mats.bias_mat import bias_mat
from mats.std_mat import std_mat

import data.bouncing_balls as b

res = 20     # frame resolution: each frame is res x res pixels
n_balls = 3  # balls per sequence
T = 100      # time steps per sequence


# A def rather than an assigned lambda (PEP 8), and it passes the named
# constants instead of re-hard-coding 3 and 100.
def dat():
    """Return one bouncing-ball training sequence from the generator."""
    return b.bounce_vec(res, n=n_balls, T=T)


v = res**2  # visible dimension: one unit per pixel
h = 100     # number of hidden units

LR = .02  # learning rate

W = .02 * bias_mat(std_mat(v, h))  # small initial weights (bias_mat wraps std_mat)
n_cd = 5  # CD-n: Gibbs steps per contrastive-divergence gradient estimate


def grad(W, x):
    """CD-n gradient estimate for weights W on batch x (delegates to rbm)."""
    result = rbm.rbm_grad_cd(W, x, n_cd)
    return result


def loss(W, x):
    """Validation loss: element [1] of the CD-n gradient routine's output."""
    grad_and_loss = rbm.rbm_grad_cd(W, x, n_cd)
    return grad_and_loss[1]


from pylab import rand

from trainers.std_trainer3 import std_trainer