Example #1
def test_sigmoid_activation(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(sigmoid(node))
    compare(func, node, node)
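Every test in this collection drives a `compare` helper defined elsewhere in the test module. Its definition is not shown here; the sketch below is one plausible reconstruction, assuming ReNom's `loss.grad().get(variable)` API and `float()` conversion of scalar nodes, with hypothetical tolerances. The second argument selects which Variable the gradient is checked against, which is why the layer tests further down also pass `layer.params[...]`.

import numpy as np

def compare(func, target, *args, eps=1e-5, rtol=1e-3, atol=1e-5):
    # Autodiff gradient of the scalar func(*args) with respect to `target`.
    auto_grad = func(*args).grad().get(target)

    # Central-difference numerical gradient, one element at a time.
    num_grad = np.zeros(target.shape)
    it = np.nditer(num_grad, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        orig = float(target[i])
        target[i] = orig + eps
        plus = float(func(*args))
        target[i] = orig - eps
        minus = float(func(*args))
        target[i] = orig  # restore the perturbed element
        num_grad[i] = (plus - minus) / (2 * eps)
        it.iternext()

    assert np.allclose(auto_grad, num_grad, rtol=rtol, atol=atol)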
Example #2
def test_leaky_relu_activation(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.leaky_relu(node))
    compare(func, node, node)
Example #3
def test_mul(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func_mul1(node, x):
        return sum(x * node)
    compare(func_mul1, node, node, x)

    def func_mul2(node, x):
        return sum(node * x)
    compare(func_mul2, node, node, x)

    def func_imul1(node, x):
        node *= x
        return sum(node)
    try:
        compare(func_imul1, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error

    def func_imul2(node, x):
        x *= node
        return sum(node)
    try:
        compare(func_imul2, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error
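The `raise_error` flag reflects a NumPy rule these in-place tests exercise: an in-place operator must fit the broadcast result into the left-hand operand's existing buffer. A plain NumPy illustration of the failure mode (shapes are arbitrary):

import numpy as np

a = np.ones((3, 1), dtype=np.float32)
b = np.ones((3, 4), dtype=np.float32)

b *= a      # fine: the broadcast (3, 4) result fits b's own buffer
try:
    a *= b  # fails: the (3, 4) result cannot be stored in a's (3, 1) buffer
except ValueError as e:
    print("in-place broadcast rejected:", e)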
Example #4
def test_div(node, x, raise_error, use_gpu):
    node = Variable(node)
    x = np.array(x)
    set_cuda_active(use_gpu)

    def func_div1(node, x):
        return sum(x / node)
    compare(func_div1, node, node, x)

    def func_div2(node, x):
        return sum(node / x)
    compare(func_div2, node, node, x)

    def func_idiv1(node, x):
        node /= x
        return sum(node)
    try:
        compare(func_idiv1, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error

    def func_idiv2(node, x):
        x /= node
        return sum(node)
    try:
        compare(func_idiv2, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error
Example #5
def test_mean_squared_error(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return rm.mean_squared_error(node, x)
    compare(func, node, node, x)
Example #6
def test_sub(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func_sub1(node, x):
        return sum(x - node)
    compare(func_sub1, node, node, x)

    def func_sub2(node, x):
        return sum(node - x)
    compare(func_sub2, node, node, x)

    def func_isub1(node, x):
        node -= x
        return sum(node)
    try:
        compare(func_isub1, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error

    def func_isub2(node, x):
        x -= node
        return sum(node)
    try:
        compare(func_isub2, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error
Example #7
def test_concat(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.concat(node, x))
    compare(func, node, node, x)
Example #8
def test_sum(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(sum(node, axis=0))
    compare(func, node, node)
Example #9
def test_exp(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node):
        return sum(rm.exp(node))
    compare(func, node, node)
Example #10
def test_sigmoid_cross_entropy(node, x, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    def func(node, x):
        return rm.sigmoid_cross_entropy(node, x)
    compare(func, node, node, x)
Example #11
def test_add(node, x, raise_error, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    # Add
    def func_add1(node, x):
        return sum(x + node)
    compare(func_add1, node, node, x)

    def func_add2(node, x):
        return sum(node + x)
    compare(func_add2, node, node, x)

    def func_iadd1(node, x):
        node += x
        return sum(node)
    try:
        # compare() raises an assertion error when the shapes mismatch.
        compare(func_iadd1, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error

    def func_iadd2(node, x):
        x += node
        return sum(node)
    try:
        # compare() raises an assertion error when the shapes mismatch.
        compare(func_iadd2, node, node, x)
        raised = False
    except Exception:
        raised = True
    assert raised == raise_error
Example #12
def test_lrn(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = Lrn()

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
Example #13
def test_average_pool2d(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = AveragePool2d()

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
Example #14
def test_dot(node, x, use_gpu):
    node = Variable(node)
    x = Variable(x)

    set_cuda_active(use_gpu)

    def func(node, x):
        return sum(rm.dot(node, x))
    compare(func, node, node, x)
    compare(func, x, node, x)
Example #15
def test_spatial_dropout(node, seed, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = SpatialDropout()

    def func(node):
        np.random.seed(seed)
        return sum(layer(node))
    compare(func, node, node)
Example #16
def test_batch_normalize_featurewise(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = BatchNormalize(mode=BATCH_NORMALIZE_FEATUREMAP)

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
    compare(func, layer.params["w"], node)
    compare(func, layer.params["b"], node)
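BATCH_NORMALIZE_FEATUREMAP is the convolutional flavor of batch normalization: statistics are pooled over the batch and both spatial axes, so each channel is normalized by a single mean and variance. A NumPy sketch of the forward statistics this mode implies (the function name and eps value are hypothetical):

import numpy as np

def featuremap_batch_norm(x, w, b, eps=1e-5):
    # x: (batch, channel, height, width); one mean/var per channel.
    mean = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    xhat = (x - mean) / np.sqrt(var + eps)
    return w * xhat + b  # w and b broadcast with shape (1, channel, 1, 1)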
Example #17
def test_dense(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = Dense(output_size=2)

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
    compare(func, layer.params["w"], node)
    compare(func, layer.params["b"], node)
Example #18
def test_upconv2d(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = Deconv2d(channel=3)

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
    compare(func, layer.params["w"], node)
    compare(func, layer.params["b"], node)
Example #19
def test_batch_normalize(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer = BatchNormalize()

    def func(node):
        return sum(layer(node))
    compare(func, node, node)
    compare(func, layer.params["w"], node)
    compare(func, layer.params["b"], node)
Example #20
def test_peepholelstm(node, use_gpu):
    node = Variable(node)
    set_cuda_active(use_gpu)

    layer1 = rm.PeepholeLstm(output_size=4)

    def func(node):
        loss = 0
        for _ in range(3):
            loss += sum(layer1(node))
        layer1.truncate()
        return loss

    compare(func, node, node)
    for k in layer1.params.keys():
        compare(func, layer1.params[k], node)
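The truncate() call is what keeps the loop above well defined: ReNom's recurrent layers carry hidden state across calls, and truncate() cuts the backprop graph and resets that state. A small hypothetical sketch of the same idea on independent sequences:

import numpy as np
import renom as rm

layer = rm.PeepholeLstm(output_size=4)
# 5 independent sequences, 3 time steps each, batch of 2, 8 features.
sequences = np.random.randn(5, 3, 2, 8).astype(np.float32)
for seq in sequences:
    for x_t in seq:
        h = layer(x_t)  # hidden state is carried across time steps
    layer.truncate()    # reset state so the next sequence starts fresh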
Example #21
def prof_add():
    cuda.set_cuda_active(True)
    a = np.random.rand(1000, 1000).astype(np.float32)
    b = np.random.rand(1000, 1000).astype(np.float32)
    c = np.random.rand(1, 1000).astype(np.float32)
    ga = rm.Variable(a)
    gb = rm.Variable(b)
    gc = rm.Variable(c)
    start_t = time.time()
    for _ in range(1000):
        ga + gb * gc  # ReNom Variables: dispatched to CUDA kernels
    print("took time %f" % (time.time() - start_t))

    start_t = time.time()
    for _ in range(1000):
        a + b * c  # raw NumPy arrays: the CPU baseline
    print("took time %f" % (time.time() - start_t))
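One caveat on the GPU half of this benchmark: device kernels may still be in flight when the clock stops, so the loop can under-measure. A variant that forces the last result back to the host first (using ensure_cpu as the later examples do; that it fully synchronizes the device is an assumption):

def prof_add_synced():
    cuda.set_cuda_active(True)
    ga = rm.Variable(np.random.rand(1000, 1000).astype(np.float32))
    gb = rm.Variable(np.random.rand(1000, 1000).astype(np.float32))
    gc = rm.Variable(np.random.rand(1, 1000).astype(np.float32))
    start_t = time.time()
    for _ in range(1000):
        out = ga + gb * gc
    out.ensure_cpu()  # copy the final result to host before stopping the clock
    print("took time %f" % (time.time() - start_t))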
Example #22
def exp_dense():
    np.random.seed(10)
    cuda.set_cuda_active(False)
    a = np.random.rand(32, 320).astype(np.float32)
    b = np.random.rand(32, 80).astype(np.float32)
    layer1 = rm.Dense(input_size=320, output_size=100)
    layer2 = rm.Dense(input_size=100, output_size=80)
    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)
    opt = Sgd(0.01, momentum=0.3)
    start_t = time.time()

    for _ in range(500):
        loss = rm.Sum((layer2(rm.Sigmoid(layer1(ga))) - gb)**2) / 32
        loss.ensure_cpu()  # bring the loss back to host memory before printing
        print(loss)
        grad = loss.grad()
        grad.update(opt)
    print(time.time() - start_t)
Example #23
def exp_convolution2():
    np.random.seed(10)
    cuda.set_cuda_active(True)
    a = np.random.randn(8, 3, 12, 12).astype(np.float32)
    b = np.random.randn(8, 16, 10, 10).astype(np.float32)
    layer1 = rm.Conv2d(channel=16, input_size=a.shape[1:])

    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)

    opt = Sgd(0.001, momentum=0.3)
    start_t = time.time()
    for _ in range(100000):
        loss = rm.Sum((rm.Sigmoid(layer1(ga)) - gb)**2) / 8
        loss.ensure_cpu()
        print(loss)
        grad = loss.grad()
        grad.update(opt)
        del loss  # drop the graph (and its GPU buffers) before the next iteration
    print(time.time() - start_t)
Example #24
def exp_convolution1():
    np.random.seed(10)
    # Runs are not bit-reproducible because CUDNN_CONVOLUTION_FWD_ALGO_GEMM
    # is not deterministic.
    # 1724.07080078 GPU
    # 1715.86767578 CPU
    cuda.set_cuda_active(True)
    a = np.random.randn(8 * 2, 64, 32, 32).astype(np.float32)
    b = np.random.randn(8 * 2, 32, 28, 28).astype(np.float32)
    layer1 = rm.Conv2d(channel=32, input_size=a.shape[1:])
    layer2 = rm.Conv2d(channel=32, input_size=(32, 30, 30))

    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)

    opt = Sgd(0.0001, momentum=0.0)
    start_t = time.time()
    for _ in range(100):
        loss = rm.Sum((layer2(rm.Relu(layer1(ga))) - gb)**2) / 8
        loss.ensure_cpu()
        grad = loss.grad()
        grad.update(opt)
        print(loss)
    print(time.time() - start_t)
Example #25
from __future__ import division, print_function
import numpy as np
import renom as rm
from sklearn.preprocessing import LabelBinarizer
from renom.optimizer import Sgd, Adam
from renom.cuda.cuda import set_cuda_active
set_cuda_active(False)


class LogisticRegression(object):
    def __init__(self, x, y, batch=64, epoch=50, optimizer=Sgd):
        self.lb = LabelBinarizer().fit(y)
        self.batch = batch
        self.epoch = epoch
        self.optimizer = optimizer()
        self.network = rm.Sequential([rm.Dense(1)])

    def fit(self, x, y):
        N = len(x)
        labels = self.lb.transform(y)
        for i in range(self.epoch):
            perm = np.random.permutation(N)
            for j in range(N // self.batch):
                train_batch = x[perm[j * self.batch:(j + 1) * self.batch]]
                labels_batch = labels[perm[j * self.batch:(j + 1) *
                                           self.batch]]
                with self.network.train():
                    z = self.network(train_batch)
                    loss = rm.sigmoid_cross_entropy(z, labels_batch)
                loss.grad().update(self.optimizer)
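The class above only implements fit(). A hypothetical predict() in the same style, assuming ReNom's rm.sigmoid and Node.as_ndarray() plus scikit-learn's LabelBinarizer.inverse_transform threshold parameter:

    def predict(self, x):
        z = self.network(x)
        probs = rm.sigmoid(z).as_ndarray()
        # Invert the binarized labels; scores above 0.5 map to the positive class.
        return self.lb.inverse_transform(probs, threshold=0.5)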
Example #26
        idx = 0 if 'train' in fname else 1
        data[idx][type_idx] = load(fname, shapes[type_idx], offsets[type_idx])
    data = np.array(data)
    np.save('mnist/data.npy', data)
else:
    data = np.load('mnist/data.npy')

y_train = data[0][0]
x_train = data[0][1].astype('float32') / 255.
y_test = data[1][0]
x_test = data[1][1].astype('float32') / 255.

x_train = x_train * 2 - 1
x_test = x_test * 2 - 1

set_cuda_active(True)
seed(10)

latent_dim = 200
epoch = 30
batch_size = 256

gen = Gen(latent_dim=latent_dim, batch_normal=True)
dis = Dis()
dcgan = DCGAN(gen, dis)

GAN_dis_opt = rm.Adam()
GAN_gen_opt = rm.Adam()

N = len(x_train)
curve = []
from skimage import color, io
from sklearn.manifold import TSNE
from sklearn.metrics import classification_report, confusion_matrix

import renom as rm
from aae_func import AAE
from renom.utility.initializer import Gaussian, Uniform

# --- configuration ---
model_id = 'aae'
model_type = 'incorp_label'
model_dist = 'swissroll'
gpu = True
if gpu:
    from renom.cuda.cuda import set_cuda_active
    set_cuda_active(True)  # gpu is mandatory
seed(10)
latent_dim = 2
epoch = 50
batch_size = 256
shot_freq = epoch // 10
hidden = 1000
train = True
lr_rate = 0.01
base_outdir = 'result/{}/{}/{}'.format(model_id, model_type, model_dist)
if not path.exists(base_outdir):
    makedirs(base_outdir)

# --- data loading & preparing ---
data = np.load('mnist/data.npy')
y_train = data[0][0]
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import confusion_matrix, classification_report

import renom as rm
from renom.cuda import cuda
from renom.optimizer import Sgd, Adam
from renom.core import DEBUG_NODE_STAT, DEBUG_GRAPH_INIT, DEBUG_NODE_GRAPH
from renom.operation import sum

DEBUG_GRAPH_INIT(True)

np.random.seed(10)

cuda.set_cuda_active(True)

mnist = fetch_mldata('MNIST original', data_home="dataset")

X = mnist.data
y = mnist.target

X = X.astype(np.float32)
X /= X.max()

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
labels_train = LabelBinarizer().fit_transform(y_train).astype(np.float32)
labels_test = LabelBinarizer().fit_transform(y_test).astype(np.float32)


class MNist(rm.Model):