Example #1
 def _gen_model(self):
     depth = self.arch['depth']
     unit = self.arch['unit']
     # excluding mini-batch size
     input_shape = self.arch['input_shape']
     output_shape = self.arch['output_shape']
     seq = []
     for i in range(depth):
         seq.append(rm.Dense(unit))
         seq.append(rm.Relu())
         if i < 1 or i == depth - 1:
             seq.append(rm.BatchNormalize())
     seq.append(rm.Dense(output_shape))
     self._model = rm.Sequential(seq)
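
As a usage sketch (an assumption: `rm` is ReNom, and the arch values below are hypothetical), the same construction can be exercised end to end with the train/grad/update pattern that recurs throughout this listing:

import numpy as np
import renom as rm

# Hypothetical arch values mirroring _gen_model above.
arch = {'depth': 3, 'unit': 64, 'input_shape': 10, 'output_shape': 2}
seq = []
for i in range(arch['depth']):
    seq.append(rm.Dense(arch['unit']))
    seq.append(rm.Relu())
    if i < 1 or i == arch['depth'] - 1:
        seq.append(rm.BatchNormalize())
seq.append(rm.Dense(arch['output_shape']))
model = rm.Sequential(seq)

x = np.random.rand(32, arch['input_shape'])
y = np.random.rand(32, arch['output_shape'])
with model.train():  # record the graph so grad() can backpropagate
    loss = rm.mean_squared_error(model(x), y)
loss.grad().update(rm.Sgd(0.01))  # one optimization step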
Example #2
    def __init__(self, num_class):
        self.base1 = rm.Sequential([rm.Conv2d(64, filter=7, padding=3, stride=2),
                                    rm.Relu(),
                                    rm.MaxPool2d(filter=3, stride=2, padding=1),
                                    rm.BatchNormalize(mode='feature'),
                                    rm.Conv2d(64, filter=1, stride=1),
                                    rm.Relu(),
                                    rm.Conv2d(192, filter=3, padding=1, stride=1),
                                    rm.Relu(),
                                    rm.BatchNormalize(mode='feature'),
                                    rm.MaxPool2d(filter=3, stride=2, padding=1),
                                    InceptionV1Block(),
                                    InceptionV1Block([128, 128, 192, 32, 96, 64]),
                                    rm.MaxPool2d(filter=3, stride=2),
                                    InceptionV1Block([192, 96, 208, 16, 48, 64]),
                                    ])

        self.aux1 = rm.Sequential([rm.AveragePool2d(filter=5, stride=3),
                                   rm.Flatten(),
                                   rm.Dense(1024),
                                   rm.Dense(num_class)])

        self.base2 = rm.Sequential([InceptionV1Block([160, 112, 224, 24, 64, 64]),
                                    InceptionV1Block([128, 128, 256, 24, 64, 64]),
                                    InceptionV1Block([112, 144, 288, 32, 64, 64])])

        self.aux2 = rm.Sequential([rm.AveragePool2d(filter=5, stride=3),
                                   rm.Flatten(),
                                   rm.Dense(1024),
                                   rm.Dense(num_class)])

        self.base3 = rm.Sequential([InceptionV1Block([256, 160, 320, 32, 128, 128]),
                                    InceptionV1Block([256, 160, 320, 32, 128, 128]),
                                    InceptionV1Block([192, 384, 320, 48, 128, 128]),
                                    rm.AveragePool2d(filter=7, stride=1),
                                    rm.Flatten()])
        self.aux3 = rm.Dense(num_class)
Example #3
def test_reshape(tmpdir):
    model = rm.Sequential([rm.Flatten()])

    input = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, input)

    # check input
    id_input, id_shape = m.graph.node[0].input
    assert get_shape(m.graph.input[0]) == input.shape

    inis = load_initializer(m.graph.initializer)
    _test_initializer(inis, id_shape, [10, -1])

    # check output
    assert get_shape(m.graph.output[0]) == (10, 1000)
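
The asserted shapes follow from Flatten keeping the batch axis and collapsing the rest; a quick standalone check in plain NumPy (independent of the test helpers):

import numpy as np

# (10, 10, 10, 10) -> (10, 10 * 10 * 10) = (10, 1000)
x = np.random.random((10, 10, 10, 10))
assert x.reshape(10, -1).shape == (10, 1000)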
Example #4
 def __init__(self):
     self.stds, self.means = self.load_scaler()
     self.sequential = rm.Sequential(
         [rm.Lstm(30), rm.Lstm(10),
          rm.Dense(pred_length)])
     self.oanda = oandapy.API(environment="practice", access_token=token)
     self.res = self.oanda.get_history(instrument="USD_JPY",
                                       granularity=gran,
                                       count=look_back + pred_length + 78)
     self.prep = Preprocess(self.res)
     self.df = self.prep.data
     self.data = self.standardize()
     self.exp, self.target = self.create_dataset()
     self.pred = self.predict()
     self.pred_side = self.predict_side()
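
The helper methods are not shown in this snippet; a plausible standardize() consistent with the attributes set above (an assumption, not the original implementation):

 def standardize(self):
     # Hypothetical: z-score the prepared data with the scaler
     # statistics loaded in __init__.
     return (self.df.values - self.means) / self.stds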
Example #5
def test_dropout(tmpdir):
    model = rm.Sequential([rm.Dropout(0.5)])

    input = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, input)

    # check input
    id_input, = m.graph.node[0].input
    assert get_shape(m.graph.input[0]) == input.shape

    # check output
    assert get_shape(m.graph.output[0]) == input.shape

    attrs = dict((a.name, a) for a in m.graph.node[0].attribute)
    assert attrs['ratio'].f == 0.5
Example #6
 def __init__(self, input_shape, output_shape,
     growth_rate = 12,
     depth = 3,
     dropout = False,
     ):
     self.growth_rate = growth_rate
     self.depth = depth
     self.dropout = dropout
     self.input = rm.Dense(input_shape)
     self.output = rm.Dense(output_shape)
     parameters = []
     for _ in range(depth):
         parameters.append(rm.BatchNormalize())
         parameters.append(rm.Dense(growth_rate))
     self.hidden = rm.Sequential(parameters)
Example #7
def test_dqn(agent, environ, fit_args, use_gpu):
    cuda.set_cuda_active(False)
    action_shape = (1,)
    state_shape = (2,)
    env = environ(action_shape, state_shape)
    network = rm.Sequential([
        rm.Dense(5),
        rm.Dense(action_shape[0]),
    ])
    model = agent(env, network)
    action = model.action(np.random.rand(*state_shape))
    assert action.shape == action_shape

    # Check fit
    model.fit(epoch=1, epoch_step=10, batch_size=4, random_step=20, test_step=10, **fit_args)
    print(model.history)
Example #8
    def __init__(self, num_class, layer_per_block=[6, 12, 24, 16], growth_rate=32, train_whole_network=False):
        self.layer_per_block = layer_per_block
        self.growth_rate = growth_rate

        layers = []
        layers.append(rm.Conv2d(64, 7, padding=3, stride=2))
        layers.append(rm.BatchNormalize(epsilon=0.001, mode='feature'))
        for i in layer_per_block[:-1]:
            for j in range(i):
                layers.append(conv_block(growth_rate))
            layers.append(transition_layer(growth_rate))
        for i in range(layer_per_block[-1]):
            layers.append(conv_block(growth_rate))

        self.base = rm.Sequential(layers)
        self.fc = rm.Dense(num_class)
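
conv_block and transition_layer are assumed to be defined elsewhere; a plausible DenseNet-style reconstruction using the same rm layers seen in this listing (an assumption, not the library's actual helpers):

def conv_block(growth_rate):
    # Assumed BN -> ReLU -> 3x3 Conv block producing growth_rate maps.
    return rm.Sequential([
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate, filter=3, padding=1),
    ])

def transition_layer(growth_rate):
    # Assumed transition: 1x1 Conv followed by 2x2 average pooling.
    return rm.Sequential([
        rm.Conv2d(growth_rate, filter=1),
        rm.AveragePool2d(filter=2, stride=2),
    ])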
Example #9
 def __init__(
         self,
         latent_dim = 10,
         output_shape = (28, 28), 
         batch_normal = False,
         dropout = False,
         min_channels = 16,
     ):
     self.batch_normal = batch_normal
     self.latent_dim = latent_dim
     self.output_shape = output_shape
     self.dropout = dropout
     self.min_channels = min_channels
     print('--- Generator Network ---')
     parameters = []
     print_params = []
     dim = output_shape[0]
     channels = self.min_channels
     while dim % 2 == 0 and dim > 2:
         parameters.append(rm.Deconv2d(
             channel=channels, stride=2, filter=2))
         if batch_normal:
             parameters.append(rm.BatchNormalize())
         dim = dim // 2
         print_params.append([dim, channels])
         channels *= 2
     if dim % 2 == 1:
         parameters.append(rm.Deconv2d(
             channel=channels, stride=2, filter=3))
         if batch_normal:
             parameters.append(rm.BatchNormalize())
         dim = (dim - 1) // 2
         print_params.append([dim, channels])
         channels *= 2
     parameters.reverse()
     print_params.reverse()
     print('Dense {}x{}x{} & Reshape'.format(dim, dim, channels))
     self.channels = channels
     self.transform = rm.Dense(channels*1*dim*dim)
     for item in print_params:
         print('Deconv2d to {}x{} {}ch '.format(
             item[0], item[0], item[1]))
     self.hidden = rm.Sequential(parameters)
     self.output = rm.Conv2d(channel=1, stride=1, filter=1)
     print('Conv2d to {}x{} 1ch'.format(
         output_shape[0], output_shape[0]))
     self.dim = dim
Example #10
def test_relu(tmpdir):
    model = rm.Sequential([rm.Relu()])

    input = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, input)

    assert m.graph.node[0].op_type == 'Relu'

    # check input
    id_input, = m.graph.node[0].input
    assert renom.utility.onnx.OBJNAMES[id(input)] == id_input
    assert id_input == m.graph.input[0].name
    assert get_shape(m.graph.input[0]) == input.shape

    # check output
    id_output, = m.graph.node[0].output
    assert get_shape(m.graph.output[0]) == input.shape
Example #11
def test_batch_normalize(a):
    layer = rm.Sequential([rm.BatchNormalize(momentum=0.1)])

    set_cuda_active(True)

    g1 = Variable(a)
    g2 = layer(g1)
    g3 = rm.sum(g2)
    g = g3.grad()
    g_g1 = g.get(g1)
    g_g2 = g.get(layer.l0.params["w"])
    g_g3 = g.get(layer.l0.params["b"])

    layer.set_models(inference=True)
    g4 = layer(g1)
    layer.set_models(inference=False)

    g2.to_cpu()
    g3.to_cpu()
    g4.to_cpu()
    g_g1.to_cpu()
    g_g2.to_cpu()
    g_g3.to_cpu()

    set_cuda_active(False)
    layer.l0._mov_mean = 0
    layer.l0._mov_std = 0

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g1 = c.get(g1)
    c_g2 = c.get(layer.l0.params["w"])
    c_g3 = c.get(layer.l0.params["b"])

    layer.set_models(inference=True)
    c4 = layer(g1)
    layer.set_models(inference=False)

    close(g2, c2)
    close(g3, c3)
    close(g4, c4)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
Example #12
    def __init__(self, class_map=None, anchor=None,
                 imsize=(320, 320), load_pretrained_weight=False, train_whole_network=False):

        assert (imsize[0] / 32.) % 1 == 0 and (imsize[1] / 32.) % 1 == 0, \
            "Yolo v2 only accepts 'imsize' argument which is list of multiple of 32. \
              exp),imsize=(320, 320)."

        self.flag = False  # This is used to modify the loss function.
        self.global_counter = 0
        self.anchor = [] if not isinstance(anchor, AnchorYolov2) else anchor.anchor
        self.anchor_size = imsize if not isinstance(anchor, AnchorYolov2) else anchor.imsize
        self.num_anchor = 0 if anchor is None else len(anchor)

        darknet = Darknet19(1)
        self._opt = rm.Sgd(0.001, 0.9)

        super(Yolov2, self).__init__(class_map, imsize,
                                     load_pretrained_weight, train_whole_network, darknet)

        # Initialize trainable layers.
        last_channel = (self.num_class + 5) * self.num_anchor
        self._conv1 = rm.Sequential([
            DarknetConv2dBN(channel=1024, prev_ch=1024),
            DarknetConv2dBN(channel=1024, prev_ch=1024),
        ])
        self._conv21 = DarknetConv2dBN(channel=64, prev_ch=512, filter=1)
        self._conv2 = DarknetConv2dBN(channel=1024, prev_ch=1024 + 256)
        self._last = rm.Conv2d(channel=last_channel, filter=1)
        self._freezed_network = darknet._base

        for model in [self._conv21, self._conv1, self._conv2]:
            for layer in model.iter_models():
                if not layer.params:
                    continue
                if isinstance(layer, rm.Conv2d):
                    layer.params = {
                        "w": rm.Variable(layer._initializer(layer.params.w.shape), auto_update=True),
                        "b": rm.Variable(np.zeros_like(layer.params.b), auto_update=False),
                    }
                elif isinstance(layer, rm.BatchNormalize):
                    layer.params = {
                        "w": rm.Variable(layer._initializer(layer.params.w.shape), auto_update=True),
                        "b": rm.Variable(np.zeros_like(layer.params.b), auto_update=True),
                    }
Example #13
def test_max_pool2d(tmpdir):
    model = rm.Sequential([
        rm.MaxPool2d(filter=2, stride=2),
    ])

    input = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, input)

    # check input
    id_input, = m.graph.node[0].input
    assert 'input' == id_input
    assert get_shape(m.graph.input[0]) == input.shape

    # check output
    assert get_shape(m.graph.output[0]) == (10, 10, 5, 5)

    # check attrs
    attrs = dict((a.name, a) for a in m.graph.node[0].attribute)
    assert attrs['pads'].ints == [0, 0]
    assert attrs['kernel_shape'].ints == [2, 2]
    assert attrs['strides'].ints == [2, 2]
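
The asserted output shape follows from the usual pooling arithmetic; a one-line standalone check (not part of the test helpers):

# With no padding: out = (in - filter) // stride + 1.
in_size, filt, stride = 10, 2, 2
assert (in_size - filt) // stride + 1 == 5  # hence (10, 10, 5, 5)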
Example #14
    def __init__(self,
                 class_map=None,
                 imsize=(300, 300),
                 overlap_threshold=0.5,
                 load_pretrained_weight=False,
                 train_whole_network=False):
        if not hasattr(imsize, "__getitem__"):
            imsize = (imsize, imsize)

        self.num_class = len(class_map) + 1
        self.class_map = [c.encode("ascii", "ignore") for c in class_map]
        self._train_whole_network = train_whole_network
        self.prior = create_priors()
        self.num_prior = len(self.prior)
        self.overlap_threshold = overlap_threshold

        self.imsize = imsize
        vgg = VGG16(class_map, load_pretrained_weight=load_pretrained_weight)
        self._freezed_network = rm.Sequential(
            [vgg._model.block1, vgg._model.block2])
        self._network = DetectorNetwork(self.num_class, vgg)

        self._opt = rm.Sgd(1e-3, 0.9)
Example #15
 def __init__(self):
     self.block1 = rm.Sequential(
         [DarknetConv2dBN(32, prev_ch=3),
          rm.MaxPool2d(filter=2, stride=2)])
     self.block2 = rm.Sequential([
         DarknetConv2dBN(64, prev_ch=32),
         rm.MaxPool2d(filter=2, stride=2)
     ])
     self.block3 = rm.Sequential([
         DarknetConv2dBN(128, prev_ch=64),
         DarknetConv2dBN(64, filter=1, prev_ch=128),
         DarknetConv2dBN(128, prev_ch=64),
         rm.MaxPool2d(filter=2, stride=2)
     ])
     self.block4 = rm.Sequential([
         DarknetConv2dBN(256, prev_ch=128),
         DarknetConv2dBN(128, filter=1, prev_ch=256),
         DarknetConv2dBN(256, prev_ch=128),
         rm.MaxPool2d(filter=2, stride=2)
     ])
     self.block5 = rm.Sequential([
         DarknetConv2dBN(512, prev_ch=256),
         DarknetConv2dBN(256, filter=1, prev_ch=512),
         DarknetConv2dBN(512, prev_ch=256),
         DarknetConv2dBN(256, filter=1, prev_ch=512),
         DarknetConv2dBN(512, prev_ch=256),
     ])
     self.block6 = rm.Sequential([
         # For concatenation.
         rm.MaxPool2d(filter=2, stride=2),
         DarknetConv2dBN(1024, prev_ch=512),
         DarknetConv2dBN(512, filter=1, prev_ch=1024),
         DarknetConv2dBN(1024, prev_ch=512),
         DarknetConv2dBN(512, filter=1, prev_ch=1024),
         DarknetConv2dBN(1024, prev_ch=512),
     ])
Example #16
              growth_factor=1.2)
    enc = Enc(pre=pre, latent_dim=latent_dim)
    dec = Dec2d(
        input_params=latent_dim,
        first_shape=pre.output_shape,
        output_shape=batch_shape,
        check_network=debug,
    )
else:  # fully connected network
    batch_shape = (batch_size, 28 * 28)
    x_train = x_train.reshape(-1, 28 * 28)
    x_test = x_test.reshape(-1, 28 * 28)
    enc_pre = rm.Sequential([
        rm.Dense(hidden),
        rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(hidden, initializer=Uniform()),
        rm.Relu()
    ])
    enc = Enc(enc_pre, latent_dim)
    dec = rm.Sequential([
        rm.Dense(hidden),
        rm.Relu(),
        rm.BatchNormalize(),
        rm.Dense(28 * 28),
        rm.Sigmoid(),
    ])
vae = VAE(enc, dec, latent_dim)
optimizer = rm.Adam()

N = len(x_train)
Example #17
 def __init__(
     self,
     input_shape=(28, 28),
     depth=4,  #28x28 -> 14x14 -> 7x7 -> 4x4 -> 2x2
     batch_normal=False,
     latent_dim=10,
     dropout=False,
     intermidiate_dim=128,
     max_channels=64,
 ):
     self.depth = depth
     self.batch_normal = batch_normal
     self.input_shape = input_shape
     self.latent_dim = latent_dim
     self.dropout = dropout
     self.intermidiate_dim = intermidiate_dim
     self.max_channels = max_channels
     parameters = []
     channels = np.log(max_channels) / np.log(2) - self.depth + 1
     boot_th = np.log(self.input_shape[0]) / np.log(2)
     print('--- Encoding Network ---')
     boot_steps = 0
     if boot_th < self.depth:
         boot_steps = int(self.depth - boot_th) + 1
         channels = 3
     channels = int(2**channels)
     boot_steps = int(boot_steps)
     dim = self.input_shape[0]
     for _ in range(boot_steps):
         if self.batch_normal:
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append(rm.Conv2d(channels, padding=(1, 1)))
             print('Batch Normalize')
             parameters.append(rm.BatchNormalize())
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append(rm.Conv2d(channels, padding=(1, 1)))
             print('Batch Normalize')
             parameters.append(rm.BatchNormalize())
         else:
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append((rm.Conv2d(channels, padding=(1, 1))))
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append((rm.Conv2d(channels, padding=(1, 1))))
     for i in range(self.depth - boot_steps):
         if self.batch_normal:
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append(rm.Conv2d(channels, padding=(1, 1)))
             print('Batch Normalize')
             parameters.append(rm.BatchNormalize())
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append(rm.Conv2d(channels, padding=(1, 1)))
             print('Batch Normalize')
             parameters.append(rm.BatchNormalize())
         else:
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append((rm.Conv2d(channels, padding=(1, 1))))
             print('Conv2d {}x{} {}ch'.format(dim, dim, channels))
             parameters.append((rm.Conv2d(channels, padding=(1, 1))))
         channels *= 2
         dim = dim // 2
         if i == self.depth - boot_steps - 1:
             print('Average Pooling {}x{}'.format(dim, dim))
         else:
             print('Max Pooling {}x{}'.format(dim, dim))
     self.hidden = rm.Sequential(parameters)
     nb_parameters = dim * dim * channels
     print('Flatten {} params'.format(nb_parameters))
     parameters = []
     fcnn_depth = int(
         (np.log(nb_parameters / intermidiate_dim)) / np.log(4))
     nb_parameters = nb_parameters // 4
     for _ in range(fcnn_depth):
         print('Dense {}u'.format(nb_parameters))
         parameters.append(rm.Dense(nb_parameters))
         nb_parameters = nb_parameters // 4
     print('Dense {}u'.format(intermidiate_dim))
     parameters.append(rm.Dense(intermidiate_dim))
     print('*Mean Dense {}u'.format(latent_dim))
     parameters.append(rm.Dense(latent_dim, initializer=Uniform()))
     print('*Log Var Dense {}u'.format(latent_dim))
     parameters.append(rm.Dense(latent_dim, initializer=Gaussian(std=0.3)))
     self.fcnn = rm.Sequential(parameters)
Example #18
def draw_pred_curve(e_num):
    pred_curve = []
    arr_now = X_test[0]
    for _ in range(test_size):
        for t in range(look_back):
            pred = model(np.array([arr_now[t]]))
        model.truncate()
        pred_curve.append(pred[0])
        arr_now = np.delete(arr_now, 0)
        arr_now = np.append(arr_now, pred)
    plt.plot(x[:train_size+look_back], y[:train_size+look_back], color='blue')
    plt.plot(x[train_size+look_back:], pred_curve, label='epoch:'+str(e_num)+'th')

# Model definition
model = rm.Sequential([
    rm.Lstm(2),
    rm.Dense(1)
])

# Parameter settings
batch_size = 5
max_epoch = 1000
period = 200
optimizer = Adam()

# Train loop
i = 0
loss_prev = np.inf

#Learning curves
learning_curve = []
test_curve = []
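
The snippet ends before the loop body; a minimal sketch of the truncated-BPTT loop this setup implies (an assumption pieced together from the other LSTM examples in this listing; X_train, y_train, train_size and look_back come from the surrounding script and are not defined here):

while i < max_epoch:
    i += 1
    perm = np.random.permutation(train_size)
    epoch_loss = 0
    for j in range(train_size // batch_size):
        batch_x = X_train[perm[j * batch_size:(j + 1) * batch_size]]
        batch_y = y_train[perm[j * batch_size:(j + 1) * batch_size]]
        with model.train():
            # Feed the window one timestep at a time; the last output
            # is the prediction for the next value.
            for t in range(look_back):
                pred = model(batch_x[:, t:t + 1])
            loss = rm.mean_squared_error(pred, batch_y)
        loss.grad().update(optimizer)
        model.truncate()  # reset the LSTM state between windows
        epoch_loss += loss.as_ndarray()
    learning_curve.append(epoch_loss / (train_size // batch_size))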
Example #19
def main():
    df = pd.read_csv("crx.data", header=None, index_col=None)
    df = df.applymap(lambda d: np.nan if d == "?" else d)
    df = df.dropna(axis=0)
    sr_labels = df.iloc[:, -1]
    labels = sr_labels.replace({"+": "1", "-": "0"}).values.astype(float)
    data = df.iloc[:, :-1].values.astype(str)

    pattern_continuous = re.compile(r"^\d+\.?\d*\Z")
    continuous_idx = {}
    for i in range(data.shape[1]):
        is_continuous = True if pattern_continuous.match(data[0][i]) else False
        if is_continuous and i == 0:
            X = data[:, i].astype(float)
        elif not is_continuous and i == 0:
            X = pd.get_dummies(data[:, i]).values.astype(float)
        elif is_continuous and i != 0:
            X = np.concatenate((X, data[:, i].reshape(-1, 1).astype(float)),
                               axis=1)
        elif not is_continuous and i != 0:
            X = np.concatenate(
                (X, pd.get_dummies(data[:, i]).values.astype(float)), axis=1)
    print("X:{X.shape}, y:{labels.shape}".format(**locals()))

    y = labels.reshape(-1, 1)

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=0)

    sequential = rm.Sequential(
        [rm.Dense(46),
         rm.Relu(),
         rm.Dense(16),
         rm.Relu(),
         rm.Dense(1)])
    batch_size = 128
    epoch = 50
    N = len(X_train)
    optimizer = Adam()
    for i in range(epoch):
        perm = np.random.permutation(N)
        loss = 0
        for j in range(0, N // batch_size):
            train_batch = X_train[perm[j * batch_size:(j + 1) * batch_size]]
            response_batch = y_train[perm[j * batch_size:(j + 1) * batch_size]]
            with sequential.train():
                l = rm.sigmoid_cross_entropy(sequential(train_batch), response_batch)
            grad = l.grad()
            grad.update(optimizer)
            loss += l.as_ndarray()
        train_loss = loss / (N // batch_size)
        test_loss = rm.sigmoid_cross_entropy(sequential(X_test), y_test).as_ndarray()
        print("epoch:{:03d}, train_loss:{:.4f}, test_loss:{:.4f}".format(
            i, float(train_loss), float(test_loss)))
    # A logit above zero corresponds to a sigmoid probability above 0.5.
    predictions = (sequential(X_test).as_ndarray() > 0).astype(int).ravel()
    print(confusion_matrix(y_test.ravel(), predictions))
    print(classification_report(y_test.ravel(), predictions))
Example #20
X_train, X_test, y_train, y_test = train_test_split(sub_seq,
                                                    next_values,
                                                    test_size=0.2)
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

train_size = X_train.shape[0]
test_size = X_test.shape[0]
print('train size:{}, test size:{}'.format(train_size, test_size))

# Model definition and training
model = rm.Sequential(
    [rm.Lstm(35),
     rm.Relu(),
     rm.Lstm(35),
     rm.Relu(),
     rm.Dense(pred_length)])

# Parameters
batch_size = 100
max_epoch = 2000
period = 10  # early stopping checking period

optimizer = Adam()
epoch = 0
loss_prev = np.inf
learning_curve, test_curve = [], []
while (epoch < max_epoch):
    epoch += 1
    perm = np.random.permutation(train_size)
Example #21
noise = random.randn(N) * noise_rate
x_axis = np.linspace(-np.pi, np.pi, N)
base = np.sin(x_axis)
y_axis = base + noise
x_axis = x_axis.reshape(N, 1)
y_axis = y_axis.reshape(N, 1)
idx = random.permutation(N)
train_idx = idx[::2]
test_idx = idx[1::2]
train_x = x_axis[train_idx]
train_y = y_axis[train_idx]
test_x = x_axis[test_idx]
test_y = y_axis[test_idx]

seq_model = rm.Sequential(
    [rm.Dense(1), rm.Dense(10),
     rm.Sigmoid(), rm.Dense(1)])

optimizer = rm.Sgd(0.1, momentum=0.5)
plt.clf()
epoch_splits = 10
epoch_period = epoch // epoch_splits
fig, ax = plt.subplots(epoch_splits, 2, figsize=(4, epoch_splits))

curve = [[], []]
for e in range(epoch):
    with seq_model.train():
        loss = rm.mean_squared_error(seq_model(train_x), train_y)
    grad = loss.grad()
    grad.update(optimizer)
    curve[0].append(loss.as_ndarray())
Example #22
 def __init__(self, x, y, batch=64, epoch=50, optimizer=Sgd):
     self.lb = LabelBinarizer().fit(y)
     self.batch = batch
     self.epoch = epoch
     self.optimizer = optimizer()
     self.network = rm.Sequential([rm.Dense(len(self.lb.classes_))])
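
A hypothetical fit() to go with this __init__ (an assumption, not part of the original class), following the mini-batch pattern used by the other examples in this listing, with the fitted LabelBinarizer producing one-hot targets for rm.sgce:

 def fit(self, x, y):
     # Assumes a multi-class y; binary labels, where LabelBinarizer
     # yields a single column, would need special-casing.
     t = self.lb.transform(y).astype(np.float32)
     n = len(x)
     for _ in range(self.epoch):
         perm = np.random.permutation(n)
         for j in range(n // self.batch):
             idx = perm[j * self.batch:(j + 1) * self.batch]
             with self.network.train():
                 loss = rm.sgce(self.network(x[idx]), t[idx])
             loss.grad().update(self.optimizer)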
Example #23
    def __init__(
            self,
            input_params=32768,
            first_shape=(1, 32, 32, 32),
            output_shape=(1, 1, 64, 64),
            check_network=False,
            batchnormal=True,
            dropout=False,
            down_factor=1.6,
            act=rm.Relu(),
            last_act=rm.Sigmoid(),
    ):
        self.input_params = input_params
        self.latent_dim = input_params
        self.first_shape = first_shape
        self.output_shape = output_shape
        self.act = act
        self.last_act = last_act
        self.down_factor = down_factor

        def decide_factor(src, dst):
            factor = np.log(src / dst) / np.log(2)
            if factor % 1 == 0:
                return factor
            return np.ceil(factor)

        ch = first_shape[1]
        v_factor = decide_factor(output_shape[2], first_shape[2])
        h_factor = decide_factor(output_shape[3], first_shape[3])
        v_dim, h_dim = first_shape[2], first_shape[3]
        parameters = []
        check_params = np.array(first_shape[1:]).prod()
        self.trans = False
        if input_params != check_params:
            if check_network:
                print('--- Decoder Network ---')
                print('inserting Dense({})'.format(check_params))
            self.trans = rm.Dense(check_params)
        while v_factor != 0 or h_factor != 0:
            if batchnormal:
                parameters.append(rm.BatchNormalize())
                if check_network:
                    print('BN ', end='')
            stride = (2 if v_factor > 0 else 1, 2 if h_factor > 0 else 1)
            if check_network:
                print('transpose2d ch={}, filter=2, stride={}'.format(
                    ch, stride))
            parameters.append(rm.Deconv2d(channel=ch, filter=2, stride=stride))
            if self.act:
                parameters.append(self.act)
            if ch > output_shape[1]:
                ch = int(np.ceil(ch / self.down_factor))
            v_dim = v_dim * 2 if v_factor > 0 else v_dim + 1
            h_dim = h_dim * 2 if h_factor > 0 else h_dim + 1
            v_factor = v_factor - 1 if v_factor > 0 else 0
            h_factor = h_factor - 1 if h_factor > 0 else 0
        if v_dim > output_shape[2] or h_dim > output_shape[3]:
            last_filter = (v_dim - output_shape[2] + 1,
                           h_dim - output_shape[3] + 1)
            if check_network:
                print('conv2d filter={}, stride=1'.format(last_filter))
            parameters.append(
                rm.Conv2d(channel=output_shape[1],
                          filter=last_filter,
                          stride=1))
        self.parameters = rm.Sequential(parameters)
        if check_network:
            self.forward(np.zeros((first_shape[0], input_params)),
                         print_parameter=True)
Example #24
batch_size = 256
model = 'AAE'
shot_freq = epoch // 10
hidden = 100
model_type = 'simple'
model_dist = 'uniform'
lr_rate = 1.
base_outdir = 'result/{}/{}/{}'.format(model_id, model_type, model_dist)
if not path.exists(base_outdir):
    makedirs(base_outdir)

# --- model configuration ---
enc_base = rm.Sequential([
    rm.Dense(hidden),
    rm.Relu(),
    rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.Relu(),
    rm.Dense(latent_dim, initializer=Uniform())
])
dec = rm.Sequential([
    #rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    #rm.BatchNormalize(),
    rm.Dense(28 * 28),
    rm.Sigmoid()
])
Example #25
def test_batch_normalize(a, mode):
    layer = rm.Sequential([rm.BatchNormalize(momentum=0.5, mode=mode)])

    set_cuda_active(True)

    g1 = Variable(a)
    g2 = layer(g1)
    g3 = rm.sum(g2)
    g = g3.grad(detach_graph=False)
    g_g1 = g.get(g1)
    g_g2 = g.get(layer.l0.params["w"])
    g_g3 = g.get(layer.l0.params["b"])

    layer.set_models(inference=True)
    g4 = layer(g1)
    layer.set_models(inference=False)

    layer.save('temp.h5')
    layer.l0._mov_mean = 0
    layer.l0._mov_std = 0
    layer.load('temp.h5')
    layer.set_models(inference=True)
    g5 = layer(g1)
    layer.set_models(inference=False)

    g2.to_cpu()
    g3.to_cpu()
    g4.to_cpu()
    g_g1.to_cpu()
    g_g2.to_cpu()
    g_g3.to_cpu()

    set_cuda_active(False)
    layer.l0._mov_mean = 0
    layer.l0._mov_std = 0

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad(detach_graph=False)
    c_g1 = c.get(g1)
    c_g2 = c.get(layer.l0.params["w"])
    c_g3 = c.get(layer.l0.params["b"])

    layer.set_models(inference=True)
    c4 = layer(g1)
    layer.set_models(inference=False)

    layer.save('temp.h5')
    layer.l0._mov_mean = 0
    layer.l0._mov_std = 0
    layer.load('temp.h5')
    layer.set_models(inference=True)
    c5 = layer(g1)
    layer.set_models(inference=False)

    close(g2, c2)
    close(g3, c3)
    close(g4, c4)
    close(g5, c5)
    close(g4, g5)
    close(c4, c5)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)

    close(g2.attrs._m.new_array(), c2.attrs._m)
    close(g2.attrs._v.new_array(), c2.attrs._v)
    close(g2.attrs._mov_m.new_array(), c2.attrs._mov_m)
    close(g2.attrs._mov_v.new_array(), c2.attrs._mov_v)
Example #26
def layer_factory(channel=32, conv_layer_num=2):
    layers = []
    for _ in range(conv_layer_num):
        layers.append(rm.Conv2d(channel=channel, padding=1, filter=3))
        layers.append(rm.Relu())
    return rm.Sequential(layers)
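
A usage sketch (not from the original source): with padding=1 and filter=3 the spatial size is preserved, so the factory stacks same-size Conv-Relu pairs:

block = layer_factory(channel=64, conv_layer_num=3)
# block(x) on an NCHW batch keeps H and W and maps channels to 64.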
Example #27
    def __init__(
            self,
            nname='',
            batch_size=64,
            input_shape=(1, 28, 28),
            batchnormal=True,
            dropout=False,
            first_channel=8,
            growth_factor=2,
            repeats=2,
            tgt_dim=4,
            keep_vertical=False,
            check_network=False,
            act=rm.Relu(),
    ):
        self.input_shape = input_shape
        self.keep_v = keep_vertical
        self.dropout = dropout
        self.batchnormal = batchnormal
        self.act = act
        if check_network:
            print('--- {} Network ---'.format(nname))
        ch = first_channel
        in_ch, v_dim, h_dim = self.input_shape
        if check_network:
            print('Input {}'.format(input_shape))
        self.first = None

        def check_continue():
            v_deci = False if keep_vertical else v_dim > tgt_dim
            h_deci = h_dim > tgt_dim
            return v_deci or h_deci

        parameters = []
        repeat_cnt = 0
        while check_continue():
            if self.first and batchnormal:
                parameters.append(rm.BatchNormalize())
                if check_network:
                    print('BN ', end='')
            cnn_layer = rm.Conv2d(
                channel=ch,
                filter=3,
                padding=1,
            )
            if self.first:
                parameters.append(cnn_layer)
            else:
                self.first = cnn_layer
            if check_network:
                print('Conv2d -> {}'.format((batch_size, ch, v_dim, h_dim)))
            repeat_cnt += 1
            if repeat_cnt == repeats:
                repeat_cnt = 0
                ch = int(ch * growth_factor)
                stride = (1 if self.keep_v else 2, 2)
                parameters.append(
                    rm.Conv2d(
                        channel=ch,
                        filter=1,
                        stride=stride,
                    ))
                v_dim = v_dim if self.keep_v else int(np.ceil(v_dim / 2))
                h_dim = int(np.ceil(h_dim / 2))
                if check_network:
                    print('Conv2d -> {}'.format(
                        (batch_size, ch, v_dim, h_dim)))
        self.output_shape = (batch_size, ch, v_dim, h_dim)
        self.parameters = rm.Sequential(parameters)
        self.nb_parameters = ch * v_dim * h_dim
        if check_network:
            self.forward(
                np.zeros((batch_size, input_shape[0], input_shape[1],
                          input_shape[2])),
                check_network=True,
            )
Example #28
 def __init__(
     self,
     nname='',
     input_shape=(1, 28, 28),
     blocks=2,
     depth=3,
     growth_rate=12,
     latent_dim=10,
     dropout=False,
     intermidiate_dim=128,
     first_filter=5,
     compression=0.5,
     initial_channel=8,
     keep_vertical=False,
     check_network=False,
     batch_size=64,
     print_network=True,
     # pooling type
 ):
     self.depth = depth
     self.input_shape = input_shape
     self.latent_dim = latent_dim
     self.dropout = dropout
     self.intermidiate_dim = intermidiate_dim
     self.compression = compression
     self.growth_rate = growth_rate
     self.blocks = blocks
     self.keep_v = keep_vertical
     if print_network:
         print('--- {} Network ---'.format(nname))
     parameters = []
     channels = initial_channel
     in_ch, dim_v, dim_h = self.input_shape
     if print_network:
         print('Input image {}x{}'.format(dim_v, dim_h))
     dim_v = dim_v if self.keep_v else dim_v // 2
     dim_h = dim_h // 2
     if print_network:
         print(' Conv2d > {}x{} {}ch'.format(dim_v, dim_h, channels))
     self.input = rm.Conv2d(channels,
                            filter=first_filter,
                            padding=2,
                            stride=(1, 2) if self.keep_v else 2)
     if self.dropout:
         if print_network:
             print(' Dropout')
     for _ in range(blocks):
         t_params, channels = self.denseblock(
             dim_v=dim_v,
             dim_h=dim_h,
             input_channels=channels,
         )
         parameters += t_params
         dim_v = dim_v if self.keep_v else dim_v // 2
         dim_h = (dim_h + 1) // 2
     self.hidden = rm.Sequential(parameters)
     nb_parameters = dim_v * dim_h * channels
     self.cnn_channels = channels
     self.nb_parameters = nb_parameters
     self.output_shape = batch_size, channels, dim_v, dim_h
     if print_network:
         print(' Flatten {} params'.format(nb_parameters))
     if check_network:
         self.forward(np.zeros(
             (batch_size, input_shape[0], input_shape[1], input_shape[2])),
                      print_parameter=True)
Example #29
    temp = np.zeros((len(data), size))
    temp[np.arange(len(data)), data.reshape(-1)] = 1
    return temp


y_train_1 = one_hot(y_train)
idx = random.permutation(len(y_train))[10000:]
y_train_1[idx] = np.r_[np.zeros(10), np.ones(1)].reshape(1, 11)

# --- model configuration ---
enc_base = rm.Sequential([
    #rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.Relu(),  #rm.Dropout(),
    rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.Relu(),  #rm.Dropout(),
    # xxx rm.BatchNormalize(),
    # Do not put BatchNormalize at the end of the generator
    rm.Dense(latent_dim, initializer=Uniform())
])
dec = rm.Sequential([
    #rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    rm.BatchNormalize(),
    rm.Dense(hidden),
    rm.LeakyRelu(),
    #rm.BatchNormalize(),
    rm.Dense(28 * 28),
    rm.Sigmoid()
])
Example #30
y = [_y + np.random.randn(series)*(scale/100) for _ in range(n)]
y = np.array(y).astype('float32')
perm = np.random.permutation(n)
x_train = y[perm[100:]]
x_test = y[perm[:100]]

hidden = 1000
gan_dim = 1000
batch_size = 100
epoch = 100
shot_period = 10
shot_freq = epoch // shot_period
s = batch_size, series
gen = rm.Sequential([
    #rm.Dense(hidden), 
    #rm.BatchNormalize(),
    rm.Dense(hidden), rm.Tanh(),
    rm.Dense(series)
])

dis = rm.Sequential([
    #rm.Dense(hidden), #rm.LeakyRelu(),
    #rm.BatchNormalize(),
    rm.Dense(hidden), #rm.LeakyRelu(),
    rm.Dense(1), rm.Sigmoid()
])


gan = GAN(gen, dis, latent_dim=gan_dim)
initial_lr = 0.001
last_lr = initial_lr/1000
_b = .5 # default is .9
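
The snippet ends before the optimizers are built; one hedged continuation (assumptions: rm.Adam's b argument is the first-moment coefficient, as the "default is .9" comment suggests, and the learning rate decays linearly from initial_lr to last_lr):

# Hypothetical continuation, not from the original source.
opt_gen = rm.Adam(lr=initial_lr, b=_b)
opt_dis = rm.Adam(lr=initial_lr, b=_b)
lr_schedule = np.linspace(initial_lr, last_lr, epoch)  # per-epoch lr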