Example #1
def fit(self, inputs, target, epochs):
    # Create the optimizer once so Adam's moment estimates persist
    # across epochs instead of being reset every iteration.
    optimizer = rm.Adam(lr=0.001)
    for i in range(epochs):
        with self.train():
            value = self.forward(inputs)
            # Build the loss inside the train() context so the whole
            # graph is recorded for backpropagation.
            loss = rm.mean_squared_error(value, target)
        loss.grad().update(optimizer)
    return loss
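A minimal sketch of the kind of model this fit() could hang off, assuming ReNom's Model base class; the class name Regressor, the layer sizes, and the data below are made up for illustration:

import numpy as np
import renom as rm

class Regressor(rm.Model):
    def __init__(self):
        self._l1 = rm.Dense(16)
        self._l2 = rm.Dense(1)

    def forward(self, x):
        return self._l2(rm.relu(self._l1(x)))

x = np.random.rand(32, 4).astype(np.float32)
y = np.random.rand(32, 1).astype(np.float32)
model = Regressor()
# Full-batch training via the fit() above:
# loss = model.fit(x, y, epochs=100)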
Example #2
def test_gpu_node_mean_squared_error(a, b):
    # Compute the loss and its gradient on GPU ...
    set_cuda_active(True)

    g1 = Variable(a)
    g2 = Variable(b)

    g3 = rm.mean_squared_error(g1, g2)
    g = g3.grad()
    g_g1 = g.get(g1)
    g3.to_cpu()
    g_g1.to_cpu()

    # ... then recompute on CPU and check that both paths agree.
    set_cuda_active(False)
    c3 = rm.mean_squared_error(g1, g2)
    c = c3.grad()
    c_g1 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
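The test leans on scaffolding the snippet does not show. A minimal sketch of the assumed imports and of close as a tolerance-based assertion; the helper below is a guess for illustration, not the project's actual code:

import numpy as np
import renom as rm
from renom import Variable
from renom.cuda import set_cuda_active  # needs a CUDA-enabled ReNom build

def close(a, b):
    # Assumed helper: assert two Nodes/arrays agree within tolerance.
    assert np.allclose(np.array(a), np.array(b), atol=1e-5)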
Example #3
def forward(self, x, eps=1e-3):
    nb = len(x)
    # Encoding pass; the encoder stores the latent mean (zm) and
    # log-variance (zlv) on itself.
    self.enc(x)
    # Reparameterization trick: z = mu + sigma * e, with e ~ N(0, I).
    e = np.random.randn(nb, self.latent_dim)
    self.z = self.enc.zm + rm.exp(self.enc.zlv / 2) * e
    self.decd = self.dec(self.z)
    self.reconE = rm.mean_squared_error(self.decd, x)
    # KL divergence between q(z|x) and the standard normal prior.
    self.kl_loss = -0.5 * rm.sum(1 + self.enc.zlv - self.enc.zm**2 -
                                 rm.exp(self.enc.zlv)) / nb
    return self.decd
Example #4
def forward(self, x):
    self.z_mean, self.z_log_var = self.enc(x)
    # Reparameterization trick with unit-variance noise.
    e = np.random.randn(len(x), self.latent_dim)
    z_new = self.z_mean + rm.exp(self.z_log_var / 2) * e
    self.decoded = self.dec(z_new)
    nb, _ = self.z_log_var.shape
    # KL divergence between q(z|x) and the standard normal prior.
    self.kl_loss = -0.5 * rm.sum(1 + self.z_log_var - self.z_mean**2 -
                                 rm.exp(self.z_log_var)) / nb
    # Alternative reconstruction loss:
    # self.recon_loss = rm.sigmoid_cross_entropy(self.decoded, x)
    self.recon_loss = rm.mean_squared_error(self.decoded, x)
    vae_loss = self.kl_loss + self.recon_loss
    return vae_loss
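Since this forward() returns the combined loss directly, a training step only needs to call the model inside its train() context. A minimal sketch, assuming vae is an instance of the class above and a hypothetical 784-dimensional input:

import numpy as np
import renom as rm

batch_x = np.random.rand(64, 784).astype(np.float32)  # hypothetical batch
optimizer = rm.Adam(lr=0.001)
for epoch in range(10):
    with vae.train():
        loss = vae(batch_x)  # forward() returns kl_loss + recon_loss
    loss.grad().update(optimizer)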
Example #5
def forward(self, x, perm, y=None, opt=None):
    n = len(x)
    output_shape = self.arch['output_shape']
    batch_input_shape = self.get_shape(self.batch,
                                       self.arch['input_shape'])
    batch_output_shape = self.get_shape(self.batch, output_shape)
    pred_output_shape = self.get_shape(n, output_shape)
    # Without an optimizer or targets, run in inference mode.
    if opt is None or y is None:
        self._set_inference(True)
    history = []
    pred = np.zeros(pred_output_shape)
    _batch = self.batch
    for i in np.arange(0, n, _batch):
        _idx = perm[i:i + _batch]
        # Zero-pad the final batch so its shape matches the fixed batch size.
        _batch_x = np.zeros(batch_input_shape)
        _batch_x[:len(_idx)] = x[_idx]
        if y is not None:
            _batch_y = y[_idx]
        if opt is None or y is None:
            z = self._forward(_batch_x)
            if y is not None:
                loss = rm.mean_squared_error(z[:len(_idx)], _batch_y)
        else:
            z, loss = self._train(_batch_x, _idx, _batch_y)
            grad = loss.grad()
            grad.update(opt)
        if y is not None:
            history.append(loss.as_ndarray())
        pred[_idx] = z[:len(_idx)].as_ndarray()
    self._set_inference(False)
    if y is None:
        return pred
    return np.array(history)
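An illustrative call pattern for this batched forward(): with targets and an optimizer it trains and returns the per-batch loss history, otherwise it runs inference and returns predictions (model, train_x, train_y, and test_x below are placeholders):

import numpy as np
import renom as rm

perm = np.random.permutation(len(train_x))
history = model.forward(train_x, perm, y=train_y, opt=rm.Sgd(0.01))  # train
pred = model.forward(test_x, np.arange(len(test_x)))                 # predict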
Example #6
def forward(self, x, eps=1e-3):
    # Deterministic autoencoder: encode to the latent mean, then decode.
    self.z_mu = self.enc(x)
    self.decd = self.dec(self.z_mu)
    self.reconE = rm.mean_squared_error(self.decd, x)
    return self.decd
Example #7
def func(node, x):
    # reduce_sum=False keeps the elementwise squared errors,
    # which are summed explicitly here.
    return sum(rm.mean_squared_error(node, x, reduce_sum=False))
Example #8
def func(node, x):
    # Default reduction: returns the scalar mean squared error.
    return rm.mean_squared_error(node, x)
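Both helpers look like loss hooks for gradient checks: Example #7 sums elementwise losses itself, while Example #8 relies on the default reduction. For reference, a numpy sketch of the quantity being computed, assuming ReNom's usual sum-of-squares-over-2N convention (N = batch size); verify the exact scale against the installed version:

import numpy as np

def mse_reference(z, y):
    # Assumed convention: E = sum((z - y)**2) / (2 * N), N = batch size.
    n = len(z)
    return np.sum((z - y) ** 2) / (2.0 * n)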
Example #9
fig, ax = plt.subplots(epoch_splits, 2,
                       figsize=(8, epoch_splits*4))
if batch_size == 'Full':
    batch_size = len(train_x)

curve = [[], []]
neighbor_period = []
for e in range(epoch):
    s = time()
    perm = np.random.permutation(len(train_x))
    batch_loss = []
    for i in range(0, len(train_x), batch_size):
        idx = perm[i:i+batch_size]
        batch_x = train_x[idx]
        batch_y = train_y[idx]
        with func_model.train():
            loss = rm.mean_squared_error(func_model(batch_x), batch_y)
        grad = loss.grad()
        grad.update(optimizer)
        batch_loss.append(loss.as_ndarray())
    neighbor_period.append(time() - s)
    curve[0].append(np.array(batch_loss).mean())
    # Track the test loss alongside the mean training loss.
    loss = rm.mean_squared_error(func_model(test_x), test_y)
    curve[1].append(loss.as_ndarray())
    # range(epoch) ends at epoch - 1, so the final epoch is epoch - 1.
    if e % epoch_period == epoch_period - 1 or e == epoch - 1:
        current_period = np.array(neighbor_period).mean()
        neighbor_period = []
        ax_ = ax[e // epoch_period]
        curve_na = np.array(curve)
        ax_[0].text(0, 0.5, '{:.2f}sec @ epoch'.format(current_period))
        ax_[0].plot(curve_na[0])
        ax_[0].plot(curve_na[1])
Example #10
def _train(self, x, idx, y):
    with self.fcnn.train():
        x = self.fcnn(x)
        # Reshape and build the loss inside the train() context so the
        # full graph is available to grad() in the caller.
        z = rm.reshape(x, self.batch_output_shape)
        loss = rm.mean_squared_error(z[:len(idx)], y)
    return z, loss
Example #11
def forward(self, x, y=None, eps=1e-3):
    # x : input data
    # y : one-hot label data for the categorical dist. or supporting dist.
    #     (all-zero rows mark samples with no label assigned)
    # self.qzx : style z
    # self.rep : input data for decoding
    nb = len(x)

    # --- encoding phase ---
    _x = x
    if self.mode == 'clustering' or self.mode == 'reduction':
        self.qzx, self.qyx = self.enc(_x)
    else:
        self.qzx = self.enc(_x)

    # --- decoding/reconstruction phase ---
    if self.mode == 'clustering' or self.mode == 'reduction':
        self.recon = self.dec(rm.concat(self.qzx, self.qyx))
    else:
        self.recon = self.dec(self.qzx)

    # --- regularization phase ---
    if self.mode == 'incorp_label':
        self._set_incorpdist(x)
    else:
        self._set_distribution(x)
        # 'clustering' uses a categorical distribution here;
        # 'supervised' and 'dim_reduction' need no extra setup.

    if self.mode == 'incorp_label':
        self._incorp_label(x, y, eps=eps)
    else:
        # Adversarial losses: the discriminator should accept samples
        # from the prior p(z) and reject encodings q(z|x).
        self.Dpz = self.dis(self.pz)
        self.Dqzx = self.dis(self.qzx)
        self.real = -rm.sum(rm.log(self.Dpz + eps)) / nb
        self.fake = -rm.sum(rm.log(1 - self.Dqzx + eps)) / nb
        # Encoder-as-generator objective: make q(z|x) look real.
        self.fake2pos = -rm.sum(rm.log(self.Dqzx + eps)) / nb
    if self.mode == 'clustering' or self.mode == 'reduction':
        _idx = np.where(y.sum(1) == 1)[0]  # labeled samples
        idx_ = np.where(y.sum(1) == 0)[0]  # unlabeled samples
        if len(_idx) > 0:
            self.Cy = self.cds(y)
            self.Cqyx = self.cds(self.qyx)
            self.Creal = -rm.sum(rm.log(self.Cy[_idx] + eps)) / len(_idx)
            self.Cfake = -rm.sum(rm.log(1 - self.Cqyx + eps)) / nb
            self.Cfake2 = -rm.sum(rm.log(self.Cqyx[_idx] + eps)) / len(_idx)
        else:
            self.Cfake = rm.Variable(0)
            self.Creal = rm.Variable(0)
            self.Cfake2 = rm.Variable(0)

    # --- summarizing losses ---
    self.gan_loss = self.real + self.fake
    if self.mode == 'clustering':
        if len(_idx) > 0:
            self.reconE = rm.mean_squared_error(
                self.recon[idx_], x[idx_])
        else:
            self.reconE = rm.mean_squared_error(self.recon, x)
    else:
        self.reconE = rm.mean_squared_error(self.recon, x)
    self.real_count = (self.Dpz >= 0.5).sum() / nb
    self.fake_count = (self.Dqzx < 0.5).sum() / nb
    self.enc_loss = self.fake2pos
    if self.mode == 'clustering' or self.mode == 'reduction':
        if len(_idx) > 0:
            self.Creal_count = (self.Cy[_idx] >= 0.5).sum() / len(_idx)
            self.Cfake_count = (self.Cqyx[_idx] < 0.5).sum() / len(_idx)
        else:
            self.Creal_count = 0
            self.Cfake_count = 0
        self.CganE = self.Creal + self.Cfake
        self.CgenE = self.Cfake2

    return self.recon
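This forward pass only builds the individual losses (gan_loss, enc_loss, reconE, and the C* terms); training an adversarial autoencoder then alternates updates across the sub-networks. One illustrative schedule, assuming enc, dec, and dis are ReNom models exposing train() and that the chosen mode sets these losses; this is a sketch, not the source's actual training loop:

import renom as rm

opt = rm.Adam(lr=0.001)

def train_step(model, batch_x, batch_y):
    # 1) Reconstruction: update encoder and decoder.
    with model.enc.train(), model.dec.train():
        model(batch_x, y=batch_y)
        recon_loss = model.reconE
    recon_loss.grad().update(opt)

    # 2) Discriminator: accept prior samples, reject encodings.
    with model.dis.train():
        model(batch_x, y=batch_y)
        d_loss = model.gan_loss
    d_loss.grad().update(opt)

    # 3) Encoder as generator: push encodings to fool the discriminator.
    with model.enc.train():
        model(batch_x, y=batch_y)
        g_loss = model.enc_loss
    g_loss.grad().update(opt)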
Example #12
test_y = y_axis[test_idx]

seq_model = rm.Sequential(
    [rm.Dense(1), rm.Dense(10),
     rm.Sigmoid(), rm.Dense(1)])

optimizer = rm.Sgd(0.1, momentum=0.5)
plt.clf()
epoch_splits = 10
epoch_period = epoch // epoch_splits
fig, ax = plt.subplots(epoch_splits, 2, figsize=(4, epoch_splits))

curve = [[], []]
for e in range(epoch):
    with seq_model.train():
        loss = rm.mean_squared_error(seq_model(train_x), train_y)
    grad = loss.grad()
    grad.update(optimizer)
    curve[0].append(loss.as_ndarray())
    loss = rm.mean_squared_error(seq_model(test_x), test_y)
    curve[1].append(loss.as_ndarray())
    if e % epoch_period == epoch_period - 1 or e == epoch - 1:
        ax_ = ax[e // epoch_period]
        curve_na = np.array(curve)
        ax_[0].plot(curve_na[0])
        ax_[0].plot(curve_na[1])
        pred_train = seq_model(train_x)
        pred_test = seq_model(test_x)
        ax_[1].plot(x_axis, base, 'k-')
        ax_[1].scatter(x_axis, y_axis, marker='+')
        ax_[1].scatter(train_x, pred_train, c='g', alpha=0.3)