def compute_loss(imgs, pafs_ys, heatmaps_ys, pafs_t, heatmaps_t, ignore_mask):
    heatmap_loss_log = []
    paf_loss_log = []
    total_loss = 0

    paf_masks = ignore_mask[:, None].repeat(pafs_t.shape[1], axis=1)
    heatmap_masks = ignore_mask[:, None].repeat(heatmaps_t.shape[1], axis=1)

    # compute loss on each stage
    for pafs_y, heatmaps_y in zip(pafs_ys, heatmaps_ys):
        stage_pafs_t = pafs_t.copy()
        stage_heatmaps_t = heatmaps_t.copy()
        stage_paf_masks = paf_masks.copy()
        stage_heatmap_masks = heatmap_masks.copy()

        if pafs_y.shape != stage_pafs_t.shape:
            stage_pafs_t = F.resize_images(stage_pafs_t, pafs_y.shape[2:]).data
            stage_heatmaps_t = F.resize_images(stage_heatmaps_t, pafs_y.shape[2:]).data
            stage_paf_masks = F.resize_images(stage_paf_masks.astype('f'), pafs_y.shape[2:]).data > 0
            stage_heatmap_masks = F.resize_images(stage_heatmap_masks.astype('f'), pafs_y.shape[2:]).data > 0

        stage_pafs_t[stage_paf_masks] = pafs_y.data[stage_paf_masks]
        stage_heatmaps_t[stage_heatmap_masks] = heatmaps_y.data[stage_heatmap_masks]

        pafs_loss = F.mean_squared_error(pafs_y, stage_pafs_t)
        heatmaps_loss = F.mean_squared_error(heatmaps_y, stage_heatmaps_t)

        total_loss += pafs_loss + heatmaps_loss

        paf_loss_log.append(float(cuda.to_cpu(pafs_loss.data)))
        heatmap_loss_log.append(float(cuda.to_cpu(heatmaps_loss.data)))

    return total_loss, paf_loss_log, heatmap_loss_log
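
A minimal CPU smoke test for compute_loss, assuming the function and its module-level imports (chainer.functions as F and chainer.cuda) are in scope; the shapes below (38 PAF channels, 19 heatmap channels, two stages) are illustrative OpenPose-style values, not taken from the original project:

import numpy as np
import chainer

B, H, W = 2, 46, 46
pafs_t = np.random.rand(B, 38, H, W).astype(np.float32)
heatmaps_t = np.random.rand(B, 19, H, W).astype(np.float32)
ignore_mask = np.zeros((B, H, W), dtype=bool)  # nothing masked out
# two stages, both predicting at ground-truth resolution
pafs_ys = [chainer.Variable(np.random.rand(B, 38, H, W).astype(np.float32)) for _ in range(2)]
heatmaps_ys = [chainer.Variable(np.random.rand(B, 19, H, W).astype(np.float32)) for _ in range(2)]

total, paf_log, hm_log = compute_loss(None, pafs_ys, heatmaps_ys, pafs_t, heatmaps_t, ignore_mask)
print(total.data, paf_log, hm_log)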
Example No. 2
File: models.py Project: kzky/works
    def __call__(self, x_recon, x, enc_hiddens, dec_hiddens):
        """
        Parameters
        -----------------
        x_recon: reconstructed Variable
        x: target Variable (used as the label)
        enc_hiddens: list of Variable
        dec_hiddens: list of Variable
        """

        # Lateral Recon Loss
        recon_loss = 0
        if self.rc and enc_hiddens is not None:
            for h0, h1 in zip(enc_hiddens[::-1], dec_hiddens):
                d = np.prod(h0.data.shape[1:])
                recon_loss += F.mean_squared_error(h0, h1) / d

        # Reconstruction Loss
        if x_recon is not None:
            d = np.prod(x.data.shape[1:])
            recon_loss += F.mean_squared_error(x_recon, x) / d

        self.loss = recon_loss
        
        return self.loss
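
Note that F.mean_squared_error already averages over every element, so the extra division by d = np.prod(shape[1:]) above scales the per-element loss down by the feature dimensionality once more. A quick check of that normalization, with an assumed (batch, dim) tensor:

import numpy as np
import chainer.functions as F

a = np.zeros((4, 10), dtype=np.float32)
b = np.ones((4, 10), dtype=np.float32)
d = np.prod(a.shape[1:])
print(F.mean_squared_error(a, b).data / d)  # 1.0 / 10 = 0.1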
Example No. 3
def forward(x, p, a, A=None,P=None):

    conv1_1, conv2_1, conv3_1, conv4_1,conv5_1, = func(inputs={'data': x}, outputs=['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'])
    conv1_1F,conv2_1F, conv3_1F, conv4_1F,conv5_1F, = [ reshape2(x) for x in [conv1_1,conv2_1, conv3_1, conv4_1,conv5_1]]
    conv1_1G,conv2_1G, conv3_1G, conv4_1G,conv5_1G, = [ Fu.matmul(x, x, transa=False, transb=True) for x in [conv1_1F,conv2_1F, conv3_1F, conv4_1F,conv5_1F]]
    
    # Because P and A do not change across iterations, it is better to compute them once.
    if A is None and P is None:
        #compute matrix P 
        conv1_1,conv2_1, conv3_1, conv4_1,conv5_1, = func(inputs={'data': p}, outputs=['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'])
        conv1_1P,conv2_1P, conv3_1P, conv4_1P,conv5_1P, = [ reshape2(x) for x in [conv1_1,conv2_1, conv3_1, conv4_1,conv5_1]]
        #compute matrix A
        conv1_1,conv2_1, conv3_1, conv4_1,conv5_1, = func(inputs={'data': a}, outputs=['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1'])
        conv1_1A0,conv2_1A0, conv3_1A0, conv4_1A0,conv5_1A0, = [ reshape2(x) for x in [conv1_1,conv2_1, conv3_1, conv4_1,conv5_1]]
        conv1_1A,conv2_1A, conv3_1A, conv4_1A,conv5_1A, = [ Fu.matmul(x, x, transa=False, transb=True) for x in [conv1_1A0,conv2_1A0, conv3_1A0, conv4_1A0,conv5_1A0]]
    else:
        conv1_1P,conv2_1P, conv3_1P, conv4_1P,conv5_1P,=P
        conv1_1A,conv2_1A, conv3_1A, conv4_1A,conv5_1A,=A

    L_content = Fu.mean_squared_error(conv4_1F,conv4_1P)/2

    # Caution! The dividing numbers are hard-coded!
    # This part corresponds to equation (4) in the original paper.
    # To check the current N and M, run the following:
    # [x.data.shape for x in [conv1_1F, conv2_1F, conv3_1F, conv4_1F, conv5_1F]]
    L_style = (Fu.mean_squared_error(conv1_1G, conv1_1A)/(4*64*64*50176*50176)
    + Fu.mean_squared_error(conv2_1G, conv2_1A)/(4*128*128*12544*12544)
    + Fu.mean_squared_error(conv3_1G, conv3_1A)/(4*256*256*3136*3136)
    + Fu.mean_squared_error(conv4_1G, conv4_1A)/(4*512*512*784*784)
    )/4  # this is equal weighting of E_l

    loss = a_p_ratio*L_content + L_style
    return loss 
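
The F/G pairs above implement Gram matrices: assuming reshape2 flattens a (1, C, H, W) feature map to (C, H*W), the product F F^T is the (C, C) Gram matrix from the style-transfer paper, which also matches the hard-coded divisor for conv1_1 (64 channels, 50176 = 224*224 positions). A standalone sketch of that step:

import numpy as np
import chainer.functions as Fu

feat = np.random.rand(1, 64, 224, 224).astype(np.float32)
flat = feat.reshape(64, -1)                 # what reshape2 is assumed to do
gram = Fu.matmul(flat, flat, transb=True)   # (64, 64) Gram matrix
print(gram.shape)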
Example No. 4
 def test_invalid_dtype2(self):
     x0 = chainer.Variable(
         numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32))
     x1 = chainer.Variable(
         numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float16))
     with self.assertRaises(type_check.InvalidType):
         functions.mean_squared_error(x0, x1)
Example No. 5
 def calcLoss(self, y, t):
     if self.mode == MODE_TRAIN:
         self.loss = F.mean_squared_error(y, t)
         self.optimizer.target.cleargrads()  # clear accumulated gradients before backprop
         self.loss.backward()  # backpropagation
         self.optimizer.update()  # optimization step
     else:
         self.loss = F.mean_squared_error(y, t)
Example No. 6
 def __call__(self, x):
     # encode
     self.z = self.encode(x)
     xp = cuda.get_array_module(self.z.data)
     volatile = "off" if self.train else "on"
     z_t = Variable(xp.ones_like(self.z.data), volatile=volatile)
     loss_z = F.mean_squared_error(self.z, z_t)
     # decode
     self.y = self.decode(self.z)
     loss_y = F.mean_squared_error(x, self.y)
     self.loss = loss_z + loss_y
     return self.loss
Example No. 7
File: losses.py Project: kzky/works
    def __call__(self, x_recon, x):
        bs = x.shape[0]
        d = np.prod(x.shape[1:])

        if len(x.shape[1:]) == 3:  # image input (C, H, W): compare downsampled maps
            h_recon = F.average_pooling_2d(x_recon, (2, 2))
            h = F.average_pooling_2d(x, (2, 2))
            self.loss = F.mean_squared_error(h_recon, h) / d
        else:
            self.loss = F.mean_squared_error(x_recon, x) / d

        return self.loss
Example No. 8
def generate_image(img_orig, img_style, width, nw, nh, max_iter, lr, img_gen=None):
    batch_size = img_orig.shape[0]
    mid_orig = nn.forward(Variable(img_orig, volatile=True))
    style_mats = [get_matrix(y) for y in nn.forward(Variable(img_style, volatile=True))]

    if img_gen is None:
        if args.gpu >= 0:
            img_gen_ = xp.random.uniform(-20,20,(3,width,width),dtype=np.float32)
            img_gen = xp.random.uniform(-20,20,(batch_size,3,width,width),dtype=np.float32)
            img_gen[:,:,:,:] = img_gen_
        else:
            img_gen_ = np.random.uniform(-20,20,(3,width,width)).astype(np.float32)
            img_gen = np.random.uniform(-20,20,(batch_size,3,width,width)).astype(np.float32)
            img_gen[:,:,:,:] = img_gen_
    x = Variable(img_gen)
    xg = xp.zeros_like(x.data)
    optimizer = optimizers.Adam(alpha=lr)
    optimizer.setup((img_gen,xg))
    for i in range(max_iter):

        x = Variable(img_gen)
        y = nn.forward(x)

        optimizer.zero_grads()
        L = Variable(xp.zeros((), dtype=np.float32))
        for l in range(len(y)):
            gogh_matrix = get_matrix(y[l])

            L1 = np.float32(args.lam) * np.float32(nn.alpha[l])*F.mean_squared_error(y[l], Variable(mid_orig[l].data))
            L2 = np.float32(nn.beta[l])*F.mean_squared_error(gogh_matrix, Variable(style_mats[l].data))/np.float32(len(y))
            L += L1+L2

            if i%100==0:
                print i,l,L1.data,L2.data

        L.backward()
        xg += x.grad
        optimizer.update()

        tmp_shape = img_gen.shape
        if args.gpu >= 0:
            img_gen += Clip().forward(img_gen).reshape(tmp_shape) - img_gen
        else:
            def clip(x):
                return -120 if x<-120 else (136 if x>136 else x)
            img_gen += np.vectorize(clip)(img_gen).reshape(tmp_shape) - img_gen

        if i%50==0:
            for j in range(img_gen.shape[0]):
                save_image(img_gen[j], W, nw[j], nh[j], args.out_dir+"_%d/im_%05d.png"%(j,i))

    for j in range(img_gen.shape[0]):
        save_image(img_gen[j], W, nw[j], nh[j], args.out_dir+"_last/im_%d.png"%(j))
Example No. 9
    def train(self, x_img, x_doc, y_data, regression, gpu=True, useImage=True, useDoc=True):
        xp = cuda.cupy if gpu else np
        x_img = xp.asarray(x_img)
        x_doc = xp.asarray(x_doc)
        y_data = xp.asarray(y_data)
        img, doc, t = Variable(x_img), Variable(x_doc), Variable(y_data)
        y = self.model.forward(img, doc, regression=regression, useImage=useImage, useDoc=useDoc)

        # calc loss
        if useImage:
            if regression:
                a = self.toLog(y["a"], xp)
                b = self.toLog(y["b"], xp)
                h = self.toLog(y["h"], xp)
                t = self.toLog(t, xp)
                self.loss1 = F.mean_squared_error(a, t)
                self.loss2 = F.mean_squared_error(b, t)
                self.loss3 = F.mean_squared_error(h, t)
            else:
                a = y["a"]
                b = y["b"]
                h = y["h"]
                self.loss1 = F.softmax_cross_entropy(a, t)
                self.loss2 = F.softmax_cross_entropy(b, t)
                self.loss3 = F.softmax_cross_entropy(h, t)
            loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
        else:
            if regression:
                h = self.toLog(y, xp)
                t = self.toLog(t, xp)
                self.loss1 = F.mean_squared_error(h, t)
            else:
                h = y
                self.loss1 = F.softmax_cross_entropy(y, t)
            loss = self.loss1


        # random select optimizer
        rnd = np.random.randint(0, len(self.myOptimizers))
        self.optimizer = self.myOptimizers[rnd]
        self.optimizer.setup(self.model)
        self.optimizer.zero_grads()
        loss.backward()
        self.optimizer.update()

        if regression:
            h = np.array(cuda.to_cpu(h.data)).reshape((len(h)))
            t = np.array(cuda.to_cpu(t.data)).reshape((len(t)))
            return loss.data, h, t
        else:
            return loss.data, F.accuracy(h, t).data, []
Example No. 10
    def DNN(self, x_train, y_train, x_test, y_test, seed):
        np.random.seed(seed)
        dnn = Deep()
        dnn.compute_accuracy = False

        if args.gpu >= 0:
            dnn.to_gpu()

        optimizer = optimizers.Adam()
        optimizer.setup(dnn)

        end_counter = 0
        min_loss = 100
        final_epoch = 0
        final_pred = xp.empty([x_test.shape[0], 1], dtype=xp.float32)

        x_train, y_train = resample(x_train, y_train, n_samples=x_train.shape[0])
        for epoch in range(n_epoch):
            indexes = np.random.permutation(x_train.shape[0])
            for i in range(0, x_train.shape[0], batchsize):
                x_train_dnn = Variable(x_train[indexes[i : i + batchsize]])
                y_train_dnn = Variable(y_train[indexes[i : i + batchsize]])
                dnn.zerograds()  # update on every mini-batch, not just the last one
                loss = F.mean_squared_error(dnn(x_train_dnn), y_train_dnn)
                loss.backward()
                optimizer.update()
            end_counter += 1
        
            #evaluation
            if epoch % evaluation == 0:
                y_pred = dnn(Variable(x_test, volatile='on'))
                loss = F.mean_squared_error(y_pred, Variable(y_test, volatile='on'))

                if min_loss > loss.data:
                    min_loss = loss.data
                    print "epoch{}".format(epoch)
                    print "Current minimum loss is {}".format(min_loss)
                    serializers.save_npz('network/DNN{}.model'.format(seed), dnn)
                    final_epoch = epoch
                    final_pred = y_pred
                    end_counter = 0

            if end_counter > end_counter_max:
                f = open("network/final_epoch.txt", "a")
                f.write("DNN{}:{}".format(seed, final_epoch) + "\n")
                f.close()
                break     

        return final_pred.data, min_loss    
Example No. 11
    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data, volatile=not train)
        t = chainer.Variable(y_data, volatile=not train)

        y, y1 = self.forward_super(x, train=train)

        L = F.mean_squared_error(y, t)
        L1 = F.mean_squared_error(y1, t)

        if train:
            Loss = L + (L1 * 0.5)
        else:
            Loss = L

        return Loss
Example No. 12
def loss_unlabeled(x):
    lam = [1000, 10, 0.1, 0.1, 0.1, 0.1, 0.1]
    y, zs = enc(x, eta=0.3, test=False)
    zs2 = dec(F.softmax(y), zs, test=False)
    y3, zs3 = enc(x, eta=0.0, test=False)
    mus = [enc.bn1.mu, enc.bn2.mu, enc.bn3.mu, enc.bn4.mu, enc.bn5.mu, enc.bn6.mu]
    vrs = [enc.bn1.vr, enc.bn2.vr, enc.bn3.vr, enc.bn4.vr, enc.bn5.vr, enc.bn6.vr]

    L = 0
    for i in range(len(zs2)):
        if i==0:
            L += lam[i] * F.mean_squared_error(zs2[i], zs3[i])
        else:
            L += lam[i] * F.mean_squared_error((zs2[i]-mus[i-1])/(vrs[i-1]+1e-10), zs3[i])
    return L
Example No. 13
    def forward(self, state, action, Reward, state_dash, episode_end):
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        Q = self.Q_func(s)  # Get Q-value
        # Generate Target Signals
        tmp2 = self.Q_func(s_dash)
        tmp2 = list(map(np.argmax, tmp2.data.get()))  # argmaxQ(s',a)
        tmp = self.Q_func_target(s_dash)  # Q'(s',*)
        tmp = list(tmp.data.get())
        # select Q'(s',*) due to argmaxQ(s',a)
        res1 = []
        for i in range(num_of_batch):
            res1.append(tmp[i][tmp2[i]])

        #max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        max_Q_dash = np.asanyarray(res1, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)
        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
            else:
                tmp_ = np.sign(Reward[i])

            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_
        # TD-error clipping
        td = Variable(cuda.to_gpu(target)) - Q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, Q
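
A numeric sketch of the TD-error clipping trick used above: inside [-1, 1] the error passes through unchanged, outside it is replaced by its sign, so the squared loss against zeros behaves like a Huber-style loss with bounded gradients. The values below are illustrative:

import numpy as np

td = np.array([-3.0, -0.5, 0.2, 2.0], dtype=np.float32)
td_tmp = td + 1000.0 * (np.abs(td) <= 1)   # shift small entries to avoid dividing by zero
td_clip = td * (np.abs(td) <= 1) + td / np.abs(td_tmp) * (np.abs(td) > 1)
print(td_clip)  # [-1.  -0.5  0.2  1. ]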
Example No. 14
 def cost(self, x_data):
     x = Variable(x_data)
     t = Variable(x_data.reshape(x_data.shape[0], x_data.shape[1]*x_data.shape[2]*x_data.shape[3]))
     h = F.dropout(x)
     h = self.encode(h)
     y = self.decode(h)
     return F.mean_squared_error(y, t)
Example No. 15
    def forward(self, state, action, Reward, state_dash, episode_end):
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        Q = self.model.Q_func(s,train=True)  # Get Q-value

        # Generate Target Signals
        tmp = self.model_target.Q_func(s_dash,train=self.targetFlag)  # Q(s',*)
        tmp = list(map(np.max, tmp.data.get()))  # max_a Q(s',a)
        max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)

        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
            else:
                tmp_ = np.sign(Reward[i])
            #print action
            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_

        # TD-error clipping
        td = Variable(cuda.to_gpu(target,self.gpu_id)) - Q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32),self.gpu_id))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, Q
Example No. 16
    def get_loss(self, state, action, reward, state_prime, episode_end):
        s = Variable(cuda.to_gpu(state))
        s_dash = Variable(cuda.to_gpu(state_prime))

        q = self.model.q_function(s)  # Get Q-value

        # Generate Target Signals
        tmp = self.model_target.q_function(s_dash)  # Q(s',*)
        tmp = list(map(np.max, tmp.data))  # max_a Q(s',a)
        max_q_prime = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(copy.deepcopy(q.data.get()), dtype=np.float32)

        for i in range(self.replay_size):
            if episode_end[i][0] is True:
                tmp_ = np.sign(reward[i])
            else:
                #  The sign of reward is used as the reward of DQN!
                tmp_ = np.sign(reward[i]) + self.gamma * max_q_prime[i]

            target[i, action[i]] = tmp_

        # TD-error clipping
        td = Variable(cuda.to_gpu(target)) - q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)

        zero_val = Variable(cuda.to_gpu(np.zeros((self.replay_size, self.n_act), dtype=np.float32)))
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, q
Example No. 17
    def __call__(self, x_data, y_data):
        x = chainer.Variable(x_data)
        y = chainer.Variable(y_data)

        # q(z|x,y)
        rh1 = F.relu(self.recog1(x))
        rh2 = F.relu(self.recog2(rh1))
        recog_mean = self.recog_mean(rh2)
        #recog_log_sigma = 0.5 * self.recog_log_sigma(rh2)
        recog_log_sigma = self.recog_log_sigma(rh2)

        eps = np.random.normal(0, 1, (x.data.shape[0], nz)).astype(np.float32)
        eps = chainer.Variable(eps)

        # z = mu + sigma * epsilon (reparameterization trick)
        z = recog_mean + F.exp(0.5 * recog_log_sigma) * eps
        #z = recog_mean + F.exp(recog_log_sigma) * eps

        gh1 = F.relu(self.gen1(z))
        gh2 = F.relu(self.gen2(gh1))
        gen_mean = self.gen_mean(gh2)
        output = F.sigmoid(gen_mean)
        loss = F.mean_squared_error(output, y)
        kld = -0.5 * F.sum(1 + recog_log_sigma - recog_mean**2 - F.exp(recog_log_sigma)) / (x_data.shape[0] * x_data.shape[1])
        return loss, kld, output
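
The kld term above is the closed-form KL divergence between the recognition Gaussian q = N(mu, sigma^2) and the unit Gaussian prior. A sketch checking that formula against a Monte-Carlo estimate for a single scalar latent (the mu and sigma values are assumed for illustration):

import numpy as np

mu, log_sigma2 = 0.7, np.log(0.5)
kld_closed = -0.5 * (1 + log_sigma2 - mu**2 - np.exp(log_sigma2))
z = mu + np.sqrt(np.exp(log_sigma2)) * np.random.randn(200000)
log_q = -0.5 * (np.log(2 * np.pi) + log_sigma2 + (z - mu)**2 / np.exp(log_sigma2))
log_p = -0.5 * (np.log(2 * np.pi) + z**2)
print(kld_closed, np.mean(log_q - log_p))  # the two estimates should roughly agree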
Example No. 18
 def predict(self, x_data, y_data, state):
     x ,t = Variable(x_data,volatile=False),Variable(y_data,volatile=False)
     h1_in   = self.l1_x(x) + self.l1_h(state['h1'])
     c1, h1  = F.lstm(state['c1'], h1_in)
     y       = self.l6(h1)
     state   = {'c1': c1, 'h1': h1}
     return state,F.mean_squared_error(y,t)
Example No. 19
    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data, volatile=not train)
        t = chainer.Variable(y_data, volatile=not train)

        y = self.forward_super(x, train=train)
        
        return F.mean_squared_error(y, t)
Example No. 20
	def calc_loss(self, x, t,layer,train=True):
		self.clear()

		#print(x.data.shape)
		h = F.relu(model.conv3_32(x))
		h = F.relu(model.conv32_3(h))
		h = F.relu(self.norm_ch3(h, test=not train))

		h = F.relu(model.conv3_32(h))
		h = F.relu(model.conv32_3(h))
		h = F.relu(self.norm_ch3(h, test=not train))

		if layer > 0:
			h = F.relu(model.conv3_32(h))
			h = F.relu(model.conv32_3(h))
			h = F.relu(self.norm_ch3(h, test=not train))
		if layer > 1:
			h = F.relu(model.conv3_32(h))
			h = F.relu(model.conv32_3(h))
			h = F.relu(self.norm_ch3(h, test=not train))
		if layer > 2:
			h = F.relu(model.conv3_32(h))
			h = F.relu(model.conv32_3(h))
			
		loss = F.mean_squared_error(h, t)
		return loss
Example No. 21
    def __call__(self, x, t):
        h = F.relu(self.bn1_1(self.conv1_1(x), test=not self.train))
        h = F.relu(self.bn1_2(self.conv1_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn2_1(self.conv2_1(h), test=not self.train))
        h = F.relu(self.bn2_2(self.conv2_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn3_1(self.conv3_1(h), test=not self.train))
        h = F.relu(self.bn3_2(self.conv3_2(h), test=not self.train))
        h = F.relu(self.bn3_3(self.conv3_3(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn4_1(self.conv4_1(h), test=not self.train))
        h = F.relu(self.bn4_2(self.conv4_2(h), test=not self.train))
        h = F.relu(self.bn4_3(self.conv4_3(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn5_1(self.conv5_1(h), test=not self.train))
        h = F.relu(self.bn5_2(self.conv5_2(h), test=not self.train))
        h = F.relu(self.bn5_3(self.conv5_3(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.dropout(F.relu(self.fc6(h)), train=self.train, ratio=0.6)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train, ratio=0.6)
        self.pred = self.fc8(h)

        if t is not None:
            self.loss = F.mean_squared_error(self.pred, t)
            return self.loss
        else:
            return self.pred
Example No. 22
 def _fit(self, X):
     self.optimizer.zero_grads()
     y_var = self._forward(X, train=True)
     loss = F.mean_squared_error(y_var, Variable(X.copy()))
     loss.backward()
     self.optimizer.update()
     return loss
Example No. 23
    def __call__(self, x, t=None):
        self.clear()
        h1 = F.leaky_relu(self.conv1(x), slope=0.1)
        h1 = F.leaky_relu(self.conv2(h1), slope=0.1)
        h1 = F.leaky_relu(self.conv3(h1), slope=0.1)

        h2 = self.seranet_v1_crbm(x)
        # Fusion
        h12 = F.concat((h1, h2), axis=1)

        lu = F.leaky_relu(self.convlu6(h12), slope=0.1)
        lu = F.leaky_relu(self.convlu7(lu), slope=0.1)
        lu = F.leaky_relu(self.convlu8(lu), slope=0.1)
        ru = F.leaky_relu(self.convru6(h12), slope=0.1)
        ru = F.leaky_relu(self.convru7(ru), slope=0.1)
        ru = F.leaky_relu(self.convru8(ru), slope=0.1)
        ld = F.leaky_relu(self.convld6(h12), slope=0.1)
        ld = F.leaky_relu(self.convld7(ld), slope=0.1)
        ld = F.leaky_relu(self.convld8(ld), slope=0.1)
        rd = F.leaky_relu(self.convrd6(h12), slope=0.1)
        rd = F.leaky_relu(self.convrd7(rd), slope=0.1)
        rd = F.leaky_relu(self.convrd8(rd), slope=0.1)

        # Splice
        h = CF.splice(lu, ru, ld, rd)

        h = F.leaky_relu(self.conv9(h), slope=0.1)
        h = F.leaky_relu(self.conv10(h), slope=0.1)
        h = F.leaky_relu(self.conv11(h), slope=0.1)
        h = F.clipped_relu(self.conv12(h), z=1.0)
        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h
Example No. 24
 def forward(self, x_data, y_data, train=True):
     #print y_data
     batchsize = len(x_data)
     
     csize = self.channel
     
     x, t = chainer.Variable(x_data,volatile=not train), chainer.Variable(y_data,volatile=not train)
     x = F.reshape(x,(batchsize,csize,-1))
     
     h = F.reshape(x,(batchsize,csize,-1,1))
     h = self.conv1(h)
     h = F.reshape(h,(batchsize,10,-1))
     h = F.tanh(h)
     
     h = F.reshape(h,(batchsize,10,-1,1))
     h = self.conv2(h)
     h = F.reshape(h,(batchsize,10,-1))
     h = F.tanh(h)
     
     h = F.reshape(h,(batchsize,10,-1,1))
     h = self.conv3(h)
     h = F.reshape(h,(batchsize,100,-1))
     h = F.tanh(h)
     
     h = F.reshape(h,(batchsize,100,-1,1))
     h = self.conv4(h)
     h = F.reshape(h,(batchsize,100,-1))
     h = F.tanh(h)
     
     h = F.dropout(F.tanh(self.fc5(h)), train=train)
     y = self.fc6(h)
     
     return F.mean_squared_error(y, t)
Example No. 25
 def test(self, x_data):
     x_data = self.procInput(x_data)
     x = Variable(x_data)
     t = Variable(x_data)
     h = self.encode(t)
     y = self.decode(h)
     return self.procOutput(F.mean_squared_error(y, x))
Example No. 26
def sample_3():
    # create random input and output data
    x = Variable(X.copy())
    y = Variable(Y.copy())

    # create a network
    model = TwoLayerNet(INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE)

    for t in range(EPOCHS):
        # forward
        y_pred = model(x)

        # compute and print loss
        loss = F.mean_squared_error(y_pred, y)
        print(loss.data)

        # zero the gradients
        model.cleargrads()

        # backward
        loss.backward()

        # update weights
        model.linear1.W.data -= LEARNING_RATE * model.linear1.W.grad
        model.linear2.W.data -= LEARNING_RATE * model.linear2.W.grad
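
The manual weight update above is plain gradient descent applied to the W matrices only. A hedged equivalent using a Chainer optimizer (assuming the same TwoLayerNet link and the X, Y, INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE, EPOCHS, LEARNING_RATE globals from this example); note that optimizers.SGD updates every parameter, biases included, so results can differ slightly from the W-only updates above:

import chainer.functions as F
from chainer import Variable, optimizers

model = TwoLayerNet(INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE)
optimizer = optimizers.SGD(lr=LEARNING_RATE)
optimizer.setup(model)
x, y = Variable(X.copy()), Variable(Y.copy())
for t in range(EPOCHS):
    loss = F.mean_squared_error(model(x), y)
    model.cleargrads()   # zero gradients before backward
    loss.backward()
    optimizer.update()   # SGD step over all parameters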
Example No. 27
 def forward_one_step(self, x_data, y_data, state, train=True,dropout_ratio=0.0):
     x ,t = Variable(x_data,volatile=not train),Variable(y_data,volatile=not train)
     h1_in   = self.l1_x(F.dropout(x, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
     c1, h1  = F.lstm(state['c1'], h1_in)
     y       = self.l6(F.dropout(h1, ratio=dropout_ratio, train=train))
     state   = {'c1': c1, 'h1': h1}
     return state, F.mean_squared_error(y, t)
Example No. 28
    def calculate_loss(self, q_model, teacher, states, actions, next_states, rewards, dones):
        indices = np.random.permutation(len(states))  # random sampling
        shuffle = lambda x : np.array(x)[indices]
        states = shuffle(states)
        actions = shuffle(actions)
        next_states = shuffle(next_states)
        rewards = shuffle(rewards)
        dones = shuffle(dones)
        #print("states:{}".format(states))
        #print("actions:{}".format(actions))
        #print("next_states:{}".format(next_states))
        #print("rewards:{}".format(rewards))
        #print("dones:{}".format(dones))

        v_states = chainer.Variable(states)
        v_next_states = chainer.Variable(next_states)

        qv = q_model.forward(v_states)
        #print("qv:{}".format(qv.data))
        max_qv_next = np.max(teacher.forward(v_next_states).data, axis=1)
        target = qv.data.copy()
        #teacher_qv = np.sign(rewards)
        teacher_qv = rewards
        for i, action in enumerate(actions):
            if not dones[i]:
                teacher_qv[i] += self.gamma * max_qv_next[i]
            target[i, action] = teacher_qv[i]
        #print("target:{}".format(target))

        td = chainer.Variable(target) - qv
        #print("td:{}".format(td.data))
        zeros = chainer.Variable(np.zeros(td.data.shape, dtype=np.float32))
        loss = F.mean_squared_error(td, zeros)
        return loss
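
Taking the mean squared error of the TD vector against a zero tensor is just the mean of td**2; a quick check:

import numpy as np
import chainer
import chainer.functions as F

td = chainer.Variable(np.array([[1.0, -2.0]], dtype=np.float32))
zeros = chainer.Variable(np.zeros((1, 2), dtype=np.float32))
print(F.mean_squared_error(td, zeros).data, np.mean(td.data ** 2))  # both 2.5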
Example No. 29
    def _forward(self, batch, test=False):

        # Encode and decode the training set
        encoded, means, ln_vars = self._encode(batch, test=test)
        rec = self._decode(encoded, test=test)
        normer = reduce(lambda x, y: x*y, means.data.shape)  # number of elements
        kl_loss = F.gaussian_kl_divergence(means, ln_vars)/normer
        #print 'means={}'.format(means.data.shape)
        #print 'ln_vars={}'.format(ln_vars.data.shape)
        #print 'kl_loss={}, normer={}'.format(kl_loss.data, normer)

        # Sample z from the prior
        samp_p = np.random.standard_normal(means.data.shape).astype('float32')
        z_p = chainer.Variable(samp_p)

        if self.flag_gpu:
            z_p.to_gpu()

        rec_p = self._decode(z_p)

        disc_rec, conv_layer_rec = self.disc(rec, test=test, dropout_ratio=self.dropout_ratio)

        disc_batch, conv_layer_batch = self.disc(batch, test=test, dropout_ratio=self.dropout_ratio)

        disc_x_p, conv_layer_x_p = self.disc(rec_p, test=test, dropout_ratio=self.dropout_ratio)

        dif_l = F.mean_squared_error(conv_layer_rec, conv_layer_batch)

        return kl_loss, dif_l, disc_rec, disc_batch, disc_x_p
Example No. 30
    def forward(self, x_data, y_data, train=True):
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)

        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.dropout(F.relu(self.fc6(h)), train=train, ratio=0.6)
        h = F.dropout(F.relu(self.fc7(h)), train=train, ratio=0.6)
        h = self.fc8(h)

        loss = F.mean_squared_error(h, t)

        return loss, h
Example No. 31
    def main(self):
        rospy.init_node('q_update_client_dqn')

        rospy.Subscriber("/state_observation_flag", Int64,
                         self.state_observation_flag_callback)
        rospy.Subscriber("/joint1_state", Float64, self.joint1_state_callback)
        rospy.Subscriber("/joint2_state", Float64, self.joint2_state_callback)
        rospy.Subscriber("/joint3_state", Float64, self.joint3_state_callback)
        rospy.Subscriber("/joint4_state", Float64, self.joint4_state_callback)
        rospy.Subscriber("/joint5_state", Float64, self.joint5_state_callback)

        pub_1 = rospy.Publisher("/joint1_pose", Float64, queue_size=1)
        pub_2 = rospy.Publisher("/joint2_pose", Float64, queue_size=1)
        pub_3 = rospy.Publisher("/joint3_pose", Float64, queue_size=1)
        pub_4 = rospy.Publisher("/joint4_pose", Float64, queue_size=1)
        pub_5 = rospy.Publisher("/joint5_pose", Float64, queue_size=1)
        pub_6 = rospy.Publisher("/action_num", Int64, queue_size=1)
        pub_7 = rospy.Publisher("/num_step", Int64, queue_size=1)
        pub_8 = rospy.Publisher("/num_episode", Int64, queue_size=1)

        loop_rate = rospy.Rate(100)

        filename_result = "/home/amsl/ros_catkin_ws/src/arm_q_learning/dqn_results/test_result.txt"

        count = 0
        count_temp = 0

        self.joint1 = self.init_joint1
        self.joint2 = self.init_joint2
        self.joint3 = self.init_joint3
        self.joint4 = self.init_joint4
        self.joint5 = self.init_joint5
        print "joint1 : ", self.joint1
        print "joint2 : ", self.joint2
        print "joint3 : ", self.joint3
        print "joint4 : ", self.joint4
        print "joint5 : ", self.joint5

        self.next_joint1 = self.init_joint1
        self.next_joint2 = self.init_joint2
        self.next_joint3 = self.init_joint3
        self.next_joint4 = self.init_joint4
        self.next_joint5 = self.init_joint5
        print "next joint1 : ", self.next_joint1
        print "next joint2 : ", self.next_joint2
        print "next joint3 : ", self.next_joint3
        print "next joint4 : ", self.next_joint4
        print "next joint5 : ", self.next_joint5

        step_count = 0
        episode_count = 0

        episode_now = 0
        episode_past = 0

        temp_count = 0

        loss_list = []

        print "Q Learning Start!!"

        while not rospy.is_shutdown():
            if self.wait_flag:
                print "wait 1 seconds!!"
                count += 1
                if count == 100:
                    self.wait_flag = False
                    self.select_action_flag = False
                    self.q_update_flag = False
                    self.state_observation_flag = True
                    self.state_observation_flag1 = True
                    self.state_observation_flag2 = True
                    self.state_observation_flag3 = True
                    self.state_observation_flag4 = True
                    self.state_observation_flag5 = True
                    count = 0
                if count == 10:
                    self.action_num = 0
                    self.joint1 = self.init_next_joint1
                    self.joint2 = self.init_next_joint2
                    self.joint3 = self.init_next_joint3
                    self.joint4 = self.init_next_joint4
                    self.joint5 = self.init_next_joint5
                    self.reward = 0.0
                    pub_1.publish(self.joint1)
                    pub_2.publish(self.joint2)
                    pub_3.publish(self.joint3)
                    pub_4.publish(self.joint4)
                    pub_5.publish(self.joint5)
                    pub_6.publish(self.action_num)
            else:
                if self.select_action_flag:
                    self.action = self.epsilon_greedy(self.joint1, self.joint2,
                                                      self.joint3, self.joint4,
                                                      self.joint5)
                    self.action_num = self.action
                    print "self.action_num : ", self.action_num
                    pub_1.publish(self.joint1)
                    pub_2.publish(self.joint2)
                    pub_3.publish(self.joint3)
                    pub_4.publish(self.joint4)
                    pub_5.publish(self.joint5)
                    pub_6.publish(self.action_num)
                    self.select_action_flag = False

                if self.state_observation_flag and self.state_observation_flag1 and self.state_observation_flag2 and self.state_observation_flag3 and self.state_observation_flag4 and self.state_observation_flag5:
                    print "self.joint_state[0] : ", self.joint_state[0]
                    print "self.joint_state[1] : ", self.joint_state[1]
                    print "self.joint_state[2] : ", self.joint_state[2]
                    print "self.joint_state[3] : ", self.joint_state[3]
                    print "self.joint_state[4] : ", self.joint_state[4]

                    print "now joint1 : ", self.joint1
                    print "now joint2 : ", self.joint2
                    print "now joint3 : ", self.joint3
                    print "now joint4 : ", self.joint4
                    print "now joint5 : ", self.joint5

                    self.next_joint1 = self.joint_state[0]
                    self.next_joint2 = self.joint_state[1]
                    self.next_joint3 = self.joint_state[2]
                    self.next_joint4 = self.joint_state[3]
                    self.next_joint5 = self.joint_state[4]
                    print "next joint1 : ", self.next_joint1
                    print "next joint2 : ", self.next_joint2
                    print "next joint3 : ", self.next_joint3
                    print "next joint4 : ", self.next_joint4
                    print "next joint5 : ", self.next_joint5

                    self.reward = self.reward_calculation_client(step_count)
                    print "reward : ", self.reward
                    #  self.select_action_flag = True
                    self.q_update_flag = True
                    self.state_observation_flag = False
                    self.state_observation_flag1 = False
                    self.state_observation_flag2 = False
                    self.state_observation_flag3 = False
                    self.state_observation_flag4 = False
                    self.state_observation_flag5 = False

                if self.q_update_flag:
                    target_val = self.reward + self.GAMMA * np.max(
                        self.forward(self.next_joint1, self.next_joint2,
                                     self.next_joint3, self.next_joint4,
                                     self.next_joint5).data)
                    self.optimizer.zero_grads()
                    tt = xp.copy(self.q_list.data)
                    tt[0][self.action] = target_val
                    target = chainer.Variable(tt)
                    #  loss = 0.5 * (target - self.q_list) ** 2
                    loss = F.mean_squared_error(target, self.q_list)
                    #  self.ALPHA = float(self.ALPHA)
                    #  loss.grad = xp.array([[self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA, self.ALPHA]], dtype=xp.float32)
                    loss_list.append(np.max(loss.data))
                    loss.backward()
                    self.optimizer.update()

                    self.select_action_flag = True
                    self.q_update_flag = False
                    step_count += 1
                    print "episode : %d " % episode_count,
                    print "step : %d " % step_count,
                    print "now joint1 : %d " % self.joint1,
                    print "now joint2 : %d " % self.joint2,
                    print "now joint3 : %d " % self.joint3,
                    print "now joint4 : %d " % self.joint4,
                    print "now joint5 : %d " % self.joint5,
                    print "now action : %d" % self.action,
                    print "loss : ", np.max(loss.data),
                    print "reward : %.1f  " % self.reward,
                    print "EPSILON : %.5f " % self.EPSILON
                    print ""

                #  print ""

                if self.reward >= 1:
                    print "episode : %d " % episode_count,
                    print "step : %d " % step_count,
                    print "now joint1 : %d " % self.joint1,
                    print "now joint2 : %d " % self.joint2,
                    print "now joint3 : %d " % self.joint3,
                    print "now joint4 : %d " % self.joint4,
                    print "now joint5 : %d " % self.joint5,
                    print "now action : %d" % self.action,
                    print "loss average : %.3f " % (sum(loss_list) /
                                                    len(loss_list)),
                    print "reward : %.1f  " % self.reward,
                    print "EPSILON : %.5f " % self.EPSILON,
                    print "succsess!!"
                    print ""

                    temp_result = np.array(([[episode_count, step_count]]),
                                           dtype=np.int32)
                    if episode_count == 0:
                        test_result = temp_result
                    else:
                        test_result = np.r_[test_result, temp_result]

                    while 1:
                        rand_joint1 = randint(0, 1 - 1) + self.init_joint1
                        rand_joint2 = randint(0, 11 - 1) + self.init_joint2
                        rand_joint3 = randint(-10, 1) + self.init_joint3
                        rand_joint4 = randint(0, 1 - 1) + self.init_joint4
                        rand_joint5 = randint(0, 1 - 1) + self.init_joint5

                        if rand_joint2 <= -57 and rand_joint3 <= 75:
                            print "one more!"
                        else:
                            self.init_next_joint1 = rand_joint1
                            self.init_next_joint2 = rand_joint2
                            self.init_next_joint3 = rand_joint3
                            self.init_next_joint4 = rand_joint4
                            self.init_next_joint5 = rand_joint5
                            break

                    step_count = 0
                    episode_count += 1
                    episode_now = episode_count

                    self.action_num = 0
                    self.joint1 = self.init_next_joint1
                    self.joint2 = self.init_next_joint2
                    self.joint3 = self.init_next_joint3
                    self.joint4 = self.init_next_joint4
                    self.joint5 = self.init_next_joint5
                    pub_1.publish(self.joint1)
                    pub_2.publish(self.joint2)
                    pub_3.publish(self.joint3)
                    pub_4.publish(self.joint4)
                    pub_5.publish(self.joint5)
                    pub_6.publish(self.action_num)
                    loss_list = []

                    self.wait_flag = True
                else:
                    if step_count < 300:

                        self.joint1 = self.next_joint1
                        self.joint2 = self.next_joint2
                        self.joint3 = self.next_joint3
                        self.joint4 = self.next_joint4
                        self.joint5 = self.next_joint5

                        episode_past = episode_now
                    else:
                        print "episode : %d " % episode_count,
                        print "step : %d " % step_count,
                        print "now joint1 : %d " % self.joint1,
                        print "now joint2 : %d " % self.joint2,
                        print "now joint3 : %d " % self.joint3,
                        print "now joint4 : %d " % self.joint4,
                        print "now joint5 : %d " % self.joint5,
                        print "now action : %d" % self.action,
                        print "loss average : %.3f " % (sum(loss_list) /
                                                        len(loss_list)),
                        print "reward : %.1f  " % self.reward,
                        print "EPSILON : %.5f " % self.EPSILON,
                        print "failuer!!"
                        print ""

                        temp_result = np.array(([[episode_count, step_count]]),
                                               dtype=np.int32)
                        if episode_count == 0:
                            test_result = temp_result
                        else:
                            test_result = np.r_[test_result, temp_result]

                        while 1:
                            rand_joint1 = randint(0, 1 - 1) + self.init_joint1
                            rand_joint2 = randint(0, 11 - 1) + self.init_joint2
                            rand_joint3 = randint(-10, 1) + self.init_joint3
                            rand_joint4 = randint(0, 1 - 1) + self.init_joint4
                            rand_joint5 = randint(0, 1 - 1) + self.init_joint5

                            if rand_joint2 <= -57 and rand_joint3 <= 75:
                                print "one more!"
                            else:
                                self.init_next_joint1 = rand_joint1
                                self.init_next_joint2 = rand_joint2
                                self.init_next_joint3 = rand_joint3
                                self.init_next_joint4 = rand_joint4
                                self.init_next_joint5 = rand_joint5
                                break

                        step_count = 0
                        episode_count += 1
                        episode_now = episode_count

                        self.action_num = 0
                        self.joint1 = self.init_next_joint1
                        self.joint2 = self.init_next_joint2
                        self.joint3 = self.init_next_joint3
                        self.joint4 = self.init_next_joint4
                        self.joint5 = self.init_next_joint5
                        pub_1.publish(self.joint1)
                        pub_2.publish(self.joint2)
                        pub_3.publish(self.joint3)
                        pub_4.publish(self.joint4)
                        pub_5.publish(self.joint5)
                        pub_6.publish(self.action_num)
                        loss_list = []

                        self.wait_flag = True

                if math.fabs(episode_now - episode_past) > 1e-6:
                    if self.EPSILON > 0.3000:
                        self.EPSILON -= 0.0002

                self.num_step = step_count
                pub_7.publish(self.num_step)
                self.num_episode = episode_count
                pub_8.publish(self.num_episode)

                if episode_count % 50 == 0:
                    model_filename = "/home/amsl/ros_catkin_ws/src/arm_q_learning/dqn_model/dqn_arm_model_%d.dat" % episode_count
                    f = open(model_filename, 'w')
                    pickle.dump(self.model, f)
                    f.close()

                if episode_count > 5000:
                    np.savetxt(filename_result,
                               test_result,
                               fmt="%d",
                               delimiter=",")
                    #  f = open('/home/amsl/ros_catkin_ws/src/arm_q_learning/dqn_model/dqn_arm_model.dat', 'w')
                    #  pickle.dump(self.model, f)
                    print "Finish!!!"
                    break

            loop_rate.sleep()
Example No. 32
 def loss_enc(self, enc, x_real, x_fake):
     loss = F.mean_squared_error(x_real, x_fake)
     chainer.report({'loss': loss}, enc)
     return loss
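
chainer.report stores the value under the reporting link's registered name, which is how the training extensions later pick up 'loss'. A minimal sketch of that mechanism, assuming enc is any chainer.Link:

import chainer
import chainer.links as L

enc = L.Linear(2, 2)
reporter = chainer.Reporter()
reporter.add_observer('enc', enc)
observation = {}
with reporter.scope(observation):
    chainer.report({'loss': 0.5}, enc)
print(observation)  # {'enc/loss': 0.5}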
Example No. 33
def train_dqn(env):
    class Q_Network(chainer.Chain):
        def __init__(self, input_size, hidden_size, output_size):
            super(Q_Network,
                  self).__init__(fc1=L.Linear(input_size, hidden_size),
                                 fc2=L.Linear(hidden_size, hidden_size),
                                 fc3=L.Linear(hidden_size, output_size))

        def __call__(self, x):
            h = F.relu(self.fc1(x))
            h = F.relu(self.fc2(h))
            y = self.fc3(h)
            return y

        def reset(self):
            self.cleargrads()

    Q = Q_Network(input_size=env.history_t + 1, hidden_size=100, output_size=3)
    Q_ast = copy.deepcopy(Q)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(Q)

    epoch_num = 50
    step_max = len(env.data) - 1
    memory_size = 200
    batch_size = 20
    epsilon = 1.0
    epsilon_decrease = 1e-3
    epsilon_min = 0.1
    start_reduce_epsilon = 200
    train_freq = 10
    update_q_freq = 20
    gamma = 0.97
    show_log_freq = 5

    memory = []
    total_step = 0
    total_rewards = []
    total_losses = []

    start = time.time()
    for epoch in range(epoch_num):

        pobs = env.reset()
        step = 0
        done = False
        total_reward = 0
        total_loss = 0

        while not done and step < step_max:

            # select act
            pact = np.random.randint(3)

            if np.random.rand() > epsilon:
                pact = Q(np.array(pobs, dtype=np.float32).reshape(1, -1))
                pact = np.argmax(pact.data)

            # act
            obs, reward, done = env.step(pact)

            # add memory
            memory.append((pobs, pact, reward, obs, done))
            if len(memory) > memory_size:
                memory.pop(0)

            # train or update q
            if len(memory) == memory_size:
                if total_step % train_freq == 0:
                    shuffled_memory = np.random.permutation(memory)
                    memory_idx = range(len(shuffled_memory))
                    for i in memory_idx[::batch_size]:
                        batch = np.array(shuffled_memory[i:i + batch_size])
                        b_pobs = np.array(batch[:, 0].tolist(),
                                          dtype=np.float32).reshape(
                                              batch_size, -1)
                        b_pact = np.array(batch[:, 1].tolist(), dtype=np.int32)
                        b_reward = np.array(batch[:, 2].tolist(),
                                            dtype=np.int32)
                        b_obs = np.array(batch[:, 3].tolist(),
                                         dtype=np.float32).reshape(
                                             batch_size, -1)
                        b_done = np.array(batch[:, 4].tolist(), dtype=np.bool)

                        q = Q(b_pobs)
                        maxq = np.max(Q_ast(b_obs).data, axis=1)
                        target = copy.deepcopy(q.data)
                        for j in range(batch_size):
                            #MODIFY
                            target[j, b_pact[j]] = b_reward[
                                j] + gamma * maxq[j] * (not b_done[j])
                        Q.reset()
                        loss = F.mean_squared_error(q, target)
                        total_loss += loss.data
                        loss.backward()
                        optimizer.update()

                if total_step % update_q_freq == 0:
                    Q_ast = copy.deepcopy(Q)

            # epsilon
            if epsilon > epsilon_min and total_step > start_reduce_epsilon:
                epsilon -= epsilon_decrease

            # next step
            total_reward += reward
            pobs = obs
            step += 1
            total_step += 1

        total_rewards.append(total_reward)
        total_losses.append(total_loss)

        if (epoch + 1) % show_log_freq == 0:
            log_reward = sum(total_rewards[(
                (epoch + 1) - show_log_freq):]) / show_log_freq
            log_loss = sum(total_losses[(
                (epoch + 1) - show_log_freq):]) / show_log_freq
            elapsed_time = time.time() - start
            print('\t'.join(
                map(str, [
                    epoch + 1, epsilon, total_step, log_reward, log_loss,
                    elapsed_time
                ])))
            start = time.time()

    return Q, total_losses, total_rewards
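
train_dqn only touches the environment through env.history_t, env.data, env.reset(), and env.step(act) returning (obs, reward, done). A hypothetical minimal stub matching that interface, handy for smoke-testing the loop (everything here is assumed, not from the original project):

import numpy as np

class RandomWalkEnv:
    def __init__(self, n=300, history_t=90):
        self.data = np.cumsum(np.random.randn(n))  # fake price series
        self.history_t = history_t
        self.reset()

    def reset(self):
        self.t = 0
        self.history = [0.0] * self.history_t
        return [0.0] + self.history  # observation of length history_t + 1

    def step(self, act):
        self.t += 1
        reward = float(np.random.randn())  # placeholder reward signal
        self.history = self.history[1:] + [float(self.data[self.t] - self.data[self.t - 1])]
        done = self.t == len(self.data) - 1
        return [0.0] + self.history, reward, done

# Q, total_losses, total_rewards = train_dqn(RandomWalkEnv())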
Example No. 34
serializers.load_npz(DIR_NAME + '/my.model', model)

swingDataList = []
for i in range(prop['SWING_NUM']):
    rootDir = '{}/learning/IMAGES/{}/{}/{}/{}' \
              .format(ROOT, prop['IMG_DIR'], prop['DATA_TYPE'], 'swing', i)
    dataList = mylib.image.createInputSwingDataList(rootDir)
    swingDataList.append(np.array(dataList).astype(np.float32))

errors = []
for swingNum in range(prop['SWING_NUM']):
    x = Variable(mylib.NN.cupyArray(swingDataList[swingNum], False))
    t = Variable(x.data)
    
    y = model(x)
    loss = F.mean_squared_error(y, t)
    errors.append(loss.data)

    mylib.util.mkdir('{}/output/swing'.format(DIR_NAME))
    mylib.util.mkdir('{}/output/swing/swing{}'.format(DIR_NAME, swingNum))  # create the per-swing directory before writing into it
    for i in range(len(swingDataList[swingNum])):
        img = mylib.image.makeOutputData(y.data[i], prop['IMG_HEIGHT'], prop['IMG_WIDTH'])
        cv2.imwrite('{}/output/swing/swing{}/img{}.png'.format(DIR_NAME, swingNum, i), img)

# Record the reconstruction errors
errors = np.array(errors)
fError = open(DIR_NAME + '/output/swing/error.dat', 'w')
fError.write('# swingNum\t' + 'error\n')
for swingNum in range(prop['SWING_NUM']):
    fError.write('\t'.join([str(swingNum), str(errors[swingNum])]) + '\n')
fError.close()
Example No. 35
    def update_core(self):
        opt_enc_x = self.get_optimizer('opt_enc_x')
        opt_dec_x = self.get_optimizer('opt_dec_x')
        opt_enc_y = self.get_optimizer('opt_enc_y')
        opt_dec_y = self.get_optimizer('opt_dec_y')
        opt_x = self.get_optimizer('opt_x')
        opt_y = self.get_optimizer('opt_y')
        opt_z = self.get_optimizer('opt_z')

        # get mini-batch
        batch_x = self.get_iterator('main').next()
        batch_y = self.get_iterator('train_B').next()
        x = Variable(self.converter(batch_x, self.args.gpu[0]))
        y = Variable(self.converter(batch_y, self.args.gpu[0]))

        # encode to latent (X,Y => Z)
        x_z = self.enc_x(losses.add_noise(x, sigma=self.args.noise))
        y_z = self.enc_y(losses.add_noise(y, sigma=self.args.noise))

        loss_gen = 0
        ## regularisation on the latent space
        if self.args.lambda_reg > 0:
            loss_reg_enc_y = losses.loss_func_reg(y_z[-1], 'l2')
            loss_reg_enc_x = losses.loss_func_reg(x_z[-1], 'l2')
            loss_gen = loss_gen + self.args.lambda_reg * (loss_reg_enc_x +
                                                          loss_reg_enc_y)
            chainer.report({'loss_reg': loss_reg_enc_x}, self.enc_x)
            chainer.report({'loss_reg': loss_reg_enc_y}, self.enc_y)

        ## discriminator for the latent space: distribution of image of enc_x should look same as that of enc_y
        # since z is a list (for u-net), we use only the output of the last layer
        if self.args.lambda_dis_z > 0:
            if self.args.dis_wgan:
                loss_enc_x_adv = -F.average(self.dis_z(x_z[-1]))
                loss_enc_y_adv = F.average(self.dis_z(y_z[-1]))
            else:
                loss_enc_x_adv = losses.loss_func_comp(self.dis_z(x_z[-1]),
                                                       1.0)
                loss_enc_y_adv = losses.loss_func_comp(self.dis_z(y_z[-1]),
                                                       0.0)
            loss_gen = loss_gen + self.args.lambda_dis_z * (loss_enc_x_adv +
                                                            loss_enc_y_adv)
            chainer.report({'loss_adv': loss_enc_x_adv}, self.enc_x)
            chainer.report({'loss_adv': loss_enc_y_adv}, self.enc_y)

        # cycle for X=>Z=>X (Autoencoder)
        x_x = self.dec_x(x_z)
        loss_cycle_xzx = F.mean_absolute_error(x_x, x)
        chainer.report({'loss_cycle': loss_cycle_xzx}, self.enc_x)
        # cycle for Y=>Z=>Y (Autoencoder)
        y_y = self.dec_y(y_z)
        loss_cycle_yzy = F.mean_absolute_error(y_y, y)
        chainer.report({'loss_cycle': loss_cycle_yzy}, self.enc_y)
        loss_gen = loss_gen + self.args.lambda_Az * loss_cycle_xzx + self.args.lambda_Bz * loss_cycle_yzy

        ## decode from latent Z => Y,X
        x_y = self.dec_y(x_z)
        y_x = self.dec_x(y_z)

        # cycle for X=>Z=>Y=>Z=>X  (Z=>Y=>Z does not work well)
        x_y_x = self.dec_x(self.enc_y(x_y))
        loss_cycle_x = F.mean_absolute_error(x_y_x, x)
        chainer.report({'loss_cycle': loss_cycle_x}, self.dec_x)
        # cycle for Y=>Z=>X=>Z=>Y
        y_x_y = self.dec_y(self.enc_x(y_x))
        loss_cycle_y = F.mean_absolute_error(y_x_y, y)
        chainer.report({'loss_cycle': loss_cycle_y}, self.dec_y)
        loss_gen = loss_gen + self.args.lambda_A * loss_cycle_x + self.args.lambda_B * loss_cycle_y

        ## adversarial for Y
        if self.args.lambda_dis_y > 0:
            x_y_copy = Variable(self._buffer_y.query(x_y.data))
            if self.args.dis_wgan:
                loss_dec_y_adv = -F.average(self.dis_y(x_y))
            else:
                loss_dec_y_adv = losses.loss_func_comp(self.dis_y(x_y), 1.0)
            loss_gen = loss_gen + self.args.lambda_dis_y * loss_dec_y_adv
            chainer.report({'loss_adv': loss_dec_y_adv}, self.dec_y)
        ## adversarial for X
        if self.args.lambda_dis_x > 0:
            y_x_copy = Variable(self._buffer_x.query(y_x.data))
            if self.args.dis_wgan:
                loss_dec_x_adv = -F.average(self.dis_x(y_x))
            else:
                loss_dec_x_adv = losses.loss_func_comp(self.dis_x(y_x), 1.0)
            loss_gen = loss_gen + self.args.lambda_dis_x * loss_dec_x_adv
            chainer.report({'loss_adv': loss_dec_x_adv}, self.dec_x)

        ## idempotence
        if self.args.lambda_idempotence > 0:
            loss_idem_x = F.mean_absolute_error(y_x,
                                                self.dec_x(self.enc_y(y_x)))
            loss_idem_y = F.mean_absolute_error(x_y,
                                                self.dec_y(self.enc_x(x_y)))
            loss_gen = loss_gen + self.args.lambda_idempotence * (loss_idem_x +
                                                                  loss_idem_y)
            chainer.report({'loss_idem': loss_idem_x}, self.dec_x)
            chainer.report({'loss_idem': loss_idem_y}, self.dec_y)
        # Y => X shouldn't change X
        if self.args.lambda_domain > 0:
            loss_dom_x = F.mean_absolute_error(x, self.dec_x(self.enc_y(x)))
            loss_dom_y = F.mean_absolute_error(y, self.dec_y(self.enc_x(y)))
            loss_gen = loss_gen + self.args.lambda_domain * (loss_dom_x +
                                                             loss_dom_y)
            chainer.report({'loss_dom': loss_dom_x}, self.dec_x)
            chainer.report({'loss_dom': loss_dom_y}, self.dec_y)

        ## images before/after conversion should look similar in terms of perceptual loss
        if self.args.lambda_identity_x > 0:
            loss_id_x = losses.loss_perceptual(
                x,
                x_y,
                self.vgg,
                layer=self.args.perceptual_layer,
                grey=self.args.grey)
            loss_gen = loss_gen + self.args.lambda_identity_x * loss_id_x
            chainer.report({'loss_id': 1e-3 * loss_id_x}, self.enc_x)
        if self.args.lambda_identity_y > 0:
            loss_id_y = losses.loss_perceptual(
                y,
                y_x,
                self.vgg,
                layer=self.args.perceptual_layer,
                grey=self.args.grey)
            loss_gen = loss_gen + self.args.lambda_identity_y * loss_id_y
            chainer.report({'loss_id': 1e-3 * loss_id_y}, self.enc_y)
        ## background (pixels with value -1) should be preserved
        if self.args.lambda_air > 0:
            loss_air_x = losses.loss_comp_low(x,
                                              x_y,
                                              self.args.air_threshold,
                                              norm='l2')
            loss_air_y = losses.loss_comp_low(y,
                                              y_x,
                                              self.args.air_threshold,
                                              norm='l2')
            loss_gen = loss_gen + self.args.lambda_air * (loss_air_x +
                                                          loss_air_y)
            chainer.report({'loss_air': loss_air_x}, self.dec_y)
            chainer.report({'loss_air': loss_air_y}, self.dec_x)
        ## images before/after conversion should look similar in the gradient domain
        if self.args.lambda_grad > 0:
            loss_grad_x = losses.loss_grad(x, x_y)
            loss_grad_y = losses.loss_grad(y, y_x)
            loss_gen = loss_gen + self.args.lambda_grad * (loss_grad_x +
                                                           loss_grad_y)
            chainer.report({'loss_grad': loss_grad_x}, self.dec_y)
            chainer.report({'loss_grad': loss_grad_y}, self.dec_x)
        ## total variation (only for X -> Y)
        if self.args.lambda_tv > 0:
            loss_tv = losses.total_variation(x_y,
                                             tau=self.args.tv_tau,
                                             method=self.args.tv_method)
            if self.args.imgtype == "dcm" and self.args.num_slices > 1:
                loss_tv += losses.total_variation_ch(x_y)
            loss_gen = loss_gen + self.args.lambda_tv * loss_tv
            chainer.report({'loss_tv': loss_tv}, self.dec_y)

        ## back propagate
        self.enc_x.cleargrads()
        self.dec_x.cleargrads()
        self.enc_y.cleargrads()
        self.dec_y.cleargrads()
        loss_gen.backward()
        opt_enc_x.update(loss=loss_gen)
        opt_dec_x.update(loss=loss_gen)
        if not self.args.single_encoder:
            opt_enc_y.update(loss=loss_gen)
        opt_dec_y.update(loss=loss_gen)

        ##########################################
        ## discriminator for Y
        if self.args.dis_wgan:  ## synthesised -, real +
            eps = self.xp.random.uniform(0, 1, size=len(batch_y)).astype(
                self.xp.float32)[:, None, None, None]
            if self.args.lambda_dis_y > 0:
                ## discriminator for X=>Y
                loss_dis_y = F.average(self.dis_y(x_y_copy) - self.dis_y(y))
                y_mid = eps * y + (1.0 - eps) * x_y_copy
                # gradient penalty
                gd_y, = chainer.grad([self.dis_y(y_mid)], [y_mid],
                                     enable_double_backprop=True)
                gd_y = F.sqrt(F.batch_l2_norm_squared(gd_y) + 1e-6)
                loss_dis_y_gp = F.mean_squared_error(
                    gd_y, self.xp.ones_like(gd_y.data))
                chainer.report({'loss_dis': loss_dis_y}, self.dis_y)
                chainer.report(
                    {'loss_gp': self.args.lambda_wgan_gp * loss_dis_y_gp},
                    self.dis_y)
                loss_dis_y = loss_dis_y + self.args.lambda_wgan_gp * loss_dis_y_gp
                self.dis_y.cleargrads()
                loss_dis_y.backward()
                opt_y.update(loss=loss_dis_y)

            if self.args.lambda_dis_x > 0:
                ## discriminator for Y=>X
                loss_dis_x = F.average(self.dis_x(y_x_copy) - self.dis_x(x))
                x_mid = eps * x + (1.0 - eps) * y_x_copy
                # gradient penalty
                gd_x, = chainer.grad([self.dis_x(x_mid)], [x_mid],
                                     enable_double_backprop=True)
                gd_x = F.sqrt(F.batch_l2_norm_squared(gd_x) + 1e-6)
                loss_dis_x_gp = F.mean_squared_error(
                    gd_x, self.xp.ones_like(gd_x.data))
                chainer.report({'loss_dis': loss_dis_x}, self.dis_x)
                chainer.report(
                    {'loss_gp': self.args.lambda_wgan_gp * loss_dis_x_gp},
                    self.dis_x)
                loss_dis_x = loss_dis_x + self.args.lambda_wgan_gp * loss_dis_x_gp
                self.dis_x.cleargrads()
                loss_dis_x.backward()
                opt_x.update(loss=loss_dis_x)

            ## discriminator for latent: X -> Z is - while Y -> Z is +
            if self.args.lambda_dis_z > 0 and t == 0:
                loss_dis_z = F.average(
                    self.dis_z(x_z[-1]) - self.dis_z(y_z[-1]))
                z_mid = eps * x_z[-1] + (1.0 - eps) * y_z[-1]
                # gradient penalty
                gd_z, = chainer.grad([self.dis_z(z_mid)], [z_mid],
                                     enable_double_backprop=True)
                gd_z = F.sqrt(F.batch_l2_norm_squared(gd_z) + 1e-6)
                loss_dis_z_gp = F.mean_squared_error(
                    gd_z, self.xp.ones_like(gd_z.data))
                chainer.report({'loss_dis': loss_dis_z}, self.dis_z)
                chainer.report(
                    {'loss_gp': self.args.lambda_wgan_gp * loss_dis_z_gp},
                    self.dis_z)
                loss_dis_z = loss_dis_z + self.args.lambda_wgan_gp * loss_dis_z_gp
                self.dis_z.cleargrads()
                loss_dis_z.backward()
                opt_z.update(loss=loss_dis_z)

        else:  ## LSGAN
            if self.args.lambda_dis_y > 0:
                ## discriminator for X=>Y (real:1, fake:0)
                disy_fake = self.dis_y(x_y_copy)
                loss_dis_y_fake = losses.loss_func_comp(
                    disy_fake, 0.0, self.args.dis_jitter)
                disy_real = self.dis_y(y)
                loss_dis_y_real = losses.loss_func_comp(
                    disy_real, 1.0, self.args.dis_jitter)
                if self.args.dis_reg_weighting > 0:  ## regularization
                    loss_dis_y_reg = (
                        F.average(F.absolute(disy_real[:, 1, :, :])) +
                        F.average(F.absolute(disy_fake[:, 1, :, :])))
                else:
                    loss_dis_y_reg = 0
                chainer.report({'loss_reg': loss_dis_y_reg}, self.dis_y)
                loss_dis_y_gp = 0
                chainer.report({'loss_fake': loss_dis_y_fake}, self.dis_y)
                chainer.report({'loss_real': loss_dis_y_real}, self.dis_y)
                loss_dis_y = (
                    loss_dis_y_fake + loss_dis_y_real
                ) * 0.5 + self.args.dis_reg_weighting * loss_dis_y_reg + self.args.lambda_wgan_gp * loss_dis_y_gp
                self.dis_y.cleargrads()
                loss_dis_y.backward()
                opt_y.update(loss=loss_dis_y)

            if self.args.lambda_dis_x > 0:
                ## discriminator for Y=>X
                disx_fake = self.dis_x(y_x_copy)
                loss_dis_x_fake = losses.loss_func_comp(
                    disx_fake, 0.0, self.args.dis_jitter)
                disx_real = self.dis_x(x)
                loss_dis_x_real = losses.loss_func_comp(
                    disx_real, 1.0, self.args.dis_jitter)
                if self.args.dis_reg_weighting > 0:  ## regularization
                    loss_dis_x_reg = (
                        F.average(F.absolute(disx_fake[:, 1, :, :])) +
                        F.average(F.absolute(disx_real[:, 1, :, :])))
                else:
                    loss_dis_x_reg = 0
                chainer.report({'loss_reg': loss_dis_x_reg}, self.dis_x)
                loss_dis_x_gp = 0
                chainer.report({'loss_fake': loss_dis_x_fake}, self.dis_x)
                chainer.report({'loss_real': loss_dis_x_real}, self.dis_x)
                loss_dis_x = (
                    loss_dis_x_fake + loss_dis_x_real
                ) * 0.5 + self.args.dis_reg_weighting * loss_dis_x_reg + self.args.lambda_wgan_gp * loss_dis_x_gp
                self.dis_x.cleargrads()
                loss_dis_x.backward()
                opt_x.update(loss=loss_dis_x)

            ## discriminator for latent: X -> Z is 0.0 while Y -> Z is 1.0
            if self.args.lambda_dis_z > 0:
                disz_xz = self.dis_z(x_z[-1])
                loss_dis_z_x = losses.loss_func_comp(disz_xz, 0.0,
                                                     self.args.dis_jitter)
                disz_yz = self.dis_z(y_z[-1])
                loss_dis_z_y = losses.loss_func_comp(disz_yz, 1.0,
                                                     self.args.dis_jitter)
                if self.args.dis_reg_weighting > 0:  ## regularization
                    loss_dis_z_reg = (
                        F.average(F.absolute(disz_xz[:, 1, :, :])) +
                        F.average(F.absolute(disz_yz[:, 1, :, :])))
                else:
                    loss_dis_z_reg = 0
                chainer.report({'loss_x': loss_dis_z_x}, self.dis_z)
                chainer.report({'loss_y': loss_dis_z_y}, self.dis_z)
                chainer.report({'loss_reg': loss_dis_z_reg}, self.dis_z)
                loss_dis_z = (
                    loss_dis_z_x + loss_dis_z_y
                ) * 0.5 + self.args.dis_reg_weighting * loss_dis_z_reg
                self.dis_z.cleargrads()
                loss_dis_z.backward()
                opt_z.update(loss=loss_dis_z)
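The three WGAN-GP updates above share one gradient-penalty pattern: sample a random point between a real and a generated example, take the critic's gradient there with `chainer.grad(..., enable_double_backprop=True)`, and pull its L2 norm toward 1 with `F.mean_squared_error`. A minimal, self-contained sketch of that pattern (`critic`, `real` and `fake` are placeholder names, not identifiers from the code above):

import numpy as np
import chainer
import chainer.functions as F


def gradient_penalty(critic, real, fake, xp=np):
    # random interpolates between real and fake samples
    eps = xp.random.uniform(0, 1, size=len(real)).astype(xp.float32)[:, None, None, None]
    mid = chainer.Variable(eps * real + (1.0 - eps) * fake)
    # gradient of the critic at the interpolates; double backprop is enabled
    # so the penalty itself can be backpropagated
    grad, = chainer.grad([critic(mid)], [mid], enable_double_backprop=True)
    grad_norm = F.sqrt(F.batch_l2_norm_squared(grad) + 1e-6)
    # penalize deviation of the gradient norm from 1
    return F.mean_squared_error(grad_norm, xp.ones_like(grad_norm.data))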
Example #36
    def evaluate(self):
        domain = ['in', 'truth', 'out']
        if self.eval_hook:
            self.eval_hook(self)

        for k, dataset in enumerate(['test', 'train']):
            batch = self._iterators[dataset].next()
            x_in, t_out = chainer.dataset.concat_examples(batch, self.device)
            x_in = Variable(x_in)  # original image
            t_out = Variable(
                t_out)  # corresponding translated image (ground truth)

            with chainer.using_config(
                    'train', False), chainer.function.no_backprop_mode():
                x_out = self._targets['dec_y'](
                    self._targets['enc_x'](x_in))  # translated image by NN

            ## unfold stack and apply softmax
            if self.args.class_num > 0 and self.args.stack > 0:
                x_in = x_in.reshape(x_in.shape[0] * self.args.stack,
                                    x_in.shape[1] // self.args.stack,
                                    x_in.shape[2], x_in.shape[3])
                x_out = F.softmax(
                    x_out.reshape(x_out.shape[0] * self.args.stack,
                                  x_out.shape[1] // self.args.stack,
                                  x_out.shape[2], x_out.shape[3]))
                t_out = t_out.reshape(t_out.shape[0] * self.args.stack,
                                      t_out.shape[1] // self.args.stack,
                                      t_out.shape[2], t_out.shape[3])
                #print(x_out.shape, t_out.shape)
                # select middle slices
                x_in = x_in[(self.args.stack // 2)::self.args.stack]
                x_out = x_out[(self.args.stack // 2)::self.args.stack]
                t_out = t_out[(self.args.stack // 2)::self.args.stack]

            if dataset == 'test':  # compute statistics on the test batch (fig/gs created here are reused for 'train')
                fig = plt.figure(figsize=(12, 6 * len(x_out)))
                gs = gridspec.GridSpec(2 * len(x_out),
                                       4,
                                       wspace=0.1,
                                       hspace=0.1)
                loss_rec_L1 = F.mean_absolute_error(x_out, t_out)
                loss_rec_L2 = F.mean_squared_error(x_out, t_out)
                loss_rec_CE = softmax_focalloss(x_out,
                                                t_out,
                                                gamma=self.args.focal_gamma,
                                                class_weight=self.class_weight)
                result = {
                    "myval/loss_L1": loss_rec_L1,
                    "myval/loss_L2": loss_rec_L2,
                    "myval/loss_CE": loss_rec_CE
                }

            ## iterate over batch
            for i, var in enumerate([x_in, t_out, x_out]):
                if i % 3 != 0 and self.args.class_num > 0:  # t_out, x_out
                    imgs = var2unit_img(var, 0, 1)  # softmax
                    #imgs[:,:,:,0] = 0 # class 0 => black  ######
                    #imgs = np.roll(imgs,1,axis=3)[:,:,:,:3]  ## R0B, show only 3 classes (-1,0,1)
                else:
                    imgs = var2unit_img(var)  # tanh
#                print(imgs.shape,np.min(imgs),np.max(imgs))
                for j in range(len(imgs)):
                    ax = fig.add_subplot(gs[j + k * len(x_out), i])
                    ax.set_title(dataset + "_" + domain[i], fontsize=8)
                    if (imgs[j].shape[2] == 3):  ## RGB
                        ax.imshow(imgs[j],
                                  interpolation='none',
                                  vmin=0,
                                  vmax=1)
                    elif (imgs[j].shape[2] >= 4):  ## categorical
                        cols = ['k', 'b', 'c', 'g', 'y', 'r', 'm', 'w'] * 5
                        cmap = colors.ListedColormap(cols)
                        im = np.argmax(imgs[j], axis=2)
                        norm = colors.BoundaryNorm(list(range(len(cols) + 1)),
                                                   cmap.N)
                        ax.imshow(im,
                                  interpolation='none',
                                  cmap=cmap,
                                  norm=norm)
                    else:
                        ax.imshow(imgs[j][:, :, -1],
                                  interpolation='none',
                                  cmap='gray',
                                  vmin=0,
                                  vmax=1)
                    ax.set_xticks([])
                    ax.set_yticks([])

            ## difference image
            if (x_out.shape[1] >= 4):  ## categorical
                eps = 1e-7
                p = F.clip(x_out, x_min=eps,
                           x_max=1 - eps)  ## assumes softmax was already applied to the input
                q = -F.clip(t_out, x_min=eps, x_max=1 - eps) * F.log(p)
                diff = F.sum(q * ((1 - p)**2), axis=1, keepdims=True)
                vmin = -1
                vmax = 1
            else:
                diff = (x_out - t_out)
                vmin = -0.1
                vmax = 0.1
            diff = diff.data.get().transpose(0, 2, 3, 1)
            for j in range(len(diff)):
                ax = fig.add_subplot(gs[j + k * len(x_out), 3])
                ax.imshow(diff[j][:, :, 0],
                          interpolation='none',
                          cmap='coolwarm',
                          vmin=vmin,
                          vmax=vmax)
                ax.set_xticks([])
                ax.set_yticks([])

        gs.tight_layout(fig)
        plt.savefig(os.path.join(self.vis_out,
                                 'count{:0>4}.jpg'.format(self.count)),
                    dpi=200)
        self.count += 1
        plt.close()

        return result
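For the categorical branch, `softmax_focalloss` above and the hand-rolled difference map (`q * ((1 - p)**2)`) both follow the focal-loss recipe: cross entropy down-weighted wherever the prediction is already confident. A hedged sketch of that formula; the actual `softmax_focalloss` helper may differ, e.g. in how it applies `class_weight`:

import chainer.functions as F


def focal_loss(p, t, gamma=2.0, eps=1e-7):
    # p: predictions already passed through softmax, t: one-hot targets
    p = F.clip(p, x_min=eps, x_max=1.0 - eps)
    # -(1 - p)^gamma * t * log(p), summed over classes, averaged over the rest
    return F.mean(F.sum(-t * (1.0 - p) ** gamma * F.log(p), axis=1))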
Example #37
                    x_batch_2 = train_2[inds[icount * nbatch:(icount + 1) *
                                             nbatch]]
                    t_batch_2 = train_2_label[inds[icount *
                                                   nbatch:(icount + 1) *
                                                   nbatch]]
                    x_batch_3 = train_3[inds[icount * nbatch:(icount + 1) *
                                             nbatch]]
                    t_batch_3 = train_3_label[inds[icount *
                                                   nbatch:(icount + 1) *
                                                   nbatch]]

                    # FORWARD PROP.
                    y_1 = model_1(x_batch_1)
                    y_2 = model_2(x_batch_2)
                    y_3 = model_3(x_batch_3)
                    if LOSS == 'RMSE':  # note: despite the label, this is plain MSE (no square root)
                        loss_1 = F.mean_squared_error(y_1, t_batch_1)
                        loss_2 = F.mean_squared_error(y_2, t_batch_2)
                        loss_3 = F.mean_squared_error(y_3, t_batch_3)
                    elif LOSS == 'MAE':
                        loss_1 = F.mean_absolute_error(y_1, t_batch_1)
                        loss_2 = F.mean_absolute_error(y_2, t_batch_2)
                        loss_3 = F.mean_absolute_error(y_3, t_batch_3)

                    # BACK PROP.
                    model_1.cleargrads()
                    loss_1.backward()

                    model_2.cleargrads()
                    loss_2.backward()

                    model_3.cleargrads()
                    loss_3.backward()
Example #38
    def evaluate(self):
        batch_x = self._iterators['main'].next()
        batch_y = self._iterators['testB'].next()
        models = self._targets
        if self.eval_hook:
            self.eval_hook(self)

        fig = plt.figure(figsize=(9, 3 * (len(batch_x) + len(batch_y))))
        gs = gridspec.GridSpec(len(batch_x) + len(batch_y),
                               3,
                               wspace=0.1,
                               hspace=0.1)

        x = Variable(self.converter(batch_x, self.device))
        y = Variable(self.converter(batch_y, self.device))

        with chainer.using_config('train', False):
            with chainer.function.no_backprop_mode():
                if len(models) > 2:
                    x_y = models['dec_y'](models['enc_x'](x))
                    if self.single_encoder:
                        x_y_x = models['dec_x'](models['enc_x'](x_y))
                    else:
                        x_y_x = models['dec_x'](
                            models['enc_x'](x))  ## autoencoder
                        #x_y_x = models['dec_x'](models['enc_y'](x_y))
                else:
                    x_y = models['gen_g'](x)
                    x_y_x = models['gen_f'](x_y)

#        for i, var in enumerate([x, x_y]):
        for i, var in enumerate([x, x_y, x_y_x]):
            imgs = postprocess(var).astype(np.float32)
            for j in range(len(imgs)):
                ax = fig.add_subplot(gs[j, i])
                if imgs[j].shape[2] == 1:
                    ax.imshow(imgs[j, :, :, 0],
                              interpolation='none',
                              cmap='gray',
                              vmin=0,
                              vmax=1)
                else:
                    ax.imshow(imgs[j], interpolation='none', vmin=0, vmax=1)
                ax.set_xticks([])
                ax.set_yticks([])

        with chainer.using_config('train', False):
            with chainer.function.no_backprop_mode():
                if len(models) > 2:
                    if self.single_encoder:
                        y_x = models['dec_x'](models['enc_x'](y))
                    else:
                        y_x = models['dec_x'](models['enc_y'](y))
#                    y_x_y = models['dec_y'](models['enc_y'](y))   ## autoencoder
                    y_x_y = models['dec_y'](models['enc_x'](y_x))
                else:  # (gen_g, gen_f)
                    y_x = models['gen_f'](y)
                    y_x_y = models['gen_g'](y_x)

#        for i, var in enumerate([y, y_y]):
        for i, var in enumerate([y, y_x, y_x_y]):
            imgs = postprocess(var).astype(np.float32)
            for j in range(len(imgs)):
                ax = fig.add_subplot(gs[j + len(batch_x), i])
                if imgs[j].shape[2] == 1:
                    ax.imshow(imgs[j, :, :, 0],
                              interpolation='none',
                              cmap='gray',
                              vmin=0,
                              vmax=1)
                else:
                    ax.imshow(imgs[j], interpolation='none', vmin=0, vmax=1)
                ax.set_xticks([])
                ax.set_yticks([])

        gs.tight_layout(fig)
        plt.savefig(os.path.join(self.vis_out,
                                 'count{:0>4}.jpg'.format(self.count)),
                    dpi=200)
        self.count += 1
        plt.close()

        cycle_y_l1 = F.mean_absolute_error(y, y_x_y)
        cycle_y_l2 = F.mean_squared_error(y, y_x_y)
        cycle_x_l1 = F.mean_absolute_error(x, x_y_x)
        id_xy_grad = losses.loss_grad(x, x_y)
        id_xy_l1 = F.mean_absolute_error(x, x_y)

        result = {
            "myval/cycle_y_l1": cycle_y_l1,
            "myval/cycle_y_l2": cycle_y_l2,
            "myval/cycle_x_l1": cycle_x_l1,
            "myval/id_xy_grad": id_xy_grad,
            "myval/id_xy_l1": id_xy_l1
        }
        return result
Example #39
 def __call__(self, x_data, y_data):
     x = Variable(x_data.astype(np.float32).reshape(len(x_data),
                                                    1))  # convert to a Variable object
     y = Variable(y_data.astype(np.float32).reshape(len(y_data),
                                                    1))  # convert to a Variable object
     return F.mean_squared_error(self.predict(x), y)
Example #40
 def __call__(self, x, y):
     y_p = self.model(x)
     loss = F.mean_squared_error(y_p, y)
     reporter.report({'loss': loss}, self)
     return loss
Example #41
 def __call__(self, X, Y):
     predY = self.predict(X)
     loss = F.mean_squared_error(predY, Y)
     return loss
Example #42
 def __call__(self, x, y):
     return F.mean_squared_error(self.fwd(x), y)
Example #43
 def __call__(self, x, t):
     y = self.model(x)
     loss = F.mean_squared_error(y, t)
     return loss
Example #44
    def loss_gen_fm(self, gen, real_fm_expected, fake_fm_expected):
        fm_loss = F.mean_squared_error(real_fm_expected, fake_fm_expected)
        chainer.report({'loss': fm_loss}, gen)  # accessible as gen/loss

        return fm_loss
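Feature matching compares the discriminator's intermediate activations on real and generated batches instead of (or in addition to) its final verdict. A sketch of how the two feature maps fed to `loss_gen_fm` are typically obtained; a discriminator that returns its features alongside its logits is an assumption here, not part of the snippet above:

import chainer.functions as F


def feature_matching_loss(dis, x_real, x_fake):
    # assumes dis(x) returns (logits, intermediate feature map)
    _, fm_real = dis(x_real)
    _, fm_fake = dis(x_fake)
    # stop gradients through the real branch: match fake features to it
    return F.mean_squared_error(fm_real.data, fm_fake)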
Example #45
            m_list.append(m)
            noise_list.append(noise)

        y = generator(const, m_list, stage, noise_list, alpha)
        y_dis = discriminator(y, stage, alpha)
        x_dis = discriminator(x_down, stage, alpha)

        dis_loss = F.mean(F.softplus(-x_dis)) + F.mean(F.softplus(y_dis))

        eps = xp.random.uniform(0, 1, size=batchsize).astype(xp.float32)[:, None, None, None]
        x_mid = eps * y + (1.0 - eps) * x_down

        y_mid = F.sum(discriminator(x_mid, stage, alpha))
        grad, = chainer.grad([y_mid], [x_mid], enable_double_backprop=True)
        grad = F.sqrt(F.sum(grad * grad, axis=(1, 2, 3)))
        loss_gp = lambda_gp * F.mean_squared_error(grad, xp.ones_like(grad.data))

        y.unchain_backward()

        dis_loss += loss_gp

        discriminator.cleargrads()
        dis_loss.backward()
        dis_opt.update()
        dis_loss.unchain_backward()

        m_list = []
        noise_list = []
        z1 = chainer.as_variable(xp.random.normal(size=(batchsize, 512)).astype(xp.float32))
        z2 = chainer.as_variable(xp.random.normal(size=(batchsize, 512)).astype(xp.float32))
        switch_point = np.random.randint(1, 2*stage)
Example #46
    def forward_one_step(self, x_data, state, continuous=True, nonlinear_q='tanh', nonlinear_p='tanh', output_f = 'sigmoid', gpu=-1):

        output = np.zeros(x_data.shape, dtype=np.float32)

        nonlinear = {'sigmoid': F.sigmoid, 'tanh': F.tanh, 'softplus': self.softplus, 'relu': F.relu}
        nonlinear_f_q = nonlinear[nonlinear_q]
        nonlinear_f_p = nonlinear[nonlinear_p]

        output_a_f = nonlinear[output_f]

        # compute q(z|x)
        for i in range(x_data.shape[0]):
            x_in_t = Variable(x_data[i].reshape((1, x_data.shape[1])))
            hidden_q_t = nonlinear_f_q(self.recog_in_h(x_in_t) + self.recog_h_h(state['recog_h']))
            state['recog_h'] = hidden_q_t

        q_mean = self.recog_mean(state['recog_h'])
        q_log_sigma = 0.5 * self.recog_log_sigma(state['recog_h'])

        eps = np.random.normal(0, 1, q_log_sigma.data.shape).astype(np.float32)

        if gpu >= 0:
            eps = cuda.to_gpu(eps)

        eps = Variable(eps)
        z = q_mean + F.exp(q_log_sigma) * eps

        # compute p( x | z)

        h0 = nonlinear_f_p(self.z(z))
        out = self.output(h0)
        x_0 = output_a_f(out)
        state['gen_h'] = h0
        if gpu >= 0:
            np_x_0 = cuda.to_cpu(x_0.data)
            output[0] = np_x_0
        else:
            output[0] = x_0.data

        if continuous:
            rec_loss = F.mean_squared_error(x_0, Variable(x_data[0].reshape((1, x_data.shape[1]))))
        else:
            rec_loss = F.sigmoid_cross_entropy(out, Variable(x_data[0].reshape((1, x_data.shape[1])).astype(np.int32)))

        x_t = x_0

        for i in range(1, x_data.shape[0]):
            h_t_1 = nonlinear_f_p(self.gen_in_h(x_t) + self.gen_h_h(state['gen_h']))
            x_t_1 = self.output(h_t_1)
            state['gen_h'] = h_t_1

            if continuous:
                output_t = output_a_f(x_t_1)
                rec_loss += F.mean_squared_error(output_t, Variable(x_data[i].reshape((1, x_data.shape[1]))))
                x_t = output_t  # feed the generated frame back in (mirrors the discrete branch below)

            else:
                out = x_t_1
                rec_loss += F.sigmoid_cross_entropy(out, Variable(x_data[i].reshape((1, x_data.shape[1])).astype(np.int32)))
                x_t = output_t = output_a_f(x_t_1)

            if gpu >= 0:
                np_output_t = cuda.to_cpu(output_t.data)
                output[i] = np_output_t
            else:
                output[i]  = output_t.data


        KLD = -0.0005 * F.sum(1 + q_log_sigma - q_mean**2 - F.exp(q_log_sigma))

        return output, rec_loss, KLD, state
Example #47
 def mse(self, x, y, undo_norm):
     y = Variable(np.array(y, dtype=np.float32))
     pred = undo_norm(self.prediction(x))
     return F.mean_squared_error(pred, y)
Example #48
    def __call__(self, t, sw):

        hf0 = self.fx0(vgg.h)
        hf1 = self.fx1(hf0)

        fusionG = xp.tile(hf1.data, (14, 14, 1))
        fusionG = fusionG.transpose(2, 0, 1)
        fusionG = fusionG[np.newaxis, :, :, :]

        fusionL = self.convf_0(vgg.h5)
        fusionL = fusionL.data
        fusion  = xp.concatenate([fusionG, fusionL], axis=1)

        h0 = F.relu(self.conv5_1(self.bn0(Variable(fusion))))
        h0 = self.deconv5_1(h0)

        h1 = self.bn1(vgg.h4)
        h2 = h0 + h1
        h2 = self.conv4_1(h2)
        h2 = F.relu(self.bn2(h2))

        h2 = self.deconv4_1(h2)
        h3 = self.bn2(vgg.h3)
        h4 = h2 + h3

        h4 = self.conv4_2(h4)
        h4 = F.relu(self.bn3(h4))
        h4 = self.deconv3_1(h4)
        h5 = self.bn4(vgg.h2)
        h5 = h4 + h5

        h5 = self.conv3_1(h5)
        h5 = F.relu(self.bn5(h5))
        h5 = self.deconv2_1(h5)
        h6 = self.bn6(vgg.h1)
        h6 = h5 + h6

        h6 = self.conv2_1(h6)
        h6 = F.relu(self.bn7(h6))
        h7 = self.bn8(vgg.x)
        h8 = h6 + h7

        h8 = self.conv1_1(h8)
        h8 = F.relu(self.bn9(h8))

        h8 = self.conv0_5(h8)

        zx81 = F.split_axis(h8, 1, 0)
        zx82 = F.split_axis(zx81, 2, 1)

        if sw == 1:
            t1 = F.reshape(t, (1, 224 * 224))
            x = F.reshape(zx82[0], (1, 224 * 224))
            self.loss = F.mean_squared_error(x, t1)
            return self.loss

        elif sw == 2:
            t1 = F.reshape(t, (1, 224 * 224))
            x = F.reshape(zx82[1], (1, 224 * 224))
            self.loss = F.mean_squared_error(x, t1)
            return self.loss

        else:
            return h8
Example #49
 def __call__(self, x, y):
     y = Variable(np.array(y, dtype=np.float32))
     pred = self.prediction(x)
     return F.mean_squared_error(pred, y)
Example #50
    def __call__(self, x_recon, x):
        bs = x.shape[0]
        d = np.prod(x.shape[1:])
        self.loss = F.mean_squared_error(x_recon, x) / d  # MSE already averages over all elements; /d further downweights by feature dimension

        return self.loss
Example #51
    for i in range(2):  #six.moves.range(jump*n_epoch):
        print(i)
        x_batch = np.array([[goes_flux[(jump * j + i) % whole_len]]
                            for j in six.moves.range(batchsize)],
                           dtype=np.float32)
        truth_data = np.array([[goes_max[(jump * j + i) % whole_len]]
                               for j in six.moves.range(batchsize)],
                              dtype=np.float32)
        if args.gpu >= 0:
            truth_data = cuda.to_gpu(truth_data)
        truth = chainer.Variable(truth_data, volatile=False)

        state, y = forward_one_step(x_batch, state)

        loss_i = F.mean_squared_error(y, truth)
        accum_loss += loss_i
        cur_log_perp += loss_i.data.reshape(())

        optimizer.clip_grads(grad_clip)
        optimizer.update()

        if (i + 1) % bprop_len == 0:  # Run truncated BPTT
            print(i, jump * n_epoch)
            optimizer.zero_grads()
            print(accum_loss.data)
            accum_loss.backward()
            accum_loss.unchain_backward()  # truncate
            accum_loss = chainer.Variable(mod.zeros((), dtype=np.float32))

            optimizer.clip_grads(grad_clip)
Example #52
    def update_core(self):
        gen_optimizer = self.get_optimizer('gen')
        dis_optimizer = self.get_optimizer('dis')
        xp = self.gen.xp

        for i in range(self.n_dis):
            batch = self.get_iterator('main').next()
            batchsize = len(batch)

            x_real, real_label = zip(*batch)
            x_real = self.xp.asarray(x_real).astype("f")

            x_real_std = self.xp.std(x_real, axis=0, keepdims=True)
            x_rnd = self.xp.random.uniform(0, 1, x_real.shape).astype("f")

            one_hot_label, x_real, real_label_image = self.make_label_infomation(
                real_label, x_real, batchsize)

            real_label_image = Variable(real_label_image)
            x_real = Variable(x_real)
            y_real = self.dis(x_real)

            z = self.gen.make_hidden(batchsize)
            z = Variable(self.xp.concatenate([z, one_hot_label], axis=1))
            x_fake = self.gen(z)
            x_fake = F.concat([x_fake, real_label_image])
            y_fake = self.dis(x_fake)

            if i == 0:
                loss_gen = F.sigmoid_cross_entropy(
                    y_fake, self.xp.ones_like(y_fake.data))
                self.gen.cleargrads()
                loss_gen.backward()
                gen_optimizer.update()
                chainer.reporter.report({'gen/loss': loss_gen})

            x_fake.unchain_backward()
            alpha = self.xp.random.uniform(0, 1,
                                           size=batchsize).astype("f")[:, None,
                                                                       None,
                                                                       None]
            x_perturb = x_real + alpha * self.perturb_range * x_real_std * x_rnd

            grad, = chainer.grad([self.dis(x_perturb)], [x_perturb],
                                 enable_double_backprop=True)
            grad = F.sqrt(F.batch_l2_norm_squared(grad))
            loss_gp = self.lam * \
                F.mean_squared_error(grad, xp.ones_like(grad.data))

            loss_dis = F.sigmoid_cross_entropy(y_real,
                                               self.xp.ones_like(y_real.data))
            loss_dis += F.sigmoid_cross_entropy(y_fake,
                                                self.xp.zeros_like(y_fake.data))

            self.dis.cleargrads()
            loss_dis.backward()
            loss_gp.backward()
            dis_optimizer.update()

            chainer.reporter.report({'dis/loss': loss_dis})
            chainer.reporter.report({'dis/loss_grad': loss_gp})
            chainer.reporter.report({'g': F.mean(grad)})
Example #53
File: nin.py Project: ghelia/deel
    def getLossDistill(self,x,t):
        _t = chainer.Variable(t.data, volatile='off')
        self.loss = F.mean_squared_error(x, _t)

        return self.loss
Example #54
 def __call__(self, x0, x1):
     if self.scaler is not None:
         x0 = self.scaler.inverse_transform(x0)
         x1 = self.scaler.inverse_transform(x1)
     return F.sqrt(F.mean_squared_error(x0, x1))
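Taking `F.sqrt` of the mean squared error turns it into an RMSE, and applying `inverse_transform` first reports that error in the data's original units rather than in normalized ones. The same computation in plain NumPy, assuming an sklearn-style scaler (illustration only, not the class above):

import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(np.random.randn(100, 3))
pred = np.random.randn(10, 3)
target = np.random.randn(10, 3)
rmse = np.sqrt(np.mean(
    (scaler.inverse_transform(pred) - scaler.inverse_transform(target)) ** 2))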
Example #55
def mse_gd_loss(x, t, eta=0.5):
    mse = F.mean_squared_error(x, t)
    gd = chainervr.functions.gradient_difference_error(x, t)
    return mse * (1.0 - eta) + gd * eta
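The gradient-difference term penalizes mismatched image gradients, which keeps a blended loss like this one from over-smoothing edges the way pure MSE tends to. A hedged sketch of what such a term computes for NCHW batches; the actual chainervr implementation may differ:

import chainer.functions as F


def gradient_difference(x, t):
    # finite differences along width and height, for prediction and target
    dx_x = x[:, :, :, 1:] - x[:, :, :, :-1]
    dx_t = t[:, :, :, 1:] - t[:, :, :, :-1]
    dy_x = x[:, :, 1:, :] - x[:, :, :-1, :]
    dy_t = t[:, :, 1:, :] - t[:, :, :-1, :]
    return F.mean_absolute_error(dx_x, dx_t) + F.mean_absolute_error(dy_x, dy_t)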
Example #56
 def __call__(self, x, t, train=True):
     y = self.predictor(x, train)
     self.loss = F.mean_squared_error(y, t)
     report({'loss': self.loss}, self)
     return self.loss
Example #57
def generate_image(img_orig,
                   img_style,
                   width,
                   nw,
                   nh,
                   max_iter,
                   lr,
                   img_gen=None):
    mid_orig = nn.forward(Variable(img_orig))
    style_mats = [get_matrix(y) for y in nn.forward(Variable(img_style))]

    if img_gen is None:
        if args.gpu >= 0:
            img_gen = xp.random.uniform(-20,
                                        20, (1, 3, width, width),
                                        dtype=np.float32)
        else:
            img_gen = np.random.uniform(-20, 20, (1, 3, width, width)).astype(
                np.float32)
    img_gen = chainer.links.Parameter(img_gen)
    optimizer = optimizers.Adam(alpha=lr)
    optimizer.setup(img_gen)
    for i in range(max_iter):
        img_gen.zerograds()

        x = img_gen.W
        y = nn.forward(x)

        L = Variable(xp.zeros((), dtype=np.float32))
        for l in range(len(y)):
            ch = y[l].data.shape[1]
            wd = y[l].data.shape[2]
            gogh_y = F.reshape(y[l], (ch, wd**2))
            gogh_matrix = F.matmul(gogh_y, gogh_y, transb=True) / np.float32(
                ch * wd**2)

            L1 = np.float32(args.lam) * np.float32(
                nn.alpha[l]) * F.mean_squared_error(y[l],
                                                    Variable(mid_orig[l].data))
            L2 = np.float32(nn.beta[l]) * F.mean_squared_error(
                gogh_matrix, Variable(style_mats[l].data)) / np.float32(len(y))
            L += L1 + L2

            if i % 100 == 0:
                print(i, l, L1.data, L2.data)

        L.backward()
        img_gen.W.grad = x.grad
        optimizer.update()

        tmp_shape = x.data.shape
        if args.gpu >= 0:
            img_gen.W.data += Clip().forward(
                img_gen.W.data).reshape(tmp_shape) - img_gen.W.data
        else:

            def clip(x):
                return -120 if x < -120 else (136 if x > 136 else x)

            img_gen.W.data += np.vectorize(clip)(
                img_gen.W.data).reshape(tmp_shape) - img_gen.W.data

        if i % 50 == 0:
            save_image(img_gen.W.data, width, nw, nh, i)
Example #58
    def forward_one_step(self,
                         x_data,
                         y_data,
                         n_layers_recog,
                         n_layers_gen,
                         nonlinear_q='softplus',
                         nonlinear_p='softplus',
                         output_f='sigmoid',
                         type_qx='gaussian',
                         type_px='gaussian',
                         gpu=-1):
        x = Variable(x_data)
        y = Variable(y_data)

        # set non-linear function
        nonlinear = {
            'sigmoid': F.sigmoid,
            'tanh': F.tanh,
            'softplus': self.softplus,
            'relu': F.relu
        }
        nonlinear_f_q = nonlinear[nonlinear_q]
        nonlinear_f_p = nonlinear[nonlinear_p]

        output_activation = {
            'sigmoid': F.sigmoid,
            'identity': self.identity,
            'tanh': F.tanh
        }
        output_a_f = output_activation[output_f]

        hidden_q = [nonlinear_f_q(self.recog_x(x) + self.recog_y(y))]

        # compute q(z|x, y)

        for i in range(n_layers_recog - 1):
            hidden_q.append(
                nonlinear_f_q(getattr(self, 'recog_%i' % i)(hidden_q[-1])))

        q_mean = getattr(self, 'recog_mean')(hidden_q[-1])
        q_log_sigma = 0.5 * getattr(self, 'recog_log')(hidden_q[-1])

        eps = np.random.normal(
            0, 1,
            (x.data.shape[0], q_log_sigma.data.shape[1])).astype('float32')
        if gpu >= 0:
            eps = cuda.to_gpu(eps)

        eps = Variable(eps)
        z = q_mean + F.exp(q_log_sigma) * eps

        # compute q(x |y, z)
        hidden_p = [nonlinear_f_p(self.gen_y(y) + self.gen_z(z))]

        for i in range(n_layers_gen - 1):
            hidden_p.append(
                nonlinear_f_p(getattr(self, 'gen_%i' % i)(hidden_p[-1])))

        hidden_p.append(output_a_f(getattr(self, 'gen_out')(hidden_p[-1])))
        output = hidden_p[-1]

        rec_loss = F.mean_squared_error(output, x)
        KLD = -0.5 * F.sum(1 + q_log_sigma - q_mean**2 -
                           F.exp(q_log_sigma)) / (x_data.shape[0] *
                                                  x_data.shape[1])

        return rec_loss, KLD, output
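The two terms returned here combine into the usual conditional-VAE objective: reconstruction plus weighted KL divergence. A sketch of a training step using the returned values (`model`, `optimizer`, the batch, and the weight `beta` are placeholders, not names from the snippet above):

# placeholders: model (the VAE above), optimizer (a set-up chainer optimizer),
# x_batch / y_batch (float32 arrays), beta (KL weighting constant)
rec_loss, kld, _ = model.forward_one_step(x_batch, y_batch,
                                          n_layers_recog=2, n_layers_gen=2)
loss = rec_loss + beta * kld  # beta trades reconstruction off against the KL term
model.cleargrads()
loss.backward()
optimizer.update()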
Example #59
 def loss(self, x):
     y = self(x)
     return F.mean_squared_error(x, y)
Example #60
def main():
    parser = argparse.ArgumentParser(
        description='Second order polynomial approximation with MLP')
    parser.add_argument('--n_hidden_units',
                        '-n',
                        type=int,
                        default=3,
                        help='Number of hidden units per hidden layer')
    parser.add_argument('--batch_size',
                        '-b',
                        type=int,
                        default=25,
                        help='Batch size for each epoch')
    parser.add_argument('--n_epochs',
                        '-e',
                        type=int,
                        default=1000,
                        help='Number of epochs to run')
    args = parser.parse_args()
    ''' generate data '''
    x = np.random.rand(1000).astype(np.float32)
    y = 19 * x**3 + 10 * x**2 - 8 * x + 7
    y += 6.3423 * np.random.rand(1000).astype(np.float32)

    N = args.batch_size * 10

    print('Number of hidden units: {}'.format(args.n_hidden_units))
    print('Number of epochs: {}'.format(args.n_epochs))
    print('Batch size: {}'.format(args.batch_size))
    print('N: {}'.format(N))
    print('')

    x_train, x_test = np.split(x, [N])
    y_train, y_test = np.split(y, [N])

    print('Shape of train, test data: {}, {}'.format(x_train.shape,
                                                     x_test.shape))
    input('Enter any key to continue...')
    ''' instantiate the model and setup optimizer '''
    model = QuadChain(args.n_hidden_units)
    # optimizer = optimizers.AdaDelta(rho=0.9)
    optimizer = optimizers.MomentumSGD()
    # optimizer = optimizers.Adam()
    optimizer.use_cleargrads()
    optimizer.setup(model)
    '''
    -- prepare test data here
    -- train data needs to be shuffled for each epoch; so we will deal with it there
    '''
    test_data = Variable(x_test.reshape(x_test.shape[0], -1))
    test_target = Variable(y_test.reshape(y_test.shape[0], -1))
    '''
    - start training
    - for each epoch, iterate over each mini batches and perform model update
    - at the end of each mini batch, calculate test loss
    '''

    dt = display_train()
    dt.header = str("{:^5} {:^5} {:^5} {:^5}".format('Epoch', 'Mini Batch',
                                                     'Train Loss',
                                                     'Test Loss'))

    for each_epoch in range(1, args.n_epochs + 1):
        permuted_ordering = np.random.permutation(N)

        for mini_batch_index in range(0, N, args.batch_size):
            x_batch = x_train[
                permuted_ordering[mini_batch_index:mini_batch_index +
                                  args.batch_size]]
            y_batch = y_train[
                permuted_ordering[mini_batch_index:mini_batch_index +
                                  args.batch_size]]

            train_data = Variable(x_batch.reshape(x_batch.shape[0], -1))
            train_target = Variable(y_batch.reshape(y_batch.shape[0], -1))

            train_pred = model(train_data)
            train_loss = F.mean_squared_error(train_target, train_pred)

            model.cleargrads()
            train_loss.backward()
            optimizer.update()
            ''' calculate test loss after this mini batch optimizer/network update '''
            test_pred = model(test_data)
            test_loss = F.mean_squared_error(test_target, test_pred)

            logstr = str("{:4}\t{:4d}\t{:10.8} {:10.8}".format(
                each_epoch, mini_batch_index, train_loss.data, test_loss.data))

            dt.train_log.append(logstr)
            if (len(dt.train_log) == dt.H):
                dt.display()