Example #1
def train_loop(model, train_iter, test_iter, optimizer, gpu_id, epochs):

    while train_iter.epoch < epochs:

        # ---------- One iteration of the training loop ----------
        train_batch = train_iter.next()
        image_train, target_train = concat_examples(train_batch, gpu_id)

        # Calculate the prediction of the network
        prediction_train = model(image_train)

        # Calculate the loss with mean squared error
        loss = F.mean_squared_error(prediction_train, target_train)

        # Calculate the gradients in the network
        model.cleargrads()
        loss.backward()

        # Update all the trainable parameters
        optimizer.update()
        # --------------------- until here ---------------------

        # Check the validation accuracy of prediction after every epoch
        if train_iter.is_new_epoch:  # If this iteration is the final iteration of the current epoch

            # Display the training loss
            print('Epoch:{:02d} train_loss:{:.04f} '.format(
                train_iter.epoch, float(to_cpu(loss.array))),
                  end='')

            test_losses = []
            test_accuracies = []
            while True:
                test_batch = test_iter.next()
                image_test, target_test = concat_examples(test_batch, gpu_id)

                # Forward the test data
                prediction_test = model(image_test)

                # Calculate the loss
                loss_test = F.mean_squared_error(prediction_test, target_test)
                test_losses.append(to_cpu(loss_test.array))

                # Calculate the R2
                accuracy = F.r2_score(prediction_test, target_test)
                accuracy.to_cpu()
                test_accuracies.append(accuracy.array)

                if test_iter.is_new_epoch:
                    test_iter.reset()
                    break

            print('val_loss:{:.04f} val_r2:{:.04f}'.format(
                np.mean(test_losses), np.mean(test_accuracies)))
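For reference, a minimal standalone sketch of the two metrics used above, assuming NumPy and chainer.functions (imported as F) and made-up toy arrays:

import numpy as np
import chainer.functions as F

# Toy regression predictions and targets (hypothetical values)
pred = np.array([[2.1], [3.9], [6.2]], dtype=np.float32)
true = np.array([[2.0], [4.0], [6.0]], dtype=np.float32)

mse = F.mean_squared_error(pred, true)  # Variable holding the mean squared error
r2 = F.r2_score(pred, true)             # Variable holding the coefficient of determination
print(float(mse.array), float(r2.array))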
Example #2
 def forward(self, inputs, device):
     x, t = inputs
     y = functions.r2_score(x, t, self.sample_weight, self.multioutput)
     return y,
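The snippet above exercises the full signature of functions.r2_score. A short sketch of the multioutput option with made-up arrays (sample_weight is left at its default of None):

import numpy as np
import chainer.functions as F

pred = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 31.0]], dtype=np.float32)
true = np.array([[1.0, 11.0], [2.0, 19.0], [3.0, 30.0]], dtype=np.float32)

# 'raw_values' returns one R^2 per output column instead of the uniform average
per_column = F.r2_score(pred, true, multioutput='raw_values')
print(per_column.array)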
Example #3
 def __call__(self, x, t):
     y = self.predictor(x)
     loss = F.mean_squared_error(y, t)
     r2_score = F.r2_score(y, t)
     chainer.report({'loss': loss, 'r2_score': r2_score}, self)
     return loss
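A self-contained sketch of how a reporting link like the one above is typically trained so that 'main/loss' and 'main/r2_score' show up in the training log; the toy linear predictor and synthetic data below are assumptions for illustration, not part of the original project:

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions

class Regressor(chainer.Chain):
    """Minimal regressor link in the same style as the example above."""

    def __init__(self, predictor):
        super().__init__()
        with self.init_scope():
            self.predictor = predictor

    def __call__(self, x, t):
        y = self.predictor(x)
        loss = F.mean_squared_error(y, t)
        chainer.report({'loss': loss, 'r2_score': F.r2_score(y, t)}, self)
        return loss

# Synthetic linear data (made up for this sketch)
x = np.random.rand(256, 3).astype(np.float32)
t = x.sum(axis=1, keepdims=True)
train_iter = chainer.iterators.SerialIterator(
    chainer.datasets.TupleDataset(x, t), batch_size=32)

model = Regressor(L.Linear(3, 1))
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)

updater = training.updaters.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (20, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'main/r2_score']))
trainer.run()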
Example #4
def train_nn(nn,
             env,
             test_env,
             optimizer,
             batch_size,
             lr_decay_period,
             train_epochs,
             test_interval,
             test_epochs,
             target_type,
             save_dir,
             name,
             prev_score=None):
    ## Training loop
    t0 = 1e-8

    if prev_score is not None:
        assert isinstance(prev_score, float) or isinstance(
            prev_score, int), 'prev_score must be int or float.'
        best_score = prev_score
    else:
        best_score = -np.inf

    train_r2_log = []
    train_loss_log = []
    test_r2_log = []
    test_loss_log = []
    for epoch in range(train_epochs):
        try:
            t1 = time()

            if epoch % lr_decay_period == 0:
                optimizer.hyperparam.alpha /= 2

            obs_batch, target_train = make_train_batch(env, batch_size,
                                                       target_type)

            prediction_train = nn(obs_batch)

            loss = F.mean_squared_error(prediction_train, target_train)

            nn.cleargrads()
            loss.backward()

            optimizer.update()

            train_r2 = F.r2_score(prediction_train, target_train)

            train_loss_log.append(loss.data)
            train_r2_log.append(train_r2.data)

            t0 += time() - t1
            print(
                "Training epoch %d/%d, loss: %.08f, r2: %f, mean r2: %f, samples/sec: %f                                          "
                % (epoch + 1, train_epochs, loss.data, train_r2.data,
                   float(np.mean(train_r2_log[-100:])),
                   (epoch + 1) * batch_size / t0),
                end='\r')

            if epoch % test_interval == 0 and epoch != 0:
                test_losses = []
                test_scores = []
                print()
                for j in range(test_epochs):
                    test_batch, target_test = make_train_batch(
                        test_env, batch_size, target_type)

                    # Forward the test data
                    prediction_test = nn(test_batch)

                    # Calculate the loss
                    loss_test = F.mean_squared_error(prediction_test,
                                                     target_test)
                    loss_test.to_cpu()
                    test_losses.append(loss_test.data)

                    # Calculate the accuracy
                    test_r2 = F.r2_score(prediction_test, target_test)
                    test_r2.to_cpu()
                    test_scores.append(test_r2.data)

                    print(
                        "Test epoch: %d, loss: %.08f, r2: %.08f                             "
                        % (j + 1, loss_test.data, test_r2.data),
                        end='\r')

                # Log the mean test loss/R2 once per evaluation pass
                test_loss_log.append(np.mean(test_losses))
                test_r2_log.append(np.mean(test_scores))

                if np.mean(test_scores) > best_score:
                    best_score = np.mean(test_scores)
                    print("\nNew best score:", best_score, end='\r')
                    chainer.serializers.save_npz(save_dir + name + '>' +
                                                 str(float(best_score)) +
                                                 '.npz',
                                                 nn,
                                                 compression=True)

                print('\nval loss: {:.08f}, val r2 score: {:.08f}'.format(
                    np.mean(test_losses), np.mean(test_scores)))

        except KeyboardInterrupt:
            print("\nInterrupted by the user. Best score:", best_score)
            break
Example #5
'''
predict_ro = predict_1[:,0] + f0[:,0]
predict_e = predict_1[:,1] + f0[:,1]

predict_vx = predict_1[:,2] + f0[:,2]

predict_vy = predict_1[:,3] + f0[:,3]

predict_vz = predict_1[:,4] + f0[:,4]

predict_By = predict_1[:,5] + f0[:,5]

predict_Bz = predict_1[:,6] + f0[:,6]
'''

R2 = [F.r2_score(predict_ro, ro), F.r2_score(predict_e, e),
      F.r2_score(predict_vx, vx), F.r2_score(predict_vy, vy),
      F.r2_score(predict_vz, vz), F.r2_score(predict_By, By),
      F.r2_score(predict_Bz, Bz)]

R2_df = [F.r2_score(predict_1[:, 0], data_f[:, 0]), F.r2_score(predict_1[:, 1], data_f[:, 1]),
         F.r2_score(predict_1[:, 2], data_f[:, 2]), F.r2_score(predict_1[:, 3], data_f[:, 3]),
         F.r2_score(predict_1[:, 4], data_f[:, 4]), F.r2_score(predict_1[:, 5], data_f[:, 5]),
         F.r2_score(predict_1[:, 6], data_f[:, 6])]
for i in range(len(R2)):
    print(flux[i],' R2 score :',R2[i])
    print(flux_1[i],' R2 score :',R2_df[i])
    print(T_flux[i])
    print(P_flux[i])

kwargs = dict(c='deeppink',s=4, alpha=0.5,zorder=1, label='Predict')
kwargs1 = dict(c='deepskyblue', label='True', zorder=10)

plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
fig, ax = plt.subplots(2, 4, figsize=(15,6))
Example #6
with chainer.using_config('enable_backprop', False):
    print('Exp:', it, '  Itry:', itry, '  Epoch:', i)
    for j in range(3):
        # predict model_1 (ro, e, vx)
        predict_train_1 = model_1(train_1[:, 0:n_in])
        predict_valid_1 = model_1(valid_1[:, 0:n_in])
        if LOSS == 'RMSE':
            loss_train_tmp = to_cpu(F.mean_squared_error(predict_train_1[:, j], train_1_label[:, j]).data)
            loss_valid_tmp = to_cpu(F.mean_squared_error(predict_valid_1[:, j], valid_1_label[:, j]).data)
        elif LOSS == 'MAE':
            loss_train_tmp = to_cpu(F.mean_absolute_error(predict_train_1[:, j], train_1_label[:, j]).data)
            loss_valid_tmp = to_cpu(F.mean_absolute_error(predict_valid_1[:, j], valid_1_label[:, j]).data)
        it_train_loss.append(loss_train_tmp)
        it_valid_loss.append(loss_valid_tmp)
        r2_train_tmp = to_cpu(F.r2_score(predict_train_1[:, j], train_1_label[:, j]).data)
        it_train_r2.append(r2_train_tmp)
        r2_valid_tmp = to_cpu(F.r2_score(predict_valid_1[:, j], valid_1_label[:, j]).data)
        it_valid_r2.append(r2_valid_tmp)

        print(flux_1[j], ' R2 score (train):', r2_train_tmp, '  R2 score (valid):', r2_valid_tmp)
        print('loss(train):', loss_train_tmp, 'loss(valid):', loss_valid_tmp)

    for j in range(2):
        # predict model_2 (vy, vz)
        predict_train_2 = model_2(train_2[:, 0:37])
        predict_valid_2 = model_2(valid_2[:, 0:37])
        if LOSS == 'RMSE':
            loss_train_tmp = to_cpu(F.mean_squared_error(predict_train_2[:, j], train_2_label[:, j]).data)
            loss_valid_tmp = to_cpu(F.mean_squared_error(predict_valid_2[:, j], valid_2_label[:, j]).data)
Example #7
 def score(self, X, Y, step=100):
     predicted = self.predict(X, step)
     score = F.r2_score(predicted, Y).data
     return score
Example #8
                                    F.mean_squared_error(
                                        predict_valid_1[:, j],
                                        valid_1_label[:, j]).data)
                            elif LOSS == 'MAE':
                                loss_train_tmp = to_cpu(
                                    F.mean_absolute_error(
                                        predict_train_1[:, j],
                                        train_1_label[:, j]).data)
                                loss_valid_tmp = to_cpu(
                                    F.mean_absolute_error(
                                        predict_valid_1[:, j],
                                        valid_1_label[:, j]).data)
                            it_train_loss.append(loss_train_tmp)
                            it_valid_loss.append(loss_valid_tmp)
                            r2_train_tmp = to_cpu(
                                F.r2_score(predict_train_1[:, j],
                                           train_1_label[:, j]).data)
                            it_train_r2.append(r2_train_tmp)
                            r2_valid_tmp = to_cpu(
                                F.r2_score(predict_valid_1[:, j],
                                           valid_1_label[:, j]).data)
                            it_valid_r2.append(r2_valid_tmp)
                            print(flux_1[j], ' R2 score (train):',
                                  r2_train_tmp, '  R2 score (valid):',
                                  r2_valid_tmp)
                            print('loss(train):', loss_train_tmp,
                                  'loss(valid):', loss_valid_tmp)
                        for j in range(2):

                            predict_train_2 = model_2(train_2[:, 0:25])
                            predict_valid_2 = model_2(valid_2[:, 0:25])
                            if LOSS == 'RMSE':
Example #9
print('Exp:', it, '  Itry:', itry, '  Epoch:', i)
for j in range(7):
    # predict model_1 (ro, e, vx, By, Bz)
    predict_train_1 = model_1(train_1[:, 0:n_in])
    predict_valid_1 = model_1(valid_1[:, 0:n_in])
    # define loss function
    if LOSS == 'RMSE':
        loss_train_tmp = to_cpu(F.mean_squared_error(predict_train_1[:, j], train_1[:, n_in + j]).data)
        loss_valid_tmp = to_cpu(F.mean_squared_error(predict_valid_1[:, j], valid_1[:, n_in + j]).data)
    elif LOSS == 'MAE':
        loss_train_tmp = to_cpu(F.mean_absolute_error(predict_train_1[:, j], train_1[:, n_in + j]).data)
        loss_valid_tmp = to_cpu(F.mean_absolute_error(predict_valid_1[:, j], valid_1[:, n_in + j]).data)
    it_train_loss.append(loss_train_tmp)
    it_valid_loss.append(loss_valid_tmp)
    r2_train_tmp = to_cpu(F.r2_score(predict_train_1[:, j], train_1[:, n_in + j]).data)
    it_train_r2.append(r2_train_tmp)
    r2_valid_tmp = to_cpu(F.r2_score(predict_valid_1[:, j], valid_1[:, n_in + j]).data)
    it_valid_r2.append(r2_valid_tmp)
    print(flux[j], ' R2 score (train):', r2_train_tmp, '  R2 score (valid):', r2_valid_tmp)
    print('loss(train):', loss_train_tmp, 'loss(valid):', loss_valid_tmp)


with chainer.using_config('enable_backprop', False):
    loss_train.append(it_train_loss)
    loss_valid.append(it_valid_loss)
    R2_train.append(it_train_r2)
    R2_valid.append(it_valid_r2)
    model_1.to_cpu()
    y_1_p = model_1(to_cpu(valid_1[:, 0:n_in]))
    print(y_1_p.shape)
Example #10
                # Evaluate training set
                y_train = []
                for s in range(0, train_size, batch_size):
                    x_batch = chainer.cuda.to_gpu(x_train[s:s + batch_size])
                    y_train.extend(chainer.cuda.to_cpu(model(x_batch).data).tolist())

                # Evaluate test set
                y_test = []
                for s in range(0, test_size, batch_size):
                    x_batch = chainer.cuda.to_gpu(x_test[s:s + batch_size])
                    y_test.extend(chainer.cuda.to_cpu(model(x_batch).data).tolist())


                train_loss = F.mean_squared_error(cp.asarray(y_train, dtype=np.float32), cp.asarray(t_train,dtype=np.float32))
                train_R2 = F.r2_score(cp.asarray(y_train, dtype=np.float32),cp.asarray(t_train,dtype=np.float32))
                train_losses.append(to_cpu(train_loss.data))
                train_r2.append(to_cpu(train_R2.data))
                train_loss = cp.asnumpy(train_loss.data)
                train_R2 = cp.asnumpy(train_R2.data)
                test_loss = F.mean_squared_error(cp.asarray(y_test, dtype=np.float32), cp.asarray(t_test, dtype=np.float32))
                test_R2 = F.r2_score(cp.asarray(y_test, dtype=np.float32),cp.asarray(t_test,dtype=np.float32))
                test_losses.append(to_cpu(test_loss.data))
                test_r2.append(to_cpu(test_R2.data))
                test_loss = cp.asnumpy(test_loss.data)
                test_R2 = cp.asnumpy(test_R2.data)
                elapsed_time2 = time.time() - start2
                epoch = i / per_epoch
                epoch_predict = y_train + y_test
                print('epoch:{:.0f} \ntrain_loss:{:.04f} test_loss:{:0.4f} time:{:02f} '.format(epoch, train_loss, test_loss, elapsed_time2))
                print('train_R2:{:.04f} test_R2:{:0.4f} '.format(train_R2, test_R2))
Example #11
def r2_score(ys, ts):
    cys = F.concat(ys, axis=0)
    cts = F.concat(ts, axis=0)
    return F.r2_score(cys, cts)
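A usage sketch for the helper above, assuming it is in scope; the per-batch prediction and target lists are made-up values:

import numpy as np

# Two mini-batches of predictions and matching targets (hypothetical)
ys = [np.array([[1.0], [2.0]], dtype=np.float32), np.array([[3.0]], dtype=np.float32)]
ts = [np.array([[1.1], [1.9]], dtype=np.float32), np.array([[3.2]], dtype=np.float32)]

score = r2_score(ys, ts)  # concatenates the batches along axis 0, then computes R^2
print(float(score.array))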