Example 1

import numpy as np
import torch
from sklearn import metrics

def test(model_config, nn_model, dataloader, writer, epoch, criterion):
    device = torch.device(model_config['cuda_num'])
    with torch.no_grad():
        # Accumulate ground-truth labels and predictions over the whole test set
        total_label = []
        total_pred = []
        for i, data in enumerate(dataloader):
            x_data = None
            y_data = None
            # Unpack the batch according to the configured model type
            if 'model' in model_config:
                if model_config['model'] == 'DNN':
                    x_data, y_data = data
                elif model_config['model'] == 'RNN':
                    x_data = data[:][0]
                    y_data = data[:][1]
                elif model_config['model'] == 'CRNN':
                    x_data = data[:][0]
                    x_data = x_data.transpose(1, 2)
                    y_data = data[:][1]
            else:
                if model_config['model_type'] == 'Custom_DNN':
                    x_data, y_data = data
                elif model_config['model_type'] == 'Custom_RNN':
                    x_data = data[:][0]
                    y_data = data[:][1]
                elif model_config['model_type'] == 'Custom_CRNN':
                    x_data = data[:][0]
                    x_data = x_data.transpose(1, 2)
                    y_data = data[:][1]
            # Move the batch to the configured device
            if device:
                x_data = x_data.to(device)
                y_data = y_data.to(device)
            # Forward pass and per-batch loss
            y_pred = nn_model(x_data).reshape(-1)
            loss = criterion(y_pred, y_data)
            writer.add_scalar('Loss/Valid MSELoss', loss / 1000,
                              epoch * len(dataloader) + i)
            y_pred = y_pred.cpu()

            total_label += y_data.tolist()
            total_pred += y_pred.tolist()

        test_mse_score = metrics.mean_squared_error(total_label, total_pred)
        test_r2_score = metrics.r2_score(total_label, total_pred)
        test_mae_score = metrics.mean_absolute_error(total_label, total_pred)
        test_rmse_score = np.sqrt(test_mse_score)
        test_mape_score = metrics.mean_absolute_percentage_error(
            total_label, total_pred)

        writer.add_scalar('MSE Score/test', test_mse_score, epoch)
        writer.add_scalar('R2 Score/test', test_r2_score, epoch)
        writer.add_scalar('MAE Score/test', test_mae_score, epoch)
        writer.add_scalar('RMSE Score/test', test_rmse_score, epoch)
        writer.add_scalar('MAPE Score/test', test_mape_score, epoch)
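A minimal usage sketch for test() above, not part of the original example: every name, shape, and config value here is an assumption, chosen so that the 'DNN' branch and the model_config keys ('cuda_num', 'model') match what test() actually reads.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard import SummaryWriter

# Hypothetical wiring: dummy regression data and a small fully connected model.
model_config = {'cuda_num': 'cuda:0' if torch.cuda.is_available() else 'cpu',
                'model': 'DNN'}
x = torch.randn(64, 8)          # 64 samples, 8 input features (made-up shapes)
y = torch.randn(64)             # regression targets
dataloader = DataLoader(TensorDataset(x, y), batch_size=16)

nn_model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
nn_model = nn_model.to(torch.device(model_config['cuda_num']))

writer = SummaryWriter(log_dir='runs/pathloss_sketch')   # assumed log directory
criterion = nn.MSELoss()

test(model_config, nn_model, dataloader, writer, epoch=0, criterion=criterion)
writer.close()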
def train(train_loader, epoch, config, device, model, criterion, writer,
          optim):
    # train_bar = tqdm(train_loader,
    #                  desc='{}/{} epoch train ... '.format(epoch, config['epoch']))
    for batch_idx, data in enumerate(train_loader):
        x_data = data[:][0].transpose(1, 2)
        # print(x_data.size())
        y_data = data[:][1]

        if config['use_cuda']:
            x_data = x_data.to(device)
            y_data = y_data.to(device)

        # Run the model forward pass
        y_pred = model(x_data).reshape(-1)
        # Compute the loss on the predictions
        loss = criterion(y_pred, y_data)
        # Backpropagate and update the parameters
        optim.zero_grad()
        loss.backward()
        optim.step()

        writer.add_scalar("Loss/Train MSELoss", loss / 1000,
                          epoch * len(train_loader) + batch_idx)
        y_pred = y_pred.cpu().detach().numpy()
        y_data = y_data.cpu().detach().numpy()

        mse_score = metrics.mean_squared_error(y_data, y_pred)
        r2_score = metrics.r2_score(y_data, y_pred)
        mae_score = metrics.mean_absolute_error(y_data, y_pred)
        rmse_score = np.sqrt(mse_score)
        # mape_score = metrics.mean_absolute_percentage_error(y_data, y_pred)

        writer.add_scalar('MSE Score/train', mse_score,
                          epoch * len(train_loader) + batch_idx)
        writer.add_scalar('R2 Score/train', r2_score,
                          epoch * len(train_loader) + batch_idx)
        writer.add_scalar('MAE Score/train', mae_score,
                          epoch * len(train_loader) + batch_idx)
        writer.add_scalar('RMSE Score/train', rmse_score,
                          epoch * len(train_loader) + batch_idx)
def validation(valid_loader, epoch, config, device, model, criterion, writer):
    with torch.no_grad():
        total_label = []
        total_pred = []
        # train_bar = tqdm(valid_loader,
        #                  desc='{}/{} epoch train ... '.format(epoch, config['epoch']))
        for batch_idx, data in enumerate(valid_loader):
            x_data = data[:][0].transpose(1, 2)
            y_data = data[:][1]

            if config['use_cuda']:
                x_data = x_data.to(device)
                y_data = y_data.to(device)

            # Run the model forward pass
            y_pred = model(x_data).reshape(-1)
            # Compute the loss on the predictions
            loss = criterion(y_pred, y_data)

            writer.add_scalar('Loss/Validation MSELoss', loss / 1000,
                              epoch * len(valid_loader) + batch_idx)

            y_pred = y_pred.cpu()

            total_label += y_data.tolist()
            total_pred += y_pred.tolist()
        mse_score = metrics.mean_squared_error(total_label, total_pred)
        r2_score = metrics.r2_score(total_label, total_pred)
        mae_score = metrics.mean_absolute_error(total_label, total_pred)
        rmse_score = np.sqrt(mse_score)
        # mape_score = metrics.mean_absolute_percentage_error(total_label, total_pred)

        writer.add_scalar('MSE Score/Validation', mse_score, epoch)
        writer.add_scalar('R2 Score/Validation', r2_score, epoch)
        writer.add_scalar('MAE Score/Validation', mae_score, epoch)
        writer.add_scalar('RMSE Score/Validation', rmse_score, epoch)
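A hedged sketch of an epoch loop that could drive train() and validation() above; it is not from the original project. The config keys, tensor shapes, and the small Conv1d stand-in model are assumptions chosen to match the transpose(1, 2) batch handling both functions expect.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard import SummaryWriter

config = {'use_cuda': torch.cuda.is_available(), 'epoch': 3}   # assumed config keys
device = torch.device('cuda:0' if config['use_cuda'] else 'cpu')

# Made-up sequence data: [samples, seq_len, features]; train()/validation()
# transpose each batch to [batch, features, seq_len] before the forward pass.
x = torch.randn(128, 20, 4)
y = torch.randn(128)
train_loader = DataLoader(TensorDataset(x, y), batch_size=32, shuffle=True)
valid_loader = DataLoader(TensorDataset(x, y), batch_size=32)

# Stand-in Conv1d regressor that accepts the transposed input layout.
model = nn.Sequential(nn.Conv1d(4, 8, kernel_size=3), nn.ReLU(),
                      nn.AdaptiveAvgPool1d(1), nn.Flatten(), nn.Linear(8, 1)).to(device)
criterion = nn.MSELoss()
optim = torch.optim.Adam(model.parameters(), lr=1e-3)
writer = SummaryWriter(log_dir='runs/pathloss_sketch')

for epoch in range(config['epoch']):
    train(train_loader, epoch, config, device, model, criterion, writer, optim)
    validation(valid_loader, epoch, config, device, model, criterion, writer)
writer.close()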
Example 4

import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn import metrics

def validation(valid_loader, epoch, config, device, model, criterion, writer):
    with torch.no_grad():
        total_label = []
        total_pred = []
        total_x = []
        for batch_idx, data in enumerate(valid_loader):
            x_data = data[:][0].transpose(1, 2)
            y_data = data[:][1]

            if config['use_cuda']:
                x_data = x_data.to(device)
                y_data = y_data.to(device)

            # Run the model forward pass
            y_pred = model(x_data).reshape(-1)
            # Compute the loss on the predictions
            loss = criterion(y_pred, y_data)

            writer.add_scalar('Loss/Validation MSELoss', loss / 1000,
                              epoch * len(valid_loader) + batch_idx)

            y_pred = y_pred.cpu()
            # Mean of the first input feature (the RSSI channel) per sample,
            # used as the x-axis value in the scatter plots below
            for temp in data[:][0]:
                x = temp[:, 0].cpu()
                total_x.append(np.array(x.tolist()).mean())
            total_label += y_data.tolist()
            total_pred += y_pred.tolist()
        mse_score = metrics.mean_squared_error(total_label, total_pred)
        r2_score = metrics.r2_score(total_label, total_pred)
        mae_score = metrics.mean_absolute_error(total_label, total_pred)
        rmse_score = np.sqrt(mse_score)
        mape_score = metrics.mean_absolute_percentage_error(
            total_label, total_pred)

        writer.add_scalar('MSE Score/Validation', mse_score, epoch)
        writer.add_scalar('R2 Score/Validation', r2_score, epoch)
        writer.add_scalar('MAE Score/Validation', mae_score, epoch)
        writer.add_scalar('RMSE Score/Validation', rmse_score, epoch)
        writer.add_scalar('MAPE Score/Validation', mape_score, epoch)
        fig = plt.figure(figsize=(24, 16))
        plt.scatter(total_x,
                    total_pred,
                    color='blue',
                    alpha=0.2,
                    label='prediction')
        plt.scatter(total_x,
                    total_label,
                    color='red',
                    alpha=0.2,
                    label='groundtruth')
        plt.legend()
        plt.grid(True)
        plt.xlabel("rssi (dbm)")
        plt.ylabel("distance (meter)")
        plt.title("Prediction Result")
        plt.yticks(np.arange(0, 70, 5))
        writer.add_figure('PathLoss Prediction', fig, epoch)

        # Split the validation points into 16 roughly equal chunks for the detail view
        data_size = int(len(total_x) / 16)
        fig_detail = plt.figure(figsize=(16, 16), constrained_layout=True)
        for i in range(16):
            plt.subplot(4, 4, 1 + i)
            start = data_size * i
            # The last panel also takes whatever remainder is left after the split
            end = data_size * (i + 1) if i < 15 else None
            plt.scatter(total_x[start:end],
                        total_pred[start:end],
                        color='blue',
                        alpha=0.2,
                        label='prediction')
            plt.scatter(total_x[start:end],
                        total_label[start:end],
                        color='red',
                        alpha=0.2,
                        label='groundtruth')
            plt.legend()
            plt.grid(True)
            plt.xlabel("rssi (dbm)")
            plt.ylabel("distance (meter)")
            plt.yticks(np.arange(0, 70, 5))
            plt.title("PathLoss Prediction with Detail")
        writer.add_figure('VisualizeValidationDetail', fig_detail, epoch)
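For clarity, a small illustration of the batch layout this validation() appears to expect, inferred from how the function indexes its input rather than taken from the original dataset code: data[:][0] is [batch, seq_len, features] with RSSI as feature 0, and data[:][1] holds the distance targets.

import torch

batch = [torch.randn(32, 20, 4),   # x: [batch, seq_len, features]; feature 0 = RSSI (assumed)
         torch.rand(32) * 70]      # y: distance in metres, matching the 0-70 m y-ticks above
x_data, y_data = batch[:][0], batch[:][1]
per_sample_rssi = [sample[:, 0].mean().item() for sample in x_data]   # x-axis values for the scatter plots
print(x_data.transpose(1, 2).shape, len(per_sample_rssi))             # torch.Size([32, 4, 20]) 32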