Beispiel #1
0
    def loading_data(self):
        """Build the train and validation DataLoaders.

        The crop size depends on the backbone: 112 for C3D, 224 for I3D,
        otherwise 160. Training applies random crop/flip augmentation;
        validation uses a deterministic resize + center crop.

        Returns:
            tuple: (train_loader, val_loader).
        """
        # bool(...) replaces the redundant `True if self.random else False`
        # and the new name avoids shadowing the stdlib `random` module.
        use_random = bool(self.random)

        # Input crop size per backbone architecture.
        size = 160
        if self.model_type == 'C3D':
            size = 112
        if self.model_type == 'I3D':
            size = 224

        # ImageNet channel statistics.
        normalize = Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
        train_transformations = Compose([
            RandomSizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize
        ])

        val_transformations = Compose([
            Resize(256),
            CenterCrop(size),
            ToTensor(),
            normalize
        ])

        train_dataset = MyDataset(self.data,
                                  data_folder="train",
                                  name_list=self.name_list,
                                  version="1",
                                  transform=train_transformations,
                                  num_frames=self.num_frames,
                                  random=use_random)

        val_dataset = MyDataset(self.data,
                                data_folder="validation",
                                name_list=self.name_list,
                                version="1",
                                transform=val_transformations,
                                num_frames=self.num_frames,
                                random=use_random)

        train_loader = data.DataLoader(train_dataset,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=self.workers,
                                       pin_memory=True)

        val_loader = data.DataLoader(val_dataset,
                                     batch_size=self.batch_size,
                                     shuffle=False,
                                     num_workers=self.workers,
                                     pin_memory=False)

        return (train_loader, val_loader)
Beispiel #2
0
def main(args):
    """Train the encoder/decoder pair and periodically validate it."""
    cudnn.benchmark = True

    # Training split.
    train_dataset = MyDataset(
        args.train, filter_pair=True, max_length=args.max_length,
        min_length=3, max_word_length=args.max_length,
        freq_threshold=args.freq_threshold,
        onlylower=(args.only_lowercase > 0))
    voc_size = train_dataset.n_words
    train_data = DataLoader(
        train_dataset, batch_size=args.batch_size, pin_memory=True,
        shuffle=True, num_workers=2, collate_fn=collate_fn())

    # Held-out split drawn from the same file (train=False selects it).
    test_dataset = MyDataset(
        args.train, filter_pair=True, max_length=args.max_length,
        min_length=3, max_word_length=args.max_length, train=False,
        freq_threshold=args.freq_threshold,
        onlylower=(args.only_lowercase > 0))
    test_data = DataLoader(
        test_dataset, batch_size=args.batch_size, pin_memory=True,
        shuffle=True, num_workers=2, collate_fn=collate_fn())

    encoder = C2WEncoderRNN(args.hidden_size, args.n_layers,
                            dropout=args.dropout)
    decoder = BahdanauAttnDecoderRNN(voc_size, args.hidden_size,
                                     args.n_layers, dropout=args.dropout)
    print(encoder)
    print(decoder)
    print("vocab size", voc_size)
    encoder.cuda()
    decoder.cuda()

    # Optionally resume from checkpoints.
    if args.load_en:
        encoder.load_state_dict(torch.load(args.load_en))
        print('Loading parameters from {}'.format(args.load_en))
    if args.load_de:
        decoder.load_state_dict(torch.load(args.load_de))

    encoder_optimizer = optim.Adam(encoder.parameters(), lr=args.elr)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=args.dlr)

    best_acc = 0
    for epoch in range(1, args.epochs + 1):
        loss, acc = train(train_data, encoder, decoder, encoder_optimizer,
                          decoder_optimizer, args.teacher_forcing_ratio)
        print('Epoch {}:\tavg.loss={:.4f}\tavg.acc={:.4f}'.format(epoch, loss, acc))

        torch.cuda.empty_cache()

        # Periodic validation; keep the best checkpoint by accuracy.
        if args.test and args.test_freq and epoch % args.test_freq == 0:
            eval_randomly(test_dataset, encoder, decoder)
            loss, acc = evaluate(test_data, encoder, decoder)
            torch.cuda.empty_cache()
            marker = ''
            if acc > best_acc:
                best_acc = acc
                marker = '*'
                torch.save(encoder.state_dict(), 'best_en_5out_freq2.pth')
                torch.save(decoder.state_dict(), 'best_de_5out_freq2.pth')
            print('----Validation:\tavg.loss={:.4f}\tavg.acc={:.4f}{}'.format(loss, acc, marker))
    print('Best Accuracy: {}'.format(best_acc))
Beispiel #3
0
def setup():
    """Load the dataset and build the (optionally pretrained) encoder/decoder.

    Returns:
        tuple: (encoder, decoder, test_dataset).
    """
    eprint("loading...")
    cudnn.benchmark = True

    test_dataset = MyDataset(
        args.train, filter_pair=True, max_length=mlength, min_length=3,
        max_word_length=args.max_length, freq_threshold=args.freq_threshold,
        onlylower=(args.only_lowercase > 0), load_fprefix="dataset_freq2_")
    voc_size = test_dataset.n_words

    encoder = C2WEncoderRNN(args.hidden_size, args.n_layers,
                            dropout=args.dropout)
    decoder = BahdanauAttnDecoderRNN(voc_size, args.hidden_size,
                                     args.n_layers, dropout=args.dropout)
    eprint(encoder)
    eprint(decoder)
    eprint("vocab size", voc_size)
    encoder.cuda()
    decoder.cuda()

    # Restore weights only when BOTH checkpoint paths are supplied.
    if args.load_en and args.load_de:
        encoder.load_state_dict(torch.load(args.load_en))
        decoder.load_state_dict(torch.load(args.load_de))
        eprint('Loading parameters from {} {}'.format(args.load_en,
                                                      args.load_de))

    return encoder, decoder, test_dataset
Beispiel #4
0
    def loading_data(self):
        """Build the test-set DataLoader for the configured model type."""
        # Resize target per backbone: C3D -> 112, I3D -> 224, default 160.
        if self.model_type == 'C3D':
            size = 112
        elif self.model_type == 'I3D':
            size = 224
        else:
            size = 160

        # ImageNet normalization statistics.
        normalize = Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])

        val_transformations = Compose([
            Resize((size, size)),
            ToTensor(),
            normalize,
        ])

        test_dataset = MyDataset(self.data,
                                 name_list=self.name_list,
                                 data_folder="test",
                                 version="1",
                                 transform=val_transformations,
                                 num_frames=self.num_frames)

        test_loader = data.DataLoader(test_dataset,
                                      batch_size=self.batch_size,
                                      shuffle=False,
                                      num_workers=self.workers,
                                      pin_memory=False)
        return test_loader
Beispiel #5
0
def evalFileBatch():
    """Evaluate the pretrained model over the held-out split and print metrics."""
    args = parse_arguments()
    encoder, decoder, dataset = setup()

    # Wrap the held-out portion of the data file in a fixed-order loader.
    held_out = MyDataset(args.train,
                         filter_pair=True,
                         max_length=mlength,
                         min_length=3,
                         max_word_length=args.max_length,
                         train=False)
    test_data = DataLoader(held_out,
                           batch_size=32,
                           pin_memory=True,
                           shuffle=False,
                           num_workers=2,
                           collate_fn=collate_fn())

    loss, acc = evaluate(test_data, encoder, decoder)
    print("acc:", acc, "loss", loss)
Beispiel #6
0
def setup():
    """Populate the module-level ENCODER, DECODER and DATASET globals."""
    global ENCODER, DECODER, DATASET
    cudnn.benchmark = True

    DATASET = MyDataset(
        SET_DIR, filter_pair=True, max_length=20, min_length=3,
        max_word_length=MAX_W_LENGTH, load_fprefix=LOAD_PREF,
        onlylower=True, freq_threshold=2)
    voc_size = DATASET.n_words

    ENCODER = C2WEncoderRNN(HIDDEN_SIZE, N_LAYERS, dropout=0)
    DECODER = BahdanauAttnDecoderRNN(voc_size, HIDDEN_SIZE, N_LAYERS, dropout=0)
    ENCODER.cuda()
    DECODER.cuda()

    # Restore pretrained weights from the fixed checkpoint paths.
    ENCODER.load_state_dict(torch.load(EN_DIR))
    DECODER.load_state_dict(torch.load(DE_DIR))
    print('Loading parameters from {} {}'.format(EN_DIR, DE_DIR))
def eval(epoch, loss_meter):
    """Run one evaluation pass of the ConvLSTM autoencoder over the video set.

    NOTE(review): shadows the builtin ``eval``; neither ``epoch`` nor
    ``loss_meter`` is used in the visible body — confirm intent.
    """
    # Data.
    # NOTE(review): `resize_height=new_weight` / `resize_width=new_height`
    # looks swapped — confirm against how new_weight/new_height are defined.
    myDataset = MyDataset(video_folder,
                          time_steps=TIME_STEPS,
                          num_pred=NUM_PRED,
                          resize_height=new_weight,
                          resize_width=new_height)
    # TODO: data augmentation
    dataloader = DataLoader(myDataset,
                            batch_size=BATCH_SIZE,
                            shuffle=True,
                            num_workers=num_workers)
    # TODO: visualization-related features

    # Model: load checkpoint and switch to eval mode.
    model = ConvLSTMAE(opt)
    model = model.load(opt.model_ckpt).eval()  # TODO: model_ckpt must match the value used in train
    model.to(device)
    criterion = nn.MSELoss().to(device)  # EuclideanLoss

    for i_batch, (X, Y) in tqdm.tqdm(enumerate(dataloader)):
        # Compute metrics.
        # TODO: preprocess X, Y
        X, Y = X.to(device), Y.to(device)  # the input of model need to(cuda)
        y_list = model(X)  # list of per-timestep predictions
        # Stack the per-timestep outputs into one tensor; autograd tensors
        # cannot be passed through np.concatenate directly.
        y_hat = torch.stack(y_list, 0)
        # Reorder Y to match y_hat's layout before computing the loss.
        Y = Y.permute(1, 0, 4, 3, 2)
        loss = criterion(y_hat, Y)  # mean square error

        # Periodic console logging.
        if (i_batch + 1) % opt.plot_every == 0:
            print(' i_batch: ', i_batch, ' | train loss: %.4f' % loss.data)
Beispiel #8
0
def main(opt, model_version, ds_version, path, name, inverted_freq, weather):
    """Evaluate a saved semantic-segmentation checkpoint on the test split.

    Args:
        opt: 'adam' or 'sgd'. NOTE(review): an optimizer is constructed but
            never stepped in this evaluation-only path — confirm it is needed.
        model_version: 'deeplab' selects the dict-style forward output
            (``net(img)['out']``); anything else calls ``net(img)`` directly.
        ds_version: dataset version forwarded to MyDataset.
        path: directory containing the checkpoint file.
        name: checkpoint filename.
        inverted_freq: 'y' weights CrossEntropyLoss by inverse class frequency.
        weather: 'y' enables the dataset's ``add_weather`` flag.
    """

    params = {
        'num_epochs': 60,
        'batch_size': 10,
        'num_classes': 19,
        'start_features': 32,
        #'log_interval':10,
        #'iou_threshold': 0.3,
        'adam_learning_rate': 1E-3,
        'adam_aux_learning_rate': 5E-4,
        'adam_weight_decay': 1E-4,
        'sgd_learning_rate': 0.1,
        'sgd_weight_decay': 1E-4,
        'sgd_momentum': 0.9,
        'device': torch.device("cuda"),
        'dataset_url': '/home/jupyter/it6/utils/',
        'log_dir': '/home/jupyter/it6/runs/',
        'file_suffix': '_split_urls'
    }

    def test_model(test_loader, net):
        """Return (avg loss, accuracy %, mIoU, mIoU description) over test_loader.

        Relies on ``criterion`` from the enclosing scope, which is assigned
        later in main() but before this function is called.
        """
        net.eval()
        device = params['device']
        batch_size = params['batch_size']
        test_loss = 0
        test_acc = 0
        test_iou = {}
        with torch.no_grad():
            for batch_index, (img, target) in enumerate(test_loader):
                img, target = img.to(device), target.to(device)

                # DeepLabv3 returns a dict; take the primary 'out' head.
                if model_version == 'deeplab':
                    output = net(img)['out']
                else:
                    output = net(img)

                target = target.long()
                loss = criterion(output, target).item()
                test_loss += loss

                pred = aux.get_predicted_image(output)

                output, target, pred = output.detach().cpu(), target.detach(
                ).cpu(), pred.detach().cpu()
                # compute number of correct predictions in the batch
                test_accuracy = metrics.calculate_accuracy(output, target)
                test_acc += test_accuracy

                # Accumulate per-class IoU sums across batches.
                iou_inds = metrics.calculate_iou(pred, target)

                for key in iou_inds:
                    if key not in test_iou:
                        test_iou[key] = iou_inds[key]
                    else:
                        test_iou[key] += iou_inds[key]

        # Average over the number of batches (dataset size / batch size).
        test_loss = test_loss / (len(test_loader.dataset) / batch_size)
        test_acc = 100 * (test_acc / (len(test_loader.dataset) / batch_size))
        test_iou = metrics.convert_batched_iou(
            test_iou, (len(test_loader.dataset) / batch_size))
        mIoU = metrics.get_mIoU(test_iou)

        mIoU_desc = metrics.miou_to_string(test_iou)
        return test_loss, test_acc, mIoU, mIoU_desc

    # Build the lists of transformations.
    joint_transformations_vt, img_transformations_vt = [], []

    # Resize applied jointly to image and target.
    joint_transformations_vt.append(aux.Resize(256, 512))

    # Final tensor conversion (and normalization) applied to the image only.
    img_transformations_vt.append(torchvision.transforms.ToTensor())

    # In the case of DeepLabv3, we apply the recommended normalization.
    if model_version == 'deeplab':
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        img_transformations_vt.append(
            torchvision.transforms.Normalize(mean, std))

    # Compose the joint (image + target) transformation.
    joint_transforms_vt = aux.JointCompose(joint_transformations_vt)

    # Compose the image-only transformation.
    img_transforms_vt = torchvision.transforms.Compose(img_transformations_vt)

    test_dataset = MyDataset(version=ds_version,
                             split='test',
                             joint_transform=joint_transforms_vt,
                             img_transform=img_transforms_vt,
                             url_csv_file=params['dataset_url'],
                             file_suffix=params['file_suffix'],
                             add_weather=weather == 'y')
    test_loader = utils.data.DataLoader(test_dataset,
                                        batch_size=params['batch_size'],
                                        shuffle=False,
                                        num_workers=4)

    model = aux.load_model(path + '/' + name)
    model.to(params['device'])
    net_params = model.parameters()

    # Optionally weight the loss by inverse class frequency to balance it.
    if inverted_freq == 'y':
        print('set Inverted Frequency weights \n')
        # Per-class pixel counts used to derive the inverse-frequency weights.
        num_pixels_per_class = [
            127414939, 21058643, 79041999, 2269832, 3038496, 4244760, 720425,
            1911074, 55121339, 4008424, 13948699, 4204816, 465832, 24210293,
            925225, 813190, 805591, 341018, 1430722
        ]
        inverted_weights = [(1 / num_pixels)
                            for num_pixels in num_pixels_per_class]
        inverted_weights = torch.FloatTensor(inverted_weights).to(
            params['device'])
        criterion = torch.nn.CrossEntropyLoss(weight=inverted_weights,
                                              ignore_index=255)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=255)

    # NOTE(review): the optimizer below is built but never used here.
    if opt == 'adam':
        print('set adam optimizer\n')
        optimizer = torch.optim.Adam(net_params,
                                     lr=params['adam_learning_rate'],
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=params['adam_weight_decay'],
                                     amsgrad=False)
    elif opt == 'sgd':
        print('set SGD optimizer\n')
        optimizer = torch.optim.SGD(net_params,
                                    lr=params['sgd_learning_rate'],
                                    momentum=params['sgd_momentum'],
                                    weight_decay=params['sgd_weight_decay'])

    print('Dataset test images: {}'.format(len(test_loader.dataset)))
    test_loss, test_acc, mIoU, mIoU_desc = test_model(test_loader, model)
    print(
        'Test set: Average loss: {:.4f}, Mean accuracy: {:.2f}%, mIoU: {:.2f}%\n{}\n'
        .format(test_loss, test_acc, mIoU, mIoU_desc))
Beispiel #9
0
def BulidDataloader(args, flag1='train', flag2='source'):
    """Build a DataLoader over facial-expression images for one split/domain.

    Collects image paths, labels, bounding boxes and 5-point landmarks from
    the dataset's list files, then wraps them in MyDataset.

    Args:
        args: namespace providing `source`, `target`, `face_scale`,
            `class_num`, `train_batch` and `test_batch`.
        flag1: split, 'train' or 'test'.
        flag2: domain, 'source' (reads on-disk bounding boxes) or 'target'
            (uses the full image extent as the bounding box).

    Returns:
        torch.utils.data.DataLoader over the assembled dataset
        (shuffled + drop_last for training, ordered for testing).
    """

    assert flag1 in ['train', 'test'], 'Function BuildDataloader : function parameter flag1 wrong.'
    assert flag2 in ['source', 'target'], 'Function BuildDataloader : function parameter flag2 wrong.'

    # Shared transform: resize to the face scale, then ImageNet normalization.
    trans = transforms.Compose([ 
            transforms.Resize((args.face_scale, args.face_scale)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),
            ])
    target_trans = None

    # Label semantics:
    # 0: Surprised
    # 1: Fear
    # 2: Disgust
    # 3: Happy
    # 4: Sad
    # 5: Angry
    # 6: Neutral

    dataPath_prefix = '../Dataset'

    # NOTE: `np.int` (used originally) was deprecated in NumPy 1.20 and
    # removed in 1.24; it was an alias for the builtin `int`, so
    # `.astype(int)` below is behavior-identical and version-safe.
    data_imgs, data_labels, data_bboxs, data_landmarks = [], [], [], []
    if flag1 == 'train':
        if flag2 == 'source':
            list_patition_label = pd.read_csv(dataPath_prefix+'/%s/lists/image_list.txt'%(args.source), header=None, delim_whitespace=True)
            list_patition_label = np.array(list_patition_label)
            for index in range(list_patition_label.shape[0]):
                if list_patition_label[index,0][:5] == "train":
                    # Skip samples missing either the bounding box or landmarks.
                    if not os.path.exists(dataPath_prefix+'/%s/boundingbox/'%(args.source)+list_patition_label[index,0][:-4] + '_boundingbox' + '.txt'):
                        continue
                    if not os.path.exists(dataPath_prefix+'/%s/landmarks_5/'%(args.source)+list_patition_label[index,0][:-4]+'.txt'):
                        continue
                    bbox = np.loadtxt(dataPath_prefix+'/%s/boundingbox/'%(args.source)+list_patition_label[index,0][:-4]+'_boundingbox.txt').astype(int)
                    landmark = np.loadtxt(dataPath_prefix+'/%s/landmarks_5/'%(args.source)+list_patition_label[index,0][:-3]+'txt').astype(int)

                    data_imgs.append(dataPath_prefix+'/%s/images/'%(args.source)+list_patition_label[index,0])
                    data_labels.append(list_patition_label[index,1])
                    data_bboxs.append(bbox)
                    data_landmarks.append(landmark)

        if flag2 == 'target':
            list_patition_label = pd.read_csv(dataPath_prefix+'/%s/lists/image_list.txt'%(args.target), header=None, delim_whitespace=True)
            list_patition_label = np.array(list_patition_label)
            for index in range(list_patition_label.shape[0]):

                if list_patition_label[index,0][:5] == "train":
                    # Target domain has no bounding boxes: skip only on
                    # missing landmarks, and use the full image as the bbox.
                    if not os.path.exists(dataPath_prefix+'/%s/landmarks_5/'%(args.target)+list_patition_label[index,0][:-3]+'txt'):
                        continue
                    img = Image.open(dataPath_prefix + '/%s/images/'%(args.target)+list_patition_label[index,0]).convert('RGB')
                    ori_img_w, ori_img_h = img.size
                    landmark = np.loadtxt(dataPath_prefix+'/%s/landmarks_5/'%(args.target)+list_patition_label[index,0][:-3]+'txt').astype(int)

                    data_imgs.append(dataPath_prefix+'/%s/images/'%(args.target)+list_patition_label[index,0])
                    data_labels.append(list_patition_label[index,1])
                    data_bboxs.append((0,0,ori_img_w,ori_img_h))
                    data_landmarks.append(landmark)

    elif flag1 == 'test':
        if flag2 =='source':
            list_patition_label = pd.read_csv(dataPath_prefix+'/%s/lists/image_list.txt'%(args.source), header=None, delim_whitespace=True)
            list_patition_label = np.array(list_patition_label)
            for index in range(list_patition_label.shape[0]):
                if list_patition_label[index,0][:4] == "test":
                    if not os.path.exists(dataPath_prefix+'/%s/boundingbox/'%(args.source)+list_patition_label[index,0][:-4]+'_boundingbox.txt'):
                        print(list_patition_label[index,0][:-4]+'_boundingbox.txt')
                        continue
                    if not os.path.exists(dataPath_prefix+'/%s/landmarks_5/'%(args.source)+list_patition_label[index,0][:-3]+'txt'):
                        continue

                    bbox = np.loadtxt(dataPath_prefix+'/%s/boundingbox/'%(args.source) + list_patition_label[index,0][:-4]+'_boundingbox.txt').astype(int)
                    landmark = np.loadtxt(dataPath_prefix+'/%s/landmarks_5/'%(args.source)+list_patition_label[index,0][:-3]+'txt').astype(int)
                    data_imgs.append(dataPath_prefix+'/%s/images/'%(args.source)+ list_patition_label[index,0])
                    data_labels.append(list_patition_label[index,1])
                    data_bboxs.append(bbox)
                    data_landmarks.append(landmark)

        elif flag2=='target':
            list_patition_label = pd.read_csv(dataPath_prefix+'/%s/lists/image_list.txt'%(args.target), header=None, delim_whitespace=True)
            list_patition_label = np.array(list_patition_label)
            for index in range(list_patition_label.shape[0]):
                if list_patition_label[index,0][:4] == "test":
                    if not os.path.exists(dataPath_prefix+'/%s/landmarks_5/'%(args.target)+list_patition_label[index,0][:-3]+'txt'):
                        continue
                    img = Image.open(dataPath_prefix + '/%s/images/'%(args.target)+list_patition_label[index,0]).convert('RGB')
                    ori_img_w, ori_img_h = img.size
                    landmark = np.loadtxt(dataPath_prefix+'/%s/landmarks_5/'%(args.target) + list_patition_label[index,0][:-3]+'txt').astype(int)

                    data_imgs.append(dataPath_prefix+'/%s/images/'%(args.target)+list_patition_label[index,0])
                    data_labels.append(list_patition_label[index,1])
                    data_bboxs.append((0,0,ori_img_w,ori_img_h))
                    data_landmarks.append(landmark)

    # DataSet Distribution: per-class sample counts.
    distribute_ = np.array(data_labels)
    print(' %s %s dataset qty: %d' % ( flag1, flag2, len(data_imgs)))
    dataset_dist = [np.sum(distribute_ == i) for i in range(args.class_num)]

    print("Dataset Distribution for %s classes is: "%(args.class_num), dataset_dist)

    # DataSet
    data_set = MyDataset(data_imgs, data_labels, data_bboxs, data_landmarks, flag1, trans, target_trans)

    # DataLoader: shuffle + drop_last only while training.
    if flag1=='train':
        data_loader = data.DataLoader(dataset=data_set, batch_size=args.train_batch, shuffle=True, num_workers=8, drop_last=True)
    elif flag1=='test':
        data_loader = data.DataLoader(dataset=data_set, batch_size=args.test_batch, shuffle=False, num_workers=8, drop_last=False)

    return data_loader
        val_mse_score /= len(test_dataset)
        val_ssim_score /= len(test_dataset)
        val_loss /= len(test_dataset)
        
        sc = 1 - val_mse_score/100 + val_ssim_score
        print(f'[Epoch {epoch+1}] loss: {tr_loss:.3f}, mse_score: {tr_mse_score:.3f}, ssim_score: {tr_ssim_score:.3f}, '+
              f'val_loss: {val_loss:.3f}, val_mse_score: {val_mse_score:.3f}, val_ssim_score: {val_ssim_score:.3f}', flush=True)

        his['train_loss'].append(tr_loss)
        his['train_mse_score'].append(tr_mse_score)
        his['train_ssim_score'].append(tr_ssim_score)
        his['val_loss'].append(val_loss)
        his['val_mse_score'].append(val_mse_score)
        his['val_ssim_score'].append(val_ssim_score)
        
        if sc > max_score:
            print('model saved!')
            max_score = sc
            torch.save(model.state_dict(), args.save_path)
        
    return his

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Paired damaged/ground-truth image folders for the inpainting challenge.
train_dataset = MyDataset('../../Data_Challenge2/train','../../Data_Challenge2/train_gt',is_train=True)
test_dataset = MyDataset('../../Data_Challenge2/test','../../Data_Challenge2/test_gt', is_train=False)

# Partial-convolution U-Net — presumably the Liu et al. inpainting
# architecture given the class name; confirm in the model definition.
model = PConvUNet().to(device)

# Train and collect the per-epoch metric history.
his = fit(model, train_dataset, test_dataset, args)
def predict(model, test_dataset):
    """Run the inpainting model over the test set, one image at a time.

    Returns:
        list: composited output tensors on the CPU, in dataset order.
    """
    loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
    model.eval()

    outputs = []
    for batch in loader:
        img, mask = [t.type(torch.float).to(device) for t in batch]
        mask = mask.permute(0, 3, 1, 2)
        output, _ = model(img, mask)
        # Keep known pixels from the input; fill holes from the prediction.
        composited = mask * img + (1 - mask) * output[0]
        outputs.append(composited.detach().cpu()[0])

    return outputs


# Fix all RNG seeds for reproducible inference.
seed = 87
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

test_dataset = MyDataset(in_path, is_train=False)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = PConvUNet().to(device)
model.load_state_dict(torch.load('./model5.pth'))

res = predict(model, test_dataset)

# Write each reconstructed image next to its source image name.
for i in range(len(res)):
    file = os.path.join(out_path, test_dataset.imgs[i] + '.jpg')
    plt.imsave(file, unnormalize(res[i]))
Beispiel #12
0
#                 scale_limit=0.2,
#                 rotate_limit=20,
#                 interpolation=cv2.INTER_LINEAR,
#                 border_mode=cv2.BORDER_REFLECT_101,
#                 p=1,
#             ),
#             Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, p=1.0),
#         ]
#     )
# Deterministic preprocessing for evaluation (no augmentation).
test_transform=transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=norm_mean,std=norm_std)
])

train_data=MyDataset(root,"train",train_transform)
test_data=MyDataset(root,"test",test_transform)

train_dataloader=DataLoader(train_data,batch_size=batch_size,shuffle=True)
test_dataloader=DataLoader(test_data,batch_size=batch_size,shuffle=False)

# Multi-class classification objective and plain SGD with momentum.
criterion=nn.CrossEntropyLoss()
optimizer=optim.SGD(model.parameters(),lr=lr,momentum=momentum)

print("Start Training!")

for epoch in range(num_epoch):
    # loss_count = 0.0
    for i, data in enumerate(train_dataloader, 0):
        images, labels = data
def train(epoch, loss_meter, writer):
    """Train the ConvLSTM autoencoder for one epoch over the video dataset.

    Args:
        epoch: current epoch index (used only for console logging).
        loss_meter: statistics accumulator. NOTE(review): it is reset but
            never updated in this body — confirm intent.
        writer: TensorBoard-style summary writer for the training loss.
    """
    # Data.
    # NOTE(review): `resize_height=new_weight` / `resize_width=new_height`
    # looks swapped — confirm against how new_weight/new_height are defined.
    myDataset = MyDataset(video_folder,
                          time_steps=TIME_STEPS,
                          num_pred=NUM_PRED,
                          resize_height=new_weight,
                          resize_width=new_height,
                          channel=opt.channel)
    # TODO: data augmentation
    dataloader = DataLoader(myDataset,
                            batch_size=BATCH_SIZE,
                            shuffle=shuffle,
                            num_workers=num_workers)

    # Model.
    model = ConvLSTMAE(opt).train()
    if opt.model_ckpt:
        # TODO: `load` may also need a map_location argument — consider it
        # if loading on GPU raises an error.
        model.load(opt.model_ckpt)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=LR)  # TODO: should this match the paper?
    # TODO: tune parameter initialization, learning rate and other hyperparameters.
    criterion = nn.MSELoss().to(device)  # EuclideanLoss
    model.to(device)
    # Statistics.
    loss_meter.reset()

    cnt_batch = 0
    for i_batch, (X, Y) in tqdm.tqdm(enumerate(dataloader)):
        cnt_batch = cnt_batch + 1  # manual batch counter for the writer
        # Training step.
        # TODO: preprocess X, Y
        X, Y = X.to(device), Y.to(
            device)  # the input of model need to(cuda)        #
        y_list = model(X)  # list of per-timestep predictions
        # Stack the per-timestep outputs into one tensor; autograd tensors
        # cannot be passed through np.concatenate directly.
        y_hat = torch.stack(y_list, 0)
        # Reorder Y to match y_hat's layout before computing the loss.
        Y = Y.permute(1, 0, 4, 3, 2)
        # TODO: the Dataset should really emit [b,t,c,w,h] from the start.

        loss = criterion(y_hat, Y)  # mean square error
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients

        # Periodic console + TensorBoard logging.
        if (i_batch + 1) % opt.plot_every == 0:
            print(' epoch: ', epoch, ' | i_batch: ', i_batch,
                  ' | train loss: %.4f' % loss.data)
            writer.add_scalar("train_loss", loss.item(), cnt_batch)

    model.save()  # save the model after every epoch
Beispiel #14
0
# Parse CLI options and create the per-run output directories.
opt = parser.parse_args()
args = ['mkdir', 'out_' + opt.date, 'outpth_' + opt.date]
res = subprocess.call(args)

print(opt)
# Pick a random seed when none was given, and seed both RNGs with it.
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
    # torch.cuda.manual_seed_all(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

# Presumably MyDataset() falls back to a default image glob when called
# without arguments — confirm in the MyDataset definition.
data_set = MyDataset()
test_data_set = MyDataset("test_images/**/*.jpg")
data_loader = torch.utils.data.DataLoader(data_set,
                                          batch_size=opt.batchSize,
                                          shuffle=True)
test_data_loader = torch.utils.data.DataLoader(test_data_set,
                                               batch_size=opt.batchSize,
                                               shuffle=True)
print(len(data_set))
print(len(test_data_set))

cudnn.benchmark = True

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
Beispiel #15
0
from Dataset.MyDataset import *
from torch.utils import data

# Paths to the PLAID 2018 submetered dataset (metadata JSON + aggregated CSV).
Dir = 'D:/Desktop/项目/负荷识别部/Plaid/PLAID 2018/submetered/'
meta_path = Dir + 'metadata_submetered2.0.json'
csv_path = Dir + 'all_submetered.csv'
# Feature columns to extract per sample.
used_feas = ["i_mean", "i_thd", "P", "Q"]
d = MyDataset(meta_path, csv_path, LabelType.Type, used_feas=used_feas)
# Smoke-check a single sample.
features, label = d[0]

# Iterate the whole dataset in fixed order and dump each batch.
data_loader = data.DataLoader(d, batch_size=4, shuffle=False)
print(len(d))
for i, (f, labels) in enumerate(data_loader):
    print(f)
    print(labels)
print("XXX")
Beispiel #16
0
            tq.update(batch_size)
        tq.close()
        output = pred_all
    return output.cpu().detach().numpy()
    

if __name__=='__main__':
    
    # 5-fold cross-validation over 7000 training / 2000 test samples.
    folds = KFold(n_splits=5, shuffle=True, random_state=2019)
    trn_len = 7000
    test_len = 2000
    # NOTE(review): oof_lgb and prediction are allocated but not filled in
    # the visible span — confirm they are used further on.
    oof_lgb = np.zeros((trn_len, 3))
    prediction = np.zeros((test_len, 3))

    criterion = nn.CrossEntropyLoss()
    all_dataset = MyDataset(trn_path, trn_figurepath)
    test_dataset = MyDataset(test_path, test_figurepath, False)
    test_loader = DataLoader(test_dataset, batch_size=trn_batchsize, num_workers=4, shuffle=False)
    labels = np.array(get_label(trn_path))
    import torch.optim as optim

    for fold_, (trn_idx, val_idx) in enumerate(folds.split(np.zeros(trn_len))):
        print("fold n°{}".format(fold_ + 1))        
        
        # Per-fold train/validation subsets of the same underlying dataset.
        train_5 = Subset(all_dataset, trn_idx)
        valid_5 = Subset(all_dataset, val_idx)

        train_loader = DataLoader(train_5, batch_size=trn_batchsize, num_workers=4, shuffle=True)
        val_loader = DataLoader(valid_5, batch_size=trn_batchsize, num_workers=4, shuffle=False)
        model = train(train_loader, val_loader, trn_batchsize)
        
Beispiel #17
0
def main(opt, unet_version, ds_version, data_augmentation, inverted_freq,
         weather):

    params = {
        'num_epochs': 100,
        'batch_size': 10,
        'num_classes': 19,
        'start_features': 32,
        'log_interval': 10,
        'iou_threshold': 0.3,
        'adam_learning_rate': 1E-3,
        'adam_aux_learning_rate': 5E-4,
        'adam_weight_decay': 1E-4,
        'sgd_learning_rate': 1E-3,
        'sgd_weight_decay': 1E-4,
        'sgd_momentum': 0.9,
        'device': torch.device("cuda"),
        'dataset_url': '/home/jupyter/it6/utils/',
        'log_dir': '/home/jupyter/it6/runs/',
        'file_suffix': '_split_urls'
        #'auth_service_json_url':'/home/jupyter/it6-oss-9b93ef313e32.json'
    }

    params['device'] = torch.device(
        "cuda") if torch.cuda.is_available() else 'cpu'
    da = 'da.' if data_augmentation == 'y' else ''
    inv = 'inv.' if inverted_freq == 'y' else ''
    we = 'we' if weather == 'y' else ''
    experiment_id = opt + '.' + unet_version + '.' + ds_version + da + inv + we

    # Creamos las transformaciones para las imagenes y targets
    """
    tensor_transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize(size=(256, 512), interpolation=0),
        torchvision.transforms.ToTensor()
    ])
    target_transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize(size=(256, 512), interpolation=0)
    ])
    """
    #Creamos las listas para las transformaciones
    joint_transformations, joint_transformations_vt, img_transformations, img_transformations_vt = [], [], [], []

    #Añadimos el Resize
    joint_transformations.append(aux.Resize(256, 512))
    joint_transformations_vt.append(aux.Resize(256, 512))

    #En caso de Data Augmentation, se añade un Random Vertical Flip y el ajuste de parametros de imagen
    if data_augmentation == "y":
        print('set Data Augmentation\n')
        joint_transformations.append(aux.RandomVerticallyFlip())
        img_transformations.append(
            torchvision.transforms.ColorJitter(brightness=0.1,
                                               contrast=0.1,
                                               saturation=0.1,
                                               hue=0.1))

    if weather == 'y':
        print('set weather\n')
        img_transformations.append(aux.Weather())

    #añadimos la transformacion final para tensor en las img
    img_transformations.append(torchvision.transforms.ToTensor())
    img_transformations_vt.append(torchvision.transforms.ToTensor())

    #Aplicamos la transformacion conjunta sobre img y target
    joint_transforms = aux.JointCompose(joint_transformations)
    joint_transforms_vt = aux.JointCompose(joint_transformations_vt)

    #Aplicamos solo la transformacion sobre img
    img_transforms = torchvision.transforms.Compose(img_transformations)
    img_transforms_vt = torchvision.transforms.Compose(img_transformations_vt)
    """
    transformations, transformations_target = [], []
    transformations.append(torchvision.transforms.Resize(size=(256, 512),interpolation=0))
    transformations_target.append(torchvision.transforms.Resize(size=(256, 512),interpolation=0))

    if data_augmentation == "y":
        transformations.append(torchvision.transforms.RandomVerticalFlip(p=0.5))
        transformations.append(torchvision.transforms.ColorJitter(brightness=3))
        transformations_target.append(torchvision.transforms.RandomVerticalFlip(p=0.5))

    #añadimos la transformacion final para tensor
    transformations.append(torchvision.transforms.ToTensor())

    #Aplicamos la transformacion sobre target
    tensor_transform = torchvision.transforms.Compose(transformations)
    target_transform = torchvision.transforms.Compose(transformations_target)
    """
    #Definimos temporizadores
    #Arrancamos el temporizador
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start_total = torch.cuda.Event(enable_timing=True)
    end_total = torch.cuda.Event(enable_timing=True)

    # Creamos los datasets y dataloaders
    train_dataset = MyDataset(version=ds_version,
                              split='train',
                              joint_transform=joint_transforms,
                              img_transform=img_transforms,
                              url_csv_file=params['dataset_url'],
                              file_suffix=params['file_suffix'])
    train_loader = utils.data.DataLoader(train_dataset,
                                         batch_size=params['batch_size'],
                                         shuffle=True,
                                         num_workers=4)

    val_dataset = MyDataset(version=ds_version,
                            split='val',
                            joint_transform=joint_transforms_vt,
                            img_transform=img_transforms_vt,
                            url_csv_file=params['dataset_url'],
                            file_suffix=params['file_suffix'])
    val_loader = utils.data.DataLoader(val_dataset,
                                       batch_size=params['batch_size'],
                                       shuffle=False,
                                       num_workers=4)

    test_dataset = MyDataset(version=ds_version,
                             split='test',
                             joint_transform=joint_transforms_vt,
                             img_transform=img_transforms_vt,
                             url_csv_file=params['dataset_url'],
                             file_suffix=params['file_suffix'])
    test_loader = utils.data.DataLoader(test_dataset,
                                        batch_size=params['batch_size'],
                                        shuffle=False,
                                        num_workers=4)
    """
    train_dataset = MyDataset(version=ds_version, split='train', transform=tensor_transform, target_transform=target_transform, url_csv_file=params['dataset_url'], file_suffix=params['file_suffix'])
    train_loader = utils.data.DataLoader(train_dataset, batch_size=params['batch_size'], shuffle=True, num_workers = 4)

    val_dataset = MyDataset(version=ds_version, split='val', transform=tensor_transform, target_transform=target_transform, url_csv_file=params['dataset_url'], file_suffix=params['file_suffix'])
    val_loader = utils.data.DataLoader(val_dataset, batch_size=params['batch_size'], shuffle=False, num_workers = 4)

    test_dataset = MyDataset(version=ds_version, split='test', transform=tensor_transform, target_transform=target_transform, url_csv_file=params['dataset_url'], file_suffix=params['file_suffix'])
    test_loader = utils.data.DataLoader(test_dataset, batch_size=params['batch_size'], shuffle=False, num_workers = 4)
    """
    def train_one_epoch(train_loader, net, optimizer, criterion, hparams):
        """Run one training epoch and report averaged loss, accuracy and mIoU.

        Args:
            train_loader: DataLoader yielding (image, target) batches.
            net: segmentation network to optimize (put into train mode here).
            optimizer: optimizer stepping ``net``'s parameters.
            criterion: loss applied to (network output, long target).
            hparams: dict providing at least 'device' and 'batch_size'.

        Returns:
            Tuple of (mean loss, accuracy in %, mIoU, mIoU description string).
        """
        net.train()

        device = hparams['device']
        batch_size = hparams['batch_size']
        train_loss, train_accs = 0, 0
        train_iou = {}
        times_per_step_iteration = []
        times_per_metric_iteration = []
        times_per_iteration = []
        for batch_index, (img, target) in enumerate(train_loader):
            # Start the whole-iteration timer (CUDA events, ms resolution).
            start_total.record()
            img, target = img.to(device), target.to(device)
            optimizer.zero_grad()

            # Time the forward/backward/step segment separately.
            start.record()
            output = net(img)

            target = target.long()

            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            pred = aux.get_predicted_image(output)

            end.record()
            torch.cuda.synchronize()
            times_per_step_iteration.append(start.elapsed_time(end))

            # Time the metric-computation segment (reuses the same events).
            start.record()

            # Detach and move to CPU before computing metrics.
            output, target, pred = (output.detach().cpu(),
                                    target.detach().cpu(),
                                    pred.detach().cpu())
            train_loss += loss.item()
            # calculate_accuracy picks, per pixel, the class (feature map)
            # with the highest score and compares it against the target.
            train_accs += metrics.calculate_accuracy(output, target)

            iou_inds = metrics.calculate_iou(pred, target)
            for key in iou_inds:
                if key not in train_iou:
                    train_iou[key] = iou_inds[key]
                else:
                    train_iou[key] += iou_inds[key]

            end.record()
            torch.cuda.synchronize()
            times_per_metric_iteration.append(start.elapsed_time(end))

            end_total.record()
            torch.cuda.synchronize()
            # BUG FIX: measure up to end_total (the event just recorded), not
            # `end`, which was recorded before the total timer stopped and so
            # undercounted the iteration time.
            times_per_iteration.append(start_total.elapsed_time(end_total))

        # Loop-invariant averages: compute once after the epoch, not per batch.
        avg_time_taken = sum(times_per_iteration) / len(times_per_iteration)
        avg_time_step_taken = sum(times_per_step_iteration) / len(
            times_per_step_iteration)
        avg_time_metrics_taken = sum(times_per_metric_iteration) / len(
            times_per_metric_iteration)

        # elapsed_time returns milliseconds; scale to seconds for display.
        print('Average Time spent total: {:.02f}s'.format(avg_time_taken *
                                                          1e-3))
        print('Average Time spent by steps: {:.02f}s'.format(
            avg_time_step_taken * 1e-3))
        print('Average Time spent by metrics: {:.02f}s'.format(
            avg_time_metrics_taken * 1e-3))
        print('Average Time spent by data load: {:.02f}s'.format(
            avg_time_taken * 1e-3 - avg_time_step_taken * 1e-3 -
            avg_time_metrics_taken * 1e-3))

        # Normalize the per-batch accumulators by the number of batches.
        num_batches = len(train_loader.dataset) / batch_size
        train_loss = train_loss / num_batches
        train_accs = 100 * (train_accs / num_batches)
        train_iou = metrics.convert_batched_iou(train_iou, num_batches)
        mIoU = metrics.get_mIoU(train_iou)
        mIoU_desc = metrics.miou_to_string(train_iou)
        return train_loss, train_accs, mIoU, mIoU_desc

    def val_one_epoch(val_loader, net):
        """Run a no-grad validation pass and aggregate loss, accuracy and IoU.

        Args:
            val_loader: DataLoader over the validation split.
            net: network to evaluate (switched to eval mode here).

        Returns:
            Tuple of (mean loss, accuracy in %, mIoU, mIoU description string).
        """
        net.eval()
        device = params['device']
        batch_size = params['batch_size']
        total_loss, total_acc = 0, 0
        iou_sums = {}
        with torch.no_grad():
            for img, target in val_loader:
                img, target = img.to(device), target.to(device)
                output = net(img)
                target = target.long()

                total_loss += criterion(output, target).item()

                pred = aux.get_predicted_image(output)
                # Detach and move to CPU before computing metrics.
                output = output.detach().cpu()
                target = target.detach().cpu()
                pred = pred.detach().cpu()

                # Per-batch pixel accuracy, accumulated across the epoch.
                total_acc += metrics.calculate_accuracy(output, target)
                for key, value in metrics.calculate_iou(pred, target).items():
                    if key in iou_sums:
                        iou_sums[key] += value
                    else:
                        iou_sums[key] = value

        # Normalize the accumulators by the number of batches.
        num_batches = len(val_loader.dataset) / batch_size
        mean_loss = total_loss / num_batches
        mean_acc = 100 * (total_acc / num_batches)
        iou_sums = metrics.convert_batched_iou(iou_sums, num_batches)
        mIoU = metrics.get_mIoU(iou_sums)

        mIoU_desc = metrics.miou_to_string(iou_sums)
        return mean_loss, mean_acc, mIoU, mIoU_desc

    ## Build the net here
    if unet_version == 'linear':
        print('set linear unet\n')
        unet = UNet(num_classes=params['num_classes'],
                    start_features=params['start_features'])
    else:
        print('set ' + str(unet_version) + ' unet\n')
        unet = UNetFull(num_classes=params['num_classes'],
                        start_features=params['start_features'],
                        bilinear=unet_version == 'bilinear')

    ###################

    writer_date = datetime.now().strftime("%Y%m%d-%H%M%S")
    run_path = params['log_dir'] + '/' + experiment_id
    run_data_folder = run_path + '/' + writer_date
    tb_writer_train = SummaryWriter(log_dir=run_data_folder + "/train")
    tb_writer_val = SummaryWriter(log_dir=run_data_folder + "/val")
    images_train, targets_train = next(iter(train_loader))
    images_val, targets_val = next(iter(val_loader))

    aux.write_tensorboard_inicio(tb_writer_train, tb_writer_val, unet,
                                 images_train, images_val, targets_val)

    ##################

    unet.to(params['device'])
    net_params = unet.parameters()

    #Depending on the inverted frequency parameter we apply this parameter as a weight to balance the Loss Function
    if inverted_freq == 'y':
        print('set Inverted Frequency weights \n')
        num_pixels_per_class = [
            127414939, 21058643, 79041999, 2269832, 3038496, 4244760, 720425,
            1911074, 55121339, 4008424, 13948699, 4204816, 465832, 24210293,
            925225, 813190, 805591, 341018, 1430722
        ]
        inverted_weights = [(1 / num_pixels)
                            for num_pixels in num_pixels_per_class]
        inverted_weights = torch.FloatTensor(inverted_weights).to(
            params['device'])
        criterion = torch.nn.CrossEntropyLoss(weight=inverted_weights,
                                              ignore_index=255)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=255)

    if opt == 'adam':
        print('set adam optimizer\n')
        optimizer = torch.optim.Adam(net_params,
                                     lr=params['adam_learning_rate'],
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=params['adam_weight_decay'],
                                     amsgrad=False)
    elif opt == 'sgd':
        print('set SGD optimizer\n')
        optimizer = torch.optim.SGD(net_params,
                                    lr=params['sgd_learning_rate'],
                                    momentum=params['sgd_momentum'],
                                    weight_decay=params['sgd_weight_decay'])

    best_epoch_miou, added_targg = 0, False

    print('Dataset train images: {}, dataset val images: {}'.format(
        len(train_loader.dataset), len(val_loader.dataset)))

    train_losses, train_acc_hist, val_losses, val_acc_hist, mIoU_hist_train, mIoU_hist_val = [], [], [], [], [], []
    for epoch in range(1, params['num_epochs'] + 1):

        # Compute & save the average training loss for the current epoch
        print('#################### Epoch: {} ####################\n'.format(
            epoch))

        aux.print_timestamp('Inicio training epoch {}'.format(epoch))
        train_loss, train_acc, train_mIoU, mIoU_desc_train = train_one_epoch(
            train_loader, unet, optimizer, criterion, params)
        print(
            'Training set: Average loss {:.4f}, Average accuracy {:.2f}%, mIoU: {:.2f}\n{}\n'
            .format(train_loss, train_acc, train_mIoU, mIoU_desc_train))

        aux.print_timestamp('Inicio validacion epoch {}'.format(epoch))
        val_loss, val_acc, val_mIoU, mIoU_desc_val = val_one_epoch(
            val_loader, unet)
        print(
            'Validation set: Average loss: {:.4f}, Mean accuracy: {:.2f}%, mIoU: {:.2f}\n{}\n'
            .format(val_loss, val_acc, val_mIoU, mIoU_desc_val))

        train_mAP = sum(train_acc_hist) / epoch  # params['num_epochs']
        val_mAP = sum(val_acc_hist) / epoch  # params['num_epochs']

        if val_mIoU > best_epoch_miou:
            best_epoch_miou = val_mIoU
            print('Guardamos el modelo en epoch {} ( mIoU {:.2f})'.format(
                epoch, val_mIoU))
            aux.save_model(unet, run_data_folder + '/best')
            aux.write_tensorboard_best_IoU(tb_writer_val, val_mIoU, epoch)
        #train_losses.append(train_loss)
        train_acc_hist.append(train_acc)
        #val_losses.append(val_loss)
        val_acc_hist.append(val_acc)
        #mIoU_hist.append(val_mIoU)

        mIoU_hist_train.append(train_mIoU)
        mIoU_hist_val.append(val_mIoU)

        images_val = images_val.to(params['device'])
        predicted_output = unet(images_val)

        aux.write_tensorboard_epoch(tb_writer_train, tb_writer_val,
                                    run_data_folder, predicted_output[0],
                                    epoch, train_loss, train_acc, val_loss,
                                    val_acc, train_mIoU, val_mIoU, train_mAP,
                                    val_mAP)
    aux.save_model(unet, run_data_folder + '/last')
    print('Fin del entrenamiento\n')

    tb_writer_train.close()
    tb_writer_val.close()

    def test_model(test_loader, net):
        """Evaluate ``net`` on the test split without gradient tracking.

        Args:
            test_loader: DataLoader over the test split.
            net: trained network to evaluate (put into eval mode here).

        Returns:
            Tuple of (mean loss, accuracy in %, mIoU, mIoU description string).
        """
        net.eval()
        device = params['device']
        batch_size = params['batch_size']
        loss_sum, acc_sum = 0, 0
        iou_sums = {}
        with torch.no_grad():
            for img, target in test_loader:
                img, target = img.to(device), target.to(device)
                output = net(img)
                target = target.long()
                loss_sum += criterion(output, target).item()

                pred = aux.get_predicted_image(output)

                # Detach and move to CPU before computing metrics.
                output = output.detach().cpu()
                target = target.detach().cpu()
                pred = pred.detach().cpu()

                # Per-batch pixel accuracy, accumulated across the run.
                acc_sum += metrics.calculate_accuracy(output, target)

                for key, value in metrics.calculate_iou(pred, target).items():
                    if key in iou_sums:
                        iou_sums[key] += value
                    else:
                        iou_sums[key] = value

        # Normalize the accumulators by the number of batches.
        num_batches = len(test_loader.dataset) / batch_size
        mean_loss = loss_sum / num_batches
        mean_acc = 100 * (acc_sum / num_batches)
        iou_sums = metrics.convert_batched_iou(iou_sums, num_batches)
        mIoU = metrics.get_mIoU(iou_sums)

        mIoU_desc = metrics.miou_to_string(iou_sums)
        return mean_loss, mean_acc, mIoU, mIoU_desc

    unet = aux.load_model(run_data_folder + '/best')
    print('Dataset test images: {}'.format(len(test_loader.dataset)))
    test_loss, test_acc, mIoU, mIoU_desc = test_model(test_loader, unet)
    print(
        'Test set: Average loss: {:.4f}, Mean accuracy: {:.2f}%, mIoU: {:.2f}%\n{}\n'
        .format(test_loss, test_acc, mIoU, mIoU_desc))