Example #1
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)
    img_size = (args.size, args.size)

    # model
    # Backbone + pretrained weights: an assumed completion of this TODO,
    # mirroring how the later examples load their pretraining checkpoints.
    pretrained_model = ResNet18Backbone(pretrained=False)
    pretrained_model.load_state_dict(torch.load(args.weights_init)['model'], strict=False)
    model = Segmentator(2, pretrained_model.features, img_size).cuda()

    # dataset
    train_trans, val_trans, train_target_trans, val_target_trans = get_transforms_binary_segmentation(
        args)
    data_root = args.data_folder
    train_data = DataReaderBinarySegmentation(
        os.path.join(data_root, "imgs/train2014"),
        os.path.join(data_root, "aggregated_annotations_train_5classes.json"),
        transform=train_trans,
        target_transform=train_target_trans)
    val_data = DataReaderBinarySegmentation(
        os.path.join(data_root, "imgs/val2014"),
        os.path.join(data_root, "aggregated_annotations_val_5classes.json"),
        transform=val_trans,
        target_transform=val_target_trans)
    print("Dataset size: {} samples".format(len(train_data)))
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.bs,
                                               shuffle=True,
                                               num_workers=6,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=6,
                                             pin_memory=True,
                                             drop_last=False)

    # loss (assumed completion, as in the later examples)
    criterion = torch.nn.CrossEntropyLoss().cuda()
    # SGD optimizer (see pretraining)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                momentum=0.9, weight_decay=1e-4)

    expdata = "  \n".join(
        ["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(len(train_data)))
    logger.info('val_data {}'.format(len(val_data)))

    best_val_loss = np.inf
    best_val_miou = 0.0
    for epoch in range(100):
        logger.info("Epoch {}".format(epoch))
        train(train_loader, model, criterion, optimizer, logger)
        val_results = validate(val_loader, model, criterion, logger, epoch)
Example #2
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)
    img_size = (args.size, args.size)

    # model
    pretrained_model = ResNet18Backbone(False)
    # Build the AttSegmentator model (assumed completion of this TODO,
    # mirroring Example #5 below)
    model = AttSegmentator(5, pretrained_model.features,
                           att_type='dotprod', img_size=img_size).cuda()

    if os.path.isfile(args.pretrained_model_path):
        model = load_from_weights(model, args.pretrained_model_path, logger)

    # dataset
    data_root = args.data_folder
    train_transform, val_transform, train_transform_mask, val_transform_mask = get_transforms_binary_segmentation(args)
    vec_transform = ToTensor()
    train_data = DataReaderSingleClassSemanticSegmentationVector(
        os.path.join(data_root, "imgs/train2014"),
        os.path.join(data_root, "aggregated_annotations_train_5classes.json"),
        transform=train_transform,
        vec_transform=vec_transform,
        target_transform=train_transform_mask
    )
    # Note that the dataloaders are different.
    # During validation we want to pass all the semantic classes for each image
    # to evaluate the performance.
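    # (Presumably this is also why the val loader below uses batch_size=1:
    # each validation image expands into one mask per semantic class.)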
    val_data = DataReaderSemanticSegmentationVector(
        os.path.join(data_root, "imgs/val2014"),
        os.path.join(data_root, "aggregated_annotations_val_5classes.json"),
        transform=val_transform,
        vec_transform=vec_transform,
        target_transform=val_transform_mask
    )

    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.bs, shuffle=True,
                                               num_workers=6, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=1, shuffle=False,
                                             num_workers=6, pin_memory=True, drop_last=False)


    # loss (assumed completion, as in the later examples)
    criterion = torch.nn.CrossEntropyLoss().cuda()
    # SGD optimizer (see pretraining)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                momentum=0.9, weight_decay=1e-4)

    expdata = "  \n".join(["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(len(train_data)))
    logger.info('val_data {}'.format(len(val_data)))

    best_val_loss = np.inf
    best_val_miou = 0.0
    for epoch in range(100):
        logger.info("Epoch {}".format(epoch))
        train(train_loader, model, criterion, optimizer, logger)
        val_loss, val_iou = validate(val_loader, model, criterion, logger, epoch)

        # save the best model (assumed completion, condensed from the later examples)
        if val_iou > best_val_miou:
            best_val_miou = val_iou
            save_model(model, optimizer, args, epoch + 1, val_loss, val_iou, logger)
Example #3
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)
    img_size = (args.size, args.size)

    # model
    data_root = args.data_folder
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('#### This is the device used: ', device, '####')
    
    encoder_model = ResNet18Backbone(pretrained=False).to(device)
    model = Segmentator(6, encoder_model.features, img_size).to(device)  # 5 classes + background

    pretrained_dict = torch.load(args.weights_init, map_location=device)['model']
    model_dict = model.state_dict()
    # filter out unnecessary keys
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and k not in
                       ['decoder.last_conv.6.weight', 'decoder.last_conv.6.bias']}
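    # (The two excluded keys presumably form the final classifier layer,
    # whose output shape differs here: 6 classes vs. the pretraining head.)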
    # overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # load the new state dict
    model.load_state_dict(model_dict, strict=False)

    # dataset
    train_trans, val_trans, train_target_trans, val_target_trans = get_transforms_binary_segmentation(args)
    data_root = args.data_folder
    train_data = DataReaderSemanticSegmentation(
        os.path.join(data_root, "imgs/train2014"),
        os.path.join(data_root, "aggregated_annotations_train_5classes.json"),
        transform=train_trans,
        target_transform=train_target_trans
    )
    val_data = DataReaderSemanticSegmentation(
        os.path.join(data_root, "imgs/val2014"),
        os.path.join(data_root, "aggregated_annotations_val_5classes.json"),
        transform=val_trans,
        target_transform=val_target_trans
    )
    print("Dataset size: {} samples".format(len(train_data)))
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.bs, shuffle=True,
                                               num_workers=6, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=1, shuffle=False,
                                             num_workers=6, pin_memory=True, drop_last=False)

    # loss
    criterion = torch.nn.CrossEntropyLoss()  # combines nn.LogSoftmax() and nn.NLLLoss() in one class
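    # i.e. criterion(logits, target) is equivalent to
    # F.nll_loss(F.log_softmax(logits, dim=1), target),
    # so the model should output raw, unnormalized scores.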
    # SGD optimizer (see pretraining)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)

    expdata = "  \n".join(["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(len(train_data)))
    logger.info('val_data {}'.format(len(val_data)))

    best_val_loss = np.inf
    best_val_miou = 0.0

    train_loss_list = []
    train_iou_list = []
    val_loss_list = []
    val_iou_list = []
    for epoch in range(100):
        logger.info("Epoch {}".format(epoch))
        train_results = train(train_loader, model, criterion, optimizer, logger, device)
        val_results = validate(val_loader, model, criterion, logger, device, epoch)

        mean_val_loss, mean_val_iou = val_results
        mean_train_loss, mean_train_iou = train_results

        train_loss_list.append(mean_train_loss)
        train_iou_list.append(mean_train_iou)
        val_loss_list.append(mean_val_loss)
        val_iou_list.append(mean_val_iou)

        # save model
        if mean_val_iou > best_val_miou:
            best_val_miou = mean_val_iou
            save_model(model, optimizer, args, epoch + 1, mean_val_loss, mean_val_iou,
                       logger, best_iou=True, best_loss=False)
        elif mean_val_loss < best_val_loss:
            best_val_loss = mean_val_loss
            save_model(model, optimizer, args, epoch + 1, mean_val_loss, mean_val_iou,
                       logger, best_iou=False, best_loss=True)
        elif (epoch + 1) % 10 == 0:
            save_model(model, optimizer, args, epoch + 1, mean_val_loss, mean_val_iou,
                       logger, best_iou=False, best_loss=False)
        
        # save the plots and raw curves
        save_fig(train_loss_list, 'train_loss')
        save_fig(train_iou_list, 'train_iou')
        save_fig(val_loss_list, 'val_loss')
        save_fig(val_iou_list, 'val_iou')

        pd.DataFrame({'train_loss': train_loss_list}).to_csv(
            os.path.join(args.plots_folder, 'train_loss.csv'), index=False)
        pd.DataFrame({'train_iou': train_iou_list}).to_csv(
            os.path.join(args.plots_folder, 'train_iou.csv'), index=False)
        pd.DataFrame({'val_loss': val_loss_list}).to_csv(
            os.path.join(args.plots_folder, 'val_loss.csv'), index=False)
        pd.DataFrame({'val_iou': val_iou_list}).to_csv(
            os.path.join(args.plots_folder, 'val_iou.csv'), index=False)
Example #4
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)
    img_size = (args.size, args.size)

    # model
    pretrained_model = ResNet18Backbone(pretrained=False)
    pretrained_model.load_state_dict(torch.load("./trained_models/model1.pth"))
    model = Segmentator(2, pretrained_model.features, img_size).cuda()

    # dataset
    train_trans, val_trans, train_target_trans, val_target_trans = get_transforms_binary_segmentation(args)
    data_root = args.data_folder
    train_data = DataReaderBinarySegmentation(
        os.path.join(data_root, "imgs/train2014"),
        os.path.join(data_root, "aggregated_annotations_train_5classes.json"),
        transform=train_trans,
        target_transform=train_target_trans
    )
    val_data = DataReaderBinarySegmentation(
        os.path.join(data_root, "imgs/val2014"),
        os.path.join(data_root, "aggregated_annotations_val_5classes.json"),
        transform=val_trans,
        target_transform=val_target_trans
    )
    print("Dataset size: {} samples".format(len(train_data)))
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.bs, shuffle=True,
                                               num_workers=6, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=1, shuffle=False,
                                             num_workers=6, pin_memory=True, drop_last=False)

    # loss
    criterion = torch.nn.CrossEntropyLoss().cuda()
    # SGD optimizer (see pretraining)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)

    expdata = "  \n".join(["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(len(train_data)))
    logger.info('val_data {}'.format(len(val_data)))

    best_val_loss = np.inf
    best_val_miou = 0.0
    writer_1 = SummaryWriter("./tensorboard")
    for epoch in range(100):
        logger.info("Epoch {}".format(epoch))
        train_loss, train_iou = train(train_loader, model, criterion, optimizer, logger)
        val_results = validate(val_loader, model, criterion, logger, epoch)
        print("train_loss:", train_loss)
        print("train_iou:", train_iou)
        print("val_loss:", val_results[0])
        print("val_iou:", val_results[1])

        writer_1.add_scalar('train_loss', train_loss, epoch)
        writer_1.add_scalar('val_loss', val_results[0], epoch)
        writer_1.add_scalar('train_iou', train_iou, epoch)
        writer_1.add_scalar('val_iou', val_results[1], epoch)
        save_model(model, optimizer, args, epoch, val_results[0], val_results[1], logger, best=False)
        if val_results[0] < best_val_loss:
            best_val_loss = val_results[0]
            save_model(model, optimizer, args, epoch, val_results[0], val_results[1], logger, best=True)
Example #5
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)
    img_size = (args.size, args.size)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('#### This is the device used: ', device, '####')

    # model
    pretrained_model = ResNet18Backbone(False).to(device)
    model = AttSegmentator(5,
                           pretrained_model.features,
                           att_type='dotprod',
                           img_size=img_size).to(device)

    if os.path.isfile(args.pretrained_model_path):
        model = load_from_weights(model, args.pretrained_model_path, logger)

    # dataset
    data_root = args.data_folder
    train_transform, val_transform, train_transform_mask, val_transform_mask = get_transforms_binary_segmentation(
        args)
    vec_transform = ToTensor()
    train_data = DataReaderSingleClassSemanticSegmentationVector(
        os.path.join(data_root, "imgs/train2014"),
        os.path.join(data_root, "aggregated_annotations_train_5classes.json"),
        transform=train_transform,
        vec_transform=vec_transform,
        target_transform=train_transform_mask)
    # Note that the dataloaders are different.
    # During validation we want to pass all the semantic classes for each image
    # to evaluate the performance.
    val_data = DataReaderSemanticSegmentationVector(
        os.path.join(data_root, "imgs/val2014"),
        os.path.join(data_root, "aggregated_annotations_val_5classes.json"),
        transform=val_transform,
        vec_transform=vec_transform,
        target_transform=val_transform_mask)

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.bs,
                                               shuffle=True,
                                               num_workers=6,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=6,
                                             pin_memory=True,
                                             drop_last=False)

    # loss
    criterion = torch.nn.CrossEntropyLoss()
    # SGD optimizer (see pretraining)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)

    expdata = "  \n".join(
        ["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(len(train_data)))
    logger.info('val_data {}'.format(len(val_data)))

    best_val_loss = np.inf
    best_val_miou = 0.0

    train_loss_list = []
    train_iou_list = []
    val_loss_list = []
    val_iou_list = []
    for epoch in range(100):
        logger.info("Epoch {}".format(epoch))
        train_loss, train_iou = train(train_loader, model, criterion,
                                      optimizer, logger, device, epoch)
        val_loss, val_iou = validate(val_loader, model, criterion, logger,
                                     device, epoch)

        train_loss_list.append(train_loss)
        train_iou_list.append(train_iou)
        val_loss_list.append(val_loss)
        val_iou_list.append(val_iou)

        if val_iou > best_val_miou:
            best_val_miou = val_iou
            save_model(model,
                       optimizer,
                       args,
                       epoch + 1,
                       val_loss,
                       val_iou,
                       logger,
                       best_iou=True,
                       best_loss=False)

        elif val_loss < best_val_loss:
            best_val_loss = val_loss
            save_model(model,
                       optimizer,
                       args,
                       epoch + 1,
                       val_loss,
                       val_iou,
                       logger,
                       best_iou=False,
                       best_loss=True)

        elif ((epoch + 1) % 10 == 0):
            save_model(model,
                       optimizer,
                       args,
                       epoch + 1,
                       val_loss,
                       val_iou,
                       logger,
                       best_iou=False,
                       best_loss=False)

        # save the plots and raw curves
        save_fig(train_loss_list, 'train_loss')
        save_fig(train_iou_list, 'train_iou')
        save_fig(val_loss_list, 'val_loss')
        save_fig(val_iou_list, 'val_iou')

        pd.DataFrame({
            'train_loss': train_loss_list
        }).to_csv(os.path.join(args.plots_folder, 'train_loss.csv'),
                  index=False)
        pd.DataFrame({
            'train_iou': train_iou_list
        }).to_csv(os.path.join(args.plots_folder, 'train_iou.csv'),
                  index=False)
        pd.DataFrame({
            'val_loss': val_loss_list
        }).to_csv(os.path.join(args.plots_folder, 'val_loss.csv'), index=False)
        pd.DataFrame({
            'val_iou': val_iou_list
        }).to_csv(os.path.join(args.plots_folder, 'val_iou.csv'), index=False)
Example #6
def main(args):
    # Logging to the file and stdout
    logger = get_logger(args.output_folder, args.exp_name)
    img_size = (args.size, args.size)

    # model
    pretrained_model = ResNet18Backbone(pretrained=False).cuda()
    pretrained_model.load_state_dict(
        torch.load(args.weights_init, map_location=torch.device('cuda'))['model'],
        strict=False)
    model = Segmentator(5, pretrained_model.features, img_size).cuda()

    # dataset
    train_trans, val_trans, train_target_trans, val_target_trans = get_transforms_binary_segmentation(
        args)
    data_root = args.data_folder
    train_data = DataReaderSemanticSegmentation(
        os.path.join(data_root, "imgs/train2014"),
        os.path.join(data_root, "aggregated_annotations_train_5classes.json"),
        transform=train_trans,
        target_transform=train_target_trans)
    val_data = DataReaderSemanticSegmentation(
        os.path.join(data_root, "imgs/val2014"),
        os.path.join(data_root, "aggregated_annotations_val_5classes.json"),
        transform=val_trans,
        target_transform=val_target_trans)
    print("Dataset size: {} samples".format(len(train_data)))
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.bs,
                                               shuffle=True,
                                               num_workers=6,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=2,
                                             shuffle=False,
                                             num_workers=6,
                                             pin_memory=True,
                                             drop_last=False)

    # loss
    criterion = nn.CrossEntropyLoss().cuda()
    # SGD optimizer (see pretraining)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)

    expdata = "  \n".join(
        ["{} = {}".format(k, v) for k, v in vars(args).items()])
    logger.info(expdata)
    logger.info('train_data {}'.format(len(train_data)))
    logger.info('val_data {}'.format(len(val_data)))

    best_val_loss = np.inf
    best_val_miou = 0.0
    train_losses, val_losses, val_ious = [], [], []
    epochs = 100
    for epoch in range(epochs):
        logger.info("Epoch {}".format(epoch))
        t_loss = train(train_loader, model, criterion, optimizer, logger)
        train_losses.append(t_loss)
        print(f"Train Loss: {t_loss}")
        val_loss, val_iou = validate(val_loader, model, criterion, logger,
                                     epoch)
        val_losses.append(val_loss)
        val_ious.append(val_iou)
        print(f"Val Loss: {val_loss} Val iou: {val_iou}")
        if best_val_miou < val_iou:
            best_val_miou = val_iou
            save_model(model, optimizer, args, epoch, val_loss, val_iou,
                       logger)

    _, axes = plt.subplots(1, 3, figsize=(20, 10))
    axes[0].plot(range(epochs), train_losses)
    axes[0].set_xlabel('Epoch')
    axes[0].set_ylabel('Loss')
    axes[0].set_title('Training loss')

    axes[1].plot(range(epochs), val_losses)
    axes[1].set_xlabel('Epoch')
    axes[1].set_ylabel('Loss')
    axes[1].set_title('Validation loss')

    axes[2].plot(range(epochs), val_ious)
    axes[2].set_xlabel('Epoch')
    axes[2].set_ylabel('IOU')
    axes[2].set_title('Validation IOU')

    plt.savefig('Side by side mult seg.png')
    plt.close()

    plt.plot(range(epochs), train_losses, label="Train")
    plt.plot(range(epochs), val_losses, label="Validation")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig("Train vs Val mult seg.png")
    plt.close()
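
Several of the examples call a save_model helper that is never shown, and they
disagree on its keywords (best= in Example #4, best_iou=/best_loss= in Examples
#3 and #5, no flags in Example #6), so each snippet evidently pairs with its own
variant of the helper. As a rough, hypothetical sketch only, assuming the
checkpoint layout the examples load from (a dict with a 'model' key) and the
args.model_folder attribute referenced in Example #3, it might look like this:

import os
import torch

def save_model(model, optimizer, args, epoch, val_loss, val_iou, logger,
               best_iou=False, best_loss=False):
    # Hypothetical sketch; the real helper is not part of these examples.
    # The tag and filename scheme here are illustrative, not the original's.
    tag = 'best_iou' if best_iou else ('best_loss' if best_loss else 'epoch{}'.format(epoch))
    path = os.path.join(args.model_folder, 'checkpoint_{}.pth'.format(tag))
    # Store the model under a 'model' key, matching how the examples read
    # checkpoints back via torch.load(...)['model'].
    torch.save({'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'val_loss': val_loss,
                'val_iou': val_iou}, path)
    logger.info('Saved checkpoint to {}'.format(path))

Saving the optimizer state alongside the weights, as sketched here, is what
makes it possible to resume training rather than only run inference; a helper
that only supported evaluation could store model.state_dict() alone.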