Code example #1
def main(FLAGS):

    "train and validate the Unet model"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #data directory
    data_dir = FLAGS.dataset_dir
    #log_directory
    log_dir = FLAGS.log_dir
    # Hyper and other parameters
    train_batch_size = FLAGS.train_batch_size
    val_batch_size = FLAGS.val_batch_size
    aug_flag = FLAGS.aug
    num_epochs = FLAGS.epochs
    num_classes = 2
    # get the train and validation dataloaders
    dataloaders = get_dataloaders(data_dir, train_batch_size, val_batch_size,
                                  aug_flag)
    model = Unet(3, num_classes)

    # Run training on multiple GPUs when more than one is available
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model, device_ids=[0, 1])
    else:
        print("No multiple GPUs found, running on a single device")
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=0.02,
                          momentum=0.9,
                          weight_decay=0.0005)
    #optimizer = optim.Adam(model.parameters(),lr = learning_rate)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    plotter = VisdomLinePlotter(env_name='Unet Train')
    # uncomment for the learning rate scheduler
    train_val(dataloaders, model, criterion, optimizer, num_epochs, log_dir,
              device)
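
main() expects a FLAGS namespace carrying the paths and hyperparameters read above. A minimal argparse entry point is sketched below as an assumption; the flag names simply mirror the attributes used in the snippet (dataset_dir, log_dir, train_batch_size, val_batch_size, aug, epochs) and the defaults are illustrative, not part of the original code.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train and validate the Unet model")
    parser.add_argument("--dataset_dir", type=str, required=True, help="root directory of the dataset")
    parser.add_argument("--log_dir", type=str, default="logs/", help="directory for logs and checkpoints")
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--val_batch_size", type=int, default=8)
    parser.add_argument("--aug", action="store_true", help="enable data augmentation")
    parser.add_argument("--epochs", type=int, default=50)
    FLAGS = parser.parse_args()
    main(FLAGS)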
Code example #2
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
df = pd.read_csv(sample_submission_path)
testset = DataLoader(
    TestDataset(test_data_folder, df, mean, std),
    batch_size=batch_size,
    shuffle=False,
    num_workers=num_workers,
    pin_memory=True
)

# Initialize model and load trained weights
ckpt_path = "../input/res18ex6/model26.pth"
device = torch.device("cuda")
model = Unet("resnet18", encoder_weights=None, classes=4, activation=None)
model.to(device)
model.eval()
state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(state["state_dict"])

# start prediction
predictions = []
for i, batch in enumerate(tqdm(testset)):
    fnames, images = batch
    batch_preds = torch.sigmoid(model(images.to(device)))
    batch_preds = batch_preds.detach().cpu().numpy()
    for fname, preds in zip(fnames, batch_preds):
        for cls, pred in enumerate(preds):
            pred, num = post_process(pred, best_threshold, min_size)
            rle = mask2rle(pred)
            name = fname + f"_{cls+1}"
            predictions.append([name, rle])  # collect (image_class, rle) pairs for the submission
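
post_process and mask2rle are defined elsewhere and not shown in this snippet. As a sketch of what post_process is assumed to do (the call pattern suggests thresholding the sigmoid output and dropping connected components smaller than min_size), a minimal implementation could look like the following; it is an illustration, not the original helper:

import cv2
import numpy as np

def post_process(probability, threshold, min_size):
    # binarize the per-class probability map
    mask = (probability > threshold).astype(np.uint8)
    # label connected components and keep only those larger than min_size
    num_component, component = cv2.connectedComponents(mask)
    predictions = np.zeros(probability.shape, np.float32)
    num = 0
    for c in range(1, num_component):
        p = (component == c)
        if p.sum() > min_size:
            predictions[p] = 1
            num += 1
    return predictions, num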
Code example #3
def load_GPUS(model, model_path, kwargs):
    # signature and loading step inferred from the (commented-out) call site below;
    # checkpoints saved under nn.DataParallel prefix every key with `module.`
    state_dict = torch.load(model_path, **kwargs)
    new_state_dict = OrderedDict()  # requires: from collections import OrderedDict
    for k, v in state_dict['net'].items():
        name = k[7:]  # strip the leading `module.`
        new_state_dict[name] = v
    # load the re-keyed params
    model.load_state_dict(new_state_dict)
    return model


if __name__ == "__main__":

    matches = [100, 200, 300, 400, 500, 600, 700, 800]
    dir_checkpoint = 'checkpoints/'
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
    #net = Unet(n_channels=3, n_classes=8, bilinear=True)
    net = Unet(n_channels=3, n_classes=8)
    net.to(device=device)
    #net=load_GPUS(net, dir_checkpoint + 'best_score_model_res50_deeplabv3+.pth', kwargs)
    checkpoint = torch.load(dir_checkpoint + 'student_net.pth',
                            map_location=device)
    net.load_state_dict(checkpoint['net'])
    logging.info("Model loaded !")

    list_path = "data/test.lst"
    output_path = "data/results/"
    img_list = [line.strip('\n') for line in open(list_path)]
    for i, fn in tqdm(enumerate(img_list)):
        save_img = np.zeros((256, 256), dtype=np.uint16)
        logging.info("\nPredicting image {} ...".format(i))
        img = Image.open(fn)
        pre, _ = predict_img(net, img, device)
        for i in range(256):
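
The snippet is cut off inside the per-pixel loop. As an assumption about the remaining step (not the original code), the usual pattern here maps each class index predicted by predict_img to its label value in matches and writes the result out; a self-contained sketch with a hypothetical helper name:

import os
import numpy as np
from PIL import Image

def save_prediction(pre, matches, fn, output_path):
    # pre: (256, 256) array of class indices returned by predict_img (assumed)
    lut = np.array(matches, dtype=np.uint16)       # class index -> label value
    save_img = lut[pre.astype(np.int64)]           # vectorised per-pixel lookup
    out_name = os.path.splitext(os.path.basename(fn))[0] + ".png"
    Image.fromarray(save_img).save(os.path.join(output_path, out_name))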