Example #1
def main():
    # loading data
    dataset = datasets.PixelLinkIC15Dataset(opt.train_images_dir,
                                            opt.train_labels_dir)
    sampler = WeightedRandomSampler([1 / len(dataset)] * len(dataset),
                                    opt.batch_size,
                                    replacement=True)
    dataloader = DataLoader(dataset,
                            batch_size=opt.batch_size,
                            sampler=sampler)
    my_net = net.Net()  # construct neural network

    # choose gpu or cpu
    if opt.gpu:
        device = torch.device("cuda:0")
        my_net = my_net.cuda()
        if opt.multi_gpu:
            my_net = nn.DataParallel(my_net)
    else:
        device = torch.device("cpu")

    # train, optimize
    my_net.apply(weight_init)
    optimizer = optim.SGD(my_net.parameters(),
                          lr=opt.learning_rate,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)
    optimizer2 = optim.SGD(my_net.parameters(),
                           lr=opt.learning_rate2,
                           momentum=opt.momentum,
                           weight_decay=opt.weight_decay)

    iteration = 0
    train(opt.epoch, iteration, dataloader, my_net, optimizer, optimizer2,
          device)

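
# retrain() below rebuilds the same data pipeline, restores the snapshot saved
# at config.retrain_model_index, and presumably resumes training from that
# iteration index.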
def retrain():
    dataset = datasets.PixelLinkIC15Dataset(config.train_images_dir,
                                            config.train_labels_dir)
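    # The uniform-weight sampler below draws only config.batch_size indices
    # (with replacement), so each pass over the dataloader yields a single
    # randomly drawn batch.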
    sampler = WeightedRandomSampler([1 / len(dataset)] * len(dataset),
                                    config.batch_size,
                                    replacement=True)
    dataloader = DataLoader(dataset,
                            batch_size=config.batch_size,
                            sampler=sampler)
    my_net = net.Net()
    if config.gpu:
        device = torch.device("cuda:0")
        my_net = my_net.cuda()
        if config.multi_gpu:
            my_net = nn.DataParallel(my_net)
    else:
        device = torch.device("cpu")
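    # Restore the saved weights. Note that a state dict written from an
    # nn.DataParallel-wrapped model carries "module."-prefixed keys, so the
    # wrapping above must match the one used when the snapshot was saved.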
    my_net.load_state_dict(
        torch.load(config.saving_model_dir +
                   '%d.mdl' % config.retrain_model_index))
    optimizer = optim.SGD(my_net.parameters(),
                          lr=config.retrain_learning_rate2,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    optimizer2 = optim.SGD(my_net.parameters(),
                           lr=config.retrain_learning_rate,
                           momentum=config.momentum,
                           weight_decay=config.weight_decay)
    train(config.retrain_epoch, config.retrain_model_index, dataloader, my_net,
          optimizer, optimizer2, device)


def test_on_train_dataset(vis_per_img=10):
    dataset = datasets.PixelLinkIC15Dataset(config.train_images_dir,
                                            config.train_labels_dir,
                                            train=False)
    # dataloader = DataLoader(dataset, batch_size=config.batch_size, shuffle=False)
    my_net = net.Net()
    if config.gpu:
        device = torch.device("cuda:0")
        my_net = my_net.cuda()
        if config.multi_gpu:
            my_net = nn.DataParallel(my_net)
    else:
        device = torch.device("cpu")
    my_net.load_state_dict(
        torch.load(config.saving_model_dir +
                   '%d.mdl' % config.test_model_index))
    true_pos, true_neg, false_pos, false_neg = [0] * 4
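    # Run inference image by image and keep running TP/FP/FN counts;
    # comp_gt_and_output presumably matches predicted boxes against the
    # ground truth at an overlap threshold of 0.5.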
    for i in range(len(dataset)):
        sample = dataset[i]
        image = sample['image'].to(device)
        image = image.unsqueeze(0)
        my_labels = cal_label_on_batch(my_net, image)[0]
        # print("my labels num: %d" % len(my_labels))
        res = comp_gt_and_output(my_labels, sample["label"], 0.5)
        if i % vis_per_img == 0:
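            # Convert the CHW tensor back to an HWC BGR image for OpenCV,
            # undo the zero-mean normalization, then draw predicted boxes in
            # green and ground-truth boxes in blue before saving.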
            image = image.squeeze(0).cpu().numpy()
            image = ImgFormat.ImgOrderFormat(image,
                                             from_order="CHW",
                                             to_order="HWC")
            image = ImgTransform.UnzeroMeanImage(image, config.r_mean,
                                                 config.g_mean, config.b_mean)
            image = ImgFormat.ImgColorFormat(image,
                                             from_color="RGB",
                                             to_color="BGR")
            image = visualize_label(image, my_labels, color=(0, 255, 0))
            image = visualize_label(image,
                                    sample["label"]["coor"],
                                    color=(255, 0, 0))
            cv2.imwrite("test_output/img_%d.jpg" % i, image)
        true_pos += res[0]
        false_pos += res[1]
        false_neg += res[2]
        if (true_pos + false_pos) > 0:
            precision = true_pos / (true_pos + false_pos)
        else:
            precision = 0
        if (true_pos + false_neg) > 0:
            recall = true_pos / (true_pos + false_neg)
        else:
            recall = 0
        print("i: %d, TP: %d, FP: %d, FN: %d, precision: %f, recall: %f" %
              (i, true_pos, false_pos, false_neg, precision, recall))


def main():
    dataset = datasets.PixelLinkIC15Dataset(config.train_images_dir,
                                            config.train_labels_dir)
    sampler = WeightedRandomSampler([1 / len(dataset)] * len(dataset),
                                    config.batch_size,
                                    replacement=True)
    dataloader = DataLoader(dataset,
                            batch_size=config.batch_size,
                            sampler=sampler)
    # dataloader = DataLoader(dataset, config.batch_size, shuffle=True)
    my_net = net.Net()

    if config.gpu:
        device = torch.device("cuda:0")
        my_net = my_net.cuda()
        if config.multi_gpu:
            my_net = nn.DataParallel(my_net)
    else:
        device = torch.device("cpu")

    # nn.init.xavier_uniform_(list(my_net.parameters()))
    my_net.apply(weight_init)
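    # Two SGD optimizers over the same parameters, differing only in learning
    # rate; train() presumably switches from the first to the second after the
    # initial warm-up iterations.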
    optimizer = optim.SGD(my_net.parameters(),
                          lr=config.learning_rate,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    # if args.change:
    optimizer2 = optim.SGD(my_net.parameters(),
                           lr=config.learning_rate2,
                           momentum=config.momentum,
                           weight_decay=config.weight_decay)
    # else:
    #     optimizer2 = optim.SGD(my_net.parameters(), lr=config.learning_rate, momentum=config.momentum, weight_decay=config.weight_decay)

    iteration = 0
    train(config.epoch, iteration, dataloader, my_net, optimizer, optimizer2,
          device)


def main(retrain=False, test_datasets={}, vis_per_img=10):
    res_dir = os.path.join(out_dir, 'snapshots')
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    # shutil.copyfile(os.path.join('configs', '%s.py' % exp_name), os.path.join(out_dir, 'config.py'))
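    # Dump every non-dunder attribute of the config module to
    # out_dir/config.py so the exact hyper-parameters of this run are stored
    # alongside the snapshots.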
    with open(os.path.join(out_dir, 'config.py'), 'w') as f:
        params_names = dir(config)
        for param_name in params_names:
            if param_name.startswith('__'):
                continue
            param_value = getattr(config, param_name)
            if isinstance(param_value, str):
                f.write('%s = "%s"\n' % (param_name, param_value))
            else:
                f.write('%s = %s\n' % (param_name, param_value))

    dataset = datasets.PixelLinkIC15Dataset(
        config.train_images_dir,
        config.train_labels_dir,
        all_trains=config.all_trains,
        version=config.version,
        mean=config.mean,
        use_rotate=config.use_rotate,
        use_crop=config.use_crop,
        image_size_train=config.image_size_train,
        image_size_test=config.image_size_test)
    # sampler = WeightedRandomSampler([1/len(dataset)]*len(dataset), config.batch_size, replacement=True)
    # dataloader = DataLoader(dataset, batch_size=config.batch_size, sampler=sampler)
    dataloader = DataLoader(dataset,
                            config.batch_size,
                            shuffle=True,
                            num_workers=6)
    model = net.PixelLinkNet(
        **config.net_params)  # net.Net(config.version, config.dilation)

    if config.gpu:
        device = torch.device("cuda:0")
        model = model.cuda()
        if config.multi_gpu:
            model = nn.DataParallel(model)
    else:
        device = torch.device("cpu")

    loss = PixelLinkLoss(config.pixel_weight, config.link_weight,
                         config.neg_pos_ratio)
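    # PixelLinkLoss above combines the pixel- and link-classification terms
    # with the given weights; neg_pos_ratio presumably caps the number of
    # negative pixels, as in the hard example mining of the PixelLink paper.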
    optimizer = optim.SGD(model.parameters(),
                          lr=config.learning_rate1,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
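    # Two-phase learning-rate schedule: learning_rate1 until roughly
    # step2_start iterations (converted to an epoch index below), then
    # learning_rate2 for the remaining epochs via MultiStepLR, whose gamma is
    # the ratio of the two rates.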
    epoch_milestone = math.ceil(config.step2_start / len(dataloader))
    print('LR schedule')
    print('[%05d - %05d] : %E' % (0, epoch_milestone, config.learning_rate1))
    print('[%05d - %05d] : %E' %
          (epoch_milestone, config.epoch, config.learning_rate2))
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, [epoch_milestone],
        config.learning_rate2 / config.learning_rate1)

    global trainer, callbacks_cont
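    # ModuleTrainer, TQDM and CSVLogger appear to come from the torchsample
    # package; they are used here only for progress display and CSV logging,
    # while the training itself runs through the custom train() call below.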
    tqdm = callbacks.TQDM()
    log_path = os.path.join(out_dir, 'log_train.csv')
    index = 0
    while os.path.exists(log_path):
        index += 1
        log_path = os.path.join(out_dir, 'log_train_%02d.csv' % index)

    logger = callbacks.CSVLogger(log_path)
    trainer = ModuleTrainer(model)
    trainer.compile(optimizer, loss, callbacks=[tqdm, logger])
    callbacks_cont = callbacks.CallbackContainer(trainer._callbacks)
    callbacks_cont.set_trainer(trainer)

    if retrain:
        # find latest snapshot
        snapshots_dir = os.path.join(out_dir, 'snapshots')
        model_files = glob.glob(snapshots_dir + '/epoch_*')
        if model_files:
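            # Pick the most recent snapshot. Note that sorted() compares file
            # names lexicographically, so this only finds the true latest
            # epoch when the epoch numbers are zero-padded.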
            resume_path = sorted(model_files)[-1]
            start_epoch = 1 + int(
                os.path.basename(resume_path)[len('epoch_'):-4])
            print('Loading snapshot from : %s' % resume_path)
            checkpoint = torch.load(resume_path)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            # couldn't find snapshots
            start_epoch = 0
    else:
        start_epoch = 0

    train(config.epoch,
          dataloader,
          model,
          loss,
          optimizer,
          scheduler,
          device,
          start_epoch=start_epoch,
          test_datasets=test_datasets,
          vis_per_img=vis_per_img)


if __name__ == "__main__":
    if args.mode in ['train', 'retrain']:
        dataset_train = datasets.PixelLinkIC15Dataset(
            config.train_images_dir,
            config.train_labels_dir,
            train=False,
            all_trains=config.all_trains,
            version=config.version,
            mean=config.mean,
            image_size_test=config.image_size_test)
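        # Visualize every vis_per_img_train-th image, i.e. roughly 100
        # training images per evaluation pass.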
        vis_per_img_train = int(math.ceil(config.all_trains / 100.0))

        dataset_test = datasets.PixelLinkIC15Dataset(
            config.test_images_dir,
            config.test_labels_dir,
            train=False,
            all_trains=config.all_tests,
            version=config.version,
            mean=config.mean,
            image_size_test=config.image_size_test)
        vis_per_img_test = int(math.ceil(config.all_tests / 100.0))