Example #1
def train(args, model):
    model.train()

    input_transform = output_transform = None
    dataset = RVSC(args.datadir, input_transform, output_transform)
    loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    weight = torch.ones(2)
    if args.cuda:
        weight = weight.cuda()
    criterion = CrossEntropyLoss2d(weight)

    optimizer = Adam(model.parameters())

    for epoch in range(1, args.num_epochs+1):
        epoch_loss = []

        for step, (images, labels) in enumerate(loader):
            if args.cuda:
                images = images.cuda()
                labels = labels.cuda()

            x = Variable(images)       # torch.Variable does not exist; Variable comes from torch.autograd (pre-0.4 API)
            y_true = Variable(labels)
            y_pred = model(x)

            optimizer.zero_grad()
            loss = criterion(y_pred, y_true)
            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data[0])  # loss.item() on PyTorch >= 0.4
            print(loss.data[0])
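
None of these examples define CrossEntropyLoss2d itself; historically it was a thin wrapper that applies log_softmax over the class dimension and feeds the result to NLLLoss. A minimal sketch of that usual formulation follows (an assumption here, not the definition from any of these repos; on PyTorch >= 1.0, plain nn.CrossEntropyLoss accepts 4-D inputs directly):

import torch.nn as nn
import torch.nn.functional as F

class CrossEntropyLoss2d(nn.Module):
    """Pixel-wise cross entropy: log_softmax over the class dim, then NLLLoss."""

    def __init__(self, weight=None, ignore_index=-100):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss(weight=weight, ignore_index=ignore_index)

    def forward(self, outputs, targets):
        # outputs: (N, C, H, W) raw logits; targets: (N, H, W) class indices
        return self.nll_loss(F.log_softmax(outputs, dim=1), targets)
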
Example #2
def main():
    use_cuda = torch.cuda.is_available()
    path = os.path.expanduser('/home/yxk/data/')

    dataset = voc_loader.VOC2012ClassSeg(root=path,
                                         split='train',
                                         transform=True)

    vgg_model = models.VGGNet(requires_grad=True)
    fcn_model = models.FCN8s(pretrained_net=vgg_model, n_class=n_class)
    fcn_model.load_state_dict(
        torch.load('./pretrained_models/model120.pth', map_location='cpu'))

    fcn_model.eval()

    if use_cuda:
        fcn_model.cuda()

    criterion = CrossEntropyLoss2d()

    for i in range(len(dataset)):
        idx = random.randrange(0, len(dataset))
        img, label = dataset[idx]
        img_name = str(i)

        img_src, _ = dataset.untransform(img, label)  # whc

        cv2.imwrite(path + 'image/%s_src.jpg' % img_name, img_src)
        tools.labelTopng(label,
                         path + 'image/%s_label.png' % img_name)  # convert the label map to a PNG image

        # a = tools.labelToimg(label)
        #
        # print(a)

        if use_cuda:
            img = img.cuda()
            label = label.cuda()
        img = Variable(img.unsqueeze(0), volatile=True)      # pre-0.4 inference API
        label = Variable(label.unsqueeze(0), volatile=True)  # (see the torch.no_grad() sketch below)
        # print("label: ", label.data)

        out = fcn_model(img)  # (1, 21, 320, 320)
        loss = criterion(out, label)
        # print(img_name, 'loss:', loss.data[0])

        net_out = out.data.max(1)[1].squeeze_(0)  # 320, 320
        # print(out.data.max(1)[1].shape)
        # print("out", net_out)
        if use_cuda:
            net_out = net_out.cpu()

        tools.labelTopng(net_out,
                         path + 'image/%s_out.png' % img_name)  # convert the network output to a PNG image

        if i == 10:
            break
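
Variable and volatile=True were removed in PyTorch 0.4; the equivalent inference step today wraps the forward pass in torch.no_grad(). A sketch of the same step with the modern API, reusing the names from the example above:

with torch.no_grad():
    img_batch = img.unsqueeze(0)            # (1, C, H, W)
    label_batch = label.unsqueeze(0)
    out = fcn_model(img_batch)              # (1, n_class, H, W)
    loss = criterion(out, label_batch)
    net_out = out.argmax(dim=1).squeeze(0)  # (H, W) predicted class indices
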
Example #3
def train(model,
          train_loader,
          device,
          tile_size,
          epochs=10,
          batch_size=1,
          learning_rate=1e-4,
          momentum=0.9,
          weight_decay=5e-3):

    criterion = CrossEntropyLoss2d()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=learning_rate,
                                momentum=momentum,
                                weight_decay=weight_decay)

    model.train()

    model = model.to(device=device)
    criterion = criterion.to(device=device)

    training_stats = utils.Stats()

    for n in range(epochs):

        epoch_stats = utils.Stats()

        loader_with_progress = utils.loader_with_progress(train_loader,
                                                          epoch_n=n,
                                                          epoch_total=epochs,
                                                          stats=epoch_stats,
                                                          leave=True)

        for i, (x, y) in enumerate(loader_with_progress):

            y = y.to(device=device)
            x = x.to(device=device)

            y_pred = model(x)
            loss = criterion(y_pred, y)

            epoch_stats.append_loss(loss.item())
            training_stats.append_loss(loss.item())

            loader_with_progress.set_postfix(epoch_stats.fmt_dict())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    return model, training_stats
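
A hedged usage sketch for this train signature; the device setup is standard, but the tile size and the model/loader names are placeholders, not taken from the original repo:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
trained_model, stats = train(model,
                             train_loader,
                             device,
                             tile_size=(256, 256),  # hypothetical tile size
                             epochs=10,
                             learning_rate=1e-4)
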
Example #4
def main(args):
    logging.info("Construct dataset...")

    input_transform = normalize
    output_transform = None
    dataset = RVSC(args.datadir, input_transform, output_transform)

    # extract number of channels in input images / number of classes
    image, mask = dataset[0]
    channels, _, _ = image.shape

    logging.info("Construct model...")

    model = DilatedDenseNet(image_channels=channels,
                            num_init_features=args.features,
                            growth_rate=args.features,
                            layers=args.layers,
                            dropout_rate=args.dropout,
                            classes=args.classes,
                            dilated=True)

    if args.cuda:
        model = model.cuda()

    # setup cross entropy loss
    weight = torch.ones(2)
    if args.cuda:
        weight = weight.cuda()
    criterion = CrossEntropyLoss2d(weight)

    optimizer = Adam(model.parameters())

    if args.mode == 'train':
        train_loader, val_loader = train_val_dataloaders(
            dataset, args.val_split, args.batch_size, args.seed)

        logging.info("Begin training...")

        for epoch in range(args.start_epoch, args.num_epochs + 1):
            train_epoch(epoch, args, model, train_loader, criterion, optimizer)
            evaluate(args, model, val_loader, criterion)

    if args.mode == 'eval':
        logging.info("Begin model evaluation...")
        loader = DataLoader(dataset, batch_size=args.batch_size)
        evaluate(args, model, loader, criterion)
Example #5
def train():
    i = 0
    net = FCDenseNet103(1, 3)
    # net = DeepUNetV2()
    # net.load_state_dict(t.load('./models/142.pkl'))
    net = net.cuda()
    net.train()
    train_data = DataSet('data/', train=True)

    # loss_weight = t.from_numpy(np.array(opt.loss_weight))
    # loss_weight = (loss_weight.float()).cuda()

    train_dataloader = DataLoader(train_data, opt.batch_size, shuffle=True, num_workers=opt.num_workers)
    criterion = CrossEntropyLoss2d()
    
    optimizer = t.optim.SGD(net.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
    for epoch in range(opt.max_epoch):
        loss_all = 0
        num = 0
        for data, label in train_dataloader:

            inputs = Variable(data)
            target = Variable(label)
            target = target.long()
            if opt.use_GPU:
                inputs = inputs.cuda()
                target = target.cuda()
            optimizer.zero_grad()
            pre_target = net(inputs)
            loss = criterion(pre_target, target)
            loss.backward()
            optimizer.step()
            loss_all = loss_all + loss.data[0]  # loss.item() on PyTorch >= 0.4
            i += 1
            num += 1
            # visdomGUI(i, loss, F.log_softmax(pre_target), target)
        now = 143  # offset so checkpoint numbering continues from the loaded 142.pkl
        per_loss = float(loss_all / num)
        print("Epoch {} loss: {}".format(epoch, per_loss))
        _dir = './models/'
        t.save(net.state_dict(),_dir+str(epoch+now)+'.pkl')
        t.save(net,_dir+str(epoch+now)+'.pth')
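
This example writes two files per epoch: the state_dict to a .pkl and the pickled module to a .pth. Resuming later only needs the state_dict, matching the commented-out load near the top of the function; a sketch, with the checkpoint number assumed:

net = FCDenseNet103(1, 3)
net.load_state_dict(t.load('./models/143.pkl'))  # hypothetical checkpoint number
net = net.cuda()
net.train()
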
Example #6
                              pin_memory=True)

if torch.cuda.is_available():
    model = torch.nn.DataParallel(FCN(NUM_CLASSES))
    model.cuda()
    model.load_state_dict(torch.load("./pth/fcn-deconv.pth"))

epoches = 80
lr = 1e-4
weight_decay = 2e-5
momentum = 0.9
weight = torch.ones(NUM_CLASSES)
# weight[21] = 0
max_iters = 92 * epoches

criterion = CrossEntropyLoss2d(weight.cuda())
optimizer = torch.optim.SGD(model.parameters(),
                            lr=lr,
                            momentum=momentum,
                            weight_decay=weight_decay)
ploter = LinePlotter()

model.train()
for epoch in range(epoches):
    running_loss = 0.0
    for i, (images, labels_group) in tqdm.tqdm(
            enumerate(trainloader),
            total=int(len(trainloader.dataset) / trainloader.batch_size)):
        if torch.cuda.is_available():
            images = [Variable(image.cuda()) for image in images]
            labels_group = [labels for labels in labels_group]
Example #7
                                      split="val",
                                      img_transform=input_transform,
                                      label_transform=target_transform,
                                      label_2_transform=target_2_transform,
                                      label_4_transform=target_4_transform),
                            batch_size=1,
                            pin_memory=True)

if torch.cuda.is_available():
    model = torch.nn.DataParallel(FCN(NUM_CLASSES))
    model.cuda()

epoches = 8
lr = 1e-3

criterion = CrossEntropyLoss2d()
optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.99))

# pretrained_dict = torch.load("./pth/fcn-deconv-40.pth")
# model_dict = model.state_dict()
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# model_dict.update(pretrained_dict)
# model.load_state_dict(model_dict)

# model.load_state_dict(torch.load("./pth/seg-skip-all-25.pth"))

model.train()

x_index = 1

for epoch in range(epoches):
Example #8
def main():
    # make model
    task1_classes = 2
    task2_classes = 37
    # model = linknet.LinkNet34MTL(task1_classes, task2_classes)
    model = stack_module.StackHourglassNetMTL(task1_classes, task2_classes)
    model.cuda()

    # make data loader
    data_dir = r'/hdd/pgm/patches_mtl_nz/patches'
    batch_size = 1
    train_file = r'/hdd/pgm/patches_mtl_nz/file_list_train.txt'
    valid_file = r'/hdd/pgm/patches_mtl_nz/file_list_valid.txt'
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    tsfm_train = A.Compose([
        A.Flip(),
        A.RandomRotate90(),
        A.Normalize(mean=mean, std=std),
        ToTensor(sigmoid=False),
    ])
    tsfm_valid = A.Compose([
        A.Normalize(mean=mean, std=std),
        ToTensor(sigmoid=False),
    ])
    train_loader = DataLoader(dataset.RSDataLoader(data_dir,
                                                   train_file,
                                                   transforms=tsfm_train),
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)
    valid_loader = DataLoader(dataset.RSDataLoader(data_dir,
                                                   valid_file,
                                                   transforms=tsfm_valid),
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=4)

    # prepare training
    experiment_dir = r'/hdd6/Models/line_mtl'
    train_file = "{}/train_loss.txt".format(experiment_dir)
    test_file = "{}/test_loss.txt".format(experiment_dir)
    train_loss_file = open(train_file, "w")
    val_loss_file = open(test_file, "w")
    train_file_angle = "{}/train_angle_loss.txt".format(experiment_dir)
    test_file_angle = "{}/test_angle_loss.txt".format(experiment_dir)
    train_loss_angle_file = open(train_file_angle, "w")
    val_loss_angle_file = open(test_file_angle, "w")

    best_accuracy = 0
    best_miou = 0
    start_epoch = 1
    total_epochs = 120
    lr_drop_epoch = [60, 90, 110]
    lr_step = 0.1
    lr = 1e-3
    seed = 1
    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)
    weights_init(model, manual_seed=seed)
    summary(model, print_arch=False)

    scheduler = MultiStepLR(
        optimizer,
        milestones=lr_drop_epoch,
        gamma=lr_step,
    )
    weights = torch.ones(task1_classes).cuda()
    weights_angles = torch.ones(task2_classes).cuda()

    angle_loss = CrossEntropyLoss2d(weight=weights_angles,
                                    size_average=True,
                                    ignore_index=255,
                                    reduce=True).cuda()
    road_loss = mIoULoss(weight=weights,
                         size_average=True,
                         n_classes=task1_classes).cuda()

    for epoch in range(start_epoch, total_epochs + 1):
        start_time = datetime.now()
        scheduler.step(epoch)  # the epoch argument is deprecated since PyTorch 1.1; see the sketch after this example
        print("\nTraining Epoch: %d" % epoch)
        train(model, optimizer, epoch, task1_classes, task2_classes,
              train_loader, road_loss, angle_loss, train_loss_file,
              val_loss_file, train_loss_angle_file, val_loss_angle_file)
        if epoch % 1 == 0:  # evaluate every epoch (interval kept configurable)
            print("\nTesting Epoch: %d" % epoch)
            val_loss = test(epoch, optimizer, model, task1_classes,
                            task2_classes, valid_loader, road_loss, angle_loss,
                            train_loss_file, val_loss_file,
                            train_loss_angle_file, val_loss_angle_file,
                            experiment_dir, best_accuracy, best_miou)

        end_time = datetime.now()
        print("Time Elapsed for epoch => {1}".format(epoch,
                                                     end_time - start_time))
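
Passing the epoch to scheduler.step(epoch) is deprecated since PyTorch 1.1. The current convention for MultiStepLR is optimizer.step() once per batch, then scheduler.step() once per epoch with no argument. A generic sketch (model, criterion, and train_loader here are placeholders, not the train()/test() helpers used above):

from torch.optim.lr_scheduler import MultiStepLR

scheduler = MultiStepLR(optimizer, milestones=[60, 90, 110], gamma=0.1)

for epoch in range(1, total_epochs + 1):
    for images, targets in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(images), targets)
        loss.backward()
        optimizer.step()
    scheduler.step()  # lr is multiplied by 0.1 after epochs 60, 90 and 110
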
Example #9
train_loader = torch.utils.data.DataLoader(src_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           **kwargs)

weight = get_class_weight_from_file(n_class=args.n_class,
                                    weight_filename=args.loss_weights_file,
                                    add_bg_loss=args.add_bg_loss)

if torch.cuda.is_available():
    model_g1.cuda()
    model_g2.cuda()
    model_f1.cuda()
    weight = weight.cuda()

criterion = CrossEntropyLoss2d(weight)

configure(args.tflog_dir, flush_secs=5)

model_g1.train()
model_g2.train()
model_f1.train()
if args.fix_bn:
    print(emphasize_str("BN layers are NOT trained!"))
    fix_batchnorm_when_training(model_g1)
    fix_batchnorm_when_training(model_g2)
    fix_batchnorm_when_training(model_f1)

    # check_training(model)

for epoch in range(start_epoch, args.epochs):
Example #10
def main():

    # set torch and numpy seed for reproducibility
    torch.manual_seed(settings.MANUAL_SEED)
    np.random.seed(settings.MANUAL_SEED)

    # tensorboard writer
    writer = SummaryWriter(settings.TENSORBOARD_DIR)
    # makedir snapshot
    makedir(settings.CHECKPOINT_DIR)

    # enable cudnn
    torch.backends.cudnn.enabled = True

    # create segmentor network
    model = Segmentor(pretrained=settings.PRETRAINED,
                      num_classes=settings.NUM_CLASSES,
                      modality=settings.MODALITY)

    model.train()
    model.cuda()

    torch.backends.cudnn.benchmark = True

    # dataset and dataloader
    dataset = TrainDataset()
    dataloader = data.DataLoader(dataset,
                                 batch_size=settings.BATCH_SIZE,
                                 shuffle=True,
                                 num_workers=settings.NUM_WORKERS,
                                 pin_memory=True,
                                 drop_last=True)

    test_dataset = TestDataset(data_root=settings.DATA_ROOT_VAL,
                               data_list=settings.DATA_LIST_VAL)
    test_dataloader = data.DataLoader(test_dataset,
                                      batch_size=1,
                                      shuffle=False,
                                      num_workers=settings.NUM_WORKERS,
                                      pin_memory=True)

    # optimizer for generator network (segmentor)
    optimizer = optim.SGD(model.optim_parameters(settings.LR),
                          lr=settings.LR,
                          momentum=settings.LR_MOMENTUM,
                          weight_decay=settings.WEIGHT_DECAY)

    # lr scheduler for optimizer
    # lr_lambda = lambda epoch: (1 - epoch / settings.EPOCHS) ** settings.LR_POLY_POWER
    # lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
    lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=40 * len(dataloader), eta_min=1e-7)

    # losses
    ce_loss = CrossEntropyLoss2d(
        ignore_index=settings.IGNORE_LABEL)  # to use for segmentor
    # ce_loss = FocalLoss(ignore_index=settings.IGNORE_LABEL, gamma=2)

    # upsampling for the network output
    upsample = nn.Upsample(size=(settings.CROP_SIZE, settings.CROP_SIZE),
                           mode='bilinear',
                           align_corners=True)

    last_epoch = -1
    if settings.RESUME_TRAIN:
        checkpoint = torch.load(settings.LAST_CHECKPOINT)

        model.load_state_dict(checkpoint['model_state_dict'])
        model.train()
        model.cuda()
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
        last_epoch = checkpoint['epoch']

        # purge the logs after the last_epoch
        writer = SummaryWriter(settings.TENSORBOARD_DIR,
                               purge_step=(last_epoch + 1) * len(dataloader))

    for epoch in range(last_epoch + 1, settings.EPOCHS + 1):

        train_one_epoch(model,
                        optimizer,
                        lr_scheduler,
                        dataloader,
                        test_dataloader,
                        epoch,
                        upsample,
                        ce_loss,
                        writer,
                        print_freq=5,
                        eval_freq=settings.EVAL_FREQ)

        if epoch % settings.CHECKPOINT_FREQ == 0 and epoch != 0:
            save_checkpoint(epoch, model, optimizer, lr_scheduler)

        # save the final model
        if epoch >= settings.EPOCHS:
            print('saving the final model')
            save_checkpoint(epoch, model, optimizer, lr_scheduler)
            writer.close()
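
With T_0=40 * len(dataloader), the warm-restart period of this cosine schedule is measured in iterations rather than epochs, so the scheduler is presumably stepped once per batch inside train_one_epoch. A sketch of that stepping pattern (an assumption about the helper, which is not shown here):

for images, labels in dataloader:
    optimizer.zero_grad()
    preds = upsample(model(images.cuda()))
    loss = ce_loss(preds, labels.cuda())
    loss.backward()
    optimizer.step()
    lr_scheduler.step()  # advance the cosine schedule by one iteration
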
Example #11
def main():

    # tensorboard writer
    writer = SummaryWriter(settings.TENSORBOARD_DIR)
    # makedir snapshot
    makedir(settings.CHECKPOINT_DIR)

    # enable cudnn
    torch.backends.cudnn.enabled = True

    # create segmentor network
    model = Segmentor(pretrained=settings.PRETRAINED,
                      num_classes=settings.NUM_CLASSES,
                      modality=settings.MODALITY)

    model.train()
    model.cuda()

    torch.backends.cudnn.benchmark = True

    # dataset and dataloader
    dataset = TrainDataset()
    dataloader = data.DataLoader(dataset,
                                 batch_size=settings.BATCH_SIZE,
                                 shuffle=True,
                                 num_workers=settings.NUM_WORKERS,
                                 pin_memory=True,
                                 drop_last=True)

    dataloader_iter = enumerate(dataloader)

    # optimizer for generator network (segmentor)
    optim = SGD(model.optim_parameters(settings.LR),
                lr=settings.LR,
                momentum=settings.LR_MOMENTUM,
                weight_decay=settings.WEIGHT_DECAY)

    # losses
    ce_loss = CrossEntropyLoss2d(
        ignore_index=settings.IGNORE_LABEL)  # to use for segmentor

    # upsampling for the network output
    upsample = nn.Upsample(size=(settings.CROP_SIZE, settings.CROP_SIZE),
                           mode='bilinear',
                           align_corners=True)

    # confusion matrix ; to track metrics such as mIoU during training
    conf_mat = np.zeros((settings.NUM_CLASSES, settings.NUM_CLASSES))

    for i_iter in range(settings.MAX_ITER):

        # initialize losses
        loss_G_seg_value = 0

        # clear optim gradients and adjust learning rates
        optim.zero_grad()

        lr_poly_scheduler(optim, settings.LR, settings.LR_DECAY_ITER, i_iter,
                          settings.MAX_ITER, settings.LR_POLY_POWER)

        ####### train generator #######

        # get the batch of data
        try:
            _, batch = next(dataloader_iter)
        except StopIteration:
            dataloader_iter = enumerate(dataloader)
            _, batch = next(dataloader_iter)

        images, depths, labels = batch
        images = images.cuda()
        depths = depths.cuda()
        labels = labels.cuda()

        # get a mask where an element is True for every pixel with the ignore_label value
        ignore_mask = (labels == settings.IGNORE_LABEL)
        target_mask = torch.logical_not(ignore_mask)
        target_mask = target_mask.unsqueeze(dim=1)

        # get the output of generator
        if settings.MODALITY == 'rgb':
            predict = upsample(model(images))
        elif settings.MODALITY == 'middle':
            predict = upsample(model(images, depths))

        # calculate cross-entropy loss
        loss_G_seg = ce_loss(predict, labels)

        # accumulate loss, backward and store value
        loss_G_seg.backward()

        loss_G_seg_value += loss_G_seg.data.cpu().numpy()
        ####### end of train generator #######

        optim.step()

        # get pred and gt to compute confusion matrix
        seg_pred = np.argmax(predict.detach().cpu().numpy(), axis=1)
        seg_gt = labels.cpu().numpy().copy()

        seg_pred = seg_pred[target_mask.squeeze(dim=1).cpu().numpy()]
        seg_gt = seg_gt[target_mask.squeeze(dim=1).cpu().numpy()]

        conf_mat += confusion_matrix(seg_gt,
                                     seg_pred,
                                     labels=np.arange(settings.NUM_CLASSES))

        ####### log ########
        if i_iter % (settings.TRAIN_SIZE // settings.BATCH_SIZE) == 0 and i_iter != 0:
            metrics = evaluate(conf_mat)
            writer.add_scalar('Pixel Accuracy/Train', metrics['pAcc'], i_iter)
            writer.add_scalar('Mean Accuracy/Train', metrics['mAcc'], i_iter)
            writer.add_scalar('mIoU/Train', metrics['mIoU'], i_iter)
            writer.add_scalar('fwavacc/Train', metrics['fIoU'], i_iter)
            conf_mat = np.zeros_like(conf_mat)

        writer.add_scalar('Loss_G_SEG/Train', loss_G_seg_value, i_iter)
        writer.add_scalar('learning_rate_G/Train', optim.param_groups[0]['lr'],
                          i_iter)

        print("iter = {:6d}/{:6d},\t loss_seg = {:.3f}".format(
            i_iter, settings.MAX_ITER, loss_G_seg_value))

        with open(settings.LOG_FILE, "a") as f:
            output_log = '{:6d},\t {:.8f}\n'.format(i_iter, loss_G_seg_value)
            f.write(output_log)

        # taking snapshot
        if i_iter >= settings.MAX_ITER:
            print('saving the final model ...')
            torch.save(
                model.state_dict(),
                osp.join(settings.CHECKPOINT_DIR,
                         'CHECKPOINT_' + str(settings.MAX_ITER) + '.pt'))
            break

        if i_iter % settings.SAVE_EVERY == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(settings.CHECKPOINT_DIR,
                         'CHECKPOINT_' + str(i_iter) + '.pt'))
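
The try/except around next(dataloader_iter) is the standard trick for driving an iteration-based loop from an epoch-based DataLoader. The same idea reads more cleanly as a small generator; a sketch (infinite_batches is a hypothetical helper, not in the original):

def infinite_batches(loader):
    """Yield batches forever, restarting the loader whenever it is exhausted."""
    while True:
        for batch in loader:
            yield batch

batch_iter = infinite_batches(dataloader)
images, depths, labels = next(batch_iter)  # replaces the try/except block above
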
Example #12
    opt.input = os.path.join(opt.model_dir, opt.input)

# set the device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('running on device ' + str(device))

# load the model checkpoint
print('loading checkpoint:  ' + opt.input)
checkpoint = torch.load(opt.input)

num_classes = opt.num_classes

# create the model architecture
print('num classes:  ' + str(num_classes))

criterion = CrossEntropyLoss2d(size_average=True, ignore_index=None).cuda()
model = network.get_net_ori(opt, criterion)
model = nn.DataParallel(model)

# load the model weights
model.load_state_dict(checkpoint)

model.to(device)
model.eval()

print(model)
print('')

# create example image data
resolution = [opt.width, opt.height]
input = torch.ones((1, 3, resolution[0], resolution[1])).cuda()  # note: (N, C, W, H) here; PyTorch convention is (N, C, H, W)
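
The all-ones tensor at the end is the usual dummy input for tracing or exporting the freshly loaded model. A sketch assuming ONNX export is the goal (the output filename is hypothetical):

torch.onnx.export(model.module,   # unwrap nn.DataParallel before exporting
                  input,
                  'segmentation_model.onnx',
                  input_names=['input'],
                  output_names=['output'])
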
Example #13
########################################################################################################################
##### Model
vgg_model = VGGNet(requires_grad=True)
fcn = FCN8s(pretrained_net=vgg_model, n_class=len(facades_classes)).cuda()

##### Optimizer and Scheduler
optimizer = torch.optim.RMSprop(fcn.parameters(),
                                lr=lr,
                                momentum=momentum,
                                weight_decay=w_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                            step_size=step_size,
                                            gamma=gamma)

##### Loss
loss = CrossEntropyLoss2d()
##### Dataset and Dataloader
city_train = city(mode="train", classes=facades_classes)
city_train_loader = DataLoader(dataset=city_train, batch_size=batch_size)

city_val = city(mode="val", classes=facades_classes)
city_val_loader = DataLoader(dataset=city_val, batch_size=batch_size)

########################################################################################################################

Train(model=fcn,
      optimizer=optimizer,
      scheduler=scheduler,
      loss=loss,
      train_loader=city_train_loader,
      val_loader=city_val_loader,
Example #14
    extend_transforms.RandomRotate(args['rotate_degree']),
    extend_transforms.RandomCrop(args['train_crop_size']),
])

train_set = culane.CULANE('train',
                          joint_transform=train_joint_transform,
                          transform=img_transform,
                          mask_transform=mask_transform)
train_loader = DataLoader(train_set,
                          batch_size=args['train_batch_size'],
                          num_workers=10,
                          shuffle=True)

criterion = CrossEntropyLoss2d(weight=torch.Tensor([0.4, 1, 1, 1, 1]).cuda(),
                               size_average=True,
                               ignore_index=culane.ignore_label,
                               aux_weight=args['aux_weight'],
                               print_aux=args['print_aux'])
criterion = criterion.cuda()

writer = SummaryWriter(os.path.join(ckpt_path, exp_name, 'tboard'))
log_path = os.path.join(ckpt_path, exp_name,
                        'train' + str(datetime.datetime.now()) + '.txt')


def main():
    net = Baseline(num_classes=culane.num_classes,
                   deep_base=args['deep_base']).cuda().train()
    net = DataParallelWithCallback(net)

    optimizer = optim.SGD([{
Example #15
    src_dataset, tgt_dataset),
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True)

weight = get_class_weight_from_file(n_class=args.n_class,
                                    weight_filename=args.loss_weights_file,
                                    add_bg_loss=args.add_bg_loss)
if torch.cuda.is_available():
    model_g_3ch.cuda()
    model_g_1ch.cuda()
    model_f1.cuda()
    model_f2.cuda()
    weight = weight.cuda()

criterion = CrossEntropyLoss2d(
    weight) if "Gate" not in args.method_detail else ProbCrossEntropyLoss2d(
        weight)
criterion_d = get_prob_distance_criterion(args.d_loss)

model_g_3ch.train()
model_g_1ch.train()
model_f1.train()
model_f2.train()

if args.no_dropout:
    print("NO DROPOUT")
    fix_dropout_when_training(model_g_3ch)
    fix_dropout_when_training(model_g_1ch)
    fix_dropout_when_training(model_f1)
    fix_dropout_when_training(model_f2)
Example #16
def train(
    model,
    train_loader,
    device,
    tile_size,
    epochs=10,
    batch_size=1,
    learning_rate=1e-4,
    momentum=0.9,
    weight_decay=5e-3,
):

    writer = SummaryWriter(
        comment=f'LR_{learning_rate}_BS_{batch_size}_Epochs_{epochs}')

    since = time.time()
    criterion = CrossEntropyLoss2d()

    # optimizer = torch.optim.SGD(
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=learning_rate,
        # momentum=momentum,
        weight_decay=weight_decay,
    )
    model.train()
    model = model.to(device=device)

    summary(model, (3, tile_size[0], tile_size[1]))

    criterion = criterion.to(device=device)
    training_stats = utils.Stats()
    running_loss = 0.0

    for n in range(epochs):
        epoch_stats = utils.Stats()
        loader_with_progress = utils.loader_with_progress(train_loader,
                                                          epoch_n=n,
                                                          epoch_total=epochs,
                                                          stats=epoch_stats,
                                                          leave=True)
        progress_bar_output = io.StringIO()
        with redirect_stderr(progress_bar_output):
            for i, (x, y) in enumerate(loader_with_progress):
                # for x, y in loader_with_progress:
                y = y.to(device=device)
                x = x.to(device=device)
                y_pred = model(x)
                loss = criterion(y_pred, y)
                epoch_stats.append_loss(loss.item())
                training_stats.append_loss(loss.item())

                loader_with_progress.set_postfix(epoch_stats.fmt_dict())
                # print(flush=True)
                # sys.stdout.flush()

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                writer.add_scalar("training loss", loss.item(),
                                  n * len(train_loader) + i)

    time_elapsed = time.time() - since
    print("Training complete in {:.0f}m {:.0f}s".format(
        time_elapsed // 60, time_elapsed % 60))

    writer.add_graph(model, x)
    writer.close()

    # print('Best val Acc: {:4f}'.format(best_acc))
    return model, training_stats
Example #17
def main(train_loader, val_loader, num_e=59):

    torch.cuda.manual_seed(1)
    unet.cuda()
    weight_1 = torch.from_numpy(np.array(segment_weight)).type(
        torch.FloatTensor).cuda()
    weight_2 = torch.from_numpy(np.array(classify_weight)).type(
        torch.FloatTensor).cuda()
    # criterion = SegmentClassifyLoss(weight_1, weight_2)
    criterion = CrossEntropyLoss2d(weight_1)
    lr = 0.001
    optimizer = torch.optim.Adam(unet.parameters(), lr=lr)

    if LoadModel:
        checkpoint = torch.load(model_dir + '{}.ckpt'.format(num_e))
        unet.load_state_dict(checkpoint['state_dict'])
        print('Loading model ~~~~~~~~~~', num_e)

    for e in range(EPOCH):
        unet.train()
        class_correct = list(0. for i in range(NC))  # 1*2(number_of_classes)
        class_total = list(0. for i in range(NC))  # 1*2
        classify = []
        for i, (data, target, label) in enumerate(train_loader):
            # print data.shape
            data = data.type(torch.FloatTensor)
            data = data.view(
                -1, data.size(2), data.size(3),
                data.size(4))  # (BN*num_per_img, 1, shape[0], shape[1])
            data = Variable(data.cuda(non_blocking=True))  # 'async' became 'non_blocking' in PyTorch 0.4

            target = target.view(-1, target.size(3), target.size(4)).type(
                torch.LongTensor)  # (BN*num_per_img, shape[0], shape[1])
            target = Variable(target.cuda(non_blocking=True))

            label = label.view(-1).type(torch.LongTensor)  # BN x 1, values are 0 or 1
            label = Variable(label.cuda(non_blocking=True))

            optimizer.zero_grad()
            # outputs, pred_label = unet(data)  # shape = [batch_size, num_class, 256, 256]: predicted pixel map and predicted label [BN, class]
            # loss1, loss2 = criterion(outputs, pred_label, target, label)
            # loss = loss1*loss_weight[0] + loss2*loss_weight[1]

            outputs = unet(data)  # shape = [batch_size, num_class, 256, 256]: the predicted pixel map
            loss1 = criterion(outputs, target)
            loss = loss1

            # print loss1.data[0], loss2.data[0]
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(outputs.data, 1)  # argmax over dim 1 (the num_class dimension)
            correct = (predicted.cpu() == target.cpu().data).view(-1)
            tg = target.view(-1).cpu().data  # BATCH_SIZE*256*256, reshaped into a 1 x n vector

            # _, classify_label = torch.max(pred_label.data, 1)
            # classify.append((classify_label.cpu()==label.cpu().data).numpy())

            for x, y in zip(tg, correct):
                class_correct[x] += y
                class_total[x] += 1

            if i % print_freq == 0:
                print("Epoch [%d/%d], Iter [%d] Loss: %.4f" %
                      (e, EPOCH, i, loss.data[0]))
                np.savez(
                    train_dir + 'pred_label_{}_{}.npz'.format(e, i),
                    data.cpu().data.numpy(),
                    target.cpu().data.numpy(),
                    Variable(predicted).cpu().data.numpy(),
                )
                # print 'training---------------:', e, i

        if e % val_freq == 0:
            print('\t\t', 'recall', '\t\t', '\t', 'FPR', '\t', 'classify accuracy')
            print(class_correct[1] / class_total[1], '\t\t',
                  1 - class_correct[0] / class_total[0])
            # '\t\t', np.sum(classify)*1.0/len(classify)/BN/cared_slice
            test(val_loader, e)

        if (e + 1) % 20 == 0:
            state_dict = unet.state_dict()
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].cpu()

            torch.save(
                {
                    'epoch': e,
                    'save_dir': model_dir,
                    'state_dict': state_dict,
                }, os.path.join(model_dir, '%d.ckpt' % e))
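
cuda(non_blocking=True) (spelled async=True before PyTorch 0.4) only overlaps the host-to-device copy with computation when the source tensor lives in pinned memory. A sketch of the matching loader setup, assumed here since the loaders are built elsewhere:

train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True,
                                           num_workers=4,
                                           pin_memory=True)  # pinned host memory enables async copies
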
Example #18
def main(args):
    if args.dataset == "cityscapes":
        train_dataset = DatasetTrain(cityscapes_data_path="/home/chenxiaoshuang/Cityscapes",
                                    cityscapes_meta_path="/home/chenxiaoshuang/Cityscapes/gtFine", 
                                    only_encode=args.only_encode, extra_data=args.extra_data)
        val_dataset = DatasetVal(cityscapes_data_path="/home/chenxiaoshuang/Cityscapes",
                                cityscapes_meta_path="/home/chenxiaoshuang/Cityscapes/gtFine",
                                only_encode=args.only_encode)
        test_dataset = DatasetTest(cityscapes_data_path="/home/chenxiaoshuang/Cityscapes",
                                cityscapes_meta_path="/home/chenxiaoshuang/Cityscapes/gtFine")      
        train_loader = DataLoader(dataset=train_dataset,
                                batch_size=args.batch_size, shuffle=True, num_workers=8, drop_last=True)
        val_loader = DataLoader(dataset=val_dataset,
                                batch_size=args.batch_size, shuffle=False, num_workers=8)
        test_loader = DataLoader(dataset=test_dataset,
                                batch_size=args.batch_size, shuffle=False, num_workers=8)
        num_classes = 20
    elif args.dataset == "camvid":
        train_dataset = DatasetCamVid(camvid_data_path="/home/chenxiaoshuang/CamVid",
                                    camvid_meta_path="/home/chenxiaoshuang/CamVid",
                                    only_encode=args.only_encode, mode="train")
        val_dataset = DatasetCamVid(camvid_data_path="/home/chenxiaoshuang/CamVid",
                                    camvid_meta_path="/home/chenxiaoshuang/CamVid",
                                    only_encode=args.only_encode, mode="val")
        test_dataset = DatasetCamVid(camvid_data_path="/home/chenxiaoshuang/CamVid",
                                    camvid_meta_path="/home/chenxiaoshuang/CamVid",
                                    only_encode=args.only_encode, mode="test")
        train_loader = DataLoader(dataset=train_dataset,
                                batch_size=args.batch_size, shuffle=True, num_workers=8, drop_last=True)
        val_loader = DataLoader(dataset=val_dataset,
                                batch_size=args.batch_size, shuffle=False, num_workers=8)
        test_loader = DataLoader(dataset=test_dataset,
                                batch_size=args.batch_size, shuffle=False, num_workers=8)
        num_classes = 12
    else:
        print("Unsupported Dataset!")
        return

    device = torch.device("cuda:{}".format(args.cuda) if torch.cuda.is_available() else "cpu")
    device_ids = [args.cuda, args.cuda+1]
    cfg = Config(args.dataset, args.only_encode, args.extra_data)
    net = Net(num_classes=num_classes)
    
    weight = cfg.weight.to(device)  # .to(device) also works on CPU, so no cuda guard is needed
    criterion1 = CrossEntropyLoss2d(weight)
    criterion2 = LovaszSoftmax(weight=weight)
    
    optimizer = optim.Adam(net.parameters(), 5e-4, (0.9, 0.999), eps=1e-08, weight_decay=1e-4)

    lambda1 = lambda epoch: (1 - epoch / 300) ** 0.9  # polynomial lr decay over 300 epochs

    exp_lr_scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
    
    trainer = Trainer('training', optimizer, exp_lr_scheduler, net, cfg, './log', device, device_ids, num_classes)
    trainer.load_weights(trainer.find_last(), encode=False, restart=False)
    #trainer.train(train_loader, val_loader, criterion1, criterion2, 300)
    trainer.evaluate(val_loader)
    trainer.test(test_loader)
    
    print('Finished Training')
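
The LambdaLR factor above implements polynomial decay over 300 epochs. A quick worked check of the multiplier (values rounded):

poly = lambda epoch: (1 - epoch / 300) ** 0.9
print(poly(0), poly(150), poly(299))  # 1.0, ~0.536, ~0.006 (base lr 5e-4 -> ~2.7e-4 at epoch 150)
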
Example #19
def main():

    # set torch and numpy seed for reproducibility
    torch.manual_seed(27)
    np.random.seed(27)

    # tensorboard writer
    writer = SummaryWriter(settings.TENSORBOARD_DIR)
    # makedir snapshot
    makedir(settings.CHECKPOINT_DIR)

    # enable cudnn
    torch.backends.cudnn.enabled = True

    # create segmentor network
    model_G = Segmentor(pretrained=settings.PRETRAINED,
                        num_classes=settings.NUM_CLASSES,
                        modality=settings.MODALITY)

    model_G.train()
    model_G.cuda()

    torch.backends.cudnn.benchmark = True

    # create discriminator network
    model_D = Discriminator(settings.NUM_CLASSES)
    model_D.train()
    model_D.cuda()

    # dataset and dataloader
    dataset = TrainDataset()
    dataloader = data.DataLoader(dataset,
                                 batch_size=settings.BATCH_SIZE,
                                 shuffle=True,
                                 num_workers=settings.NUM_WORKERS,
                                 pin_memory=True,
                                 drop_last=True)

    test_dataset = TestDataset(data_root=settings.DATA_ROOT_VAL,
                               data_list=settings.DATA_LIST_VAL)
    test_dataloader = data.DataLoader(test_dataset,
                                      batch_size=1,
                                      shuffle=False,
                                      num_workers=settings.NUM_WORKERS,
                                      pin_memory=True)

    # optimizer for generator network (segmentor)
    optim_G = optim.SGD(model_G.optim_parameters(settings.LR),
                        lr=settings.LR,
                        momentum=settings.LR_MOMENTUM,
                        weight_decay=settings.WEIGHT_DECAY)

    # lr scheduler for optim_G
    lr_lambda_G = lambda epoch: (1 - epoch / settings.EPOCHS) ** settings.LR_POLY_POWER
    lr_scheduler_G = optim.lr_scheduler.LambdaLR(optim_G,
                                                 lr_lambda=lr_lambda_G)

    # optimizer for discriminator network
    optim_D = optim.Adam(model_D.parameters(), settings.LR_D)

    # lr scheduler for optim_D
    lr_lambda_D = lambda epoch: (1 - epoch / settings.EPOCHS) ** settings.LR_POLY_POWER
    lr_scheduler_D = optim.lr_scheduler.LambdaLR(optim_D,
                                                 lr_lambda=lr_lambda_D)

    # losses
    ce_loss = CrossEntropyLoss2d(
        ignore_index=settings.IGNORE_LABEL)  # to use for segmentor
    bce_loss = BCEWithLogitsLoss2d()  # to use for discriminator

    # upsampling for the network output
    upsample = nn.Upsample(size=(settings.CROP_SIZE, settings.CROP_SIZE),
                           mode='bilinear',
                           align_corners=True)

    # # labels for adversarial training
    # pred_label = 0
    # gt_label = 1

    # load the model to resume training
    last_epoch = -1
    if settings.RESUME_TRAIN:
        checkpoint = torch.load(settings.LAST_CHECKPOINT)

        model_G.load_state_dict(checkpoint['model_G_state_dict'])
        model_G.train()
        model_G.cuda()

        model_D.load_state_dict(checkpoint['model_D_state_dict'])
        model_D.train()
        model_D.cuda()

        optim_G.load_state_dict(checkpoint['optim_G_state_dict'])
        optim_D.load_state_dict(checkpoint['optim_D_state_dict'])

        lr_scheduler_G.load_state_dict(checkpoint['lr_scheduler_G_state_dict'])
        lr_scheduler_D.load_state_dict(checkpoint['lr_scheduler_D_state_dict'])

        last_epoch = checkpoint['epoch']

        # purge the logs after the last_epoch
        writer = SummaryWriter(settings.TENSORBOARD_DIR,
                               purge_step=(last_epoch + 1) * len(dataloader))

    for epoch in range(last_epoch + 1, settings.EPOCHS + 1):

        train_one_epoch(model_G,
                        model_D,
                        optim_G,
                        optim_D,
                        dataloader,
                        test_dataloader,
                        epoch,
                        upsample,
                        ce_loss,
                        bce_loss,
                        writer,
                        print_freq=5,
                        eval_freq=settings.EVAL_FREQ)

        if epoch % settings.CHECKPOINT_FREQ == 0 and epoch != 0:
            save_checkpoint(epoch, model_G, model_D, optim_G, optim_D,
                            lr_scheduler_G, lr_scheduler_D)

        # save the final model
        if epoch >= settings.EPOCHS:
            print('saving the final model')
            save_checkpoint(epoch, model_G, model_D, optim_G, optim_D,
                            lr_scheduler_G, lr_scheduler_D)
            writer.close()

        lr_scheduler_G.step()
        lr_scheduler_D.step()
Example #20
                             label_lists=None,
                             img_transform=img_transform,
                             label_transform=None,
                             test=False)

train_loader = torch.utils.data.DataLoader(ConcatDataset(
    source_dataset, target_dataset),
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True)

# start training
# background weight: 1  shoe weight: 1
class_weighted = torch.Tensor([args.b_weight, args.s_weight])
class_weighted = class_weighted.cuda()
criterion_c = CrossEntropyLoss2d(class_weighted)
criterion_d = DiscrepancyLoss2d()

G.cuda()
F1.cuda()
F2.cuda()
G.train()
F1.train()
F2.train()

for epoch in range(start_epoch, args.epochs):
    d_loss_per_epoch = 0
    c_loss_per_epoch = 0

    for ind, (source, target) in tqdm.tqdm(enumerate(train_loader)):
        source_img, source_labels = source[0], source[1]