Example #1
def train(args):
    my_dataset = MyDataset("../data/train", transform=x_transforms, target_transform=y_transforms)
    dataloaders = DataLoader(my_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    model = Unet(3, 1).to(device)
    model.train()
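    # BCELoss expects probabilities in [0, 1]; the Unet here is assumed to end in a sigmoid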
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    num_epochs = args.epochs
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        data_size = len(dataloaders.dataset)
        epoch_loss = 0
        step = 0
        for x, y in dataloaders:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print("%d/%d, train_loss:%0.3f" % (step, (data_size - 1) // dataloaders.batch_size + 1, loss.item()))
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss))
    torch.save(model.state_dict(), 'model_weights.pth')
    return model
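A minimal, hypothetical driver for the train() function above, assuming argparse flags named batch_size and epochs (the flag names and defaults are illustrative guesses, not part of the original example):

if __name__ == '__main__':
    import argparse
    # illustrative argument parser; flag names are assumed from args.batch_size / args.epochs above
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--epochs', type=int, default=20)
    train(parser.parse_args())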
Example #2
def train():
    save_dir = "/home/FuDawei/NLP/SQUAD/unet/data/"
    train_examples, dev_examples, opt = prepare_train(save_dir)
    epoch = 30
    batch_size = 32
    model = Unet(opt=opt).to(device)
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))  # materialized as a list so it can be reused for gradient clipping below
    optimizer = torch.optim.Adamax(parameters, lr=opt["lr"])
    best_score, exact_scores, f1_scores = 0, [], []

    count = 0
    total_loss = 0
    for ep in range(epoch):
        model.train()
        for batch_data in get_batch_data(train_examples, batch_size):
            data = model.get_data(batch_data)
            loss = model(data)
            model.zero_grad()
            optimizer.zero_grad()
            loss.backward()
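            # clip the gradient norm to 10 to keep updates stable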
            nn.utils.clip_grad_norm_(parameters, 10)
            optimizer.step()
            model.reset_parameters()
            count += 1
            # print(loss.item())
            # Evaluate(dev_examples, model)

            total_loss += loss.item()
            if count % 100 == 0:
                print(total_loss / 100)
                total_loss = 0
                # model.eval()
                # Evaluate(dev_examples, model, opt)
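            # presumably restores the frozen word-embedding rows after the update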
            if not opt["fix_word_embedding"]:
                model.reset_parameters()
        print(ep)
        model.eval()
        exact, f1 = Evaluate(dev_examples, model, opt)
        exact_scores.append(exact)
        f1_scores.append(f1)
        if f1 > best_score:
            best_score = f1
            torch.save(model.state_dict(), save_dir + "best_model")
    with open(save_dir + '_f1_scores.pkl', 'wb') as f:
        pkl.dump(f1_scores, f)
    with open(save_dir + '_exact_scores.pkl', 'wb') as f:
        pkl.dump(exact_scores, f)
def main(FLAGS):

    "train and validate the Unet model"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #data directory
    data_dir = FLAGS.dataset_dir
    #log_directory
    log_dir = FLAGS.log_dir
    # Hyper and other parameters
    train_batch_size = FLAGS.train_batch_size
    val_batch_size = FLAGS.val_batch_size
    aug_flag = FLAGS.aug
    num_epochs = FLAGS.epochs
    num_classes = 2
    # get the train and validation dataloaders
    dataloaders = get_dataloaders(data_dir, train_batch_size, val_batch_size,
                                  aug_flag)
    model = Unet(3, num_classes)

    # Run training on multiple GPUs when more than one is available
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model, device_ids=[0, 1])
    else:
        print("no multiple gpu found")
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=0.02,
                          momentum=0.9,
                          weight_decay=0.0005)
    #optimizer = optim.Adam(model.parameters(),lr = learning_rate)
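    # note: this StepLR scheduler is created but never stepped in the visible code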
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    plotter = VisdomLinePlotter(env_name='Unet Train')
    # uncomment for learning rate scheduler
    train_val(dataloaders, model, criterion, optimizer, num_epochs, log_dir,
              device)
Example #4
        train_set, batch_size=args.batch_size, shuffle=True, num_workers=8)
    dataloaders['val'] = torch.utils.data.DataLoader(val_set,
                                                     batch_size=1,
                                                     shuffle=True,
                                                     num_workers=8)

    from model import Unet
    model = Unet(args.in_channels, args.out_channels)

    if args.load_caffe_unet:
        caffe_unet_path = '/mnt/ccvl15/yixiao/kaggle/models/pretrained/unet.pt'
        unet_caffemodel_weights = torch.load(caffe_unet_path)
        model = load_conv_weights(model, unet_caffemodel_weights)
    elif args.load != '':
        model.load_state_dict(torch.load(args.load))

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

    from losses import cross_entropy_with_soft_dice_2
    # loss_func = torch.nn.CrossEntropyLoss()

    from criteria import dice, multichannel_dice

    try:
        model, val_history = train_model(model, dataloaders,
                                         cross_entropy_with_soft_dice_2,
                                         multichannel_dice, optimizer,
                                         args.epochs)
    except KeyboardInterrupt:
        sys.exit(0)
Example #5
def train_net(image_size, batch_size, num_epochs, lr, num_workers, checkpoint):
    train_loader, val_loader = data_loaders(image_size=(image_size,
                                                        image_size),
                                            batch_size=batch_size)
    device = torch.device('cuda') if torch.cuda.is_available() else 'cpu'
    model = Unet().to(device)
    if checkpoint:
        model.load_state_dict(torch.load(checkpoint))

    criterion = DiceLoss().to(device)
    optimizer = Adam(model.parameters(), lr=lr)

    logging.info(f'Start training:\n'
                 f'Num epochs:               {num_epochs}\n'
                 f'Batch size:               {batch_size}\n'
                 f'Learning rate:            {lr}\n'
                 f'Num workers:              {num_workers}\n'
                 f'Scale image size:         {image_size}\n'
                 f'Device:                   {device}\n'
                 f'Checkpoint:               {checkpoint}\n')

    train_losses = []
    val_losses = []
    best_val_loss = float('inf')  # track the best validation loss across all epochs

    for epoch in range(num_epochs):
        print(f'Epoch {epoch+1}: ')
        train_batch_losses = []
        val_batch_losses = []

        for x_train, y_train in tqdm(train_loader):
            x_train = x_train.to(device)
            y_train = y_train.to(device)
            y_pred = model(x_train)

            optimizer.zero_grad()
            loss = criterion(y_pred, y_train)
            train_batch_losses.append(loss.item())
            loss.backward()
            optimizer.step()

        train_losses.append(sum(train_batch_losses) / len(train_batch_losses))
        print(
            f'-----------------------Train loss: {train_losses[-1]} -------------------------------'
        )

        # run validation in eval mode and without gradient tracking
        model.eval()
        with torch.no_grad():
            for x_val, y_val in tqdm(val_loader):
                x_val = x_val.to(device)
                y_val = y_val.to(device)
                y_pred = model(x_val)

                loss = criterion(y_pred, y_val)
                val_batch_losses.append(loss.item())
        model.train()

        val_losses.append(sum(val_batch_losses) / len(val_batch_losses))
        print(
            f'-----------------------Val loss: {val_losses[-1]} -------------------------------'
        )
        if val_losses[-1] < best_val_loss:
            best_val_loss = val_losses[-1]
            if not os.path.isdir('weights/'):
                os.mkdir('weights/')
            torch.save(model.state_dict(), f'weights/checkpoint{epoch+1}.pth')
            print(f'Save checkpoint in: weights/checkpoint{epoch+1}.pth')
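A hedged usage sketch for the function above; the argument values below are purely illustrative:

train_net(image_size=256, batch_size=8, num_epochs=30, lr=1e-3, num_workers=4, checkpoint=None)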
Example #6
def train():
    model = Unet(input_channel=opt.input_channel, cls_num=opt.cls_num)
    model_name = 'Unet_bn'
    train_logger = LogWay(
        datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.txt')
    train_data = My_Dataset(opt.train_images, opt.train_masks)
    train_dataloader = DataLoader(train_data,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=0)

    if opt.cls_num == 1:
        criterion = torch.nn.BCELoss()
    else:
        criterion = torch.nn.NLLLoss()
    if use_gpu:
        model.cuda()
        if opt.cls_num == 1:
            criterion = torch.nn.BCELoss().cuda()
        else:
            criterion = torch.nn.NLLLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=opt.learning_rate,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay)

    for epoch in range(opt.epoch):
        loss_sum = 0
        for i, (data, target) in enumerate(train_dataloader):
            data = Variable(data)
            target = Variable(target)
            if use_gpu:
                data = data.cuda()
                target = target.cuda()
            outputs = model(data)

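            # binary case: sigmoid + BCELoss on flattened tensors; multi-class case: log-softmax + NLLLoss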
            if opt.cls_num == 1:
                outputs = torch.sigmoid(outputs).view(-1)
                mask_true = target.view(-1)
                loss = criterion(outputs, mask_true)
            else:
                outputs = F.log_softmax(outputs, dim=1)
                loss = criterion(outputs, target)

            loss_sum = loss_sum + loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print("epoch:{} batch:{} loss:{}".format(epoch + 1, i,
                                                     loss.item()))
        info = 'Time:{}    Epoch:{}    Loss_avg:{}\n'.format(
            str(datetime.datetime.now()), epoch + 1, loss_sum / (i + 1))
        train_logger.add(info)
        adjusting_rate(optimizer, opt.learning_rate, epoch + 1)
        realepoch = epoch + 1
        if (realepoch % 10 == 0):
            save_name = datetime.datetime.now().strftime(
                '%Y-%m-%d %H-%M-%S') + ' ' + model_name + str(
                    realepoch) + '.pt'
            torch.save(model.state_dict(), save_name)
def unet_train():

    batch_size = 1
    num_epochs = [5000, 5000, 5000, 5000, 5000, 5000, 5000, 5000]
    num_workers = 2
    lr = 0.0001

    losslist = ['dice']  # ['focal', 'bce', 'dice', 'lovasz']
    optimlist = ['adam']  # ['adam', 'sgd']
    iflog = True

    SC_root_dir = '../dataset-EdmSealedCrack-512'
    train_files, val_files, test_files = myutils.organize_SC_files(SC_root_dir)

    train_RC_dataset = DatasetRealCrack('../dataset-EdmCrack600-512/A/train',
                                        transform=transform)
    train_SC_dataset = DatasetSealedCrack(files=train_files,
                                          root_dir=SC_root_dir,
                                          transform=data_Train_transforms)
    val_RC_dataset = DatasetRealCrack('../dataset-EdmCrack600-512/A/val',
                                      transform=transform)
    val_SC_dataset = DatasetSealedCrack(files=val_files,
                                        root_dir=SC_root_dir,
                                        transform=data_Test_transforms)

    train_loader = torch.utils.data.DataLoader(ConcatDataset(
        train_RC_dataset, train_SC_dataset),
                                               batch_size=2,
                                               shuffle=True,
                                               num_workers=2)

    criterion = nn.BCELoss()
    focallos = FocalLoss(gamma=2)
    doubleFocalloss = focalloss.FocalLoss_2_datasets(gamma=2)

    epoidx = -1
    for los in losslist:
        for opt in optimlist:
            start = time.time()
            print(los, opt)
            torch.manual_seed(77)
            torch.cuda.manual_seed(77)
            #################
            #unet = Unet_SpatialPyramidPooling(3).cuda()
            #################
            unet = Unet(3).cuda()
            SC_classifier = classifier(64, 2).cuda()
            RC_classifier = classifier(64, 2).cuda()

            ##################
            #unet = smp.Unet('resnet34', encoder_weights='imagenet').cuda()
            #unet.segmentation_head = torch.nn.Sequential().cuda()
            #SC_classifier = classifier(16, 2).cuda()
            #RC_classifier = classifier(16, 2).cuda()

            #UNCOMMENT TO KEEP TRAINING THE BEST MODEL
            prev_epoch = 0  # if loading model 58, change to prev_epoch = 58. When saving the model, it is going to be named as 59, 60, 61...
            #unet.load_state_dict(torch.load('trained_models/unet_adam_dice_58.pkl'))
            #SC_classifier.load_state_dict(torch.load('trained_models/SC_classifier_adam_dice_58.pkl'))
            #RC_classifier.load_state_dict(torch.load('trained_models/RC_classifier_adam_dice_58.pkl'))

            history = []
            if 'adam' in opt:
                optimizer = torch.optim.Adam(unet.parameters(), lr=lr)
            elif 'sgd' in opt:
                optimizer = torch.optim.SGD(unet.parameters(),
                                            lr=10 * lr,
                                            momentum=0.9)

            logging.basicConfig(filename='./logs/logger_unet.log',
                                level=logging.INFO)

            total_step = len(train_loader)
            epoidx += 1
            for epoch in range(num_epochs[epoidx]):
                totalloss = 0
                for i, (realCrack_batch,
                        sealedCrack_batch) in enumerate(train_loader):
                    SC_images = sealedCrack_batch[0].cuda()
                    SC_masks = sealedCrack_batch[1].cuda()
                    RC_images = realCrack_batch[0].cuda()
                    RC_masks = realCrack_batch[1].cuda()
                    SC_encoder = unet(SC_images)
                    RC_encoder = unet(RC_images)
                    #############
                    SC_outputs = SC_classifier(SC_encoder)
                    RC_outputs = RC_classifier(RC_encoder)
                    #############
                    #Deep lab v3
                    #SC_outputs = SC_classifier(SC_encoder['out'])
                    #RC_outputs = RC_classifier(RC_encoder['out'])
                    ##############
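                    # note: the 'bce' and 'lovasz' branches below still reference single-dataset
                    # variables (outputs, masks) that are not defined in this two-dataset loop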
                    if 'bce' in los:
                        masks = onehot(masks)
                        loss = criterion(outputs, masks)
                    elif 'dice' in los:
                        branch_RC = {'outputs': RC_outputs, 'masks': RC_masks}
                        branch_SC = {'outputs': SC_outputs, 'masks': SC_masks}
                        loss = dice_loss_2_datasets(branch_RC, branch_SC)
                        #masks = onehot(masks)
                        #loss = dice_loss(outputs, masks)
                    elif 'lovasz' in los:
                        masks = onehot(masks)
                        loss = L.lovasz_hinge(outputs, masks)
                    elif 'focal' in los:
                        #loss = focallos(outputs, masks.long())
                        branch_RC = {
                            'outputs': RC_outputs,
                            'masks': RC_masks.long()
                        }
                        branch_SC = {
                            'outputs': SC_outputs,
                            'masks': SC_masks.long()
                        }
                        loss = doubleFocalloss(branch_RC, branch_SC)
                    totalloss += loss * RC_images.size(0)  #*2?
                    #print(RC_images.size(0))

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    if i % 10 == 0:
                        print(epoch, i)
                        print("total loss: ", totalloss)
                    if i % 1000 == 0:
                        print("Epoch:%d;     Iteration:%d;      Loss:%f" %
                              (epoch, i, loss))

                    if i + 1 == total_step:  # and epoch%1==0: #and val_miou>0.85:
                        torch.save(
                            unet.state_dict(),
                            './trained_models/unet_' + opt + '_' + los + '_' +
                            str(epoch + 1 + prev_epoch) + '.pkl')
                        torch.save(
                            RC_classifier.state_dict(),
                            './trained_models/RC_classifier_' + opt + '_' +
                            los + '_' + str(epoch + 1 + prev_epoch) + '.pkl')
                        torch.save(
                            SC_classifier.state_dict(),
                            './trained_models/SC_classifier_' + opt + '_' +
                            los + '_' + str(epoch + 1 + prev_epoch) + '.pkl')
                history_np = np.array(history)
                np.save('./logs/unet_' + opt + '_' + los + '.npy', history_np)
            end = time.time()
            print((end - start) / 60)
                               num_workers=4)

generator = define_G(
    4,
    1,
    64,
    'unet_128',
    norm='instance',
)
discriminator = netD()
unet = Unet()
unet.load_state_dict(torch.load("./weight/unet_pretrained.pth"))

optimizer_g = torch.optim.Adam(generator.parameters(), lr=0.0002)
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=0.0002)
optimizer_s = torch.optim.Adam(unet.parameters(), lr=0.0002)

generator.cuda()
discriminator.cuda()
unet.cuda()
EPOCH = 100
num_iter = len(train_loader)
D_LOSS = []
G_LOSS = []
# S_LOSS=[]
f = open("./loss_gan.txt", 'a')
print(time.strftime('|---------%Y-%m-%d   %H:%M:%S---------|',
                    time.localtime(time.time())),
      file=f)
discriminator.train()
unet.train()
Example #9
                              shuffle=True)

    num_data = len(dataset_train)
    num_epoch = np.ceil(num_data / args.batch_size)  # batches per epoch, despite the variable name

    print(f'Number of data:     {num_data}')
    print(f'Batches per epoch:  {num_epoch}')

    # Model load.
    u_net = Unet().to(device)

    # loss
    bce_loss = nn.BCEWithLogitsLoss().to(device)

    # Optimizer.
    optim = torch.optim.Adam(u_net.parameters(), lr=args.lr)

    # Miscellaneous helper functions
    to_numpy = lambda x: x.to('cpu').detach().numpy().transpose(
        0, 2, 3, 1)  # detach the tensor from the device and convert it to numpy
    denorm = lambda x, mean, std: (x * std) + mean
    classifier = lambda x: 1.0 * (
        x > 0.5)  # classifier implemented as an indicator function with a 0.5 threshold

    # Tensorboard
    tensorboard_train = SummaryWriter(
        log_dir=os.path.join(args.log_dir, 'train'))

    # Training loop
    init_epoch = 0
    max_num_iters = num_data // args.batch_size
Example #10
 valid_set = Cloudset(
     r.train, 'valid', r.valid_ids, r.train_fold,
     validation_augmentation_kaggle()
     # validation_augmentation()
     # try different augmentation here
 )
 train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
 valid_loader = DataLoader(valid_set, batch_size=batch_size, shuffle=False)
 print('training data loaded')
 net = Unet(c_in=3, c_out=4).float()
 if is_gpu:
     net.cuda()
 print('unet built')
 # training
 criterion = BceDiceLoss(eps=1e-1)  # make sure tp=1 at least
 optimizer = optim.Adam(net.parameters(), lr=initial_lr)
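 # cut the learning rate to 20% of its value when the validation loss plateaus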
 scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                  factor=0.2,
                                                  patience=2,
                                                  cooldown=2)
 valid_loss_min = np.Inf
 # for plot
 train_loss_list = []
 valid_loss_list = []
 lr_list = []
 # start
 for epoch in range(1, max_epochs + 1):
     train_loss = 0.0
     valid_loss = 0.0
     # train
     net.train()
Example #11
        ToTensorV2()
    ])

    train_dataset = KvasirSegDataset(train_path, t_transforms)
    val_dataset = KvasirSegDataset(val_path, v_transforms)

    train_dataloader = DataLoader(train_dataset, batch_size)
    val_dataloader = DataLoader(val_dataset, batch_size)

    in_classes = 3
    out_classes = 1
    model = Unet(in_classes, out_classes)
    criteria = DICELoss()
    metrics = {'iou': iou}

    optim = torch.optim.SGD(model.parameters(),
                            lr=0.001,
                            momentum=0.99,
                            weight_decay=0.0005)
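    # note: both a cyclic and a cosine-annealing scheduler are built; only lr_s is referenced in the commented-out train() call below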
    lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optim,
                                                     base_lr=0.001,
                                                     max_lr=0.01)
    lr_s = torch.optim.lr_scheduler.CosineAnnealingLR(optim, 100, 0.001)
    # train(train_dataloader, val_dataloader, model, epochs, criteria, metrics, optim, scheduler=lr_s, device=device)
    predict(
        model,
        'D:\\Kvasir-SEG',
        device,
        f_name='unet_aug4_pred_masks',
        model_path=
        'C:\\Users\\DSKIM\\Google 드라이브\\AI\\medical-projects\\Kvasir-Seg\\unet_aug4_models\\Unet_199_22.pth'
    )
Example #12
def main(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    ### Hyperparameters Setting ###
    epochs = args.epochs
    batch_size = args.batch_size
    num_workers = args.num_workers
    valid_ratio = args.valid_ratio
    threshold = args.threshold
    separable = args.separable
    down_method = args.down_method
    up_method = args.up_method
    ### DataLoader ###
    dataset = DataSetWrapper(batch_size, num_workers, valid_ratio)
    train_dl, valid_dl = dataset.get_data_loaders(train=True)

    ### Model: U-Net ###
    model = Unet(input_dim=1,
                 separable=separable,
                 down_method=down_method,
                 up_method=up_method)
    model.summary()
    model = nn.DataParallel(model).to(device)

    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     T_max=len(train_dl),
                                                     eta_min=0,
                                                     last_epoch=-1)
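    # BCEWithLogitsLoss fuses the sigmoid with BCE, so the model is expected to output raw logits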
    criterion = nn.BCEWithLogitsLoss()
    train_losses = []
    val_losses = []

    ###Train & Validation start ###
    mIOU_list = []
    best_mIOU = 0.
    step = 0

    for epoch in range(epochs):

        ### train ###
        pbar = tqdm(train_dl)
        model.train()
        losses = []

        for (img, label) in pbar:
            optimizer.zero_grad()
            img, label = img.to(device), label.to(device)
            pred = model(img)
            # pred = Padding()(pred, label.size(3))
            loss = criterion(pred, label)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            pbar.set_description(
                f'E: {epoch + 1} | L: {loss.item():.4f} | lr: {scheduler.get_lr()[0]:.7f}'
            )
        scheduler.step()
        if (epoch + 1) % 10 == 0:  # record the averaged training loss every 10 epochs
            losses = sum(losses) / len(losses)
            train_losses.append(losses)

        ### validation ###
        with torch.no_grad():
            model.eval()
            mIOU = []
            losses = []
            pbar = tqdm(valid_dl)
            for (img, label) in pbar:
                img, label = img.to(device), label.to(device)
                pred = model(img)

                loss = criterion(pred, label)

                mIOU.append(get_IOU(pred, label, threshold=threshold))
                losses.append(loss.item())

            mIOU = sum(mIOU) / len(mIOU)
            mIOU_list.append(mIOU)
            if (epoch + 1) % 10 == 0:  # record the averaged validation loss every 10 epochs
                losses = sum(losses) / len(losses)
                val_losses.append(losses)

            print(
                f'VL: {loss.item():.4f} | mIOU: {100 * mIOU:.1f}% | best mIOU: {100 * best_mIOU:.1f}'
            )

        ### Early Stopping ###
        if mIOU > best_mIOU:
            best_mIOU = mIOU
            save_state = {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'train_losses': train_losses,
                'val_losses': val_losses,
                'best_mIOU': best_mIOU
            }
            torch.save(
                save_state,
                f'./checkpoint/{down_method}_{up_method}_{separable}.ckpt')
            step = 0
        else:
            step += 1
            if step > args.patience:
                print('Early stopped...')
                return
Example #13
def train(args, x_train, y_train, x_valid, y_valid):

    writer = SummaryWriter()

    best_dice = 0 

    model = Unet().to(args.device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    bce_loss = torch.nn.BCELoss()

    train_dataloader = load_dataset(x_train, y_train, args.batch_size, True)
    valid_dataloader = load_dataset(x_valid, y_valid, args.batch_size, False)

    result = {}
    result['train/BCE'] = []
    result['train/Dice'] = []
    result['valid/BCE'] = []
    result['valid/Dice'] = []

    for epoch in range(args.epochs):
        print('train step: epoch {}'.format(str(epoch+1).zfill(4)))

        train_bce = []
        train_dice = []

        for inp_im, lab_im in tqdm(train_dataloader):
            inp_im = inp_im.to(args.device)
            lab_im = lab_im.to(args.device)

            pred = model(inp_im)

            bce = bce_loss(pred, lab_im)
            dice = calc_dice(pred, lab_im)

            train_bce.append(bce.item())
            train_dice.append(dice)

            model.zero_grad()
            bce.backward()
            optimizer.step()
        
        result['train/BCE'].append(statistics.mean(train_bce))
        result['train/Dice'].append(statistics.mean(train_dice))

        writer.add_scalar('train/BinaryCrossEntropy', result['train/BCE'][-1], epoch+1)
        writer.add_scalar('train/DiceScore', result['train/Dice'][-1], epoch+1)

        print('BCE: {}, Dice: {}'.format(result['train/BCE'][-1], result['train/Dice'][-1]))

        if (epoch+1) % 10 == 0 or (epoch+1) == 1:

            with torch.no_grad():
                print('valid step: epoch {}'.format(str(epoch+1).zfill(4)))
                model.eval()

                valid_bce = []
                valid_dice = []
                for inp_im, lab_im in tqdm(valid_dataloader):
                    inp_im = inp_im.to(args.device)
                    lab_im = lab_im.to(args.device)

                    pred = model(inp_im)

                    bce = bce_loss(pred, lab_im)
                    dice = calc_dice(pred, lab_im)

                    valid_bce.append(bce.item())
                    valid_dice.append(dice)
                
                result['valid/BCE'].append(statistics.mean(valid_bce))
                result['valid/Dice'].append(statistics.mean(valid_dice))

                writer.add_scalar('valid/BinaryCrossEntropy', result['valid/BCE'][-1], epoch+1)
                writer.add_scalar('valid/DiceScore', result['valid/Dice'][-1], epoch+1)

                print('BCE: {}, Dice: {}'.format(result['valid/BCE'][-1], result['valid/Dice'][-1]))


                if best_dice < result['valid/Dice'][-1]:
                    best_dice = result['valid/Dice'][-1]

                    best_model_name = os.path.join(args.save_model_path, f'best_model_{epoch + 1:04}.pth')
                    print('save model ==>> {}'.format(best_model_name))
                    torch.save(model.state_dict(), best_model_name)
Example #14
#### create dataloader ####
train_loader = DataLoader(dataset=training_set,
                          drop_last=True,
                          sampler=RandomSampler(training_set),
                          batch_size=batch_size)

valid_loader = DataLoader(dataset=valid_set,
                          drop_last=False,
                          sampler=SequentialSampler(valid_set),
                          batch_size=batch_size)

epochs = 25
model = Unet().cuda()
criterion = DiceBCELoss()
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate, weight_decay=1e-5)

train_loss,val_loss = [],[]
train_iou,val_iou = [],[]
valid_loss_min = 10000
i = 1
if initial_checkpoint is not None:
    model, optimizer, epochnum, valid_loss_min = load_ckp(checkpoint_path+initial_checkpoint, model, optimizer)
    print(f'initial ckp: {epochnum}')
    i = i + epochnum


for epoch in range(epochs):

    running_train_loss = []
netD_A = Discriminator_MSE(out_ch).cuda(cuda)
netD_B = Discriminator_MSE(in_ch).cuda(cuda)

if lambda_identity > 0.0:  # only works when input and output images have the same number of channels
    assert (in_ch == out_ch)
fake_A_pool = ImagePool(
    pool_size)  # create image buffer to store previously generated images
fake_B_pool = ImagePool(
    pool_size)  # create image buffer to store previously generated images
# define loss functions
criterionGAN = torch.nn.MSELoss(
)  # define GAN loss. CrossEntropyLoss or MSELoss !!!!!!!
criterionCycle = torch.nn.L1Loss()
criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
optimizer_G_A = torch.optim.Adam(netG_A.parameters(),
                                 lr=learning_rate,
                                 betas=(0.5, 0.999))
optimizer_G_B = torch.optim.Adam(netG_B.parameters(),
                                 lr=learning_rate,
                                 betas=(0.5, 0.999))
optimizer_D_A = torch.optim.Adam(netD_A.parameters(),
                                 lr=learning_rate,
                                 betas=(0.5, 0.999))
optimizer_D_B = torch.optim.Adam(netD_B.parameters(),
                                 lr=learning_rate,
                                 betas=(0.5, 0.999))

optimizers = [optimizer_G_A, optimizer_G_B, optimizer_D_A, optimizer_D_B]

realA_mean = np.load("/home/fenqiang/harmonization/realA_mean.npy")
Example #16
    model.eval()
    img_list = os.listdir(img_dir)
    with torch.no_grad():
        for img_name in img_list:
            img = cv2.imread(os.path.join(img_dir, img_name))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = img.transpose((2, 0, 1))
            img = np.expand_dims(img, axis=0)
            img = np.array(img / 255, dtype=np.float32)
            img = torch.from_numpy(img)
            img = img.to(torch.device('cuda'))
            output = model(img)
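            # threshold the prediction at 0.5 to get a binary mask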
            output[output >= 0.5] = 1
            output[output < 0.5] = 0
            output = output.cpu().numpy()
            output = np.squeeze(output, 0)
            output = np.transpose(output, (1, 2, 0))
            output = np.array(output * 255, dtype='uint8')
            cv2.imwrite('./result/' + img_name, output)


if __name__ == '__main__':
    unet = Unet().to(torch.device('cuda'))
    optimizer = optim.Adam(unet.parameters(), lr=0.00001)
    train_data = MyDataset(base_path='./data')
    train_loader = DataLoader(dataset=train_data, batch_size=1, shuffle=True)
    train(unet, train_loader, optimizer, 100)
    # model_path = './model/unet.pth'
    # img_dir = './test_img'
    # inference(unet, model_path, img_dir)
Example #17
    # Network training
    transform = transforms.Compose([Normalization(mean=0.5, std=0.5), ToTensor()])

    dataset_test = Dataset(data_dir=os.path.join(data_dir, 'test'), transform=transform)
    loader_test = DataLoader(dataset_test, batch_size=batch_size, shuffle=False, num_workers=8)
    # auxiliary variables
    num_data_test = len(dataset_test)

    num_batch_test = np.ceil(num_data_test / batch_size)

# Create the network and its parameters
net = Unet().to(device)

fn_loss = nn.BCEWithLogitsLoss().to(device)

optim = torch.optim.Adam(net.parameters(), lr = lr)

# auxiliary functions
fn_tonumpy = lambda x: x.to('cpu').detach().numpy().transpose(0,2,3,1)
fn_denorm = lambda x, mean, std: (x*std) +mean
fn_class = lambda x: 1.0 * (x > 0.5)

# SummaryWriter for TensorBoard
writer_train = SummaryWriter(log_dir=os.path.join(log_dir, 'train'))
writer_val = SummaryWriter(log_dir=os.path.join(log_dir, 'val'))

# Save and load the network
def save(ckpt_dir, net, optim, epoch):
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
Example #18
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print("%d/%d,train_loss:%0.3f" %
                  (step,
                   (dt_size - 1) // dataload.batch_size + 1, loss.item()))
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss / step))
    torch.save(model.state_dict(), 'weights_%d.pth' % epoch)
    return model


# Train the model
model = Unet(3, 1).to(device)
batch_size = 1
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters())

dataset = MyDataset(image_path,
                    label_path,
                    train_data,
                    transform=x_transforms,
                    target_transform=y_transforms)
data_loader = data.DataLoader(dataset,
                              batch_size=1,
                              shuffle=True,
                              num_workers=0)

train_model(model, criterion, optimizer, data_loader)
Example #19
               map_location=device)['net'])

model = Unet(n_channels=3, n_classes=8)
model.to(device=device)
#model_dir = './checkpoints/best_score_model_unet.pth'
model_dir = './checkpoints/student_net.pth'

if os.path.exists(model_dir):
    #model = load_GPUS(model_dir, model_dir, kwargs)
    model.load_state_dict(torch.load(model_dir)['net'])
    print("loading model sccessful----" + model_dir)
#model.load_state_dict(torch.load('teach_net_params_0.9895.pkl'))
criterion = nn.CrossEntropyLoss()
criterion2 = nn.KLDivLoss()
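# criterion2 is presumably the distillation loss between the student's and a teacher's soft outputs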

optimizer = optim.Adam(model.parameters(), lr=0.0001)
#optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
n_size = batch_size * 256 * 256

writer = SummaryWriter(comment='student' +
                       'LR_0.0001_BS_32')  # create a TensorBoard log file
epochs = 50
global_step = 1
for epoch in range(epochs):
    loss_sigma = 0.0
    correct = 0.0
    total = 0.0
    model.train()
    with tqdm(total=train_steps,
              desc=f'Epoch {epoch + 1}/{epochs}',
              unit='img') as pbar: