Example #1
def eval_net(net, dataset, dir_img, dir_mask, args):
    net.eval()
    if args.gpu:
        net.to(args.device)

    total = 0
    val = get_imgs_and_masks(dataset['val'], dir_img, dir_mask)
    for i, b in enumerate(val):
        img = np.array(b[0]).astype(np.float32)
        mask = np.array(b[1]).astype(np.float32)

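        # add batch and channel dimensions: (H, W) -> (1, 1, H, W)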
        img = torch.from_numpy(img)[None, None, :, :]
        mask = torch.from_numpy(mask).unsqueeze(0)

        if args.gpu:
            img = img.to(args.device)
            mask = mask.to(args.device)
        mask_pred = net(img)
        mask_pred = (mask_pred > 0.5).float()  # threshold to get the predicted binary segmentation map

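        # accumulate the Dice coefficient; the final score is the mean over all validation samples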
        total += dice_coeff(mask_pred, mask, args.device).cpu().item()
    current_score = total / (i + 1)
    global best_score
    print('current score is %f' % current_score)
    print('best score is %f' % best_score)
    if current_score > best_score:
        best_score = current_score
        print('current best score is {}'.format(best_score))
        if args.save_cp:
            print('saving checkpoint')
            mkdir_p('checkpoint')
            torch.save(net.state_dict(), './checkpoint/unet.pth')

    return best_score
Example #2
def get_result(net, gpu=False):
    ids = get_ids(dir_img)

    val = get_imgs_and_masks(ids, dir_img, dir_mask, 1.0)

    val_dice = eval_net(net, val, gpu)
    print('Validation Dice Coeff: {}'.format(val_dice))
Example #3
def finetune(net,
             optimizer,
             criterion,
             trainset,
             log,
             path,
             iters=100,
             epochs=None,
             batch_size=2,
             gpu=True,
             scale=0.5):
    net.train()
    bce_meter = AverageMeter()

    dir_img = 'data/train/'
    dir_mask = 'data/train_masks/'

    if epochs is None:  # Fine-tune using iterations of mini-batches
        epochs = 1
    else:  # Fine-tune using entire epochs
        iters = None

    for e in range(epochs):
        # reset the generators
        train = get_imgs_and_masks(trainset, dir_img, dir_mask, scale)

        with tqdm(total=len(trainset)) as progress_bar:
            for i, b in enumerate(batch(train, batch_size)):
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])

                imgs = torch.from_numpy(imgs)
                true_masks = torch.from_numpy(true_masks)

                if gpu:
                    imgs = imgs.cuda()
                    true_masks = true_masks.cuda()

                masks_pred = net(imgs).squeeze()

                loss = criterion(masks_pred, true_masks)
                bce_meter.update(loss.item(), batch_size)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=e, BCE=bce_meter.avg)

                if i == 0 and e == 0:
                    log.info("FLOPs after pruning: \n{}".format(
                        flops_count(net, imgs.shape[2:])))

                if i == iters:  # Stop finetuning after sufficient mini-batches
                    break

    log.info("Finished finetuning")
    log.info("Finetuned loss: {}".format(bce_meter.avg))
    torch.save(net.state_dict(), path)
    log.info('Saving finetuned to {}...'.format(path))
Example #4
def train(net, iddataset, dir_img, dir_mask, optimizer, criterion, args):
    n_train = len(iddataset['train'])

    # criterion = nn.BCELoss()
    net.train()
    if args.gpu:
        net.to(args.device)

    train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask)

    epoch_loss = 0.
    for i, b in enumerate(batch(train, args.batchsize)):
        imgs = np.array([data[0] for data in b]).astype(np.float32)
        masks = np.array([data[1] for data in b]).astype(np.float32)

        imgs = torch.from_numpy(imgs).unsqueeze(1).float()
        masks = torch.from_numpy(masks).float()

        if args.gpu:
            imgs = imgs.to(args.device)
            masks = masks.to(args.device)
        optimizer.zero_grad()
        mask_pred = net(imgs)
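        # flatten predictions and targets so the pixel-wise criterion compares them element-wise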
        mask_prob_flat = mask_pred.view(-1)
        masks_flat = masks.view(-1)

        loss = criterion(mask_prob_flat, masks_flat)
        epoch_loss += loss.cpu().item()

        loss.backward()
        optimizer.step()
        print('training progress:{0:.4f} --- loss: {1:.6f}'.format(
            i * args.batchsize / n_train, loss.item()))
Example #5
def train_net(net,
              device,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.15,
              save_cp=True,
              img_scale=0.5):
    ids = get_ids(dir_img)

    iddataset = split_train_val(ids, val_percent)

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Training size:   {len(iddataset["train"])}
        Validation size: {len(iddataset["val"])}
        Checkpoints:     {save_cp}
        Device:          {device.type}
        Images scaling:  {img_scale}
    ''')

    n_train = len(iddataset['train'])
    n_val = len(iddataset['val'])
    optimizer = optim.Adam(net.parameters(), lr=lr)
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()

    for epoch in range(epochs):
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0
        f1_score = 0
        num = 0
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}',
                  unit='img') as pbar:
            for i, b in enumerate(batch(train, batch_size)):
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])

                imgs = torch.from_numpy(imgs)
                true_masks = torch.from_numpy(true_masks)

                imgs = imgs.to(device=device)
                true_masks = true_masks.to(device=device)

                masks_pred = net(imgs)
                # print('mask:',masks_pred.size())
                # print('lab:',true_masks.size())
                loss = criterion(masks_pred, true_masks)
                masks_pred_np = masks_pred.detach().cpu().numpy()
                true_masks_np = true_masks.detach().cpu().numpy()
                epoch_loss += loss.item()

                # print("----------------------------------------")
                # print('masks_pred',type(masks_pred),masks_pred,'\n')
                # print('true_masks',type(true_masks),true_masks,'\n')
                # print('mask:',masks_pred.size(),'\n')
                # print('lab:',true_masks.size(),'\n')
                pre_2D = np.array(masks_pred_np[0][0])
                true_2D = np.array(true_masks_np[0][0])
                pre_2D_threshold = pre_2D.copy()  # copy so thresholding does not modify pre_2D in place
                pre_2D_threshold[pre_2D_threshold > 0.5] = 1
                pre_2D_threshold[pre_2D_threshold <= 0.5] = 0
                # print("pre_2D.shape",pre_2D.shape,'\n')
                # print("true_2D.shape" ,true_2D.shape,'\n')
                # print("true_2D.flatten()",true_2D.flatten(),'\n')
                # print("pre_2D.flatten()",pre_2D.flatten(),'\n')
                pixel_accuracy = (pre_2D_threshold == true_2D).mean()
                f1_score += metrics.f1_score(true_2D.flatten(),
                                             pre_2D_threshold.flatten())
                num = num + 1
                # print("----------------------------------------")

                # val_score1 = eval_net1(net,val,device,n_val)

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                pbar.update(batch_size)

        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
            torch.save(net.state_dict(),
                       dir_checkpoint + f'CP_epoch{epoch + 1}.pth')
            logging.info(f'Checkpoint {epoch + 1} saved !')

        val_score = eval_net(net, val, device, n_val)
        f1_score /= num
        print("f1-score:", f1_score, '\n')
        if net.n_classes > 1:
            logging.info('Validation cross entropy: {}'.format(val_score))

        else:
            logging.info('Validation Dice Coeff: {}'.format(val_score))
Example #6
def train_net(net,
              device,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.15,
              save_cp=True,
              img_scale=0.5):
    ids = get_ids(dir_img)

    iddataset = split_train_val(ids, val_percent)

    n_train = len(iddataset['train'])
    n_val = len(iddataset['val'])
    optimizer = optim.Adam(net.parameters(), lr=lr)
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()

    for epoch in range(epochs):
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0
        with tqdm(total=n_train,
                  desc='Epoch {0}/{1}'.format(epoch + 1, epochs),
                  unit='img') as pbar:
            for i, b in enumerate(batch(train, batch_size)):
                current_lr = adjust_learning_rate(optimizer, epoch, epochs,
                                                  pbar.n, n_train)
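                # after the halfway point, apply random data augmentation with a rate that grows with the epoch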
                random_rate = 0
                if epoch > epochs / 2:
                    random_rate = (epoch * 0.1) / epochs
                    b = custom_transforms.random_data_augmentation(
                        b, random_rate=random_rate)

                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1][0] for i in b])

                imgs = torch.from_numpy(imgs)
                true_masks = torch.from_numpy(true_masks)

                imgs = imgs.to(device=device)
                true_masks = true_masks.to(device=device)

                masks_pred = net(imgs)
                loss = criterion(masks_pred, true_masks.long())
                epoch_loss += loss.item()

                pbar.set_postfix(
                    **{
                        'lr:{0}, random_rate:{1}, loss:'.format(
                            current_lr, random_rate):
                        loss.item()
                    })

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                pbar.update(batch_size)

        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP_epoch{0}.pth'.format(epoch + 1))
            logging.info('Checkpoint {0} saved !'.format(epoch + 1))

        val_score = eval_net(net, val, device, n_val)
        if net.n_classes > 1:
            logging.info('Validation cross entropy: {0}'.format(val_score))
        else:
            logging.info('Validation Dice Coeff: {0}'.format(val_score))
Example #7
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = 'data/train/'
    dir_mask = 'data/train_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()
    best_dice = 0.0

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])
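            # normalize mask values to the [0, 1] range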
            true_masks = true_masks / true_masks.max()

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs = torch.sigmoid(masks_pred)  # F.sigmoid is deprecated
            masks_probs_flat = masks_probs.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))
            with open('acc.log', 'a+') as w:
                w.write('epoch {}, acc:{}\n'.format(epoch, val_dice))

        if save_cp and val_dice > best_dice:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
            best_dice = val_dice
Example #8
from unet import UNet
import torch

ori_w, ori_h = 852, 480
dir_img = '/home/zhuzhu/Desktop/mid project/raw_data'
dir_mask = '/home/zhuzhu/Desktop/mid project/groundtruth'
ids = get_ids(dir_img)
iddataset = split_train_val(ids, 0.05)

net = UNet(1, 2)
net.eval()
net.load_state_dict(
    torch.load(
        '/media/zhuzhu/0C5809B80C5809B8/draft/unet/checkpoint/unet_0.854608765.pth',
        map_location='cpu'))
val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask)

c = 0
for i, b in enumerate(val):
    img = np.array(b[0]).astype(np.float32)
    mask = np.array(b[1]).astype(np.float32)

    with torch.no_grad():
        img = torch.from_numpy(img)[None, None, :, :]
        mask = torch.from_numpy(mask).unsqueeze(0)

        mask_pred = net(img)
        coal, gangue = mask_pred.data.numpy().reshape(2, -1)

        coal_fpr, coal_tpr, coal_th = roc_curve(
            mask.squeeze().numpy()[0].reshape(-1), coal)
Example #9
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)
    criterion = nn.BCELoss()

    dir_img = '../all/reduced/'
    dir_mask = '../all/masks/'
    dir_checkpoint = 'checkpoints/'

    # fetches ids in directory, without .jpg extension
    ids = get_ids(dir_img)

    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        # TODO: create a generator that yields each value of enumerate(batch(train, batch_size)),
        # or move the enumerate() call inside the try block somehow.

        epoch_loss = 0
        exception_amount = 0

        for i, b in enumerate(batch(train, batch_size)):
            try:
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])

                imgs = torch.from_numpy(imgs)
                true_masks = torch.from_numpy(true_masks)

                if gpu:
                    imgs = imgs.cuda()
                    true_masks = true_masks.cuda()

                masks_pred = net(imgs)
                masks_probs = torch.sigmoid(masks_pred)  # F.sigmoid is deprecated
                masks_probs_flat = masks_probs.view(-1)

                true_masks_flat = true_masks.view(-1)

                loss = criterion(masks_probs_flat, true_masks_flat)
                epoch_loss += loss.item()

                print('{0:.4f} --- loss: {1:.6f}'.format(
                    i * batch_size / N_train, loss.item()))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            except KeyboardInterrupt:
                torch.save(net.state_dict(), 'INTERRUPTED.pth')
                print('Saved interrupt')
                try:
                    sys.exit(0)
                except SystemExit:
                    os._exit(0)

            except Exception as ah:
                print(ah)
                exception_amount += 1
                if exception_amount > 20:
                    print("things aren't going well")

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        val_dice = eval_net(net, val, gpu)
        print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example #10
def train_net(net,
              device,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.15,
              save_cp=True,
              img_scale=0.5):
    ids = get_ids(dir_img)

    iddataset = split_train_val(ids, val_percent)

    logging.info(f'''Starting training:
        Epochs:          {epochs}
        Batch size:      {batch_size}
        Learning rate:   {lr}
        Training size:   {len(iddataset["train"])}
        Validation size: {len(iddataset["val"])}
        Checkpoints:     {save_cp}
        Device:          {device.type}
        Images scaling:  {img_scale}
    ''')

    n_train = len(iddataset['train'])
    n_val = len(iddataset['val'])
    optimizer = optim.Adam(net.parameters(), lr=lr)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.75)
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0
        with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}',
                  unit='img') as pbar:
            for i, b in enumerate(batch(train, batch_size)):
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])

                imgs = torch.from_numpy(imgs)
                true_masks = torch.from_numpy(true_masks)

                imgs = imgs.to(device=device)
                true_masks = true_masks.to(device=device)

                masks_pred = net(imgs)
                loss = criterion(masks_pred, true_masks)
                epoch_loss += loss.item()

                pbar.set_postfix(**{'loss (batch)': loss.item()})

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                pbar.update(batch_size)

        if save_cp:
            try:
                os.mkdir(dir_checkpoint)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
            torch.save(net.state_dict(), dir_checkpoint + 'gpu_3.pth')
            logging.info('gpu_3 saved !')

        val_dice = eval_net(net, val, device, n_val)
        logging.info('Validation Dice Coeff: {}'.format(val_dice))
Example #11
def train_net(net,
              train_dir=None,
              groundtruth_dir=None,
              weight_dir=None,
              weight_name='DeepInsthink',
              val_percent=0.05,
              epochs=5,
              batch_size=1,
              lr=0.1,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = train_dir
    dir_mask = groundtruth_dir
    dir_checkpoint = weight_dir

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    max_DSC = 0
    max_ep_checkpoint = 0
    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0
        batchN = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)
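            # note: nn.BCELoss expects probabilities, so the network is assumed to end in a sigmoid here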

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print('ep: {3:.0f} batch [{0:.0f}/{1:.0f}] - loss: {2:.6f}'.format(
                i + 1, N_train / batch_size, loss.item(), epoch + 1))
            #if (i % 5==0):
            #    val_dice = eval_net(net, val, gpu)
            #    print('Validation Dice Coeff: {}'.format(val_dice))
        val_dice = eval_net(net, val, gpu)
        print('Epoch {0:} -- Loss: {1:} -- Validation DSC: {2:}'.format(
            epoch, epoch_loss / i, val_dice))
        if (val_dice >= max_DSC):
            max_DSC = val_dice
            max_ep_checkpoint = epoch + 1
        if save_cp:
            torch.save(
                net.state_dict(),
                dir_checkpoint + weight_name + '-{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
    print('Maximum checkpoint is ' + weight_name +
          '-{0} with {1} DSC'.format(max_ep_checkpoint, max_DSC))
Example #12
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.2,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = '/media/workspace/DATASET/Eyes/STARE/stare-images'
    dir_mask = '/media/workspace/DATASET/Eyes/STARE/labels'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids, 4)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([w[0] for w in b]).astype(np.float32)
            true_masks = np.array([w[1] for w in b])
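            # masks are stored with foreground = 255; map it to 1 for the BCE target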
            true_masks[true_masks == 255] = 1

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            if not os.path.exists(dir_checkpoint):
                os.mkdir(dir_checkpoint)
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example #13
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_height=512,
              img_scale=0.5):

    #dir_img = 'carvana-image-masking-challenge/train/'
    #dir_mask = 'carvana-image-masking-challenge/train_masks/'

    dir_img = '/root/ctw/train_images_preprocess_other/'
    dir_mask = '/root/ctw/train_images_mask_preprocess_other/'


    #dir_img = '/root/ctw/val_images_preprocess_test/'
    #dir_mask = '/root/ctw/val_images_mask_preprocess_test/'
    dir_checkpoint = 'checkpoints/'

    ids = list(get_ids(dir_img))
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)


    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.Adam(net.parameters(), lr=lr)

   # optimizer = optim.SGD(net.parameters(),
   #                       lr=lr,
   #                       momentum=0.92,
   #                       weight_decay=0.0005)

    criterion = nn.BCELoss()
    #criterion = nn.MSELoss()
 
    #import scipy.misc
    iteration = 0
    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_height, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_height, img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            #print(i, len(b))
            """
            for j in b:
                #print(j[0].shape, j[1].shape)
                #print(j[1])
                #scipy.misc.toimage(j[0], cmin=0.0, cmax=1.0).save('%s_outfile.jpg'%count)
                #scipy.misc.toimage(j[1], cmin=0.0, cmax=1.0).save('%s_outmask.jpg'%count)
                count += 1
            """
            iteration += 1 
            try:            
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])
              
    #            print("\nImgs :  \n{}".format(np.unique(imgs)))
    #            print("\ntrue mask \n {} ".format(np.unique(true_masks)))
            #print('%s'%(datetime.datetime.now()), '{0:.4f}'.format(i * batch_size))
            except Exception as e:
                print(e)
                continue

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()
            if iteration % 100 == 0:
                print('iter %s' % iteration, '%s' % (datetime.datetime.now()),
                      '{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item()))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            val_iou = val_dice / (2 - val_dice)  # convert Dice to IoU: IoU = D / (2 - D)
            print('Validation Dice Coeff: {}'.format(val_dice))
            print('Validation iouScore : {}'.format(val_iou))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example #14
def fit(net,
        tf_writer,
        epochs=5,
        batch_size=1,
        lr=0.0001,
        val_percent=0.05,
        save_cp=True,
        gpu=False,
        img_scale=1,
        l2=1e-8,
        mom=0.9,
        n_classes=4,
        loss_function='bce',
        alpha_non_zero=1,
        resize_in=500):

    # dir_png = "data/caddata_line_v2_1_mini/png"
    # dir_mask = "data/caddata_line_v2_1_mini/mask"
    dir_png_train = os.path.join(args.data, 'train', 'png')
    dir_mask_train = os.path.join(args.data, 'train', 'mask')
    dir_png_val = os.path.join(args.data, 'val', 'png')
    dir_mask_val = os.path.join(args.data, 'val', 'mask')
    # dir_mask = "data/mini/mask"

    dir_checkpoint = os.path.join(args.log, 'checkpoints')
    if not os.path.isdir(dir_checkpoint):
        os.mkdir(dir_checkpoint)

    # train
    ids_train = get_ids(dir_png_train)
    ids_train = split_ids(ids_train, n=1)
    l_ids_train = list(ids_train)
    # val
    ids_val = get_ids(dir_png_val)
    ids_val = split_ids(ids_val, n=1)
    l_ids_val = list(ids_val)

    # iddataset = split_train_val(ids, val_percent)
    iddataset = {'train': l_ids_train, 'val': l_ids_val}

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    if loss_function == 'bce':
        criterion = nn.BCELoss()
    elif loss_function == 'mse':
        criterion = nn.MSELoss()
    # criterion = nn.CrossEntropyLoss()

    with open(os.path.join(args.log, 'log.txt'), 'w+') as f_log:
        for epoch in range(epochs):
            # print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
            # optimizer = optim.SGD(net.parameters(),
            #                       lr=lr * (args.lr_decay ** epoch),
            #                       momentum=mom,
            #                       weight_decay=l2)
            optimizer = optim.Adam(net.parameters(),
                                   lr=lr * (args.lr_decay**epoch),
                                   weight_decay=l2)
            # optimizer = optim.RMSprop(net.parameters(),
            #                           lr=lr * (args.lr_decay ** epoch),
            #                           weight_decay=l2)
            print("Current lr = {}".format(lr * (args.lr_decay**epoch)))
            f_log.write("Current lr = {}".format(lr * (args.lr_decay**epoch)) +
                        '\n')

            net.train()

            # shuffle training set
            random.shuffle(l_ids_train)
            iddataset = {'train': l_ids_train, 'val': l_ids_val}
            # reset the generators
            train = get_imgs_and_masks(iddataset['train'], dir_png_train,
                                       dir_mask_train, img_scale)
            val = get_imgs_and_masks(iddataset['val'], dir_png_val,
                                     dir_mask_val, img_scale)

            epoch_loss = 0
            epoch_tot = 0
            epoch_acc = 0
            epoch_acc_all = 0

            i_out = 0

            for i, b in enumerate(batch(train, batch_size)):
                imgs = np.array([j[0] for j in b]).astype(np.uint8)
                true_masks = np.array([j[1] for j in b]).astype(np.uint8)

                imgs_2 = np.zeros(
                    (imgs.shape[0], N_CHANNELS, resize_in, resize_in))
                true_masks_2 = np.zeros(
                    (true_masks.shape[0], resize_in, resize_in, n_classes))

                ## data augmentation
                for j in range(imgs.shape[0]):
                    img = imgs[j, :, :, :]
                    mask = true_masks[j, :, :, :]
                    # print(np.unique(mask))

                    pil_img = Image.fromarray(img, 'RGB')
                    pil_mask = Image.fromarray(mask, 'RGB')

                    ##debug##
                    # pil_img.show()
                    # pil_mask.show()
                    # print(np.unique(mask))

                    seed = np.random.randint(124521346)
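                    # reseed before each transform so image and mask receive the identical random crop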

                    # Resize Crop ratio: img
                    random.seed(seed)
                    pil_img = torchvision.transforms.RandomResizedCrop(
                        size=(resize_in),
                        scale=(0.8, 1.0),
                        interpolation=Image.NEAREST)(pil_img)
                    # Resize Crop ratio: true_masks
                    random.seed(seed)
                    pil_mask = torchvision.transforms.RandomResizedCrop(
                        size=(resize_in),
                        scale=(0.8, 1.0),
                        interpolation=Image.NEAREST)(pil_mask)
                    """
                    # rotate seed
                    random_degree = randrange(360)
                    # rotate: img
                    pil_img = torchvision.transforms.functional.rotate(pil_img, angle=random_degree)
                    # rotate: true_masks
                    pil_mask = torchvision.transforms.functional.rotate(pil_mask, angle=random_degree)
                    """

                    # color: img
                    # color: true_masks
                    arr_img = rgb_pil_to_bw_norm_arr(pil_img)
                    # print(np.unique(arr_img))

                    imgs_2[j, N_CHANNELS - 1, :, :] = arr_img
                    # print(np.unique(imgs_2))

                    arr_mask = np.array(pil_mask)
                    arr_mask_2 = np.zeros((resize_in, resize_in, n_classes))
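                    # build a one-hot style mask: channel in_c marks pixels whose label equals in_c + 1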
                    for in_c in range(n_classes):
                        in_1, in_2 = np.where(arr_mask[:, :, 0] == (in_c + 1))
                        arr_mask_2[in_1, in_2,
                                   in_c] = arr_mask[in_1, in_2,
                                                    0].astype(bool).astype(
                                                        np.float32)
                    true_masks_2[j, :, :, :] = arr_mask_2
                    # true_masks_2.astype(np.float32)
                    # print("======")
                    # print(np.unique(true_masks_2))

                ## To TorchTensor
                # imgs:
                # imgs = torch.from_numpy(imgs_2.astype(np.float32).transpose(0,3,1,2))
                imgs = torch.from_numpy(imgs_2.astype(np.float32))
                # true_masks:
                # true_masks = torch.from_numpy(true_masks_2.transpose(0,3,1,2))
                true_masks = torch.from_numpy(true_masks_2)

                # imgs = torch.from_numpy(imgs)
                # true_masks = torch.from_numpy(true_masks)
                # true_masks = np.transpose(true_masks, (0, 3, 1, 2))

                # assert imgs.size()[1] == N_CHANNELS
                # assert true_masks.size()[1] == n_classes
                # assert true_masks.size()[2] == imgs.size()[2]
                # assert true_masks.size()[3] == imgs.size()[3]

                if gpu:
                    imgs = imgs.cuda()
                    true_masks = true_masks.cuda()

                masks_pred = net(imgs)

                # view(-1)
                masks_probs_flat = masks_pred.view(-1)
                if gpu:
                    true_masks_flat = true_masks.view(-1)
                else:
                    true_masks_flat = true_masks.reshape(-1)
                true_masks_flat = true_masks_flat.float()

                # loss
                loss = criterion(masks_probs_flat, true_masks_flat)
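                # add an extra loss term over the non-zero (foreground) pixels, weighted by alpha_non_zero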
                in_nonzero = torch.nonzero(true_masks_flat)
                loss_nonzero = criterion(masks_probs_flat[in_nonzero],
                                         true_masks_flat[in_nonzero])
                if in_nonzero.size(0) != 0:
                    loss = loss + alpha_non_zero * loss_nonzero
                epoch_loss += loss.item()

                true_masks_flat_bin = true_masks_flat.unsqueeze(0)
                masks_probs_flat_bin = (masks_probs_flat >
                                        0.5).float().unsqueeze(0)
                this_dice = dice_coeff(masks_probs_flat_bin,
                                       true_masks_flat_bin).item()
                epoch_tot += this_dice

                # e = np.array(masks_probs_flat_bin.cpu())
                # f = np.array(true_masks_flat_bin.cpu())
                acc_train = iou(np.array(true_masks_flat_bin.cpu()),
                                np.array(masks_probs_flat_bin.cpu()))
                acc_train_all = iou_all(np.array(true_masks_flat_bin.cpu()),
                                        np.array(masks_probs_flat_bin.cpu()))
                epoch_acc += acc_train
                epoch_acc_all += acc_train_all

                if i % print_interval == print_interval - 1:
                    print(
                        '{0} / {1} steps. --- loss: {2:.6f}, IoU_train_nz: {3:.4f}, IoU_train_all: {4:.4f}, dice: {5:.4f}'
                        .format(i, int(N_train / batch_size),
                                epoch_loss / (i + 1), epoch_acc / (i + 1),
                                epoch_acc_all / (i + 1), epoch_tot / (i + 1)))
                    f_log.write(
                        '{0} / {1} steps. --- loss: {2:.6f}, IoU_train_nz: {3:.4f}, IoU_train_all: {4:.4f}, dice: {5:.4f}'
                        .format(i, int(N_train / batch_size), epoch_loss /
                                (i + 1), epoch_acc / (i + 1), epoch_acc_all /
                                (i + 1), epoch_tot / (i + 1)) + '\n')

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                i_out = i

            print(
                'Epoch {} finished ! Loss: {}, IoU: {}, IoU_all: {}, dice: {}'.
                format(epoch, epoch_loss / (i_out + 1),
                       epoch_acc / (i_out + 1), epoch_acc_all / (i_out + 1),
                       epoch_tot / (i + 1)))
            f_log.write(
                'Epoch finished ! Loss: {}, IoU: {}, IoU_all: {}, dice: {}'.
                format(epoch_loss / (i_out + 1), epoch_acc /
                       (i_out + 1), epoch_acc_all / (i_out + 1), epoch_tot /
                       (i + 1)) + '\n')
            tf_writer.add_scalar('data/train_loss', epoch_loss / (i_out + 1),
                                 epoch)
            tf_writer.add_scalar('data/train_iou', epoch_acc / (i_out + 1),
                                 epoch)
            tf_writer.add_scalar('data/train_iou_all',
                                 epoch_acc_all / (i_out + 1), epoch)
            tf_writer.add_scalar('data/train_dice', epoch_tot / (i_out + 1),
                                 epoch)

            ## Evaluate
            net.eval()
            tot_val = 0
            epoch_loss_val = 0
            epoch_acc_val = 0
            epoch_acc_val_all = 0
            for i_val, b_val in enumerate(batch(val, batch_size)):
                # imgs_val = np.array([j[0] for j in b_val]).astype(np.float32)
                # true_masks_val = np.array([j[1] for j in b_val])
                #
                # imgs_val = torch.from_numpy(imgs_val)
                # true_masks_val = torch.from_numpy(true_masks_val)
                # true_masks_val = np.transpose(true_masks_val, (0, 3, 1, 2))

                ####
                imgs_val = np.array([j[0] for j in b_val]).astype(np.uint8)
                true_masks_val = np.array([j[1]
                                           for j in b_val]).astype(np.uint8)

                imgs_2 = np.zeros(
                    (imgs_val.shape[0], N_CHANNELS, resize_in, resize_in))
                true_masks_2 = np.zeros(
                    (true_masks_val.shape[0], resize_in, resize_in, n_classes))

                ## data augmentation
                if imgs_val.shape[0] == batch_size:
                    for j in range(imgs_val.shape[0]):
                        img = imgs_val[j, :, :, :]
                        mask = true_masks_val[j, :, :, :]
                        # print(np.unique(mask))

                        pil_img = Image.fromarray(img, 'RGB')
                        pil_mask = Image.fromarray(mask, 'RGB')

                        # resize
                        pil_img = torchvision.transforms.Resize(
                            size=(resize_in),
                            interpolation=Image.NEAREST)(pil_img)
                        pil_mask = torchvision.transforms.Resize(
                            size=(resize_in),
                            interpolation=Image.NEAREST)(pil_mask)

                        # upload img and mask to imgs_2 and mask_2
                        arr_img = rgb_pil_to_bw_norm_arr(pil_img)
                        imgs_2[j, N_CHANNELS - 1, :, :] = arr_img

                        arr_mask = np.array(pil_mask)
                        arr_mask_2 = np.zeros(
                            (resize_in, resize_in, n_classes))
                        for in_c in range(n_classes):
                            in_1, in_2 = np.where(arr_mask[:, :,
                                                           0] == (in_c + 1))
                            arr_mask_2[in_1, in_2,
                                       in_c] = arr_mask[in_1, in_2,
                                                        0].astype(bool).astype(
                                                            np.float32)
                        true_masks_2[j, :, :, :] = arr_mask_2
                        # true_masks_2.astype(np.float32)
                        # print("======")
                        # print(np.unique(true_masks_2))

                    ## To TorchTensor
                    # imgs:
                    imgs_val = torch.from_numpy(imgs_2.astype(np.float32))
                    # true_masks:
                    true_masks_val = torch.from_numpy(true_masks_2)

                    if gpu:
                        imgs_val = imgs_val.cuda()
                        true_masks_val = true_masks_val.cuda()

                    masks_pred_val = net(imgs_val)
                    masks_probs_flat_val = masks_pred_val.view(-1)

                    if gpu:
                        true_masks_flat_val = true_masks_val.view(-1)
                    else:
                        true_masks_flat_val = true_masks_val.reshape(-1)
                    true_masks_flat_val = true_masks_flat_val.float()

                    true_masks_flat_bin_val = true_masks_flat_val.unsqueeze(0)
                    masks_probs_flat_bin_val = (masks_probs_flat_val >
                                                0.5).float().unsqueeze(0)
                    dice_val = dice_coeff(masks_probs_flat_bin_val,
                                          true_masks_flat_bin_val).item()

                    acc_val = iou(np.array(true_masks_flat_bin_val.cpu()),
                                  np.array(masks_probs_flat_bin_val.cpu()))
                    acc_val_all = iou_all(
                        np.array(true_masks_flat_bin_val.cpu()),
                        np.array(masks_probs_flat_bin_val.cpu()))
                    epoch_acc_val += acc_val
                    epoch_acc_val_all += acc_val_all

                    tot_val += dice_val

                    loss_val = criterion(masks_probs_flat_val,
                                         true_masks_flat_val)
                    in_nonzero = torch.nonzero(true_masks_flat_val)
                    loss_val_nonzero = criterion(
                        masks_probs_flat_val[in_nonzero],
                        true_masks_flat_val[in_nonzero])
                    if in_nonzero.size(0) != 0:
                        loss_val = loss_val + alpha_non_zero * loss_val_nonzero
                    epoch_loss_val += loss_val.item()  # accumulate the per-batch validation loss

            epoch_loss_val = epoch_loss_val / (i_val + 1)
            epoch_dice_val = tot_val / (i_val + 1)
            epoch_acc_val = epoch_acc_val / (i_val + 1)
            epoch_acc_val_all = epoch_acc_val_all / (i_val + 1)

            # val_dice = eval_net(net, val, gpu)
            print(
                '* Val: Loss: {0:.6f}, IoU: {1:.3f}, IoU_all: {2:.3f}, Dice: {3:.3f}'
                .format(epoch_loss_val, epoch_acc_val, epoch_acc_val_all,
                        epoch_dice_val))
            f_log.write(
                '* Val: Loss: {0:.6f}, IoU: {1:.3f}, IoU_all: {2:.3f}, Dice: {3:.3f}'
                .format(epoch_loss_val, epoch_acc_val, epoch_acc_val_all,
                        epoch_dice_val) + '\n')
            tf_writer.add_scalar('data/val_loss', epoch_loss_val, epoch)
            tf_writer.add_scalar('data/val_iou', epoch_acc_val, epoch)
            tf_writer.add_scalar('data/val_iou_all', epoch_acc_val_all, epoch)
            tf_writer.add_scalar('data/val_dice', epoch_dice_val, epoch)

            if save_cp and (epoch % save_interval == save_interval - 1):
                torch.save(net.state_dict(),
                           dir_checkpoint + '/CP{}.pth'.format(epoch + 1))
                print('Checkpoint {} saved !'.format(epoch + 1))
                f_log.write('Checkpoint {} saved !'.format(epoch + 1) + '\n')
Example #15
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.5,
              save_cp=True,
              gpu=True,
              img_scale=0.5):

    dir_img = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_few_images/'
    dir_mask = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_masks_few_images/'
    # dir_img = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_images/'
    # dir_mask = '/home/buiduchanh/WorkSpace/Unet/Pytorch-UNet/data/data_rust/train_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    #optimizer = optim.SGD(net.parameters(),
    #                      lr=lr,
    #                      momentum=0.9,
    #                      weight_decay=0.0005)
    optimizer = optim.Adam(net.parameters(),
                           lr=lr,
                           betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch {} finished ! Loss: {}'.format(epoch, epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example #16
def train_prune_net(net,
                    epochs=5,
                    batch_size=1,
                    lr=0.1,
                    val_percent=0.05,
                    save_cp=True,
                    gpu=False,
                    img_scale=0.5,
                    num_prune_iterations=100):

    dir_img = 'data/train/'
    dir_mask = 'data/train_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)
        epoch_loss = 0

        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))

        net.eval()
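        # sanity-check Dice on the first three validation samples before training and pruning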
        sub = list(enumerate(val))[:3]
        for i, b in sub:
            img = b[0]
            true_mask = b[1]

            img = torch.from_numpy(img).unsqueeze(0)
            true_mask = torch.from_numpy(true_mask).unsqueeze(0)

            if gpu:
                img = img.cuda()
                true_mask = true_mask.cuda()

            mask_pred = net(img)[0]
            mask_pred = (mask_pred > 0.5).float()

            val_dice = dice_coeff(mask_pred, true_mask).item()
            print('Validation Dice Coeff at batch {}: {}'.format(
                i + 1, val_dice))

        net.train()
        for i, b in enumerate(batch(train, batch_size)):
            if i > 2:
                break
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

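            # prune the network repeatedly after each optimizer step; print details only on the final prune pass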
            for j in range(num_prune_iterations):
                if j == num_prune_iterations - 1:
                    net.prune(verbose=True)
                else:
                    net.prune(verbose=False)

        summary(net, (3, 640, 640))

        for i, b in sub:
            img = b[0]
            true_mask = b[1]

            img = torch.from_numpy(img).unsqueeze(0)
            true_mask = torch.from_numpy(true_mask).unsqueeze(0)

            if gpu:
                img = img.cuda()
                true_mask = true_mask.cuda()

            mask_pred = net(img)[0]
            mask_pred = (mask_pred > 0.5).float()

            val_dice = dice_coeff(mask_pred, true_mask).item()
            print('Validation Dice Coeff at batch {}: {}'.format(
                i + 1, val_dice))

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        # val_dice = eval_net(net, val, gpu)
        # print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
Example #17
def train_net(net,
              epochs=30,
              batch_size=6,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

#    dir_img = 'E:/A_paper_thesis/paper5/tensorflow_deeplabv3plus_scrapingData/dataset/Scraping_Data2/train_db'
    dir_img = 'data/train_db/'
    dir_mask = 'data/GT_bw/'
    dir_checkpoint = 'checkpoint0919/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()
    

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_scale)

        epoch_loss = 0
        epoch_iou = 0
        epoch_xor=0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()
            
            print('step:', i)

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            # mean IoU on the flattened soft predictions;
            # take .item() so the metric does not keep the autograd graph alive
            smooth = 1e-6  # smoothing to avoid a 0/0 division
            intersect = (masks_probs_flat * true_masks_flat).sum()
            union = (masks_probs_flat + true_masks_flat).sum() - intersect
            iou = ((intersect + smooth) / (union + smooth)).item()
            epoch_iou += iou

            # XOR ratio: (union of prediction and ground truth - their intersection) / ground truth
            xor = ((union - intersect) / true_masks_flat.sum()).item()
            epoch_xor += xor

            print('mean IoU: {:.4f}'.format(iou))
            print('mean xor: {:.4f}'.format(xor))

            # end of mean IoU

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! epoch_Loss: {:.6f}'.format(epoch_loss / i))
        print('epoch_iou: {:.4f}'.format(epoch_iou / i))
        print('epoch_xor: {:.4f}'.format(epoch_xor / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('epoch_Validation Dice Coeff: {:.4f}'.format(val_dice))
            # TODO: also compute and report the mean IoU of the validation set here (alongside val_dice)
          

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
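Note: the TODO after eval_net above asks for a mean IoU over the validation set. A sketch of such an evaluation pass, assuming the same (img, mask) generator and a sigmoid-output network as in this example (an illustration, not code from the original repository):

import numpy as np
import torch


def eval_mean_iou(net, samples, gpu=False, threshold=0.5, smooth=1e-6):
    """Mean IoU of thresholded predictions over an iterable of (img, mask) pairs (sketch)."""
    net.eval()
    ious = []
    with torch.no_grad():
        for img, mask in samples:
            img = torch.from_numpy(np.asarray(img, dtype=np.float32)).unsqueeze(0)
            mask = torch.from_numpy(np.asarray(mask, dtype=np.float32))
            if gpu:
                img, mask = img.cuda(), mask.cuda()
            pred = (net(img)[0] > threshold).float().view(-1)
            mask = mask.view(-1)
            intersect = (pred * mask).sum()
            union = pred.sum() + mask.sum() - intersect
            ious.append(((intersect + smooth) / (union + smooth)).item())
    net.train()
    return sum(ious) / max(len(ious), 1)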
Beispiel #18
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = 'data/train/'  # folder with the training images
    dir_mask = 'data/train_masks/'  # folder with the corresponding masks
    dir_checkpoint = 'checkpoints/'  # folder where trained checkpoints are saved

    ids = get_ids(dir_img)  # strip the 4-character extension from each file name and use the rest as the image id

    # yields tuples [(id1,0),(id1,1),(id2,0),(id2,1),...,(idn,0),(idn,1)]
    # when the generators are reset later, the trailing 0/1 is passed as the pos argument of get_square in utils.py:
    # pos=0 takes the left part of the image, pos=1 the right part, so the number of images is doubled
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)  # split the data into a training set and a validation set

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])  # size of the training set

    optimizer = optim.SGD(
        net.parameters(),  # define the optimizer
        lr=lr,
        momentum=0.9,
        weight_decay=0.0005)

    criterion = nn.BCELoss()  # loss function

    for epoch in range(epochs):  # start training
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()  # switch to training mode

        # reset the generators
        # apply the same preprocessing (resize, crop, transpose, normalize) to the images in dir_img
        # and the masks in dir_mask, then pair them up and return (imgs_normalized, masks)
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)  # input image data
            true_masks = np.array([i[1] for i in b])  # ground-truth mask data

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)  # feed the images through the network; masks_pred is a grayscale map
            masks_probs_flat = masks_pred.view(-1)  # flatten the predictions

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)  # compute the loss between prediction and ground truth
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss /
                                                 i))  # mean loss over this epoch

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
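Note: every example here consumes the training generator through a batch() utility that is not shown in these snippets. A minimal sketch of what such a helper might look like (an assumption about its behaviour, not the original implementation):

def batch(iterable, batch_size):
    """Yield successive lists of up to `batch_size` items from a generator (sketch)."""
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == batch_size:
            yield chunk
            chunk = []
    if chunk:  # flush the final, possibly smaller, batch
        yield chunk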
Beispiel #19
0
def train_net(args, net, val_percent=0.05, save_cp=True):

    dir_img = os.path.join(args.dataset_folder, 'data/train/')
    dir_mask = os.path.join(args.dataset_folder, 'data/train_masks/')
    dir_checkpoint = os.path.join(args.dataset_folder, 'checkpoints/')
    if not os.path.exists(dir_checkpoint):
        os.makedirs(dir_checkpoint)

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
    '''.format(args.epochs, args.batch_size, args.lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(args.epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, args.epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   args.img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 args.img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, args.batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            # if gpu:
            imgs = imgs.cuda()
            true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(
                i * args.batch_size / N_train, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
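Note: split_train_val is used by all of these examples but its definition is not included here. One plausible sketch that shuffles the ids and splits them by val_percent (an assumption, not the original helper, which in some examples also takes a split file):

import random


def split_train_val(ids, val_percent=0.05, seed=0):
    """Shuffle the ids and split them into a {'train': [...], 'val': [...]} dict (sketch)."""
    ids = list(ids)
    random.Random(seed).shuffle(ids)
    n_val = int(len(ids) * val_percent)
    return {'train': ids[n_val:], 'val': ids[:n_val]}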
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=1e-3,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = '/home/xyj/data/spacenet/vegas/images_rgb_1300/'
    dir_mask = '/home/xyj/test/Pytorch-UNet/data/train_mask_point/'
    dir_checkpoint = 'checkpoints_point/'

    if not os.path.exists(dir_checkpoint):
        os.mkdir(dir_checkpoint)

    # ids = get_ids(dir_img)  # returns a generator of the file names in the train folder (without the last 4 characters, e.g. the '.jpg' extension)
    with open('train_list.txt', 'r') as f:
        lines = f.readlines()
        ids = (i.strip('\n')[:-4] for i in lines)

    ids = split_ids(
        ids)  # yields (id, i) tuples with id from ids and i in range(n), effectively multiplying the training images by n; a generator of tuples

    iddataset = split_train_val(
        ids, val_percent
    )  # split by the validation percentage; a dict {"train": [...], "val": [...]}

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    #     optimizer = optim.SGD(net.parameters(),
    #                           lr=lr,
    #                           momentum=0.9,
    #                           weight_decay=0.0005)
    optimizer = optim.Adam(net.parameters(),
                           lr=lr,
                           betas=(0.9, 0.999),
                           eps=1e-3)
    #     scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=40,gamma = 0.3)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)
        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] // 200 for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


#             scheduler.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
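Note: the SpaceNet example above leaves a StepLR scheduler commented out. If it were enabled, the usual pattern is to step it once per epoch, after the optimizer has stepped. A self-contained sketch of the wiring (the Conv2d stand-in model is hypothetical; the scheduler settings mirror the commented-out line):

import torch.nn as nn
import torch.optim as optim

# A toy layer stands in for the U-Net; the point is only the scheduler wiring.
model = nn.Conv2d(3, 1, kernel_size=3, padding=1)
optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-3)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.3)

for epoch in range(5):
    # ... per-batch training as in the example above: forward, loss.backward(), optimizer.step()
    optimizer.step()   # placeholder step so this sketch runs on its own
    scheduler.step()   # decay the learning rate once per epoch, after the optimizer step
    print(epoch, scheduler.get_last_lr())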
Beispiel #21
0
def train_net(net,
              epochs=20,
              batch_size=1,
              lr=0.1,
              lrd=0.99,
              val_percent=0.05,
              save_cp=True,
              gpu=True,
              img_scale=0.5,
              imagepath='',
              maskpath='',
              cpsavepath=''):

    dir_img = imagepath
    dir_mask = maskpath
    dir_checkpoint = cpsavepath
    classweight = [1, 2, 3, 2]

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    logname = cpsavepath + '/' + 'losslog.txt'

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    # classweight = [1,4,8,4]
    criterion = BCELoss_weight(classweight)

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        with open(logname, "a") as f:
            f.write('Starting epoch {}/{}.'.format(epoch + 1, epochs) + "\n")
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        lr = lr * lrd
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        print('lr', lr)
        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            true_masks = np.transpose(true_masks, axes=[0, 3, 1, 2])
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            # the per-channel weighted loss expects the full NCHW tensors,
            # so predictions and targets are deliberately not flattened here
            masks_probs_flat = masks_pred

            true_masks_flat = true_masks
            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            printinfo = '{0:.4f} --- loss: {1:.6f}'.format(
                i * batch_size / N_train, loss.item())
            print(printinfo)

            with open(logname, "a") as f:
                f.write(printinfo + "\n")

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))
        with open(logname, "a") as f:
            f.write('Epoch finished ! Loss: {}'.format(epoch_loss / i) + "\n")
        if 1:
            val_dice = eval_net(net, val)
            print('Validation Dice Coeff: {}'.format(val_dice))
            with open(logname, "a") as f:
                f.write('Validation Dice Coeff: {}'.format(val_dice) + "\n")

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
            with open(logname, "a") as f:
                f.write('Checkpoint {} saved !'.format(epoch + 1) + "\n")
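Note: BCELoss_weight(classweight) is used above but its definition is not included in these snippets. A hypothetical per-channel weighted BCE of the kind the class-weight list suggests could look like the sketch below (an assumption about the intent, not the original loss; the class name is made up):

import torch
import torch.nn as nn


class WeightedChannelBCE(nn.Module):
    """Hypothetical per-class weighted BCE for NCHW probability maps (sketch)."""

    def __init__(self, class_weights):
        super().__init__()
        # one weight per output channel, mirroring classweight = [1, 2, 3, 2] above
        self.register_buffer('w', torch.tensor(class_weights, dtype=torch.float32))

    def forward(self, probs, targets):
        # probs, targets: (N, C, H, W) with probabilities in [0, 1]
        bce = nn.functional.binary_cross_entropy(probs, targets, reduction='none')
        w = self.w.view(1, -1, 1, 1)  # broadcast over batch and spatial dims
        return (bce * w).mean()

# Example usage (hypothetical): criterion = WeightedChannelBCE([1, 2, 3, 2])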
Beispiel #22
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5,
              dir_img=None,
              dir_mask=None,
              dir_checkpoint=None,
              channels=1,
              classes=1):

    ids = os.listdir(dir_img)

    if not os.path.exists(dir_checkpoint):
        os.makedirs(dir_checkpoint, mode=0o755)

    iddataset = split_train_val(ids, val_percent)

    print('Starting training:')
    print('Epochs: ' + str(epochs))
    print('Batch size: ' + str(batch_size))
    print('Learning rate: ' + str(lr))
    print('Training size: ' + str(len(iddataset['train'])))
    print('Validation size: ' + str(len(iddataset['val'])))
    print('Checkpoints: ' + str(save_cp))

    N_train = len(iddataset['train'])

    optimizer = optim.RMSprop(net.parameters(), lr=lr)
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        # Run Batch
        for i, b in enumerate(batch(train, batch_size)):

            # Grab data
            try:
                imgs = np.array([i[0] for i in b]).astype(np.float32)
                true_masks = np.array([i[1] for i in b])
            except Exception:
                print('probable dimension issue: wrong orientation or half-reconstructed images')
                continue  # skip this batch rather than reusing stale arrays
            # Deal with dimension issues
            if channels == 1:
                imgs = np.expand_dims(imgs, 1)
            if classes > 1:
                true_masks = to_categorical(true_masks, num_classes=classes)

            # Play in torch's sandbox
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            # Send to GPU
            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            # Predicted segmentations
            masks_pred = net(imgs)

            # Flatten
            masks_probs_flat = masks_pred.view(-1)
            true_masks_flat = true_masks.view(-1)

            # Calculate losses btwn true/predicted
            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            # Batch Loss
            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            # Backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Epoch Loss
        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, epoch, dir_checkpoint, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(
                net.state_dict(),
                os.path.join(dir_checkpoint, 'CP{}.pth'.format(epoch + 1)))
            print('Checkpoint {} saved !'.format(epoch + 1))
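Note: to_categorical is called above for multi-class masks but not defined in these snippets. A small NumPy one-hot sketch, modeled on the Keras utility of the same name (an assumption, not the original helper):

import numpy as np


def to_categorical(labels, num_classes):
    """One-hot encode an integer label array of shape (N, H, W) to (N, H, W, num_classes) (sketch)."""
    labels = np.asarray(labels, dtype=np.int64)
    return np.eye(num_classes, dtype=np.float32)[labels]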
Beispiel #23
0
    from utils import split_train_val, batch, get_imgs_and_masks, inputImageViz
    # get all values that you require from your input
    args = get_args()    
    dir_img = args.images
    dir_mask = args.masks
    dir_checkpoint = 'checkpoints/'
    
    val_percent = args.valpercent
    scale = args.scale
    maxWidth = args.maxWidth
    maxHeight = args.maxHeight
    n_classes= args.classes
    
    iddataset = split_train_val(dir_img, val_percent)
    
    train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, scale, maxWidth, maxHeight, n_classes)
    val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, scale, maxWidth, maxHeight, n_classes)

    for epoch in range(args.epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, args.epochs))
        #net.train()
        for i, k in enumerate(batch(val, args.batchsize)):
            print(i)
            imgs = np.array([i[0] for i in k]).astype(np.float32)
            true_masks = np.array([i[1] for i in k])
            # comment this out if visualization is not required
            inputImageViz(imgs, true_masks)
            #------------ your training code here!!!--------------------
        
        
        
Beispiel #24
0
def train_net(args,
              net,
              epochs,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    global best_dice, best_loss
    dir_img = '/home/mori/Programming/Net_Pruning/unetdataset_patchImg/img/'
    dir_mask = '/home/mori/Programming/Net_Pruning/unetdataset_patchImg/graylabel/'

    ids = get_ids(dir_img)  # get file name (without .png)
    print("ids:{}".format(ids))

    ids = split_ids(ids)  # resample?
    print("ids:{}".format(ids))
    iddataset = split_train_val(ids, val_percent)  # shuffle and split the dataset by the given ratio

    ###### count parameters  ############
    paras = sum([p.data.nelement() for p in net.parameters()])

    print('''
    Starting training:
        Epochs: {}
        Parameters: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
        Deepsupervision: {}
    '''.format(epochs, paras, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu),
               str(args.deepsupervision)))

    N_train = len(iddataset['train'])
    print("N_train:{}".format(N_train))
    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        New_lr = adjust_learning_rate(optimizer, epoch, epochs)
        print(' lr: {}'.format(New_lr))
        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        best_iou = 0
        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):  # form batches manually
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            true_masks_flat = true_masks.view(-1)
            true_masks_flat = true_masks_flat / 255  # normalize to [0, 1]

            output = net(imgs)
            masks_pred = F.sigmoid(output)

            if args.deepsupervision:
                #### unet++ with deepsupervision
                loss = 0
                for mp in masks_pred:
                    masks_probs_flat = mp.view(-1)
                    loss += criterion(masks_probs_flat, true_masks_flat)
                loss /= len(masks_pred)
                epoch_loss += loss.item()
            else:
                masks_probs_flat = masks_pred.view(-1)
                loss = criterion(masks_probs_flat, true_masks_flat)
                epoch_loss += loss.item()

                ## todo: adjust iou
                iou = iou_score(output, true_masks / 255)

                ######## record the best iou (iou is only computed on this non-deepsupervision branch)
                if iou > best_iou:
                    best_iou = iou

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            newloss = loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))
        print('Best iou: {}'.format(best_iou))
        val_dice = eval_net(net, val, gpu)
        print('Validation Dice Coeff: {}'.format(val_dice))

        writer.add_scalar('train_loss', epoch_loss / i, (epoch + 1))
        writer.add_scalar('val_dice', val_dice, (epoch + 1))
        writer.add_scalar('best iou', best_iou, (epoch + 1))

        if save_cp:
            #torch.save(net.state_dict(),dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            #print('Checkpoint {} saved !'.format(epoch + 1))
            dice_best = val_dice > best_dice
            loss_best = epoch_loss / i < best_loss
            best_dice = max(val_dice, best_dice)
            best_loss = min(epoch_loss / i, best_loss)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': net.state_dict(),
                    'best_dice': best_dice,
                    'best_loss': best_loss,
                }, dice_best, loss_best)

    print('Best dice: ', best_dice)
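Note: adjust_learning_rate(optimizer, epoch, epochs) is called above without its definition. A plausible polynomial-decay sketch that matches the call signature (an assumption, not the original helper):

def adjust_learning_rate(optimizer, epoch, total_epochs, base_lr=0.1, power=0.9):
    """Polynomial learning-rate decay; sets and returns the new lr (sketch)."""
    new_lr = base_lr * (1 - epoch / float(total_epochs)) ** power
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
    return new_lr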
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.2,
              save_cp=True,
              gpu=False,
              img_scale=0.5):
    path = [['data/ori1/', 'data/gt1/'],
            ['data/original1/', 'data/ground_truth1/'],
            ['data/Original/', 'data/Ground_Truth/']]
    dir_img = path[0][0]
    dir_mask = path[0][1]
    dir_checkpoint = 'sdgcheck/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.7,
                          weight_decay=0.005)
    '''
    optimizer = optim.Adam(net.parameters(),
                      lr=lr,

                      weight_decay=0.0005)
    '''
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0
        x = 0
        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])
            '''
            ori=np.transpose(imgs[0], axes=[1, 2, 0])   
            scipy.misc.imsave("ori/ori_"+str(x)+'.jpg', ori)
            
            gt = np.stack((true_masks[0],)*3, axis=-1)
            
            #gt=np.transpose(true_masks[0], axes=[1, 2, 0])
            scipy.misc.imsave("gt/gt_"+str(x)+'.jpg', gt)
            '''
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            x += 1
            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
def train_net(
        net,
        epochs=5,
        batch_size=1,
        lr=0.1,
        val_percent=0.05,  # train:val split = 0.95:0.05
        save_cp=True,
        gpu=False,
        img_scale=0.5):

    dir_img = opt_train.dir_img
    dir_mask = opt_train.dir_mask
    dir_checkpoint = opt_train.dir_checkpoint

    # get the list of image names (ids are the file names without the extension)
    ids = get_ids(dir_img)
    # yields tuples of (file name without extension, index)
    # e.g. with n=2 and an image named bobo.jpg this produces (bobo,0) and (bobo,1)
    # index 0 crops the left part of the width, index 1 the right part
    ids = split_ids(ids)
    # shuffle the dataset, then split it into a training set and a validation set by val_percent
    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        GPU: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(gpu)))

    # training set size
    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    # binary cross-entropy loss
    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))

        # reset the generators
        # build fresh training and validation generators each epoch
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        # reset the epoch loss counter
        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            # get one batch of image tensors and the corresponding ground-truth masks
            # index 0 crops the left part of the width [384,384,3], index 1 the right part [384,190,3]
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            # convert the arrays to torch tensors
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            # move the training data to the GPU
            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            # get the predicted masks from the network, shape [10,1,384,384]
            masks_pred = net(imgs)
            # apply sigmoid
            masks_probs = F.sigmoid(masks_pred)
            masks_probs_flat = masks_probs.view(-1)

            true_masks_flat = true_masks.view(-1)
            # compute the binary cross-entropy loss
            loss = criterion(masks_probs_flat, true_masks_flat)
            # accumulate the batch losses so the mean loss of the epoch can be reported
            epoch_loss += loss.item()

            # print the progress within the current epoch and the loss of this batch
            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            # zero the optimizer gradients
            optimizer.zero_grad()
            # backpropagate
            loss.backward()
            # update the parameters
            optimizer.step()

        # end of epoch: report the mean loss over the epoch
        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        # evaluate on the validation set after every epoch
        if True:
            # metric: the Dice coefficient, which measures the similarity of two sets
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        # save the model
        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
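Note: the Dice coefficient mentioned in the comments above (and used by eval_net throughout these examples) is not defined in the snippets. A common soft-Dice sketch over flattened tensors, shown only as an illustration and not as the repository's dice_coeff:

import torch


def dice_coefficient(pred, target, eps=1e-6):
    """Soft Dice: 2*|A∩B| / (|A|+|B|) for tensors of matching shape with values in [0, 1] (sketch)."""
    pred = pred.contiguous().view(-1)
    target = target.contiguous().view(-1)
    intersection = (pred * target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)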
Beispiel #27
0
            indent=4,
            sort_keys=True)))

    # Dataset
    if not os.path.exists(splitfile):  # Our constant datasplit
        ids = get_ids(dir_img)  # [file1, file2]
        ids = split_ids(ids)  # [(file1, 0), (file1, 1), (file2, 0), ...]
        iddataset = split_train_val(ids, 0.2, splitfile)
        log.info("New split dataset")

    else:
        with open(splitfile) as f:
            iddataset = json.load(f)
        log.info("Load split dataset")

    train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                               args.scale)
    val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, args.scale)

    # Model Initialization
    net = UNet(n_channels=3, n_classes=1, f_channels=args.channel_txt)
    log.info("Built model using {}...".format(args.channel_txt))
    if args.gpu:
        net.cuda()
    if args.load:
        net.load_state_dict(torch.load(args.load))
        log.info('Loading checkpoint from {}...'.format(args.load))

    pruner = Pruner(net)  # Pruning handler
    criterion = nn.BCELoss()

    # Ranking on the train dataset
Beispiel #28
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    dir_img = '../dataset/train/images/'
    dir_mask = '../dataset/train/masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print(('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu))))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print(('Starting epoch {}/{}.'.format(epoch + 1, epochs)))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask, img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask, img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)): # b[batch_id][0/1]: a batch of image(0)+mask(1)
            #print(('b[0]',b[0][0].shape,b[0][1].shape))
            #imgs = []
            #for img_msk in b:
            #    imgs.append(img_msk[0])
            #print(len(imgs))
            #imgs = np.array(imgs) # Wrong: not all images are of the same shape
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print(('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item())))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(('Epoch finished ! Loss: {}'.format(epoch_loss / i)))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print(('Validation Dice Coeff: {}'.format(val_dice)))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'cropped_CP{}.pth'.format(epoch + 1))
            print(('Checkpoint {} saved !'.format(epoch + 1)))
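Note: the commented-out block in the example above points out that np.array fails on a list of differently sized crops. If a batch really does contain mixed shapes, one option is to zero-pad every image to the largest height and width before stacking; a sketch under that assumption (not part of the original code):

import numpy as np


def pad_and_stack(images, pad_value=0.0):
    """Zero-pad CHW images to a common (H, W) and stack them into one NCHW float32 array (sketch)."""
    max_h = max(img.shape[1] for img in images)
    max_w = max(img.shape[2] for img in images)
    padded = []
    for img in images:
        pad_h, pad_w = max_h - img.shape[1], max_w - img.shape[2]
        padded.append(np.pad(img, ((0, 0), (0, pad_h), (0, pad_w)),
                             mode='constant', constant_values=pad_value))
    return np.stack(padded).astype(np.float32)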
Beispiel #29
0
def train_net(net,
              epochs=5,
              batch_size=1,
              lr=0.1,
              val_percent=0.05,
              save_cp=True,
              gpu=False,
              img_scale=0.5):

    # dir_img = 'data/train/'
    # dir_mask = 'data/train_masks/'
    dir_img = 'E:/git/dataset/tgs-salt-identification-challenge/train/images/'
    dir_mask = 'E:/git/dataset/tgs-salt-identification-challenge/train/masks/'
    # dir_img = 'E:/git/dataset/tgs-salt-identification-challenge/train/my_images/'
    # dir_mask = 'E:/git/dataset/tgs-salt-identification-challenge/train/my_masks/'
    dir_checkpoint = 'checkpoints/'

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):
            imgs = np.array([i[0] for i in b]).astype(np.float32)
            # true_masks = np.array([i[1] for i in b])#np.rot90(m)
            true_masks = np.array([i[1].T / 65535 for i in b])  #np.rot90(m)

            # show_batch_image(true_masks)
            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            # show_batch_image(imgs)

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                                     loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))
def train_net(net,
              epochs=50,
              batch_size=1,
              lr=0.1,
              val_percent=0.1,
              save_cp=True,
              gpu=True,
              img_scale=[513, 513]):

    #dir_img = '/home/lixiaoxing/data/DRIVE/train/'
    #dir_mask = '/home/lixiaoxing/data/DRIVE/trainannot/'
    dir_img = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/images_jpg/'
    dir_mask = '/home/lixiaoxing/github/Pytorch-UNet/data/DRIVE/AV_groundTruth/training/vessel/'
    dir_checkpoint = 'checkpoints/'
    if os.path.exists(dir_checkpoint) is False:
        os.makedirs(dir_checkpoint)

    ids = get_ids(dir_img)
    ids = split_ids(ids)

    iddataset = split_train_val(ids, val_percent)

    print('''
    Starting training:
        Epochs: {}
        Batch size: {}
        Learning rate: {}
        Training size: {}
        Validation size: {}
        Checkpoints: {}
        CUDA: {}
    '''.format(epochs, batch_size, lr, len(iddataset['train']),
               len(iddataset['val']), str(save_cp), str(gpu)))

    N_train = len(iddataset['train'])

    optimizer = optim.SGD(net.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=0.0005)

    criterion = nn.BCELoss()

    for epoch in range(epochs):
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        net.train()

        # reset the generators
        train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask,
                                   img_scale)
        val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask,
                                 img_scale)
        #print(train)

        epoch_loss = 0

        for i, b in enumerate(batch(train, batch_size)):

            imgs = np.array([i[0] for i in b]).astype(np.float32)
            true_masks = np.array([i[1] for i in b])

            imgs = torch.from_numpy(imgs)
            true_masks = torch.from_numpy(true_masks)

            if gpu:
                imgs = imgs.cuda()
                true_masks = true_masks.cuda()

            # up features
            #print('**********************up**************************')
            #up_feature = net.extract_features(imgs)
            #print(up_feature.shape)
            #ff = net._blocks[38]._depthwise_conv

            masks_pred = net(imgs)
            masks_probs_flat = masks_pred.view(-1)

            true_masks_flat = true_masks.view(-1)
            #print(true_masks_flat.shape)
            #print(masks_probs_flat.shape)

            loss = criterion(masks_probs_flat, true_masks_flat)
            epoch_loss += loss.item()

            #print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train, loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print('Epoch finished ! Loss: {}'.format(epoch_loss / i))

        if 1:
            val_dice = eval_net(net, val, gpu)
            print('Validation Dice Coeff: {}'.format(val_dice))

        if save_cp:
            torch.save(net.state_dict(),
                       dir_checkpoint + 'CP{}.pth'.format(epoch + 1))
            print('Checkpoint {} saved !'.format(epoch + 1))