Example no. 1
0
        # print(f"conf: {conf_loss}, clas: {class_loss}, box: {box_loss}")
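        # total YOLO loss: sum of the confidence, classification and box-regression terms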
        loss = conf_loss + class_loss + box_loss
        return loss


if __name__ == '__main__':
    from dataset import DarknetDataset
    from torchvision import transforms
    from transforms import PadToSquare, Rescale, SampleToYoloTensor
    from torch.utils.data import DataLoader

    from model import YOLO

    train_path = "data/train.txt"

    composed = transforms.Compose([PadToSquare(), Rescale(448), SampleToYoloTensor(7, 4)])
    image_dataset = DarknetDataset(train_path, transform=composed)

    dataloader = DataLoader(image_dataset, batch_size=2, shuffle=False, num_workers=4)

    classes = 4
    bboxes = 2
    net = YOLO(classes, bboxes)

    for i_batch, sample_batched in enumerate(dataloader):
        print(i_batch, sample_batched['image'].size(), sample_batched['boxes'].size())
        output = net(sample_batched['image'].float())
        print(output.shape)

        loss_fn = YOLOLoss(4, 2)
        loss = loss_fn(output, sample_batched['boxes'])
Example no. 2
0
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from dataset import FaceLandmarksDataset
from transforms import Rescale, RandomCrop, ToTensor

# Ignore warnings
import warnings
warnings.filterwarnings("ignore")

plt.ion()   # interactive mode

transformed_dataset = FaceLandmarksDataset(csv_file='data/faces/face_landmarks.csv',
                                           root_dir='data/faces/',
                                           transform=transforms.Compose([
                                               Rescale(256),
                                               RandomCrop(224),
                                               ToTensor()
                                           ]))

for i in range(len(transformed_dataset)):
    sample = transformed_dataset[i]

    print(i, sample['image'].size(), sample['landmarks'].size())

    if i == 3:
        break

dataloader = DataLoader(transformed_dataset, batch_size=4,
                        shuffle=True, num_workers=4)
Example no. 3
0
#                           original_width,
#                           h_start, w_start
#                           )
# from crop_utils import join_mask
import crowdai

from validation import convert_bin_coco
from transforms import (ImageOnly,
                        Normalize,
                        RandomCrop,
                        DualCompose,
                        Rescale)

img_transform = DualCompose([
    # RandomCrop([128, 128]),
    Rescale([256, 256]),
    ImageOnly(Normalize())
])


def get_model(model_path, model_type='unet11', problem_type='parts'):
    """

    :param model_path:
    :param model_type: 'UNet', 'UNet16', 'UNet11', 'LinkNet34'
    :param problem_type: 'binary', 'parts', 'instruments'
    :return:
    """
    num_classes = 1

    # if model_type == 'UNet16':
Example no. 4
0
def main():
    parser = argparse.ArgumentParser()

    arg = parser.add_argument
    arg('--jaccard-weight', default=1, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=8)
    arg('--n-epochs', type=int, default=14)
    arg('--lr', type=float, default=0.000001)
    arg('--workers', type=int, default=8)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    arg('--model',
        type=str,
        default='TernausNet',
        choices=['UNet', 'UNet11', 'LinkNet34', 'TernausNet'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if args.type == 'parts':
        num_classes = 3
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    # NOTE: both branches below build TernausNet34; the other --model choices are not wired up in this snippet
    if args.model == 'TernausNet':
        model = TernausNet34(num_classes=num_classes)
    else:
        model = TernausNet34(num_classes=num_classes)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.type == 'binary':
        loss = LossBinary(jaccard_weight=args.jaccard_weight)
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms for the fixed input size

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    mode='train',
                    problem_type='binary'):
        return DataLoader(dataset=MapDataset(file_names,
                                             transform=transform,
                                             problem_type=problem_type,
                                             mode=mode),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    # labels = pd.read_csv('data/stage1_train_labels.csv')
    # labels = os.listdir('data/stage1_train_')
    # train_file_names, val_file_names = train_test_split(labels, test_size=0.2, random_state=42)

    # print('num train = {}, num_val = {}'.format(len(train_file_names), len(val_file_names)))

    # train_transform = DualCompose([
    #     HorizontalFlip(),
    #     VerticalFlip(),
    #     RandomCrop([256, 256]),
    #     RandomRotate90(),
    #     ShiftScaleRotate(),
    #     ImageOnly(RandomHueSaturationValue()),
    #     ImageOnly(RandomBrightness()),
    #     ImageOnly(RandomContrast()),
    #     ImageOnly(Normalize())
    # ])
    train_transform = DualCompose([
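        # with prob 0.5, apply either one of the random distortions or a shift/scale/rotate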
        OneOrOther(*(OneOf([
            Distort1(distort_limit=0.05, shift_limit=0.05),
            Distort2(num_steps=2, distort_limit=0.05)
        ]),
                     ShiftScaleRotate(shift_limit=0.0625,
                                      scale_limit=0.10,
                                      rotate_limit=45)),
                   prob=0.5),
        RandomRotate90(),
        RandomCrop([256, 256]),
        RandomFlip(prob=0.5),
        Transpose(prob=0.5),
        ImageOnly(RandomContrast(limit=0.2, prob=0.5)),
        ImageOnly(RandomFilter(limit=0.5, prob=0.2)),
        ImageOnly(RandomHueSaturationValue(prob=0.2)),
        ImageOnly(RandomBrightness()),
        ImageOnly(Normalize())
    ])

    val_transform = DualCompose([
        # RandomCrop([256, 256]),
        Rescale([256, 256]),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(TRAIN_ANNOTATIONS_PATH,
                               shuffle=True,
                               transform=train_transform,
                               problem_type=args.type)
    valid_loader = make_loader(VAL_ANNOTATIONS_PATH,
                               transform=val_transform,
                               mode='valid',
                               problem_type=args.type)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
Example no. 5
0
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.3, type=float)
    arg('--device-ids', type=str, default='0', help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.001)
    arg('--workers', type=int, default=12)
    arg('--model', type=str, default='UNet', choices=['UNet', 'UNet11', 'LinkNet34', 'UNet16', 'AlbuNet34', 'MDeNet', 'EncDec', 'hourglass', 'MDeNetplus'])

    args = parser.parse_args()
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 1
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'MDeNet':
        print('Mine MDeNet..................')
        model = MDeNet(num_classes=num_classes, pretrained=True)
    elif args.model == 'MDeNetplus':
        print('load MDeNetplus..................')
        model = MDeNetplus(num_classes=num_classes, pretrained=True)
    elif args.model == 'EncDec':
        print('Mine EncDec..................')
        model = EncDec(num_classes=num_classes, pretrained=True)
    elif args.model == 'GAN':
        model = GAN(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes, pretrained=False)
    elif args.model == 'hourglass':
        model = hourglass(num_classes=num_classes, pretrained=True) 
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
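        # NOTE: device_ids is parsed above but not passed to DataParallel below (see the commented-out variant)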
        model = nn.DataParallel(model).cuda()   #  nn.DataParallel(model, device_ids=device_ids).cuda()
    
    cudnn.benchmark = True
    
    def make_loader(file_names, shuffle=False, transform=None, limit=None):
        return DataLoader(
            dataset=Polyp(file_names, transform=transform, limit=limit),
            shuffle=shuffle,
            num_workers=args.workers,
            batch_size=args.batch_size,
            pin_memory=torch.cuda.is_available()
        )

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names), len(val_file_names)))
    
    train_transform = DualCompose([
        CropCVC612(),
        img_resize(512),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        Rescale(), 
        Zoomin(),
        ImageOnly(RandomHueSaturationValue()),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(train_file_names, shuffle=True, transform=train_transform, limit=args.limit)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    utils.train(
        args=args,
        model=model,
        train_loader=train_loader,
        fold=args.fold
    )
Example no. 6
0
        self.last_layer = nn.Linear(1000, self.__NUM_CLASS)

    def forward(self, x):
        return self.last_layer(self.pretrained_model(x))


net = MyExtendedVGG(pretrained_model).double()

# for param in net.features.parameters():
#     param.requires_grad = False

train_dataset = WhaleDataset(csv_file='/data/train.csv',
                             root_dir='/data/imgs',
                             train=True,
                             transform=transforms.Compose(
                                 [Rescale((256, 384)),
                                  ToTensor()]))
test_dataset = WhaleDataset(csv_file='/data/sample_submission.csv',
                            root_dir='/data/imgs',
                            transform=transforms.Compose(
                                [Rescale((256, 384)),
                                 ToTensor()]))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
print("Done loading data")

# Uncomment to load pre-trained model
# net = pickle.load(open("net_baseline.p", 'rb'))

if is_gpu:
    net.cuda()
Example no. 7
0
        self.last_layer = nn.Linear(1000, self.__NUM_CLASS)

    def forward(self, x):
        return self.last_layer(self.pretrained_model(x))


net = MyExtendedVGG(pretrained_model).double()

# for param in net.features.parameters():
#     param.requires_grad = False

train_dataset = WhaleDataset(csv_file='/data/train.csv',
                             root_dir='/data/imgs',
                             train=True,
                             transform=transforms.Compose(
                                 [Rescale((224, 224)),
                                  ToTensor()]))
test_dataset = WhaleDataset(csv_file='/data/sample_submission.csv',
                            root_dir='/data/imgs',
                            transform=transforms.Compose(
                                [Rescale((224, 224)),
                                 ToTensor()]))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
print("Done loading data")

# Uncomment to load pre-trained model
# net = pickle.load(open("net_baseline.p", 'rb'))

if is_gpu:
    net.cuda()
Example no. 8
0
        img = torch.from_numpy(img)

        # one label row per object: class index plus four box coordinates (Darknet label format)
        boxes = torch.from_numpy(np.loadtxt(label_path)).reshape((-1, 5))
        sample = {'image': img, 'boxes': boxes}

        if self.transform:
            sample = self.transform(sample)

        return sample


if __name__ == '__main__':
    from transforms import PadToSquare, Rescale, SampleToYoloTensor
    from torchvision import transforms
    # load data
    train_path = "data/train.txt"
    input_size = 448

    composed = transforms.Compose(
        [PadToSquare(),
         Rescale(input_size),
         SampleToYoloTensor(7, 4)])
    image_dataset = DarknetDataset(train_path, transform=composed)

    print(len(image_dataset))
    sample = image_dataset[0]
    image, boxes = sample['image'], sample['boxes']
    print(image.shape)
    print(boxes.shape)
Example no. 9
0
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.3, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg("--b1",
        type=float,
        default=0.5,
        help="adam: decay of first order momentum of gradient")
    arg("--b2",
        type=float,
        default=0.999,
        help="adam: decay of first order momentum of gradient")

    args = parser.parse_args()
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    # Loss functions
    criterion_GAN = GAN_loss(gan_weight=1)  #torch.nn.MSELoss()
    criterion_pixelwise = torch.nn.L1Loss()
    criterion_discrim = Discrim_loss(dircrim_weight=1)

    # Loss weight of L1 pixel-wise loss between translated image and real image
    lambda_pixel = 100

    # Initialize generator and discriminator
    model = AlbuNet34(num_classes=1, pretrained=True)
    discrim_model = discriminator()

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()
        discrim_model = nn.DataParallel(discrim_model,
                                        device_ids=device_ids).cuda()

    # Load pretrained models
    root = Path(args.root)
    model_path = root / 'model_{fold}.pt'.format(fold=args.fold)
    if model_path.exists():
        state = torch.load(str(model_path))
        epoch = state['epoch']
        step = state['step']
        model.load_state_dict(state['model'])
        print('Restored model, epoch {}, step {:,}'.format(epoch, step))
    else:
        epoch = 1
        step = 0

    # checkpoint helper: saves the generator weights along with the current epoch and step
    save = lambda ep: torch.save(
        {
            'model': model.state_dict(),
            'epoch': ep,
            'step': step,
        }, str(model_path))

    # Optimizers
    optimizer_G = Adam(model.parameters(),
                       lr=args.lr,
                       betas=(args.b1, args.b2))
    optimizer_D = Adam(discrim_model.parameters(),
                       lr=args.lr,
                       betas=(args.b1, args.b2))

    # Configure dataloaders
    def make_loader(file_names, shuffle=False, transform=None, limit=None):
        return DataLoader(dataset=Polyp(file_names,
                                        transform=transform,
                                        limit=limit),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    train_transform = DualCompose([
        CropCVC612(),
        img_resize(512),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        Rescale(),
        Zoomin(),
        ImageOnly(RandomHueSaturationValue()),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform,
                               limit=args.limit)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    report_each = 10
    log = root.joinpath('train_{fold}.log'.format(fold=args.fold)).open(
        'at', encoding='utf8')

    for epoch in range(epoch, args.n_epochs + 1):
        model.train()
        discrim_model.train()
        random.seed()
        tq = tqdm.tqdm(total=(len(train_loader) * args.batch_size))
        tq.set_description('Epoch {}, lr {}'.format(epoch, args.lr))
        losses = []
        tl = train_loader
        try:
            mean_loss = 0
            for i, (inputs, targets) in enumerate(tl):
                # Model inputs
                inputs, targets = variable(inputs), variable(targets)

                # ------------------
                #  Train Generators
                # ------------------
                optimizer_G.zero_grad()
                # Generate output
                outputs = model(inputs)
                # fake loss
                predict_fake = discrim_model(inputs, outputs)

                # Pixel-wise loss
                loss_pixel = criterion_pixelwise(outputs, targets)
                # Generator loss
                loss_GAN = criterion_GAN(predict_fake)
                # Total loss of GAN
                loss_G = loss_GAN + lambda_pixel * loss_pixel

                loss_G.backward()
                optimizer_G.step()

                # ---------------------
                #  Train Discriminator
                # ---------------------
                optimizer_D.zero_grad()
                # Real loss
                predict_real = discrim_model(inputs, targets)
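                # detach() keeps the discriminator update from backpropagating into the generator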
                predict_fake = discrim_model(inputs, outputs.detach())

                # Discriminator loss
                loss_D = criterion_discrim(predict_real, predict_fake)
                loss_D.backward()
                optimizer_D.step()

                step += 1
                batch_size = inputs.size(0)
                tq.update(batch_size)
                losses.append(float(loss_G.data))
                mean_loss = np.mean(losses[-report_each:])
                tq.set_postfix(loss='{:.5f}'.format(mean_loss))
                if i and i % report_each == 0:
                    write_event(log, step, loss=mean_loss)
            write_event(log, step, loss=mean_loss)
            tq.close()
            save(epoch + 1)

        except KeyboardInterrupt:
            tq.close()
            print('Ctrl+C, saving snapshot')
            save(epoch)
            print('done.')
            return
Example no. 10
0
import time
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter("runs/subseg")

import matplotlib.pyplot as plt

from models import SubtitleSegmentation, BaselineModel
from datasets import SubtitleSegmentationDataset
from transforms import Rescale, ToTensor

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataset = SubtitleSegmentationDataset(
        "data",
        transform=transforms.Compose([Rescale((360, 640)),
                                      ToTensor()]))
    test_dataset = SubtitleSegmentationDataset(
        "test-dataset",
        transform=transforms.Compose([Rescale((360, 640)),
                                      ToTensor()]))

    train_size = int(0.9 * len(dataset))
    val_size = len(dataset) - train_size

    train_dataset, val_dataset = random_split(dataset, [train_size, val_size])

    batch_size = 8
    num_workers = 8

    train_dataloader = DataLoader(train_dataset,