    def test(epoch):
        if args.skip_train:
            return
        loss, _, correct = 0, 0, 0
        data_len = len(test_loader.dataset)
        lr = get_lr(optimizer)

        for _, batch_idx, data, target in run_loop(0, 1, test_loader, device):
            _loss, _, _ = train(False, data, target, data_len)
            loss += _loss
        loss /= data_len
        state.add_to_test_history(epoch, lr, loss, loss)
        state.log_last_test(data_len, correct)
        state.save_net()
# Example #2
def main():
    args, kwargs, device = runner("FACES_conv_autoencoder1-srr/", options)

    transform = transforms.Compose([
        transforms.ToTensor(),
    ])

    dataset = datasets.ImageFolder('../data/img_align_celeba/img_align_celeba',
                                   transform=transform)
    dataset_me = datasets.ImageFolder('../data/img_align_celeba/me',
                                      transform=transform)
    # dataset1 = datasets.CelebA('../data', split='train', download=True, transform=transform)
    # dataset2 = datasets.CelebA('../data', split='valid', transform=transform)
    dataset_len = len(dataset)
    # print('==> len', len(dataset))
    # train_set, val_set = torch.utils.data.random_split(dataset, [50000, 10000])
    test_set, train_set = torch.utils.data.random_split(
        dataset, [1000, dataset_len - 1000],
        generator=torch.Generator().manual_seed(42))

    train_loader = torch.utils.data.DataLoader(train_set, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_set, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    state = State('save', 'FACES_conv_autoencoder1-sr', model, optimizer, args)
    state.load_net()

    current_epoch = state.epoch

    def calc_conv2d(tensor: torch.Tensor, inout: int, convs: int, stride: int):
        # Shape probe: prints the predicted size of an unpadded Conv2d output,
        # the input size, and the size after a matching ConvTranspose2d round
        # trip, to check whether the Conv/ConvTranspose pair is size-preserving.
        _, _, h1, w1 = tensor.data.shape
        h = (h1 - convs) // stride + 1
        w = (w1 - convs) // stride + 1
        tensor = nn.Conv2d(inout, inout, convs, stride)(tensor)
        tensor2 = nn.ConvTranspose2d(inout, inout, convs, stride)(tensor)
        _, _, h2, w2 = tensor2.data.shape
        print(
            f'{h}, {w} || {h1}, {w1} || {h2}, {w2} || {h1 == h2}, {w1 == w2}')
        return tensor
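    # A quick, purely illustrative way to use the probe above on a CelebA-sized
    # input (mirroring the commented-out experiments in the later examples; the
    # exact layer arguments here are just placeholders):
    #
    #     with torch.no_grad():
    #         x = torch.rand(1, 3, 218, 178)   # aligned CelebA images are 218x178
    #         x = calc_conv2d(x, 3, 4, 1)
    #         x = calc_conv2d(x, 3, 4, 2)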

    if args.skip_train:
        return

    criterion = nn.MSELoss()

    def train(train: bool, data, target, data_len):
        optimizer.zero_grad()
        model.train(train)
        output = None
        loss = None
        if train:
            output = model(data)
            loss = criterion(output, data)
            loss.backward()
            optimizer.step()
        else:
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, data)
        with torch.no_grad():
            # An autoencoder has no accuracy to report, so the loss fills all
            # three slots of the (loss, acc, correct) interface used elsewhere.
            return loss.item(), loss.item(), loss.item()

    def test(epoch):
        if args.skip_train:
            return
        loss, _, correct = 0, 0, 0
        data_len = len(test_loader.dataset)
        lr = get_lr(optimizer)

        for _, batch_idx, data, target in run_loop(0, 1, test_loader, device):
            _loss, _, _ = train(False, data, target, data_len)
            loss += _loss
            # correct += _correct
        loss /= data_len
        state.add_to_test_history(epoch, lr, loss, loss)
        state.log_last_test(data_len, correct)
        state.save_net()

    for epoch, batch_idx, data, target in run_loop(current_epoch, args.epochs,
                                                   train_loader, device):
        if epoch != current_epoch and batch_idx == 0:
            test(epoch - 1)

        data_len = len(data)
        lr = get_lr(optimizer)

        loss, acc, correct = train(True, data, target, data_len)

        state.add_to_history(epoch, batch_idx, data_len, lr, loss, acc)
        # Note: the scheduler is stepped once per batch here; the other examples
        # step it once per epoch.
        scheduler.step()

        del data

        if batch_idx % args.log_interval == 0:
            state.log_last_train(data_len, len(train_loader.dataset), correct)
            global frame_number
            frame_number += 1
            dampImg(model, 5, f'{epoch}_{batch_idx}', train_set, device)
            dampImg(model, 0, f'{epoch}_{batch_idx}', dataset_me, device)
    test(epoch)
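
# All of these examples lean on project helpers that are not shown here (runner,
# State, get_lr, run_loop, dampImg, infographics). Judging only from its call
# sites, run_loop appears to be a flat generator over epochs and batches; a
# minimal sketch of that behaviour (names and details are assumptions, not the
# project's actual implementation):
def run_loop(start_epoch, epochs, loader, device):
    for epoch in range(start_epoch, epochs):
        for batch_idx, (data, target) in enumerate(loader):
            # Move each batch to the target device before handing it to the caller.
            yield epoch, batch_idx, data.to(device), target.to(device)
# get_lr(optimizer) likewise seems to just read optimizer.param_groups[0]['lr'].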
# Example #3 (file: cifar.py, project: pit-rpg/reptiloid)
def main():
    args, kwargs, device = runner("CIFAR Test", options)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # NOTE: both loaders in this example use the CIFAR-10 *test* split (train=False).
    train_set = datasets.CIFAR10('../data',
                                 train=False,
                                 download=False,
                                 transform=transform)
    test_set = datasets.CIFAR10('../data',
                                train=False,
                                download=False,
                                transform=transform)

    train_loader = torch.utils.data.DataLoader(train_set, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_set, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    state = State('save', 'CIFAR_conv', model, optimizer, args)
    state.load_net()

    current_epoch = state.epoch

    infographics(args, model)

    # def calc_conv2d(tensor: torch.Tensor, inout: int, convs: int, stride: int):
    #     _, _, h1, w1 = tensor.data.shape
    #     h = (h1 - convs + 1) / stride
    #     w = (w1 - convs + 1) / stride
    #     tensor = nn.Conv2d(inout, inout, convs, stride)(tensor)
    #     tensor2 = nn.ConvTranspose2d(inout, inout, convs, stride)(tensor)
    #     _, _, h2, w2 = tensor2.data.shape
    #     print(f'{h}, {w} || {h1}, {w1} || {h2}, {w2} || {h1 == h2}, {w1 == w2}')
    #     return tensor

    # x = torch.rand(1, 3, 32, 32)

    # x = calc_conv2d(x, 3, 4, 1)
    # x = calc_conv2d(x, 3, 4, 2)
    # x = calc_conv2d(x, 3, 4, 2)
    # x = calc_conv2d(x, 3, 3, 1)
    # # x = calc_conv2d(x, 3, 4, 2)

    # criterion = nn.NLLLoss()
    criterion = nn.MSELoss()

    if args.skip_train:
        return

    def train(train: bool, data, target, data_len):
        optimizer.zero_grad()
        model.train(train)
        output = None
        loss = None
        target = torch.nn.functional.one_hot(target, num_classes=10).float()
        if train:
            output = model(data)
            # print(output)
            # print(target)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
        else:
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, target)
        with torch.no_grad():
            x1 = output.argmax(dim=1, keepdim=True)
            x2 = target.argmax(dim=1, keepdim=True)
            correct = x1.eq(x2).sum().item()
            acc = 100. * correct / data_len
            return loss.item(), acc, correct
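
    # Unlike the MNIST example further down, this classifier is trained with
    # nn.MSELoss against one-hot targets rather than a cross-entropy / NLL loss;
    # the argmax comparison above still yields an ordinary accuracy.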

    def test(epoch):
        if args.skip_train:
            return
        loss, _, correct = 0, 0, 0
        data_len = len(test_loader.dataset)
        lr = get_lr(optimizer)

        for _, batch_idx, data, target in run_loop(0, 1, test_loader, device):
            _loss, _acc, _correct = train(False, data, target, data_len)
            loss += _loss
            correct += _correct

        state.add_to_test_history(epoch, lr, loss / data_len,
                                  correct / data_len * 100.)
        state.log_last_test(data_len, correct)
        state.save_net()

    for epoch, batch_idx, data, target in run_loop(current_epoch, args.epochs,
                                                   train_loader, device):
        if epoch != current_epoch and batch_idx == 0:
            scheduler.step()
            test(epoch - 1)

        data_len = len(data)
        lr = get_lr(optimizer)

        loss, acc, correct = train(True, data, target, data_len)

        state.add_to_history(epoch, batch_idx, data_len, lr, loss, acc)

        if batch_idx % args.log_interval == 0:
            state.log_last_train(data_len, len(train_loader.dataset), correct)
    test(epoch)
# Example #4
def main():
    args, kwargs, device = runner("FACES-pure-simple-sobel", options)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    dataset = datasets.ImageFolder('../data/img_align_celeba/img_align_celeba',
                                   transform=transform)
    dataset_me = datasets.ImageFolder('../data/img_align_celeba/me',
                                      transform=transform)
    dataset_len = len(dataset)
    test_set, train_set = torch.utils.data.random_split(
        dataset, [1000, dataset_len - 1000],
        generator=torch.Generator().manual_seed(42))

    train_loader = torch.utils.data.DataLoader(train_set, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_set, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    state = State('save', 'FACES_conv_autoencoder-pure-simple-sobel', model,
                  optimizer, args)
    state.load_net()

    current_epoch = state.epoch

    def calc_conv2d(tensor: torch.Tensor, inout: int, convs: int, stride: int):
        _, _, h1, w1 = tensor.data.shape
        # Predicted size of an unpadded convolution: floor((H - K) / S) + 1.
        h = (h1 - convs) // stride + 1
        w = (w1 - convs) // stride + 1
        tensor = nn.Conv2d(inout, inout, convs, stride)(tensor)
        tensor2 = nn.ConvTranspose2d(inout, inout, convs, stride)(tensor)
        _, _, h2, w2 = tensor2.data.shape
        print(
            f'{h}, {w} || {h1}, {w1} || {h2}, {w2} || {h1 == h2}, {w1 == w2}')
        return tensor

    if args.skip_train:
        return

    def criterion(output, data):
        loss1 = F.l1_loss(output, data)

        output1 = F.avg_pool2d(output, (2, 2))
        data1 = F.avg_pool2d(data, (2, 2))
        loss2 = F.l1_loss(output1, data1)

        output2 = kornia.filters.sobel(output, normalized=True)
        data2 = kornia.filters.sobel(data, normalized=True)
        loss3 = F.l1_loss(output2, data2)
        return loss1 + loss2 + loss3
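
    # The custom criterion above sums three L1 terms: per-pixel error, error on
    # 2x2-average-pooled images (coarse structure), and error between kornia
    # Sobel edge maps (sharpness). A quick sanity check, assuming kornia is
    # installed, could look like:
    #
    #     x, y = torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)
    #     print(criterion(x, y))   # a single scalar tensor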

    def train(train: bool, data, target, data_len):
        optimizer.zero_grad()
        model.train(train)
        output = None
        loss = None
        if train:
            output = model(data)
            loss = criterion(output, data)
            loss.backward()
            optimizer.step()
        else:
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, data)
        with torch.no_grad():
            # An autoencoder has no accuracy to report, so the loss fills all
            # three slots of the (loss, acc, correct) interface used elsewhere.
            return loss.item(), loss.item(), loss.item()

    def test(epoch):
        if args.skip_train:
            return
        loss, _, correct = 0, 0, 0
        data_len = len(test_loader.dataset)
        lr = get_lr(optimizer)

        for _, batch_idx, data, target in run_loop(0, 1, test_loader, device):
            _loss, _, _ = train(False, data, target, data_len)
            loss += _loss
        loss /= data_len
        state.add_to_test_history(epoch, lr, loss, loss)
        state.log_last_test(data_len, correct)
        state.save_net()

    for epoch, batch_idx, data, target in run_loop(current_epoch, args.epochs,
                                                   train_loader, device):
        if epoch != current_epoch and batch_idx == 0:
            test(epoch - 1)

        data_len = len(data)
        lr = get_lr(optimizer)

        loss, acc, correct = train(True, data, target, data_len)

        state.add_to_history(epoch, batch_idx, data_len, lr, loss, acc)

        del data

        global frame_number

        if batch_idx % args.log_interval == 0:
            state.log_last_train(data_len, len(train_loader.dataset), correct)
            frame_number += 1
            with torch.no_grad():
                dampImg(model, 5, f'{epoch}_{batch_idx}', train_set, device)
                dampImg(model, 0, f'{epoch}_{batch_idx}', dataset_me, device)

        if batch_idx % (args.log_interval * 20) == 0:
            state.save_last()

        # (Re)install Ctrl-C / SIGTERM handlers so an interrupted run still saves
        # a final checkpoint; registering them once before the loop would also do.
        signal.signal(signal.SIGINT, lambda x, y: state.save_last(True))
        signal.signal(signal.SIGTERM, lambda x, y: state.save_last(True))

    test(epoch)
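
# dampImg(model, index, tag, dataset, device) is another project helper whose
# source is not shown; from its call sites it apparently dumps a reconstruction
# of one dataset image for visual inspection. A rough, hypothetical sketch (the
# output path and the use of torchvision.utils.save_image are guesses):
#
#     def dampImg(model, index, tag, dataset, device):
#         from torchvision.utils import save_image
#         img = dataset[index][0].unsqueeze(0).to(device)
#         with torch.no_grad():
#             recon = model(img)
#         save_image(torch.cat([img, recon]), f'out/{frame_number}_{tag}.png')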
# Example #5
def main():
    args, kwargs, device = runner("FACES_conv_autoencoder", options)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    train_set = datasets.CIFAR10('../data', train=True, download=True, transform=transform)
    test_set = datasets.CIFAR10('../data', train=False, download=False, transform=transform)
    # dataset_me = datasets.ImageFolder('../data/img_align_celeba/me', transform=transform)
    # dataset1 = datasets.CelebA('../data', split='train', download=True, transform=transform)
    # dataset2 = datasets.CelebA('../data', split='valid', transform=transform)
    # dataset_len = len(dataset)
    # print('==> len', len(dataset))
    # train_set, val_set = torch.utils.data.random_split(dataset, [50000, 10000])
    # test_set, train_set = torch.utils.data.random_split(dataset, [1000, dataset_len-1000], generator=torch.Generator().manual_seed(42))

    train_loader = torch.utils.data.DataLoader(train_set, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_set, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    state = State('save', 'FACES_conv_autoencoder', model, optimizer, args)
    state.load_net()

    current_epoch = state.epoch

    def calc_conv2d(tensor: torch.Tensor, inout: int, convs: int, stride: int):
        _, _, h1, w1 = tensor.data.shape
        # Predicted size of an unpadded convolution: floor((H - K) / S) + 1.
        h = (h1 - convs) // stride + 1
        w = (w1 - convs) // stride + 1
        tensor = nn.Conv2d(inout, inout, convs, stride)(tensor)
        tensor2 = nn.ConvTranspose2d(inout, inout, convs, stride)(tensor)
        _, _, h2, w2 = tensor2.data.shape
        print(f'{h}, {w} || {h1}, {w1} || {h2}, {w2} || {h1 == h2}, {w1 == w2}')
        return tensor
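
    # The long commented-out block below is scratch work: it probes how a stack
    # of Conv2d layers shrinks a 218x178 CelebA image and how the mirrored
    # ConvTranspose2d stack restores the original size; kept for reference only.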

    # with torch.no_grad():
    #     x = torch.rand(1, 3, 218, 178)
    #     print('*' * 42)

    #     # x = calc_conv2d(x, 3, 3, 1)
    #     # x = calc_conv2d(x, 3, 4, 2)
    #     # x = calc_conv2d(x, 3, 5, 2)
    #     # x = calc_conv2d(x, 3, 4, 2)
    #     # x = calc_conv2d(x, 3, 3, 1)
    #     # x = calc_conv2d(x, 3, 3, 1)
    #     # x = calc_conv2d(x, 3, 3, 1)
    #     # x = calc_conv2d(x, 3, 3, 1)
    #     # x = calc_conv2d(x, 3, 3, 1)

    #     print('in --', x.data.shape)

    #     x = nn.Conv2d(3, 3, 3, 1)(x)
    #     x = nn.Conv2d(3, 3, 4, 2)(x)
    #     x = nn.Conv2d(3, 3, 5, 2)(x)
    #     x = nn.Conv2d(3, 3, 4, 2)(x)
    #     x = nn.Conv2d(3, 3, 3, 1)(x)
    #     x = nn.Conv2d(3, 3, 3, 1)(x)
    #     x = nn.Conv2d(3, 3, 3, 1)(x)
    #     x = nn.Conv2d(3, 3, 3, 1)(x)
    #     x = nn.Conv2d(3, 3, 3, 1)(x)

    #     print('---', x.data.shape)

    #     x = nn.ConvTranspose2d(3, 3, 3, 1)(x)
    #     x = nn.ConvTranspose2d(3, 3, 3, 1)(x)
    #     x = nn.ConvTranspose2d(3, 3, 3, 1)(x)
    #     x = nn.ConvTranspose2d(3, 3, 3, 1)(x)
    #     x = nn.ConvTranspose2d(3, 3, 3, 1)(x)
    #     x = nn.ConvTranspose2d(3, 3, 4, 2)(x)
    #     x = nn.ConvTranspose2d(3, 3, 5, 2)(x)
    #     x = nn.ConvTranspose2d(3, 3, 4, 2)(x)
    #     x = nn.ConvTranspose2d(3, 3, 3, 1)(x)

    #     print('out --', x.data.shape)
        # x = nn.Conv2d(3, 3, 3, 2)(x)
        # print('2--', x.data.shape)

        # calc_conv2d(x, 3, 4, 2)
        # x = nn.Conv2d(3, 3, 4, 2)(x)
        # # print('3--', x.data.shape)

        # calc_conv2d(x, 3, 5, 2)
        # x = nn.Conv2d(3, 3, 5, 2)(x)
        # # print('4--', x.data.shape)

        # calc_conv2d(x, 3, 3, 1)
        # x = nn.Conv2d(3, 3, 3, 1)(x)
        # print('---', x.data.shape)

        # x = nn.ConvTranspose2d(3, 3, 3, 1)(x)
        # print('4--', x.data.shape)

        # x = nn.ConvTranspose2d(3, 3, 5, 2)(x)
        # print('3--', x.data.shape)

        # x = nn.ConvTranspose2d(60, 60, 5, 2)(x)
        # print('3--', x.data.shape)

        # x = nn.ConvTranspose2d(60, 60, 5, 2)(x)
        # print('3--', x.data.shape)

        # x = nn.ConvTranspose2d(60, 60, 4, 2)(x)
        # print('2--', x.data.shape)

        # x = nn.ConvTranspose2d(60, 60, 3, 2)(x)
        # print('2--', x.data.shape)

        # x = nn.ConvTranspose2d(60, 60, 3, 1)(x)
        # print('2--', x.data.shape)

        # x = nn.ConvTranspose2d(60, 42, 4, 2)(x)
        # print('4--', x.data.shape)
        # print('-'*5)
        # x = nn.Conv2d(40, 20, 3)(x)

        # print('--', x.data.shape)
        # x = nn.ConvTranspose2d(20, 40, 3)(x)
        # print('--', x.data.shape)
        # x = nn.ConvTranspose2d(40, 60, 5, 3)(x)
        # print('1--', x.data.shape)
        # x = nn.ConvTranspose2d(60, 42, 5, 3)(x)
        # x = nn.ConvTranspose2d(42, 22, 3, 3)(x)
        # x = nn.ConvTranspose2d(22, 3, 3, 2)(x)
        # print('out', x.data.shape)

        # print(x.data.shape)

    # p = dataset1.
    # print(p)

    # plt.imshow(x.permute(1, 2, 0))
    # plt.show()

    # plt.figure(figsize=(16, 8))
    # plt.subplot(1, 3, 1)
    # plt.imshow(dataset1[2][0].permute(1, 2, 0))
    # plt.subplot(1, 3, 2)
    # plt.imshow(dataset1[3][0].permute(1, 2, 0))
    # plt.subplot(1, 3, 3)
    # plt.imshow(dataset1[4][0].permute(1, 2, 0))

    # plt.show()

    if args.skip_train:
        return

    criterion = nn.MSELoss()

    def train(train: bool, data, target, data_len):
        optimizer.zero_grad()
        model.train(train)
        output = None
        loss = None
        if train:
            output = model(data)
            loss = criterion(output, data)
            loss.backward()
            optimizer.step()
        else:
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, data)
        with torch.no_grad():
            # An autoencoder has no accuracy to report, so the loss fills all
            # three slots of the (loss, acc, correct) interface used elsewhere.
            return loss.item(), loss.item(), loss.item()

    def test(epoch):
        if args.skip_train:
            return
        loss, _, correct = 0, 0, 0
        data_len = len(test_loader.dataset)
        lr = get_lr(optimizer)

        for _, batch_idx, data, target in run_loop(0, 1, test_loader, device):
            _loss, _, _ = train(False, data, target, data_len)
            loss += _loss
            # correct += _correct
        loss /= data_len
        state.add_to_test_history(epoch, lr, loss, loss)
        state.log_last_test(data_len, correct)
        state.save_net()

    for epoch, batch_idx, data, target in run_loop(current_epoch, args.epochs, train_loader, device):
        if epoch != current_epoch and batch_idx == 0:
            scheduler.step()
            test(epoch - 1)

        data_len = len(data)
        lr = get_lr(optimizer)

        loss, acc, correct = train(True, data, target, data_len)

        state.add_to_history(epoch, batch_idx, data_len, lr, loss, acc)

        del data

        if batch_idx % args.log_interval == 0:
            state.log_last_train(data_len, len(train_loader.dataset), correct)
            global frame_number
            frame_number += 1
            dampImg(model, 5, f'{epoch}_{batch_idx}', train_set, device)
            # dataset_me is not defined in this example (its ImageFolder is
            # commented out above), so this second dump is disabled:
            # dampImg(model, 0, f'{epoch}_{batch_idx}', dataset_me, device)
    test(epoch)
# Example #6
def main():
    args, kwargs, device = runner("MNIST Test", options)

    transform = transforms.Compose([
        transforms.ToTensor(),
        # transforms.Normalize((0.1307,), (0.3081,))
    ])

    dataset1 = datasets.MNIST('../data',
                              train=True,
                              download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False, transform=transform)

    train_loader = torch.utils.data.DataLoader(dataset1, **kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # optimizer = optim.SGD(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    state = State('save', 'MNIST_conv', model, optimizer, args)
    state.load_net()

    current_epoch = state.epoch

    infographics(args, model)

    if args.skip_train:
        return

    def train(train: bool, data, target, data_len):
        optimizer.zero_grad()
        model.train(train)
        output = None
        loss = None
        if train:
            output = model(data)
            loss = F.nll_loss(output, target)  # nll_loss expects log-probabilities
            loss.backward()
            optimizer.step()
        else:
            with torch.no_grad():
                output = model(data)
                loss = F.nll_loss(output, target)
        with torch.no_grad():
            pred = output.argmax(dim=1, keepdim=True)
            correct = pred.eq(target.view_as(pred)).sum().item()
            acc = 100. * correct / data_len
            return loss.item(), acc, correct

    def test(epoch):
        if args.skip_train:
            return
        loss, _, correct = 0, 0, 0
        data_len = len(test_loader.dataset)
        lr = get_lr(optimizer)

        for _, batch_idx, data, target in run_loop(0, 1, test_loader, device):
            _loss, _acc, _correct = train(False, data, target, data_len)
            loss += _loss
            correct += _correct

        state.add_to_test_history(epoch, lr, loss / data_len,
                                  correct / data_len * 100.)
        state.log_last_test(data_len, correct)
        state.save_net()

    for epoch, batch_idx, data, target in run_loop(current_epoch, args.epochs,
                                                   train_loader, device):
        if epoch != current_epoch and batch_idx == 0:
            scheduler.step()
            test(epoch - 1)

        data_len = len(data)
        lr = get_lr(optimizer)

        loss, acc, correct = train(True, data, target, data_len)

        state.add_to_history(epoch, batch_idx, data_len, lr, loss, acc)

        if batch_idx % args.log_interval == 0:
            state.log_last_train(data_len, len(train_loader.dataset), correct)
    test(epoch)
# Example #7
    def test(epoch):
        if args.skip_train:
            return
        loss, _, correct = 0, 0, 0
        data_len = len(test_loader.dataset)
        lr = get_lr(optimizer)

        for _, batch_idx, data, target in run_loop(0, 1, test_loader, device):
            _loss, _, _ = train(False, data, target, data_len)
            loss += _loss
            # correct += _correct
        loss /= data_len
        state.add_to_test_history(epoch, lr, loss, loss)
        state.log_last_test(data_len, correct)
        state.save_net()

    for epoch, batch_idx, data, target in run_loop(current_epoch, args.epochs, train_loader, device):
        if epoch != current_epoch and batch_idx == 0:
            test(epoch - 1)

        data_len = len(data)
        lr = get_lr(optimizer)

        loss, acc, correct = train(True, data, target, data_len)

        state.add_to_history(epoch, batch_idx, data_len, lr, loss, acc)

        del data

        global frame_number

        if batch_idx % args.log_interval == 0: