Example #1
import os

import chainer
import numpy


# `args`, `MODEL`, and `SingleImageDataset` are assumed to be defined elsewhere
# in this project (argument parsing, the generator network, and the dataset).
def train():
    dataset = SingleImageDataset(paths=args.datapath,
                                 in_size=args.insize,
                                 scale=args.scale)

    iterator = chainer.iterators.MultithreadIterator(dataset,
                                                     batch_size=args.batchsize,
                                                     repeat=True,
                                                     shuffle=True)
    # iterator = chainer.iterators.SerialIterator(dataset, batch_size=args.batchsize, repeat=True, shuffle=True)

    generator = MODEL()
    if args.pretrained_generator is not None:
        chainer.serializers.load_npz(args.pretrained_generator, generator)
    # Pick the array module for the target device: CuPy on GPU, NumPy on CPU.
    if args.gpu >= 0:
        generator.to_gpu(args.gpu)
        xp = chainer.cuda.cupy
    else:
        xp = numpy

    optimizer_generator = chainer.optimizers.Adam(alpha=1e-4)
    optimizer_generator.setup(generator)
    optimizer_generator.use_cleargrads()

    # The manual loop below could instead be driven by
    # chainer.training.StandardUpdater and chainer.training.Trainer.

    os.makedirs('checkpoint', exist_ok=True)  # snapshots are written here below
    report_span = 1
    save_span = 500

    step = 0
    sum_loss_generator = 0
    # repeat=True makes the iterator infinite; this loop runs until interrupted.
    for zipped_batch in iterator:
        lr = chainer.Variable(xp.array([zipped[0] for zipped in zipped_batch]))
        hr = chainer.Variable(xp.array([zipped[1] for zipped in zipped_batch]))

        sr = generator(lr)

        loss_generator = chainer.functions.mean_absolute_error(sr, hr)
        generator.cleargrads()  # gradients accumulate unless cleared each step
        loss_generator.backward()
        optimizer_generator.update()
        loss_g = chainer.cuda.to_cpu(loss_generator.data)
        sum_loss_generator += loss_g

        step += 1
        if step % report_span == 0:
            print("Step: {}".format(step))
            print("loss_generator: {}".format(sum_loss_generator / report_span))
            sum_loss_generator = 0
        if step % save_span == 0:
            chainer.serializers.save_npz(
                os.path.join('checkpoint',
                             "generator_model_{}.npz".format(step)), generator)
Example #2
    def val_dataloader(self):
        dataset = SingleImageDataset(
            this_file_dir / self.cfg.dataset.path,
            batch_size=self.cfg.dataset.batch_size,
            shape=self.cfg.dataset.shape,
            mode="val",
        )

        loader = DataLoader(
            dataset,
            shuffle=False,
            pin_memory=self.cfg.dataset.pin_memory,
            batch_size=None,  # Disable dataloader batching
            batch_sampler=None,  # Disable dataloader batching
        )

        return loader
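
Passing `batch_size=None` (with no `batch_sampler`) switches the PyTorch `DataLoader` out of automatic batching, so each item the dataset yields is treated as an already-formed batch. A minimal sketch of a dataset built around that contract; the class and sizes here are illustrative, not the project's actual `SingleImageDataset`:

import torch
from torch.utils.data import DataLoader, Dataset

class PreBatchedDataset(Dataset):
    """Each __getitem__ returns a whole (inputs, targets) batch."""

    def __init__(self, n_batches: int, batch_size: int):
        self.n_batches = n_batches
        self.batch_size = batch_size

    def __len__(self):
        return self.n_batches

    def __getitem__(self, idx):
        x = torch.rand(self.batch_size, 2)  # e.g. normalized (x, y) coords
        y = torch.rand(self.batch_size, 3)  # e.g. RGB targets
        return x, y

loader = DataLoader(PreBatchedDataset(10, 256), batch_size=None, batch_sampler=None)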
Example #3
def test_single_image_dataset_val(random_img):
    dataset = SingleImageDataset(random_img, 3, mode="val", normalize=False)
    dataloader = DataLoader(dataset, batch_size=None, batch_sampler=None)

    actual_img = np.zeros_like(random_img)
    for x, y in dataloader:
        x = x.numpy()
        y = y.numpy()

        # Unnormalize x from [-1, 1] back to integer pixel indices
        x += 1
        x /= 2
        x *= 4  # shape - 1
        x = x.astype(np.int64)  # np.int was removed in NumPy 1.24

        actual_img[x[:, 1], x[:, 0]] = y

    assert (np.isclose(random_img, actual_img)).all()
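
The `random_img` fixture is not shown; below is a plausible sketch consistent with the unnormalization above (`x *= 4` implies a spatial extent of 5, since shape - 1 == 4). The seed and channel count are assumptions:

import numpy as np
import pytest

@pytest.fixture
def random_img():
    # 5x5 RGB image whose pixels the val dataset should reproduce exactly.
    rng = np.random.default_rng(0)
    return rng.random((5, 5, 3)).astype(np.float32)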
Example #4
    def train_dataloader(self):
        dataset = SingleImageDataset(
            this_file_dir / self.cfg.dataset.path,
            batch_size=self.cfg.dataset.batch_size,
            shape=self.cfg.dataset.shape,
            mode="train",
        )

        loader = DataLoader(
            dataset,
            pin_memory=self.cfg.dataset.pin_memory,
            num_workers=self.cfg.dataset.num_workers,
            batch_size=None,  # Disable dataloader batching
            batch_sampler=None,  # Disable dataloader batching
            worker_init_fn=lambda _: np.random.seed(
                int(torch.initial_seed()) % (2 ** 32 - 1)
            ),
        )

        return loader
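
The `self.cfg` object these loaders read is not shown; a hypothetical stand-in with the `dataset` fields they access (all values are placeholders):

from types import SimpleNamespace

cfg = SimpleNamespace(dataset=SimpleNamespace(
    path="data/image.png",  # resolved relative to this_file_dir
    batch_size=1024,
    shape=(256, 256),
    pin_memory=True,
    num_workers=4,
))

The `worker_init_fn` above reseeds NumPy from `torch.initial_seed()` in each worker process, so forked workers do not draw identical NumPy random streams.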
Example #5
def test_single_image_dataset_train_interpolation(mocker, simple_img):
    """Interpolate very positionally similar values"""

    # This is in X, Y coordinates
    fake_random_data = torch.Tensor(
        np.array([
            [0.001, 0],  # top left
            [0, 0.999],  # bottom left
            [0.999, 0],  # top right
        ]))

    mock_random = mocker.patch("dataset.torch.rand")
    mock_random.return_value = fake_random_data

    dataset = SingleImageDataset(simple_img, 3, normalize=False)
    assert len(dataset) == 9

    dataloader = DataLoader(dataset, batch_size=None, batch_sampler=None)

    n_iter = 0
    for x, y in dataloader:
        n_iter += 1
        x = x.numpy()
        y = y.numpy()
        expected_x = np.array([
            [-0.998, -1],  # Top left
            [-1, 0.998],  # Bottom left
            [0.998, -1],  # Top right
        ])
        assert np.isclose(expected_x, x).all()

        expected_y = np.array([
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, 8.0, 9.0],
        ])
        assert np.isclose(expected_y, y, atol=0.05).all()

    assert n_iter == 9
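
Both this test and the next rely on a `simple_img` fixture that is not shown. A sketch consistent with the `expected_y` assertions, assuming the distinctive channel triples sit on the sampled corner pixels and everything else is zero:

import numpy as np
import pytest

@pytest.fixture
def simple_img():
    img = np.zeros((3, 3, 3), dtype=np.float32)
    img[0, 0] = [1.0, 2.0, 3.0]  # top left
    img[2, 0] = [4.0, 5.0, 6.0]  # bottom left
    img[0, 2] = [7.0, 8.0, 9.0]  # top right
    return img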
Example #6
def test_single_image_dataset_train(mocker, simple_img):
    """Tests basic training operation using nonrandom values"""

    # This is in X, Y coordinates
    fake_random_data = torch.Tensor(
        np.array([
            [0, 0],  # top left
            [0, 1],  # bottom left
            [1, 0],  # top right
        ]))

    mock_random = mocker.patch("dataset.torch.rand")
    mock_random.return_value = fake_random_data

    dataset = SingleImageDataset(simple_img, 3, normalize=False)
    assert len(dataset) == 9

    dataloader = DataLoader(dataset, batch_size=None, batch_sampler=None)

    n_iter = 0
    for x, y in dataloader:
        n_iter += 1
        x = x.numpy()
        y = y.numpy()
        expected_x = np.array([
            [-1, -1],  # Top left
            [-1, 1],  # Bottom left
            [1, -1],  # Top right
        ])
        assert (expected_x == x).all()

        expected_y = np.array([
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, 8.0, 9.0],
        ])
        assert (expected_y == y).all()

    assert n_iter == 9
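
The `expected_x` values in both tests are consistent with a pixel index i in [0, n-1] being mapped linearly onto [-1, 1]. A one-line sketch of that mapping (the function name is illustrative):

def normalize_coord(i: float, n: int) -> float:
    # Map a pixel index in [0, n-1] to a normalized coordinate in [-1, 1].
    return 2.0 * i / (n - 1) - 1.0

assert normalize_coord(0, 3) == -1.0  # left/top edge
assert normalize_coord(2, 3) == 1.0   # right/bottom edge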
Example #7
import sys

import tensorflow as tf

# SingleImageDataset is imported from this project's dataset module (path not shown).
from models.vgg_pretrained import SingleImageModel
from train_evaluate_model import evaluate_model

FULLY_CONNECTED = [200]
BATCH_SIZE = 2
INPUT_SHAPE = [280, 700, 3]

if __name__ == '__main__':
    dataset_root = sys.argv[1]
    model_path = sys.argv[2]
    resolution_factor = float(sys.argv[3])
    # Parse the boolean flag explicitly rather than eval()-ing user input.
    add_geolocations = sys.argv[4].lower() in ('true', '1', 'yes')

    dataset = SingleImageDataset(dataset_root,
                                 BATCH_SIZE,
                                 INPUT_SHAPE,
                                 add_geolocations=add_geolocations,
                                 is_training=False)

    # Rescale all splits by the requested factor (TF1-style graph ops).
    new_width = int(round(resolution_factor * INPUT_SHAPE[1]))
    new_height = int(round(resolution_factor * INPUT_SHAPE[0]))
    dataset.train_images = tf.image.resize_images(dataset.train_images,
                                                  (new_height, new_width),
                                                  tf.image.ResizeMethod.AREA)
    dataset.valid_images = tf.image.resize_images(dataset.valid_images,
                                                  (new_height, new_width),
                                                  tf.image.ResizeMethod.AREA)
    dataset.test_images = tf.image.resize_images(dataset.test_images,
                                                 (new_height, new_width),
                                                 tf.image.ResizeMethod.AREA)
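
A hypothetical invocation of this script; the script name and paths are placeholders:

python evaluate_resolution.py /data/dataset /models/model.ckpt 0.5 true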