Example #1
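# Note: these excerpts assume the enclosing module already imports os, math,
# time, numpy as np, cupy, chainer, chainer.functions as cf, cuda from chainer,
# chainermn, tabulate, pathlib.Path and PIL.Image, and that project-local
# helpers (Glow, Hyperparameters, Optimizer, glow.dataset, preprocess, printr,
# to_gpu, _float) and the parsed command-line args are in scope.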
def main():
    try:
        os.mkdir(args.snapshot_path)
    except FileExistsError:
        pass

    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    num_bins_x = 2**args.num_bits_x

    image_size = (28, 28)

    images = chainer.datasets.mnist.get_mnist(withlabel=False)[0]
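    # get_mnist returns float pixels in [0, 1]; rescale to [0, 255] before preprocessing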
    images = 255.0 * np.asarray(images).reshape((-1, ) + image_size + (1, ))
    if args.num_channels != 1:
        images = np.broadcast_to(
            images, (images.shape[0], ) + image_size + (args.num_channels, ))
    images = preprocess(images, args.num_bits_x)

    x_mean = np.mean(images)
    x_var = np.var(images)

    dataset = glow.dataset.Dataset(images)
    iterator = glow.dataset.Iterator(dataset, batch_size=args.batch_size)

    print(tabulate([
        ["#", len(dataset)],
        ["mean", x_mean],
        ["var", x_var],
    ]))

    hyperparams = Hyperparameters(args.snapshot_path)
    hyperparams.levels = args.levels
    hyperparams.depth_per_level = args.depth_per_level
    hyperparams.nn_hidden_channels = args.nn_hidden_channels
    hyperparams.image_size = image_size
    hyperparams.num_bits_x = args.num_bits_x
    hyperparams.lu_decomposition = args.lu_decomposition
    hyperparams.num_image_channels = args.num_channels
    hyperparams.save(args.snapshot_path)
    hyperparams.print()

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    optimizer = Optimizer(encoder)

    # Data dependent initialization
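    # ActNorm parameters are initialized from the statistics of the first batch, as in the Glow paper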
    if encoder.need_initialize:
        for batch_index, data_indices in enumerate(iterator):
            x = to_gpu(dataset[data_indices])
            encoder.initialize_actnorm_weights(
                x, reduce_memory=args.reduce_memory)
            break

    current_training_step = 0
    num_pixels = args.num_channels * hyperparams.image_size[0] * hyperparams.image_size[1]

    # Training loop
    for iteration in range(args.total_iteration):
        sum_loss = 0
        sum_nll = 0
        start_time = time.time()

        for batch_index, data_indices in enumerate(iterator):
            x = to_gpu(dataset[data_indices])
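            # Dequantize: add uniform noise of width 1 / num_bins_x so the discrete pixel values become continuous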
            x += xp.random.uniform(0, 1.0 / num_bins_x, size=x.shape)

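            # Dividing by log(2) * num_pixels reports the objective in bits per dimension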
            denom = math.log(2.0) * num_pixels

            factorized_z_distribution, logdet = encoder.forward_step(
                x, reduce_memory=args.reduce_memory)

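            # Correction for the discretization level of the input (log(num_bins_x) nats per dimension)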
            logdet -= math.log(num_bins_x) * num_pixels

            negative_log_likelihood = 0
            for (zi, mean, ln_var) in factorized_z_distribution:
                negative_log_likelihood += cf.gaussian_nll(zi, mean, ln_var)

            loss = (negative_log_likelihood / args.batch_size - logdet) / denom

            encoder.cleargrads()
            loss.backward()
            optimizer.update(current_training_step)
            current_training_step += 1

            sum_loss += float(loss.data)
            sum_nll += float(negative_log_likelihood.data) / args.batch_size
            printr(
                "Iteration {}: Batch {} / {} - loss: {:.8f} - nll: {:.8f} - log_det: {:.8f}".
                format(
                    iteration + 1, batch_index + 1, len(iterator),
                    float(loss.data),
                    float(negative_log_likelihood.data) / args.batch_size /
                    denom,
                    float(logdet.data) / denom))

        log_likelihood = -sum_nll / len(iterator)
        elapsed_time = time.time() - start_time
        print(
            "\033[2KIteration {} - loss: {:.5f} - log_likelihood: {:.5f} - elapsed_time: {:.3f} min".
            format(iteration + 1, sum_loss / len(iterator), log_likelihood,
                elapsed_time / 60))
        encoder.save(args.snapshot_path)
Example #2
def main():
    try:
        os.mkdir(args.snapshot_path)
    except FileExistsError:
        pass

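    # One MPI process per GPU; the process's intra-node rank selects its device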
    comm = chainermn.create_communicator()
    device = comm.intra_rank
    print("device", device, "/", comm.size)
    cuda.get_device(device).use()
    xp = cupy

    num_bins_x = 2**args.num_bits_x
    images = None

    if comm.rank == 0:
        assert args.dataset_format in ["png", "npy"]
        files = Path(args.dataset_path).glob("*.{}".format(
            args.dataset_format))
        if args.dataset_format == "png":
            images = []
            for filepath in files:
                image = np.array(Image.open(filepath)).astype("float32")
                image = preprocess(image, args.num_bits_x)
                images.append(image)
            assert len(images) > 0
            images = np.asanyarray(images)
        elif args.dataset_format == "npy":
            images = []
            for filepath in files:
                array = np.load(filepath).astype("float32")
                array = preprocess(array, args.num_bits_x)
                images.append(array)
            assert len(images) > 0
            num_files = len(images)
            images = np.asanyarray(images)
            images = images.reshape((num_files * images.shape[1], ) +
                                    images.shape[2:])
        else:
            raise NotImplementedError

        assert args.image_size == images.shape[2]

        x_mean = np.mean(images)
        x_var = np.var(images)

        print(
            tabulate([
                ["#", len(images)],
                ["mean", x_mean],
                ["var", x_var],
            ]))

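    # Rank 0 holds the full dataset; scatter_dataset distributes an equal shard to every worker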
    dataset = chainermn.scatter_dataset(images, comm, shuffle=True)

    hyperparams = Hyperparameters()
    hyperparams.levels = args.levels
    hyperparams.depth_per_level = args.depth_per_level
    hyperparams.nn_hidden_channels = args.nn_hidden_channels
    hyperparams.image_size = (args.image_size, args.image_size)
    hyperparams.num_bits_x = args.num_bits_x
    hyperparams.lu_decomposition = args.lu_decomposition

    if comm.rank == 0:
        hyperparams.save(args.snapshot_path)
        hyperparams.print()

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    encoder.to_gpu()

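    # The multi-node optimizer all-reduces gradients across workers before each Adam update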
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(alpha=1e-4), comm)
    optimizer.setup(encoder)

    current_training_step = 0
    num_pixels = 3 * hyperparams.image_size[0] * hyperparams.image_size[1]

    # Training loop
    for iteration in range(args.total_iteration):
        sum_loss = 0
        sum_nll = 0
        total_batch = 0
        start_time = time.time()
        iterator = chainer.iterators.SerialIterator(
            dataset, args.batch_size, repeat=False)

        # Data dependent initialization
        if encoder.need_initialize:
            for data in iterator:
                x = to_gpu(np.asanyarray(data))
                encoder.initialize_actnorm_weights(x)
                break

        for batch_index, data in enumerate(iterator):
            x = to_gpu(np.asanyarray(data))
            x += xp.random.uniform(0, 1.0 / num_bins_x, size=x.shape)

            batch_size = x.shape[0]
            denom = math.log(2.0) * num_pixels

            factorized_z_distribution, logdet = encoder.forward_step(x)

            logdet -= math.log(num_bins_x) * num_pixels

            negative_log_likelihood = 0
            for (zi, mean, ln_var) in factorized_z_distribution:
                negative_log_likelihood += cf.gaussian_nll(zi, mean, ln_var)

            loss = (negative_log_likelihood / batch_size - logdet) / denom

            encoder.cleargrads()
            loss.backward()
            optimizer.update()

            current_training_step += 1
            total_batch += 1

            sum_loss += float(loss.data)
            sum_nll += float(negative_log_likelihood.data) / batch_size

            if comm.rank == 0:
                printr(
                    "Iteration {}: Batch {} / {} - loss: {:.8f} - nll: {:.8f} - log_det: {:.8f}".
                    format(
                        iteration + 1, batch_index + 1,
                        len(dataset) // batch_size, float(loss.data),
                        float(negative_log_likelihood.data) / batch_size /
                        denom,
                        float(logdet.data) / denom))

            if comm.rank == 0 and (batch_index + 1) % 100 == 0:
                encoder.save(args.snapshot_path)

        if comm.rank == 0:
            log_likelihood = -sum_nll / total_batch
            elapsed_time = time.time() - start_time
            print(
                "\033[2KIteration {} - loss: {:.5f} - log_likelihood: {:.5f} - elapsed_time: {:.3f} min".
                format(iteration + 1, sum_loss / total_batch, log_likelihood,
                       elapsed_time / 60))
            encoder.save(args.snapshot_path)
Example #3
def main():
    try:
        os.mkdir(args.snapshot_path)
    except FileExistsError:
        pass

    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    num_bins_x = 2**args.num_bits_x

    assert args.dataset_format in ["png", "npy"]

    # Get datasets
    files = Path(args.dataset_path).glob("*.{}".format(args.dataset_format))
    if args.dataset_format == "png":
        images = []
        for filepath in files:
            image = np.array(Image.open(filepath)).astype("float32")
            image = preprocess(image, args.num_bits_x)
            images.append(image)
        assert len(images) > 0
        images = np.asanyarray(images)
    elif args.dataset_format == "npy":
        images = []
        for filepath in files:
            array = np.load(filepath).astype("float32")
            array = preprocess(array, args.num_bits_x)
            images.append(array)
        assert len(images) > 0
        num_files = len(images)
        images = np.asanyarray(images)
        images = images.reshape((num_files * images.shape[1], ) +
                                images.shape[2:])
    else:
        raise NotImplementedError

    # Print dataset information
    x_mean = np.mean(images)
    x_var = np.var(images)

    dataset = glow.dataset.Dataset(images)
    iterator = glow.dataset.Iterator(dataset, batch_size=args.batch_size)

    print(
        tabulate([
            ["#", len(dataset)],
            ["mean", x_mean],
            ["var", x_var],
        ]))

    # Hyperparameters
    hyperparams = Hyperparameters()
    hyperparams.levels = args.levels
    hyperparams.depth_per_level = args.depth_per_level
    hyperparams.nn_hidden_channels = args.nn_hidden_channels
    hyperparams.image_size = images.shape[2:]
    hyperparams.num_bits_x = args.num_bits_x
    hyperparams.lu_decomposition = args.lu_decomposition
    hyperparams.squeeze_factor = args.squeeze_factor
    hyperparams.save(args.snapshot_path)
    hyperparams.print()

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    optimizer = Optimizer(encoder)

    # Data dependent initialization
    if encoder.need_initialize:
        for batch_index, data_indices in enumerate(iterator):
            x = to_gpu(dataset[data_indices])
            encoder.initialize_actnorm_weights(x)
            break

    current_training_step = 0
    num_pixels = 3 * hyperparams.image_size[0] * hyperparams.image_size[1]

    # Training loop
    for iteration in range(args.total_iteration):
        sum_loss = 0
        sum_nll = 0
        sum_kld = 0
        start_time = time.time()

        for batch_index, data_indices in enumerate(iterator):
            x = to_gpu(dataset[data_indices])
            x += xp.random.uniform(0, 1.0 / num_bins_x, size=x.shape)

            denom = math.log(2.0) * num_pixels

            factorized_z_distribution, logdet = encoder.forward_step(x)

            logdet -= math.log(num_bins_x) * num_pixels

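            # Optional KL term pulls each factorized posterior toward a standard normal when args.regularize_z is set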
            kld = 0
            negative_log_likelihood = 0
            factor_z = []
            for (zi, mean, ln_var) in factorized_z_distribution:
                negative_log_likelihood += cf.gaussian_nll(zi, mean, ln_var)
                if args.regularize_z:
                    kld += cf.gaussian_kl_divergence(mean, ln_var)
                factor_z.append(zi.data.reshape(zi.shape[0], -1))
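            # Additionally score the concatenated latent under a standard normal prior (zero mean, ln_var = 0)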
            factor_z = xp.concatenate(factor_z, axis=1)
            negative_log_likelihood += cf.gaussian_nll(
                factor_z, xp.zeros(factor_z.shape, dtype='float32'),
                xp.zeros(factor_z.shape, dtype='float32'))
            loss = (negative_log_likelihood + kld) / args.batch_size - logdet
            loss = loss / denom

            encoder.cleargrads()
            loss.backward()
            optimizer.update(current_training_step)

            current_training_step += 1

            sum_loss += _float(loss)
            sum_nll += _float(negative_log_likelihood) / args.batch_size
            sum_kld += _float(kld) / args.batch_size
            printr(
                "Iteration {}: Batch {} / {} - loss: {:.8f} - nll: {:.8f} - kld: {:.8f} - log_det: {:.8f}\n"
                .format(
                    iteration + 1, batch_index + 1, len(iterator),
                    _float(loss),
                    _float(negative_log_likelihood) / args.batch_size / denom,
                    _float(kld) / args.batch_size,
                    _float(logdet) / denom))

            if (batch_index + 1) % 100 == 0:
                encoder.save(args.snapshot_path)

        mean_log_likelihood = -sum_nll / len(iterator)
        mean_kld = sum_kld / len(iterator)
        elapsed_time = time.time() - start_time
        print(
            "\033[2KIteration {} - loss: {:.5f} - log_likelihood: {:.5f} - kld: {:.5f} - elapsed_time: {:.3f} min\n"
            .format(iteration + 1, sum_loss / len(iterator),
                    mean_log_likelihood, mean_kld, elapsed_time / 60))
        encoder.save(args.snapshot_path)
Example #4
def main():
    try:
        os.mkdir(args.snapshot_path)
    except FileExistsError:
        pass

    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cupy

    num_bins_x = 2**args.num_bits_x

    assert args.dataset_format in ["png", "npy"]

    files = Path(args.dataset_path).glob("*.{}".format(args.dataset_format))
    if args.dataset_format == "png":
        images = []
        for filepath in files:
            image = np.array(Image.open(filepath)).astype("float32")
            image = preprocess(image, args.num_bits_x)
            images.append(image)
        assert len(images) > 0
        images = np.asanyarray(images)
    elif args.dataset_format == "npy":
        images = []
        for filepath in files:
            array = np.load(filepath).astype("float32")
            array = preprocess(array, args.num_bits_x)
            images.append(array)
        assert len(images) > 0
        num_files = len(images)
        images = np.asanyarray(images)
        images = images.reshape((num_files * images.shape[1], ) +
                                images.shape[2:])
    else:
        raise NotImplementedError

    x_mean = np.mean(images)
    x_var = np.var(images)

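    # Hold out the last validate_split fraction of the images for validation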
    last_train_image = int((1 - args.validate_split) * len(images))

    train_dataset = glow.dataset.Dataset(images[:last_train_image])
    val_dataset = glow.dataset.Dataset(images[last_train_image:])

    train_iterator = glow.dataset.Iterator(train_dataset,
                                           batch_size=args.batch_size)
    val_iterator = glow.dataset.Iterator(val_dataset,
                                         batch_size=args.batch_size)

    print(
        tabulate([
            ["# train samples", len(train_dataset)],
            ["# validate samples", len(val_dataset)],
            ["overall mean", x_mean],
            ["overall var", x_var],
        ]))

    hyperparams = Hyperparameters()
    hyperparams.levels = args.levels
    hyperparams.depth_per_level = args.depth_per_level
    hyperparams.nn_hidden_channels = args.nn_hidden_channels
    hyperparams.image_size = images.shape[2:]
    hyperparams.num_bits_x = args.num_bits_x
    hyperparams.lu_decomposition = args.lu_decomposition
    hyperparams.squeeze_factor = args.squeeze_factor
    hyperparams.save(args.snapshot_path)
    hyperparams.print()

    encoder = Glow(hyperparams, hdf5_path=args.snapshot_path)
    if using_gpu:
        encoder.to_gpu()

    optimizer = Optimizer(encoder)

    # Data dependent initialization
    if encoder.need_initialize:
        for batch_index, data_indices in enumerate(train_iterator):
            x = to_gpu(train_dataset[data_indices])
            encoder.initialize_actnorm_weights(x)
            break

    current_training_step = 0
    num_pixels = 3 * hyperparams.image_size[0] * hyperparams.image_size[1]

    # Training loop
    for iteration in range(args.total_iteration):
        # Training step
        sum_loss = 0
        sum_nll = 0
        sum_kld = 0
        start_time = time.time()

        for batch_index, data_indices in enumerate(train_iterator):
            x = to_gpu(train_dataset[data_indices])
            x += xp.random.uniform(0, 1.0 / num_bins_x, size=x.shape)

            denom = math.log(2.0) * num_pixels

            factorized_z_distribution, logdet = encoder.forward_step(x)

            logdet -= math.log(num_bins_x) * num_pixels

            kld = 0
            negative_log_likelihood = 0
            for (zi, mean, ln_var) in factorized_z_distribution:
                negative_log_likelihood += cf.gaussian_nll(zi, mean, ln_var)
                if args.regularize_z:
                    kld += cf.gaussian_kl_divergence(mean, ln_var)

            loss = (negative_log_likelihood + kld) / args.batch_size - logdet
            loss = loss / denom

            encoder.cleargrads()
            loss.backward()
            optimizer.update(current_training_step)

            current_training_step += 1

            sum_loss += _float(loss)
            sum_nll += _float(negative_log_likelihood) / args.batch_size
            sum_kld += _float(kld) / args.batch_size
            if (batch_index + 1) % 200 == 0:
                print(
                    "Training Iteration {}: Batch {} / {} - loss: {:.8f} - nll: {:.8f} - kld: {:.8f} - log_det: {:.8f}"
                    .format(iteration + 1, batch_index + 1,
                            len(train_iterator), _float(loss),
                            _float(negative_log_likelihood) / args.batch_size,
                            _float(kld) / args.batch_size, _float(logdet)))

                encoder.save(args.snapshot_path)
        mean_log_likelihood = -sum_nll / len(train_iterator)
        mean_kld = sum_kld / len(train_iterator)
        elapsed_time = time.time() - start_time
        print(
            "\033[2KIteration {} Summary - training loss: {:.5f} - train log_likelihood: {:.5f} - train kld: {:.5f} - elapsed_time: {:.3f} min"
            .format(iteration + 1, sum_loss / len(train_iterator),
                    mean_log_likelihood, mean_kld, elapsed_time / 60))

        # Validation Step
        sum_loss = 0
        sum_nll = 0
        sum_kld = 0
        start_time = time.time()

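        # Validation pass: no_backprop_mode skips building the computation graph, so no gradients are stored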
        with chainer.no_backprop_mode():
            for batch_index, data_indices in enumerate(val_iterator):
                x = to_gpu(val_dataset[data_indices])
                x += xp.random.uniform(0, 1.0 / num_bins_x, size=x.shape)

                denom = math.log(2.0) * num_pixels

                factorized_z_distribution, logdet = encoder.forward_step(x)

                logdet -= math.log(num_bins_x) * num_pixels

                kld = 0
                negative_log_likelihood = 0
                for (zi, mean, ln_var) in factorized_z_distribution:
                    negative_log_likelihood += cf.gaussian_nll(
                        zi, mean, ln_var)
                    if args.regularize_z:
                        kld += cf.gaussian_kl_divergence(mean, ln_var)
                loss = (negative_log_likelihood +
                        kld) / args.batch_size - logdet
                loss = loss / denom

                sum_loss += _float(loss)
                sum_nll += _float(negative_log_likelihood) / args.batch_size
                sum_kld += _float(kld) / args.batch_size

        # Summary stats for iteration
        mean_log_likelihood = -sum_nll / len(val_iterator)
        mean_kld = sum_kld / len(val_iterator)
        elapsed_time = time.time() - start_time
        print(
            "\033[2KIteration {} Summary - validation loss: {:.5f} - validation log_likelihood: {:.5f} - validation kld: {:.5f} - elapsed_time: {:.3f} min"
            .format(iteration + 1, sum_loss / len(val_iterator),
                    mean_log_likelihood, mean_kld, elapsed_time / 60))
        encoder.save(args.snapshot_path)