Ejemplo n.º 1
0
def main(args):
    """Denoise a .bin scene using an external denoiser executable.

    Converts the packed .bin sample data to .exr channels in the layout
    expected by the NFOR denoiser, invokes the denoiser binary, writes its
    output to ``args.output``, and removes the temporary working directory.
    """
    ttools.set_logger(args.verbose)
    # Resolve to an absolute path now: we chdir below, so a relative
    # executable path would no longer resolve.
    args.denoiser_exe = os.path.abspath(args.denoiser_exe)

    # Create a working directory for the temporary output
    LOG.debug("Saving temporary data to: %s.", args.tmp_dir)
    os.makedirs(args.tmp_dir, exist_ok=True)
    os.chdir(args.tmp_dir)

    scene_name = os.path.basename(args.scene)

    # Link folder so that the Dataset class gets the expected folder structure,
    # with one scene
    bins_folder = os.path.join(args.tmp_dir, "bins")
    os.makedirs(bins_folder, exist_ok=True)
    try:
        os.symlink(os.path.abspath(args.scene),
                   os.path.join(bins_folder, scene_name))
    except FileExistsError:
        # A previous run already created the link; reuse it.
        # (The exception object was previously bound to an unused variable.)
        LOG.warning("scene path in %s already exists.", bins_folder)

    # Convert .bin to .exr in the format expected by NFOR
    exr_folder = os.path.join(args.tmp_dir, "exr_channels")
    bins2exr(bins_folder, exr_folder, args.spp)

    LOG.info("Denoising %s", args.scene)
    # Silence the denoiser's stderr unless verbose output was requested.
    if args.verbose:
        stderr = None
    else:
        stderr = subprocess.DEVNULL
    os.makedirs(args.output, exist_ok=True)
    subprocess.call([args.denoiser_exe, exr_folder + "/", args.output],
                    stderr=stderr)
    # Clean up all temporary data. NOTE(review): this removes the current
    # working directory set by os.chdir above — confirm no later code
    # depends on the cwd still existing.
    shutil.rmtree(args.tmp_dir)
Ejemplo n.º 2
0
def main(args):
    """Generate and render random scenes with parallel worker pools.

    Scene-generation jobs are pushed to ``scene_queue`` in batches; a pool
    of workers converts them to scene files and feeds ``render_queue``,
    which a second pool renders. Work is sharded across ``args.num_workers``
    machines using ``args.worker_id`` as the index offset. A non-positive
    ``args.count`` means "generate indefinitely".

    Raises:
        ValueError: if the tile size does not evenly divide the frame size.
    """
    ttools.set_logger(args.verbose)

    if args.width % args.tile_size != 0 or args.height % args.tile_size != 0:
        LOG.error("Block size should divide width and height.")
        # Fixed typo in the user-facing message ("widt" -> "width").
        raise ValueError("Block size should divide width and height.")

    # Lazy %-style arguments instead of eager string formatting.
    LOG.info("Starting job on worker %d of %d with %d threads",
             args.worker_id, args.num_workers, args.threads)

    gen_params = GeneratorParams(args)
    render_params = dict(spp=args.spp,
                         gt_spp=args.gt_spp,
                         height=args.height,
                         width=args.width,
                         path_depth=args.path_depth,
                         tile_size=args.tile_size)

    scene_queue = JoinableQueue()
    render_queue = JoinableQueue()
    Pool(args.threads, create_scene_file, (scene_queue, render_queue))
    Pool(args.threads, render, (render_queue, ))

    LOG.info("Generating %d random scenes", args.count)

    # Batch size per iteration. When `args.count <= 0` (unlimited mode) the
    # previous `min(args.batch_size, args.count)` expression evaluated to <= 0,
    # so no work was ever queued and the loop spun forever.
    if args.count > 0:
        batch_size = min(args.batch_size, args.count)
    else:
        batch_size = args.batch_size

    count = 0  # count the number of scenes generated
    while True:
        # Generate a batch of scene files (to limit memory usage, we do not
        # queue all scenes at once).
        for _ in range(batch_size):
            # Check the limit before building the payload.
            if args.count > 0 and count == args.count:
                break
            idx = args.start_index + count * args.num_workers + args.worker_id
            data = {
                "idx": idx,
                "gen_params": gen_params,
                "render_params": render_params,
                "verbose": args.verbose,
                "clean": args.clean,
            }
            scene_queue.put(data, block=False)
            count += 1

        LOG.debug("Waiting for scene queue.")
        scene_queue.join()

        LOG.debug("Waiting for render queue.")
        render_queue.join()

        LOG.debug("Finished all queues.")

        # Only render up to `args.count` images
        if args.count > 0 and count == args.count:
            break

    LOG.debug("Shutting down the scene generator")
Ejemplo n.º 3
0
    def _setup(self):
        """Prepare the temporary working directory for a render.

        Writes the scene file with its updated header, symlinks the scene's
        resources into the temp directory, and optionally links
        [Kalantari2015]'s pretrained weight files.

        Raises:
            ValueError: if the `kalantari_data` paths are not the expected
                `Weights.dat` / `FeatureNorm.dat` pair, in that order.
        """
        args = self.args
        ttools.set_logger(args.verbose)

        # Create a working directory for the temporary output
        LOG.debug("Saving temporary data to: %s.", args.tmp_dir)
        os.makedirs(args.tmp_dir, exist_ok=True)

        # Write the scene file with updated header
        with open(args.scene_path, 'w') as fid:
            fid.write(args.scene_desc)

        # Link scene resources to the temp directory
        for path in os.listdir(args.scene_root):
            if path == "scene.pbrt":
                continue
            LOG.debug("Linking %s to temp directory", path)
            dst = os.path.join(args.tmp_dir, path)
            if os.path.exists(dst):
                continue
            os.symlink(os.path.join(args.scene_root, path), dst)

        if args.kalantari_data is not None:
            # Fixed stray apostrophe in the log message.
            LOG.info("Copying [Kalantari2015]'s pretrained weights.")
            weights, feature_norm = args.kalantari_data
            w_ = os.path.basename(weights)
            fn_ = os.path.basename(feature_norm)
            if w_ != "Weights.dat":
                LOG.error(
                    "The first argument of `kalantari2015_data` should"
                    " be `Weights.dat`, got %s", w_)
                raise ValueError("The first argument of `kalantari2015_data`"
                                 " should be `Weights.dat`")
            if fn_ != "FeatureNorm.dat":
                # Fixed: this branch validates the *second* argument, but the
                # original messages said "first".
                LOG.error(
                    "The second argument of `kalantari2015_data` should"
                    " be `FeatureNorm.dat`, got %s", fn_)
                raise ValueError("The second argument of `kalantari2015_data`"
                                 " should be `FeatureNorm.dat`")
            os.symlink(os.path.abspath(weights),
                       os.path.join(args.tmp_dir, w_))
            os.symlink(os.path.abspath(feature_norm),
                       os.path.join(args.tmp_dir, fn_))
Ejemplo n.º 4
0
        ttools.callbacks.CheckpointingCallback(checkpointer,
                                               interval=None,
                                               max_epochs=2))
    trainer.train(dataloader,
                  num_epochs=args.num_epochs,
                  val_dataloader=val_dataloader,
                  starting_epoch=starting_epoch)


if __name__ == '__main__':
    parser = ttools.BasicArgumentParser()
    # Loss term weights.
    parser.add_argument("--w_surface", type=float, default=1)
    parser.add_argument("--w_alignment", type=float, default=0.01)
    parser.add_argument("--w_template", type=float, default=10)
    parser.add_argument("--eps", type=float, default=0.04)
    parser.add_argument("--max_stroke", type=float, default=0.04)
    parser.add_argument("--canvas_size", type=int, default=128)
    parser.add_argument("--n_samples_per_curve", type=int, default=120)
    # `action='store_true'` already implies default=False and derives `dest`
    # from the flag name; the explicit duplicates were redundant.
    parser.add_argument("--chamfer", action='store_true')
    parser.add_argument("--simple_templates", action='store_true')
    parser.set_defaults(num_worker_threads=16, bs=32, lr=1e-4)
    args = parser.parse_args()
    ttools.set_logger(args.debug)
    main(args)
Ejemplo n.º 5
0
    # A periodic checkpointing operation
    trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_gen))
    trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_discrim))
    # A simple progress bar
    trainer.add_callback(ttools.callbacks.ProgressBarCallback(
        keys=["loss_g", "loss_d", "loss"]))
    # A volatile logging using visdom
    trainer.add_callback(ttools.callbacks.VisdomLoggingCallback(
        keys=["loss_g", "loss_d", "loss"],
        port=8080, env="mnist_demo"))
    # Image
    trainer.add_callback(VisdomImageCallback(port=8080, env="mnist_demo"))
    # -------------------------------------------------------------------------

    # Start the training
    LOG.info("Training started, press Ctrl-C to interrupt.")
    trainer.train(loader, num_epochs=args.epochs)


if __name__ == "__main__":
    # Build the command-line interface.
    cli = argparse.ArgumentParser()
    # TODO: subparsers
    cli.add_argument(
        "data",
        help="directory where we download and store the MNIST dataset.")
    cli.add_argument(
        "out",
        help="directory where we write the checkpoints and visualizations.")
    cli.add_argument(
        "--lr", type=float, default=1e-4,
        help="learning rate for the optimizer.")
    cli.add_argument(
        "--epochs", type=int, default=500,
        help="number of epochs to train for.")
    cli.add_argument(
        "--bs", type=int, default=64,
        help="number of elements per batch.")
    args = cli.parse_args()
    ttools.set_logger(True)  # activate debug prints
    train(args)
Ejemplo n.º 6
0
    path = os.path.join(args.output, "8_loss.png")
    plt.savefig(path)
    path = os.path.join(args.output, "8_loss.pdf")
    plt.savefig(path)


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    # Required I/O paths.
    arg_parser.add_argument('input')
    arg_parser.add_argument('--output', required=True)
    # Optimization and kernel hyper-parameters.
    arg_parser.add_argument('--nsteps', type=int, default=10000)
    arg_parser.add_argument('--ksize', type=int, default=15)
    arg_parser.add_argument('--ds', type=int, default=8)
    arg_parser.add_argument('--sigma', type=int, default=32)
    arg_parser.add_argument('--spp', type=int, default=8)
    arg_parser.add_argument('--width_factor', type=int, default=8,
                            help="how many more params for gather")
    arg_parser.add_argument('--depth_factor', type=int, default=2,
                            help="how many more params for gather")
    arg_parser.add_argument('--size', type=int, default=512)
    # Outlier simulation is disabled unless --outliers is passed.
    arg_parser.add_argument('--outliers', dest="outliers",
                            action="store_true")
    arg_parser.add_argument('--outliers_p', type=float, default=0.99)
    arg_parser.set_defaults(outliers=False)
    args = arg_parser.parse_args()
    ttools.set_logger()
    main(args)
Ejemplo n.º 7
0
import numpy as np
from skimage.io import imread
from torch.utils.data import Dataset as TorchDataset
import wget

import ttools

from .mosaic import bayer, xtrans

# Public API of this module.
__all__ = [
    "BAYER_MODE", "XTRANS_MODE", "Dataset", "TRAIN_SUBSET", "VAL_SUBSET",
    "TEST_SUBSET"
]

# Module-level logger.
LOG = ttools.get_logger(__name__)
# NOTE(review): enabling the logger here runs at import time — a module-level
# side effect; consider moving this call to the application entry point.
ttools.set_logger(True)

BAYER_MODE = "bayer"
"""Applies a Bayer mosaic pattern."""

XTRANS_MODE = "xtrans"
"""Applies an X-Trans mosaic pattern."""

TRAIN_SUBSET = "train"
"""Loads the 'train' subset of the data."""

VAL_SUBSET = "val"
"""Loads the 'val' subset of the data."""

TEST_SUBSET = "test"
"""Loads the 'test' subset of the data."""
Ejemplo n.º 8
0
                        default=16,
                        type=int,
                        help="number of output to compute")
    parser.add_argument("--imsize",
                        type=int,
                        help="if provided, override the raster output "
                        "resolution")
    parser.add_argument("--nsteps",
                        default=9,
                        type=int,
                        help="number of "
                        "interpolation steps for the interpolation")
    parser.add_argument("--nframes",
                        default=120,
                        type=int,
                        help="number of "
                        "frames for the interpolation video")
    parser.add_argument("--invert",
                        default=False,
                        action="store_true",
                        help="if True, render black on white rather than the"
                        " opposite")

    args = parser.parse_args()

    pydiffvg.set_use_gpu(False)

    ttools.set_logger(False)

    run(args)