Code Example #1
File: my_dataset.py Project: kzmssk/cppn
    def __init__(self, paths: list, width: int, height: int, z_size: int):
        self.paths = paths
        self.width = width
        self.height = height
        self.z_size = z_size
        self.input_data = InputData(self.width, self.height, self.z_size)
        super(MyDataset, self).__init__()
Code Example #2
File: test_input_data.py Project: kzmssk/cppn
def test_input_data():
    data = InputData(width=12, height=14, z_size=2)
    x, z = data.as_batch()

    # check shape
    assert x.shape == (12 * 14, 3)
    assert z.shape == (12 * 14, 2)
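Note: the test above pins down the shapes that InputData.as_batch must return: one row per pixel, with 3 coordinate features and z_size latent features. A minimal sketch of a compatible class follows; the (x, y, r) coordinate encoding, the [-1, 1] normalization, and the uniform latent sampling are assumptions, not the project's actual implementation.

import numpy


class InputDataSketch:
    """ Hypothetical stand-in for InputData, consistent with test_input_data """

    def __init__(self, width: int, height: int, z_size: int):
        self.width = width
        self.height = height
        self.z_size = z_size

    def as_batch(self, z=None):
        # per-pixel coordinates normalized to [-1, 1], plus radial distance r
        xs = numpy.linspace(-1.0, 1.0, self.width, dtype=numpy.float32)
        ys = numpy.linspace(-1.0, 1.0, self.height, dtype=numpy.float32)
        gx, gy = numpy.meshgrid(xs, ys)                        # each [height, width]
        r = numpy.sqrt(gx ** 2 + gy ** 2)
        x = numpy.stack([gx, gy, r], axis=-1).reshape(-1, 3)   # [width * height, 3]

        # one latent vector shared by all pixels (can be fixed via the z argument)
        if z is None:
            z = numpy.random.uniform(-1.0, 1.0, self.z_size).astype(numpy.float32)
        z = numpy.tile(z, (x.shape[0], 1))                     # [width * height, z_size]
        return x, z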
Code Example #3
File: gen_images.py Project: kzmssk/cppn
def gen_images():
    parser = argparse.ArgumentParser(description="tile multiple generated images into a single image")
    parser.add_argument('--out', type=Path, default=Path('./tmp/out.png'))
    parser.add_argument('--n_rows', type=int, default=5)
    parser.add_argument('--n_cols', type=int, default=5)
    parser.add_argument('--model_config_path', type=Path, default=Path('./conf/model.yaml'))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--load', type=Path)
    parser.add_argument('--size', type=int)
    args = parser.parse_args()

    batch_size = args.n_rows * args.n_cols

    # load model config
    model_config = ModelConfig.load(args.model_config_path)

    # override size of output before the model is built
    if args.size:
        model_config.width = args.size
        model_config.height = args.size

    model = CPPN(model_config)

    if args.load:
        assert args.load.exists()
        print(f"load model from {args.load}")
        chainer.serializers.load_npz(args.load, model)

    # model to gpu
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    # init x and z
    input_data = InputData(width=model_config.width, height=model_config.height, z_size=model_config.z_size)

    x, z = [], []
    for _ in range(batch_size):
        _x, _z = input_data.as_batch()
        x.append(_x)
        z.append(_z)

    x = numpy.concatenate(x)
    z = numpy.concatenate(z)

    # to device
    xp = model.xp
    x = chainer.Variable(xp.asarray(x))
    z = chainer.Variable(xp.asarray(z))

    y = model.forward(x, z)
    y = chainer.cuda.to_cpu(y.data)

    # chainer variable [B, 1, W, H], float [0, 1] -> numpy array uint8 [0, 255]
    y = post_process_output(y)
    y = y.reshape((args.n_rows, args.n_cols, 1, input_data.height, input_data.width))
    y = y.transpose((0, 3, 1, 4, 2))
    y = y.reshape((args.n_rows * input_data.height, args.n_cols * input_data.width))
    Image.fromarray(y).save(args.out)
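Note: post_process_output is not shown in these examples; its contract is only given by the comment above (float in [0, 1] in, uint8 in [0, 255] out). A minimal sketch under that assumption, with clipping added as a guess:

import numpy


def post_process_output_sketch(y):
    """ Hypothetical equivalent of post_process_output: float [0, 1] -> uint8 [0, 255] """
    y = numpy.clip(y, 0.0, 1.0)      # guard against values slightly outside [0, 1]
    return (y * 255.0).astype(numpy.uint8)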
Code Example #4
    def __init__(self, width: int, height: int, z_size: int):
        self.width = width
        self.height = height
        self.z_size = z_size
        self.input_data = InputData(self.width, self.height, self.z_size)

        # use only train dataset
        self.train_data, _ = chainer.datasets.get_mnist()
        super(MnistDataset, self).__init__()
Code Example #5
    def __init__(self, width: int, height: int, z_size: int, data_path: Path):
        self.width = width
        self.height = height
        self.z_size = z_size
        self.input_data = InputData(self.width, self.height, self.z_size)

        # load image data
        with gzip.open(data_path, 'rb') as f:
            self.data = numpy.frombuffer(f.read(), numpy.uint8,
                                         offset=16).reshape(-1, 28 * 28)

        super(EMnistDataset, self).__init__()
Code Example #6
def test_conditional_forward():
    width = 5
    height = 7
    z_size = 2
    batch_size = 3
    model = ConditionalCPPN(
        ConditionalModelConfig(width=width,
                               height=height,
                               n_units_xyr=3,
                               n_hidden_units=[
                                   10,
                                   10,
                               ],
                               z_size=z_size,
                               in_width=64,
                               in_height=64,
                               in_channel=1))
    x, z = [], []
    for _ in range(batch_size):
        _x, _z = InputData(width=width, height=height, z_size=z_size).as_batch()
        x.append(_x)
        z.append(_z)
    x = numpy.concatenate(x, axis=0)
    z = numpy.concatenate(z, axis=0)
    c = get_dammy_input(batch_size, 64, 64, 1)  # init dammy conditional input
    y = model.forward(x, z, c)
    assert y.shape == (batch_size, 1, width, height)
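Note: get_dammy_input is a test helper whose definition is not shown. Judging from the call above and the in_width/in_height/in_channel config fields, it should return a conditional input batch; the (B, C, H, W) layout and the zero filling in this sketch are assumptions.

import numpy


def get_dammy_input_sketch(batch_size, width, height, channel):
    """ Hypothetical stand-in for get_dammy_input: a zero-filled conditional input batch """
    return numpy.zeros((batch_size, channel, height, width), dtype=numpy.float32)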
Code Example #7
class MnistDataset(chainer.dataset.DatasetMixin):
    def __init__(self, width: int, height: int, z_size: int):
        self.width = width
        self.height = height
        self.z_size = z_size
        self.input_data = InputData(self.width, self.height, self.z_size)

        # use only train dataset
        self.train_data, _ = chainer.datasets.get_mnist()
        super(MnistDataset, self).__init__()
    
    def __len__(self):
        return len(self.train_data)

    def get_example(self, i: int):
        """ Return batch of image [1, 1, S, S], where S = `size` """
        x, z = self.input_data.as_batch()

        c, _ = self.train_data[i]
        c = c * -1 + 1.0  # flip [0, 1] -> [1, 0]
        c = c.reshape((28, 28)) * 255.0
        image = Image.fromarray(c.astype(numpy.uint8)).resize((self.width, self.height))
        c = numpy.asarray(image).astype(numpy.float32) / 255.0  # 2D array
        c = c.reshape((1, 1, self.width, self.height))
        return x, z, c
Code Example #8
class EMnistDataset(chainer.dataset.DatasetMixin):
    def __init__(self, width: int, height: int, z_size: int, data_path: Path):
        self.width = width
        self.height = height
        self.z_size = z_size
        self.input_data = InputData(self.width, self.height, self.z_size)

        # load image data
        with gzip.open(data_path, 'rb') as f:
            self.data = numpy.frombuffer(f.read(), numpy.uint8,
                                         offset=16).reshape(-1, 28 * 28)

        super(EMnistDataset, self).__init__()

    def __len__(self):
        return len(self.data)

    def get_example(self, i: int):
        """ Return batch of image [1, 1, S, S], where S = `size` """
        x, z = self.input_data.as_batch()

        c = self.data[i]  # uint8 in [0, 255]
        c = 255 - c  # flip colors; result stays uint8
        c = c.reshape((28, 28)).T
        image = Image.fromarray(c).resize((self.width, self.height))
        c = numpy.asarray(image).astype(numpy.float32) / 255.0  # 2D array
        c = c.reshape((1, 1, self.width, self.height))
        return x, z, c
Code Example #9
File: my_dataset.py Project: kzmssk/cppn
class MyDataset(chainer.dataset.DatasetMixin):
    def __init__(self, paths: list, width: int, height: int, z_size: int):
        self.paths = paths
        self.width = width
        self.height = height
        self.z_size = z_size
        self.input_data = InputData(self.width, self.height, self.z_size)
        super(MyDataset, self).__init__()

    def __len__(self):
        return len(self.paths)

    def get_example(self, i):
        """ returns x, z, c """
        # per-pixel coordinate inputs and latent vector
        x, z = self.input_data.as_batch()

        # open image and convert to [1, 1, W, H]
        c = Image.open(Path(self.paths[i]))
        c = c.resize((self.width, self.height))

        c = c.convert('L')
        c = numpy.asarray(c, dtype=numpy.float32) / 255.  # [0, 255] -> [0, 1]
        c = c.reshape((1, 1, self.width, self.height))  # [1, 1, W, H]

        return x, z, c
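Note: since MyDataset is a chainer.dataset.DatasetMixin, it can be fed to Chainer's standard iterators. A usage sketch with placeholder paths and sizes; the project's own training loop may batch the (x, z, c) tuples differently.

import chainer

dataset = MyDataset(paths=['img_0.png', 'img_1.png'], width=64, height=64, z_size=8)  # placeholder values
iterator = chainer.iterators.SerialIterator(dataset, batch_size=2, shuffle=True)

batch = iterator.next()                            # list of (x, z, c) tuples
x, z, c = chainer.dataset.concat_examples(batch)   # stacks each element along a new batch axis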
Code Example #10
def gen_input_batch(batch_size, width, height, z_size):
    # create inputs
    inputs = {}
    x, z = [], []
    for idx in range(batch_size):
        _x, _z = InputData(width=width, height=height,
                           z_size=z_size).as_batch()
        _x = chainer.Variable(_x)
        _z = chainer.Variable(_z)
        x.append(_x)
        z.append(_z)
        inputs[idx] = (_x, _z)
    x = F.concat(x, axis=0)
    z = F.concat(z, axis=0)
    return x, z, inputs
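Note: a usage sketch for gen_input_batch. The shape comments follow from Code Example #2, where as_batch returns one row per pixel; the per-sample pairs are also kept in the returned inputs dict.

x, z, inputs = gen_input_batch(batch_size=3, width=5, height=7, z_size=2)

# x: chainer.Variable of shape (3 * 5 * 7, 3) -- per-pixel coordinate inputs, concatenated over the batch
# z: chainer.Variable of shape (3 * 5 * 7, 2) -- per-pixel latent inputs, concatenated over the batch
# inputs[0]: the (x, z) Variable pair of the first sample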
Code Example #11
def test_unconditional_forward():
    width = 5
    height = 7
    z_size = 2
    batch_size = 3

    model = CPPN(ModelConfig(width=width, height=height, n_units_xyrz=3, n_hidden_units=[5, 5], z_size=z_size))

    x, z = [], []
    for _ in range(batch_size):
        _x, _z = InputData(width=width, height=height, z_size=z_size).as_batch()
        x.append(_x)
        z.append(_z)
    x = numpy.concatenate(x, axis=0)
    z = numpy.concatenate(z, axis=0)

    y = model.forward(x, z)
    assert y.shape == (batch_size, 1, width, height)
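Note: ModelConfig is constructed with keyword arguments here and loaded from YAML (conf/model.yaml) in the scripts above. A minimal sketch compatible with both uses; the dataclass layout and PyYAML loading are assumptions about how the project implements it.

import dataclasses
import pathlib
import typing

import yaml


@dataclasses.dataclass
class ModelConfigSketch:
    """ Hypothetical stand-in for ModelConfig """
    width: int
    height: int
    n_units_xyrz: int
    n_hidden_units: typing.List[int]
    z_size: int

    @classmethod
    def load(cls, path: pathlib.Path):
        # read fields from a YAML file such as conf/model.yaml
        with open(path) as f:
            return cls(**yaml.safe_load(f))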
Code Example #12
File: interp_movie.py Project: kzmssk/cppn
def interp_movie():
    parser = argparse.ArgumentParser(description="Gen gif movie")
    parser.add_argument('--out', type=Path, default=Path('./tmp/out.gif'))
    parser.add_argument('--frames', type=int, default=10)
    parser.add_argument('--z_points', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=50)
    parser.add_argument('--model_config_path',
                        type=Path,
                        default=Path('./conf/model.yaml'))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--load', type=Path)
    parser.add_argument('--size', type=int)
    args = parser.parse_args()

    # create directory to put result
    args.out.parent.mkdir(exist_ok=True)

    # init model
    model_config = ModelConfig.load(args.model_config_path)

    # override size of output
    if args.size:
        model_config.width = args.size
        model_config.height = args.size

    model = CPPN(model_config)

    if args.load:
        assert args.load.exists()
        print(f"load model from {args.load}")
        chainer.serializers.load_npz(args.load, model)

    # model to gpu
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    # init x and z
    input_data = InputData(width=model_config.width,
                           height=model_config.height,
                           z_size=model_config.z_size)

    # gen frames
    images = []

    zs = []
    for _ in range(args.z_points):
        zs.extend(
            interp_z(sample_z(model_config.z_size),
                     sample_z(model_config.z_size), args.frames))

    for i in range(0, len(zs), args.batch_size):

        begin_idx = i
        end_idx = min(i + args.batch_size, len(zs))  # exclusive slice end, so don't drop the last z
        print(f"{begin_idx} -> {end_idx}")

        # make input batch
        x = []
        z = []
        for _z in zs[begin_idx:end_idx]:
            _x, _z = input_data.as_batch(z=_z)
            x.append(_x)
            z.append(_z)

        if len(x) == 0:
            break

        x = numpy.concatenate(x)
        z = numpy.concatenate(z)

        # to device
        xp = model.xp
        x = chainer.Variable(xp.asarray(x))
        z = chainer.Variable(xp.asarray(z))

        y = model.forward(x, z)
        y = chainer.cuda.to_cpu(y.data)

        # chainer variable [B, 1, W, H], float [0, 1] -> numpy array uint8 [0, 255]
        y = post_process_output(y)

        for _y in y:
            images.append(Image.fromarray(_y[0]))

    # save as gif
    images[0].save(str(args.out), save_all=True, append_images=images[1:])
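Note: sample_z and interp_z are used here but not defined in these examples. A minimal sketch consistent with the calls above; uniform sampling and linear interpolation are assumptions.

import numpy


def sample_z_sketch(z_size):
    """ Hypothetical sample_z: draw one latent vector """
    return numpy.random.uniform(-1.0, 1.0, z_size).astype(numpy.float32)


def interp_z_sketch(z_begin, z_end, frames):
    """ Hypothetical interp_z: `frames` latent vectors linearly interpolated from z_begin to z_end """
    return [z_begin + (z_end - z_begin) * t
            for t in numpy.linspace(0.0, 1.0, frames, dtype=numpy.float32)]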