Example No. 1
    def test_surjectivity(self):
        t = Tilemap(shape=(5, 5, 5))
        self.assertEqual(t[0, 0, 0].name, 'air')
        t[0, 0, 0] = Tile('minecraft:stone')
        self.assertEqual(t.palette[1].name, 'stone')
        t[0, 0, 0] = Tile('minecraft:air')
        self.assertEqual(len(t.palette), 1)
Example No. 2
    def test_init(self):
        # Init empty
        t = Tilemap(shape=(5, 5, 5))

        # Init from data and palette
        d = np.array([[[0, 0], [0, 1]], [[1, 0], [1, 1]]])
        p = [Tile('minecraft:air'), Tile('minecraft:stone')]
        t = Tilemap(data=d, palette=p)

        # Init wrong number of arguments
        with self.assertRaises(TilemapInvalidInitArguments):
            d = np.array([[[0, 0], [0, 1]], [[1, 0], [1, 1]]])
            p = [Tile('minecraft:air'), Tile('minecraft:stone')]
            t = Tilemap(shape=(1, 1, 1), data=d, palette=p)

        # Init data wrong shape
        with self.assertRaises(TilemapDataShapeError):
            d = np.array([[1, 0], [1, 1]])  # 2D
            p = [Tile('minecraft:air'), Tile('minecraft:stone')]
            t = Tilemap(data=d, palette=p)

        # Init data wrong type
        with self.assertRaises(TilemapDataTypeError):
            d = "foo"
            p = [Tile('minecraft:air'), Tile('minecraft:stone')]
            t = Tilemap(data=d, palette=p)

        # Init palette wrong type
        with self.assertRaises(TilemapPaletteIsNotAListOfTiles):
            d = np.array([[[0, 0], [0, 1]], [[1, 0], [1, 1]]])
            p = ["foo", "bar"]
            t = Tilemap(data=d, palette=p)
Example No. 3
    def generate(self, shape):
        """Generate tilemap with the style.

        Runs the generator part of the model on a random tensor and maps the
        resulting 4D tensor's last dimension to Minecraft tiles, thus creating
        a 3D tilemap.

        Args:
            shape (tuple of int): Size of the generated tilemap.

        Returns:
            Schematic: The generated tilemap.
        """
        # Importing Tensorflow
        import tensorflow as tf

        # Defining input shapes
        in_w, in_h, in_l = (max(s//8, 1) for s in shape)
        c = self.models.generator.input_channels

        # Generating random noise
        random_noise = np.random.normal(size=(1, in_w, in_h, in_l, c))

        # (Re)compiling the generator model
        self.models.generator.model.compile(
            loss=tf.keras.losses.mean_squared_error, optimizer='adam')

        # Run the network on the random noise
        pred = self.models.generator.model.predict(random_noise)
        print(pred)

        w, h, l, latent_dim = pred.shape[1:5]

        palette = [Tile(id_) for id_ in self.palette]
        encoded_palette = np.array(list(self.palette.values()))

        # Finding the nearest encoded palette vector for each predicted
        # latent vector
        block_data = np.linalg.norm(
            pred.reshape((1, w, h, l, 1, latent_dim))
            - encoded_palette.reshape((1, 1, 1, 1, -1, latent_dim)),
            axis=-1
        ).argmin(axis=-1).reshape((w, h, l))
        print(block_data)

        tlmp = Schematic(data=block_data, palette=palette, 
                         version=self.info['mc_version'])

        return tlmp
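
The nearest-neighbour lookup above (the np.linalg.norm(...).argmin(...) step) can be sketched in isolation. The following is a minimal, self-contained illustration with a made-up 2D latent space and a three-entry palette; the shapes and values are purely illustrative and are not taken from creAI:

import numpy as np

# Hypothetical encoded palette: three palette entries in a 2D latent space.
encoded_palette = np.array([[0.0, 0.0],   # e.g. 'air'
                            [1.0, 1.0],   # e.g. 'stone'
                            [2.0, 0.0]])  # e.g. 'dirt'

# Hypothetical generator output: a 2x2x2 tilemap of 2D latent vectors.
pred = np.random.normal(size=(1, 2, 2, 2, 2))
w, h, l, latent_dim = pred.shape[1:5]

# Distance from every predicted latent vector to every palette vector,
# then the index of the closest palette entry for each position.
block_data = np.linalg.norm(
    pred.reshape((1, w, h, l, 1, latent_dim))
    - encoded_palette.reshape((1, 1, 1, 1, -1, latent_dim)),
    axis=-1
).argmin(axis=-1).reshape((w, h, l))

print(block_data)  # integer indices into the palette, shape (2, 2, 2)

Each entry of block_data is the index of the palette vector closest to the corresponding predicted latent vector, which is how generate() turns the generator output into tile indices.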
Example No. 4
    def display(self, style):
        """Display mode.

        This subcommand displays a 2D scatter plot of the latent space.

        Args:
            style (str): Name of the style.
        """
        self.stl = Style(style, mc_version='1.15.2')
        vae = self.stl.models.vae

        vae_data = Tile.vectorize_all(self.stl.info['mc_version'])
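        # The encoder returns several outputs; the [0] below presumably
        # selects the latent mean vectors used for plotting.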
        encodings = vae.encoder.predict(vae_data)[0]

        tiles = [
            Tile('minecraft:quartz_stairs[half=bottom]', version='1.15.2'),
            Tile('minecraft:birch_stairs[half=bottom]', version='1.15.2'),
            Tile('minecraft:brick_stairs[half=bottom]', version='1.15.2'),
            Tile('minecraft:bricks', version='1.15.2'),
            Tile('minecraft:nether_bricks', version='1.15.2'),
            Tile('minecraft:white_carpet', version='1.15.2'),
            Tile('minecraft:snow[layers=1]', version='1.15.2')
        ]

        vae_data = vectorize(tiles, pad_to=vae.input_dim)
        encodings_subset = vae.encoder.predict(vae_data)[0]

        import matplotlib.pyplot as plt

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(encodings[:, 0],
                   encodings[:, 1],
                   c=[[.9, .9, .9]],
                   marker='x')
        ax.scatter(encodings_subset[:, 0],
                   encodings_subset[:, 1],
                   color='r',
                   marker='x')
        for idx, t in enumerate(tiles):
            ax.annotate(t.name,
                        (encodings_subset[idx, 0], encodings_subset[idx, 1]))
        ax.set_title('2D latent space of Minecraft tiles')
        plt.show()
Example No. 5
    def test_id(self):
        t1 = Tile('minecraft:something[arg1=foo,arg2=bar]')
        self.assertEqual(t1.name, 'something')
        self.assertEqual(t1.data_values[0], 'arg1=foo')
        self.assertEqual(t1.data_values[1], 'arg2=bar')

        with self.assertRaises(InvalidMinecraftNamespaceID):
            t2 = Tile('minecraftsomething[arg1=foo,arg2=bar]')
        with self.assertRaises(InvalidMinecraftNamespaceID):
            t2 = Tile('minecraft:somethingarg1=foo,arg2=bar]')
        with self.assertRaises(InvalidMinecraftNamespaceID):
            t2 = Tile('minecraft:something[arg1=foo,arg2=bar')
        with self.assertRaises(InvalidMinecraftNamespaceID):
            t2 = Tile('minecraftt:something[arg1=foo,arg2=bar]')
        with self.assertRaises(InvalidMinecraftNamespaceID):
            t2 = Tile('minecraft:something[arg1=foo, arg2=bar]')
Example No. 6
    def test_model_loading(self):
        tiles = Tile.list_all('1.15.2')
        for t in tiles:
            textures = t.textures
Example No. 7
    def train(self, vae=False, generator=False,
              schem_pth: PathLike = None, batch_size=128, epochs=100):
        """Training a style for a specific tilemap.

        Training takes place in two steps. First, a variation autoencoder is 
        trained to encode all existing blockstates of a given Minecraft 
        version. Tiles are passed to the model as vectors, and a 
        low-dimensional representation of them is obtained during training. 
        Because the vectors are created based on the properties of the 
        three-dimensional models of the tile, the visually similar blocks in 
        the latent-space are close to each other. Tiles and their associated 
        codes are saved in a dictionary.
        We then teach a convolutional upscaling network (generator) that 
        generates random noise tensors into the latent-space of the 
        variational autoencoder, thus generating tilemaps. The size of the 
        input noise tensor determines its output, the ratio of the two 
        is 1: 8. During training, the size of the input is varied at random, 
        for which the RandomNoise class is responsible. The generator grid is 
        taught to minimize a so-called 'feature-loss'.

        Args:
            vae (bool, optional): Training the VAE part of the model.
            generator (bool, optional): Training the generator part of the
                model.
            schem_pth (os.PathLike, optional): Path to the schematic of the 
                example tilemap.
            batch_size (int, optional): Batch size.
            epochs (int, optional): Number of epochs.

        """
        # Importing Tensorflow libraries
        import tensorflow as tf
        from tensorflow.keras.callbacks import Callback
        tf.compat.v1.disable_eager_execution()
        
        # Importing the implementations of the different parts of the model
        from creAI.ml.models import VAE, GeneratorNetwork
        from creAI.ml.data_generators import RandomNoise
        from creAI.ml.train import init_generator, init_vae, train_generator, train_vae
        
        # Defining a custom callback function that saves the whole style at
        # the end of each epoch.
        class StyleTrainingCallback(Callback):
            def __init__(self, style: Style):
                super().__init__()
                self.style = style

            def on_epoch_end(self, epoch, logs=None):
                self.style.save()

        try:
            mc_version = self.info['mc_version']
        except KeyError:
            raise UndefinedStyleMinecraftVersion(self.name)

        # Training VAE
        if vae:
            print('Loading training data...')
            # Vectorizing all tiles from the given Minecraft version
            vae_data = Tile.vectorize_all(mc_version)
            # Init VAE
            self.models.vae = init_vae(vae_data.shape[-1], 2, self.models.vae)
            # Train
            train_vae(
                self.models.vae, vae_data,
                batch_size=batch_size, epochs=epochs,
                callbacks=[StyleTrainingCallback(self)]
            )
        
        # Training the generator
        if generator:
            if self.models.vae is None:
                raise VAEModelMissing(self.name)

            if schem_pth is None or not exists(schem_pth):
                raise SchematicFileMissing(schem_pth)

            # Opening example tilemap
            with open(schem_pth, 'rb') as schem_file:
                tlmp = Schematic.load(schem_file, version=mc_version)
            
            # Encoding the tilemap's palette with the previously trained VAE
            encoded_palette = self.models.vae.encoder.predict(
                tlmp.palette_to_vecs(pad_to=self.models.vae.input_dim)
            )[0]

            # Saving encoding
            for tile, z in zip(tlmp.palette, encoded_palette):
                self.palette[str(tile)] = z.tolist()

            # Mapping codes to the data array of the tilemap, thus creating a
            # 4D tensor.
            mapped = np.array([encoded_palette[idx]
                               for idx in list(tlmp.data.flat)])
            encoded_tlmp = mapped.reshape(
                tlmp.shape+(self.models.vae.latent_dim,))

            # Init generator
            self.models.generator = init_generator(
                encoded_tlmp, 256, self.models.vae.latent_dim, self.models.generator)

            # Creating random training data and validation data.
            data_generator = RandomNoise(
                10000, channels=self.models.generator.input_channels, 
                batch_size=batch_size, min_shape=[2,2,2], max_shape=[3,3,3],
                seed=0)
            validation_data_generator = RandomNoise(
                100, channels=self.models.generator.input_channels, 
                batch_size=batch_size, min_shape=[2,2,2], max_shape=[3,3,3],
                seed=12345)

            # Training
            self.models.generator.model.fit(
                data_generator,
                epochs=epochs,
                validation_data=validation_data_generator,
                callbacks=[StyleTrainingCallback(self)])
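
Based on this example and Example No. 4, a typical two-phase training run might be driven roughly as follows. This is only a sketch: the style name and schematic path are made-up placeholders, the Style constructor arguments follow Example No. 4, and the train() arguments follow the signature above.

# Hypothetical driver script; 'my_style' and 'example.schem' are placeholders.
style = Style('my_style', mc_version='1.15.2')

# Phase 1: train the variational autoencoder on all blockstates.
style.train(vae=True, batch_size=128, epochs=100)

# Phase 2: train the generator against an example schematic.
style.train(generator=True, schem_pth='example.schem',
            batch_size=128, epochs=100)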