Example no. 1
def train_latent_lec(args):
    print('exp_folder: {}, dataset: {}'.format(args.exp_dir, args.dataset))

    tf1.reset_default_graph()
    tf_cfg = tf1.ConfigProto(device_count={'GPU': 0}) if args.cpu else None
    with tf1.Session(config=tf_cfg) as sess_vae:
        vae_config = vae_util.get_training_config(args.exp_dir)
        vae = vae_util.load_vae_model(sess_vae, args.exp_dir, args.dataset)

        x, _ = data.load_dataset(args.dataset,
                                 'train',
                                 vae_config["root_dir"],
                                 normalize=True)
        c, n_class = data.load_dataset(args.dataset,
                                       'train',
                                       vae_config["root_dir"],
                                       label=True)
        y, __ = data.load_dataset(args.dataset,
                                  'train',
                                  vae_config["root_dir"],
                                  label=True)
        n_condition = n_class if vae_config["conditional"] else 0
        if args.drop:
            print("Dropping class {}".format(args.drop))
            x, y = data.drop_class(x, y, args.drop)
            c = y  # keep the conditioning labels consistent with the dropped set

        lec = tf.keras.models.load_model(args.lec_path)
        y_lec = lec.predict(x)

        # Train the latent space LEC
        with tf1.Session(config=tf_cfg) as sess_classifier:
            latent_lec = LatentSpaceLEC(sess_classifier,
                                        n_class,
                                        n_condition,
                                        latent_dim=vae_config["latent_dim1"],
                                        batch_size=64)

            c_one_hot = utility.one_hot(c, n_condition) \
                if vae_config["conditional"] else None
            y_one_hot = utility.one_hot(y, n_class)
            encode = get_encode(sess_vae,
                                vae,
                                x,
                                c=c_one_hot,
                                stage=args.stage)
            latent_lec.train(encode,
                             y_one_hot,
                             c_one_hot,
                             epochs=args.epochs,
                             lr=args.lr,
                             lr_epochs=args.lr_epochs,
                             encode_frequency=10)
            latent_lec.save(args.exp_dir)
Example no. 2
def visualize_2d_manifold(sess: tf1.Session, vae: VaeWrapper,
                          cnt_per_row: int = 30, bound: float = 3.0, label=None,
                          n_class=None, save_path: str = None):
    # linearly spaced coordinates corresponding to the 2D plot
    # of digit classes in the latent space
    grid_x = np.linspace(-bound, bound, cnt_per_row)
    grid_y = np.linspace(-bound, bound, cnt_per_row)[::-1]

    zs = np.array([[[xi, yi] for xi in grid_x]
                   for yi in grid_y])
    zs = np.vstack(zs)
    c = None
    if label is not None:
        c = one_hot(np.array([label] * len(zs)), n_class)
    decoded = vae.decode(zs, c=c)
    shape = decoded[0].shape
    height, width, depth = decoded.shape[1], decoded.shape[2], decoded.shape[3]
    figure = create_image_grid(decoded, cnt_per_row, cnt_per_row)

    if save_path is not None:
        plt.figure(figsize=(20, 20))
        plt.xlabel("u[0]")
        plt.ylabel("u[1]")

        # TODO 200526: refactor using width, height, depth.
        start_range = shape[0] // 2
        end_range = cnt_per_row * shape[0] + start_range + 1
        pixel_range = np.arange(start_range, end_range, shape[0])
        sample_range_x = np.round(grid_x, 1)
        sample_range_y = np.round(grid_y, 1)
        plt.xticks(pixel_range, sample_range_x)
        plt.yticks(pixel_range, sample_range_y)
        plt.imshow(figure, cmap='Greys_r' if shape[2] == 1 else None)
        plt.savefig(save_path)
    return figure
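
# Hedged usage sketch for the 2-D manifold plot above. `exp_dir` and `dataset`
# are placeholder arguments, and `vae_util.load_vae_model` mirrors the loader
# used in the other excerpts (assumptions, not confirmed here); the plot is
# only meaningful when the loaded VAE has a 2-dimensional latent space.
def plot_manifold_example(exp_dir: str, dataset: str):
    with tf1.Session() as sess:
        vae = vae_util.load_vae_model(sess, exp_dir, dataset)
        return visualize_2d_manifold(sess, vae, cnt_per_row=20, bound=2.5,
                                     label=3, n_class=10,
                                     save_path=os.path.join(exp_dir,
                                                            'manifold.png'))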
Example no. 3
def interpolate(vae, sess, exp_folder, xs, ys, cnt):
    # TODO (200622): move to `experiment.py`.
    mix_zs, mix_ys = [], []
    INTERP_CNT = 11
    for _ in range(cnt):
        while True:
            idx = np.random.randint(0, len(xs), 2)
            y1, y2 = tuple(ys[idx])
            if y1 == y2:
                break

        y_encoded = utility.one_hot(ys[idx], n_class=np.max(ys) + 1)
        zs = vae.encode(xs[idx], y_encoded)
        z0, z1 = np.expand_dims(zs[0], 0), np.expand_dims(zs[1], 0)
        alpha = np.linspace(0, 1.0, num=INTERP_CNT).reshape((INTERP_CNT, 1))
        mix = np.matmul(alpha, z0) + np.matmul(1 - alpha, z1)
        y = np.array([y_encoded[0]] * INTERP_CNT)
        mix_zs.append(mix)
        mix_ys.append(y)

    mix_zs = np.concatenate(mix_zs, axis=0)
    mix_ys = np.concatenate(mix_ys, axis=0)

    decoded = vae.decode(mix_zs, mix_ys)
    decoded = np.rint(decoded * 255)
    imgs, _ = vae_util.stitch_imgs(decoded, None, row_size=cnt,
                                   col_size=INTERP_CNT)
    cv2.imwrite(os.path.join(exp_folder, 'interpolation.png'), imgs)
Example no. 4
def __main__():

    print("\033[1;32;40m--------------------------------------------\n\
                PYTORCH\n--------------------------------------------\n\033[0m"
          )
    train_input, test_input, train_target, test_target = g.generate_sets()
    train_input, train_target = Variable(train_input), Variable(train_target)
    test_input, test_target = Variable(test_input), Variable(test_target)

    #Sanity check
    #    print(train_input.shape)
    #    print(train_target.shape)
    #    print(test_input.shape)
    #    print(test_target.shape)

    train_target_h = utility.one_hot(train_target, 2)

    mini_batch_size = 10

    model = Net()
    losses_, errors_, accuracies_ = train_model(model, train_input,
                                                train_target_h, train_target,
                                                mini_batch_size, True)
    nb_test_errors = compute_nb_errors(model, test_input, test_target)
    print('test error Pytorch {:0.2f}% {:d}/{:d}'.format(
        (100 * nb_test_errors) / test_input.size(0), nb_test_errors,
        test_input.size(0)))

    ### Plots
    fig, ax1 = plt.subplots(figsize=(12, 8))
    ax2 = ax1.twinx()

    f1 = ax1.plot(np.array(losses_)[:60], color='darkblue', label='Loss')

    f2 = ax2.plot(np.array(accuracies_)[:60],
                  color='crimson',
                  label='Accuracy')

    fs = f1 + f2
    labs = [l.get_label() for l in fs]
    ax1.legend(fs, labs, loc='center right', fontsize=15)

    ax1.set_xlabel('Epochs', fontsize=15)
    ax1.set_ylabel('MSE loss', fontsize=15)
    ax2.set_ylabel('Accuracy [%]', fontsize=15)

    plt.title('Loss and accuracy monitoring - PyTorch', fontsize=25)
    plt.show()
    with open('accuracy_pytorch_v2.pkl',
              'wb') as f:  # Python 3: open(..., 'wb')
        pickle.dump(np.array(accuracies_), f)

    with open('loss_pytorch_v2.pkl', 'wb') as f:  # Python 3: open(..., 'wb')
        pickle.dump(np.array(losses_), f)
Example no. 5
def evaluate_models(sess: tf1.Session, outer: OuterVaeModel,
                    inner: InnerVaeModel, dataset: str, root_dir='.') \
        -> Tuple[float, float]:
    """ Evaluate inner and outer VAE for the MAE of reconstruction

    :param sess: tf Session
    :param outer: outer VAE
    :param inner: inner VAE
    :param dataset: Dataset
    :param root_dir: dataset root folder
    :return: (outer VAE MAE, inner VAE MAE)
    """
    x, dim = load_dataset(dataset, 'test', root_dir, normalize=True)
    y, n_classes = load_dataset(dataset, 'test', root_dir, label=True)
    y_encoded = one_hot(y, n_classes)

    encoded = outer.encode(x, c=y_encoded)
    mae1 = outer.evaluate(x, c=y_encoded)
    mae2 = inner.evaluate(encoded, c=y_encoded)
    return mae1, mae2
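
# Hedged usage sketch: assuming `sess`, `outer`, and `inner` have already been
# created and restored as in the training excerpts, report both MAEs.
def report_recon_mae(sess, outer, inner, dataset: str, root_dir: str = '.'):
    mae_outer, mae_inner = evaluate_models(sess, outer, inner, dataset,
                                           root_dir=root_dir)
    print('outer VAE MAE: {:.4f}, inner VAE MAE: {:.4f}'.format(mae_outer,
                                                                mae_inner))
    return mae_outer, mae_inner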
Example no. 6
def test_latent_lec(exp_dir: str,
                    lec_path: str,
                    dataset: str,
                    on_cpu: bool = False,
                    drop: int = None,
                    stage: int = 1):
    vae_config = vae_util.get_training_config(exp_dir)
    x, y = data.get_test_dataset(dataset)
    if drop:
        x, y = data.drop_class_except(x, y, drop)
        print("dropped {}. len: {}".format(drop, len(x)))

    assert len(x) == len(y), "len(x): {}, len(y): {}".format(len(x), len(y))
    conditional = vae_config["conditional"]
    c_encoded = utility.one_hot(y) if conditional else None

    tf1.reset_default_graph()
    tf_cfg = tf1.ConfigProto(device_count={'GPU': 0}) if on_cpu else None

    with tf1.Session(config=tf_cfg) as sess_vae:
        vae_model = vae_util.load_vae_model(sess_vae,
                                            exp_dir,
                                            dataset,
                                            batch_size=64)
        encode = get_encode(sess_vae,
                            vae_model,
                            x,
                            c=c_encoded,
                            stage=stage,
                            probabilistic=False)
        z = encode()

    y_lec = get_lec_prediction(lec_path, x)

    tf1.reset_default_graph()
    with tf1.Session(config=tf_cfg) as sess_classifier:
        latent_lec = load_latent_lec(sess_classifier, exp_dir)
        acc_truth = latent_lec.measure_accuracy(y, z, c_encoded)
        acc_lec = latent_lec.measure_accuracy(np.argmax(y_lec, axis=1), z,
                                              c_encoded)
        print("acc_truth: {:.2%}, acc_lec: {:.2%}".format(acc_truth, acc_lec))
Example no. 7
def get_semantically_partial_dataset(dataset: str, exp_dir: str,
                                     root_dir: str = os.getcwd()) \
        -> Tuple[np.array, np.array, np.array, np.array]:
    """ Get partial dataset, separated by "semantics", or semantic
    similarity captured by VAE. The VAE to use is loaded from `exp_dir`.

    :param dataset: Name of the dataset
    :param exp_dir: VAE directory
    :param root_dir: Project root directory
    :return: (x1, x2, y1, y2)
    """
    ratio = .5
    x, __ = load_dataset(dataset, 'train', root_dir, normalize=True)
    y, n_class = load_dataset(dataset, 'train', root_dir, label=True)
    c = one_hot(y, n_class)

    with tf1.Session() as sess:
        vae = load_vae_model(sess, exp_dir, dataset)
        z, __ = vae.extract_posterior(x, c)
        # split samples by whether the first latent coordinate is at or above
        # the `ratio` quantile of N(0, 1)
        inds1 = [i for i, zi in enumerate(z)
                 if zi[0] >= norm.ppf(ratio)]
        inds2 = [i for i, zi in enumerate(z)
                 if zi[0] < norm.ppf(ratio)]
    return x[inds1], x[inds2], y[inds1], y[inds2]
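
# Hedged usage sketch for the semantic split above; the dataset name and the
# experiment directory passed in are placeholders (assumptions, not from this
# excerpt).
def print_split_sizes(dataset: str, exp_dir: str):
    x1, x2, y1, y2 = get_semantically_partial_dataset(dataset, exp_dir)
    print('partition sizes: {} / {}'.format(len(x1), len(x2)))
    print('class counts (partition 1): {}'.format(np.bincount(y1)))
    print('class counts (partition 2): {}'.format(np.bincount(y2)))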
Example no. 8
 def get_recon_mae(_xs, _ys):
     _cs = one_hot(_ys, np.max(ys) + 1)\
         if self.outer.is_conditional else None
     x_hats = self.reconstruct(_xs, _cs)
     return calc_mae(_xs, x_hats)
Example no. 9
                m_w_t = self.m_w[i]/(1-beta1)
                m_b_t = self.m_b[i]/(1-beta1)
                
                v_w_t = self.v_w[i]/(1-beta2)
                v_b_t = self.v_b[i]/(1-beta2)
                
                n.weights = n.weights - eta*m_w_t/(v_w_t**(0.5)+epsilon)
                n.bias = n.bias - eta*m_b_t/(v_b_t**(0.5)+epsilon)

#-----------------------------------------------------------------------------#
                
#------------------------------- INITIALIZATON -------------------------------#

# Generate sets
train_input, test_input, train_target, test_target = g.generate_sets()
# Turn train_target into one hot labels
train_labels = utility.one_hot(train_target,2)
# Create a network with loss = MSE
network = Sequential(train_labels, utility.d_MSE_loss, utility.MSE_loss)

#-----------------------------------------------------------------------------#

#---------------------------------- NETWORK ----------------------------------#
'''
Creation of the network by adding the different layers.
Here, the network is composed of three hidden layers of size 25, in addition
to input and output layers of size 2.
Dropout is not mandatory, but it is added to test its functionality -> it does
not change the result much.
The activation functions are sigmoid because ReLU could cause a constant loss.
However, the network does work with ReLU in the first two layers.
'''
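
# A hedged sketch of the layer stack described above. The layer classes
# (`Linear`, `Sigmoid`, `Dropout`) and the `add` method are assumed names; they
# are not confirmed by this excerpt and should be adapted to the framework's
# actual API.
network.add(Linear(2, 25))    # input layer: 2 features -> 25 units
network.add(Sigmoid())
network.add(Dropout(0.2))     # optional, kept to exercise the functionality
network.add(Linear(25, 25))   # hidden layer of size 25
network.add(Sigmoid())
network.add(Linear(25, 25))   # hidden layer of size 25
network.add(Sigmoid())
network.add(Linear(25, 2))    # output layer of size 2
network.add(Sigmoid())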
Example no. 10
 def xyc(self):
     x, __ = data.load_dataset(dataset, 'test', __root_dir)
     y, __ = data.load_dataset(dataset, 'test', __root_dir, label=True)
     c = utility.one_hot(y)
     return x, y, c
Example no. 11
 def generate_label(cnt):
     return utility.one_hot(np.random.randint(0, n_class, cnt), n_class)
Example no. 12
def evaluate(args, model1: OuterVaeModel, model2: InnerVaeModel,
             sess: tf1.Session):

    maes = vae_util.evaluate_models(sess, model1, model2, args.dataset,
                                    args.root_dir)
    logger.info(maes)
    total_params = vae_util.get_trainable_parameters('outer')
    logger.info("stage1 trainable params: {}".format(total_params))
    total_params = vae_util.get_trainable_parameters('inner')
    logger.info("stage2 trainable params: {}".format(total_params))

    # test dataset
    x, dim = load_dataset(args.dataset, 'test', args.root_dir,
                          normalize=True)
    y, n_class = load_dataset(args.dataset, 'test', args.root_dir,
                              label=True)
    inds = np.array(list(range(len(x))))
    np.random.shuffle(inds)
    x = x[inds][0:args.fid_cnt]
    y = y[inds][0:args.fid_cnt]
    y_encoded = utility.one_hot(y, n_class) if args.conditional else None

    # reconstruction and generation
    def generate_label(cnt):
        return utility.one_hot(np.random.randint(0, n_class, cnt), n_class)

    def decode(_v):
        return np.array([np.where(__v == 1)[0][0] for __v in _v])

    img_recons = model1.reconstruct(x, c=y_encoded)
    print('recon.shape', img_recons.shape)

    y, y1, y2 = None, None, None
    img_gens1, y1 = model1.generate(args.fid_cnt, generate_label)
    img_gens2, y2 = model2.generate(args.fid_cnt, generate_label)
    logger.debug('recon.shape: {}, img1.shape: {}, img2.shape: {}'
                 ''.format(img_recons.shape, img_gens1.shape, img_gens2.shape))
    y1 = decode(y1) if y1 is not None else None
    y2 = decode(y2) if y2 is not None else None

    col = 5 if args.dataset == 'taxinet' else 10
    img_recons_sample, recon_inds = vae_util.stitch_imgs(img_recons, None,
                                                         row_size=n_class,
                                                         col_size=col)
    print('img_recons_sample: {}, recon_inds: {}'.format(
        img_recons_sample.shape, recon_inds))
    # x = np.rint(x[recon_inds] * 255.0)
    img_originals, _ = vae_util.stitch_imgs(x[recon_inds], y,
                                            row_size=n_class, col_size=col)
    print('img_originals', img_originals.shape)
    img_originals = cv2.cvtColor(img_originals.astype(np.uint8),
                                 cv2.COLOR_BGR2RGB)
    # y1, y2
    img_gens1_sample, _ = vae_util.stitch_imgs(img_gens1, y1,
                                               row_size=n_class,
                                               col_size=col)
    img_gens2_sample, _ = vae_util.stitch_imgs(img_gens2, y2,
                                               row_size=n_class,
                                               col_size=col)
    cv2.imwrite(os.path.join(args.exp_dir, 'recon_original.png'),
                img_originals)
    cv2.imwrite(os.path.join(args.exp_dir, 'recon_sample.png'),
                vae_util.scale_up(img_recons_sample))
    cv2.imwrite(os.path.join(args.exp_dir, 'gen1_sample.png'),
                vae_util.scale_up(img_gens1_sample))
    cv2.imwrite(os.path.join(args.exp_dir, 'gen2_sample.png'),
                vae_util.scale_up(img_gens2_sample))

    # calculating FID score
    batches, parallel = 100, 4
    tf1.reset_default_graph()
    fid_recon = get_fid(img_recons, args.dataset, args.root_dir,
                        args.fid_cnt, num_batches=batches, parallel=parallel)
    logger.info('FID = {:.2f}\n'.format(fid_recon))
    fid_gen1 = get_fid(img_gens1, args.dataset, args.root_dir, args.fid_cnt,
                       num_batches=batches, parallel=parallel)
    logger.info('FID = {:.2f}\n'.format(fid_gen1))
    fid_gen2 = get_fid(img_gens2, args.dataset, args.root_dir, args.fid_cnt,
                       num_batches=batches, parallel=parallel)
    logger.info('FID = {:.2f}\n'.format(fid_gen2))

    logger.info('Reconstruction Results: FID = {:.2f}'.format(fid_recon))
    logger.info('Generation Results (Stage 1): FID = {:.2f}'.format(fid_gen1))
    logger.info('Generation Results (Stage 2): FID = {:.2f}'.format(fid_gen2))

    with open(os.path.join(args.exp_dir, 'fid.txt'), 'w') as f:
        f.write("recon: {:.2f}, 1st: {:.2f}, 2nd: {:.2f}\n".format(
            fid_recon, fid_gen1, fid_gen2))
    if args.train1 and args.wandb:
        # wandb is initialized only when train1 is True
        wandb.log({
            'fid_recon': fid_recon,
            'fid_gen1': fid_gen1,
            'fid_gen2': fid_gen2,
        })
Example no. 13
def main(args):
    global logger
    tf1.reset_default_graph()

    if not os.path.exists(args.exp_dir):
        os.makedirs(args.exp_dir)
    model_path = os.path.join(args.exp_dir, 'model')
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    logger = vae_util.setup_logger(os.path.join(args.exp_dir, 'training.log'),
                                   args.debug)
    logger.info("Experiment at {}".format(args.exp_dir))
    logger.info(vars(args))

    # dataset
    xs, dim = load_dataset(args.dataset, 'train', args.root_dir,
                           normalize=True)
    ys, n_class = load_dataset(args.dataset, 'train', args.root_dir,
                               label=True)
    if args.limit:
        xs, ys = xs[:6400], ys[:6400]
    logger.info('Train data len: {}, dim: {}, classes: {}'.format(len(xs),
                                                                  dim, n_class))

    xs_val, _ = load_dataset(args.dataset, 'test', args.root_dir,
                             normalize=True)
    ys_val, _ = load_dataset(args.dataset, 'test', args.root_dir, label=True)

    if args.drop >= 0:
        logger.info("Dropping class {}".format(args.drop))
        xs, ys = drop_class(xs, ys, args.drop)
        xs_val, ys_val = drop_class(xs_val, ys_val, args.drop)
    cs = utility.one_hot(ys)

    n_sample = np.shape(xs)[0]
    logger.info('Num Sample = {}.'.format(n_sample))

    # Load from configuration
    config_filename = os.path.join(args.exp_dir, 'config.yml')
    load_configuration(args, config_filename)
    pprinter = pprint.PrettyPrinter(indent=4)
    logger.info("Configuration: {}".format(pprinter.pformat(vars(args))))

    # Save/update the config only when either of the VAEs gets trained
    if args.train1 or args.train2:
        logger.info("Saving configuration to " + config_filename)
        save_configuration(args, config_filename, n_sample)

    # session
    config = tf1.ConfigProto(device_count={'GPU': 0}) if args.cpu else None
    sess = tf1.Session(config=config)

    # model
    outer_vae = vae_util.get_outer_vae(args, sess)
    outer_params = vae_util.get_trainable_parameters('outer')
    logger.info("Created VAE models:")
    logger.info("{}, {} params".format(outer_vae, outer_params))

    # train model
    if args.train1:
        if args.wandb:
            wandb.init(project=args.dataset, name=args.exp_name,
                       sync_tensorboard=True, config=args)
        mae = outer_vae.train(lambda: (xs, cs), args.epochs1, args.lr1,
                              os.path.join(model_path, 'stage1'),
                              log_epoch=log_epoch)
        logger.info("Finished training stage 1 VAE. Mae: {:.2%}".format(mae))

    if args.train2:
        inner_vae = vae_util.get_inner_vae(args, sess, outer_vae)
        sess.run(tf1.global_variables_initializer())
        outer_vae.restore(os.path.join(model_path, 'stage1'))

        mu_z, sd_z = outer_vae.extract_posterior(xs, cs)

        def get_data():
            zs = mu_z + sd_z * np.random.normal(0, 1,
                                                [len(mu_z), args.latent_dim1])
            return zs, cs

        mae = inner_vae.train(get_data, args.epochs2, args.lr2,
                              os.path.join(model_path, 'stage2'),
                              log_epoch=log_epoch)
        logger.info("Finished training stage 2 VAE. Mae: {:.2%}".format(mae))

    # load
    if not (args.train1 or args.train2):
        # saver.restore(sess, os.path.join(model_path, 'stage1'))
        if os.path.exists(os.path.join(model_path, 'stage2.index')):
            inner_vae = vae_util.get_inner_vae(args, sess, outer_vae)
            inner_vae.restore(os.path.join(model_path, 'stage2'))
            logger.info("Loaded Stage 2 VAE")
        elif os.path.exists(os.path.join(model_path, 'stage1.index')):
            outer_vae.restore(os.path.join(model_path, 'stage1'))
            logger.info("Loaded Stage 1 VAE")
        else:
            raise Exception("No checkpoint found!")

    if args.eval:
        logger.info("Evaluating...")
        evaluate(args, outer_vae, inner_vae, sess)

    if args.interpolate:
        interpolate(VaeWrapper(outer_vae, inner_vae), sess, args.exp_dir, xs, ys, 20)

    if args.manifold:
        logger.info("Analyze manifold")
        vae_util.analyze_manifold(sess, VaeWrapper(outer_vae, inner_vae), args.dataset)
Example no. 14
    def generate(self, n_test: int, batch_size: int = 32)\
            -> Tuple[np.array, np.array, np.array]:
        """ Generate fault-revealing test cases

        :param n_test: Number of test cases to generate
        :param batch_size: Number of candidates to process at a time;
        larger batches make better use of the GPU
        :return: (latent codes, synthesized inputs, synthesized labels)
        """
        cnt = Counter()
        synth_inputs, synth_latent_codes, synth_labels = [], [], []
        while cnt['bug'] < n_test:
            # new inputs are generated in batches of size `batch_size`.
            cnt['total'] += batch_size
            # synthesized labels
            ys_hat = np.array([
                random.randint(0, self.n_classes - 1)
                for _ in range(batch_size)
            ])
            cs_hat = one_hot(ys_hat, self.n_classes)
            # randomly chosen latent codes
            us_hat = np.array(
                [[random.gauss(0, 1) for _ in range(self.latent_dim)]
                 for _ in range(batch_size)])
            # synthesized inputs
            xs_hat = self.vae.decode(us_hat, cs_hat)
            ys_pred = np.argmax(self.model_under_test.predict(xs_hat), axis=-1)
            for u_hat, x_hat, y_hat, y_pred in zip(us_hat, xs_hat, ys_hat,
                                                   ys_pred):
                # when the predicted output and the expected output
                # don't match, it is considered a fault-finding test case.
                if y_pred != y_hat:
                    is_duplicate = False
                    for u in synth_latent_codes:
                        # if this u is close enough to any of the already
                        # synthesized ones, consider it as a duplicate.
                        if np.linalg.norm(u - u_hat) \
                                < self.distance_lower_bound:
                            is_duplicate = True
                            break
                    # keep track of low plausibility and high uncertainty
                    # cases, just for statistics
                    if self.get_plausibility(u_hat) < self.plaus_lower_bound:
                        cnt['low_plaus'] += 1
                    if self.get_uncertainty(np.expand_dims(x_hat, 0)) > \
                            self.uncertainty_upper_bound:
                        cnt['high_uncertainty'] += 1
                    if is_duplicate:
                        cnt['duplicate'] += 1
                    else:
                        synth_latent_codes.append(u_hat)
                        synth_inputs.append(x_hat)
                        synth_labels.append(y_hat)
                        cnt['bug'] += 1
        assert cnt['bug'] == len(synth_latent_codes) == \
               len(synth_inputs) == len(synth_labels)
        # Trim the excess beyond `n_test`, caused by batched generation
        synth_latent_codes = np.array(synth_latent_codes[:n_test])
        synth_inputs = np.array(synth_inputs[:n_test])
        synth_labels = np.array(synth_labels[:n_test])
        print("xs.shape", synth_inputs.shape)
        return synth_latent_codes, synth_inputs, synth_labels
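
# Hedged usage sketch for the generator above; the constructor arguments and
# the class name (`TestGenerator`) are assumptions, not confirmed by this
# excerpt:
#     generator = TestGenerator(vae=vae, model_under_test=lec, n_classes=10,
#                               latent_dim=latent_dim)
#     us, xs, ys = generator.generate(n_test=100, batch_size=64)
#     print('synthesized {} fault-revealing inputs'.format(len(xs)))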
Example no. 15
args = parser.parse_args()
image_dir = args.image_dir
batch_size = args.batch_size
epochs = args.epochs
save_name = args.save_name
sample_percent = args.sample_percent

X, labels = load_data(image_dir, sample_percent=sample_percent)
length, width = X[0].shape
X = X.reshape(-1, length, width, 1)
X = X.astype('float32')
X /= 255
unique_y = unique(labels)
num_unique_labels = len(unique_y)
mapping = {
    label: one_hot(i, num_unique_labels)
    for i, label in enumerate(unique_y)
}
y = np.array([mapping[label] for label in labels])
y = y.astype('float32')
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.1,
                                                    shuffle=False)

model = Sequential()
model.add(Flatten(input_shape=(length, width, 1)))
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(num_unique_labels, activation='softmax'))
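
# A hedged continuation sketch: compiling and training the model defined above.
# The optimizer and loss below are assumptions, not confirmed by this script.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(X_test, y_test))
model.save(save_name)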