def __init__(self, params):

        self.params = params
        self.trained_weights_path = params['trained_weights_path']

        if not eval(self.params['train']):
            self.network = Autoencoder(load_model=True,
                                       load_path=self.trained_weights_path)
        else:
            self.get_data()
            self.network = Autoencoder(input_shape=self.x_train.shape[1:],
                                       learning_r=params['learning_rate'])
            self.network.save_model(self.trained_weights_path + 'model.h5',
                                    self.trained_weights_path + 'encoder.h5',
                                    self.trained_weights_path + 'decoder.h5')
Example #2
def train_encoder():
    data = MNIST("MNIST",
                 train=True,
                 download=True,
                 transform=transforms.ToTensor())
    model = Autoencoder(1).to(device)
    epochs = 5
    outputs = train(model, epochs, 16, 1e-3, data)
    writer = SummaryWriter("runs/autoencodermnist")
    for k in range(0, epochs):
        fig = plt.figure(figsize=(9, 2))
        images = outputs[k][1].cpu().detach().numpy()
        recon = outputs[k][2].cpu().detach().numpy()
        for i, item in enumerate(images):
            if i >= 9:
                break
            plt.subplot(2, 9, i + 1)
            plt.imshow(item[0])
        for i, item in enumerate(recon):
            if i >= 9:
                break
            plt.subplot(2, 9, 9 + i + 1)
            plt.imshow(item[0])
        writer.add_figure("Autoencoder performance", fig, global_step=k)
    model.to("cpu")
    torch.save(model.state_dict(), "autoencoderMNIST.pth")
Example #3
	def __init__(self, name, lr=0.001):
			super().__init__(name = name,  lr = lr)
			self.input_size = (512,512,1)
			self.model = Sequential()
			autoencoder_model = Autoencoder(trainable = 0)
			# autoencoder_model.load_weights()
			self.model.add(autoencoder_model.encoder)
			self.model.add(Conv2D(filters = 128, kernel_size = (3,3), input_shape = (64,64,128), strides = 1, padding ='same', activation = 'relu'))
			self.model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2)))

			self.model.add(Conv2D(filters = 128, kernel_size = (3,3), strides = 1, padding ='same', activation = 'relu'))
			self.model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2)))

			self.model.add(Conv2D(filters = 256, kernel_size = (3,3), strides = 1, padding ='same', activation = 'relu'))
			self.model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2)))

			# self.model.add(Conv2D(filters = 512, kernel_size = (3,3), strides = 1, padding ='same', activation = 'relu'))
			# self.model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2)))

			self.model.add(GlobalAveragePooling2D())
			self.model.add(Dense(2048, activation='relu'))
			self.model.add(Dense(1024, activation='relu'))
			self.model.add(Dense(4, activation='softmax'))

			sgd_opti = SGD(lr = self.lr, momentum = 0.9)
			self.model.compile(optimizer = sgd_opti, loss='categorical_crossentropy', metrics = ['accuracy'])
Example #4
    def __init__(self, width, height, train=None, test=None, training=True):

        self.width = width
        self.height = height

        self.training = training

        if train is not None:
            (self.positive, self.negative) = train

        if test is not None:
            (self.positive_test, self.negative_test) = test

        self.kernel_size = (3, 3)
        self.model = None

        self.model_created = False

        self.ae = Autoencoder(width,
                              height,
                              train,
                              test,
                              training=self.training)
        self.d_net = None
        print("gan initiated")
Example #5
def main():
    folder_path = create_models_folder(MODELS_PATH)
    init_log(folder_path + '/', 'execution.log')
    logging.info('Process started')
    folder_name = folder_path.split('//')[-1]
    logging.info(f'Folder {folder_name} created')

    forest_conf, ae_conf, full_config = read_config(CONFIG)
    write_config(full_config, folder_path)

    logging.info('Loading data')
    data = read_tsv(FILE_PATH, index_col=[0])
    logging.info('Data loaded')
    data = scale_data(data)

    logging.info('Starting Isolation Forest')
    forest = create_isolation_forest(forest_conf)
    fit_save_forest(forest, data, folder_path)

    logging.info('Starting Autoencoder')
    ae = Autoencoder(data.shape[1])
    ae.init_layer_dims()
    ae.init_model()
    ae.fit_save(ae_conf, data, folder_path)
    logging.info('Process finished')
Example #6
def train_autoencoder(data):
    """ Train an autoencoder

    Args:
        data: A function that provides the input data for the network.

    Returns:
        LCMC and MSE metric for the autoencoder that has been trained.
    """

    # Setup
    layers = calculate_layer_sizes(784, 200, 0.5)
    # Generations is needed to correct the number of training steps to match
    # the number of steps used in the evolutionary algorithm
    generations = 10
    config = NeuralNetworkConfig()
    # Start with a mutated config to have some variation between runs
    config.mutate()
    config.num_steps *= generations
    autoencoder = Autoencoder(config, layers[:2])

    # Training
    autoencoder.train(data)
    for index, layer_size in enumerate(layers[2:]):
        autoencoder.append_layer(layer_size)
        autoencoder.train(data, restore_layers=index+1)

    # Evaluation
    autoencoder.save_history()
    print(autoencoder.config)
    print(autoencoder.save_path)
    # autoencoder.reconstruct_images(data)

    return lcmc_fitness(autoencoder, data, True), mse_fitness(autoencoder, data, True)
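A minimal usage sketch for the trainer above, assuming only that the data argument matches what autoencoder.train expects; the load_batches name below is hypothetical:

# Hypothetical caller: load_batches is an assumed data provider, the rest
# only reuses train_autoencoder() from the example above.
def load_batches():
    ...  # return the input data in whatever form autoencoder.train(data) expects

lcmc_score, mse_score = train_autoencoder(load_batches)
print("LCMC fitness:", lcmc_score, "MSE fitness:", mse_score)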
Example #7
def train(net, train_set, NUM_EPOCHS):
    print("Started training")
    net = Autoencoder()
    criterion = nn.MSELoss()
    train_loss = []
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    if torch.cuda.is_available():
        net = net.cuda()
        criterion = criterion.cuda()

    for epoch in range(NUM_EPOCHS):
        running_loss = 0.0
        for i, data in enumerate(train_set, 0):
            img = data.get("image")
            img = img.cuda() if torch.cuda.is_available() else img
            img = img.view(img.size(0), -1)
            optimizer.zero_grad()
            outputs = net(img)
            loss = criterion(outputs, img)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        loss = running_loss / len(train_set)
        train_loss.append(loss)
        print('Epoch {} of {}, Train Loss: {:.3f}'.format(
            epoch + 1, NUM_EPOCHS, loss))

        #if epoch % 5 == 0:
        #save_decoded_image(outputs.cpu().data, epoch)
    print("Finished training")
    return train_loss
Example #8
def main(args):
    if args.src_npy is None:
        print('Supply src_npy')
        return 0
    if args.dst_npy is None:
        print('Supply dst_npy')
        return 0

    model = Autoencoder()
    dummyx = tf.zeros((5, 64, 64, 3), dtype=tf.float32)
    _ = model(dummyx, verbose=True)
    saver = tfe.Saver(model.variables)
    saver.restore(args.snapshot)
    model.summary()

    nuclei = np.load(args.src_npy)
    print(nuclei.shape, nuclei.dtype, nuclei.min(), nuclei.max())

    if args.shuffle:
        print('Shuffling')
        np.random.shuffle(nuclei)

    n_images = nuclei.shape[0]
    n_batches = n_images // args.batch

    nuclei = np.array_split(nuclei, n_batches)
    print('Split into {} batches'.format(len(nuclei)))

    if args.n_batches is not None:
        subset_batches = min(n_batches, args.n_batches)
        print('Subsetting {} batches'.format(args.n_batches))
        nuclei = nuclei[:subset_batches]

    if args.draw:
        fig, axs = plt.subplots(5, 5, figsize=(5, 5))

    all_feat = []
    for k, batch in enumerate(nuclei):
        batch = (batch / 255.).astype(np.float32)
        batch_hat, features = model(tf.constant(batch, dtype=tf.float32),
                                    return_z=True,
                                    training=False)
        all_feat.append(features)

        if k % 50 == 0:
            print('batch {:06d}'.format(k))

        if args.draw:
            if k % 10 == 0:
                savebase = os.path.join(args.save, '{:05d}'.format(k))
                draw_result(batch,
                            batch_hat.numpy(),
                            fig,
                            axs,
                            savebase=savebase)

    all_feat = np.concatenate(all_feat, axis=0)
    print('all_feat', all_feat.shape)

    np.save(args.dst_npy, all_feat)
Example #9
    def train(self,
              validation_perc=0.1,
              lr=1e-3,
              intermediate_size=500,
              encoded_size=100):
        if not os.path.isfile('tfidf_matrix.pkl'):
            self._embed(min_df=0.0001)
        else:
            with open('./tfidf_matrix.pkl', 'rb') as f:
                self.tfidf_df = pickle.load(f)

        ae = Autoencoder(
            self.tfidf_df,
            validation_perc=validation_perc,
            lr=lr,
            intermediate_size=intermediate_size,
            encoded_size=encoded_size,
        )

        ae.train_loop(epochs=40)
        losses = pd.DataFrame(data=list(zip(ae.train_losses, ae.val_losses)),
                              columns=['train_loss', 'validation_loss'])
        losses['epoch'] = losses.index + 1

        self.losses = losses
        self.encoded_tfidf = ae.get_encoded_representations()

        with open('./autoencoder_embeddings.pkl', 'wb') as fh:
            pickle.dump(self.encoded_tfidf, fh)

        return self.encoded_tfidf
Example #10
def create_model_autoencoder(hparams):
    print("Creating auto-encoder...")
    train_graph = tf.Graph()
    with train_graph.as_default():
        train_model = Autoencoder(hparams, tf.contrib.learn.ModeKeys.TRAIN)

    eval_graph = tf.Graph()
    with eval_graph.as_default():
        eval_model = Autoencoder(hparams, tf.contrib.learn.ModeKeys.EVAL)

    infer_graph = tf.Graph()
    with infer_graph.as_default():
        infer_model = Autoencoder(hparams, tf.contrib.learn.ModeKeys.INFER)
    return TrainModel(graph=train_graph, model=train_model), EvalModel(
        graph=eval_graph, model=eval_model), InferModel(graph=infer_graph,
                                                        model=infer_model)
Example #11
def main():
    nof_epochs = 8
    batch_size = 1000
    learning_rate = 1e-3
    loader = MNISTDataLoader(batch_size=batch_size)

    encoder = VariationalEncoder(2)
    decoder = nn.Sequential(nn.Linear(2, 512), nn.ReLU(), nn.Linear(512, 784),
                            Reshape([1, 28, 28]))
    autoencoder = Autoencoder(encoder, decoder)
    criterion = lambda x, x_hat: (
        (x - x_hat)**2).sum() + autoencoder.encoder.kl

    optimizer = torch.optim.Adam(
        autoencoder.parameters(),
        lr=learning_rate,
    )

    Trainer(nof_epochs=nof_epochs,
            loader=loader,
            criterion=criterion,
            optimizer=optimizer,
            model=autoencoder).run()

    plot_latent(autoencoder, loader.train, batch_size)

    plot_reconstructed(autoencoder)
Example #12
def main():
    inputs = np.random.random((5, 5))
    autoencoder = Autoencoder([
        FCLayer((5, 4), SigmoidActivationFunction(), True),
        FCLayer((4, 3), SigmoidActivationFunction(), True),
        FCLayer((3, 4), SigmoidActivationFunction(), True),
        FCLayer((4, 5), SigmoidActivationFunction(), True)
    ])

    w = np.random.normal(size=autoencoder.net.params_number)
    autoencoder.net.set_weights(w)
    loss, loss_grad = autoencoder.compute_loss(inputs)
    num_params = autoencoder.net.params_number
    p = np.zeros(num_params)
    check_loss_grad = np.zeros(num_params)
    for i in range(num_params):
        p[:] = 0
        p[i] = 1
        check_loss_grad[i] = \
            check_grad(lambda x: loss_func(autoencoder, x, inputs), w, p)
    max_diff = np.abs(loss_grad - check_loss_grad).max()
    min_diff = np.abs(loss_grad - check_loss_grad).min()
    print("compute_loss")
    print("min_diff =  ", min_diff)
    print("max_diff = ", max_diff)
Example #13
def main():
    nof_epochs = 16
    batch_size = 128
    learning_rate = 1e-3
    momentum = .8
    loader = MNISTDataLoader(batch_size=batch_size)
    criterion = lambda x_hat, x: ((x - x_hat)**2).sum()

    encoder = nn.Sequential(
        Reshape([1, 28 * 28]),
        nn.Linear(28 * 28, 512),
        nn.ReLU(),
        nn.Linear(512, 2),
        nn.ReLU(),
    )

    decoder = nn.Sequential(nn.Linear(2, 512), nn.ReLU(), nn.Linear(512, 784),
                            nn.ReLU(), Reshape([1, 28, 28]))

    autoencoder = Autoencoder(encoder, decoder)

    optimizer = torch.optim.Adam(
        autoencoder.parameters(),
        lr=learning_rate,
    )

    Trainer(nof_epochs=nof_epochs,
            loader=loader,
            criterion=criterion,
            optimizer=optimizer,
            model=autoencoder).run()

    plot_latent(autoencoder, loader.train, batch_size)

    plot_reconstructed(autoencoder)
Example #14
def test(hparams):

    model = Autoencoder(hparams)

    model.encoder = torch.load("trained_models/train_all/encoder.pt")
    model.decoder = torch.load("trained_models/train_all/decoder.pt")

    #print(model)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    #from torchsummary import summary
    #summary(model, (1, 64, 192))

    model.encoder.eval()
    model.decoder.eval()

    output_dir = "output/{}".format(
        os.path.basename(hparams.image_list).split('.')[0])

    with open(hparams.image_list) as f:
        image_files = f.read().splitlines()
        play_thermal(image_files,
                     hparams,
                     output_dir,
                     encoder=model.encoder.to('cpu'),
                     decoder=model.decoder.to('cpu'),
                     norm=hparams.norm,
                     n_channels=hparams.nc,
                     show=False,
                     save=False)
    if not len(image_files) > 0:
        print("did not find any files")
Example #15
def run_autoencoder(optimizer):
    """ Runs the autoencoder model using the specified optimizer.

    Parameters
    ----------
    optimizer : str
        Name of the optimization algorithm used for parameter learning;
        'adam' selects Adam, any other value falls back to RMSProp.

    """
    optimizer = Adam(learning_rate=0.03) if optimizer == 'adam' else RMSProp(
        learning_rate=0.05)
    train_matrix, val_matrix = get_training_and_val_data()
    model = Autoencoder(input_dim=train_matrix.shape[1])
    model.print_summary()
    model.compile(optimizer)
    errors = model.fit(train_matrix,
                       train_matrix,
                       num_epochs=60,
                       val_set=(val_matrix, val_matrix),
                       early_stopping=True)
    plot_losses(errors['training'], errors['validation'])
    neuron_num = model.model.layers[0].optimizer.reference_index
    learning_rates = model.model.layers[0].optimizer.learning_rates
    plot_learning_rates(learning_rates['weights'], learning_rates['bias'],
                        neuron_num)
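A possible invocation of the helper above; since any argument other than 'adam' falls back to RMSProp, both optimizers can be compared in one loop (this sketch assumes nothing beyond the function itself):

# Usage sketch: train once with Adam and once with RMSProp, plotting
# losses and per-neuron learning rates for each run.
for optimizer_name in ('adam', 'rmsprop'):
    run_autoencoder(optimizer_name)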
Example #16
def load_model(dataset):
    model = Autoencoder(1)
    if dataset == "MNIST":
        model.load_state_dict(torch.load("autoencoderMNIST.pth"))
    elif dataset == "FashionMNIST":
        model.load_state_dict(torch.load("autoencoderFashionMNIST.pth"))
    model.to(device)
    return model
Example #17
 def _network(self):
     self.autoencoder = Autoencoder(self.inputs, self.AE_network_params,
                                    self.learning_rate)
     features = self.autoencoder.encoded_vecs
     labeled_features = tf.slice(features, [0, 0], [self.num_labeled, -1])
     #labeled_features = tf.nn.dropout(tf.slice(self.inputs, [0,0], [self.num_labeled, -1]), 0.75)
     self.classifier = MLP(labeled_features, self.labels,
                           self.mlp_network_params, self.learning_rate)
Example #18
    def __init__(self, autoencoder = Autoencoder()):
        self.autoencoder = autoencoder
        #(x_train, _), (x_test, self.y_test) = mnist.load_data()

        x_train, self.y_test = load_cancer()
        
        self.x_train = normalize(x_train, axis = 0)
        self.x_test = self.x_train

        self.name = ""
Example #19
def train(hparams, dm):
    #logger = loggers.TensorBoardLogger(hparams.log_dir, name=f"da{hparams.data_root}_is{hparams.image_size}_nc{hparams.nc}")
    model = Autoencoder(hparams)
    # print detailed summary with estimated network size
    #summary(model, (hparams.nc, hparams.image_width, hparams.image_height), device="cpu")
    trainer = Trainer(gpus=hparams.gpus, max_epochs=hparams.max_epochs)
    trainer.fit(model, dm)
    #trainer.test(model)
    torch.save(model.encoder, "trained_models/encoder.pt")
    torch.save(model.decoder, "trained_models/decoder.pt")
Example #20
def train(x_train, learning_rate, batch_size, epochs):
    autoencoder = Autoencoder(input_shape=(28, 28, 1),
                              conv_filters=(32, 64, 64, 64),
                              conv_kernels=(3, 3, 3, 3),
                              conv_strides=(1, 2, 2, 1),
                              latent_space_dim=2)
    autoencoder.summary()
    autoencoder.compile(learning_rate)
    autoencoder.train(x_train, batch_size, epochs)
    return autoencoder
Example #21
def _main(argv):
    print("initializing Params")
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if FLAGS.training:
        print("building the Model")
        autoencoder1 = Autoencoder(FLAGS.learning_rate, FLAGS.batch_size,
                                   FLAGS.pointcloud_dim, FLAGS.epochs)
        autoencoder1.train()
Example #22
def main():
    autoencoder = Autoencoder()
    autoencoder.load_state_dict(torch.load('autoencoder_model.pt'))

    train_pconns, train_labels, test_pconns, test_labels = create_class_data()
    test_pconns_dataset = TensorDataset(torch.Tensor(test_pconns))
    test_pconns_dataloader = DataLoader(test_pconns_dataset)
    evaluate_autoencoder(autoencoder, test_pconns_dataloader)
Example #23
def train(hparams):
    logger = loggers.TensorBoardLogger(
        hparams.log_dir,
        name=f"da{hparams.data_root}_is{hparams.image_size}_nc{hparams.nc}")
    model = Autoencoder(hparams)
    # print detailed summary with estimated network size
    summary(model, (hparams.nc, hparams.image_size, hparams.image_size),
            device="cpu")
    trainer = Trainer(logger=logger,
                      gpus=hparams.gpus,
                      max_epochs=hparams.max_epochs)
    trainer.fit(model)
    trainer.test(model)
    torch.save(model.encoder, "encoder.pt")
    torch.save(model.decoder, "decoder.pt")
Example #24
def train_grid(x_train,
               learning_rate,
               batch_size,
               epochs,
               latent_space_dim=32):
    autoencoder = Autoencoder(input_shape=(23, 23, 5),
                              conv_filters=(16, 32, 32, 32),
                              conv_kernels=(3, 3, 3, 3),
                              conv_strides=(1, 1, 1, 1),
                              latent_space_dim=latent_space_dim,
                              name="Autoencoder_CNN_Grid_" +
                              str(latent_space_dim))
    autoencoder.summary()
    autoencoder.compile(learning_rate)
    history = autoencoder.train(x_train, batch_size, epochs)
    return autoencoder, history
Example #25
def make_pipeline(dimension_reduction="none"):
    '''
    Sets up a pipeline for the steps
    :param dimension_reduction: what type of dimensionality reduction shall be used {"none", "autoencoder", "svd", "pca"}
    '''
    # independent of dimensionality reduction vectorization and tf-idf is needed
    vectorizer = [('vect',
                   CountVectorizer(max_features=2000,
                                   analyzer='word',
                                   stop_words='english')),
                  ('tfidf', TfidfTransformer(use_idf=False))]
    # SVM classifier ('hinge')
    classifier = [('clf',
                   SGDClassifier(loss='hinge',
                                 penalty='l2',
                                 alpha=1e-3,
                                 random_state=42,
                                 max_iter=5,
                                 tol=None))]

    # Raw data classification (vectorize & classify)
    text_clf_raw = Pipeline(vectorizer + classifier)

    # Vectorize, apply data compression with autoencoder and classify
    text_clf_autoenc = Pipeline(vectorizer + [(
        'autoencoder',
        Autoencoder(
            n_features=2000, n_epochs=150, batch_size=8, enc_dimension=1000)
    )] + classifier)

    # Vectorize, convert to dense, perform PCA and classify
    text_clf_pca = Pipeline(vectorizer + [('to_dense', DenseTransformer())] +
                            [('pca', PCA(n_components=1000))] + classifier)

    # Vectorize, perform SVD (as this supports sparse input)  and classify
    text_clf_svd = Pipeline(vectorizer + [(
        'svd', TruncatedSVD(n_components=1000, n_iter=5, random_state=42))] +
                            classifier)

    if (dimension_reduction == "none"):
        return text_clf_raw
    elif (dimension_reduction == "svd"):
        return text_clf_svd
    elif (dimension_reduction == "pca"):
        return text_clf_pca
    elif (dimension_reduction == "autoencoder"):
        return text_clf_autoenc
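A short usage sketch for the pipeline factory; the 20-newsgroups loader and variable names below are assumptions, only make_pipeline() itself comes from the example:

# Hypothetical usage: fit the autoencoder-compressed pipeline on raw text
# and score it on a held-out split.
from sklearn.datasets import fetch_20newsgroups

train_set = fetch_20newsgroups(subset='train')
test_set = fetch_20newsgroups(subset='test')

clf = make_pipeline(dimension_reduction="autoencoder")
clf.fit(train_set.data, train_set.target)
print("accuracy:", clf.score(test_set.data, test_set.target))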
Example #26
    def build_model(self):
        self.is_training = tf.placeholder(tf.bool, name='is_training')
        self.images = tf.placeholder(tf.float32, [None] + self.image_shape,
                                     name='real_images')

        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')

        # self.G = self.generator(self.z)
        self._autoencoder = Autoencoder(zsize=self.z_dim)
        self.G = self._autoencoder.generator(self.z, training=self.is_training)

        self.D, self.D_logits = self.discriminator(self.images)
        self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)

        self.d_out_real_sum = tf.summary.scalar("d_out_real",
                                                tf.reduce_mean(self.D))
        self.d_out_fake_sum = tf.summary.scalar("d_out_fake",
                                                tf.reduce_mean(self.D_))

        self.d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                    labels=tf.ones_like(
                                                        self.D)))
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.zeros_like(
                                                        self.D_)))
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.ones_like(
                                                        self.D_)))

        self.d_loss = self.d_loss_real + self.d_loss_fake

        # self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
        # self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)

        # self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        # self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

        t_vars = tf.trainable_variables()

        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver(max_to_keep=1)
Example #27
    def fit(self, x_view, y_view):
        data1 = x_view
        data2 = y_view

        self.train_loader = torch.utils.data.DataLoader(
            ConcatDataset(data1, data2),
            batch_size=self.BATCH_SIZE,
            shuffle=True)

        self.model = Autoencoder(self.ZDIMS, self.input_dim)
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.0001)

        for epoch in range(1, self.EPOCHS + 1):
            # train(model,epoch,train_loader,optimizer,input_dim)
            self.train(epoch)
            #est(epoch)
            self.model.eval()
Example #28
def restore_autoencoder(save_path):
    """ Reconstructs an autoencoder and its history
    Args:
        save_path: The full path to the autoencoder without extension

    Returns:
        The restored autoencoder and its corresponding history
    """
    hist_saver = HistorySaver.deserialize(save_path + ".pickle")
    config = hist_saver.get_last_config()
    # layers = calculate_layer_sizes(90, 25, 0.5)
    # layers = calculate_layer_sizes(784, 10, 0.234)
    # layers = calculate_layer_sizes(784, 196, 0.5)
    layers = calculate_layer_sizes(784, 10, 0.115)
    # layers = calculate_layer_sizes(784, 2, 0.052)
    autoencoder = Autoencoder(config, layers, restore_path=save_path + ".ckpt")

    return autoencoder, hist_saver
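A small sketch of restoring and inspecting a saved model; the checkpoint path is a placeholder, while config and get_last_config() appear in the surrounding examples:

# Hypothetical usage: restore a trained autoencoder and look at its
# configuration history. The path below is an assumption.
autoencoder, history = restore_autoencoder("checkpoints/ae_784_10")
print(autoencoder.config)
print(history.get_last_config())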
Example #29
def create_child(auto_enc):
    """ Create a child through mutation

    Args:
        auto_enc: The parent autoencoder from which the child is generated.

    Returns:
        An (autoencoder, fitness) tuple that has a copy of its parent's history. The
        weights are restored from the parent autoencoder in the first
        training session, and the configuration is copied and then mutated.
    """
    auto_enc = auto_enc[0]
    new_conf = auto_enc.config.copy()
    new_conf.mutate()
    new_hist = auto_enc.history_saver.copy()

    new_ae = Autoencoder(new_conf, list(auto_enc.layer_sizes), auto_enc.save_path), float("inf")
    new_ae[0].history_saver = new_hist
    return new_ae
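A sketch of one mutation step built on create_child; the parent tuple and the data provider are assumed to exist already, and the train/mse_fitness calls mirror the training example above:

# Hypothetical usage: derive a mutated child from an (autoencoder, fitness)
# parent, train it, and re-evaluate its fitness.
child_model, child_fitness = create_child((parent_model, parent_fitness))
child_model.train(data)
child_fitness = mse_fitness(child_model, data, True)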
Example #30
def main():
    nof_epochs = 16
    batch_size = 64
    learning_rate = 1e-3
    loader = MNISTDataLoader(batch_size=batch_size)
    criterion = nn.MSELoss()

    conv_encoder = nn.Sequential(
        nn.Conv2d(1, 16, 15),
        nn.ReLU(),
        nn.Conv2d(16, 32, 7),
        nn.ReLU(),
        nn.Conv2d(32, 64, 5),
        nn.ReLU(),
        nn.Conv2d(64, 64, 3),
        nn.ReLU(),
        nn.Conv2d(64, 64, (2, 1)),
        nn.ReLU(),
    )

    conv_decoder = nn.Sequential(nn.ConvTranspose2d(64, 64, (2, 1)), nn.ReLU(),
                                 nn.ConvTranspose2d(64, 64, 3), nn.ReLU(),
                                 nn.ConvTranspose2d(64, 32, 5), nn.ReLU(),
                                 nn.ConvTranspose2d(32, 16, 7), nn.ReLU(),
                                 nn.ConvTranspose2d(16, 1, 15))

    autoencoder = Autoencoder(conv_encoder, conv_decoder)

    optimizer = torch.optim.Adam(
        autoencoder.parameters(),
        lr=learning_rate,
    )

    Trainer(nof_epochs=nof_epochs,
            loader=loader,
            criterion=criterion,
            optimizer=optimizer,
            model=autoencoder).run()

    for i in range(64):
        plot_latent(autoencoder, loader.train, batch_size, i)

    plot_reconstructed(autoencoder)