Example #1
def compress_(path_feature_map, learning_rate):
    path_feature_map = copy(path_feature_map)

    feature_vectors = np.array(list(path_feature_map.values()))
    vector_size = feature_vectors.shape[2]
    feature_vectors = feature_vectors.reshape(-1, vector_size)

    feature_vectors /= np.sum(feature_vectors)
    #feature_vectors /= np.mean(feature_vectors)
    #feature_vectors = np.std(feature_vectors, axis=0)
    #print(np.std(feature_vectors, axis=0))
    #print(np.sum(feature_vectors, axis=0))
    #print(feature_vectors)

    autoencoder = Autoencoder(
        feature_vectors, 
        n_visible=feature_vectors.shape[1], 
        n_hidden=n_components,
        learning_rate=learning_rate
    )

    for epoch in range(n_iter):
        autoencoder.train() 

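    # Replace each path's feature vector with the autoencoder's hidden (compressed) representation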
    for path, vector in path_feature_map.items():
        v = autoencoder.get_hidden_values(vector)
        path_feature_map[path] = v
    
    error = autoencoder.negative_log_likelihood()
    error = abs(error)
    return path_feature_map, error
Example #2
def main():
    inputs = np.random.random((5, 5))
    autoencoder = Autoencoder([
        FCLayer((5, 4), SigmoidActivationFunction(), True),
        FCLayer((4, 3), SigmoidActivationFunction(), True),
        FCLayer((3, 4), SigmoidActivationFunction(), True),
        FCLayer((4, 5), SigmoidActivationFunction(), True)
    ])

    w = np.random.normal(size=autoencoder.net.params_number)
    autoencoder.net.set_weights(w)
    loss, loss_grad = autoencoder.compute_loss(inputs)
    num_params = autoencoder.net.params_number
    p = np.zeros(num_params)
    check_loss_grad = np.zeros(num_params)
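    # Finite-difference check of the analytic gradient, one parameter direction at a time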
    for i in range(num_params):
        p[:] = 0
        p[i] = 1
        check_loss_grad[i] = \
            check_grad(lambda x: loss_func(autoencoder, x, inputs), w, p)
    max_diff = np.abs(loss_grad - check_loss_grad).max()
    min_diff = np.abs(loss_grad - check_loss_grad).min()
    print("compute_loss")
    print("min_diff =  ", min_diff)
    print("max_diff = ", max_diff)
Example #3
def train_autoencoder(data):
    """ Train an autoencoder

    Args:
        data: A function that provides the input data for the network.

    Returns:
        LCMC and MSE metrics for the trained autoencoder.
    """

    # Setup
    layers = calculate_layer_sizes(784, 200, 0.5)
    # The generation count scales num_steps so the total number of training
    # steps matches the number used by the evolutionary algorithm
    generations = 10
    config = NeuralNetworkConfig()
    # Start with a mutated config to have some variation between runs
    config.mutate()
    config.num_steps *= generations
    autoencoder = Autoencoder(config, layers[:2])

    # Training
    autoencoder.train(data)
    for index, layer_size in enumerate(layers[2:]):
        autoencoder.append_layer(layer_size)
        autoencoder.train(data, restore_layers=index+1)

    # Evaluation
    autoencoder.save_history()
    print(autoencoder.config)
    print(autoencoder.save_path)
    # autoencoder.reconstruct_images(data)

    return lcmc_fitness(autoencoder, data, True), mse_fitness(autoencoder, data, True)
Example #4
    def train(self,
              validation_perc=0.1,
              lr=1e-3,
              intermediate_size=500,
              encoded_size=100):
        if not os.path.isfile('tfidf_matrix.pkl'):
            self._embed(min_df=0.0001)
        else:
            with open('./tfidf_matrix.pkl', 'rb') as f:
                self.tfidf_df = pickle.load(f)

        ae = Autoencoder(
            self.tfidf_df,
            validation_perc=validation_perc,
            lr=lr,
            intermediate_size=intermediate_size,
            encoded_size=encoded_size,
        )

        ae.train_loop(epochs=40)
        losses = pd.DataFrame(data=list(zip(ae.train_losses, ae.val_losses)),
                              columns=['train_loss', 'validation_loss'])
        losses['epoch'] = losses.index + 1

        self.losses = losses
        self.encoded_tfidf = ae.get_encoded_representations()

        with open('./autoencoder_embeddings.pkl', 'wb') as fh:
            pickle.dump(self.encoded_tfidf, fh)

        return self.encoded_tfidf
Example #5
def main():
    nof_epochs = 16
    batch_size = 128
    learning_rate = 1e-3
    momentum = .8
    loader = MNISTDataLoader(batch_size=batch_size)
    criterion = lambda x_hat, x: ((x - x_hat)**2).sum()

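    # The encoder flattens 28x28 MNIST images and maps them to a 2-D latent code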
    encoder = nn.Sequential(
        Reshape([1, 28 * 28]),
        nn.Linear(28 * 28, 512),
        nn.ReLU(),
        nn.Linear(512, 2),
        nn.ReLU(),
    )

    decoder = nn.Sequential(nn.Linear(2, 512), nn.ReLU(), nn.Linear(512, 784),
                            nn.ReLU(), Reshape([1, 28, 28]))

    autoencoder = Autoencoder(encoder, decoder)

    optimizer = torch.optim.Adam(
        autoencoder.parameters(),
        lr=learning_rate,
    )

    Trainer(nof_epochs=nof_epochs,
            loader=loader,
            criterion=criterion,
            optimizer=optimizer,
            model=autoencoder).run()

    plot_latent(autoencoder, loader.train, batch_size)

    plot_reconstructed(autoencoder)
Example #6
    def __init__(self, width, height, train=None, test=None, training=True):

        self.width = width
        self.height = height

        self.training = training

        if train is not None:
            (self.positive, self.negative) = train

        if test is not None:
            (self.positive_test, self.negative_test) = test

        self.kernel_size = (3, 3)
        self.model = None

        self.model_created = False

        self.ae = Autoencoder(width,
                              height,
                              train,
                              test,
                              training=self.training)
        self.d_net = None
        print("gan initiated")
Example #7
def train_encoder():
    data = MNIST("MNIST",
                 train=True,
                 download=True,
                 transform=transforms.ToTensor())
    model = Autoencoder(1).to(device)
    epochs = 5
    outputs = train(model, epochs, 16, 1e-3, data)
    writer = SummaryWriter("runs/autoencodermnist")
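    # For each epoch, log a figure of original and reconstructed digits to TensorBoard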
    for k in range(0, epochs):
        fig = plt.figure(figsize=(9, 2))
        images = outputs[k][1].cpu().detach().numpy()
        recon = outputs[k][2].cpu().detach().numpy()
        for i, item in enumerate(images):
            if i >= 9:
                break
            plt.subplot(2, 9, i + 1)
            plt.imshow(item[0])
        for i, item in enumerate(recon):
            if i >= 9:
                break
            plt.subplot(2, 9, 9 + i + 1)
            plt.imshow(item[0])
        writer.add_figure("Autoencoder performance", fig, global_step=k)
    model.to("cpu")
    torch.save(model.state_dict(), "autoencoderMNIST.pth")
Example #8
def test(hparams):

    model = Autoencoder(hparams)

    model.encoder = torch.load("trained_models/train_all/encoder.pt")
    model.decoder = torch.load("trained_models/train_all/decoder.pt")

    #print(model)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    #from torchsummary import summary
    #summary(model, (1, 64, 192))

    model.encoder.eval()
    model.decoder.eval()

    output_dir = "output/{}".format(
        os.path.basename(hparams.image_list).split('.')[0])

    with open(hparams.image_list) as f:
        image_files = f.read().splitlines()

    if not image_files:
        print("did not find any files")
        return

    play_thermal(image_files,
                 hparams,
                 output_dir,
                 encoder=model.encoder.to('cpu'),
                 decoder=model.decoder.to('cpu'),
                 norm=hparams.norm,
                 n_channels=hparams.nc,
                 show=False,
                 save=False)
Example #9
def train(net, train_set, NUM_EPOCHS):
    print("Started training")
    criterion = nn.MSELoss()
    train_loss = []
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    criterion = criterion.to(device)

    for epoch in range(NUM_EPOCHS):
        running_loss = 0.0
        for i, data in enumerate(train_set, 0):
            img = data.get("image").to(device)
            img = img.view(img.size(0), -1)
            optimizer.zero_grad()
            outputs = net(img)
            loss = criterion(outputs, img)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        loss = running_loss / len(train_set)
        train_loss.append(loss)
        print('Epoch {} of {}, Train Loss: {:.3f}'.format(
            epoch + 1, NUM_EPOCHS, loss))

        #if epoch % 5 == 0:
        #save_decoded_image(outputs.cpu().data, epoch)
    print("Finished training")
    return train_loss
Example #10
def main(args):
    if args.src_npy is None:
        print('Supply src_npy')
        return 0
    if args.dst_npy is None:
        print('Supply dst_npy')
        return 0

    model = Autoencoder()
    dummyx = tf.zeros((5, 64, 64, 3), dtype=tf.float32)
    _ = model(dummyx, verbose=True)
    saver = tfe.Saver(model.variables)
    saver.restore(args.snapshot)
    model.summary()

    nuclei = np.load(args.src_npy)
    print(nuclei.shape, nuclei.dtype, nuclei.min(), nuclei.max())

    if args.shuffle:
        print('Shuffling')
        np.random.shuffle(nuclei)

    n_images = nuclei.shape[0]
    n_batches = n_images // args.batch

    nuclei = np.array_split(nuclei, n_batches)
    print('Split into {} batches'.format(len(nuclei)))

    if args.n_batches is not None:
        subset_batches = min(n_batches, args.n_batches)
        print('Subsetting {} batches'.format(args.n_batches))
        nuclei = nuclei[:subset_batches]

    if args.draw:
        fig, axs = plt.subplots(5, 5, figsize=(5, 5))

    all_feat = []
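    # Encode each batch and collect the latent features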
    for k, batch in enumerate(nuclei):
        batch = (batch / 255.).astype(np.float32)
        batch_hat, features = model(tf.constant(batch, dtype=tf.float32),
                                    return_z=True,
                                    training=False)
        all_feat.append(features)

        if k % 50 == 0:
            print('batch {:06d}'.format(k))

        if args.draw:
            if k % 10 == 0:
                savebase = os.path.join(args.save, '{:05d}'.format(k))
                draw_result(batch,
                            batch_hat.numpy(),
                            fig,
                            axs,
                            savebase=savebase)

    all_feat = np.concatenate(all_feat, axis=0)
    print('all_feat', all_feat.shape)

    np.save(args.dst_npy, all_feat)
Example #11
def main():
    nof_epochs = 8
    batch_size = 1000
    learning_rate = 1e-3
    loader = MNISTDataLoader(batch_size=batch_size)

    encoder = VariationalEncoder(2)
    decoder = nn.Sequential(nn.Linear(2, 512), nn.ReLU(), nn.Linear(512, 784),
                            Reshape([1, 28, 28]))
    autoencoder = Autoencoder(encoder, decoder)
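    # VAE objective: squared reconstruction error plus the KL term accumulated by the encoder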
    criterion = lambda x, x_hat: (
        (x - x_hat)**2).sum() + autoencoder.encoder.kl

    optimizer = torch.optim.Adam(
        autoencoder.parameters(),
        lr=learning_rate,
    )

    Trainer(nof_epochs=nof_epochs,
            loader=loader,
            criterion=criterion,
            optimizer=optimizer,
            model=autoencoder).run()

    plot_latent(autoencoder, loader.train, batch_size)

    plot_reconstructed(autoencoder)
Example #12
 def __init__(self, numpy_rng, theano_rng=None,
              input=None,
              n_visible=784, n_hidden=500,
              W=None, bhid=None, bvis=None):
     Autoencoder.__init__(self, numpy_rng, theano_rng,
                          input, n_visible, n_hidden,
                          W, bhid, bvis)
Example #13
def _main(argv):
    print("initializing Params")
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if FLAGS.training:
        print("building the Model")
        autoencoder1 = Autoencoder(FLAGS.learning_rate, FLAGS.batch_size,
                                   FLAGS.pointcloud_dim, FLAGS.epochs)
        autoencoder1.train()
Example #14
 def __init__(self,
              numpy_rng,
              theano_rng=None,
              input=None,
              n_visible=784,
              n_hidden=500,
              W=None,
              bhid=None,
              bvis=None):
     Autoencoder.__init__(self, numpy_rng, theano_rng, input, n_visible,
                          n_hidden, W, bhid, bvis)
Example #15
def LDAclf(X_train, X_test, y_train, y_test, val_images=None):
    pca = randomized_PCA(X_train) 

    ae = Autoencoder(max_iter=200, sparsity_param=0.1, beta=3, n_hidden=190,
                     alpha=3e-3, verbose=False, random_state=1).fit(X_train)

    X_train, X_test = ae.transform(X_train), ae.transform(X_test)

    lda = LDA()
    y_pred = lda.fit(X_train, y_train).predict(X_test)
    show_metrics(y_test, y_pred)
Example #16
def main():
    autoencoder = Autoencoder()
    autoencoder.load_state_dict(torch.load('autoencoder_model.pt'))



    train_pconns, train_labels, test_pconns, test_labels = create_class_data()
    test_pconns_dataset = TensorDataset(torch.Tensor(test_pconns))
    test_pconns_dataloader = DataLoader(test_pconns_dataset)
    evaluate_autoencoder(autoencoder, test_pconns_dataloader)

    """
Example #17
def main():
    folder_path = create_models_folder(MODELS_PATH)
    init_log(folder_path + '/', 'execution.log')
    logging.info('Process started')
    folder_name = folder_path.split('//')[-1]
    logging.info(f'Folder {folder_name} created')

    forest_conf, ae_conf, full_config = read_config(CONFIG)
    write_config(full_config, folder_path)

    logging.info('Loading data')
    data = read_tsv(FILE_PATH, index_col=[0])
    logging.info('Data loaded')
    data = scale_data(data)

    logging.info('Starting Isolation Forest')
    forest = create_isolation_forest(forest_conf)
    fit_save_forest(forest, data, folder_path)

    logging.info('Starting Autoencoder')
    ae = Autoencoder(data.shape[1])
    ae.init_layer_dims()
    ae.init_model()
    ae.fit_save(ae_conf, data, folder_path)
    logging.info('Process finished')
Example #18
def autoencode_adaboost(X_train, X_test, y_train, y_test, val_images=None):
    divide = np.vectorize(lambda x: x/255.0)
    X_train = divide(X_train)
    X_test = divide(X_test)

    ae = Autoencoder(max_iter=200, sparsity_param=0.1, beta=3, n_hidden=190,
                     alpha=3e-3, verbose=False, random_state=1).fit(X_train)
    X_train = ae.transform(X_train)
    X_test = ae.transform(X_test)
    bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                         algorithm="SAMME",
                         n_estimators=200).fit(X_train, y_train)
    y_pred = bdt.predict(X_test)
    show_metrics(y_test, y_pred)
Example #19
    def __init__(self, params):

        self.params = params
        self.trained_weights_path = params['trained_weights_path']

        if not eval(self.params['train']):
            self.network = Autoencoder(load_model=True,
                                       load_path=self.trained_weights_path)
        else:
            self.get_data()
            self.network = Autoencoder(input_shape=self.x_train.shape[1:],
                                       learning_r=params['learning_rate'])
            self.network.save_model(self.trained_weights_path + 'model.h5',
                                    self.trained_weights_path + 'encoder.h5',
                                    self.trained_weights_path + 'decoder.h5')
Example #20
def load_model(dataset):
    model = Autoencoder(1)
    if dataset == "MNIST":
        model.load_state_dict(torch.load("autoencoderMNIST.pth"))
    elif dataset == "FashionMNIST":
        model.load_state_dict(torch.load("autoencoderFashionMNIST.pth"))
    model.to(device)
    return model
Example #21
def create_model_autoencoder(hparams):
    print("Creating auto-encoder...")
    train_graph = tf.Graph()
    with train_graph.as_default():
        train_model = Autoencoder(hparams, tf.contrib.learn.ModeKeys.TRAIN)

    eval_graph = tf.Graph()
    with eval_graph.as_default():
        eval_model = Autoencoder(hparams, tf.contrib.learn.ModeKeys.EVAL)

    infer_graph = tf.Graph()
    with infer_graph.as_default():
        infer_model = Autoencoder(hparams, tf.contrib.learn.ModeKeys.INFER)
    return TrainModel(graph=train_graph, model=train_model), EvalModel(
        graph=eval_graph, model=eval_model), InferModel(graph=infer_graph,
                                                        model=infer_model)
Example #22
    def __init__(self, name, lr=0.001):
        super().__init__(name=name, lr=lr)
        self.input_size = (512, 512, 1)
        self.model = Sequential()
        autoencoder_model = Autoencoder(trainable=0)
        # autoencoder_model.load_weights()
        self.model.add(autoencoder_model.encoder)
        self.model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(64, 64, 128), strides=1, padding='same', activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        self.model.add(Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        self.model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # self.model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', activation='relu'))
        # self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        self.model.add(GlobalAveragePooling2D())
        self.model.add(Dense(2048, activation='relu'))
        self.model.add(Dense(1024, activation='relu'))
        self.model.add(Dense(4, activation='softmax'))

        sgd_opti = SGD(lr=self.lr, momentum=0.9)
        self.model.compile(optimizer=sgd_opti, loss='categorical_crossentropy', metrics=['accuracy'])
Example #23
def main():
    args = utils.get_args()
    
    print("Prepare dataset...")
    mnist = input_data.read_data_sets("mnist/", one_hot = True)
    
    with tf.Graph().as_default(), tf.Session() as session:
        autoencoder = Autoencoder(
            784, args.hid_shape, args.lat_shape,
            optimizer = tf.train.AdagradOptimizer(args.lr),
            batch_size = args.batch_size,
            dropout = args.dropout)
        
        session.run(tf.initialize_all_variables())

        if args.save_model or args.load_model:
            saver = tf.train.Saver()

        if args.load_model:
            try:
                saver.restore(session, utils.SAVER_FILE)
            except ValueError:
                print("Cant find model file")
                sys.exit(1)
                
        if args.make_imgs:
            index = 0
            print("Prepare images directory...")
            utils.prepare_image_folder()
            example = utils.get_example(args.digit, mnist.test)
            
        print("Start training...")
        for epoch in range(args.epoches):
            for i, batch in enumerate(utils.gen_data(args.batch_size, mnist.train.images)):
                autoencoder.fit_on_batch(session, batch)
                if (i+1) % args.log_after == 0:
                    test_cost = autoencoder.evaluate(session, mnist.test.images)
                    print("Test error = {0:.4f} on {1} batch in {2} epoch".format(test_cost, i+1, epoch+1))
                    
                    if args.make_imgs:
                        path = os.path.join(utils.IMG_FOLDER, "{0:03}.png".format(index))
                        autoencoded = autoencoder.encode_decode(session, example.reshape(1, 784))
                        utils.save_image(autoencoded.reshape((28, 28)), path)
                        index += 1
            if args.save_model:
                saver.save(session, utils.SAVER_FILE)
                print("Model saved")
Example #24
 def _network(self):
     self.autoencoder = Autoencoder(self.inputs, self.AE_network_params,
                                    self.learning_rate)
     features = self.autoencoder.encoded_vecs
     labeled_features = tf.slice(features, [0, 0], [self.num_labeled, -1])
     #labeled_features = tf.nn.dropout(tf.slice(self.inputs, [0,0], [self.num_labeled, -1]), 0.75)
     self.classifier = MLP(labeled_features, self.labels,
                           self.mlp_network_params, self.learning_rate)
Example #25
    def fit(self, x_view, y_view):
        data1 = x_view
        data2 = y_view

        self.train_loader = torch.utils.data.DataLoader(
            ConcatDataset(data1, data2),
            batch_size=self.BATCH_SIZE,
            shuffle=True)

        self.model = Autoencoder(self.ZDIMS, self.input_dim)
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.0001)

        for epoch in range(1, self.EPOCHS + 1):
            # train(model,epoch,train_loader,optimizer,input_dim)
            self.train(epoch)
            # test(epoch)
            self.model.eval()
Example #26
    def build_model(self):
        self.is_training = tf.placeholder(tf.bool, name='is_training')
        self.images = tf.placeholder(tf.float32, [None] + self.image_shape,
                                     name='real_images')

        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')

        # self.G = self.generator(self.z)
        self._autoencoder = Autoencoder(zsize=self.z_dim)
        self.G = self._autoencoder.generator(self.z, training=self.is_training)

        self.D, self.D_logits = self.discriminator(self.images)
        self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)

        self.d_out_real_sum = tf.summary.scalar("d_out_real",
                                                tf.reduce_mean(self.D))
        self.d_out_fake_sum = tf.summary.scalar("d_out_fake",
                                                tf.reduce_mean(self.D_))

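        # GAN losses: D is trained to label real images 1 and generated images 0; G is trained so D labels its samples 1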
        self.d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                    labels=tf.ones_like(
                                                        self.D)))
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.zeros_like(
                                                        self.D_)))
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.ones_like(
                                                        self.D_)))

        self.d_loss = self.d_loss_real + self.d_loss_fake

        # self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
        # self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)

        # self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        # self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

        t_vars = tf.trainable_variables()

        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver(max_to_keep=1)
Example #27
def train(x_train, learning_rate, batch_size, epochs):
    autoencoder = Autoencoder(input_shape=(28, 28, 1),
                              conv_filters=(32, 64, 64, 64),
                              conv_kernels=(3, 3, 3, 3),
                              conv_strides=(1, 2, 2, 1),
                              latent_space_dim=2)
    autoencoder.summary()
    autoencoder.compile(learning_rate)
    autoencoder.train(x_train, batch_size, epochs)
    return autoencoder
Example #28
def run_autoencoder(optimizer):
    """ Runs the autoencoder model using the specified optimizer.

    Parameters
    ----------
    optimizer : RMSProp/Adam
        Optimization algorithm to be used for parameter learning

    """
    optimizer = Adam(learning_rate=0.03) if optimizer == 'adam' else RMSProp(
        learning_rate=0.05)
    train_matrix, val_matrix = get_training_and_val_data()
    model = Autoencoder(input_dim=train_matrix.shape[1])
    model.print_summary()
    model.compile(optimizer)
    errors = model.fit(train_matrix,
                       train_matrix,
                       num_epochs=60,
                       val_set=(val_matrix, val_matrix),
                       early_stopping=True)
    plot_losses(errors['training'], errors['validation'])
    neuron_num = model.model.layers[0].optimizer.reference_index
    learning_rates = model.model.layers[0].optimizer.learning_rates
    plot_learning_rates(learning_rates['weights'], learning_rates['bias'],
                        neuron_num)
Example #29
def test(hparams, path):
    model = Autoencoder.load_from_checkpoint(path)
    model.eval()
    #play(data_root = hparams.data_root, set = 'test_val', folder = 'aligned_out_item00_image00', model = model)
    play_thermal(view='view1_nc1',
                 crop='crop0',
                 set='test',
                 model=model,
                 n_channels=hparams.nc)
Example #30
def main():
    nof_epochs = 16
    batch_size = 64
    learning_rate = 1e-3
    loader = MNISTDataLoader(batch_size=batch_size)
    criterion = nn.MSELoss()

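    # Stacked valid convolutions shrink the 28x28 input to a 64-channel 1x2 feature map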
    conv_encoder = nn.Sequential(
        nn.Conv2d(1, 16, 15),
        nn.ReLU(),
        nn.Conv2d(16, 32, 7),
        nn.ReLU(),
        nn.Conv2d(32, 64, 5),
        nn.ReLU(),
        nn.Conv2d(64, 64, 3),
        nn.ReLU(),
        nn.Conv2d(64, 64, (2, 1)),
        nn.ReLU(),
    )

    conv_decoder = nn.Sequential(nn.ConvTranspose2d(64, 64, (2, 1)), nn.ReLU(),
                                 nn.ConvTranspose2d(64, 64, 3), nn.ReLU(),
                                 nn.ConvTranspose2d(64, 32, 5), nn.ReLU(),
                                 nn.ConvTranspose2d(32, 16, 7), nn.ReLU(),
                                 nn.ConvTranspose2d(16, 1, 15))

    autoencoder = Autoencoder(conv_encoder, conv_decoder)

    optimizer = torch.optim.Adam(
        autoencoder.parameters(),
        lr=learning_rate,
    )

    Trainer(nof_epochs=nof_epochs,
            loader=loader,
            criterion=criterion,
            optimizer=optimizer,
            model=autoencoder).run()

    for i in range(64):
        plot_latent(autoencoder, loader.train, batch_size, i)

    plot_reconstructed(autoencoder)
Example #31
def train(hparams, dm):
    #logger = loggers.TensorBoardLogger(hparams.log_dir, name=f"da{hparams.data_root}_is{hparams.image_size}_nc{hparams.nc}")
    model = Autoencoder(hparams)
    # print detailed summary with estimated network size
    #summary(model, (hparams.nc, hparams.image_width, hparams.image_height), device="cpu")
    trainer = Trainer(gpus=hparams.gpus, max_epochs=hparams.max_epochs)
    trainer.fit(model, dm)
    #trainer.test(model)
    torch.save(model.encoder, "trained_models/encoder.pt")
    torch.save(model.decoder, "trained_models/decoder.pt")
Example #32
    def __init__(self, autoencoder=None):
        # Avoid constructing a shared default Autoencoder at class-definition time
        self.autoencoder = autoencoder if autoencoder is not None else Autoencoder()
        #(x_train, _), (x_test, self.y_test) = mnist.load_data()

        x_train, self.y_test = load_cancer()
        
        self.x_train = normalize(x_train, axis = 0)
        self.x_test = self.x_train

        self.name = ""
Example #33
def autoencoder(args):
    kf = KFold(n_splits=args.splits_num, shuffle=args.shuffle, random_state=42)

    score_lst = list()

    for fold, (train_index, valid_index) in enumerate(kf.split(users)):
        # Initialize matrices
        train_users = users[train_index]
        train_movies = movies[train_index]
        train_ratings = ratings[train_index]

        valid_users = users[valid_index]
        valid_movies = movies[valid_index]
        valid_ratings = ratings[valid_index]

        data_zeros = np.full((number_of_users, number_of_movies), 0)
        data_mask = np.full((number_of_users, number_of_movies), 0)

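        # Fill the dense user-by-movie rating matrix and mark which entries are observed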
        for i, (user, movie) in enumerate(zip(train_users, train_movies)):
            data_zeros[user][movie] = train_ratings[i]
            data_mask[user][movie] = 1

        model = Autoencoder(number_of_users,
                            number_of_movies,
                            layers=args.hidden_layers,
                            masking=args.masking)
        model.fit(data_zeros,
                  data_mask,
                  valid_users=valid_users,
                  valid_movies=valid_movies,
                  valid_ratings=valid_ratings,
                  n_epochs=args.num_epochs,
                  verbose=False)

        preds = model.predict(data_zeros, valid_users, valid_movies)

        score = root_mean_square_error(valid_ratings, preds)
        score_lst.append(score)

        print("Fold:", fold + 1, "score:", score)

    print('Mean CV RMSE:', np.mean(score_lst))
Example #34
 def get_embedding(self, dataloader, embedding_dim):
     '''
     Create and save the embedding.
     :param dataloader: dataloader
     :param embedding_dim: embedding dimension
     '''
     print(f"embedding dim: {embedding_dim}")
     encoder = Autoencoder(embedding_dim, self.args['img_dim'])
     encoder.load_state_dict(torch.load(f'{DATASET}_weights.pt'))
     embedding = torch.zeros([len(dataloader.dataset), embedding_dim])
     encoder = encoder.to(self.device)
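     # Encode the dataset batch by batch, storing each item's code at its dataset index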
     with torch.no_grad():
         for x, _, idxs in dataloader:
             x = x.to(self.device)
             _, e1 = encoder(x)
             embedding[idxs] = e1.cpu()
     #np.save(f'./{DATASET}_EMBEDDING.npy', embedding)
     embedding = embedding.numpy()
     #embedding = np.load(f'./tsne_plots_new/{DATASET}_fc1_features.npy')
     print(embedding.shape)
     return embedding
Example #35
    def create_net(self):

        if self.name == 'feed_forward':
            self.model = feed_forward(self.input_dim, self.output_dim,
                                      self.n_hid)
            self.J = nn.MSELoss(size_average=True, reduce=True)
        elif self.name == 'autoencoder':
            self.model = Autoencoder(self.input_dim, self.output_dim,
                                     self.n_hid, self.n_bottleneck)
            self.J = nn.MSELoss(size_average=True, reduce=True)
        elif self.name == 'ff_mlpg':
            self.model = ff_mlpg(self.input_dim, self.output_dim, self.n_hid)
            self.J = Nloss_GD(self.input_dim)
        else:
            pass

        if self.cuda:
            self.model = self.model.cuda()
            self.J = self.J.cuda()
        print('    Total params: %.2fM' %
              (sum(p.numel() for p in self.model.parameters()) / 1000000.0))
Example #36
            shapes.append(Light(t1, material1))

    light = Light((-1., -1., 2.), (0.961, 1., 0.87))
    camera = Camera(img_sz, img_sz)
    #shader = PhongShader()
    shader = DepthMapShader(6.1)
    scene = Scene(shapes, [light], camera, shader)
    return scene.build()

#Hyper-parameters
num_capsule = 2
epsilon = 0.0001
num_epoch = 200


ae = Autoencoder(scene, D, 300, 30, 10, num_capsule)
opt = MGDAutoOptimizer(ae)

train_ae = opt.optimize(train_data)
get_recon = theano.function([], ae.get_reconstruct(train_data[0])[:,:,0])
get_center= theano.function([], ae.encoder(train_data[0]))

recon = get_recon()
center = get_center()[0]
imsave('output/test_balls0.png', recon)
print('...Initial center1 (%g,%g,%g)' % (center[0], center[1], center[2]))
print(recon.sum())

n = 0
while n < num_epoch:
    n += 1
if __name__ == "__main__":
    # Mini-batch of data used for testing
    x = T.matrix('x')

    # Load the trained parameters from file
    f = open("autoencoder.pkl", "rb")
    state = cPickle.load(f)
    f.close()

    # Build the autoencoder
    # The configuration must match the one used during training
    rng = np.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))
    autoencoder = Autoencoder(numpy_rng=rng,
                              theano_rng=theano_rng,
                              input=x,
                              n_visible=28*28,
                              n_hidden=500)

    # Set the trained parameters
    autoencoder.__setstate__(state)

    # Load the test data
    # Use test data that was not seen during training
    datasets = load_data('mnist.pkl.gz')
    test_set_x = datasets[2][0]

    # Draw the first 100 images
    # test_set_x is a shared variable, so get_value() retrieves its contents
    pos = 1
    for i in range(100):
Example #38
from autoencoder import Autoencoder
from sklearn import datasets

hidden_dim = 1
data = datasets.load_iris().data
input_dim = len(data[0])
ae = Autoencoder(input_dim, hidden_dim)
ae.train(data)
ae.test([[8, 4, 6, 2]])
Example #39
train, valid, test = mnist.load(
    normalize=True, shuffle=True, spaun=args.spaun)
train_images, test_images = train[0], test[0]

# --- pretrain with SGD backprop
n_epochs = 15
batch_size = 100

deep = DeepAutoencoder()
data = train_images
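# Greedy layer-wise pretraining: each autoencoder is trained on the codes produced by the previous layer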
for i in range(n_layers):
    vis_func = None if i == 0 else neuron_fn

    # create autoencoder for the next layer
    auto = Autoencoder(
        shapes[i], shapes[i+1], rf_shape=rf_shapes[i],
        vis_func=vis_func, hid_func=neuron_fn)
    deep.autos.append(auto)

    # train the autoencoder using SGD
    auto.auto_sgd(data, deep, test_images, n_epochs=n_epochs, rate=rates[i])

    # hidden layer activations become training data for next layer
    data = auto.encode(data)

plt.figure(99)
plt.clf()
recons = deep.reconstruct(test_images)
show_recons(test_images, recons)
print "recons error", rms(test_images - recons, axis=1).mean()
Example #40
rates = [1., 1., 0.3]
n_layers = len(shapes) - 1
assert len(funcs) == len(shapes)
assert len(rf_shapes) == n_layers
assert len(rates) == n_layers

n_epochs = 5
batch_size = 100

deep = DeepAutoencoder()
data = train_images
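# Train each layer's autoencoder once and cache it to disk; later runs reload the saved file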
for i in range(n_layers):
    savename = "sigmoid-auto-%d.npz" % i
    if not os.path.exists(savename):
        auto = Autoencoder(
            shapes[i], shapes[i+1], rf_shape=rf_shapes[i],
            vis_func=funcs[i], hid_func=funcs[i+1])
        deep.autos.append(auto)
        auto.auto_sgd(data, deep, test_images, noise=0.1,
                      n_epochs=n_epochs, rate=rates[i])
        auto.to_file(savename)
    else:
        auto = FileObject.from_file(savename)
        assert type(auto) is Autoencoder
        deep.autos.append(auto)

    data = auto.encode(data)

plt.figure(99)
plt.clf()
recons = deep.reconstruct(test_images)

Example #41
def get_pca(file_dir, s, t, i):
    from sklearn.decomposition import IncrementalPCA

    ipca = IncrementalPCA(n_components=48)
    for counter in range(s, t, i):
        features_file = np.load(file_dir + "/pca" + str(counter) + "_code.npy")
        ipca.partial_fit(features_file[:, 0:4096])
    return ipca


if __name__ == "__main__":
    args = parser()
    tree, codes_image = create_structures(get_code_from_files(args.codesdir, 0, 1000, 1000, 4975))
    my_autoencoder = Autoencoder(args.solver, args.model)
    code = my_autoencoder.get_fc7(jpg_dir + "009961" + ".jpg")
    pca = get_pca(args.codesdir, 0, 1000, 1000)
    with open(images_dir + "test.txt") as f:
        for image in f:
            print "new image", image
            # compare_image(image.rstrip(), tree, my_autoencoder, 1, pca, codes_image)
    # print 'ok'
    code_red = pca.transform(code)
    # print code_red.shape
    # print
    result = tree.query(code_red, k=1, p=2)
    print "results: ", len(result), result[1], result[0], codes_image[result[1].astype(int), -1]
    print total_retrieved_correct, total_retrieved
    print total_retrieved_false
    print total_retrieved