import pytest
import tensorflow as tf

import daze as dz
from daze import DazeModelTypeError  # assumed import path for the error type
from daze.nets.encoders import ConvolutionalEncoder
from daze.nets.decoders import CifarDecoder


def test_contractive():
    if dz.tracing.TRACE_GRAPHS:
        # ContractiveAutoEncoder is expected to reject construction when graph
        # tracing is enabled.
        with pytest.raises(ValueError):
            model = dz.recipes.ContractiveAutoEncoder(
                ConvolutionalEncoder(), CifarDecoder(), gamma=0.1
            )
    else:
        model = dz.recipes.ContractiveAutoEncoder(
            ConvolutionalEncoder(), CifarDecoder(), gamma=0.1
        )
        cbs = make_callbacks(model)
        train(model, cbs)
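# make_callbacks and train are shared test helpers that are not defined in this
# section; the sketch below is a minimal, assumed implementation. The call
# `model.train(x, epochs=..., callbacks=...)` is a hypothetical training entry
# point, not a documented daze API. tensorboard_generative_sample (used in
# test_gan below) is assumed to come from the same helper module and is not
# sketched here.


def make_callbacks(model):
    # Keep the test runs light: no callbacks by default.
    return []


def train(model, cbs):
    # Exercise the forward/backward pass on a tiny CIFAR-10 batch.
    x, _ = dz.data.cifar10.load(32, "f32")
    x /= 255
    model.train(x, epochs=1, callbacks=cbs)  # hypothetical signature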
def test_gan_with_vae_forward_pass():
    with pytest.raises(DazeModelTypeError):
        model = dz.GAN(
            CifarDecoder(),
            ConvolutionalEncoder(),
            100,
            forward_pass_func=dz.forward_pass.probabilistic_encode_decode(),
        )
def test_ae_with_vae_forward_pass():
    with pytest.raises(DazeModelTypeError):
        model = dz.AutoEncoder(
            ConvolutionalEncoder(3),
            CifarDecoder(),
            forward_pass_func=dz.forward_pass.probabilistic_encode_decode(),
            loss_funcs=[dz.loss.latent_l1()],
        )
def test_get_batch_encodings_np():
    x, _ = dz.data.cifar10.load(70, "f32")
    x /= 255
    model = dz.AutoEncoder(ConvolutionalEncoder(latent_dim=2), CifarDecoder())
    encodings = model.get_batch_encodings(x)
    assert isinstance(encodings, tf.Tensor)
    assert encodings.numpy().shape[0] == 70
    assert encodings.numpy().shape[1] == 2
def test_get_batch_encodings_unknown():
    with pytest.raises(ValueError):
        model = dz.AutoEncoder(ConvolutionalEncoder(latent_dim=2), CifarDecoder())
        encodings = model.get_batch_encodings([1.0, 2.0, 3.0])
def test_ae_with_vae_loss_func():
    with pytest.raises(DazeModelTypeError):
        model = dz.AutoEncoder(
            ConvolutionalEncoder(3), CifarDecoder(), loss_funcs=[dz.loss.kl()]
        )
def test_ae_with_gan_forward_pass():
    with pytest.raises(DazeModelTypeError):
        model = dz.AutoEncoder(
            ConvolutionalEncoder(3),
            CifarDecoder(),
            forward_pass_func=dz.forward_pass.generative_adversarial(),
        )
def test_gan_with_disc_loss_in_gen_loss():
    with pytest.raises(DazeModelTypeError):
        model = dz.GAN(
            CifarDecoder(),
            ConvolutionalEncoder(),
            100,
            generator_loss=[dz.loss.one_sided_label_smoothing()],
        )
def test_vae():
    model = dz.recipes.VariationalAutoEncoder(ConvolutionalEncoder(), CifarDecoder())
    cbs = make_callbacks(model)
    train(model, cbs)
def test_gan_instance_noise():
    model = dz.GAN(
        CifarDecoder(),
        ConvolutionalEncoder(),
        noise_dim=100,
        forward_pass_func=dz.forward_pass.generative_adversarial_instance_noise(
            0.2, 0.0, 1000
        ),
    )
    train(model, None)
def test_gan_feature_matching():
    model = dz.GAN(
        CifarDecoder(),
        ConvolutionalEncoder(),
        noise_dim=100,
        generator_loss=[dz.loss.feature_matching()],
    )
    train(model, None)
def test_gan_one_sided_labels():
    model = dz.GAN(
        CifarDecoder(),
        ConvolutionalEncoder(),
        noise_dim=100,
        discriminator_loss=[dz.loss.one_sided_label_smoothing()],
    )
    train(model, None)
def test_gan():
    model = dz.GAN(CifarDecoder(), ConvolutionalEncoder(), 100)
    cbs = [tensorboard_generative_sample(dz.math.random_normal([5, 100]))]
    train(model, cbs)
def test_default():
    model = dz.AutoEncoder(ConvolutionalEncoder(3), CifarDecoder())
    cbs = make_callbacks(model)
    train(model, cbs)
def test_gan_with_ae_loss_in_gen_loss():
    with pytest.raises(DazeModelTypeError):
        model = dz.GAN(
            CifarDecoder(),
            ConvolutionalEncoder(),
            100,
            generator_loss=[dz.loss.contractive(0.1)],
        )
def test_gan_with_ae_loss_in_disc_loss():
    with pytest.raises(DazeModelTypeError):
        model = dz.GAN(
            CifarDecoder(),
            ConvolutionalEncoder(),
            100,
            discriminator_loss=[dz.loss.reconstruction()],
        )
def test_denoising():
    model = dz.recipes.DenoisingAutoEncoder(
        ConvolutionalEncoder(), CifarDecoder(), gamma=0.1
    )
    cbs = make_callbacks(model)
    train(model, cbs)
def test_gan_with_gen_loss_in_disc_loss():
    with pytest.raises(DazeModelTypeError):
        model = dz.GAN(
            CifarDecoder(),
            ConvolutionalEncoder(),
            100,
            discriminator_loss=[dz.loss.vanilla_generator_loss()],
        )
def test_klsparse():
    model = dz.recipes.KlSparseAutoEncoder(
        ConvolutionalEncoder(), CifarDecoder(), rho=0.01, beta=0.1
    )
    cbs = make_callbacks(model)
    train(model, cbs)
def test_l1sparse():
    model = dz.recipes.L1SparseAutoEncoder(
        ConvolutionalEncoder(), CifarDecoder(), gamma=0.1
    )
    cbs = make_callbacks(model)
    train(model, cbs)
# Standalone script: dump the latent encodings of a dataset to a text file.
import argparse
import os

import numpy as np

import daze as dz
from daze.nets.encoders import ConvolutionalEncoder
from daze.nets.decoders import CifarDecoder, MnistDecoder

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-weights", type=str)
    parser.add_argument("-output", type=str)
    # No `choices` restriction: -dataset also accepts a path to a dataset file.
    parser.add_argument("-dataset", type=str)
    args = parser.parse_args()

    if args.dataset in ["cifar", "cifar10"]:
        dataset = dz.data.cifar10
        data, _ = dataset.load(dtype="f")
        data /= 255.0
        Decoder = CifarDecoder
    elif args.dataset in ["mnist"]:
        dataset = dz.data.mnist
        data, _ = dataset.load(dtype="f")
        data /= 255.0
        Decoder = MnistDecoder
    elif os.path.exists(args.dataset):
        data = dz.data.utils.load_from_file(args.dataset)
        # Assumption: custom datasets are CIFAR-shaped, so use the CIFAR decoder.
        Decoder = CifarDecoder
    else:
        parser.error(f"Unknown dataset: {args.dataset}")

    model = dz.Model(ConvolutionalEncoder(latent_dim=3), Decoder())
    model.load_weights(args.weights)
    encodings = model.get_batch_encodings(data)
    np.savetxt(args.output, encodings)
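# The encoding-dump script above is driven entirely by its command-line flags.
# A hypothetical invocation (script filename and paths are assumptions):
#
#   python save_encodings.py -weights checkpoints/ae.weights -dataset cifar10 -output encodings.txt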
def test_ae_with_gan_loss_func():
    with pytest.raises(DazeModelTypeError):
        model = dz.AutoEncoder(
            ConvolutionalEncoder(3),
            CifarDecoder(),
            loss_funcs=[dz.loss.feature_matching()],
        )
# Standalone script: plot sample images next to their reconstructions.
import argparse

import matplotlib.pyplot as plt
import numpy as np

import daze as dz
from daze.nets.encoders import ConvolutionalEncoder
from daze.nets.decoders import CifarDecoder, MnistDecoder


def reshape_for_prediction(img):
    # Assumed helper: the model expects a leading batch dimension.
    return img[np.newaxis, ...]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-weights", type=str)
    parser.add_argument("-dataset", type=str)
    parser.add_argument("-latent_size", type=int, default=32)
    args = parser.parse_args()

    if args.dataset in ["cifar", "cifar10"]:
        dataset, _ = dz.data.cifar10.load(dtype="f")
        Decoder = CifarDecoder
    elif args.dataset in ["mnist"]:
        dataset, _ = dz.data.mnist.load(dtype="f")
        dataset = np.squeeze(dataset)
        Decoder = MnistDecoder
    else:
        parser.error(f"Unknown dataset: {args.dataset}")

    model = dz.Model(ConvolutionalEncoder(latent_dim=args.latent_size), Decoder())
    model.load_weights(args.weights)

    dataset /= 255.0
    np.random.shuffle(dataset)
    test_images = dataset[:5, ...]

    rows = 5
    columns = 2
    f, axarr = plt.subplots(rows, columns)
    for row in range(rows):
        img = test_images[row, ...]
        axarr[row, 0].imshow(img)
        x_hat = model.predict(reshape_for_prediction(img))
        # Assumed continuation: show the reconstruction in the second column.
        axarr[row, 1].imshow(np.squeeze(x_hat))
    plt.show()
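# The reconstruction-plot script above is likewise flag-driven. A hypothetical
# invocation (script filename and checkpoint path are assumptions):
#
#   python plot_reconstructions.py -weights checkpoints/vae.weights -dataset mnist -latent_size 32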