def generator(Z, out_dim):
    """Build the generator network.

    A two-hidden-layer MLP (16 units each, leaky-ReLU activations) that
    maps the latent input ``Z`` to ``out_dim`` linear outputs.

    Returns the list of layers; the last entry is the output layer.
    """
    net = []
    h = layers.Dense(Z, 16)
    net.append(h)
    h = layers.Activation(h, T.leaky_relu)
    net.append(h)
    h = layers.Dense(h, 16)
    net.append(h)
    h = layers.Activation(h, T.leaky_relu)
    net.append(h)
    # final projection is linear (no activation)
    net.append(layers.Dense(h, out_dim))
    return net
def encoder(X, latent_dim):
    """Build the encoder network.

    Two (Dense(32) + leaky-ReLU) stages over the input ``X``, followed by a
    linear layer of width ``latent_dim * 2`` (presumably mean and
    log-variance of the latent code — confirm against the caller).

    Returns the list of layers; the last entry is the output layer.
    """
    net = []
    h = X
    for _ in range(2):
        h = layers.Dense(h, 32)
        net.append(h)
        h = layers.Activation(h, T.leaky_relu)
        net.append(h)
    net.append(layers.Dense(h, latent_dim * 2))
    return net
def discriminator(X):
    """Build the discriminator network.

    Two (Dense(32) + leaky-ReLU) stages over the input ``X``, followed by a
    linear 2-unit output layer (logits; no softmax applied here).

    Returns the list of layers; the last entry is the output layer.
    """
    net = []
    h = X
    for _ in range(2):
        h = layers.Dense(h, 32)
        net.append(h)
        h = layers.Activation(h, T.leaky_relu)
        net.append(h)
    net.append(layers.Dense(h, 2))
    return net
# NOTE(review): fragment — `layer` (a list of layers), `deterministic`
# (train/eval flag fed to BatchNorm/Dropout) and `label` are defined
# earlier in the file, outside this chunk.
# Conv block 1: Conv(16, 3x3) -> BN over [0, 2, 3] (per-channel) -> leaky ReLU -> 3x3 pool.
layer.append(layers.Conv2D(layer[-1], 16, (3, 3)))
layer.append(layers.BatchNormalization(layer[-1], [0, 2, 3], deterministic))
layer.append(layers.Activation(layer[-1], T.leaky_relu))
layer.append(layers.Pool2D(layer[-1], (3, 3)))
# Conv block 2: same as block 1 but with an asymmetric (1, 2) pool.
layer.append(layers.Conv2D(layer[-1], 16, (3, 3)))
layer.append(layers.BatchNormalization(layer[-1], [0, 2, 3], deterministic))
layer.append(layers.Activation(layer[-1], T.leaky_relu))
layer.append(layers.Pool2D(layer[-1], (1, 2)))
# Conv block 3: widens to 32 channels; its pooling stage is disabled below.
layer.append(layers.Conv2D(layer[-1], 32, (3, 3)))
layer.append(layers.BatchNormalization(layer[-1], [0, 2, 3], deterministic))
layer.append(layers.Activation(layer[-1], T.leaky_relu))
#layer.append(layers.Pool2D(layer[-1], (3, 1)))
# Dense head: 256 -> 32 -> 2, each dense stage followed by BN over the
# batch axis only ([0]), leaky ReLU, and dropout (0.5 then 0.2).
layer.append(layers.Dense(layer[-1], 256))
layer.append(layers.BatchNormalization(layer[-1], [0], deterministic))
layer.append(layers.Activation(layer[-1], T.leaky_relu))
layer.append(layers.Dropout(layer[-1], 0.5, deterministic))
layer.append(layers.Dense(layer[-1], 32))
layer.append(layers.BatchNormalization(layer[-1], [0], deterministic))
layer.append(layers.Activation(layer[-1], T.leaky_relu))
layer.append(layers.Dropout(layer[-1], 0.2, deterministic))
# NOTE(review): a ReLU is applied inline to the dropout output before the
# final 2-unit logit layer — unlike earlier stages, which use leaky ReLU
# via layers.Activation; confirm this asymmetry is intentional.
layer.append(layers.Dense(T.relu(layer[-1]), 2))
# Training objective on the logits (no softmax here: the loss is the
# *_logits variant) and the matching accuracy metric.
loss = theanoxla.losses.sparse_crossentropy_logits(label, layer[-1])
accuracy = theanoxla.losses.accuracy(label, layer[-1])
# Flatten every layer's trainable variables into one list for the optimizer.
var = sum([lay.variables for lay in layer], [])