Example #1
import argparse

import theanoxla
import theanoxla.tensor as T

parse = argparse.ArgumentParser()
parse.add_argument('-L', type=int)
args = parse.parse_args()

# dataset
wavs, labels, infos = theanoxla.datasets.load_freefield1010(subsample=2,
                                                            n_samples=7000)
wavs /= wavs.max(1, keepdims=True)
wavs_train, wavs_test, labels_train, labels_test = theanoxla.utils.train_test_split(
    wavs, labels, 0.33)

# variables
L = args.L
BS = 6

signal = T.Placeholder((BS, len(wavs[0])), 'float32')

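# L > 0 selects a Wigner-Ville (wvd) time-frequency front end; otherwise a
# mel-scale filter-bank representation (mfsc) is used instead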
if L > 0:
    WVD = T.signal.wvd(T.expand_dims(signal, 1), 1024, L=L, hop=32)
else:
    WVD = T.signal.mfsc(T.expand_dims(signal, 1), 1024, 192, 80, 2, 44100 / 4,
                        44100 / 4)

tf_func = theanoxla.function(signal, outputs=[WVD], backend='cpu')

tf = T.Placeholder(WVD.shape, 'float32')
label = T.Placeholder((BS, ), 'int32')
deterministic = T.Placeholder((1, ), 'bool')

# first layer
NN = 32
Example #2
import time

from scipy.io.wavfile import read

import theanoxla
import theanoxla.tensor as T

import matplotlib.pyplot as plt
from matplotlib import interactive

interactive(False)
# https://github.com/google/jax/blob/master/jax/lib/xla_bridge.py
from jax.lib import xla_client
from scipy.ndimage import gaussian_filter

fs, SIGNAL = read("output2.wav")
SIGNAL = SIGNAL[2**15:, 0]
SIGNAL = SIGNAL / SIGNAL.max()

SS = 2**16
signal = T.Placeholder((SS, ), "float32")

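# add the batch and channel axes expected by the T.signal transforms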
signal2 = T.reshape(signal, (1, 1, -1))
wv = T.signal.wvd(signal2, 1024, 32, L=32, apod=T.signal.hanning, mode="same")
sp = T.signal.spectrogram(signal2, 256, 32, apod=T.signal.hanning, mode="same")
melsp = T.signal.melspectrogram(signal2, 1024, 32, 80, 10, 20000, 22000)
mfcc = T.signal.mfcc(signal2, 1024, 32, 80, 10, 20000, 22000, 12)

filters = T.signal.mel_filterbank(1024, 80, 10, 20000, 22000)
fil = theanoxla.function(outputs=[filters])
tfs = theanoxla.function(signal,
                         outputs=[wv[0, 0], sp[0, 0], melsp[0, 0], mfcc[0, 0]])

t = time.time()
TFs = tfs(SIGNAL[:SS].astype("float32"))
FIL = fil()[0]
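# The excerpt stops right after evaluating the transforms. Below is a minimal
# plotting sketch, assuming the TFs and FIL variables computed above and only
# standard matplotlib calls (the original script's own plotting code is not
# shown here).
print("elapsed:", time.time() - t)
names = ["wvd", "spectrogram", "melspectrogram", "mfcc"]
plt.figure(figsize=(12, 8))
for i, (name, rep) in enumerate(zip(names, TFs)):
    plt.subplot(2, 3, i + 1)
    plt.imshow(rep, aspect="auto", origin="lower")
    plt.title(name)
plt.subplot(2, 3, 5)
plt.plot(FIL.T)
plt.title("mel filterbank")
plt.tight_layout()
plt.savefig("transforms.png")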
Example #3
import numpy as np

import theanoxla
import theanoxla.nn
import theanoxla.tensor as T

# NOTE: the excerpt starts mid-script: SHAPE and z are not defined in it, so
# hedged stand-ins are used here (z is assumed to be a Variable, since it
# appears as an update target below), and the imports are a best guess.
SHAPE = (4, 4)
z = T.Variable(np.random.randn(*SHAPE).astype('float32'))

w = T.Placeholder(SHAPE, 'float32', name='w')
noise = T.random.uniform(SHAPE, dtype='float32')
y = T.cos(theanoxla.nn.activations.leaky_relu(z, 0.3) + w + noise)
cost = T.pool(y, (2, 2))
cost = T.sum(cost)

grads = theanoxla.gradients(cost, [w, z], [1])

print(cost.get({w: np.random.randn(*SHAPE)}))
noise.seed = 20
print(cost.get({w: np.random.randn(*SHAPE)}))
noise.seed = 40
print(cost.get({w: np.random.randn(*SHAPE)}))

updates = {z: z - 0.01 * grads[0]}
fn1 = theanoxla.function(w, outputs=[cost])
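# The excerpt ends right after compiling fn1; a minimal call sketch follows,
# assuming the definitions above (the random noise term makes repeated calls
# stochastic unless its seed is fixed as done earlier).
print(fn1(np.random.randn(*SHAPE).astype('float32')))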
Example #4
# NOTE: the excerpt is truncated: encoder, decoder, generator, discriminator
# and latent_dim are defined elsewhere in the original script, X is only
# created further below, and the theanoxla imports here are a best guess
# based on the other examples.
from sklearn import datasets

import theanoxla.tensor as T
from theanoxla import losses, gradients

# KL div. between the prior (standard gaussian) and our distribution
enc = encoder(X, 2)
dec = decoder(enc, X.shape[1])
mu = enc[-1][:latent_dim]
logvar = enc[-1][latent_dim:]

divergence = -0.5 * (T.sum(T.exp(logvar) - logvar + mu**2, 1) - latent_dim)
rec = ((X - dec[-1])**2).sum(1)
elbo = (rec - divergence).mean()
loss = -elbo

BS = 100
lr = 0.001
DATA, _ = datasets.make_moons(1000)

X = T.Placeholder([BS, 2], 'float32')
Z = T.Placeholder([BS, 2], 'float32')

G_sample = generator(Z, 2)
logits = discriminator(T.concatenate([G_sample[-1], X]))
labels = T.concatenate([T.zeros(BS, dtype='int32'), T.ones(BS, dtype='int32')])

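# the discriminator sees fakes (label 0) followed by real samples (label 1);
# the generator loss flips the labels on the fake half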
disc_loss = losses.sparse_crossentropy_logits(labels, logits[-1]).mean()
gen_loss = losses.sparse_crossentropy_logits(1 - labels[:BS],
                                             logits[-1][:BS]).mean()
masks = T.concatenate([G_sample[1] > 0, G_sample[3] > 0], 1)

A = T.stack([
    gradients(G_sample[-1][:, 0].sum(), [Z])[0],
    gradients(G_sample[-1][:, 1].sum(), [Z])[0]
], 1)
Example #5
import argparse

import theanoxla
import theanoxla.tensor as T

parse = argparse.ArgumentParser()
parse.add_argument("-L", type=int)
args = parse.parse_args()

# dataset
wavs, labels, infos = theanoxla.datasets.load_freefield1010(subsample=2, n_samples=7000)
wavs /= wavs.max(1, keepdims=True)
wavs_train, wavs_test, labels_train, labels_test = theanoxla.utils.train_test_split(
    wavs, labels, 0.33
)

# variables
L = args.L
BS = 6


signal = T.Placeholder((BS, len(wavs[0])), "float32")

if L > 0:
    WVD = T.signal.wvd(T.expand_dims(signal, 1), 1024, L=L, hop=32)
else:
    WVD = T.signal.mfsc(
        T.expand_dims(signal, 1), 1024, 192, 80, 2, 44100 / 4, 44100 / 4
    )

tf_func = theanoxla.function(signal, outputs=[WVD], backend="cpu")

tf = T.Placeholder(WVD.shape, "float32")
label = T.Placeholder((BS,), "int32")
deterministic = T.Placeholder((1,), "bool")

# first layer
Example #6
import time

from scipy.io.wavfile import read

import theanoxla
import theanoxla.tensor as T

import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(False)
# https://github.com/google/jax/blob/master/jax/lib/xla_bridge.py
from jax.lib import xla_client
from scipy.ndimage import gaussian_filter

fs, SIGNAL = read('output2.wav')
SIGNAL = SIGNAL[2**15:, 0]
SIGNAL = SIGNAL / SIGNAL.max()

SS = 2**16
signal = T.Placeholder((SS, ), 'float32')

signal2 = T.reshape(signal, (1, 1, -1))
wv = T.signal.wvd(signal2, 1024, 32, L=32, apod=T.signal.hanning, mode='same')
sp = T.signal.spectrogram(signal2, 256, 32, apod=T.signal.hanning, mode='same')
melsp = T.signal.melspectrogram(signal2, 1024, 32, 80, 10, 20000, 22000)
mfcc = T.signal.mfcc(signal2, 1024, 32, 80, 10, 20000, 22000, 12)

filters = T.signal.mel_filterbank(1024, 80, 10, 20000, 22000)
fil = theanoxla.function(outputs=[filters])
tfs = theanoxla.function(signal,
                         outputs=[wv[0, 0], sp[0, 0], melsp[0, 0], mfcc[0, 0]])

t = time.time()
TFs = tfs(SIGNAL[:SS].astype('float32'))
FIL = fil()[0]
Example #7
# NOTE: this is a scratch/debug script from the original repository: `image`
# and `q` were defined elsewhere, `patches` only exists if the commented-out
# extract_image_patches call is enabled, and the bare `asdf`/`sdf` lines were
# intentional break points. A lightly cleaned version is shown here.
import theanoxla.tensor as T

image = T.Placeholder((4, 4), 'float32')  # hedged stand-in for the original `image`
print(image)
T.signal.fft(image, axes=(-1, ))
print(image)
# print(image.get({}))  # would require feeding a value for the placeholder

# print(T.gather(image, [0, 1]))
# print(T.gather(image, [0, 1]).get({}))

# patches = T.extract_image_patches(image, (2, 1))
# print(patches.shape)
# print(patches.get({})[0, 0, 0, 0])

tr = T.Placeholder((10, 10), 'float32')
tr2 = T.concatenate([tr, tr], 0)
tr3 = tr * 4
print(tr2.roots, tr3.roots)

# print(patches.get({}))

a = T.Placeholder((4, 4), 'float32')
print(a.roots)
b = a * 3 + a
c = b + b + a + b * a
print(c.roots)
Example #8
import jax
import numpy as np
import sys
sys.path.insert(0, "../")

import theanoxla
import theanoxla.tensor as T
import theanoxla.nn as nn

w = T.Placeholder((3, ), np.float32, name='w')

# MAP example 1
output = T.map(lambda a, b: T.pow(a, b), w, T.cast(T.arange(3), 'float32'))
print(output.get({w: jax.numpy.arange(3).astype('float32')}))
fn = theanoxla.function(w, outputs=[output])
print(fn(jax.numpy.arange(3).astype('float32')))

# MAP example 2
output = T.map(lambda a: T.pow(a, 2.), T.cast(T.arange(3), 'float32'))
print(output.get())
fn = theanoxla.function(outputs=[output])
print(fn())

# SCAN example 1
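# (scan threads a carry, the first lambda argument, over the rows of the
# sequence argument; starting from zeros this accumulates the entries of w)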
output = T.scan(lambda a, b: a + b, T.zeros(1), T.reshape(w, (3, 1)))
print(output.get({w: jax.numpy.arange(3).astype('float32')}))
fn = theanoxla.function(w, outputs=[output])
print(fn(jax.numpy.arange(3).astype('float32')))

# SCAN example 2
output = T.scan(lambda a, b, c: a + b * c, T.zeros(1),
Example #9
import sys

sys.path.insert(0, "../")

import numpy as np
from scipy.io.wavfile import read

import theanoxla
import theanoxla.tensor as T
from theanoxla import layers

import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(False)
# https://github.com/google/jax/blob/master/jax/lib/xla_bridge.py
from jax.lib import xla_client
from sklearn.metrics import roc_auc_score, accuracy_score

BS = 1
signal = T.Placeholder((BS, 4), 'float32')
deterministic = T.Placeholder((1, ), 'bool')
random = T.random.bernoulli((2, 2), p=0.5)
output = layers.Dropout(signal, 0.5, deterministic)
g = theanoxla.function(outputs=[random])
f = theanoxla.function(signal, deterministic, outputs=[output])

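# sample the raw Bernoulli noise, then compare the dropout layer with
# deterministic=0 (noise applied) and deterministic=1 (presumably the
# test-time pass-through)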
for epoch in range(100):
    print(g())

for epoch in range(100):
    print(f(np.ones((BS, 4)), 0)[0])

for epoch in range(100):
    print(f(np.ones((BS, 4)), 1)[0])
Example #10
# NOTE: this excerpt duplicates Example #4; as there, encoder, decoder,
# generator, discriminator, latent_dim and X are assumed to be defined
# elsewhere, and the imports below are a best guess.
from sklearn import datasets

import theanoxla.tensor as T
from theanoxla import losses, gradients

# KL div. between the prior (standard gaussian) and our distribution
enc = encoder(X, 2)
dec = decoder(enc, X.shape[1])
mu = enc[-1][:latent_dim]
logvar = enc[-1][latent_dim:]

divergence = -0.5 * (T.sum(T.exp(logvar) - logvar + mu**2, 1) - latent_dim)
rec = ((X - dec[-1])**2).sum(1)
elbo = (rec - divergence).mean()
loss = -elbo

BS = 100
lr = 0.001
DATA, _ = datasets.make_moons(1000)

X = T.Placeholder([BS, 2], "float32")
Z = T.Placeholder([BS, 2], "float32")

G_sample = generator(Z, 2)
logits = discriminator(T.concatenate([G_sample[-1], X]))
labels = T.concatenate([T.zeros(BS, dtype="int32"), T.ones(BS, dtype="int32")])

disc_loss = losses.sparse_crossentropy_logits(labels, logits[-1]).mean()
gen_loss = losses.sparse_crossentropy_logits(1 - labels[:BS],
                                             logits[-1][:BS]).mean()
masks = T.concatenate([G_sample[1] > 0, G_sample[3] > 0], 1)

A = T.stack(
    [
        gradients(G_sample[-1][:, 0].sum(), [Z])[0],
        gradients(G_sample[-1][:, 1].sum(), [Z])[0],
Example #11
import numpy as np
from scipy.io.wavfile import read

import theanoxla
import theanoxla.tensor as T
from theanoxla import layers

import matplotlib.pyplot as plt
from matplotlib import interactive

interactive(False)
# https://github.com/google/jax/blob/master/jax/lib/xla_bridge.py
from jax.lib import xla_client
from sklearn.metrics import roc_auc_score, accuracy_score

BS = 1
signal = T.Placeholder((BS, 4), "float32")
deterministic = T.Placeholder((1, ), "bool")
random = T.random.bernoulli((2, 2), p=0.5)
output = layers.Dropout(signal, 0.5, deterministic)
g = theanoxla.function(outputs=[random])
f = theanoxla.function(signal, deterministic, outputs=[output])

for epoch in range(100):
    print(g())

for epoch in range(100):
    print(f(np.ones((BS, 4)), 0)[0])

for epoch in range(100):
    print(f(np.ones((BS, 4)), 1)[0])
Example #12
import jax
import numpy as np
import sys
sys.path.insert(0, "../")

import theanoxla
import theanoxla.tensor as T

image = T.Placeholder((512**2, ), 'float32')
output = image.reshape((1, 1, 512, 512))
f = theanoxla.function(image, outputs=[output])
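# call the compiled reshape repeatedly, presumably as a throughput / memory
# stress test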
for i in range(10000):
    print(i)
    f(np.random.randn(512**2))
Example #13
import sys

sys.path.insert(0, "../")

import numpy as np

import theanoxla
import theanoxla.tensor as T


def conv(x, y):
    # hand-written 2x2 reference used to check T.convNd below
    x = x[0, 0]
    y = y[0, 0]
    return (x[:-1, :-1] * y[0, 0] + x[1:, :-1] * y[1, 0] +
            x[:-1, 1:] * y[0, 1] + x[1:, 1:] * y[1, 1])


SHAPE = (1, 1, 1164, 1164)
w = T.Placeholder(SHAPE, "float32", name="w")
SHAPE2 = (1, 1, 2, 2)
filter = T.Placeholder(SHAPE2, "float32", name="filter")
output = T.convNd(w, filter)

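# compile the op and compare it with the hand-written reference on random data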
f = theanoxla.function(w, filter, outputs=[output])

data = np.random.randn(*SHAPE).astype("float32")
filter = np.random.randn(*SHAPE2).astype("float32")
output = f(data, filter)[0][0, 0]
target = conv(data, filter)
print("% close values:",
      100 * np.mean(np.isclose(target, output).astype("float32")))
Example #14
import jax
import numpy as np
import sys

sys.path.insert(0, "../")

import theanoxla
import theanoxla.tensor as T

image = T.Placeholder((512**2, ), "float32")
output = image.reshape((1, 1, 512, 512))
f = theanoxla.function(image, outputs=[output])
for i in range(10000):
    print(i)
    f(np.random.randn(512**2))
Example #15
# NOTE: same scratch/debug script as Example #7, cleaned up the same way
# (`image`, `q` and `patches` are not defined in the excerpt; the bare
# `asdf`/`sdf` lines were intentional break points).
import theanoxla.tensor as T

image = T.Placeholder((4, 4), "float32")  # hedged stand-in for the original `image`
print(image)
T.signal.fft(image, axes=(-1, ))
print(image)
# print(image.get({}))  # would require feeding a value for the placeholder

# print(T.gather(image, [0, 1]))
# print(T.gather(image, [0, 1]).get({}))

# patches = T.extract_image_patches(image, (2, 1))
# print(patches.shape)
# print(patches.get({})[0, 0, 0, 0])

tr = T.Placeholder((10, 10), "float32")
tr2 = T.concatenate([tr, tr], 0)
tr3 = tr * 4
print(tr2.roots, tr3.roots)

# print(patches.get({}))

a = T.Placeholder((4, 4), "float32")
print(a.roots)
b = a * 3 + a
c = b + b + a + b * a
print(c.roots)
Example #16
import argparse

import numpy as np

import theanoxla
import theanoxla.tensor as T

parse = argparse.ArgumentParser()
parse.add_argument('-L', type=int)
args = parse.parse_args()

# dataset
wavs, labels, infos = theanoxla.datasets.load_freefield1010(subsample=2,
                                                            n_samples=7000)

# variables
L = args.L
BS = 1

signal = T.Placeholder((BS, wavs.shape[1]), 'float32')

if L > 0:
    WVD = T.signal.wvd(T.expand_dims(signal, 1), 1024, L=L, hop=32)
else:
    WVD = T.signal.mfsc(T.expand_dims(signal, 1), 1024, 192, 80, 2, 44100 / 4,
                        44100 / 4)

tf_func = theanoxla.function(signal, outputs=[WVD], backend='cpu')

# transform the data

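# iterate over the waveforms in contiguous batches and (presumably) save the
# computed time-frequency representation of each batch to disk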
for i, x in enumerate(
        theanoxla.utils.batchify(wavs, batch_size=BS, option='continuous')):
    print('before')
    np.savez_compressed(