Example #1
    def make(self, fit=True):
        """
        :param fit: If ``True`` (or if a size has not been provided), find the
            best fit for the data to avoid data overflow errors.
        """
        self.buffer = util.BitBuffer()
        util.generate_data(self.buffer, self.data_list)

        if fit or (self.version is None):
            self.best_fit(start=self.version)
        self.makeImpl(False, self.best_mask_pattern())
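This make method matches the python-qrcode library's QRCode.make. A minimal usage sketch, assuming that library is installed (the data and filename are placeholders):

import qrcode

qr = qrcode.QRCode(version=None)    # no fixed version, so best_fit will choose one
qr.add_data("https://example.com")
qr.make(fit=True)                   # fit=True (or version=None) triggers best_fit
img = qr.make_image()
img.save("example.png")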
Example #2
def run_separation(args, fft_size=4096, hop_size=1024, n_channels=2, apply_mwf_flag=True, ch_flip_average=False):
    sources = ['vocals', 'bass', 'drums', 'other']

    for i, input_file in enumerate(args.inputs):
        sample_rate, inp_stft = generate_data(
            input_file, fft_size, hop_size, n_channels)
        print("%d / %d  : %s" % (i + 1, len(args.inputs), input_file))
        out_stfts = {}
        inp_stft_contiguous = np.abs(np.ascontiguousarray(inp_stft))
        for source in sources:
            with open('./configs/{}.yaml'.format(source)) as file:
                # Load source-specific hyperparameters
                hparams = yaml.load(file, Loader=yaml.FullLoader)
            d3netwrapper = D3NetOpenVinoWrapper(args, source)
            out_sep = model_separate(
                inp_stft_contiguous, hparams, ch_flip_average=ch_flip_average, openvino_wrapper=d3netwrapper)

            out_stfts[source] = out_sep * np.exp(1j * np.angle(inp_stft))

        if apply_mwf_flag:
            out_stfts = apply_mwf(out_stfts, inp_stft)

        sub_dir_name = ''
        output_subdir = args.out_dir + sub_dir_name
        output_subdir = os.path.join(
            output_subdir, os.path.splitext(os.path.basename(input_file))[0])

        if not os.path.exists(output_subdir):
            os.makedirs(output_subdir)

        out = {}
        for source in sources:
            out[source] = save_stft_wav(out_stfts[source], hop_size, sample_rate, output_subdir + '/' +
                                        source + '.wav', samplewidth=2)
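The function above expects an argparse namespace with at least inputs and out_dir (D3NetOpenVinoWrapper likely consumes further fields). A hypothetical driver, with field names inferred from the usage above rather than taken from the original repo:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--inputs', nargs='+', required=True, help='Input audio files')
parser.add_argument('--out-dir', dest='out_dir', default='./output')
args = parser.parse_args()
run_separation(args)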
Example #3
def test_majority():
    """ Test the majority classifier"""
    x, y = generate_data()
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
    clf = MajorityClassifier()
    assert clf.majority_label is None
    clf.fit(x_train, y_train)
    assert clf.majority_label is not None
    pred_test = clf.predict(x_test)
    score = accuracy_score(y_true=y_test, y_pred=pred_test)
    assert score > 0.5
Example #4
def test_one_nearest_neighbor():
    """ Unit tests for the 1NN classifier. """
    x, y = generate_data()
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
    clf = OneNearestNeighborClassifier()
    assert clf.training_labels is None
    assert clf.training_points is None
    clf.fit(x_train, y_train)
    assert clf.training_labels is not None
    assert clf.training_points is not None
    pred_test = clf.predict(x_test)
    score = accuracy_score(y_true=y_test, y_pred=pred_test)
    assert score > 0.9
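Both tests rely on util.generate_data returning a two-class dataset with labels +1/-1 (consistent with the plotting code in Example #17). A hypothetical stand-in, purely to make the tests self-contained; the real utility may differ:

import numpy as np

def generate_data(n_per_class=100, separation=3.0, seed=0):
    # Hypothetical sketch: two well-separated Gaussian blobs in 2-D,
    # labelled +1 and -1.
    rng = np.random.default_rng(seed)
    pos = rng.normal(loc=+separation / 2, size=(n_per_class, 2))
    neg = rng.normal(loc=-separation / 2, size=(n_per_class, 2))
    x = np.vstack([pos, neg])
    y = np.concatenate([np.ones(n_per_class), -np.ones(n_per_class)])
    return x, y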
Example #5
	def fit(self, n_iter):

		old_params = util.stack_params(self.u, self.V, self.R)
		
		x_test = util.generate_data(5, self.W, self.sigx, self.dimx, self.dimz)[0]
		llh = []

		print "True: ", self.true_marg(x_test)

		def objective_wrapper(params, x):
			u, V, R = util.unstack_params(params)
		
			return self.objective(x, R, u, V)

		for j in range(n_iter):
			for i in range(self.n):
				x = self.observed[i]
				
				obj_local = lambda param: objective_wrapper(param, x)
				
				#u, V, R = self.gradient_descent(obj_local, self.u, self.V, self.R, x)
				
				u, V, R = util.adam(obj_local, self.u, self.V, self.R)
				#u, V, R = util.optimizer(obj_local, self.u, self.V, self.R)
				#print self.objective(x, R,self.u,self.V)
				# Damped SEP update with damping factor a = 1/n: mix the new site
				# into the global approximation in natural-parameter form
				# (V*u is the natural mean), then map back to the mean.
				a = 1/float(self.n)
				self.u = np.dot(self.V, self.u)*(1 - a) + a*np.dot(V, u)
				self.V = self.V + a*(V - self.V)
				self.u = np.dot(np.linalg.inv(self.V), self.u)
				
				#RM update
				a2 = 1/float(self.n)
				self.R = (1-a2)*self.R + a2*R				
				#print "obj: ", self.objective(x, R,self.u,self.V)


			params = util.stack_params(self.u, self.V, self.R)
			diff =  np.linalg.norm(old_params - params)
			old_params = params
				
			ll = self.get_marginal(self.u, self.V, self.R, x_test)
			llh.append(ll)
			
			print "m: ", ll, diff, "iter: ", j
			np.save('DATA50testNr2', np.array(llh))
		

		np.save('DATA50testNr2', np.array(llh))
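The loop relies on util.stack_params/unstack_params to flatten (u, V, R) into one vector for the optimizer and back. A hypothetical pair, assuming the shapes set up in Example #7 (the real util version may carry the shapes implicitly):

import numpy as np

def stack_params(u, V, R):
    # Hypothetical: concatenate flattened u, V, R into one 1-D vector.
    return np.concatenate([u.ravel(), V.ravel(), R.ravel()])

def unstack_params(params, f, dimz, dimx):
    # Hypothetical inverse, with shapes passed in explicitly.
    u = params[:f]
    V = params[f:f + f * f].reshape(f, f)
    R = params[f + f * f:].reshape(dimz, dimx)
    return u, V, R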
Example #6
def run_separation(args,
                   fft_size=4096,
                   hop_size=1024,
                   n_channels=2,
                   apply_mwf_flag=True,
                   ch_flip_average=False):
    # Set NNabla extension
    ctx = get_extension_context(args.context)
    nn.set_default_context(ctx)

    for i, input_file in enumerate(args.inputs):
        sample_rate, inp_stft = generate_data(input_file, fft_size, hop_size,
                                              n_channels)
        print(f"{i+1} / {len(args.inputs)}  : {input_file}")
        out_stfts = {}
        inp_stft_contiguous = np.abs(np.ascontiguousarray(inp_stft))

        for target in args.targets:
            # Load the model weights for corresponding target
            nn.load_parameters(f"{os.path.join(args.model_dir, target)}.h5")
            with open(f"./configs/{target}.yaml") as file:
                # Load target-specific hyperparameters
                hparams = yaml.load(file, Loader=yaml.FullLoader)
            with nn.parameter_scope(target):
                out_sep = model_separate(inp_stft_contiguous,
                                         hparams,
                                         ch_flip_average=ch_flip_average)
                out_stfts[target] = out_sep * np.exp(1j * np.angle(inp_stft))

        if apply_mwf_flag:
            out_stfts = apply_mwf(out_stfts, inp_stft)

        sub_dir_name = ''
        output_subdir = args.out_dir + sub_dir_name
        output_subdir = os.path.join(
            output_subdir,
            os.path.splitext(os.path.basename(input_file))[0])

        if not os.path.exists(output_subdir):
            os.makedirs(output_subdir)

        out = {}
        for target in args.targets:
            out[target] = save_stft_wav(out_stfts[target],
                                        hop_size,
                                        sample_rate,
                                        output_subdir + '/' + target + '.wav',
                                        samplewidth=2)
Example #7
	def __init__(self, n, dimx, dimz):
		
		'''
		n: size of dataset
		dimx: dimensions of observed variables
		dimz: dimensions of local latent variables

		Generative procedure: the global parameter W and the local
		latent variables are not observed.
		'''
		self.n = n
		self.sigx = 0.1
		self.W = np.random.normal(0,1, size = (dimx,dimz))
		self.dimz = dimz
		self.dimx = dimx
		#data
		data = util.generate_data(n, self.W, self.sigx, dimx, dimz)
		self.observed = data[0] 
		self.latent = data[1] 
		
		
		'''
		Model Parameters: mean and precision
		'''
		#SEP params
		f = dimx*dimz
		self.SEP_prior_mean = np.zeros(f).reshape((f,1)) 
		self.SEP_prior_prec = np.identity(f)

		self.u = np.zeros(f).reshape((f,))	
		self.V = (1e-4)*np.eye(f)
		
		
		self.R = np.random.randn(dimz,dimx)
		self.S = np.identity(dimz) 
		
		# Exact posterior covariance for the linear-Gaussian model:
		# S = (I + W^T (sigx*I)^{-1} W)^{-1}. Use dimx/dimz instead of the
		# hardcoded 3 and 2 so the code works for arbitrary dimensions.
		I = np.linalg.inv(self.sigx*np.eye(dimx))
		S = np.eye(dimz) + np.dot(np.dot(self.W.T, I), self.W)
		S = np.linalg.inv(S)
		self.S = S
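The constructor assumes util.generate_data draws from the linear-Gaussian model described in the docstring. A hypothetical sketch, treating sigx as the observation-noise variance (the original utility may differ):

import numpy as np

def generate_data(n, W, sigx, dimx, dimz):
    # Hypothetical: z_i ~ N(0, I), x_i = W z_i + eps_i, eps_i ~ N(0, sigx*I).
    z = np.random.randn(n, dimz)
    x = z.dot(W.T) + np.sqrt(sigx) * np.random.randn(n, dimx)
    return x, z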
Example #8
	def fit(self, n_iter):
		
		old_params = util.stack_params(self.u, self.V, self.R)
		
		x_test = util.generate_data(20, self.W, self.sigx, self.dimx, self.dimz)[0]
		llh = []
		for j in range(n_iter):
			for i in range(self.n):
				x = self.observed[i]
				
				obj_local = lambda param: self.objective_wrapper(param, x)
				
				u, V, R = util.gradient_descent(obj_local, self.u, self.V, self.R)
				
				#u, V, R = util.optimizer(obj_local, self.u, self.V, self.R)
				
				# Damped SEP update: a is the damping factor alpha (see the SEP paper)
				a = 1/float(self.n)
				self.u = np.dot(self.V, self.u)*(1- a) + a*(np.dot(V,u))
				self.V = self.V + a*(V- self.V)
				self.u = np.dot( np.linalg.inv(self.V), self.u)
					
				
				#RM update
				a2 = 1/float(self.n)
				self.R = (1-a2)*self.R + a2*R
				
				
			params = util.stack_params(self.u, self.V, self.R)
			diff =  np.linalg.norm(old_params - params)
			old_params = params
				
			ll = self.get_marginal(self.u, self.V, self.R, x_test)
			llh.append(ll)
			print "m: ", ll, diff
			np.save('learn', np.array(llh))
			
		np.save('learn', np.array(llh))
Example #9
	def __init__(self, n, dimx, dimz):
		
		'''
		n: size of dataset
		dimx: dimensions of observed variables
		dimz: dimensions of local latent variables

		Generative procedure: the global parameter W and the local
		latent variables are not observed.
		'''
		self.n = n
		self.sigx = 0.1
		np.random.seed(1234)
		self.W = np.random.normal(0,1, size = (dimx,dimz))
		self.dimz = dimz
		self.dimx = dimx
		#data
		data = util.generate_data(n, self.W, self.sigx, dimx, dimz)
		self.observed = data[0] 
		self.latent = data[1] 
		
		
		'''
		Model Parameters: mean and precision
		'''
		#SEP params
		f = dimx*dimz
		self.SEP_prior_mean = np.zeros(f).reshape((f,1)) #fx1
		self.SEP_prior_prec = np.identity(f)	#fxf

		self.u = np.random.randn(f)
		self.V = 1e-4*np.eye(f)
		
		# recognition model parameters
		self.R = np.random.randn(dimz,dimx) 
		self.S = self.sigx*np.eye(dimz)
Example #10
def z(lamb, beta):
    vals, _ = util.generate_data(lambda a, b: a/b, 100)
    diff = lambda a, b: np.abs(a / b - (lamb*a + (1-lamb)*b) / (beta*a + (1-beta)*b))
    return np.mean([diff(a,b) for a in vals[:,0] for b in vals[:,1]], axis=0)
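z measures the mean absolute error of the bilinear surrogate (lamb*a + (1-lamb)*b) / (beta*a + (1-beta)*b) against the true ratio a/b. A hypothetical grid scan over the two mixing weights, assuming z as defined above:

import numpy as np

grid = np.linspace(0.0, 1.0, 11)
best = min(((l, b, z(l, b)) for l in grid for b in grid), key=lambda t: t[2])
print("best (lamb, beta):", best[:2], "mean abs error:", best[2])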
Example #11
trials = [
        # (lambda x: x+2, 1),
        # (lambda x, y: x+y, 1),
        # (lambda x, y: x-y, 1),
        # (lambda x, y: x*y, 1),
        (lambda x, y: x/y, 1),
        # (lambda x, y, z: x * (y + z), 2),
        # (lambda w, x, y, z: (w + x) / (y - z), 3),
        # (lambda x, y, z: x / (y + z), 2),
        # (lambda w, x, y, z: (w + x) * (y + z), 3),
        # (lambda w, x, y, z: w + x*(2.0 + y*z), 4),
        # (lambda I, M, m, l, b: -(I+m*l*l)*b / (I*(M+m) + M*m*l*l), 10),
        # (lambda x: 6.0*x, 1)
]

# models = [
#         mlp,
#         pnet_lambda(SCALE)
# ]

models = [pnet_lambda(lamb) for lamb in (100,)]  # full sweep: (100, 1000, 10000)

for fn, steps in trials:
    X, y = util.generate_data(fn, DATASIZE, scale=SCALE)#, noise=np.random.randn)
    # pnet(fn, X, y, steps)
    all_losses = parallel.run(models, args=(fn, X, y, steps))
    # for losses in all_losses:
    #     plt.plot(losses)
    # plt.legend(['Standard MLP', 'Processor network'])
    # plt.show()
Example #12
"""
Spyder Editor

This is a temporary script file.
"""
import tensorflow as tf
from util import generate_data, split_test_train, plot_3d_data, plot_pred_real
import numpy as np

if __name__ == "__main__":
    sample_size, p = 100, 0.3
    hidden_units = 10

    train_size, test_size = int((1.0 - p) * sample_size), int(sample_size * p)
    sample_size = train_size + test_size

    dataset, utopy = generate_data(n=sample_size)
    test, train = split_test_train(dataset, size=test_size)

    train_x1_x2, train_y = np.split(train, [2], 1)
    test_x1_x2, test_y = np.split(test, [2], 1)

    rho = tf.constant(0.01, dtype=tf.float32)

    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)

    v = tf.Variable(tf.truncated_normal(shape=[hidden_units, 1],
                                        dtype=tf.float32),
                    name="v")
    w = tf.Variable(tf.truncated_normal(shape=[hidden_units, 2],
                                        dtype=tf.float32),
Example #13
from util import generate_data

if __name__ == "__main__":
    generate_data()
Example #14
'''
Params for training samples
'''
logging.getLogger().setLevel(logging.DEBUG)
batch_size = 16
image_size = 33
label_size = 21
stride = 15
magnitude = 2
EpochNum = 3
'''
Generate the h5 files
'''
if not os.path.exists(os.path.join(os.getcwd(), 'train.h5')):
    generate_data(image_size, stride, label_size, magnitude, "Train")
if not os.path.exists(os.path.join(os.getcwd(), 'val.h5')):
    generate_data(image_size, stride * 3, label_size, magnitude, "Val")
'''
Read data from h5 file and convert to NDArrayIter.
'''
train_data, train_label = read_h5file(os.path.join(os.getcwd(), 'train.h5'))
val_data, val_label = read_h5file(os.path.join(os.getcwd(), 'val.h5'))
train_iter = mx.io.NDArrayIter(train_data,
                               train_label,
                               batch_size=batch_size,
                               data_name='data',
                               label_name='label')
val_iter = mx.io.NDArrayIter(val_data, val_label, batch_size=batch_size)
'''
Set network symbols
Example #15
if __name__ == "__main__":
    # Define hyperparameters
    EPOCHS = 1000
    LEARNING_RATE = 0.01
    SEED = 1743734
    SAMPLE_SIZE, p = 1000, 0.3
    INPUT_UNITS = 2
    HIDDEN_UNITS = 10
    OUTPUT_UNITS = 1

    # Define Dataset
    train_size, test_size = int((1.0 - p) * SAMPLE_SIZE), int(SAMPLE_SIZE * p)
    SAMPLE_SIZE = train_size + test_size

    dataset, utopy = generate_data(n=SAMPLE_SIZE)
    test, train = split_test_train(dataset, size=test_size)
    train_x1_x2, train_y = np.split(train, [2], 1)
    test_x1_x2, test_y = np.split(test, [2], 1)

    print("[info] test", type(train_y), train_y.shape)
    print("[info] train", type(train), train.shape)

    # Define placeholders
    x = tf.placeholder(tf.float32, [None, INPUT_UNITS])
    y = tf.placeholder(tf.float32, [None, OUTPUT_UNITS])

    # Creating Training model

    # Defining weights
    pi = {'rho': tf.constant(0.00001)}
Example #16
parser.add_argument('--model', default='ff', help='Pick a model to use for estimation.')
parser.add_argument('-t', '--train', action='store_true', help='Explicitly train.')
parser.add_argument('-p', '--plot', action='store_true', help='Explicitly plot.')

FLAGS, unparsed = parser.parse_known_args()

print('running with arguments: ({})'.format(FLAGS))

errors = []

load, earliest_date, latest_date = ResInterval60.get_batch(sqlClient, batch_size=20000)
weather = LocalWeather.get_weather(sqlClient, earliest_date, latest_date)

kf = KFold(shuffle=True, n_splits=2, random_state=0)

X, Y = generate_data(LOOKBACK, load, weather)

print('total dataset size X:{}, Y:{}'.format(X.shape, Y.shape))

for train, test in kf.split(X):
    model = None

    if FLAGS.model == 'ff':
        tf.logging.set_verbosity(tf.logging.INFO)
        model = FeedForward(num_layer=4, num_neuron=300, input_size=X.shape[1])
        tf.logging.set_verbosity(tf.logging.WARN)
    elif FLAGS.model == 'linear':
        model = LinearRegression()
    elif FLAGS.model == 'gru':
        model = GRU()
    elif FLAGS.model == 'svr':
Example #17
""" Make plots to compare the classifiers. """
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

from majority import MajorityClassifier
from one_nearest_neighbor import OneNearestNeighborClassifier
from util import generate_data

X, y = generate_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

true_markers = {1: '+', -1: 'o'}
predicted_colors = {1: 'blue', -1: 'red'}

# Show the ground truth of the datasets
for label in [1, -1]:
    points = X_train[y_train == label, :]
    plt.scatter(points[:, 0], points[:, 1], marker=true_markers[label], label=f'{label} train', color='black')
    points = X_test[y_test == label, :]
    plt.scatter(points[:, 0], points[:, 1], marker=true_markers[label], label=f'{label} test', color='magenta')
plt.legend()
plt.title('Ground truth')
plt.show()


def show_performance(features, true_labels, predicted_labels, classifier_name):
    """ Plot performance of one classifier & show accuracy """
    for index, predicted_label in enumerate(predicted_labels):
        plt.scatter(
                features[index, 0], features[index, 1],  # X/Y coordinates
Example #18
def refresh_data():
    generate_data()
    return redirect("/")
Example #19
from pyro.optim import Adam
from util import generate_data
"""
This script implements a simple mixture density network in Pyro. It
uses the same data example as in Bishop's book (Bishop, C. M. (2006).
Pattern Recognition and Machine Learning.) and is based on the PyTorch
implementation of David Ha,
https://github.com/hardmaru/pytorch_notebooks/blob/master/mixture_density_networks.ipynb
"""
torch.manual_seed(123)
np.random.seed(4530)

n_samples = 1000
n_gaussians = 5

x_data, x_variable, y_data, y_variable = generate_data(n_samples)
"""
Define network model.
"""


class MDN(nn.Module):
    def __init__(self, n_hidden, n_gaussians):
        super().__init__()
        self.z_h = nn.Sequential(nn.Linear(1, n_hidden), nn.Tanh())
        self.z_pi = nn.Linear(n_hidden, n_gaussians)
        self.z_sigma = nn.Linear(n_hidden, n_gaussians)
        self.z_mu = nn.Linear(n_hidden, n_gaussians)

    def forward(self, data):
        z_h = self.z_h(data.view(-1, 1))
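The listing cuts off mid-forward. In a standard mixture density network (including David Ha's reference notebook) the forward pass finishes by emitting mixture weights, scales, and means; a hedged reconstruction, not the verbatim source:

        pi = torch.softmax(self.z_pi(z_h), dim=-1)   # mixture weights (sum to 1)
        sigma = torch.exp(self.z_sigma(z_h))         # positive component scales
        mu = self.z_mu(z_h)                          # component means
        return pi, sigma, mu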