from mxnet import nd

def mini_batch(samples, labels, batchSize):
    """Yield (samples, labels) mini-batches in a random order."""
    sampleN = samples.shape[0]
    randomIndex = nd.array(list(range(sampleN)))
    # nd.shuffle returns a shuffled copy rather than shuffling in place,
    # so the result must be assigned back.
    randomIndex = nd.shuffle(randomIndex)
    for i in range(0, sampleN, batchSize):
        batchIndex = randomIndex[i:min(i + batchSize, sampleN)]
        yield samples.take(batchIndex), labels.take(batchIndex)
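
# A minimal usage sketch (illustrative only): the toy arrays below are
# hypothetical and just demonstrate the generator's interface.
X = nd.random.uniform(shape=(10, 3))  # 10 samples, 3 features each
y = nd.arange(10)                     # one label per sample
for xb, yb in mini_batch(X, y, batchSize=4):
    print(xb.shape, yb.shape)         # the last batch may be smaller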
import os

import numpy as np
from mxnet import nd

def Gen_noise(fs, T, C, fixed=None):
    """Load T consecutive files of LIGO O1 noise, reshape the strain to
    (nsample, C, fs*T), and append the GPS-time channel.

    `readnpy` is assumed to be a loader helper defined elsewhere in the
    project."""
    tNoiseKEY = 'Event'
    noise_address = os.path.join('./', 'data', 'LIGO_O1_noise_ndarray')
    root = os.path.expanduser(noise_address)
    ll = [file for file in os.listdir(root)
          if '_bug' not in file and tNoiseKEY in file]

    if fixed:
        r = 2  # always start from the same file for a reproducible sample
        noise = nd.concatenate(
            [readnpy(noise_address, file)[1][:, :C, ::4] for file in ll[r:r + T]],
            axis=0).astype('float32')  # (4096*T, C, 4096)
        noise_gps = nd.concatenate(
            [readnpy(noise_address, file)[0, :, :1, ::4] for file in ll[r:r + T]],
            axis=0).astype('float32')  # (4096*T, 1, 4096)
        noise = noise.swapaxes(1, 0).reshape(0, -1, fs * T).swapaxes(1, 0)
        noise_gps = noise_gps.swapaxes(1, 0).reshape(0, -1, fs * T).swapaxes(1, 0)
        noise = nd.concatenate([noise, noise_gps], axis=1)
        return noise[:, :2], (ll[r:r + T], noise_gps.asnumpy())

    # Otherwise pick a random starting file.
    r = np.random.randint(len(ll) - T)
    noise = nd.concatenate(
        [readnpy(noise_address, file)[1][:, :C, ::4] for file in ll[r:r + T]],
        axis=0).astype('float32')  # (4096*T, C, 4096)
    noise_gps = nd.concatenate(
        [readnpy(noise_address, file)[0, :, :1, ::4] for file in ll[r:r + T]],
        axis=0).astype('float32')  # (4096*T, 1, 4096)
    noise = noise.swapaxes(1, 0).reshape(0, -1, fs * T).swapaxes(1, 0)
    noise_gps = noise_gps.swapaxes(1, 0).reshape(0, -1, fs * T).swapaxes(1, 0)
    noise = nd.concatenate([noise, noise_gps], axis=1)
    # Shuffle the samples along the first axis.
    noise = nd.shuffle(noise)
    if T != 1:
        return noise[:, :2], (ll[r:r + T], noise[:, 2].asnumpy())
    return noise, (ll[r:r + T], noise[:, 2, 0].asnumpy())  # (nsample, C, N)
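
# A hedged usage sketch (not from the source): it assumes the
# data/LIGO_O1_noise_ndarray files are present and that readnpy is
# defined elsewhere; fs=4096 is a placeholder sampling rate.
noise, (used_files, gps) = Gen_noise(fs=4096, T=2, C=2)
print(noise.shape)  # (nsample, 2, fs*T): the two strain channels
print(used_files)   # the noise files that were concatenated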
import sys
import time

import matplotlib.pyplot as plt
import mxnet as mx
import numpy as np
from mxnet import nd, init, gluon, autograd, image
from mxnet.gluon import data as gdata, loss as gloss, nn
import d2l

CTX = d2l.try_gpu()

# Import the DenseVAE model
sys.path.insert(0, "./models")
from DenseVAE import DenseVAE

# Load the data onto the context and shuffle it
all_features = nd.load('../project_data/anime_faces.ndy')[0].as_in_context(CTX)
all_features = nd.shuffle(all_features)

# Use 80% of the data as the training set; since the anime faces have
# no particular order, just take the first 80%
n_train = int(all_features.shape[0] * 0.8)
train_features = all_features[0:n_train]
test_features = all_features[n_train:]

# Prepare the training data iterator
batch_size = 64
train_iter = gdata.DataLoader(train_features,
                              batch_size,
                              shuffle=True,
                              last_batch='keep')

# Extract the training images' shape
_, n_channels, width, height = train_features.shape
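
# A minimal sketch of consuming train_iter (the DenseVAE update itself
# is omitted; its API is not shown in this snippet):
for X in train_iter:
    # X has shape (batch_size, n_channels, width, height); the final
    # batch may be smaller because last_batch='keep'.
    pass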
def train(self, inputs, outputs, epochs=10, batch_size=32, lr=0.001,
          transform=None, verbose=True):
    """Train the neural network to fit the outputs to the inputs.

    Args:
        inputs: an ndarray of inputs.
        outputs: an ndarray of outputs.
        epochs, batch_size, lr: the parameters of the learning algorithm.
        transform: if None, take the outputs as given; otherwise fit
            against transformed outputs = transform(outputs).
        verbose: if True, results are displayed throughout training.

    Returns:
        The training history (tuple of arrays)."""
    if transform:
        outputs = transform(outputs)
    # Number of batches per epoch: n = ceil(n_samples / batch_size),
    # where shape[1] counts samples (column-major convention).
    n = (inputs.shape[1] - 1) // batch_size + 1
    if len(outputs.shape) == 1:
        outputs = outputs.reshape((1, outputs.shape[0]))
    assert inputs.shape[1] == outputs.shape[1], "Shapes do not match."
    data = nd.concat(inputs.T, outputs.T)
    efficiencies = []
    cumuLosses = []
    epochs = list(range(epochs))
    for i in epochs:
        efficiency = 0
        cumuLoss = 0
        # Reshuffle the joined (input, output) rows before each epoch.
        data = nd.shuffle(data)
        batchs = [data[k * batch_size:min(inputs.shape[1], (k + 1) * batch_size), :]
                  for k in range(n)]
        for batch in batchs:
            with autograd.record():
                output = self.compute(batch[:, :inputs.shape[0]].T)
                loss = SymNet.squared_error(output, batch[:, inputs.shape[0]:].T)
            loss.backward()
            self.adam_descent(batch_size, lr)
            output = nd.round(output)
            cumuLoss += loss.asscalar()
            efficiency += nd.sum(
                nd.equal(output, batch[:, inputs.shape[0]:].T)).asscalar()
        efficiency /= outputs.shape[1] * outputs.shape[0]
        efficiencies.append(efficiency)
        cumuLoss /= outputs.shape[1] * outputs.shape[0]
        cumuLosses.append(cumuLoss)
        if verbose:
            print("Epoch %d: Pe = %lf, loss = %lf" % (i, 1 - efficiency, cumuLoss))
    return (epochs, cumuLosses, efficiencies)
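
# A hedged usage sketch (illustrative only): `net` is a hypothetical
# SymNet-like instance, and the XOR data follows the column-major
# convention above, where shape[1] is the sample count.
X = nd.array([[0, 0, 1, 1],
              [0, 1, 0, 1]])   # inputs: (n_features, n_samples)
y = nd.array([[0, 1, 1, 0]])   # XOR targets: (1, n_samples)
epochs, losses, effs = net.train(X, y, epochs=100, batch_size=4, lr=0.01)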
import os
import sys
import time

import matplotlib.pyplot as plt
import numpy as np
from mxnet import nd
from mxnet.gluon import data as gdata
import d2l

CTX = d2l.try_gpu()

# Import the ConvVAE and the ConvDisc discriminator models
sys.path.insert(0, "./models")
from ConvVAE import ConvVAE
from ConvDisc_LeakyReLU import ConvDisc_LeakyReLU as ConvDisc

# Prepare the training data and training data iterator
print("[STATE]: Loading data onto context")
all_features = nd.shuffle(
    nd.load('../project_data/anime_faces.ndy')[0].as_in_context(CTX))

# Use 80% of the data as the training set; since the anime faces have
# no particular order, just take the first 80%
n_train = int(all_features.shape[0] * 0.8)
train_features = all_features[0:n_train]
test_features = all_features[n_train:]

batch_size = 64
train_iter = gdata.DataLoader(train_features,
                              batch_size,
                              shuffle=True,
                              last_batch='keep')
print("[STATE]: Data loaded onto context")