def __init__(self, input_size, layers=None, loss_type="mse", num_layers=None, print_loss=False):
    """Initialize the neural network.

    Parameters
    ----------
    input_size : int
        Size of a single training example (number of input features).
    layers : list, optional
        Layer objects to apply in sequence (each must be a ``Layer``).
        Defaults to an empty network.
    loss_type : str
        Loss to use ("mse", "binary_crossentropy", ...).
    num_layers : int, optional
        Kept for backward compatibility; the layer count is always
        derived from ``len(layers)`` and this argument is ignored.
    print_loss : bool
        Whether to print the loss during training.
    """
    # NOTE: the previous signature used a mutable default (layers=[]),
    # which is shared across all calls; a None sentinel avoids that.
    layers = [] if layers is None else layers

    # Map "layer1", "layer2", ... onto the supplied layer objects.
    self.layers = {
        "layer" + str(index + 1): layer for index, layer in enumerate(layers)
    }
    self.loss = Loss(loss_type)
    self.input_size = input_size
    self.num_layers = len(layers)
    self.print_loss = print_loss

    # Small random initialization (scaled by 0.01) to break symmetry
    # without saturating activations. Guard against an empty network:
    # the old code raised KeyError on self.layers["layer1"] here.
    if self.num_layers:
        # First layer maps the raw input: (units, input_size).
        self.layers["layer1"].weights = np.random.randn(
            self.layers["layer1"].units, input_size) * 0.01
        # Each subsequent layer maps the previous layer's output:
        # (units_l, units_{l-1}).
        for l in range(1, self.num_layers):
            self.layers["layer" + str(l + 1)].weights = np.random.randn(
                self.layers["layer" + str(l + 1)].units,
                self.layers["layer" + str(l)].units) * 0.01
def initialize_loss(self, model: Model) -> Callable:
    """Construct the FSVI classification objective for *model*.

    Builds a ``Loss`` from this object's variational settings and
    returns its NELBO callable (the function to minimize).
    """
    # Gather the constructor arguments in one place, then build the
    # loss and hand back the bound objective method.
    loss_config = dict(
        model=model,
        kl_scale=self.kl_scale,
        n_samples=self.n_samples,
        stochastic_linearization=self.stochastic_linearization,
    )
    return Loss(**loss_config).nelbo_fsvi_classification
# --- Plot generated data ---------------------------------------------
# Clean synthetic matrix on the upper-left, noisy observation next to it.
plt.figure()
plt.subplot(2, 2, 1)
plt.title('Generated X')
plt.imshow(X_gen)
plt.subplot(2, 2, 2)
plt.title('Generated X with noise')
plt.imshow(X_gen_noisy)

# --- Define the model -------------------------------------------------
# GPP-NMF loss over the noisy observation; kwargs collected once for
# readability (order preserved, so the call is unchanged).
model_kwargs = dict(
    M=M,
    d_link=d_link,
    h_link=h_link,
    sigN=7,
    kernel=kernel,
    cov_pars=(d_parm, h_parm),
)
loss = Loss.Loss(X_gen_noisy, **model_kwargs)

# --- Run model ---------------------------------------------------------
# Optimize the factors, then reconstruct X from them.
D_opt, H_opt, final_loss = loss.optimize(num_iters=50, print_steps=10)
X_opt = D_opt @ H_opt
print("Loss: {}".format(final_loss))

# Show the reconstruction in the lower-left panel of the same figure.
plt.subplot(2, 2, 3)
plt.title('GPP-NMF Constructed X')
plt.imshow(X_opt)
# --- Model set-up ------------------------------------------------------
M = 2
kernel = cvf.laplacian
h_link = lf.exp_to_gauss(lambd=1, sig=1)
d_link = lf.rect_gauss(s=1, sig=1)

# Kernel parameters for the D and H priors.
d_parm = 1
h_parm = 10

# --- Define the model ---------------------------------------------------
# GPP-NMF loss over the observed matrix X; kwargs collected once for
# readability (order preserved, so the call is unchanged).
model_kwargs = dict(
    M=M,
    d_link=d_link,
    h_link=h_link,
    sigN=2,
    kernel=kernel,
    cov_pars=(d_parm, h_parm),
)
loss = Loss.Loss(X, **model_kwargs)

# --- Run model ----------------------------------------------------------
# Optimize the factors, then reconstruct X from them.
D, H, final_loss = loss.optimize(num_iters=50, print_steps=10)
X_reconstruct = D @ H
print("Loss: {}".format(final_loss))

plot = True
# upper left plot is the reconstructed matrix
if plot:
    plt.figure()
def score(self):
    """Score model.

    Compares each cell's simulated CSV output against the reference
    trace in ``self.data``, accumulates per-cell loss values, prints
    the per-cell and islet-average scores, and pickles
    ``[average_score, scores]`` to the generation output directory.

    Relies on module-level names not visible in this chunk:
    ``reference``/``simulated`` (column-name pairs — presumably
    (time_col, value_col); verify against caller), ``getMultiple``,
    ``getSize``, ``Islet.env``, and ``Loss``.
    """
    print(str(datetime.datetime.now()) + '\tModel.score Score instance')
    scores = []
    # load reference data (structured array; column names from CSV header)
    ref = np.genfromtxt(self.data, delimiter=',', names=True)
    # initialize loss function; Loss.png is written next to the data file
    path = self.data.split('/')[:len(self.data.split('/')) - 1]
    path = '/'.join(path) + '/Loss.png'
    loss = Loss.Loss(int(self.mean), int(self.slope), int(self.threshold),
                     path)
    output_islet_path = Islet.env['output'] + 'Islet_' + Islet.env[
        'rid'] + '_' + self.gid + '/'
    # One CSV per simulated cell in the islet output directory.
    for output in os.listdir(output_islet_path):
        if 'csv' in output:
            # load simulated data
            sim = np.genfromtxt(output_islet_path + '/' + output,
                                delimiter=',', names=True)
            print(
                str(datetime.datetime.now()) +
                '\tModel.score Length of reference and experimental data',
                len(ref[reference[1]]), len(sim[simulated[1]]))
            # normalize data to timescale with larger steps and shorter
            # interval
            # determine smaller time step: ratio of the two time grids
            multiple = getMultiple(ref[reference[0]], sim[simulated[0]])
            # downsample the finer-grained series by that ratio
            sim_normalized = sim[simulated[1]][::int(multiple)]
            print(
                str(datetime.datetime.now()) +
                '\tModel.score Large time step / small time step', multiple,
                'length of normalized smaller time step data',
                len(sim_normalized), len(ref[reference[0]]))
            # determine smaller sample size
            size_normalized = getSize(ref[reference[0]], sim_normalized)
            print(
                str(datetime.datetime.now()) +
                '\tModel.score Minimum sample size', len(ref[reference[1]]),
                len(sim_normalized), size_normalized)
            # cut off simulated and reference data at same place
            # NOTE(review): assigning a shorter slice back into a structured-
            # array field relies on size_normalized matching the field length
            # — confirm this cannot shrink between iterations.
            ref[reference[1]] = ref[reference[1]][:size_normalized]
            sim_normalized = sim_normalized[:size_normalized]
            print(
                str(datetime.datetime.now()) +
                '\tModel.score Lengths of reference and simulated data after normalization',
                len(ref[reference[1]]), len(sim_normalized))
            # subtract one array from the other (pointwise residuals)
            output_data = []
            for val in range(len(ref[reference[1]])):
                output_data.append(ref[reference[1]][val] -
                                   sim_normalized[val])
            # NOTE(review): '\.csv' is a non-raw regex string — works today
            # but emits a DeprecationWarning; prefer r'\.csv'.
            save = Islet.env['wd'] + re.split('\.csv', output)[0] + '.png'
            print(
                str(datetime.datetime.now()) +
                '\tModel.score Path to save difference plot', save)
            # plt.clf()
            # plt.title('Difference Between Reference and Simulated Data')
            # plt.xlabel('Time (ms)')
            # plt.ylabel('Membrane Potential (mV)')
            # plt.plot(output_data)
            # plt.savefig(save)
            # per-cell score: loss of the summed residual
            scores.append(loss.getLoss(sum(output_data)))
            print(
                str(datetime.datetime.now()) + '\tModel.score Cell score',
                scores)
    # islet score: mean of per-cell scores (raises ZeroDivisionError if no
    # CSVs were found — presumably the caller guarantees at least one)
    print(
        str(datetime.datetime.now()) + '\tModel.score Islet score',
        sum(scores) / len(scores))
    # save data to appropriate file
    output_generation_path = Islet.env['output'] + 'Islets_' + Islet.env[
        'rid'] + '_' + self.gid.split('_')[0]
    output_generation_file = '/Islet_' + Islet.env[
        'rid'] + '_' + self.gid + '.pl'
    os.system('mkdir -p ' + output_generation_path)
    # NOTE(review): file handle is never closed/flushed explicitly — relies
    # on interpreter cleanup; a `with open(...)` would be safer.
    dump = open(output_generation_path + output_generation_file, 'wb')
    pickle.dump([sum(scores) / len(scores), scores], dump)