def csv_output(self):
    """This method is used to report the results of a subscription test to a csv file"""

    # determine the file name
    csv_filename = "subscription-%s-%siter-%s-%s.csv" % (self.subscriptiontype,
                                                         self.iterations,
                                                         self.chart_type.lower(),
                                                         self.testdatetime)

    # initialize the csv file
    csvfile_stream = open(csv_filename, "w")
    csvfile_writer = csv.writer(csvfile_stream, delimiter=',', quoting=csv.QUOTE_MINIMAL)

    # iterate over the SIBs
    for sib in self.results.keys():
        row = [sib]

        # add all the times
        for value in self.results[sib]:
            row.append(value)

        # add the mean, min, max and variance of the times to the row
        row.append(round(nmean(self.results[sib]), 3))
        row.append(round(nmin(self.results[sib]), 3))
        row.append(round(nmax(self.results[sib]), 3))
        row.append(round(nvar(self.results[sib]), 3))

        # write the row
        csvfile_writer.writerow(row)

    # close the csv file
    csvfile_stream.close()
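# The reporting methods in this module rely on `csv` and on what appear to be
# project-level aliases of the NumPy reductions (nmean, nmin, nmax, nvar).
# A minimal sketch of the imports implied by the usage above; the repository
# may import or define these differently:
import csv
from numpy import mean as nmean, min as nmin, max as nmax, var as nvar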
def checkValidity_MultiFrquency(X1, Y1, f1, NFFT1):
    """Check which spectral peaks of X1 and Y1 are dominant enough to be valid.

    Returns g1, the mean validity ratio of the second spectrum's peaks, and
    d1, d2, the frequencies at which valid peaks were found in X1 and Y1.
    """
    half = int(NFFT1 / 2. + 1.)
    d1_list = []
    d2_list = []
    g2_list = []

    # peaks of the first spectrum, sorted in descending order
    peak1 = np.sort(findPeakPyVersion(2. * X1[0:half]))[::-1]
    peak1_len = peak1.shape[0]
    # stop before the last peak so the mean over the remaining peaks is never empty
    for i in range(peak1_len - 1):
        # ratio of the current peak to the mean of all smaller peaks
        chk_val1 = peak1[i] / nmean(peak1[i + 1:peak1_len])
        if chk_val1 > 2.:
            # frequency bin(s) whose amplitude equals half the peak value
            index1 = np.nonzero(X1[0:half] == peak1[i] / 2.)[0]
            d1_list.append(f1[index1])

    # peaks of the second spectrum, sorted in descending order
    peak2 = np.sort(findPeakPyVersion(2. * Y1[0:half]))[::-1]
    peak2_len = peak2.shape[0]
    for i in range(peak2_len - 1):
        chk_val = peak2[i] / nmean(peak2[i + 1:peak2_len])
        if chk_val > 3.:
            g2_list.append(chk_val)
            index2 = np.nonzero(Y1[0:half] == peak2[i] / 2.)[0]
            d2_list.append(f1[index2])

    d1 = np.array(d1_list)
    d2 = np.array(d2_list)
    g2 = np.array(g2_list)

    # mean validity ratio; guard against an empty peak list
    g1 = nmean(g2) if g2.size > 0 else 0.

    return [g1, d1, d2]
def FFT_MultiFrequency_update(s1, s2):
    Fs = 31.                              # sampling frequency (Hz)
    T = 1. / Fs                           # sample time
    L = 512.                              # length of signal
    t = narange(0., L) * T                # time vector
    NFFT1 = float(2 ** pyNextPow2(L))     # next power of 2 from length of y

    # remove the DC component from both signals
    s1[0, :] = s1[0, :] - nmean(s1[0, :])
    s2[0, :] = s2[0, :] - nmean(s2[0, :])

    # amplitude spectra of the (1, L)-shaped signals, taken as 1-D arrays
    X1 = nabs(np.fft.fft(s1[0, :], int(NFFT1)) / L)
    Y1 = nabs(np.fft.fft(s2[0, :], int(NFFT1)) / L)

    # frequency axis of the single-sided spectrum
    f1 = Fs / 2. * np.linspace(0., 1., int(NFFT1 / 2. + 1.))

    [g1, d1, d2] = checkValidity_MultiFrquency(X1, Y1, f1, NFFT1)
    return [d1, X1, Y1, f1, NFFT1, d2, g1]
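# A hedged usage sketch of the pipeline above with two synthetic 512-sample
# tones at the hard-coded 31 Hz sampling rate. `pyNextPow2` and
# `findPeakPyVersion` are the repository's own helpers and are not defined in
# this excerpt, so this only illustrates the expected call and shapes.
import numpy as np

Fs, L = 31., 512
t = np.arange(L) / Fs
s1 = np.sin(2. * np.pi * 3. * t).reshape(1, -1)   # 3 Hz tone, shape (1, L)
s2 = np.sin(2. * np.pi * 5. * t).reshape(1, -1)   # 5 Hz tone, shape (1, L)

d1, X1, Y1, f1, NFFT1, d2, g1 = FFT_MultiFrequency_update(s1, s2)
print(f1[np.argmax(X1[:len(f1)])])   # dominant frequency, close to 3 Hz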
def plot_chart(self):
    """This method is used to plot the chart"""

    # determine the type of the chart
    if self.chart_type == "Bar":

        # determine the file name
        chart_filename = "update-%s-%sstep-%smax-%siter-%s-%s.svg" % (self.updatetype,
                                                                      self.step,
                                                                      self.limit,
                                                                      self.iterations,
                                                                      self.chart_type.lower(),
                                                                      self.testdatetime)

        # initialise the chart
        chart = Bar()
        chart.title = self.chart_title
        chart.x_title = self.chart_x_title
        chart.y_title = self.chart_y_title
        chart.style = eval(self.chart_style)

        # iterate over the SIBs
        for sib in self.results.keys():

            # iterate over the possible block lengths
            values = []
            for triple_length in sorted(self.results[sib].keys(), key=int):
                values.append(nmean(self.results[sib][triple_length]))

            # add the values to the chart
            chart.add(sib, values)

        # add the labels for the x axis
        # (list() is needed so the first key can be indexed under Python 3)
        x_labels = []
        first_sib = list(self.results.keys())[0]
        for triple_length in sorted(self.results[first_sib].keys(), key=int):
            x_labels.append(triple_length)
        chart.x_labels = x_labels

        # plot the chart
        chart.render_to_file(chart_filename)

    else:
        # chart type not available
        raise UpdateTestException("Chart type %s not available" % self.chart_type)
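# `chart.style = eval(self.chart_style)` evaluates a configuration string as
# code. A safer alternative (a sketch, not the repository's current approach)
# is to look the style class up by name in pygal.style:
import pygal.style

def resolve_style(style_name):
    """Resolve a pygal style class by name, e.g. "DefaultStyle", without eval()."""
    style = getattr(pygal.style, style_name, None)
    if style is None:
        raise ValueError("Unknown pygal style: %s" % style_name)
    return style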
def csv_output(self):
    """This method is used to report the results of an update test to a csv file"""

    # determine the file name
    csv_filename = "update-%s-%sstep-%smax-%siter-%s-%s.csv" % (self.updatetype,
                                                                self.step,
                                                                self.limit,
                                                                self.iterations,
                                                                self.chart_type.lower(),
                                                                self.testdatetime)

    # initialize the csv file
    csvfile_stream = open(csv_filename, "w")
    csvfile_writer = csv.writer(csvfile_stream, delimiter=',', quoting=csv.QUOTE_MINIMAL)

    # iterate over the SIBs
    for sib in self.results.keys():

        # iterate over the possible block lengths
        for triple_length in sorted(self.results[sib].keys(), key=int):
            row = [sib]

            # add the length of the block to the row
            row.append(triple_length)

            # add all the times
            for value in self.results[sib][triple_length]:
                row.append(value)

            # add the mean, min, max and variance of the times to the row
            row.append(round(nmean(self.results[sib][triple_length]), 3))
            row.append(round(nmin(self.results[sib][triple_length]), 3))
            row.append(round(nmax(self.results[sib][triple_length]), 3))
            row.append(round(nvar(self.results[sib][triple_length]), 3))

            # write the row
            csvfile_writer.writerow(row)

    # close the csv file
    csvfile_stream.close()
def fit(model: Union[Module, List[Module]],
        optimiser: Optimizer,
        loss_fn: Callable,
        epochs: int,
        dataloader: DataLoader,
        prepare_batch: Callable,
        metrics: List[Union[str, Callable]] = None,
        callbacks: List[Callback] = None,
        verbose: bool = True,
        fit_function: Callable = gradient_step,
        n_models: int = 1,
        fit_function_kwargs: dict = None):
    """Function to abstract away the training loop.

    The benefit of this function is that it allows training scripts to be much
    more readable and allows for easy re-use of common training functionality,
    provided it is written as a subclass of voicemap.Callback (following the
    Keras API).

    # Arguments
        model: Model to be fitted.
        optimiser: Optimiser to calculate the gradient step from the loss
        loss_fn: Loss function to calculate between predictions and outputs
        epochs: Number of epochs of fitting to be performed
        dataloader: `torch.DataLoader` instance to fit the model to
        prepare_batch: Callable to perform any desired preprocessing
        metrics: Optional list of metrics to evaluate the model with
        callbacks: Additional functionality to incorporate into training such
            as logging metrics to csv, model checkpointing, learning rate
            scheduling etc... See voicemap.callbacks for more.
        verbose: All print output is muted if this argument is `False`
        fit_function: Function for calculating gradients. Leave as default for
            simple supervised training on labelled batches. For more complex
            training procedures (meta-learning etc...) you will need to write
            your own fit_function
        n_models: Number of models being trained together (an ensemble if > 1)
        fit_function_kwargs: Keyword arguments to pass to `fit_function`
    """
    # avoid a mutable default argument
    fit_function_kwargs = fit_function_kwargs or {}

    # Determine number of samples:
    num_batches = len(dataloader)
    batch_size = dataloader.batch_size

    fit_function_kwargs_logs = dict(fit_function_kwargs)
    fit_function_kwargs_logs['train'] = False
    fit_function_kwargs_logs['pred_fn'] = logmeanexp_preds

    callbacks = CallbackList([DefaultCallback(), ]
                             + (callbacks or [])
                             + [ProgressBarLogger(), ])
    callbacks.set_model(model)
    callbacks.set_params({
        'num_batches': num_batches,
        'batch_size': batch_size,
        'verbose': verbose,
        'metrics': (metrics or []),
        'prepare_batch': prepare_batch,
        'loss_fn': loss_fn,
        'optimiser': optimiser,
        'n_models': n_models
    })

    if verbose:
        print('Begin training...')

    callbacks.on_train_begin()

    for epoch in range(1, epochs + 1):
        callbacks.on_epoch_begin(epoch)

        epoch_logs = {}
        for batch_index, batch in enumerate(dataloader):
            batch_logs = dict(batch=batch_index, size=(batch_size or 1))

            callbacks.on_batch_begin(batch_index, batch_logs)

            x, y = prepare_batch(batch)

            # result = {
            #     "meta_batch_loss": meta_batch_loss,
            #     "task_predictions": task_predictions,
            #     "models_losses": models_losses,
            #     "models_predictions": models_predictions,
            #     "mean_support_loss": mean_support_loss
            # }
            result = fit_function(model, optimiser, loss_fn, x, y, **fit_function_kwargs)
            loss = result['meta_batch_loss']
            y_pred = result['task_predictions']
            models_losses = result['models_losses']
            models_preds = result['models_predictions']
            support_loss = result['mean_support_loss']

            batch_logs['loss'] = loss.item()
            batch_logs['support_loss'] = support_loss

            # Loops through all metrics
            batch_logs = batch_metrics(model, y_pred, y, metrics, batch_logs)

            # regroup the per-model predictions by task:
            # task_preds : {task_idx: [model_1_pred, model_2_pred, ...]}
            task_preds = defaultdict(list)
            for model_pred in models_preds:
                for i, task in enumerate(model_pred):
                    task_preds[i].append(task)

            # combine the ensemble's predictions for each task via log-mean-exp
            logprobs_pred = []
            for task_idx, task_pred in task_preds.items():
                y_pred_ = logmeanexp_preds(task_pred)
                logprobs_pred.append(y_pred_)
            y_pred_logprobs = torch.cat(logprobs_pred)

            # TODO: make it work with MixturePredLoss
            # with torch.no_grad():
            #     loss_logprobs = loss_fn(y_pred_logprobs, y).item()
            # batch_logs['logprobs_loss'] = loss_logprobs

            batch_logs['logprobs_nll'] = nll_loss(y_pred_logprobs, y, reduction="mean").item()
            batch_logs = batch_metrics(model, y_pred_logprobs, y, metrics, batch_logs, 'logprobs')

            # per-model loss and accuracy
            for i, (model_loss, model_pred) in enumerate(zip(models_losses, models_preds)):
                batch_logs[f'loss_{i}'] = nmean(model_loss)
                batch_logs[f'categorical_accuracy_{i}'] = NAMED_METRICS[
                    'categorical_accuracy'](y, torch.cat(model_pred))

            callbacks.on_batch_end(batch_index, batch_logs)

        # Run on epoch end
        callbacks.on_epoch_end(epoch, epoch_logs)

    # Run on train end
    if verbose:
        print('Finished.')

    callbacks.on_train_end()
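# `logmeanexp_preds` is referenced above but not defined in this excerpt.
# A plausible minimal implementation (an assumption, not necessarily the
# project's own): average the ensemble members' per-class probabilities
# while staying numerically stable in log space.
import math
import torch

def logmeanexp_preds(preds):
    """Combine per-model log-probabilities into ensemble log-probabilities.

    preds: list of tensors of shape (batch, n_classes), each holding one
    model's log-probabilities. log(mean_i p_i) = logsumexp(log p_i) - log(n).
    """
    stacked = torch.stack(preds)                  # (n_models, batch, n_classes)
    return torch.logsumexp(stacked, dim=0) - math.log(stacked.shape[0])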