def testWeights(name, values):
    flistwidget, fnames = values['flist']
    weightfile = values['weightfile'][1]
    if not weightfile:
        Error('You must first generate weights or select a file from which ' + \
            'to load the weights.')
        return

    # Parse and validate the test parameters entered in the GUI.
    errors = []
    label, value = values['test-args'][1]['matrixshape']
    matrixshape = parsematlab.parse(value.lower().replace('x', ' '))
    if isinstance(matrixshape, str):
        # parsematlab.parse() returns an error message string on failure.
        errors.append(label + '\n    ' + value.replace('\n', '\n    '))
    if np.isscalar(matrixshape):
        matrixshape = [matrixshape]
    label, value = values['test-args'][1]['repetitions']
    repetitions = parsematlab.parse(value)
    if isinstance(repetitions, str):
        errors.append(label + '\n    ' + value.replace('\n', '\n    '))
    if len(errors) > 0:
        Error('\n\n'.join(errors))
        return

    classifier = loaddata.load_weights(weightfile)
    if isinstance(classifier, str):
        Error(classifier)
        return

    removeanomalies = values['generation-args'][1]['removeanomalies'][1]
    data = []
    type = []
    samplingrate = None
    try:
        # Load every selected data file and check that they are mutually consistent.
        for fname in fnames:
            result = loaddata.load_data(fname, [0, classifier.shape[0]], None, True,
                removeanomalies = removeanomalies)
            if isinstance(result, str):
                Error(result)
                return
            if samplingrate is None:
                samplingrate = result[2]
            if samplingrate != result[2]:
                Error('Not all data files have the same sampling rate.')
                return
            data.append(result[0])
            type.append(result[1])
        if len(data) == 0 or len(type) == 0:
            Error('You must select some data upon which to test the weights.')
            return
        try:
            data = np.concatenate(data)
        except ValueError:
            Error('Not all data files have the same number of channels.')
            return
        type = np.concatenate(type)

        result = testweights.test_weights(data, type, classifier, matrixshape,
            repetitions)
        if isinstance(result, str):
            Error(result)
            return
        score, correctness = result

        # Build the report shown to the user.
        message = '\n'.join(fnames)
        message += '\n\n%s\n\nExpected accuracy for a %s matrix:\n\n' % \
            (weightfile, 'x'.join(str(i) for i in matrixshape))
        for i in range(len(repetitions)):
            if repetitions[i] != 1:
                message += '%i repetitions: %0.1f%%\n' % \
                    (repetitions[i], correctness[i] * 100)
            else:
                message += '1 repetition: %0.1f%%\n' % (correctness[i] * 100)
        message += '\nTarget STDEV: %f\nNontarget STDEV: %f\n' % score
        Info(message)
    except MemoryError:
        Error('Could not fit all the selected data in memory.\n' + \
            'Try loading fewer data files.')
        return
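# Illustrative sketch (not part of the original tool): judging from how
# testWeights() indexes it, the `values` mapping pairs each GUI field with a
# (label-or-widget, value) tuple. The field names below come from the code
# above; the concrete example values are assumptions and may not match the
# real GUI wiring.
#
#   values = {
#       'flist':           (flist_widget, ['session01.dat', 'session02.dat']),
#       'weightfile':      ('Weight file', 'weights.pkl'),
#       'test-args':       (None, {'matrixshape': ('Matrix shape', '6x6'),
#                                  'repetitions': ('Repetitions', '1:15')}),
#       'generation-args': (None, {'removeanomalies': ('Remove anomalies', True),
#                                  'responsewindow':  ('Response window (ms)', '0 800')}),
#   }
#   testWeights('Test Weights', values)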
def diagnosticPlot(name, values):
    args = values['generation-args'][1]

    # Parse and validate the generation parameters entered in the GUI.
    errors = []
    for key in ['responsewindow']:
        label, value = args[key]
        value = parsematlab.parse(value)
        if isinstance(value, str):
            # parsematlab.parse() returns an error message string on failure.
            errors.append(label + '\n    ' + value.replace('\n', '\n    '))
        args[key] = value
    if len(errors) > 0:
        Error('\n\n'.join(errors))
        return

    response_window = args['responsewindow']
    fnames = values['flist'][1]
    removeanomalies = args['removeanomalies'][1]
    weightfile = values['weightfile'][1]

    data = []
    type = []
    samplingrate = None
    try:
        # Load every selected data file and check that they are mutually consistent.
        for fname in fnames:
            result = loaddata.load_data(fname, response_window, None,
                removeanomalies = removeanomalies)
            if isinstance(result, str):
                Error(result)
                return
            if samplingrate is None:
                samplingrate = result[2]
            if samplingrate != result[2]:
                Error('Not all data files have the same sampling rate.')
                return
            data.append(result[0])
            type.append(result[1])
        if len(data) == 0 or len(type) == 0:
            Error('You must select some data to plot.')
            return
        try:
            data = np.concatenate(data)
        except ValueError:
            Error('Not all data files have the same number of channels.')
            return
        type = np.concatenate(type)

        if weightfile:
            # Embed the classifier weights in an array matching the data's
            # (samples x channels) shape so they can be plotted alongside it.
            weights = loaddata.load_weights(weightfile)
            if isinstance(weights, str):
                Error(weights)
                return
            classifier = np.zeros(data.shape[1:])
            classifier[:weights.shape[0], :weights.shape[1]] = weights
            classifier_max = max(abs(classifier.max()), abs(classifier.min()))
        else:
            classifier = None
        num_plots = 3 if classifier is None else 4

        # Signed correlation between each (sample, channel) feature and the
        # target/non-target labels.
        signed_r = np.zeros(data.shape[1:])
        for row in range(signed_r.shape[0]):
            for col in range(signed_r.shape[1]):
                signed_r[row, col] = stats.linregress(
                    data[:, row, col], type
                )[2]
        signed_r_max = max(abs(signed_r.max()), abs(signed_r.min()))

        x = np.arange(data.shape[1]) * 1000 / samplingrate
        target = data[type.nonzero()[0]].mean(axis = 0)
        nontarget = data[(~type).nonzero()[0]].mean(axis = 0)
        vmin, vmax = ylim = [min(target.min(), nontarget.min()),
                             max(target.max(), nontarget.max())]

        fig = pylab.figure()
        fig.subplots_adjust(bottom = 0.06, top = 0.93, hspace = 0.45)

        # Panel 1: average target response.
        master_ax = ax = pylab.subplot(num_plots, 1, 1)
        pylab.title('Target', fontsize = 'medium')
        pylab.imshow(target.transpose(), interpolation = 'nearest',
            cmap = 'PRGn', aspect = 'auto', vmin = vmin, vmax = vmax,
            origin = 'lower',
            extent = (
                0, data.shape[1] * 1000 / samplingrate,
                -0.5, data.shape[2] - 0.5
            )
        )
        pylab.xticks(fontsize = 'small')
        pylab.yticks(range(data.shape[2]),
            [str(i) for i in range(1, data.shape[2] + 1)], fontsize = 'small')
        pylab.axes(pylab.colorbar().ax)
        pylab.yticks(fontsize = 'small')

        # Panel 2: average non-target response.
        ax = pylab.subplot(num_plots, 1, 2, sharex = master_ax,
            sharey = master_ax)
        pylab.title('Non-Target', fontsize = 'medium')
        pylab.imshow(nontarget.transpose(), interpolation = 'nearest',
            cmap = 'PRGn', aspect = 'auto', vmin = vmin, vmax = vmax,
            origin = 'lower',
            extent = (
                0, data.shape[1] * 1000 / samplingrate,
                -0.5, data.shape[2] - 0.5
            )
        )
        pylab.xticks(fontsize = 'small')
        pylab.yticks(range(data.shape[2]),
            [str(i) for i in range(1, data.shape[2] + 1)], fontsize = 'small')
        pylab.axes(pylab.colorbar().ax)
        pylab.yticks(fontsize = 'small')

        # Panel 3: correlation coefficient map.
        ax = pylab.subplot(num_plots, 1, 3, sharex = master_ax,
            sharey = master_ax)
        pylab.title('Correlation Coefficient', fontsize = 'medium')
        pylab.imshow(signed_r.transpose(), interpolation = 'nearest',
            cmap = 'PRGn', aspect = 'auto', vmin = -signed_r_max,
            vmax = signed_r_max, origin = 'lower',
            extent = (
                0, data.shape[1] * 1000 / samplingrate,
                -0.5, data.shape[2] - 0.5
            )
        )
        pylab.xticks(fontsize = 'small')
        pylab.yticks(range(data.shape[2]),
            [str(i) for i in range(1, data.shape[2] + 1)], fontsize = 'small')
        pylab.axes(pylab.colorbar().ax)
        pylab.yticks(fontsize = 'small')

        if classifier is None:
            return

        # Panel 4: classifier weights, padded to the data's shape.
        ax = pylab.subplot(num_plots, 1, 4, sharex = master_ax,
            sharey = master_ax)
        pylab.title('Classifier Weights', fontsize = 'medium')
        pylab.imshow(classifier.transpose(), interpolation = 'nearest',
            cmap = 'PRGn', aspect = 'auto', vmin = -classifier_max,
            vmax = classifier_max, origin = 'lower',
            extent = (
                0, data.shape[1] * 1000 / samplingrate,
                -0.5, data.shape[2] - 0.5
            )
        )
        pylab.xticks(fontsize = 'small')
        pylab.yticks(range(data.shape[2]),
            [str(i) for i in range(1, data.shape[2] + 1)], fontsize = 'small')
        pylab.axes(pylab.colorbar().ax)
        pylab.yticks(fontsize = 'small')
    except MemoryError:
        Error('Could not fit all the selected data in memory.\n' + \
            'Try loading fewer data files.')
        return
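# Usage note (assumption, not from the original source): diagnosticPlot()
# expects the same `values` layout sketched above testWeights(), plus a
# parseable 'responsewindow' entry, and it only builds the figure with pylab.
# If the surrounding GUI does not run matplotlib in interactive mode, a call
# such as pylab.show() would still be needed to display the window, e.g.:
#
#   diagnosticPlot('Diagnostic Plot', values)
#   pylab.show()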