def same_count_im(self, cmat):
    @np.vectorize
    def pixel_scale(e, minn, maxx):
        # Scale e from [minn, maxx] into pixel range.
        # Fixed missing parentheses: was `(e - minn) / maxx - minn`, which only
        # happens to give the same result because this is always called with minn=0.
        return 256 * ((e - minn) / (maxx - minn))

    # Drop the first row/column, scale to pixel range, then add a channel axis.
    cmat = pixel_scale(arr(cmat)[1:, 1:], 0, len(cmat) - 1)
    cmat.shape = tuple(list(cmat.shape) + [1])
    # Zero red and green channels; the scaled matrix goes in the blue channel.
    cmat = concat(make3d(zeros(len(cmat))), make3d(zeros(len(cmat))), cmat, axis=2)
    return cmat
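# The helpers above (arr, zeros, make3d, concat) come from this project's own
# utility library. As a rough, self-contained sketch of the same idea using only
# numpy (an illustration, not the project's actual code path): min-max scale a
# matrix to pixel range and place it in the blue channel of an RGB image.
import numpy as np

def cmat_to_blue_image(cmat: np.ndarray) -> np.ndarray:
    body = np.asarray(cmat, dtype=float)[1:, 1:]              # drop first row/column, as above
    scaled = 255 * (body - body.min()) / max(body.max() - body.min(), 1e-12)
    blank = np.zeros_like(scaled)
    return np.stack([blank, blank, scaled], axis=2)           # (H, W, 3), data in blue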
def val_eval(self):
    nnstate.CURRENT_TRUE_MAP = self.val_data.class_label_map
    ds = self.val_data.dataset(self.HEIGHT_WIDTH)
    steps = self.val_data.num_steps
    log('Testing... (ims=$,steps=$)', len(self.val_data), steps)
    net_mets.cmat = zeros(
        len(listkeys(nnstate.CURRENT_PRED_MAP)),
        len(listkeys(nnstate.CURRENT_TRUE_MAP))
    )
    nnstate.TEST_STEPS = steps
    return self.net.evaluate(
        ds,
        verbose=self.VERBOSE_MODE,
        steps=steps,
        use_multiprocessing=True,
        workers=16,
    )
def train(self):
    log('training network...')
    nnstate.CURRENT_PRED_MAP = self.train_data.class_label_map
    nnstate.CURRENT_TRUE_MAP = self.train_data.class_label_map
    ds = self.train_data.dataset(self.HEIGHT_WIDTH)
    steps = self.train_data.num_steps
    log('Training... (ims=$,steps=$)', len(self.train_data), steps)
    net_mets.cmat = zeros(
        len(listkeys(nnstate.CURRENT_PRED_MAP)),
        len(listkeys(nnstate.CURRENT_TRUE_MAP))
    )
    history = self.net.fit(
        # x,y,
        ds,
        epochs=1,
        verbose=self.VERBOSE_MODE,
        use_multiprocessing=True,
        workers=16,
        steps_per_epoch=steps,
        shuffle=False
    )
    return history
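# For reference, a minimal standalone sketch of the single-epoch Keras training
# call used above, using only standard tf.keras arguments (the function name and
# its parameters here are illustrative, not this project's API):
import tensorflow as tf

def fit_one_epoch(model: tf.keras.Model, ds: tf.data.Dataset, steps: int):
    # One pass over a (possibly repeating) dataset, mirroring the fit() call above.
    return model.fit(ds, epochs=1, steps_per_epoch=steps, shuffle=False, verbose=1)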
def test_record(self, ei):
    nnstate.CURRENT_PRED_MAP = self.train_data.class_label_map
    nnstate.CURRENT_TRUE_MAP = self.test_data.class_label_map
    ds = self.test_data.dataset(self.HEIGHT_WIDTH)
    steps = self.test_data.num_steps
    log('Recording(1)... (ims=$,steps=$)', len(self.test_data), steps)
    net_mets.cmat = zeros(
        len(listkeys(nnstate.CURRENT_PRED_MAP)),
        len(listkeys(nnstate.CURRENT_TRUE_MAP))
    )

    # Secondary model that exposes the activations of an intermediate layer.
    inter_lay_name = self.net.layers[self.INTER_LAY].name
    inter_output_model = self.tf.python.keras.models.Model(
        self.net.input,
        self.net.get_layer(index=self.INTER_LAY).output
    )

    y_pred = arr(self.net.predict(
        ds,
        steps=steps,
        verbose=Verbose.PROGRESS_BAR,
        use_multiprocessing=True,
        workers=16,
    ))
    log('done recording(1)')

    if len(y_pred.shape) == 3:
        # GNET has 3 outputs, all identical I guess but not sure
        y_pred = y_pred[2]

    log('Recording(2)... (ims=$,steps=$)', len(self.test_data), steps)
    inter_activations = arr(inter_output_model.predict(
        ds,
        steps=steps,
        verbose=Verbose.PROGRESS_BAR,
        use_multiprocessing=True,
        workers=16
    ))
    log('done recording(2)')

    x, _ = self.test_data.x(self)
    y = self.test_data.y(self)
    y_true = arr(y).flatten()

    # Flatten batched raw images into one (n_images, n_pixels) matrix.
    raw_images = x
    raw_images2 = []
    if len(x.shape) == 5:
        for batch in raw_images:
            for im in batch:
                raw_images2.append(im)
    else:
        raw_images2 = raw_images
    raw_images = arr(raw_images2)
    raw_images2 = []
    for i in itr(raw_images):
        raw_images2.append(raw_images[i].flatten())
    raw_images = arr(raw_images2)

    # Flatten the intermediate activations the same way: one row per image.
    inter_shape = inter_activations.shape
    inter_activations = np.reshape(inter_activations, (inter_shape[0], -1))

    BLOCK_LEN = 10  # I'm writing this bc I think it was always 10 back when I ran this code

    TEST_CLASS_MAP = nnstate.CURRENT_TRUE_MAP
    clas_set = ClassSet([Class(name=k, index=v) for k, v in TEST_CLASS_MAP.items()])

    def run_and_save_rsa(nam, mat1, layer_name=None, layer_i=None):
        index_to_cn = {v: k for k, v in TEST_CLASS_MAP.items()}
        feature_matrix = FeatureMatrix(
            mat1,
            clas_set,
            [Class(index_to_cn[iii], iii) for iii, yt in enum(y_true)]
        )
        feature_matrix.sort_by_class_name()
        fd = feature_matrix.compare(rsa_norm).image_plot()
        tit = f'L2-{nam}'
        fd.title = f'{tit} ({nnstate.FLAGS.arch}{nnstate.FLAGS.ntrain}E{ei + 1})'
        if nam == 'Inter':
            fd.title = f'{fd.title}(Layer{layer_i}:{layer_name})'
        save_dnn_data(fd, tit, f'CM{ei + 1}', 'mfig')

    run_and_save_rsa('Output', y_pred, layer_name='Output', layer_i='-1')
    run_and_save_rsa('Inter', inter_activations, layer_name=inter_lay_name, layer_i=self.INTER_LAY)
    run_and_save_rsa('Raw', raw_images)

    for met in net_mets.METS_TO_USE():
        met(y_true, y_pred)

    log('done recording.')
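# The intermediate-activation model above follows the standard Keras pattern of
# building a second Model that shares the original graph but outputs an inner
# layer. A minimal sketch of that pattern with plain tf.keras (the function and
# its arguments are illustrative, not this project's code):
import numpy as np
import tensorflow as tf

def intermediate_activations(model: tf.keras.Model, data, layer_index: int) -> np.ndarray:
    inter_model = tf.keras.Model(
        inputs=model.input,
        outputs=model.get_layer(index=layer_index).output,
    )
    acts = inter_model.predict(data)
    # Flatten everything but the batch dimension, as test_record() does before RSA.
    return np.reshape(acts, (acts.shape[0], -1))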
def compare(self, fun: Type[Correlation], GPU=False):
    special_confuse_mat = zeros(len(self.data), len(self.data))
    if (fun == PearsonCorrelation) and any([min(x) == max(x) for x in self.data]):
        # Pearson's correlation coefficient fails if two arrays are compared that
        # have a zero standard deviation product (divide by zero).
        # This check prevents that case.
        raise MathFail
    data = self.data

    def _fun(i):  # cannot be lambda?
        return [(i, j, fun.fun(data[i, :], data[j, :])) for j in itr(data)]

    def _fun_tf(data):  # cannot be lambda?
        return fun.fun_tf(data)

    MULTIPROCESS = False
    from pathos.multiprocessing import ProcessPool

    if islinux() and MULTIPROCESS:  # slower than GPU, buggy, not optimized
        with ProcessPool() as p:
            r = p.map(_fun, itr(self.data))
            for results in r:
                for rr in results:
                    special_confuse_mat[rr[0], rr[1]] = rr[2]
    elif islinux() and GPU:
        import tensorflow as tf
        special_confuse_mat = tf.zeros((len(self.data), len(self.data)))
        with tf.device('/GPU:0'):
            special_confuse_mat = _fun_tf(self.data).numpy()
    else:
        r = listmap(_fun, itr(self.data))
        for results in r:
            for rr in results:
                special_confuse_mat[rr[0], rr[1]] = rr[2]

    return ComparisonMatrix(
        data=nan_above_eye(naneye(special_confuse_mat)),
        method_used=fun.__name__,
        ground_truth=self.ground_truth,
        class_set=self.class_set
    )
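# For the Pearson case, the row-by-row loop above computes what numpy's corrcoef
# gives directly. A minimal sketch of that equivalence, assuming each row of
# `data` is one feature vector being compared (illustration only):
import numpy as np

def pearson_comparison_matrix(data: np.ndarray) -> np.ndarray:
    if np.any(data.std(axis=1) == 0):
        raise ValueError('zero-variance row: Pearson correlation is undefined')
    return np.corrcoef(data)   # (n, n) matrix of pairwise row correlations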
def _transform(self, x: np.ndarray) -> np.ndarray:
    # Build a Gaussian pyramid of the input image (Itti-style scales).
    scales = [x]
    for i in range(1, 9):
        import cv2  # 3 SECOND IMPORT
        x = cv2.pyrDown(x)
        scales.append(x)
        self.intermediate_hook(f'scale_{i}', x)

    for center in [2, 3, 4]:
        for surround in [center + s for s in [3, 4]]:
            scale_diff = surround - center
            center_im = scales[center]
            surround_im = scales[surround]  # fixed: was scales[center], which made surround_im identical to center_im
            feat_intense = zeros(center_im.shape[0], center_im.shape[1])
            feat_rg = zeros(center_im.shape[0], center_im.shape[1])
            feat_by = zeros(center_im.shape[0], center_im.shape[1])

            for px_row in range(0, center_im.shape[0]):
                # Map the center-scale row index onto the coarser surround scale.
                px_row_sur = px_row
                for i in range(scale_diff):
                    px_row_sur = px_row_sur / 2
                px_row_sur = round(px_row_sur)

                for px_col in range(0, center_im.shape[1]):
                    px_col_sur = px_col
                    for i in range(scale_diff):
                        px_col_sur = px_col_sur / 2
                    px_col_sur = round(px_col_sur)

                    # Intensity: center-surround difference of summed channels.
                    center_intense = sum(center_im[px_row, px_col])
                    surround_intense = sum(surround_im[px_row_sur, px_col_sur])
                    feat_intense[px_row, px_col] = abs(center_intense - surround_intense)

                    err('stick with intensity for now, do full learning experiments with it before moving on!')
                    err('DUH! if its black its black... its zero.')

                    # Red-green opponency.
                    center_rg = (center_im[px_row, px_col][0] / center_intense) - (
                        center_im[px_row, px_col][1] / center_intense)
                    surround_gr = (surround_im[px_row_sur, px_col_sur][1] / surround_intense) - (
                        surround_im[px_row_sur, px_col_sur][0] / surround_intense)
                    feat_rg[px_row, px_col] = abs(center_rg - surround_gr)

                    # Blue-yellow opponency (yellow approximated as mean of R and G).
                    cen_yellow = mean([
                        center_im[px_row, px_col, 0],
                        center_im[px_row, px_col, 1]
                    ])
                    sur_yellow = mean([
                        surround_im[px_row_sur, px_col_sur, 0],
                        surround_im[px_row_sur, px_col_sur, 1]
                    ])
                    center_by = (center_im[px_row, px_col][2] / center_intense) - (
                        cen_yellow / center_intense)
                    surround_yb = (sur_yellow / surround_intense) - (
                        surround_im[px_row_sur, px_col_sur][2] / surround_intense)
                    feat_by[px_row, px_col] = abs(center_by - surround_yb)

            self.intermediate_hook(f'feat_intense_{center}_{surround}',
                                   feat_intense - 1)  # minus 1 since plus one above?
            if center == 2 and surround == 5:
                output = feat_intense - 1  # minus 1 since plus one above?

            mn = amin(feat_rg)
            mx = amax(feat_rg)

            def vis(px):
                px = px - mn
                px = px / (mx - mn)
                px = px * 255
                return px - 1  # minus 1 since plus one above?

            self.intermediate_hook(f'feat_rg_{center}_{surround}', vectorize(vis)(feat_rg))

            mn = amin(feat_by)
            mx = amax(feat_by)

            def vis(px):
                px = px - mn
                px = px / (mx - mn)
                px = px * 255
                return px - 1  # minus 1 since plus one above?

            self.intermediate_hook(f'feat_by_{center}_{surround}', vectorize(vis)(feat_by))

    # err('step 1. figure out what resolution it is supposed to return')
    return output  # not done?
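# The per-pixel loops above implement an Itti/Koch-style center-surround
# difference by hand. Below is a vectorized sketch of the intensity channel
# only, using cv2.resize to bring the surround scale back up to the center
# scale; an illustration under that assumption, not a drop-in replacement.
import cv2
import numpy as np

def intensity_center_surround(scales, center: int, surround: int) -> np.ndarray:
    center_im = scales[center].astype(np.float32)
    surround_im = scales[surround].astype(np.float32)
    center_int = center_im.sum(axis=2)                        # per-pixel intensity
    surround_int = surround_im.sum(axis=2)
    surround_up = cv2.resize(
        surround_int,
        (center_int.shape[1], center_int.shape[0]),           # dsize is (width, height)
        interpolation=cv2.INTER_NEAREST,
    )
    return np.abs(center_int - surround_up)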