def SaabTransform(self, X, saab, train, layer):
    """Shrink the input with this layer's configured function, then apply
    (and, when train=True, first fit) a Saab transform.

    Args:
        X: input array; last axis is the feature dimension fed to Saab.
        saab: existing Saab model to apply; ignored and replaced when train=True.
        train: if True, fit a fresh Saab model on X before transforming.
        layer: index into self.shrinkArgs / self.SaabArgs.

    Returns:
        (saab, transformed): the (possibly newly fitted) model and the
        transformed data reshaped back to the shrunk spatial shape.
    """
    shrinkArg, SaabArg = self.shrinkArgs[layer], self.SaabArgs[layer]
    assert ('func' in shrinkArg.keys()), "shrinkArg must contain key 'func'!"
    X = shrinkArg['func'](X, shrinkArg)
    S = list(X.shape)
    # flatten all leading dims; Saab operates on (num_samples, num_features)
    X = X.reshape(-1, S[-1])
    if SaabArg['num_AC_kernels'] != -1:
        # output feature width changes when a kernel count is forced
        S[-1] = SaabArg['num_AC_kernels']
    if train:
        # Optional quantization / PCA-backend settings with defaults.
        isInteger = SaabArg.get('isInteger', False)
        bits = SaabArg.get('bits', 8)
        opType = SaabArg.get('opType', 'int32')
        # BUG FIX: this previously read SaabArg['opType'] (copy-paste error),
        # so a user-supplied 'whichPCA' was silently ignored.
        whichPCA = SaabArg.get('whichPCA', 'numpy')
        saab = Saab(num_kernels=SaabArg['num_AC_kernels'],
                    useDC=SaabArg['useDC'],
                    needBias=SaabArg['needBias'],
                    isInteger=isInteger, bits=bits, opType=opType)
        saab.fit(X, whichPCA=whichPCA)
    transformed = saab.transform(X).reshape(S)
    return saab, transformed
class Pixelhop():
    """One PixelHop unit: neighborhood construction followed by a Saab transform.

    `fit` learns the Saab kernels from neighborhood-expanded training data;
    `transform` applies them and also returns the DC component.
    """

    def __init__(self, dilate, pad, SaabArg, batch=None):
        # Saab model configured from SaabArg; fitted lazily in fit().
        self.saab = Saab(num_kernels=SaabArg['num_AC_kernels'],
                         useDC=SaabArg['useDC'],
                         needBias=SaabArg['needBias'])
        # normalize dilate into a (nested) plain list for the neighbour helpers
        self.dilate = np.array([dilate]).tolist()
        self.pad = pad
        # when batch is set, neighborhood extraction runs in memory-saving batches
        self.batch = batch
        self.trained = False

    def _neighbour(self, X):
        # Shared neighborhood extraction; batched variant avoids memory blow-up.
        if self.batch is None:
            return PixelHop_Neighbour(X, self.dilate, self.pad)
        return Batch_PixelHop_Neighbour(X, self.dilate, self.pad, self.batch)

    def fit(self, X):
        """Learn the Saab kernels from X (neighborhood-expanded, then flattened)."""
        X = self._neighbour(X)
        X = X.reshape(-1, X.shape[-1])
        self.saab.fit(X)
        self.trained = True

    def transform(self, X):
        """Apply the fitted Saab transform; returns (features, DC).

        Raises AssertionError if fit() has not been called.
        """
        assert self.trained, "Call fit first!"
        X = self._neighbour(X)
        S = X.shape
        X = X.reshape(-1, X.shape[-1])
        X, DC = self.saab.transform(X)
        # restore the (N, H, W, channels) layout
        X = X.reshape(S[0], S[1], S[2], -1)
        return X, DC
class PixelHop_Unit():
    """Single PixelHop unit: sliding-window patch extraction (Shrink) plus a
    Saab transform trained on the cached training patches."""

    def __init__(self, X, num_kernels, window=5, stride=1):
        # extract and cache training patches: N*28*28*(5*5*1)
        self.X = self.Shrink(X, window, stride)
        self.S = list(self.X.shape)
        # flatten spatial positions: (N*28*28)*(5*5*1)
        self.X = self.X.reshape(-1, self.S[-1])
        self.num_kernels = num_kernels
        self.saab = None  # set by train()
        self.window = window
        self.stride = stride

    def train(self):
        """Fit the Saab transform on the cached training patches."""
        self.saab = Saab(num_kernels=self.num_kernels, useDC=True)
        self.saab.fit(self.X)

    def transform(self, X):
        """Apply the trained Saab transform to new data.

        Returns (saab, transformed) where transformed has shape N*H*W*kernels.
        Raises AssertionError if train() has not been called.
        """
        # Fail fast before doing any patch-extraction work (the original
        # asserted only after Shrink/reshape had already run).
        assert self.saab is not None, "the model hasn't been trained, must call train() first!"
        X = self.Shrink(X, self.window, self.stride)  # N*28*28*(5*5*1)
        S = list(X.shape)
        X = X.reshape(-1, S[-1])  # (N*28*28)*(5*5*1)
        transformed = self.saab.transform(X).reshape(S[0], S[1], S[2], -1)  # N*28*28*25
        return self.saab, transformed

    def Shrink(self, X, win, stride):
        """Sliding win x win windows per sample/channel, flattened per position."""
        X = view_as_windows(X, (1, win, win, 1), (1, stride, stride, 1))
        return X.reshape(X.shape[0], X.shape[1], X.shape[2], -1)
def __init__(self, dilate, pad, SaabArg, batch=None):
    """Configure the unit: build the (unfitted) Saab model and store the
    neighborhood parameters. `batch=None` means non-batched extraction."""
    self.pad = pad
    self.batch = batch
    self.trained = False
    # normalize dilate into a (nested) plain list for the neighbour helpers
    self.dilate = np.array([dilate]).tolist()
    # Saab model is configured here but only fitted later
    self.saab = Saab(num_kernels=SaabArg['num_AC_kernels'],
                     useDC=SaabArg['useDC'],
                     needBias=SaabArg['needBias'])
def SaabFit(self, X, layer, bias=0):
    """Learn and return a Saab model for the given layer.

    The layer's shrink function is applied first, the result is flattened to
    (num_samples, num_features), and a fresh Saab model is fitted on it.
    """
    shrinkArg = self.shrinkArgs[layer]
    SaabArg = self.SaabArgs[layer]
    assert ('func' in shrinkArg.keys()), "shrinkArg must contain key 'func'!"
    shrunk = shrinkArg['func'](X, shrinkArg)
    dims = list(shrunk.shape)
    flat = shrunk.reshape(-1, dims[-1])
    model = Saab(num_kernels=SaabArg['num_AC_kernels'],
                 needBias=SaabArg['needBias'],
                 bias=bias)
    model.fit(flat)
    return model
def PixelHop_Unit(X, num_kernels, saab=None, window=5, stride=1, train=True):
    """Extract patches from X and apply (optionally first fit) a Saab transform.

    Args:
        X: input of shape (N, H, W, C).
        num_kernels: number of Saab kernels to learn when training.
        saab: pre-fitted Saab model; required when train=False.
        window, stride: patch-extraction parameters passed to Shrink.
        train: if True, fit a new Saab model on the extracted patches.

    Returns:
        (saab, transformed) with transformed shaped N*H*W*kernels.
    """
    print('input shape', X.shape)
    # BUG FIX: window/stride were previously hard-coded as Shrink(X, 5, 1),
    # silently ignoring the function's own parameters.
    X = Shrink(X, window, stride)
    print('extracting patches', X.shape)
    S = list(X.shape)
    X = X.reshape(-1, S[-1])
    if train:
        saab = Saab(num_kernels=num_kernels, useDC=True, needBias=True)
        saab.fit(X)
    transformed = saab.transform(X).reshape(S[0], S[1], S[2], -1)
    print('transformed shape', transformed.shape)
    return saab, transformed
def SaabTransform(self, X, saab, train, layer):
    """Apply (and, when train=True, first fit) the Saab transform for `layer`,
    running the transform in five slices to limit peak memory, then pool."""
    SaabArg = self.SaabArgs[layer]
    X = self.Neighbor(X, layer)
    S = list(X.shape)
    X = X.reshape(-1, S[-1])
    if SaabArg['num_AC_kernels'] != -1:
        S[-1] = SaabArg['num_AC_kernels']
    if train == True:
        saab = Saab(num_kernels=SaabArg['num_AC_kernels'],
                    useDC=SaabArg['useDC'],
                    needBias=SaabArg['needBias'])
        saab.fit(X)
    # Transform in 5 chunks to avoid a memory error; the last chunk runs to
    # the end so no rows are dropped when the row count isn't divisible by 5.
    step = int(X.shape[0] / 5)
    bounds = [0, step, 2 * step, 3 * step, 4 * step, X.shape[0]]
    pieces = [saab.transform(X[lo:hi]) for lo, hi in zip(bounds[:-1], bounds[1:])]
    del X
    X = np.concatenate(pieces, axis=0)
    del pieces
    X = X.reshape(S)
    X = self.Pooling(X, layer)
    return saab, X
def SaabTransform(self, X, saab, train, layer):
    """Shrink the input with this layer's configured function, then apply
    (and, when train=True, first fit) a Saab transform.

    Returns (saab, transformed, dc): the model, the transformed data reshaped
    to the shrunk spatial shape, and the DC component.
    """
    shrinkArg = self.shrinkArgs[layer]
    SaabArg = self.SaabArgs[layer]
    assert ('func' in shrinkArg.keys()), "shrinkArg must contain key 'func'!"
    X = shrinkArg['func'](X, shrinkArg)
    out_shape = list(X.shape)
    # Saab operates on a flat (num_samples, num_features) matrix
    X = X.reshape(-1, out_shape[-1])
    n_kernels = SaabArg['num_AC_kernels']
    if n_kernels != -1:
        # the feature width of the output changes when a kernel count is forced
        out_shape[-1] = n_kernels
    if train == True:
        saab = Saab(num_kernels=n_kernels,
                    useDC=SaabArg['useDC'],
                    needBias=SaabArg['needBias'])
        saab.fit(X)
    transformed, dc = saab.transform(X)
    return saab, transformed.reshape(out_shape), dc
def fit_hop1(self, images, verbose):
    """Train the first hop: fit one bias-free Saab model on the raw images,
    keep the channels whose normalized energy exceeds self.keep_thr, and
    store the max-pooled output as this hop's features."""
    if verbose:
        print('Hop1')
        print("Input shape:", images.shape)
    model = Saab(kernel_size=self.kernel_sizes[0], bias_flag=False)
    model.fit(images)
    # normalized eigenvalue spectrum = per-channel energy
    energy = model.eigenvalues / sum(model.eigenvalues)
    self.saabs['Hop1'] = [model]
    self.energies['Hop1'] = [energy]
    # number of channels above the energy threshold
    n_channels = np.sum(energy > self.keep_thr)
    pooled = self.max_pooling(model.transform(images, n_channels))
    self.features['Hop1'] = [pooled]
    # (parent saab id, parent channel id, kept channels) — root has no parent
    self.info['Hop1'] = [(0, 0, n_channels)]
    if verbose:
        print("Output shape:", self.features['Hop1'][-1].shape)
def fit_hop_n(self, n, verbose):
    """Train the nth hop (n > 1).

    For every Saab model of hop n-1 and every parent channel whose energy
    exceeds self.split_thr, fit a new child Saab model on that single
    channel's feature map, keep the child channels whose (parent-weighted)
    energy exceeds self.keep_thr, and store the max-pooled outputs.
    """
    if verbose:
        print('Hop' + str(n))
    # fresh containers for this hop's models, energies, features and lineage
    self.saabs['Hop' + str(n)] = []
    self.energies['Hop' + str(n)] = []
    self.features['Hop' + str(n)] = []
    self.info['Hop' + str(n)] = []
    for saab_id in range(len(self.saabs['Hop' + str(n - 1)])):
        saab_parent = self.saabs['Hop' + str(n - 1)][saab_id]
        energies_parent = self.energies['Hop' + str(n - 1)][saab_id]
        features_parent = self.features['Hop' + str(n - 1)][saab_id]
        for channel_id in range(len(energies_parent)):
            energy = energies_parent[channel_id]
            if energy > self.split_thr:
                # single parent channel, with the channel axis restored
                features = features_parent[:, :, :, channel_id][..., np.newaxis]
                if verbose:
                    print("SaabID:", saab_id, "ChannelID:", channel_id,
                          "Energy:", energy)
                    print("Input shape:", features.shape)
                saab = Saab(kernel_size=self.kernel_sizes[n - 1],
                            bias_flag=True)
                saab.fit(features)
                self.saabs['Hop' + str(n)].append(saab)
                # child energies are scaled by the parent channel's energy
                energies = saab.eigenvalues / sum(
                    saab.eigenvalues) * energy
                self.energies['Hop' + str(n)].append(energies)
                n_channels = np.sum(energies > self.keep_thr)
                output = saab.transform(features, n_channels)
                self.tmp.append(
                    (saab_id, channel_id, self.max_pooling(output)))
                self.features['Hop' + str(n)].append(
                    self.max_pooling(output))
                # lineage: (parent saab id, parent channel id, kept channels)
                self.info['Hop' + str(n)].append(
                    (saab_id, channel_id, n_channels))
                if verbose:
                    print("Output shape:",
                          self.features['Hop' + str(n)][-1].shape)
            else:
                # NOTE(review): this break assumes energies_parent is sorted
                # in descending order, so every later channel is also below
                # split_thr — confirm Saab returns eigenvalues sorted that way.
                break
def SaabTransform(self, X, saab, train, layer):
    """Shrink the input, apply (and, when train=True, first fit) a Saab
    transform in 15 in-place batches, then pool (except on layer 2).

    Returns (saab, X, dc): the model, the transformed (and possibly pooled)
    data, and the concatenated per-batch DC components.

    NOTE(review): the batched transform writes results back into slices of X
    in place — this only works if saab.transform preserves the last-axis
    width of its input (i.e. the number of output kernels equals the patch
    width); confirm against the Saab implementation.
    """
    shrinkArg, SaabArg = self.shrinkArgs[layer], self.SaabArgs[layer]
    assert ('func' in shrinkArg.keys()), "shrinkArg must contain key 'func'!"
    X = shrinkArg['func'](X, shrinkArg)
    S = list(X.shape)
    # flatten to (num_samples, num_features) for Saab
    X = X.reshape(-1, S[-1])
    if SaabArg['num_AC_kernels'] != -1:
        # output feature width when a kernel count is forced
        S[-1] = SaabArg['num_AC_kernels']
    if train == True:
        saab = Saab(num_kernels=SaabArg['num_AC_kernels'],
                    useDC=SaabArg['useDC'], needBias=SaabArg['needBias'])
        saab.fit(X)
    print("fit finish")
    # transform in batches to avoid running out of memory
    num_batches = 15
    batch_size = int(X.shape[0] / num_batches)
    flag = False  # NOTE(review): unused
    transformed = None  # NOTE(review): unused
    dc = []
    for i in range(num_batches - 1):
        print("batch :", i)
        # in-place: overwrite this slice of X with its transformed version
        X[i * batch_size:(i + 1) * batch_size], dc_tmp = saab.transform(
            X[i * batch_size:(i + 1) * batch_size])
        dc.append(dc_tmp)
    # final batch runs to the end so no rows are dropped on uneven division
    X[(num_batches - 1) * batch_size:], dc_tmp = saab.transform(
        X[(num_batches - 1) * batch_size:])
    dc.append(dc_tmp)
    dc = np.concatenate(dc, axis=0)
    # restore the shrunk spatial shape
    X = X.reshape(S)
    print("current layer", layer)
    print("shape before pooling", X.shape)
    # NOTE(review): layer 2 (the last hop, presumably) skips pooling — the
    # hard-coded 2 should be confirmed against the pipeline depth.
    if layer != 2:
        pool_win = shrinkArg['poolwin']
        pool_method = shrinkArg['method']
        # pooling is also batched (6 batches) for memory reasons
        num_batches = 6
        X = poolImage_batch(X, num_batches, pool_win, pool_method)
        print("shape of pooled X: ", X.shape)
    return saab, X, dc
def train(self):
    """Fit a fresh Saab model (with DC kernel) on the cached training
    patches in self.X and store it on self.saab."""
    # build then fit; transform() elsewhere checks self.saab before use
    self.saab = Saab(num_kernels=self.num_kernels, useDC=True)
    self.saab.fit(self.X)