def __init__(self, input_shape, num_classes, name=None):
    self.num_classes = num_classes
    self.input_shape = input_shape
    self.name = name
    '''
    if load:
        weight_dict = np.load(load).item()
        self.B = tf.cast(tf.Variable(weight_dict[self.name]), tf.float32)
    elif std is not None:
        b = np.random.normal(loc=0., scale=std, size=(self.num_classes, self.output_size))
        self.B = tf.cast(tf.Variable(b), tf.float32)
    else:
        # var = 1. / self.output_size
        # std = np.sqrt(var)
        # b = np.random.normal(loc=0., scale=std, size=(self.num_classes, self.output_size))
        b = FeedbackMatrix(size=(self.num_classes, self.output_size), sparse=self.sparse, rank=self.rank)
        self.B = tf.cast(tf.Variable(b), tf.float32)
    '''
    # The problem was never the bias: it was that we weren't dividing by N.
    # l0 = FullyConnected(input_shape=input_shape, size=self.input_shape, init='alexnet', activation=Relu(), bias=1., name=self.name)
    self.l0 = FullyConnected(input_shape=input_shape, size=self.num_classes, init='alexnet', activation=Linear(), bias=0., name=self.name)
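# --- Illustration (not from the original file): a minimal numpy sketch of the
# "dividing by N" point in the comment above. When the per-example errors are
# correlated across the batch, the summed outer-product gradient AI^T @ DO
# grows with the batch size N, so without a 1/N factor the effective step size
# depends on N. All names below are hypothetical, for this example only.
import numpy as np

np.random.seed(0)
w_err = np.random.randn(64, 10)               # a fixed error direction shared across examples
for N in [32, 128, 512]:
    AI = np.random.randn(N, 64)               # layer input, one row per example
    DO = AI @ w_err                           # errors correlated across the batch
    DW_sum = AI.T @ DO                        # summed gradient: norm grows ~linearly with N
    print(N, np.linalg.norm(DW_sum) / N)      # roughly constant once divided by N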
def __init__(self, input_shape, pool_shape, num_classes, name=None):
    self.input_shape = input_shape
    self.batch_size, self.h, self.w, self.fin = self.input_shape
    self.pool_shape = pool_shape
    self.num_classes = num_classes
    self.name = name

    l1 = AvgPool(size=self.input_shape, ksize=self.pool_shape, strides=self.pool_shape, padding='SAME')
    l2_input_shape = l1.output_shape()
    l2 = ConvToFullyConnected(input_shape=l2_input_shape)
    l3_input_shape = l2.output_shape()
    l3 = FullyConnected(input_shape=l3_input_shape, size=self.num_classes, init='alexnet', activation=Linear(), bias=0., name=self.name)
    self.B = Model(layers=[l1, l2, l3])
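# --- Illustration (not from the original file): a minimal numpy sketch of what
# the feedback path B built above computes: average-pool, flatten, then one
# linear layer. The function name, pooling factor, and shapes are assumptions
# for this example only.
import numpy as np

def feedback_B(A, pool, W, b):
    # A: [batch, h, w, fin]; pool: integer pooling factor;
    # W: [(h // pool) * (w // pool) * fin, num_classes]
    batch, h, w, fin = A.shape
    A = A.reshape(batch, h // pool, pool, w // pool, pool, fin).mean(axis=(2, 4))  # AvgPool
    A = A.reshape(batch, -1)          # ConvToFullyConnected
    return A @ W + b                  # FullyConnected with Linear activation

A = np.random.randn(8, 16, 16, 96)
W = np.random.randn(4 * 4 * 96, 10) * 0.01   # 'alexnet' init: N(0, 0.01)
out = feedback_B(A, pool=4, W=W, b=np.zeros(10))
print(out.shape)  # (8, 10)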
class FullyConnected(Layer):

    def __init__(self, input_shape, size, init=None, activation=None, bias=0., alpha=0., name=None, load=None, train=True):
        self.input_size = input_shape
        self.output_size = size
        self.size = [self.input_size, self.output_size]
        bias = np.ones(shape=self.output_size) * bias
        self.alpha = alpha
        self.activation = Linear() if activation == None else activation
        self.name = name
        self._train = train

        if load:
            print("Loading Weights: " + self.name)
            weight_dict = np.load(load).item()
            weights = weight_dict[self.name]
            bias = weight_dict[self.name + '_bias']
        else:
            if init == "zero":
                weights = np.zeros(shape=self.size)
            elif init == "sqrt_fan_in":
                sqrt_fan_in = math.sqrt(self.input_size)
                weights = np.random.uniform(low=-1.0 / sqrt_fan_in, high=1.0 / sqrt_fan_in, size=self.size)
            elif init == "alexnet":
                weights = np.random.normal(loc=0.0, scale=0.01, size=self.size)
            else:
                # https://www.tensorflow.org/api_docs/python/tf/glorot_uniform_initializer
                # can verify we did this right ...
                fan_in = self.input_size
                fan_out = self.output_size
                lim = np.sqrt(6. / (fan_in + fan_out))
                weights = np.random.uniform(low=-lim, high=lim, size=self.size)

        self.weights = tf.Variable(weights, dtype=tf.float32)
        self.bias = tf.Variable(bias, dtype=tf.float32)

    ###################################################################

    def get_weights(self):
        return [(self.name, self.weights), (self.name + "_bias", self.bias)]

    def set_weights(self, weight_dic):
        weights = weight_dic[self.name]
        bias = weight_dic[self.name + '_bias']
        return [self.weights.assign(weights), self.bias.assign(bias)]

    def num_params(self):
        weights_size = self.input_size * self.output_size
        bias_size = self.output_size
        return weights_size + bias_size

    def forward(self, X):
        Z = tf.matmul(X, self.weights) + self.bias
        A = self.activation.forward(Z)
        return A

    ###################################################################

    def backward(self, AI, AO, DO):
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DI = tf.matmul(DO, tf.transpose(self.weights))
        return DI

    def gv(self, AI, AO, DO):
        if not self._train:
            return []
        # N is computed here but the gradients below are left un-normalized.
        N = tf.shape(AI)[0]
        N = tf.cast(N, dtype=tf.float32)
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DW = tf.matmul(tf.transpose(AI), DO)
        DB = tf.reduce_sum(DO, axis=0)
        return [(DW, self.weights), (DB, self.bias)]

    def train(self, AI, AO, DO):
        if not self._train:
            return []
        N = tf.shape(AI)[0]
        N = tf.cast(N, dtype=tf.float32)
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DW = tf.matmul(tf.transpose(AI), DO)
        DB = tf.reduce_sum(DO, axis=0)
        self.weights = self.weights.assign(tf.subtract(self.weights, tf.scalar_mul(self.alpha, DW)))
        self.bias = self.bias.assign(tf.subtract(self.bias, tf.scalar_mul(self.alpha, DB)))
        return [(DW, self.weights), (DB, self.bias)]

    ###################################################################

    def dfa_backward(self, AI, AO, E, DO):
        return tf.ones_like(AI)

    def dfa_gv(self, AI, AO, E, DO):
        if not self._train:
            return []
        N = tf.shape(AI)[0]
        N = tf.cast(N, dtype=tf.float32)
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DW = tf.matmul(tf.transpose(AI), DO)
        DB = tf.reduce_sum(DO, axis=0)
        return [(DW, self.weights), (DB, self.bias)]

    def dfa(self, AI, AO, E, DO):
        if not self._train:
            return []
        N = tf.shape(AI)[0]
        N = tf.cast(N, dtype=tf.float32)
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DW = tf.matmul(tf.transpose(AI), DO)
        DB = tf.reduce_sum(DO, axis=0)
        self.weights = self.weights.assign(tf.subtract(self.weights, tf.scalar_mul(self.alpha, DW)))
        self.bias = self.bias.assign(tf.subtract(self.bias, tf.scalar_mul(self.alpha, DB)))
        return [(DW, self.weights), (DB, self.bias)]

    ###################################################################

    def lel_backward(self, AI, AO, E, DO, Y):
        # DI = tf.zeros_like(AI)
        DI = self.backward(AI, AO, DO)
        return DI

    def lel_gv(self, AI, AO, E, DO, Y):
        return self.gv(AI, AO, DO)

    def lel(self, AI, AO, E, DO, Y):
        return self.train(AI, AO, DO)
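# --- Usage sketch (an assumption, not from the original file): exercising the
# FullyConnected layer above in a TF1 graph. The layer name, sizes, and the
# simple error signal are hypothetical; Tanh() and the Layer base class are
# assumed to be the ones defined elsewhere in this repo.
layer = FullyConnected(input_shape=784, size=10, init='alexnet', activation=Tanh(), bias=0.1, alpha=1e-4, name='fc_demo')

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

A = layer.forward(X)                  # activations
E = A - Y                             # a simple error signal, for the demo only
gvs = layer.gv(AI=X, AO=A, DO=E)      # [(DW, weights), (DB, bias)]
step = tf.train.GradientDescentOptimizer(1e-4).apply_gradients(gvs)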
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--alpha', type=float, default=1e-4)
    parser.add_argument('--beta', type=float, default=1e-4)    # feedback-weight (B) learning rate
    parser.add_argument('--sigma', type=float, default=0.1)    # node-perturbation standard deviation
    parser.add_argument('--l2', type=float, default=0.)
    parser.add_argument('--decay', type=float, default=1.)
    parser.add_argument('--eps', type=float, default=1e-5)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--act', type=str, default='tanh')
    parser.add_argument('--bias', type=float, default=0.1)
    parser.add_argument('--gpu', type=int, default=1)
    parser.add_argument('--dfa', type=int, default=1)
    parser.add_argument('--feedbacklearning', type=int, default=1)    # whether or not to learn the feedback weights
    parser.add_argument('--sparse', type=int, default=0)
    parser.add_argument('--rank', type=int, default=0)
    parser.add_argument('--init', type=str, default="sqrt_fan_in")
    parser.add_argument('--opt', type=str, default="adam")
    parser.add_argument('--N', type=int, default=50)
    parser.add_argument('--save', type=int, default=0)
    parser.add_argument('--name', type=str, default="cifar10_conv_np")
    parser.add_argument('--load', type=str, default=None)
    args = parser.parse_args()

    if args.gpu >= 0:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

    cifar10 = tf.keras.datasets.cifar10.load_data()

    ##############################################

    EPOCHS = args.epochs
    TRAIN_EXAMPLES = 50000
    TEST_EXAMPLES = 10000
    BATCH_SIZE = args.batch_size

    if args.act == 'tanh':
        act = Tanh()
    elif args.act == 'relu':
        act = Relu()
    else:
        assert (False)

    train_fc = True
    if args.load:
        train_conv = False
    else:
        train_conv = True

    weights_fc = None
    weights_conv = args.load

    # Set up the hyperparameter search.
    attrs = ['sigma', 'alpha', 'beta']
    log_scale = [True, True, True]
    ranges = [[-4, -1], [-6, -3], [-6, -3]]
    params = []
    isnan = []
    train_accs = []
    test_accs = []

    # Run args.N times, each with randomly drawn hyperparameters.
    for idx in range(args.N):
        param = set_random_hyperparameters(args, attrs, ranges, log_scale)
        params.append(param)
        if args.feedbacklearning == 0:
            args.beta = 0

        # Report the chosen parameters.
        print('Alpha, beta, sigma are: ', args.alpha, args.beta, args.sigma)

        tf.set_random_seed(0)
        tf.reset_default_graph()

        batch_size = tf.placeholder(tf.int32, shape=())
        dropout_rate = tf.placeholder(tf.float32, shape=())
        learning_rate = tf.placeholder(tf.float32, shape=())
        sigma = tf.placeholder(tf.float32, shape=(), name="Sigma")

        X = tf.placeholder(tf.float32, [None, 32, 32, 3])
        X = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), X)
        Y = tf.placeholder(tf.float32, [None, 10])

        l0 = Convolution(input_sizes=[batch_size, 32, 32, 3], filter_sizes=[5, 5, 3, 96], num_classes=10, init_filters=args.init, strides=[1, 1, 1, 1], padding="SAME", alpha=learning_rate, activation=act, bias=args.bias, last_layer=False, name='conv1', load=weights_conv, train=train_conv)
        l1 = MaxPool(size=[batch_size, 32, 32, 96], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")

        # Add a perturbation to the activity; the perturbed output is used to
        # train the feedback weights.
        l2p = NodePert(size=[batch_size, 16, 16, 96], sigma=sigma)
        l2 = FeedbackConv(size=[batch_size, 16, 16, 96], num_classes=10, sparse=args.sparse, rank=args.rank, name='conv1_fb')

        l3 = Convolution(input_sizes=[batch_size, 16, 16, 96], filter_sizes=[5, 5, 96, 128], num_classes=10, init_filters=args.init, strides=[1, 1, 1, 1], padding="SAME", alpha=learning_rate, activation=act, bias=args.bias, last_layer=False, name='conv2', load=weights_conv, train=train_conv)
        l4 = MaxPool(size=[batch_size, 16, 16, 128], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")
        l5p = NodePert(size=[batch_size, 8, 8, 128], sigma=sigma)
        l5 = FeedbackConv(size=[batch_size, 8, 8, 128], num_classes=10, sparse=args.sparse, rank=args.rank, name='conv2_fb')

        l6 = Convolution(input_sizes=[batch_size, 8, 8, 128], filter_sizes=[5, 5, 128, 256], num_classes=10, init_filters=args.init, strides=[1, 1, 1, 1], padding="SAME", alpha=learning_rate, activation=act, bias=args.bias, last_layer=False, name='conv3', load=weights_conv, train=train_conv)
        l7 = MaxPool(size=[batch_size, 8, 8, 256], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")
        l8p = NodePert(size=[batch_size, 4, 4, 256], sigma=sigma)
        l8 = FeedbackConv(size=[batch_size, 4, 4, 256], num_classes=10, sparse=args.sparse, rank=args.rank, name='conv3_fb')

        l9 = ConvToFullyConnected(shape=[4, 4, 256])

        l10p = NodePert(size=[batch_size, 4 * 4 * 256], sigma=sigma)
        l10 = FullyConnected(size=[4 * 4 * 256, 2048], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=act, bias=args.bias, last_layer=False, name='fc1', load=weights_fc, train=train_fc)
        l11 = Dropout(rate=dropout_rate)
        l12 = FeedbackFC(size=[4 * 4 * 256, 2048], num_classes=10, sparse=args.sparse, rank=args.rank, name='fc1_fb')

        l13p = NodePert(size=[batch_size, 2048], sigma=sigma)
        l13 = FullyConnected(size=[2048, 2048], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=act, bias=args.bias, last_layer=False, name='fc2', load=weights_fc, train=train_fc)
        l14 = Dropout(rate=dropout_rate)
        l15 = FeedbackFC(size=[2048, 2048], num_classes=10, sparse=args.sparse, rank=args.rank, name='fc2_fb')

        l16 = FullyConnected(size=[2048, 10], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=Linear(), bias=args.bias, last_layer=True, name='fc3', load=weights_fc, train=train_fc)

        ##############################################

        model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16])
        model_perturbed = Model(layers=[l0, l1, l2p, l2, l3, l4, l5p, l5, l6, l7, l8p, l8, l9, l10p, l10, l11, l12, l13p, l13, l14, l15, l16])

        predict = model.predict(X=X)
        predict_perturbed = model_perturbed.predict(X=X)

        #######

        # Pairs of perturbations and feedback weights.
        # feedbackpairs = [[l2p, l2], [l5p, l5], [l8p, l8], [l10p, l12], [l13p, l15]]
        # Testing one pair at a time: these work, so it must be the (l10p, l12) pair that fails.
        feedbackpairs = [[l2p, l2], [l5p, l5], [l8p, l8], [l13p, l15]]

        # Get the noise, the feedback matrices, and the perturbed and
        # unperturbed losses used to build the update rule for the feedback weights.
        loss = tf.reduce_sum(tf.pow(tf.nn.softmax(predict) - Y, 2), 1) / 2
        loss_perturbed = tf.reduce_sum(tf.pow(tf.nn.softmax(predict_perturbed) - Y, 2), 1) / 2

        train_B = []
        E = tf.nn.softmax(predict) - Y
        for idx, (noise, feedback) in enumerate(feedbackpairs):
            print(idx, batch_size, feedback.output_size)
            xi = tf.reshape(noise.get_noise(), (batch_size, feedback.output_size))
            B = feedback.B
            lambd = tf.matmul(tf.diag(loss_perturbed - loss) / args.sigma / args.sigma, xi)
            np_error = tf.matmul(E, B) - lambd
            grad_B = tf.matmul(tf.transpose(E), np_error)
            new_B = B.assign(B - args.beta * grad_B)
            train_B.append(new_B)

        #######

        weights = model.get_weights()

        if args.opt == "adam" or args.opt == "rms" or args.opt == "decay":
            if args.dfa:
                grads_and_vars = model.dfa_gvs(X=X, Y=Y)
            else:
                grads_and_vars = model.gvs(X=X, Y=Y)

            if args.opt == "adam":
                train = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)
            elif args.opt == "rms":
                train = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=0.99, epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)
            elif args.opt == "decay":
                train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).apply_gradients(grads_and_vars=grads_and_vars)
            else:
                assert (False)
        else:
            if args.dfa:
                train = model.dfa(X=X, Y=Y)
            else:
                train = model.train(X=X, Y=Y)

        correct = tf.equal(tf.argmax(predict, 1), tf.argmax(Y, 1))
        total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))

        ##############################################

        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        (x_train, y_train), (x_test, y_test) = cifar10
        x_train = x_train.reshape(TRAIN_EXAMPLES, 32, 32, 3)
        y_train = keras.utils.to_categorical(y_train, 10)
        x_test = x_test.reshape(TEST_EXAMPLES, 32, 32, 3)
        y_test = keras.utils.to_categorical(y_test, 10)

        ##############################################

        filename = args.name + '.results'
        f = open(filename, "w")
        f.write(filename + "\n")
        f.write("total params: " + str(model.num_params()) + "\n")
        f.close()

        ##############################################

        for ii in range(EPOCHS):
            if args.opt == 'decay' or args.opt == 'gd':
                decay = np.power(args.decay, ii)
                lr = args.alpha * decay
            else:
                lr = args.alpha
            print(ii)

            #############################

            _count = 0
            _total_correct = 0

            # The training loop: each step also updates the feedback weights
            # using the node perturbation.
            for jj in range(int(TRAIN_EXAMPLES / BATCH_SIZE)):
                xs = x_train[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                ys = y_train[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                _correct, _ = sess.run([total_correct, train], feed_dict={
                    sigma: 0.0,
                    batch_size: BATCH_SIZE,
                    dropout_rate: args.dropout,
                    learning_rate: lr,
                    X: xs,
                    Y: ys
                })

                # Add a step to update B.
                _ = sess.run([train_B], feed_dict={
                    sigma: args.sigma,
                    batch_size: BATCH_SIZE,
                    dropout_rate: args.dropout,
                    learning_rate: lr,
                    X: xs,
                    Y: ys
                })

                _total_correct += _correct
                _count += BATCH_SIZE

            train_acc = 1.0 * _total_correct / _count
            train_accs.append(train_acc)

            #############################

            _count = 0
            _total_correct = 0

            for jj in range(int(TEST_EXAMPLES / BATCH_SIZE)):
                xs = x_test[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                ys = y_test[jj * BATCH_SIZE:(jj + 1) * BATCH_SIZE]
                _correct = sess.run(total_correct, feed_dict={
                    sigma: 0.0,
                    batch_size: BATCH_SIZE,
                    dropout_rate: 0.0,
                    learning_rate: 0.0,
                    X: xs,
                    Y: ys
                })

                _total_correct += _correct
                _count += BATCH_SIZE

            test_acc = 1.0 * _total_correct / _count
            test_accs.append(test_acc)
            isnan.append(None)

            # try:
            #     trainer.train()
            # except ValueError:
            #     print("Method fails to converge for these parameters")
            #     isnan[n, m] = 1

            # Save results.
            #############################

            print("train acc: %f test acc: %f" % (train_acc, test_acc))
            f = open(filename, "a")
            f.write("train acc: %f test acc: %f\n" % (train_acc, test_acc))
            f.close()

        # Save params after each run.
        fn = "./cifar10_conv_np_hyperparam_search_varalpha_septsearch_2_dfa_%d_fblearning_%d.npz" % (args.dfa, args.feedbacklearning)
        to_save = {
            'attr': attrs,
            'params': params,
            'train_accs': train_accs,
            'test_accs': test_accs,
            'isnan': isnan
        }
        pickle.dump(to_save, open(fn, "wb"))
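# --- Illustration (not from the original file): the feedback-weight update
# built in train_B above, written out in numpy. For each (noise, feedback)
# pair, the perturbation-based estimate of the loss gradient w.r.t. the
# layer's activity is lambda = ((L_perturbed - L) / sigma^2) * xi, and B is
# nudged so the feedback-projected error E @ B matches it:
# grad_B = E^T (E @ B - lambda), B <- B - beta * grad_B.
# All sizes below are assumptions, chosen to mirror the fc2 pair.
import numpy as np

np.random.seed(0)
batch, classes, units = 128, 10, 2048
sigma, beta = 0.1, 1e-4

E = np.random.randn(batch, classes)           # softmax(predict) - Y
xi = np.random.randn(batch, units) * sigma    # injected noise
loss = np.random.rand(batch)                  # per-example unperturbed loss
loss_pert = loss + 1e-3 * np.random.rand(batch)
B = np.random.randn(classes, units) * 0.01

lambd = ((loss_pert - loss) / sigma**2)[:, None] * xi   # tf.diag(...) @ xi
np_error = E @ B - lambd
grad_B = E.T @ np_error
B = B - beta * grad_B
print(B.shape)  # (10, 2048)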
                    activation=act, bias=args.bias, last_layer=False, name="fc3")
l8 = Dropout(rate=dropout_rate)
l9 = FeedbackFC(size=[1000, 1000], num_classes=10, sparse=args.sparse, rank=args.rank, name="fc3_fb")
l10 = FullyConnected(size=[1000, 10], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=Linear(), bias=args.bias, last_layer=True, name="fc4")

model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10])

##############################################

predict = model.predict(X=X)
weights = model.get_weights()

if args.opt == "adam" or args.opt == "rms" or args.opt == "decay":
    if args.dfa:
        grads_and_vars = model.dfa_gvs(X=X, Y=Y)
class Convolution(Layer):

    def __init__(self, input_shape, filter_sizes, init, strides=[1, 1, 1, 1], padding='SAME', activation=None, bias=0., use_bias=True, name=None, load=None, train=True):
        self.input_shape = input_shape
        self.filter_sizes = filter_sizes
        self.batch_size, self.h, self.w, self.fin = self.input_shape
        self.fh, self.fw, self.fin, self.fout = self.filter_sizes
        self.init = init
        self.strides = strides
        _, self.sh, self.sw, _ = self.strides
        self.padding = padding
        self.activation = Linear() if activation == None else activation
        self.use_bias = use_bias
        self.name = name
        self.train_flag = train

        if load:
            print("Loading Weights: " + self.name)
            weight_dict = np.load(load, encoding='latin1', allow_pickle=True).item()
            filters = weight_dict[self.name]
            bias = weight_dict[self.name + '_bias']
        else:
            filters = init_filters(size=self.filter_sizes, init=self.init)
            bias = np.ones(shape=self.fout) * bias

        self.filters = tf.Variable(filters, dtype=tf.float32)
        self.bias = tf.Variable(bias, dtype=tf.float32)

    ###################################################################

    def get_weights(self):
        return [(self.name, self.filters), (self.name + "_bias", self.bias)]

    def output_shape(self):
        oh = conv_output_length(self.h, self.fh, self.padding.lower(), self.sh)
        ow = conv_output_length(self.w, self.fw, self.padding.lower(), self.sw)
        od = self.fout
        return [oh, ow, od]

    def num_params(self):
        filter_weights_size = self.fh * self.fw * self.fin * self.fout
        bias_weights_size = self.fout
        return filter_weights_size + bias_weights_size

    def forward(self, X):
        Z = tf.nn.conv2d(X, self.filters, self.strides, self.padding)
        if self.use_bias:
            Z = Z + tf.reshape(self.bias, (1, 1, 1, self.fout))
        A = self.activation.forward(Z)
        return {'aout': A, 'cache': {}}

    ###################################################################

    def bp(self, AI, AO, DO, cache):
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DI = tf.nn.conv2d_backprop_input(input_sizes=self.input_shape, filter=self.filters, out_backprop=DO, strides=self.strides, padding=self.padding)
        DF = tf.nn.conv2d_backprop_filter(input=AI, filter_sizes=self.filter_sizes, out_backprop=DO, strides=self.strides, padding=self.padding)
        DB = tf.reduce_sum(DO, axis=[0, 1, 2])
        if self.train_flag:
            return {'dout': DI, 'cache': {}}, [(DF, self.filters), (DB, self.bias)]
        else:
            return {'dout': DI, 'cache': {}}, []

    def dfa(self, AI, AO, E, DO, cache):
        return self.bp(AI, AO, DO, cache)

    def lel(self, AI, AO, DO, Y, cache):
        return self.bp(AI, AO, DO, cache)
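# --- Usage sketch (an assumption, not from the original file): one forward and
# one backward pass through the Convolution class above. The layer name, the
# init string, and the dummy upstream gradient are hypothetical; init_filters,
# conv_output_length, and Relu() are assumed to be the repo's own helpers, and
# the accepted init names depend on what init_filters supports.
layer = Convolution(input_shape=[32, 24, 24, 3], filter_sizes=[5, 5, 3, 64], init='glorot_uniform', strides=[1, 1, 1, 1], padding='SAME', activation=Relu(), name='conv_demo')

X = tf.placeholder(tf.float32, [32, 24, 24, 3])
out = layer.forward(X)                      # {'aout': A, 'cache': {}}
A = out['aout']
DO = tf.ones_like(A)                        # a dummy upstream gradient
bp_out, gvs = layer.bp(AI=X, AO=A, DO=DO, cache={})
DI = bp_out['dout']                         # gradient w.r.t. the layer input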
tf.set_random_seed(0)
tf.reset_default_graph()

batch_size = tf.placeholder(tf.int32, shape=())
dropout_rate = tf.placeholder(tf.float32, shape=())
learning_rate = tf.placeholder(tf.float32, shape=())

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

l0 = FullyConnected(size=[784, 400], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=Tanh(), bias=args.bias, l2=args.l2, last_layer=False, name="fc1")
l1 = Dropout(rate=dropout_rate)
l2 = FeedbackFC(size=[784, 400], num_classes=10, sparse=args.sparse, rank=args.rank, name="fc1_fb")
l3 = FullyConnected(size=[400, 10], num_classes=10, init_weights=args.init, alpha=learning_rate, activation=Linear(), bias=args.bias, l2=args.l2, last_layer=True, name="fc2")

model = Model(layers=[l0, l1, l2, l3])

##############################################

predict = model.predict(X=X)
weights = model.get_weights()

if args.opt == "adam" or args.opt == "rms" or args.opt == "decay":
    if args.dfa:
        grads_and_vars = model.dfa_gvs(X=X, Y=Y)
    else:
        grads_and_vars = model.gvs(X=X, Y=Y)
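# --- Illustration (not from the original file): a minimal numpy sketch of the
# difference between the backprop and DFA error signals for the 784-400-10 net
# above. Backprop carries the output error through W2^T; DFA replaces that
# with a fixed random feedback matrix B (the role played by FeedbackFC here).
# All names and sizes below are assumptions for this example only.
import numpy as np

np.random.seed(0)
batch = 64
W1 = np.random.randn(784, 400) * 0.01
W2 = np.random.randn(400, 10) * 0.01
B = np.random.randn(10, 400) * 0.01       # fixed random feedback

X = np.random.randn(batch, 784)
H = np.tanh(X @ W1)                       # hidden activations (Tanh, as above)
E = np.random.randn(batch, 10)            # stand-in for softmax(predict) - Y

delta_bp = (E @ W2.T) * (1 - H**2)        # backprop hidden error
delta_dfa = (E @ B) * (1 - H**2)          # DFA hidden error via B
print(delta_bp.shape, delta_dfa.shape)    # both (64, 400)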
class Convolution(Layer):

    def __init__(self, input_sizes, filter_sizes, strides, padding, init=None, alpha=0., activation=None, bias=0., name=None, load=None, train=True):
        self.input_sizes = input_sizes
        self.filter_sizes = filter_sizes
        self.batch_size, self.h, self.w, self.fin = self.input_sizes
        self.fh, self.fw, self.fin, self.fout = self.filter_sizes
        bias = np.ones(shape=self.fout) * bias
        self.strides = strides
        # output_shape() needs the spatial strides; unpack them here, as the
        # newer Convolution class above does.
        _, self.sh, self.sw, _ = self.strides
        self.padding = padding
        self.alpha = alpha
        self.activation = Linear() if activation == None else activation
        self.name = name
        self._train = train

        if load:
            print("Loading Weights: " + self.name)
            weight_dict = np.load(load, encoding='latin1').item()
            filters = weight_dict[self.name]
            bias = weight_dict[self.name + '_bias']
        else:
            if init == "zero":
                filters = np.zeros(shape=self.filter_sizes)
            elif init == "sqrt_fan_in":
                sqrt_fan_in = math.sqrt(self.h * self.w * self.fin)
                filters = np.random.uniform(low=-1.0 / sqrt_fan_in, high=1.0 / sqrt_fan_in, size=self.filter_sizes)
            elif init == "alexnet":
                filters = np.random.normal(loc=0.0, scale=0.01, size=self.filter_sizes)
            else:
                # https://www.tensorflow.org/api_docs/python/tf/glorot_uniform_initializer
                # can verify we did this right ...
                fan_in = self.fh * self.fw * self.fin
                fan_out = self.fout
                lim = np.sqrt(6. / (fan_in + fan_out))
                filters = np.random.uniform(low=-lim, high=lim, size=self.filter_sizes)

        self.filters = tf.Variable(filters, dtype=tf.float32)
        self.bias = tf.Variable(bias, dtype=tf.float32)

    ###################################################################

    def get_weights(self):
        return [(self.name, self.filters), (self.name + "_bias", self.bias)]

    def set_weights(self, weight_dic):
        filters = weight_dic[self.name]
        bias = weight_dic[self.name + '_bias']
        return [self.filters.assign(filters), self.bias.assign(bias)]

    def output_shape(self):
        oh = conv_output_length(self.h, self.fh, self.padding.lower(), self.sh)
        ow = conv_output_length(self.w, self.fw, self.padding.lower(), self.sw)
        od = self.fout
        return [oh, ow, od]

    def num_params(self):
        filter_weights_size = self.fh * self.fw * self.fin * self.fout
        bias_weights_size = self.fout
        return filter_weights_size + bias_weights_size

    def forward(self, X):
        Z = tf.nn.conv2d(X, self.filters, self.strides, self.padding) + tf.reshape(self.bias, [1, 1, self.fout])
        A = self.activation.forward(Z)
        return A

    ###################################################################

    def backward(self, AI, AO, DO):
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DI = tf.nn.conv2d_backprop_input(input_sizes=self.input_sizes, filter=self.filters, out_backprop=DO, strides=self.strides, padding=self.padding)
        return DI

    def gv(self, AI, AO, DO):
        if not self._train:
            return []
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DF = tf.nn.conv2d_backprop_filter(input=AI, filter_sizes=self.filter_sizes, out_backprop=DO, strides=self.strides, padding=self.padding)
        DB = tf.reduce_sum(DO, axis=[0, 1, 2])
        return [(DF, self.filters), (DB, self.bias)]

    def train(self, AI, AO, DO):
        if not self._train:
            return []
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DF = tf.nn.conv2d_backprop_filter(input=AI, filter_sizes=self.filter_sizes, out_backprop=DO, strides=self.strides, padding=self.padding)
        DB = tf.reduce_sum(DO, axis=[0, 1, 2])
        self.filters = self.filters.assign(tf.subtract(self.filters, tf.scalar_mul(self.alpha, DF)))
        self.bias = self.bias.assign(tf.subtract(self.bias, tf.scalar_mul(self.alpha, DB)))
        return [(DF, self.filters), (DB, self.bias)]

    ###################################################################

    def dfa_backward(self, AI, AO, E, DO):
        return tf.ones(shape=(tf.shape(AI)))

    def dfa_gv(self, AI, AO, E, DO):
        if not self._train:
            return []
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DF = tf.nn.conv2d_backprop_filter(input=AI, filter_sizes=self.filter_sizes, out_backprop=DO, strides=self.strides, padding=self.padding)
        DB = tf.reduce_sum(DO, axis=[0, 1, 2])
        return [(DF, self.filters), (DB, self.bias)]

    def dfa(self, AI, AO, E, DO):
        if not self._train:
            return []
        DO = tf.multiply(DO, self.activation.gradient(AO))
        DF = tf.nn.conv2d_backprop_filter(input=AI, filter_sizes=self.filter_sizes, out_backprop=DO, strides=self.strides, padding=self.padding)
        DB = tf.reduce_sum(DO, axis=[0, 1, 2])
        self.filters = self.filters.assign(tf.subtract(self.filters, tf.scalar_mul(self.alpha, DF)))
        self.bias = self.bias.assign(tf.subtract(self.bias, tf.scalar_mul(self.alpha, DB)))
        return [(DF, self.filters), (DB, self.bias)]

    ###################################################################

    def lel_backward(self, AI, AO, E, DO, Y):
        return self.backward(AI, AO, DO)

    def lel_gv(self, AI, AO, E, DO, Y):
        return self.gv(AI, AO, DO)

    def lel(self, AI, AO, E, DO, Y):
        return self.train(AI, AO, DO)
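# --- Verification sketch (not from the original file): the Glorot-uniform
# branch above says "can verify we did this right". One quick numpy check is
# that the hand-rolled draw stays inside [-lim, lim] and has variance close to
# 2 / (fan_in + fan_out), since a uniform on [-lim, lim] has variance lim^2 / 3.
# One caveat (an assumption about TF internals): tf's glorot initializer
# computes conv fan_out as fh * fw * fout, so the hand-rolled version, which
# uses fan_out = fout, is not identical for conv filters.
import numpy as np

fh, fw, fin, fout = 5, 5, 3, 96
fan_in = fh * fw * fin
fan_out = fout
lim = np.sqrt(6. / (fan_in + fan_out))

filters = np.random.uniform(low=-lim, high=lim, size=(fh, fw, fin, fout))
print(np.abs(filters).max() <= lim)             # True
print(filters.var(), 2. / (fan_in + fan_out))   # approximately equal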