def sparse(self, delta=1e-4):
    """Finite-difference gradient check for a 3-layer sparse FNN.

    Iterates over all activation-function combinations for the three
    layers and all cost functions (skipping combinations where the cost
    is incompatible with the output activation), calling ``self.check``
    with sparseness regularization on the first two layers and a
    one-hot target on the output layer. Asserts that the reported
    finite-difference error stays below ``delta``.

    :param delta: Step size / tolerance for the finite-difference check.
    """
    sys.stdout.write('FNN_layer -> Performing finite difference check '
                     'for 3 layer sparse FNN ... ')
    sys.stdout.flush()
    data = numx.random.rand(1, 6 * 6)
    label = numx.zeros((1, 6 * 6))
    label[0, 14] = 1.0  # one-hot target for the output layer

    def fresh_acts():
        # A new list per layer so that the instantiated AFct.RadialBasis()
        # object is not shared between layers (matches the original's three
        # separately constructed lists).
        return [AFct.Sigmoid, AFct.SoftMax, AFct.SoftSign, AFct.Identity,
                AFct.RadialBasis(), AFct.SoftPlus, AFct.HyperbolicTangent]

    acts1 = fresh_acts()
    acts2 = fresh_acts()
    acts3 = fresh_acts()
    costs = [CFct.NegLogLikelihood, CFct.CrossEntropyError, CFct.SquaredError]
    for c in costs:
        for a3 in acts3:
            for a2 in acts2:
                for a1 in acts1:
                    # CrossEntropyError is only valid with a Sigmoid or
                    # SoftMax output; NegLogLikelihood only with SoftMax.
                    if ((c != CFct.CrossEntropyError and c != CFct.NegLogLikelihood)
                            or (c == CFct.CrossEntropyError
                                and (a3 == AFct.Sigmoid or a3 == AFct.SoftMax))
                            or (c == CFct.NegLogLikelihood and a3 == AFct.SoftMax)):
                        res = self.check(data=data,
                                         delta=delta,
                                         act1=a1,
                                         act2=a2,
                                         act3=a3,
                                         reg_sparseness=[0.3, 0.1, 0.0],
                                         desired_sparseness=[0.01, 0.1, 0],
                                         cost_sparseness=[CFct.SquaredError,
                                                          CFct.SquaredError, 0],
                                         reg_targets=[0.0, 0.0, 1],
                                         desired_targets=[0.0, 0.0, label],
                                         cost_targets=[None, None, c],
                                         full=True)
                        if res > delta:
                            # Fixed: was a Python 2 print statement, which is
                            # a syntax error under Python 3.
                            print("Failed! ", res, a1, a2, a3, c)
                        assert numx.all(res < delta)
    print('successfully passed!')
    sys.stdout.flush()
def check_all(self, data, epsilon, contractive, sparseness,
              desired_sparseness, data_next, slowness_penalty):
    """Checks several possible combinations.

    Runs a finite-difference gradient check on an auto-encoder for every
    pairing of cost function and visible/hidden activation function
    (CrossEntropyError only with a Sigmoid visible activation), once
    without centering (zero offsets) and once with random offsets.

    :param data: Input batch; shape (num_samples, num_visibles).
    :param epsilon: NOTE(review): currently unused — the finite-difference
                    step is hard-coded to 0.001 below; confirm intent.
    :param contractive: Contractive penalty passed to finit_differences.
    :param sparseness: Sparseness penalty passed to finit_differences.
    :param desired_sparseness: Target sparseness value.
    :param data_next: Next-step batch for the slowness penalty.
    :param slowness_penalty: Slowness penalty passed to finit_differences.
    """
    tol = 0.0001  # maximum tolerated gradient discrepancy
    N = data.shape[1]
    M = 2 * data.shape[1]
    weights = numx.random.randn(N, M) * 0.1
    bv = numx.random.randn(1, N) * 0.1
    bh = numx.random.randn(1, M) * 0.1
    ov = numx.random.random((1, N))
    oh = numx.random.random((1, M))

    def run_case(loss, act_in, act_out, off_v, off_h, tag):
        # Build one auto-encoder with the given offsets and assert that
        # analytical gradients match finite differences within `tol`.
        ae = MODEL.AutoEncoder(number_visibles=N,
                               number_hiddens=M,
                               data=None,
                               visible_activation_function=act_in,
                               hidden_activation_function=act_out,
                               cost_function=loss,
                               initial_weights=weights,
                               initial_visible_bias=bv,
                               initial_hidden_bias=bh,
                               initial_visible_offsets=off_v,
                               initial_hidden_offsets=off_h)
        w, b, c = ae.finit_differences(data, 0.001, sparseness,
                                       desired_sparseness, contractive,
                                       slowness_penalty, data_next)
        maxW = numx.max(numx.abs(w))
        maxb = numx.max(numx.abs(b))
        maxc = numx.max(numx.abs(c))
        if maxW > tol or maxb > tol or maxc > tol:
            # Fixed: the original printed " CENTERING " for both passes and
            # omitted the max values in the non-centered branch.
            print("Gradient check failed for ae with: ")
            print(" ", tag, " ", loss, " ", act_in, " ", act_out)
            print(maxW, '\t', maxb, '\t', maxc)
        assert numx.all(maxW < tol)
        assert numx.all(maxb < tol)
        assert numx.all(maxc < tol)

    for loss in [CFct.SquaredError, CFct.CrossEntropyError]:
        for act_in in [AFct.Identity, AFct.SoftPlus, AFct.Sigmoid,
                       AFct.HyperbolicTangent, AFct.RadialBasis()]:
            for act_out in [AFct.Identity, AFct.SoftPlus, AFct.Sigmoid,
                            AFct.HyperbolicTangent, AFct.RadialBasis()]:
                # CrossEntropyError is only valid with a Sigmoid visible
                # activation (simplified from the original tautological form).
                if loss != CFct.CrossEntropyError or act_in == AFct.Sigmoid:
                    run_case(loss, act_in, act_out, 0, 0, "NO CENTERING")
                    run_case(loss, act_in, act_out, ov, oh, "CENTERING")