def test_all_costs():
    """Check all instances of ConvNonLinearity.

    Either they should be consistent with the corresponding subclass
    of `Linear`, or their `cost` method should not be implemented.
    """
    cases = [[SigmoidConvNonlinearity(), Sigmoid, True],
             [IdentityConvNonlinearity(), Linear, True],
             [TanhConvNonlinearity(), Tanh, False],
             [RectifierConvNonlinearity(), RectifiedLinear, False]]

    for conv_nonlinearity, mlp_nonlinearity, cost_implemented in cases:
        check_case(conv_nonlinearity, mlp_nonlinearity, cost_implemented)
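# A numpy-only sketch of the consistency property the test above relies
# on: an elementwise cost computed on (batch, rows, cols, channels)
# feature maps must equal the same cost computed on the flattened
# (batch, dim) activations, which is what ties SigmoidConvNonlinearity
# to its dense counterpart Sigmoid. The summed-then-batch-averaged cost
# convention is an assumption; names below are illustrative, not
# pylearn2 API, and `check_case` itself is not reproduced here.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def binary_cross_entropy(y, y_hat):
    # Sum over all non-batch axes, then average over the batch.
    axes = tuple(range(1, y.ndim))
    return np.mean(np.sum(-y * np.log(y_hat)
                          - (1.0 - y) * np.log(1.0 - y_hat), axis=axes))

rng = np.random.RandomState(0)
y = rng.randint(2, size=(10, 3, 1, 1)).astype('float64')
y_hat = sigmoid(rng.randn(10, 3, 1, 1))

# Cost on 4D "convolutional" outputs equals cost on the same values
# flattened to 2D.
assert np.allclose(binary_cross_entropy(y, y_hat),
                   binary_cross_entropy(y.reshape(10, 3),
                                        y_hat.reshape(10, 3)))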
def generateConvRegressor(teacher_hintlayer, student_layer):
    """Build a convolutional regressor mapping the student layer's
    output to the spatial shape of the teacher's hint layer."""
    layer_name = 'hint_regressor'
    out_ch = teacher_hintlayer.get_output_space().num_channels

    # Kernel sizes chosen so that a "valid" convolution over the student
    # output reproduces the teacher hint layer's spatial shape:
    # out = in - kernel + 1, hence kernel = in - out + 1.
    ks0 = student_layer.get_output_space().shape[0] - \
        teacher_hintlayer.get_output_space().shape[0] + 1
    ks1 = student_layer.get_output_space().shape[1] - \
        teacher_hintlayer.get_output_space().shape[1] + 1
    ks = [ks0, ks1]

    irng = 0.05  # weight initialization range
    mkn = 0.9    # max kernel norm
    tb = 1       # tied biases

    # Match the regressor's layer type to the teacher hint layer's type.
    if isinstance(teacher_hintlayer, MaxoutConvC01B):
        hint_reg_layer = MaxoutConvC01B(
            num_channels=out_ch,
            num_pieces=teacher_hintlayer.num_pieces,
            kernel_shape=ks,
            pool_shape=[1, 1],
            pool_stride=[1, 1],
            layer_name=layer_name,
            irange=irng,
            max_kernel_norm=mkn,
            tied_b=teacher_hintlayer.tied_b)
    elif isinstance(teacher_hintlayer, ConvRectifiedLinear):
        nonlin = RectifierConvNonlinearity()
        hint_reg_layer = ConvElemwise(output_channels=out_ch,
                                      kernel_shape=ks,
                                      layer_name=layer_name,
                                      nonlinearity=nonlin,
                                      irange=irng,
                                      max_kernel_norm=mkn,
                                      tied_b=tb)
    elif isinstance(teacher_hintlayer, ConvElemwisePL2):
        nonlin = teacher_hintlayer.nonlinearity
        hint_reg_layer = ConvElemwisePL2(output_channels=out_ch,
                                         kernel_shape=ks,
                                         layer_name=layer_name,
                                         nonlinearity=nonlin,
                                         irange=irng,
                                         max_kernel_norm=mkn,
                                         tied_b=tb)
    else:
        raise AssertionError("Unknown convolutional layer type")

    return hint_reg_layer
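# A quick sanity check of the kernel-shape arithmetic used above: a
# "valid" convolution with kernel size k over an input of size n yields
# n - k + 1 outputs, so choosing k = n_student - n_teacher + 1 makes the
# regressor's output match the teacher hint layer's spatial shape. The
# sizes below are illustrative.
n_student, n_teacher = 12, 8
k = n_student - n_teacher + 1           # the ks0/ks1 computation: k = 5
assert n_student - k + 1 == n_teacher   # valid-convolution output size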
def __init__(self, batch_size, fprop_code=True, lr=0.01, n_steps=10,
             lbda=0, top_most=False,
             nonlinearity=RectifierConvNonlinearity(),
             *args, **kwargs):
    '''
    Compiled version: the sparse code is calculated using `top` and is
    not just symbolic.

    Parameters for the optimization/feedforward operation:

    lr : learning rate
    n_steps : number of steps or updates of the hidden code
    truncate : truncate the gradient after this number (default -1,
        which means do not truncate)
    '''
    super(CompositeSparseCoding, self).__init__(*args, **kwargs)
    self.batch_size = batch_size
    self.fprop_code = fprop_code
    self.n_steps = n_steps
    self.lr = lr
    self.lbda = lbda
    self.top_most = top_most
    self.nonlin = nonlinearity
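# A numpy sketch of the iterative inference that `lr`, `n_steps` and
# `lbda` suggest: the hidden code is refined by n_steps gradient updates
# on the reconstruction error, with an L1 shrinkage step weighted by
# lbda (ISTA-style). This is an assumption about the role of these
# hyperparameters, not the class's actual fprop; all names below are
# illustrative.
import numpy as np

def infer_sparse_code(x, W, lr=0.01, n_steps=10, lbda=0.1):
    h = np.zeros(W.shape[1])
    for _ in range(n_steps):
        grad = W.T @ (W @ h - x)        # d/dh of 0.5 * ||x - W h||^2
        h = h - lr * grad               # gradient step on the code
        h = np.sign(h) * np.maximum(np.abs(h) - lr * lbda, 0.0)  # L1 prox
    return h

rng = np.random.RandomState(0)
W = rng.randn(16, 32)                   # illustrative dictionary
h = infer_sparse_code(rng.randn(16), W)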