import numpy as np
import theano.tensor as T

# MultiLayerPerceptron and get_affine_parameters are assumed to be imported
# from the project's model code; the exact import paths are omitted here.


def mlp_from_cascade(layers, loss):
    """From a list of layers, build up an mlp with the given loss."""
    # Determine transfer functions.
    transfers = [l.feature_transfer for l in layers[:-1]]
    out_transfer = layers[-1].out_transfer

    # Determine sizes.
    sizes = [l.n_feature for l in layers[:-1]]
    n_inpt = layers[0].n_inpt
    n_output = layers[-1].n_output

    # Build up mlp and zero out its parameters.
    mlp = MultiLayerPerceptron(
        n_inpt, sizes, n_output, transfers, out_transfer, loss)
    mlp.parameters.data[:] = np.zeros(mlp.parameters.data.shape)

    # Transfer weights and biases, matching each cascade layer to the mlp's
    # correspondingly named parameters.
    weights, biases = [], []
    for l in layers:
        w, b = get_affine_parameters(l)
        weights.append(w)
        biases.append(b)

    weight_names = (
        ['in_to_hidden']
        + ['hidden_to_hidden_%i' % i for i in range(len(weights) - 2)]
        + ['hidden_to_out'])
    bias_names = (
        ['hidden_bias_%i' % i for i in range(len(biases) - 1)]
        + ['out_bias'])

    for wname, bname, w, b in zip(weight_names, bias_names, weights, biases):
        mlp.parameters[wname][:] = w
        mlp.parameters[bname][:] = b

    return mlp


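# A minimal sketch (not part of the original tests) of the mechanism
# mlp_from_cascade relies on: the mlp's weights and biases are addressable by
# name and can be overwritten in place. The (2, 10) shape is an assumption
# that in_to_hidden is laid out as (n_inpt, n_hidden); adjust if the
# parametrization differs.
def test_parameter_views_sketch():
    l = MultiLayerPerceptron(2, [10], 3, ['tanh'], 'softabs', 'squared')
    w = np.random.random((2, 10))
    l.parameters['in_to_hidden'][:] = w
    assert np.allclose(l.parameters['in_to_hidden'], w)

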
def test_mlp2():
    # Two hidden layers: compile the loss and its gradient w.r.t. the flat
    # parameter vector, then evaluate both on random inputs and targets.
    l = MultiLayerPerceptron(2, [10, 12], 3, ['tanh', 'sigmoid'], 'softabs',
                             'squared')
    f = l.function(['inpt', 'target'], 'loss', mode='FAST_COMPILE')
    grad = T.grad(l.exprs['loss'], l.parameters.flat)
    fprime = l.function(['inpt', 'target'], grad, mode='FAST_COMPILE')
    f(np.random.random((10, 2)), np.random.random((10, 3)))
    fprime(np.random.random((10, 2)), np.random.random((10, 3)))


def test_mlp1():
    # Single hidden layer variant of the same check.
    l = MultiLayerPerceptron(2, [10], 3, ['tanh'], 'softabs', 'squared')
    f = l.function(['inpt', 'target'], 'loss', mode='FAST_COMPILE')
    grad = T.grad(l.exprs['loss'], l.parameters.flat)
    fprime = l.function(['inpt', 'target'], grad, mode='FAST_COMPILE')
    f(np.random.random((10, 2)), np.random.random((10, 3)))
    fprime(np.random.random((10, 2)), np.random.random((10, 3)))