def test_MLPN_xor_poly():
    """Check that a 3-layer polynomial MLP is able to learn XOR.

    Trains on the four XOR patterns for 30000 epochs with a fixed random
    seed; equal inputs must map to class [1, 0] and unequal inputs to
    class [0, 1] within a 0.2 margin.
    """
    x1 = np.array([0.0, 0.0])
    x2 = np.array([1.0, 1.0])
    x3 = np.array([0.0, 1.0])
    x4 = np.array([1.0, 0.0])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 15, 3])
    np.random.seed(seed=509)  # fixed seed keeps the weight init deterministic
    parameters['weights'] = MLP_module.get_weights(parameters['layers'])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([0.1])
    parameters['momentum'] = np.array([0.5])
    parameters['mse'] = np.array([0.0])
    parameters['polynomial'] = True
    M = MLP_module.MLP(parameters)
    # range() replaces Python-2-only xrange(); loop behavior is identical
    # and the code now also runs on Python 3.
    for n in range(30000):
        M.train(x1, np.array([1.0, 0.0]))
        M.train(x2, np.array([1.0, 0.0]))
        M.train(x3, np.array([0.0, 1.0]))
        M.train(x4, np.array([0.0, 1.0]))
    o1 = M.evaluate(x1)
    o2 = M.evaluate(x2)
    o3 = M.evaluate(x3)
    o4 = M.evaluate(x4)
    assert o1[0] > 0.8
    assert o1[1] < 0.2
    assert o2[0] > 0.8
    assert o2[1] < 0.2
    assert o3[0] < 0.2
    assert o3[1] > 0.8
    assert o4[0] < 0.2
    assert o4[1] > 0.8
def upgrade_to_ver_1(parameters):
    """Upgrade a version-0 parameter dictionary in place to version 1.

    Adds the buffers and feature flags introduced in version 1 and, when a
    readout predictor is missing, splits the readout columns out of the
    primary predictor's last weight matrix into a dedicated two-layer
    readout predictor.

    :param parameters: parameter dictionary, modified in place
    :type parameters: dict
    """
    parameters['internal_buffers'] = []
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['integral_blocks'] = []
    parameters['derivative_blocks'] = []
    parameters['error_blocks'] = []
    # Version-1 defaults for flags that version 0 did not have.
    parameters['use_derivative'] = True
    parameters['use_integral'] = True
    parameters['use_error'] = True
    parameters['use_t_2_block'] = False
    parameters['predict_2_steps'] = False
    parameters['use_global_backprop'] = False
    parameters['normalize_output'] = False
    parameters["complex_context_in_second_layer"] = False
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
        parameters['derivative_blocks'].append(
            SharedArray.SharedNumpyArray_like(block))
        parameters['integral_blocks'].append(
            SharedArray.SharedNumpyArray_like(block))
        parameters['error_blocks'].append(
            SharedArray.SharedNumpyArray_like(block))
    if "complex" not in parameters:
        parameters["complex"] = False
        # A 4-layer primary predictor marks a complex unit.
        if len(parameters["Primary_Predictor_params"]['layers']) == 4:
            parameters["complex"] = True
    if "autoencoder" not in parameters:
        parameters["autoencoder"] = False
    if "readout_learning_rate" not in parameters:
        parameters['readout_learning_rate'] = parameters[
            "Primary_Predictor_params"]["learning_rate"]
    if "momentum" not in parameters:
        parameters['momentum'] = parameters["Primary_Predictor_params"][
            "momentum"]
    # Hidden-layer width excluding the bias unit.
    nhidden = parameters["Primary_Predictor_params"]['layers'][-2][
        'activation'].shape[0] - 1
    nreadout = 0
    noutputs = 0
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += np.prod(block.shape)
    for (block, delta, pblock) in parameters['readout_blocks']:
        nreadout += np.prod(block.shape)
    if "Readout_Predictor_params" not in parameters:
        # Split the readout outputs out of the primary predictor: the last
        # nreadout columns of its final weight matrix become the weights of
        # a new two-layer readout predictor.
        parameters["Readout_Predictor_params"] = {}
        parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
            [nhidden + 1, nreadout + 1])
        # note: builtin float replaces np.float (a plain alias removed in
        # NumPy >= 1.24); the resulting dtype (float64) is unchanged.
        parameters["Readout_Predictor_params"][
            'beta'] = SharedArray.SharedNumpyArray((1, ), float)
        parameters["Readout_Predictor_params"]['beta'][0] = 1.0
        parameters["Readout_Predictor_params"][
            'learning_rate'] = parameters['readout_learning_rate']
        parameters["Readout_Predictor_params"]['momentum'] = parameters[
            'momentum']
        parameters["Readout_Predictor_params"][
            'mse'] = SharedArray.SharedNumpyArray((1, ), float)
        parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Readout_Predictor_params"]['layers'])
        parameters["Readout_Predictor_params"]['weights'][0][:] = parameters[
            "Primary_Predictor_params"]['weights'][-1][:, noutputs:]
        # Shrink the primary predictor's output layer to the signal outputs.
        old_weight_matrix = parameters["Primary_Predictor_params"]['weights'][-1]
        parameters["Primary_Predictor_params"]['weights'][
            -1] = SharedArray.SharedNumpyArray((nhidden + 1, noutputs), float)
        parameters["Primary_Predictor_params"]['weights'][
            -1][:] = old_weight_matrix[:, :noutputs]
        parameters["Primary_Predictor_params"]['layers'][-1] = {
            'activation': SharedArray.SharedNumpyArray(noutputs, float),
            'error': SharedArray.SharedNumpyArray(noutputs, float),
            'delta': SharedArray.SharedNumpyArray(noutputs, float)
        }
        parameters['backpropagate_readout_error'] = True
def test_perceptron_two_layers():
    """Check that a two-layer perceptron learns logical OR.

    Trains on randomly sampled rows of X for 30000 steps with a fixed
    seed, then requires each output to match Y within atol=0.03.
    """
    X = np.array([[0, 1], [1, 1], [1, 0], [0, 0]])
    Y = np.array([[1], [1], [1], [0]])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 2])
    np.random.seed(seed=509)  # fixed seed keeps the test deterministic
    parameters['weights'] = MLP_module.get_weights(parameters['layers'])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([0.25])
    parameters['momentum'] = np.array([0.5])
    parameters['mse'] = np.array([0.0])
    M = MLP_module.MLP(parameters)
    # range() replaces Python-2-only xrange(); loop behavior is identical.
    for n in range(30000):
        i = np.random.randint(low=0, high=4)
        M.train(X[i], Y[i])
    for i in range(4):
        O = M.evaluate(X[i])
        assert np.allclose(Y[i], O, atol=0.03)
def test_perceptron_two_layers():
    """Check that a two-layer perceptron learns logical OR.

    Trains on randomly sampled rows of X for 30000 steps with a fixed
    seed, then requires each output to match Y within atol=0.03.

    NOTE(review): an identical definition of this test appears earlier in
    this file; at import time the later def shadows the earlier one —
    consider removing one of the duplicates.
    """
    X = np.array([[0, 1], [1, 1], [1, 0], [0, 0]])
    Y = np.array([[1], [1], [1], [0]])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 2])
    np.random.seed(seed=509)  # fixed seed keeps the test deterministic
    parameters['weights'] = MLP_module.get_weights(parameters['layers'])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([0.25])
    parameters['momentum'] = np.array([0.5])
    parameters['mse'] = np.array([0.0])
    M = MLP_module.MLP(parameters)
    # range() replaces Python-2-only xrange(); loop behavior is identical.
    for n in range(30000):
        i = np.random.randint(low=0, high=4)
        M.train(X[i], Y[i])
    for i in range(4):
        O = M.evaluate(X[i])
        assert np.allclose(Y[i], O, atol=0.03)
def generate_missing_parameters(parameters, options):
    """Generate all the missing dictionary parameters when all the other
    relevant variables are known.

    When complex_unit is False, a standard 3-layer MLP is used. When
    complex_unit is True, an MLP with additional hidden layers is used.
    There is no return value; the method leaves a side effect by
    modifying the parameters dict.

    :param parameters: parameter dictionary, modified in place
    :type parameters: dict
    :param options: option strings ('1'/'0' flags and unit_type)
    :type options: dict
    """
    complex_unit = options['unit_type'] == "complex"
    polynomial = options['polynomial'] == '1'
    autoencoder = options['autoencoder'] == '1'
    use_t_2_block = options['use_t_minus_2_block'] == '1'
    use_derivative = options['use_derivative'] == '1'
    use_integral = options['use_integral'] == '1'
    use_error = options['use_error'] == '1'
    predict_2_steps = options['predict_two_steps'] == '1'
    use_global_backprop = options['use_global_backprop'] == '1'
    complex_context_in_second_layer = options[
        'feed_context_in_complex_layer'] == '1'
    parameters['normalize_output'] = options["normalize_output"] == "1"
    parameters['backpropagate_readout_error'] = options[
        "backpropagate_readout_error"] == "1"
    nhidden = np.prod(parameters['output_block'].shape)
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    # +1 keeps the initial max strictly above the initial min.
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block']) + 1
    parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    ninputs = 0
    noutputs = 0
    ncontext = 0
    # Any additional memory buffers needed in the operation of the unit
    parameters['internal_buffers'] = []
    parameters['integral_blocks'] = []
    parameters['derivative_blocks'] = []
    parameters['error_blocks'] = []
    parameters['use_derivative'] = use_derivative
    parameters['use_integral'] = use_integral
    parameters['use_error'] = use_error
    parameters['use_t_2_block'] = use_t_2_block
    parameters['predict_2_steps'] = predict_2_steps
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
        if use_derivative:
            parameters['derivative_blocks'].append(
                SharedArray.SharedNumpyArray_like(block))
        if use_integral:
            parameters['integral_blocks'].append(
                SharedArray.SharedNumpyArray_like(block))
        if use_error:
            parameters['error_blocks'].append(
                SharedArray.SharedNumpyArray_like(block))
    # Each enabled feature adds one copy of every signal block to the input;
    # predicting two steps doubles the prediction outputs per block.
    input_block_features = 1
    output_predictions = 1
    if use_derivative:
        input_block_features += 1
    if use_integral:
        input_block_features += 1
    if use_error:
        input_block_features += 1
    if use_t_2_block:
        input_block_features += 1
    if predict_2_steps:
        output_predictions += 1
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        ninputs += np.prod(block.shape) * input_block_features
    for (block, delta, factor) in parameters['context_blocks']:
        ncontext += np.prod(block.shape)
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += np.prod(block.shape) * output_predictions
    nreadout = 0
    for (block, delta, pblock) in parameters['readout_blocks']:
        nreadout += np.prod(block.shape)
    parameters["Primary_Predictor_params"] = {}
    parameters["Readout_Predictor_params"] = {}
    if complex_unit and complex_context_in_second_layer:
        # 4 layer perceptron, context fed into the second layer
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + 1, 2 * nhidden + ncontext + 1, nhidden + 1,
             noutputs + 1])
    elif complex_unit:
        # 4 layer perceptron, context concatenated with the inputs
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + ncontext + 1, 2 * nhidden + 1, nhidden + 1,
             noutputs + 1])
    else:
        # 3 layer perceptron Simple MLP Unit (not complex unit)
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + ncontext + 1, nhidden + 1, noutputs + 1])
    # note: builtin float replaces np.float (a plain alias removed in
    # NumPy >= 1.24); the resulting dtype (float64) is unchanged.
    parameters["Primary_Predictor_params"][
        'beta'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Primary_Predictor_params"]['beta'][0] = 1.0
    parameters["Primary_Predictor_params"]['learning_rate'] = parameters[
        'primary_learning_rate']
    parameters["Primary_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Primary_Predictor_params"][
        'mse'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Primary_Predictor_params"]['layers'])
    parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
        [nhidden + 1, 2 * nhidden + 1, nreadout + 1])
    parameters["Readout_Predictor_params"][
        'beta'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Readout_Predictor_params"]['beta'][0] = 1.0
    parameters["Readout_Predictor_params"]['learning_rate'] = parameters[
        'readout_learning_rate']
    parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Readout_Predictor_params"][
        'mse'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Readout_Predictor_params"]['layers'])
    parameters["Primary_Predictor_params"]['polynomial'] = polynomial
    parameters["Readout_Predictor_params"]['polynomial'] = polynomial
    parameters['autoencoder'] = autoencoder
    parameters['use_global_backprop'] = use_global_backprop
    parameters[
        "complex_context_in_second_layer"] = complex_context_in_second_layer
    parameters["complex"] = complex_unit
def generate_missing_parameters(parameters, options):
    """Generate all the missing dictionary parameters when all the other
    relevant variables are known.

    When complex_unit is False, a standard 3-layer MLP is used. When
    complex_unit is True, an MLP with additional hidden layers is used.
    There is no return value; the method leaves a side effect by
    modifying the parameters dict.

    :param parameters: parameter dictionary, modified in place
    :type parameters: dict
    :param options: option strings ('1'/'0' flags and unit_type)
    :type options: dict
    """
    complex_unit = options['unit_type'] == "complex"
    polynomial = options['polynomial'] == '1'
    autoencoder = options['autoencoder'] == '1'
    nhidden = np.prod(parameters['output_block'].shape)
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    ninputs = 0
    noutputs = 0
    ncontext = 0
    # Any additional memory buffers needed in the operation of the unit
    parameters['internal_buffers'] = []
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        ninputs += np.prod(block.shape) * len(
            ExecutionUnit.UNSUPERVISED_SIGNAL_INPUTS)
    for (block, delta, factor) in parameters['context_blocks']:
        ncontext += np.prod(block.shape)
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += 2 * np.prod(
            block.shape)  # to predict two steps into the future
    nadditional = 0
    for (block, delta, pblock) in parameters['readout_blocks']:
        nadditional += np.prod(block.shape)
    parameters["Primary_Predictor_params"] = {}
    parameters["Residual_Predictor_params"] = {}
    parameters["Readout_Predictor_params"] = {}
    if complex_unit:
        # 4 layer perceptron
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ncontext + 1, 3 * nhidden + 1, 2 * nhidden + 1, noutputs + 1])
        parameters["Residual_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + 1, 2 * nhidden + 1, nhidden + 1, noutputs + 1])
        parameters["complex"] = True
    else:
        # 3 layer perceptron Simple MLP Unit (not complex unit)
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ncontext + 1, 2 * nhidden + 1, noutputs + 1])
        parameters["Residual_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + 2 * nhidden + 1, nhidden + 1, noutputs + 1])
    # note: builtin float replaces np.float (a plain alias removed in
    # NumPy >= 1.24); the resulting dtype (float64) is unchanged.
    parameters["Primary_Predictor_params"][
        'beta'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Primary_Predictor_params"]['beta'][0] = 1.0
    parameters["Primary_Predictor_params"]['learning_rate'] = parameters[
        'primary_learning_rate']
    parameters["Primary_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Primary_Predictor_params"][
        'mse'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Primary_Predictor_params"]['layers'])
    parameters["Residual_Predictor_params"][
        'beta'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Residual_Predictor_params"]['beta'][0] = 1.0
    parameters["Residual_Predictor_params"]['learning_rate'] = parameters[
        'primary_learning_rate']
    parameters["Residual_Predictor_params"]['momentum'] = parameters[
        'momentum']
    parameters["Residual_Predictor_params"][
        'mse'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Residual_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Residual_Predictor_params"]['layers'])
    parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
        [nhidden + 1, 2 * nhidden + 1, nadditional + 1])
    parameters["Readout_Predictor_params"][
        'beta'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Readout_Predictor_params"]['beta'][0] = 1.0
    parameters["Readout_Predictor_params"]['learning_rate'] = parameters[
        'readout_learning_rate']
    parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Readout_Predictor_params"][
        'mse'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Readout_Predictor_params"]['layers'])
    parameters["Primary_Predictor_params"]['polynomial'] = polynomial
    parameters["Residual_Predictor_params"]['polynomial'] = polynomial
    parameters["Readout_Predictor_params"]['polynomial'] = polynomial
    parameters['autoencoder'] = autoencoder
def generate_missing_parameters(parameters, options):
    """Generate all the missing dictionary parameters when all the other
    relevant variables are known.

    When complex_unit is False, a standard 3-layer MLP is used. When
    complex_unit is True, an MLP with additional hidden layers is used.
    There is no return value; the method leaves a side effect by
    modifying the parameters dict.

    NOTE(review): a near-identical definition of this function appears
    earlier in this file; the later def shadows the earlier one at import
    time — consider removing one of the duplicates.

    :param parameters: parameter dictionary, modified in place
    :type parameters: dict
    :param options: option strings ('1'/'0' flags and unit_type)
    :type options: dict
    """
    complex_unit = options['unit_type'] == "complex"
    polynomial = options['polynomial'] == '1'
    autoencoder = options['autoencoder'] == '1'
    use_t_2_block = options['use_t_minus_2_block'] == '1'
    use_derivative = options['use_derivative'] == '1'
    use_integral = options['use_integral'] == '1'
    use_error = options['use_error'] == '1'
    predict_2_steps = options['predict_two_steps'] == '1'
    use_global_backprop = options['use_global_backprop'] == '1'
    complex_context_in_second_layer = options[
        'feed_context_in_complex_layer'] == '1'
    parameters['normalize_output'] = options["normalize_output"] == "1"
    parameters['backpropagate_readout_error'] = options[
        "backpropagate_readout_error"] == "1"
    nhidden = np.prod(parameters['output_block'].shape)
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    # +1 keeps the initial max strictly above the initial min.
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block']) + 1
    parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    ninputs = 0
    noutputs = 0
    ncontext = 0
    # Any additional memory buffers needed in the operation of the unit
    parameters['internal_buffers'] = []
    parameters['integral_blocks'] = []
    parameters['derivative_blocks'] = []
    parameters['error_blocks'] = []
    parameters['use_derivative'] = use_derivative
    parameters['use_integral'] = use_integral
    parameters['use_error'] = use_error
    parameters['use_t_2_block'] = use_t_2_block
    parameters['predict_2_steps'] = predict_2_steps
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
        if use_derivative:
            parameters['derivative_blocks'].append(
                SharedArray.SharedNumpyArray_like(block))
        if use_integral:
            parameters['integral_blocks'].append(
                SharedArray.SharedNumpyArray_like(block))
        if use_error:
            parameters['error_blocks'].append(
                SharedArray.SharedNumpyArray_like(block))
    # Each enabled feature adds one copy of every signal block to the input;
    # predicting two steps doubles the prediction outputs per block.
    input_block_features = 1
    output_predictions = 1
    if use_derivative:
        input_block_features += 1
    if use_integral:
        input_block_features += 1
    if use_error:
        input_block_features += 1
    if use_t_2_block:
        input_block_features += 1
    if predict_2_steps:
        output_predictions += 1
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        ninputs += np.prod(block.shape) * input_block_features
    for (block, delta, factor) in parameters['context_blocks']:
        ncontext += np.prod(block.shape)
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += np.prod(block.shape) * output_predictions
    nreadout = 0
    for (block, delta, pblock) in parameters['readout_blocks']:
        nreadout += np.prod(block.shape)
    parameters["Primary_Predictor_params"] = {}
    parameters["Readout_Predictor_params"] = {}
    if complex_unit and complex_context_in_second_layer:
        # 4 layer perceptron, context fed into the second layer
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers([
            ninputs + 1, 2 * nhidden + ncontext + 1, nhidden + 1, noutputs + 1
        ])
    elif complex_unit:
        # 4 layer perceptron, context concatenated with the inputs
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers([
            ninputs + ncontext + 1, 2 * nhidden + 1, nhidden + 1, noutputs + 1
        ])
    else:
        # 3 layer perceptron Simple MLP Unit (not complex unit)
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + ncontext + 1, nhidden + 1, noutputs + 1])
    # note: builtin float replaces np.float (a plain alias removed in
    # NumPy >= 1.24); the resulting dtype (float64) is unchanged.
    parameters["Primary_Predictor_params"][
        'beta'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Primary_Predictor_params"]['beta'][0] = 1.0
    parameters["Primary_Predictor_params"]['learning_rate'] = parameters[
        'primary_learning_rate']
    parameters["Primary_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Primary_Predictor_params"][
        'mse'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Primary_Predictor_params"]['layers'])
    parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
        [nhidden + 1, 2 * nhidden + 1, nreadout + 1])
    parameters["Readout_Predictor_params"][
        'beta'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Readout_Predictor_params"]['beta'][0] = 1.0
    parameters["Readout_Predictor_params"]['learning_rate'] = parameters[
        'readout_learning_rate']
    parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Readout_Predictor_params"][
        'mse'] = SharedArray.SharedNumpyArray((1, ), float)
    parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Readout_Predictor_params"]['layers'])
    parameters["Primary_Predictor_params"]['polynomial'] = polynomial
    parameters["Readout_Predictor_params"]['polynomial'] = polynomial
    parameters['autoencoder'] = autoencoder
    parameters['use_global_backprop'] = use_global_backprop
    parameters[
        "complex_context_in_second_layer"] = complex_context_in_second_layer
    parameters["complex"] = complex_unit
def upgrade_to_ver_1(parameters):
    """Upgrade a version-0 parameter dictionary in place to version 1.

    Adds the buffers and feature flags introduced in version 1 and, when a
    readout predictor is missing, splits the readout columns out of the
    primary predictor's last weight matrix into a dedicated two-layer
    readout predictor.

    NOTE(review): a near-identical definition of this function appears
    earlier in this file; the later def shadows the earlier one at import
    time — consider removing one of the duplicates.

    :param parameters: parameter dictionary, modified in place
    :type parameters: dict
    """
    parameters['internal_buffers'] = []
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['integral_blocks'] = []
    parameters['derivative_blocks'] = []
    parameters['error_blocks'] = []
    # Version-1 defaults for flags that version 0 did not have.
    parameters['use_derivative'] = True
    parameters['use_integral'] = True
    parameters['use_error'] = True
    parameters['use_t_2_block'] = False
    parameters['predict_2_steps'] = False
    parameters['use_global_backprop'] = False
    parameters['normalize_output'] = False
    parameters["complex_context_in_second_layer"] = False
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
        parameters['derivative_blocks'].append(
            SharedArray.SharedNumpyArray_like(block))
        parameters['integral_blocks'].append(
            SharedArray.SharedNumpyArray_like(block))
        parameters['error_blocks'].append(
            SharedArray.SharedNumpyArray_like(block))
    if "complex" not in parameters:
        parameters["complex"] = False
        # A 4-layer primary predictor marks a complex unit.
        if len(parameters["Primary_Predictor_params"]['layers']) == 4:
            parameters["complex"] = True
    if "autoencoder" not in parameters:
        parameters["autoencoder"] = False
    if "readout_learning_rate" not in parameters:
        parameters['readout_learning_rate'] = parameters[
            "Primary_Predictor_params"]["learning_rate"]
    if "momentum" not in parameters:
        parameters['momentum'] = parameters["Primary_Predictor_params"][
            "momentum"]
    # Hidden-layer width excluding the bias unit.
    nhidden = parameters["Primary_Predictor_params"]['layers'][-2][
        'activation'].shape[0] - 1
    nreadout = 0
    noutputs = 0
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += np.prod(block.shape)
    for (block, delta, pblock) in parameters['readout_blocks']:
        nreadout += np.prod(block.shape)
    if "Readout_Predictor_params" not in parameters:
        # Split the readout outputs out of the primary predictor: the last
        # nreadout columns of its final weight matrix become the weights of
        # a new two-layer readout predictor.
        parameters["Readout_Predictor_params"] = {}
        parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
            [nhidden + 1, nreadout + 1])
        # note: builtin float replaces np.float (a plain alias removed in
        # NumPy >= 1.24); the resulting dtype (float64) is unchanged.
        parameters["Readout_Predictor_params"][
            'beta'] = SharedArray.SharedNumpyArray((1, ), float)
        parameters["Readout_Predictor_params"]['beta'][0] = 1.0
        parameters["Readout_Predictor_params"][
            'learning_rate'] = parameters['readout_learning_rate']
        parameters["Readout_Predictor_params"]['momentum'] = parameters[
            'momentum']
        parameters["Readout_Predictor_params"][
            'mse'] = SharedArray.SharedNumpyArray((1, ), float)
        parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Readout_Predictor_params"]['layers'])
        parameters["Readout_Predictor_params"]['weights'][0][:] = parameters[
            "Primary_Predictor_params"]['weights'][-1][:, noutputs:]
        # Shrink the primary predictor's output layer to the signal outputs.
        old_weight_matrix = parameters["Primary_Predictor_params"][
            'weights'][-1]
        parameters["Primary_Predictor_params"]['weights'][
            -1] = SharedArray.SharedNumpyArray((nhidden + 1, noutputs), float)
        parameters["Primary_Predictor_params"]['weights'][
            -1][:] = old_weight_matrix[:, :noutputs]
        parameters["Primary_Predictor_params"]['layers'][-1] = {
            'activation': SharedArray.SharedNumpyArray(noutputs, float),
            'error': SharedArray.SharedNumpyArray(noutputs, float),
            'delta': SharedArray.SharedNumpyArray(noutputs, float)
        }
        parameters['backpropagate_readout_error'] = True