Example #1
 def generate_missing_parameters(parameters):
     """
     This method can be called to generate all the missing dictionary parameters when all
     the other relevant variables are known. Leave empty if there is nothing more to generate
     """
     ninputs = ((2*parameters['array'].shape[0])/3)*parameters['block'][2]*parameters['block'][3]*3
     for block in parameters["context_blocks"]:
         ninputs += np.prod(block.shape)
     nhidden = parameters['internal_rep_size']
     noutputs = (parameters['array'].shape[0]-(2*parameters['array'].shape[0])/3)*parameters['block'][2]*parameters['block'][3]*3
     parameters["MLP_parameters"] = {}
     parameters["MLP_parameters"]['layers'] = [
         {'activation': SharedArray.SharedNumpyArray((ninputs+1), np.float),
          'error': SharedArray.SharedNumpyArray((ninputs+1), np.float),
          'delta': SharedArray.SharedNumpyArray((ninputs+1), np.float)
          },
         {'activation': SharedArray.SharedNumpyArray((nhidden+1), np.float),
          'error': SharedArray.SharedNumpyArray((nhidden+1), np.float),
          'delta': SharedArray.SharedNumpyArray((nhidden+1), np.float)
          },
         {'activation': SharedArray.SharedNumpyArray((noutputs+1), np.float),
          'error': SharedArray.SharedNumpyArray((noutputs+1), np.float),
          'delta': SharedArray.SharedNumpyArray((noutputs+1), np.float)
          },
     ]
     parameters["MLP_parameters"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
     parameters["MLP_parameters"]['beta'][0] = 1.0
     parameters["MLP_parameters"]['learning_rate'] = parameters['learning_rate']
     parameters["MLP_parameters"]['momentum'] = parameters['momentum']
     parameters["MLP_parameters"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
     parameters["MLP_parameters"]['weights'] = [
         MLP.initialize_weights(SharedArray.SharedNumpyArray((ninputs+1, nhidden), np.float)),
         MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, noutputs), np.float))
     ]
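To make the sizing arithmetic concrete: the first two thirds of the rows of 'array' feed the MLP and the remaining third is predicted, with each grid cell covering a block[2] x block[3] patch of RGB pixels. A minimal sketch with made-up sizes (6 rows, 8x8 blocks; illustrative only):

import numpy as np

rows, bh, bw = 6, 8, 8  # hypothetical 'array' rows and block dimensions
ninputs = ((2 * rows) // 3) * bh * bw * 3          # first 2/3 of rows as input
noutputs = (rows - (2 * rows) // 3) * bh * bw * 3  # remaining 1/3 is predicted
print(ninputs, noutputs)  # 768 384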
Example #2
def test_MLPN_xor_poly():
    """
    Simple test checking whether a 3-layer MLP is able to learn XOR
    """
    x1 = np.array([0.0, 0.0])
    x2 = np.array([1.0, 1.0])
    x3 = np.array([0.0, 1.0])
    x4 = np.array([1.0, 0.0])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 15, 3])
    np.random.seed(seed=509)
    parameters['weights'] = MLP_module.get_weights(parameters['layers'])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([0.1])
    parameters['momentum'] = np.array([0.5])
    parameters['mse'] = np.array([0.0])
    parameters['polynomial'] = True
    M = MLP_module.MLP(parameters)
    for n in xrange(30000):
        M.train(x1, np.array([1.0, 0.0]))
        M.train(x2, np.array([1.0, 0.0]))
        M.train(x3, np.array([0.0, 1.0]))
        M.train(x4, np.array([0.0, 1.0]))
    o1 = M.evaluate(x1)
    o2 = M.evaluate(x2)
    o3 = M.evaluate(x3)
    o4 = M.evaluate(x4)
    assert o1[0] > 0.8
    assert o1[1] < 0.2
    assert o2[0] > 0.8
    assert o2[1] < 0.2
    assert o3[0] < 0.2
    assert o3[1] > 0.8
    assert o4[0] < 0.2
    assert o4[1] > 0.8
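For readers without MLP_module at hand, here is a self-contained NumPy sketch of the kind of sigmoid backprop network the test assumes; the hidden width, update rule, and constants are illustrative assumptions, not code from MLP_module:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.RandomState(509)
W1 = rng.uniform(-1.0, 1.0, (3, 8))  # 2 inputs + bias -> 8 hidden units
W2 = rng.uniform(-1.0, 1.0, (9, 2))  # 8 hidden + bias -> 2 outputs
X = np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]])
T = np.array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]])
for _ in range(30000):
    for x, t in zip(X, T):
        a0 = np.append(x, 1.0)                    # input + bias
        a1 = np.append(sigmoid(a0.dot(W1)), 1.0)  # hidden + bias
        a2 = sigmoid(a1.dot(W2))                  # output
        d2 = (a2 - t) * a2 * (1.0 - a2)           # output delta
        d1 = W2[:-1].dot(d2) * a1[:-1] * (1.0 - a1[:-1])  # hidden delta
        W2 -= 0.1 * np.outer(a1, d2)
        W1 -= 0.1 * np.outer(a0, d1)
for x in X:
    h = np.append(sigmoid(np.append(x, 1.0).dot(W1)), 1.0)
    print(x, sigmoid(h.dot(W2)))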
Example #4
 def generate_missing_parameters(parameters):
     """
     This method can be called to generate all the missing dictionary parameters when all
     the other relevant variables are known. Leave empty if there is nothing more to generate
     """
     ninputs = 2*parameters['block'][2]*parameters['block'][3]*3
     nhidden = ninputs / 5
     noutputs = parameters['block'][2]*parameters['block'][3]*3
     parameters["MLP_parameters"] = {}
     parameters["MLP_parameters"]['layers'] = [
         {'activation': SharedArray.SharedNumpyArray((ninputs+1), np.float),
          'error': SharedArray.SharedNumpyArray((ninputs+1), np.float),
          'delta': SharedArray.SharedNumpyArray((ninputs+1), np.float)
          },
         {'activation': SharedArray.SharedNumpyArray((nhidden+1), np.float),
          'error': SharedArray.SharedNumpyArray((nhidden+1), np.float),
          'delta': SharedArray.SharedNumpyArray((nhidden+1), np.float)
          },
         {'activation': SharedArray.SharedNumpyArray((noutputs+1), np.float),
          'error': SharedArray.SharedNumpyArray((noutputs+1), np.float),
          'delta': SharedArray.SharedNumpyArray((noutputs+1), np.float)
          },
     ]
     parameters["MLP_parameters"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
     parameters["MLP_parameters"]['beta'][0] = 1.0
     parameters["MLP_parameters"]['learning_rate'] = parameters['learning_rate']
     parameters["MLP_parameters"]['momentum'] = parameters['momentum']
     parameters["MLP_parameters"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
     parameters["MLP_parameters"]['weights'] = [
         MLP.initialize_weights(SharedArray.SharedNumpyArray((ninputs+1, nhidden), np.float)),
         MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, noutputs), np.float))
     ]
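SharedArray.SharedNumpyArray appears throughout these examples. A minimal hypothetical stand-in that is enough to read the snippets (the real class is assumed to be backed by shared memory for multi-process access, which this sketch omits):

import numpy as np

class SharedNumpyArray(np.ndarray):
    # Zero-initialized like the arrays above, but NOT shared-memory backed.
    def __new__(cls, shape, dtype=float):
        return np.zeros(shape, dtype=dtype).view(cls)

beta = SharedNumpyArray((1,), float)
beta[0] = 1.0
print(beta)  # [1.]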
Example #5
 def upgrade_to_ver_1(parameters):
     parameters['internal_buffers'] = []
     parameters['output_min'] = SharedArray.SharedNumpyArray_like(parameters['output_block'])
     parameters['output_max'] = SharedArray.SharedNumpyArray_like(parameters['output_block'])
     parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(parameters['output_block'])
     parameters['integral_blocks'] = []
     parameters['derivative_blocks'] = []
     parameters['error_blocks'] = []
     parameters['use_derivative'] = True
     parameters['use_integral'] = True
     parameters['use_error'] = True
     parameters['use_t_2_block'] = False
     parameters['predict_2_steps'] = False
     parameters['use_global_backprop'] = False
     parameters['normalize_output'] = False
     parameters["complex_context_in_second_layer"] = False
     for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
         block01 = SharedArray.SharedNumpyArray_like(block)
         block02 = SharedArray.SharedNumpyArray_like(block)
         block03 = SharedArray.SharedNumpyArray_like(block)
         parameters['internal_buffers'].append((block01, block02, block03))
         parameters['derivative_blocks'].append(SharedArray.SharedNumpyArray_like(block))
         parameters['integral_blocks'].append(SharedArray.SharedNumpyArray_like(block))
         parameters['error_blocks'].append(SharedArray.SharedNumpyArray_like(block))
     if "complex" not in parameters.keys():
         parameters["complex"] = False
     if len(parameters["Primary_Predictor_params"]['layers']) == 4:
         parameters["complex"] = True
     if "autoencoder" not in parameters.keys():
         parameters["autoencoder"] = False
     if "readout_learning_rate" not in parameters.keys():
         parameters['readout_learning_rate'] = parameters["Primary_Predictor_params"]["learning_rate"]
     if "momentum" not in parameters.keys():
         parameters['momentum'] = parameters["Primary_Predictor_params"]["momentum"]
     nhidden = parameters["Primary_Predictor_params"]['layers'][-2]['activation'].shape[0]-1
     nreadout = 0
     noutputs = 0
     for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
         noutputs += np.prod(block.shape)
     for (block, delta, pblock) in parameters['readout_blocks']:
         nreadout += np.prod(block.shape)
     if "Readout_Predictor_params" not in parameters.keys():
         parameters["Readout_Predictor_params"] = {}
         parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers([nhidden+1, nreadout+1])
         parameters["Readout_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
         parameters["Readout_Predictor_params"]['beta'][0] = 1.0
         parameters["Readout_Predictor_params"]['learning_rate'] = parameters['readout_learning_rate']
         parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
         parameters["Readout_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
         parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(parameters["Readout_Predictor_params"]['layers'])
         parameters["Readout_Predictor_params"]['weights'][0][:] = parameters["Primary_Predictor_params"]['weights'][-1][:, nouputs:]
         old_weight_matrix = parameters["Primary_Predictor_params"]['weights'][-1]
         parameters["Primary_Predictor_params"]['weights'][-1] = SharedArray.SharedNumpyArray((nhidden+1, nouputs), np.float)
         parameters["Primary_Predictor_params"]['weights'][-1][:] = old_weight_matrix[:, :nouputs]
         parameters["Primary_Predictor_params"]['layers'][-1] = {'activation': SharedArray.SharedNumpyArray(nouputs, np.float),
                                                                 'error': SharedArray.SharedNumpyArray(nouputs, np.float),
                                                                 'delta': SharedArray.SharedNumpyArray(nouputs, np.float)
                                                                 }
         parameters['backpropagate_readout_error'] = True
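The weight surgery at the end of upgrade_to_ver_1 slices the primary predictor's last weight matrix column-wise: the first noutputs columns keep predicting the signal, and the remaining columns seed the new readout network. A toy sketch with illustrative sizes:

import numpy as np

nhidden, noutputs, nreadout = 4, 3, 2  # illustrative sizes only
old = np.arange((nhidden + 1) * (noutputs + nreadout), dtype=float)
old = old.reshape(nhidden + 1, noutputs + nreadout)
primary_w = old[:, :noutputs]  # stays with the primary predictor
readout_w = old[:, noutputs:]  # becomes the readout predictor's first weights
print(primary_w.shape, readout_w.shape)  # (5, 3) (5, 2)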
Example #6
 def generate_missing_parameters(parameters):
     """
     This method can be called to generate all the missing dictionary parameters when all
     the other relevant variables are known. Leave empty if there is nothing more to generate
     """
     ninputs = ((2 * parameters['array'].shape[0]) /
                3) * parameters['block'][2] * parameters['block'][3] * 3
     for block in parameters["context_blocks"]:
         ninputs += np.prod(block.shape)
     nhidden = parameters['internal_rep_size']
     noutputs = (parameters['array'].shape[0] -
                 (2 * parameters['array'].shape[0]) /
                 3) * parameters['block'][2] * parameters['block'][3] * 3
     parameters["MLP_parameters"] = {}
     parameters["MLP_parameters"]['layers'] = [
         {
             'activation': SharedArray.SharedNumpyArray((ninputs + 1),
                                                        np.float),
             'error': SharedArray.SharedNumpyArray((ninputs + 1), np.float),
             'delta': SharedArray.SharedNumpyArray((ninputs + 1), np.float)
         },
         {
             'activation': SharedArray.SharedNumpyArray((nhidden + 1),
                                                        np.float),
             'error': SharedArray.SharedNumpyArray((nhidden + 1), np.float),
             'delta': SharedArray.SharedNumpyArray((nhidden + 1), np.float)
         },
         {
             'activation':
             SharedArray.SharedNumpyArray((noutputs + 1), np.float),
             'error':
             SharedArray.SharedNumpyArray((noutputs + 1), np.float),
             'delta':
             SharedArray.SharedNumpyArray((noutputs + 1), np.float)
         },
     ]
     parameters["MLP_parameters"]['beta'] = SharedArray.SharedNumpyArray(
         (1, ), np.float)
     parameters["MLP_parameters"]['beta'][0] = 1.0
     parameters["MLP_parameters"]['learning_rate'] = parameters[
         'learning_rate']
     parameters["MLP_parameters"]['momentum'] = parameters['momentum']
     parameters["MLP_parameters"]['mse'] = SharedArray.SharedNumpyArray(
         (1, ), np.float)
     parameters["MLP_parameters"]['weights'] = [
         MLP.initialize_weights(
             SharedArray.SharedNumpyArray((ninputs + 1, nhidden),
                                          np.float)),
         MLP.initialize_weights(
             SharedArray.SharedNumpyArray((nhidden + 1, noutputs),
                                          np.float))
     ]
Example #7
 def generate_missing_parameters(parameters):
     """
     This method can be called to generate all the missing dictionary parameters when all
     the other relevant variables are known. Leave empty if there is nothing more to generate
     """
     ninputs1 = 3 * parameters['block'][2] * parameters['block'][3] * 3
     nhidden = ninputs1 / 5
     ninputs = ninputs1 + nhidden
     noutputs = 2 * parameters['block'][2] * parameters['block'][3] * 3
     parameters["MLP_parameters"] = {}
     parameters["MLP_parameters"]['layers'] = [
         {
             'activation': SharedArray.SharedNumpyArray((ninputs + 1),
                                                        np.float),
             'error': SharedArray.SharedNumpyArray((ninputs + 1), np.float),
             'delta': SharedArray.SharedNumpyArray((ninputs + 1), np.float)
         },
         {
             'activation': SharedArray.SharedNumpyArray((nhidden + 1),
                                                        np.float),
             'error': SharedArray.SharedNumpyArray((nhidden + 1), np.float),
             'delta': SharedArray.SharedNumpyArray((nhidden + 1), np.float)
         },
         {
             'activation':
             SharedArray.SharedNumpyArray((noutputs + 1), np.float),
             'error':
             SharedArray.SharedNumpyArray((noutputs + 1), np.float),
             'delta':
             SharedArray.SharedNumpyArray((noutputs + 1), np.float)
         },
     ]
     parameters["MLP_parameters"]['beta'] = SharedArray.SharedNumpyArray(
         (1, ), np.float)
     parameters["MLP_parameters"]['beta'][0] = 1.0
     parameters["MLP_parameters"]['learning_rate'] = parameters[
         'learning_rate']
     parameters["MLP_parameters"]['momentum'] = parameters['momentum']
     parameters["MLP_parameters"]['mse'] = SharedArray.SharedNumpyArray(
         (1, ), np.float)
     parameters["MLP_parameters"]['weights'] = [
         MLP.initialize_weights(
             SharedArray.SharedNumpyArray((ninputs + 1, nhidden),
                                          np.float)),
         MLP.initialize_weights(
             SharedArray.SharedNumpyArray((nhidden + 1, noutputs),
                                          np.float))
     ]
Example #8
def test_MLPN_eval00():
    """
    Simple test checking whether the MLP evaluates properly with zero weights
    """
    x = np.array([1.0, 0.0])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 5, 5])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([1.0])
    parameters['momentum'] = np.array([0.0])
    parameters['mse'] = np.array([0.0])
    parameters['weights'] = [np.array([[0, 0, 0, 0],
                                       [0, 0, 0, 0],
                                       [0, 0, 0, 0]]).astype(np.float)*100,
                             np.array([[0, 0, 0, 0],
                                       [0, 0, 0, 0],
                                       [0, 0, 0, 0],
                                       [0, 0, 0, 0],
                                       [0, 0, 0, 0]]).astype(np.float)*100
                             ]
    M = MLP_module.MLP(parameters)
    out = M.evaluate(x)
    assert np.allclose(out, np.array([0.5, 0.5, 0.5, 0.5]))
    hidden = M.get_activation(1)
    assert np.allclose(hidden, np.array([0.5, 0.5, 0.5, 0.5]))
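The expected value follows from the activation function: with all-zero weights every unit sees a zero net input, and the logistic sigmoid (which the assertions imply MLP_module uses) maps zero to 0.5:

import numpy as np

z = np.zeros(4)  # net input of every unit in a zero-weight network
print(1.0 / (1.0 + np.exp(-z)))  # [0.5 0.5 0.5 0.5]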
Example #9
 def __init__(self, parameters):
     self.yet_previous_array = parameters['yet_previous_array'].view(
         np.ndarray)
     self.previous_array = parameters['previous_array'].view(np.ndarray)
     self.current_array = parameters['current_array'].view(np.ndarray)
     self.predicted_array = parameters['predicted_array'].view(np.ndarray)
     self.block = parameters['block'].view(np.ndarray)
     self.MLP = MLP.MLP(parameters["MLP_parameters"])
Example #10
def test_MLPN_eval01():
    """
    Simple test checking whether the MLP evaluates properly on a specific case
    """
    x3 = np.array([0.0, 1.0])
    x4 = np.array([1.0, 0.0])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 6, 5])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([1.0])
    parameters['momentum'] = np.array([0.0])
    parameters['mse'] = np.array([0.0])
    parameters['polynomial'] = True
    parameters['weights'] = [
        np.array([[1, -1, 1, -1], [1, -1, -1, 1], [0, 0, 0, 0]]).astype(
            np.float) * 100,
        np.array([
            [1, 0, 0, 0],
            [0, 2, 0, 0],
            [0, 0, 1, 0],
            [0, 0, -1, 2],
            [0, 0, 0, 0],
            [0, -1, 0, -1],
        ]).astype(np.float) * 100
    ]
    M = MLP_module.MLP(parameters)
    out = M.evaluate(x4)
    assert (out[0] > 0.99)
    assert (out[1] < 0.01)
    assert (out[2] > 0.99)
    assert (out[3] < 0.01)
    hidden = M.get_activation(1)
    assert (hidden[0] > 0.99)
    assert (hidden[1] < 0.01)
    assert (hidden[2] > 0.99)
    assert (hidden[3] < 0.01)
    out = M.evaluate(x3)
    assert (out[0] > 0.99)
    assert (out[1] < 0.01)
    assert (out[2] < 0.01)
    assert (out[3] > 0.99)
    hidden = M.get_activation(1)
    assert (hidden[0] > 0.99)
    assert (hidden[1] < 0.01)
    assert (hidden[2] < 0.01)
    assert (hidden[3] > 0.99)
Example #11
def test_perceptron_two_layers():
    X = np.array([[0, 1], [1, 1], [1, 0], [0, 0]])
    Y = np.array([[1], [1], [1], [0]])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 2])
    np.random.seed(seed=509)
    parameters['weights'] = MLP_module.get_weights(parameters['layers'])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([0.25])
    parameters['momentum'] = np.array([0.5])
    parameters['mse'] = np.array([0.0])
    M = MLP_module.MLP(parameters)
    for n in xrange(30000):
        i = np.random.randint(low=0, high=4)
        M.train(X[i], Y[i])
    for i in range(4):
        O = M.evaluate(X[i])
        assert (np.allclose(Y[i], O, atol=0.03))
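Unlike XOR, the target here is the logical OR of the two inputs, which is linearly separable, so a two-layer (input to output) perceptron suffices. A stripped-down single logistic unit trained the same way illustrates this; the squared-error gradient update below is an assumption about MLP_module, not copied from it:

import numpy as np

rng = np.random.RandomState(509)
w = rng.uniform(-1.0, 1.0, 3)  # 2 inputs + bias
X = np.array([[0., 1.], [1., 1.], [1., 0.], [0., 0.]])
Y = np.array([1., 1., 1., 0.])
for _ in range(30000):
    i = rng.randint(4)
    x = np.append(X[i], 1.0)
    o = 1.0 / (1.0 + np.exp(-x.dot(w)))
    w -= 0.25 * (o - Y[i]) * o * (1.0 - o) * x
for x in X:
    print(x, 1.0 / (1.0 + np.exp(-np.append(x, 1.0).dot(w))))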
Example #12
 def __init__(self, parameters):
     self.array = parameters['array'].view(np.ndarray)
     self.divider = parameters['array'].shape[0] - (
         2 * parameters['array'].shape[0]) / 3
     self.predicted_array = parameters['predicted_array'].view(np.ndarray)
     self.block = parameters['block'].view(np.ndarray)
     self.output_block = parameters['output_block']
     self.context_blocks = parameters['context_blocks']
     self.MLP = MLP.MLP(parameters["MLP_parameters"])
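The repeated .view(np.ndarray) calls in these constructors re-expose a SharedArray buffer as a plain ndarray: the memory stays shared, but per-access subclass dispatch is avoided (the speed-up the comment in Example #19 mentions). A small demonstration with an arbitrary ndarray subclass:

import numpy as np

class Shared(np.ndarray):  # stand-in for a SharedArray subclass
    pass

s = np.zeros(3).view(Shared)
v = s.view(np.ndarray)  # plain ndarray over the same buffer
v[0] = 42.0
print(type(v).__name__, s[0])  # ndarray 42.0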
Example #13
def test_perceptron_two_layers():
    X = np.array([[0, 1], [1, 1], [1, 0], [0, 0]])
    Y = np.array([[1], [1], [1], [0]])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 2])
    np.random.seed(seed=509)
    parameters['weights'] = MLP_module.get_weights(parameters['layers'])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([0.25])
    parameters['momentum'] = np.array([0.5])
    parameters['mse'] = np.array([0.0])
    M = MLP_module.MLP(parameters)
    for n in xrange(30000):
        i = np.random.randint(low=0, high=4)
        M.train(X[i], Y[i])
    for i in range(4):
        O = M.evaluate(X[i])
        assert(np.allclose(Y[i], O, atol=0.03))
Example #14
def upgrade_readout(simulation_dict):
    """
    Upgrade the dictionary with parameters for a three-layer perceptron readout
    :param simulation_dict:
    :return:
    """
    logging.info("Upgrading readout network to a full perceptron")
    import PVM_framework.SharedArray as SharedArray
    import PVM_framework.MLP as MLP
    simulation_dict['stage0_size'] = len(simulation_dict['stage0'])
    needed = True
    for i in range(simulation_dict['stage0_size']):
        if len(simulation_dict['stage0'][i]["MLP_parameters_additional"]
               ['layers']) == 2 and len(
                   simulation_dict['stage0'][i]["MLP_parameters_additional"]
                   ['weights']) == 1:
            nhidden = simulation_dict['stage0'][i]["MLP_parameters_additional"][
                'layers'][0]['activation'].shape[0] - 1
            nadditional = simulation_dict['stage0'][i][
                "MLP_parameters_additional"]['layers'][-1]['activation'].shape[
                    0] - 1
            layer = {
                'activation': SharedArray.SharedNumpyArray((nhidden + 1),
                                                           np.float),
                'error': SharedArray.SharedNumpyArray((nhidden + 1), np.float),
                'delta': SharedArray.SharedNumpyArray((nhidden + 1), np.float)
            }

            simulation_dict['stage0'][i]["MLP_parameters_additional"][
                'layers'].insert(1, layer)
            simulation_dict['stage0'][i]["MLP_parameters_additional"]['weights'] = \
                [MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, nhidden), np.float)),
                 MLP.initialize_weights(SharedArray.SharedNumpyArray((nhidden+1, nadditional), np.float)),
                 ]
        else:
            needed = False
    if needed:
        logging.info("Upgrade complete")
    else:
        logging.info("Upgrade was not nescessary")
Example #15
def test_MLPN_eval00():
    """
    Simple test checking whether the MLP evaluates properly with zero weights
    """
    x = np.array([1.0, 0.0])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 5, 5])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([1.0])
    parameters['momentum'] = np.array([0.0])
    parameters['mse'] = np.array([0.0])
    parameters['weights'] = [
        np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]).astype(np.float) *
        100,
        np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
                  [0, 0, 0, 0]]).astype(np.float) * 100
    ]
    M = MLP_module.MLP(parameters)
    out = M.evaluate(x)
    assert np.allclose(out, np.array([0.5, 0.5, 0.5, 0.5]))
    hidden = M.get_activation(1)
    assert np.allclose(hidden, np.array([0.5, 0.5, 0.5, 0.5]))
Example #16
 def __init__(self, parameters):
     self.yet_previous_array1 = parameters['yet_previous_array1'].view(
         np.ndarray)
     self.yet_previous_array = parameters['yet_previous_array'].view(
         np.ndarray)
     self.previous_array = parameters['previous_array'].view(np.ndarray)
     self.current_array = parameters['current_array'].view(np.ndarray)
     self.predicted_array = parameters['predicted_array'].view(np.ndarray)
     self.first_order_error = parameters['first_order_error'].view(
         np.ndarray)
     self.second_order_error = parameters['second_order_error'].view(
         np.ndarray)
     self.block = parameters['block'].view(np.ndarray)
     self.MLP = MLP.MLP(parameters["MLP_parameters"])
Example #17
def test_MLPN_eval01():
    """
    Simple test checking whether the MLP evaluates properly on a specific case
    """
    x3 = np.array([0.0, 1.0])
    x4 = np.array([1.0, 0.0])
    parameters = {}
    parameters['layers'] = MLP_module.get_layers([3, 6, 5])
    parameters['beta'] = np.array([1.0])
    parameters['learning_rate'] = np.array([1.0])
    parameters['momentum'] = np.array([0.0])
    parameters['mse'] = np.array([0.0])
    parameters['polynomial'] = True
    parameters['weights'] = [np.array([[1, -1, 1, -1],
                                       [1, -1, -1, 1],
                                       [0, 0, 0, 0]]).astype(np.float)*100,
                             np.array([[1, 0, 0, 0],
                                       [0, 2, 0, 0],
                                       [0, 0, 1, 0],
                                       [0, 0, -1, 2],
                                       [0, 0, 0, 0],
                                       [0, -1, 0, -1],
                                       ]).astype(np.float)*100
                             ]
    M = MLP_module.MLP(parameters)
    out = M.evaluate(x4)
    assert (out[0] > 0.99)
    assert (out[1] < 0.01)
    assert (out[2] > 0.99)
    assert (out[3] < 0.01)
    hidden = M.get_activation(1)
    assert (hidden[0] > 0.99)
    assert (hidden[1] < 0.01)
    assert (hidden[2] > 0.99)
    assert (hidden[3] < 0.01)
    out = M.evaluate(x3)
    assert (out[0] > 0.99)
    assert (out[1] < 0.01)
    assert (out[2] < 0.01)
    assert (out[3] > 0.99)
    hidden = M.get_activation(1)
    assert (hidden[0] > 0.99)
    assert (hidden[1] < 0.01)
    assert (hidden[2] < 0.01)
    assert (hidden[3] > 0.99)
Example #18
    def generate_missing_parameters(parameters, options):
        """
        This method can be called to generate all the missing dictionary parameters when all
        the other relevant variables are known. Leave empty if there is nothing more to generate.
        When complex_unit is False, a standard 3-layer MLP is used.
        When complex_unit is True, an MLP with additional hidden layers is used.

        No return value is needed; the method has the side effect of modifying the parameters dict.

        :param parameters: parameter dictionary
        :type parameters: dict
        """
        complex_unit = options['unit_type'] == "complex"
        polynomial = options['polynomial'] == '1'
        autoencoder = options['autoencoder'] == '1'
        use_t_2_block = options['use_t_minus_2_block'] == '1'
        use_derivative = options['use_derivative'] == '1'
        use_integral = options['use_integral'] == '1'
        use_error = options['use_error'] == '1'
        predict_2_steps = options['predict_two_steps'] == '1'
        use_global_backprop = options['use_global_backprop'] == '1'
        complex_context_in_second_layer = options[
            'feed_context_in_complex_layer'] == '1'
        parameters['normalize_output'] = options["normalize_output"] == "1"
        parameters['backpropagate_readout_error'] = options[
            "backpropagate_readout_error"] == "1"

        nhidden = np.prod(parameters['output_block'].shape)
        parameters['output_min'] = SharedArray.SharedNumpyArray_like(
            parameters['output_block'])
        parameters['output_max'] = SharedArray.SharedNumpyArray_like(
            parameters['output_block']) + 1
        parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
            parameters['output_block'])
        ninputs = 0
        noutputs = 0
        ncontext = 0
        # Any additional memory buffers needed in the operation of the unit
        parameters['internal_buffers'] = []
        parameters['integral_blocks'] = []
        parameters['derivative_blocks'] = []
        parameters['error_blocks'] = []
        parameters['use_derivative'] = use_derivative
        parameters['use_integral'] = use_integral
        parameters['use_error'] = use_error
        parameters['use_t_2_block'] = use_t_2_block
        parameters['predict_2_steps'] = predict_2_steps
        for (block, delta, pred_block,
             pred_block2) in parameters['signal_blocks']:
            block01 = SharedArray.SharedNumpyArray_like(block)
            block02 = SharedArray.SharedNumpyArray_like(block)
            block03 = SharedArray.SharedNumpyArray_like(block)
            parameters['internal_buffers'].append((block01, block02, block03))
            if use_derivative:
                parameters['derivative_blocks'].append(
                    SharedArray.SharedNumpyArray_like(block))
            if use_integral:
                parameters['integral_blocks'].append(
                    SharedArray.SharedNumpyArray_like(block))
            if use_error:
                parameters['error_blocks'].append(
                    SharedArray.SharedNumpyArray_like(block))

        input_block_features = 1
        output_predictions = 1
        if use_derivative:
            input_block_features += 1
        if use_integral:
            input_block_features += 1
        if use_error:
            input_block_features += 1
        if use_t_2_block:
            input_block_features += 1
        if predict_2_steps:
            output_predictions += 1

        for (block, delta, pred_block,
             pred_block2) in parameters['signal_blocks']:
            ninputs += np.prod(block.shape) * input_block_features
        for (block, delta, factor) in parameters['context_blocks']:
            ncontext += np.prod(block.shape)
        for (block, delta, pred_block,
             pred_block2) in parameters['signal_blocks']:
            noutputs += np.prod(block.shape) * output_predictions

        nreadout = 0
        for (block, delta, pblock) in parameters['readout_blocks']:
            nreadout += np.prod(block.shape)
        parameters["Primary_Predictor_params"] = {}
        parameters["Readout_Predictor_params"] = {}
        if complex_unit and complex_context_in_second_layer:  # 4 layer perceptron
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers([
                ninputs + 1, 2 * nhidden + ncontext + 1, nhidden + 1,
                noutputs + 1
            ])
        elif complex_unit:
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers([
                ninputs + ncontext + 1, 2 * nhidden + 1, nhidden + 1,
                noutputs + 1
            ])
        else:  # 3 layer perceptron Simple MLP Unit (not complex unit)
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
                [ninputs + ncontext + 1, nhidden + 1, noutputs + 1])

        parameters["Primary_Predictor_params"][
            'beta'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Primary_Predictor_params"]['beta'][0] = 1.0
        parameters["Primary_Predictor_params"]['learning_rate'] = parameters[
            'primary_learning_rate']
        parameters["Primary_Predictor_params"]['momentum'] = parameters[
            'momentum']
        parameters["Primary_Predictor_params"][
            'mse'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Primary_Predictor_params"]['layers'])

        parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
            [nhidden + 1, 2 * nhidden + 1, nreadout + 1])
        parameters["Readout_Predictor_params"][
            'beta'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Readout_Predictor_params"]['beta'][0] = 1.0
        parameters["Readout_Predictor_params"]['learning_rate'] = parameters[
            'readout_learning_rate']
        parameters["Readout_Predictor_params"]['momentum'] = parameters[
            'momentum']
        parameters["Readout_Predictor_params"][
            'mse'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Readout_Predictor_params"]['layers'])
        parameters["Primary_Predictor_params"]['polynomial'] = polynomial
        parameters["Readout_Predictor_params"]['polynomial'] = polynomial
        parameters['autoencoder'] = autoencoder
        parameters['use_global_backprop'] = use_global_backprop
        parameters[
            "complex_context_in_second_layer"] = complex_context_in_second_layer
        parameters["complex"] = complex_unit
Example #19
    def __init__(self, parameters):
        # Mapping SharedArray items through .view(np.ndarray) improved the access time
        self.signal_blocks = self.open_views_into_shmem(
            parameters, "signal_blocks",
            len(ExecutionUnit.SIGNAL_BLOCK_CONTENTS))
        self.readout_blocks = self.open_views_into_shmem(
            parameters, "readout_blocks",
            len(ExecutionUnit.SUPERVISED_TASK_OUTPUTS))
        self.context_blocks = self.open_views_into_shmem(
            parameters, "context_blocks",
            len(ExecutionUnit.UNSUPERVISED_CONTEXT_INPUTS))
        self.derivative_blocks = self.open_views_into_shmem_single(
            parameters, "derivative_blocks")
        self.delta_blocks = self.open_views_into_shmem_single(
            parameters, "delta_blocks")
        self.integral_blocks = self.open_views_into_shmem_single(
            parameters, "integral_blocks")
        self.error_blocks = self.open_views_into_shmem_single(
            parameters, "error_blocks")
        self.internal_buffers = self.open_views_into_shmem(
            parameters, "internal_buffers", 3)
        self.output_block = parameters['output_block'].view(
            np.ndarray
        )  # Will appear in other places as either input or context
        self.output_min = parameters['output_min']
        self.output_max = parameters['output_max']
        self.avg_delta = parameters['avg_delta']
        self.internal_buffers = parameters['internal_buffers']
        self.flags = parameters['flags']
        # The perceptron
        self.MLP_internal_prediction = MLP.MLP(
            parameters["Primary_Predictor_params"])
        self.MLP_readout = MLP.MLP(
            parameters["Readout_Predictor_params"]
        )  # This is a "task" supervised MLP (e.g., tracker)
        self.primary_learning_rate = parameters['primary_learning_rate']
        self.readout_learning_rate = parameters['readout_learning_rate']
        self.layers_internal_prediction = parameters[
            "Primary_Predictor_params"]['layers']
        self.layers_readout = parameters["Readout_Predictor_params"][
            'layers']  # These are the "task" supervised MLP layers
        self.tau = parameters[
            'tau']  # Tau is the integration constant for the signal integral
        # Operation parameters
        self.input_blocks_skip = 1
        self.output_blocks_skip = 1
        self.backpropagate_readout_error = False
        self.complex = False
        self.complex_context_in_second_layer = False
        self.use_global_backprop = False
        self.use_t_2_block = False
        self.use_derivative = False
        self.use_error = False
        self.use_integral = False
        self.predict_2_steps = False
        self.normalize = False
        if 'backpropagate_readout_error' in parameters.keys():
            self.backpropagate_readout_error = parameters['backpropagate_readout_error']
        if 'normalize_output' in parameters.keys():
            self.normalize = parameters['normalize_output']
        if 'complex' in parameters.keys():
            self.complex = parameters['complex']
        if 'complex_context_in_second_layer' in parameters.keys():
            self.complex_context_in_second_layer = parameters[
                'complex_context_in_second_layer']
        if 'use_derivative' in parameters.keys():
            self.use_derivative = parameters['use_derivative']
        if 'use_integral' in parameters.keys():
            self.use_integral = parameters['use_integral']
        if 'use_error' in parameters.keys():
            self.use_error = parameters['use_error']
        if 'use_t_2_block' in parameters.keys():
            self.use_t_2_block = parameters['use_t_2_block']
        if 'use_global_backprop' in parameters.keys():
            self.use_global_backprop = parameters['use_global_backprop']
        if 'autoencoder' in parameters.keys():
            self.autoencoder = parameters['autoencoder']
        if 'predict_2_steps' in parameters.keys():
            self.predict_2_steps = parameters['predict_2_steps']
        if self.use_derivative:
            self.input_blocks_skip += 1
        if self.use_integral:
            self.input_blocks_skip += 1
        if self.use_error:
            self.input_blocks_skip += 1
        if self.use_t_2_block:
            self.input_blocks_skip += 1
        if self.predict_2_steps:
            self.output_blocks_skip += 1
        # Input buffers
        self.ninputs = 0
        self.npredictions = 0
        self.npinputs = 0
        self.ncontexts = 0
        for (block, delta, pred_block1, pred_block2) in self.signal_blocks:
            self.ninputs += self.input_blocks_skip * np.prod(block.shape)
            self.npredictions += self.output_blocks_skip * np.prod(block.shape)
        for (teaching_block, delta, readout_block) in self.readout_blocks:
            self.npinputs += np.prod(teaching_block.shape)
        self.inputs_t = np.zeros((self.ninputs, ))
        self.actual_signal_t = np.zeros(
            (self.npredictions / self.output_blocks_skip, ))
        self.readout_training_signal = np.zeros((self.npinputs, ))

        self.inputs_t_1 = np.zeros((self.ninputs, ))
        self.actual_signal_t_1 = np.zeros(
            (self.npredictions / self.output_blocks_skip, ))
        self.pinputs_t_1 = np.zeros((self.npinputs, ))

        # Context buffers
        for (block, delta, factor) in self.context_blocks:
            self.ncontexts += np.prod(block.shape)
        self.contexts_t_1 = np.zeros((self.ncontexts, ))
        self.output_layer = len(
            self.layers_internal_prediction
        ) - 2  # Because the "output" of this Unit comes from its hidden layer
        self.output_length = np.prod(self.output_block.shape)
        # Buffer for storing activations
        self.activations_buffer = []
        # additional flags
        if self.use_global_backprop:
            self.push_activation()
        if self.predict_2_steps:
            self.push_activation()
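The input_blocks_skip bookkeeping above means each enabled feature (derivative, integral, error, t-2 block) contributes one extra copy of every signal block to the input vector. A quick sketch of the resulting size, with a made-up block shape:

import numpy as np

use_derivative, use_integral, use_error, use_t_2_block = True, True, True, False
input_blocks_skip = 1 + sum([use_derivative, use_integral, use_error, use_t_2_block])
block_shape = (4, 4, 3)  # hypothetical signal block
print(input_blocks_skip * int(np.prod(block_shape)))  # 4 * 48 = 192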
Example #20
    def generate_missing_parameters(parameters, options):
        """
        This method can be called to generate all the missing dictionary parameters when all
        the other relevant variables are known. Leave empty if there is nothing more to generate.
        When complex_unit is False, a standard 3-layer MLP is used.
        When complex_unit is True, an MLP with additional hidden layers is used.

        No return value is needed; the method has the side effect of modifying the parameters dict.

        :param parameters: parameter dictionary
        :type parameters: dict
        """
        complex_unit = options['unit_type'] == "complex"
        polynomial = options['polynomial'] == '1'
        autoencoder = options['autoencoder'] == '1'

        nhidden = np.prod(parameters['output_block'].shape)
        parameters['output_min'] = SharedArray.SharedNumpyArray_like(
            parameters['output_block'])
        parameters['output_max'] = SharedArray.SharedNumpyArray_like(
            parameters['output_block'])
        ninputs = 0
        noutputs = 0
        ncontext = 0
        # Any additional memory buffers needed in the operation of the unit
        parameters['internal_buffers'] = []
        for (block, delta, pred_block,
             pred_block2) in parameters['signal_blocks']:
            block01 = SharedArray.SharedNumpyArray_like(block)
            block02 = SharedArray.SharedNumpyArray_like(block)
            block03 = SharedArray.SharedNumpyArray_like(block)
            parameters['internal_buffers'].append((block01, block02, block03))

        for (block, delta, pred_block,
             pred_block2) in parameters['signal_blocks']:
            ninputs += np.prod(block.shape) * len(
                ExecutionUnit.UNSUPERVISED_SIGNAL_INPUTS)
        for (block, delta, factor) in parameters['context_blocks']:
            ncontext += np.prod(block.shape)
        for (block, delta, pred_block,
             pred_block2) in parameters['signal_blocks']:
            noutputs += 2 * np.prod(
                block.shape)  # to predict two steps into the future
        nadditional = 0
        for (block, delta, pblock) in parameters['readout_blocks']:
            nadditional += np.prod(block.shape)
        parameters["Primary_Predictor_params"] = {}
        parameters["Residual_Predictor_params"] = {}
        parameters["Readout_Predictor_params"] = {}
        if complex_unit:  # 4 layer perceptron
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
                [ncontext + 1, 3 * nhidden + 1, 2 * nhidden + 1, noutputs + 1])
            parameters["Residual_Predictor_params"]['layers'] = MLP.get_layers(
                [ninputs + 1, 2 * nhidden + 1, nhidden + 1, noutputs + 1])
            parameters["complex"] = True
        else:  # 3 layer perceptron Simple MLP Unit (not complex unit)
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
                [ncontext + 1, 2 * nhidden + 1, noutputs + 1])
            parameters["Residual_Predictor_params"]['layers'] = MLP.get_layers(
                [ninputs + 2 * nhidden + 1, nhidden + 1, noutputs + 1])

        parameters["Primary_Predictor_params"][
            'beta'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Primary_Predictor_params"]['beta'][0] = 1.0
        parameters["Primary_Predictor_params"]['learning_rate'] = parameters[
            'primary_learning_rate']
        parameters["Primary_Predictor_params"]['momentum'] = parameters[
            'momentum']
        parameters["Primary_Predictor_params"][
            'mse'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Primary_Predictor_params"]['layers'])

        parameters["Residual_Predictor_params"][
            'beta'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Residual_Predictor_params"]['beta'][0] = 1.0
        parameters["Residual_Predictor_params"]['learning_rate'] = parameters[
            'primary_learning_rate']
        parameters["Residual_Predictor_params"]['momentum'] = parameters[
            'momentum']
        parameters["Residual_Predictor_params"][
            'mse'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Residual_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Residual_Predictor_params"]['layers'])

        parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
            [nhidden + 1, 2 * nhidden + 1, nadditional + 1])
        parameters["Readout_Predictor_params"][
            'beta'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Readout_Predictor_params"]['beta'][0] = 1.0
        parameters["Readout_Predictor_params"]['learning_rate'] = parameters[
            'readout_learning_rate']
        parameters["Readout_Predictor_params"]['momentum'] = parameters[
            'momentum']
        parameters["Readout_Predictor_params"][
            'mse'] = SharedArray.SharedNumpyArray((1, ), np.float)
        parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Readout_Predictor_params"]['layers'])
        parameters["Primary_Predictor_params"]['polynomial'] = polynomial
        parameters["Residual_Predictor_params"]['polynomial'] = polynomial
        parameters["Readout_Predictor_params"]['polynomial'] = polynomial
        parameters['autoencoder'] = autoencoder
Example #21
    def __init__(self, parameters):
        # Mapping SharedArray items through .view(np.ndarray) improved the access time
        self.signal_blocks = self.open_views_into_shmem(
            parameters, "signal_blocks",
            len(ExecutionUnit.SIGNAL_BLOCK_CONTENTS))
        self.readout_blocks = self.open_views_into_shmem(
            parameters, "readout_blocks",
            len(ExecutionUnit.SUPERVISED_TASK_OUTPUTS))
        self.context_blocks = self.open_views_into_shmem(
            parameters, "context_blocks",
            len(ExecutionUnit.UNSUPERVISED_CONTEXT_INPUTS))
        self.internal_buffers = self.open_views_into_shmem(
            parameters, "internal_buffers", 3)
        self.output_block = parameters['output_block'].view(
            np.ndarray
        )  # Will appear in other places as either input or context
        self.output_min = parameters['output_min']
        self.output_max = parameters['output_max']

        # The perceptron
        self.MLP_internal_prediction = MLP.MLP(
            parameters["Primary_Predictor_params"])
        self.MLP_residual_prediction = MLP.MLP(
            parameters["Residual_Predictor_params"])
        self.MLP_readout = MLP.MLP(
            parameters["Readout_Predictor_params"]
        )  # This is a "task" supervised MLP (e.g., tracker)
        self.primary_learning_rate = parameters['primary_learning_rate']
        self.readout_learning_rate = parameters['readout_learning_rate']
        self.layers_internal_prediction = parameters[
            "Primary_Predictor_params"]['layers']
        self.layers_residual_prediction = parameters[
            "Residual_Predictor_params"]['layers']
        self.layers_readout = parameters["Readout_Predictor_params"][
            'layers']  # These are the "task" supervised MLP layers
        self.tau = parameters[
            'tau']  # Tau is the integration constant for the signal integral

        # Input buffers
        self.ninputs = 0
        self.npredictions = 0
        self.npinputs = 0
        self.ncontexts = 0
        for (block, delta, pred_block1, pred_block2) in self.signal_blocks:
            self.ninputs += len(
                ExecutionUnit.UNSUPERVISED_SIGNAL_INPUTS) * np.prod(
                    block.shape)
            self.npredictions += np.prod(block.shape)
        for (teaching_block, delta, readout_block) in self.readout_blocks:
            self.npinputs += np.prod(teaching_block.shape)
        self.inputs_t = np.zeros((self.ninputs, ))
        self.actual_signal_t = np.zeros((self.npredictions, ))
        self.readout_training_signal = np.zeros((self.npinputs, ))

        self.inputs_t_1 = np.zeros((self.ninputs, ))
        self.actual_signal_t_1 = np.zeros((self.npredictions, ))
        self.pinputs_t_1 = np.zeros((self.npinputs, ))

        # Context buffers
        for (block, delta, factor) in self.context_blocks:
            self.ncontexts += np.prod(block.shape)
        self.contexts_t_1 = np.zeros((self.ncontexts, ))
        self.output_layer = len(
            self.layers_residual_prediction
        ) - 2  # Because the "output" of this Unit comes from its hidden layer
        self.output_length = np.prod(self.output_block.shape)
        # Buffer for storing activations
        self.activations_buffer = []
        # additional flags
        if 'complex' in parameters.keys() and parameters['complex']:
            self.complex = True
        else:
            self.complex = False
        self.push_activation()
Example #22
 def upgrade_to_ver_1(parameters):
     parameters['internal_buffers'] = []
     parameters['output_min'] = SharedArray.SharedNumpyArray_like(
         parameters['output_block'])
     parameters['output_max'] = SharedArray.SharedNumpyArray_like(
         parameters['output_block'])
     parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
         parameters['output_block'])
     parameters['integral_blocks'] = []
     parameters['derivative_blocks'] = []
     parameters['error_blocks'] = []
     parameters['use_derivative'] = True
     parameters['use_integral'] = True
     parameters['use_error'] = True
     parameters['use_t_2_block'] = False
     parameters['predict_2_steps'] = False
     parameters['use_global_backprop'] = False
     parameters['normalize_output'] = False
     parameters["complex_context_in_second_layer"] = False
     for (block, delta, pred_block,
          pred_block2) in parameters['signal_blocks']:
         block01 = SharedArray.SharedNumpyArray_like(block)
         block02 = SharedArray.SharedNumpyArray_like(block)
         block03 = SharedArray.SharedNumpyArray_like(block)
         parameters['internal_buffers'].append((block01, block02, block03))
         parameters['derivative_blocks'].append(
             SharedArray.SharedNumpyArray_like(block))
         parameters['integral_blocks'].append(
             SharedArray.SharedNumpyArray_like(block))
         parameters['error_blocks'].append(
             SharedArray.SharedNumpyArray_like(block))
     if "complex" not in parameters.keys():
         parameters["complex"] = False
     if len(parameters["Primary_Predictor_params"]['layers']) == 4:
         parameters["complex"] = True
     if "autoencoder" not in parameters.keys():
         parameters["autoencoder"] = False
     if "readout_learning_rate" not in parameters.keys():
         parameters['readout_learning_rate'] = parameters[
             "Primary_Predictor_params"]["learning_rate"]
     if "momentum" not in parameters.keys():
         parameters['momentum'] = parameters["Primary_Predictor_params"][
             "momentum"]
     nhidden = parameters["Primary_Predictor_params"]['layers'][-2][
         'activation'].shape[0] - 1
     nreadout = 0
     noutputs = 0
     for (block, delta, pred_block,
          pred_block2) in parameters['signal_blocks']:
         noutputs += np.prod(block.shape)
     for (block, delta, pblock) in parameters['readout_blocks']:
         nreadout += np.prod(block.shape)
     if "Readout_Predictor_params" not in parameters.keys():
         parameters["Readout_Predictor_params"] = {}
         parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
             [nhidden + 1, nreadout + 1])
         parameters["Readout_Predictor_params"][
             'beta'] = SharedArray.SharedNumpyArray((1, ), np.float)
         parameters["Readout_Predictor_params"]['beta'][0] = 1.0
         parameters["Readout_Predictor_params"][
             'learning_rate'] = parameters['readout_learning_rate']
         parameters["Readout_Predictor_params"]['momentum'] = parameters[
             'momentum']
         parameters["Readout_Predictor_params"][
             'mse'] = SharedArray.SharedNumpyArray((1, ), np.float)
         parameters["Readout_Predictor_params"][
             'weights'] = MLP.get_weights(
                 parameters["Readout_Predictor_params"]['layers'])
         parameters["Readout_Predictor_params"]['weights'][
             0][:] = parameters["Primary_Predictor_params"]['weights'][
                 -1][:, noutputs:]
         old_weight_matrix = parameters["Primary_Predictor_params"][
             'weights'][-1]
         parameters["Primary_Predictor_params"]['weights'][
             -1] = SharedArray.SharedNumpyArray((nhidden + 1, noutputs),
                                                np.float)
         parameters["Primary_Predictor_params"]['weights'][
             -1][:] = old_weight_matrix[:, :noutputs]
         parameters["Primary_Predictor_params"]['layers'][-1] = {
             'activation': SharedArray.SharedNumpyArray(noutputs, np.float),
             'error': SharedArray.SharedNumpyArray(noutputs, np.float),
             'delta': SharedArray.SharedNumpyArray(noutputs, np.float)
         }
         parameters['backpropagate_readout_error'] = True
Example #23
    def generate_missing_parameters(parameters, options):
        """
        This method can be called to generate all the missing dictionary parameters when all
        the other relevant variables are known. Leave empty if there is nothing more to generate.
        When complex_unit is False, a standard 3-layer MLP is used.
        When complex_unit is True, an MLP with additional hidden layers is used.

        No return value is needed; the method has the side effect of modifying the parameters dict.

        :param parameters: parameter dictionary
        :type parameters: dict
        """
        complex_unit = options['unit_type'] == "complex"
        polynomial = options['polynomial'] == '1'
        autoencoder = options['autoencoder'] == '1'
        use_t_2_block = options['use_t_minus_2_block'] == '1'
        use_derivative = options['use_derivative'] == '1'
        use_integral = options['use_integral'] == '1'
        use_error = options['use_error'] == '1'
        predict_2_steps = options['predict_two_steps'] == '1'
        use_global_backprop = options['use_global_backprop'] == '1'
        complex_context_in_second_layer = options['feed_context_in_complex_layer'] == '1'
        parameters['normalize_output'] = options["normalize_output"] == "1"
        parameters['backpropagate_readout_error'] = options["backpropagate_readout_error"] == "1"

        nhidden = np.prod(parameters['output_block'].shape)
        parameters['output_min'] = SharedArray.SharedNumpyArray_like(parameters['output_block'])
        parameters['output_max'] = SharedArray.SharedNumpyArray_like(parameters['output_block'])+1
        parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(parameters['output_block'])
        ninputs = 0
        noutputs = 0
        ncontext = 0
        # Any additional memory buffers needed in the operation of the unit
        parameters['internal_buffers'] = []
        parameters['integral_blocks'] = []
        parameters['derivative_blocks'] = []
        parameters['error_blocks'] = []
        parameters['use_derivative'] = use_derivative
        parameters['use_integral'] = use_integral
        parameters['use_error'] = use_error
        parameters['use_t_2_block'] = use_t_2_block
        parameters['predict_2_steps'] = predict_2_steps
        for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
            block01 = SharedArray.SharedNumpyArray_like(block)
            block02 = SharedArray.SharedNumpyArray_like(block)
            block03 = SharedArray.SharedNumpyArray_like(block)
            parameters['internal_buffers'].append((block01, block02, block03))
            if use_derivative:
                parameters['derivative_blocks'].append(SharedArray.SharedNumpyArray_like(block))
            if use_integral:
                parameters['integral_blocks'].append(SharedArray.SharedNumpyArray_like(block))
            if use_error:
                parameters['error_blocks'].append(SharedArray.SharedNumpyArray_like(block))

        input_block_features = 1
        output_predictions = 1
        if use_derivative:
            input_block_features += 1
        if use_integral:
            input_block_features += 1
        if use_error:
            input_block_features += 1
        if use_t_2_block:
            input_block_features += 1
        if predict_2_steps:
            output_predictions += 1

        for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
            ninputs += np.prod(block.shape) * input_block_features
        for (block, delta, factor) in parameters['context_blocks']:
            ncontext += np.prod(block.shape)
        for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
            noutputs += np.prod(block.shape) * output_predictions

        nreadout = 0
        for (block, delta, pblock) in parameters['readout_blocks']:
            nreadout += np.prod(block.shape)
        parameters["Primary_Predictor_params"] = {}
        parameters["Readout_Predictor_params"] = {}
        if complex_unit and complex_context_in_second_layer:  # 4 layer perceptron
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers([ninputs+1, 2*nhidden+ncontext+1, nhidden+1, noutputs+1])
        elif complex_unit:
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers([ninputs+ncontext+1, 2*nhidden+1, nhidden+1, noutputs+1])
        else:  # 3 layer perceptron Simple MLP Unit (not complex unit)
            parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers([ninputs+ncontext+1, nhidden+1, noutputs+1])

        parameters["Primary_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
        parameters["Primary_Predictor_params"]['beta'][0] = 1.0
        parameters["Primary_Predictor_params"]['learning_rate'] = parameters['primary_learning_rate']
        parameters["Primary_Predictor_params"]['momentum'] = parameters['momentum']
        parameters["Primary_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
        parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(parameters["Primary_Predictor_params"]['layers'])

        parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers([nhidden+1, 2*nhidden+1, nreadout+1])
        parameters["Readout_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
        parameters["Readout_Predictor_params"]['beta'][0] = 1.0
        parameters["Readout_Predictor_params"]['learning_rate'] = parameters['readout_learning_rate']
        parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
        parameters["Readout_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
        parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(parameters["Readout_Predictor_params"]['layers'])
        parameters["Primary_Predictor_params"]['polynomial'] = polynomial
        parameters["Readout_Predictor_params"]['polynomial'] = polynomial
        parameters['autoencoder'] = autoencoder
        parameters['use_global_backprop'] = use_global_backprop
        parameters["complex_context_in_second_layer"] = complex_context_in_second_layer
        parameters["complex"] = complex_unit
Example #24
    def __init__(self, parameters):
        # Mapping SharedArray items through .view(np.ndarray) improved the access time
        self.signal_blocks = self.open_views_into_shmem(
            parameters, "signal_blocks",
            len(ExecutionUnit.SIGNAL_BLOCK_CONTENTS))
        self.predicted_blocks = self.open_views_into_shmem(
            parameters, "predicted_blocks",
            len(ExecutionUnit.SUPERVISED_TASK_OUTPUTS))
        self.context_blocks = self.open_views_into_shmem(
            parameters, "context_blocks",
            len(ExecutionUnit.UNSUPERVISED_CONTEXT_INPUTS))
        self.output_block = parameters['output_block'].view(
            np.ndarray
        )  # Will appear in other places as either input or context

        # The perceptron
        self.MLP = MLP.MLP(parameters["MLP_parameters"])
        self.MLP_1 = MLP.MLP(
            parameters["MLP_parameters_additional"]
        )  # This is a "task" supervised MLP (e.g., tracker)
        self.learning_rate = parameters["MLP_parameters"]['learning_rate']
        self.learning_rate_1 = parameters["MLP_parameters_additional"][
            'learning_rate']
        self.layers = parameters["MLP_parameters"]['layers']
        self.layers_1 = parameters["MLP_parameters_additional"][
            'layers']  # These are the "task" supervised MLP layers
        self.tau = parameters[
            'tau']  # Tau is the integration constant for the signal integral

        # Input buffers
        self.ninputs = 0
        self.npredictions = 0
        self.npinputs = 0
        self.ncontexts = 0
        for (block, delta, pred_block, past_block, dblock, iblock,
             pred_block_local) in self.signal_blocks:
            self.ninputs += len(
                ExecutionUnit.UNSUPERVISED_SIGNAL_INPUTS) * np.prod(
                    block.shape)
            self.npredictions += np.prod(block.shape)
        for (block, delta, pblock, pdblock) in self.predicted_blocks:
            self.npinputs += np.prod(block.shape)
        self.inputs_t = np.zeros((self.ninputs, ))
        self.predictions_t = np.zeros((self.npredictions, ))
        self.pinputs_t = np.zeros((self.npinputs, ))

        # Context buffers
        for (block, delta, factor) in self.context_blocks:
            self.ncontexts += np.prod(block.shape)
        self.contexts_t_1 = np.zeros((self.ncontexts, ))

        # Buffer for storing the averaged out deltas
        self.output_layer = len(
            self.layers
        ) - 2  # Because the "output" of this Unit comes from its hidden layer
        self.output_length = np.prod(self.output_block.shape)

        # additional flags
        if 'autoencoder' in parameters.keys() and parameters['autoencoder']:
            self.autoencoder = True
        else:
            self.autoencoder = False
        if 'complex' in parameters.keys() and parameters['complex']:
            self.complex = True
        else:
            self.complex = False
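A closing note on output_layer = len(self.layers) - 2, which recurs in these constructors: the unit's externally visible output is the activation of the last hidden layer, one layer before the prediction layer. For a hypothetical four-entry layers list:

layers = ['input', 'hidden1', 'hidden2', 'prediction']  # illustrative only
output_layer = len(layers) - 2
print(layers[output_layer])  # hidden2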