def upgrade_to_ver_1(parameters):
    parameters['internal_buffers'] = []
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
def upgrade_dictionary_to_ver1_0(simulation_dict):
    upgrade(simulation_dict)
    if "version_major" not in simulation_dict.keys() or simulation_dict["version_major"] < 1:
        logging.info("Simulation dictionary of the old type, automatically upgrading to ver 1.0")
        if 'learning_rates' not in simulation_dict.keys():
            simulation_dict['learning_rates'] = []
        if 'momenta' not in simulation_dict.keys():
            simulation_dict['momenta'] = []
        if 'taus' not in simulation_dict.keys():
            simulation_dict['taus'] = []
        if 'predicted_arrays' not in simulation_dict.keys():
            simulation_dict['predicted_arrays'] = []
        if 'predicted_arrays_t2' not in simulation_dict.keys():
            simulation_dict['predicted_arrays_t2'] = []
        if 'predicted_readout_arrays' not in simulation_dict.keys():
            simulation_dict['predicted_readout_arrays'] = []
        if 'readout_arrays' not in simulation_dict.keys():
            simulation_dict['readout_arrays'] = []
        if 'state_arrays' not in simulation_dict.keys():
            simulation_dict['state_arrays'] = []
        if 'delta_arrays' not in simulation_dict.keys():
            simulation_dict['delta_arrays'] = []
        for i in range(PVM_MAX_LAYERS):  # max number of layers
            if "delta_array%02d" % i in simulation_dict.keys():
                simulation_dict['delta_arrays'].append(
                    simulation_dict['delta_array%02d' % i])
                del simulation_dict['delta_array%02d' % i]
            if "learning_rate%02d" % i in simulation_dict.keys():
                simulation_dict['learning_rates'].append(
                    simulation_dict['learning_rate%02d' % i])
                del simulation_dict['learning_rate%02d' % i]
            if "state_array%02d" % i in simulation_dict.keys():
                simulation_dict['state_arrays'].append(
                    simulation_dict['state_array%02d' % i])
                del simulation_dict['state_array%02d' % i]
            if "predicted_readout_array_float%02d" % i in simulation_dict.keys():
                simulation_dict['predicted_readout_arrays'].append(
                    simulation_dict['predicted_readout_array_float%02d' % i])
                del simulation_dict['predicted_readout_array_float%02d' % i]
            if "readout_array_float%02d" % i in simulation_dict.keys():
                simulation_dict['readout_arrays'].append(
                    simulation_dict['readout_array_float%02d' % i])
                del simulation_dict['readout_array_float%02d' % i]
            if "predicted_array%02d" % i in simulation_dict.keys():
                simulation_dict['predicted_arrays'].append(
                    simulation_dict['predicted_array%02d' % i])
                simulation_dict['predicted_arrays_t2'].append(
                    SharedArray.SharedNumpyArray_like(
                        simulation_dict['predicted_array%02d' % i]))
                del simulation_dict['predicted_array%02d' % i]
        if "motor_delta_array_float" in simulation_dict.keys():
            del simulation_dict["motor_delta_array_float"]
        if "readout_array_float" in simulation_dict.keys():
            del simulation_dict["readout_array_float"]
        if "predicted_motor_derivative_array_float" in simulation_dict.keys():
            del simulation_dict["predicted_motor_derivative_array_float"]
        if "predicted_readout_array_float" in simulation_dict.keys():
            del simulation_dict["predicted_readout_array_float"]
        if "additional_learning_rate" in simulation_dict.keys():
            simulation_dict["readout_learning_rate"] = simulation_dict["additional_learning_rate"]
            del simulation_dict["additional_learning_rate"]
        elif "readout_learning_rate" not in simulation_dict.keys():
            simulation_dict["readout_learning_rate"] = SharedArray.SharedNumpyArray((1,), np.float)
            simulation_dict["readout_learning_rate"][:] = 0.00001
        simulation_dict['execution_unit_module'] += "_v1"
        if not simulation_dict['execution_unit_module'].startswith("PVM_models"):
            simulation_dict['execution_unit_module'] = "PVM_models." + simulation_dict['execution_unit_module']
        ex_unit = importlib.import_module(simulation_dict['execution_unit_module'])
        for s in range(simulation_dict['stages']):
            stage = simulation_dict['stage%d' % s]
            for unit in stage:
                signal_blocks = unit['signal_blocks']
                unit['signal_blocks'] = []
                for block in signal_blocks:
                    # Each block: signal_block, delta_block, prediction_t+1, prediction_t+2
                    unit['signal_blocks'].append(
                        [block[0], block[1], block[2],
                         SharedArray.SharedNumpyArray_like(block[2])])
                context_blocks = unit['context_blocks']
                unit['context_blocks'] = []
                for block in context_blocks:
                    # Each block: context_block, delta_block, switching_factor
                    unit['context_blocks'].append([block[0], block[1], block[2]])
                readout_blocks = unit['predicted_blocks']
                del unit['predicted_blocks']
                unit['readout_blocks'] = []
                for block in readout_blocks:
                    # Each block: teaching_signal, delta_block, readout_block
                    unit['readout_blocks'].append([block[0], block[1], block[2]])
                # Delta blocks can remain unchanged
                if "learning_rate" in unit.keys():
                    unit['primary_learning_rate'] = unit.pop("learning_rate")
                if "momentum" in unit.keys():
                    unit['primary_momentum'] = unit.pop("momentum")
                    unit['readout_momentum'] = unit['primary_momentum']
                if "additional_learning_rate" in unit.keys():
                    unit['readout_learning_rate'] = unit.pop("additional_learning_rate")
                else:
                    unit['readout_learning_rate'] = simulation_dict["readout_learning_rate"]
                # Output block may remain unchanged
                if "MLP_parameters" in unit.keys():
                    unit['Primary_Predictor_params'] = unit.pop("MLP_parameters")
                if "MLP_parameters_res" in unit.keys():
                    unit['Residual_Predictor_params'] = unit.pop("MLP_parameters_res")
                if "MLP_parameters_additional" in unit.keys():
                    unit['Readout_Predictor_params'] = unit.pop("MLP_parameters_additional")
                unit['flags'] = simulation_dict['flags']
                ex_unit.ExecutionUnit.upgrade_to_ver_1(unit)
        simulation_dict['version_major'] = 1
        simulation_dict['version_minor'] = 0
        # Remove all the old source files
        simulation_dict['sources'] = {}
        logging.info("Upgrade successful")
    else:
        for s in range(simulation_dict['stages']):
            stage = simulation_dict['stage%d' % s]
            for unit in stage:
                unit['flags'] = simulation_dict['flags']
        logging.info("Dictionary already ver 1.0 or above, no need to upgrade")
def generate_v1(name, description, options):
    input_block_size = int(options["input_block_size"])
    hidden_size = int(options["hidden_block_size"])
    layer_shape = map(lambda x: int(x), options["layer_shapes"])
    readout_block_size = map(lambda x: int(x), options["readout_block_size"])
    readout_layer = map(lambda x: x == "1", options["enable_readout"])
    lateral_radius = float(options["lateral_radius"])
    fan_in_square_size = int(options["fan_in_square_size"])
    fan_in_radius = int(options["fan_in_radius"])
    readout_depth = int(options["readout_depth"])
    ex_module = options["ex_module"]
    exclude_self = (options["context_exclude_self"] == '1')
    last_layer_context_to_all = (options["last_layer_context_to_all"] == '1')
    send_context_two_layers_back = (options["send_context_two_layers_back"] == '1')
    simulation_dict = create_blank_dictionary(
        name=name,
        description=description,
        save_sources=(options["save_source_files"] == '1'))
    simulation_dict['stages'] = 1
    simulation_dict['num_proc'] = 2 * mp.cpu_count() / 3
    simulation_dict['stage0'] = []
    simulation_dict['execution_unit_module'] = ex_module
    simulation_dict['version_major'] = 1
    simulation_dict['version_minor'] = 0
    unit = importlib.import_module(simulation_dict['execution_unit_module'])
    blocks_per_dim = layer_shape
    layers = len(blocks_per_dim)
    error_log = SharedArray.SharedNumpyArray((layers + 1, 1000000), np.float)
    simulation_dict['error_log'] = error_log
    simulation_dict['input_block_size'] = input_block_size
    simulation_dict['hidden_size'] = hidden_size
    simulation_dict['learning_rates'] = []
    simulation_dict['momenta'] = []
    simulation_dict['taus'] = []
    simulation_dict['predicted_arrays'] = []
    simulation_dict['predicted_arrays_t2'] = []
    simulation_dict['predicted_readout_arrays'] = []
    simulation_dict['readout_arrays'] = []
    simulation_dict['state_arrays'] = []
    simulation_dict['delta_arrays'] = []
    input_array = SharedArray.SharedNumpyArray(
        (input_block_size * blocks_per_dim[0],
         input_block_size * blocks_per_dim[0], 3), np.uint8)
    simulation_dict['input_array'] = input_array
    input_array_float = SharedArray.SharedNumpyArray(
        (input_block_size * blocks_per_dim[0],
         input_block_size * blocks_per_dim[0], 3), np.float)
    simulation_dict['input_array_float'] = input_array_float
    for (i, bpd) in enumerate(blocks_per_dim):
        if readout_layer[i]:
            readout_array_float00 = SharedArray.SharedNumpyArray(
                (bpd * readout_block_size[i], bpd * readout_block_size[i],
                 readout_depth), np.float)
            simulation_dict['readout_arrays'].append(readout_array_float00)
            predicted_readout_array_float00 = SharedArray.SharedNumpyArray(
                (bpd * readout_block_size[i], bpd * readout_block_size[i],
                 readout_depth), np.float)
            simulation_dict['predicted_readout_arrays'].append(
                predicted_readout_array_float00)
    # Input array 0 is a special case, 3 dimensions because of color input
    predicted_array0 = SharedArray.SharedNumpyArray(
        (input_block_size * blocks_per_dim[0],
         input_block_size * blocks_per_dim[0], 3), np.float)
    simulation_dict['predicted_arrays'].append(predicted_array0)
    predicted_array2 = SharedArray.SharedNumpyArray(
        (input_block_size * blocks_per_dim[0],
         input_block_size * blocks_per_dim[0], 3), np.float)
    simulation_dict['predicted_arrays_t2'].append(predicted_array2)
    delta_array0 = SharedArray.SharedNumpyArray(
        (input_block_size * blocks_per_dim[0],
         input_block_size * blocks_per_dim[0], 3), np.float)
    simulation_dict['delta_arrays'].append(delta_array0)
    # All the rest is generic
    for (i, bpd) in enumerate(blocks_per_dim[:-1]):
        predicted_array1 = SharedArray.SharedNumpyArray(
            (hidden_size * bpd, hidden_size * bpd), np.float)
        simulation_dict['predicted_arrays'].append(predicted_array1)
        predicted_array2 = SharedArray.SharedNumpyArray(
            (hidden_size * bpd, hidden_size * bpd), np.float)
        simulation_dict['predicted_arrays_t2'].append(predicted_array2)
        delta_array1 = SharedArray.SharedNumpyArray(
            (hidden_size * bpd, hidden_size * bpd), np.float)
        simulation_dict['delta_arrays'].append(delta_array1)
    for (i, bpd) in enumerate(blocks_per_dim):
        state_array0 = SharedArray.SharedNumpyArray(
            (hidden_size * bpd, hidden_size * bpd), np.float)
        simulation_dict['state_arrays'].append(state_array0)
    # Base learning rate
    for (i, bpd) in enumerate(blocks_per_dim):
        learning_rate = SharedArray.SharedNumpyArray((1,), np.float)
        learning_rate[0] = 0.0
        simulation_dict['learning_rates'].append(learning_rate)
    additional_learning_rate = SharedArray.SharedNumpyArray((1,), np.float)
    additional_learning_rate[0] = 0.0
    simulation_dict['readout_learning_rate'] = additional_learning_rate
    # Momentum is the same everywhere.
    momentum = SharedArray.SharedNumpyArray((1,), np.float)
    momentum[0] = float(options["momentum"])
    simulation_dict['momentum'] = momentum
    # Tau is the integration constant for the signal integral
    tau = SharedArray.SharedNumpyArray((1,), np.float)
    tau[0] = float(options['tau'])
    simulation_dict['tau'] = tau
    context_factor_lateral = SharedArray.SharedNumpyArray((1,), np.float)
    context_factor_lateral[0] = 0.0
    simulation_dict['context_factor_lateral'] = context_factor_lateral
    context_factor_feedback = SharedArray.SharedNumpyArray((1,), np.float)
    context_factor_feedback[0] = 0.0
    simulation_dict['context_factor_feedback'] = context_factor_feedback
    base_index = [0]
    for bpd in blocks_per_dim:
        base_index.append(base_index[-1] + bpd * bpd)
    # Layer 0 is specific and has to be constructed separately
    for i in xrange(blocks_per_dim[0] * blocks_per_dim[0]):
        unit_parameters = create_basic_unit_v1(
            simulation_dict['learning_rates'][0], momentum, tau,
            additional_learning_rate)
        x = (i / blocks_per_dim[0]) * input_block_size
        y = (i % blocks_per_dim[0]) * input_block_size
        dx = input_block_size
        dy = input_block_size
        input_block = SharedArray.DynamicView(input_array_float)[x:x + dx, y:y + dy]
        predicted_block = SharedArray.DynamicView(
            simulation_dict['predicted_arrays'][0])[x:x + dx, y:y + dy]
        predicted_block_2 = SharedArray.DynamicView(
            simulation_dict['predicted_arrays_t2'][0])[x:x + dx, y:y + dy]
        delta_block = SharedArray.DynamicView(
            simulation_dict['delta_arrays'][0])[x:x + dx, y:y + dy]
        if predicted_block.shape != (dx, dy, 3):
            print predicted_block.shape
            raise Exception("Block sizes don't agree")
        k = (i / blocks_per_dim[0]) * hidden_size
        l = (i % blocks_per_dim[0]) * hidden_size
        output_block = SharedArray.DynamicView(
            simulation_dict['state_arrays'][0])[k:k + hidden_size, l:l + hidden_size]
        unit_parameters['signal_blocks'].append(
            (input_block, delta_block, predicted_block, predicted_block_2))
        unit_parameters['output_block'] = output_block
        if readout_layer[0]:
            # Motor heatmap prediction
            layer = 0
            bpd = blocks_per_dim[layer]
            readout_teaching_block = SharedArray.DynamicView(
                simulation_dict['readout_arrays'][layer])[
                    (i / bpd) * readout_block_size[0]:(i / bpd + 1) * readout_block_size[0],
                    (i % bpd) * readout_block_size[0]:(i % bpd + 1) * readout_block_size[0]]
            readout_delta_block = SharedArray.SharedNumpyArray_like(readout_teaching_block)
            predicted_readout_block = SharedArray.DynamicView(
                simulation_dict['predicted_readout_arrays'][layer])[
                    (i / bpd) * readout_block_size[0]:(i / bpd + 1) * readout_block_size[0],
                    (i % bpd) * readout_block_size[0]:(i % bpd + 1) * readout_block_size[0]]
            unit_parameters['readout_blocks'] = [
                (readout_teaching_block, readout_delta_block, predicted_readout_block)]
            unit_parameters["layer"] = 0
            # End motor heatmap prediction
        simulation_dict['stage0'].append(unit_parameters)
    # Layer 0 surround
    gather_surround(simulation_dict,
                    (base_index[0], blocks_per_dim[0]),
                    radius=lateral_radius,
                    context_factor=context_factor_lateral,
                    exclude_self=exclude_self)
    # The following layers are more generic
    for layer in range(1, layers):
        for i in xrange(blocks_per_dim[layer] * blocks_per_dim[layer]):
            unit_parameters = create_basic_unit_v1(
                simulation_dict['learning_rates'][layer], momentum, tau,
                additional_learning_rate)
            k = (i / blocks_per_dim[layer]) * hidden_size
            l = (i % blocks_per_dim[layer]) * hidden_size
            output_block = SharedArray.DynamicView(
                simulation_dict['state_arrays'][layer])[k:k + hidden_size, l:l + hidden_size]
            unit_parameters['output_block'] = output_block
            if readout_layer[layer]:
                # Motor heatmap prediction
                bpd = blocks_per_dim[layer]
                readout_teaching_block = SharedArray.DynamicView(
                    simulation_dict['readout_arrays'][layer])[
                        (i / bpd) * readout_block_size[layer]:(i / bpd + 1) * readout_block_size[layer],
                        (i % bpd) * readout_block_size[layer]:(i % bpd + 1) * readout_block_size[layer]]
                readout_delta_block = SharedArray.SharedNumpyArray_like(readout_teaching_block)
                predicted_readout_block = SharedArray.DynamicView(
                    simulation_dict['predicted_readout_arrays'][layer])[
                        (i / bpd) * readout_block_size[layer]:(i / bpd + 1) * readout_block_size[layer],
                        (i % bpd) * readout_block_size[layer]:(i % bpd + 1) * readout_block_size[layer]]
                unit_parameters['readout_blocks'] = [
                    (readout_teaching_block, readout_delta_block, predicted_readout_block)]
                unit_parameters["layer"] = layer
                # End motor heatmap prediction
            simulation_dict['stage0'].append(unit_parameters)
        # Connect to the previous layer
        connect_forward_and_back_v1(
            simulation_dict,
            (base_index[layer - 1], blocks_per_dim[layer - 1],
             simulation_dict['predicted_arrays'][layer],
             simulation_dict['predicted_arrays_t2'][layer]),
            (base_index[layer], blocks_per_dim[layer]),
            square_size=fan_in_square_size,
            radius=fan_in_radius,
            context_factor=context_factor_feedback)
        # Layer surround
        gather_surround(simulation_dict,
                        (base_index[layer], blocks_per_dim[layer]),
                        radius=lateral_radius,
                        context_factor=context_factor_lateral,
                        exclude_self=exclude_self)
        if send_context_two_layers_back and layer > 1:
            connect_back(simulation_dict,
                         (base_index[layer], blocks_per_dim[layer]),
                         (base_index[layer - 2], blocks_per_dim[layer - 2]),
                         square_size=2 * fan_in_square_size,
                         radius=2 * fan_in_radius,
                         context_factor=context_factor_feedback)
    # Add the global feedback from the top layer
    if last_layer_context_to_all:
        logging.info("Connecting last layer back to everyone")
        for to_idx in xrange(base_index[layers - 1]):
            for from_idx in range(base_index[layers - 1], len(simulation_dict["stage0"])):
                context_block = simulation_dict['stage0'][from_idx]['output_block']
                delta_block2 = SharedArray.SharedNumpyArray_like(context_block)
                simulation_dict['stage0'][from_idx]['delta_blocks'].append(delta_block2)
                # Connect the context block to the source
                simulation_dict['stage0'][to_idx]['context_blocks'].append(
                    (context_block, delta_block2, context_factor_feedback))
    simulation_dict['stage0_size'] = len(simulation_dict['stage0'])
    for i in range(simulation_dict['stage0_size']):
        simulation_dict['stage0'][i]['flags'] = simulation_dict['flags']
        unit.ExecutionUnit.generate_missing_parameters(
            simulation_dict['stage0'][i], options=options)
    return simulation_dict
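# A hedged sketch of the options dictionary generate_v1 expects; the values
# are illustrative and ex_module is a hypothetical module path. All values
# arrive as strings (or lists of strings), since generate_v1 parses them
# itself; the keys in the trailing group are consumed by the unit's
# generate_missing_parameters defined further below.
def _example_generate_v1():
    options = {
        "input_block_size": "6",
        "hidden_block_size": "5",
        "layer_shapes": ["16", "8", "4", "2"],   # blocks per side, per layer
        "readout_block_size": ["6", "6", "6", "6"],
        "enable_readout": ["1", "0", "0", "0"],
        "lateral_radius": "1.9",
        "fan_in_square_size": "2",
        "fan_in_radius": "1",
        "readout_depth": "3",
        "ex_module": "PVM_models.PVM_unit_v1",   # hypothetical
        "context_exclude_self": "1",
        "last_layer_context_to_all": "1",
        "send_context_two_layers_back": "0",
        "save_source_files": "0",
        "momentum": "0.5",
        "tau": "0.01",
        # Keys read by the unit's generate_missing_parameters:
        "unit_type": "simple",
        "polynomial": "0",
        "autoencoder": "0",
        "use_t_minus_2_block": "0",
        "use_derivative": "1",
        "use_integral": "1",
        "use_error": "0",
        "predict_two_steps": "1",
        "use_global_backprop": "0",
        "feed_context_in_complex_layer": "0",
        "normalize_output": "0",
        "backpropagate_readout_error": "0",
    }
    return generate_v1("example", "a small test dictionary", options)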
for y in range(blocks_per_dim1):
    surround = get_fan_in((x, y),
                          dim_x_l=blocks_per_dim0,
                          dim_y_l=blocks_per_dim0,
                          dim_x_u=blocks_per_dim1,
                          dim_y_u=blocks_per_dim1,
                          block_x=square_size,
                          block_y=square_size,
                          radius=radius)
    dest = index1 + x * blocks_per_dim1 + y  # destination unit
    for xy in surround:
        source = index0 + xy[0] * blocks_per_dim0 + xy[1]  # source unit
        # Prepare the input and corresponding delta block at source
        input_block = simulation_dict['stage0'][source]['output_block']
        delta_block = SharedArray.SharedNumpyArray_like(input_block)
        simulation_dict['stage0'][source]['delta_blocks'].append(delta_block)
        # Prepare the context and corresponding delta block at destination
        context_block = simulation_dict['stage0'][dest]['output_block']
        delta_block2 = SharedArray.SharedNumpyArray_like(context_block)
        simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2)
        # Connect the context block to the source
        simulation_dict['stage0'][source]['context_blocks'].append(
            (context_block, delta_block2, context_factor))
        # Prepare the predicted blocks
        xx = xy[0] * hidden_size
        yy = xy[1] * hidden_size
        assert (predicted_array[xx:xx + dx, yy:yy + dy].shape == context_block.shape)
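# A small helper sketch (hypothetical name) of the indexing convention used
# in the wiring loop above: units of a layer are stored row-major in
# simulation_dict['stage0'], offset by that layer's base index, so grid
# coordinate (x, y) maps to a flat unit index.
def _example_unit_index(base_index, blocks_per_dim, x, y):
    return base_index + x * blocks_per_dim + y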
def generate_missing_parameters(parameters, options):
    """
    This method can be called to generate all the missing dictionary
    parameters when all the other relevant variables are known. Leave empty
    if there is nothing more to generate.

    When complex_unit is False, a standard 3-layer MLP is used. When
    complex_unit is True, an MLP with additional hidden layers is used.

    There is no return value; the method works by side effect, modifying the
    parameters dict.

    :param parameters: parameter dictionary
    :type parameters: dict
    """
    complex_unit = options['unit_type'] == "complex"
    polynomial = options['polynomial'] == '1'
    autoencoder = options['autoencoder'] == '1'
    nhidden = np.prod(parameters['output_block'].shape)
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    ninputs = 0
    noutputs = 0
    ncontext = 0
    # Any additional memory buffers needed in the operation of the unit
    parameters['internal_buffers'] = []
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        ninputs += np.prod(block.shape) * len(ExecutionUnit.UNSUPERVISED_SIGNAL_INPUTS)
    for (block, delta, factor) in parameters['context_blocks']:
        ncontext += np.prod(block.shape)
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += 2 * np.prod(block.shape)  # to predict two steps into the future
    nadditional = 0
    for (block, delta, pblock) in parameters['readout_blocks']:
        nadditional += np.prod(block.shape)
    parameters["Primary_Predictor_params"] = {}
    parameters["Residual_Predictor_params"] = {}
    parameters["Readout_Predictor_params"] = {}
    if complex_unit:
        # 4-layer perceptron
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ncontext + 1, 3 * nhidden + 1, 2 * nhidden + 1, noutputs + 1])
        parameters["Residual_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + 1, 2 * nhidden + 1, nhidden + 1, noutputs + 1])
        parameters["complex"] = True
    else:
        # 3-layer perceptron, simple MLP unit (not complex)
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ncontext + 1, 2 * nhidden + 1, noutputs + 1])
        parameters["Residual_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + 2 * nhidden + 1, nhidden + 1, noutputs + 1])
    parameters["Primary_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Primary_Predictor_params"]['beta'][0] = 1.0
    parameters["Primary_Predictor_params"]['learning_rate'] = parameters['primary_learning_rate']
    parameters["Primary_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Primary_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Primary_Predictor_params"]['layers'])
    parameters["Residual_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Residual_Predictor_params"]['beta'][0] = 1.0
    parameters["Residual_Predictor_params"]['learning_rate'] = parameters['primary_learning_rate']
    parameters["Residual_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Residual_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Residual_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Residual_Predictor_params"]['layers'])
    parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
        [nhidden + 1, 2 * nhidden + 1, nadditional + 1])
    parameters["Readout_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Readout_Predictor_params"]['beta'][0] = 1.0
    parameters["Readout_Predictor_params"]['learning_rate'] = parameters['readout_learning_rate']
    parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Readout_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Readout_Predictor_params"]['layers'])
    parameters["Primary_Predictor_params"]['polynomial'] = polynomial
    parameters["Residual_Predictor_params"]['polynomial'] = polynomial
    parameters["Readout_Predictor_params"]['polynomial'] = polynomial
    parameters['autoencoder'] = autoencoder
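# A hedged arithmetic sketch (illustrative numbers) of how the unit above
# sizes its perceptrons: the primary predictor maps context to the
# predictions, while the residual predictor also sees the hidden
# representation; each layer gets +1 for the bias unit.
def _example_layer_sizing(nhidden=25, ninputs=108, ncontext=75, noutputs=216):
    primary = [ncontext + 1, 2 * nhidden + 1, noutputs + 1]
    residual = [ninputs + 2 * nhidden + 1, nhidden + 1, noutputs + 1]
    return primary, residual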
def generate_missing_parameters(parameters, options):
    """
    This method can be called to generate all the missing dictionary
    parameters when all the other relevant variables are known. Leave empty
    if there is nothing more to generate.

    When complex_unit is False, a standard 3-layer MLP is used. When
    complex_unit is True, an MLP with additional hidden layers is used.

    There is no return value; the method works by side effect, modifying the
    parameters dict.

    :param parameters: parameter dictionary
    :type parameters: dict
    """
    complex_unit = options['unit_type'] == "complex"
    polynomial = options['polynomial'] == '1'
    autoencoder = options['autoencoder'] == '1'
    use_t_2_block = options['use_t_minus_2_block'] == '1'
    use_derivative = options['use_derivative'] == '1'
    use_integral = options['use_integral'] == '1'
    use_error = options['use_error'] == '1'
    predict_2_steps = options['predict_two_steps'] == '1'
    use_global_backprop = options['use_global_backprop'] == '1'
    complex_context_in_second_layer = options['feed_context_in_complex_layer'] == '1'
    parameters['normalize_output'] = options["normalize_output"] == "1"
    parameters['backpropagate_readout_error'] = options["backpropagate_readout_error"] == "1"
    nhidden = np.prod(parameters['output_block'].shape)
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block']) + 1
    parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    ninputs = 0
    noutputs = 0
    ncontext = 0
    # Any additional memory buffers needed in the operation of the unit
    parameters['internal_buffers'] = []
    parameters['integral_blocks'] = []
    parameters['derivative_blocks'] = []
    parameters['error_blocks'] = []
    parameters['use_derivative'] = use_derivative
    parameters['use_integral'] = use_integral
    parameters['use_error'] = use_error
    parameters['use_t_2_block'] = use_t_2_block
    parameters['predict_2_steps'] = predict_2_steps
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
        if use_derivative:
            parameters['derivative_blocks'].append(SharedArray.SharedNumpyArray_like(block))
        if use_integral:
            parameters['integral_blocks'].append(SharedArray.SharedNumpyArray_like(block))
        if use_error:
            parameters['error_blocks'].append(SharedArray.SharedNumpyArray_like(block))
    input_block_features = 1
    output_predictions = 1
    if use_derivative:
        input_block_features += 1
    if use_integral:
        input_block_features += 1
    if use_error:
        input_block_features += 1
    if use_t_2_block:
        input_block_features += 1
    if predict_2_steps:
        output_predictions += 1
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        ninputs += np.prod(block.shape) * input_block_features
    for (block, delta, factor) in parameters['context_blocks']:
        ncontext += np.prod(block.shape)
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += np.prod(block.shape) * output_predictions
    nreadout = 0
    for (block, delta, pblock) in parameters['readout_blocks']:
        nreadout += np.prod(block.shape)
    parameters["Primary_Predictor_params"] = {}
    parameters["Readout_Predictor_params"] = {}
    if complex_unit and complex_context_in_second_layer:
        # 4-layer perceptron, context fed into the second hidden layer
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + 1, 2 * nhidden + ncontext + 1, nhidden + 1, noutputs + 1])
    elif complex_unit:
        # 4-layer perceptron
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + ncontext + 1, 2 * nhidden + 1, nhidden + 1, noutputs + 1])
    else:
        # 3-layer perceptron, simple MLP unit (not complex)
        parameters["Primary_Predictor_params"]['layers'] = MLP.get_layers(
            [ninputs + ncontext + 1, nhidden + 1, noutputs + 1])
    parameters["Primary_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Primary_Predictor_params"]['beta'][0] = 1.0
    parameters["Primary_Predictor_params"]['learning_rate'] = parameters['primary_learning_rate']
    parameters["Primary_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Primary_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Primary_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Primary_Predictor_params"]['layers'])
    parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
        [nhidden + 1, 2 * nhidden + 1, nreadout + 1])
    parameters["Readout_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Readout_Predictor_params"]['beta'][0] = 1.0
    parameters["Readout_Predictor_params"]['learning_rate'] = parameters['readout_learning_rate']
    parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
    parameters["Readout_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
    parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
        parameters["Readout_Predictor_params"]['layers'])
    parameters["Primary_Predictor_params"]['polynomial'] = polynomial
    parameters["Readout_Predictor_params"]['polynomial'] = polynomial
    parameters['autoencoder'] = autoencoder
    parameters['use_global_backprop'] = use_global_backprop
    parameters["complex_context_in_second_layer"] = complex_context_in_second_layer
    parameters["complex"] = complex_unit
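# A hedged sketch of the input sizing logic above: every enabled option adds
# one more copy of each signal block to the predictor's input vector, so a
# 6x6x3 signal block (108 values, an illustrative size) contributes
# 108 * input_block_features inputs.
def _example_input_feature_count(block_size=108, use_derivative=True,
                                 use_integral=True, use_error=False,
                                 use_t_2_block=False):
    input_block_features = 1 + sum([use_derivative, use_integral,
                                    use_error, use_t_2_block])
    return block_size * input_block_features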
def upgrade_to_ver_1(parameters):
    parameters['internal_buffers'] = []
    parameters['output_min'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['output_max'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['avg_delta'] = SharedArray.SharedNumpyArray_like(
        parameters['output_block'])
    parameters['integral_blocks'] = []
    parameters['derivative_blocks'] = []
    parameters['error_blocks'] = []
    parameters['use_derivative'] = True
    parameters['use_integral'] = True
    parameters['use_error'] = True
    parameters['use_t_2_block'] = False
    parameters['predict_2_steps'] = False
    parameters['use_global_backprop'] = False
    parameters['normalize_output'] = False
    parameters["complex_context_in_second_layer"] = False
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        block01 = SharedArray.SharedNumpyArray_like(block)
        block02 = SharedArray.SharedNumpyArray_like(block)
        block03 = SharedArray.SharedNumpyArray_like(block)
        parameters['internal_buffers'].append((block01, block02, block03))
        parameters['derivative_blocks'].append(SharedArray.SharedNumpyArray_like(block))
        parameters['integral_blocks'].append(SharedArray.SharedNumpyArray_like(block))
        parameters['error_blocks'].append(SharedArray.SharedNumpyArray_like(block))
    if "complex" not in parameters.keys():
        parameters["complex"] = False
        if len(parameters["Primary_Predictor_params"]['layers']) == 4:
            parameters["complex"] = True
    if "autoencoder" not in parameters.keys():
        parameters["autoencoder"] = False
    if "readout_learning_rate" not in parameters.keys():
        parameters['readout_learning_rate'] = parameters["Primary_Predictor_params"]["learning_rate"]
    if "momentum" not in parameters.keys():
        parameters['momentum'] = parameters["Primary_Predictor_params"]["momentum"]
    nhidden = parameters["Primary_Predictor_params"]['layers'][-2]['activation'].shape[0] - 1
    nreadout = 0
    noutputs = 0
    for (block, delta, pred_block, pred_block2) in parameters['signal_blocks']:
        noutputs += np.prod(block.shape)
    for (block, delta, pblock) in parameters['readout_blocks']:
        nreadout += np.prod(block.shape)
    if "Readout_Predictor_params" not in parameters.keys():
        parameters["Readout_Predictor_params"] = {}
        parameters["Readout_Predictor_params"]['layers'] = MLP.get_layers(
            [nhidden + 1, nreadout + 1])
        parameters["Readout_Predictor_params"]['beta'] = SharedArray.SharedNumpyArray((1,), np.float)
        parameters["Readout_Predictor_params"]['beta'][0] = 1.0
        parameters["Readout_Predictor_params"]['learning_rate'] = parameters['readout_learning_rate']
        parameters["Readout_Predictor_params"]['momentum'] = parameters['momentum']
        parameters["Readout_Predictor_params"]['mse'] = SharedArray.SharedNumpyArray((1,), np.float)
        parameters["Readout_Predictor_params"]['weights'] = MLP.get_weights(
            parameters["Readout_Predictor_params"]['layers'])
        # Split the old combined output weight matrix: the columns past
        # noutputs belonged to the readout predictions
        parameters["Readout_Predictor_params"]['weights'][0][:] = \
            parameters["Primary_Predictor_params"]['weights'][-1][:, noutputs:]
        old_weight_matrix = parameters["Primary_Predictor_params"]['weights'][-1]
        parameters["Primary_Predictor_params"]['weights'][-1] = \
            SharedArray.SharedNumpyArray((nhidden + 1, noutputs), np.float)
        parameters["Primary_Predictor_params"]['weights'][-1][:] = old_weight_matrix[:, :noutputs]
        parameters["Primary_Predictor_params"]['layers'][-1] = {
            'activation': SharedArray.SharedNumpyArray(noutputs, np.float),
            'error': SharedArray.SharedNumpyArray(noutputs, np.float),
            'delta': SharedArray.SharedNumpyArray(noutputs, np.float)
        }
    parameters['backpropagate_readout_error'] = True
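# A numpy-only sketch (illustrative sizes) of the weight surgery performed
# above: the old combined output matrix of shape (nhidden+1, noutputs+nreadout)
# is split column-wise into the primary predictor's output weights and the
# new readout predictor's first-layer weights.
def _example_weight_split(nhidden=4, noutputs=6, nreadout=2):
    old = np.arange((nhidden + 1) * (noutputs + nreadout),
                    dtype=np.float64).reshape(nhidden + 1, noutputs + nreadout)
    primary = old[:, :noutputs].copy()
    readout = old[:, noutputs:].copy()
    assert primary.shape == (nhidden + 1, noutputs)
    assert readout.shape == (nhidden + 1, nreadout)
    return primary, readout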