def run_measurement(name, params, args, max_steps: int = None):
    """Run the conv-wide Task0 online-learning experiment.

    Args:
        name: experiment name, used for caching and result folders.
        params: list of model-parameter dicts, one entry per run.
        args: parsed command-line arguments (see the parse_test_args method).
        max_steps: optional override of the total number of simulation steps.
    """
    total_steps = read_max_steps(max_steps)  # hoisted: was computed twice

    experiment = Task0OnlineLearningTemplate(
        Task0ConvWideAdapter(),
        Task0ConvWideTopology,
        params,
        max_steps=total_steps,
        # 8/10 of the steps are used for training, the remaining 2/10 for testing
        num_training_steps=total_steps // 10 * 8,
        num_classes=DatasetSeObjectsNode.label_size(),  # TODO make this somehow better
        num_layers=3,  # TODO parametrized
        measurement_period=4,
        sliding_window_size=300,  # be aware that the mutual information depends on this value
        sliding_window_stride=50,
        sp_evaluation_period=200,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder,
        disable_plt_show=True,
        just_hide_labels=True)  # do not switch to testing data, just hide labels

    if args.run_gui:
        # NOTE(review): the GUI branch shows Task0NarrowTopology while the
        # experiment runs Task0ConvWideTopology — confirm this mismatch is intended.
        run_just_model(Task0NarrowTopology(**params[0]), gui=True)
    else:
        print(f'======================================= Measuring model: {name}')
        experiment.run()
def run_measurement_task0(
        name,
        params,
        args,
        topology_class,
        run_labels,
        learning_rate=SeT0BasicTopologyRT211Phased.LEARNING_RATE):
    """Run the Task0 relearning train/test experiment with the given params.

    See the parse_test_args method for the meaning of `args`.
    """
    relearning_experiment = Task0TrainTestTemplateRelearning(
        Task0RelearnBasicAdapter(),
        topology_class,
        params,
        run_labels=run_labels,
        overall_training_steps=NUM_TRAINING_PHASES * TRAINING_PHASE_STEPS,
        num_testing_phases=NUM_TRAINING_PHASES,
        num_testing_steps=TESTING_PHASE_STEPS,
        measurement_period=1,
        learning_rate=learning_rate,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        results_folder=args.alternative_results_folder)

    if args.run_gui:
        # GUI mode: just display the first parametrization of the topology.
        run_just_model(topology_class(**params[0]), gui=True)
        return

    print(f'======================================= Measuring model: {name}')
    relearning_experiment.run()
def run_measurement_task0(name, params, args, topology_class, max_steps: int = None):
    """Run the Task0 stats experiment with the given params.

    See the parse_test_args method for the meaning of `args`.

    Args:
        name: experiment name, used for caching and result folders.
        params: list of model-parameter dicts, one entry per run.
        args: parsed command-line arguments.
        topology_class: topology to measure (and to show in GUI mode).
        max_steps: optional override of the total number of simulation steps.
    """
    experiment = Task0ExperimentTemplate(
        Task0StatsBasicAdapter(),
        topology_class,
        params,
        max_steps=read_max_steps(max_steps),
        measurement_period=1,
        smoothing_window_size=29,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder)

    if args.run_gui:
        # Fix: previously this hard-coded SeT0BasicTopology, silently ignoring
        # the topology_class parameter (the sibling runner uses topology_class).
        run_just_model(topology_class(**params[0]), gui=True)
    else:
        print(f'======================================= Measuring model: {name}')
        experiment.run()
def run_experiment(run_gui: bool, save: bool, load: bool, clear: bool):
    """Run eight identical LRF SP-flock runs on MNIST, differing only in seed.

    Args:
        run_gui: show the first parametrization in the GUI instead of measuring.
        save: save the measurement cache.
        load: load the measurement cache.
        clear: clear the measurement cache.
    """
    # Eight copies of the same configuration; the comprehension replaces the
    # original eight copy-pasted literals. Each dict must be a distinct object
    # because a per-run seed is assigned below.
    params = [
        {"expert_width": 28, "n_cluster_centers": 100}
        for _ in range(8)
    ]
    for i, param in enumerate(params):
        param['seed'] = i
        param['training_phase_steps'] = 500
        param['testing_phase_steps'] = 1000

    experiment = Lrf1SpFlockExperimentTemplate(
        Lrf1SpFlockMnistTemplate(),
        LrfTopology,
        params,
        max_steps=15000,
        measurement_period=1,
        save_cache=save,
        load_cache=load,
        clear_cache=clear
    )

    if run_gui:
        run_just_model(LrfTopology(**params[0]), gui=True)
    else:
        experiment.run()
def run_measurement(name, params, args, max_steps: int = None, debug_mi: bool = False):
    """Run the SP learning-convergence experiment on MNIST.

    See the parse_test_args method for the meaning of `args`.
    """
    convergence_experiment = SpLearningConvergenceExperimentTemplate(
        MnistSpLearningConvergenceTopologyAdapter(),
        MnistSpTopology,
        params,
        max_steps=read_max_steps(max_steps),
        num_classes=10,
        measurement_period=1,
        # be aware that the mutual information depends on this value
        sliding_window_size=200,
        sliding_window_stride=50,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder,
        disable_plt_show=True,
        debug_mi=debug_mi,
    )

    if args.run_gui:
        # GUI mode: display the first parametrization instead of measuring.
        run_just_model(MnistSpTopology(**params[0]), gui=True)
        return

    print(f'======================================= Measuring model: {name}')
    convergence_experiment.run()
def run_experiment(use_gui: bool, save: bool, load: bool, clear: bool):
    """Sweep over cluster-center counts for the SE-dataset LRF debug topology.

    The measurement template for this setup is not supported yet, so outside
    GUI mode this is currently a no-op.
    """
    # Parameter sets for the consecutive runs; only num_cc varies.
    params = [
        {'eox': 2, 'eoy': 2, 'num_cc': cc}
        for cc in (10, 25, 50, 100, 250, 350, 550)
    ]

    if use_gui:
        run_just_model(SeDatasetSpLrfDebug(**params[0]), gui=True)
    else:
        # measurement not supported yet for this topology
        pass
def run_measurement_with_params(
        name,
        params,
        args,
        exp_pars: TrainTestExperimentTemplateParams = None,
        topology_class=Task0TaSeTopology):
    """Run a given experiment with given commandline params, topology and
    experiment params."""
    # fall back to the module-level full_params when no experiment params given
    pars = full_params if exp_pars is None else exp_pars

    template = Task0TrainTestClassificationAccTemplate(
        ClassificationAccuracyModularAdapter(),
        topology_class,  # Task0TaSeTopology or Task0NnTopology supported for now
        params,
        overall_training_steps=pars.overall_training_steps,
        num_testing_steps=pars.num_testing_steps,
        num_testing_phases=pars.num_testing_phases,
        # this should match the configuration of SE or SE dataset that we'll use
        num_classes=20,
        num_layers=2,
        measurement_period=pars.measurement_period,
        sliding_window_size=1,  # not used
        sliding_window_stride=1,  # not used
        sp_evaluation_period=pars.sp_evaluation_period,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder,
        disable_plt_show=True)

    if args.run_gui:
        run_just_model(topology_class(**params[0]), gui=True)
        return

    print(f'======================================= Measuring model: {name}')
    template.run()
def run_measurement(name, params, args, max_steps: int = None):
    """Run the SE-dataset running-stats experiment with the given params.

    See the parse_test_args method for the meaning of `args`.
    """
    stats_experiment = DatasetSeSimulationRunningStatsExperimentTemplate(
        SeDatasetTaRunningStatsAdapter(),
        SeDatasetTaLrf,
        params,
        max_steps=read_max_steps(max_steps),
        measurement_period=1,
        smoothing_window_size=99,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder)

    if args.run_gui:
        # GUI mode: display the first parametrization instead of measuring.
        run_just_model(SeDatasetTaLrf(**params[0]), gui=True)
        return

    print(f'======================================= Measuring model: {name}')
    stats_experiment.run()
def run_measurement(name, params, args, debug: bool = False, num_layers: int = 3):
    """Run the Task0 train/test learning-rate experiment with the given params.

    See the parse_test_args method for the meaning of `args`.

    Args:
        name: experiment name, used for caching and result folders.
        params: list of model-parameter dicts, one entry per run.
        args: parsed command-line arguments.
        debug: use the shorter debug_params schedule instead of full_params.
        num_layers: number of layers reported to the measurement template.
    """
    # Fix: the original docstring opened with four quotes (""""Runs...),
    # which left a stray quote character at the start of the documentation.
    exp_pars = debug_params if debug else full_params

    experiment = Task0TrainTestLearningRateTemplate(
        LearningRateTaModularAdapter(),
        Task0TaSeTopology,
        params,
        overall_training_steps=exp_pars.overall_training_steps,
        num_testing_steps=exp_pars.num_testing_steps,
        num_testing_phases=exp_pars.num_testing_phases,
        num_classes=DatasetSeObjectsNode.label_size(),
        num_layers=num_layers,
        measurement_period=exp_pars.measurement_period,
        sliding_window_size=1,  # not used
        sliding_window_stride=1,  # not used
        sp_evaluation_period=exp_pars.sp_evaluation_period,
        save_cache=args.save,
        load_cache=args.load,
        clear_cache=args.clear,
        experiment_name=name,
        computation_only=args.computation_only,
        experiment_folder=args.alternative_results_folder,
        disable_plt_show=True,
        show_conv_agreements=False)

    if args.run_gui:
        run_just_model(Task0TaSeTopology(**params[0]), gui=True)
    else:
        print(f'======================================= Measuring model: {name}')
        experiment.run()
else: cp.n_cluster_centers = 200 cp.rf_size = (8, 8) cp.rf_stride = (8, 8) tp = None if use_top_layer: tp = MultipleLayersParams() tp.n_cluster_centers = 20 tp.sp_buffer_size = 3000 tp.sp_batch_size = 2000 tp.learning_rate = 0.1 tp.cluster_boost_threshold = 1000 class_f = [1, 2, 3, 4] params = [{ 'conv_layers_params': cp, 'top_layer_params': tp, 'image_size': SeDatasetSize.SIZE_64, 'class_filter': class_f, 'model_seed': None, 'baseline_seed': None, 'noise_amp': 0.0, 'random_order': False }] run_just_model(Task0TaSeTopology(**params[0]), gui=True, persisting_observer_system=True)
"l_0_cluster_centers": 10, "l_1_cluster_centers": 20, "l_0_rf_dims": (3, 3), "l_0_rf_stride": None, "l_1_rf_dims": (2, 2), "sp_n_cluster_centers": 10 } t = Topology('cuda') if args.space_engineers: se_params = SeEnvironmentParams( shapes=list(range(SeEnvironmentParams.n_shapes))) params["bottom_layer_size"] = 5 params["env_size"] = se_params.env_size params["label_length"] = se_params.n_shapes env = SEEnvironment(se_params) else: params["bottom_layer_size"] = 2 params["env_size"] = (24, 24) params["label_length"] = 3 env = BallEnvironment(BallEnvironmentParams()) cnc1r1 = topology_class(**params) t.add_node(cnc1r1) t.add_node(env) env.outputs.connect_automatically(cnc1r1.inputs) run_just_model(model=t, gui=True, persisting_observer_system=args.persisting_observer_system)
def run_experiment(run_gui: bool, save: bool, load: bool, clear: bool,
                   computation_only: bool, alternative_results_folder: str,
                   convolutional: bool):
    """Sweep LRF expert width, cluster-center count and stride on MNIST.

    Args:
        run_gui: show the first parametrization in the GUI instead of measuring.
        save: save the measurement cache.
        load: load the measurement cache.
        clear: clear the measurement cache.
        computation_only: run computation without plotting/publishing.
        alternative_results_folder: override for the results folder.
        convolutional: run the experts in convolutional mode.
    """
    expert_widths = [14, 7, 4, 2, 1]
    n_cluster_centers = [500, 100, 10, 5, 2]
    strides = range(1, 14)
    square_side = 28

    # Dict literal instead of the original dict([...]) construction; only keep
    # geometrically valid combinations with a bounded total number of experts.
    params = [
        {"expert_width": ew, "n_cluster_centers": ncc, "stride": stride}
        for ew, ncc, stride in itertools.product(
            expert_widths, n_cluster_centers, strides)
        if (square_side - ew) % stride == 0
        and stride <= ew
        and ew + stride <= square_side
        and ((square_side - (ew - stride)) // stride) * ncc < 1000
    ]

    # Full-image baselines (single expert covering the whole 28x28 input);
    # replaces five copy-pasted append blocks.
    for ncc in (2, 5, 10, 100, 500):
        params.append({
            "expert_width": square_side,
            "n_cluster_centers": ncc,
            "stride": square_side
        })

    # The enumerate index was unused in the original (seed is constant 0).
    for param in params:
        param['is_convolutional'] = convolutional
        param['seed'] = 0
        param['training_phase_steps'] = 200
        param['testing_phase_steps'] = 800

    # Run the most expensive configurations first.
    params.sort(
        key=lambda x: x["n_cluster_centers"] / x["stride"] * x['expert_width'],
        reverse=True)

    print('Parameters: _____________________________')
    for param in params:
        print(param)
    print(f"total {len(params)}")
    print('_________________________________________')

    experiment = Lrf1SpFlockExperimentTemplate(
        Lrf1SpFlockMnistTemplate(),
        LrfTopology,
        params,
        max_steps=30000,
        measurement_period=1,
        save_cache=save,
        load_cache=load,
        clear_cache=clear,
        computation_only=computation_only,
        experiment_folder=alternative_results_folder)

    if run_gui:
        run_just_model(LrfTopology(**params[0]), gui=True)
    else:
        experiment.run()
from eval_utils import parse_test_args, run_just_model
from torchsim.research.research_topics.rt_1_1_4_task0_experiments.topologies.task0_conv_wide_topology_more_labels import \
    Task0ConvWideTopologyMoreLabels

if __name__ == '__main__':
    arg = parse_test_args()

    # NOTE(review): this configuration is defined but never used below
    # (params2 is the one launched) — confirm it should be kept.
    params = [
        {
            'num_cc': [170, 400, 400],
            'lr': [0.1, 0.3, 0.3],
            'batch_s': [4000, 1500, 1500],
            'buffer_s': [4500, 2000, 2000],
            'label_scale': 1
        },
    ]

    params2 = [{
        'num_cc': [100, 200, 200],
        'lr': [0.4, 0.4, 0.4],
        'batch_s': [1500, 1000, 1000],
        'buffer_s': [3000, 1500, 1100],
        'label_scale': 1
    }]

    # launch the GUI with the second configuration
    run_just_model(Task0ConvWideTopologyMoreLabels(**params2[0]), gui=True)
model_seed=model_seed, batch_s=batch_s, buffer_s=buffer_s, num_epochs=num_epochs, lr=lr) self.add_node(self._model) if use_grayscale: grayscale_node = GrayscaleNode(squeeze_channel=False) self.add_node(grayscale_node) Connector.connect(self._se_group.outputs.image, grayscale_node.inputs.input) Connector.connect(grayscale_node.outputs.output, self._model.inputs.image) else: Connector.connect(self._se_group.outputs.image, self._model.inputs.image) Connector.connect(self._se_group.outputs.labels, self._model.inputs.label) def restart(self): pass if __name__ == '__main__': params = [{'class_filter': (1, 2, 3, 4)}] run_just_model(Task0NnTopology(**params[0]), gui=True)
from eval_utils import run_just_model
from torchsim.core.eval2.scaffolding import TopologyScaffoldingFactory
from torchsim.research.research_topics.rt_2_1_2_learning_rate.node_groups.se_node_group import SeNodeGroup
from torchsim.research.research_topics.rt_3_7_1_task0_analysis.node_groups.dummy_model_group import DummyModelGroup
from torchsim.research.research_topics.rt_3_7_1_task0_analysis.topologies.task0_ta_analysis_topology import \
    Task0TaAnalysisTopology

logger = logging.getLogger(__name__)

if __name__ == '__main__':
    # NOTE(review): num_conv_layers and use_top_layer are never read below —
    # confirm whether they are leftovers or should configure the model group.
    num_conv_layers = 1
    use_top_layer = True

    cf_easy = [1, 2, 3, 4]

    # Two identical parametrizations: SE group filtered to the easy classes,
    # model group left at its defaults.
    params = [
        {'se_group': {'class_filter': cf_easy}, 'model': {}},
        {'se_group': {'class_filter': cf_easy}, 'model': {}},
    ]

    scaffolding = TopologyScaffoldingFactory(
        Task0TaAnalysisTopology, se_group=SeNodeGroup, model=DummyModelGroup)

    run_just_model(scaffolding.create_topology(**params[0]),
                   gui=True,
                   persisting_observer_system=True)
'sp_n_cluster_centers': 10, 'env_size': (50, 50), 'l_0_rf_dims': (5, 5), 'l_1_rf_dims': (2, 2), 'ball_radius': 6 }, ] params = filter_params(args, params) experiment = Rt213ExperimentTemplate( adapter=Rt213Adapter(), topology_class=topology_class, models_params=params, overall_training_steps=50000, num_testing_steps=600, num_testing_phases=8, sub_experiment_name="MAIN", computation_only=args.computation_only, save_cache=args.save, load_cache=args.load, clear_cache=args.clear, experiment_folder=args.alternative_results_folder) if args.run_gui: run_just_model(topology_class(**params[0]), gui=True, persisting_observer_system=True) else: experiment.run()
self._node_se_dataset.switch_training(training_on=on, just_hide_labels=False) self._sp_reconstruction_layer.switch_learning(on) if __name__ == '__main__': """Just an example configuration for GUI""" expert_params = MultipleLayersParams() expert_params.n_cluster_centers = 200 expert_params.sp_buffer_size = 3000 expert_params.sp_batch_size = 1000 expert_params.learning_rate = 0.05 expert_params.cluster_boost_threshold = 1000 expert_params.compute_reconstruction = True class_f = None params = [ { 'top_layer_params': expert_params, 'image_size': SeDatasetSize.SIZE_64, 'class_filter': class_f, 'model_seed': None, 'baseline_seed': None, 'random_order': False, 'fof_fixed_size': None } ] run_just_model(Task0TaBottomUpClassificationTopology(**params[0]), gui=True, persisting_observer_system=True)
self.se_io.outputs.task_to_agent_label, self._rescale_node.inputs[0]) Connector.connect( self._rescale_node.outputs[0], self._join_node.inputs[1]) self.unsqueeze_node = UnsqueezeNode(0) self.add_node(self.unsqueeze_node) # join -> unsqueeze_node -> top_level_expert Connector.connect( self._join_node.outputs.output, self.unsqueeze_node.inputs.input) Connector.connect( self.unsqueeze_node.outputs.output, self._top_level_flock_node.inputs.sp.data_input) def restart(self): pass if __name__ == '__main__': params = [ {'num_cc': [150, 300], 'lr': [0.1, 0.001], # 0.2 0.2 - learns reasonably fast (oscillating) 'batch_s': [3000, 1000], 'label_scale': 1} ] run_just_model(ConvWideTwoLayerTopology(**params[0]), gui=True)