def train_model(prefix, interpreter: Interpreter, io_examples_tr, io_examples_val,
                io_examples_test, task_info, save=False, dir_path=None,
                last_recogniser_ti=None, last_rnn_ti=None, load=False):
    """Train (or reload) a model for one task and report its test accuracy.

    Dumps per-run evaluation data to ``dir_path`` and, optionally, saves the
    trained sub-networks.

    :param prefix: identifier embedded in the saved evaluations filename.
    :param interpreter: Interpreter used for data loading, training, evaluation.
    :param io_examples_tr: training IO examples (tuple or list of tuples).
    :param io_examples_val: validation IO examples.
    :param io_examples_test: test IO examples.
    :param task_info: task descriptor; its task_type selects the output type.
    :param save: when True, persist each trained sub-network under dir_path/Models/.
    :param dir_path: directory for evaluation dumps and model checkpoints.
    :param last_recogniser_ti: previous recogniser task info, forwarded to get_model.
    :param last_rnn_ti: previous RNN task info, forwarded to get_model.
    :param load: forwarded to get_model to reload previous weights.
    :returns: dict with key "accuracy" holding the final test accuracy.
    """
    # Recognition tasks use a sigmoid output; all others produce integer counts.
    if task_info.task_type == TaskType.Recognise:
        output_type = ProgramOutputType.SIGMOID
    else:
        output_type = ProgramOutputType.INTEGER

    data_loader_tr = interpreter._get_data_loader(io_examples_tr)
    data_loader_val = interpreter._get_data_loader(io_examples_val)
    data_loader_test = interpreter._get_data_loader(io_examples_test)

    program, new_fns_dict, parameters = get_model(
        task_info, dir_path, last_recogniser_ti, last_rnn_ti, load=load)

    new_fns_dict, max_accuracy_val, evaluations_np = interpreter.learn_neural_network_(
        program,
        output_type=output_type,
        new_fns_dict=new_fns_dict,
        trainable_parameters=list(parameters),
        data_loader_tr=data_loader_tr,
        data_loader_val=data_loader_val,
        data_loader_test=data_loader_test)

    max_accuracy_test = interpreter._get_accuracy(
        program, data_loader_test, output_type, new_fns_dict)
    print(max_accuracy_test)

    # io_examples_tr is either an (inputs, targets) tuple or a list of such
    # tuples; isinstance is the idiomatic check (was: type(...) == tuple).
    num_examples = io_examples_tr[0].shape[0] if isinstance(
        io_examples_tr, tuple) else io_examples_tr[0][0].shape[0]
    np.save(
        "{}/_{}__{}evaluations_np.npy".format(dir_path, prefix, num_examples),
        evaluations_np)

    if save:
        # Only the trained networks are needed (was: .items() with unused key).
        for trained_fn in new_fns_dict.values():
            trained_fn.save("{}/Models/".format(dir_path))
    return {"accuracy": max_accuracy_test}
def main():
    """Compare the NP model against the baseline on the MNIST counting task,
    averaging the error of each over 10 independent runs."""
    # Toggle between the full experiment configuration and a tiny smoke test.
    for_realz = False
    if for_realz:
        data_size_tr = 6000
        data_size_val = 2100
        list_lengths_tr = [2, 3, 4, 5]
        list_lengths_val = [6, 7, 8]
        num_epochs = 20
    else:
        data_size_tr = 150  # 12000
        data_size_val = 150  # 2100
        list_lengths_tr = [1]  # [2, 3, 4, 5]
        list_lengths_val = [1]  # [6, 7, 8]
        num_epochs = 1  # 20
    lib = FnLibrary()
    addImageFunctionsToLibrary(lib, load_recognise_5s=False)
    interpreter = Interpreter(
        lib, batch_size=150,
        epochs=num_epochs)  # , evaluate_every_n_percent=70)  # 60)
    mnist_data_provider = MNISTDataProvider()
    mnist_dict_train, mnist_dict_val, mnist_dict_test = mnist_data_provider.split_into_train_and_validation(
        0, 12, shuffleFirst=True)
    d1 = 1
    # Variable-length digit-counting examples; validation uses longer lists
    # than training to test generalisation (in the for_realz configuration).
    io_examples_tr = mnist_data_provider.get_batch_count_var_len(
        [d1], data_size_tr, mnist_dict_train,
        list_lengths=list_lengths_tr, return_count_int=False)
    io_examples_val = mnist_data_provider.get_batch_count_var_len(
        [d1], data_size_val, mnist_dict_val,
        list_lengths=list_lengths_val, return_count_int=False)
    acc_np = []
    acc_baseline = []
    for i in range(10):
        acc_np.append(
            accuracy_test_np_model(interpreter, io_examples_tr, io_examples_val))
        acc_baseline.append(
            accuracy_test_baseline_model(interpreter, io_examples_tr, io_examples_val))
    print("NP average error: {}".format(sum(acc_np) / len(acc_np)))
    print("New average error: {}".format(
        sum(acc_baseline) / len(acc_baseline)))

# NOTE(review): stray opening triple-quote below — appears to begin a
# commented-out block whose remainder is outside this view; confirm against
# the full file before removing it.
"""
def train_summer(type, interpreter: Interpreter, io_examples_tr, io_examples_val,
                 io_examples_test, dir_path, save=False):
    """Train a summing model and return its test accuracy.

    ``type`` selects the architecture: "sa" trains from scratch, "wt" reloads
    stored weights, anything else uses the progressive-NN variant.
    NOTE(review): ``type`` shadows the builtin; kept for caller compatibility.
    """
    output_type = ProgramOutputType.INTEGER

    # Choose the model variant according to the requested training mode.
    if type in ("sa", "wt"):
        program, new_fns_dict, parameters = get_model_summer(
            dir_path, load=type == "wt")
    else:
        program, new_fns_dict, parameters = get_model_summer_pnn(dir_path)

    data_loader_tr, data_loader_val, data_loader_test = (
        interpreter._get_data_loader(examples)
        for examples in (io_examples_tr, io_examples_val, io_examples_test))

    new_fns_dict, max_accuracy_val, _, evaluations_np = interpreter.learn_neural_network_(
        program,
        output_type=output_type,
        new_fns_dict=new_fns_dict,
        trainable_parameters=list(parameters),
        data_loader_tr=data_loader_tr,
        data_loader_val=data_loader_val,
        data_loader_test=data_loader_test)

    max_accuracy_test = interpreter._get_accuracy(
        program, data_loader_test, output_type, new_fns_dict)
    print(max_accuracy_test)

    if save:
        for trained_fn in new_fns_dict.values():
            trained_fn.save("{}/Models/".format(dir_path))
    return {"accuracy": max_accuracy_test}
def _mkNSynth(self):
    """Build the neural synthesizer selected by ``self.settings.synthesizer``.

    :returns: a NeuralSynthesizer for 'enumerative', a NeuralSynthesizerEA for
        'evolutionary', or None (implicitly) for any other value — preserved
        from the original behaviour.
    """
    interpreter = Interpreter(self.seq.lib, epochs=self.settings.epochs,
                              batch_size=self.settings.batch_size)
    nnprefix = self.seq.sname() + self.sname()

    # Both strategies instantiate sort variables with the same concrete types
    # (previously duplicated in each branch).
    concreteTypes = [
        mkRealTensorSort([1, 64, 4, 4]),
        mkBoolTensorSort([1, 1]),
        mkRealTensorSort([1, 50])
    ]

    if self.settings.synthesizer == 'enumerative':
        synth = SymbolicSynthesizer(self.seq.lib, self.fn_sort, nnprefix,
                                    concreteTypes)
        ns_settings = NeuralSynthesizerSettings(self.settings.N,
                                                self.settings.M,
                                                self.settings.K)
        assert self.seq.lib is not None
        return NeuralSynthesizer(interpreter, synth, self.seq.lib,
                                 self.fn_sort,
                                 self.settings.dbg_learn_parameters,
                                 ns_settings)
    elif self.settings.synthesizer == 'evolutionary':
        synth = SymbolicSynthesizerEA(self.seq.lib, self.fn_sort, nnprefix,
                                      concreteTypes)
        # TODO: Do not hardcode G
        NUM_GENERATIONS = 100  # fixed typo: was NUM_GENERATIIONS
        ns_settings = NeuralSynthesizerEASettings(G=NUM_GENERATIONS,
                                                  M=self.settings.M,
                                                  K=self.settings.K)
        assert self.seq.lib is not None
        return NeuralSynthesizerEA(interpreter, synth, self.seq.lib,
                                   self.fn_sort, ns_settings)
def main(task_id, sequence_str, sequence_name):
    """Execute baseline task number ``task_id`` of the given task sequence."""
    seq_tasks_info = get_sequence_from_string(sequence_str)
    print("running task {} of the following sequence:".format(task_id + 1))
    print_sequence(seq_tasks_info)

    # Image-primitive library; the 5s recogniser is trained, not preloaded.
    lib = FnLibrary()
    addImageFunctionsToLibrary(lib, load_recognise_5s=False)

    task_settings = get_task_settings(settings["dbg_mode"],
                                      settings["dbg_learn_parameters"],
                                      synthesizer=None)
    interpreter = Interpreter(lib, batch_size=150, epochs=task_settings.epochs)

    # Include all tasks up to and including task_id so earlier results exist.
    run_baseline_task("{}_{}".format(sequence_name, task_id), interpreter,
                      task_settings, seq_tasks_info[:task_id + 1])
def test_zeros():
    """Synthesize a program of sort (dim 2) -> real tensor [2] from a library
    containing only 'zeros', without evaluating candidates."""
    # IO examples: count-is-even batches built from MNIST digit splits.
    train, val = split_into_train_and_validation(0, 10)
    train_io_examples = get_batch_count_iseven(digits_to_count=[5],
                                               count_up_to=10,
                                               batch_size=100,
                                               digit_dictionary=train)
    val_io_examples = get_batch_count_iseven(digits_to_count=[5],
                                             count_up_to=10,
                                             batch_size=20,
                                             digit_dictionary=val)

    # Removed unused sort variables (t, t1, t2) and the unused inner
    # mk_recognise_5s helper from the original.
    libSynth = FnLibrary()
    libSynth.addItems([
        PPLibItem('zeros',
                  mkFuncSort(PPDimVar('a'), mkRealTensorSort([1, 'a'])),
                  # NOTE(review): 'zeros' is bound to pp_map — looks like a
                  # copy-paste slip; confirm the intended implementation.
                  pp_map),
        # PPLibItem('zeros2', mkFuncSort(PPDimVar('a'), PPDimVar('b'), mkRealTensorSort(['a', 'b'])), pp_map),
    ])

    fnSort = mkFuncSort(PPDimConst(2), mkRealTensorSort([2]))

    interpreter = Interpreter(libSynth)
    solver = SymbolicSynthesizer(interpreter, libSynth, fnSort,
                                 train_io_examples, val_io_examples)
    solver.setEvaluate(False)
    solution, score = solver.solve()
def testNeuralSynthesizerEA():
    """Smoke-test the evolutionary neural synthesizer on the count-fives task."""
    lib = getCountFiveLib()
    sort = getCountFiveSort()
    epochs = 2
    interpreter = Interpreter(lib, epochs)
    synth = SymbolicSynthesizerEA(lib, sort)

    # G=4 generations, M=100 evaluation limit, K=5 top programs reported.
    ea_settings = NeuralSynthesizerEASettings(4, 100, 5)
    nsynth = NeuralSynthesizerEA(interpreter, synth, lib, sort, ea_settings)

    # Count occurrences of digit 5: 120 training / 100 validation examples.
    tio, vio = get_io_examples_count_digit_occ(5, 120, 100)

    # Solve on increasing training-set sizes and report the top-K programs.
    for ctio, _ in iterate_diff_training_sizes(tio, [50, 100]):
        result = nsynth.solve(ctio, vio)
        print(result.top_k_solutions_results)
def main(dir_path, type):
    """Run the self-supervised classifier and summer experiments.

    NOTE(review): ``for_realz`` is not defined in this function — it must be a
    module-level flag; verify it exists at call time. ``type`` shadows the
    builtin but is kept for caller compatibility.
    """
    # Full experiment vs. quick-debug sizes.
    if for_realz:
        tr_size, val_size, epochs, percentages = 6000, 2100, 30, [2, 10, 20, 50, 100]
    else:
        tr_size, val_size, epochs, percentages = 150, 150, 1, [100]
    settings = {
        "data_size_tr": tr_size,
        "data_size_val": val_size,
        "num_epochs": epochs,
        "training_data_percentages": percentages,
    }

    lib = FnLibrary()
    addImageFunctionsToLibrary(lib, load_recognise_5s=False)
    interpreter = Interpreter(lib, batch_size=150,
                              epochs=settings["num_epochs"])

    run_ss_classifier(interpreter, type, settings, dir_path)
    run_ss_summer(interpreter, type, settings, dir_path)
if __name__ == '__main__':
    # Output directory comes from the command line.
    results_dir = str(sys.argv[1])
    # results_dir = "Results_maze_baselines"
    os.makedirs(results_dir, exist_ok=True)

    # Module-level flags/sizes; kept under their original names because other
    # code may read them as globals.
    just_testing = False
    epochs_cnn = 1  # 000
    epochs_nav = 10
    batch_size = 150

    # Library with the list/graph primitives required by the maze tasks.
    lib = FnLibrary()
    lib.addItems(
        get_items_from_repo(
            ['flatten_2d_list', 'map_g', 'compose', 'repeat', 'conv_g']))
    interpreter = Interpreter(lib, epochs=1, batch_size=batch_size)

    # Stage 1 (CNN pre-training) is currently disabled:
    # interpreter.epochs = epochs_cnn
    # res1 = _train_s2t1(results_dir)
    # print("res1: {}".format(res1["accuracy"]))

    interpreter.epochs = epochs_nav
    res2 = _train_s2t2(results_dir, "s2t1_cnn", "s2t1_mlp")
    print("res2: {}".format(res2["accuracy"]))

    interpreter.epochs = epochs_nav
    res3 = _train_s2t3(results_dir, "s2t2_cnn", "s2t2_mlp", "s2t2_conv_g")
    print("res3: {}".format(res3["accuracy"]))
def main():
    """Evaluate a single unknown-NN program on the classify_digits task."""
    # 2000 training / 200 validation IO examples.
    tio, vio = get_io_examples_classify_digits(2000, 200)

    # Task Name: classify_digits
    # The program is one unknown neural function: real image [1,1,28,28]
    # -> boolean one-hot [1,10].
    prog = PPTermUnk(
        name='nn_fun_cs1cd_1',
        sort=PPFuncSort(
            args=[PPTensorSort(param_sort=PPReal(),
                               shape=[PPDimConst(value=1), PPDimConst(value=1),
                                      PPDimConst(value=28), PPDimConst(value=28)])],
            rtpe=PPTensorSort(param_sort=PPBool(),
                              shape=[PPDimConst(value=1), PPDimConst(value=10)])))
    # Maps the unknown's name to its sort (same sort as the program term).
    unkSortMap = {
        'nn_fun_cs1cd_1': PPFuncSort(
            args=[PPTensorSort(param_sort=PPReal(),
                               shape=[PPDimConst(value=1), PPDimConst(value=1),
                                      PPDimConst(value=28), PPDimConst(value=28)])],
            rtpe=PPTensorSort(param_sort=PPBool(),
                              shape=[PPDimConst(value=1), PPDimConst(value=10)]))}

    # Library of higher-order combinators; obj=None means no concrete
    # implementation is attached here.
    lib = FnLibrary()
    lib.addItems([
        # compose : (B -> C) -> (A -> B) -> (A -> C)
        PPLibItem(name='compose', sort=PPFuncSort(
            args=[PPFuncSort(args=[PPSortVar(name='B')], rtpe=PPSortVar(name='C')),
                  PPFuncSort(args=[PPSortVar(name='A')], rtpe=PPSortVar(name='B'))],
            rtpe=PPFuncSort(args=[PPSortVar(name='A')], rtpe=PPSortVar(name='C'))),
            obj=None),
        # repeat : enum(8..10) -> (A -> A) -> (A -> A)
        PPLibItem(name='repeat', sort=PPFuncSort(
            args=[PPEnumSort(start=8, end=10),
                  PPFuncSort(args=[PPSortVar(name='A')], rtpe=PPSortVar(name='A'))],
            rtpe=PPFuncSort(args=[PPSortVar(name='A')], rtpe=PPSortVar(name='A'))),
            obj=None),
        # map_l : (A -> B) -> ([A] -> [B])
        PPLibItem(name='map_l', sort=PPFuncSort(
            args=[PPFuncSort(args=[PPSortVar(name='A')], rtpe=PPSortVar(name='B'))],
            rtpe=PPFuncSort(args=[PPListSort(param_sort=PPSortVar(name='A'))],
                            rtpe=PPListSort(param_sort=PPSortVar(name='B')))),
            obj=None),
        # fold_l : (B -> A -> B) -> B -> ([A] -> B)
        PPLibItem(name='fold_l', sort=PPFuncSort(
            args=[PPFuncSort(args=[PPSortVar(name='B'), PPSortVar(name='A')],
                             rtpe=PPSortVar(name='B')),
                  PPSortVar(name='B')],
            rtpe=PPFuncSort(args=[PPListSort(param_sort=PPSortVar(name='A'))],
                            rtpe=PPSortVar(name='B'))),
            obj=None),
        # conv_l : ([A] -> B) -> ([A] -> [B])
        PPLibItem(name='conv_l', sort=PPFuncSort(
            args=[PPFuncSort(args=[PPListSort(param_sort=PPSortVar(name='A'))],
                             rtpe=PPSortVar(name='B'))],
            rtpe=PPFuncSort(args=[PPListSort(param_sort=PPSortVar(name='A'))],
                            rtpe=PPListSort(param_sort=PPSortVar(name='B')))),
            obj=None),
        # zeros : dim a -> real tensor [1, a]
        PPLibItem(name='zeros',
                  sort=PPFuncSort(args=[PPDimVar(name='a')],
                                  rtpe=PPTensorSort(param_sort=PPReal(),
                                                    shape=[PPDimConst(value=1),
                                                           PPDimVar(name='a')])),
                  obj=None)])

    # Target sort of the whole program: image [1,1,28,28] -> bool [1,10].
    fn_sort = PPFuncSort(
        args=[PPTensorSort(param_sort=PPReal(),
                           shape=[PPDimConst(value=1), PPDimConst(value=1),
                                  PPDimConst(value=28), PPDimConst(value=28)])],
        rtpe=PPTensorSort(param_sort=PPBool(),
                          shape=[PPDimConst(value=1), PPDimConst(value=10)]))

    interpreter = Interpreter(lib, 150)
    res = interpreter.evaluate(program=prog,
                               output_type_s=fn_sort.rtpe,
                               unkSortMap=unkSortMap,
                               io_examples_tr=tio,
                               io_examples_val=vio)
def test6():
    """Synthesize a sum-of-5s program: a list of MNIST images -> how many are
    fives, built from recognise_5s + map + reduce + add.

    targetProg = lambda inputs: reduce(add, map(lib.recognise_5s, inputs))
    """
    t = PPSortVar('T')
    t1 = PPSortVar('T1')
    t2 = PPSortVar('T2')

    def mk_recognise_5s():
        # Pre-trained binary CNN: probability that an image shows a 5.
        res = NetCNN("recognise_5s", input_ch=1, output_dim=1,
                     output_activation=F.sigmoid)
        res.load('../Interpreter/Models/is5_classifier.pth.tar')
        return res

    libSynth = FnLibrary()
    real_tensor_2d = mkTensorSort(PPReal(), ['a', 'b'])
    libSynth.addItems([
        PPLibItem(
            'recognise_5s',
            mkFuncSort(mkTensorSort(PPReal(), ['a', 1, 28, 28]),
                       mkTensorSort(PPReal(), ['a', 1])),
            mk_recognise_5s()),
        PPLibItem(
            'map',
            mkFuncSort(mkFuncSort(t1, t2), mkListSort(t1), mkListSort(t2)),
            pp_map),
        PPLibItem('reduce',
                  mkFuncSort(mkFuncSort(t, t, t), mkListSort(t), t),
                  pp_reduce),
        PPLibItem('add',
                  mkFuncSort(real_tensor_2d, real_tensor_2d, real_tensor_2d),
                  lambda x, y: x + y),
    ])

    # Only the validation split is used (train split was unused in the original).
    _, val = split_into_train_and_validation(0, 10)
    val_ioExamples = get_batch_count_iseven(digits_to_count=[5], count_up_to=10,
                                            batch_size=20, digit_dictionary=val)

    # Target sort: list of images -> sum of fives.
    # Removed unused locals from the original: imgToIsFive, isFive, isFiveList.
    img = mkRealTensorSort([1, 1, 28, 28])
    imgList = mkListSort(img)
    sumOfFives = mkRealTensorSort([1, 1])
    fnSort = mkFuncSort(imgList, sumOfFives)

    interpreter = Interpreter(libSynth)
    # TODO: use "search" instead of "solve"
    solver = SymbolicSynthesizer(interpreter, libSynth, fnSort,
                                 val_ioExamples, val_ioExamples)
    # solver.setEvaluate(False)
    solution, score = solver.solve()
    print(solution)
    print(score)