# Command-line grid search over the weight-decay hyper-parameter.
# All other training parameters come from the run's defaults; one
# main_prog() training run is launched per grid point.
args = parser.parse_args()
srun = TorchDatasetSearchRun(args)
params = srun.default_params(args.activation_type)
params['verbose'] = True
base_model_name = (f'{params["net"]}_{params["activation_type"]}_'
                   f'lr_{params["lr"]}')

# change gridsearch values as desired
if params["net"].startswith('nin'):
    weight_decay_list = [1e-4]
else:
    weight_decay_list = [5e-4]

search_len = len(weight_decay_list)
# Resume support: init_indexes() yields the range of grid indices still to run.
start_idx, end_idx = srun.init_indexes(params['log_dir'], search_len)

for idx in range(start_idx, end_idx):
    srun.update_index_json(idx)
    weight_decay = weight_decay_list[idx]
    params['weight_decay'] = weight_decay
    params['model_name'] = (base_model_name +
                            '_weight_decay_{:.1E}'.format(weight_decay))
    params['combination_str'] = (
        '\nsearch idx {}/{}, weight decay {:.1E}.'.format(
            idx, end_idx - 1, weight_decay))
    # deep copy so the callee cannot mutate the shared params dict
    main_prog(copy.deepcopy(params))
# NOTE(review): this excerpt is a whitespace-mangled fragment of an RDKit
# command-line script: the original multi-line layout was collapsed onto one
# physical line (note the mid-line `#print (m_blk)` comment, after which the
# remainder of this line is dead text). The fragment also crosses scope
# boundaries — the trailing `elif (sys.argv[1] == "-sf")` belongs to an `if`
# that precedes this excerpt, and the final `for` body continues past it — so
# the indentation cannot be reconstructed here with confidence; left byte-identical.
# Visible behavior (to be confirmed against the original file):
#   - if n_rings > 0: report a cyclic species and sys.exit(1);
#   - otherwise convert molecule `m` to a MOL block and split it into lines;
#   - pick a display name from argv[3] (truncated to 9 chars) unless it starts
#     with '-', defaulting to "Molecule", then call main.main_prog(...);
#   - the `-sf` branch reads SMILES lines from the file named after the flag,
#     skipping blank lines and '#' comments, parsing each with Chem.MolFromSmiles.
if (n_rings > 0): print("\nThis is a cyclic species !!!\nProgram exiting...") print("\n==========================================\n") sys.exit(1) m_blk = Chem.MolToMolBlock(m) #print (m_blk) lines = m_blk.split('\n') if (len(sys.argv) > (i + 2)): if (sys.argv[3][0] != '-'): name = sys.argv[3][:9] else: name = "Molecule" else: name = "Molecule" #print(lines) main.main_prog(lines, name, out_file_path, doc_file_path) print("\n==========================================\n") elif (sys.argv[1] == "-sf"): #SMILES NAME s_file = open(sys.argv[sys.argv.index("-sf") + 1], 'r') m_lines = s_file.readlines() s_file.close() i = 0 for l in m_lines: m_splt = l.split() if (len(m_splt) == 0): continue elif (m_splt[0][0] == '#'): continue i = i + 1 m = Chem.MolFromSmiles(m_splt[0])
base_model_name += f'_S_apl_{params["S_apl"]}' else: base_model_name += f'_size{params["spline_size"]}' start_idx, end_idx = srun.init_indexes(params['log_dir'], args.num_runs) for idx in range(start_idx, end_idx): srun.update_index_json(idx) params['model_name'] = base_model_name + f'_run{idx}' combination_str = (f'\nrun {idx}/{end_idx-1}') params['combination_str'] = combination_str params['verbose'] = True start_time = time.time() results = main_prog(copy.deepcopy(params)) end_time = time.time() max_memory = torch.cuda.max_memory_allocated(device=params['device']) torch.cuda.reset_max_memory_cached(params['device']) # Log time/memory in train_results json file results_dict = Project.load_results_dict(params['log_dir']) results_dict[params['model_name']]['time'] = end_time - start_time results_dict[params['model_name']]['max_memory'] = max_memory Project.dump_results_dict(results_dict, params['log_dir'])
# Sweep of slope-difference thresholds: a piecewise grid from 0 to 100 that is
# densest near zero, where the accuracy response is most sensitive.
threshold_list = np.concatenate(
    (np.zeros(1), np.arange(0.0002, 0.004, 0.0002), np.arange(0.004, 1, 0.05),
     np.arange(1, 3, 0.2), np.arange(3, 10, 0.5), np.arange(10, 100, 2)))

for k in range(threshold_list.shape[0]):
    threshold = threshold_list[k]
    params['model_name'] = (
        base_model_name +
        '_slope_diff_threshold_{:.4f}'.format(threshold))
    params['slope_diff_threshold'] = threshold

    # Silence main_prog's console output while it runs.
    # BUG FIX: the original rebound sys.stdout to a fresh os.devnull handle
    # that was never closed (one leaked file descriptor per iteration) and
    # was never restored if main_prog raised; try/finally fixes both.
    devnull = open(os.devnull, "w")
    sys.stdout = devnull
    try:
        main_prog(copy.deepcopy(params), isloaded_params=True)
    finally:
        sys.stdout = sys.__stdout__
        devnull.close()

    results_dict = Project.load_results_dict(args.out_log_dir)
    model_dict = results_dict[params['model_name']]
    if k == 0:
        # The first grid point is the zero threshold; its accuracy is the
        # baseline against which all other thresholds are compared.
        assert np.allclose(threshold, 0)
        base_train_acc = model_dict['latest_train_acc']
    # Accuracy change relative to the zero-threshold baseline, clamped to
    # [-100, 100] percentage points.
    acc_drop = np.clip(
        (model_dict['latest_train_acc'] - base_train_acc),
        a_max=100, a_min=-100)
    print('\nThreshold: {:.4f}'.format(threshold))