def update_data(data, dataset):
    """Reload the experience-replay CSV for *dataset* and return a fresh copy.

    Parameters:
        data: current in-memory rows; ignored except as the fallback return.
        dataset: "cifar10" or "mnist" — selects which replay CSV to read.

    Returns:
        A new list of formatted rows for a recognised dataset. For an
        unrecognised dataset the caller's *data* is returned unchanged
        (the original fell off the end and returned None implicitly,
        which broke callers that reassigned the result).
    """
    # Map dataset name -> replay file; replaces the duplicated if/elif
    # branches that differed only in the file constant.
    replay_files = {"cifar10": FILE_CIFAR10, "mnist": FILE_MNIST}
    source = replay_files.get(dataset)
    if source is None:
        return data
    tmp_data = get_data_from_csv(source)
    tmp_data = format_data_without_header(tmp_data)
    return tmp_data[:]
def combine(path, array_file):
    """Concatenate the formatted rows of every CSV file in *array_file*.

    Each file name is resolved against *path*, loaded from disk, stripped
    of its header row, and appended to one flat result list in order.
    """
    combined = []
    for name in array_file:
        full_name = add_path_to_file(path, name)
        rows = format_data_without_header(get_data_from_csv(full_name))
        combined.extend(rows)
    return combined
def pre_train_model(file_name):
    """Train one model per row of the CSV file *file_name*.

    The file is loaded, its header row dropped, and train() is invoked
    once per remaining row (each row describes one model topology).
    """
    data = get_data_from_csv(file_name)
    data = format_data_without_header(data)
    # Iterate rows directly instead of indexing via range(len(...));
    # stale commented-out debug loop removed.
    for model in data:
        train(model)
def main():
    """Report summary statistics for a CIFAR-10 model-results CSV.

    Loads the selected results file, then prints the standard deviation,
    mean and variance of the target column, plus how many models fall
    above/below the chosen criteria.
    """
    # Only the LAST assignment of each setting took effect in the
    # original; the dead intermediate reassignments (MNIST/CLEAN_CODE,
    # CIFAR-10/TMP_CODE, TMP3_CLEAN_CODE, TMP_CODE_2, ...) are removed.
    # Edit these values to analyse another experiment run.
    file_path = "/homes/nj2217/PROJECT/TMP5_CLEAN_CODE/FINISHED_MODEL/CIFAR-10/TMP2_CODE_2/"
    file = "COMPLETE_CIFAR10.csv"
    file_name = file_path + file

    target = "accuracy"              # alternative: "loss"
    above_or_below_target = "above"  # alternative: "below"
    criteria = "mean"                # alternative: a numeric threshold, e.g. 0.7413

    data = get_data_from_csv(file_name)
    data = format_data_without_header(data)

    # NOTE(review): best_model / worst_model are computed but never
    # reported — confirm whether they should be printed as well.
    best_model = get_best_topology(data, target)
    worst_model = get_worst_topology(data, target)

    print("Std: ", get_standard_deviation(data, target))
    print("Mean: ", get_mean(data, target))
    print("Variance: ", get_var(data, target))

    num_model, list_model = get_number_model(data, target, above_or_below_target, criteria)
    print("Total number of model: ", len(data))
    print("Number of model->", num_model, " that is ", above_or_below_target,
          " (", criteria, " of ", target, ")")
def pre_train_model_cifar10(file_name, output_file_name):
    """Train a CIFAR-10 model for every row of the CSV *file_name*.

    Side effect: sets the module-level MAIN_FILE to *output_file_name*
    before training — presumably train_model_cifar10 writes results
    there; TODO confirm against that helper.
    """
    global MAIN_FILE
    MAIN_FILE = output_file_name
    data = get_data_from_csv(file_name)
    data = format_data_without_header(data)
    # Iterate rows directly; no index bookkeeping needed.
    for single_model in data:
        train_model_cifar10(single_model)
def update_data(data, dataset):
    """Refresh the on-going processing data from the replay CSV (module constant FILE).

    Called after the agent appends a new model to the experience-replay
    file so the in-memory copy stays in sync. *dataset* is currently
    unused but kept for interface compatibility; the incoming *data* is
    not read — callers reassign the returned copy.
    """
    refreshed = format_data_without_header(get_data_from_csv(FILE))
    return refreshed[:]
def combine(path, array_file):
    """Merge every CSV file listed in *array_file* into a single row list.

    Each entry is resolved against *path*, read from disk, stripped of
    its header row, and its remaining rows concatenated in order.
    """
    merged = []
    for entry in array_file:
        located = add_path_to_file(path, entry)
        merged.extend(format_data_without_header(get_data_from_csv(located)))
    return merged
def pre_train_model_cifar10(file_name, limit=2):
    """Train CIFAR-10 models for the first *limit* rows of *file_name*.

    The original hard-coded a debugging cap of 2 rows (the full-range
    loop was left commented out); the cap is now a parameter whose
    default preserves the existing behaviour. Pass limit=None to train
    on every row.

    Parameters:
        file_name: CSV file of model topologies (header row dropped).
        limit: maximum number of rows to train, or None for all rows.
    """
    data = get_data_from_csv(file_name)
    data = format_data_without_header(data)
    rows = data if limit is None else data[:limit]
    for single_model in rows:
        train_model_cifar10(single_model)
def main():
    """Load the random-search CIFAR-10 results and print the top models."""
    # Column indices counted from the end of each result row.
    INDEX_ACCURACY = -2
    INDEX_LOSS = -1  # kept for reference; point SORTING_INDEX here to sort by loss
    SORTING_INDEX = INDEX_ACCURACY

    # Dead code removed: a large triple-quoted block of superseded
    # analysis steps and the unused CURRENT_DIR / INPUT_* / OUTPUT_* /
    # file_path constants that only it referenced.
    CURRENT_WORKING_DIR = os.path.dirname(os.path.abspath(__file__))

    # Model with random search (swap main_folder for "MNIST/" to analyse MNIST).
    folder = "FINISHED_MODEL"
    file_type = "MODEL_WITH_RANDOM_SEARCH/"
    main_folder = "CIFAR-10/"
    main_file = "original_model.csv"
    file = os.path.join(CURRENT_WORKING_DIR, folder, file_type, main_folder, main_file)

    data = get_data_from_csv(file)
    data = format_data_without_header(data)
    sorted_data = sort_data(data, SORTING_INDEX)

    num_model = 5
    list_top_model = get_top_model(sorted_data, num_model)
    print("\nThe list of top ", num_model, " are as follows:\n ", list_top_model)
def pre_train_model_mnist(file_name, output_file_name):
    """Train an MNIST model for each row of the CSV *file_name*.

    Records *output_file_name* in the module-level MAIN_FILE before
    training starts, then hands every formatted row (one model
    description) to train_model_mnist.
    """
    global MAIN_FILE
    MAIN_FILE = output_file_name
    rows = format_data_without_header(get_data_from_csv(file_name))
    for single_model in rows:
        train_model_mnist(single_model)
def main():
    """Extract under-performing models from the CIFAR-10 results CSV.

    Loads the results, selects every model whose accuracy is below the
    threshold, prints them, and saves them to bad_model.csv.
    """
    # Dead code removed: the original first assigned
    # "fixed_model_dict.csv" and immediately overwrote it.
    file_name = "COMPLETE_CIFAR10.csv"
    data = get_data_from_csv(file_name)
    data = format_data_without_header(data)
    print(data)

    output_file = "bad_model.csv"
    accuracy_threshold = 0.7
    # Alternative selection by loss:
    #   bad_model = get_bad_topology(data, 1.0, "loss")
    bad_model = get_bad_topology(data, accuracy_threshold, "accuracy")
    print(bad_model)
    bad_model_file = save_topology_in_csv(output_file, bad_model)
def main():
    """Print summary statistics (std/mean/variance) of the "loss" column."""
    # Dead reassignments removed: "fixed_model_dict.csv" and
    # target = "accuracy" were each immediately overwritten.
    file_name = "COMPLETE_CIFAR10.csv"
    data = get_data_from_csv(file_name)
    data = format_data_without_header(data)

    target = "loss"  # switch to "accuracy" to analyse the other column
    # NOTE(review): best_model / worst_model are computed but never
    # printed — confirm whether they should be reported here.
    best_model = get_best_topology(data, target)
    worst_model = get_worst_topology(data, target)
    print("Std: ", get_standard_deviation(data, target))
    print("Mean: ", get_mean(data, target))
    print("Variance: ", get_var(data, target))
def main():
    """Save models below an accuracy threshold to bad_model.csv."""
    # Dead code removed: the initial "fixed_model_dict.csv" assignment
    # was immediately overwritten; the CIFAR-10 file is the one analysed.
    FILE_NAME = "COMPLETE_CIFAR10.csv"
    data = get_data_from_csv(FILE_NAME)
    data = format_data_without_header(data)

    OUTPUT_FILE = "bad_model.csv"
    accuracy_threshold = 0.7
    # Alternative: select by loss instead —
    #   bad_model = get_bad_topology(data, 1.0, "loss")
    bad_model = get_bad_topology(data, accuracy_threshold, "accuracy")
    bad_model_file = save_topology_in_csv(OUTPUT_FILE, bad_model)
    print("Create file for bad topology: ", bad_model_file, " successfully!!")
DATASET = "mnist" best_topology = run_q_learning(data,DATASET) print("best_topology: ", best_topology) # verify_model(best_topology,DATASET) ''' ''' #Get random topologies then save to csv file INPUT_FILE_NAME_RANDOM_TOPO = 'test_random_topology.csv' NUM_MODEL = 1500 OUTPUT_FILE_NAME = "new_trained_cifar10.csv" INPUT_FILE_NAME = get_random_topology(NUM_MODEL, INPUT_FILE_NAME_RANDOM_TOPO) print(INPUT_FILE_NAME) pre_train_model_cifar10(INPUT_FILE_NAME,OUTPUT_FILE_NAME) ''' #Run Q-learning to find best topology file_name = "COMPLETE_CIFAR10.csv" data = get_data_from_csv(file_name) data = format_data_without_header(data) DATASET = "cifar10" best_topology = run_q_learning(data,DATASET) print("best_topology: ", best_topology) verify_model(best_topology,DATASET) ''' model = ['c_1','c_6','c_5','m_2'] # DATASET = 'cifar10' DATASET = 'mnist' verify_model(model, DATASET) '''
def pre_train_model_svhn(file_name):
    """Train an SVHN model for every row in the CSV file *file_name*.

    Loads the file, drops its header row, and passes each remaining row
    (one model description) to train_model_svhn.
    """
    rows = format_data_without_header(get_data_from_csv(file_name))
    for single_model in rows:
        train_model_svhn(single_model)
def update_data(data, dataset):
    """Reload the replay CSV (module constant FILE) and return a copy.

    Parameters:
        data: ignored — callers are expected to reassign the return value.
        dataset: unused here; retained so the signature matches the
            other update_data variants.

    Returns:
        A fresh copy of the formatted rows read from FILE.
    """
    # Dead `tmp_data = ""` initialisation removed; the value was always
    # overwritten before use.
    tmp_data = format_data_without_header(get_data_from_csv(FILE))
    return tmp_data[:]