def launchsession6(useServer, batch_size, samples_per_epoch, nb_epoch, optimizer_type,
                   compact, dropout_fraction=0.0, batch_normalization=False,
                   random_search=False):
    """Run one full CNN experiment: load data, build, train and evaluate a model.

    Parameters
    ----------
    useServer : passed through to dataUtils.createDataPaths to pick data location.
    batch_size : batch size handed to the data generators.
    samples_per_epoch, nb_epoch : training schedule forwarded to CNNModel.trainModel.
    optimizer_type : optimizer selector forwarded to CNNModel.compileModel.
    compact : if truthy build the compact architecture (createModel), else createModelNC.
    dropout_fraction, batch_normalization : regularization options for the model builders.
    random_search : selects which performance-plotting helper is used.

    Returns
    -------
    (test_result, val_result, time_expend, history) — evaluation results, elapsed
    seconds, and the training history object.
    """
    # BUGFIX: the original used Python-2 `print` statements, which are a syntax
    # error under Python 3; parenthesized calls behave identically on both.
    start = time.time()

    # Get data
    dataUtils.createDataPaths(useServer, 0.7)
    datagen = CNNData.getDataGenerator()
    augmented_datagen = CNNData.getAugmentedDataGenerator()
    train_generator, validation_generator, test_generator = CNNData.getDataOld(
        datagen, augmented_datagen, batch_size)

    # Create model
    if compact:
        model = CNNModel.createModel(dropout_fraction=dropout_fraction,
                                     batch_normalization=batch_normalization)
    else:
        model = CNNModel.createModelNC(dropout_fraction=dropout_fraction,
                                       batch_normalization=batch_normalization)

    # Train the model
    model = CNNModel.compileModel(model, optimizer_type)
    model.summary()
    model, history = CNNModel.trainModel(model, train_generator, samples_per_epoch,
                                         nb_epoch, validation_generator)

    # Evaluate the model
    if random_search:
        CNNModel.plotModelPerformanceRandom(history)
    else:
        CNNModel.plotModelPerformance(history)
    val_result = CNNModel.evaluateModel(model, validation_generator)
    test_result = CNNModel.evaluateModel(model, test_generator)
    print('Validation result ' + str(val_result))
    print('Test result ' + str(test_result))

    end = time.time()
    time_expend = end - start
    print('Done in ' + str(time_expend) + ' secs.')
    return test_result, val_result, time_expend, history
def initializationProcedure(self):
    """Warm-up phase of the annealing search.

    Generates random neighbour solutions from the current topology until either
    MAX_REJECTED_MOVES consecutive rejections or MAX_INIT iterations occur,
    collecting the energy increase of every rejected move.

    Returns
    -------
    (D, bestSolutionChange) — the D value computed by ``self.calculateDValue``
    over the rejected-move energies, and whether ``self.s_best`` was improved.
    """
    listOfRejMoves = list()      # energy increase of each rejected move
    numRejMoves = 0              # count of *consecutive* rejections
    iteration = 0
    bestSolutionChange = False
    # Log
    self.writeFile("Init Procedure Start\n")
    print("Init Procedure Start")
    while numRejMoves < self.MAX_REJECTED_MOVES and iteration < self.MAX_INIT:
        self.writeFile(f"{'#' * 10} Init Iter: {iteration} {'#' * 10}\n")
        print(f"{'#' * 10} Init Iter: {iteration} {'#' * 10}")
        print("-" * 50)
        # Choose a move randomly: clone the current topology and retrain it.
        self.modelNo = self.modelNo + 1
        self.s_prime = cnnModelObj.CNNModel()
        self.s_prime.buildCNN(copy.deepcopy(self.s.topologyDict))
        self.s_prime.trainModel(**self.initParameters, modelNo=self.modelNo)
        # Lower objective is better throughout this class.
        energyChange = self.s_prime.objectiveValue - self.s.objectiveValue
        if energyChange >= 0:
            # Worse (or equal) candidate: reject and remember the increase.
            listOfRejMoves.append(energyChange)
            numRejMoves = numRejMoves + 1
            # Log
            self.writeFile(f"New Solution Rejected... - Objective: {self.s_prime.objectiveValue} \n")
            print(f"New Solution Rejected... - Objective: {self.s_prime.objectiveValue}")
        else:
            # Improvement: adopt the candidate and reset the rejection streak.
            numRejMoves = 0
            self.s = self.s_prime
            self.archiveList.append((self.s, self.s.objectiveValue))  # Put Archive List
            if self.s.objectiveValue < self.s_best.objectiveValue:
                self.s_best = self.s
                bestSolutionChange = True
            # Log
            self.writeFile(f"New Solution Accepted... - Objective: {self.s_prime.objectiveValue} \n")
            print(f"New Solution Accepted... - Objective: {self.s_prime.objectiveValue}")
        iteration = iteration + 1
        # Log
        self.writeFile(f"Rejected Moves: {listOfRejMoves} \n")
        print("Rejected Moves:", listOfRejMoves)
        self.writeFile(f"Rejected Move Size: {numRejMoves} \n")
        print("Rejected Move Size:", numRejMoves)
    # Log
    self.writeFile("Init Procedure End\n")
    print("Init Procedure End")
    self.writeFile(f"{'-' * 50} \n")
    return self.calculateDValue(listOfRejMoves), bestSolutionChange
def __init__(self, x_train, y_train, x_valid, y_valid, batch_size, learning_rate):
    """Set up logging, build and train the initial solution, and record its metrics.

    The training data, validation data and hyper-parameters are stored in
    ``self.initParameters`` so every model created during the search is
    trained with the same arguments.
    """
    # Start every run with freshly truncated log files.
    for logfile in ("models.txt", "result.txt", "model_history.txt"):
        self.clearFile(logfile)

    self.initParameters = {
        "x_train": x_train,
        "x_valid": x_valid,
        "y_train": y_train,
        "y_valid": y_valid,
        "batch_size": batch_size,
        "learning_rate": learning_rate,
    }

    # Build, train and adopt the very first solution.
    self.modelNo += 1
    self.s = cnnModelObj.CNNModel(self.NEW_BLOCK_PROB)
    self.s.buildInitSolution()
    self.s.trainModel(**self.initParameters, modelNo=self.modelNo)
    self.s_best = self.s

    # Record the initial solution's metrics alongside all later candidates.
    self.results.append({
        'tr_acc': self.s.trainAccuracy,
        'val_acc': self.s.validationAccuracy,
        'flops': self.s.flops,
        'totalParameter': self.s.parameterCount,
        'time': self.s.trainTime,
        'status': True,
    })

    # Log
    self.writeFile("Initial Solution Created...\n")
    print('Initial Solution Created...')
    self.writeFile(f"Initial Solution Objective Value: {self.s.objectiveValue} \n")
    print(f"Initial Solution Objective Value: {self.s.objectiveValue}")
def __init__(self, x_train, y_train, x_valid, y_valid, batch_size, learning_rate):
    """Prepare logging, echo the search limits, and train the initial solution.

    Stores the shared training arguments in ``self.initParameters`` so each
    subsequent model is trained identically.
    """
    # Truncate log files left over from any previous run.
    for logfile in ("models.txt", "result.txt", "model_history.txt"):
        self.clearFile(logfile)

    print(f"MAX_SAMP = {self.MAX_SAMP}, MAX_INIT = {self.MAX_INIT}, MAX_REJEC_MOV = {self.MAX_REJECTED_MOVES}")

    self.initParameters = {
        "x_train": x_train,
        "x_valid": x_valid,
        "y_train": y_train,
        "y_valid": y_valid,
        "batch_size": batch_size,
        "learning_rate": learning_rate,
    }

    # Build, train and adopt the starting solution.
    self.modelNo += 1
    self.s = cnnModelObj.CNNModel()
    self.s.buildInitSolution()
    self.s.trainModel(**self.initParameters, modelNo=self.modelNo)
    self.s_best = self.s

    # Log
    self.writeFile("Initial Solution Created...\n")
    print('Initial Solution Created...')
    self.writeFile(f"Initial Solution Objective Value: {self.s.objectiveValue} \n")
    print(f"Initial Solution Objective Value: {self.s.objectiveValue}")
return x, y def one_hot(y_): """ Function to encode output labels from number indexes. E.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]] """ n_values = int(np.max(y_)) + 1 return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS x_t, y_t = Read("./TestData/train.csv") x_e, y_e = Read("./TestData/train.csv") y_t = one_hot(y_t) y_t = np.resize(y_t, (y_t.shape[0], 38)) y_e = one_hot(y_e) y_e = np.resize(y_t, (y_e.shape[0], 38)) net = rsn.CNN(176, 38) net.k_size = 10 net.ftl = 7 net.n_layers = 9 net.learning_rate = LEARNING_RATE net.epchs = EPOCHS net.batch_size = 60 net.build_model() net.build_adam_trainer() print("Тренировка модели") net.train(x_t, y_t, x_e, y_e) print("Тренировка закончена") input("Нажмите ентер")
import CNNModel
import numpy as np

# Load the trained MNIST classifier once at import time; predict_digit reuses it.
model = CNNModel.build('saved_weights/CNN_mnist_weights.h5')


def predict_digit(img):
    """Classify a digit image and return "<digit> | Confidence: <pct>".

    ``img`` is forwarded unchanged to ``model.predict``; the first row of the
    prediction is treated as per-class scores.
    """
    onehot = model.predict(img)[0]
    no = onehot.argmax()          # index of the highest-scoring class
    prob = 100 * onehot[no]       # its score as a percentage
    return f"{no} | Confidence: {prob:.2f}"
def startAlgorithm(self):
    """Run the archive-based multi-objective simulated-annealing search.

    Each iteration clones the current solution ``self.s`` into ``self.s_prime``,
    trains it, then decides acceptance from Pareto dominance against the current
    solution and the archive, falling back to temperature-scaled acceptance
    probabilities (``self.accProb``).  All candidate metrics go to
    ``self.results`` (pickled as "mosa_sols"); the final archive is written to
    archive.txt and Results/model_<i>.json.
    """
    inner_counter = 0
    outer_counter = 0
    archiveList = []  # every accepted (solution, objective) pair, in order
    # Run until both the outer-iteration and total-model budgets are exhausted.
    while outer_counter < self.nbr_outer_iter or self.modelNo < self.nbr_total_iter:
        # Outer Loop Info
        self.writeFile(f"{'#' * 10} Outer Iteration: {outer_counter} {'#' * 10}\n")
        print("-" * 50)
        inner_counter = 0
        while inner_counter < self.nbr_inner_iter:
            # Inner Loop Info
            self.writeFile(f"{'#' * 10} Inner Iteration: {inner_counter} {'#' * 10}\n")
            print("-" * 50)
            # Apply Local Move
            self.modelNo = self.modelNo + 1
            print(f"MODEL NO: {self.modelNo}")
            if self.modelNo > self.nbr_total_iter:
                break
            # Every 50th model, boost the probability of growing a new block.
            if self.modelNo % 50 == 0:
                self.NEW_BLOCK_PROB = self.NEW_BLOCK_PROB * 1.4
            self.s_prime = cnnModelObj.CNNModel(self.NEW_BLOCK_PROB)
            self.s_prime.buildCNN(copy.deepcopy(self.s.topologyDict))
            self.s_prime.trainModel(**self.initParameters, modelNo=self.modelNo)
            print("S - " + str(self.s.objectiveValue) + "-" + str(self.s.flops))
            print("S_prime - " + str(self.s_prime.objectiveValue) + "-" + str(self.s_prime.flops))
            print('S dominate S_prime:', self.isDominate(self.s, self.s_prime))
            print("*" * 50)
            ##################### Choose Next and Update ######################
            acceptModel = False
            # Current dominates new
            if self.isDominate(self.s, self.s_prime):
                p_acc = self.accProb(self.s, self.s_prime, self.T_current)
                if self.rndProb() < p_acc:
                    self.s = self.s_prime
                    acceptModel = True
            else:
                if self.isSolutionDominateArchive(self.s_prime, self.archive):
                    # a solution in Archive is dominated by new
                    self.s = self.s_prime
                    self.archive = self.updateArchive(self.s_prime, self.archive)
                    acceptModel = True
                elif self.isArchiveDominateSolution(self.archive, self.s_prime):
                    # a solution in Archive dominates new:
                    # compare against a randomly drawn archive member a_star
                    a_star = random.choice(self.archive)
                    rnd01 = self.rndProb()
                    if self.isDominate(self.s_prime, self.s):
                        # S' dominates S
                        p_acc = self.accProb(a_star, self.s_prime, self.T_current)
                        if rnd01 < p_acc:
                            self.s = self.s_prime
                            acceptModel = True
                        else:
                            self.s = a_star
                            acceptModel = True
                    elif self.isDominate(self.s, self.s_prime) == False:
                        # S does not dominate S' (mutually non-dominating)
                        p_acc = self.accProb(self.s, self.s_prime, self.T_current)
                        if rnd01 < p_acc:
                            p_acc = self.accProb(a_star, self.s_prime, self.T_current)
                            if rnd01 < p_acc:
                                self.s = self.s_prime
                                acceptModel = True
                            else:
                                self.s = a_star
                                acceptModel = True
                        else:
                            p_acc = self.accProb(a_star, self.s, self.T_current)
                            if rnd01 >= p_acc:
                                self.s = a_star
                                acceptModel = True
                else:
                    # new does not dominate or is dominated by solutions in Archive
                    self.s = self.s_prime
                    self.archive = self.updateArchive(self.s_prime, self.archive)
                    acceptModel = True
            ##################### Choose Next and Update ######################
            if acceptModel:
                # New Solution Accept Info
                self.writeFile(f"New Solution Accepted... - Objective: {self.s.objectiveValue} \n")
                # Put Accepted Solution in Archive List
                archiveList.append((self.s, self.s.objectiveValue))
            # Record the candidate's metrics; 'status' says whether it was accepted.
            # NOTE(review): reconstructed from a collapsed source line — confirm
            # whether this append (and printArchive) belongs inside `if acceptModel`.
            self.results.append({'tr_acc': self.s_prime.trainAccuracy, 'val_acc': self.s_prime.validationAccuracy,
                                 'flops': self.s_prime.flops, 'totalParameter': self.s_prime.parameterCount,
                                 'time': self.s_prime.trainTime, 'status': acceptModel})
            self.printArchive(self.archive)
            # Increase Inner Counter
            inner_counter = inner_counter + 1
        # Cool the temperature only while inside the outer-iteration budget.
        if outer_counter < self.nbr_outer_iter:
            self.T_current = self.T_current * self.cr
        # Increase Outer Counter
        outer_counter = outer_counter + 1
    try:
        self.saveList(self.results, "mosa_sols")
    except Exception as e:
        print(f"Pickle Write Error... {e}")
    # Dump the final archive: topology, objective, flops and serialized model.
    for index, solution in enumerate(self.archive):
        self.writeFile(str(solution.topologyDict) + "\n", _filePath="archive.txt")
        self.writeFile(f"Objective: {solution.objectiveValue} \n", _filePath="archive.txt")
        self.writeFile(f"Flops: {solution.flops} \n", _filePath="archive.txt")
        self.writeFile(f"{'*' * 50} \n", _filePath="archive.txt")
        # serialize model to JSON (the keras model object itself was removed)
        model_json = str(solution.modelJSON)
        with open(f"Results/model_{index}.json", "w") as json_file:
            json_file.write(model_json)
def startAlgorithm(self):
    """Run the single-objective simulated-annealing search.

    Nested loops: the outer loop cools the temperature ``self.T_current`` by
    factor ``self.cr``; the inner loop proposes a neighbour (clone + retrain),
    and accepts it if it improves the objective, ties with fewer parameters,
    or passes the probabilistic test ``accProb(deltaE, T)``.  Metrics of every
    candidate are pickled ("sau_sols"); the five best accepted solutions are
    written to result.txt and Results/model_<i>.json.
    """
    inner_counter = 0
    outer_counter = 0
    archiveList = []  # every accepted (solution, objective) pair
    while outer_counter < self.nbr_outer_iter:
        # Outer Loop Info
        self.writeFile(f"{'#' * 10} Outer Iteration: {outer_counter} {'#' * 10}\n")
        print("-" * 50)
        inner_counter = 0
        while inner_counter < self.nbr_inner_iter:
            # Inner Loop Info
            self.writeFile(f"{'#' * 10} Inner Iteration: {inner_counter} {'#' * 10}\n")
            print("-" * 50)
            # Every 50th model, boost the probability of growing a new block.
            if self.modelNo % 50 == 0:
                self.NEW_BLOCK_PROB = self.NEW_BLOCK_PROB * 1.4
            # Apply Local Move
            self.modelNo = self.modelNo + 1
            self.s_prime = cnnModelObj.CNNModel(self.NEW_BLOCK_PROB)
            self.s_prime.buildCNN(copy.deepcopy(self.s.topologyDict))
            self.s_prime.trainModel(**self.initParameters, modelNo=self.modelNo)
            # Calculate Energy Change (lower objective is better)
            deltaE = self.s_prime.objectiveValue - self.s.objectiveValue
            self.writeFile(f"DeltaE:{deltaE}\n")
            acceptModel = False
            if deltaE < 0:
                # Improvement: accept unconditionally.
                acceptModel = True
            elif deltaE == 0:
                # Tie on objective: prefer the smaller network.
                if self.s_prime.parameterCount < self.s.parameterCount:
                    acceptModel = True
                else:
                    # Rejected
                    self.writeFile(f"New Solution Rejected... - Objective: {self.s_prime.objectiveValue} \n")
            else:
                # Worse: accept with annealing probability.
                if self.rndProb() <= self.accProb(deltaE, self.T_current):
                    acceptModel = True
                else:
                    # Rejected
                    self.writeFile(f"New Solution Rejected... - Objective: {self.s_prime.objectiveValue} \n")
            if acceptModel:
                # New Solution Accept Info
                self.writeFile(f"New Solution Accepted... - Objective: {self.s_prime.objectiveValue} \n")
                # Current Solution Update
                self.s = self.s_prime
                # Put Accepted Solution in Archive List
                archiveList.append((self.s, self.s.objectiveValue))
            # Record the candidate's metrics; 'status' says whether it was accepted.
            # NOTE(review): reconstructed from a collapsed source line — confirm
            # whether this append belongs inside `if acceptModel`.
            self.results.append({'tr_acc': self.s_prime.trainAccuracy, 'val_acc': self.s_prime.validationAccuracy,
                                 'flops': self.s_prime.flops, 'totalParameter': self.s_prime.parameterCount,
                                 'time': self.s_prime.trainTime, 'status': acceptModel})
            # Increase Inner Counter
            inner_counter = inner_counter + 1
        # Cool down.
        self.T_current = self.T_current * self.cr
        # Increase Outer Counter
        outer_counter = outer_counter + 1
    try:
        self.saveList(self.results, "sau_sols")
    except Exception as e:
        print(f"Pickle Write Error... {e}")
    # Sort Archive Solutions — keep the five with the lowest objective.
    sortedList = sorted(archiveList, key=lambda x: x[1])[:5]
    for index, solution in enumerate(sortedList):
        self.writeFile(str(solution[0].topologyDict) + "\n", _filePath="result.txt")
        self.writeFile(f"Objective: {solution[1]} \n", _filePath="result.txt")
        self.writeFile(f"{'*' * 50} \n", _filePath="result.txt")
        # serialize model to JSON (the keras model object itself was removed)
        model_json = str(solution[0].modelJSON)
        with open(f"Results/model_{index}.json", "w") as json_file:
            json_file.write(model_json)
    # Tail of an out-of-view plotting/training function: finish the loss figure,
    # then draw the accuracy figure and show both.
    plt.plot(iters, val_losses, label='Val')
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend(loc='best')
    plt.figure(2)
    plt.title("Train vs Validation Accuracy")
    plt.plot(iters, train_accs, label='Train')
    plt.plot(iters, val_accs, label='Val')
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.legend(loc='best')
    plt.show()
    return


# Load all dataset splits, then train the ECNN model.
Balanced_all_dataset, train_dataset, val_dataset, test_dataset, overfit_dataset = inputManager.getDataLoader(
)
model = Model.ECNN()
# NOTE(review): model_1 is built but never trained or referenced below —
# presumably kept for a baseline comparison; confirm or remove.
model_1 = Model_baseline.Baseline()
#ecc_pt1 is the parameters for lr = 0.005, batch = 1000, num of epoch = 40, for file name "ECNN_0.pt"
# NOTE(review): the ECNN model is saved under 'baseline_3.pt' — verify the
# checkpoint name matches the model actually being trained.
train(model, train_dataset, val_dataset, lr=0.001, batch_size=9000, num_epoch=70, save='baseline_3.pt')
#train(model, overfit_dataset, overfit_dataset, lr = 0.001, batch_size = 300, num_epoch= 20, save = 'ECNN_0.pt')
# Report how many parameters will actually be optimized.
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
# This method takes ridiculously long
#train(model, overfit_dataset, overfit_dataset, lr = 0.001, batch_size = 64, num_epoch= 25, save = 'vgg16_overfit_0.pt')
#train(model, train_dataset, val_dataset, lr = 0.001, batch_size = 64, num_epoch= 2, save = 'ECNN_train_0.pt')

# Try Pytorch Inputting the 1000 classification as input instead of modifying last layer
# Load pretrain model and set to not training
pre_model = models.vgg16(pretrained=True)
for param in pre_model.parameters():
    param.requires_grad = False  # freeze the backbone; only the new head trains
num_ftrs = pre_model.classifier[6].out_features  # 1000
print(num_ftrs)
# Small head that consumes VGG16's 1000-dim output for a 5-class task.
model = Model.ENN_0_5cls(num_ftrs)

# The .pt files below appear to be cached outputs of running the frozen VGG16
# over the images (see the commented generation lines) — confirm before reuse.
#overfit_dataset = inputManager_5cls.get_Input_Dataset(pre_model, overfit_dataset)
#torch.save(overfit_dataset, os.path.join(os.getcwd(), 'cropped_pics_overfit_dataset.pt'))
overfit_dataset = torch.load(
    os.path.join(os.getcwd(), 'cropped_pics_overfit_dataset.pt'))
#train_dataset = inputManager_5cls.get_Input_Dataset(pre_model, train_dataset)
#torch.save(train_dataset, os.path.join(os.getcwd(), 'cropped_pics_train_dataset.pt'))
train_dataset = torch.load(
    os.path.join(os.getcwd(), 'cropped_pics_train_dataset.pt'))
#val_dataset = inputManager_5cls.get_Input_Dataset(pre_model, val_dataset)
#torch.save(val_dataset, os.path.join(os.getcwd(), 'cropped_pics_val_dataset.pt'))
##val_dataset = torch.load(os.path.join(os.getcwd(), 'cropped_pics_val_dataset.pt'))
def samplingProcedure(self, Dvalue):
    """Sampling phase: explore neighbours under a finite uphill "energy budget".

    Starting from budget ``E_D = Dvalue``, runs MAX_SAMP iterations.  Downhill
    moves are always accepted (ties only if the candidate is not larger);
    uphill moves are accepted only while the budget can absorb the increase.
    Each accepted move subtracts its energy change from E_D (so downhill moves
    refill the budget).

    Returns
    -------
    bool — whether ``self.s_best`` was improved during sampling.
    """
    D_I = Dvalue
    cycle_count = 0
    E_D = D_I  # remaining budget for accepting uphill (worsening) moves
    bestSolutionChange = False
    # Log
    self.writeFile("Sampling Procedure Start\n")
    print("Sampling Procedure Start")
    while cycle_count < self.MAX_SAMP:
        # Log
        self.writeFile(f"{'#' * 10} SAMPLING ITER: {cycle_count} {'#' * 10}\n")
        print(f"{'#' * 10} SAMPLING ITER: {cycle_count} {'#' * 10}")
        # Clone the current topology and retrain it as the candidate S'.
        self.modelNo = self.modelNo + 1
        self.s_prime = cnnModelObj.CNNModel()
        self.s_prime.buildCNN(copy.deepcopy(self.s.topologyDict))
        self.s_prime.trainModel(**self.initParameters, modelNo=self.modelNo)
        # Log
        self.writeFile(f"S' Solution Created... Current E_D: {E_D} Objective: {self.s_prime.objectiveValue} \n")
        print("S' Solution Created...", "Current E_D:", E_D, "Objective:", self.s_prime.objectiveValue)
        acceptModel = False
        energyChange = self.s_prime.objectiveValue - self.s.objectiveValue
        if energyChange <= 0:
            acceptModel = True
            # Tie-break: an equal objective with MORE parameters is rejected.
            if energyChange == 0 and self.s_prime.parameterCount > self.s.parameterCount:
                acceptModel = False
            if acceptModel:
                self.s = self.s_prime
                self.archiveList.append((self.s, self.s.objectiveValue))  # Put Archive List
                E_D = E_D - energyChange  # energyChange <= 0, so the budget grows
                # Log
                self.writeFile(f"S' Solution Accepted, New E_D: {E_D} Energy Change: {energyChange}\n")
                print("S' Solution Accepted,", self.s.objectiveValue, "New E_D:", E_D, "Energy Change:", energyChange)
                self.writeFile(f"{'-' * 50} \n")
                print("-" * 50)
        elif energyChange > 0:
            # Uphill move: accept only while the budget can absorb it.
            if E_D - energyChange >= 0:
                acceptModel = True
                self.s = self.s_prime
                self.archiveList.append((self.s, self.s.objectiveValue))  # Put Archive List
                E_D = E_D - energyChange
                # Log
                self.writeFile(f"S' Solution Accepted, New E_D: {E_D} Energy Change: {energyChange}\n")
                print("S' Solution Accepted,", self.s.objectiveValue, "New E_D:", E_D, "Energy Change:", energyChange)
                self.writeFile(f"{'-' * 50} \n")
                print("-" * 50)
        # Compare the best solution and current solution.
        if acceptModel and self.s_prime.objectiveValue <= self.s_best.objectiveValue:
            self.s_best = self.s
            bestSolutionChange = True
        cycle_count = cycle_count + 1
    # Log
    self.writeFile(f"Best Solution: {self.s_best.objectiveValue}\n")
    self.writeFile("Sampling Procedure End\n")
    print("Sampling Procedure End")
    return bestSolutionChange
# Hyper-parameters for the lattice-data training pipeline.
dim = cst.lattice_size
y_col = -2  #-3: temp, -2: energy, -1: magnetization
split_test = 0.3  #test/train split
queue_size = 2  #decrease this if you run out of memory

#split for train/test
files = glob.glob(data_directory)
train, test = train_test_split(files, test_size=split_test)
# Bounded queues act as back-pressure between batch producers and the trainer.
batches = Queue(maxsize=queue_size * len(train))
test_batches = Queue(maxsize=queue_size * len(test))

print("Calculating normalization parameters")
# NOTE(review): `frac` is not used in the visible code — confirm it is needed.
mean, stddev, frac = get_normalization_params()

model = mdl.CNN_model(n_layers, dim, \
                      mean, stddev, learning_rate)
model.start_session()

print("Creating threads")
# NOTE(review): the batch producers are separate *processes* while the trainer
# is a *thread*; confirm the Queue class imported here is the
# multiprocessing-safe one, otherwise the producers cannot feed the trainer.
creator_thread1 = Process(target=create_batches,\
        args=(batches, train, mean, stddev, y_col,), daemon=True)
creator_thread2 = Process(target=create_test_batches,\
        args=(test_batches, test, mean, stddev, y_col,), daemon=True)
trainer_thread = threading.Thread(target=train_dataset, args=(
    model,
    max_epoch,
), daemon=True)
print("Starting training")
        # Tail of the dtype map started out of view: sensor columns s42..s45
        # and label columns l1..l3 are all read as float64.
        's42': np.float64, 's43': np.float64, 's44': np.float64, 's45': np.float64,
        'l1': np.float64, 'l2': np.float64, 'l3': np.float64
    }
    # NOTE(review): the positional second argument is the separator; newer
    # pandas requires it as sep=';' — confirm the pinned pandas version.
    dat = pd.read_csv(s, ';', dtype=dt)
    x = dat[dat.columns[:45]].values   # first 45 columns: features s1..s45
    y = dat[dat.columns[45:]].values   # remaining columns: labels l1..l3
    x = np.array(x)
    y = np.array(y)
    return x, y


x_t, y_t = Read("./Data/train.csv")
x_e, y_e = Read("./Data/test.csv")

# Configure and train the network: 45 input features.
net = rsn.CNN(45)
net.k_size = 21
net.ftl = 20
net.n_layers = 5
net.learning_rate = LEARNING_RATE
net.epchs = EPOCHS
net.batch_size = 512
net.build_model()
net.build_mom_trainer()

print("Тренировка модели")
net.train(x_t, y_t, x_e, y_e)
print("Тренировка закончена")
input("Нажмите ентер")
# NOTE(review): fp is opened for writing but never closed in the visible code —
# confirm it is closed (or switched to a `with` block) further down.
fp = open(log_full_Path + log_file_name, 'w')

# Hippocampus parcel volumes served from a network share.
train_data = DataSet.DataSet(
    r'\\storage.wsd.local\Warehouse\Data\zhujiangyuan\MNI_Test_150_Case\Test_Case_Parcel_Range\TestCase_Hippocampus\Train'
)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=10, shuffle=True, num_workers=1)
test_data = DataSet.DataSet(
    r'\\storage.wsd.local\Warehouse\Data\zhujiangyuan\MNI_Test_150_Case\Test_Case_Parcel_Range\TestCase_Hippocampus\Test'
)
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=1, shuffle=True, num_workers=1)

model = CNNModel.CNNModel().cuda()
# NOTE(review): lr=1e-1 is unusually high for Adam (default 1e-3) — confirm intended.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-1)
loss_func = torch.nn.BCEWithLogitsLoss().cuda()
optimizer.zero_grad()
count = 0
for epoch in range(80):
    model.train()
    loss_vec = []
    sample_num = 0
    correct = 0
    for step, (batch_x, batch_y) in enumerate(train_loader):
        count += 1
        print(count)
        height, width, depth = train_data.getRawDataDimension()
        # Reshape the batch to (N, channels=1, D, H, W) for the 3D CNN.
        # NOTE(review): Variable is deprecated since PyTorch 0.4; tensors work directly.
        image = Variable(batch_x.view(-1, 1, depth, height, width).cuda())