import random

import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable

# Project-specific helpers assumed importable from the surrounding repository:
# read_tsp_choco_solution_file, normalize_weight_matrix, OrderedPath.


def valid(model, valid_set, epoch, tsp_database_path):
    model.eval()
    valid_loss = 0
    correct = 0
    sum_duplicates = 0
    sum_solution = 0
    random.shuffle(valid_set)
    with torch.no_grad():  # no gradients are needed during evaluation
        for data_file in valid_set:
            # File names follow "<prefix>_<nb_cities>_<instance_id>.<ext>"
            details = data_file.split('.')[0].split('_')
            nb_cities, instance_id = int(details[1]), int(details[2])
            ordered_path, total_weight = read_tsp_choco_solution_file(nb_cities, instance_id,
                                                                      tsp_database_path)
            weight_matrix = ordered_path.get_weight_matrix()
            input_data = Variable(torch.tensor(
                normalize_weight_matrix(weight_matrix).reshape((1, nb_cities * nb_cities)),
                dtype=torch.float))
            output = model(input_data)
            # The target is ground truth and does not require gradients
            target = torch.tensor(ordered_path.to_ordered_path_binary_matrix().get_candidate(),
                                  dtype=torch.float)
            candidate = OrderedPath(np.array(torch.argmax(output, dim=1), dtype=int).transpose(),
                                    weight_matrix)
            valid_loss += model.loss_function(output, target).item()  # sum up per-instance loss
            sum_duplicates += candidate.get_nb_duplicates()
            sum_solution += int(candidate.is_solution())
            correct += int(candidate.is_solution())
    valid_set_size = len(valid_set)
    sum_solution /= valid_set_size
    sum_duplicates /= valid_set_size
    print('Valid phase indicators: Solution: {}, Nb duplicates: {}'.format(sum_solution,
                                                                           sum_duplicates))
    valid_loss /= valid_set_size
    print('Epoch {} - valid set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        epoch, valid_loss, correct, valid_set_size, 100. * correct / valid_set_size))
    return correct / valid_set_size, valid_loss, sum_duplicates
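# Hypothetical convenience wrapper, not part of the original code: it ties
# train() and valid() (both defined in this file) into an epoch loop and keeps
# the weights that reach the best validation accuracy. The epoch count and the
# keep-best policy are assumptions; this is purely a usage sketch.


def fit(model, optimizer, train_set, valid_set, tsp_database_path, nb_epochs=10):
    best_accuracy, best_state = 0.0, None
    for epoch in range(1, nb_epochs + 1):
        model = train(model, train_set, optimizer, tsp_database_path)
        accuracy, valid_loss, duplicates = valid(model, valid_set, epoch, tsp_database_path)
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_state = {k: v.clone() for k, v in model.state_dict().items()}
    if best_state is not None:
        model.load_state_dict(best_state)
    return model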
def test(model, test_set, tsp_database_path):
    model.eval()
    test_loss = 0
    correct = 0
    random.shuffle(test_set)
    with torch.no_grad():  # no gradients are needed at test time
        for data_file in test_set:
            details = data_file.split('.')[0].split('_')
            nb_cities, instance_id = int(details[1]), int(details[2])
            ordered_path, total_weight = read_tsp_choco_solution_file(nb_cities, instance_id,
                                                                      tsp_database_path)
            weight_matrix = ordered_path.get_weight_matrix()
            input_data = Variable(torch.tensor(
                normalize_weight_matrix(weight_matrix).reshape((1, nb_cities * nb_cities)),
                dtype=torch.float))
            output = model(input_data)
            target = torch.tensor(ordered_path.to_ordered_path_binary_matrix().get_candidate(),
                                  dtype=torch.float)
            candidate = OrderedPath(np.array(torch.argmax(output, dim=1), dtype=int).transpose(),
                                    weight_matrix)
            test_loss += model.loss_function(output, target).item()  # sum up per-instance loss
            correct += int(candidate.is_solution())
    test_set_size = len(test_set)
    test_loss /= test_set_size
    print('\ntest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, test_set_size, 100. * correct / test_set_size))
    return correct / test_set_size
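# All three loops parse instance file names the same way. A minimal sketch of
# the convention; the concrete name "tsp_10_42.txt" is an invented example,
# only the "<prefix>_<nb_cities>_<instance_id>.<ext>" shape comes from the
# parsing code above.


def parse_instance_name(data_file):
    """Return (nb_cities, instance_id) from a TSP database file name."""
    details = data_file.split('.')[0].split('_')
    return int(details[1]), int(details[2])


assert parse_instance_name('tsp_10_42.txt') == (10, 42)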
def train(model, train_set, optimizer, tsp_database_path):
    model.train()
    sum_duplicates = 0
    sum_solution = 0
    random.shuffle(train_set)
    for data_file in train_set:
        details = data_file.split('.')[0].split('_')
        nb_cities, instance_id = int(details[1]), int(details[2])
        ordered_path, total_weight = read_tsp_choco_solution_file(nb_cities, instance_id,
                                                                  tsp_database_path)
        weight_matrix = ordered_path.get_weight_matrix()
        input_data = Variable(torch.tensor(
            normalize_weight_matrix(weight_matrix).reshape((1, nb_cities * nb_cities)),
            dtype=torch.float), requires_grad=True)
        optimizer.zero_grad()
        output = model(input_data)  # calls the forward function
        # The target is ground truth and must not require gradients
        target = torch.tensor(ordered_path.to_ordered_path_binary_matrix().get_candidate(),
                              dtype=torch.float)
        candidate = OrderedPath(np.array(torch.argmax(output.detach(), dim=1), dtype=int),
                                weight_matrix)
        sum_duplicates += candidate.get_nb_duplicates()
        sum_solution += int(candidate.is_solution())
        loss = model.loss_function(output, target)
        loss.backward()
        optimizer.step()
    train_set_size = len(train_set)
    print('Training indicators: Solution: {}, Nb duplicates: {}'.format(
        sum_solution / train_set_size, sum_duplicates / train_set_size))
    return model
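# Minimal sketch of a model satisfying the interface train/valid/test assume:
# a forward pass mapping the flattened (1, n*n) normalized weight matrix to an
# (n, n) score matrix, so that torch.argmax(output, dim=1) yields one city per
# tour position, plus a loss_function attribute. The class name, layer sizes
# and the MSE loss are assumptions; the loss only requires the binary-matrix
# target to share the output's (n, n) shape.

import torch.nn as nn


class SketchTSPNet(nn.Module):
    def __init__(self, nb_cities, hidden_size=256):
        super().__init__()
        self.nb_cities = nb_cities
        self.net = nn.Sequential(
            nn.Linear(nb_cities * nb_cities, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, nb_cities * nb_cities),
        )
        self.loss_function = nn.MSELoss()

    def forward(self, x):
        # reshape so each row holds the scores of one tour position
        return self.net(x).view(self.nb_cities, self.nb_cities)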
# GAN training loop excerpt: one generator update. trained_net, g_optimizer,
# generator, discriminator, net_switch and the running counters come from the
# enclosing loop.
wm = torch.tensor(wm, dtype=torch.float, requires_grad=True)
if trained_net == 0:  # generator training turn
    label = [0]
    g_optimizer.zero_grad()
    input_g = generator(wm)
    # Decode the flat generator output into a 10-city candidate tour:
    # block k of 10 scores gives the city chosen for position k.
    can = np.array([np.argmax(input_g.detach().numpy()[k * 10:(k + 1) * 10])
                    for k in range(10)], dtype=int)
    op_can = OrderedPath(can, wm.detach().numpy().astype(int).reshape(10, 10))
    nb_solutions += int(op_can.is_solution())
    nb_duplicates += op_can.get_nb_duplicates()
    output_g = torch.tensor(label, dtype=torch.float)
    predicted_output_g = discriminator(input_g)
    g_loss = F.binary_cross_entropy(predicted_output_g, output_g)
    avg_g_loss += g_loss.item()
    nb_g_loss += 1
    if net_switch or first_g_loss is None:
        nb_iterations = 1
        first_g_loss = g_loss.item()
        if last_g_loss is None:
            last_g_loss = first_g_loss
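# Hypothetical helper, not in the original code: generalizes the hard-coded
# 10-city decoding above to any instance size. Expects a plain NumPy array
# (pass input_g.detach().numpy() for a live tensor).


def decode_candidate(flat_output, nb_cities):
    """Argmax of each block of nb_cities scores: the city for that position."""
    out = np.asarray(flat_output)
    return np.array([int(np.argmax(out[k * nb_cities:(k + 1) * nb_cities]))
                     for k in range(nb_cities)], dtype=int)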