Example #1
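All six examples assume roughly the following imports. The helpers read_tsp_choco_solution_file, read_tsp_heuristic_solution_file, normalize_weight_matrix, and OrderedPath come from the examples' own project, so the import shown for them is a placeholder, not a real module path:

import random
import numpy as np
import torch
# Project-specific helpers (placeholder import path):
# from tsp_project import (read_tsp_choco_solution_file, read_tsp_heuristic_solution_file,
#                          normalize_weight_matrix, OrderedPath)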
def test(model, test_set, tsp_database_path):
    model.eval()
    test_loss = 0
    correct = 0
    random.shuffle(test_set)
    for data_file in test_set:
        # File names follow the pattern "<prefix>_<nbCities>_<instanceId>.<ext>".
        details = data_file.split('.')[0].split('_')
        nb_cities, instance_id = int(details[1]), int(details[2])
        ordered_path, total_weight = read_tsp_choco_solution_file(nb_cities, instance_id, tsp_database_path)
        weight_matrix = ordered_path.get_weight_matrix()
        input_data = torch.tensor(normalize_weight_matrix(weight_matrix).reshape((1, nb_cities * nb_cities)),
                                  dtype=torch.float)
        output = model(input_data)

        # The target is a binary matrix encoding of the reference tour; it needs no gradient.
        target = torch.tensor(ordered_path.to_ordered_path_binary_matrix().get_candidate(), dtype=torch.float)
        candidate = OrderedPath(np.array(torch.argmax(output.detach(), dim=1), dtype=int).transpose(), weight_matrix)

        test_loss += model.loss_function(output, target).item()  # sum up batch loss
        correct += int(candidate.is_solution())

    test_set_size = len(test_set)
    test_loss /= test_set_size
    print('\ntest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, test_set_size, 100. * correct / test_set_size))
    return correct / test_set_size
Example #2
def valid(model, valid_set, epoch, tsp_database_path):
    model.eval()
    valid_loss = 0
    correct = 0
    sum_duplicates = 0
    sum_solution = 0
    random.shuffle(valid_set)
    for data_file in valid_set:
        # File names follow the pattern "<prefix>_<nbCities>_<instanceId>.<ext>".
        details = data_file.split('.')[0].split('_')
        nb_cities, instance_id = int(details[1]), int(details[2])
        ordered_path, total_weight = read_tsp_choco_solution_file(nb_cities, instance_id, tsp_database_path)
        weight_matrix = ordered_path.get_weight_matrix()
        input_data = torch.tensor(normalize_weight_matrix(weight_matrix).reshape((1, nb_cities * nb_cities)),
                                  dtype=torch.float)
        output = model(input_data)

        target = torch.tensor(ordered_path.to_ordered_path_binary_matrix().get_candidate(), dtype=torch.float)
        candidate = OrderedPath(np.array(torch.argmax(output.detach(), dim=1), dtype=int).transpose(), weight_matrix)

        valid_loss += model.loss_function(output, target).item()  # sum up batch loss
        sum_duplicates += candidate.get_nb_duplicates()
        is_solution = int(candidate.is_solution())  # same criterion feeds both indicators
        sum_solution += is_solution
        correct += is_solution

    valid_set_size = len(valid_set)
    sum_solution /= valid_set_size
    sum_duplicates /= valid_set_size
    print('Valid phase indicators: Solution: {}, Nb duplicates: {}'.format(sum_solution, sum_duplicates))
    valid_loss /= valid_set_size
    print('Epoch {} - valid set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        epoch, valid_loss, correct, valid_set_size, 100. * correct / valid_set_size))
    return correct / valid_set_size, valid_loss, sum_duplicates
Example #3
def train(model, train_set, optimizer, tsp_database_path):
    model.train()
    sum_duplicates = 0
    sum_solution = 0
    random.shuffle(train_set)
    for data_file in train_set:
        # File names follow the pattern "<prefix>_<nbCities>_<instanceId>.<ext>".
        details = data_file.split('.')[0].split('_')
        nb_cities, instance_id = int(details[1]), int(details[2])
        ordered_path, total_weight = read_tsp_choco_solution_file(nb_cities, instance_id, tsp_database_path)
        weight_matrix = ordered_path.get_weight_matrix()
        input_data = torch.tensor(normalize_weight_matrix(weight_matrix).reshape((1, nb_cities * nb_cities)),
                                  dtype=torch.float)
        optimizer.zero_grad()
        output = model(input_data)  # calls the forward function

        target = torch.tensor(ordered_path.to_ordered_path_binary_matrix().get_candidate(), dtype=torch.float)
        candidate = OrderedPath(np.array(torch.argmax(output.detach(), dim=1), dtype=int), weight_matrix)
        sum_duplicates += candidate.get_nb_duplicates()
        sum_solution += int(candidate.is_solution())

        loss = model.loss_function(output, target)
        loss.backward()
        optimizer.step()

    train_set_size = len(train_set)
    print('Training indicators: Solution: {}, Nb duplicates: {}'.format(sum_solution / train_set_size,
                                                                        sum_duplicates / train_set_size))
    return model
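For context, here is a minimal sketch of how Examples #1-#3 might be wired together into an epoch loop. TSPModel, nb_epochs, the learning rate, and the three file lists are hypothetical stand-ins, not part of the original project:

model = TSPModel()  # hypothetical model exposing a loss_function attribute
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(1, nb_epochs + 1):
    model = train(model, train_files, optimizer, tsp_database_path)
    accuracy, valid_loss, duplicates = valid(model, valid_files, epoch, tsp_database_path)
test(model, test_files, tsp_database_path)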
Example #4
def test(model, test_set, database_path):
    model.eval()
    random.shuffle(test_set)
    sum_predictions = 0
    test_loss = 0
    for data_file in test_set:
        # Here each entry is a tuple:
        # (nb_cities, instance_id, visited_cities, current_city_one_hot, target_one_hot).
        nb_cities, instance_id = data_file[0], data_file[1]
        visited_cities, current_city_one_hot, target_one_hot = data_file[2], data_file[3], data_file[4]
        ordered_path, total_weight = read_tsp_heuristic_solution_file(
            nb_cities, instance_id, database_path)
        formatted_weight_matrix = \
            normalize_weight_matrix(ordered_path.get_weight_matrix()).reshape((1, nb_cities * nb_cities))
        input_data = torch.tensor(
            np.concatenate((formatted_weight_matrix, visited_cities, current_city_one_hot), axis=1),
            dtype=torch.float)
        target = torch.tensor(target_one_hot, dtype=torch.float)
        output = model(input_data)  # calls the forward function
        test_loss += model.loss_function(output, target).item()
        expected_next_city = int(torch.argmax(target, dim=1))
        predicted_next_city = int(torch.argmax(output, dim=1))
        sum_predictions += int(predicted_next_city == expected_next_city)

    test_set_size = len(test_set)
    test_loss /= test_set_size
    print('\ntest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, sum_predictions, test_set_size, 100. * sum_predictions / test_set_size))
    return sum_predictions / test_set_size
Example #5
def train(model, train_set, optimizer, database_path):
    model.train()
    random.shuffle(train_set)
    sum_predictions = 0
    for data_file in train_set:
        # Each entry is a tuple:
        # (nb_cities, instance_id, visited_cities, current_city_one_hot, target_one_hot).
        nb_cities, instance_id = data_file[0], data_file[1]
        visited_cities, current_city_one_hot, target_one_hot = data_file[2], data_file[3], data_file[4]
        ordered_path, total_weight = read_tsp_heuristic_solution_file(
            nb_cities, instance_id, database_path)
        formatted_weight_matrix = \
            normalize_weight_matrix(ordered_path.get_weight_matrix()).reshape((1, nb_cities * nb_cities))
        input_data = torch.tensor(
            np.concatenate((formatted_weight_matrix, visited_cities, current_city_one_hot), axis=1),
            dtype=torch.float)
        target = torch.tensor(target_one_hot, dtype=torch.float)
        optimizer.zero_grad()
        output = model(input_data)  # calls the forward function
        loss = model.loss_function(output, target)
        loss.backward()
        optimizer.step()

        expected_next_city = int(torch.argmax(target, dim=1))
        predicted_next_city = int(torch.argmax(output, dim=1))
        sum_predictions += int(predicted_next_city == expected_next_city)

    train_set_size = len(train_set)
    print('Training indicators: Accuracy (predictions): {:.4f}%'.format(
        (sum_predictions / train_set_size) * 100.))
    return model
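Examples #4-#5 train a next-city classifier: the flattened weight matrix, a visited-cities mask, and the current city's one-hot encoding go in, and scores over candidate next cities come out. Below is a hedged sketch of how such a model could decode a full tour greedily at inference time; the greedy_tour function and its masking step are assumptions for illustration, not the project's own code:

def greedy_tour(model, normalized_weights, nb_cities):
    # normalized_weights: output of normalize_weight_matrix, shape (nb_cities, nb_cities)
    visited = np.zeros((1, nb_cities))
    current = 0  # arbitrary start city
    tour = [current]
    visited[0, current] = 1.
    flat_weights = normalized_weights.reshape((1, nb_cities * nb_cities))
    for _ in range(nb_cities - 1):
        current_one_hot = np.zeros((1, nb_cities))
        current_one_hot[0, current] = 1.
        features = np.concatenate((flat_weights, visited, current_one_hot), axis=1)
        with torch.no_grad():
            scores = model(torch.tensor(features, dtype=torch.float)).numpy().reshape(-1)
        scores[visited[0].astype(bool)] = -np.inf  # never revisit a city
        current = int(np.argmax(scores))
        tour.append(current)
        visited[0, current] = 1.
    return tour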
Example #6
def test_normalize_weight_matrix(self):
    # .all() must be called; passing the bound method itself would make
    # assertTrue always succeed, since a method object is truthy.
    self.assertTrue((normalize_weight_matrix(weight_matrix_3) == np.array([[0., 1., 0.75, 0.25, 0.5],
                                                                           [1., 0., 1., 0.75, 0.5],
                                                                           [0.75, 0.25, 0., 0.75, 0.75],
                                                                           [0.25, 0.75, 1., 0., 0.25],
                                                                           [0.5, 0.5, 0.5, 0.25, 0.]],
                                                                          dtype=float)).all())
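Exact float equality can be brittle if normalize_weight_matrix divides by row maxima or similar. A tolerance-based variant of the same check, using weight_matrix_3 exactly as the test above does (the fixture itself is defined elsewhere in the project):

expected = np.array([[0., 1., 0.75, 0.25, 0.5],
                     [1., 0., 1., 0.75, 0.5],
                     [0.75, 0.25, 0., 0.75, 0.75],
                     [0.25, 0.75, 1., 0., 0.25],
                     [0.5, 0.5, 0.5, 0.25, 0.]])
np.testing.assert_allclose(normalize_weight_matrix(weight_matrix_3), expected)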