Example #1
def test(time_steps=32, target_cost_dev=0.05):

    test_samples = 32*32

    if not os.path.isdir('test'):
        print('Creating {} Complete Test instances'.format(test_samples), flush=True)
        create_dataset_metric(
            20, 20,
            1, 1,
            bins=10**6,
            samples=test_samples,
            path='test')
    #end

    d                       = 64
    epochs_n                = 100
    batch_size              = 1
    test_batches_per_epoch  = 16*32

    # Create test loader
    test_loader = InstanceLoader("test")

    # Build model
    print("Building model ...", flush=True)
    GNN = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        load_weights(sess, './TSP-checkpoints-decision-0.05/epoch=200.0')
        
        with open('TSP-log.dat','w') as logfile:
            # Run a single test epoch
            for epoch_i in range(1):

                test_loader.reset()

                test_loss   = np.zeros(test_batches_per_epoch)
                test_acc    = np.zeros(test_batches_per_epoch)
                test_sat    = np.zeros(test_batches_per_epoch)
                test_pred   = np.zeros(test_batches_per_epoch)

                print("Testing model...", flush=True)
                for (batch_i, batch) in islice(
                        enumerate(test_loader.get_batches(
                            batch_size, target_cost_dev=target_cost_dev)),
                        test_batches_per_epoch):
                    (test_loss[batch_i], test_acc[batch_i],
                     test_sat[batch_i], test_pred[batch_i]) = run_batch(
                         sess, GNN, batch, batch_i, epoch_i, time_steps,
                         train=False, verbose=True)[:4]
                #end
                summarize_epoch(epoch_i,test_loss,test_acc,test_sat,test_pred,train=False)
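
Example #1 only threads target_cost_dev through to the loader, while the later examples derive the actual decision threshold from it. A minimal standalone sketch of that computation, assuming the (Ma, Mw, route) instance layout used in Examples #2 and #4:

def decision_target_cost(Mw, route, target_cost_dev=0.05):
    # Per-vertex cost of the optimal tour; weights live in the upper
    # triangle of Mw, hence the (min, max) indexing
    n = len(route)
    route_cost = sum(Mw[min(i, j), max(i, j)]
                     for (i, j) in zip(route, route[1:] + route[:1])) / n
    # The decision instance asks: is there a tour cheaper than this?
    return (1 + target_cost_dev) * route_cost

# e.g. a 3-vertex toy instance (hypothetical numbers):
# Mw = np.array([[0, .2, .4], [0, 0, .3], [0, 0, 0]]); route = [0, 1, 2]
# decision_target_cost(Mw, route)  ->  1.05 * (.2 + .3 + .4) / 3 = 0.315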
Example #2
def extract_embeddings_and_predictions(sess, model, instance, time_steps=32, target_cost_dev=0.05):

    Ma,Mw,route,nodes = instance
    n = Ma.shape[0]

    # Create batch of size 1; the target cost relaxes the (per-vertex)
    # optimal route cost by target_cost_dev
    route_cost = sum([ Mw[min(i,j),max(i,j)] for (i,j) in zip(route,route[1:]+route[:1]) ]) / n
    target_cost = (1+target_cost_dev)*route_cost
    batch = InstanceLoader.create_batch([(Ma,Mw,route)], target_cost=target_cost)
    EV, W, C, edges_mask, route_exists, n_vertices, n_edges = batch

    # Define feed dict
    feed_dict = {
        model['gnn'].matrix_placeholders['EV']: EV,
        model['gnn'].matrix_placeholders['W']: W,
        model['gnn'].matrix_placeholders['C']: C,
        model['gnn'].time_steps: time_steps,
        model['route_exists']: route_exists,
        model['n_vertices']: n_vertices,
        model['n_edges']: n_edges
    }

    # Run model to extract vertex and edge embeddings along with the prediction
    vertex_embeddings, edge_embeddings, predictions = sess.run(
        [model['gnn'].last_states['V'].h,
         model['gnn'].last_states['E'].h,
         model['predictions']],
        feed_dict=feed_dict)

    return vertex_embeddings, edge_embeddings, predictions
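
A usage sketch for the helper above. Assumptions: build_network(d) returns the model dict (with the 'gnn' and 'predictions' entries the helper indexes), load_weights and InstanceLoader come from the same codebase, and the checkpoint path is the one from Example #1:

model = build_network(64)
config = tf.ConfigProto(device_count={"GPU": 0})
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    load_weights(sess, './TSP-checkpoints-decision-0.05/epoch=200.0')
    # get_instances(k) yields k instances, following the pattern in Example #5
    for instance in InstanceLoader('test').get_instances(1):
        V_emb, E_emb, pred = extract_embeddings_and_predictions(sess, model, instance)
        print('Decision: {}'.format('Yes' if pred[0] >= 0.5 else 'No'))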
Example #3
def test(time_steps=25):

    d = 128

    epochs_n = 100
    batch_size = 1
    test_batches_per_epoch = 16 * 32

    # Create train and test loaders
    train_loader = InstanceLoader("train", target_cost_dev=0.25 * 0.04)
    test_loader = InstanceLoader("test", target_cost_dev=0.25 * 0.04)

    # Build model
    print("Building model ...", flush=True)
    GNN = build_network_v2(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        load_weights(sess, './TSP-checkpoints-decision')

        with open('TSP-log.dat', 'w') as logfile:
            # Run a single test epoch
            for epoch_i in range(1):

                test_loader.reset()

                test_loss = np.zeros(test_batches_per_epoch)
                test_acc = np.zeros(test_batches_per_epoch)
                test_sat = np.zeros(test_batches_per_epoch)
                test_pred = np.zeros(test_batches_per_epoch)

                print("Testing model...", flush=True)
                for (batch_i, batch) in islice(
                        enumerate(test_loader.get_batches(batch_size)),
                        test_batches_per_epoch):
                    (test_loss[batch_i], test_acc[batch_i],
                     test_sat[batch_i], test_pred[batch_i]) = run_batch_v2(
                         sess, GNN, batch, batch_i, epoch_i, time_steps,
                         train=False, verbose=True)
                #end
                summarize_epoch(epoch_i,
                                test_loss,
                                test_acc,
                                test_sat,
                                test_pred,
                                train=False)
Example #4
def extract_solution(sess, model, instance, time_steps=10):

    # Extract list of edges from instance
    Ma,Mw,route,nodes = instance
    edges = list(zip(np.nonzero(Ma)[0],np.nonzero(Ma)[1]))
    n = Ma.shape[0]
    m = len(edges)

    # Create batch of size 1; the 1.05 factor corresponds to target_cost_dev=0.05
    route_cost = sum([ Mw[min(i,j),max(i,j)] for (i,j) in zip(route,route[1:]+route[:1]) ]) / n
    target_cost = 1.05*route_cost
    batch = InstanceLoader.create_batch([(Ma,Mw,route)], target_cost=target_cost)
    EV, W, C, edges_mask, route_exists, n_vertices, n_edges = batch

    # Define feed dict
    feed_dict = {
        model['gnn'].matrix_placeholders['EV']: EV,
        model['gnn'].matrix_placeholders['W']: W,
        model['route_costs']: C,
        model['gnn'].time_steps: time_steps,
        model['route_exists']: route_exists,
        model['n_vertices']: n_vertices,
        model['n_edges']: n_edges
    }

    # Run model to extract edge embeddings
    edge_embeddings, predictions = sess.run(
        [model['gnn'].last_states['E'].h, model['predictions']],
        feed_dict=feed_dict)

    # Perform 2-clustering on the edge embeddings; the cluster whose size is
    # closest to n is taken as the positive ('route edge') cluster, since a
    # tour on n vertices uses exactly n edges. Note cluster_centers_[k] is the
    # centroid of the points labelled k, so the label also indexes its center.
    two_clustering = KMeans(n_clusters=2).fit(edge_embeddings)
    if abs(sum(two_clustering.labels_)-n) < abs(sum(1-two_clustering.labels_)-n):
        pos_indices = [ i for i,x in enumerate(two_clustering.labels_) if x==1 ]
        pos_center = two_clustering.cluster_centers_[1]
    else:
        pos_indices = [ i for i,x in enumerate(two_clustering.labels_) if x==0 ]
        pos_center = two_clustering.cluster_centers_[0]
    #end

    print('# pos_indices, # neg_indices: {},{}'.format(len(pos_indices), edge_embeddings.shape[0]-len(pos_indices)))

    # Rank the positive embeddings by distance to their cluster center (the
    # [:n] slice that would keep only the n closest is currently disabled)
    top_n = sorted(pos_indices, key=lambda i: LA.norm(edge_embeddings[i,:]-pos_center) )#[:n]

    print('')
    print('Is there a route with cost < {target_cost:.3f}? Answer: {R}'.format(target_cost=target_cost, R='Yes' if route_exists[0]==1 else 'No'))
    print('Prediction: {}'.format('Yes' if predictions[0] >= 0.5 else 'No'))

    # Get list of predicted edges
    predicted_edges = [ (i,j) for e,(i,j) in enumerate(edges) if e in top_n ]

    return predicted_edges
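
The decode step above is 2-means over edge embeddings: route and non-route edges are assumed to separate into two clusters, and the cluster whose size is closest to n is taken as positive (a tour on n vertices has exactly n edges). A self-contained sketch on synthetic embeddings (all numbers here are made up):

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
n, m, d = 20, 100, 64             # n route edges among m candidate edges
emb = rng.randn(m, d)             # stand-in for learned edge embeddings
emb[:n] += 3.0                    # route edges drift to a separate mode

km = KMeans(n_clusters=2, n_init=10).fit(emb)
sizes = [np.sum(km.labels_ == k) for k in (0, 1)]
pos_label = int(np.argmin([abs(s - n) for s in sizes]))
pos_indices = np.flatnonzero(km.labels_ == pos_label)
pos_center = km.cluster_centers_[pos_label]
# Rank positive edges by distance to their center, as extract_solution does
ranked = sorted(pos_indices, key=lambda i: np.linalg.norm(emb[i] - pos_center))
print(len(pos_indices), ranked[:5])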
Example #5
import numpy as np
from itertools import islice
from tsp_utils import InstanceLoader, create_graph_metric, create_dataset_metric
from anneal import SimAnneal

if __name__ == '__main__':
    test_batches_per_epoch = 1
    batch_size = 1
    bins = 10**6
    test_loader = InstanceLoader("test")
    with open('TSP-closest-anneal-log.dat', 'w') as logfile:
        print("inst_i\tinst_size\tclosest_fitness\tsa_fitness\tsa_iter",
              file=logfile)
        # Run a single test epoch
        for epoch_i in range(1):

            sa_acc = 0
            cn_acc = 0

            print("Testing model...", flush=True)
            for (inst_i, inst) in enumerate(
                    test_loader.get_instances(len(test_loader.filenames))):
                _, Mw, _ = inst
                Mw = np.round(Mw * bins)
                sa = SimAnneal(Mw)
                cn_acc += sa.best_fitness
                print("{inst_i}\t{inst_size}\t{closest_fitness}\t".format(
                    inst_i=inst_i,
                    inst_size=Mw.shape[0],
                    closest_fitness=sa.best_fitness),
                      end="",
Example #6
        'bins': 10**6,
        'batches_per_epoch': 32,
        'samples': 1024
    }

    # Delete datasets if requested
    if args.newdatasets:
        shutil.rmtree('train')
        shutil.rmtree('test')
    #end

    # Ensure that the train and test datasets exist, creating them if missing
    ensure_datasets(batch_size, train_params, test_params)

    # Create train and test loaders
    train_loader = InstanceLoader("train")
    test_loader = InstanceLoader("test")

    # Build model
    print("Building model ...", flush=True)
    GNN = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run(tf.global_variables_initializer())

        # Restore saved weights