Example No. 1
#end build_network

# Imports assumed for this excerpt (module names are a guess, not confirmed
# by the snippet itself); build_network is defined earlier in this file
import tensorflow as tf

from instance_loader import InstanceLoader
from util import timestamp, memory_usage

if __name__ == '__main__':
    d = 128
    epochs = 100
    batch_size = 32
    batches_per_epoch = 128
    time_steps = 200

    # Build model
    print("{timestamp}\t{memory}\tBuilding model ...".format(
        timestamp=timestamp(), memory=memory_usage()))
    GNN = build_network(d)

    # Create train and test instance generators
    train_generator = InstanceLoader("train")
    test_generator = InstanceLoader("test")

    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0})
    with tf.Session(config=config) as sess:
        # Initialize global variables
        print(
            "{timestamp}\t{memory}\tInitializing global variables ... ".format(
                timestamp=timestamp(), memory=memory_usage()))
        sess.run(tf.global_variables_initializer())

        # Run for a number of epochs
        print("{timestamp}\t{memory}\tRunning for {} epochs".format(
            epochs, timestamp=timestamp(), memory=memory_usage()))
        for epoch in range(epochs):
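A hedged sketch of how the epoch loop might continue, using the variables set up above. The get_batches call, the to_feed_dict helper, and the 'train_step' and 'loss' keys of the GNN dictionary are assumptions, not confirmed by this excerpt:

        # Hypothetical loop body: one training step per batch, then log the
        # mean loss for the epoch. to_feed_dict is an illustrative helper.
        epoch_loss = 0.0
        for (b, batch) in enumerate(train_generator.get_batches(batch_size)):
            if b == batches_per_epoch:
                break
            feed_dict = to_feed_dict(GNN, batch, time_steps)
            _, loss = sess.run([GNN['train_step'], GNN['loss']],
                               feed_dict=feed_dict)
            epoch_loss += loss
        print("{timestamp}\t{memory}\tEpoch {epoch} mean loss: {loss:.4f}".format(
            timestamp=timestamp(), memory=memory_usage(),
            epoch=epoch, loss=epoch_loss / batches_per_epoch))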
Example No. 2
    parser.add_argument('-d', default=64, type=int, help='Embedding size for vertices and edges')
    parser.add_argument('-time_steps', default=32, type=int, help='# Timesteps')
    parser.add_argument('-dev', default=0.02, type=float, help='Target cost deviation')
    parser.add_argument('-instances', default='instances/test', help='Path for the test instances')
    parser.add_argument('-checkpoint', default='training/dev=0.02/checkpoints/epoch=100', help='Path for the checkpoint of the trained model')

    # Parse arguments from command line
    args = parser.parse_args()

    # Setup parameters
    d                       = vars(args)['d']
    time_steps              = vars(args)['time_steps']
    target_cost_dev         = vars(args)['dev']

    # Create instance loader
    loader = InstanceLoader(vars(args)['instances'])

    # Build model
    print('Building model ...', flush=True)
    GNN = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto( device_count = {'GPU':0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
        load_weights(sess, vars(args)['checkpoint'])
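As an aside on the argparse setup at the top of this example: the parser can be exercised with an explicit argument list, using only the flags defined above.

# Feed the parser an explicit argument list instead of sys.argv;
# unspecified flags keep their declared defaults.
args = parser.parse_args(['-d', '64', '-dev', '0.05'])
assert vars(args)['d'] == 64
assert vars(args)['time_steps'] == 32   # default
assert vars(args)['dev'] == 0.05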
Example No. 3
def get_cost(sess,
             model,
             instance,
             time_steps,
             threshold=0.5,
             stopping_delta=0.01):

    # Extract information from instance
    Ma, Mw, route = instance
    edges = list(zip(np.nonzero(Ma)[0], np.nonzero(Ma)[1]))
    n = Ma.shape[0]
    m = len(edges)

    # Get loose lower and upper bounds on the route cost:
    # wmin is the sum of the n lightest edges and wmax the sum of the n
    # heaviest. Zeros standing for absent edges are filtered out before
    # sorting, so that wmin is not trivially zero.
    upper = np.triu(Mw).flatten()
    upper = upper[upper > 0]
    lower = np.tril(Mw).flatten()
    lower = lower[lower > 0]
    wmin = np.minimum(np.sum(np.sort(upper)[:n]), np.sum(np.sort(lower)[:n]))
    wmax = np.maximum(np.sum(np.sort(upper)[-n:]), np.sum(np.sort(lower)[-n:]))
    # Normalize wmin and wmax by the number of cities
    wmin /= n
    wmax /= n

    # Start at the middle
    wpred = (wmin + wmax) / 2

    # Compute the (normalized) cost of the ground-truth route
    route_cost = sum(
        Mw[min(i, j), max(i, j)]
        for (i, j) in zip(route, route[1:] + route[:1])) / n

    # Create a batch of size 1 with the given instance
    batch = InstanceLoader.create_batch([(Ma, Mw, route)], target_cost=wpred)
    EV, W, _, route_exists, n_vertices, n_edges = batch
    C = np.ones((m, 1))

    # Define feed dict. model['C'] starts out empty; it is overwritten with
    # the current target cost at every iteration of the binary search below.
    feed_dict = {
        model['EV']: EV,
        model['W']: W,
        model['C']: None,
        model['time_steps']: time_steps,
        model['route_exists']: route_exists,
        model['n_vertices']: n_vertices,
        model['n_edges']: n_edges
    }

    # Run binary search until [wmin, wmax] lies within a factor of
    # (1 ± stopping_delta) around wpred
    iterations = 0
    while wmin < wpred * (1 - stopping_delta) or wpred * (
            1 + stopping_delta) < wmax:

        # Update feed dict
        feed_dict[model['C']] = C * wpred

        # Get predictions from trained model
        pred = sess.run(model['predictions'], feed_dict=feed_dict)

        # Update the search interval: pred below threshold means the model
        # predicts no route of cost <= wpred exists, so the optimal cost must
        # lie above wpred; otherwise it must lie at or below it
        if pred < threshold:
            wmin = wpred
        else:
            wmax = wpred
        #end
        wpred = (wmax + wmin) / 2

        # Increment iterations
        iterations += 1
    #end
    return wpred, pred, route_cost, iterations
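The loop above is a plain bisection on the target cost: the trained network serves as an oracle for "does a route of cost at most wpred exist?", and each query halves the interval [wmin, wmax]. The same pattern in isolation, with the network replaced by a stub oracle (everything below is illustrative):

def bisect_cost(oracle, wmin, wmax, stopping_delta=0.01):
    # oracle(w) should return True when a route of cost <= w is predicted
    # to exist. Same stopping criterion as get_cost above.
    wpred = (wmin + wmax) / 2
    while wmin < wpred * (1 - stopping_delta) or wpred * (1 + stopping_delta) < wmax:
        if oracle(wpred):
            wmax = wpred  # a cheap-enough route exists: search lower costs
        else:
            wmin = wpred  # no such route: the optimum lies above wpred
        wpred = (wmin + wmax) / 2
    return wpred

# With a stub oracle whose hidden optimum is 0.37, the search converges there:
print(bisect_cost(lambda w: w >= 0.37, 0.0, 1.0))  # ~0.37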
Example No. 4
    test_params = {
        'n_min': train_params['n_min'],
        'n_max': train_params['n_max'],
        'conn_min': vars(args)['cmin'],
        'conn_max': vars(args)['cmax'],
        'batches_per_epoch': 32,
        'samples': 2**10,
        'distances': vars(args)['distances']
    }
    
    # Ensure that the train and test datasets exist, creating them if needed
    ensure_datasets(batch_size, train_params, test_params)

    # Create train and test loaders
    train_loader    = InstanceLoader('instances/train')
    test_loader     = InstanceLoader('instances/test')

    # Build model
    print('Building model ...', flush=True)
    GNN = build_network(d)

    # GPU use is allowed here; uncomment the config line below (and pass it
    # to the session) to force CPU-only execution
    #config = tf.ConfigProto( device_count = {'GPU':0})
    with tf.Session() as sess:

        # Initialize global variables
        print('Initializing global variables ... ', flush=True)
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
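As in Examples 2 and 5, the restore step would presumably call the project's load_weights helper; the checkpoint path below is illustrative, not taken from this excerpt.

        # Hypothetical continuation mirroring the other examples on this page
        load_weights(sess, 'training/checkpoints/epoch=100')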
Example No. 5
    print('Building model ...')
    model = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={'GPU': 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print('Initializing global variables ...')
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        load_weights(sess, '../training/dev=0.02/checkpoints/epoch=100')

        # Init instance loader
        loader = InstanceLoader('../instances/test')

        avg_deviation = 0

        with open('results/binary-search.dat', 'w') as out:
            # Get instances from instance loader
            for instance in loader.get_instances(len(loader.filenames)):

                # Get number of cities
                n = instance[0].shape[0]

                # Compute cost with binary search
                pred_cost, pred_prob, real_cost, iterations = get_cost(
                    sess, model, instance, time_steps)
                deviation = (pred_cost - real_cost) / real_cost
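Presumably the loop then logs each instance and folds the deviation into the running average; a sketch under that assumption (the output column layout is a guess):

                # Hypothetical continuation: one line per instance, plus a
                # running average of the relative deviation
                out.write('{} {} {} {} {}\n'.format(
                    n, real_cost, pred_cost, deviation, iterations))
                avg_deviation += deviation / len(loader.filenames)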
Example No. 6
if __name__ == "__main__":
#    graph = [[0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
#             [1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
#             [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0],
#             [0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0],
#             [1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
#             [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1],
#             [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
#             [0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1],
#             [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0],
#             [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1],
#             [0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],
#             [0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0]]

    loader     = InstanceLoader('adversarial-testing')
    for (z, pair) in enumerate(loader.get_batches(1)):
        M, n_colors, VC, cn_exists, n_vertices, n_edges, f = pair
        # Compute the number of problems
        n_problems = n_vertices.shape[0]
        print(z)
        # Unpack the batch and iterate over the instances it contains
        for i in range(n_problems):
            n, m, c = n_vertices[i], n_edges[i], n_colors[i]
            
            n_acc = sum(n_vertices[0:i])
            c_acc = sum(n_colors[0:i])

            # Slice this instance's adjacency matrix out of the batched matrix
            M_t = M[n_acc:n_acc+n, n_acc:n_acc+n]
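The accumulated offsets n_acc and c_acc suggest that the batched vertex-to-color matrix is sliced the same way; a sketch under that assumption:

            # Hypothetical: take this instance's vertex rows and color
            # columns out of the batched vertex-color matrix VC
            VC_t = VC[n_acc:n_acc+n, c_acc:c_acc+c]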
Example No. 7
	embedding_size = 64
	epochs = 100
	batch_n_max = 4096
	batches_per_epoch = 32
	n_size_min = 20
	n_size_max = 512
	edge_probability = 0.25
	time_steps = 32
	n_instances_min = 8
	n_instances_max = 64
	n_instances = 1
	k = 30
	
	print( "{timestamp}\t{memory}\tBuilding model ...".format( timestamp = timestamp(), memory = memory_usage() ) )
	GNN = build_network(embedding_size)
	test_generator = InstanceLoader("./instances-test") 
	# Disallow GPU use
	config = tf.ConfigProto(
		device_count = {"GPU":0},
		inter_op_parallelism_threads=1,
		intra_op_parallelism_threads=1
	)
	
	# Create model saver
	saver = tf.train.Saver()
	with tf.Session(config=config) as sess:
		print( "{timestamp}\t{memory}\tInitializing global variables ... ".format( timestamp = timestamp(), memory= 			memory_usage() ) )
		sess.run( tf.global_variables_initializer() )

		# Restore saved weights
		print( "{timestamp}\t{memory}\tRestoring saved model ... ".format( timestamp = timestamp(), memory = memory_usage() ) )
Example No. 8
    epochs = 100
    batch_n_max = 4096
    batches_per_epoch = 32
    n_size_min = 20
    n_size_max = 512
    edge_probability = 0.25
    time_steps = 32
    n_instances_min = 8
    n_instances_max = 64
    n_instances = 32

    # Build model
    print("{timestamp}\t{memory}\tBuilding model ...".format(
        timestamp=timestamp(), memory=memory_usage()))
    GNN = build_network(embedding_size)
    instance_loader = InstanceLoader("./instances")
    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0},
                            inter_op_parallelism_threads=1,
                            intra_op_parallelism_threads=1)
    with tf.Session(config=config) as sess:
        # Initialize global variables
        print(
            "{timestamp}\t{memory}\tInitializing global variables ... ".format(
                timestamp=timestamp(), memory=memory_usage()))
        sess.run(tf.global_variables_initializer())

        # Run for a number of epochs
        print("{timestamp}\t{memory}\tRunning for {} epochs".format(
            epochs, timestamp=timestamp(), memory=memory_usage()))
        for epoch in range(epochs):
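From here the epoch loop would proceed along the lines of the sketch given after Example No. 1, with instance_loader in place of train_generator.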
Example No. 9
    loadpath                = vars(args)['loadpath']
    load_checkpoints        = vars(args)['load']
    save_checkpoints        = vars(args)['save']
    runtabu                 = vars(args)['runtabu']

    train_params = {
        'batches_per_epoch': 128
    }

    test_params = {
        'batches_per_epoch': 1
    }
    
    # Create train and test loaders
    if vars(args)['train']:
        train_loader = InstanceLoader(path)
    else:
        test_loader  = InstanceLoader(path)

    # Build model
    print('Building model ...', flush=True)
    GNN = build_network(d)

    # Allow GPU use, but have TensorFlow allocate GPU memory on demand
    # instead of grabbing it all up front
    config = tf.ConfigProto()
    #config.gpu_options.per_process_gpu_memory_fraction = 0.5
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:

        # Initialize global variables
        sess.run(tf.global_variables_initializer())