def test_with(sess, solver, path, name, time_steps=26, batch_size=1):
    # Load test instances
    print("{timestamp}\t{memory}\tLoading test {name} instances ...".format(
        timestamp=timestamp(), memory=memory_usage(), name=name))
    test_generator = instance_loader.InstanceLoader(path)

    test_loss = 0.0
    test_accuracy = 0.0
    test_avg_pred = 0.0
    test_batches = 0

    # Run with the test instances
    print("{timestamp}\t{memory}\t{name} TEST SET BEGIN".format(
        timestamp=timestamp(), memory=memory_usage(), name=name))
    for b, batch in enumerate(test_generator.get_batches(batch_size)):
        l, a, p = run_and_log_batch(sess, solver, name, b, batch, time_steps,
                                    train=False)
        test_loss += l
        test_accuracy += a
        test_avg_pred += p
        test_batches += 1
    #end for

    # Summarize results and print test summary
    test_loss /= test_batches
    test_accuracy /= test_batches
    test_avg_pred /= test_batches
    print(
        "{timestamp}\t{memory}\t{name} TEST SET END Mean loss: {loss:.4f} Mean Accuracy = {accuracy} Mean prediction {avg_pred:.4f}"
        .format(
            loss=test_loss,
            accuracy=test_accuracy,
            avg_pred=test_avg_pred,
            timestamp=timestamp(),
            memory=memory_usage(),
            name=name))
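# A minimal usage sketch for test_with (not from the original script): the
# checkpoint path and the "sat"/"unsat" test subdirectories below are
# assumptions for illustration.
#
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     saver.restore(sess, "./tmp/neurosat.ckpt")
#     test_with(sess, solver, "./test-instances/sat", "SAT", time_steps=26)
#     test_with(sess, solver, "./test-instances/unsat", "UNSAT", time_steps=26)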
time_steps = 26
batch_size = 128
batches_per_epoch = 128
early_stopping_window = [0 for _ in range(3)]
early_stopping_threshold = 0.85

# Build model
print("{timestamp}\t{memory}\tBuilding model ...".format(
    timestamp=timestamp(), memory=memory_usage()))
solver = build_neurosat(d)

# Create batch loader
print("{timestamp}\t{memory}\tLoading instances ...".format(
    timestamp=timestamp(), memory=memory_usage()))
generator = instance_loader.InstanceLoader("./instances")
# If you want to use the entire dataset on each epoch, use:
# batches_per_epoch = len(generator.filenames) // batch_size
test_generator = instance_loader.InstanceLoader("./test-instances")

# Create model saver
saver = tf.train.Saver()

# Session config: let GPU memory grow on demand
# (uncomment device_count to disallow GPU use altogether)
config = tf.ConfigProto(
    #device_count = {"GPU":0},
    gpu_options=tf.GPUOptions(allow_growth=True),
)
with tf.Session(config=config) as sess:
    # Initialize global variables
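# A sketch (an assumption, not the original training loop, which is truncated
# above) of how the early-stopping window could be used: keep the last three
# epoch accuracies and stop once every entry clears the threshold.
#
# early_stopping_window = early_stopping_window[1:] + [epoch_accuracy]
# if all(acc >= early_stopping_threshold for acc in early_stopping_window):
#     break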
time_steps = 32
batch_size = 4
batches_per_epoch = 128
early_stopping_window = [0 for _ in range(3)]
early_stopping_threshold = 0.85

# Build model
print("{timestamp}\t{memory}\tBuilding model ...".format(
    timestamp=timestamp(), memory=memory_usage()))
solver = build_neurosat(d)

# Create batch loader
print("{timestamp}\t{memory}\tLoading instances ...".format(
    timestamp=timestamp(), memory=memory_usage()))
generator = instance_loader.InstanceLoader("../adversarial-training-cnf")

# Create model saver
saver = tf.train.Saver()

# Disallow GPU use
config = tf.ConfigProto(device_count={"GPU": 0})
with tf.Session(config=config) as sess:
    # Initialize global variables
    print(
        "{timestamp}\t{memory}\tInitializing global variables ... ".format(
            timestamp=timestamp(), memory=memory_usage()))
    sess.run(tf.global_variables_initializer())

    if os.path.exists("./tmp-64/neurosat.ckpt"):
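# The excerpt is truncated at the `if` above. A sketch (an assumption) of the
# usual restore-if-checkpoint-exists pattern such a check introduces:
#
# if os.path.exists("./tmp-64/neurosat.ckpt"):
#     print("{timestamp}\t{memory}\tRestoring saved model ...".format(
#         timestamp=timestamp(), memory=memory_usage()))
#     saver.restore(sess, "./tmp-64/neurosat.ckpt")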
with tf.Session() as sess:
    # Initialize global variables
    print(
        "{timestamp}\t{memory}\tInitializing global variables ... ".format(
            timestamp=timestamp(), memory=memory_usage()))
    sess.run(tf.global_variables_initializer())

    # Restore saved weights
    print("{timestamp}\t{memory}\tRestoring saved model ... ".format(
        timestamp=timestamp(), memory=memory_usage()))
    saver.restore(sess, "./tmp backup/neurosat.ckpt")

    # Define loader and get a batch with size 1 (one instance)
    loader = instance_loader.InstanceLoader('test-instances/sat/')
    batch = list(itertools.islice(loader.get_batches(1), 1))[0]

    # Collect the literal votes for every step count from 0 to time_steps-1,
    # running the network once per step count (2*n votes per row: one per
    # literal and its negation)
    time_steps = 100
    votes = np.zeros((time_steps, 2 * batch.n[0]))
    for t in range(0, time_steps):
        votes[t, :] = sess.run(
            solver["votes"],
            feed_dict={
                solver["gnn"].time_steps: t,
                solver["gnn"].matrix_placeholders["M"]: batch.get_dense_matrix(),
                solver["instance_SAT"]:
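# Once the feed_dict is completed and the loop above finishes, `votes` holds
# one row of literal votes per step count. A minimal matplotlib sketch (an
# assumption, not part of the original script) to inspect how the mean vote
# evolves with the number of message-passing iterations:
#
# import matplotlib.pyplot as plt
# plt.plot(votes.mean(axis=1))
# plt.xlabel("time steps")
# plt.ylabel("mean literal vote")
# plt.show()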