Example #1
def test(time_steps=25):

    d = 128

    epochs_n = 100
    batch_size = 1
    test_batches_per_epoch = 16 * 32

    # Create train and test loaders
    train_loader = InstanceLoader("train", target_cost_dev=0.25 * 0.04)
    test_loader = InstanceLoader("test", target_cost_dev=0.25 * 0.04)

    # Build model
    print("Building model ...", flush=True)
    GNN = build_network_v2(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        load_weights(sess, './TSP-checkpoints-decision')

        with open('TSP-log.dat', 'w') as logfile:
            # Run for a number of epochs
            for epoch_i in range(1):

                test_loader.reset()

                test_loss = np.zeros(test_batches_per_epoch)
                test_acc = np.zeros(test_batches_per_epoch)
                test_sat = np.zeros(test_batches_per_epoch)
                test_pred = np.zeros(test_batches_per_epoch)

                print("Testing model...", flush=True)
                for (batch_i, batch) in islice(
                        enumerate(test_loader.get_batches(batch_size)),
                        test_batches_per_epoch):
                    (test_loss[batch_i], test_acc[batch_i], test_sat[batch_i],
                     test_pred[batch_i]) = run_batch_v2(
                         sess, GNN, batch, batch_i, epoch_i, time_steps,
                         train=False, verbose=True)
                #end
                summarize_epoch(epoch_i,
                                test_loss,
                                test_acc,
                                test_sat,
                                test_pred,
                                train=False)
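Note: the load_weights helper itself is not shown in any of these snippets. For the TensorFlow 1.x examples, a minimal sketch of what it might look like, assuming the checkpoints were written with tf.train.Saver (the directory layout is an assumption; some callers below pass an explicit checkpoint file instead, in which case saver.restore(sess, path) is called directly, and one variant also returns the epoch to resume from):

import tensorflow as tf

def load_weights(sess, checkpoint_dir):
    # Restore all saved variables from the newest checkpoint in the folder.
    saver = tf.train.Saver()
    checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    if checkpoint is None:
        raise IOError('No checkpoint found in {}'.format(checkpoint_dir))
    saver.restore(sess, checkpoint)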
Example #2
def draw_routes():
    d = 128
    n = 20
    bins = 10**6
    connectivity = 1

    # Build model
    print('Building model ...')
    model = build_network_v2(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={'GPU': 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print('Initializing global variables ...')
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        load_weights(sess, './TSP-checkpoints-decision')

        k = 3

        # Normalize item number values to colormap
        norm = mcolors.Normalize(vmin=0, vmax=k**2)

        for inst in range(k**2):

            instance = create_graph_metric(n, bins, connectivity)
            Ma, _, _, nodes = instance
            edges = list(zip(np.nonzero(Ma)[0], np.nonzero(Ma)[1]))

            predicted_edges = extract_solution(sess, model, instance)

            nodes_ = nodes + np.array([inst // k, inst % k])

            for (i, j) in edges:
                x0, y0 = nodes_[i]
                x1, y1 = nodes_[j]
                if (i, j) in predicted_edges:
                    plt.plot([x0, x1], [y0, y1],
                             color=cm.jet(norm(inst)),
                             linewidth=1,
                             zorder=2)
                else:
                    pass
                    #plt.plot([x0,x1],[y0,y1], 'r--', linewidth=0.5, zorder=1)
                #end
            #end

            plt.scatter(x=nodes_[:, 0],
                        y=nodes_[:, 1],
                        edgecolors='black',
                        c='w',
                        zorder=3)
        #end

        plt.show()
Example #3
def test(time_steps=32, target_cost_dev=0.05):

    test_samples = 32*32

    if not os.path.isdir('test'):
        print('Creating {} Complete Test instances'.format(test_samples), flush=True)
        create_dataset_metric(
            20, 20,
            1, 1,
            bins=10**6,
            samples=test_samples,
            path='test')
    #end

    d                       = 64
    epochs_n                = 100
    batch_size              = 1
    test_batches_per_epoch  = 16*32

    # Create test loader
    test_loader = InstanceLoader("test")

    # Build model
    print("Building model ...", flush=True)
    GNN = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto( device_count = {"GPU":0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
        load_weights(sess,'./TSP-checkpoints-decision-0.05/epoch=200.0')
        
        with open('TSP-log.dat','w') as logfile:
            # Run for a number of epochs
            for epoch_i in range(1):

                test_loader.reset()

                test_loss   = np.zeros(test_batches_per_epoch)
                test_acc    = np.zeros(test_batches_per_epoch)
                test_sat    = np.zeros(test_batches_per_epoch)
                test_pred   = np.zeros(test_batches_per_epoch)

                print("Testing model...", flush=True)
                for (batch_i, batch) in islice(
                        enumerate(test_loader.get_batches(
                            batch_size, target_cost_dev=target_cost_dev)),
                        test_batches_per_epoch):
                    (test_loss[batch_i], test_acc[batch_i], test_sat[batch_i],
                     test_pred[batch_i]) = run_batch(
                         sess, GNN, batch, batch_i, epoch_i, time_steps,
                         train=False, verbose=True)[:4]
                #end
                summarize_epoch(epoch_i,test_loss,test_acc,test_sat,test_pred,train=False)
Example #4
    def __init__(self, gpu_id=0):
        # os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        cur_path = os.path.realpath(__file__)
        cur_dir = os.path.dirname(cur_path)
        cur_list = os.listdir(cur_dir)

        model_file_name = "kddata_decoder2_cls12_s2_ep-0065.params"
        for cur_file in cur_list:
            cur_name_list = cur_file.split(".")
            if len(cur_name_list) == 2:
                if cur_name_list[1] == "params":
                    model_file_name = cur_file

        self.weights = os.path.join(cur_dir, model_file_name)

        colours = os.path.join(cur_dir, 'self_clr.png')
        self.label_colours = cv2.imread(colours).astype(np.uint8)

        self.ignore_color = (0, 0, 0)
        clr_dict = {l.name: l.color for l in self_server_label}
        for name, color in clr_dict.items():
            if name == u'Ignore':
                self.ignore_color = color
                break

        network, net_args, net_auxs = load_weights(self.weights)
        self.scale = 1.0 / 255
        self.mean = [0.2476, 0.2469, 0.250]
        self.std = [0.1147, 0.1056, 0.0966]
        context = [mx.gpu(gpu_id)]
        self.mod = mx.mod.Module(network, context=context)

        self.result_shape = [1024, 2448]
        self.input_shape = [512, 1224]
        # self.batch_data_shape = (1, 3, 1024, 2448)
        self.batch_data_shape = (1, 3, 512, 1224)
        provide_data = [("data", self.batch_data_shape)]
        self.batch_label_shape = (1, 3, 512, 612)
        provide_label = [("softmax_label", self.batch_label_shape)]
        self.mod.bind(provide_data,
                      provide_label,
                      for_training=False,
                      force_rebind=True)
        self.mod.init_params(arg_params=net_args, aux_params=net_auxs)
        self._flipping = False

        self.batch_data = [mx.nd.empty(info[1]) for info in provide_data]
        self.batch_label = [mx.nd.empty(info[1]) for info in provide_label]

        symbol.cfg['workspace'] = 1024
        symbol.cfg['bn_use_global_stats'] = True
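The MXNet examples (this one and the next two) expect load_weights(path) to return a (symbol, arg_params, aux_params) triple. A plausible sketch, assuming MXNet's standard checkpoint naming of <prefix>-symbol.json plus <prefix>-<epoch>.params, which the file name kddata_decoder2_cls12_s2_ep-0065.params suggests:

import mxnet as mx

def load_weights(weights_path):
    # Split '<prefix>-<epoch>.params' into prefix and epoch number, then
    # load the matching symbol definition and parameter files.
    prefix, epoch = weights_path[:-len('.params')].rsplit('-', 1)
    return mx.model.load_checkpoint(prefix, int(epoch))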
Example #5
    def __init__(self, gpu_id=0):
        # os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        cur_path = os.path.realpath(__file__)
        cur_dir = os.path.dirname(cur_path)

        # model_file_name = "densenet-kd-169-0-5000.params"
        self.weights = model_file

        network, net_args, net_auxs = load_weights(self.weights)
        context = [mx.gpu(gpu_id)]
        self.mod = mx.mod.Module(network, context=context)

        self.input_shape = [224, 224]
        self.mod.bind(for_training=False,
                      data_shapes=[('data', (1, 3, 224, 224))],
                      label_shapes=None)
        self.mod.init_params(arg_params=net_args, aux_params=net_auxs)
        self._flipping = False
Example #6
    def __init__(self, gpu_id=0):
        # os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        cur_path = os.path.realpath(__file__)
        cur_dir = os.path.dirname(cur_path)
        cur_list = os.listdir(cur_dir)

        model_file_name = model_file
        self.weights = os.path.join(cur_dir, model_file_name)

        network, net_args, net_auxs = load_weights(self.weights)
        context = [mx.gpu(gpu_id)]
        self.mod = mx.mod.Module(network, context=context)

        self.input_shape = [112, 112]
        self.mod.bind(for_training=False,
                      data_shapes=[('data', (1, 3, 112, 112))],
                      label_shapes=[('softmax_label', (1, ))])
        self.mod.init_params(arg_params=net_args, aux_params=net_auxs)
        self._flipping = False
Example #7
    def setup(self):
        # load data
        self.A,self.b,self.N,self.block_sizes,self.x_true,self.nz,self.f = \
                util.load_data(self.f)
        self.NT = self.N.T.tocsr()

        # Assumption: Gaussian noise is proportional to link volume
        if self.noise:
            self.b_true = self.b
            delta = np.random.normal(scale=self.b * self.noise)
            self.b = self.b + delta
        self.n = np.size(self.b)

        self.x0 = np.array(util.block_e(self.block_sizes - 1,
                                        self.block_sizes))
        # self.x0 = self.x_true

        logging.debug("Blocks: %s" % self.block_sizes.shape)

        self.options = {
            'max_iter': self.iter,
            'verbose': 1,
            'suff_dec': 0.003,  # FIXME unused
            'corrections': 500
        }  # FIXME unused

        self.proj = lambda x: simplex_projection(self.block_sizes - 1, x)
        # self.proj = lambda x: pysimplex_projection(self.block_sizes - 1,x)
        self.z0 = np.zeros(self.N.shape[1])

        if self.reg and self.weights == 'travel_time':
            self.D = util.load_weights('%s/%s/travel_times.pkl' %
                                       (c.DATA_DIR, c.ESTIMATION_INFO_DIR),
                                       self.block_sizes,
                                       weight=1)
            self.D2 = self.D * self.D
Example #8
import utilData
import util
from model import df_basedYolo
from test import evaluate

import torch
import tqdm
import time
import os

if __name__ == '__main__':
    image_size = 192
    device = torch.device('cuda')
    now = time.strftime('%y%m%d_%H%M%S', time.localtime(time.time()))
    model = df_basedYolo(image_size, device).cuda()
    util.load_weights(model)
    dataSet = utilData.ListDataset('./dog_dataset/dog_images/',
                                   './dog_dataset/dog_annotations/',
                                   True,
                                   img_size=image_size)
    dataLoader = torch.utils.data.DataLoader(dataSet,
                                             batch_size=64,
                                             shuffle=True,
                                             num_workers=0,
                                             pin_memory=True,
                                             collate_fn=dataSet.collate_fn)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.8)
Example #9
def main(args):
    data = DataLoader(pca=args.PCA, norm=args.norm)

    train_captions, train_feature, train_url, train_len = data.get_Training_data(
        args.training)
    test_captions, test_feature, test_url, test_len = data.get_val_data(
        args.testing)
    f, c, _ = data.eval_data()

    writer = SummaryWriter()

    encoder = Encoder(input_size=train_feature.shape[1],
                      hidden_size=args.hidden_size) \
        .to(device)

    decoder = Decoder(embed_size=args.embed_size,
                      hidden_size=args.hidden_size, attention_dim=args.attention_size,
                      vocab_size=len(data.word_to_idx)) \
        .to(device)

    if args.load_weight:
        load_weights(encoder, args.model_path + "Jul28_10-04-57encoder")
        load_weights(decoder, args.model_path + "Jul28_10-04-57decoder")

    for epoch in range(args.num_epochs):
        # Note: the criterion and optimizer are rebuilt every epoch, which
        # resets Adam's moment estimates and picks up the learning rate
        # decayed at the bottom of this loop.
        params = list(decoder.parameters()) + list(encoder.parameters())
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(params=params, lr=args.learning_rate)

        # if epoch >= 100:
        training_loss = step(encoder=encoder,
                             decoder=decoder,
                             criterion=criterion,
                             data=(train_captions, train_feature, train_len),
                             optimizer=optimizer)
        # if epoch + 1 % 5 == 0:
        #     a = evaluate(encoder, decoder, train_feature[0:2], train_captions[0:2], 5, data.word_to_idx)
        #     print("bleu4 ", a)

        with torch.no_grad():
            test_loss = step(encoder=encoder,
                             decoder=decoder,
                             criterion=criterion,
                             data=(test_captions, test_feature, test_len))

        # if epoch > 1:
        b1, b2, b3, b4 = evaluate(encoder, decoder, f, c, 5, data.word_to_idx,
                                  data.idx_to_word)
        writer.add_scalars('BLEU', {
            'BLEU1': b1,
            'BLEU2': b2,
            'BLEU3': b3,
            'BLEU4': b4
        }, epoch + 1)
        if (epoch % 30) == 0:
            save_weights(encoder, args.model_path + "encoder" + str(epoch))
            save_weights(decoder, args.model_path + "decoder" + str(epoch))

        writer.add_scalars('loss', {
            'train': training_loss,
            'val': test_loss
        }, epoch + 1)

        print(
            'Epoch [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}, TestLoss: {:.4f}, TestPerplexity: {:5.4f}'
            .format(epoch + 1, args.num_epochs, training_loss,
                    np.exp(training_loss), test_loss, np.exp(test_loss)))

        args.learning_rate *= 0.995
        if args.save_weight:
            save_weights(encoder, args.model_path + "encoder" + str(epoch))
            save_weights(decoder, args.model_path + "decoder" + str(epoch))

    if args.save_weight:
        save_weights(encoder, args.model_path + "encoder")
        save_weights(decoder, args.model_path + "decoder")

    if args.predict:

        sample = Sample(encoder=encoder, decoder=decoder, device=device)

        train_mask = [
            random.randint(0, train_captions.shape[0] - 1)
            for _ in range(args.numOfpredection)
        ]
        test_mask = [
            random.randint(0, test_captions.shape[0] - 1)
            for _ in range(args.numOfpredection)
        ]

        train_featur = torch.from_numpy(train_feature[train_mask])
        train_featur = train_featur.to(device)
        train_encoder_out = encoder(train_featur)

        test_featur = torch.from_numpy(test_feature[test_mask])
        test_featur = test_featur.to(device)
        test_encoder_out = encoder(test_featur)

        train_output = []
        test_output = []

        for i in range(len(test_mask)):
            print(i)
            pre = sample.caption_image_beam_search(
                train_encoder_out[i].reshape(1, args.embed_size),
                data.word_to_idx, 2)
            train_output.append(pre)
            pre = sample.caption_image_beam_search(
                test_encoder_out[i].reshape(1, args.embed_size),
                data.word_to_idx, 50)
            test_output.append(pre)

        print_output(output=test_output,
                     sample=0,
                     gt=test_captions[test_mask],
                     img=test_url[test_mask],
                     title="val",
                     show_image=args.show_image,
                     idx_to_word=data.idx_to_word)

        print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        print("")

        print_output(output=train_output,
                     sample=0,
                     gt=train_captions[train_mask],
                     img=train_url[train_mask],
                     title="traning",
                     show_image=args.show_image,
                     idx_to_word=data.idx_to_word)
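In the PyTorch examples, save_weights and load_weights presumably wrap state-dict serialization. A minimal sketch under that assumption (the device handling is a guess to keep loading device-agnostic; the variant in Example #8 that takes no path would just bake in a default):

import torch

def save_weights(model, path):
    # Persist only the parameters, not the whole module object.
    torch.save(model.state_dict(), path)

def load_weights(model, path):
    # Load a saved state_dict onto whichever device the model already lives on.
    device = next(model.parameters()).device
    model.load_state_dict(torch.load(path, map_location=device))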
Example #10
    # Build model
    print("Building model ...", flush=True)
    GNN = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        if load_checkpoints:
            load_weights(
                sess,
                './TSP-checkpoints-{loss_type}-{target_cost_dev}/epoch=100.0'.
                format(loss_type=loss_type, target_cost_dev=target_cost_dev))

        print('Performing Stochastic Gradient Descent on {} loss...'.format(
            loss_type))

        with open(
                'TSP-log-{loss_type}-{target_cost_dev}.dat'.format(
                    loss_type=loss_type, target_cost_dev=target_cost_dev),
                'w') as logfile:
            # Run for a number of epochs
            for epoch_i in 100 + np.arange(epochs_n):

                train_loader.reset()
                test_loader.reset()
Example #11
elisa_net = network.ElisaNet(args.c_feat).cuda()

params = [{'params': elisa_net.parameters()}]
solver = optim.Adam(params, lr=args.lr)

lmda = lambda x: 0.5  # TODO: can change this based on bad_epochs
scheduler = LS.MultiplicativeLR(solver, lr_lambda=lmda)

es = EarlyStopping(mode=args.es_mode,
                   min_delta=args.loss_delta,
                   patience=args.patience)

epoch = 0

if args.resume_epoch != 0:
    load_weights([elisa_net], solver, args.resume_epoch, args)
    epoch = args.resume_epoch
    solver = lr_resume(solver, args.lr_resume)
    print('Loaded weights from epoch {}'.format(args.resume_epoch))

while epoch < args.epochs and not args.eval:
    epoch += 1

    train_loss, _ = forward_pass(train_loader, elisa_net, solver, scheduler,
                                 'TRAIN', epoch, args)

    writer.add_scalars('Train Loss', {'ELISANET': np.mean(train_loss)}, epoch)

    if epoch % 50 == 0:
        valid_loss, valid_incorrect = forward_pass(valid_loader, elisa_net,
                                                   None, None, 'VALIDATE',
                                                   epoch, args)
Example #12
                g.write(sampled_translation + '\n')
                h.write('{0}/{1}\n'.format(count, sample))

        bar.update(l + 1)

    f.close()
    if sample:
        g.close()
        h.close()

    bar.finish()


if __name__ == "__main__":

    weightpath = 'trained-weights/eps-40k-ml10-3trans/trained-1-'
    parsepath = '../parses/dev/ml10-5trans/'
    # parsepath = '../parses/eps-40k-ml10-5trans/'
    # savepath = 'prediction/eps-40k-ml10-3trans/'
    savepath = 'prediction/nonempty/'
    # savepath = 'prediction/dev/ml10-3trans/'

    w = load_weights(weightpath)
    parses = [load_parses_separate(parsepath, k) for k in range(200)]
    predict(parses,
            w,
            k=1,
            savepath=savepath,
            scale_weights=False,
            sample=False)
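Here load_weights takes a path prefix such as 'trained-weights/eps-40k-ml10-3trans/trained-1-' and returns a plain feature-to-weight dictionary (Example #20 below iterates over w.items()). A minimal pickle-based sketch, in which the exact file suffix appended to the prefix is an assumption:

import pickle

def load_weights(path_prefix):
    # Read a {feature: weight} dict saved with pickle; 'weights.pkl' is an
    # assumed suffix, the callers only fix the prefix.
    with open(path_prefix + 'weights.pkl', 'rb') as f:
        return pickle.load(f)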
Example #13
    # Build model
    print('Building model ...', flush=True)
    GNN = build_network(d)

    # GPU use is allowed here; uncomment the next line to disallow it
    #config = tf.ConfigProto( device_count = {'GPU':0})
    with tf.Session() as sess:

        # Initialize global variables
        print('Initializing global variables ... ', flush=True)
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
        if load_checkpoints: 
            start_epoch = load_weights(sess,loadpath)
        else:
            start_epoch = 0
        #end

        if not os.path.isdir('training'):
            os.makedirs('training')
        #end
        if not os.path.isdir('training/dev={dev}'.format(dev=dev)):
            os.makedirs('training/dev={dev}'.format(dev=dev))
        #end

        with open('training/dev={dev}/log.dat'.format(dev=dev),'a') as logfile:
            # Run for a number of epochs
            for epoch_i in np.arange(start_epoch, start_epoch + epochs_n):
Example #14
    print("{timestamp}\t{memory}\tBuilding model ...".format(
        timestamp=timestamp(), memory=memory_usage()))
    GNN = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={"GPU": 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print(
            "{timestamp}\t{memory}\tInitializing global variables ... ".format(
                timestamp=timestamp(), memory=memory_usage()))
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        load_weights(sess, "./TSP-checkpoints-{}".format(loss_type))

        k = 3
        # Run k² tests
        n = 40
        bins = 10**6
        connectivity = 1

        # Normalize item number values to colormap
        norm = mcolors.Normalize(vmin=0, vmax=k**2)

        # Create metric TSP instance
        route = []
        while route == []:
            Ma, Mw, route, nodes = create_graph_metric(n, bins, connectivity)
        #end
Example #15
def read_command(argv):
    """
    Processes the command used to run pacman from the command line.
    """

    from optparse import OptionParser

    usage_str = """
    USAGE:      python checkers.py <options>
    EXAMPLES:   (1) python checkers.py
                    - starts a two player game
    """
    parser = OptionParser(usage_str)

    parser.add_option('-n',
                      '--numGames',
                      dest='num_games',
                      type='int',
                      help=default('the number of GAMES to play'),
                      metavar='GAMES',
                      default=1)

    # k for keyboard agent
    # ab for alphabeta agent
    # rl for reinforcement learning agent
    parser.add_option('-f',
                      '--agentFirstType',
                      dest='first_agent',
                      type='string',
                      help=default('the first agent of game'),
                      default='k')

    parser.add_option(
        '-l',
        '--agentFirstLearn',
        dest='first_agent_learn',
        type='int',
        help=default('the first agent of game is learning ' +
                     '(only applicable for learning agents_checker)'),
        default=1)

    parser.add_option('-s',
                      '--agentSecondType',
                      dest='second_agent',
                      type='string',
                      help=default('the second agent of game'),
                      default='k')

    parser.add_option(
        '-d',
        '--agentsecondLearn',
        dest='second_agent_learn',
        type='int',
        help=default('the second agent of game is learning ' +
                     '(only applicable for learning agents_checker)'),
        default=1)

    parser.add_option('-t',
                      '--turn',
                      dest='turn',
                      type='int',
                      help=default('which agent should take first turn'),
                      default=1)

    parser.add_option(
        '-r',
        '--updateParam',
        dest='update_param',
        type='int',
        help=default('update learning parameters as time passes'),
        default=0)

    parser.add_option('-q',
                      '--quiet',
                      dest='quiet',
                      type='int',
                      help=default('to be quiet or not'),
                      default=0)

    parser.add_option(
        '-x',
        '--firstAgentSave',
        dest='first_save',
        type='string',
        help=default('file to save for the first agent (used only ' +
                     'if this agent is a learning agent)'),
        default='./data/first_save')

    parser.add_option(
        '-y',
        '--secondAgentSave',
        dest='second_save',
        type='string',
        help=default('file to save for the second agent (used only ' +
                     'if this agent is a learning agent)'),
        default='./data/second_save')

    parser.add_option(
        '-z',
        '--firstAgentWeights',
        dest='first_weights',
        type='string',
        help=default('file to save weights for the first agent (used only ' +
                     'if this agent is a learning agent)'),
        default='./data/first_weights')

    parser.add_option(
        '-w',
        '--secondAgentWeights',
        dest='second_weights',
        type='string',
        help=default('file to save weights for the second agent (used only ' +
                     'if this agent is a learning agent)'),
        default='./data/second_weights')

    parser.add_option(
        '-u',
        '--firstResult',
        dest='first_results',
        type='string',
        help=default('file to save results for the first agent (used only ' +
                     'if this agent is a learning agent)'),
        default='./data/first_results')

    parser.add_option(
        '-v',
        '--secondResult',
        dest='second_results',
        type='string',
        help=default('file to save results for the second agent (used only ' +
                     'if this agent is a learning agent)'),
        default='./data/second_results')

    parser.add_option(
        '-g',
        '--firstMResult',
        dest='first_m_results',
        type='string',
        help=default('file to save num moves for the first agent (used only ' +
                     'if this agent is a learning agent)'),
        default='./data/first_m_results')

    parser.add_option(
        '-i',
        '--secondMResult',
        dest='second_m_results',
        type='string',
        help=default(
            'file to save num moves for the second agent (used only ' +
            'if this agent is a learning agent)'),
        default='./data/second_m_results')

    parser.add_option(
        '-p',
        '--playSelf',
        dest='play_against_self',
        type='int',
        help=default('whether first agent is to play against itself (only ' +
                     'for rl agents_checker)'),
        default=0)

    options, garbage = parser.parse_args(argv)

    if len(garbage) > 0:
        raise Exception('Command line input not understood ' + str(garbage))

    args = dict()

    args['num_games'] = options.num_games

    first_weights = load_weights(options.first_weights)
    args['first_agent'] = load_agent(options.first_agent,
                                     options.first_agent_learn, first_weights)

    second_weights = load_weights(options.second_weights)
    args['second_agent'] = load_agent(options.second_agent,
                                      options.second_agent_learn,
                                      second_weights)

    args['first_agent_turn'] = options.turn == 1

    args['update_param'] = options.update_param

    args['quiet'] = bool(options.quiet)

    args['first_file_name'] = options.first_save
    args['second_file_name'] = options.second_save

    args['first_weights_file_name'] = options.first_weights
    args['second_weights_file_name'] = options.second_weights

    args['first_result_file_name'] = options.first_results
    args['second_result_file_name'] = options.second_results

    args['first_m_result_file_name'] = options.first_m_results
    args['second_m_result_file_name'] = options.second_m_results

    args['play_against_self'] = options.play_against_self == 1

    return args
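In this game setup, load_weights runs before any training has happened, so a fresh agent plausibly needs a graceful default when its weights file does not exist yet. A sketch under that assumption:

import os
import pickle

def load_weights(path):
    # Return the agent's saved weight table, or None so that load_agent can
    # initialize the agent from scratch when nothing has been saved yet.
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as f:
        return pickle.load(f)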
Example #16
def draw_projections():
    
    d = 64
    n = 20
    bins = 10**6
    connectivity = 1

    # Build model
    print('Building model ...')
    model = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto( device_count = {'GPU':0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print('Initializing global variables ...')
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
        load_weights(sess,'./TSP-checkpoints-decision-0.05/epoch=100.0')

        target_cost_dev = +0.1

        # Create instance
        while True:
            instance = create_graph_metric(n,bins,connectivity)
            Ma,Mw,_,nodes = instance
            edges = list(zip(np.nonzero(Ma)[0],np.nonzero(Ma)[1]))
            edge_weights = [ Mw[i,j] for (i,j) in edges ]

            _,_, predictions = extract_embeddings_and_predictions(sess, model, instance, time_steps=32, target_cost_dev=target_cost_dev)
        
            if predictions[0] > 0.7:
                break
            #end
        #end

        # Define timesteps range
        timesteps = np.arange(20,32+1,4)
        # Init figure
        f, axes = plt.subplots(1, len(timesteps), dpi=200, sharex=True, sharey=True)
        # Iterate over timesteps
        for i,(t,ax) in enumerate(zip(timesteps,axes)):
            
            # Fetch embeddings and predictions
            vertex_embeddings, edge_embeddings, predictions = extract_embeddings_and_predictions(sess, model, instance, time_steps=t, target_cost_dev=target_cost_dev)

            # Compute accuracy
            acc = 100*( (target_cost_dev > 0) == (predictions[0] > 0.5) ).astype(float)

            # Obtain 2D vertex embedding projections
            vertex_projections = get_projections(vertex_embeddings,2)
            # Obtain 2D edge embedding projections
            edge_projections = get_projections(edge_embeddings,2)

            # Set subplot title
            ax.set_title('{t} steps\nacc:{acc:.0f}%\npred:{pred:.0f}%'.format(t=t,acc=acc,pred=100*predictions[0]))

            # Plot projections
            #ax.scatter(vertex_projections[:,0],vertex_projections[:,1], edgecolors='black')
            ax.scatter(edge_projections[:,0],edge_projections[:,1], edgecolors='black', c=edge_weights, cmap='jet')

        #end

        plt.show()
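get_projections reduces the d-dimensional embeddings to two dimensions for plotting. The reduction method is not shown; a PCA-based sketch that fits the call signature (PCA is an assumption, a t-SNE projection would fit equally well):

from sklearn.decomposition import PCA

def get_projections(embeddings, n_components):
    # Project a (num_items, d) embedding matrix down to n_components dims.
    return PCA(n_components=n_components).fit_transform(embeddings)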
Example #17
    # Make sure the output directories exist ('results' is written to below)
    if not os.path.exists('figures'): os.makedirs('figures')
    if not os.path.exists('results'): os.makedirs('results')

    # Build model
    print('Building model ...')
    model = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto(device_count={'GPU': 0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print('Initializing global variables ...')
        sess.run(tf.global_variables_initializer())

        # Restore saved weights
        load_weights(sess, '../training/dev=0.02/checkpoints/epoch=100')

        # Init instance loader
        loader = InstanceLoader('../instances/test')

        avg_deviation = 0

        with open('results/binary-search.dat', 'w') as out:
            # Get instances from instance loader
            for instance in loader.get_instances(len(loader.filenames)):

                # Get number of cities
                n = instance[0].shape[0]

                # Compute cost with binary search
                pred_cost, pred_prob, real_cost, iterations = get_cost(
Example #18
        print(aug_type, lam)
        folder_name = aug_type + '_' + str(lam)  # .replace('.','p')
        dirType = '/weights/'
        pathMkdir = os.getcwd() + dirType + folder_name + '/fake_samples/'
        # mkdir
        print(pathMkdir)
        try:
            os.mkdir(pathMkdir)
        except OSError:
            print("Creation of the directory %s failed" % pathMkdir)
        else:
            print("Successfully created the directory %s " % pathMkdir)
        #end mkdir

        path = os.getcwd() + dirType + folder_name
        netG = util.load_weights(netG, path=path+'/netG_epoch_23.pth', device=device)
        torch.manual_seed(42)
        for i in tqdm(range(20000)):
            fixed_noise2 = torch.randn(1, nz, 1, 1, device=device)
            fake         = netG(fixed_noise2)
            vutils.save_image(fake[0, :, :, :].detach(), path + '/fake_samples/fake_sample_%03d.png' % (i),
                              normalize=True)
print('finished creating folders')

# create Real Data folder
# for i, data in enumerate(dataloader, 0):
#    real = data[0]
#    for j in range(real.shape[0]):
#        vutils.save_image(real[j, :, :, :].detach(), 'output/real_samples/real_sample_%03d.png' % (i*real.shape[0]+j), normalize=True)

# %% prepare FID
Example #19
def draw_routes():
    
    d = 64
    n = 20
    bins = 10**6
    connectivity = 1

    # Build model
    print('Building model ...')
    model = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto( device_count = {'GPU':0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print('Initializing global variables ...')
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
        load_weights(sess,'./TSP-checkpoints-decision-0.05/epoch=200.0')

        target_cost_dev = +0.05

        # Create instance
        while True:
            instance = create_graph_metric(n,bins,connectivity)
            Ma,Mw,route,nodes = instance
            edges = list(zip(np.nonzero(Ma)[0],np.nonzero(Ma)[1]))
            edge_weights = [ Mw[i,j] for (i,j) in edges ]

            _,_, predictions = extract_embeddings_and_predictions(sess, model, instance, time_steps=32, target_cost_dev=target_cost_dev)
        
            if predictions[0] < 1:
                break
            #end
        #end

        # Define timesteps range
        timesteps = np.arange(2,32+1,10)
        # Init figure
        f, axes = plt.subplots(1, len(timesteps), dpi=200, sharex=True, sharey=True)
        # Iterate over timesteps
        for i,(t,ax) in enumerate(zip(timesteps,axes)):
            
            # Fetch embeddings and predictions
            vertex_embeddings, edge_embeddings, predictions = extract_embeddings_and_predictions(sess, model, instance, time_steps=t, target_cost_dev=target_cost_dev)

            # Obtain 2D vertex embedding projections
            vertex_projections = get_projections(vertex_embeddings,2)
            # Obtain 2D edge embedding projections
            edge_projections = get_projections(edge_embeddings,2)

            # Obtain 2-clustering
            clusters, cluster_centers = get_k_cluster(edge_embeddings,2)

            print('#1 Cluster size, #2 Cluster size: {},{}'.format(len(clusters[0]),len(clusters[1])))

            # Set subplot title
            ax.set_title('{t} steps\npred:{pred:.0f}%'.format(t=t,pred=100*predictions[0]))

            if len(clusters[0]) < len(clusters[1]):
                clusters = clusters[::-1]
                cluster_centers = cluster_centers[::-1]
            #end

            # Plot edges (only the smaller cluster, k=1, is drawn, in blue)
            for k in range(1,2):
                color = ['red','blue'][k]
                for e,(i,j) in enumerate(edges):
                    if e in clusters[k]:
                        x0,y0 = nodes[i,:]
                        x1,y1 = nodes[j,:]
                        ax.plot([x0,x1],[y0,y1], c=color, linewidth=0.5, zorder=1)
                    #end
                #end
            #end

            edge_in_route = []
            edge_not_in_route = []
            for e,(i,j) in enumerate(edges):
                # Consecutive route nodes (with wraparound) are the route's edges
                if (i, j) in zip(route, route[1:] + route[:1]):
                    edge_in_route.append(e)
                else:
                    edge_not_in_route.append(e)
                #end
            #end

            ax.scatter(nodes[:,0], nodes[:,1], c='white', edgecolors='black', zorder=2)
            
            #ax.scatter(edge_projections[edge_not_in_route,0],edge_projections[edge_not_in_route,1], c='red', edgecolors='black')
            #ax.scatter(edge_projections[edge_in_route,0],edge_projections[edge_in_route,1], c='blue', edgecolors='black')

            #ax.scatter(edge_projections[clusters[1],0],edge_projections[clusters[1],1], c='red', edgecolors='black')
            #ax.scatter(edge_projections[clusters[0],0],edge_projections[clusters[0],1], c='blue', edgecolors='black')

        #end

        plt.show()
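get_k_cluster splits the edge embeddings into k groups; the caller above unpacks (clusters, cluster_centers) and treats each cluster as a collection of edge indices. A k-means sketch consistent with that usage (k-means itself is an assumption):

import numpy as np
from sklearn.cluster import KMeans

def get_k_cluster(embeddings, k):
    # Cluster a (num_edges, d) embedding matrix and return, per cluster,
    # the indices of its member edges plus the k cluster centers.
    kmeans = KMeans(n_clusters=k).fit(embeddings)
    clusters = [np.flatnonzero(kmeans.labels_ == c) for c in range(k)]
    return clusters, kmeans.cluster_centers_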
Example #20
import pickle
from util import load_weights
from processing import load_parses_separate

savepath1 = 'trained-weights/nonempty/eps-40k-ml10-3trans/trained-1-'
savepath2 = 'trained-weights/eps-40k-ml10-3trans/trained-1-'

w1 = load_weights(savepath1)
w2 = load_weights(savepath2)


def check(w):
    for k, v in sorted(w.items(), key=lambda x: x[1], reverse=True):
        print('{}'.format(k).ljust(25) + '{}'.format(v))


def compare(w1, w2):
    for k, v in sorted(w1.items(), key=lambda x: x[1], reverse=True):
        print('{}'.format(k).ljust(25) + '{}'.format(v))
        print('\t{}'.format(k).ljust(25) + '{}'.format(w2[k]))


def check_difference(w1, w2):
    for k, v in sorted(w1.items(), key=lambda x: x[1], reverse=True):
        print('{}'.format(k).ljust(25) + '{}'.format(v - w2[k]))


check(w1)
#compare(w1, w2)
# check_difference(w1, w2)
Example #21
    loader = InstanceLoader(vars(args)['instances'])

    # Build model
    print('Building model ...', flush=True)
    GNN = build_network(d)

    # Disallow GPU use
    config = tf.ConfigProto( device_count = {'GPU':0})
    with tf.Session(config=config) as sess:

        # Initialize global variables
        print("Initializing global variables ... ", flush=True)
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
        load_weights(sess, vars(args)['checkpoint'])

        n_instances = len(loader.filenames)
        stats = { k:np.zeros(n_instances) for k in ['loss','acc','sat','pred','TP','FP','TN','FN'] }

        # Create batches of size 1
        for (batch_i, batch) in enumerate(loader.get_batches(1, target_cost_dev)):
            (stats['loss'][batch_i], stats['acc'][batch_i],
             stats['sat'][batch_i], stats['pred'][batch_i],
             stats['TP'][batch_i], stats['FP'][batch_i],
             stats['TN'][batch_i], stats['FN'][batch_i]) = run_batch(
                 sess, GNN, batch, batch_i, 0, time_steps,
                 train=False, verbose=True)
        #end

        # Summarize
        summarize_epoch(0,stats['loss'],stats['acc'],stats['sat'],stats['pred'],train=False)

    #end

#end
Example #22
    print('Building model ...', flush=True)
    GNN = build_network(d)

    # GPU use is allowed here; memory is allocated on demand
    config = tf.ConfigProto()
    #config.gpu_options.per_process_gpu_memory_fraction = 0.5
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:

        # Initialize global variables
        print('Initializing global variables ... ', flush=True)
        sess.run( tf.global_variables_initializer() )

        # Restore saved weights
        if load_checkpoints:
            load_weights(sess, loadpath)
        
        if vars(args)['train']:
            ptrain = 'training_' + seed
            if not os.path.isdir(ptrain):
                os.makedirs(ptrain)
            with open(ptrain + '/log.dat', 'w') as logfile:
                # Run for a number of epochs
                for epoch_i in np.arange(epochs_n):

                    train_loader.reset()

                    train_stats = {
                        k: np.zeros(train_params['batches_per_epoch'])
                        for k in ['loss', 'acc', 'sat', 'pred',
                                  'TP', 'FP', 'TN', 'FN']
                    }

                    print('Training model...', flush=True)