Example #1
# Standard-library imports required by this example
import tracemalloc
from csv import DictWriter
from glob import glob
from time import process_time

# parse_args, read_data, plot_path and the solver classes (BruteForce,
# AStarAlgorithm, GreedyAlgorithm) are project-local helpers assumed to be
# importable here.

def main():
    directory, make_plot, force_yes = parse_args()
    results = {
        'a-star': [],
        'bf': [],
        'greedy': [],
    }

    files = glob(f'{directory}/*.in')
    files.sort()
    experiment_start = process_time()
    tracemalloc.start()

    for i, file in enumerate(files):
        case_start = process_time()
        nodes = read_data(file)
        n = len(nodes)

        # Brute Force
        solver = BruteForce(nodes)
        path, cost, time = solver.run()
        results['bf'].append({'n': n, 'time': time})

        # A*
        solver = AStarAlgorithm(nodes)
        path, cost, time = solver.run()
        results['a-star'].append({'n': n, 'time': time})

        # Greedy
        solver = GreedyAlgorithm(nodes)
        path, cost, time = solver.run()
        results['greedy'].append({'n': n, 'time': time})

        case_end = process_time()
        case_time = case_end - case_start

        print(
            f'Case {i + 1}/{len(files)}: Finished {n} nodes in {case_time:.4f} seconds'
        )
        if not force_yes and case_time > 60 and i + 1 < len(files):
            response = input('Continue? [Y/n]: ')
            if response.strip().lower() == 'n':
                break

    experiment_stop = process_time()
    experiment_time = experiment_stop - experiment_start
    print(f'Finished experiments in {experiment_time:.2f} seconds')
    _, peak = tracemalloc.get_traced_memory()
    print(f'Peak memory usage: {(peak / 10**6):.1f}MB')

    for algo, rows in results.items():
        # newline='' avoids blank lines in the CSV output on Windows
        with open(f'results_{algo}.csv', 'w', newline='') as file:
            writer = DictWriter(file, ['n', 'time'])
            writer.writeheader()
            writer.writerows(rows)
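
# parse_args is not shown in this example. A minimal argparse-based sketch of
# what the main() above expects is given below; the flag names are
# assumptions, not the project's actual CLI.
def parse_args():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('directory', help='directory containing *.in test cases')
    parser.add_argument('--plot', dest='make_plot', action='store_true')
    parser.add_argument('-y', '--yes', dest='force_yes', action='store_true',
                        help='keep running without prompting after slow cases')
    args = parser.parse_args()
    return args.directory, args.make_plot, args.force_yes
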
def single_experiment(input_file, algo):
    tracemalloc.start()

    nodes = read_data(input_file)
    n = len(nodes)

    if algo == 'bf':
        solver = BruteForce(nodes)
    elif algo == 'a-star':
        solver = AStarAlgorithm(nodes)
    elif algo == 'greedy':
        solver = GreedyAlgorithm(nodes)
    else:
        raise ValueError(f'Algorithm {algo} not implemented!')

    _, cost, time = solver.run()

    _, peak = tracemalloc.get_traced_memory()

    return n, time, cost, peak
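
# Note: a second main() follows below; it appears to be the entry point of a
# companion single-run script from the same project and expects its own
# parse_args() returning (file, algorithm, text_only).
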
def main():
    file, algorithm, text_only = parse_args()
    nodes = read_data(file)

    if algorithm == 'bf':
        solver = BruteForce(nodes)
    elif algorithm == 'a-star':
        solver = AStarAlgorithm(nodes)
    elif algorithm == 'greedy':
        solver = GreedyAlgorithm(nodes)
    else:
        raise ValueError(f'Algorithm {algorithm} not implemented!')

    path, cost, time = solver.run()

    labels = [node.label for node in path]
    print(' '.join(labels))
    print(cost)
    print(time)

    if not text_only:
        plot_path(path)
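
# A minimal usage sketch for single_experiment() above, comparing all three
# solvers on one input file; the path is hypothetical:
def compare_solvers(input_file='cases/example.in'):
    for algo in ('bf', 'a-star', 'greedy'):
        n, time, cost, peak = single_experiment(input_file, algo)
        print(f'{algo}: n={n}, time={time:.4f}s, cost={cost}, '
              f'peak memory={peak / 10**6:.1f}MB')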
Example #4
# Third-party imports required by this example
import torch
import torch.nn as nn
import torch.optim as optim

# FLAGS, read_data, embed, cbow, the agent/model classes and the DataLoader
# machinery are assumed to come from the surrounding project.

def run():
    # Get Description Vectors

    ## Training
    descr_train, word_dict_train, dict_size_train, label_id_to_idx_train, idx_to_label_train = read_data(
        FLAGS.descr_train)

    def map_labels_train(x):
        return label_id_to_idx_train.get(x)

    word_dict_train = embed(word_dict_train, FLAGS.word_embedding_path)
    descr_train = cbow(descr_train, word_dict_train)
    desc_train = torch.cat(
        [descr_train[i]["cbow"].view(1, -1) for i in descr_train.keys()], 0)
    desc_train_set = torch.cat([
        descr_train[i]["set"].view(-1, FLAGS.word_embedding_dim)
        for i in descr_train.keys()
    ], 0)
    desc_train_set_lens = [
        len(descr_train[i]["desc"]) for i in descr_train.keys()
    ]
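
    # As implied by the code above (a hedged reading): desc_train stacks one
    # CBOW vector per description, presumably a sum or average of its word
    # embeddings, into a [num_descriptions, word_embedding_dim] matrix;
    # desc_train_set concatenates the raw per-word vectors of all
    # descriptions, and desc_train_set_lens records each description's word
    # count so the flat matrix can be split back per description.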

    ## Development
    descr_dev, word_dict_dev, dict_size_dev, label_id_to_idx_dev, idx_to_label_dev = read_data(
        FLAGS.descr_dev)

    def map_labels_dev(x):
        return label_id_to_idx_dev.get(x)

    word_dict_dev = embed(word_dict_dev, FLAGS.word_embedding_path)
    descr_dev = cbow(descr_dev, word_dict_dev)
    desc_dev = torch.cat(
        [descr_dev[i]["cbow"].view(1, -1) for i in descr_dev.keys()], 0)
    desc_dev_set = torch.cat([
        descr_dev[i]["set"].view(-1, FLAGS.word_embedding_dim)
        for i in descr_dev.keys()
    ], 0)
    desc_dev_set_lens = [len(descr_dev[i]["desc"]) for i in descr_dev.keys()]

    desc_dev_dict = dict(desc=desc_dev,
                         desc_set=desc_dev_set,
                         desc_set_lens=desc_dev_set_lens)

    # Initialize Models
    config = AgentConfig()
    exchange_model = ExchangeModel(config)
    sender = Sender(config)
    receiver = Receiver(config)
    baseline_sender = Baseline(config, 'sender')
    baseline_receiver = Baseline(config, 'receiver')
    exchange = Exchange(exchange_model, sender, receiver, baseline_sender,
                        baseline_receiver, desc_train)
    trainer = Trainer(exchange)

    # Initialize Optimizer
    optimizer = optim.RMSprop(exchange.parameters(), lr=FLAGS.learning_rate)

    # Static Variables
    img_feat = "avgpool_512"
    topk = 5

    accs = []

    # Run Epochs
    for epoch in range(FLAGS.max_epoch):
        source = "directory"
        path = FLAGS.train_data
        loader_config = DirectoryLoaderConfig.build_with("resnet18")
        loader_config.map_labels = map_labels_train
        loader_config.batch_size = FLAGS.batch_size
        loader_config.shuffle = True
        loader_config.cuda = FLAGS.cuda

        dataloader = DataLoader.build_with(path, source,
                                           loader_config).iterator()

        for i_batch, batch in enumerate(dataloader):

            data = batch[img_feat]
            target = batch["target"]
            trainer_loss = trainer.run_step(data, target)
            loss = trainer.calculate_loss(trainer_loss)

            # Update Parameters
            optimizer.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(exchange.parameters(), max_norm=1.)
            optimizer.step()

            y = trainer_loss.y
            topk_indices = y.sort()[1][:, -topk:]
            target_broadcast = target.view(-1, 1).expand(FLAGS.batch_size, topk)
            accuracy = ((topk_indices == target_broadcast).sum().float()
                        / float(FLAGS.batch_size))
            # Report a sliding-window mean over the five most recent batches
            accs.append(accuracy)
            if len(accs) > 5:
                accs.pop(0)
            mean_acc = sum(accs) / len(accs)

            print("Epoch = {}; Batch = {}; Accuracy = {}".format(
                epoch, i_batch, mean_acc))
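
# The in-loop accuracy above sorts the full score matrix just to take the
# top-k indices. An equivalent and clearer formulation uses torch.topk; a
# minimal self-contained sketch, assuming scores has shape [batch, classes]:
def topk_accuracy(scores, target, k=5):
    # indices of the k highest-scoring classes for each example
    topk_indices = scores.topk(k, dim=1).indices
    # a prediction counts as correct if the true class is among the top k
    hits = (topk_indices == target.view(-1, 1)).any(dim=1)
    return hits.float().mean()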
# Standard-library and third-party imports required by this snippet
import sys

import numpy as np
import torch

import misc  # project-local helper module

## The name of the dataset is taken as argument from the command line
dataset = str(sys.argv[1])

## The variable index_experiment can be either 0 or 1; it is taken from the
## command line and selects one of the two configurations used to generate
## the datasets in the experiments
index_experiment = int(sys.argv[3])

## The variable fold can be any integer between 0 and 4
## It selects one of five possible folds that were used to validate the competing methods
fold = int(sys.argv[2])
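
## Example invocation (the script name is hypothetical); note the argument
## order: argv[1] = dataset, argv[2] = fold, argv[3] = experiment index
##     python run_experiment.py Lotka-Volterra 2 1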

## Build the root of the filenames for the given dataset and experiment;
## the fold is selected inside misc.read_data below
filename_root = 'FOLDS/' + dataset + '_EXPERIMENT_' + str(index_experiment)

## Read data
x, y, coefs, initial_cond, tmin, tmax = misc.read_data(filename_root, fold)

## Initialize the logs of the GP hyperparameters sigma (marginal standard
## deviation) and l (length-scale) from the observed data ranges
sigma = float(np.log(torch.max(y).data.numpy() - torch.min(y).data.numpy()))
l = float(np.log(torch.max(x).data.numpy() - torch.min(x).data.numpy()))
print("Initial covariance parameters of GPs: \nlog-length-scale", l,
      "\nlog-marginal standard deviation", sigma, "\n")

## Initialize the log-standard deviation of the noise on observations
noise = float(
    np.log((torch.max(y).data.numpy() - torch.min(y).data.numpy()) / 1e5))
print("Initial log-standard deviation noise on observations:", noise)

## Set the right number of parameters for a given ODE
if dataset == 'Lotka-Volterra':
    N_ODE_parameters = 4