Exemplo n.º 1
0
def run_report_results(args, probe, dataset, model, loss, reporter, regimen):
    """Load a trained probe and report results on the dev and test sets.

    Args:
      args: nested config dict; args['reporting']['root'] and
        args['probe']['params_path'] locate the saved probe weights.
      probe: torch.nn.Module whose parameters are loaded from disk.
      dataset: provides get_dev_dataloader() and get_test_dataloader().
      model: representation model forwarded to regimen.predict.
      loss: unused here; kept for interface consistency with callers.
      reporter: callable building a report from predictions.
      regimen: provides predict(probe, model, dataloader).

    Returns:
      (dev_report, test_report) as produced by `reporter`.
    """
    probe_params_path = os.path.join(args['reporting']['root'],
                                     args['probe']['params_path'])
    probe.load_state_dict(torch.load(probe_params_path))
    probe.eval()  # evaluation mode: no dropout/batchnorm updates

    dev_dataloader = dataset.get_dev_dataloader()
    dev_predictions = regimen.predict(probe, model, dev_dataloader)
    dev_report = reporter(dev_predictions, dev_dataloader, 'dev', probe=probe)

    test_dataloader = dataset.get_test_dataloader()
    test_predictions = regimen.predict(probe, model, test_dataloader)
    test_report = reporter(test_predictions,
                           test_dataloader,
                           'test',
                           probe=probe)
    return dev_report, test_report
Exemplo n.º 2
0
def run_report_results(args, probe, dataset, model, loss, reporter, regimen):
  """Evaluate a trained structural probe on the dev split and report results.

  Loads the probe parameters from the path configured in ``args``, puts the
  probe in eval mode, runs prediction over the dev dataloader, and hands the
  predictions to ``reporter``. Running on the test set requires a simple
  code change.
  """
  params_file = os.path.join(args['reporting']['root'],
                             args['probe']['params_path'])
  saved_state = torch.load(params_file)
  probe.load_state_dict(saved_state)
  probe.eval()

  loader = dataset.get_dev_dataloader()
  predictions = regimen.predict(probe, model, loader)
  reporter(predictions, loader, 'dev')
Exemplo n.º 3
0
def run_report_results(args, probe, dataset, model, loss, reporter, regimen):
    """Report dev-set results, tolerating a missing trained probe.

    If the saved probe parameters cannot be found, prints a notice and
    passes ``None`` predictions to ``reporter`` instead of raising.
    """
    weights_path = os.path.join(args['reporting']['root'],
                                args['probe']['params_path'])
    loader = dataset.get_dev_dataloader()
    try:
        state = torch.load(weights_path)
        probe.load_state_dict(state)
        probe.eval()
        predictions = regimen.predict(probe, model, loader)
    except FileNotFoundError:
        print("No trained probe found.")
        predictions = None

    reporter(predictions, probe, model, loader, 'dev')
def evaluate_vectors(args, probe, dataset, model, results_dir, output_name):
    probe_params_path = os.path.join(results_dir, args['probe']['params_path'])
    probe.load_state_dict(torch.load(probe_params_path))
    probe.eval()
    print(probe.proj)

    dataloader = dataset.get_dev_dataloader()

    projections = load_projected_representations(probe, model, dataloader)

    relations_to_projections = defaultdict(list)
    relations_to_sentences = defaultdict(list)
    relations_to_idxs = defaultdict(list)
    relations_to_words = defaultdict(list)
    for projection_batch, (data_batch, label_batch, length_batch,
                           observation_batch) in zip(projections, dataloader):
        for projection, label, length, (observation,
                                        _) in zip(projection_batch,
                                                  label_batch, length_batch,
                                                  observation_batch):
            for idx, word in enumerate(observation.sentence):
                if observation.head_indices[idx] == '0':
                    pass  # head word
                else:
                    head_index = int(observation.head_indices[idx])
                    proj_diff = projection[idx] - projection[head_index - 1]
                    relation = observation.governance_relations[idx]
                    relations_to_projections[relation].append(proj_diff)
                    relations_to_sentences[relation].append(" ".join(
                        observation.sentence))
                    relations_to_idxs[relation].append(idx)
                    relations_to_words[relation].append(word)

    relations_to_diffs = {}
    all_relations = []
    all_sentences = []
    all_idxs = []
    all_words = []
    y_list = []
    for relation in relations_to_projections:
        diffs = torch.FloatTensor(relations_to_projections[relation])
        # compute the SVD
        u, s, v = diffs.svd()
        average_diff = torch.mean(diffs, 0)
        relations_to_diffs[relation] = average_diff
        all_relations += relations_to_projections[relation]
        all_sentences += relations_to_sentences[relation]
        all_idxs += relations_to_idxs[relation]
        all_words += relations_to_words[relation]
        y_list += [relation] * len(relations_to_projections[relation])
    allDiff = torch.FloatTensor(all_relations)
    # print(y_list)
    sentences_idxs_words = np.array([all_sentences, all_idxs, all_words])
    if len(sys.argv) > 2:
        np.save(
            '/sailhome/ethanchi/structural-probes/relationOutputs/{}.npy'.
            format(output_name), allDiff.numpy())
        np.save(
            '/sailhome/ethanchi/structural-probes/relationOutputs/{}Y.npy'.
            format(output_name), np.array(y_list))
        np.save(
            '/sailhome/ethanchi/structural-probes/relationOutputs/{}-data.npy'.
            format(output_name), sentences_idxs_words)
    allDiff = torch.mean(allDiff, 0)
    cos = torch.nn.CosineSimilarity(dim=0, eps=1e-10)