Example #1
 def score(self, epoch_counter):
     """
         Scoring on the test set.
         """
     print("\n\nModel evaluation.\n")
     start_time = time.time()
     self.model.eval()
     self.scores = []
     self.ground_truth = []
     for test_graph_pair in tqdm(self.testing_graphs):
         data = process_pair(test_graph_pair)
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         print("\n" + str(test_graph_pair) + "- " + "Similarity/Target: " +
               str(prediction) + " / " + str(target))
         self.scores.append(calculate_loss(prediction, target))
     print("--- %s seconds ---" % (time.time() - start_time))
     model_error = self.print_evaluation()
     print('\n\n >>>>>>>>>>>>>>>>>>\t' + str(model_error) + '\n')
     with open("./outputFiles/test/test_error_graph.txt",
               "a") as test_error_writer:
         test_error_writer.write(
             str(epoch_counter) + ',' + str(model_error) + '\n')
     # the with-statement already closes the file; no explicit close() is needed
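The calculate_loss helper itself is not shown on this page. For the graph-similarity snippets above (SimGNN/funcGNN style), prediction and target are scalar similarity scores derived from the normalized GED, so a plausible helper is a simple squared error on that scalar. The sketch below is an assumption for illustration, not the code used by these repositories; Example #6's -math.log calls suggest some variants compare negative logs instead of the raw similarities.

import torch

def calculate_loss(prediction, target):
    # Hypothetical sketch: squared error between the predicted and the
    # ground-truth similarity score. Both inputs may be scalar tensors
    # or plain floats.
    if torch.is_tensor(prediction):
        prediction = prediction.detach().item()
    if torch.is_tensor(target):
        target = target.detach().item()
    return (prediction - target) ** 2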
Example #2
    def score(self):
        """
        Scoring on the test set.
        """
        print("\n\nModel evaluation.\n")
        self.model.eval()
        self.scores = []
        self.ground_truth = []
        preds = []
        truths = []
        for graph_pair in tqdm(self.testing_graphs):
            data = process_pair(graph_pair)
            self.ground_truth.append(calculate_normalized_ged(data))
            data = self.transfer_to_torch(data)
            target = data["target"]
            prediction = self.model(data)
            self.scores.append(calculate_loss(prediction, target))

            preds.append(0 if prediction.item() < 0.5 else 1)
            truths.append(int(data["target"].item()))
        self.print_evaluation()
        plot_confusion_matrix(np.array(truths),
                              np.array(preds),
                              np.array([0, 1]),
                              title='SimGNN confusion matrix')
Example #3
    def load_model_parallel(self, pairList):

        #print("Parallel Execution of funcGNN from pretrained model")
        #self.model = funcGNN(self.args, self.number_of_labels)
        #self.model.load_state_dict(torch.load('./model_state.pth'))
        #self.model.eval()
        data = process_pair(pairList)
        self.ground_truth.append(calculate_normalized_ged(data))
        data = self.transfer_to_torch(data)
        target = data["target"]
        prediction = self.model(data)
        #print("\n" + str(pairList) + "- " + "Similarity/Target: " + str(prediction) + " / " + str(target))
        self.scores.append(calculate_loss(prediction, target))
Example #4
 def score(self):
     """
     Scoring on the test set.
     """
     print("\n\nModel evaluation.\n")
     self.scores = []
     self.ground_truth = []
     for graph_pair in tqdm(self.testing_graphs):
         data = process_pair(graph_pair)
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]  # ground-truth similarity comes from the data dict, not a second forward pass
         prediction = self.model(data)
         self.scores.append(calculate_loss(prediction, target))
     self.print_evaluation()
Example #5
 def load_model(self):
     print("\nSerial Execution of funcGNN from pretrained model")
     start_time = time.time()
     self.model = funcGNN(self.args, self.number_of_labels)
     self.model.load_state_dict(torch.load('./model_state.pth'))
     self.model.eval()
     self.scores = []
     self.ground_truth = []
     for test_graph_pair in tqdm(self.random_graphs):
         data = process_pair(test_graph_pair)
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         #print("\n" + str(test_graph_pair) + "- " + "Similarity/Target: " + str(prediction) + " / " + str(target))
         # record one loss value per pair; a second append here would
         # double-count each pair in the reported average
         self.scores.append(calculate_loss(prediction, target))
     print("--- %s seconds ---" % (time.time() - start_time))
Example #6
 def score(self):
     """
     Scoring on the test set.
     """
     print("\n\nModel evaluation.\n")
     if path.isfile(self.args.saved_model):
         self.model = torch.load(self.args.saved_model)
         self.model.eval()  # eval() is equivalent to train(False)
     self.scores = []
     self.ground_truth = []
     for graph_pair in tqdm(self.testing_graphs):
         data = process_pair(graph_pair)
         data_org = data.copy()
         self.ground_truth.append(calculate_normalized_ged(data))
         data = self.transfer_to_torch(data)
         target = data["target"]
         prediction = self.model(data)
         print(f'Test target: {data_org["ged"]} '
               f'{reverse_normalized_ged(-math.log(target), data_org)}, '
               f'prediction: {reverse_normalized_ged(-math.log(prediction), data_org)}')
         self.scores.append(calculate_loss(prediction, target))
     self.print_evaluation()
Example #7
    def get_next_batch(self, env):

        for _ in range(C.NUM_EPOCHS):

            epoch_logits = torch.empty(size=(0, self.action_space_size),
                                       device=self.DEVICE)
            epoch_weighted_log_probs = torch.empty(size=(0, ),
                                                   dtype=torch.float,
                                                   device=self.DEVICE)
            total_rewards = deque([], maxlen=C.BATCH_SIZE_PER_THREAD)

            episode_counter = 0

            while episode_counter < C.BATCH_SIZE_PER_THREAD:

                episode_counter += 1

                # reset the environment to a random initial state every epoch
                state = env.reset()

                # initialize the episode arrays
                episode_actions = torch.empty(size=(0, ),
                                              dtype=torch.long,
                                              device=self.DEVICE)
                episode_logits = torch.empty(size=(0, C.action_space_size),
                                             device=self.DEVICE)
                average_rewards = np.empty(shape=(0, ), dtype=np.float64)
                episode_rewards = np.empty(shape=(0, ), dtype=np.float64)

                # episode loop
                for step_index in range(0, C.max_simulation_length):

                    # get the action logits from the agent - (preferences)
                    action_logits = self.m(
                        torch.tensor(state).float().unsqueeze(dim=0).to(
                            self.DEVICE))

                    # append the logits to the episode logits list
                    episode_logits = torch.cat((episode_logits, action_logits),
                                               dim=0)

                    # sample an action according to the action distribution
                    action = Categorical(logits=action_logits).sample()

                    # append the action to the episode action list to obtain the trajectory
                    # we need to store the actions and logits so we could calculate the gradient of the performance
                    episode_actions = torch.cat((episode_actions, action),
                                                dim=0)

                    # take the chosen action, observe the reward and the next state
                    state, reward, done, _ = env.step(
                        action=action.cpu().item())

                    # append the reward to the rewards pool that we collect during the episode
                    # we need the rewards so we can calculate the weights for the policy gradient
                    # and the baseline of average
                    episode_rewards = np.concatenate(
                        (episode_rewards, np.array([reward])), axis=0)

                    # here the average reward is state specific
                    average_rewards = np.concatenate(
                        (average_rewards,
                         np.expand_dims(np.mean(episode_rewards), axis=0)),
                        axis=0)

                # turn the rewards we accumulated during the episode into the rewards-to-go:
                # earlier actions are responsible for more rewards than the later taken actions
                discounted_rewards_to_go = utils.get_discounted_rewards(
                    rewards=episode_rewards, gamma=C.GAMMA)
                discounted_rewards_to_go -= average_rewards  # baseline - state specific average

                # calculate the sum of the rewards for the running average metric
                sum_of_rewards = np.sum(episode_rewards)

                # after each episode append the sum of total rewards to the deque
                total_rewards.append(sum_of_rewards)

                # set the mask for the actions taken in the episode
                mask = one_hot(episode_actions,
                               num_classes=C.action_space_size)

                # calculate the log-probabilities of the taken actions
                # mask is needed to filter out log-probabilities of not related logits
                episode_log_probs = torch.sum(
                    mask.float() * log_softmax(episode_logits, dim=1), dim=1)

                # weight the episode log-probabilities by the rewards-to-go
                episode_weighted_log_probs = episode_log_probs * \
                    torch.tensor(discounted_rewards_to_go).float().to(self.DEVICE)

                # calculate the sum over trajectory of the weighted log-probabilities
                sum_weighted_log_probs = torch.sum(
                    episode_weighted_log_probs).unsqueeze(dim=0)

                # append the weighted log-probabilities of actions
                epoch_weighted_log_probs = torch.cat(
                    (epoch_weighted_log_probs, sum_weighted_log_probs), dim=0)

                # append the logits - needed for the entropy bonus calculation
                epoch_logits = torch.cat((epoch_logits, episode_logits), dim=0)

                # calculate the loss
                loss, entropy = utils.calculate_loss(
                    C.BETA,
                    epoch_logits=epoch_logits,
                    weighted_log_probs=epoch_weighted_log_probs)

            yield loss, total_rewards
Example #8
    C=C,
    tol=1e-10,
    fit_intercept=fit_intercept,
    random_state=24,
    verbose=False,
    max_iter=1000)
model.fit(X_train, Y_train)
orig_theta = model.coef_.reshape(-1)
orig_bias = model.intercept_
# calculate the clean model acc
train_acc = model.score(X_train,Y_train)
test_acc = model.score(X_test,Y_test)

print(orig_theta.shape,X_train.shape,orig_bias.shape,Y_train.shape)
margins = Y_train*(X_train.dot(orig_theta) + orig_bias)
train_loss, train_err = calculate_loss(margins)
clean_total_train_loss = train_loss*X_train.shape[0]
print("train_acc:{}, train loss:{}, train error:{}".format(train_acc,clean_total_train_loss,train_err))
# test margins and loss
margins = Y_test*(X_test.dot(orig_theta) + orig_bias)
test_loss, test_err = calculate_loss(margins)
clean_total_test_loss = test_loss*X_test.shape[0]
print("test_acc:{}, test loss:{}, test error:{}".format(test_acc,clean_total_test_loss,test_err))

if not subpop:
    ym = (-1)*Y_test
    if args.dataset in ['mnist_17','dogfish']:
        # loss percentile and repeated points, used for indiscriminative attack
        if args.dataset == 'mnist_17':
            valid_theta_errs = [0.05,0.1,0.15]
        else:
Example #9
def run(net,
        loader,
        optimizer,
        scheduler,
        tracker,
        train=False,
        has_answers=True,
        prefix='',
        epoch=0):
    """ Run an epoch over the given loader """
    assert not (train and not has_answers)
    if train:
        net.train()
        tracker_class, tracker_params = tracker.MovingMeanMonitor, {
            'momentum': 0.99
        }
    else:
        net.eval()
        tracker_class, tracker_params = tracker.MeanMonitor, {}
        answ = []
        idxs = []
        accs = []

    # set learning rate decay policy
    if epoch < len(config.gradual_warmup_steps
                   ) and config.schedule_method == 'warm_up':
        utils.set_lr(optimizer, config.gradual_warmup_steps[epoch])
        utils.print_lr(optimizer, prefix, epoch)
    elif (epoch in config.lr_decay_epochs
          ) and train and config.schedule_method == 'warm_up':
        utils.decay_lr(optimizer, config.lr_decay_rate)
        utils.print_lr(optimizer, prefix, epoch)
    else:
        utils.print_lr(optimizer, prefix, epoch)

    loader = tqdm(loader, desc='{} E{:03d}'.format(prefix, epoch), ncols=0)
    loss_tracker = tracker.track('{}_loss'.format(prefix),
                                 tracker_class(**tracker_params))
    acc_tracker = tracker.track('{}_acc'.format(prefix),
                                tracker_class(**tracker_params))

    for v, q, a, b, idx, v_mask, q_mask, q_len in loader:
        var_params = {
            'requires_grad': False,
        }
        v = Variable(v.cuda(), **var_params)
        q = Variable(q.cuda(), **var_params)
        a = Variable(a.cuda(), **var_params)
        b = Variable(b.cuda(), **var_params)
        q_len = Variable(q_len.cuda(), **var_params)
        v_mask = Variable(v_mask.cuda(), **var_params)
        q_mask = Variable(q_mask.cuda(), **var_params)

        out = net(v, b, q, v_mask, q_mask, q_len)
        if has_answers:
            answer = utils.process_answer(a)
            loss = utils.calculate_loss(answer, out, method=config.loss_method)
            acc = utils.batch_accuracy(out, answer).data.cpu()

        if train:
            optimizer.zero_grad()
            loss.backward()
            # print gradient
            if config.print_gradient:
                utils.print_grad([(n, p) for n, p in net.named_parameters()
                                  if p.grad is not None])
            # clip gradient
            clip_grad_norm_(net.parameters(), config.clip_value)
            optimizer.step()
            if (config.schedule_method == 'batch_decay'):
                scheduler.step()
        else:
            # store information about evaluation of this minibatch
            _, answer = out.data.cpu().max(dim=1)
            answ.append(answer.view(-1))
            if has_answers:
                accs.append(acc.view(-1))
            idxs.append(idx.view(-1).clone())

        if has_answers:
            loss_tracker.append(loss.item())
            acc_tracker.append(acc.mean())
            fmt = '{:.4f}'.format
            loader.set_postfix(loss=fmt(loss_tracker.mean.value),
                               acc=fmt(acc_tracker.mean.value))

    if not train:
        answ = list(torch.cat(answ, dim=0))
        if has_answers:
            accs = list(torch.cat(accs, dim=0))
        else:
            accs = []
        idxs = list(torch.cat(idxs, dim=0))
        #print('{} E{:03d}:'.format(prefix, epoch), ' Total num: ', len(accs))
        #print('{} E{:03d}:'.format(prefix, epoch), ' Average Score: ', float(sum(accs) / len(accs)))
        return answ, accs, idxs
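In the VQA-style run loops on this page, utils.calculate_loss(answer, out, method=config.loss_method) scores raw answer logits against soft answer targets produced by utils.process_answer. The helper below is only a sketch of what such a function could look like under that assumption; the method names and shapes are hypothetical, not the repository's actual API.

import torch.nn.functional as F

def calculate_loss(answer, out, method='binary_cross_entropy_with_logits'):
    # Hypothetical sketch: `out` is a [batch, num_answers] tensor of raw
    # logits and `answer` holds soft target scores of the same shape.
    if method == 'binary_cross_entropy_with_logits':
        return F.binary_cross_entropy_with_logits(out, answer)
    if method == 'soft_cross_entropy':
        # cross-entropy against a soft target distribution
        return -(answer * F.log_softmax(out, dim=1)).sum(dim=1).mean()
    raise ValueError('unknown loss method: {}'.format(method))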
Example #10
    def solve_environment(self):
        """
            The main interface for the Policy Gradient solver
        """
        # init the episode and the epoch
        episode = 0
        epoch = 0

        # init the epoch arrays
        # used for entropy calculation
        epoch_logits = torch.empty(size=(0, self.env.action_space.n), device=self.DEVICE)
        epoch_weighted_log_probs = torch.empty(size=(0,), dtype=torch.float, device=self.DEVICE)

        while epoch < self.NUM_EPOCHS:

            # play an episode of the environment
            (episode_weighted_log_prob_trajectory,
             episode_logits,
             sum_of_episode_rewards,
             episode) = play_episode.play_episode(
                 self.env, self.DEVICE, self.action_space_size,
                 self.agent, self.GAMMA, episode)

            # after each episode append the sum of total rewards to the deque
            self.total_rewards.append(sum_of_episode_rewards)

            # append the weighted log-probabilities of actions
            epoch_weighted_log_probs = torch.cat((epoch_weighted_log_probs, episode_weighted_log_prob_trajectory),
                                                 dim=0)

            # append the logits - needed for the entropy bonus calculation
            epoch_logits = torch.cat((epoch_logits, episode_logits), dim=0)

            # if the epoch is over - we have epoch trajectories to perform the policy gradient
            if episode > self.BATCH_SIZE:

                # reset the rendering flag
                self.finished_rendering_this_epoch = False

                # reset the episode count
                episode = 0

                # increment the epoch
                epoch += 1 

                # calculate the loss
                loss, entropy = utils.calculate_loss(self.BETA, epoch_logits=epoch_logits,
                                                    weighted_log_probs=epoch_weighted_log_probs)

                # zero the gradient
                self.adam.zero_grad()

                # backprop
                loss.backward()

                # update the parameters
                self.adam.step()

                # feedback
                #print("\r", f"Epoch: {epoch}, Avg Return per Epoch: {np.mean(self.total_rewards):.3f}",
                #      end="",
                #      flush=True)
                print("\r", f"Epoch: {epoch}, Avg Return per Epoch: {np.mean(self.total_rewards):.3f}",
                      flush=True)

                self.writer.add_scalar(tag='Average Return over 100 episodes',
                                       scalar_value=np.mean(self.total_rewards),
                                       global_step=epoch)

                self.writer.add_scalar(tag='Entropy',
                                       scalar_value=entropy,
                                       global_step=epoch)

                # reset the epoch arrays
                # used for entropy calculation
                epoch_logits = torch.empty(size=(0, self.env.action_space.n), device=self.DEVICE)
                epoch_weighted_log_probs = torch.empty(size=(0,), dtype=torch.float, device=self.DEVICE)

                # check if solved
                if np.mean(self.total_rewards) > 200:
                    print('\nSolved!')
                    print("\nSaving the final neural network")

                    #save the neural network in the end
                    cwd = os.getcwd()
                    parameter_file = 'cartpole_nn_trained_model.pt'
                    cwd = os.path.join(cwd, parameter_file)
                    torch.save(self.agent.state_dict(), cwd)

                    break

        # close the environment
        self.env.close()

        # close the writer
        self.writer.close()
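Examples #7 and #10 both call utils.calculate_loss(BETA, epoch_logits=..., weighted_log_probs=...) to form the policy-gradient objective. A plausible implementation, sketched here as an assumption rather than the project's own code, is the negative mean of the reward-weighted log-probabilities with an entropy bonus scaled by beta:

import torch
from torch.nn.functional import log_softmax, softmax

def calculate_loss(beta, epoch_logits, weighted_log_probs):
    # Hypothetical sketch of the REINFORCE objective with an entropy bonus.
    # policy-gradient term: maximize the reward-weighted log-probabilities
    policy_loss = -1 * torch.mean(weighted_log_probs)

    # mean entropy of the action distribution over all steps in the epoch
    p = softmax(epoch_logits, dim=1)
    log_p = log_softmax(epoch_logits, dim=1)
    entropy = -1 * torch.mean(torch.sum(p * log_p, dim=1), dim=0)

    # subtracting beta * entropy encourages exploration
    loss = policy_loss - beta * entropy
    return loss, entropy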
Example #11
def run(net,
        loader,
        optimizer,
        scheduler,
        tracker,
        train=False,
        prefix='',
        epoch=0):
    """ Run an epoch over the given loader """
    if train:
        net.train()
        # tracker_class, tracker_params = tracker.MovingMeanMonitor, {'momentum': 0.99}
    else:
        net.eval()

    tracker_class, tracker_params = tracker.MeanMonitor, {}

    # set learning rate decay policy
    if epoch < len(config.gradual_warmup_steps
                   ) and config.schedule_method == 'warm_up':
        utils.set_lr(optimizer, config.gradual_warmup_steps[epoch])

    elif (epoch in config.lr_decay_epochs
          ) and train and config.schedule_method == 'warm_up':
        utils.decay_lr(optimizer, config.lr_decay_rate)

    utils.print_lr(optimizer, prefix, epoch)

    loader = tqdm(loader, desc='{} E{:03d}'.format(prefix, epoch), ncols=0)
    loss_tracker = tracker.track('{}_loss'.format(prefix),
                                 tracker_class(**tracker_params))
    acc_tracker = tracker.track('{}_acc'.format(prefix),
                                tracker_class(**tracker_params))

    for v, q, a, b, idx, v_mask, q_mask, q_len in loader:
        var_params = {
            'requires_grad': False,
        }
        v = Variable(v.cuda(), **var_params)
        q = Variable(q.cuda(), **var_params)
        a = Variable(a.cuda(), **var_params)
        b = Variable(b.cuda(), **var_params)
        q_len = Variable(q_len.cuda(), **var_params)
        v_mask = Variable(v_mask.cuda(), **var_params)
        q_mask = Variable(q_mask.cuda(), **var_params)

        out = net(v, b, q, v_mask, q_mask, q_len)

        answer = utils.process_answer(a)
        loss = utils.calculate_loss(answer, out, method=config.loss_method)
        acc = utils.batch_accuracy(out, answer).data.cpu()

        if train:
            optimizer.zero_grad()
            loss.backward()
            # clip gradient
            clip_grad_norm_(net.parameters(), config.clip_value)
            optimizer.step()
            if config.schedule_method == 'batch_decay':
                scheduler.step()

        loss_tracker.append(loss.item())
        acc_tracker.append(acc.mean())
        fmt = '{:.4f}'.format
        loader.set_postfix(loss=fmt(loss_tracker.mean.value),
                           acc=fmt(acc_tracker.mean.value))

    return acc_tracker.mean.value, loss_tracker.mean.value
Example #12
            os.path.join(full_train_path, arg.slot_file),
            os.path.join(full_train_path, arg.intent_file), in_vocab,
            slot_vocab, intent_vocab)

    input_data, slots, slot_weights, seq_length, intent, _, _, _ \
        = data_processor.get_batch(arg.batch_size)

    input_data, slots, slot_weights, seq_length, intent \
        = conv_to_tensor(input_data, slots, slot_weights, seq_length, intent)

    # model predict
    slot_outputs, intent_output = model.forward(input_data=input_data)

    # loss
    slot_loss, intent_loss, total_loss = calculate_loss(
        slots, slot_outputs, slot_weights, slot_loss_fn, intent_output, intent,
        intent_loss_fn, arg.batch_size)

    # f1 metrics
    p_i, c_i, s_o, c_o, i_w = create_f1_lists(slot_outputs, intent_output,
                                              intent, slots, input_data,
                                              seq_length, slot_vocab, in_vocab)
    pred_intents.extend(p_i)
    correct_intents.extend(c_i)
    slot_outputs_pred.extend(s_o)
    correct_slots.extend(c_o)
    input_words.extend(i_w)

    # backprop
    optim.zero_grad()
    total_loss.backward()
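Example #12's calculate_loss combines a token-level slot-filling loss, masked by slot_weights, with an utterance-level intent loss. The sketch below assumes both components are cross-entropy terms and that slot_loss_fn was built with reduction='none'; the shapes and the returned triple are assumptions for illustration only.

import torch

def calculate_loss(slots, slot_outputs, slot_weights, slot_loss_fn,
                   intent_output, intent, intent_loss_fn, batch_size):
    # Hypothetical sketch of a joint slot-filling / intent-detection loss.
    # slot_outputs: [batch, seq_len, num_slots] logits; slots: [batch, seq_len] ids;
    # slot_weights: [batch, seq_len] mask that zeroes out padding positions.
    num_slots = slot_outputs.size(-1)
    per_token = slot_loss_fn(slot_outputs.reshape(-1, num_slots),
                             slots.reshape(-1))
    slot_loss = torch.sum(per_token * slot_weights.reshape(-1)) / batch_size

    # utterance-level intent loss over [batch, num_intents] logits
    intent_loss = intent_loss_fn(intent_output, intent)

    return slot_loss, intent_loss, slot_loss + intent_loss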
Example #13

    ### Define loss function ### 
    criterion = nn.CrossEntropyLoss()

    ### Testing ### 
    history_training = {}

    history_training = test_model(model=model, hist=history_training, criterion=criterion, 
                                  dataloaders=dataloaders, dataset_sizes=dataset_sizes, half=HALF)

    # Give classification report
    classif_report(hist=history_training, list_names=LIST_CLASSES)

    # Give log losses
    calculate_loss(hist=history_training, list_names=LIST_CLASSES)

    ### Define Pruning class ###
    if MODEL_TYPE in ["ModelC", "ModelD"]:
        pruner_class = PrunerResnet(model=model, norme=NORME, amount=PRUNING_PERCENT, dimension=DIMENSION)

    elif MODEL_TYPE in ["ModelA", "ModelB"]:
        nb_layers = 4 if MODEL_TYPE == "ModelA" else 5 # "ModelA": VGG 4 layers | "ModelB": VGG 5 layers
        pruner_class = PrunerVGG(model=model, nb_layers=nb_layers, norme=NORME, amount=PRUNING_PERCENT, dimension=DIMENSION)

    else:
        raise ValueError('Pruning class not defined for this model.')


    print("\n== INFO ==\n")