# ContextTracker, utils, sen_model, salience, and GLOVE_PATH come from the
# surrounding project and are assumed to be importable here.
def context_history(self):
    # Encode the latest utterance and classify its emotion (7 classes) and
    # dialogue act (4 classes).
    net_input = ContextTracker.sentence_processor()

    # create_emb_layer() returns (emb_layer, num_embeddings, embedding_dim).
    net_utilities = utils.create_emb_layer()
    emotion_net = sen_model.Net(net_utilities, num_classes=7)
    act_net = sen_model.Net(net_utilities, num_classes=4)

    emotion = emotion_net(net_input)
    act = act_net(net_input)

    # Pick the most salient word for the predicted emotion/act pair.
    sal_finder = salience.SalienceFinder(e_tag=emotion, a_tag=act)
    google_response_string = ""
    max_key, max_val = sal_finder.process(google_response_string)
    word_2_vec = ContextTracker.load_glove_model(GLOVE_PATH)

    if not self.context_hist:
        self.context_hist.append(max_key)
    else:
        # A cosine distance above 0.5 means the topic has drifted away from
        # the most recent context entry.
        if ContextTracker.cosine_distance(self.context_hist[-1], max_key, word_2_vec) > 0.5:
            flag = False
            # Reuse the first earlier entry that is still close to the new topic.
            for x in self.context_hist[:-1]:
                if ContextTracker.cosine_distance(x, max_key, word_2_vec) < 0.5:
                    self.context_hist.append(x)
                    flag = True
                    break
            if not flag:
                self.context_hist.append(max_key)
        else:
            # Topic unchanged: carry the current context entry forward.
            self.context_hist.append(self.context_hist[-1])

    # Cap the history at 50 entries, keeping the most recent ones.
    if len(self.context_hist) > 50:
        self.context_hist = self.context_hist[25:]
    return self.context_hist, emotion, act, word_2_vec
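

# ContextTracker.cosine_distance and load_glove_model are not shown in this
# snippet. A minimal sketch of the distance helper, assuming word_2_vec maps
# each word to a NumPy GloVe vector (only the call signature is taken from
# the code above; the body is an assumption):
import numpy as np

def cosine_distance(word_a, word_b, word_2_vec):
    vec_a, vec_b = word_2_vec[word_a], word_2_vec[word_b]
    # 1 - cosine similarity: ~0 for near-identical words, ~1 for unrelated
    # ones, so the > 0.5 check above fires on topic drift.
    sim = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))
    return 1.0 - sim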


import numpy as np
import torch
import torch.nn as nn
# model and LABELS_PATH below come from the surrounding project.


def create_emb_layer():
    """
    This function creates the embedding layer from the dataset weight_matrix
    :return: emb_layer ---> the embedding layer
            num_embeddings ---> the length of the dataset vocabulary
            embedding_dim ---> the dimension of each embedding
    """
    # weights_matrix = preprocess_dataset()
    weights_matrix = np.loadtxt('testing_weights_matrix.txt', dtype=float)  # embeddings are real-valued; loading as int would truncate them
    num_embeddings = weights_matrix.shape[0]
    embedding_dim = weights_matrix.shape[1]
    weights_matrix = torch.from_numpy(weights_matrix).float()
    emb_layer = nn.Embedding(num_embeddings, embedding_dim)
    # torch.save(weights_matrix, "embed_weights.pth")
    # weights_matrix = torch.load("embed_weights.pth")  # <--- Only uncomment when required
    emb_layer.weight = nn.Parameter(weights_matrix)
    # Freeze only after assigning the weights: wrapping a tensor in
    # nn.Parameter resets requires_grad to True, so the original order
    # silently undid the freeze.
    emb_layer.weight.requires_grad = False  # <--- Set to True if the embeddings should be fine-tuned
    return emb_layer, num_embeddings, embedding_dim
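

# PyTorch also ships nn.Embedding.from_pretrained, which copies the weights
# and freezes them (freeze=True) in one call. An equivalent helper (the name
# create_emb_layer_pretrained is illustrative, not from the original code):
def create_emb_layer_pretrained(weights_matrix):
    # weights_matrix: float tensor of shape (num_embeddings, embedding_dim)
    emb_layer = nn.Embedding.from_pretrained(weights_matrix, freeze=True)
    return emb_layer, weights_matrix.shape[0], weights_matrix.shape[1]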


device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = model.Net(create_emb_layer(), 7).to(device)
# map_location keeps the checkpoint loadable on CPU-only machines.
checkpoint = torch.load('./last_model_state.pth', map_location=device)
net.load_state_dict(checkpoint['model_state_dict'])

loaded_dataset = np.load("processed_test_dataset.npy")
with open(LABELS_PATH, "r") as label_file:  # context manager closes the file handle
    loaded_labels = label_file.readlines()

net.eval()
with torch.no_grad():
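    # The loop body is cut off in this excerpt. An assumed sketch of a typical
    # inference pass (not the original code) could be:
    inputs = torch.from_numpy(loaded_dataset).long().to(device)  # embedding lookups need LongTensor indices
    outputs = net(inputs)
    predictions = torch.argmax(outputs, dim=1)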