示例#1
0
    def pre_train(self, dataset, **kwargs):
        """Pre-train the network on the full training dataset.

        Flattens all subjects' tasks into one pool of (task, response)
        pairs, onehot-encodes them, and hands the resulting tensors to
        ``train_network``.

        Parameters
        ----------
        dataset : list
            CCOBRA training data: a list of subjects, each a list of
            task dictionaries with 'item' and 'response' entries.
        """
        inputs = []
        targets = []

        for subject in dataset:
            for task in subject:
                syl = ccobra.syllogistic.Syllogism(task['item'])

                # Onehot-encode the task and the corresponding response
                inputs.append(
                    onehot.onehot_syllogism_content(syl.encoded_task))
                targets.append(onehot.onehot_response(
                    syl.encode_response(task['response'])))

        # Stack into float tensors and store on the model
        self.train_x = torch.from_numpy(np.array(inputs)).float()
        self.train_y = torch.from_numpy(np.array(targets)).float()

        self.train_network(
            self.train_x, self.train_y, self.batch_size, self.n_epochs,
            verbose=True)
示例#2
0
    def pre_train(self, dataset):
        """Pre-train the network on the full training dataset.

        Keeps the per-subject structure: encodes each subject's task
        sequence separately so the resulting tensors have shape
        (n_subjects, n_tasks, encoding_dim), then delegates to
        ``train_network``.

        Parameters
        ----------
        dataset : list
            CCOBRA training data: a list of subjects, each a list of
            task dictionaries with 'item' and 'response' entries.
        """
        all_inputs = []
        all_targets = []

        for subject in dataset:
            # Encode this subject's sequence of tasks and responses
            subj_inputs = []
            subj_targets = []

            for task in subject:
                syl = ccobra.syllogistic.Syllogism(task['item'])

                subj_inputs.append(
                    onehot.onehot_syllogism_content(syl.encoded_task))
                subj_targets.append(onehot.onehot_response(
                    syl.encode_response(task['response'])))

            all_inputs.append(subj_inputs)
            all_targets.append(subj_targets)

        # Stack into float tensors and store on the model
        self.train_x = torch.from_numpy(np.array(all_inputs)).float()
        self.train_y = torch.from_numpy(np.array(all_targets)).float()

        self.train_network(self.train_x, self.train_y, self.n_epochs, verbose=True)
示例#3
0
    def predict(self, item, **kwargs):
        """Predict a response for the given task item.

        Encodes the task, feeds it through the recurrent network
        (updating the persistent hidden state), and decodes the
        highest-scoring response.

        Parameters
        ----------
        item : ccobra.Item
            Task item to produce a prediction for.

        Returns
        -------
        list
            Decoded response in CCOBRA's list representation.
        """
        syllogism = ccobra.syllogistic.Syllogism(item)

        # Obtain the prediction. NOTE: renamed the local from `input`
        # to `net_input` — the original shadowed the builtin `input`.
        net_input = torch.from_numpy(
            onehot.onehot_syllogism_content(syllogism.encoded_task)).float()
        # view(1, 1, -1): (seq_len=1, batch=1, features) for the RNN;
        # the hidden state is carried across calls.
        output, self.hidden = self.net(net_input.view(1, 1, -1), self.hidden)

        # Return the response with maximum activation
        response = output.argmax().item()
        enc_response = ccobra.syllogistic.RESPONSES[response]
        return syllogism.decode_response(enc_response)
示例#4
0
    def predict(self, item, **kwargs):
        """Predict a response for the given task item.

        Encodes the task, runs a forward pass through the network, and
        decodes the highest-scoring response.

        Parameters
        ----------
        item : ccobra.Item
            Task item to produce a prediction for.

        Returns
        -------
        list
            Decoded response in CCOBRA's list representation.
        """
        # Encode the task
        syllogism = ccobra.syllogistic.Syllogism(item)

        # Query the model with the onehot-encoded task
        task_tensor = torch.from_numpy(
            onehot.onehot_syllogism_content(syllogism.encoded_task)).float()
        output = self.net(task_tensor)

        # Return the response with maximum activation
        best_idx = output.argmax().item()
        return syllogism.decode_response(ccobra.syllogistic.RESPONSES[best_idx])
示例#5
0
    def adapt(self, item, truth, **kwargs):
        """Adapt the network to a single observed (task, response) pair.

        Onehot-encodes the task and the true response and runs a short
        training pass (batch size 1, ``n_epochs_adapt`` epochs) on just
        this example.

        Parameters
        ----------
        item : ccobra.Item
            Task item the participant responded to.
        truth : list
            The participant's actual response.
        """
        syllogism = ccobra.syllogistic.Syllogism(item)

        # Onehot encodings of task and true response
        task_enc = onehot.onehot_syllogism_content(syllogism.encoded_task)
        resp_enc = onehot.onehot_response(syllogism.encode_response(truth))

        # Reshape to single-example batches
        adapt_x = torch.from_numpy(task_enc.reshape(1, -1)).float()
        adapt_y = torch.from_numpy(resp_enc.reshape(1, -1)).float()

        self.train_network(adapt_x, adapt_y, 1, self.n_epochs_adapt,
                           verbose=False)