    @staticmethod
    def load_model(config):
        """Load a KGE model from a config dict or from a path inside its output directory."""
        if isinstance(config, str):
            # 'config' is a path inside the output directory; load the stored configuration
            model_dir = os.path.abspath(os.path.join(config, os.pardir))
            with open(os.path.join(model_dir, "configuration.json")) as f:
                config = json.load(f)
        else:
            # 'config' is an already-loaded dict; there are no stored weights to restore
            model_dir = None

        kge_model = pipeline.get_kge_model(config=config)

        if model_dir is not None:
            path_to_model = os.path.join(model_dir, "trained_model.pkl")
            kge_model.load_state_dict(torch.load(path_to_model))
        return kge_model
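    # Hypothetical usage sketch (the path and dict are assumptions for illustration only):
    #     model = load_model("/path/to/output_dir/configuration.json")  # restores trained weights
    #     model = load_model(config_dict)  # builds a fresh, untrained model from a config dict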
    def train(self, pos_triples: np.ndarray, neg_triples: np.ndarray):
        """Train the configured KGE model on positive and negative triples."""
        all_triples = np.concatenate((pos_triples, neg_triples))
        # Derive the entity and relation counts from all (head, relation, tail) triples
        self.config[keenConst.NUM_ENTITIES] = len(
            np.unique(np.concatenate((all_triples[:, 0], all_triples[:, 2]))))
        self.config[keenConst.NUM_RELATIONS] = len(np.unique(all_triples[:, 1]))

        # Prepare the model
        self.kge_model = pipeline.get_kge_model(config=self.config)
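        # nn.Module.to() moves parameters in place; the reassignment below just keeps the reference explicit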
        self.kge_model = self.kge_model.to(self.device)

        optimizer = optim.SGD(self.kge_model.parameters(),
                              lr=self.config[keenConst.LEARNING_RATE])
        loss_per_epoch = []
        num_pos_examples = pos_triples.shape[0]
        num_neg_examples = neg_triples.shape[0]

        # Train the model
        tqdmbuffer = TqdmBuffer() if globConst.GUI_MODE else None
        for _ in tqdm(range(self.config[keenConst.NUM_EPOCHS]),
                      file=tqdmbuffer):
            # Create shuffled batches for this epoch
            indices_pos = np.arange(num_pos_examples)
            np.random.shuffle(indices_pos)
            pos_triples = pos_triples[indices_pos]
            pos_batches = self._split_list_in_batches(
                input_list=pos_triples, batch_size=self.config["batch_size"])
            indices_neg = np.arange(num_neg_examples)
            np.random.shuffle(indices_neg)
            neg_triples = neg_triples[indices_neg]
            neg_batches = self._split_list_in_batches(
                input_list=neg_triples, batch_size=self.config["batch_size"])
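            # Shuffling positives and negatives independently re-pairs them every epoch;
            # note that zip() below stops at the shorter of the two batch lists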
            current_epoch_loss = 0.0
            for pos_batch, neg_batch in tqdm(zip(pos_batches, neg_batches),
                                             total=len(neg_batches)):
                current_batch_size = len(pos_batch)

                if len(pos_batch) != len(neg_batch):
                    raise RuntimeError("Positive and negative batches must have the same length")

                pos_batch_tensor = torch.tensor(pos_batch,
                                                dtype=torch.long,
                                                device=self.device)
                neg_batch_tensor = torch.tensor(neg_batch,
                                                dtype=torch.long,
                                                device=self.device)
                # torch *accumulates* gradients, so zero them out before each new batch
                optimizer.zero_grad()
                loss = self.kge_model(pos_batch_tensor, neg_batch_tensor)
                current_epoch_loss += loss.item() * current_batch_size

                loss.backward()
                optimizer.step()

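            # Each batch loss was weighted by batch size above, so dividing by the number
            # of positive triples yields the average loss per triple for this epoch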
            loss_per_epoch.append(current_epoch_loss / len(pos_triples))

        # Prepare results for output
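        # detach().cpu().numpy() yields plain NumPy arrays decoupled from the autograd graph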
        entity_to_embedding = {
            entity_id: embedding.detach().cpu().numpy()
            for entity_id, embedding in enumerate(
                self.kge_model.entity_embeddings.weight)
        }
        relation_to_embedding = {
            relation_id: embedding.detach().cpu().numpy()
            for relation_id, embedding in enumerate(
                self.kge_model.relation_embeddings.weight)
        }

        results = {
            "trained_model": self.kge_model,
            "loss_per_epoch": loss_per_epoch,
            "entity_to_embedding": entity_to_embedding,
            "relation_to_embedding": relation_to_embedding,
            "config": self.config,
        }
        self.output_results(results)

        return self.kge_model
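    # Hypothetical usage sketch (toy ids; `trainer` stands for an instance of this class):
    #     pos = np.array([[0, 0, 1], [1, 0, 2], [2, 1, 0]])
    #     neg = np.array([[0, 0, 2], [1, 0, 0], [2, 1, 1]])
    #     trained_model = trainer.train(pos, neg)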