def evaluate_model(model, model_dir, model_name, data_loaders, metrics, transformers_dict, prot_desc_dict,
                       tasks, view, sim_data_node=None):
        # load saved model and put in evaluation mode
        model.load_state_dict(load_model(model_dir, model_name))
        model.eval()

        print("Model evaluation...")
        start = time.time()
        n_epochs = 1

        # sub-nodes of sim data resource
        # loss_lst = []
        # train_loss_node = DataNode(label="training_loss", data=loss_lst)
        metrics_dict = {}
        metrics_node = DataNode(label="validation_metrics", data=metrics_dict)
        scores_lst = []
        scores_node = DataNode(label="validation_score", data=scores_lst)
        predicted_vals = []
        true_vals = []
        model_preds_node = DataNode(label="model_predictions", data={"y": true_vals,
                                                                     "y_pred": predicted_vals})

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [metrics_node, scores_node, model_preds_node]

        # Main evaluation loop
        for epoch in range(n_epochs):

            for phase in ["test"]:
                # Iterate through mini-batches
                i = 0
                for batch in tqdm(data_loaders[phase]):
                    batch_size, data = batch_collator(batch, prot_desc_dict, spec=view)
                    # Data
                    if view == "gconv":
                        # graph data structure is: [(compound data, batch_size), protein_data]
                        X = ((data[view][0][0], batch_size), data[view][0][1])
                    else:
                        X = data[view][0]
                    y_true = data[view][1]
                    w = data[view][2].reshape(-1, 1).astype(float)

                    # forward propagation
                    with torch.set_grad_enabled(False):
                        y_predicted = model(X)

                        # apply transformers
                        predicted_vals.extend(undo_transforms(y_predicted.cpu().detach().numpy(),
                                                              transformers_dict[view]).squeeze().tolist())
                        true_vals.extend(undo_transforms(y_true,
                                                         transformers_dict[view]).astype(float).squeeze().tolist())

                    eval_dict = {}
                    score = CPIBaseline.evaluate(eval_dict, y_true, y_predicted, w, metrics, tasks,
                                                 transformers_dict[view])

                    # for sim data resource
                    scores_lst.append(score)
                    for m in eval_dict:
                        if m in metrics_dict:
                            metrics_dict[m].append(eval_dict[m])
                        else:
                            metrics_dict[m] = [eval_dict[m]]

                    print("\nEpoch={}/{}, batch={}/{}, "
                          "evaluation results= {}, score={}".format(epoch + 1, n_epochs, i + 1,
                                                                    len(data_loaders[phase]),
                                                                    eval_dict, score))

                    i += 1
                # End of mini-batch iterations.

        duration = time.time() - start
        print('\nModel evaluation duration: {:.0f}m {:.0f}s'.format(duration // 60, duration % 60))
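A minimal sketch of how this evaluate_model routine might be called; every name below (paths, loaders, metric list, dictionaries) is a placeholder for objects the surrounding jova pipeline is assumed to have built already.

# Hypothetical call site -- all names are placeholders, not part of the listing above.
eval_node = DataNode(label="evaluation", data=[])
evaluate_model(model,
               model_dir="./models",
               model_name="cpi_baseline.mod",
               data_loaders={"train": train_loader, "val": val_loader, "test": test_loader},
               metrics=metrics,                      # list of Metric objects
               transformers_dict=transformers_dict,  # per-view data transformers
               prot_desc_dict=prot_desc_dict,        # protein descriptors
               tasks=tasks,
               view="gconv",                         # or e.g. "ecfp8"
               sim_data_node=eval_node)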
Example #2
    def evaluate_model(model,
                       model_dir,
                       model_name,
                       data_loaders,
                       metrics,
                       transformers_dict,
                       prot_desc_dict,
                       tasks,
                       sim_data_node=None):
        # load saved model and put in evaluation mode
        model.load_state_dict(
            load_model(model_dir,
                       model_name,
                       dvc=torch.device(f'cuda:{dvc_id}')))
        model.eval()

        print("Model evaluation...")
        start = time.time()
        n_epochs = 1

        # sub-nodes of sim data resource
        # loss_lst = []
        # train_loss_node = DataNode(label="training_loss", data=loss_lst)
        metrics_dict = {}
        metrics_node = DataNode(label="validation_metrics", data=metrics_dict)
        scores_lst = []
        scores_node = DataNode(label="validation_score", data=scores_lst)
        predicted_vals = []
        true_vals = []
        model_preds_node = DataNode(label="model_predictions",
                                    data={
                                        "y": true_vals,
                                        "y_pred": predicted_vals
                                    })

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [metrics_node, scores_node, model_preds_node]

        # Main evaluation loop
        for epoch in range(n_epochs):

            for phase in ["test"]:
                # Iterate through mini-batches
                i = 0
                for batch in tqdm(data_loaders[phase]):
                    batch_size, data = batch_collator(batch,
                                                      prot_desc_dict,
                                                      spec={
                                                          "gconv": True,
                                                          "ecfp8": True
                                                      })

                    # organize the data for each view.
                    Xs = {}
                    Ys = {}
                    Ws = {}
                    for view_name in data:
                        view_data = data[view_name]
                        if view_name == "gconv":
                            x = ((view_data[0][0], batch_size),
                                 view_data[0][1])
                            Xs["gconv"] = x
                        else:
                            Xs[view_name] = view_data[0]
                        Ys[view_name] = np.array([k for k in view_data[1]],
                                                 dtype=float)
                        Ws[view_name] = np.array([k for k in view_data[2]],
                                                 dtype=float)

                    # forward propagation
                    with torch.set_grad_enabled(False):
                        Ys = {k: Ys[k].astype(float) for k in Ys}
                        # Ensure corresponding pairs
                        for idx in range(1, len(Ys.values())):
                            assert (list(Ys.values())[idx - 1] == list(
                                Ys.values())[idx]).all()

                        y_true = Ys[list(Xs.keys())[0]]
                        w = Ws[list(Xs.keys())[0]]
                        weights = torch.from_numpy(w).float()
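                        # Joint input for the integrated model:
                        # ((gconv compound batch, ecfp8 features), protein data).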
                        X = ((Xs["gconv"][0], Xs["ecfp8"][0]), Xs["gconv"][1])
                        y_predicted = model(X)
                        if cuda:
                            weights = weights.cuda()
                        y_predicted = y_predicted * weights

                        # apply transformers
                        predicted_vals.extend(
                            undo_transforms(
                                y_predicted.cpu().detach().numpy(),
                                transformers_dict["gconv"]).squeeze().tolist())
                        true_vals.extend(
                            undo_transforms(y_true,
                                            transformers_dict["gconv"]).astype(
                                                float).squeeze().tolist())

                    eval_dict = {}
                    score = IntegratedViewDTI.evaluate(
                        eval_dict, y_true, y_predicted, w, metrics, tasks,
                        transformers_dict["gconv"])

                    # for sim data resource
                    scores_lst.append(score)
                    for m in eval_dict:
                        if m in metrics_dict:
                            metrics_dict[m].append(eval_dict[m])
                        else:
                            metrics_dict[m] = [eval_dict[m]]

                    print("\nEpoch={}/{}, batch={}/{}, "
                          "evaluation results= {}, score={}".format(
                              epoch + 1, n_epochs, i + 1,
                              len(data_loaders[phase]), eval_dict, score))

                    i += 1
                # End of mini-batch iterations.

        duration = time.time() - start
        print('\nModel evaluation duration: {:.0f}m {:.0f}s'.format(
            duration // 60, duration % 60))
    def explain_model(model,
                      model_dir,
                      model_name,
                      data_loaders,
                      prot_model_type,
                      transformers_dict,
                      prot_desc_dict,
                      sim_data_node,
                      max_print=10,
                      k=10):
        # load saved model and put in evaluation mode
        model.load_state_dict(
            jova.utils.io.load_model(
                model_dir,
                model_name,
                dvc='cuda' if torch.cuda.is_available() else 'cpu'))
        model.eval()

        print("Model evaluation...")
        start = time.time()

        # sub-nodes of sim data resource
        attn_ranking = []
        attn_ranking_node = DataNode(label="attn_ranking", data=attn_ranking)

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [attn_ranking_node]

        # Main evaluation loop
        i = 0
        phase = 'test' if data_loaders['test'] is not None else 'val'
        for batch in tqdm(data_loaders[phase]):
            if i == max_print:
                print(
                    '\nMaximum number [%d] of samples limit reached. Terminating...'
                    % i)
                break
            i += 1
            batch_size, data = batch_collator(batch,
                                              prot_desc_dict,
                                              spec={
                                                  "weave": use_weave,
                                                  "gconv": use_gconv,
                                                  "gnn": use_gnn
                                              })
            # attention x data for analysis
            attn_data_x = {}

            # organize the data for each view.
            Xs = {}
            Ys = {}
            Ws = {}
            for view_name in data:
                view_data = data[view_name]
                if view_name == "gconv":
                    x = ((view_data[0][0], batch_size), view_data[0][1],
                         view_data[0][2])
                    Xs["gconv"] = x
                else:
                    Xs[view_name] = view_data[0]
                Ys[view_name] = view_data[1]
                Ws[view_name] = view_data[2].reshape(-1, 1).astype(float)
                attn_data_x[view_name] = view_data[0][3]

            # forward propagation
            with torch.set_grad_enabled(False):
                Ys = {k: Ys[k].astype(float) for k in Ys}
                # Ensure corresponding pairs (use a separate loop variable so the
                # sample counter `i` used for max_print is not clobbered)
                for idx in range(1, len(Ys.values())):
                    assert (list(Ys.values())[idx - 1] == list(
                        Ys.values())[idx]).all()

                y_true = Ys[list(Xs.keys())[0]]
                w = Ws[list(Xs.keys())[0]]
                if prot_model_type == "p2v" or prot_model_type == "rnn":
                    protein_x = Xs[list(Xs.keys())[0]][2]
                else:
                    protein_x = Xs[list(Xs.keys())[0]][1]
                attn_data_x[prot_model_type] = Xs[list(Xs.keys())[0]][2]

                X = []
                if use_prot:
                    X.append(protein_x)
                if use_weave:
                    X.append(Xs["weave"][0])
                if use_gconv:
                    X.append(Xs["gconv"][0])

                # register attention data for reverse-mapping
                two_way_attn.register_data(attn_data_x)

                # forward propagation
                y_predicted = model(X)

                # get segments ranking
                transformer = transformers_dict[list(Xs.keys())[0]]
                rank_results = {
                    'y_pred':
                    np_to_plot_data(
                        undo_transforms(y_predicted.cpu().detach().numpy(),
                                        transformer)),
                    'y_true':
                    np_to_plot_data(undo_transforms(y_true, transformer)),
                    'attn_ranking':
                    two_way_attn.get_rankings(k)
                }
                attn_ranking.append(rank_results)
        # End of mini-batch iterations.

        duration = time.time() - start
        print('\nPrediction interpretation duration: {:.0f}m {:.0f}s'.format(
            duration // 60, duration % 60))
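Each attn_ranking entry collected above is a plain dict holding y_pred, y_true and the attention rankings for one mini-batch, so a small helper like the sketch below could persist them for offline inspection (the file name and the assumption that the ranking values are JSON-serializable, or at least printable, are mine, not part of the listing).

import json

def save_attn_rankings(attn_ranking, path="attn_ranking.json"):
    # attn_ranking: list built by explain_model above; one dict per mini-batch
    # with keys 'y_pred', 'y_true' and 'attn_ranking'.
    with open(path, "w") as fp:
        json.dump(attn_ranking, fp, indent=2, default=str)  # default=str guards non-JSON values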
Example #4
def compute_model_performance(metrics,
                              y_pred,
                              y,
                              w,
                              transformers,
                              tasks,
                              n_classes=2,
                              per_task_metrics=False):
    """
    Computes statistics of a model's performance and saves the results to CSV.

    :param metrics: list
        List of :Metric objects.
    :param y_pred: ndarray
        The predicted values.
    :param y: ndarray
        The ground truths.
    :param w: ndarray
        Label weights.
    :param transformers: list
        DeepChem/PADME data transformers used in the loading pipeline.
    :param tasks: list
        The prediction tasks of the dataset.
    :param n_classes: int, optional
        Number of classes in the data (for classification tasks only).
    :param per_task_metrics: bool, optional
        If True, also return the computed metric for each task of a multitask dataset.
    :return: dict of multitask scores or, if per_task_metrics is True, a
        (multitask_scores, all_task_scores) tuple.
    """

    if not len(metrics):
        return {}
    multitask_scores = {}
    all_task_scores = {}

    y = undo_transforms(y, transformers)
    y_pred = undo_transforms(y_pred, transformers)
    if len(w) != 0:
        w = np.array(w)
        w = np.reshape(w, newshape=y.shape)

    # Compute multitask metrics
    for metric in metrics:
        if per_task_metrics:
            multitask_scores[
                metric.name], computed_metrics = metric.compute_metric(
                    y,
                    y_pred,
                    w,
                    per_task_metrics=True,
                    n_classes=n_classes,
                    tasks=tasks)
            all_task_scores[metric.name] = computed_metrics
        else:
            multitask_scores[metric.name] = metric.compute_metric(
                y,
                y_pred,
                w,
                per_task_metrics=False,
                n_classes=n_classes,
                tasks=tasks)

    if not per_task_metrics:
        return multitask_scores
    else:
        return multitask_scores, all_task_scores
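A hypothetical call of compute_model_performance; the arrays and Metric objects are placeholders standing in for whatever the loading pipeline produced.

scores = compute_model_performance(metrics,           # e.g. a list of Metric objects
                                    y_pred=y_pred,     # ndarray of model predictions
                                    y=y_true,          # ndarray of ground truths
                                    w=weights,         # label weights, reshaped to y's shape internally
                                    transformers=transformers,
                                    tasks=tasks)
# With per_task_metrics=True the function returns (multitask_scores, all_task_scores) instead.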
    def evaluate_model(model,
                       model_dir,
                       model_name,
                       data_loaders,
                       metrics,
                       prot_model_type,
                       transformers_dict,
                       prot_desc_dict,
                       tasks,
                       sim_data_node=None):
        # load saved model and put in evaluation mode
        model.load_state_dict(jova.utils.io.load_model(model_dir, model_name))
        model.eval()

        print("Model evaluation...")
        start = time.time()
        n_epochs = 1

        # sub-nodes of sim data resource
        attn_ranking = []
        attn_ranking_node = DataNode(label="attn_ranking", data=attn_ranking)

        # sub-nodes of sim data resource
        # loss_lst = []
        # train_loss_node = DataNode(label="training_loss", data=loss_lst)
        metrics_dict = {}
        metrics_node = DataNode(label="validation_metrics", data=metrics_dict)
        scores_lst = []
        scores_node = DataNode(label="validation_score", data=scores_lst)
        predicted_vals = []
        true_vals = []
        model_preds_node = DataNode(label="model_predictions",
                                    data={
                                        "y": true_vals,
                                        "y_pred": predicted_vals
                                    })

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [metrics_node, scores_node, model_preds_node]

        # Main evaluation loop
        for epoch in range(n_epochs):

            for phase in ["test"]:
                # Iterate through mini-batches
                i = 0
                for batch in tqdm(data_loaders[phase]):
                    batch_size, data = batch_collator(batch,
                                                      prot_desc_dict,
                                                      spec={
                                                          "weave": use_weave,
                                                          "gconv": use_gconv,
                                                          "gnn": use_gnn
                                                      })
                    # organize the data for each view.
                    Xs = {}
                    Ys = {}
                    Ws = {}
                    for view_name in data:
                        view_data = data[view_name]
                        if view_name == "gconv":
                            x = ((view_data[0][0], batch_size),
                                 view_data[0][1], view_data[0][2])
                            Xs["gconv"] = x
                        else:
                            Xs[view_name] = view_data[0]
                        Ys[view_name] = view_data[1]
                        Ws[view_name] = view_data[2].reshape(-1, 1).astype(
                            float)

                    # forward propagation
                    with torch.set_grad_enabled(False):
                        Ys = {k: Ys[k].astype(float) for k in Ys}
                        # Ensure corresponding pairs
                        for idx in range(1, len(Ys.values())):
                            assert (list(Ys.values())[idx - 1] == list(
                                Ys.values())[idx]).all()

                        y_true = Ys[list(Xs.keys())[0]]
                        w = Ws[list(Xs.keys())[0]]
                        if prot_model_type == "p2v" or prot_model_type == "rnn":
                            protein_x = Xs[list(Xs.keys())[0]][2]
                        else:
                            protein_x = Xs[list(Xs.keys())[0]][1]
                        X = []
                        if use_prot:
                            X.append(protein_x)
                        if use_weave:
                            X.append(Xs["weave"][0])
                        if use_gconv:
                            X.append(Xs["gconv"][0])
                        y_predicted = model(X)

                        # apply transformers
                        predicted_vals.extend(
                            undo_transforms(
                                y_predicted.cpu().detach().numpy(),
                                transformers_dict["gconv"]).squeeze().tolist())
                        true_vals.extend(
                            undo_transforms(y_true,
                                            transformers_dict["gconv"]).astype(
                                                float).squeeze().tolist())

                    eval_dict = {}
                    score = TwoWayAttnBaseline.evaluate(
                        eval_dict, y_true, y_predicted, w, metrics, tasks,
                        transformers_dict["gconv"])

                    # for sim data resource
                    scores_lst.append(score)
                    for m in eval_dict:
                        if m in metrics_dict:
                            metrics_dict[m].append(eval_dict[m])
                        else:
                            metrics_dict[m] = [eval_dict[m]]

                    print("\nEpoch={}/{}, batch={}/{}, "
                          "evaluation results= {}, score={}".format(
                              epoch + 1, n_epochs, i + 1,
                              len(data_loaders[phase]), eval_dict, score))

                    i += 1
                # End of mini-batch iterations.

        duration = time.time() - start
        print('\nModel evaluation duration: {:.0f}m {:.0f}s'.format(
            duration // 60, duration % 60))
Example #6
    def evaluate_model(data, model_dir, model_file, metrics, transformer,
                       drug_kernel_dict, prot_kernel_dict, tasks,
                       sim_data_node):
        print("Model evaluation...")
        start = time.time()
        eval_metrics_dict = {}
        eval_metrics_node = DataNode(label="validation_metrics",
                                     data=eval_metrics_dict)
        eval_scores_lst = []
        eval_scores_node = DataNode(label="validation_score",
                                    data=eval_scores_lst)
        eval_predicted_vals = []
        eval_true_vals = []
        eval_model_preds_node = DataNode(label="model_predictions",
                                         data={
                                             "y": eval_true_vals,
                                             "y_pred": eval_predicted_vals
                                         })
        if sim_data_node:
            sim_data_node.data = [
                eval_metrics_node, eval_scores_node, eval_model_preds_node
            ]

        kernel_data = data['kernel_data']

        # load the learned dual-coefficient (weight) matrix
        A = load_numpy_array(os.path.join(model_dir, model_file))

        # Test
        KD_eval = kernel_data['KD_test']
        KT_eval = kernel_data['KT_test']
        Y_eval = kernel_data['Y_test']
        W_eval = kernel_data['W_test']

        P_val = KD_eval @ A @ KT_eval.T
        y_hat = P_val.reshape(-1, 1)
        y = Y_eval.reshape(-1, 1)
        w = W_eval.reshape(-1, 1)
        eval_loss = mean_squared_error(y, y_hat, w)

        # Metrics
        eval_dict = {}
        score = KronRLS.evaluate(eval_dict, y, y_hat, w, metrics, tasks,
                                 transformer)
        for m in eval_dict:
            if m in eval_metrics_dict:
                eval_metrics_dict[m].append(eval_dict[m])
            else:
                eval_metrics_dict[m] = [eval_dict[m]]
        # apply transformers

        y_hat = y_hat[w.nonzero()[0]]
        y = y[w.nonzero()[0]]
        eval_predicted_vals.extend(
            undo_transforms(y_hat, transformer).squeeze().tolist())
        eval_true_vals.extend(
            undo_transforms(y, transformer).squeeze().tolist())
        eval_scores_lst.append(score)
        print(
            f'eval loss={eval_loss}, score={score}, metrics={str(eval_dict)}')

        duration = time.time() - start
        print('\nModel evaluation duration: {:.0f}m {:.0f}s'.format(
            duration // 60, duration % 60))
Example #7
    def train(data,
              reg_lambda,
              metrics,
              transformer,
              drug_kernel_dict,
              prot_kernel_dict,
              tasks,
              sim_data_node,
              is_hsearch=False):
        start = time.time()
        metrics_dict = {}
        metrics_node = DataNode(label="validation_metrics", data=metrics_dict)
        eval_scores_lst = []
        eval_scores_node = DataNode(label="validation_score",
                                    data=eval_scores_lst)
        eval_predicted_vals = []
        eval_true_vals = []
        eval_model_preds_node = DataNode(label="model_predictions",
                                         data={
                                             "y": eval_true_vals,
                                             "y_pred": eval_predicted_vals
                                         })
        if sim_data_node:
            sim_data_node.data = [
                metrics_node, eval_scores_node, eval_model_preds_node
            ]

        kernel_data = data['kernel_data']
        KD = kernel_data['KD']
        KT = kernel_data['KT']
        Y = kernel_data['Y']
        W = kernel_data['W']

        # Eigen decompositions
        Lambda, V = np.linalg.eigh(KD)
        Lambda = Lambda.reshape((-1, 1))
        Sigma, U = np.linalg.eigh(KT)
        Sigma = Sigma.reshape((-1, 1))

        # Compute C
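        # Standard KronRLS closed form: with KD = V diag(Lambda) V^T and KT = U diag(Sigma) U^T,
        # solving (KT kron KD + reg_lambda * I) vec(A) = vec(Y) gives
        #   A = V [ (V^T Y U) / (Lambda Sigma^T + reg_lambda) ] U^T,
        # where the division is element-wise -- which is what the next lines compute.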
        newevals = 1. / (Lambda @ Sigma.T + reg_lambda)
        # newevals = newevals.T
        VTYU = V.T @ Y @ U
        C = np.multiply(VTYU, newevals)

        # compute weights
        A = V @ C @ U.T

        # training loss
        P_train = KD @ A @ KT.T
        tr_loss = mean_squared_error(Y.reshape(-1, 1), P_train.reshape(-1, 1),
                                     W.reshape(-1, 1))

        # Evaluation
        print('Eval mat construction started')
        if is_hsearch:
            KD_eval = kernel_data['KD_val']
            KT_eval = kernel_data['KT_val']
            Y_eval = kernel_data['Y_val']
            W_eval = kernel_data['W_val']
        else:
            KD_eval = kernel_data['KD_test']
            KT_eval = kernel_data['KT_test']
            Y_eval = kernel_data['Y_test']
            W_eval = kernel_data['W_test']
        P_val = KD_eval @ A @ KT_eval.T
        y_hat = P_val.reshape(-1, 1)
        y = Y_eval.reshape(-1, 1)
        w = W_eval.reshape(-1, 1)
        eval_loss = mean_squared_error(y, y_hat, w)

        # Metrics
        eval_dict = {}
        score = KronRLS.evaluate(eval_dict, y, y_hat, w, metrics, tasks,
                                 transformer)
        for m in eval_dict:
            if m in metrics_dict:
                metrics_dict[m].append(eval_dict[m])
            else:
                metrics_dict[m] = [eval_dict[m]]
        print(
            f'Training loss={tr_loss}, evaluation loss={eval_loss}, score={score}, metrics={str(eval_dict)}'
        )

        # apply transformers
        y_hat = y_hat[w.nonzero()[0]]
        y = y[w.nonzero()[0]]
        eval_predicted_vals.extend(
            undo_transforms(y_hat, transformer).squeeze().tolist())
        eval_true_vals.extend(
            undo_transforms(y, transformer).squeeze().tolist())
        eval_scores_lst.append(score)
        print(
            f'eval loss={eval_loss}, score={score}, metrics={str(eval_dict)}')

        duration = time.time() - start
        print('\nModel training duration: {:.0f}m {:.0f}s'.format(
            duration // 60, duration % 60))
        return {'model': A, 'score': score, 'epoch': 0}
Example #8
    def evaluate_model(model, model_dir, model_name, data_loaders, metrics, transformers_dict, prot_desc_dict,
                       tasks, view, sim_data_node=None):
        comp_view, prot_view = view
        # load saved model and put in evaluation mode
        model.load_state_dict(load_model(model_dir, model_name, dvc=torch.device('cuda:0')))
        model.eval()

        print("Model evaluation...")
        start = time.time()
        n_epochs = 1

        # sub-nodes of sim data resource
        metrics_dict = {}
        metrics_node = DataNode(label="validation_metrics", data=metrics_dict)
        scores_lst = []
        scores_node = DataNode(label="validation_score", data=scores_lst)
        predicted_vals = []
        true_vals = []
        model_preds_node = DataNode(label="model_predictions", data={"y": true_vals,
                                                                     "y_pred": predicted_vals})

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [metrics_node, scores_node, model_preds_node]

        # Main evaluation loop
        for epoch in range(n_epochs):

            for phase in ["test"]:
                # Iterate through mini-batches
                i = 0
                for batch in tqdm(data_loaders[phase]):
                    batch_size, data = batch_collator(batch, prot_desc_dict, spec=comp_view)
                    # Data
                    if prot_view in ["p2v", "rnn", "pcnn", "pcnna"]:
                        protein_x = data[comp_view][0][2]
                    else:  # then it's psc
                        protein_x = data[comp_view][0][1]
                    if comp_view == "gconv":
                        # graph data structure is: [(compound data, batch_size), protein_data]
                        X = ((data[comp_view][0][0], batch_size), protein_x)
                    else:
                        X = (data[comp_view][0][0], protein_x)
                    y = data[comp_view][1]
                    w = data[comp_view][2]
                    y = np.array([k for k in y], dtype=float)
                    w = np.array([k for k in w], dtype=float)

                    # prediction
                    y_predicted = model(X)

                    # apply transformers
                    predicted_vals.extend(undo_transforms(y_predicted.cpu().detach().numpy(),
                                                          transformers_dict[comp_view]).squeeze().tolist())
                    true_vals.extend(
                        undo_transforms(y, transformers_dict[comp_view]).astype(float).squeeze().tolist())

                    eval_dict = {}
                    score = SingleViewDTI.evaluate(eval_dict, y, y_predicted, w, metrics, tasks,
                                                   transformers_dict[comp_view])

                    # for sim data resource
                    scores_lst.append(score)
                    for m in eval_dict:
                        if m in metrics_dict:
                            metrics_dict[m].append(eval_dict[m])
                        else:
                            metrics_dict[m] = [eval_dict[m]]

                    print("\nEpoch={}/{}, batch={}/{}, "
                          "evaluation results= {}, score={}".format(epoch + 1, n_epochs, i + 1,
                                                                    len(data_loaders[phase]),
                                                                    eval_dict, score))

                    i += 1
                # End of mini-batch iterations.

        duration = time.time() - start
        print('\nModel evaluation duration: {:.0f}m {:.0f}s'.format(duration // 60, duration % 60))
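In this variant, view is a (compound view, protein view) pair; a hypothetical call could look like the following sketch, where all names are placeholders from an assumed setup.

evaluate_model(model, model_dir="./models", model_name="singleview_dti.mod",
               data_loaders=data_loaders, metrics=metrics,
               transformers_dict=transformers_dict, prot_desc_dict=prot_desc_dict,
               tasks=tasks,
               view=("gconv", "psc"),   # (compound view, protein view)
               sim_data_node=DataNode(label="evaluation", data=[]))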
Example #9
    def explain_model(model,
                      model_dir,
                      model_name,
                      data_loaders,
                      transformers_dict,
                      prot_desc_dict,
                      view_lbl,
                      sim_data_node=None,
                      max_print=10000,
                      k=10):
        # load saved model and put in evaluation mode
        model.load_state_dict(
            jova.utils.io.load_model(
                model_dir,
                model_name,
                dvc='cuda' if torch.cuda.is_available() else 'cpu'))
        model.eval()

        print("Model evaluation...")
        start = time.time()

        # sub-nodes of sim data resource
        attn_ranking = []
        attn_ranking_node = DataNode(label="attn_ranking", data=attn_ranking)

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [attn_ranking_node]

        # Iterate through mini-batches
        i = 0
        # Since we're evaluating, join all data loaders
        all_loaders = chain()
        for loader in data_loaders:
            if data_loaders[loader] is not None:
                all_loaders = chain(all_loaders, data_loaders[loader])

        for batch in tqdm(all_loaders):
            if i == max_print:
                print(
                    '\nMaximum number [%d] of samples limit reached. Terminating...'
                    % i)
                break
            i += 1
            batch_size, data = batch_collator(batch,
                                              prot_desc_dict,
                                              spec=view_lbl,
                                              cuda_prot=False)

            # Data
            protein_x = data[view_lbl][0][2]
            if view_lbl == "gconv":
                # graph data structure is: [(compound data, batch_size), protein_data]
                X = ((data[view_lbl][0][0], batch_size), protein_x)
            else:
                X = (data[view_lbl][0][0], protein_x)
            y_true = np.array([k for k in data[view_lbl][1]], dtype=float)
            w = np.array([k for k in data[view_lbl][2]], dtype=float)

            # attention x data for analysis
            attn_data_x = {}
            attn_data_x['pcnna'] = protein_x

            # forward propagation
            with torch.set_grad_enabled(False):
                y_predicted = model(X)

            # get segments ranking
            transformer = transformers_dict[view_lbl]
            rank_results = {
                'y_pred':
                np_to_plot_data(
                    undo_transforms(y_predicted.cpu().detach().numpy(),
                                    transformer)),
                'y_true':
                np_to_plot_data(undo_transforms(y_true, transformer)),
                'smiles':
                data[view_lbl][0][3][0][0].smiles
            }
            attn_ranking.append(rank_results)
        # End of mini-batch iterations.

        duration = time.time() - start
        print('\nPrediction interpretation duration: {:.0f}m {:.0f}s'.format(
            duration // 60, duration % 60))