Example #1
 def calc_metrics(self, data_gen, history, dataset, logs):
     y_true = []
     predictions = []
     for i in range(data_gen.steps):
         if self.verbose == 1:
             print("\r\tdone {}/{}".format(i, data_gen.steps))
         (x, y_processed, y) = data_gen.next(return_y_true=True)
         pred = self.model.predict(x, batch_size=self.batch_size)
         if isinstance(x, list) and len(x) == 2:  # deep supervision
             if pred.shape[-1] == 1:  # regression
                 pred_flatten = pred.flatten()
             else:  # classification
                 pred_flatten = pred.reshape((-1, 10))
             for m, t, p in zip(x[1].flatten(), y.flatten(), pred_flatten):
                 if np.equal(m, 1):
                     y_true.append(t)
                     predictions.append(p)
         else:
             if pred.shape[-1] == 1:
                 y_true += list(y.flatten())
                 predictions += list(pred.flatten())
             else:
                 y_true += list(y)
                 predictions += list(pred)
     print("\n")
     if self.partition == 'log':
         predictions = [
             metrics.get_estimate_log(x, 10) for x in predictions
         ]
         ret = metrics.print_metrics_log_bins(y_true, predictions)
     if self.partition == 'custom':
         predictions = [
             metrics.get_estimate_custom(x, 10) for x in predictions
         ]
         ret = metrics.print_metrics_custom_bins(y_true, predictions)
     if self.partition == 'none':
         ret = metrics.print_metrics_regression(y_true, predictions)
     for k, v in ret.items():
         logs[dataset + '_' + k] = v
     history.append(ret)
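For context, a method like the one above is typically called once per epoch from inside a Keras callback. Below is a minimal sketch of that wiring; the class name MetricsCallback, the constructor arguments, and the history attributes are assumptions for illustration, not taken from this page, while the calc_metrics signature and the data-generator interface (steps, next(return_y_true=True)) mirror what the example already uses.

# Hypothetical wiring for the calc_metrics method shown above (a sketch, not a
# definitive implementation). Only calc_metrics itself comes from Example #1;
# every other name here is an illustrative assumption.
import keras


class MetricsCallback(keras.callbacks.Callback):
    def __init__(self, train_data_gen, val_data_gen, batch_size=8,
                 partition='none', verbose=1):
        super(MetricsCallback, self).__init__()
        self.train_data_gen = train_data_gen
        self.val_data_gen = val_data_gen
        self.batch_size = batch_size
        self.partition = partition
        self.verbose = verbose
        self.train_history = []  # one metrics dict appended per epoch
        self.val_history = []

    def calc_metrics(self, data_gen, history, dataset, logs):
        ...  # body as in Example #1 above

    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else {}
        print("\n==> predicting on train")
        self.calc_metrics(self.train_data_gen, self.train_history, 'train', logs)
        print("\n==> predicting on validation")
        self.calc_metrics(self.val_data_gen, self.val_history, 'val', logs)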
Example #2
 def calc_metrics(self, data_gen, history, dataset, logs):
     y_true = []
     predictions = []
     for i in range(data_gen.steps):
         if self.verbose == 1:
             print "\r\tdone {}/{}".format(i, data_gen.steps),
         (x, y_processed, y) = data_gen.next(return_y_true=True)
         pred = self.model.predict(x, batch_size=self.batch_size)
         if isinstance(x, list) and len(x) == 2:  # deep supervision
             if pred.shape[-1] == 1:  # regression
                 pred_flatten = pred.flatten()
             else:  # classification
                 pred_flatten = pred.reshape((-1, 10))
             for m, t, p in zip(x[1].flatten(), y.flatten(), pred_flatten):
                 if np.equal(m, 1):
                     y_true.append(t)
                     predictions.append(p)
         else:
             if pred.shape[-1] == 1:
                 y_true += list(y.flatten())
                 predictions += list(pred.flatten())
             else:
                 y_true += list(y)
                 predictions += list(pred)
     print "\n"
     if self.partition == 'log':
         predictions = [metrics.get_estimate_log(x, 10) for x in predictions]
         ret = metrics.print_metrics_log_bins(y_true, predictions)
     if self.partition == 'custom':
         predictions = [metrics.get_estimate_custom(x, 10) for x in predictions]
         ret = metrics.print_metrics_custom_bins(y_true, predictions)
     if self.partition == 'none':
         ret = metrics.print_metrics_regression(y_true, predictions)
     for k, v in ret.iteritems():
         logs[dataset + '_' + k] = v
     history.append(ret)
Example #3
    def calc_metrics(self, data_gen, history, dataset, logs):
        ihm_y_true = []
        decomp_y_true = []
        los_y_true = []
        pheno_y_true = []

        ihm_pred = []
        decomp_pred = []
        los_pred = []
        pheno_pred = []

        for i in range(data_gen.steps):
            if self.verbose == 1:
                print("\r\tdone {}/{}".format(i, data_gen.steps))
            (X, y, los_y_reg) = data_gen.next(return_y_true=True)
            outputs = self.model.predict(X, batch_size=self.batch_size)

            ihm_M = X[1]
            decomp_M = X[2]
            los_M = X[3]

            if not data_gen.target_repl:  # no target replication
                (ihm_p, decomp_p, los_p, pheno_p) = outputs
                (ihm_t, decomp_t, los_t, pheno_t) = y
            else:  # target replication
                (ihm_p, _, decomp_p, los_p, pheno_p, _) = outputs
                (ihm_t, _, decomp_t, los_t, pheno_t, _) = y

            los_t = los_y_reg  # real value not the label

            # ihm
            for (m, t, p) in zip(ihm_M.flatten(), ihm_t.flatten(),
                                 ihm_p.flatten()):
                if np.equal(m, 1):
                    ihm_y_true.append(t)
                    ihm_pred.append(p)

            # decomp
            for (m, t, p) in zip(decomp_M.flatten(), decomp_t.flatten(),
                                 decomp_p.flatten()):
                if np.equal(m, 1):
                    decomp_y_true.append(t)
                    decomp_pred.append(p)

            # los
            if los_p.shape[-1] == 1:  # regression
                for (m, t, p) in zip(los_M.flatten(), los_t.flatten(),
                                     los_p.flatten()):
                    if np.equal(m, 1):
                        los_y_true.append(t)
                        los_pred.append(p)
            else:  # classification
                for (m, t, p) in zip(los_M.flatten(), los_t.flatten(),
                                     los_p.reshape((-1, 10))):
                    if np.equal(m, 1):
                        los_y_true.append(t)
                        los_pred.append(p)

            # pheno
            for (t, p) in zip(pheno_t.reshape((-1, 25)),
                              pheno_p.reshape((-1, 25))):
                pheno_y_true.append(t)
                pheno_pred.append(p)
        print("\n")

        # ihm
        print("\n ================= 48h mortality ================")
        ihm_pred = np.array(ihm_pred)
        # turn positive-class probabilities into an (N, 2) array of [P(y=0), P(y=1)]
        ihm_pred = np.stack([1 - ihm_pred, ihm_pred], axis=1)
        ret = metrics.print_metrics_binary(ihm_y_true, ihm_pred)
        for k, v in ret.items():
            logs[dataset + '_ihm_' + k] = v

        # decomp
        print("\n ================ decompensation ================")
        decomp_pred = np.array(decomp_pred)
        decomp_pred = np.stack([1 - decomp_pred, decomp_pred], axis=1)
        ret = metrics.print_metrics_binary(decomp_y_true, decomp_pred)
        for k, v in ret.items():
            logs[dataset + '_decomp_' + k] = v

        # los
        print("\n ================ length of stay ================")
        if self.partition == 'log':
            los_pred = [metrics.get_estimate_log(x, 10) for x in los_pred]
            ret = metrics.print_metrics_log_bins(los_y_true, los_pred)
        if self.partition == 'custom':
            los_pred = [metrics.get_estimate_custom(x, 10) for x in los_pred]
            ret = metrics.print_metrics_custom_bins(los_y_true, los_pred)
        if self.partition == 'none':
            ret = metrics.print_metrics_regression(los_y_true, los_pred)
        for k, v in ret.items():
            logs[dataset + '_los_' + k] = v

        # pheno
        print("\n =================== phenotype ==================")
        pheno_pred = np.array(pheno_pred)
        ret = metrics.print_metrics_multilabel(pheno_y_true, pheno_pred)
        for k, v in ret.items():
            logs[dataset + '_pheno_' + k] = v

        history.append(logs)
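The per-timestep loops over (mask, target, prediction) triples in the example above can also be written with NumPy boolean indexing. A small equivalent sketch for the 1-D branches (binary or regression targets), assuming the three arrays flatten to the same length; the helper name select_masked is hypothetical:

# Boolean-indexing equivalent of the masked selection loops above (a sketch).
import numpy as np


def select_masked(mask, targets, preds):
    """Keep only the positions where the supervision mask equals 1."""
    keep = np.asarray(mask).flatten() == 1
    return np.asarray(targets).flatten()[keep], np.asarray(preds).flatten()[keep]


# e.g. ihm_t_masked, ihm_p_masked = select_masked(ihm_M, ihm_t, ihm_p)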
Example #4
    def calc_metrics(self, data_gen, history, dataset, logs):
        ihm_y_true = []
        decomp_y_true = []
        los_y_true = []
        pheno_y_true = []

        ihm_pred = []
        decomp_pred = []
        los_pred = []
        pheno_pred = []

        for i in range(data_gen.steps):
            if self.verbose == 1:
                print "\r\tdone {}/{}".format(i, data_gen.steps),
            (X, y, los_y_reg) = data_gen.next(return_y_true=True)
            outputs = self.model.predict(X, batch_size=self.batch_size)

            ihm_M = X[1]
            decomp_M = X[2]
            los_M = X[3]

            if not data_gen.target_repl:  # no target replication
                (ihm_p, decomp_p, los_p, pheno_p) = outputs
                (ihm_t, decomp_t, los_t, pheno_t) = y
            else:  # target replication
                (ihm_p, _, decomp_p, los_p, pheno_p, _) = outputs
                (ihm_t, _, decomp_t, los_t, pheno_t, _) = y

            los_t = los_y_reg  # real value not the label

            # ihm
            for (m, t, p) in zip(ihm_M.flatten(), ihm_t.flatten(), ihm_p.flatten()):
                if np.equal(m, 1):
                    ihm_y_true.append(t)
                    ihm_pred.append(p)

            # decomp
            for (m, t, p) in zip(decomp_M.flatten(), decomp_t.flatten(), decomp_p.flatten()):
                if np.equal(m, 1):
                    decomp_y_true.append(t)
                    decomp_pred.append(p)

            # los
            if los_p.shape[-1] == 1:  # regression
                for (m, t, p) in zip(los_M.flatten(), los_t.flatten(), los_p.flatten()):
                    if np.equal(m, 1):
                        los_y_true.append(t)
                        los_pred.append(p)
            else:  # classification
                for (m, t, p) in zip(los_M.flatten(), los_t.flatten(), los_p.reshape((-1, 10))):
                    if np.equal(m, 1):
                        los_y_true.append(t)
                        los_pred.append(p)

            # pheno
            for (t, p) in zip(pheno_t.reshape((-1, 25)), pheno_p.reshape((-1, 25))):
                pheno_y_true.append(t)
                pheno_pred.append(p)
        print "\n"

        # ihm
        print "\n ================= 48h mortality ================"
        ihm_pred = np.array(ihm_pred)
        ihm_pred = np.stack([1 - ihm_pred, ihm_pred], axis=1)
        ret = metrics.print_metrics_binary(ihm_y_true, ihm_pred)
        for k, v in ret.iteritems():
            logs[dataset + '_ihm_' + k] = v

        # decomp
        print "\n ================ decompensation ================"
        decomp_pred = np.array(decomp_pred)
        decomp_pred = np.stack([1 - decomp_pred, decomp_pred], axis=1)
        ret = metrics.print_metrics_binary(decomp_y_true, decomp_pred)
        for k, v in ret.iteritems():
            logs[dataset + '_decomp_' + k] = v

        # los
        print "\n ================ length of stay ================"
        if self.partition == 'log':
            los_pred = [metrics.get_estimate_log(x, 10) for x in los_pred]
            ret = metrics.print_metrics_log_bins(los_y_true, los_pred)
        if self.partition == 'custom':
            los_pred = [metrics.get_estimate_custom(x, 10) for x in los_pred]
            ret = metrics.print_metrics_custom_bins(los_y_true, los_pred)
        if self.partition == 'none':
            ret = metrics.print_metrics_regression(los_y_true, los_pred)
        for k, v in ret.iteritems():
            logs[dataset + '_los_' + k] = v

        # pheno
        print "\n =================== phenotype =================="
        pheno_pred = np.array(pheno_pred)
        ret = metrics.print_metrics_multilabel(pheno_y_true, pheno_pred)
        for k, v in ret.iteritems():
            logs[dataset + '_pheno_' + k] = v

        history.append(logs)