Example #1
    def evaluate(self):
        """Compute and print the F-score of the stored predictions."""
        if len(self._predicted_labels) == 0:
            raise ValueError('No predicted labels available')

        from mitosCalsification import metrics
        fscore = metrics.fscore(self._labels, self._predicted_labels,
                                self.not_detected)
        print('fscore: {}'.format(fscore))
Example #2
    def _validate(self):
        # Nothing to do when no validation data was provided.
        if not self.bval_data:
            return

        # Predict on the validation set and keep the per-sample maximum score.
        val_pred = self.model.predict(self.xv, self.batch_size)
        val_pred = np.amax(val_pred, axis=1)
        val_loss = binary_crossentropy(K.variable(self.yv),
                                       K.variable(val_pred))
        self.val_loss = K.eval(K.mean(val_loss))
        # Threshold the scores at 0.5 before computing the F-score.
        val_pred = np.round(val_pred, decimals=0).astype(int)
        self.val_fscore = metrics.fscore(self.yv, val_pred)
        self.val_history_list.append(self.val_fscore)
Example #3
def _do_test(model, xt, yt):
    if isinstance(model, Bagging):
        # The custom Bagging ensemble gets the class indices directly and its
        # output is treated as already-rounded predictions.
        yt2 = np.argmax(yt, axis=1)
        res_rounded = model.predict_on_batch(xt, yt2)
        res = res_rounded
    else:
        res = model.predict(xt)
        # res = np.argmax(res, axis=1)
        res_rounded = np.round(res, decimals=0).astype(int)

    cat_res = np_utils.to_categorical(res_rounded)  # computed but unused below
    fscore = metrics.fscore(yt, res_rounded)
    # Despite the name, binary_accuracy gives mean accuracy, not precision.
    prec = K.eval(
        K.mean(binary_accuracy(K.variable(yt), K.variable(res_rounded))))

    return fscore, prec, res_rounded, res
Example #4
    def _test(self, model=None, return_metrics=False):
        if model is None:
            model = self.model

        test_pred = model.predict(self.xt)
        round_test_pred = np.round(test_pred, decimals=0).astype(int)

        test_fscore = metrics.fscore(self.yt, round_test_pred)
        test_prec = K.eval(
            K.mean(
                binary_accuracy(K.variable(self.yt),
                                K.variable(round_test_pred))))

        # Keep a copy of the best-scoring model seen so far.
        if test_fscore > self.best_score:
            self.best_score = test_fscore
            self.best_model = keras_deep_copy_model(model)

        if not return_metrics:
            self.iteration_test_fscore = test_fscore
            self.iteration_test_prec = test_prec
            self.test_history_list.append(test_fscore)
        else:
            return test_fscore, test_prec
Example #5
    def _print_metrics(self, y_true, y_pred):
        from mitosCalsification import metrics
        metrics.print_conf_matrix(y_true, y_pred, self.not_detected)
        fscore = metrics.fscore(y_true, y_pred, self.not_detected)
        print('fscore: {}'.format(fscore))
Example #6
def _do_train(model, train_data, val_data, epochs, batch_size):
    # generator = ImageDataGenerator(rotation_range=40,
    #                                width_shift_range=0.3,
    #                                height_shift_range=0.3,
    #                                shear_range=0.1,
    #                                zoom_range=0.2,
    #                                fill_mode='wrap',
    #                                horizontal_flip=True,
    #                                vertical_flip=True)
    generator = ImageDataGenerator()  # augmentation currently disabled
    xe = train_data[0]
    # ye = np_utils.to_categorical(train_data[1])
    ye = train_data[1]
    xv = val_data[0]
    yv = val_data[1]
    # yv = np_utils.to_categorical(val_data[1])
    xt, yt = load_test_data()
    class_weight = _get_class_weights(ye)

    train_history_list = []
    val_history_list = []
    test_history_list = []
    test_res = 0

    for e in range(epochs):
        print('Epoch: {}/{}'.format(e + 1, epochs))
        batches = 0
        start_time = time.time()
        history_list = []
        # ImageDataGenerator.flow yields batches indefinitely, so break
        # manually after roughly one pass over the training data.
        for x_batch, y_batch in generator.flow(xe, ye, batch_size):
            history = model.train_on_batch(x_batch,
                                           y_batch,
                                           class_weight=class_weight)
            history_list.append(history)
            batches += 1
            if batches >= int(len(xe) / batch_size):
                break

        history = np.asarray(history_list).mean(axis=0)
        train_history_list.append(history)
        history_names = model.metrics_names

        # Compute validation loss (binary cross-entropy) and F-score on the
        # thresholded validation predictions.
        train_val = model.predict(xv, batch_size)
        train_val = np.amax(train_val, axis=1)
        val_loss = binary_crossentropy(K.variable(yv), K.variable(train_val))
        val_loss = K.eval(K.mean(val_loss))
        # train_val = np.argmax(train_val, axis=1)
        train_val = np.round(train_val, decimals=0).astype(int)
        val_fscore = metrics.fscore(val_data[1], train_val)
        val_history_list.append((val_loss, val_fscore))

        fscore, _, test_res, _ = _do_test(model, xt, yt)
        test_history_list.append(fscore)

        end_time = time.time()
        print('time: {:.1f}'.format(end_time - start_time), end=' - ')
        for name, value in zip(history_names, history):
            print(name + ': {:.4f}'.format(value), end=' ')

        print('val_loss: {:.4f} val_mitos_fscore: {:.4f}'.format(
            val_loss, val_fscore),
              end=' ')
        print('test_fscore: {:.4f}'.format(fscore), flush=True)

    # Confusion matrix for the last epoch's test predictions.
    metrics.print_conf_matrix(yt, test_res)
    return np.transpose(train_history_list), np.transpose(
        val_history_list), test_history_list
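
All of the examples above call metrics.fscore(y_true, y_pred) or metrics.fscore(y_true, y_pred, not_detected) from the mitosCalsification package, whose implementation is not shown here. The snippet below is a minimal sketch of a compatible binary F1 helper; it assumes the optional not_detected argument counts positive samples that were missed before classification and are therefore added to the false negatives. It is an illustration only, not the project's actual code.

import numpy as np


def fscore(y_true, y_pred, not_detected=0):
    """Minimal binary F1 sketch compatible with the calls above (assumed, not the project's code)."""
    y_true = np.asarray(y_true).ravel().astype(int)
    y_pred = np.asarray(y_pred).ravel().astype(int)

    tp = np.sum((y_true == 1) & (y_pred == 1))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    # Assumption: `not_detected` counts positives lost before classification,
    # so they are treated as additional false negatives.
    fn = np.sum((y_true == 1) & (y_pred == 0)) + not_detected

    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)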