Example 1
0
    def eval_on_data(eval_with_generator, input_data, data_type):
        """Evaluate the best (and optionally SWA / snapshot-ensemble) model on one split.

        Accuracy results are written into the enclosing `train_log` dict under keys
        derived from `data_type` (e.g. 'valid', 'test'). Relies on `model`, `config`
        and `train_log` from the enclosing scope.

        Args:
            eval_with_generator: if True, `input_data` is a generator with an
                `input_label` attribute; otherwise a dict with 'x' and 'y' keys.
            input_data: evaluation data (generator or dict, see above).
            data_type: split name used in log keys.
        """
        model.load_best_model()
        if eval_with_generator:
            acc = model.evaluate_with_generator(generator=input_data, y=input_data.input_label)
        else:
            acc = model.evaluate(x=input_data['x'], y=input_data['y'])
        train_log['%s_acc' % data_type] = acc

        # Stochastic Weight Averaging checkpoint, if either SWA callback was enabled.
        swa_type = None
        if 'swa' in config.callbacks_to_add:
            swa_type = 'swa'
        elif 'swa_clr' in config.callbacks_to_add:
            swa_type = 'swa_clr'
        if swa_type:
            print('Logging Info - %s Model' % swa_type)
            model.load_swa_model(swa_type=swa_type)
            swa_acc = model.evaluate(x=input_data['x'], y=input_data['y'])
            train_log['%s_%s_acc' % (swa_type, data_type)] = swa_acc

        # Snapshot ensembling (SSE / FGE): evaluate each saved cyclic checkpoint
        # individually and as a running average ensemble of predictions.
        ensemble_type = None
        if 'sse' in config.callbacks_to_add:
            ensemble_type = 'sse'
        elif 'fge' in config.callbacks_to_add:
            ensemble_type = 'fge'
        if ensemble_type:
            print('Logging Info - %s Ensemble Model' % ensemble_type)
            # Compile the checkpoint-name pattern once. Note `(\d+)` — the previous
            # `[\d+]` was a single-character class and failed (match is None ->
            # AttributeError) on multi-digit model ids — and the escaped `\.` so a
            # literal dot, not any character, precedes the extension.
            ckpt_pattern = re.compile(
                r'%s_%s_(\d+)\.hdf5$' % (re.escape(config.exp_name), ensemble_type))
            ensemble_predict = {}
            for model_file in os.listdir(config.checkpoint_dir):
                match = ckpt_pattern.match(model_file)
                if match:
                    model_id = int(match.group(1))
                    model_path = os.path.join(config.checkpoint_dir, model_file)
                    print('Logging Info: Loading {} ensemble model checkpoint: {}'.format(ensemble_type, model_file))
                    model.load_model(model_path)
                    ensemble_predict[model_id] = model.predict(x=input_data['x'])
            # Models saved towards the end of the run tend to perform better, so
            # sort by id descending: the newest checkpoints enter the ensemble first.
            sorted_ensemble_predict = sorted(ensemble_predict.items(), key=lambda x: x[0], reverse=True)
            model_predicts = []
            for model_id, model_predict in sorted_ensemble_predict:
                single_acc = eval_acc(model_predict, input_data['y'])
                print('Logging Info - %s_single_%d_%s Acc : %f' % (ensemble_type, model_id, data_type, single_acc))
                train_log['%s_single_%d_%s_acc' % (ensemble_type, model_id, data_type)] = single_acc

                # Grow the ensemble one model at a time and score the mean prediction.
                model_predicts.append(model_predict)
                ensemble_acc = eval_acc(np.mean(np.array(model_predicts), axis=0), input_data['y'])
                print('Logging Info - %s_ensemble_%d_%s Acc : %f' % (ensemble_type, model_id, data_type, ensemble_acc))
                train_log['%s_ensemble_%d_%s_acc' % (ensemble_type, model_id, data_type)] = ensemble_acc
Example 2
0
    def evaluate(self, data):
        """Score predictions on `data` and report acc/f1/macro-f1/precision/recall."""
        preds = self.predict(data)
        gold = data['label']

        scorers = (eval_acc, eval_f1, eval_macro_f1, eval_precision, eval_recall)
        acc, f1, macro_f1, p, r = (score(gold, preds) for score in scorers)
        print('acc: {}, f1: {}, macro_f1 : {}, p: {}, r: {}'.format(
            acc, f1, macro_f1, p, r))
        return acc, f1, macro_f1, p, r
Example 3
0
    def evaluate(self, data):
        """Score predictions on `data`; labels are argmax-decoded unless the loss is binary."""
        preds = self.predict(data)
        # Binary cross-entropy keeps labels as-is; any other loss implies one-hot
        # labels that must be collapsed to class indices first.
        if self.config.loss_function == 'binary_crossentropy':
            gold = data['label']
        else:
            gold = np.argmax(data['label'], axis=-1)

        results = [metric(gold, preds)
                   for metric in (eval_acc, eval_f1, eval_macro_f1,
                                  eval_precision, eval_recall)]
        acc, f1, macro_f1, p, r = results
        print('acc : {}, f1 : {}, macro_f1: {}, p : {}, r: {}'.format(acc, f1, macro_f1, p, r))
        return acc, f1, macro_f1, p, r
Example 4
0
 def evaluate_with_generator(self, generator, y):
     """Predict via a data generator and return accuracy against gold labels `y`."""
     preds = self.predict_with_generator(generator)
     accuracy = eval_acc(y, preds)
     print('Logging Info - Acc : %f' % accuracy)
     return accuracy
Example 5
0
 def evaluate(self, x, y):
     """Predict on inputs `x` and return accuracy against gold labels `y`."""
     y_pred = self.predict(x)
     accuracy = eval_acc(y, y_pred)
     print('Logging Info - Acc : %f' % accuracy)
     return accuracy
 def evaluate(self, data):
     """Unpack `(inputs, labels)` from `data`, predict, and return accuracy."""
     inputs, gold = data
     preds = self.predict(inputs)
     accuracy = eval_acc(gold, preds)
     # Lazy %-style args: the message is only formatted if the log level emits it.
     logging.info('acc : %f', accuracy)
     return accuracy