# ===== Example 1 =====
#encoding: utf-8
from __future__ import print_function
import numpy as np
import os
from config import config as cfg
from training import init_env
# Presumably pins this process to GPU '0' (see training.init_env) — must run
# before any CUDA-using import below creates a device context; confirm.
init_env('0')
from proc.data import load_train_csv, split_train_val, load_test_csv
from model.resnet import ResNet50, preprocess_input
from proc.gennerator import BaseTestGenerator
from sklearn.metrics import roc_curve, auc
from matplotlib import pyplot as plt
from config import config as cfg
import scipy.optimize as opt
from sklearn.metrics import f1_score

def sigmoid_np(x):
    """Elementwise logistic function 1 / (1 + e^-x) for scalars or ndarrays."""
    return np.reciprocal(1.0 + np.exp(np.negative(x)))

def F1_soft(preds, targets, th=0.5, d=50.0):
    """Differentiable ("soft") per-class F1 score.

    Raw scores are pushed toward 0/1 with a steep sigmoid centred on the
    threshold, then the usual F1 formula is applied column-wise.

    Args:
        preds: array of raw scores, shape (n_samples, n_classes).
        targets: binary ground-truth labels, same shape as ``preds``.
        th: decision threshold the scores are compared against.
        d: sigmoid steepness; larger values approximate hard thresholding.

    Returns:
        Array of length n_classes: the soft F1 score of each class.
    """
    # Soft thresholding: sigmoid(d*(pred-th)) ~ 1[pred > th] for large d.
    # (Inlined rather than calling the sibling sigmoid_np helper so the
    # function is self-contained.)
    preds = 1.0 / (1.0 + np.exp(-d * (preds - th)))
    # FIX: np.float was deprecated and removed in NumPy 1.24 — use builtin float.
    targets = targets.astype(float)
    # 2*TP / (soft positives + actual positives); epsilon avoids 0/0.
    score = 2.0 * (preds * targets).sum(axis=0) / ((preds + targets).sum(axis=0) + 1e-6)
    return score

def fit_val(x, y, classes_num):
    params = 0.5*np.ones(classes_num)
    wd = 1e-5

    def error(p): return np.concatenate(
# ===== Example 2 =====
#encoding: utf-8
from __future__ import print_function
from keras import backend as K
from config import config as cfg
from training import init_env, get_number_of_steps

# Pin the visible GPU(s) before any CUDA work starts, then scale the global
# batch size so each device keeps the configured per-GPU batch.
gpus = '1'
init_env(gpus)
n_gpus = gpus.count(',') + 1  # number of comma-separated device ids
cfg.batch_size = n_gpus * cfg.batch_size
import os
from proc.data import load_train_csv, split_train_val
from proc.gennerator import BaseGenerator
from model.resnet import ResNet50, preprocess_input
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from training.callback import MultiGPUCheckpoint
from utils import makedir
from keras import optimizers as KO
from keras.utils import multi_gpu_model
import tensorflow as tf
import numpy as np
from proc.gennerator import BaseTestGenerator
import pandas as pd
from metrics import f1_score
from predict_resnet import predict_on_gennerator


def lr_schedule(epoch, lr=1e-2):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 30, 60, 90, 120 epochs.
# ===== Example 3 =====
    """pesudo struct
    """
    def __init__(self):
        # No fixed fields: attributes are attached dynamically by callers
        # (e.g. config.gpu = '5' below).
        pass

    def __str__(self):
        """Return the config as a header followed by one aligned
        'name---value' line per dynamically-set attribute.

        FIX: the original print()ed the header as a side effect of str();
        the header is now part of the returned string instead, so __str__
        is side-effect free and the full text goes wherever it is rendered.
        """
        lines = ["net work config", "*" * 80]
        lines.extend('%-20s---%s' % item for item in self.__dict__.items())
        return '\n'.join(lines)


# Global run configuration, built up as dynamic attributes on a bare Struct.
config = Struct()
config.gpu = '5'  # CUDA device id(s), comma-separated string
config.n_gpus = len(config.gpu.split(','))
# init_env must run before the Session is created so the gpu_options it
# returns (presumably per-process GPU memory settings — confirm in
# training.init_env) take effect.
config.gpu_options = init_env(config.gpu)
config.sess = tf.Session(config=tf.ConfigProto(gpu_options=config.gpu_options,
                                               allow_soft_placement=True))
config.data_root = '/home/share/data_repos/chest_xray'
config.input_shape = [256, 256, 3]  # H, W, C fed to the network
config.steps = 'auto'  # 'auto': presumably derived from dataset size — confirm
config.epochs = 100
config.n_works = 15  # generator worker count
config.n_queue = 200  # generator prefetch queue size
config.val_steps = 'auto'
config.batch_size = 32
config.weights_name = 'train_epoch_{epoch:02d}.hdf5'  # Keras checkpoint name pattern
config.log_dir = './logs'
config.train_val_list_file = os.path.join(config.data_root,
                                          'train_val_list.txt')
config.test_list_file = os.path.join(config.data_root, 'test_list.txt')
# ===== Example 4 =====
# encoding: utf-8
from __future__ import print_function
import numpy as np
import os
from config import config as cfg
from training import init_env
# Presumably selects GPU '5' (see training.init_env) — must run before the
# model imports below create a device context; confirm.
init_env('5')
cfg.batch_size = 2  # override: tiny batch (reason not stated here — likely memory-bound inference)
from proc.data import load_train_csv, split_train_val, load_test_csv
from model._xception import Xception, preprocess_input
from proc.gennerator import BaseTestGenerator
from sklearn.metrics import roc_curve, auc, f1_score
from matplotlib import pyplot as plt
from config import config as cfg
import scipy.optimize as opt


def show_samples(test_X, test_Y, pred_Y):
    # 表现最差样本样本id
    sickest_idx = np.argsort(np.sum(test_Y, 1) < 1)
    fig, m_axs = plt.subplots(4, 2, figsize=(16, 32))

    for (idx, c_ax) in zip(sickest_idx, m_axs.flatten()):
        c_ax.imshow(test_X[idx, :, :, 0], cmap='bone')
        stat_str = [
            n_class[:6]
            for n_class, n_score in zip(cfg.label_names, test_Y[idx])
            if n_score > 0.5
        ]
        pred_str = [
            '%s:%2.0f%%' % (n_class[:4], p_score * 100) for n_class, n_score,
# ===== Example 5 =====
#encoding: utf-8
from __future__ import print_function
from keras import backend as K
from config import config as cfg
from training import init_env, get_number_of_steps
gpus = '0, 1'
# BUG FIX: was init_env('gpus') — the literal string 'gpus' was passed instead
# of the variable, so the intended devices '0, 1' were never selected.
init_env(gpus)
# NOTE(review): the space in '0, 1' leaves ' 1' as the second id after split —
# confirm init_env / downstream consumers strip it.
n_gpus = len(gpus.split(','))
import os
from proc.data import load_train_csv, split_train_val
from proc.gennerator import BaseGenerator
from model import Xception
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from training.callback import MultiGPUCheckpoint
from utils import makedir
from keras.utils import multi_gpu_model
import tensorflow as tf
from metrics import roc_auc_loss
from keras.optimizers import Adam


def train():
    train_val_df = load_train_csv(cfg)
    train_df, val_df = split_train_val(train_val_df, 0.25)
    train_gen = BaseGenerator(train_df, cfg.train_dir, batch_size=cfg.batch_size,
                              aug_args=cfg.aug_args,
                              target_shape=cfg.input_shape[:2],
                              use_yellow=False)

    val_gen = BaseGenerator(val_df, cfg.train_dir, batch_size=cfg.batch_size,
                            aug_args=cfg.aug_args,