Example #1
import pandas as pd
from sklearn.model_selection import GroupShuffleSplit

batch_size = 32

labels = pd.read_csv(path + 'cross_validation/train_labels5.csv')
labels = labels.reset_index(drop=True)
split = GroupShuffleSplit(n_splits=1, test_size=0.11, random_state=24)
ind = split.split(labels, groups=labels['SourceReportName'])
trn_ind, val_ind = next(ind)
trn_df = labels.loc[trn_ind]
val_df = labels.loc[val_ind]

train_gen = ImageFrameGenerator(rotation_range=180,
                                width_shift_range=0.2,
                                height_shift_range=0.2,
                                shear_range=0.1,
                                zoom_range=0.2,
                                horizontal_flip=True,
                                vertical_flip=True,
                                fill_mode='nearest')
test_gen = ImageFrameGenerator()
trn_itr = train_gen.flow_from_frame(data_path, trn_df, 'basefile',
                                    ['split0_123', 'split01_23', 'split012_3'],
                                    target_size=(256, 320),
                                    label_types=['categorical'] * 3,
                                    batch_size=batch_size)
val_itr = test_gen.flow_from_frame(data_path, val_df, 'basefile',
                                   ['split0_123', 'split01_23', 'split012_3'],
                                   target_size=(256, 320),
                                   label_types=['categorical'] * 3,
                                   batch_size=batch_size, shuffle=False)

from keras.applications.inception_v3 import InceptionV3
from keras.layers import GlobalAveragePooling2D

base_model = InceptionV3(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
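
This snippet stops at the pooled InceptionV3 features. Examples #2 and #5 below attach three binary softmax heads at exactly this point; a minimal continuation in that spirit is sketched here, where the compile and fit settings (optimizer, losses, epochs) are assumptions not shown in the source:

# Continuation sketch: the three-head output mirrors Examples #2 and #5; the
# compile/fit settings below are assumptions.
from keras.layers import Dense
from keras.models import Model

output1 = Dense(2, activation='softmax', name='split0_123')(x)
output2 = Dense(2, activation='softmax', name='split01_23')(x)
output3 = Dense(2, activation='softmax', name='split012_3')(x)

model = Model(inputs=base_model.input, outputs=[output1, output2, output3])
model.compile(optimizer='adam',
              loss=['categorical_crossentropy'] * 3,
              metrics=['accuracy'])
model.fit_generator(trn_itr,
                    steps_per_epoch=len(trn_df) // batch_size,
                    epochs=20,                                  # arbitrary
                    validation_data=val_itr,
                    validation_steps=len(val_df) // batch_size)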
Example #2
import pandas as pd

video_labels = pd.read_csv(path + 'video_labels.csv')

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base_model = InceptionV3(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)

output1 = Dense(2, activation='softmax', name='split0_123')(x)
output2 = Dense(2, activation='softmax', name='split01_23')(x)
output3 = Dense(2, activation='softmax', name='split012_3')(x)

model = Model(inputs=base_model.input, outputs=[output1, output2, output3])

tst_gen = ImageFrameGenerator()
tst_itr = tst_gen.flow_from_frame(data_path,
                                  video_labels,
                                  'filename',
                                  ['split0_123', 'split01_23', 'split012_3'],
                                  target_size=(256, 320),
                                  label_types=['categorical'] * 3,
                                  batch_size=batch_size,
                                  shuffle=False)

thresh = 0.5
auc01_23 = []
sensi01_23 = []
speci01_23 = []
ppv01_23 = []
npv01_23 = []
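
Example #2 only initializes the metric accumulators. A hedged sketch of how lists such as auc01_23 and sensi01_23 might be filled at the 0.5 threshold follows; it assumes trained weights have already been loaded into model (e.g. via model.load_weights), that predict_generator is run on the unshuffled test iterator, and that column 1 of each head is the positive class, none of which is shown in the source:

# Illustrative metric computation for the 'split01_23' head (assumptions noted above).
import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score

steps = int(np.ceil(len(video_labels) / batch_size))
preds = model.predict_generator(tst_itr, steps=steps, verbose=1)  # one array per output head

y_true = video_labels['split01_23'].values   # assumes the CSV stores 0/1 labels
y_prob = preds[1][:, 1]                      # second head = 'split01_23', positive-class probability
y_pred = (y_prob >= thresh).astype(int)

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
auc01_23.append(roc_auc_score(y_true, y_prob))
sensi01_23.append(tp / (tp + fn))   # sensitivity
speci01_23.append(tn / (tn + fp))   # specificity
ppv01_23.append(tp / (tp + fp))     # positive predictive value
npv01_23.append(tn / (tn + fn))     # negative predictive value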
Example #3
import pandas as pd
from sklearn.model_selection import train_test_split

G = 4                      # number of GPUs
tot_bs = batch_size * G    # effective global batch size across all GPUs

df = pd.read_csv(path + 'train_labels.csv')

# Round both split sizes down to whole multiples of the global batch size
val_size = int(len(df) * 0.125 / tot_bs) * tot_bs
trn_size = int((len(df) - val_size) / tot_bs) * tot_bs

trn_df, val_df = train_test_split(
    df,
    test_size=val_size,
    train_size=trn_size,
    stratify=df[['CD_Active_AnyLocation', 'Fistula_Any', 'Abscess_any']],
    random_state=24)

gen = ImageFrameGenerator()
trn_itr = gen.flow_from_frame(
    path + 'low_resolution/',
    trn_df,
    'filename', ['CD_Active_AnyLocation', 'Fistula_Any', 'Abscess_any'],
    target_size=(224, 224, 128),
    color_mode='3d',
    batch_size=batch_size * G,
    shuffle=True)
val_itr = gen.flow_from_frame(
    path + 'low_resolution/',
    val_df,
    'filename', ['CD_Active_AnyLocation', 'Fistula_Any', 'Abscess_any'],
    target_size=(224, 224, 128),
    color_mode='3d',
    batch_size=batch_size * G,
    shuffle=False)
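
Example #3 sets up G = 4 GPUs and a global batch of batch_size * G but ends before any model is trained. A sketch of how such a setup was typically driven in Keras 2 follows; the 3D CNN model, optimizer, losses, and epoch count are assumptions, and only G, tot_bs, trn_size, val_size, and the two iterators come from the snippet above:

# Hypothetical training step: replicate an already-built 3D CNN (`model`, assumed
# to exist) across the G GPUs so each device sees `batch_size` volumes per step.
from keras.utils import multi_gpu_model

parallel_model = multi_gpu_model(model, gpus=G)
parallel_model.compile(optimizer='adam',                      # assumption
                       loss=['binary_crossentropy'] * 3,      # one loss per label column
                       metrics=['accuracy'])
parallel_model.fit_generator(trn_itr,
                             steps_per_epoch=trn_size // tot_bs,
                             epochs=20,                       # arbitrary
                             validation_data=val_itr,
                             validation_steps=val_size // tot_bs)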
Example #4
data_path = path + 'subset/'

batch_size = 32

import pandas as pd
from sklearn.model_selection import GroupShuffleSplit

labels = pd.read_csv(path + 'train_labels.csv')

split = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=24)
ind = split.split(labels, groups=labels['SourceReportName'])
trn_ind, val_ind = next(ind)
trn_df = labels.loc[trn_ind]
val_df = labels.loc[val_ind]

train_gen = ImageFrameGenerator(rotation_range=180,
                                width_shift_range=0.2,
                                height_shift_range=0.2,
                                shear_range=0.1,
                                zoom_range=0.2,
                                horizontal_flip=True,
                                vertical_flip=True,
                                fill_mode='nearest')
test_gen = ImageFrameGenerator()
train_itr = train_gen.flow_from_frame(data_path,
                                      trn_df,
                                      'basefile', ['Mayo_score'],
                                      target_size=(256, 320),
                                      batch_size=batch_size)
val_itr = test_gen.flow_from_frame(data_path,
                                   val_df,
                                   'basefile', ['Mayo_score'],
                                   target_size=(256, 320),
                                   batch_size=batch_size)
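
Example #4 only builds the train and validation iterators for the single Mayo_score label. A model to pair with them might look like the sketch below; the four-class head (Mayo scores 0 to 3, consistent with the acc0 through acc3 accumulators in Example #5) and the compile/fit settings are assumptions:

# Hypothetical single-output model for 'Mayo_score'; everything except the iterator
# names and batch_size is an assumption.
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base_model = InceptionV3(weights='imagenet', include_top=False)
x = GlobalAveragePooling2D()(base_model.output)
output = Dense(4, activation='softmax', name='Mayo_score')(x)  # scores 0-3

model = Model(inputs=base_model.input, outputs=output)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit_generator(train_itr,
                    steps_per_epoch=len(trn_df) // batch_size,
                    epochs=20,                                 # arbitrary
                    validation_data=val_itr,
                    validation_steps=len(val_df) // batch_size)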
Example #5
batch_size = 32

from keras.applications.inception_v3 import InceptionV3
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base_model = InceptionV3(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)

output1 = Dense(2, activation='softmax', name='split0_123')(x)
output2 = Dense(2, activation='softmax', name='split01_23')(x)
output3 = Dense(2, activation='softmax', name='split012_3')(x)

model = Model(inputs=base_model.input, outputs=[output1, output2, output3])

validation_gen = ImageFrameGenerator()

thresh = 0.5
auc01_23 = []
sensi01_23 = []
speci01_23 = []
ppv01_23 = []
npv01_23 = []
acc0 = []
acc1 = []
acc2 = []
acc3 = []
kappa_linear = []
kappa_quad = []

for tst_seed in range(10):
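    # --- Hypothetical loop body: the original snippet is cut off at the line above. ---
    # Everything here is an illustration of how, e.g., the kappa accumulators might be
    # filled per seed, not code taken from the source: the per-seed test frame `tst_df`,
    # its file name, the image directory `data_path`, and the reconstruction of a 0-3
    # score from the three ordinal heads are all assumptions.
    import pandas as pd                              # would normally sit at the top of the file
    from sklearn.metrics import cohen_kappa_score

    tst_df = pd.read_csv(path + 'test_labels_seed{}.csv'.format(tst_seed))   # hypothetical file
    tst_itr = validation_gen.flow_from_frame(data_path, tst_df, 'basefile',
                                             ['split0_123', 'split01_23', 'split012_3'],
                                             target_size=(256, 320),
                                             label_types=['categorical'] * 3,
                                             batch_size=batch_size, shuffle=False)

    preds = model.predict_generator(tst_itr, steps=-(-len(tst_df) // batch_size))  # ceil division
    pred_score = sum((p[:, 1] >= thresh).astype(int) for p in preds)   # 0-3 from the 3 ordinal heads
    true_score = tst_df[['split0_123', 'split01_23', 'split012_3']].values.sum(axis=1)

    kappa_linear.append(cohen_kappa_score(true_score, pred_score, weights='linear'))
    kappa_quad.append(cohen_kappa_score(true_score, pred_score, weights='quadratic'))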