Example #1
def test():

    test_images, test_categories = [], []
    for fn in glob.glob('data/splits/test/*.tif'):
        img = np.array(io.imread(fn), dtype='float32')
        scaled = img / np.float32(255.0)
        scaled = downscale_local_mean(scaled, factors=(2, 2))
        test_images.append(scaled)
        test_categories.append(os.path.basename(fn).split('_')[0])

    with open('models/' + MODEL_NAME + '/label_encoder.p', 'rb') as f:
        label_encoder = pickle.load(f)
    print('-> Working on classes:', label_encoder.classes_)

    test_int_labels = label_encoder.transform(test_categories)

    model = model_from_json(
        open('models/' + MODEL_NAME + '/architecture.json').read())

    model.load_weights('models/' + MODEL_NAME + '/weights.hdf5')

    test_preds = []
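    # test-time augmentation: average the model's predictions over
    # NB_TEST_PATCHES random patches sampled from each test image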

    for img in test_images:
        X = utils.augment_test_image(image=img,
                                     nb_rows=NB_ROWS,
                                     nb_cols=NB_COLS,
                                     nb_patches=NB_TEST_PATCHES)
        preds = np.array(model.predict(X), dtype='float64')
        av_pred = preds.mean(axis=0)
        test_preds.append(np.argmax(av_pred, axis=0))

    print('Test accuracy:', accuracy_score(test_int_labels, test_preds))

    # confusion matrix
    plt.clf()
    T = label_encoder.inverse_transform(test_int_labels)
    P = label_encoder.inverse_transform(test_preds)
    cm = confusion_matrix(T, P, labels=label_encoder.classes_)
    cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    np.set_printoptions(precision=2)
    plt.figure()
    utils.plot_confusion_matrix(cm_normalized,
                                target_names=label_encoder.classes_)
    plt.savefig('models/' + MODEL_NAME + '/test_conf_matrix.pdf')
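The snippet depends on a project-specific helper, `utils.augment_test_image`, whose source is not shown. Below is a minimal sketch of what such a patch sampler might look like, assuming it draws `nb_patches` random `nb_rows` x `nb_cols` crops and stacks them in Theano-style `(n, 1, rows, cols)` order (the shape implied by the reshapes in the later examples); the behaviour is inferred from the call sites, not taken from the original `utils` module:

import numpy as np

def augment_test_image(image, nb_rows, nb_cols, nb_patches):
    # hypothetical re-implementation: sample nb_patches random crops
    # from a single 2-D image and stack them as (n, 1, rows, cols)
    max_r = image.shape[0] - nb_rows
    max_c = image.shape[1] - nb_cols
    patches = np.zeros((nb_patches, 1, nb_rows, nb_cols), dtype='float32')
    for i in range(nb_patches):
        r = np.random.randint(0, max_r + 1)
        c = np.random.randint(0, max_c + 1)
        patches[i, 0] = image[r:r + nb_rows, c:c + nb_cols]
    return patches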
Example #2
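# visualize the preprocessing pipeline for one image: save the original,
# the downscaled version, and a set of random train/test crops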
try:
    shutil.rmtree('viz/' + example_dir)
except OSError:
    pass
os.mkdir('viz/' + example_dir)

img = np.array(io.imread(FILEPATH), dtype='float32')
print(img.shape)
scipy.misc.imsave('viz/' + example_dir + '/orig.tiff', img)

scaled = img / np.float32(255.0)
scaled = downscale_local_mean(scaled, factors=(2, 2))

scipy.misc.imsave('viz/' + example_dir + '/downscaled.tiff', scaled * 255)

train_crops, _ = utils.augment_train_images([scaled], [0], NB_ROWS, NB_COLS,
                                            NB_PATCHES)
for idx, x in enumerate(train_crops):
    x = x.reshape((x.shape[1], x.shape[2]))
    x *= 255
    scipy.misc.imsave(
        'viz/' + example_dir + '/train_crop_' + str(idx) + '.tiff', x)

test_crops = utils.augment_test_image(scaled, NB_ROWS, NB_COLS, NB_PATCHES)
for idx, x in enumerate(test_crops):
    x = x.reshape((x.shape[1], x.shape[2]))
    x *= 255
    scipy.misc.imsave(
        'viz/' + example_dir + '/test_crop_' + str(idx) + '.tiff', x)
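`utils.augment_train_images` is likewise project-specific. Judging from its call sites, it samples `nb_patches` random crops per input image and returns the stacked crops together with the matching labels; a sketch under those assumptions (again a hypothetical re-implementation, not the original code):

import numpy as np

def augment_train_images(images, categories, nb_rows, nb_cols, nb_patches):
    # hypothetical re-implementation: random crops plus repeated labels
    X, Y = [], []
    for image, label in zip(images, categories):
        max_r = image.shape[0] - nb_rows
        max_c = image.shape[1] - nb_cols
        for _ in range(nb_patches):
            r = np.random.randint(0, max_r + 1)
            c = np.random.randint(0, max_c + 1)
            X.append(image[r:r + nb_rows, c:c + nb_cols][np.newaxis, ...])
            Y.append(label)
    return np.array(X, dtype='float32'), np.array(Y)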
Example #3
def train():
    train_images, train_categories = utils.load_dir('data/splits/train')
    dev_images, dev_categories = utils.load_dir('data/splits/dev')

    try:
        os.mkdir('models')
    except OSError:
        pass

    try:
        shutil.rmtree('models/' + MODEL_NAME)
    except OSError:
        pass

    os.mkdir('models/' + MODEL_NAME)

    label_encoder = LabelEncoder().fit(train_categories)
    train_y_int = label_encoder.transform(train_categories)
    dev_y_int = label_encoder.transform(dev_categories)
    train_Y = np_utils.to_categorical(train_y_int,
                                      nb_classes=len(label_encoder.classes_))

    print('-> Working on', len(label_encoder.classes_), 'classes:', label_encoder.classes_)

    with open('models/' + MODEL_NAME + '/label_encoder.p', 'wb') as f:
        pickle.dump(label_encoder, f)

    if MODEL_TYPE == 'resnet50':
        model = resnet50.ResNet50(weights=None,
                                  nb_classes=len(label_encoder.classes_),
                                  nb_rows=NB_ROWS,
                                  nb_cols=NB_COLS)
    elif MODEL_TYPE == 'vgg16':
        model = vgg16.VGG16(nb_classes=len(label_encoder.classes_),
                            nb_rows=NB_ROWS,
                            nb_cols=NB_COLS)
    else:
        raise ValueError('Unsupported model type: ' + MODEL_TYPE)

    model.summary()
    
    with open('models/' + MODEL_NAME + '/architecture.json', 'w') as F:
        F.write(model.to_json())

    best_dev_acc = 0.0

    # build dev inputs once:
    print('-> building dev inputs once:')
    dev_inputs = []
    for img in dev_images:
        i = utils.augment_test_image(image=img,
                                     nb_rows=NB_ROWS,
                                     nb_cols=NB_COLS,
                                     nb_patches=NB_TEST_PATCHES)
        dev_inputs.append(i)

    for e in range(NB_EPOCHS):

        tmp_train_X, tmp_train_Y = utils.augment_train_images(images=train_images,
                                                              categories=train_Y,
                                                              nb_rows=NB_ROWS,
                                                              nb_cols=NB_COLS,
                                                              nb_patches=NB_TRAIN_PATCHES)

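        # debug: dump the first few augmented crops of this epoch to disk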
        for idx, p in enumerate(tmp_train_X):
            p = p.reshape((p.shape[1], p.shape[2]))
            imsave(str(idx)+'.png', p)
            if idx >= 30:
                break

        model.fit(tmp_train_X, tmp_train_Y,
                  batch_size=BATCH_SIZE,
                  nb_epoch=1,
                  shuffle=True)

        dev_preds = []
        for inp in dev_inputs:
            pred = model.predict(inp, batch_size=BATCH_SIZE)
            pred = pred.mean(axis=0)
            dev_preds.append(np.argmax(pred, axis=0))

        # calculate accuracy:
        curr_acc = accuracy_score(dev_y_int, dev_preds)
        print('  curr val acc:', curr_acc)

        # save weights, if appropriate:
        if curr_acc > best_dev_acc:
            print('    -> saving model')
            model.save_weights('models/' + MODEL_NAME + '/weights.hdf5',
                               overwrite=True)
            best_dev_acc = curr_acc

        # halve the learning rate every 10 epochs:
        if e and e % 10 == 0:
            old_lr = model.optimizer.lr.get_value()
            new_lr = np.float32(old_lr * 0.5)
            model.optimizer.lr.set_value(new_lr)
            print('\t- Lowering learning rate > was:', old_lr, ', now:', new_lr)
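Note that `model.optimizer.lr.get_value()` / `.set_value()` only exist on the Theano backend, where the learning rate is a shared variable. A backend-agnostic sketch of the same schedule using the Keras backend API (the helper name `halve_lr` is ours, not the original code's):

import numpy as np
from keras import backend as K

def halve_lr(model):
    # works on both the Theano and TensorFlow backends
    old_lr = float(K.get_value(model.optimizer.lr))
    K.set_value(model.optimizer.lr, np.float32(old_lr * 0.5))
    return old_lr, old_lr * 0.5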
def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
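`normalize` is the gradient-scaling helper from the classic Keras filter-visualization recipe: the gradient of a filter's mean activation with respect to the input image is L2-normalized before every ascent step. A sketch of that typical usage; `model_input`, `start_img`, and the step size are illustrative assumptions, and `loss` is a scalar activation such as the one built per filter below:

def gradient_ascent(model_input, loss, start_img, step=1.0, nb_steps=20):
    # climb the loss surface in image space, as in filter visualization;
    # start_img is a 4-D batch matching the model's input shape
    grads = normalize(K.gradients(loss, model_input)[0])
    iterate = K.function([model_input], [loss, grads])
    x = start_img.copy()
    for _ in range(nb_steps):
        loss_value, grads_value = iterate([x])
        x += grads_value * step
    return x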

if os.path.isdir('crop_viz'):
    shutil.rmtree('crop_viz')
os.mkdir('crop_viz')

test_crops = []
for fn in glob.glob('data/splits/dev/*.tif'):
    img = np.array(io.imread(fn), dtype='float32')
    scaled = img / np.float32(255.0)
    scaled = downscale_local_mean(scaled, factors=(2, 2))
    test_crops.extend(utils.augment_test_image(scaled, img_width, img_height, NB_TEST_PATCHES))

print(len(test_crops))

if LAYER_NAME == 'prediction':
    with open('models/' + MODEL_NAME + '/label_encoder.p', 'rb') as f:
        label_encoder = pickle.load(f)
    print('-> Working on classes:', label_encoder.classes_)
    NB_FILTERS = len(label_encoder.classes_)

for filter_idx in range(NB_FILTERS):
    print('Processing filter', filter_idx)
        
    layer_output = layer_dict[LAYER_NAME].output
    if LAYER_NAME == 'prediction':
        loss = K.mean(layer_output[:, filter_idx])
    else: