# Exemplo n.º 1
# 0
def run_test():
    """Predict test masks with the single best model plus test-time augmentation.

    Loads the test images, normalises them with the stored training
    mean/std, runs the network on the original images and on every
    augmentation in ``transforms``, undoes each augmentation on its
    prediction, then averages all predictions per image and saves the
    resulting masks and presence probabilities to disk.
    """
    batch_size = 128
    print('Loading and preprocessing test data...')
    mean, std = Learner.load_meanstd()

    imgs_test = preprocess(load_test_data()).astype('float32')
    # Normalise with the training-set statistics.
    imgs_test -= mean
    imgs_test /= std

    print('Loading saved weights...')
    model = get_unet(Adam(0.001))
    print('Loading weights from %s' % Learner.best_weight_path)
    model.load_weights(Learner.best_weight_path)

    print('Augment')
    n_aug, n_imgs = len(transforms), len(imgs_test)
    test_x = np.ndarray((n_aug, n_imgs, 1, img_rows, img_cols), dtype=np.float32)
    for j, transform in enumerate(transforms):
        for i in range(n_imgs):
            test_x[j, i] = transform['do'](imgs_test[i].copy())

    print('Predicting masks on test data...')
    # outputs[0] is the un-augmented prediction; outputs[1:] follow transforms.
    outputs = [model.predict(imgs_test, batch_size=batch_size, verbose=1)]
    for j in range(n_aug):
        outputs.append(model.predict(test_x[j], batch_size=batch_size, verbose=1))

    print('Analyzing')
    test_masks = np.ndarray((n_imgs, 1, img_rows, img_cols), dtype=np.float32)
    test_probs = np.ndarray((n_imgs, ), dtype=np.float32)
    for i in range(n_imgs):
        masks = np.ndarray((n_aug + 1, 1, img_rows, img_cols), dtype=np.float32)
        probs = np.ndarray((n_aug + 1, ), dtype=np.float32)
        for j, t_y in enumerate(outputs):
            mask, prob = t_y[0][i], t_y[1][i]
            if j:
                # Map the augmented prediction back to the original frame.
                mask = transforms[j - 1]['undo'](mask)
            masks[j] = mask
            probs[j] = prob
        test_masks[i] = np.mean(masks, 0)
        test_probs[i] = np.mean(probs)

    print('Saving ')
    np.save(Learner.test_mask_res, test_masks)
    np.save(Learner.test_mask_exist_res, test_probs)
def run_test():
    """Predict test masks with a k-fold ensemble plus test-time augmentation.

    For each of the ``kfold`` saved weight files, predicts masks for the
    original test images and every augmentation in ``transforms``
    (undoing each augmentation on its prediction) and averages within
    the fold; the per-fold results are then averaged across folds and
    saved to disk.

    Fix: the original body used ``xrange`` and a Python-2 ``print``
    statement, which is a SyntaxError under Python 3 and inconsistent
    with the other functions in this file; both are replaced with their
    Python-3-compatible equivalents.
    """
    BS = 256
    print('Loading and preprocessing test data...')
    mean, std = Learner.load_meanstd()

    imgs_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    # Normalise with the training-set statistics.
    imgs_test -= mean
    imgs_test /= std

    print('Augment')
    alen, dlen = len(transforms), len(imgs_test)
    test_x = np.ndarray((alen, dlen, 1, img_rows, img_cols), dtype=np.float32)
    for i in range(dlen):
        for j, transform in enumerate(transforms):
            test_x[j, i] = transform['do'](imgs_test[i].copy())

    kfold = 6
    kfold_masks, kfold_prob = [], []
    for _iter in range(kfold):
        print('Iter=%d, Loading saved weights...' % _iter)
        model = get_unet(Adam(0.001))
        filepath = Learner.best_weight_path + '_%d.fold' % _iter
        print('Loading weights from %s' % filepath)
        model.load_weights(filepath)

        print('Predicting masks on test data...')
        # outputs[0] is the un-augmented prediction; outputs[1:] follow transforms.
        outputs = [model.predict(imgs_test, batch_size=BS, verbose=1)]
        for j, transform in enumerate(transforms):
            outputs.append(model.predict(test_x[j], batch_size=BS, verbose=1))

        print('Analyzing')
        test_masks = np.ndarray((dlen, 1, img_rows, img_cols),
                                dtype=np.float32)
        test_probs = np.ndarray((dlen, ), dtype=np.float32)
        for i in range(dlen):
            masks = np.ndarray((alen + 1, 1, img_rows, img_cols),
                               dtype=np.float32)
            probs = np.ndarray((alen + 1, ), dtype=np.float32)
            for j, t_y in enumerate(outputs):
                mask, prob = t_y[0][i], t_y[1][i]
                if j:
                    # Map the augmented prediction back to the original frame.
                    mask = transforms[j - 1]['undo'](mask.copy())
                masks[j] = mask
                probs[j] = prob
            test_masks[i] = np.mean(masks, 0)
            test_probs[i] = np.mean(probs)
        kfold_masks.append(test_masks)
        kfold_prob.append(test_probs)

    print('Summing results of ensemble')
    # Average per-element across the folds (vectorized; replaces the
    # original per-image copy loop with identical results).
    res_masks = np.mean(np.asarray(kfold_masks, dtype=np.float32), axis=0)
    res_probs = np.mean(np.asarray(kfold_prob, dtype=np.float32), axis=0)

    print('Saving ')
    np.save(Learner.test_mask_res, res_masks)
    np.save(Learner.test_mask_exist_res, res_probs)
def run_test():
    """Predict test masks (with ground truth available) using the best model.

    Same test-time-augmentation pipeline as the other variants, but also
    loads and preprocesses the ground-truth test masks, prints the shapes
    of ground truth vs. predictions, and additionally saves the scaled
    ground-truth masks alongside the predicted masks and probabilities.
    """
    batch_size = 128
    print('Loading and preprocessing test data...')
    mean, std = Learner.load_meanstd()

    imgs_test, img_test_mask_gt = load_test_data()
    # NOTE(review): ids are loaded but only used by previously removed
    # debug export code — kept in case load_test_ids has side effects.
    test_img_id = load_test_ids()

    imgs_test = preprocess(imgs_test)
    img_test_mask_gt = preprocess(img_test_mask_gt)

    imgs_test = imgs_test.astype('float32')
    # Normalise with the training-set statistics.
    imgs_test -= mean
    imgs_test /= std

    # Scale ground-truth masks from {0, 255} to {0, 1}.
    img_test_mask_gt = img_test_mask_gt.astype('float32')
    img_test_mask_gt /= 255.0

    print('Loading saved weights...')
    model = get_unet(Adam(0.001))
    print('Loading weights from %s' % Learner.best_weight_path)
    model.load_weights(Learner.best_weight_path)

    print('Augment')
    n_aug, n_imgs = len(transforms), len(imgs_test)
    test_x = np.ndarray((n_aug, n_imgs, 1, img_rows, img_cols), dtype=np.float32)
    for j, transform in enumerate(transforms):
        for i in range(n_imgs):
            test_x[j, i] = transform['do'](imgs_test[i].copy())

    print('Predicting masks on test data...')
    # outputs[0] is the un-augmented prediction; outputs[1:] follow transforms.
    outputs = [model.predict(imgs_test, batch_size=batch_size, verbose=1)]
    for j in range(n_aug):
        outputs.append(model.predict(test_x[j], batch_size=batch_size, verbose=1))

    print('Analyzing')
    test_masks = np.ndarray((n_imgs, 1, img_rows, img_cols), dtype=np.float32)
    test_probs = np.ndarray((n_imgs, ), dtype=np.float32)
    for i in range(n_imgs):
        masks = np.ndarray((n_aug + 1, 1, img_rows, img_cols), dtype=np.float32)
        probs = np.ndarray((n_aug + 1, ), dtype=np.float32)
        for j, t_y in enumerate(outputs):
            mask, prob = t_y[0][i], t_y[1][i]
            if j:
                # Map the augmented prediction back to the original frame.
                mask = transforms[j - 1]['undo'](mask)
            masks[j] = mask
            probs[j] = prob
        test_masks[i] = np.mean(masks, 0)
        test_probs[i] = np.mean(probs)

    print(img_test_mask_gt.shape)
    print(test_masks.shape)
    print('Saving ')
    np.save(Learner.test_mask_res, test_masks)
    np.save(Learner.test_mask_exist_res, test_probs)
    np.save(Learner.test_mask_gt, img_test_mask_gt)
def train_and_predict():
    """Train the U-Net on the training set, then predict masks for the test set.

    Trains with checkpointing (best validation loss) and early stopping,
    reloads the best weights, predicts on the normalised test images and
    saves the mask (and, for two-output models, presence) predictions.
    """
    def _banner(msg):
        # Print a message framed by the file's standard dashed separators.
        print('-' * 30)
        print(msg)
        print('-' * 30)

    _banner('Loading and preprocessing train data...')
    imgs_train, imgs_mask_train = load_train_data()
    imgs_present = load_nerve_presence()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    # centering and standardising the images
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)
    std = np.std(imgs_train)
    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to be in {0, 1} instead of {0, 255}

    _banner('Creating and compiling model...')
    # load model - the Learning rate scheduler choice is most important here
    model = get_unet(optimizer=OPTIMIZER, pars=PARS)

    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss',
                                       save_best_only=True)
    early_stopping = EarlyStopping(patience=5, verbose=1)

    _banner('Fitting model...')
    # Single-output models train on the masks alone; two-output models
    # additionally train on the nerve-presence labels.
    imgs_labels = (imgs_mask_train if PARS['outputs'] == 1
                   else [imgs_mask_train, imgs_present])

    model.fit(imgs_train, imgs_labels,
              batch_size=128, epochs=50,
              verbose=1, shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint, early_stopping])

    _banner('Loading and preprocessing test data...')
    imgs_test = preprocess(load_test_data()).astype('float32')
    # Normalise test data with the training-set statistics.
    imgs_test -= mean
    imgs_test /= std

    _banner('Loading saved weights...')
    model.load_weights('weights.h5')

    _banner('Predicting masks on test data...')
    imgs_mask_test = model.predict(imgs_test, verbose=1)

    if PARS['outputs'] == 1:
        np.save('imgs_mask_test.npy', imgs_mask_test)
    else:
        np.save('imgs_mask_test.npy', imgs_mask_test[0])
        np.save('imgs_mask_test_present.npy', imgs_mask_test[1])