Example #1
def __init__(self):
    # load the model configuration
    with open("model/config.json") as f:
        config = json.load(f)
    # build the image processor from the config
    self._imageProcessor = ImageProcessor(config)
    # load the FCN model and its trained weights
    self._model = fcn_model((200, 200, 1), 2, weights=None)
    self._model.load_weights("model/weights.h5")
    # pre-build the predict function so the model can be called from other threads (older Keras API)
    self._model._make_predict_function()
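A hypothetical companion method showing how the loaded model might be called for inference; the `process` method and the exact preprocessing contract of ImageProcessor are assumptions, since only the constructor appears above:

def predict(self, image):
    # assumed: the processor returns a single-channel (200, 200) array ready for the network
    x = self._imageProcessor.process(image)
    x = x.reshape((1, 200, 200, 1)).astype('float32')
    return self._model.predict(x)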
Example #2
def create_submission(dcm_list, data_path):
    crop_size = 200
    input_shape = (crop_size, crop_size, 1)
    num_classes = 2

    # FCN models for the outer ('o') and inner ('i') contours
    oweights = 'weights/lvsc_o.h5'
    iweights = 'weights/lvsc_i.h5'
    omodel = fcn_model(input_shape, num_classes, weights=oweights)
    imodel = fcn_model(input_shape, num_classes, weights=iweights)

    # read every DICOM slice, centre-crop it, and stack into a single batch
    images = np.zeros((len(dcm_list), crop_size, crop_size, 1))
    for idx, dcm_path in enumerate(dcm_list):
        img = read_dicom(dcm_path)
        img = center_crop(img, crop_size=crop_size)
        images[idx] = img
    # predict the outer and inner masks for the whole batch
    opred_masks = omodel.predict(images, batch_size=32, verbose=1)
    ipred_masks = imodel.predict(images, batch_size=32, verbose=1)

    save_dir = data_path + '_auto_contours'
    prefix = 'MYFCN_'  # change prefix to your unique initials
    for idx, dcm_path in enumerate(dcm_list):
        img = read_dicom(dcm_path)
        h, w, d = img.shape
        # resize each prediction back to the original slice size and threshold it at 0.5
        otmp = reshape(opred_masks[idx], to_shape=(h, w, d))
        otmp = np.where(otmp > 0.5, 255, 0).astype('uint8')
        itmp = reshape(ipred_masks[idx], to_shape=(h, w, d))
        itmp = np.where(itmp > 0.5, 255, 0).astype('uint8')
        assert img.shape == otmp.shape, 'Prediction does not match shape'
        assert img.shape == itmp.shape, 'Prediction does not match shape'
        # the submitted mask is the outer mask minus the inner mask
        tmp = otmp - itmp
        tmp = np.squeeze(tmp, axis=(2,))
        sub_dir = dcm_path[dcm_path.find('CAP_'):dcm_path.rfind('DET')]
        filename = prefix + dcm_path[dcm_path.rfind('DET'):].replace(
            '.dcm', '.png')
        full_path = os.path.join(save_dir, sub_dir)
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        cv2.imwrite(os.path.join(full_path, filename), tmp)
        # read the file back to verify it was written without loss
        in_ = cv2.imread(os.path.join(full_path, filename),
                         cv2.IMREAD_GRAYSCALE)
        if not np.allclose(in_, tmp):
            raise AssertionError('File read error: {:s}'.format(
                os.path.join(full_path, filename)))
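A minimal invocation sketch for the function above, assuming the test images live under patient folders whose names start with 'CAP_' (the path layout the code itself relies on); the `data_path` value and the glob pattern are placeholders, not part of the original script:

import glob
import os

data_path = 'data/lvsc_test'  # hypothetical location of the test set
dcm_list = sorted(glob.glob(os.path.join(data_path, 'CAP_*', '**', '*.dcm'), recursive=True))
create_submission(dcm_list, data_path)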
Example #3
def create_submission(contours, data_path):
    # contour_type ('i' for inner, 'o' for outer) is expected to be defined at module level
    if contour_type == 'i':
        weights = 'weights/rvsc_i.h5'
    elif contour_type == 'o':
        weights = 'weights/rvsc_o.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)

    crop_size = 200
    images = np.zeros((len(contours), crop_size, crop_size, 1))
    for idx, contour in enumerate(contours):
        img, _ = read_contour(contour, data_path, return_mask=False)
        img = center_crop(img, crop_size=crop_size)
        images[idx] = img

    input_shape = (crop_size, crop_size, 1)
    num_classes = 2
    model = fcn_model(input_shape, num_classes, weights=weights)
    pred_masks = model.predict(images, batch_size=32, verbose=1)
    
    save_dir = data_path + '_auto_contours'
    num = 0
    for idx, ctr in enumerate(contours):
        img, _ = read_contour(ctr, data_path, return_mask=False)
        h, w, d = img.shape
        tmp = reshape(pred_masks[idx], to_shape=(h, w, d))
        assert img.shape == tmp.shape, 'Shape of prediction does not match'
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('No detection: %s' % ctr.ctr_path)
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if len(coords) > 1:
            print('Multiple detections: %s' % ctr.ctr_path)
            #cv2.imwrite('multiple_dets/'+contour_type+'{:04d}.png'.format(idx), tmp)
            # keep only the longest detected contour
            lengths = []
            for coord in coords:
                lengths.append(len(coord))
            coords = [coords[np.argmax(lengths)]]
            num += 1
        filename = 'P{:s}-{:s}-'.format(ctr.patient_no, ctr.img_no)+contour_type+'contour-auto.txt'
        full_path = os.path.join(save_dir, 'P{:s}'.format(ctr.patient_no)+'contours-auto')
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        with open(os.path.join(full_path, filename), 'w') as f:
            for coord in coords:
                coord = np.squeeze(coord, axis=(1,))
                # close the contour by repeating its first point, then write the x y pairs
                coord = np.append(coord, coord[:1], axis=0)
                np.savetxt(f, coord, fmt='%i', delimiter=' ')
    
    print('Num of files with multiple detections: {:d}'.format(num))
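Note that the three-value unpacking of cv2.findContours above matches the OpenCV 3.x API; OpenCV 4.x returns only (contours, hierarchy). A small version-agnostic variant, as a sketch:

ret = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
coords, hierarchy = ret if len(ret) == 2 else ret[1:]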
Example #4
    print('Done mapping training set')
    # hold out 10% of the training contours as a dev set
    split = int(0.1 * len(train_ctrs))
    dev_ctrs = train_ctrs[0:split]
    train_ctrs = train_ctrs[split:]
    print('\nBuilding train dataset ...')
    img_train, mask_train = export_all_contours(train_ctrs,
                                                TRAIN_PATH,
                                                crop_size=crop_size)
    print('\nBuilding dev dataset ...')
    img_dev, mask_dev = export_all_contours(dev_ctrs,
                                            TRAIN_PATH,
                                            crop_size=crop_size)

    input_shape = (crop_size, crop_size, 1)
    num_classes = 2
    model = fcn_model(input_shape, num_classes, weights=None)

    # data augmentation is disabled here; identical settings are applied to images and masks
    kwargs = dict(
        rotation_range=0,
        zoom_range=0.0,
        width_shift_range=0.0,
        height_shift_range=0.0,
        horizontal_flip=False,
        vertical_flip=False,
    )
    image_datagen = ImageDataGenerator(**kwargs)
    mask_datagen = ImageDataGenerator(**kwargs)

    epochs = 20
    mini_batch_size = 1
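The snippet stops before the training loop. A minimal sketch of how the paired image/mask generators above are typically wired together in Keras (the shared `seed`, the `train_generator` name, and the `train_on_batch` loop are assumptions, not the original code):

seed = 1
image_generator = image_datagen.flow(img_train, batch_size=mini_batch_size, seed=seed)
mask_generator = mask_datagen.flow(mask_train, batch_size=mini_batch_size, seed=seed)
train_generator = zip(image_generator, mask_generator)

for e in range(epochs):
    print('Epoch {:d}/{:d}'.format(e + 1, epochs))
    for _ in range(len(img_train) // mini_batch_size):
        img_batch, mask_batch = next(train_generator)
        model.train_on_batch(img_batch, mask_batch)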
Example #5
import os
import numpy as np
from keras import backend as K
from fcn_model import fcn_model
from helpers import center_crop, lr_poly_decay, get_SAX_SERIES
import matplotlib.pyplot as plt
from PIL import Image
import cv2

#   Choose GPU
os.environ["CUDA_VISIBLE_DEVICES"] = '1'


#   model parameters
crop_size = 512
input_shape = (crop_size, crop_size, 3)
num_classes = 4
# weight_path = 'model_logs/0111_sunnybrook_i_epoch_95.h5'
weight_path = None
model = fcn_model(input_shape, num_classes, weights=weight_path)
epochs = 20
mini_batch_size = 1

npy_path = '/media/lxy/F240900A408FD42F/'
print('#' * 30)
print('Loading images and labels')
images = np.load(npy_path + 'images_200.npy')
masks = np.load(npy_path + 'masks_200.npy')
print('Loading is ready,', len(images), 'images have been prepared\n')

max_iter = (len(images) // mini_batch_size) * epochs
curr_iter = 0
base_lr = K.eval(model.optimizer.lr)
lrate = lr_poly_decay(model, base_lr, curr_iter, max_iter, power=0.5)
for e in range(epochs):
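    # plausible continuation of the truncated epoch loop (a sketch; the original body is not shown)
    print('Epoch {:d}/{:d}'.format(e + 1, epochs))
    for start in range(0, len(images), mini_batch_size):
        img_batch = images[start:start + mini_batch_size]
        mask_batch = masks[start:start + mini_batch_size]
        model.train_on_batch(img_batch, mask_batch)
        curr_iter += 1
        # decay the learning rate polynomially after every mini-batch, as set up above
        lrate = lr_poly_decay(model, base_lr, curr_iter, max_iter, power=0.5)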
Example #6
def create_submission(data_path):
    # contours, contour_type, save_dir and SAX_SERIES are expected to be defined at module level
    print(len(list(contours)))
    if contour_type == 'i':
        weights = 'weights/sunnybrook_i.h5'
    elif contour_type == 'o':
        weights = 'weights/sunnybrook_o.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)
    #pdb.set_trace()
    crop_size = 256
    images, masks = export_all_contours(contours, data_path, crop_size)
    
    input_shape = (crop_size, crop_size, 1)
    num_classes = 2
    model = fcn_model(input_shape, num_classes, weights=weights)

    pred_masks = model.predict(images, batch_size=32, verbose=1)
    
    num = 0
    print(list(contours))
    for idx, ctr in enumerate(contours):
        img, mask = read_contour(ctr, data_path)
        h, w, d = img.shape
        tmp = reshape(pred_masks[idx], to_shape=(h, w, d))
        assert img.shape == tmp.shape, 'Shape of prediction does not match'
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection: %s' % ctr.ctr_path)
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if len(coords) > 1:
            print('\nMultiple detections: %s' % ctr.ctr_path)
            #cv2.imwrite('multiple_dets/'+contour_type+'{:04d}.png'.format(idx), tmp)
            # keep only the longest detected contour
            lengths = []
            for coord in coords:
                lengths.append(len(coord))
            coords = [coords[np.argmax(lengths)]]
            num += 1
        
        # build filenames and destination folders for the manual contour, the auto contour and the DICOM image
        man_filename = ctr.ctr_path[ctr.ctr_path.rfind('/')+1:]
        auto_filename = man_filename.replace('manual', 'auto')
        img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
        man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual', 'IRCCI-expert')
        auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto', 'FCN')
        img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
        dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
        dcm_path = os.path.join(data_path, ctr.case, dcm)
        for dirpath in [man_full_path, auto_full_path, img_full_path]:
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if 'manual' in dirpath:
                # copy the expert (manual) contour file
                src = ctr.ctr_path
                dst = os.path.join(dirpath, man_filename)
                shutil.copyfile(src, dst)
            elif 'DICOM' in dirpath:
                # copy the original DICOM image
                src = dcm_path
                dst = os.path.join(dirpath, img_filename)
                shutil.copyfile(src, dst)
            else:
                # write the predicted (auto) contour, closing it by repeating its first point
                dst = os.path.join(auto_full_path, auto_filename)
                with open(dst, 'w') as f:
                    for coord in coords:
                        coord = np.squeeze(coord, axis=(1,))
                        coord = np.append(coord, coord[:1], axis=0)
                        np.savetxt(f, coord, fmt='%i', delimiter=' ')
    
    print('\nNumber of multiple detections: {:d}'.format(num))
Example #7
from helpers import center_crop
from fcn_model import fcn_model
import dicom  # legacy pydicom package; newer releases are imported as `pydicom`
import cv2
import matplotlib.pyplot as plt
weights = 'rvsc_o_epoch_20.h5'

crop_size = (200, 200, 1)  # despite the name, this is the model input shape (H, W, channels)
num_class = 2

model = fcn_model(crop_size, num_class, weights)

# file name 'eval.dcm'
img_file = 'eval.dcm'

# read the DICOM file and reshape its pixel data to (H, W, 1)
img = dicom.read_file(img_file)
img = img.pixel_array.astype('int32')
img = img.reshape(img.shape[0], img.shape[1], 1)

img = center_crop(img, crop_size=200)

# show the cropped input image
plt.imshow(img.reshape((200, 200)))
plt.show()

# predict the segmentation mask and show it
result = model.predict(img.reshape((1, 200, 200, 1)))

plt.imshow(result.reshape((200, 200)))
plt.show()
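To turn the probability map into a binary mask, the same 0.5 threshold used in the submission scripts above can be applied (a small sketch; numpy is not imported in the original snippet):

import numpy as np

mask = np.where(result.reshape((200, 200)) > 0.5, 255, 0).astype('uint8')
plt.imshow(mask, cmap='gray')
plt.show()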