Code example #1
import numpy as np


def extract_segs_a2c_a4c(dicomdir, videofile, model, sess, lv_label, la_label,
                         lvo_label):
    # Load the DICOM video and convert it into model-ready frames.
    framedict = create_imgdict_from_dicom(dicomdir, videofile)
    images, orig_images = extract_images(framedict)
    # Segment the first frame, then zero out every class except the left
    # ventricle (lv_label), left atrium (la_label), and LVO (lvo_label).
    preds = np.argmax(model.predict(sess, images[0:1])[0, :, :, :], 2)
    label_all = range(1, 8)
    label_good = [lv_label, la_label, lvo_label]
    for i in label_all:
        if i not in label_good:
            preds[preds == i] = 0
    # Segment every frame of the video.
    segs = []
    for i in range(len(images)):
        seg = np.argmax(model.predict(sess, images[i:i + 1])[0, :, :, :], 2)
        segs.append(seg)
    # Split each multi-class segmentation into per-chamber binary masks.
    lv_segs = []
    lvo_segs = []
    la_segs = []
    for seg in segs:
        la_seg = create_seg(seg, la_label)
        lvo_seg = create_seg(seg, lvo_label)
        lv_seg = create_seg(seg, lv_label)
        lv_segs.append(lv_seg)
        lvo_segs.append(lvo_seg)
        la_segs.append(la_seg)
    return lv_segs, la_segs, orig_images, lvo_segs, preds
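For orientation, a minimal sketch of how this helper might be called. The directory, file name, and label indices below are assumptions (the indices mirror the a4c call in code example #2), and `model`/`sess` are presumed to already hold a restored U-Net.

# Hypothetical call -- paths, file name, and label indices are assumptions;
# `model` and `sess` must already hold a restored a4c U-Net (see example #2).
lv_segs, la_segs, orig_images, lvo_segs, preds = extract_segs_a2c_a4c(
    "./dicomsample/", "study1_video1.dcm", model, sess,
    lv_label=2, la_label=4, lvo_label=1)
print(len(lv_segs), "frames segmented; first-frame labels:", np.unique(preds))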
Code example #2
import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf  # TF1-style API (tf.Session / tf.train.Saver)
from PIL import Image
from scipy.misc import imresize  # removed in SciPy >= 1.3


def segmentChamber(videofile, dicomdir, view):
    mean = 24
    weight_decay = 1e-12
    learning_rate = 1e-4
    maxout = False
    # Per-view U-Net configuration: number of output labels and the
    # checkpoint holding the trained weights.
    view_configs = {
        "a4c": (6, './models/a4c_45_20_all_model.ckpt-9000'),
        "a2c": (4, './models/a2c_45_20_all_model.ckpt-10600'),
        "a3c": (4, './models/a3c_45_20_all_model.ckpt-10500'),
        "psax": (4, './models/psax_45_20_all_model.ckpt-9300'),
        "plax": (7, './models/plax_45_20_all_model.ckpt-9600'),
    }
    label_dim, checkpoint = view_configs[view]
    # Build the model in its own graph and restore the trained weights.
    graph = tf.Graph()
    with graph.as_default():
        sess = tf.Session()
        model = Unet(mean, weight_decay, learning_rate, label_dim,
                     maxout=maxout)
        sess.run(tf.local_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint)
    outpath = "./segment/" + view + "/"
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    framedict = create_imgdict_from_dicom(dicomdir, videofile)
    images, orig_images = extract_images(framedict)
    if view == "a4c":
        a4c_lv_segs, a4c_la_segs, a4c_lvo_segs, preds = extract_segs(images, orig_images, model, sess, 2, 4, 1)
        np.save(outpath + '/' + videofile + '_lv', np.array(a4c_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_la', np.array(a4c_la_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lvo', np.array(a4c_lvo_segs).astype('uint8'))
    elif view == "a2c":
        a2c_lv_segs, a2c_la_segs, a2c_lvo_segs, preds = extract_segs(images, orig_images, model, sess, 2, 3, 1)
        np.save(outpath + '/' + videofile + '_lv', np.array(a2c_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_la', np.array(a2c_la_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lvo', np.array(a2c_lvo_segs).astype('uint8'))
    elif view == "psax":
        psax_lv_segs, psax_lvo_segs, psax_rv_segs, preds = extract_segs(images, orig_images, model, sess, 2, 1, 3)
        np.save(outpath + '/' + videofile + '_lv', np.array(psax_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lvo', np.array(psax_lvo_segs).astype('uint8'))
    elif view == "a3c":
        a3c_lv_segs, a3c_la_segs, a3c_lvo_segs, preds = extract_segs(images, orig_images, model, sess, 2, 3, 1)
        np.save(outpath + '/' + videofile + '_lvo', np.array(a3c_lvo_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_lv', np.array(a3c_lv_segs).astype('uint8'))
        np.save(outpath + '/' + videofile + '_la', np.array(a3c_la_segs).astype('uint8'))
    elif view == "plax":
        plax_chamber_segs, preds = extract_segs(images, orig_images, model, sess, 1)
        np.save(outpath + '/' + videofile + '_chamber_B1', np.array(plax_chamber_segs).astype('uint8'))
    # Save the first-frame segmentation and the original frame as PNGs,
    # then blend them 50/50 into an overlay for visual inspection.
    j = 0
    nrow = orig_images[0].shape[0]
    ncol = orig_images[0].shape[1]
    print(nrow, ncol)
    plt.figure(figsize=(25, 25))
    plt.axis('off')
    plt.imshow(imresize(preds, (nrow, ncol)))
    plt.savefig(outpath + '/' + videofile + '_' + str(j) + '_' +
                'CHOICE_FULL_segmentation_LV.png')
    plt.close()
    plt.figure(figsize=(25, 25))
    plt.axis('off')
    plt.imshow(orig_images[0])
    plt.savefig(outpath + '/' + videofile + '_' + str(j) + '_' +
                'CHOICE_FULL_original_LV.png')
    plt.close()
    background = Image.open(outpath + '/' + videofile + '_' + str(j) + '_' +
                            'CHOICE_FULL_original_LV.png')
    overlay = Image.open(outpath + '/' + videofile + '_' + str(j) + '_' +
                         'CHOICE_FULL_segmentation_LV.png')
    background = background.convert("RGBA")
    overlay = overlay.convert("RGBA")
    outImage = Image.blend(background, overlay, 0.5)
    outImage.save(outpath + '/' + videofile + '_' + str(j) + '_' +
                  'CHOICE_FULL_overlay_LV.png', "PNG")
    return 1
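A minimal driver sketch for this function. The directory and file name are assumptions; `Unet` and the `create_imgdict_from_dicom` / `extract_images` / `extract_segs` helpers come from the surrounding repository.

# Hypothetical driver -- the directory and file name are assumptions.
for view in ("a4c", "a2c", "psax"):
    segmentChamber("study1_video1.dcm", "./dicomsample/", view)
# Masks land in ./segment/<view>/ as <videofile>_lv.npy etc., next to
# first-frame PNG overlays for a quick visual sanity check.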
Code example #3
import os

import cv2
import numpy as np
from scipy.misc import imresize  # removed in SciPy >= 1.3


def outputcropped(videoFile, view):
    flag = 1
    print(videoFile, view)
    outDir = "./straintmp/" + videoFile
    dicomdir = "./dicomsample/"
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    outDir_left = outDir + "/maskedimages_left/"
    if not os.path.exists(outDir_left):
        os.makedirs(outDir_left)
    outDir_right = outDir + "/maskedimages_right/"
    if not os.path.exists(outDir_right):
        os.makedirs(outDir_right)
    # Load the per-frame masks saved by segmentChamber().
    npydir = "./segment/" + view
    lvo_segs = np.load(npydir + "/" + videoFile + "_lvo.npy")
    lv_segs = np.load(npydir + "/" + videoFile + "_lv.npy")
    imgdict = create_imgdict_from_dicom(dicomdir, videoFile)
    nrow = imgdict[0].shape[0]
    ncol = imgdict[0].shape[1]
    mask = create_mask(imgdict)
    kernel_i = np.ones((20, 20), np.uint8)  # dilation kernel for the LV mask
    kernel_o = np.ones((25, 25), np.uint8)  # dilation kernel for the LVO mask
    for i in range(len(lv_segs)):
        # Upsample both masks to frame resolution and dilate them; their
        # overlap approximates the LV wall region.
        lv_seg = imresize(lv_segs[i], (nrow, ncol), interp='nearest')
        lv_seg_dilate = cv2.dilate(lv_seg, kernel_i, iterations=2)
        lvo_seg = imresize(lvo_segs[i], (nrow, ncol), interp='nearest')
        lvo_seg_dilate = cv2.dilate(lvo_seg, kernel_o, iterations=1)
        y_i, x_i = np.where(lv_seg_dilate > 0)
        y_o, x_o = np.where(lvo_seg_dilate > 0)
        outer = np.transpose((y_o, x_o))
        inner = np.transpose((y_i, x_i))
        iset = set(tuple(x) for x in inner)
        oset = set(tuple(x) for x in outer)
        overlap = np.array([x for x in iset & oset])
        if len(overlap.shape) == 2:
            y = overlap[:, 0]
            x = overlap[:, 1]
            # Split the wall points into left and right halves using x/y
            # percentile thresholds; each half stacks an upper band and a
            # lower band.
            y_left_hi = y[(x < np.percentile(x, 43))
                          & (y < np.percentile(y, 50))]
            y_left_lo = y[(x < np.percentile(x, 35))
                          & (y > np.percentile(y, 50)) &
                          (y < np.percentile(y, 90))]
            x_left_hi = x[(x < np.percentile(x, 43))
                          & (y < np.percentile(y, 50))]
            x_left_lo = x[(x < np.percentile(x, 35))
                          & (y > np.percentile(y, 50)) &
                          (y < np.percentile(y, 90))]
            y_rite_hi = y[(x > np.percentile(x, 43))
                          & (y < np.percentile(y, 50))]
            y_rite_lo = y[(x > np.percentile(x, 35))
                          & (y > np.percentile(y, 50)) &
                          (y < np.percentile(y, 90))]
            x_rite_hi = x[(x > np.percentile(x, 43))
                          & (y < np.percentile(y, 50))]
            x_rite_lo = x[(x > np.percentile(x, 35))
                          & (y > np.percentile(y, 50)) &
                          (y < np.percentile(y, 90))]
            y_left = np.hstack((y_left_lo, y_left_hi))
            x_left = np.hstack((x_left_lo, x_left_hi))
            y_rite = np.hstack((y_rite_lo, y_rite_hi))
            x_rite = np.hstack((x_rite_lo, x_rite_hi))
            # Flip (y, x) to (x, y) point order, then black out everything
            # outside each wall segment and save the masked frames.
            points_left = np.fliplr(np.transpose((y_left, x_left)))
            points_right = np.fliplr(np.transpose((y_rite, x_rite)))
            newimage_left = isolate_obj(imgdict[i], points_left)
            outfile_left = outDir_left + "/" + videoFile + "_" + str(i) + ".png"
            cv2.imwrite(outfile_left, newimage_left)
            newimage_right = isolate_obj(imgdict[i], points_right)
            outfile_right = outDir_right + "/" + videoFile + "_" + str(i) + ".png"
            cv2.imwrite(outfile_right, newimage_right)
        else:
            # Too few overlapping pixels to form a wall region in this frame.
            flag = 0
    return flag
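Putting the two stages together, a sketch under the same assumptions (the file name is hypothetical; `create_mask` and `isolate_obj` are repository helpers):

# Hypothetical two-stage pipeline -- file name and view are assumptions.
segmentChamber("study1_video1.dcm", "./dicomsample/", "a4c")
ok = outputcropped("study1_video1.dcm", "a4c")
print("wall crops written" if ok else "segmentation overlap too sparse")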