Example #1
def Varidation():
    print("start predict")
    size = (512, 512)
    target_size = (size[0], size[1], 1)
    filename = "image11_with_tumor"
    path = "./image_data/"+filename
    cimage, gimage, sizedata = load_image(path, size)
    os.system("cls")

    # with open("./image_data/predict_gray.pkl",mode='rb') as f:
    #    gimage = pickle.load(f)
    # with open("./image_data/predict_clr.pkl",mode='rb') as f:
    #   cimage = pickle.load(f)

    #model = unet_2d(input_shape=target_size)
    model = unet_2d_GAM(input_shape=target_size)
    model.load_weights('./model/unet_2d_weights.hdf5')
    os.system("cls")

    for i in range(len(cimage)):
        mean = 0
        print("\r{0:d}".format(i), end="")

        w, h, c = gimage[i].shape
        target = gimage[i].reshape((1, w, h, 1))
        target = target / 255
        pred = model.predict(target)[0]
        pred = np.reshape(pred, (w, h, 1), order='F')

        # Threshold

        #mean = np.mean(pred)
        mean = 0.95

        # print(pred)
        pred[pred >= mean] = 255
        pred[pred < mean] = 0

        pred = np.asarray(pred, dtype=np.uint8)

        val = cimage[i]

        """
        val     :検出した領域の輪郭をoriginal画像と重ねた画像
        label   :検出した領域の輪郭
        val     :検出した領域
        """
        val, label, _ = draw_contours(val, pred)

        cv2.imwrite("./result/test/"+filename +
                    "/target/{0:04d}.jpg".format(i), cimage[i])
        cv2.imwrite("./result/test/"+filename +
                    "/pred/{0:04d}.jpg".format(i), pred)
        cv2.imwrite("./result/test/"+filename +
                    "/val/{0:04d}.jpg".format(i), val)
Example #2
def test(dict):
    """
    #   # Amed Test Imageで抽出器の性能を評価する   #
    Parameter
    ---------
    dict    :辞書型
            -Parameter.txtから必要な情報を格納したdict
    Result
    ------
    抽出結果を //aka/share/amed/amed_unet_result に書き出し
    """
    print("start predict")
    size = dict["size"]
    root = dict["amedroot_path"]

    target_size = (size[0], size[1], 1)

    cimage, gimage, data_name = load_image_from_txt(dict)
    #cimage,gimage = load_image_s('./image/image.pkl')

    model = unet_2d_GAM(input_shape=target_size)

    model.load_weights(dict["model_path"])
    os.system("cls")

    for i in range(len(cimage)):

        #start = time.time()
        print("\r{0:d}".format(i), end="")  #
        w, h, c = gimage[i].shape
        target = cv2.resize(gimage[i], size)
        target = target.reshape((1, size[0], size[1], 1))
        target = target / 255  # normalize

        pred = model.predict(target)[0]
        pred = np.reshape(pred, (size[0], size[1], 1), order='F')

        # Threshold
        Threshold = 0.7

        # Binarize the prediction
        pred[pred >= Threshold] = 1
        pred[pred < Threshold] = 0

        pred = np.asarray(pred, dtype=np.uint8)
        val = np.copy(cimage[i])
        """
        val     :検出した領域の輪郭をoriginal画像と重ねた画像
        label   :検出した領域の輪郭
        val     :検出した領域
        """

        pred_resize = cv2.resize(pred, (h, w))
        pred_resize = np.reshape(pred_resize, (w, h, 1), order='F')

        val, label, relabel = draw_contours(val, pred_resize)

        #pred_resize_2 = np.concatenate((pred_resize,pred_resize),axis=2)
        #pred_resize = np.concatenate((pred_resize_2,pred_resize),axis=2)

        result_image = (relabel / 255) * cimage[i]

        # Processing time per image
        #elapsed_time = time.time() - start
        #print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
        #print("pred_resize.shape",pred_resize.shape)
        #print("cimage[i].shape",cimage[i].shape)

        #save result
        result_rootpath = "/amed_unet_result/" + os.path.dirname(data_name[i])
        result_dir_path = root + result_rootpath

        check_existfile(result_dir_path)
        check_existfile(root + result_rootpath + "/val")
        check_existfile(root + result_rootpath + "/target")
        check_existfile(root + result_rootpath + "/pred")

        cv2.imwrite(
            root + result_rootpath + "/val/val_" +
            os.path.basename(data_name[i]), val)
        cv2.imwrite(root + "/amed_unet_result/" + data_name[i],
                    result_image * 255)
        cv2.imwrite(
            root + result_rootpath + "/target/target_" +
            os.path.basename(data_name[i]), cimage[i])
        cv2.imwrite(
            root + result_rootpath + "/pred/" + os.path.basename(data_name[i]),
            relabel * 255)
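
test() pulls its settings from a dict built out of Parameter.txt. A minimal sketch of such a dict, limited to the keys the snippet reads directly; the values are placeholders and load_image_from_txt may require further entries:

# Hypothetical settings dict; only the keys that test() accesses directly are shown,
# and the values are placeholders, not the original configuration.
params = {
    "size": (512, 512),                            # network input size
    "amedroot_path": "//aka/share/amed",           # root of the Amed share
    "model_path": "./model/unet_2d_weights.hdf5",  # trained weight file
}
test(params)
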
Example #3
    x1, x2, y1, y2 = 200, 500, 200, 500
    show(tumor_coord)
    # receive coordinates
    #x1,x2,y1,y2 = coord
    tumor_coord[y1:y2, x1:x2] = 1
    result = tumor_coord * pred

    tumor_area = np.count_nonzero(tumor_coord >= 1)   # area of the tumor region
    result_area = np.count_nonzero(result >= 1)       # area of the overlap


    Inclusion_rate = result_area / tumor_area
    print(Inclusion_rate)
    show(pred)
    show(result)
    input()

if __name__ == "__main__":
    
    test_amed()

    input()
    target_size = (640,640,1)
    #model
    seg_model = unet_2d_GAM(input_shape=target_size)
    seg_model.load_weights("./model/unet_2d_weights.hdf5")

    Segmentation(image,seg_model,target_size)
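
The fragment at the top of Example #3 measures an inclusion rate: the overlap between a rectangular tumor region and the predicted mask, divided by the tumor area. A self-contained sketch of the same computation on toy arrays (shapes and coordinates are illustrative):

import numpy as np

pred = np.zeros((10, 10), dtype=np.uint8)
pred[3:8, 3:8] = 1                  # hypothetical predicted mask

tumor_coord = np.zeros_like(pred)
tumor_coord[5:10, 5:10] = 1         # hypothetical tumor rectangle

overlap = tumor_coord * pred
inclusion_rate = np.count_nonzero(overlap) / np.count_nonzero(tumor_coord)
print(inclusion_rate)               # 0.36 here: 9 of the 25 tumor pixels are covered
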


Example #4
def test_amed():
    """
    """
    print("start predict")
    size = (512, 512)

    target_size = (size[0], size[1], 1)
    filename = "image06"
    path = "./image_data/"+filename
    cimage, gimage, sizedata = load_image(path, size)

    # with open("./image_data/predict_gray.pkl",mode='rb') as f:
    #    gimage = pickle.load(f)
    # with open("./image_data/predict_clr.pkl",mode='rb') as f:
    #    cimage = pickle.load(f)

    #model = unet_2d(input_shape=target_size)
    model = unet_2d_GAM(input_shape=target_size)
    model.load_weights('./model/unet_2d_weights.hdf5')
    os.system("cls")

    for i in range(len(cimage)):
        mean = 0

        start = time.time()

        print("\r{0:d}".format(i), end="")
        w, h, c = gimage[i].shape
        target = cv2.resize(gimage[i], size)
        target = target.reshape((1, 512, 512, 1))
        target = target / 255

        pred = model.predict(target)[0]
        pred = np.reshape(pred, (512, 512, 1), order='F')

        # Create heat map
        #pred_heat = pred[:,:,0]

        gray = np.copy(gimage[i])
        # Apply a pseudo-color map
        heatmap = np.round(pred*255)
        heatmap = heatmap.astype('uint8')
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)

        # Blend with the original image
        heatmap = cv2.resize(heatmap, (h, w))

        # Add channels (grayscale -> 3 channels)
        gray_2 = np.concatenate((gray, gray), axis=2)
        gray = np.concatenate((gray_2, gray), axis=2)

        heatmap = cv2.addWeighted(gray, 0.3, heatmap, 0.7, 0)

        check_existfile("./result/test/"+filename+"/heatmap")
        cv2.imwrite("./result/test/"+filename +
                    "/heatmap/{0:04d}.jpg".format(i), heatmap)

        #mean = np.mean(pred)
        mean = 0.7
        # Binarize the prediction
        pred[pred >= mean] = 1
        pred[pred < mean] = 0

        pred = np.asarray(pred, dtype=np.uint8)
        val = np.copy(cimage[i])

        """
        val     :検出した領域の輪郭をoriginal画像と重ねた画像
        label   :検出した領域の輪郭
        val     :検出した領域
        """

        pred_resize = cv2.resize(pred, (h, w))
        pred_resize = np.reshape(pred_resize, (w, h, 1), order='F')

        val, label, _ = draw_contours(val, pred_resize)

        val_resize = cv2.resize(val, (h, w))

        pred_resize_2 = np.concatenate((pred_resize, pred_resize), axis=2)
        pred_resize = np.concatenate((pred_resize_2, pred_resize), axis=2)

        #result_image = pred_resize*cimage[i]

        elapsed_time = time.time() - start
        print("elapsed_time:{0}".format(elapsed_time) + "[sec]")

        #cv2.imwrite("//aka/share/amed/amed_liver_label/test_result/target/{0:04d}.jpg".format(i) , cimage[i])
        #cv2.imwrite("//aka/share/amed/amed_liver_label/test_result/pred/{0:04d}.jpg".format(i) , pred_resize*255)
        #cv2.imwrite("//aka/share/amed/amed_liver_label/test_result/val/{0:04d}.jpg".format(i) , val_resize)

        cv2.imwrite("./result/test/"+filename +
                    "/target/{0:04d}.jpg".format(i), cimage[i])
        cv2.imwrite("./result/test/"+filename +
                    "/pred/{0:04d}.jpg".format(i), pred_resize*255)
        cv2.imwrite("./result/test/"+filename +
                    "/val/{0:04d}.jpg".format(i), val_resize)
Example #5
def train():
    """
    #   #   ネットワークモデルの学習をするための関数
    -Network model  :U-net
    -save weight only

    ##注意点
    -学習時の画像サイズを必ず記録する事
    """

    """
    画像読み込み
    前処理(サイズ統一、終わってたらスルー)
    学習(fit)
    検証(predict)
    精度と学習モデル保存
    """
    # Setup Parameter
    size = (512, 512)
    target_size = (size[0], size[1], 1)
    batch_size = 4
    epoch = 50
    classes = 2
    gen_num = 5
    seed = 10

    # Record the run date (written to model_detail.txt below)
    dt_now = datetime.datetime.now()
    days = dt_now.strftime("%Y-%m-%d")

    # Load model
    #model = unet_2d(input_shape = target_size)
    model = unet_2d_GAM(input_shape=target_size)
    model.summary()

    # Setup dataset
    image = load_image_s('./image/image.pkl')
    label = load_image_s('./image/label.pkl')

    # Preprocessing
    # (it might be better not to use this for preprocessing)
    #image,label = Echo_Extraction(image,label)
    # for ss in range(len(image)):
    #    cv2.imwrite("./image/check/image/{0:04d}.jpg".format(ss) , image[ss])
    #    cv2.imwrite("./image/check/label/{0:04d}.jpg".format(ss), label[ss])

    # Normalize
    image = image/255
    label = label/255
    train_image, val_image = train_test_split(
        image, test_size=0.3, random_state=seed, shuffle=True)
    train_label, val_label = train_test_split(
        label, test_size=0.3, random_state=seed, shuffle=True)

    # check dataset
    """
    t_image = train_image * 255
    t_label = train_label * 255
    v_image = val_image * 255
    v_label = val_label * 255

    for i in range(len(train_image)):
        cv2.imwrite("./image/check/train_image/{0:04d}.jpg".format(i) , t_image[i])
        cv2.imwrite("./image/check/train_label/{0:04d}.jpg".format(i), t_label[i])
    for i in range(len(val_image)):
        cv2.imwrite("./image/check/val_image/{0:04d}.jpg".format(i), v_image[i])
        cv2.imwrite("./image/check/val_label/{0:04d}.jpg".format(i), v_label[i])
    """

    # Data Augmentation
    nb_data = len(train_image) * gen_num
    val_nb_data = len(val_image) * gen_num
    #train_gen = ImageDataGenerator(preprocessing_function = myprocess_pixel_value,width_shift_range=[-10,10],height_shift_range=[-10,10])
    train_gen = ImageDataGenerator(
        width_shift_range=0.1, height_shift_range=0.1)
    val_gen = ImageDataGenerator()

    # Setup CallBacks
    ModelCheckpoint_ = ModelCheckpoint(
        './model/unet_2d_weights.hdf5', monitor='val_loss', save_weights_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
    ReduceLROnPlateau_ = ReduceLROnPlateau(
        monitor='val_loss', factor=0.2, patience=3, verbose=1, mode='auto', epsilon=0.001, cooldown=0, min_lr=0.002)
    CSVLogger_ = keras.callbacks.CSVLogger(
        "./model/callback/log_learning.csv", separator=',', append=False)
    TensorBoard_ = keras.callbacks.TensorBoard(
        log_dir="./model/log", histogram_freq=1)

    # Set CallBack
    #callbacks = [ModelCheckpoint_,ReduceLROnPlateau_,early_stopping,CSVLogger_]
    callbacks = [ModelCheckpoint_, early_stopping, CSVLogger_]
    #callbacks = [ModelCheckpoint_]

    # Learning
    # (generator=train_gen.flow(train_image, train_label,batch_size=batch_size,save_to_dir="E:/output/data_augumentation/1", save_prefix='img', save_format='jpg')
    model.fit_generator(generator=train_gen.flow(train_image, train_label, batch_size=batch_size),
                        steps_per_epoch=nb_data,
                        epochs=epoch,
                        validation_data=val_gen.flow(
                            val_image, val_label, batch_size=batch_size),
                        validation_steps=val_nb_data,
                        max_queue_size=10,
                        callbacks=callbacks)

    # Save the training log
    with open("./model/model_detail.txt", mode='w') as f_dataset:
        f_dataset.write("Created      : " + days)
        f_dataset.write("\nImage size   : " + str(size))
        f_dataset.write("\nseed         : " + str(seed))
        f_dataset.write("\nModel        : unet_2d_GAM")
        f_dataset.write("\nNumber of images : " + str(len(image)))
        f_dataset.write("\n     - train_image : " + str(len(train_image)))
        f_dataset.write("\n     - val_image   : " + str(len(val_image)))

    # Validation below
    # (note: fix the weight-save location)
    model.load_weights('./model/unet_2d_weights.hdf5')

    score = model.evaluate(val_image, val_label, batch_size=3, verbose=1)

    print('Test loss     ', score[0])
    print('Test accuracy ', score[1])
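
train() keeps image/label pairs aligned across two separate train_test_split calls only because both calls share the same random_state and array length. A minimal sketch, with placeholder arrays, of the single-call form that scikit-learn also supports and that makes the pairing explicit:

import numpy as np
from sklearn.model_selection import train_test_split

image = np.random.rand(20, 512, 512, 1)   # placeholder image stack
label = np.random.rand(20, 512, 512, 1)   # placeholder label stack
seed = 10

# One call splits both arrays with the same permutation, so image/label pairs
# stay aligned without relying on a shared random_state across two calls.
train_image, val_image, train_label, val_label = train_test_split(
    image, label, test_size=0.3, random_state=seed, shuffle=True)
print(len(train_image), len(val_image))   # 14 train / 6 validation samples here
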