Code Example #1
File: testing.py Project: ricktonycr/testing
def run():
    image = io.imread(cfg.imagePath, as_grey=True)
    image_pyramid = pyramid_gaussian.get_pyramid(image)
    for img in image_pyramid[::-1]:
        cfg.num_of_patches = cfg.num_test_patches
        patches, centres = sample_patches.create_patches_randomly(img)
        F = extract_features.extractFeaturesForPatches(patches)
        landmark_detection.run()
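
Note: the `pyramid_gaussian.get_pyramid` helper used above is not shown. Below is a minimal sketch of what such a helper could look like, assuming it simply collects the levels produced by `skimage.transform.pyramid_gaussian`; the level count and downscale factor are illustrative, not the project's actual API.

# Illustrative sketch only: assumes get_pyramid wraps skimage.transform.pyramid_gaussian.
from skimage import transform

def get_pyramid(image, num_levels=4, downscale=2):
    # Collect the first num_levels levels of a Gaussian pyramid,
    # from full resolution (level 0) down to the coarsest level.
    return list(transform.pyramid_gaussian(image, max_layer=num_levels - 1,
                                           downscale=downscale))

# Usage, mirroring Example #1's coarse-to-fine iteration:
# for img in get_pyramid(image)[::-1]:
#     ...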
Code Example #2
def run(imagepath):
    t0 = time()

    image = io.imread(imagepath, as_gray=True)
    pyramid = pyramid_gaussian.get_pyramid(image)

    cfg.num_of_patches = cfg.num_test_patches  # changing the number of patches

    for img, sc_name in zip(pyramid, cfg.scale_names):
        if sc_name == '0_12':
            init_flag = True
            ss = []
            ns = 0

            patches, centres = sample_patches.create_patches_randomly(
                img, subshape=ss, initialization=init_flag)
            f = extract_features.extractFeaturesForPatches(patches)

            # 0: femur
            # 1: hip (cadera)
            # 2: superior
            # 3: inferior
            d_tilde, f_tilde, c_tilde = build_matrices(cfg.bone_structures[3],
                                                       sc_name,
                                                       n_subs=ns)

            l = d_tilde.shape[0] // 2  # number of landmarks

            # Get the landmark points
            f_hat = np.concatenate((f_tilde, f), axis=1)
            c_bar = compute_C_matrix(centres, l)
            c = np.tile(centres, (l, 1))
            d = compute_D_matrix(f_hat, d_tilde, c_bar, l)
            data = d + c
            kde_ = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(data.T)
            sc = kde_.score_samples(data.T)
            max = np.argmax(np.exp(sc))
            shape = data[:, max]
            shape = np.reshape(shape, (l, 2))
            a = shape[:, 0]
            b = shape[:, 1]
            a *= 8
            b *= 8
            # fig, ax_ = plt.subplots()
            # ax_.imshow(image, cmap=plt.cm.gray)
            # ax_.plot(a, b, 'r.', markersize=8, mec='k', mew=0.3)
            # ax_.plot(a[5], b[5], 'b.', markersize=8, mec='k', mew=0.3)
            # ax_.plot(a[12], b[12], 'b.', markersize=8, mec='k', mew=0.3)
            # ax_.axis('off')
            # plt.show()
            a /= 4
            b /= 4

        else:
            for count in range(5):
                init_flag = False
                if count == 4:
                    ss = shape[(4 * count):(4 * count + 5), :]
                else:
                    ss = shape[(4 * count):(4 * count + 4), :]
                ns = count
                # if sc_name != '0_25':
                #     ss = shape[0:4,:]

                patches, centres = sample_patches.create_patches_randomly(
                    img, subshape=ss, initialization=init_flag)
                f = extract_features.extractFeaturesForPatches(patches)

                # 0: femur
                # 1: hip (cadera)
                # 2: superior
                # 3: inferior
                d_tilde, f_tilde, c_tilde = build_matrices(
                    cfg.bone_structures[3], sc_name, n_subs=ns)

                l = d_tilde.shape[0] // 2  # number of landmarks

                # Get the landmark points
                f_hat = np.concatenate((f_tilde, f), axis=1)
                c_bar = compute_C_matrix(centres, l)
                c = np.tile(centres, (l, 1))
                d = compute_D_matrix(f_hat, d_tilde, c_bar, l)
                data = d + c
                kde_ = KernelDensity(kernel='gaussian',
                                     bandwidth=0.2).fit(data.T)
                sc = kde_.score_samples(data.T)
                max = np.argmax(np.exp(sc))
                shape1 = data[:, max]
                shape1 = np.reshape(shape1, (l, 2))
                # a = shape1[:, 0]
                # b = shape1[:, 1]
                # if sc_name == '0_25':
                #     a *= 4
                #     b *= 4
                # elif sc_name == '0_5':
                #     a *= 2
                #     b *= 2
                # fig, ax_ = plt.subplots()
                # ax_.imshow(image, cmap=plt.cm.gray)
                # ax_.plot(a, b, 'r.', markersize=8, mec='k', mew=0.3)
                # ax_.axis('off')
                # plt.show()
                # if sc_name == '0_25':
                #     a /= 2
                #     b /= 2
                if count == 4:
                    shape[(4 * count):(4 * count + 5), :] = shape1[0:5, :] * 2
                else:
                    shape[(4 * count):(4 * count + 4), :] = shape1[0:4, :] * 2

            a = shape[:, 0]
            b = shape[:, 1]
            if sc_name == '0_25':
                a = a * 2
                b = b * 2
            if sc_name == '1':
                a = a / 2
                b = b / 2
            # fig, ax_ = plt.subplots()
            # ax_.imshow(image, cmap=plt.cm.gray)
            # ax_.plot(a, b, 'r.', markersize=8, mec='k', mew=0.3)
            # ax_.plot(a[5], b[5], 'b.', markersize=8, mec='k', mew=0.3)
            # ax_.plot(a[12], b[12], 'b.', markersize=8, mec='k', mew=0.3)
            # ax_.axis('off')
            # plt.show()

    izquierdaX = np.copy(a)
    izquierdaY = np.copy(b)

    for img, sc_name in zip(pyramid, cfg.scale_names):
        if sc_name == '0_12':
            init_flag = True
            ss = []
            ns = 0

            patches, centres = sample_patches.create_patches_randomly(
                img, subshape=ss, initialization=init_flag)
            f = extract_features.extractFeaturesForPatches(patches)

            # 0: femur
            # 1: hip (cadera)
            # 2: superior
            # 3: inferior
            d_tilde, f_tilde, c_tilde = build_matrices(cfg.bone_structures[1],
                                                       sc_name,
                                                       n_subs=ns)

            l = d_tilde.shape[0] // 2  # number of landmarks

            # Get the landmark points
            f_hat = np.concatenate((f_tilde, f), axis=1)
            c_bar = compute_C_matrix(centres, l)
            c = np.tile(centres, (l, 1))
            d = compute_D_matrix(f_hat, d_tilde, c_bar, l)
            data = d + c
            kde_ = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(data.T)
            sc = kde_.score_samples(data.T)
            max = np.argmax(np.exp(sc))
            shape = data[:, max]
            shape = np.reshape(shape, (l, 2))
            a = shape[:, 0]
            b = shape[:, 1]
            a *= 8
            b *= 8
            # fig, ax_ = plt.subplots()
            # ax_.imshow(image, cmap=plt.cm.gray)
            # ax_.plot(a, b, 'r.', markersize=8, mec='k', mew=0.3)
            # ax_.plot(a[5], b[5], 'b.', markersize=8, mec='k', mew=0.3)
            # ax_.axis('off')
            # plt.show()
            a /= 4
            b /= 4

        else:
            for count in range(5):
                init_flag = False
                if count == 4:
                    ss = shape[(4 * count):(4 * count + 5), :]
                else:
                    ss = shape[(4 * count):(4 * count + 4), :]
                ns = count
                # if sc_name != '0_25':
                #     ss = shape[0:4,:]

                patches, centres = sample_patches.create_patches_randomly(
                    img, subshape=ss, initialization=init_flag)
                f = extract_features.extractFeaturesForPatches(patches)

                # 0: femur
                # 1: hip (cadera)
                # 2: superior
                # 3: inferior
                d_tilde, f_tilde, c_tilde = build_matrices(
                    cfg.bone_structures[1], sc_name, n_subs=ns)

                l = d_tilde.shape[0] // 2  # number of landmarks

                # Get the landmark points
                f_hat = np.concatenate((f_tilde, f), axis=1)
                c_bar = compute_C_matrix(centres, l)
                c = np.tile(centres, (l, 1))
                d = compute_D_matrix(f_hat, d_tilde, c_bar, l)
                data = d + c
                kde_ = KernelDensity(kernel='gaussian',
                                     bandwidth=0.2).fit(data.T)
                sc = kde_.score_samples(data.T)
                max = np.argmax(np.exp(sc))
                shape1 = data[:, max]
                shape1 = np.reshape(shape1, (l, 2))
                # a = shape1[:, 0]
                # b = shape1[:, 1]
                # if sc_name == '0_25':
                #     a *= 4
                #     b *= 4
                # elif sc_name == '0_5':
                #     a *= 2
                #     b *= 2
                # fig, ax_ = plt.subplots()
                # ax_.imshow(image, cmap=plt.cm.gray)
                # ax_.plot(a, b, 'r.', markersize=8, mec='k', mew=0.3)
                # ax_.axis('off')
                # plt.show()
                # if sc_name == '0_25':
                #     a /= 2
                #     b /= 2
                if count == 4:
                    shape[(4 * count):(4 * count + 5), :] = shape1[0:5, :] * 2
                else:
                    shape[(4 * count):(4 * count + 4), :] = shape1[0:4, :] * 2

            a = shape[:, 0]
            b = shape[:, 1]
            if sc_name == '0_25':
                a = a * 2
                b = b * 2
            if sc_name == '1':
                a = a / 2
                b = b / 2
            # fig, ax_ = plt.subplots()
            # ax_.imshow(image, cmap=plt.cm.gray)
            # ax_.plot(a, b, 'r.', markersize=8, mec='k', mew=0.3)
            # ax_.plot(a[5], b[5], 'b.', markersize=8, mec='k', mew=0.3)
            # ax_.plot(a[12], b[12], 'b.', markersize=8, mec='k', mew=0.3)
            # ax_.axis('off')
            # plt.show()

    derechaX = np.copy(a)
    derechaY = np.copy(b)

    fig, ax_ = plt.subplots()
    ax_.imshow(image, cmap=plt.cm.gray)
    ax_.plot(a, b, 'r.', markersize=5, mec='k', mew=0.3)
    ax_.plot(izquierdaX, izquierdaY, 'r.', markersize=8, mec='k', mew=0.3)
    ax_.plot(derechaX, derechaY, 'r.', markersize=8, mec='k', mew=0.3)

    IT = (izquierdaY[12] - izquierdaY[5]) / (izquierdaX[12] - izquierdaX[5])
    DT = (derechaY[12] - derechaY[5]) / (derechaX[12] - derechaX[5])

    a1 = 2 * izquierdaX[5] - izquierdaX[12]
    a2 = 2 * izquierdaX[12] - izquierdaX[5]
    b1 = IT * (a1 - izquierdaX[5]) + izquierdaY[5]
    b2 = IT * (a2 - izquierdaX[5]) + izquierdaY[5]

    c1 = 2 * derechaX[5] - derechaX[12]
    c2 = 2 * derechaX[12] - derechaX[5]
    d1 = DT * (c1 - derechaX[5]) + derechaY[5]
    d2 = DT * (c2 - derechaX[5]) + derechaY[5]

    HT = (derechaY[12] - izquierdaY[12]) / (derechaX[12] - izquierdaX[12])

    e1 = a1
    e2 = c1
    f1 = HT * (e1 - izquierdaX[12]) + izquierdaY[12]
    f2 = HT * (e2 - izquierdaX[12]) + izquierdaY[12]

    g1 = izquierdaX[5]
    g2 = HT * (g1 - izquierdaX[12]) + izquierdaY[12]
    h1 = derechaX[5]
    h2 = HT * (h1 - izquierdaX[12]) + izquierdaY[12]

    ax_.plot([e1, e2], [f1, f2], 'g', markersize=8, mec='k', mew=0.3)
    ax_.plot([c1, c2], [d1, d2], 'g', markersize=8, mec='k', mew=0.3)
    ax_.plot([a1, a2], [b1, b2], 'g', markersize=8, mec='k', mew=0.3)
    ax_.plot(izquierdaX[5],
             izquierdaY[5],
             'b.',
             markersize=10,
             mec='k',
             mew=0.3)
    ax_.plot(izquierdaX[12],
             izquierdaY[12],
             'b.',
             markersize=10,
             mec='k',
             mew=0.3)
    ax_.plot(derechaX[5], derechaY[5], 'b.', markersize=10, mec='k', mew=0.3)
    ax_.plot(derechaX[12], derechaY[12], 'b.', markersize=10, mec='k', mew=0.3)
    ax_.plot([g1, h1], [g2, h2], 'b.', markersize=10, mec='k', mew=0.3)

    nume1 = izquierdaY[5] * (izquierdaX[12] - g1) + izquierdaY[12] * (
        g1 - izquierdaX[5]) + g2 * (izquierdaX[5] - izquierdaX[12])
    deno1 = (izquierdaX[5] - izquierdaX[12]) * (izquierdaX[12] - g1) + (
        izquierdaY[5] - izquierdaY[12]) * (izquierdaY[12] - g2)
    rati1 = nume1 / deno1
    angl1 = math.atan(rati1)
    deg1 = (angl1 * 180) / math.pi
    if deg1 < 0:
        deg1 = deg1 + 180
    print(deg1)

    nume2 = derechaY[5] * (derechaX[12] - h1) + derechaY[12] * (
        h1 - derechaX[5]) + h2 * (derechaX[5] - derechaX[12])
    deno2 = (derechaX[5] - derechaX[12]) * (derechaX[12] - h1) + (
        derechaY[5] - derechaY[12]) * (derechaY[12] - h2)
    rati2 = nume2 / deno2
    angl2 = math.atan(rati2)
    deg2 = (angl2 * 180) / math.pi
    if deg2 < 0:
        deg2 = deg2 + 180
        deg2 = 180 - deg2
    print(deg2)

    ax_.text(izquierdaX[12] - 20,
             izquierdaY[12] + 20,
             round(deg1, 2),
             color='yellow')
    ax_.text(derechaX[12] + 20,
             derechaY[12] + 20,
             round(deg2, 2),
             color='yellow')

    print('####\tNiña\tNiño')

    na = 'N'
    no = 'N'

    #1-2
    if deg1 > 36 or deg2 > 36:
        na = 'L'
    if deg1 > 41.5 or deg2 > 41.5:
        na = 'G'
    if deg1 > 29 or deg2 > 31:
        no = 'L'
    if deg1 > 33 or deg2 > 35:
        no = 'G'
    print('1-2\t' + na + '\t' + no)

    #3-4
    if deg1 > 31.5 or deg2 > 33:
        na = 'L'
    if deg1 > 36.5 or deg2 > 38.5:
        na = 'G'
    if deg1 > 28 or deg2 > 29:
        no = 'L'
    if deg1 > 32.5 or deg2 > 33.5:
        no = 'G'
    print('3-4\t' + na + '\t' + no)

    #5-6
    if deg1 > 27.5 or deg2 > 29.5:
        na = 'L'
    if deg1 > 32 or deg2 > 34:
        na = 'G'
    if deg1 > 24.5 or deg2 > 27:
        no = 'L'
    if deg1 > 29 or deg2 > 31.5:
        no = 'G'
    print('5-6\t' + na + '\t' + no)

    #7-9
    if deg1 > 25.5 or deg2 > 27:
        na = 'L'
    if deg1 > 29.5 or deg2 > 31.5:
        na = 'G'
    if deg1 > 24.5 or deg2 > 25.5:
        no = 'L'
    if deg1 > 29 or deg2 > 29.5:
        no = 'G'
    print('7-9\t' + na + '\t' + no)

    #2a-3a
    if deg1 > 22 or deg2 > 23.5:
        na = 'L'
    if deg1 > 25.5 or deg2 > 27:
        na = 'G'
    if deg1 > 21 or deg2 > 22.5:
        no = 'L'
    if deg1 > 25 or deg2 > 27:
        no = 'G'
    print('2a-3a\t' + na + '\t' + no)

    #3a-5a
    if deg1 > 18 or deg2 > 21:
        na = 'L'
    if deg1 > 25.5 or deg2 > 25.5:
        na = 'G'
    if deg1 > 19 or deg2 > 20:
        no = 'L'
    if deg1 > 23.5 or deg2 > 24:
        no = 'G'
    print('3a-5a\t' + na + '\t' + no)

    ax_.axis('off')
    plt.show()
    '''
    l = d_tilde.shape[0] // 2  # number of landmarks

    # Composed matrix
    f_hat = np.concatenate((f_tilde, f), axis=1)

    c_bar = compute_C_matrix(centres, l)
    c = np.tile(centres, (l, 1))

    d = compute_D_matrix(f_hat, d_tilde, c_bar, l)

    positions_ = d + c

    density_estimation(positions_, img,imagepath)

    '''
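
Note: the landmark positions in Example #2 are picked by fitting a Gaussian kernel density estimate over the candidate positions (`data`) and keeping the column with the highest density. Below is a self-contained sketch of that selection step with random stand-in data; the matrix layout and bandwidth are assumptions taken from the example, not a verified specification.

# Sketch of the KDE-based mode selection used above. `data` is assumed to hold
# one candidate shape per column, with the 2*l landmark coordinates stacked in rows.
import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.default_rng(0)
l = 5                                       # number of landmarks
data = rng.normal(size=(2 * l, 200))        # 200 candidate shapes

kde_ = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(data.T)
log_density = kde_.score_samples(data.T)    # one log-density value per candidate
best = np.argmax(log_density)               # exp() is monotonic, so the log argmax suffices
shape = np.reshape(data[:, best], (l, 2))   # (l, 2) array of (x, y) landmark pairs
print(shape)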
Code Example #3
File: testing.py Project: ricktonycr/testing
def run(bones, gt=False):
    image = io.imread(cfg.imagePath, as_gray=True)
    images_pyramid = pyramid_gaussian.get_pyramid(image)

    sigma_values = np.array(cfg.sigma_values)

    patch_sizes = cfg.testing_patch_sizes

    print('Testing Image ...', cfg.imagePath)
    f = os.path.basename(cfg.imagePath)
    fn, ext = os.path.splitext(f)
    cfg.resultsFolderPath = os.path.join(cfg.resultsFolderPath, fn)

    subshapes = None
    cfg.num_of_patches = cfg.num_test_patches  # modify the number of patches to sample

    final_shapes = []
    gt_shapes = []
    bones_name = '_'.join(bones)
    # Segmentation for each bone (femur, pelvis)
    for bone in bones:
        times = []

        for img, sc_name, s, patch_size, idx_sc in zip(
                images_pyramid, cfg.scale_names, sigma_values, patch_sizes,
                range(len(cfg.scale_names))):
            print('Testing Scale ', sc_name)
            subshapes_ = []
            if sc_name == cfg.init_scale:
                init_flag = True
                start = time.time()
                landmarks = landmark_detection.get_estimated_landmarks(
                    bone, img, sc_name, init_flag=init_flag)
                end = time.time()

                times.append(end - start)

                # added
                plot_segmented_shape(img, landmarks, sc_name, bone)

                subshapes = [
                    landmarks[x:x + cfg.subs_len]
                    for x in range(0, len(landmarks), cfg.subs_len)
                ]
                # avoid subshapes of 1 landmark
                if len(subshapes[-1]) == 1:
                    lmrk = subshapes.pop()
                    subshapes[-1] = np.vstack((subshapes[-1], lmrk))
                subshapes = np.array(
                    subshapes) * cfg.downScaleFactor  # upsample landmarks
                continue

            # Sequential Process
            """
            inicio = time.time()
            for n_sub, subs in enumerate(subshapes):
                print('Detecting Landmarks in Subshape', str(n_sub))

                landmarks = landmark_detection.get_estimated_landmarks(bone, img, sc_name, subs, n_sub)
                subshapes_.append(landmarks)
            fin = time.time()
            print('TIEMPO', fin - inicio)

            """

            # Parallel Process
            l = len(subshapes)
            print('LEN SUBSHAPES', l)
            num_cores = multiprocessing.cpu_count()
            # n = len(os.sched_getaffinity(0))

            pool = multiprocessing.Pool(processes=num_cores)
            try:
                start = time.time()

                subshapes_ = pool.starmap(
                    landmark_detection.get_estimated_landmarks,
                    zip(repeat(bone), repeat(img), repeat(sc_name), subshapes,
                        range(l)))

            finally:
                pool.close()
                pool.join()
                end = time.time()
                times.append(end - start)

            subshapes = np.array(subshapes_)
            shape = np.vstack(subshapes)  # Updated shape
            # added
            plot_segmented_shape(img, shape, sc_name, bone)

            # ASM
            print('Computing Active Shape Model...')
            start = time.time()
            shape = active_shape_model.run(bone, shape, sc_name)
            end = time.time()
            times.append(end - start)

            plot_segmented_shape(img, shape, sc_name, bone, refined=True)

            # GRADIENT PROFILING
            if sc_name != cfg.scale_names[-1]:
                print('Computing Gradient Profiling...')
                start = time.time()
                shape = gradient_profiling.run(bone, img, s, patch_size, shape,
                                               sc_name, idx_sc)
                end = time.time()
                times.append(end - start)

            if sc_name == cfg.scale_names[-1]:
                final_shapes.append(shape)
                if gt:
                    # read the JSON and compare
                    f = os.path.basename(cfg.imagePath)
                    fn, ext = os.path.splitext(f)
                    json = os.path.join(cfg.path_json, fn + '.json')
                    shapes_gt = json_reader.get_all_landmarks_(json)
                    shape_gt = shapes_gt[cfg.bone_structures.index(bone)]
                    # shape_gt /= 2
                    gt_shapes.append(shape_gt)

                    x, y = shape_gt.T
                    plt.plot(x,
                             y,
                             'g|-',
                             ms=1.8,
                             lw=0.5,
                             label='Gold Standard')

                    plot_segmented_shape(img, shape, sc_name, bone, gt=True)

                    # Metrics
                    metrics.compute_segmentation_metrics(
                        shape_gt, shape, image, bone)

            subshapes = [
                shape[x:x + cfg.subs_len]
                for x in range(0, len(shape), cfg.subs_len)
            ]
            # avoid subshapes of 1 landmark
            if len(subshapes[-1]) == 1:
                lmrk = subshapes.pop()
                subshapes[-1] = np.vstack((subshapes[-1], lmrk))
            subshapes = np.array(subshapes)

            subshapes *= cfg.downScaleFactor

        # Save the computation time
        header = 'Computation Time for {}\n'.format(bone)
        header += 'Landmark Detection Initial Scale, Landmark Detection, ASM, Gradient Profiling for other scales (25%,50%,100%)'
        metrics.save_computation_time(times, header, bone)

    # Final segmentation
    plt.imshow(image, cmap=plt.cm.gray)
    final_shapes = np.array(final_shapes)

    for sh in final_shapes:
        x, y = sh.T
        plt.plot(x, y, 'o-', ms=2, mec='k', mew=0.3)

    plt.axis('off')
    output_path = cfg.resultsFolderPath
    plt.savefig(output_path + '/final_segmentation_' + bones_name + '_.' +
                cfg.img_format,
                format=cfg.img_format,
                bbox_inches='tight',
                pad_inches=0,
                dpi=cfg.dpi)
    plt.close()

    # JSW
    if len(final_shapes) % 2 == 0:  # this must be modified
        seg_distances = calculate_jsw(image, final_shapes, bones_name)
        if gt:
            gt_shapes = np.array(gt_shapes)
            gt_distances = calculate_jsw(image, gt_shapes, bones_name, gt=True)

            # Error Rates
            metrics.compute_jsw_error_rates(gt_distances, seg_distances,
                                            bones_name)
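
Note: Example #3 distributes landmark detection over the subshapes with `multiprocessing.Pool.starmap`, using `itertools.repeat` for the arguments shared by every task. A standalone sketch of that pattern, with a dummy worker in place of `landmark_detection.get_estimated_landmarks`:

# Standalone sketch of the Pool.starmap + itertools.repeat pattern from Example #3.
import multiprocessing
from itertools import repeat

def detect(bone, img, sc_name, subshape, n_sub):
    # Dummy worker: a real implementation would refine the subshape's landmarks here.
    return (n_sub, bone, sc_name, len(subshape))

if __name__ == '__main__':
    bone, img, sc_name = 'femur', None, '0_25'
    subshapes = [[(0, 0), (1, 1)], [(2, 2), (3, 3)], [(4, 4), (5, 5)]]

    with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
        # Only the subshape and its index vary per task; the rest is repeated.
        results = pool.starmap(detect,
                               zip(repeat(bone), repeat(img), repeat(sc_name),
                                   subshapes, range(len(subshapes))))
    print(results)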
Code Example #4
def run():

    images_list = os.listdir(cfg.datasetRoot)
    images_list = filter(lambda element: '.JPG' in element, images_list)

    dataset_images = os.listdir(cfg.datasetRoot)
    dataset_images = filter(lambda element: '.JPG' in element, dataset_images)
    count = len(list(dataset_images))

    print('Training', count, 'X-Ray Images')

    femur_shape = np.array([(141.33333333333334, 610.6666666666666),
                            (148.0, 604.0),
                            (155.33333333333334, 598.6666666666666),
                            (163.33333333333334, 592.0),
                            (169.33333333333334, 584.0),
                            (172.66666666666666, 575.3333333333334),
                            (174.66666666666666, 566.6666666666666),
                            (173.5, 556.5), (168.66666666666666, 548.0),
                            (165.33333333333334, 540.0), (168.0, 533.0),
                            (172.0, 527.5), (176.0, 521.5), (181.5, 515.5),
                            (186.5, 509.5), (190.5, 504.0), (196.5, 499.5),
                            (202.5, 495.0), (209.0, 492.0), (219.0, 493.5),
                            (231.0, 495.0), (244.0, 495.5), (254.0, 489.0),
                            (262.5, 480.0), (268.5, 471.5), (274.0, 462.5),
                            (275.5, 451.0), (275.5, 438.0), (274.0, 418.5),
                            (273.0, 406.5), (267.5, 396.5), (259.5, 387.5),
                            (249.0, 381.0), (238.0, 376.5), (226.0, 373.5),
                            (214.0, 372.5), (200.5, 374.5), (187.5, 378.5),
                            (176.5, 384.0), (166.0, 392.5), (160.0, 402.5),
                            (155.5, 414.0), (149.5, 422.5), (138.5, 425.0),
                            (126.0, 425.0), (115.5, 424.5), (105.5, 420.0),
                            (99.5, 414.0), (91.5, 405.5), (81.0, 400.0),
                            (70.5, 399.5), (61.5, 410.0), (51.0, 420.5),
                            (43.5, 431.0), (38.5, 443.5), (31.0, 456.0),
                            (26.0, 468.0), (22.5, 483.0), (25.5, 496.5),
                            (32.5, 507.0), (37.5, 517.5), (41.5, 528.5),
                            (44.5, 540.5), (46.5, 552.0), (48.0, 563.0),
                            (49.0, 574.5), (50.0, 585.0), (51.0, 598.0),
                            (51.0, 609.0)])
    db = cfg.database_root_gp
    data_storage.create_database(db)

    for filename in images_list:
        print('Processing Image: %s...' % filename)
        image_path = cfg.datasetRoot + '/' + filename
        image = io.imread(image_path, as_gray=True)
        fn = filename[0:-4]
        g = data_storage.create_group(fn)
        # Still missing: read a JSON for each structure and each shape
        shape = femur_shape  # simulated shape

        pyramid = pyramid_gaussian.get_pyramid(image)
        sigma = cfg.sigma_values
        patch_size = cfg.patch_sizes
        scales = cfg.scale_names
        matrix = []
        for p, s, ps, sc in zip(pyramid, sigma, patch_size, scales):
            sg = data_storage.create_group(db, 'scale_' + sc, g)
            img_grad = gp.get_gaussian_gradient_magnitude(p, s)
            # Modify mtx
            patches = gp.sample_patches(img_grad, shape, (ps, ps))

            for patch, i in zip(patches, range(1, len(patches) + 1)):
                data_storage.save_data(sg, 'patch_' + str(i), patch)

            shape /= cfg.downScaleFactor  # scale down the landmark positions
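
Note: `gp.get_gaussian_gradient_magnitude` and `gp.sample_patches` are not shown. A hedged sketch of what they plausibly do, assuming the gradient helper wraps `scipy.ndimage.gaussian_gradient_magnitude` and that patches are cut around each landmark without border handling (both assumptions, not the project's confirmed implementation):

# Illustrative sketches only; the project's gp module may differ.
import numpy as np
from scipy import ndimage

def get_gaussian_gradient_magnitude(image, sigma):
    # Gradient magnitude of the image after Gaussian smoothing with the given sigma.
    return ndimage.gaussian_gradient_magnitude(image, sigma=sigma)

def sample_patches(img_grad, shape, patch_size):
    # Cut one patch of size patch_size centred on each (x, y) landmark.
    # No border handling: landmarks near the edge would need padding first.
    ph, pw = patch_size
    patches = []
    for x, y in np.asarray(shape, dtype=int):
        r0, c0 = y - ph // 2, x - pw // 2
        patches.append(img_grad[r0:r0 + ph, c0:c0 + pw])
    return patches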
Code Example #5
centre = []
shape = np.vstack(subshapes[0])
x_val, y_val = shape.T

# subshape = json_reader.get_subshape(path, 12)
subshape = subshapes[0][3]
(cx, cy) = centroid(subshape)
centre.append((cx, cy))

print(shape.shape)
# print('All:',subshapes)
# print('One:', subshape)
print('Centroid:', cx, cy)

image = io.imread(cfg.imagePath, as_grey=True)
image = pg.get_pyramid(image)[0]
img_h, img_w = image.shape
# image = np.pad(image, ((50,), (50,)), 'constant', constant_values=(0,0))
fig, ax = plt.subplots()

################
# color map
# c = np.arange(1,20)
# cm = plt.get_cmap('rainbow') # there may be other options instead of hsv, brg, etc.
# no_points = len(c)
# ax.set_prop_cycle(cycler('color', [cm(1.*i/(no_points-1)) for i in range(no_points-1)]))
ax.set_prop_cycle(cycler('color', plt.cm.hsv(np.linspace(0.05, 1, 20))))

(patch_width, patch_height) = cfg.patch_shape
# x_values, y_values = sample_patches.sample_around_subshape(centre)
# x_values, y_values = sample_patches.sample_around_subshape(subshape, img_w, img_h)
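
Note: the `centroid(subshape)` helper in Example #5 is assumed to be the coordinate-wise mean of the subshape's landmarks; a one-function sketch under that assumption:

# Assumed centroid helper: coordinate-wise mean of the (x, y) landmark positions.
import numpy as np

def centroid(subshape):
    pts = np.asarray(subshape, dtype=float)   # (n_landmarks, 2)
    cx, cy = pts.mean(axis=0)
    return cx, cy

print(centroid([(0, 0), (2, 0), (2, 2), (0, 2)]))   # -> (1.0, 1.0)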
Code Example #6
                             (429.0, 536.5), (445.0, 530.0), (447.0, 514.5),
                             (448.5, 498.0), (449.5, 481.5), (447.0, 467.5),
                             (436.5, 468.5), (425.0, 468.0), (415.0, 467.5),
                             (403.5, 466.0), (393.5, 463.5), (383.5, 459.0),
                             (374.5, 454.0), (365.5, 448.5), (358.0, 441.0),
                             (352.0, 434.0), (346.0, 426.0), (340.0, 420.5),
                             (335.5, 412.5), (329.0, 405.5), (324.5, 397.5),
                             (318.5, 389.5), (313.5, 381.5), (308.0, 374.5),
                             (303.5, 366.5), (298.5, 358.0), (294.5, 350.5),
                             (289.0, 342.0), (286.5, 333.0), (284.0, 324.5),
                             (282.0, 315.5), (282.0, 305.0), (285.5, 295.5),
                             (293.0, 286.0), (295.0, 280.0)])

    image = img
    shape = femur_shape + 50
    images_pyramid = pyramid_gaussian.get_pyramid(image)
    patch_sizes = cfg.patch_sizes

    for img, ps in zip(images_pyramid[::-1], patch_sizes[::-1]):
        img = np.pad(img, cfg.padding, 'constant', constant_values=(0, 0))
        img_grad = get_gaussian_gradient_magnitude(img)
        # show_gradient_image(img_grad)
        # show_gradient_image_patches(img_grad, shape, ps)
        shape = (shape + 50) / cfg.downScaleFactor
    # ax[1].imshow(img_grad, cmap=plt.cm.gray)

    # plt.show()

    # patches = sample_patches(img_grad, shape, (20, 20)) # these patches must be stored; vec is a matrix of patches
    # plt.imshow(patches[0], cmap=plt.cm.gray)
    # fig, (ax1,ax2) = plt.subplots(1,2, sharex=True, sharey=True)
Code Example #7
def run():
    gp_DB = cfg.database_root_gp
    storage.create_database()  # database for landmark detection
    storage.create_database(gp_DB)  # DB for gradient profiling

    # images = glob.glob(cfg.datasetRoot+'/*.png')

    # List of X-Ray images in PNG format
    images_list = os.listdir(cfg.datasetRoot)
    images_list = filter(lambda element: '.JPG' in element, images_list)
    """
    images = []
    for img in images_list:
        if img.endswith('.png') or img.endswith('.JPG'):
            images.append(img)
    
    """
    ###
    # images_list = [img for img in os.listdir(cfg.datasetRoot) if img.endswith('.JPG')]

    ##############################################################
    dataset_images = os.listdir(cfg.datasetRoot)
    dataset_images = filter(lambda element: '.JPG' in element, dataset_images)
    count = len(list(dataset_images))

    print('Training', count, 'X-Ray Images')

    # List for each bone structure
    R_femurs = []
    L_femurs = []
    R_pelvis = []
    L_pelvis = []

    for filename in images_list:
        print('Training Image: {0}...'.format(filename))

        image_path = os.path.join(cfg.datasetRoot, filename)
        image = io.imread(image_path, as_gray=True)
        fn = filename[0:-4]

        pyramid = pyramid_gaussian.get_pyramid(image)

        json_file = os.path.join(cfg.path_json, fn + '.gsmdc')
        structures = reader.get_all_subshapes(json_file)

        im = storage.create_group(name=fn)
        gp_gr = storage.create_group(gp_DB, fn)

        for subshapes_, bone_name in zip(structures, cfg.bone_structures):
            # For ASM
            print('STRUCTURES', len(structures))
            shape_ = np.array(np.vstack(subshapes_))
            print('size', len(shape_))
            if bone_name == cfg.bone_structures[0]:
                R_femurs.append(shape_)
            elif bone_name == cfg.bone_structures[1]:
                R_pelvis.append(shape_)
            elif bone_name == cfg.bone_structures[2]:
                L_femurs.append(shape_)
            elif bone_name == cfg.bone_structures[3]:
                L_pelvis.append(shape_)

            # For initial scale
            subshapes = subshapes_ / (cfg.downScaleFactor ** (len(cfg.scale_names) - 1))
            shape = shape_ / (cfg.downScaleFactor ** (len(cfg.scale_names) - 1))
            init_flag = False

            g = storage.create_group(name=bone_name, parent=im)
            gp_group = storage.create_group(gp_DB, name=bone_name, parent=gp_gr)

            for img, scale, sigma, patch_size in zip(pyramid, cfg.scale_names, cfg.sigma_values, cfg.patch_sizes):
                # LANDMARK DETECTION DATA
                print('Scale: {0} - Subshapes: {1} '.format(scale, len(subshapes)))
                sg = storage.create_group(name='scale_' + scale, parent=g)
                if scale == cfg.init_scale:
                    init_flag = True
                    subshapes = np.array([shape])

                for i in range(len(subshapes)):
                    subs_g = storage.create_group(name='subshape_' + str(i), parent=sg)

                    try:
                        D, F, C = training_an_image(img, subshapes[i], init_flag)
                        storage.save_data(subs_g, 'D_' + fn, D)
                        storage.save_data(subs_g, 'F_' + fn, F)
                        storage.save_data(subs_g, 'C_' + fn, C)

                        if init_flag:
                            subshapes = subshapes_ / (cfg.downScaleFactor ** (len(cfg.scale_names) - 1))
                            init_flag = False
                    except:
                        print('Except subshape', i)

                # GRADIENT PROFILING DATA
                if scale != cfg.init_scale:
                    gp_g = storage.create_group(gp_DB, 'scale_' + scale, gp_group)
                    img_grad = grad_prof.get_gaussian_gradient_magnitude(img, sigma)
                    # img_grad = img

                    patches = grad_prof.sample_patches(img_grad, shape, (patch_size, patch_size))

                    mtx_patches = [np.ravel(patch) for patch in patches]

                    mtx_patches = np.array(mtx_patches).transpose()

                    storage.save_data(gp_g, 'patches', mtx_patches, gp_DB)

                shape *= cfg.downScaleFactor
                subshapes *= cfg.downScaleFactor

    bone_structures = [R_femurs, R_pelvis, L_femurs, L_pelvis]

    # ACTIVE SHAPE MODEL DATA
    for bone_shapes, model in zip(bone_structures, cfg.models):
        shapes = np.array(bone_shapes)
        if len(shapes) > 1:
            pca.run(shapes, model)

    print('Training Finished')
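
Note: the final loop of Example #7 hands the stacked training shapes of each bone structure to `pca.run` to build the Active Shape Model. Below is a hedged sketch of that training step using scikit-learn's PCA on flattened shapes; the project's actual pca module and its `model` argument are not shown, so the details here are assumptions.

# Illustrative ASM training step: flatten each (n_landmarks, 2) shape into one row
# and keep the principal modes of shape variation. The real pca.run may differ.
import numpy as np
from sklearn.decomposition import PCA

def train_shape_model(shapes, variance_kept=0.98):
    X = np.asarray(shapes, dtype=float).reshape(len(shapes), -1)  # one row per shape
    model = PCA(n_components=variance_kept, svd_solver='full')    # keep 98% of variance
    model.fit(X)
    return model.mean_, model.components_, model.explained_variance_

# Usage with dummy data: 10 training shapes of 65 landmarks each.
rng = np.random.default_rng(0)
mean_shape, modes, variances = train_shape_model(rng.normal(size=(10, 65, 2)))
print(modes.shape)   # (n_modes, 130)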