Example No. 1
def copyPanel(ima, imb, p):
    # Copy panel `p` from PIL image `imb` into `ima` and return the result as
    # a new PIL image. `panels` is a module-level mapping (not shown in this
    # listing) from panel name to a numpy index expression.
    a = fromimage(ima)
    b = fromimage(imb)
    try:
        a[panels[p]] = b[panels[p]]
    except Exception:
        # The panel shapes do not match; show both images to help debugging.
        # print a.shape, ima.size, b.shape, imb.size
        ima.show()
        imb.show()
    return toimage(a)
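A minimal usage sketch, assuming copyPanel and a module-level `panels` dict live in the same script, and that fromimage/toimage come from scipy.misc (present only in older SciPy releases); the panel layout below is hypothetical:

import numpy as np
from PIL import Image
from scipy.misc import fromimage, toimage

# Hypothetical panel layout: name -> numpy index expression (row/column slices).
panels = {"top_left": np.s_[0:64, 0:64]}

ima = Image.new("L", (128, 128), 0)    # destination image
imb = Image.new("L", (128, 128), 255)  # source image
out = copyPanel(ima, imb, "top_left")  # copies the 64x64 top-left block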
Example No. 2
def sample_hard_negatives(image, roi_mask, out_dir, image_id, abn_id,
                          patch_size=256, neg_cutoff=.35, nb_bkg=100,
                          start_sample_nb=0,
                          bkg_dir="background", verbose=False):
    """Sample hard-negative (background) patches near the ROI bounding box.

    WARNING: this definition of hard negatives may be problematic. Studies
    have shown that the context around an ROI is also useful for
    classification.
    """
    bkg_out = os.path.join(out_dir, bkg_dir)
    if not os.path.exists(bkg_out):
        os.makedirs(bkg_out)

    basename = "_".join([image_id, str(abn_id)])

    image = add_img_margins(image, patch_size/2)
    roi_mask = add_img_margins(roi_mask, patch_size/2)
    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype("uint8")
    ver = cv2.__version__.split(".")
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x,
    # but (contours, hierarchy) in OpenCV 2.x and 4.x.
    if ver[0] == "3":
        _, contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M["m10"]/M["m00"])
        cy = int(M["m01"]/M["m00"])
        print "ROI centroid=", (cx, cy)
        sys.stdout.flush()

    rng = np.random.RandomState(12345)
    # Sample hard-negative background patches around the ROI bounding box.
    sampled_bkg = start_sample_nb
    while sampled_bkg < start_sample_nb + nb_bkg:
        x1, x2 = (rx - patch_size/2, rx + rw + patch_size/2)
        y1, y2 = (ry - patch_size/2, ry + rh + patch_size/2)
        x1 = crop_val(x1, patch_size/2, image.shape[1] - patch_size/2)
        x2 = crop_val(x2, patch_size/2, image.shape[1] - patch_size/2)
        y1 = crop_val(y1, patch_size/2, image.shape[0] - patch_size/2)
        y2 = crop_val(y2, patch_size/2, image.shape[0] - patch_size/2)
        x = rng.randint(x1, x2)
        y = rng.randint(y1, y2)
        if not overlap_patch_roi((x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = image[y - patch_size/2:y + patch_size/2,
                          x - patch_size/2:x + patch_size/2]
            patch = patch.astype("int32")
            patch_image = toimage(patch, high=patch.max(), low=patch.min(),
                                  mode="I")
            filename = basename + "_%04d" % (sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_image.save(fullname)
            sampled_bkg += 1
            if verbose:
                print "sampled a hns patch at (x,y) center=", (x, y)
                sys.stdout.flush()
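sample_hard_negatives, sample_blob_negatives and sample_patches all rely on helpers that are not part of this listing: add_img_margins, crop_val and overlap_patch_roi. The sketch below is only a guess at their behavior, inferred from the call sites; the original definitions may differ (in particular, the overlap measure):

import numpy as np

def add_img_margins(img, margin):
    # Pad the image with a zero border of `margin` pixels on every side so
    # that patches centered near the original border stay in bounds.
    return np.pad(img, int(margin), mode="constant")

def crop_val(v, minv, maxv):
    # Clamp a coordinate into [minv, maxv].
    return min(max(v, minv), maxv)

def overlap_patch_roi(patch_center, patch_size, roi_mask, cutoff=.5):
    # Assumed overlap measure: fraction of the patch area covered by the ROI
    # mask, compared against `cutoff`.
    x, y = patch_center
    half = int(patch_size / 2)
    sub = roi_mask[y - half:y + half, x - half:x + half]
    return (sub > 0).mean() > cutoff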
Example No. 3
def sample_blob_negatives(image, roi_mask, out_dir, image_id, abn_id, blob_detector,
                          patch_size=256, neg_cutoff=.35, nb_bkg=100,
                          start_sample_nb=0,
                          bkg_dir="background", verbose=False):
    """Sample background patches centered on blob detections that do not
    overlap the ROI. Returns the number of patches saved.
    """
    bkg_out = os.path.join(out_dir, bkg_dir)
    if not os.path.exists(bkg_out):
        os.makedirs(bkg_out)

    basename = "_".join([image_id, str(abn_id)])

    image = add_img_margins(image, patch_size/2)
    roi_mask = add_img_margins(roi_mask, patch_size/2)
    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype("uint8")
    ver = cv2.__version__.split(".")
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x,
    # but (contours, hierarchy) in OpenCV 2.x and 4.x.
    if ver[0] == "3":
        _, contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    if verbose:
        M = cv2.moments(contours[idx])
        cx = int(M["m10"]/M["m00"])
        cy = int(M["m01"]/M["m00"])
        print "ROI centroid=", (cx, cy)
        sys.stdout.flush()

    # Sample background patches centered on detected blobs.
    key_pts = blob_detector.detect((image/image.max()*255).astype("uint8"))
    rng = np.random.RandomState(12345)
    key_pts = rng.permutation(key_pts)
    sampled_bkg = 0
    for kp in key_pts:
        if sampled_bkg >= nb_bkg:
            break
        x, y = int(kp.pt[0]), int(kp.pt[1])
        if not overlap_patch_roi((x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = image[y - patch_size/2:y + patch_size/2,
                          x - patch_size/2:x + patch_size/2]
            patch = patch.astype("int32")
            patch_image = toimage(patch, high=patch.max(), low=patch.min(),
                                  mode="I")
            filename = basename + "_%04d" % (start_sample_nb + sampled_bkg) + ".png"
            fullname = os.path.join(bkg_out, filename)
            patch_image.save(fullname)
            if verbose:
                print "sampled a blob patch at (x,y) center=", (x, y)
                sys.stdout.flush()
            sampled_bkg += 1
    return sampled_bkg
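sample_blob_negatives expects a ready-made blob_detector with a detect() method that returns keypoints. One way to build such a detector with OpenCV's SimpleBlobDetector is sketched below; the thresholds are hypothetical and the original code may configure it differently:

import cv2

params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = 100        # hypothetical values; tune for the data
params.maxArea = 10000
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False
blob_detector = cv2.SimpleBlobDetector_create(params)  # OpenCV 3+; 2.4 uses cv2.SimpleBlobDetector(params)

# nb_saved = sample_blob_negatives(image, roi_mask, out_dir, image_id, abn_id,
#                                  blob_detector, verbose=True)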
Example No. 4
def crop(im, slices):
    # Crop a PIL image with a tuple of numpy slices; returns a new PIL image.
    return toimage(fromimage(im)[slices])
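A usage sketch, assuming a PIL image and a tuple of slices (np.s_ is a convenient way to build one); note that fromimage indexes as (row, column):

import numpy as np
from PIL import Image

im = Image.new("L", (200, 100))           # 200 wide, 100 tall
cropped = crop(im, np.s_[10:60, 20:120])  # rows 10-59, columns 20-119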
Example No. 5
def _summarize_progress(train_data,
                        feature,
                        label,
                        gene_output,
                        batch,
                        suffix,
                        max_samples=8,
                        gene_param=None):

    td = train_data

    size = [label.shape[1], label.shape[2]]

    # Complex zero-filled (zpad) input; channels 0 and 1 hold the real and imaginary parts.
    complex_zpad = feature

    # zpad magnitude
    if True:
        mag_zpad = tf.sqrt(complex_zpad[:, :, :, 0]**2 +
                           complex_zpad[:, :, :, 1]**2)
    else:
        mag_zpad = tf.sqrt(complex_zpad[:, :, :, 0]**2)

    # output image
    if True:
        gene_output_complex = tf.complex(gene_output[:, :, :, 0],
                                         gene_output[:, :, :, 1])
    else:
        gene_output_complex = gene_output
    mag_output = tf.abs(gene_output_complex)  #print('size_mag_output', mag)

    if True:
        label_complex = tf.complex(label[:, :, :, 0], label[:, :, :, 1])
    else:
        label_complex = label
    mag_gt = tf.abs(label_complex)

    # calculate SSIM SNR and MSE for test images
    signal = mag_gt[:, 20:size[0] - 20, 24:size[1] - 24]  # crop out edges
    Gout = mag_output[:, 20:size[0] - 20, 24:size[1] - 24]
    SSIM = tf.reduce_mean(
        tf.image.ssim(tf.expand_dims(signal, -1),
                      tf.expand_dims(Gout, -1),
                      max_val=1.0))
    signal = tf.reshape(signal, (FLAGS.batch_size, -1))  # and flatten
    Gout = tf.reshape(Gout, (FLAGS.batch_size, -1))
    s_G = tf.abs(signal - Gout)
    SNR_output = 10 * tf.reduce_sum(
        tf.log(
            tf.reduce_sum(signal**2, axis=1) /
            tf.reduce_sum(s_G**2, axis=1))) / tf.log(10.0) / FLAGS.batch_size
    MSE = tf.reduce_mean(s_G)

    # Concatenate the magnitude images side by side for visualization.
    if True:
        image = tf.concat(axis=2,
                          values=[
                              mag_zpad, mag_output, mag_gt,
                              50 * abs(mag_output - mag_zpad),
                              50 * abs(mag_gt - mag_output)
                          ])
    else:
        image = tf.concat(
            axis=2,
            values=[mag_zpad, mag_output, mag_gt,
                    abs(mag_gt - mag_zpad)])
    image = image[0:max_samples, :, :]
    image = tf.concat(axis=0,
                      values=[image[i, :, :] for i in range(int(max_samples))])
    image, snr, mse, ssim = td.sess.run([image, SNR_output, MSE, SSIM])
    # save to image file
    filename = 'batch%06d_%s.png' % (batch, suffix)
    filename = os.path.join(FLAGS.train_dir, filename)
    try:
        scipy.misc.toimage(image, cmax=1.0, cmin=0).save(filename)
    except:
        import pilutil
        pilutil.toimage(image, cmax=1.0, cmin=0).save(filename)
    print("    Saved %s" % (filename, ))
    return snr, mse, ssim
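This _summarize_progress (and the variant in the next example) falls back to a local pilutil module because scipy.misc.toimage was deprecated in SciPy 1.0 and later removed. If neither is available, a minimal stand-in for this particular call pattern (scaling [cmin, cmax] to an 8-bit image) could look like the sketch below; it is an assumption, not the full toimage API, and the mode="I" calls elsewhere in this listing would need more than this:

import numpy as np
from PIL import Image

def toimage(arr, cmin=0.0, cmax=1.0):
    # Scale [cmin, cmax] to [0, 255] and return an 8-bit PIL image.
    arr = np.asarray(arr, dtype=np.float64)
    arr = np.clip((arr - cmin) / (cmax - cmin), 0.0, 1.0)
    return Image.fromarray((arr * 255).astype(np.uint8))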
Example No. 6
def _summarize_progress(train_data,
                        feature,
                        label,
                        gene_output,
                        batch,
                        suffix,
                        max_samples=8,
                        gene_param=None):

    td = train_data

    size = [label.shape[1], label.shape[2]]

    # Complex zero-filled (zpad) input; channels 0 and 1 hold the real and imaginary parts.
    complex_zpad = tf.image.resize_nearest_neighbor(feature, size)
    complex_zpad = tf.maximum(tf.minimum(complex_zpad, 1.0), 0.0)

    # zpad magnitude
    if FLAGS.use_phase == True:
        mag_zpad = tf.sqrt(complex_zpad[:, :, :, 0]**2 +
                           complex_zpad[:, :, :, 1]**2)
    else:
        mag_zpad = tf.sqrt(complex_zpad[:, :, :, 0]**2)
    mag_zpad = tf.maximum(tf.minimum(mag_zpad, 1.0), 0.0)
    mag_zpad = tf.reshape(mag_zpad, [FLAGS.batch_size, size[0], size[1], 1])
    mag_zpad = tf.concat(axis=3, values=[mag_zpad, mag_zpad])

    # output image
    if FLAGS.use_phase == True:
        gene_output_complex = tf.complex(gene_output[:, :, :, 0],
                                         gene_output[:, :, :, 1])
    else:
        gene_output_complex = gene_output
    mag_output = tf.maximum(tf.minimum(tf.abs(gene_output_complex), 1.0), 0.0)
    mag_output = tf.reshape(mag_output,
                            [FLAGS.batch_size, size[0], size[1], 1])
    #print('size_mag_output', mag)
    mag_output = tf.concat(axis=3, values=[mag_output, mag_output])

    if FLAGS.use_phase == True:
        label_complex = tf.complex(label[:, :, :, 0], label[:, :, :, 1])
    else:
        label_complex = label
    label_mag = tf.abs(label_complex)
    label_mag = tf.reshape(label_mag, [FLAGS.batch_size, size[0], size[1], 1])
    mag_gt = tf.concat(axis=3, values=[label_mag, label_mag])

    # Concatenate the images side by side for visualization.
    if FLAGS.use_phase == True:
        image = tf.concat(axis=2,
                          values=[complex_zpad, mag_zpad, mag_output, mag_gt])
    else:
        image = tf.concat(axis=2, values=[mag_zpad, mag_output, mag_gt])
    image = image[0:max_samples, :, :, :]
    image = tf.concat(
        axis=0, values=[image[i, :, :, :] for i in range(int(max_samples))])
    image = td.sess.run(image)
    print('save to image size {0} type {1}'.format(image.shape, type(image)))

    # 3rd channel for visualization
    mag_3rd = np.maximum(image[:, :, 0], image[:, :, 1])
    image = np.concatenate((image, mag_3rd[:, :, np.newaxis]), axis=2)

    # save to image file
    print('save to image,', image.shape)
    filename = 'batch%06d_%s.png' % (batch, suffix)
    filename = os.path.join(FLAGS.train_dir, filename)
    try:
        scipy.misc.toimage(image, cmin=0., cmax=1.).save(filename)
    except:
        import pilutil
        pilutil.toimage(image, cmin=0., cmax=1.).save(filename)
    print("    Saved %s" % (filename, ))

    #gene_output_abs = np.abs(gene_output)
    # save layers and var_list
    if gene_param is not None:
        #add feature
        print('dimension for input, ref, output:', feature.shape, label.shape,
              gene_output.shape)
        gene_param['feature'] = feature.tolist()
        gene_param['label'] = label.tolist()
        gene_param['gene_output'] = gene_output.tolist()
        # add input arguments
        # print(FLAGS.__dict__['__flags'])
        # gene_param['FLAGS'] = FLAGS.__dict__['__flags']

        # save json
Example No. 7
        def do_sampling(pat_df, out_dir):
            calc_dir = os.path.join(out_dir, "calc")
            mass_dir = os.path.join(out_dir, "mass")
            neg_dir = os.path.join(out_dir, "neg")
            if not os.path.exists(calc_dir):
                os.makedirs(calc_dir)
            if not os.path.exists(mass_dir):
                os.makedirs(mass_dir)
            if not os.path.exists(neg_dir):
                os.makedirs(neg_dir)

            for patient_id, side, view in pat_df.index.unique():
                cur_desc = description_df.loc[patient_id].loc[side].loc[view]
                abn_ids = cur_desc["abnormality id"]
                pathologies = cur_desc["pathology"]
                abn_types = cur_desc["abnormality type"]
                if isinstance(cur_desc, pd.Series):
                    abn_ids = [abn_ids]
                    pathologies = [pathologies]
                    abn_types = [abn_types]

                # Read mask image(s).
                for abn_id, pathology, abn_type in zip(abn_ids, pathologies,
                                                       abn_types):
                    # NOTE csv not reliable due to formatting error, and there are missing files.
                    # image_path = cur_desc["image file path"]
                    # mask_path = cur_desc["ROI mask file path"]
                    image_id = "_".join([patient_id, side, view])
                    base_name = "_".join([image_id, str(abn_id)])
                    file_name = base_name + ".png"
                    if pathology.startswith("MALIGNANT"):
                        if abn_type == "calcification":
                            save_dir = calc_dir
                        elif abn_type == "mass":
                            save_dir = mass_dir
                    else:
                        save_dir = neg_dir
                    full_path = os.path.join(save_dir, file_name)

                    if os.path.exists(full_path):
                        print "already exists:", full_path
                        continue

                    try:
                        image, _ = get_image_and_mask(
                            split="Training" if
                            ("train" in description_path) else "Test",
                            patient_id=patient_id,
                            side=side,
                            view=view,
                            image_dir=image_dir,
                            roi_mask_dir=roi_mask_dir,
                            abn_type=abn_type,
                            abn_id=abn_id,
                            target_height=target_height,
                            target_width=target_width)

                        print "ID:%s, read image of size=%s" % (image_id,
                                                                image.shape)

                        image = image.astype("int32")
                        image = toimage(image,
                                        high=image.max(),
                                        low=image.min(),
                                        mode="I")
                        # image = image.reshape((image.shape[0], image.shape[1], 1))
                        # import pdb; pdb.set_trace()
                        image.save(full_path)
                    except RuntimeError as exception:
                        print exception

                    print ""
def sample_patches(image,
                   roi_mask,
                   out_dir,
                   image_id,
                   abn_id,
                   pos,
                   patch_size=256,
                   pos_cutoff=.75,
                   neg_cutoff=.35,
                   nb_bkg=100,
                   nb_abn=100,
                   start_sample_nb=0,
                   abn_type="calcification",
                   bkg_dir="background",
                   calc_pos_dir="calc_mal",
                   calc_neg_dir="calc_ben",
                   mass_pos_dir="mass_mal",
                   mass_neg_dir="mass_ben",
                   verbose=False):
    """Sample abnormality (ROI) patches and background patches from one image
    and save them as PNG files under out_dir.
    """
    if pos:
        if abn_type == "calcification":
            roi_out = os.path.join(out_dir, calc_pos_dir)
        else:
            roi_out = os.path.join(out_dir, mass_pos_dir)
    else:
        if abn_type == "calcification":
            roi_out = os.path.join(out_dir, calc_neg_dir)
        else:
            roi_out = os.path.join(out_dir, mass_neg_dir)
    bkg_out = os.path.join(out_dir, bkg_dir)

    if not os.path.exists(roi_out):
        os.mkdir(roi_out)
    if not os.path.exists(bkg_out):
        os.mkdir(bkg_out)

    base_name = "_".join([image_id, str(abn_id)])

    image = add_img_margins(image, patch_size / 2)
    roi_mask = add_img_margins(roi_mask, patch_size / 2)
    # Get ROI bounding box.
    roi_mask_8u = roi_mask.astype("uint8")
    contours, _ = cv2.findContours(roi_mask_8u.copy(), cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)
    cont_areas = [cv2.contourArea(cont) for cont in contours]
    idx = np.argmax(cont_areas)  # find the largest contour.
    rx, ry, rw, rh = cv2.boundingRect(contours[idx])
    # Compute the ROI centroid (falling back to the bounding-box center); it
    # is needed below when nb_abn == 1, not only for verbose output.
    M = cv2.moments(contours[idx])
    try:
        cx = int(M["m10"] / M["m00"])
        cy = int(M["m01"] / M["m00"])
        if verbose:
            print "ROI centroid=", (cx, cy)
            sys.stdout.flush()
    except ZeroDivisionError:
        cx = rx + int(rw / 2)
        cy = ry + int(rh / 2)
        if verbose:
            print "ROI centroid=Unknown, use b-box center=", (cx, cy)
            sys.stdout.flush()

    rng = np.random.RandomState(12345)
    # Sample abnormality first.
    sampled_abn = 0
    nb_try = 0
    while sampled_abn < nb_abn:
        file_name = base_name + "_%04d" % (sampled_abn) + ".png"
        full_path = os.path.join(roi_out, file_name)
        if os.path.exists(full_path):
            print "already exists:", full_path
            sampled_abn += 1
            continue

        if nb_abn > 1:
            x = rng.randint(rx, rx + rw)
            y = rng.randint(ry, ry + rh)
            nb_try += 1
            if nb_try >= 1000:
                print "Nb of trials reached maximum, decrease overlap cutoff by 0.05"
                sys.stdout.flush()
                pos_cutoff -= .05
                nb_try = 0
                if pos_cutoff <= .0:
                    raise Exception("overlap cutoff becomes non-positive, "
                                    "check roi mask input.")
        else:
            x = cx
            y = cy

        # import pdb; pdb.set_trace()
        if nb_abn == 1 or overlap_patch_roi(
            (x, y), patch_size, roi_mask, cutoff=pos_cutoff):
            patch = image[y - patch_size / 2:y + patch_size / 2,
                          x - patch_size / 2:x + patch_size / 2]
            patch = patch.astype("int32")
            patch_image = toimage(patch,
                                  high=patch.max(),
                                  low=patch.min(),
                                  mode="I")
            # patch = patch.reshape((patch.shape[0], patch.shape[1], 1))
            # import pdb; pdb.set_trace()
            patch_image.save(full_path)
            sampled_abn += 1
            nb_try = 0
            if verbose:
                print "sampled an", abn_id, "patch at (x,y) center=", (x, y)
                sys.stdout.flush()

    # Sample background.
    sampled_bkg = start_sample_nb
    while sampled_bkg < start_sample_nb + nb_bkg:
        file_name = base_name + "_%04d" % (sampled_bkg) + ".png"
        full_path = os.path.join(bkg_out, file_name)
        if os.path.exists(full_path):
            print "already exists:", full_path
            sampled_bkg += 1
            continue

        x = rng.randint(patch_size / 2, image.shape[1] - patch_size / 2)
        y = rng.randint(patch_size / 2, image.shape[0] - patch_size / 2)
        if not overlap_patch_roi(
            (x, y), patch_size, roi_mask, cutoff=neg_cutoff):
            patch = image[y - patch_size / 2:y + patch_size / 2,
                          x - patch_size / 2:x + patch_size / 2]
            patch = patch.astype("int32")
            patch_image = toimage(patch,
                                  high=patch.max(),
                                  low=patch.min(),
                                  mode="I")
            patch_image.save(full_path)
            sampled_bkg += 1
            if verbose:
                print "sampled a bkg patch at (x,y) center=", (x, y)
                sys.stdout.flush()
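A hedged driver sketch for sample_patches, assuming `image` and `roi_mask` are 2-D numpy arrays of the same shape with a non-zero mask inside the abnormality; the image_id below is hypothetical:

sample_patches(image, roi_mask,
               out_dir="./patches", image_id="P_00001_LEFT_CC", abn_id=1,
               pos=True, abn_type="mass", patch_size=256,
               nb_abn=10, nb_bkg=10, verbose=True)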