Example #1
def main():
    args = get_args()
    output_path = args.output
    img_size = args.img_size

    mypath = '../data/CACD2000'
    isPlot = False
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    #    landmark_list = []
    #    for i in tqdm(range(len(onlyfiles))):
    #        landmark_list.append(get_landmarks(onlyfiles[i], args))

    landmark_ref = np.matrix(
        np.load('../data/CACD_mean_face.npy', allow_pickle=True))

    # Points used to line up the images.
    ALIGN_POINTS = list(range(16))

    for i in tqdm(range(len(onlyfiles))):

        img_name = onlyfiles[i]
        input_img = cv2.imread(mypath + '/' + img_name)
        input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB)
        img_h, img_w, _ = np.shape(input_img)

        landmark = get_landmarks(img_name, args)[0]
        M = transformation_from_points(landmark_ref[ALIGN_POINTS],
                                       landmark[ALIGN_POINTS])
        input_img = warp_im(input_img, M, (256, 256, 3))
        io.imsave(os.path.join(output_path, img_name), input_img)
Example #2
def getMaskContour(mask_dir, atlas_img, predicted_pts, actual_pts, cwd, n, main_mask):
    """
    Gets the contour of the brain's boundaries and applies a piecewise affine transform to the brain atlas
    based on the cortical landmarks predicted in dlc_predict (and peaks of activity on the sensory map, if available).
    :param mask_dir: The path to the directory containing the U-net masks of the brain's boundaries.
    :param atlas_img: The brain atlas to be transformed.
    :param predicted_pts: The coordinates of the cortical landmarks predicted in dlc_predict (or, for the second run
    of this function, the coordinates of the peaks of activity in the sensory map).
    :param actual_pts: The fixed coordinates of the cortical landmarks on the brain atlas (or, for the second run of
    this function, the fixed coordinates of the peaks of sensory activity on the brain atlas).
    :param cwd: The path to the current working directory.
    :param n: The number of the current image in the directory.
    :param main_mask: If True, saves the U-net mask for the current image to the working directory.
    """
    c_landmarks = np.empty([0, 2])
    c_atlas_landmarks = np.empty([0, 2])
    mask = cv2.imread(mask_dir, cv2.IMREAD_GRAYSCALE)
    atlas_to_warp = atlas_img
    mask = np.uint8(mask)
    cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
    for cnt in cnts:
        cnt = cnt[:, 0, :]
        cnt = np.asarray(cnt).astype("float32")
        c_landmarks = np.concatenate((c_landmarks, cnt))
        c_atlas_landmarks = np.concatenate((c_atlas_landmarks, cnt))
    c_landmarks = np.concatenate((c_landmarks, predicted_pts))
    c_atlas_landmarks = np.concatenate((c_atlas_landmarks, actual_pts))
    tform = PiecewiseAffineTransform()
    tform.estimate(c_atlas_landmarks, c_landmarks)
    dst = warp(atlas_to_warp, tform, output_shape=(512, 512))
    if main_mask:
        io.imsave(os.path.join(cwd, "mask_{}.png".format(n)), mask)
    return dst
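A minimal usage sketch for getMaskContour; the file names, landmark coordinates, and image index below are hypothetical placeholders:

import os
import numpy as np
from skimage import io

# Hypothetical inputs: a U-net mask on disk, a 512x512 atlas image, and
# matching (N, 2) landmark arrays for the prediction and the reference atlas.
atlas = io.imread('atlas.png')
predicted = np.array([[100.0, 120.0], [400.0, 130.0], [256.0, 300.0]])
actual = np.array([[102.0, 148.0], [410.0, 148.0], [256.0, 256.0]])
warped = getMaskContour('mask_0.png', atlas, predicted, actual,
                        os.getcwd(), 0, main_mask=True)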
Example #3
def processOneImage(inputPath, outputPath):
    image = io.imread(inputPath)
    greyImage = rgb2grey(image)
    threshold = threshold_otsu(greyImage)
    imgout = closing(greyImage > threshold, square(1))
    imgout = crop(imgout)
    imgout = transform.resize(imgout, (max(imgout.shape), max(imgout.shape)))
    io.imsave(outputPath, imgout)
Example #5
def save_image(path, image):
    # Un-normalize the image so that it looks good
    image = image + CONFIG.MEANS
    # image = image * 255
    # Clip and Save the image
    image = np.clip(image[0], 0, 255).astype('uint8')
    # scipy.misc.imsave(path, image)
    io.imsave(path, image)
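A short usage sketch, assuming a hypothetical CONFIG object whose MEANS attribute holds the per-channel means subtracted during preprocessing (VGG-style values shown here):

import numpy as np

class CONFIG:
    # Hypothetical per-channel means, broadcastable over a (1, H, W, 3) batch
    MEANS = np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))

generated = np.zeros((1, 300, 400, 3))  # stand-in for a network output
save_image('output.png', generated)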
Example #6
def convert_pascal_berkeley_augmented_mat_annotations_to_png(pascal_berkeley_augmented_root):
    """ Creates a new folder in the root folder of the dataset with annotations stored in .png.
    The function accepts a full path to the root of Berkeley augmented Pascal VOC segmentation
    dataset and converts annotations that are stored in .mat files to .png files. It creates
    a new folder dataset/cls_png where all the converted files will be located. If this
    directory already exists, the function does nothing. The Berkeley augmented dataset
    can be downloaded from here:
    http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz

    Parameters
    ----------
    pascal_berkeley_augmented_root : string
        Full path to the root of the augmented Berkeley PASCAL VOC dataset.
    """
    def read_class_annotation_array_from_berkeley_mat(mat_filename, key='GTcls'):
        #  Mat to png conversion for http://www.cs.berkeley.edu/~bharath2/codes/SBD/download.html
        # 'GTcls' key is for class segmentation
        # 'GTinst' key is for instance segmentation
        #  Credit: https://github.com/martinkersner/train-DeepLab/blob/master/utils.py
        mat = scipy.io.loadmat(mat_filename, mat_dtype=True, squeeze_me=True, struct_as_record=False)
        return mat[key].Segmentation

    mat_file_extension_string = '.mat'
    png_file_extension_string = '.png'
    relative_path_to_annotation_mat_files = 'dataset/cls'
    relative_path_to_annotation_png_files = 'dataset/cls_png'

    annotation_mat_files_fullpath = os.path.join(pascal_berkeley_augmented_root,
                                                 relative_path_to_annotation_mat_files)
    annotation_png_save_fullpath = os.path.join(pascal_berkeley_augmented_root,
                                                relative_path_to_annotation_png_files)
    # Create the folder where all the converted png files will be placed
    # If the folder already exists, do nothing
    if not os.path.exists(annotation_png_save_fullpath):
        os.makedirs(annotation_png_save_fullpath)
    else:
        return

    for current_mat_file_name in os.listdir(annotation_mat_files_fullpath):
        current_file_name_without_extention = current_mat_file_name[:-len(mat_file_extension_string)]
        current_mat_file_full_path = os.path.join(annotation_mat_files_fullpath, current_mat_file_name)
        current_png_file_full_path_to_be_saved = os.path.join(annotation_png_save_fullpath, current_file_name_without_extention)
        current_png_file_full_path_to_be_saved += png_file_extension_string

        annotation_array = read_class_annotation_array_from_berkeley_mat(current_mat_file_full_path)

        io.imsave(current_png_file_full_path_to_be_saved, annotation_array)
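Calling the converter needs only the dataset root; the path below is a placeholder for wherever the benchmark archive was extracted:

# Hypothetical path; <root>/dataset/cls must contain the .mat annotations.
convert_pascal_berkeley_augmented_mat_annotations_to_png('/data/benchmark_RELEASE')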
Example #7
def atlas_from_mat(input_file, mat_cnt_list):
    """
    Generates a binary brain atlas from a .mat file.
    :param input_file: The input .mat file representing a brain atlas (with white = 255 and black = 0)
    :param mat_cnt_list: The list to which contours extracted from the .mat file should be appended
    :return: A thresholded binary brain atlas.
    """
    file = input_file
    atlas_base = np.zeros((512, 512), dtype="uint8")
    if glob.glob(os.path.join(input_file, "*.mat")):
        mat = scipy.io.loadmat(file)
        mat_shape = mat[list(mat.keys())[3]]
        if len(mat_shape.shape) > 2:
            for val in range(0, mat_shape.shape[2]):
                mat_roi = mat_shape[:, :, val]
                mat_resize = cv2.resize(mat_roi, (512, 512))
                mat_resize = np.uint8(mat_resize)
                ret, thresh = cv2.threshold(mat_resize, 5, 255, cv2.THRESH_BINARY_INV)
                mat_roi_cnt = cv2.findContours(
                    thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
                )
                mat_roi_cnt = imutils.grab_contours(mat_roi_cnt)
                c_to_save = max(mat_roi_cnt, key=cv2.contourArea)
                mat_cnt_list.append(c_to_save)
                cv2.drawContours(atlas_base, mat_roi_cnt, -1, (255, 255, 255), 1)
            ret, thresh = cv2.threshold(atlas_base, 5, 255, cv2.THRESH_BINARY_INV)
            io.imsave("atlas_unresized_test.png", thresh)
        else:
            mat = mat["atlas"]
            mat_resize = cv2.resize(mat, (512, 512))
            ret, thresh = cv2.threshold(mat_resize, 5, 255, cv2.THRESH_BINARY_INV)
    else:
        atlas_im = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
        atlas_resize = np.uint8(atlas_im)
        ret, atlas_resize = cv2.threshold(atlas_resize, 127, 255, 0)
        io.imsave("atlas_unresized_test.png", atlas_resize)
        roi_cnt, hierarchy = cv2.findContours(
            atlas_resize, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
        )[-2:]
        for val in roi_cnt:
            c_to_save = max(val, key=cv2.contourArea)
            mat_cnt_list.append(c_to_save)
            cv2.drawContours(atlas_base, val, -1, (255, 255, 255), 1)
        ret, thresh = cv2.threshold(atlas_base, 5, 255, cv2.THRESH_BINARY_INV)
    return thresh
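A brief usage sketch; the atlas path is a placeholder and the list collects the extracted contours:

# Hypothetical input: either a .mat atlas or a plain grayscale image.
mat_cnt_list = []
binary_atlas = atlas_from_mat('atlases/atlas_ROIs.mat', mat_cnt_list)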
Example #8
def train(train_loader, model, criterion, optimizer, epoch, result_dir):
    losses = AverageMeter()
    model.train()

    for ind, (noise_img, origin_img) in enumerate(train_loader):
        st = time.time()

        input_var = noise_img.cuda()
        target_var = origin_img.cuda()

        output = model(input_var)
        loss = criterion(output, target_var)

        losses.update(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print('[{0}][{1}]\t'
              'lr: {lr:.5f}\t'
              'Loss: {loss.val:.4f} ({loss.avg:.4f})\t'
              'Time: {time:.3f}'.format(epoch,
                                        ind,
                                        lr=optimizer.param_groups[-1]['lr'],
                                        loss=losses,
                                        time=time.time() - st))

        if epoch % args.save_freq == 0:
            if not os.path.isdir(os.path.join(result_dir, '%04d' % epoch)):
                os.makedirs(os.path.join(result_dir, '%04d' % epoch))

            origin_np = origin_img.numpy()
            noise_np = noise_img.numpy()
            output_np = output.cpu().detach().numpy()

            origin_np_img = chw_to_hwc(origin_np[0])
            noise_np_img = chw_to_hwc(noise_np[0])
            output_img = chw_to_hwc(np.clip(output_np[0], 0, 1))

            temp = np.concatenate((origin_np_img, noise_np_img, output_img),
                                  axis=1)
            io.imsave(
                os.path.join(result_dir, '%04d/train_%d.jpg' % (epoch, ind)),
                np.uint8(temp * 255))
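train() assumes an AverageMeter helper; a minimal version consistent with the .update(), .val, and .avg uses above might look like this (a sketch, not necessarily the repository's exact class):

class AverageMeter:
    # Tracks the most recent value and a running average
    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0.0, 0.0, 0, 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count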
Example #9
def homography_match(warp_from, warp_to, output_mask_path, n):
    # Create ORB detector with 5000 features.
    orb_detector = cv2.ORB_create(5000)

    # Find keypoints and descriptors.
    # The first arg is the image, second arg is the mask
    #  (which is not required in this case).
    kp1, d1 = orb_detector.detectAndCompute(warp_from, None)  # im
    kp2, d2 = orb_detector.detectAndCompute(warp_to, None)  # atlas_warped

    # Match features between the two images.
    # We create a Brute Force matcher with
    # Hamming distance as measurement mode.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Match the two sets of descriptors.
    matches = matcher.match(d1, d2)

    # Sort matches on the basis of their Hamming distance.
    matches.sort(key=lambda x: x.distance)

    # Keep the top 90% of matches.
    matches = matches[: int(len(matches) * 0.9)]
    no_of_matches = len(matches)

    # Define empty matrices of shape no_of_matches * 2.
    p1 = np.zeros((no_of_matches, 2))
    p2 = np.zeros((no_of_matches, 2))

    for i in range(len(matches)):
        p1[i, :] = kp1[matches[i].queryIdx].pt
        p2[i, :] = kp2[matches[i].trainIdx].pt

    # Find the homography matrix.
    homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)

    # Use this matrix to transform the
    # colored image wrt the reference image.
    atlas_warped = cv2.warpPerspective(warp_to, homography, (512, 512))
    atlas_homography_transform_path = os.path.join(
        output_mask_path, "{}_atlas_homography.png".format(str(n))
    )
    io.imsave(atlas_homography_transform_path, atlas_warped)
    return atlas_warped
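A short usage sketch; the image files and output directory below are hypothetical, and both inputs should be grayscale images of the same scene:

import cv2
brain = cv2.imread('0_brain.png', cv2.IMREAD_GRAYSCALE)
atlas = cv2.imread('0_atlas.png', cv2.IMREAD_GRAYSCALE)
aligned = homography_match(brain, atlas, 'output_mask', n=0)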
Example #10
def loadNpy():
    testImgs = np.load('./data/test_set_fer2013.npy')
    testLabel = np.load('./data/test_labels_fer2013.npy')

    trainImgs = np.load('./data/data_set_fer2013.npy')
    trainLabel = np.load('./data/data_labels_fer2013.npy')

    testPath = './data/test'
    trainPath = './data/train'

    if not os.path.exists(testPath):
        os.mkdir(testPath)

    if not os.path.exists(trainPath):
        os.mkdir(trainPath)

    for ii in range(7):
        path = join(testPath, str(ii))
        if not os.path.exists(path):
            os.mkdir(path)
        path = join(trainPath, str(ii))
        if not os.path.exists(path):
            os.mkdir(path)

    num = 0
    # write train images
    for ii in range(len(trainImgs)):
        label = trainLabel[ii]
        imgdata = trainImgs[ii]
        img = tile(
            np.reshape(imgdata, [imgdata.shape[0], imgdata.shape[1], 1]), [3])
        # io.imshow(img)
        spath = '%s/%s/%05d.png' % (trainPath, list(label).index(1), num)
        io.imsave(spath, img)
        num += 1

    # write test images
    num = 0
    for ii in range(len(testImgs)):
        label = testLabel[ii]
        imgdata = testImgs[ii]
        img = tile(
            np.reshape(imgdata, [imgdata.shape[0], imgdata.shape[1], 1]), [3])
        spath = '%s/%s/%05d.png' % (testPath, list(label).index(1), num)
        io.imsave(spath, img)
        num += 1

    print('end loadNpy()')
Example #11
    def compute_circle_masks(self, radius):
        path = pjoin(self.root, 'SegmentationClass', 'masks')
        if not os.path.exists(path):
            os.makedirs(path)

            pbar = tqdm(total=len(self.all_files))
            for f in self.all_files:
                truth_path = f
                segm = Image.open(pjoin(self.segm_path, truth_path + ".png"))
                truth = np.asarray(segm)
                truths = [(truth == l).astype(int)
                          for l in np.unique(truth)[1:]]
                shape = np.array(truth.shape)
                for i, t in enumerate(truths):
                    path_out = pjoin(path, '{}_{}.png'.format(f, i))
                    if not (os.path.exists(path_out)):
                        out_filt = PascalVOCLoader.get_circle_region_mask(
                            t, radius=np.max(shape) * radius)[..., np.newaxis]
                        out_filt = (np.repeat(out_filt, 3, axis=-1) *
                                    255).astype(np.uint8)
                        with warnings.catch_warnings():
                            warnings.simplefilter("ignore")
                            io.imsave(path_out, out_filt)
                pbar.update(1)
            pbar.close()
Example #12
rows = image.shape[0]
cols = image.shape[1]
image = image.reshape(image.shape[0] * image.shape[1], 3)

#  k-means with 128 colors and max iter 10
kmeans = KMeans(n_clusters=128, n_init=10, max_iter=10)
kmeans.fit(image)

clusters = np.asarray(kmeans.cluster_centers_, dtype=np.uint8)
labels = np.asarray(kmeans.labels_, dtype=np.uint8)
labels = labels.reshape(rows, cols)

# saving in standard binary file format
np.save('codebook_tiger.npy', clusters)
io.imsave('compressed_bird_small.png', labels)

print "\nReconstructing main features - "
# load saved numpy array of clusters and respective lebels
centers = np.load('codebook_tiger.npy')
c_image = io.imread('compressed_bird_small.png')

image = np.zeros((c_image.shape[0], c_image.shape[1], 3), dtype=np.uint8)
for i in range(c_image.shape[0]):
    for j in range(c_image.shape[1]):
        image[i, j, :] = centers[c_image[i, j], :]

print "\nReconstructing done..!!"

io.imsave('reconstructed_bird_small.png', image)
io.imshow(image)
Example #13
def readCropEqualize(path, extension, crop, doRecognition, equalize=False,
                     isColoured=False):
  if not crop and doRecognition:
    raise Exception("you asked for the reading process to crop the images but do no face detection")

  if equalize:
    dirforres = "detection-cropped-equalized"
  else:
    dirforres = "detection-cropped"

  pathForCropped = os.path.join(path, dirforres)

  if crop:
    if doRecognition:
      if not os.path.exists(pathForCropped):
        os.makedirs(pathForCropped)

      imageFiles = [(os.path.join(dirpath, f), f)
        for dirpath, dirnames, files in os.walk(path)
        for f in fnmatch.filter(files, '*.' + extension)]

      images = []

      for fullPath, shortPath in imageFiles:
        # Do not do this for already cropped images
        if pathForCropped in fullPath:
          continue

        print(fullPath)
        print(shortPath)
        # img = Image.open(fullPath)
        # img = np.array(img.getdata()).reshape(img.size[0], img.size[1])
        # print(img.shape)
        img = cv2.imread(fullPath, 0)
        print(img is None)

        face = facedetection.cropFace(img)


        if face is not None:

          face = resize(face, SMALL_SIZE)
          if equalize:
            face = equalizeFromFloatCLAHE(face)

          face = face.reshape(SMALL_SIZE)
          # Only do the resizing once you are done with the cropping of the faces
          # Check that you are always saving them in the right format
          print "face.min"
          print face.min()

          print "face.max"
          print face.max()

          assert face.min() >= 0 and face.max() <= 1
          images += [face.reshape(-1)]

          # Save faces as files
          croppedFileName = os.path.join(pathForCropped, shortPath)
          io.imsave(croppedFileName, face)
    # If not doing face detection live
    else:
      images = []
      imageFiles = [os.path.join(dirpath, f)
        for dirpath, dirnames, files in os.walk(pathForCropped)
        for f in fnmatch.filter(files, '*.' + extension)]

      for f in imageFiles:
        img = cv2.imread(f, 0)
        if img.dtype == np.uint8:
          print("rescaling uint8 to [0, 1]")
          img = img / 255.0

        img = resize(img, SMALL_SIZE)
        images += [img.reshape(-1)]
  # If not doing recognition here, just reading from the initial faces
  else:
    images = []
    imageFiles = [os.path.join(dirpath, f)
      for dirpath, dirnames, files in os.walk(path)
      if os.path.basename(dirpath) not in ["detection-cropped-equalized", "detection-cropped"]
      for f in fnmatch.filter(files, '*.' + extension)]

    for i in imageFiles:
      assert "detection-cropped" not in i

    for f in imageFiles:
      img = cv2.imread(f, 0)
      if img.dtype == np.uint8:
        print("rescaling uint8 to [0, 1]")
        img = img / 255.0

      img = resize(img, SMALL_SIZE)

      if equalize:
        img = equalizeFromFloatCLAHE(img)

      images += [img.reshape(-1)]

  assert len(images) != 0

  print(len(images))
  return np.array(images)
Example #14
def atlasBrainMatch(
    brain_img_dir,
    sensory_img_dir,
    coords_input,
    sensory_match,
    mat_save,
    threshold,
    git_repo_base,
    region_labels,
    landmark_arr_orig,
    use_unet,
    use_dlc,
    atlas_to_brain_align,
    model,
    olfactory_check,
    plot_landmarks,
    align_once,
    original_label,
    use_voxelmorph,
    exist_transform,
    voxelmorph_model="motif_model_atlas.h5",
    vxm_template_path="templates",
    dlc_template_path="dlc_templates",
    flow_path="",
):
    """
    Aligns and overlays the brain atlas onto the brain image, based on four landmark locations in the brain image and the atlas.
    :param brain_img_dir: The directory containing each brain image to be used.
    :param sensory_img_dir: The directory containing each sensory image to be used (if you are aligning each brain
    image using a sensory map).
    :param coords_input: Predicted locations of the four landmarks on the brain image from the file generated by
    DeepLabCut.
    :param sensory_match: Whether or not a sensory map is to be used.
    :param mat_save: Whether or not to export each brain region to a .mat file in applyMask, which is called at the end
    of this function.
    :param threshold: The threshold for the cv2.opening operation carried out in applyMask, which is called at the end
    of this function.
    :param git_repo_base: The path to the base git repository containing necessary resources for MesoNet (reference
    atlases, DeepLabCut config files, etc.)
    :param region_labels: Choose whether or not to attempt to label each region with its name from the Allen Institute
    Mouse Brain Atlas.
    :param landmark_arr_orig: The original array of landmarks from DeepLabCut (to be distinguished from any automatic
    exclusions to landmark array based on prediction quality).
    :param use_unet: Choose whether or not to identify the borders of the cortex using a U-net model.
    :param use_dlc: If True, uses DeepLabCut landmark predictions to align the atlas and brain image.
    :param atlas_to_brain_align: If True, registers the atlas to each brain image. If False, registers each brain image
    to the atlas.
    :param model: The name of the U-net model (for passthrough to mask_functions.py)
    :param olfactory_check: If True, draws olfactory bulb contours on the brain image.
    :param plot_landmarks: If True, plots DeepLabCut landmarks (large circles) and original alignment landmarks (small
    circles) on final brain image.
    :param align_once: if True, carries out all alignments based on the alignment of the first atlas and brain. This can
    save time if you have many frames of the same brain with a fixed camera position.
    :param original_label: if True, uses a brain region labelling approach that attempts to automatically sort brain
    regions in a consistent order (left to right by hemisphere, then top to bottom for vertically aligned regions). This
    approach may be more flexible if you're using a custom brain atlas (i.e. not one in which each region is filled
    with a unique number).
    :param use_voxelmorph: if True, uses a VoxelMorph model to carry out a local deformation registration step.
    :param exist_transform: if True, uses an existing voxelmorph transformation field for all data instead of predicting
    a new transformation.
    :param voxelmorph_model: the name of a .h5 model located in the models folder of the git repository for MesoNet,
    generated using voxelmorph and containing weights for a voxelmorph local deformation model.
    :param vxm_template_path: the path to a template atlas (.npy or .mat) to which the brain image will be aligned in
    voxelmorph.
    :param flow_path: the path to a voxelmorph transformation field that will be used to transform all data instead of
    predicting a new transformation if exist_transform is True.
    """
    # load brain images folder
    brain_img_arr = []
    dlc_img_arr = []
    peak_arr = []
    atlas_label_list = []
    dst_list = []
    vxm_template_list = []
    br_list = []

    voxelmorph_model_path = os.path.join(
        git_repo_base, "models", "voxelmorph", voxelmorph_model
    )

    # Prepare template for VoxelMorph
    convert_to_png(vxm_template_path)
    vxm_template_orig = cv2.imread(
        glob.glob(os.path.join(git_repo_base, "atlases", vxm_template_path, "*.png"))[0]
    )

    # Prepare template for DeepLabCut + VoxelMorph
    # convert_to_png(dlc_template_path)
    # dlc_template = cv2.imread(
    #     glob.glob(os.path.join(git_repo_base, "atlases", dlc_template_path, "*.png"))[0]
    # )
    # dlc_template = np.uint8(dlc_template)
    # dlc_template = cv2.resize(dlc_template, (512, 512))

    # Prepare output folder
    cwd = os.getcwd()
    output_mask_path = os.path.join(cwd, "../output_mask")
    # Output folder for transparent masks and masks overlaid onto brain image
    output_overlay_path = os.path.join(cwd, "../output_overlay")
    if not os.path.isdir(output_mask_path):
        os.mkdir(output_mask_path)
    if not os.path.isdir(output_overlay_path):
        os.mkdir(output_overlay_path)

    if not atlas_to_brain_align:
        im = cv2.imread(
            os.path.join(git_repo_base, "atlases/Atlas_workflow2_binary.png")
        )
    else:
        if use_voxelmorph and not use_dlc:
            im = cv2.imread(
                os.path.join(git_repo_base, "atlases/Atlas_for_Voxelmorph_binary.png")
            )
        else:
            im = cv2.imread(
                os.path.join(git_repo_base, "atlases/Atlas_workflow1_binary.png")
            )
        im_left = cv2.imread(os.path.join(git_repo_base, "atlases/left_hemi.png"))
        ret, im_left = cv2.threshold(im_left, 5, 255, cv2.THRESH_BINARY_INV)
        im_right = cv2.imread(os.path.join(git_repo_base, "atlases/right_hemi.png"))
        ret, im_right = cv2.threshold(im_right, 5, 255, cv2.THRESH_BINARY_INV)
        im_left = np.uint8(im_left)
        im_right = np.uint8(im_right)
        im = np.uint8(im)
    # im = atlas_from_mat(os.path.join(git_repo_base, 'atlases/atlas_ROIs.mat'))
    atlas = im
    # FOR ALIGNING BRAIN TO ATLAS
    # im_binary = np.uint8(im)

    for num, file in enumerate(os.listdir(cwd)):
        if fnmatch.fnmatch(file, "*.png") and "mask" not in file:
            dlc_img_arr.append(os.path.join(cwd, file))
    for num, file in enumerate(os.listdir(brain_img_dir)):
        if fnmatch.fnmatch(file, "*.png"):
            brain_img_arr.append(os.path.join(brain_img_dir, file))
            brain_img_arr.sort(key=natural_sort_key)
        elif fnmatch.fnmatch(file, "*.tif"):
            tif_stack = imageio.mimread(os.path.join(brain_img_dir, file))
            for tif_im in tif_stack:
                brain_img_arr.append(tif_im)
    # i_coord, j_coord = np.array([(100, 256, 413, 256), (148, 254, 148, 446)])
    # https://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
    coord_circles_img = cv2.imread(
        os.path.join(
            git_repo_base, "atlases", "multi_landmark", "landmarks_new_binary.png"
        ),
        cv2.IMREAD_GRAYSCALE,
    )
    coord_circles_img = np.uint8(coord_circles_img)
    # detect circles in the image
    circles, hierarchy = cv2.findContours(
        coord_circles_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
    )[-2:]
    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        atlas_arr = np.array(
            [
                (
                    int(cv2.moments(circle)["m10"] / cv2.moments(circle)["m00"]),
                    int(cv2.moments(circle)["m01"] / cv2.moments(circle)["m00"]),
                )
                for circle in circles
            ]
        )

    # Override any detected circle coordinates with fixed landmark positions
    atlas_arr = np.array(
        [
            (102, 148),
            (166, 88),
            (214, 454),
            (256, 88),
            (256, 256),
            (256, 428),
            (410, 148),
            (346, 88),
            (298, 454),
        ]
    )

    peak_arr_flat = []
    peak_arr_total = []

    if sensory_match:
        for num, file in enumerate(brain_img_arr):
            img_name = str(os.path.splitext(os.path.basename(file))[0])
            sensory_img_for_brain = os.path.join(sensory_img_dir, img_name)
            print(img_name)
            print(sensory_img_for_brain)
            if glob.glob(sensory_img_for_brain):
                sensory_img_for_brain_dir = os.listdir(sensory_img_for_brain)
                sensory_img_for_brain_dir.sort(key=natural_sort_key)
                for num_im, file_im in enumerate(sensory_img_for_brain_dir):
                    sensory_im = io.imread(
                        os.path.join(sensory_img_dir, img_name, file_im)
                    )
                    sensory_im = trans.resize(sensory_im, (512, 512))
                    io.imsave(
                        os.path.join(sensory_img_dir, img_name, file_im), sensory_im
                    )
                    peak = find_peaks(os.path.join(sensory_img_dir, img_name, file_im))
                    peak_arr.append(peak)
            for x in peak_arr:
                for y in x:
                    peak_arr_flat.append(y)
            peak_arr_total.append(peak_arr_flat)
            peak_arr_flat = []
            peak_arr = []

    dlc_pts = []
    atlas_pts = []
    sensory_peak_pts = []
    sensory_atlas_pts = []
    sub_dlc_pts = []
    sub_atlas_pts = []
    sub_sensory_peak_pts = []
    sub_sensory_atlas_pts = []

    bregma_index_list = []
    bregma_list = []
    if use_dlc:
        bregma_present = True
    else:
        bregma_present = False

    coords = pd.read_csv(coords_input)
    x_coord = coords.iloc[2:, 1::3]
    y_coord = coords.iloc[2:, 2::3]
    accuracy = coords.iloc[2:, 3::3]
    acc_left_total = accuracy.iloc[:, 0:5]
    acc_right_total = accuracy.iloc[:, 3:8]
    landmark_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8]  # [0, 3, 2, 1]
    for arr_index, i in enumerate(range(0, len(x_coord))):
        landmark_arr = landmark_arr_orig
        x_coord_flat = x_coord.iloc[i].values.astype("float32")
        y_coord_flat = y_coord.iloc[i].values.astype("float32")
        x_coord_flat = x_coord_flat[landmark_arr]
        y_coord_flat = y_coord_flat[landmark_arr]
        dlc_list = []
        atlas_list = []
        for (coord_x, coord_y) in zip(x_coord_flat, y_coord_flat):
            dlc_coord = (coord_x, coord_y)
            dlc_list.append(dlc_coord)
        for coord_atlas in atlas_arr:
            atlas_coord = (coord_atlas[0], coord_atlas[1])
            atlas_list.append(atlas_coord)
        atlas_list = [atlas_list[i] for i in landmark_arr]
        # Initialize result as max value

        landmark_indices = landmark_indices[0 : len(landmark_arr)]

        # atlas_indices = min_landmark_arr
        atlas_indices = landmark_arr

        # print('atlas indices: {}'.format(atlas_indices))
        # print('landmark indices: {}'.format(landmark_indices))
        # print('x coords: {}'.format(x_coord_flat))

        pts_dist = np.absolute(
            np.asarray(atlas_list) - np.asarray((im.shape[0] / 2, im.shape[1] / 2))
        )
        pts_avg_dist = [np.mean(v) for v in pts_dist]
        # print("bregma dist: {}".format(pts_avg_dist))
        bregma_index = np.argmin(np.asarray(pts_avg_dist))
        # print("bregma index: {}".format(bregma_index))

        for j in landmark_indices:
            sub_dlc_pts.append([x_coord_flat[j], y_coord_flat[j]])
        for j in atlas_indices:
            sub_atlas_pts.append([atlas_arr[j][0], atlas_arr[j][1]])

        dlc_pts.append(sub_dlc_pts)
        atlas_pts.append(sub_atlas_pts)
        coords_to_mat(
            sub_dlc_pts, i, output_mask_path, bregma_present, bregma_index, landmark_arr
        )
        bregma_index_list.append(bregma_index)
        sub_dlc_pts = []
        sub_atlas_pts = []
    if sensory_match:
        k_coord, m_coord = np.array([(189, 323, 435, 348), (315, 315, 350, 460)])
        coords_peak = peak_arr_total
        for img_num, img in enumerate(brain_img_arr):
            for j in [1, 0, 3, 2]:  # Get peak values from heatmaps
                sub_sensory_peak_pts.append(
                    [coords_peak[img_num][j][0], coords_peak[img_num][j][1]]
                )
            for j in [0, 1, 2, 3]:  # Get circle locations
                sub_sensory_atlas_pts.append([k_coord[j], m_coord[j]])
            sensory_peak_pts.append(sub_sensory_peak_pts)
            sensory_atlas_pts.append(sub_sensory_atlas_pts)
            sensory_to_mat(
                sub_sensory_peak_pts, dlc_pts[img_num][3], img_num, output_mask_path
            )
            sub_sensory_peak_pts = []
            sub_sensory_atlas_pts = []
        sensory_peak_pts, sensory_atlas_pts = (
            np.asarray(sensory_peak_pts).astype("float32"),
            np.asarray(sensory_atlas_pts).astype("float32"),
        )

    for (n, br) in enumerate(brain_img_arr):
        vxm_template = np.uint8(vxm_template_orig)
        vxm_template = cv2.resize(vxm_template, (512, 512))

        align_val = n
        if atlas_to_brain_align:
            im = np.uint8(im)
            br = cv2.imread(br)
            br = np.uint8(br)
            br = cv2.resize(br, (512, 512))
        else:
            # FOR ALIGNING BRAIN TO ATLAS
            if ".png" in br:
                im = cv2.imread(br)
            else:
                im = br
            im = np.uint8(im)
            im = cv2.resize(im, (512, 512))

        if atlas_to_brain_align:
            # atlas_mask_dir = os.path.join(git_repo_base, "atlases/Atlas_workflow1_smooth_binary.png")
            if use_voxelmorph and not use_dlc:
                atlas_mask_dir = os.path.join(
                    git_repo_base, "atlases/Atlas_for_Voxelmorph_border.png"
                )
            else:
                atlas_mask_dir = os.path.join(
                    git_repo_base, "atlases/atlas_smooth2_binary.png"
                )
            atlas_mask_dir_left = os.path.join(
                git_repo_base, "atlases/left_hemisphere_smooth.png"
            )
            atlas_mask_dir_right = os.path.join(
                git_repo_base, "atlases/right_hemisphere_smooth.png"
            )
            atlas_label_mask_dir = os.path.join(
                git_repo_base, "atlases/diff_colour_regions/Common_atlas.mat"
            )
            atlas_label_mask_dir_left = os.path.join(
                git_repo_base, "atlases/diff_colour_regions/atlas_left_hemisphere.csv"
            )
            atlas_label_mask_dir_right = os.path.join(
                git_repo_base, "atlases/diff_colour_regions/atlas_right_hemisphere.csv"
            )
            # atlas_label_mask_left = atlas_from_mat(atlas_label_mask_dir_left, [])
            # atlas_label_mask_right = atlas_from_mat(atlas_label_mask_dir_right, [])
            atlas_label_mask_left = np.genfromtxt(
                atlas_label_mask_dir_left, delimiter=","
            )
            atlas_label_mask_right = np.genfromtxt(
                atlas_label_mask_dir_right, delimiter=","
            )
            atlas_mask_left = cv2.imread(atlas_mask_dir_left, cv2.IMREAD_UNCHANGED)
            atlas_mask_left = cv2.resize(atlas_mask_left, (im.shape[0], im.shape[1]))
            atlas_mask_left = np.uint8(atlas_mask_left)
            atlas_mask_right = cv2.imread(atlas_mask_dir_right, cv2.IMREAD_UNCHANGED)
            atlas_mask_right = cv2.resize(atlas_mask_right, (im.shape[0], im.shape[1]))
            atlas_mask_right = np.uint8(atlas_mask_right)
        else:
            atlas_mask_dir = os.path.join(
                git_repo_base, "atlases/atlas_smooth2_binary.png"
            )
        atlas_mask = cv2.imread(atlas_mask_dir, cv2.IMREAD_UNCHANGED)
        atlas_mask = cv2.resize(atlas_mask, (im.shape[0], im.shape[1]))
        atlas_mask = np.uint8(atlas_mask)
        mask_dir = os.path.join(cwd, "../output_mask/{}.png".format(n))

        print("Performing first transformation of atlas {}...".format(n))

        mask_warped_path = os.path.join(
            output_mask_path, "{}_mask_warped.png".format(str(n))
        )
        if use_dlc:
            # First alignment of brain atlas using three cortical landmarks and standard affine transform
            atlas_pts_for_input = np.array([atlas_pts[n][0 : len(dlc_pts[n])]]).astype(
                "float32"
            )
            pts_for_input = np.array([dlc_pts[n]]).astype("float32")

            if align_once:
                align_val = 0
            else:
                align_val = n

            if len(atlas_pts_for_input[0]) == 2:
                atlas_pts_for_input = np.append(atlas_pts_for_input[0], [[0, 0]], axis=0)
                pts_for_input = np.append(pts_for_input[0], [[0, 0]], axis=0)
            if len(atlas_pts_for_input[0]) <= 2:
                warp_coords = cv2.estimateAffinePartial2D(
                    atlas_pts_for_input, pts_for_input
                )[0]
                if atlas_to_brain_align:
                    atlas_warped_left = cv2.warpAffine(im_left, warp_coords, (512, 512))
                    atlas_warped_right = cv2.warpAffine(im_right, warp_coords, (512, 512))
                    atlas_warped = cv2.bitwise_or(atlas_warped_left, atlas_warped_right)
                    ret, atlas_warped = cv2.threshold(
                        atlas_warped, 5, 255, cv2.THRESH_BINARY_INV
                    )
                    atlas_left_transform_path = os.path.join(
                        output_mask_path, "{}_atlas_left_transform.png".format(str(n))
                    )
                    atlas_right_transform_path = os.path.join(
                        output_mask_path, "{}_atlas_right_transform.png".format(str(n))
                    )
                    io.imsave(atlas_left_transform_path, atlas_warped_left)
                    io.imsave(atlas_right_transform_path, atlas_warped_right)
                else:
                    atlas_warped = cv2.warpAffine(im, warp_coords, (512, 512))
            elif len(atlas_pts_for_input[0]) == 3:
                warp_coords = cv2.getAffineTransform(atlas_pts_for_input, pts_for_input)
                atlas_warped = cv2.warpAffine(im, warp_coords, (512, 512))
            elif len(atlas_pts_for_input[0]) >= 4:
                im_final_size = (512, 512)

                left = acc_left_total.iloc[n, :].values.astype("float32").tolist()
                right = acc_right_total.iloc[n, :].values.astype("float32").tolist()
                left = np.argsort(left).tolist()
                right = np.argsort(right).tolist()
                right = [x + 1 for x in right]
                if set([1, 3, 5, 7]).issubset(landmark_arr):
                    left = [1, 3, 5]
                    right = [3, 5, 7]
                else:
                    left = [x for x in landmark_indices if x in range(0, 6)][0:2]
                    right = [x for x in landmark_indices if x in range(3, 9)][0:2]

                try:
                    atlas_pts_left = np.array(
                        [
                            atlas_pts[align_val][left[0]],
                            atlas_pts[align_val][left[1]],
                            atlas_pts[align_val][left[2]],
                        ],
                        dtype=np.float32,
                    )
                    atlas_pts_right = np.array(
                        [
                            atlas_pts[align_val][right[0]],
                            atlas_pts[align_val][right[1]],
                            atlas_pts[align_val][right[2]],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_left = np.array(
                        [
                            dlc_pts[align_val][left[0]],
                            dlc_pts[align_val][left[1]],
                            dlc_pts[align_val][left[2]],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_right = np.array(
                        [
                            dlc_pts[align_val][right[0]],
                            dlc_pts[align_val][right[1]],
                            dlc_pts[align_val][right[2]],
                        ],
                        dtype=np.float32,
                    )

                except IndexError:
                    # Fall back to a default landmark set if fewer landmarks are available
                    atlas_pts_left = np.array(
                        [
                            atlas_pts[align_val][0],
                            atlas_pts[align_val][2],
                            atlas_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )
                    atlas_pts_right = np.array(
                        [
                            atlas_pts[align_val][1],
                            atlas_pts[align_val][2],
                            atlas_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_left = np.array(
                        [
                            dlc_pts[align_val][0],
                            dlc_pts[align_val][2],
                            dlc_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_right = np.array(
                        [
                            dlc_pts[align_val][1],
                            dlc_pts[align_val][2],
                            dlc_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )

                warp_coords_left = cv2.getAffineTransform(atlas_pts_left, dlc_pts_left)
                warp_coords_right = cv2.getAffineTransform(atlas_pts_right, dlc_pts_right)
                if atlas_to_brain_align:
                    atlas_warped_left = cv2.warpAffine(
                        im_left, warp_coords_left, im_final_size
                    )
                    atlas_warped_right = cv2.warpAffine(
                        im_right, warp_coords_right, im_final_size
                    )
                    atlas_warped = cv2.bitwise_or(atlas_warped_left, atlas_warped_right)
                    ret, atlas_warped = cv2.threshold(
                        atlas_warped, 5, 255, cv2.THRESH_BINARY_INV
                    )
                    if not original_label:
                        atlas_label_left = cv2.warpAffine(
                            atlas_label_mask_left, warp_coords_left, im_final_size
                        )
                        atlas_label_right = cv2.warpAffine(
                            atlas_label_mask_right, warp_coords_right, im_final_size
                        )
                        atlas_label = cv2.bitwise_or(atlas_label_left, atlas_label_right)

                else:
                    pts_np = np.array(
                        [
                            dlc_pts[align_val][0],
                            dlc_pts[align_val][1],
                            dlc_pts[align_val][2],
                        ],
                        dtype=np.float32,
                    )
                    atlas_pts_np = np.array(
                        [
                            atlas_pts[align_val][0],
                            atlas_pts[align_val][1],
                            atlas_pts[align_val][2],
                        ],
                        dtype=np.float32,
                    )
                    warp_coords = cv2.getAffineTransform(pts_np, atlas_pts_np)
                    atlas_warped = cv2.warpAffine(im, warp_coords, (512, 512))
                    print(warp_coords)
                    # vxm_template = cv2.warpAffine(vxm_template, warp_coords, (512, 512))
                    # try:
                    #    atlas_warped = niftyreg_align(git_repo_base, atlas_warped, output_mask_path, n)
                    # except:
                    #    print("ERROR: could not use niftyreg to warp atlas {}! Please check inputs.".format(str(n)))

            if atlas_to_brain_align:
                if len(atlas_pts_for_input[0]) == 2:
                    atlas_mask_left_warped = cv2.warpAffine(
                        atlas_mask_left, warp_coords, (512, 512)
                    )
                    atlas_mask_right_warped = cv2.warpAffine(
                        atlas_mask_right, warp_coords, (512, 512)
                    )
                    atlas_mask_warped = cv2.bitwise_or(
                        atlas_mask_left_warped, atlas_mask_right_warped
                    )
                if len(atlas_pts_for_input[0]) == 3:
                    atlas_mask_warped = cv2.warpAffine(atlas_mask, warp_coords, (512, 512))
                if len(atlas_pts_for_input[0]) >= 4:
                    atlas_mask_left_warped = cv2.warpAffine(
                        atlas_mask_left, warp_coords_left, (512, 512)
                    )
                    atlas_mask_right_warped = cv2.warpAffine(
                        atlas_mask_right, warp_coords_right, (512, 512)
                    )
                    atlas_mask_warped = cv2.bitwise_or(
                        atlas_mask_left_warped, atlas_mask_right_warped
                    )
                atlas_mask_warped = np.uint8(atlas_mask_warped)

            # Second alignment of brain atlas using cortical landmarks and piecewise affine transform
            print("Performing second transformation of atlas {}...".format(n))

            atlas_first_transform_path = os.path.join(
                output_mask_path, "{}_atlas_first_transform.png".format(str(n))
            )

            dst = atlas_warped

            io.imsave(atlas_first_transform_path, dst)

            # If a sensory map of the brain is provided, do a third alignment of the brain atlas using up to
            # four peaks of sensory activity
            if sensory_match:
                original_label = True
                # COMMENT OUT FOR ALIGNING BRAIN TO ATLAS
                # mask_dir
                # atlas_first_transform_path
                if atlas_to_brain_align:
                    dst = getMaskContour(
                        mask_dir,
                        atlas_warped,
                        sensory_peak_pts[align_val],
                        sensory_atlas_pts[align_val],
                        cwd,
                        align_val,
                        False,
                    )
                    atlas_mask_warped = getMaskContour(
                        atlas_first_transform_path,
                        atlas_mask_warped,
                        sensory_peak_pts[align_val],
                        sensory_atlas_pts[align_val],
                        cwd,
                        align_val,
                        False,
                    )
                    atlas_mask_warped = cv2.resize(
                        atlas_mask_warped, (im.shape[0], im.shape[1])
                    )
                else:
                    dst = atlas_warped
        else:
            # If we're not using DeepLabCut...
            if atlas_to_brain_align and not use_voxelmorph:
                dst = cv2.bitwise_or(im_left, im_right)
                ret, dst = cv2.threshold(
                    dst, 5, 255, cv2.THRESH_BINARY_INV
                )
            else:
                dst = im
            dst = np.uint8(dst)

        # if use_voxelmorph:
        #     if atlas_to_brain_align:
        #         _, flow = voxelmorph_align(
        #             voxelmorph_model_path, br_vxm, vxm_template, exist_transform, flow_path
        #         )
        #     else:
        #         dst, flow = voxelmorph_align(
        #             voxelmorph_model_path, dst, vxm_template, exist_transform, flow_path
        #         )
        #     flow_path_after = os.path.join(output_mask_path, "{}_flow.npy".format(str(n)))
        #     np.save(flow_path_after, flow)
        #     if not exist_transform:
        #         if atlas_to_brain_align:
        #             dst_gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
        #             dst = vxm_transform(dst_gray, flow_path_after)
        #             ret, dst = cv2.threshold(
        #                 dst, 5, 255, cv2.THRESH_BINARY
        #             )
        #             dst = np.uint8(dst)

        if use_dlc:
            if atlas_to_brain_align:
                io.imsave(mask_warped_path, atlas_mask_warped)
            else:
                io.imsave(mask_warped_path, atlas_mask)
        else:
            io.imsave(mask_warped_path, dst)
            if atlas_to_brain_align:
                if use_voxelmorph:
                    atlas_mask_warped = atlas_mask
                else:
                    atlas_mask_warped = cv2.bitwise_or(
                        atlas_mask_left, atlas_mask_right
                    )
                atlas_mask_warped = cv2.cvtColor(atlas_mask_warped, cv2.COLOR_BGR2GRAY)
                #
                # atlas_mask_warped = vxm_transform(atlas_mask_warped, flow_path_after)
                ret, atlas_mask_warped = cv2.threshold(
                    atlas_mask_warped, 5, 255, cv2.THRESH_BINARY
                )
                atlas_mask_warped = np.uint8(atlas_mask_warped)
                original_label = True
            else:
                atlas_mask_warped = atlas_mask
                #
                # atlas_mask_warped = vxm_transform(atlas_mask_warped, flow_path_after)
            io.imsave(mask_warped_path, atlas_mask_warped)
        # Resize images back to 512x512
        dst = cv2.resize(dst, (im.shape[0], im.shape[1]))
        atlas_path = os.path.join(output_mask_path, "{}_atlas.png".format(str(n)))

        if atlas_to_brain_align:
            io.imsave(atlas_path, dst)
            br_list.append(br)
        else:
            brain_warped_path = os.path.join(
                output_mask_path, "{}_brain_warp.png".format(str(n))
            )
            vxm_template_output_path = os.path.join(
                output_mask_path, "{}_vxm_template.png".format(str(n))
            )
            dst_list.append(dst)
            if use_voxelmorph:
                vxm_template_list.append(vxm_template)
                io.imsave(vxm_template_output_path, vxm_template_list[n])
            io.imsave(brain_warped_path, dst)
            io.imsave(atlas_path, atlas)

        if atlas_to_brain_align:
            if original_label:
                atlas_label = []
            atlas_label = atlas_to_mask(
                atlas_path,
                mask_dir,
                mask_warped_path,
                output_mask_path,
                n,
                use_unet,
                atlas_to_brain_align,
                git_repo_base,
                olfactory_check,
                atlas_label
            )
            atlas_label_list.append(atlas_label)
        elif not use_dlc:
            io.imsave(os.path.join(output_mask_path, "{}.png".format(n)), dst)
        if bregma_present:
            bregma_val = int(bregma_index_list[n])
            bregma_list.append(dlc_pts[n][bregma_val])

    # Carries out VoxelMorph on each motif-based functional map (MBFM) that has been aligned to a raw brain image
    if use_dlc and use_voxelmorph and align_once:
        for (n_post, dst_post), vxm_template_post in zip(enumerate(dst_list), vxm_template_list):
            _, flow_post = voxelmorph_align(
               voxelmorph_model_path, dst_post, vxm_template_post, exist_transform, flow_path
            )
            flow_path_after = os.path.join(output_mask_path, "{}_flow.npy".format(str(n_post)))
            np.save(flow_path_after, flow_post)
            if not exist_transform:
                dst_gray = cv2.cvtColor(atlas, cv2.COLOR_BGR2GRAY)
                dst_post = vxm_transform(dst_gray, flow_path_after)
                ret, dst_post = cv2.threshold(
                    dst_post, 5, 255, cv2.THRESH_BINARY
                )
                dst_post = np.uint8(dst_post)

            mask_warped_path = os.path.join(
                output_mask_path, "{}_mask_warped.png".format(str(n_post))
            )

            atlas_first_transform_path_post = os.path.join(
                output_mask_path, "{}_atlas_first_transform.png".format(str(n_post))
            )

            io.imsave(atlas_first_transform_path_post, dst_post)

            atlas_path = os.path.join(output_mask_path, "{}_atlas.png".format(str(n_post)))

            brain_warped_path = os.path.join(
                output_mask_path, "{}_brain_warp.png".format(str(n_post))
            )
            mask_dir = os.path.join(cwd, "../output_mask/{}.png".format(n_post))
            dst_post = cv2.resize(dst_post, (im.shape[0], im.shape[1]))
            if not atlas_to_brain_align:
                atlas_to_brain_align = True
                original_label = True
            if atlas_to_brain_align:
                io.imsave(atlas_path, dst_post)
            else:
                io.imsave(brain_warped_path, dst_post)
            if atlas_to_brain_align:
                if original_label:
                    atlas_label = []
                atlas_label = atlas_to_mask(
                    atlas_path,
                    mask_dir,
                    mask_warped_path,
                    output_mask_path,
                    n_post,
                    use_unet,
                    atlas_to_brain_align,
                    git_repo_base,
                    olfactory_check,
                    atlas_label
                )
                atlas_label_list.append(atlas_label)


    # Converts the transformed brain atlas into a segmentation template for the original brain image
    applyMask(
        brain_img_dir,
        output_mask_path,
        output_overlay_path,
        output_overlay_path,
        mat_save,
        threshold,
        git_repo_base,
        bregma_list,
        atlas_to_brain_align,
        model,
        dlc_pts,
        atlas_pts,
        olfactory_check,
        use_unet,
        use_dlc,
        plot_landmarks,
        align_once,
        atlas_label_list,
        region_labels,
        original_label,
    )
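A heavily abridged call sketch for atlasBrainMatch; every path, flag, and model name below is a placeholder, and the real values come from the rest of the MesoNet pipeline:

atlasBrainMatch(
    brain_img_dir='input_brains',            # hypothetical folder of .png brain images
    sensory_img_dir='',
    coords_input='dlc_coords.csv',           # hypothetical DeepLabCut output
    sensory_match=False,
    mat_save=True,
    threshold=0.001,
    git_repo_base='/path/to/mesonet',
    region_labels=False,
    landmark_arr_orig=[0, 1, 2, 3, 4, 5, 6, 7, 8],
    use_unet=True,
    use_dlc=True,
    atlas_to_brain_align=True,
    model='unet_model.hdf5',                 # hypothetical U-net weights
    olfactory_check=False,
    plot_landmarks=True,
    align_once=False,
    original_label=False,
    use_voxelmorph=False,
    exist_transform=False,
)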
Example #15
def readCropEqualize(path,
                     extension,
                     crop,
                     doRecognition,
                     equalize=False,
                     isColoured=False):
    if not crop and doRecognition:
        raise Exception(
            "you asked for the reading process to crop the images but do no face detection"
        )

    if equalize:
        dirforres = "detection-cropped-equalized"
    else:
        dirforres = "detection-cropped"

    pathForCropped = os.path.join(path, dirforres)

    if crop:
        if doRecognition:
            if not os.path.exists(pathForCropped):
                os.makedirs(pathForCropped)

            imageFiles = [(os.path.join(dirpath, f), f)
                          for dirpath, dirnames, files in os.walk(path)
                          for f in fnmatch.filter(files, '*.' + extension)]

            images = []

            for fullPath, shortPath in imageFiles:
                # Do not do this for already cropped images
                if pathForCropped in fullPath:
                    continue

                print(fullPath)
                print(shortPath)
                # img = Image.open(fullPath)
                # img = np.array(img.getdata()).reshape(img.size[0], img.size[1])
                # print(img.shape)
                img = cv2.imread(fullPath, 0)
                print(img is None)

                face = facedetection.cropFace(img)

                if face is not None:

                    face = resize(face, SMALL_SIZE)
                    if equalize:
                        face = equalizeFromFloatCLAHE(face)

                    face = face.reshape(SMALL_SIZE)
                    # Only do the resizing once you are done with the cropping of the faces
                    # Check that you are always saving them in the right format
                    print "face.min"
                    print face.min()

                    print "face.max"
                    print face.max()

                    assert face.min() >= 0 and face.max() <= 1
                    images += [face.reshape(-1)]

                    # Save faces as files
                    croppedFileName = os.path.join(pathForCropped, shortPath)
                    io.imsave(croppedFileName, face)
        # If not doing face detection live
        else:
            images = []
            imageFiles = [
                os.path.join(dirpath, f)
                for dirpath, dirnames, files in os.walk(pathForCropped)
                for f in fnmatch.filter(files, '*.' + extension)
            ]

            for f in imageFiles:
                img = cv2.imread(f, 0)
                if img.dtype == np.uint8:
                    print("rescaling uint8 to [0, 1]")
                    img = img / 255.0

                img = resize(img, SMALL_SIZE)
                images += [img.reshape(-1)]
    # If not doing recognition here, just reading from the initial faces
    else:
        images = []
        imageFiles = [
            os.path.join(dirpath, f)
            for dirpath, dirnames, files in os.walk(path)
            if os.path.basename(dirpath) not in
            ["detection-cropped-equalized", "detection-cropped"]
            for f in fnmatch.filter(files, '*.' + extension)
        ]

        for i in imageFiles:
            assert "detection-cropped" not in i

        for f in imageFiles:
            img = cv2.imread(f, 0)
            if img.dtype == np.uint8:
                print("rescaling uint8 to [0, 1]")
                img = img / 255.0

            img = resize(img, SMALL_SIZE)

            if equalize:
                img = equalizeFromFloatCLAHE(img)

            images += [img.reshape(-1)]

    assert len(images) != 0

    print(len(images))
    return np.array(images)
Example #16
def convert_mat_annotations_to_png(masks_root):
    """ Creates a new folder in the root folder of the dataset with annotations stored in .png.

    The function accepts a full path to the root of a segmentation dataset
    and converts annotations that are stored in .mat files to .png files.
    It creates a new folder, masks_root/masks_1_channel, where all the
    converted files will be located. If this directory already exists, the
    function does nothing.

    Parameters
    ----------
    masks_root : string
        Full path to the root of the patient dataset.

    """

    import scipy.io

    import skimage.io as io

    def decode_mat_color(mat_filename, key='ATmask'):

        mat = scipy.io.loadmat(mat_filename,
                               mat_dtype=True,
                               squeeze_me=True,
                               struct_as_record=False)
        h, w = mat[key].shape
        num_classes = 5

        # PIL image sizes are (width, height). label_colours is assumed to
        # be defined at module level as a list of per-class RGB tuples.
        img = Image.new('RGB', (w, h))
        pixels = img.load()
        for j_, j in enumerate(mat[key]):
            for k_, k in enumerate(j):
                if k < num_classes:
                    pixels[k_, j_] = label_colours[int(k - 1)]
        return np.array(img)

    def decode_mat_1_channel(mat_filename, key='ATmask'):

        mat = scipy.io.loadmat(mat_filename,
                               mat_dtype=True,
                               squeeze_me=True,
                               struct_as_record=False)

        # Class labels in the .mat file are 1-indexed; shift them to
        # 0-indexed for the single-channel .png.
        return mat[key] - 1

    mat_file_extension_string = '.mat'
    png_file_extension_string = '.png'
    relative_path_to_annotation_mat_files = 'masks'
    relative_path_to_annotation_png_files = 'masks_1_channel'

    mat_file_extension_string_length = len(mat_file_extension_string)

    annotation_mat_files_fullpath = os.path.join(
        masks_root, relative_path_to_annotation_mat_files)

    annotation_png_save_fullpath = os.path.join(
        masks_root, relative_path_to_annotation_png_files)

    # Create the folder where all the converted png files will be placed
    # If the folder already exists, do nothing
    if not os.path.exists(annotation_png_save_fullpath):

        os.makedirs(annotation_png_save_fullpath)
    else:

        return

    mat_files_names = os.listdir(annotation_mat_files_fullpath)

    for current_mat_file_name in mat_files_names:

        current_file_name_without_extension = \
            current_mat_file_name[:-mat_file_extension_string_length]

        current_mat_file_full_path = os.path.join(
            annotation_mat_files_fullpath, current_mat_file_name)

        current_png_file_full_path_to_be_saved = os.path.join(
            annotation_png_save_fullpath, current_file_name_without_extension)

        current_png_file_full_path_to_be_saved += png_file_extension_string

        #        annotation_array = decode_mat_color(current_mat_file_full_path)

        annotation_array = decode_mat_1_channel(current_mat_file_full_path)
        ann = annotation_array.astype(np.uint8)
        #        infer_label = Image.fromarray(annotation_array)
        # check_contrast=False suppresses skimage's 'low-contrast image'
        # warning when saving near-uniform masks.
        io.imsave(current_png_file_full_path_to_be_saved, ann,
                  check_contrast=False)
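
# A usage sketch for the converter above; the dataset root below is
# illustrative, assuming the .mat annotations live under '<root>/masks':
convert_mat_annotations_to_png('./dataset')
# Afterwards, './dataset/masks_1_channel' holds one single-channel .png
# per .mat annotation.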
Example #17
0
# glob returns a list of paths; [0] selects just the first training image.
images_list = glob.glob(
    os.path.join(dataset_dir, prepared_dataset_dir, "train", "images") +
    "/*.png")[0]
image = img_to_array(load_img(images_list, color_mode='rgb'))
mask = img_to_array(
    load_img(
        os.path.join(dataset_dir, prepared_dataset_dir, "train", "masks") +
        "/" + os.path.basename(images_list),
        color_mode='grayscale'))
# keras.models.load_model rebuilds the full architecture from the .h5 file,
# so the preceding sm.Unet call is effectively redundant here.
model = sm.Unet('efficientnetb0', classes=1, activation='sigmoid')
model = keras.models.load_model("results/weights/cpm15_Best.h5", compile=False)
image = image.reshape(1, 256, 256, 3)
pred = model.predict(image / 255.0)
pred = pred.reshape(256, 256, 1)
image = image.reshape(256, 256, 3)
# Cast to uint8 before saving: skimage.io.imsave rejects float arrays whose
# values fall outside [-1, 1].
io.imsave(os.path.join(save_dir, dataset_name + ".png"),
          image.astype("uint8"))
io.imsave(os.path.join(save_dir, dataset_name + "_mask.png"),
          (mask * 255.0).astype("uint8"))
io.imsave(os.path.join(save_dir, dataset_name + "_pred.png"),
          (pred * 255.0).astype("uint8"))

dataset_name = "cpm17"
dataset_dir = "dataset/cpm17"
prepared_dataset_dir = "prepared_dataset"
images_list = glob.glob(
    os.path.join(dataset_dir, prepared_dataset_dir, "train", "images") +
    "/*.png")[0]
image = img_to_array(load_img(images_list, color_mode='rgb'))
mask = img_to_array(
    load_img(
        os.path.join(dataset_dir, prepared_dataset_dir, "train", "masks") +
        "/" + os.path.basename(images_list),
        color_mode='grayscale'))
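
# The cpm17 block stops after loading here; presumably the same
# predict-and-save steps as for cpm15 follow. A sketch under that assumption
# (the weights path "results/weights/cpm17_Best.h5" is a guess, not confirmed
# by the source):
model = keras.models.load_model("results/weights/cpm17_Best.h5", compile=False)
image = image.reshape(1, 256, 256, 3)
pred = model.predict(image / 255.0).reshape(256, 256, 1)
image = image.reshape(256, 256, 3)
io.imsave(os.path.join(save_dir, dataset_name + ".png"),
          image.astype("uint8"))
io.imsave(os.path.join(save_dir, dataset_name + "_mask.png"),
          (mask * 255.0).astype("uint8"))
io.imsave(os.path.join(save_dir, dataset_name + "_pred.png"),
          (pred * 255.0).astype("uint8"))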
Example #18
0
                     os.path.splitext(os.path.basename(img_path))[0]) + "*")[0]
    #print(label_path)
    label = scipy.io.loadmat(label_path)
    label = label['inst_map']
    label = label.reshape(label.shape[0], label.shape[1], 1)
    #print(image.shape,label.shape)

    # Stack the 3-channel image with the 1-channel instance map, then slide a
    # (patch_width, patch_height, 4) window in steps of a quarter patch
    # (i.e. 75% overlap between neighbouring patches).
    patches = view_as_windows(
        np.concatenate(
            (image, label[:, :, 0].reshape(image.shape[0], image.shape[1], 1)),
            axis=2), (patch_width, patch_height, 4),
        (patch_width // 4, patch_height // 4, 4))
    patches = patches.reshape(-1, patch_width, patch_height, 4)

    print(patches.shape)

    for idx, patch in enumerate(patches):
        io.imsave(
            os.path.join(
                dataset_dir, prepared_dataset_dir, "images",
                os.path.splitext(os.path.basename(img_path))[0] + "_" +
                str(idx) + ".png"), img_as_ubyte(patch[:, :, 0:3]))
        temp_mask = patch[:, :, 3]
        temp_mask[temp_mask > 0] = 1  # binarize the instance map to {0, 1}
        #io.imsave(os.path.join(dataset_dir,prepared_dataset_dir,"masks",os.path.splitext(os.path.basename(img_path))[0]+"_"+str(idx)+".png"),temp_mask)
        cv2.imwrite(
            os.path.join(
                dataset_dir, prepared_dataset_dir, "masks",
                os.path.splitext(os.path.basename(img_path))[0] + "_" +
                str(idx) + ".png"), temp_mask)

def rgbtogrey(img1, img2):
    # Signature reconstructed from the call site below; converts an
    # image/mask pair from BGR to single-channel greyscale.
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    return img1, img2

# gaussian_filter and map_coordinates live in scipy.ndimage.
from scipy.ndimage import gaussian_filter, map_coordinates

def elastic_transform(image, image_mask, alpha, sigma, random_state=None):
    """Apply the same random elastic deformation to an image and its mask."""

    # assert len(image.shape) == 2

    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    # Draw the displacement fields from random_state so that a seeded
    # generator yields reproducible deformations.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                         sigma, mode='constant', cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),
                         sigma, mode='constant', cval=0) * alpha
    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')

    indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))

    return (map_coordinates(image, indices, order=1).reshape(shape),
            map_coordinates(image_mask, indices, order=1).reshape(shape))

str1 = ReadPath1 + '/*.jpg'
str2 = ReadPath2 + '/*_mask.jpg'
coll1 = io.ImageCollection(str1)
coll2 = io.ImageCollection(str2)

for i in range(len(coll1)):
    im, im_mask = rgbtogrey(coll1[i], coll2[i])
    im_t, im_mask_t = elastic_transform(im, im_mask,
                                        im.shape[1] * 2, im.shape[1] * 0.08)
    io.imsave(SavePath + '/' + str(i) + '.jpg', im_t)  # save images in a loop
    io.imsave(SavePath + '/' + str(i) + '_mask.jpg', im_mask_t)
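
# For a reproducible deformation, pass a seeded generator explicitly (sketch):
rs = np.random.RandomState(42)
im_t, im_mask_t = elastic_transform(im, im_mask,
                                    im.shape[1] * 2, im.shape[1] * 0.08,
                                    random_state=rs)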

Example #20
0
import numpy as np
import matplotlib.pyplot as plt
from skimage import io


def get_echo(img, company, R, C):
    """Mask out everything outside the ultrasound sector of an echo frame.

    :param img: the image object in grayscale
    :param company: the manufacturer tag from the DICOM file
    :param R: height
    :param C: width
    """
    #thresh = to(I)

    #binary = I>thresh

    # Sector-polygon vertices keyed by "manufacturer R C". Kept for
    # reference: the branches below hardcode their own masks and never
    # read this dict.
    dic_m = {}

    dic_m["'SIEMENS' 1024 768"] = [[30, 474], [469, 0], [673, 0], [673, 1024],
                                   [469, 1023], [30, 551]]

    # example file: /data/Gurpreet/Echo/Images/35/Images/TEE_35_24_1.jpg
    dic_m["'INFINITT' 967 872"] = [[3, 319], [306, 21], [398, 201], [415, 380],
                                   [291, 613], [3, 319]]

    dic_m["'GEMS Ultrasound' 636 422"] = [[0, 316], [311, 0], [411, 186],
                                          [421, 306], [421, 586], [304, 629],
                                          [0, 316]]

    dic_m["'Philips Medical Systems' 800 600"] = [[98, 426], [560, 6],
                                                  [568, 636], [432, 758],
                                                  [426, 98]]

    # example file: /data/Gurpreet/Echo/Images/35/Images/TEE_35_24_1.jpg
    dic_m["'Philips Medical Systems' 1024 768"] = [[99, 429], [433, 100],
                                                   [600, 322], [599]]

    # example file: /data/Gurpreet/Echo/Sorted_Images/A2C/EQo_78_42_1.jpg
    dic_m["'GE Vingmed Ultrasound' 636 434"] = [[7, 319], [322, 13],
                                                [401, 241], [401, 582],
                                                [7, 577]]

    dic_m["'GEMS Ultrasound' 636 434"] = [[10, 320], [320, 10], [402, 162],
                                          [400, 600], [124, 578], [10, 320]]

    dic_m["'NeXus-Community Medical Picture DEPT' 636 436"] = [[10, 320],
                                                               [320, 10],
                                                               [402, 162],
                                                               [400, 600],
                                                               [124, 578],
                                                               [10, 320]]

    dic_m["'GE Healthcare Ultrasound' 1016 708"] = [[66, 510], [505, 84],
                                                    [650, 313], [663, 524],
                                                    [574, 985], [60, 510]]

    dic_m["'INFINITT' 966 873"] = [[112, 482], [554, 44], [721, 350],
                                   [721, 908], [480, 908], [112, 482]]

    dic_m["'TOSHIBA_MEC_US' 960 720"] = [[88, 480], [477, 100], [619, 479],
                                         [465, 867], [88, 480]]
    '''
    Cannot process --

    'TOSHIBA_MEC' 512 512
    [["'48'", "'1'"], ["'48'", "'2'"], ["'48'", "'3'"], ["'48'", "'4'"], ["'48'", "'5'"], ["'48'", "'6'"], ["'48'", "'7'"], ["'48'", "'8'"], ["'48'", "'9'"]]

    "INFINITT' 967 832"] = [["'8'", "'46'"], ["'15'", "'59'"], ["'24'", "'76'"], ["'39'", "'39'"], ["'68'", "'24'"], ["'92'", "'34'"]]

    "'INFINITT' 1603 928"] = [["'14'", "'45'"], ["'14'", "'48'"], ["'14'", "'49'"], ["'14'", "'50'"], ["'14'", "'51'"], ["'28'", "'3'"], ["'28'", "'4'"], ["'93'", "'49'"]]

    "'INFINITT' 967 834"] = [[62,455],[526,50],[704,312],[622,950],[525,939],[62,455]]

    'INFINITT' 1024 1024
    [["'48'", "'10'"]]

    'GEMS Ultrasound' 640 458
    [["'69'", "'73'"], ["'69'", "'74'"], ["'77'", "'61'"], ["'77'", "'62'"], ["'77'", "'63'"]]

    'GE Vingmed Ultrasound' 640 480
    [["'73'", "'59'"], ["'73'", "'60'"]]

    'INFINITT' 967 808
    [["'72'", "'89'"], ["'72'", "'91'"]]

    '''
    if (company.lower() == 'siemens' and R == 1024 and C == 768):
        coords = np.array([np.arange(469, 30, -1), np.arange(30, 469)]).T
        for i in coords:
            #print(i)
            img[:i[0], :i[1]] = 0

        img[674:, :997] = 0

        coords1 = np.array([np.arange(36, 469), np.arange(561, 993)]).T
        #print(coords1.shape)
        #         for i in coords1:
        #             #print(coords1[i])
        #             #break
        #             print(i)
        #             img[0:i[0],i[1]]=0
        img[:258, 755:] = 0
        plt.imshow(img)
        plt.show()

    elif (company.lower() == 'gems ultrasound' and R == 636 and C == 422):
        coords = np.array([np.arange(6, 316), np.arange(316, 6, -1)]).T
        for i in coords:
            #print(i)
            img[:i[0], :i[1]] = 0

        coords = np.array([np.arange(350, 420), np.arange(316, 6, -1)]).T  # unused

        img[387:, :170] = 0
        img[390:, 603:] = 0
        img[8:173, 602:] = 0

    elif (company.lower() == 'philips medical systems' and R == 800
          and C == 600):
        img[:130, 0:900] = 0
        #print('here')
        img[:, :94] = 0
        img[61:191, :220] = 0
        img[:93, :431] = 0
        img[:380, 746:] = 0
        img[538:, 679:] = 0
        img[555:, :] = 0

    elif (company.lower() == 'philips medical systems' and R == 1024
          and C == 768):
        img[:133, :] = 0
        img[:280, :280] = 0
        img[:, :117] = 0
        ekg = img[716:, :]  # keep a copy of the EKG strip before blanking it
        img[716:, :] = 0
        img[:345, 770:] = 0

    elif (company.lower() == 'ge vingmed ultrasound' and R == 636
          and C == 434):
        img[:8, :] = 0
        img[0:42, :95] = 0
        img[:63, :131] = 0
        img[:10, :] = 0
        img[:207, 592:] = 0
        ekg = img[395:, :]
        img[393:, :] = 0

    #io.imsave('/data/gabriel/orig116.png',binary)

    elif company.lower() == 'gems ultrasound' and R == 636 and C == 434:
        img[:8, :] = 0
        img[0:42, :95] = 0
        img[:63, :131] = 0
        img[:10, :] = 0
        img[:207, 592:] = 0
        ekg = img[395:, :]
        img[393:, :] = 0

    elif (company.lower() == 'ge healthcare ultrasound' and R == 1016
          and C == 708):
        img[:148, :259] = 0
        ekg = img[650:, :]
        img[650:, :] = 0
        img[:, 915:] = 0

    elif company.upper() == 'TOSHIBA_MEC_US' and R == 960 and C == 720:
        ekg = img[586:, :]
        img[586:, :] = 0
        img[:, :123] = 0
        # np.fliplr returns a view, so masking imf in the loop below also
        # masks the mirrored region of img itself.
        imf = np.fliplr(img)

        coords = np.array([np.arange(88, 480), np.arange(480, 88, -1)]).T
        for i in coords:
            #print(i)
            img[:i[0], :i[1]] = 0
            imf[:i[0], :i[1]] = 0

        #img=imf
        #img[674:,:997]=0

    #binary = img_as_uint(img)
    plt.imshow(img)
    plt.show()
    io.imsave('/data/gabriel/imgtemp.png', img)
    return img
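
# A usage sketch for get_echo: the frame path below is illustrative, and the
# manufacturer tag and dimensions would normally come from the DICOM header.
import cv2
frame = cv2.imread('/data/gabriel/frame.png', 0)
masked = get_echo(frame, 'SIEMENS', 1024, 768)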