def generate_report(snapcat_json, outdir):
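    """Count, for each camera directory, how many image bursts contain at least
    one cat-labeled image, then save a bar chart to <outdir>/report.png and
    display it."""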

    bursts = tools.get_bursts(snapcat_json)

    cats_detected = dict()  # camera directory name -> number of bursts with a cat

    for burst in bursts:

        cat_detected = False
        for image in burst:
            if tools.cat_label_exists(snapcat_json, image):
                cat_detected = True
                break

        image = burst[0]
        image_path = snapcat_json.json_data[image]["path"]
        image_directory = os.path.dirname(image_path)
        image_directory = os.path.split(image_directory)[-1]

        if image_directory not in cats_detected:
            cats_detected[image_directory] = 0

        if cat_detected:
            cats_detected[image_directory] += 1

    # sort camera names alphabetically so the bars appear in a stable order
    cameras = sorted(cats_detected)
    num_cats = [cats_detected[camera] for camera in cameras]

    # create plot
    y_pos = np.arange(len(cameras))
    plt.figure(figsize=(20, 10))
    plt.bar(y_pos, num_cats, align='center', alpha=0.5)
    plt.xticks(y_pos, cameras)
    plt.ylabel('Number of Cats')
    plt.title('Number of Cats Per Camera')

    # save and show plot
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    plt.savefig(os.path.join(outdir, 'report.png'))
    plt.show()


def user_label_images_burst(snapcat_json):
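    """Interactively review image bursts that do not yet have a confident label.

    Bursts that are already labeled (by the classifier or by a user) are
    skipped; the remaining bursts are shown one at a time and labeled with the
    arrow, backspace and escape keys. Labels are written back to snapcat_json
    and saved on exit.
    """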
    ######################### gather image bursts #########################

    bursts = tools.get_bursts(snapcat_json)

    # Skip bursts that definitely contain a cat
    # only review bursts that have an unsure label
    unsure_bursts = []

    random.shuffle(bursts)

    for burst in bursts:

        image_labeled = False

        for image_name in burst:

            image_data = snapcat_json.json_data.get(image_name, {})

            #TODO - classifier_label will be associated with an area of interest
            if image_data.get("classifier_label") == "cat":
                image_labeled = True
                break

            if image_data.get("user_burst_label") is not None:
                image_labeled = True
                break

            if "areas_of_interest" in image_data and "aoi_user_labels" in image_data:
                num_aoi = len(image_data["areas_of_interest"])
                num_aois_labeled = len(image_data["aoi_user_labels"])

                # every area of interest already has a user label, so treat
                # this image (and therefore the burst) as labeled
                if num_aoi == num_aois_labeled:
                    image_labeled = True
                    break

        if not image_labeled:
            unsure_bursts.append(burst)

    # iterate over all of the bursts and get a label
    if len(unsure_bursts) == 0:
        if len(bursts) == 0:
            print("ERROR: there were no images to classify")
        else:
            print("No images remain to be classified - done")
        return

    done = False
    index = 0

    while not done:
        image_list = []

        for image_name in unsure_bursts[index]:
            image_path = snapcat_json.json_data[image_name]["path"]
            if os.path.isfile(image_path):
                image_list.append(image_path)
            else:
                print("ERROR: image does not exist:", image_path)

        #print( image_list )

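        # Hard-coded (Windows-style) path fragments for cameras whose bursts
        # should be skipped during review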
        directories_to_ignore = [
            "Cabritos_Part2\\camara 08", "Mona\\27", "Mona\\5A",
            "trampa\\piedra a 1"
        ]

        skip = False
        for image in image_list:
            if any(ignore_string in image for ignore_string in directories_to_ignore):
                print("ignoring image:", image)
                skip = True
            else:
                print("image:", image)

        if skip:
            index = index + 1
            continue

        key = display_directory_get_input(image_list)

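        # Key handling: LEFT -> INVALID_STRING, RIGHT -> VALID_STRING,
        # DOWN -> UNSURE_STRING, BACKSPACE -> step back one burst and reset
        # its label to NONE_STRING, ESCAPE -> stop labeling.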
        if key == LEFT_KEY:
            update_user_burst_label(snapcat_json, unsure_bursts[index],
                                    INVALID_STRING)
            index = index + 1

        elif key == RIGHT_KEY:
            update_user_burst_label(snapcat_json, unsure_bursts[index],
                                    VALID_STRING)
            index = index + 1

        elif key == DOWN_KEY:
            update_user_burst_label(snapcat_json, unsure_bursts[index],
                                    UNSURE_STRING)
            index = index + 1

        elif key == BACKSPACE_KEY:
            # ensure we don't go negative with the index
            if index > 0:
                index = index - 1

            update_user_burst_label(snapcat_json, unsure_bursts[index],
                                    NONE_STRING)

        elif key == ESCAPE_KEY:
            cv2.destroyAllWindows()
            done = True

        if index >= len(unsure_bursts):
            done = True

    cv2.destroyAllWindows()
    snapcat_json.save()


def segment_images(snapcat_json,
                   speckle_removal_size=10,
                   expansion=50,
                   interactive_examine=False):
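    """Find motion-based areas of interest (AOIs) for every image burst.

    Images in a burst are downscaled, contrast-equalized with CLAHE and
    compared against the per-burst median image; regions that differ by more
    than a motion threshold are grown into square boxes and stored as
    [x1, x2, y1, y2] under "areas_of_interest" in snapcat_json.
    """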
    # Create CLAHE object (contrast limited adaptive histogram equalization, compensates for brightness changes)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    bursts = tools.get_bursts(snapcat_json)

    for burst in bursts:

        pbar = ProgressBar()
        pbar.maxval = len(burst)
        pbar.start()

        burst_imgs = []

        for filename in pbar(burst):

            image_path = snapcat_json.json_data[filename]["path"]

            img = cv2.imread(image_path)

            # cv2.imread returns None when the image can't be read; skip it
            if img is None:
                continue

            #TODO: make this a setting in the settings file as it is used later when we crop the image
            img = img[50:-50, :]  # remove the black bar from top and bottom
            burst_imgs.append((image_path, img))

        # Bursts with fewer than 3 readable images can't produce a useful median, so skip them
        if len(burst_imgs) < 3:
            continue

        # Get all the images in the burst
        imgs = [b[1] for b in burst_imgs]
        # Scale them to fit within 320x320 (speeds up calculations)
        img_scale = max(imgs[0].shape[0] / 320.0, imgs[0].shape[1] / 320.0)
        imgs_small = [
            cv2.resize(
                i, (int(i.shape[1] / img_scale), int(i.shape[0] / img_scale)),
                interpolation=cv2.INTER_AREA) for i in imgs
        ]
        # Apply CLAHE to compensate for in-burst brightness changes. Also convert to grayscale
        imgs_small = [
            clahe.apply(np.uint8(np.mean(i, -1))) for i in imgs_small
        ]

        same_dim = True
        dim = burst_imgs[0][1].shape

        for b in burst_imgs:
            if not b[1].shape == dim:
                same_dim = False

        if not same_dim:
            continue

        # Take the median across the images pixel by pixel
        avg_burst_img = np.median(imgs_small, axis=0)
        # Debug: uncomment to step through the burst images one by one
        # for b in burst_imgs:
        #     cv2.imshow("i'manimage", b[1])
        #     if cv2.waitKey(0):
        #         continue

        # Calculate diff images from the median
        diffimgs = [
            np.abs(smimg.astype(np.int16) - avg_burst_img)
            for smimg in imgs_small
        ]

        # Start with a high motion threshold for the burst and iteratively bring it down if we don't find anything
        for seq_thres in [30, 25, 20, 15, 10]:
            tot_imgs = 0
            areas_of_interest = {
                image_path: []
                for image_path, _ in burst_imgs
            }
            # Process each image in the burst
            for burst_index, (image_path, i) in enumerate(burst_imgs):
                diffimg = diffimgs[burst_index]
                # Threshold the image based on our threshold
                thresimg = np.uint8(diffimg > seq_thres)
                # This morphology operation first shrinks any 'islands' of motion (which deletes little speckles),
                # then it expands the remaining islands back to approximate original size.
                # To deal with larger 'speckle' areas, increase the size of the MORPH_ELLIPSE
                thresimg = cv2.morphologyEx(
                    thresimg, cv2.MORPH_OPEN,
                    cv2.getStructuringElement(
                        cv2.MORPH_ELLIPSE,
                        (speckle_removal_size, speckle_removal_size)))
                # Now to ensure we have a border around the subject motion, expand islands. This will also merge nearby islands
                thresimg = cv2.dilate(
                    thresimg,
                    cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                              (expansion, expansion)))
                if interactive_examine:
                    cv2.imshow('diff', diffimg / np.max(diffimg))
                    cv2.imshow('thres', thresimg * 255)
                # Now label each island individually with a number in a new label image
                nlabels, labelimg = cv2.connectedComponents(thresimg,
                                                            connectivity=4)
                nimgs = 0
                # Take each island individually
                if nlabels <= 1:
                    # connectedComponents always includes the background as
                    # label 0, so nlabels <= 1 means no motion islands were
                    # found; fall back to using the whole image as the AOI
                    dim = min(imgs[0].shape[0], imgs[0].shape[1])
                    areas_of_interest[image_path].append([0, dim, 0, dim])
                else:
                    for l in range(1, nlabels):
                        # This magic incantation gets the bounding box of that island
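                        # np.any(labelimg == l, 0) marks the columns containing this island;
                        # np.where(...)[0][[0, -1]] takes the first and last such column
                        # (rows are handled the same way below), and multiplying by
                        # img_scale maps the box back to full-resolution coordinates.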
                        x1, x2 = np.int32(
                            np.where(np.any(labelimg == l, 0))[0][[0, -1]] *
                            img_scale)
                        y1, y2 = np.int32(
                            np.where(np.any(labelimg == l, 1))[0][[0, -1]] *
                            img_scale)
                        # Make sure it's a square region
                        w = x2 - x1
                        h = y2 - y1
                        if w > h:
                            y1 -= (w - h) // 2
                            y2 = y1 + w
                        elif h > w:
                            x1 -= (h - w) // 2
                            x2 = x1 + h
                        # That may have pushed the region off the bounds of the image.
                        # Shrink the square until it is no longer out of bounds.
                        while x1 < 0 or y1 < 0 or x2 >= i.shape[
                                1] or y2 >= i.shape[0]:
                            x1 += 1
                            x2 -= 1
                            y1 += 1
                            y2 -= 1

                        # Save the area of interest for this image.
                        areas_of_interest[image_path].append([x1, x2, y1, y2])
                        nimgs += 1
                        if interactive_examine:
                            subimg = i[y1:y2, x1:x2, :]
                            print(x1, x2, y1, y2)
                            cv2.imshow('image', subimg)
                            if cv2.waitKey(0) == ord('q'):
                                exit(1)

                # Count only motion-derived AOIs; the full-frame fallback does not count
                tot_imgs += nimgs

            # Stop lowering the threshold once at least one motion island has been found
            if tot_imgs > 0:
                break

        for image_path in areas_of_interest.keys():
            image_name = os.path.basename(image_path)
            snapcat_json.update(image_name, "areas_of_interest",
                                areas_of_interest[image_path])

    snapcat_json.save()


def user_label_images_burst( snapcat_json ):
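  """Interactively label image bursts whose classifier label is "unsure".

  Bursts that already contain a "cat" classification are skipped; the rest
  are shown one at a time and labeled with the arrow, backspace and escape
  keys.
  """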
  ######################### sort image bursts #########################

  bursts = tools.get_bursts( snapcat_json )

  done = False
  index = 0

  # Skip bursts that definitely contain a cat
  # only review bursts that have an unsure label
  unsure_bursts = []
  for burst in bursts:

    cat_detected = False
    unsure_label = False
    for image_name in burst:
      if snapcat_json.json_data[image_name]["classifier_label"] == "cat":
        cat_detected = True
        break

      if snapcat_json.json_data[image_name]["classifier_label"] == "unsure":
        unsure_label = True

    if cat_detected:
      continue
    elif unsure_label:
      unsure_bursts.append( burst )


  # iterate over all of the bursts and get a label
  if len(unsure_bursts) == 0:
    print( "No unsure bursts to review" )
    return

  while not done:
    image_list = []

    for image_name in unsure_bursts[index]:
      image_list.append( snapcat_json.json_data[image_name]["path"] )
      
    print( image_list )

    key = display_directory_get_input( image_list )
    
    if key == LEFT_KEY:
      update_user_burst_label( snapcat_json, unsure_bursts[index], INVALID_STRING)
      index = index + 1

    elif key == RIGHT_KEY:
      update_user_burst_label( snapcat_json, unsure_bursts[index], VALID_STRING)
      index = index + 1

    elif key == DOWN_KEY:
      update_user_burst_label( snapcat_json, unsure_bursts[index], UNSURE_STRING)
      index = index + 1

    elif key == BACKSPACE_KEY:
      # ensure we don't go negative with the index
      if ( index > 0 ):
        index = index - 1      

      update_user_burst_label( snapcat_json, unsure_bursts[index], NONE_STRING )

    elif key == ESCAPE_KEY:
      cv2.destroyAllWindows()
      done = True
    
    if index >= len(unsure_bursts):
      done = True

  cv2.destroyAllWindows()