Example #1
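All of the examples below share a common preamble of imports. A minimal sketch of it is shown here; bv and bui are this project's own vision and plotting helper modules, so the import lines for them are assumptions, not the real module paths.

import math
import cv2
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt   # only needed by the commented-out plotting code

# Hypothetical import paths: bv and bui are the project's vision and
# plotting helpers referenced throughout the examples.
import basic_vision as bv
import basic_ui as bui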
def sobel_filter_test(file):
   image = cv2.imread(file, 0)
   # NOTE: blur is computed here, but the filters below operate on the raw image.
   blur = cv2.GaussianBlur(image,(3,3),0)
   laplacian = cv2.Laplacian(image, cv2.CV_64F, ksize=3, scale=1)
   sobel = bv.sobel_filter(image, 3)
   entropy = bv.entropy_local(image, 7)
   height, width = image.shape[:2]

   # dpi = 80.0
   # xpixels, ypixels = 800, 800

   # fig = plt.figure(figsize=(height/dpi, width/dpi), dpi=dpi)
   # fig.figimage(image)
   # plt.show()

   # plt.subplot(1,3,1),plt.imshow(image,cmap = 'gray')
   # plt.title('Original'), plt.xticks([]), plt.yticks([])
   # plt.subplot(1,3,2),plt.imshow(laplacian,cmap = 'gray')
   # plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
   # plt.subplot(1,3,3),plt.imshow(sobel,cmap = 'gray')
   # plt.title('Sobel'), plt.xticks([]), plt.yticks([])
   # plt.subplot(2,2,4),plt.imshow(sobely,cmap = 'gray')
   # plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
   # plt.show()
   bui.plot_images([image, laplacian, sobel, entropy])
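bv.sobel_filter is project-specific code, so its exact behaviour is not shown here. As a rough reference only, a gradient-magnitude filter of the kind it presumably wraps can be built from cv2.Sobel; sobel_magnitude below is a hypothetical helper, not part of bv.

def sobel_magnitude(image, ksize=3):
   # Horizontal and vertical first derivatives, in float64 so negative
   # gradients are not clipped.
   gx = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=ksize)
   gy = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=ksize)
   # Combine into a single gradient-magnitude image.
   return cv2.magnitude(gx, gy)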
Example #2
def entropy_sequence_test2(location):

   # Load frames from a location
   def get_frames(location, start_index=0, max_frames=10, step=1):
      frames = []
      count = start_index
      while count < start_index + step * max_frames:
         image = cv2.imread("%s/frame%d.jpg"%(location, count), 0)
         image = cv2.GaussianBlur(image, (5,5), 0)
         frames.append(image)
         count = count + step
      return frames

   # Load the frames and calculate the entropy_stack for each frame.
   frames = get_frames(location, 0, 200)
   entropy_frames = []
   for frame in frames:
      entropy_frames.append(bv.entropy_local(frame, 31, 16))

   result = []
   for i in range(10, len(entropy_frames)):
      frame = frames[i]
      e_frame = entropy_frames[i]
      sequence = bv.entropy_sequential(entropy_frames[i-10:i])
      bui.plot_images([frame, e_frame, sequence])   
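bv.entropy_local(frame, 31, 16) is also project code. One plausible equivalent, used here purely for illustration, is scikit-image's rank entropy over a sliding circular window; the window handling below is an assumption, not the actual bv implementation.

from skimage.filters.rank import entropy
from skimage.morphology import disk

def entropy_local_sketch(image, window=31):
   # Shannon entropy of the grey-level distribution inside a circular
   # neighbourhood roughly `window` pixels across.
   return entropy(image, disk(window // 2))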
Example #3
def entropy_shift_test(file1, file2):
   image1 = cv2.imread(file1, 0)
   image1 = cv2.GaussianBlur(image1, (5,5), 0)
   image1 = bv.entropy_local(image1, 7, normalize=False)
   image2 = cv2.imread(file2, 0)
   image2 = cv2.GaussianBlur(image2, (5,5), 0)
   image2 = bv.entropy_local(image2, 7, normalize=False)

   print("image1 max: %f"%float(image1.max()))
   print("image2 max: %f"%float(image2.max()))
   print("image1 min: %f"%float(image1.min()))
   print("image2 min: %f"%float(image2.min()))
   in1 = bv.normalize_array(image1, max_value=255, dtype=np.uint8)
   in2 = bv.normalize_array(image2, max_value=255, dtype=np.uint8)
   seq = bv.entropy_sequential([in1, in2], normalize=False)
   print("seq max: %f"%float(seq.max()))
   print("seq min: %f"%float(seq.min()))
   norm1 = bv.normalize_array(seq)
   #bui.plot_images([in1, in2, norm1])
   norm2 = bv.normalize_array(seq, max_rel=image1.max())
   norm3 = bv.normalize_array(seq, max_rel=image2.max())
   print("norm1 max: %f"%float(norm1.max()))
   print("norm2 max: %f"%float(norm2.max()))
   print("norm3 max: %f"%float(norm3.max()))
   print(norm2.max())
   bui.plot_images([in1, in2, norm1, norm2, norm3], norm=mpl.colors.NoNorm())
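bv.normalize_array is called above with max_value, dtype and max_rel keywords. The sketch below is a guess at those semantics (scale so that max_rel, or the array's own maximum, maps to max_value), not the real bv code.

def normalize_array_sketch(array, max_value=1.0, dtype=np.float64, max_rel=None):
   # Scale relative to max_rel if given, otherwise to the array's own maximum.
   array = array.astype(np.float64)
   top = float(max_rel) if max_rel is not None else float(array.max())
   if top == 0.0:
      return np.zeros(array.shape, dtype=dtype)
   scaled = np.clip(array / top, 0.0, 1.0) * max_value
   return scaled.astype(dtype)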
Example #4
def entropy_diff_test(file1, file2):
   """Here we simply calculate the raw difference between entropy values
   calculated from two sequential images.

   Notes
   ----------

   """

   image1 = cv2.imread(file1, 0)
   image1 = cv2.GaussianBlur(image1, (5,5), 0)
   image2 = cv2.imread(file2, 0)
   image2 = cv2.GaussianBlur(image2, (5,5), 0)

   results1 = bv.entropy_stack(image1, normalize=False)
   results2 = bv.entropy_stack(image2, normalize=False)
   #diffs = bv.image_diffs(results1, results2)

   diffs_e = []
   for i in range(0, len(results1)):
      diffs_e.append(np.absolute(results2[i] - results1[i]))

   # cv2.absdiff avoids the uint8 wrap-around that image2 - image1 would produce.
   diff_a = cv2.absdiff(image2, image1)

   normalized1 = [bv.normalize_array(x) for x in results1]
   normalized2 = [bv.normalize_array(x) for x in results2]

   bui.plot_images([image1, image2, diff_a, diff_a] + normalized1 + normalized2 + diffs_e, max_cols=4)   
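A note on the absolute difference above: subtracting two uint8 images with plain NumPy wraps around modulo 256 before the absolute value is taken, which is why cv2.absdiff is used for diff_a. A small demonstration:

a = np.array([[10]], dtype=np.uint8)
b = np.array([[20]], dtype=np.uint8)
print(np.absolute(a - b))    # [[246]] -- uint8 subtraction wrapped around
print(cv2.absdiff(a, b))     # [[10]]  -- saturating absolute difference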
Example #5
def entropy_image_test(file):
   """Calculates an "entropy stack" of images for an input image. 
   Displays the original image, along with low, low_medium, high_medium,
   and high resolution entropy calculations.

   """

   image = cv2.imread(file, 0)
   # NOTE: blur is computed here, but entropy_stack operates on the raw image.
   blur = cv2.GaussianBlur(image, (5,5), 0)
   results = bv.entropy_stack(image)
   bui.plot_images([image] + results)
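bv.entropy_stack is the project's own helper; going by the docstring above, it returns local entropy images at several window sizes. A rough sketch of that idea, again using scikit-image's rank entropy, with window radii invented for illustration:

from skimage.filters.rank import entropy
from skimage.morphology import disk

def entropy_stack_sketch(image, radii=(3, 7, 15, 31)):
   # Low to high "resolution": small windows pick up fine texture,
   # large windows pick up coarse structure.
   return [entropy(image, disk(r)) for r in radii]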
Example #6
def entropy_sequence_test(location):
   """Calculates low, low_medium, high_medium, and high entropy 
   resolution images for a sequence of frames. In this case, we
   caltulate 4 entropy images for each of two frames. Finally, we
   calculate the entropy between the two frames. In the output, the top
   row shows the original frames. The second row shows the entropy
   images for frame 1, and the third row shows the entropy images for
   frame 2. The bottom row shows the entropy between the images in
   rows 2 and 3.

   """ 

   # Load frames from a location
   def get_frames(location, start_index=0, max_frames=10, step=1):
      frames = []
      count = start_index
      while count < start_index + step * max_frames:
         image = cv2.imread("%s/frame%d.jpg"%(location, count), 0)
         image = cv2.GaussianBlur(image, (5,5), 0)
         frames.append(image)
         count = count + step
      return frames

   # Load the frames and calculate the entropy_stack for each frame.
   frames = get_frames(location, start_index=0, max_frames=200)
   # e_seq = bv.entropy_sequential(frames)
   # bui.plot_images([frames[0], frames[-1]]+[e_seq])

   entropy_frames = [[], [], [], []]
   for frame in frames:
      e_stack = bv.entropy_stack(frame)
      for i in range(0, len(e_stack)):
         entropy_frames[i].append(e_stack[i])

   result = []
   for e_frame in entropy_frames:
      result.append(bv.entropy_sequential(e_frame))

   bui.plot_images([frames[0], frames[-1], frames[0], frames[-1],
                    entropy_frames[0][0], entropy_frames[1][0],
                    entropy_frames[2][0], entropy_frames[3][0],
                    entropy_frames[0][-1], entropy_frames[1][-1],
                    entropy_frames[2][-1], entropy_frames[3][-1]]
                   + result, max_cols=4)
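bv.entropy_sequential takes a list of frames and returns a single image. One plausible reading, used only as an illustration of the idea, is the per-pixel Shannon entropy of each pixel's values across the frame axis:

def entropy_sequential_sketch(frames, bins=16):
   # Stack frames into shape (T, H, W) and quantize each pixel's
   # temporal trace into `bins` levels.
   stack = np.stack([f.astype(np.float64) for f in frames], axis=0)
   lo, hi = stack.min(), stack.max()
   levels = np.clip((stack - lo) / max(hi - lo, 1e-12) * (bins - 1), 0, bins - 1).astype(int)
   # Per-pixel histogram over time, then the entropy of that histogram.
   counts = np.stack([(levels == b).sum(axis=0) for b in range(bins)], axis=0)
   p = counts / float(stack.shape[0])
   logp = np.log2(np.where(p > 0, p, 1.0))   # log2(1) = 0 keeps empty bins harmless
   return -(p * logp).sum(axis=0)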
Example #7
def entropy_uniqueness_test(file1, file2):
   """The idea behind this test is to calculate the entropy of two
   images in sequence, then extract the unique entropy values from each.
   Then, we filter the unique values down to find unique values that
   exist in both filtered images, and compare the locations at which
   those values appeared in the original image. The idea is to try to
   find "entropy features" that are shared in both images.

   Notes
   ----------
   It's not clear whether or not this is helpful.

   """

   image1 = cv2.imread(file1, 0)
   image1 = cv2.GaussianBlur(image1, (5,5), 0)
   hi_rez1 = bv.entropy_local(image1, 7, normalize=False)

   image2 = cv2.imread(file2, 0)
   image2 = cv2.GaussianBlur(image2, (5,5), 0)
   hi_rez2 = bv.entropy_local(image2, 7, normalize=False)


   def original_index(index, shape):
      y = math.floor(index / shape[1])
      x = index - (y * shape[1])
      return y, x

   def find_unique(unique, indices, counts):
      unique_count = 0
      unique_locations = []
      unique_values = []
      for i in range(0, len(counts)):
         if counts[i] == 1: 
            unique_count = unique_count + 1
            unique_locations.append(indices[i])
            unique_values.append(unique[i])
      return unique_count, unique_locations, unique_values

   unique, indices, counts = np.unique(hi_rez1, return_index=True, return_counts=True)
   ucount1, ulocation1, uvalue1 = find_unique(unique, indices, counts)
   unique, indices, counts = np.unique(hi_rez2, return_index=True, return_counts=True)
   ucount2, ulocation2, uvalue2 = find_unique(unique, indices, counts)

   print("image1 unique count: %d, image2 unique count: %d"%(ucount1, ucount2))

   shared_feature_locations1 = []
   shared_feature_locations2 = []
   for i in range(0, len(uvalue1)):
      for j in range(0, len(uvalue2)):
         if uvalue1[i] == uvalue2[j]:
            y, x = original_index(ulocation1[i], hi_rez1.shape)
            shared_feature_locations1.append(np.array([y, x]))
            y, x = original_index(ulocation2[j], hi_rez2.shape)
            shared_feature_locations2.append(np.array([y, x]))
            break

   print("found %d shared features"%(len(shared_feature_locations1)))
   print("first feature:")
   print(shared_feature_locations1[0])
   print(shared_feature_locations2[0])
   print("second feature:")
   print(shared_feature_locations1[1])
   print(shared_feature_locations2[1])

   total = np.array([0, 0])
   for i in range(0, len(shared_feature_locations1)):
      total = total + (shared_feature_locations2[i] - shared_feature_locations1[i])

   print("total vector:")
   print(total)
   print(total.dtype)
   total = np.divide(total, len(shared_feature_locations1))
   print("average vector:")
   print(total)

   bui.plot_images([image1, image2, bv.normalize_array(hi_rez1), bv.normalize_array(hi_rez2)], max_cols=2)
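As a side note, the manual index arithmetic and the double loop above have direct NumPy equivalents: np.unravel_index recovers (y, x) from a flat index, and np.intersect1d finds the values common to both unique sets. A small sketch, assuming the same hi_rez1 and hi_rez2 arrays:

# Values that occur exactly once in each entropy image, plus their flat indices.
u1, idx1, cnt1 = np.unique(hi_rez1, return_index=True, return_counts=True)
u2, idx2, cnt2 = np.unique(hi_rez2, return_index=True, return_counts=True)
u1, idx1 = u1[cnt1 == 1], idx1[cnt1 == 1]
u2, idx2 = u2[cnt2 == 1], idx2[cnt2 == 1]

# Values unique in both images, with their (y, x) positions in each.
shared, pos1, pos2 = np.intersect1d(u1, u2, return_indices=True)
ys1, xs1 = np.unravel_index(idx1[pos1], hi_rez1.shape)
ys2, xs2 = np.unravel_index(idx2[pos2], hi_rez2.shape)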