def correlate_image(self, image_one, image_two):
        try:
            max_range = c2d(image_one, image_one, mode='same')
            img_result = c2d(image_one, image_two, mode='same')
            result_final = ((img_result.max() / max_range.max()) * 100)
            return result_final

        except Exception as e:
            flash("failed:" + str(e))
            return 105
 def correlate_image(self, image_one, image_two):
     try:
         max_range = c2d(image_one, image_one, mode='same')
         img_result = c2d(image_one, image_two, mode='same')
         result_final = ((img_result.max() / max_range.max()) * 100)
         print "Images are", result_final, "percent similar\n"
         if result_final == 100.0:
             print "Images are exactly same\n"
         elif result_final == 0.0:
             print "Images are very different\n"
     except:
         print "\nAwwh, somethings not right! Try again\n"
         exit()
 def correlate_image(self, image_one, image_two, filename):
     try:
         max_range=c2d(image_one, image_one, mode='same')
         img_result=c2d(image_one, image_two, mode='same')
         result_final=((img_result.max()/max_range.max())*100)
         my_array.append({'name': filename, 'number': result_final})
         print "with: " + filename + " | Images are", result_final,"percent similar\n"
         if result_final==100.0:
             print "with: " + filename + " | Images are exactly same\n"
         elif result_final==0.0:
             print "with: " + filename + " | Images are very different\n"
     except:
         print "\nAwwh, somethings not right! Try again\n"
         exit()
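# Note: image_one / image_two in the snippets above are presumably already grey-scaled,
# mean/std-normalized 2-D arrays (see the get() / load_img() helpers in the examples
# below), and c2d is scipy.signal's correlate2d; the self-correlation peak of image_one
# serves as the 100% reference.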
Example #6
def sim(img1, img2, scale_percent=15, multichannel=True):
    '''sim: how much does img2 differ from img1 (img1 is the reference)'''
    img1 = img_thresh(img1)
    img2 = img_thresh(img2)

    re_im1 = img_resize(scale_percent, img1)
    re_im2 = img_resize(scale_percent, img2)

    c11 = c2d(re_im1, re_im1, mode='same').ravel().tolist()
    c12 = c2d(re_im1, re_im2, mode='same').ravel().tolist()
    print(np.nanmax(c12), np.nanmax(c11))
    if np.isnan(np.nanmax(c12)):  # np.nan never compares equal to itself, so use isnan
        print(c11)
        print(c12)
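    # ratio of the cross-correlation peak to img1's self-correlation peak
    # (close to 1.0 when the thresholded, resized images look alike)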
    return np.nanmax(c12) / np.nanmax(c11)
def cross_corr(project):
    f = open('/home/cuda/Stefanie/HOST01to07/out' + project + '.txt', 'w')
    for project in project_lst:
        for shot in shots:
            for slide in slides:
                index1 = 0
                index2 = 0
                pict_lst = os.listdir("/home/cuda/Stefanie/images75/" +
                                      project + "/" + shot + "/" + slide + "/")
                for x in range(len(pict_lst)):
                    im = "/home/cuda/Stefanie/images75/" + project + "/" + shot + "/" + slide + "/" + pict_lst[
                        index1]

                    for pic in pict_lst:
                        im2 = "/home/cuda/Stefanie/images75/" + project + "/" + shot + "/" + slide + "/" + pict_lst[
                            index2]
                        if (pict_lst[index2],
                                pict_lst[index1]) not in ready_lst:
                            data1 = get(im)
                            data2 = get(im2)
                            cc = c2d(data1, data2, mode='same')
                            f.write(project + '\t' + pict_lst[index1] + '\t' +
                                    pict_lst[index2] + '\t' + str(cc.max()) +
                                    '\n')
                            ready_lst.add((pict_lst[index1], pict_lst[index2]))
                            #print project, shot, slide, pict_lst[index1], pict_lst[index2], cc.max()

                        index2 = index2 + 1

                    index1 = index1 + 1
                    index2 = 0
            ready_lst.clear()
Example #8
def compare_images(imgfile1, imgfile2):
	'''Compare the images as per https://stackoverflow.com/questions/1819124/image-comparison-algorithm
	This is really slow, so we start with a tiny size and gradually go larger if we find them to look alike.'''
	size = 5

	origimage1, origimage2 = read_image(imgfile1), read_image(imgfile2)
	if origimage1 is None or origimage2 is None:
		# bad image
		return False, 0, size

	while True:
		data1, data2 = get_resized_data(origimage1, size), get_resized_data(origimage2, size)
		correlation = c2d(data1, data2, mode="same").max()
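		# Assuming get_resized_data returns the usual zero-mean, unit-variance grey-scale
		# array, the self-correlation peak is roughly size**2, so this threshold asks for
		# a normalized correlation of about 0.75 or better.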
		if correlation > size**2 * 0.75:
			if size >= 80:
				# seems like a good match
#				print "\tMATCH", size, imgfile1, imgfile2
				return True, correlation, size
			else:
				# refine
#				print "\tPossible correlation!", size, imgfile1, imgfile2
				size *= 2
		else:
			# not a match
			return False, correlation, size
Example #10
def compare_meanshift(dir_mainfolder):
    ##Compare_ls: stores the comparison score for each image
    compare_ls = []
    ##Base_img: Image draw from user
    print "get base"
    base_img = cv2.imread(dir_mainfolder + '/img_result/0.jpg')
    img0 = cv2.cvtColor(base_img,cv2.COLOR_BGR2RGB)
    hist_base = cv2.calcHist([img0], [0, 1, 2], None, [8, 8, 8],
		[0, 256, 0, 256, 0, 256])
    hist_base = cv2.normalize(hist_base).flatten()
    base_img = cv2.resize(base_img, (100, 100))

    ##Get inner product user draw image
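    ##(sp.inner with [299, 587, 114] / 1000 is the W3C luminance grey-scale
    ## conversion, roughly 0.299*R + 0.587*G + 0.114*B)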
    base_img = sp.inner(base_img, [299, 587, 114]) / 1000.0
    base_img = (base_img - base_img.mean()) / base_img.std()
    base_img.shape
    num = 1
    ##Compare the user draw image to all images
    list_img =[]
    for img2 in glob.glob(dir_mainfolder + "/*.jpg"):
        n= cv2.imread(img2)
        list_img.append(n)
    for img2 in glob.glob(dir_mainfolder + "/*.png"):
        n= cv2.imread(img2)
        list_img.append(n)
    print "jump for"
    for img in list_img:
        img = get(num,dir_mainfolder,0)
        img.shape

        print "compare: " + num
        ##Use correlation 2d to compare between image
        compare_img = c2d(base_img, img, mode='same')
        value_c2d_pixel = compare_img.max()*0.4/1000
        
        ##Read image and calculated image histogram
        data = imread(dir_mainfolder+'/img_meanshift/%s.jpg' % num)
        img_1= cv2.cvtColor(data,cv2.COLOR_BGR2RGB)
        hist_img = cv2.calcHist([img_1], [0, 1, 2], None, [8, 8, 8],
		[0, 256, 0, 256, 0, 256])
        hist_img = cv2.normalize(hist_img).flatten()

        ##Compare histograms
        compare = cv2.compareHist(hist_base,hist_img,cv2.cv.CV_COMP_CORREL)
        compare = compare*0.6
        avg_value = (compare + value_c2d_pixel)/2
        compare_ls.append((num,avg_value))
        
        print "compare done"
        num = num + 1

    ##Sort the comparison scores in descending order
    compare_ls = sorted(compare_ls,key = itemgetter(1), reverse = True)
    i = 1
    
    ##Write ranking image to folder
    for ls in compare_ls:
        cv2.imwrite(dir_mainfolder+"/img_rank/"+str(i)+".jpg",list_img[ls[0] - 1])
        i = i+1
def main():
    
    checker = True
    while checker == True:
        testfile = input('Enter the file path to JPG: ')
        if os.path.exists(testfile):
            checker = False
        else:
            print("Not valid file path.")
    
    img_test = Image.open(testfile)
    img_test.thumbnail([50,50], Image.ANTIALIAS)
    img_test.save(testfile, "JPEG")
    
    
    testValue = valueIt(testfile)

    count = [0,0,0,0,0]
    for eachkind in range(1, 6): 
        for eachfile in listdir("Data/0"+str(eachkind)):
            
            img = Image.open("Data/0"+str(eachkind)+"/"+ eachfile)
            img.thumbnail([50,50], Image.ANTIALIAS)
            img.save("Data/0"+str(eachkind)+"/"+ eachfile, "JPEG")
            
            
            compareValue = valueIt("Data/0"+str(eachkind)+"/"+ eachfile)
            CompareCode = c2d(testValue, compareValue, mode='same')
            count[eachkind-1] += CompareCode.max()
            
    
    numofFile = []
    for eachkind in range(1, 6): 
         numofFile.append(len(listdir("Data/0"+str(eachkind))))            
    finalpercentage = []
    for eachkind in range(0, 5):
        finalpercentage.append(count[eachkind] / numofFile[eachkind])
    
    
    thehighest = 0
    theindex = 0
    
    for eachindex in range(0,5):
        if finalpercentage[eachindex] > thehighest:
            thehighest = finalpercentage[eachindex]
            theindex = eachindex
    if theindex == 0:
        print("It is Smile")
    elif theindex == 1:
        print("It is Hat")
    elif theindex == 2:
        print("It is Hash")
    elif theindex == 3:
        print("It is Heart")
    elif theindex == 4:
        print("It is Dollar")
Example #12
 def corelationImage1Image2(imageArray1, imageArray2):
     image1 = scipy.inner(numpy.asarray(imageArray1), [299, 587, 114]) / 1000.0
     image2 = scipy.inner(numpy.asarray(imageArray2), [299, 587, 114]) / 1000.0
     
     image1 = (image1 - image1.mean())/ image1.std()
     image2 = (image2 - image2.mean())/ image2.std()
     
     corelationimage1Withimage2 = c2d(image1, image2, mode = 'same')
     return corelationimage1Withimage2.max()
Example #13
def correlateKNN(choices, item):
    reslst=np.zeros(10)
    for i, d in enumerate(choices):
        c11 = c2d(item, d, mode='full')
        #print c11
        reslst[i]=c11.max()
    #print reslst
    pred=np.argmax(reslst)
    return pred
Example #14
def compare(file1, file2, diff):
     """
     Compares two files (JPEG, PNG or PDF) 
     """
     im1 = load_img(file1)
     im2 = load_img(file2)
     c11 = c2d(im1, im1, mode='same')  # baseline
     c22 = c2d(im2, im2, mode='same')  # baseline
     c12 = c2d(im1, im2, mode='same')
     m = [c11.max(), c12.max(), c22.max()]
     diff_ab = 100 * (1-m[1]/m[0])
     diff_ba = 100 * (1-m[1]/m[2])
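     # diff_ab / diff_ba measure how far the cross-correlation peak falls short of
     # each image's own self-correlation peak, as a percentage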
     
     fail=max(diff_ab,diff_ba) > diff

     if fail:
          raise CompareImageException(c11.max(), c12.max(), c22.max())

     return fail
Example #15
def main():
  if len(sys.argv)!=3:
    print 'Usage: mirror.py <url1> <url2>'
    sys.exit(-1)
  outfile1=sys.argv[1].split('/')[-1]
  urlretrieve(sys.argv[1],outfile1 )
  outfile2=sys.argv[2].split('/')[-1]
  urlretrieve(sys.argv[2],outfile2 )
  print 'start'
  im1 = get(outfile1)
  print 'got img 1'
  im2=get(outfile2)
  ti=time.time()
  print  im1.shape, im2.shape
  c11 = c2d(im1, im1, mode='same')
  print c11.max(),time.time()-ti
  ti=time.time()
  c12 = c2d(im1, im2, mode='same')
  print  c12.max(),time.time()-ti
  print 'Similarity :',((c12.max()/c11.max())*100)
Example #16
def image2letter( image, digits_map ):
    best_key = None
    best_score = None
    #h1 = misc.toimage( image ).histogram()
    im1 = prepare_image( misc.fromimage( image ) )
    for ( k, ir ) in digits_map.items():
        #h2 = misc.toimage( ir ).histogram()
        #rms = math.sqrt( reduce( operator.add, map( lambda a, b: ( a-b )**2, h1, h2 ) )/ len( h1 ) )
        rms = c2d( im1, prepare_image( ir ), 'same' ).max()
        if best_score is None or best_score > rms:  # don't treat a legitimate score of 0 as "unset"
            best_key = k
            best_score = rms
    return best_key[0] if best_key else None, best_score
Example #17
def captcha_solver(pid):
    captcha=Image.open(resource_path('Captcha\\captcha%s.png'%pid))
    captcha.convert('RGBA')
    for letter in range (0,4):
        #Slice the captcha into four ~38x40 strips, each resized to 19x20 below
        box=[letter*38,0,(letter+1)*38+1,40]
        temp=captcha.crop(box).resize((19,20),Image.ANTIALIAS)
        pixdata=temp.load()
        #Remap RGBA colour values to reduce noise
        for y in range(temp.size[1]):
            for x in range(temp.size[0]):
                if pixdata[x, y][0] < 95:
                    pixdata[x, y] = (0, 0, 0, 255)
                if pixdata[x, y][1] < 95:
                    pixdata[x, y] = (0, 0, 0, 255)
                if pixdata[x, y][2] > 150:
                    pixdata[x, y] = (255, 255, 255, 255)
        temp.save(resource_path('Captcha\\captcha%s_%s.png'%(pid,letter)))

    folders_list=['0','1','2','3','4','5','6','7','8','9']
    answer_list=[]
    for letter in range (0,4):
        corr_list=[]
        img1=get(resource_path('Captcha\\captcha%s_%s.png'%(pid,letter)))
        baseline=c2d(img1,img1,mode='same').max()
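        # each digit folder is scored by how close the average correlation of its icons
        # with this slice comes to the slice's own self-correlation baseline; the
        # smallest gap wins (see corr_list below)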
        for folder in folders_list:
            sum_corr=0
            counter=0
            for img in os.listdir(resource_path('Iconset\\%s'%folder)):
                img2=get(resource_path('Iconset\\%s\\%s'%(folder,img)))
                corr=c2d(img1,img2,mode='same').max()
                sum_corr+=corr
                counter+=1
            ave_corr=sum_corr/counter
            corr_list.append(abs(ave_corr-baseline))
        answer_list.append(folders_list[corr_list.index(min(corr_list))])
    answer = "".join(str(x) for x in answer_list)
    return answer
Example #18
def compare(INPUTFILE,wire_loc):
    global pics,piclen
    test = imread(INPUTFILE)
    test = imresize(test,0.4)
    test = sp.inner(test, [299, 587, 114]) / 1000.0
    test = (test - test.mean()) / test.std()
    choose(wire_loc)
    if not pics:
        print "I cannot recognise what is in front of me"   
        return 
    for a in range (0,piclen):
        for b in range(1,4):
            im1 = get(pics[a],b)
            com = c2d(im1, test, mode='same')
            #print '%s%d' %(pics[a],b)
            #print com.max()
            if com.max() > 7000:
                pics[a] = pics[a].replace("_", " ")
                print '%s' %pics[a]
                return
    print "I cannot recognise what is in front of me"   
Example #19
def compare_images(orig_url, file_name_1, file_name_2):
    file_path_1 = global_vars.img_filepath + file_name_1 + '-clipped'
    file_path_2 = global_vars.img_filepath + file_name_2 + '-clipped'

    hash_of_url = hashlib.md5(orig_url)
    hash_link = global_vars.cache_url + hash_of_url.hexdigest() + '/'

    f1_open = False

    if os.path.isfile(file_path_1 + '.png'):
        img1 = get(file_path_1)
        f1_open = True
    else:
        f = open(global_vars.failed_pages, 'a')
        f.write(file_name_1 + "\n")
        f.close()

    if os.path.isfile(file_path_2 + '.png'):
        if f1_open:
            img2 = get(file_path_2)
            #c11 = c2d(img1, img1, mode='same')
            c12 = c2d(img1, img2, mode='same')
            diff = c12.max()
            skew = c12.max() / 30000.

            if skew < global_vars.quality_threshold:
                # write results to results file
                f = open(global_vars.comparison_results, 'a')
                f.write(file_name_1 + ',' + file_name_2 + ',' + orig_url +
                        ',' + hash_link + ',' + str(skew) + "\n")
                f.close()

    else:
        f = open(global_vars.failed_pages, 'a')
        f.write(file_name_2 + "\n")
        f.close()
Example #20
def get(i):  # (function header inferred from the im1 = get(1) calls below)
    # get JPG image as Scipy array, RGB (3 layer)
    # data = imread('base%s.jpg' % i)
    data = io.imread('base%s.jpg' % i)
    # convert to grey-scale using W3C luminance calc
    data = sp.inner(data, [299, 587, 114]) / 1000.0
    # data = numpy.inner(data, [299, 587, 114]) / 1000.0
    # normalize per http://en.wikipedia.org/wiki/Cross-correlation
    return (data - data.mean()) / data.std()


im1 = get(1)
im2 = get(2)
im3 = get(5)
im1.shape
print(im1.shape)
# (105, 401)
im2.shape
print(im2.shape)
# (109, 373)
im3.shape
print(im3.shape)
# (121, 457)

#   c2d(COMPARE_THIS_IMAGE, WITH_THIS_IMAGE)
c11 = c2d(im1, im1, mode='same')  # baseline
c12 = c2d(im1, im2, mode='same')
c13 = c2d(im1, im3, mode='same')
c23 = c2d(im2, im3, mode='same')
c11.max(), c12.max(), c13.max(), c23.max()
# (42105.00000000259, 39898.103896795357, 16482.883608327804, 15873.465425120798)
#   LARGE VALUES: SIMILAR, SMALL VALUES: DIFFERENT
Example #21
def extractLetter(ref):
	ref = rgb2gray(ref)
	# a, b = compare_images(img, ref)	
	img1 = get(rgb2gray(imread('letters/a-new.crop.png')))
	img2 = get(rgb2gray(imread('letters/b-new.crop.png')))
	img3 = get(rgb2gray(imread('letters/c-new.crop.png')))
	img4 = get(rgb2gray(imread('letters/d-new.jpg')))

	c1 = c2d(img1, ref, mode='same')  # baseline
	c2 = c2d(img2, ref, mode='same')
	c3 = c2d(img3, ref, mode='same')
	c4 = c2d(img4, ref, mode='same')

	result = np.array([c1.max(), c2.max(), c3.max(), c4.max()])
	letters = ['a', 'b', 'c', 'd']
	ridx = np.unravel_index(result.argmax(),result.shape) # finding max index

	if (letters[ridx[0]] == 'd' and result[ridx[0]] < 190): # select next match
		result[ridx[0]] = 0
		ridx = np.unravel_index(result.argmax(),result.shape) # finding max index

	return letters[ridx[0]], result[ridx[0]]



# refim = (imgBinarize(rgb2gray(imread('letters/d-new.jpg'))))	
# im = (imgBinarize(rgb2gray(imread('letters/all-letters.jpg'))))
# # extractLetter(ref)


# table = buildRefTable(refim)
# acc = matchTable(im, table)
# val, ridx, cidx = findMaxima(acc)

# # find the half-width and height of template
# hheight = np.floor(refim.shape[0] / 2) + 1
# hwidth = np.floor(refim.shape[1] / 2) + 1

# cstart = max(cidx - hwidth, 1)
# cend = min(cidx + hwidth, im.shape[1] - 1)
# # mcq_col = im[0:im.shape[0]-1, cstart:cend] # column containing all mcq's

# # imsave('match-col-orig.jpg', mcq_col) # saving the matched component

# # convolving our reference image on mcq column
# # acc = matchTable(mcq_col, table)
# # val, ridx, cidx = findMaxima(acc)

# # find coordinates of the box
# rstart = max(ridx - hheight, 1)
# rend = min(ridx + hheight, im.shape[0] - 1)
# # cstart = max(cidx - hwidth, 1)
# # cend = min(cidx + hwidth, im.shape[1] - 1)

# # draw the box
# im[rstart:rend, cstart] = 255
# im[rstart:rend, cend] = 255

# im[rstart, cstart:cend] = 255
# im[rend, cstart:cend] = 255

# plt.imshow(im, cmap='gray')
# plt.show()
Example #22
            n_m, n_0 = compare_images(img1, img2)
            diff = n_m/img1.size
            maxDiff = max(maxDiff, diff)

    #cutoff will be 9 (for now - just picking a value)
    if maxDiff >= 9.:
        #assume that there is an animal there
        continue

    #similarity test
    similarity_l = []
    for blank_pic,possible_pic in zip(blankFiles[0],photos):
        p1 = get("day/"+blank_pic+".thumbnail")
        p2 = get("day/"+possible_pic+".thumbnail")

        similarity_l.append(c2d(p1, p2, mode='same').max())

    overallSimilarity = min(similarity_l)

    if photos in blankFiles:
        blankDiff.append(overallSimilarity)
        blankIndex.append(i)
    else:
        animalDiff.append(overallSimilarity)
        animalIndex.append(i)

        if (i == 7):
            print(overallSimilarity)
            print(daylight_photos[i])
            assert(False)
Example #23
def extractNumber(ref):
    ref = rgb2gray(ref)
    # a, b = compare_images(img, ref)
    img1 = get(imgBinarize(rgb2gray(imread('numbers/1.crop.png'))))
    img2 = get(imgBinarize(rgb2gray(imread('numbers/2.crop.png'))))
    img3 = get(imgBinarize(rgb2gray(imread('numbers/3.crop.png'))))
    img4 = get(imgBinarize(rgb2gray(imread('numbers/4.crop.png'))))
    img5 = get(imgBinarize(rgb2gray(imread('numbers/5.crop.png'))))
    img6 = get(imgBinarize(rgb2gray(imread('numbers/6.crop.png'))))
    img7 = get(imgBinarize(rgb2gray(imread('numbers/7.crop.png'))))
    img8 = get(imgBinarize(rgb2gray(imread('numbers/8.crop.png'))))
    img9 = get(imgBinarize(rgb2gray(imread('numbers/9.crop.png'))))
    img10 = get(imgBinarize(rgb2gray(imread('numbers/10.crop.png'))))
    img11 = get(imgBinarize(rgb2gray(imread('numbers/11.crop.png'))))
    img12 = get(imgBinarize(rgb2gray(imread('numbers/12.crop.png'))))
    img13 = get(imgBinarize(rgb2gray(imread('numbers/13.crop.png'))))
    img14 = get(imgBinarize(rgb2gray(imread('numbers/14.crop.png'))))
    img15 = get(imgBinarize(rgb2gray(imread('numbers/15.crop.png'))))

    c1 = c2d(img1, ref, mode='same')  # baseline
    c2 = c2d(img2, ref, mode='same')
    c3 = c2d(img3, ref, mode='same')
    c4 = c2d(img4, ref, mode='same')
    c5 = c2d(img5, ref, mode='same')
    c6 = c2d(img6, ref, mode='same')
    c7 = c2d(img7, ref, mode='same')
    c8 = c2d(img8, ref, mode='same')
    c9 = c2d(img9, ref, mode='same')
    c10 = c2d(img10, ref, mode='same')
    c11 = c2d(img11, ref, mode='same')
    c12 = c2d(img12, ref, mode='same')
    c13 = c2d(img13, ref, mode='same')
    c14 = c2d(img14, ref, mode='same')
    c15 = c2d(img15, ref, mode='same')

    result = np.array([
        c1.max(),
        c2.max(),
        c3.max(),
        c4.max(),
        c5.max(),
        c6.max(),
        c7.max(),
        c8.max(),
        c9.max(),
        c10.max(),
        c11.max(),
        c12.max(),
        c13.max(),
        c14.max(),
        c15.max()
    ])
    letters = [
        '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13',
        '14', '15'
    ]
    ridx = np.unravel_index(result.argmax(), result.shape)  # finding max index

    # if (letters[ridx[0]] == 'd' and result[ridx[0]] < 190): # select next match
    result[ridx[0]] = 0
    ridx = np.unravel_index(result.argmax(), result.shape)  # finding max index
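    # note: with the condition above commented out, the best match is always
    # discarded here and the second-best correlation is what gets returned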

    return letters[ridx[0]], result[ridx[0]]
Example #24
import glob
import os
import sys
import time
import warnings

import scipy as sp
from scipy.misc import imread
from scipy.signal.signaltools import correlate2d as c2d

folder = "resized/"
images = {}
scores = {}
os.chdir(folder)

def get(image):
	data = imread(image)
	data = sp.inner(data, [299, 587, 114]) / 1000.0
	return (data - data.mean()) / data.std()
start = time.clock()
search = get(sys.argv[1])

for image in glob.glob("*.jpg"):
	images[image] = get(image)

with warnings.catch_warnings():
	warnings.simplefilter("ignore")
	# This is to ignore a warning about the imaginary part of complex
	# numbers being discarded
	for image in images:
		scores[image] = c2d(search, images[image], mode = "same").max()
stop = time.clock()

print "done"
for score in scores:
	print score, scores[score]

print stop-start
Example #25
# from: http://stackoverflow.com/questions/1819124/image-comparison-algorithm
import scipy as sp
from scipy.misc import imread
from scipy.signal.signaltools import correlate2d as c2d

def get(i):
    # get JPG image as Scipy array, RGB (3 layer)
    data = imread('im%s.jpg' % i)
    # convert to grey-scale using W3C luminance calc
    data = sp.inner(data, [299, 587, 114]) / 1000.0
    # normalize per http://en.wikipedia.org/wiki/Cross-correlation
    return (data - data.mean()) / data.std()

im1 = get(1)
im2 = get(2)
im3 = get(3)
c11 = c2d(im1, im1, mode='same')  # baseline
c12 = c2d(im1, im2, mode='same')
c13 = c2d(im1, im3, mode='same')
c23 = c2d(im2, im3, mode='same')
print c11.max(), c12.max(), c13.max(), c23.max()
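# A modernized sketch of the same approach, not part of the original answer:
# scipy.misc.imread and the scipy.signal.signaltools import path are gone in
# recent SciPy releases, so a current version might read the image with imageio
# (assumed to be installed) and import correlate2d from scipy.signal directly.
import numpy as np
from imageio.v2 import imread
from scipy.signal import correlate2d as c2d


def get(i):
    data = imread('im%s.jpg' % i)                     # RGB image as an array
    data = np.inner(data, [299, 587, 114]) / 1000.0   # W3C luminance grey-scale
    return (data - data.mean()) / data.std()          # zero mean, unit variance


im1, im2 = get(1), get(2)
c11 = c2d(im1, im1, mode='same')  # self-correlation baseline
c12 = c2d(im1, im2, mode='same')
print(c11.max(), c12.max())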
Example #26
def correlate2d(file, output_folder, pairs):
    logger = logging.getLogger('')
    # load all submatrices and correlate them
    all_arrays = defaultdict(list)
    information_regions = defaultdict(list)
    pairs_dict = defaultdict(str)
    for pair_ix, reference_region, query_region in pairs:
        pairs_dict[int(
            pair_ix)] = str(reference_region) + '-' + str(query_region)

    with open(file, 'r') as r:
        for line in r:
            region, position, x_min, x_max, y_min, y_max = line.split(',')[:6]
            pair_id = pairs_dict[int(region)]
            line_float = [float(x) for x in line.split(',')[6:]]
            height, width = int(y_max) - int(y_min), int(x_max) - int(x_min)
            mat = np.asanyarray(line_float).reshape(int(height), int(width))
            all_arrays[int(position)].append(mat)
            information_regions[int(position)].append((pair_id, int(position)))

    logger.info(
        '[MAIN]: All submatrices loaded, starting 2D cross-correlation')
    tag = file.split('/')[-1].split('_')[0]
    correlation_dataframe = pd.DataFrame(index=range(len(all_arrays.keys())),
                                         columns=range(len(all_arrays.keys())))

    for a, b in tqdm(itertools.combinations(range(len(all_arrays.keys())), 2)):
        c11 = c2d(all_arrays[a][0], all_arrays[b][0], mode='same')
        c112 = c2d(all_arrays[b][0], all_arrays[a][0], mode='same')
        transp1 = c2d(np.fliplr(all_arrays[a][0]),
                      all_arrays[b][0],
                      mode='same')
        transp2 = c2d(np.fliplr(all_arrays[b][0]),
                      all_arrays[a][0],
                      mode='same')
        best = max(c11.max(), c112.max(), transp1.max(), transp2.max())
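        # keep the strongest peak over both argument orders and the left-right
        # flipped inputs, so the score is less sensitive to ordering and mirroring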
        correlation_dataframe.loc[a, b] = best
        correlation_dataframe.loc[b, a] = best
    logger.info('[MAIN]: 2D cross-correlation done')

    correlation_dataframe = correlation_dataframe.replace(np.Inf, np.nan)
    correlation_dataframe = correlation_dataframe.replace(-np.Inf, np.nan)
    scaled = scale(correlation_dataframe.fillna(0.))

    # save dataframe ##
    correlation_dataframe.to_csv(
        path.join(output_folder, 'correlation_dataframe_%s.csv' % (tag)))

    Sum_of_squared_distances = []
    K = range(1, 15)
    for k in K:
        km = KMeans(n_clusters=k)
        km = km.fit(scaled)
        Sum_of_squared_distances.append(km.inertia_)

    kn = KneeLocator(range(1,
                           len(Sum_of_squared_distances) + 1),
                     Sum_of_squared_distances,
                     curve='convex',
                     direction='decreasing')
    optimal_number_clusters = kn.knee

    plt.figure(figsize=(4, 3))
    plt.xlabel('number of clusters k')
    plt.ylabel('Sum of squared distances')
    plt.plot(range(1,
                   len(Sum_of_squared_distances) + 1),
             Sum_of_squared_distances, 'bx-')
    plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
    plt.savefig(path.join(output_folder, 'Elbow_index_%s.pdf' % tag))

    # save positions and belonging cluster
    logger.info('[MAIN]: Classification of the features')
    kmeans = KMeans(n_clusters=optimal_number_clusters,
                    random_state=0,
                    precompute_distances=True).fit(scaled)
    w = open(
        path.join(
            output_folder, 'subregions_%s_clusters_%s.tsv' %
            (str(optimal_number_clusters), tag)), 'a+')
    for n, i in enumerate(list(kmeans.labels_)):
        t1, t2 = information_regions[n][0]
        w.write('{}\t{}\t{}\n'.format('Cluster ' + str(i), t1, t2))
    w.close()
Example #27
 def selfCorrelation(imageArray):    #imageArray is the result of Image.open()
     image = scipy.inner(numpy.asarray(imageArray), [299, 587, 114]) / 1000.0
     image = (image - image.mean())/ image.std()
     selfcorelationimageWithimage = c2d(image, image, mode = 'same')
     return selfcorelationimageWithimage.max()
Example #28
    # convert to grey-scale using W3C luminance calc
    data = sp.inner(data, [299, 587, 114]) / 1000.0
    # normalize per http://en.wikipedia.org/wiki/Cross-correlation
    return (data - data.mean()) / data.std()

pics = os.listdir("D:\\Eigene Datein\\Python\\Image Similarity\\src2\\test_resize75")
index1 = 0
index2 = 0
f = open('out.txt', 'w')
for x in range(len(pics)):
    im = "D:\\Eigene Datein\\Python\\Image Similarity\\src2\\test_resize75\\" + pics[index1]
    #print pics
    for pic in pics:
        try:
            im2 = "D:\\Eigene Datein\\Python\\Image Similarity\\src2\\test_resize75\\" + pics[index2]
            data1 = get(im)
            data2 = get(im2)
            cc = c2d(data1, data2, mode='same')
            f.write(pics[index1] + '\t' + pics[index2] + '\t' + str(cc.max()) + '\n')
            print pics[index1], pics[index2], cc.max()
        except IOError:
            pass
        
        index2 = index2 + 1
    index1 = index1 + 1
    index2 = 0
end = time.clock()
print end-start 
f.close()

Example #29
    lum = [299, 587, 114]
    if len(data[0][0]) > 3:
        lum.append(0)
    data = sp.inner(data, lum) / 1000.0
    # normalize per http://en.wikipedia.org/wiki/Cross-correlation
    return (data - data.mean()) / data.std()


im1 = load_img(sys.argv[1])
im2 = load_img(sys.argv[2])
diff_allowed = float(sys.argv[3])

#pprint([im1.shape])
#pprint([im2.shape])

c11 = c2d(im1, im1, mode='same')  # baseline
c22 = c2d(im2, im2, mode='same')  # baseline
c12 = c2d(im1, im2, mode='same')
m = [c11.max(), c12.max(), c22.max()]
# (42105.00000000259, 39898.103896795357, 16482.883608327804, 15873.465425120798)
# [7100.0000000003838, 7028.5659939232246, 7100.0000000000318]

diff_ab = 100 * (1 - m[1] / m[0])
diff_ba = 100 * (1 - m[1] / m[2])
print "diff a-b: %.2f%%" % (diff_ab)
print "diff b-a: %.2f%%" % (diff_ba)
fail = max(diff_ab, diff_ba) > diff_allowed
if fail: pprint([c11.max(), c12.max(), c22.max()])
print "limit: %.2f%% -> %s" % (diff_allowed, ("OK", "FAIL")[fail])
if fail:
    sys.exit(1)
Example #30
# This is a rough script to test the matching measure.
# Check the file locations before running.
#

import scipy as sp
from scipy.misc import imread
from scipy.signal.signaltools import correlate2d as c2d
import warnings

folder = "resized/"

def get(image):
	data = imread(image)
	data = sp.inner(data, [299, 587, 114]) / 1000.0
	return (data - data.mean()) / data.std()

im1 = get(folder + 'capture1.jpg')
im2 = get(folder + 'capture2.jpg')

with warnings.catch_warnings():
	warnings.simplefilter("ignore")
	# This suppresses a warning about the imaginary part of complex
	# numbers being discarded
	c11 = c2d(im1, im1, mode="same")
	c22 = c2d(im2, im2, mode="same")
	c12 = c2d(im1, im2, mode="same")
	c21 = c2d(im2, im1, mode="same")

print c11.max(), c22.max(), c12.max(), c21.max()
Example #31
     ## guard against "ValueError: matrices are not aligned" when the image has an alpha channel
     lum = [299, 587, 114]
     if len(data[0][0]) > 3:
       lum.append(0)
     data = sp.inner(data, lum) / 1000.0
     # normalize per http://en.wikipedia.org/wiki/Cross-correlation
     return (data - data.mean()) / data.std()

im1 = load_img(sys.argv[1])
im2 = load_img(sys.argv[2])
diff_allowed = float(sys.argv[3])

#pprint([im1.shape])
#pprint([im2.shape])

c11 = c2d(im1, im1, mode='same')  # baseline
c22 = c2d(im2, im2, mode='same')  # baseline
c12 = c2d(im1, im2, mode='same')
m = [c11.max(), c12.max(), c22.max()]
# (42105.00000000259, 39898.103896795357, 16482.883608327804, 15873.465425120798)
# [7100.0000000003838, 7028.5659939232246, 7100.0000000000318]

diff_ab = 100 * (1-m[1]/m[0])
diff_ba = 100 * (1-m[1]/m[2])
print "diff a-b: %.2f%%" % (diff_ab)
print "diff b-a: %.2f%%" % (diff_ba)
fail = max(diff_ab,diff_ba) > diff_allowed
if fail: pprint([c11.max(), c12.max(), c22.max()])
print "limit: %.2f%% -> %s" % (diff_allowed, ("OK","FAIL")[fail])
if fail: sys.exit(1)
im2 = get(dirr + 'mri2.png', 1)

print ''
print ''
print ''
print '-----------------------------------------------------'
print ''
print 'ref image shape:', im1.shape
print ''
print '------------%%%%%%%%%%%%%%%%%%%%%%%------------------'
print ''
print 'moving image shape:', im2.shape
print ''
print '-----------------------------------------------------'

c11 = c2d(im1, im1, mode='same', boundary='symm')  # baseline
imsave(dirr + 'c11.png', c11)
c12 = c2d(im1, im2, mode='same', boundary='symm')
imsave(dirr + 'c12.png', c12)

# c13 = c2d(im1, im3, mode='same')
# c23 = c2d(im2, im3, mode='same')
print c11.max()
print c12.max()  # , c13.max(), c23.max()
print ''
print ''
print ''
perf = c12.max() * 100 / c11.max()
print '-----------------------------------------------------'
print ''
print "perf", perf, "%"
Example #33
import scipy as sp
from scipy.misc import imread
from scipy.signal.signaltools import correlate2d as c2d

def get(i):
	data = imread('im%s.jpg' % i)
	data = sp.inner(data, [299, 587, 114]) / 1000.0
	return (data - data.mean()) / data.std()

im1 = get(1)
im2 = get(2)
im3 = get(3)

#c11 = c2d(im1, im1, mode='same') 
c12 = c2d(im1, im2, mode='same')
#c13 = c2d(im1, im3, mode='same')
#c23 = c2d(im2, im3, mode='same')
#c11.max(), c12.max(), c13.max(), c23.max()
print c12.max()

Example #34
def get(i):  # (function header inferred from the im1 = get(1) calls below)
    data = cv2.imread('im%s.jpg' % i)
    img = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
    short_edge = min(img.shape[0], img.shape[1])
    #                 print(short_edge)
    yy = int((img.shape[0] - short_edge) // 2)
    xx = int((img.shape[1] - short_edge) // 2)
    img = img[yy: yy + short_edge, xx: xx + short_edge]
    # scale the image to a uniform size
    img = cv2.resize(img, (160, 160))
    img = sp.inner(img, [299, 587, 114]) / 1000.0
    print('done')
    return (img - img.mean()) / img.std()


im1 = get(1)
im2 = get(2)
im3 = get(3)
print(im1.shape)
print(im2.shape)
print(im3.shape)

c11 = c2d(im1, im1, mode='same')
# print(c11)
c12 = c2d(im1, im2, mode='same')
# print(c12)
c13 = c2d(im1, im3, mode='same')
# print(c13)

# c11.max(), c12.max(), c13.max()
print(c11.max(), c12.max(), c13.max())
Example #35
#!/usr/bin/env python
import scipy as sp
from scipy.misc import imread
from scipy.signal.signaltools import correlate2d as c2d


def get(fName):
    # get JPG image as Scipy array, RGB (3 layer)
    data = imread(fName)
    # convert to grey-scale using W3C luminance calc
    data = sp.inner(data, [299, 587, 114]) / 1000.0
    # normalize per http://en.wikipedia.org/wiki/Cross-correlation
    return (data - data.mean()) / data.std()

im1 = get("/home/ggdhines/Databases/serengeti/temp2/im2.jpg")

im2 = get("/home/ggdhines/Databases/serengeti/temp2/im3.jpg")
im3 = get("/home/ggdhines/Databases/serengeti/temp2/im1.jpg")
print "analyzing"
c11 = c2d(im1, im1, mode='same')
print c11.max()
c12 = c2d(im1, im2, mode='same')
print c12.max()
c13 = c2d(im1, im3, mode='same')


print c13.max()
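# A possible follow-up, mirroring the earlier examples: the raw peaks read more
# naturally as percentages of the self-correlation baseline.
print "im1 vs im2:", (c12.max() / c11.max()) * 100, "%"
print "im1 vs im3:", (c13.max() / c11.max()) * 100, "%"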