Example #1
    def __init__(self, name):
        """This initializes the object"""
        self.name = name
        self.last_frame = None
        self.current_frame = None
        self.match_frames = None

        #Create an ORB object for keypoint detection
        self.orb = ORB_create(nfeatures=100,
                              scaleFactor=2,
                              edgeThreshold=100,
                              fastThreshold=10)

        #Create a BF object for keypoint matching
        self.bf = BFMatcher(NORM_L1, crossCheck=True)

        #For keeping track of the drone's current velocity
        self.vel_x = None
        self.vel_y = None

        #For plotting the drone's current and past velocities
        self.times = []
        self.x_s = []
        self.y_s = []

        #Creating a publisher that will publish the calculated velocity to the ROS topic /quadrotor/ardrone/calc_vel
        self.pub = rospy.Publisher("/quadrotor/ardrone/calc_vel",
                                   Twist,
                                   queue_size=10)
Example #2
from cv2 import SIFT_create, BFMatcher
import numpy as np


def get_H(img1, img2):
    '''
    img1 - center
    img2 - panned
    '''

    # detect keypoints and their descriptors for both images using SIFT
    sift = SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # Keypoint matching
    bf = BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []

    for m in matches:
        if (m[0].distance < 0.5 * m[1].distance):
            good.append(m)
    matches = np.asarray(good)

    src = np.float32([kp1[m.queryIdx].pt
                      for m in matches[:, 0]]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt
                      for m in matches[:, 0]]).reshape(-1, 1, 2)

    # Estimate the homography matrix with the author's RANSAC helper
    H = ransacHomography(src[:200, 0, :], dst[:200, 0, :])

    return H
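
A minimal usage sketch for get_H. The file names are hypothetical, and ransacHomography is assumed to be the author's RANSAC helper defined elsewhere; whether H maps the center image onto the panned one or the reverse follows that helper's convention.

from cv2 import imread, warpPerspective, IMREAD_GRAYSCALE

img1 = imread("center.jpg", IMREAD_GRAYSCALE)  # hypothetical filenames
img2 = imread("panned.jpg", IMREAD_GRAYSCALE)
H = get_H(img1, img2)
# Warp img1 with H; invert H (or swap the arguments) if the
# helper's convention maps in the other direction.
h, w = img2.shape
aligned = warpPerspective(img1, H, (w, h))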
Example #3
    def __init__(self, number_of_features: int = None):
        if number_of_features is None:
            number_of_features = self.DEFAULT_NUMBER_OF_FEATURES
        self.number_of_features = number_of_features

        self.features_extractor = ORB_create(
            nfeatures=self.number_of_features)

        # ORB uses binary descriptors -> use Hamming norm (XOR between descriptors)
        self.features_matcher = BFMatcher(NORM_HAMMING, crossCheck=True)
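
The comment above is the key design point: ORB descriptors are bit strings, so the distance between two of them is the Hamming norm, i.e. the popcount of their XOR. A tiny self-contained check of that claim with cv2.norm:

import numpy as np
from cv2 import norm, NORM_HAMMING

a = np.array([[0b10110010]], dtype=np.uint8)
b = np.array([[0b10010011]], dtype=np.uint8)
# XOR = 0b00100001 -> two differing bits
print(norm(a, b, NORM_HAMMING))  # 2.0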
Example #4
  def __init__(self, K, nfeatures=5000, norm=NORM_HAMMING, crosscheck=False):
    self.orb = ORB_create(nfeatures)
    self.bfm = BFMatcher(norm, crosscheck)
    self.orb_mask = None
    self.last = None
    self.K = K
    self.Kinv = inv(self.K)

    self.f_est_avg = []
Example #5
from cv2 import BFMatcher


def knn_matcher(descriptors1, descriptors2, nndist=0.75):
    bf = BFMatcher()  # default L2 norm suits float descriptors such as SIFT's
    # Match descriptors with 2 nearest neighbours
    matches = bf.knnMatch(descriptors1.fft, descriptors2.fft, k=2)
    good_matches = []
    for m, n in matches:
        if m.distance < nndist * n.distance:
            good_matches.append(m)
    return good_matches
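
The .fft attribute suggests the descriptors arrive wrapped in one of the author's own objects; a minimal usage sketch with a hypothetical stand-in for that wrapper and hypothetical file names:

from collections import namedtuple
from cv2 import SIFT_create, imread, IMREAD_GRAYSCALE

Desc = namedtuple("Desc", "fft")  # hypothetical stand-in for the author's wrapper

sift = SIFT_create()
img_a = imread("a.png", IMREAD_GRAYSCALE)  # hypothetical filenames
img_b = imread("b.png", IMREAD_GRAYSCALE)
_, des_a = sift.detectAndCompute(img_a, None)
_, des_b = sift.detectAndCompute(img_b, None)
good = knn_matcher(Desc(des_a), Desc(des_b))
print(len(good), "matches passed the ratio test")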
Example #6
from os import listdir
from time import sleep

from cv2 import imread, ORB_create, BFMatcher, NORM_HAMMING, IMREAD_GRAYSCALE
from win32api import mouse_event
from win32con import MOUSEEVENTF_WHEEL

# Screenshot() and info() come from elsewhere in this project
# (info is plausibly logging.info).
def SeekDuelist():
    for _ in range(4):
        info("SeekDuelist() getting screenshot")
        #get screen image
        Screenshot()
        #iterate through sprites
        info("SeekDuelist() iterating through duelists")
        for image in listdir("TEMPLATES\\characters\\"):
            #set sprite path
            pic = "TEMPLATES\\characters\\" + image
            #read sprite image
            img1 = imread(pic, IMREAD_GRAYSCALE)
            #read screen image
            img2 = imread("screen.png", IMREAD_GRAYSCALE)

            #set ORB object
            orb = ORB_create(100000)

            #feature detect sprite
            kp1, des1 = orb.detectAndCompute(img1, None)
            #feature detect screen
            kp2, des2 = orb.detectAndCompute(img2, None)

            #match features from sprite and screen
            bf = BFMatcher(NORM_HAMMING, crossCheck=True)
            matches = bf.match(des1, des2)
            #sort accuracy of matches
            matches = sorted(matches, key=lambda m: m.distance)

            #select accurate matches
            found = []
            for m in matches:
                if m.distance <= 20:
                    found.append(m.distance)

            #check if accurate matches were found
            if found:

                #set image info
                img2_idx = matches[0].trainIdx
                
                # Get the coordinates
                # x - left to right
                # y - top to bottom
                (x, y) = kp2[img2_idx].pt
                info("SeekDuelist() match found for "+image+" at ("+str(int(x))+", "+str(int(y))+")")
                return (int(x), int(y))
        #Scroll down once to change screens
        info("SeekDuelist() no matches found. scrolling")
        mouse_event(MOUSEEVENTF_WHEEL, 0, 0, -1, 0)
        sleep(2)
    info("SeekDuelist() no matches found. raising exception")
    raise Exception("SeekDuelist() found no duelist")
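
The core of SeekDuelist, locating a template on a screen capture by ORB descriptor matching, can be isolated from the bot's screenshot and mouse machinery; a self-contained sketch, with hypothetical file paths:

from cv2 import imread, ORB_create, BFMatcher, NORM_HAMMING, IMREAD_GRAYSCALE

def locate_template(template_path, screen_path, max_distance=20):
    """Return the screen coordinates of the best match, or None."""
    img1 = imread(template_path, IMREAD_GRAYSCALE)
    img2 = imread(screen_path, IMREAD_GRAYSCALE)
    orb = ORB_create(100000)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    if des1 is None or des2 is None:
        return None
    bf = BFMatcher(NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
    if matches and matches[0].distance <= max_distance:
        x, y = kp2[matches[0].trainIdx].pt
        return int(x), int(y)
    return None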
Example #8
def closestImageSimple(desc, imageLib, nb_el=5):
    """
		Take an image descriptor list, and a list of image with their descriptors
	"""
    # create BFMatcher object
    from cv2 import BFMatcher, NORM_HAMMING
    nbmatches = []
    matcher = BFMatcher(NORM_HAMMING, crossCheck=True)
    for img2 in imageLib:
        # each entry appears to be a (name, (keypoints, descriptors)) tuple
        currentmatches = matcher.match(desc, img2[1][1])
        nbmatches.append((img2, len(currentmatches), currentmatches))
    return sorted(nbmatches, key=lambda x: x[1])[-nb_el:]
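
Judging by the img2[0] and img2[1][1] accesses, each imageLib entry is a (name, (keypoints, descriptors)) tuple; a usage sketch that builds such a library with ORB, using hypothetical file names:

from cv2 import imread, ORB_create, IMREAD_GRAYSCALE

orb = ORB_create()
imageLib = []
for name in ["a.png", "b.png"]:  # hypothetical filenames
    img = imread(name, IMREAD_GRAYSCALE)
    kps, des = orb.detectAndCompute(img, None)
    imageLib.append((name, (kps, des)))

query = imread("query.png", IMREAD_GRAYSCALE)
_, qdes = orb.detectAndCompute(query, None)
# the nb_el entries with the most matches come last in the returned list
top = closestImageSimple(qdes, imageLib)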
Example #9
def closestImage(desc, key_images, key_desc, nb_el=5):
    """
		Take an image descriptor list, and a list of image with their descriptors
	"""
    # create BFMatcher object (default L2 norm)
    from cv2 import BFMatcher
    nbmatches = []
    for j, img2 in enumerate(key_images):
        try:
            currentmatches = BFMatcher().knnMatch(desc, key_desc[j], k=2)
            good = []
            visited = []
            for c in currentmatches:
                if (len(c) == 2):
                    m, n = c
                    if m.distance < 0.75 * n.distance:
                        if not (m.trainIdx in visited) and not (m.queryIdx
                                                                in visited):
                            visited.append(m.trainIdx)
                            visited.append(m.queryIdx)
                            good.append([m])
            nbmatches.append((img2, len(good), good))
        except Exception:
            print('error')
            print('in image', img2)
            # `erreurs` is assumed to be a module-level dict counting failures
            if img2 in erreurs:
                erreurs[img2] += 1
            else:
                erreurs[img2] = 1
    return sorted(nbmatches, key=lambda x: x[1])[-nb_el:]
Example #10
def closestImage(desc, imageLib, nb_el=5):
    """
		Take an image descriptor list, and a list of image with their descriptors
	"""
    # create BFMatcher object (default L2 norm)
    from cv2 import BFMatcher
    nbmatches = []
    for j, img2 in enumerate(imageLib):
        nameComp = img2[0]
        currentmatches = BFMatcher().knnMatch(queryDescriptors=desc,
                                              trainDescriptors=img2[1][1],
                                              k=2)
        good = []
        visited = []
        for c in currentmatches:
            if (len(c) == 2):
                m, n = c
                if m.distance < 0.75 * n.distance:
                    if not (m.trainIdx in visited) and not (m.queryIdx
                                                            in visited):
                        visited.append(m.trainIdx)
                        visited.append(m.queryIdx)
                        good.append([m])
        nbmatches.append((img2, len(good), good))
    return sorted(nbmatches, key=lambda x: x[1])[-nb_el:]
Example #11
class OrbFeaturesHandler(FeaturesHandlerAbstractBase):
    """
    This class implements an ORB features exractor
    """
    features_extractor: ORB
    features_matcher: BFMatcher

    DEFAULT_DISTANCE_THRESHOLD_FOR_SUCCESSFULL_FEATURE_MATCH: int = 10

    DEFAULT_NUMBER_OF_FEATURES = 1000
    number_of_features: int

    def __init__(self, number_of_features: int = None):
        if number_of_features is None:
            number_of_features = self.DEFAULT_NUMBER_OF_FEATURES
        self.number_of_features = number_of_features

        self.features_extractor = ORB_create(
            nfeatures=self.number_of_features)

        # ORB uses binary descriptors -> use Hamming norm (XOR between descriptors)
        self.features_matcher = BFMatcher(NORM_HAMMING, crossCheck=True)

    def extract_features(self, frame: np.ndarray) -> ProcessedFrameData:
        """
        This method extracts ORB features
        """
        list_of_keypoints, descriptors = self.features_extractor.detectAndCompute(
            image=frame, mask=None)
        return ProcessedFrameData.build(frame=frame,
                                        list_of_keypoints=list_of_keypoints,
                                        descriptors=descriptors)

    def match_features(self, frame_1: ProcessedFrameData,
                       frame_2: ProcessedFrameData):
        """
        This method matches ORB features
        based on https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
        """
        list_of_matches: List[DMatch] = self.features_matcher.match(
            queryDescriptors=frame_1.descriptors,
            trainDescriptors=frame_2.descriptors)
        # Sort the matches in the order of their distance
        return sorted(list_of_matches, key=lambda x: x.distance)

    def is_handler_capable(self, frame: np.ndarray) -> bool:
        """
        This method implements ORB handler capability test
        """
        extracted_features: ProcessedFrameData = self.extract_features(
            frame=frame)
        return len(extracted_features.list_of_keypoints) >= int(
            0.9 * self.number_of_features)
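
A brief usage sketch for OrbFeaturesHandler, assuming ProcessedFrameData and FeaturesHandlerAbstractBase are importable from the author's module; the frames below are synthetic stand-ins:

import numpy as np

handler = OrbFeaturesHandler()
frame_1 = (np.random.rand(480, 640) * 255).astype(np.uint8)  # stand-in frame
frame_2 = np.roll(frame_1, 5, axis=1)  # horizontally shifted copy
if handler.is_handler_capable(frame_1):
    p1 = handler.extract_features(frame_1)
    p2 = handler.extract_features(frame_2)
    matches = handler.match_features(p1, p2)
    if matches:
        print(len(matches), "matches; best distance:", matches[0].distance)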
Example #12
class ROS_Video_Node():
    def __init__(self, name):
        """This initializes the object"""
        self.name = name
        self.last_frame = None
        self.current_frame = None
        self.match_frames = None

        #Create an ORB object for keypoint detection
        self.orb = ORB_create(nfeatures=100,
                              scaleFactor=2,
                              edgeThreshold=100,
                              fastThreshold=10)

        #Create a BF object for keypoint matching
        self.bf = BFMatcher(NORM_L1, crossCheck=True)

        #For keeping track of the drone's current velocity
        self.vel_x = None
        self.vel_y = None

        #For plotting the drone's current and past velocities
        self.times = []
        self.x_s = []
        self.y_s = []

        #Creating a publisher that will publish the calculated velocity to the ROS topic /quadrotor/ardrone/calc_vel
        self.pub = rospy.Publisher("/quadrotor/ardrone/calc_vel",
                                   Twist,
                                   queue_size=10)

    def detect_motion(self):
        """This function detects the motion between the current and last frame."""
        if self.last_frame is None:
            return None
        if (self.last_frame == self.current_frame).all():
            print(
                "Beep boop! I think my current frame is exactly the same as my last frame. \nEither I'm not moving, or something is very wrong.\n"
            )
            namedWindow(self.name)
            imshow(self.name, self.current_frame)
            waitKey(1)
            return None

        #This finds the keypoints and descriptors with ORB
        kp1, des1 = self.orb.detectAndCompute(self.last_frame, None)
        kp2, des2 = self.orb.detectAndCompute(self.current_frame, None)

        #Match descriptors
        matches = self.bf.match(des1, des2)

        #Sort them in the order of their distance
        matches = sorted(matches, key=lambda x: x.distance)

        #No distance filtering for now; an earlier experiment filtered
        #matches by distance here to gauge how noisy the data would get
        filtered_matches = matches

        if len(filtered_matches) < 5:
            print(
                "Beep boop! Not enough good matches were found. This data is unreliable. \n Try moving me above the building, please.\n"
            )
            namedWindow(self.name)
            imshow(self.name, self.current_frame)
            waitKey(1)
            return None

        #create arrays of the coordinates for keypoints in the two frames
        kp1_coords = asarray(
            [kp1[mat.queryIdx].pt for mat in filtered_matches])
        kp2_coords = asarray(
            [kp2[mat.trainIdx].pt for mat in filtered_matches])

        #calculate the translations needed to get from the first set of keypoints to the next set of keypoints
        translations = kp2_coords - kp1_coords

        #Find the single translation that best explains all of the matches
        least_error = 5
        best_translation = None
        for translation in translations:
            a = translation[0] * ones((translations.shape[0], 1))
            b = translation[1] * ones((translations.shape[0], 1))
            test_translation = concatenate((a, b), axis=1)
            test_coords = kp1_coords + test_translation
            error = sqrt(mean(square(kp2_coords - test_coords)))
            if error < least_error:
                least_error = error
                best_translation = translation

        if best_translation is not None:
            self.vel_x = best_translation[0]
            self.vel_y = best_translation[1]

        # Draw the matches.
        draw_params = dict(
            matchColor=(0, 255, 0),  # draw matches in green color
            singlePointColor=None,
            matchesMask=None,  # draw all matches
            flags=2)
        self.match_frames = drawMatches(self.last_frame, kp1,
                                        self.current_frame, kp2, matches, None,
                                        **draw_params)

        namedWindow(self.name)
        imshow(self.name, self.match_frames)
        waitKey(1)

        if least_error < 2:
            print("This is a new match")
            print("X velocity: ", self.vel_x)
            print("Y velocity: ", self.vel_y)
            print("least_error: ", least_error)

            #Publish the velocities found
            calc_vel = Twist()
            calc_vel.linear.x = self.vel_x
            calc_vel.linear.y = self.vel_y

            self.pub.publish(calc_vel)

    def plot_current_vel(self):
        """This function live plots the quadrotor's measured velocity"""
        #save the current velocities
        self.times.append(time())
        self.x_s.append(self.vel_x)
        self.y_s.append(self.vel_y)

        #plot the velocities
        plt.title("Live Velocities")
        plt.ylabel("Velocity")
        plt.xlabel("Time")
        plt.ylim(-20, 20)
        try:
            plt.plot(self.times[-60:],
                     self.x_s[-60:],
                     'r-',
                     label="X Velocity")
            plt.plot(self.times[-60:],
                     self.y_s[-60:],
                     'b-',
                     label="Y Velocity")
        except Exception:
            plt.plot(self.times, self.x_s, 'r-', label="X Velocity")
            plt.plot(self.times, self.y_s, 'b-', label="Y Velocity")
        plt.legend(loc="upper right")
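
The best-translation search in detect_motion is a one-sample consensus: every matched pair proposes a translation, and the candidate with the lowest RMS error over all pairs wins (the original only accepts it below an error threshold of 5). The same computation vectorized with NumPy, as a sketch:

import numpy as np

def best_translation(kp1_coords, kp2_coords):
    """Pick the candidate translation with the lowest RMS error (inputs: Nx2 arrays)."""
    translations = kp2_coords - kp1_coords  # one candidate per match
    # diffs[i] holds the residuals if candidate i were applied to every keypoint
    diffs = kp2_coords[None, :, :] - (kp1_coords[None, :, :] + translations[:, None, :])
    errors = np.sqrt((diffs ** 2).mean(axis=(1, 2)))
    i = errors.argmin()
    return translations[i], errors[i]  # caller applies the acceptance threshold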
Example #13
class KPExtractor(object):
  def __init__(self, K, nfeatures=5000, norm=NORM_HAMMING, crosscheck=False):
    self.orb = ORB_create(nfeatures)
    self.bfm = BFMatcher(norm, crosscheck)
    self.orb_mask = None
    self.last = None
    self.K = K
    self.Kinv = inv(self.K)

    self.f_est_avg = []

  def normalize(self, pts):
    return dot(self.Kinv, self.add_ones(pts).T).T[:, 0:2]

  def denormalize(self, pt):
    ret = dot(self.K, array([pt[0], pt[1], 1.0]).T)
    ret /= ret[2]
    return int(round(ret[0])), int(round(ret[1])), int(round(ret[2]))

  def build_orb_mask(self, frame):
    h, w = frame.shape
    c = 1
    self.orb_mask = zeros((h, w, c), dtype=uint8)
    rectangle(self.orb_mask, (0, 0), (w, int(h * 0.75)), (255, 255, 255), -1)
    # self.orb_mask = cvtColor(self.orb_mask,COLOR_BGR2GRAY)

  def extractRt(self, E):
    U, _, Vt = svd(E)
    assert det(U) > 0
    if det(Vt) < 0:
      Vt *= -1.0

    #Find R and t as in Hartley and Zisserman
    W = mat([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=float)
    R = dot(dot(U, W), Vt)
    if sum(R.diagonal()) < 0:
      R = dot(dot(U, W.T), Vt)
    t = U[:, 2]
    Rt = concatenate([R, t.reshape(3, 1)], axis=1)
    return Rt
      
  # [x, y] -> [x, y, 1]
  def add_ones(self, x):
    return concatenate([x, ones((x.shape[0], 1))], axis=1)

  def extract(self, frame, maxCorners=5000, qualityLevel=.01, minDistance=3):
    #Detect
    feats = goodFeaturesToTrack(frame, maxCorners, qualityLevel, minDistance)  #,mask=self.orb_mask
    #Extract
    kps = [KeyPoint(x=f[0][0], y=f[0][1], _size=20) for f in feats]
    kps, des = self.orb.compute(frame, kps)

    #Match: the query image is the current frame,
    #the train image is the previous frame
    ret = []
    if self.last is not None:
      matches = self.bfm.knnMatch(des, self.last['des'], k=2)
      for m, n in matches:
        if m.distance < 0.7 * n.distance:
          ret.append((kps[m.queryIdx].pt, self.last['kps'][m.trainIdx].pt))

    #Filter with RANSAC on the essential matrix
    Rt = None
    if len(ret) > 0:
      ret = array(ret)
      #Normalize
      ret[:, 0, :] = self.normalize(ret[:, 0, :])
      ret[:, 1, :] = self.normalize(ret[:, 1, :])

      try:
        model, inliers = ransac((ret[:, 0], ret[:, 1]),
                                #FundamentalMatrixTransform,
                                EssentialMatrixTransform,
                                min_samples=8,
                                residual_threshold=0.005,
                                max_trials=200)
        ret = ret[inliers]
        Rt = self.extractRt(model.params)
      except Exception:
        pass

    self.last = {'kps': kps, 'des': des}
    return ret, Rt


  #warp is the pattern for now
  def homography(self, last_puzzle, puzzle, warp, maxCorners=5000, qualityLevel=.01, minDistance=3):
    #Detect (NOTE: the original referenced an undefined `frame`;
    #`puzzle` is assumed to be the current frame here)
    f_feats = goodFeaturesToTrack(puzzle, maxCorners, qualityLevel, minDistance)
    #Extract
    f_kps = [KeyPoint(x=f[0][0], y=f[0][1], _size=20) for f in f_feats]
    f_kps, f_des = self.orb.compute(puzzle, f_kps)

    #Detect
    w_feats = goodFeaturesToTrack(warp, maxCorners, qualityLevel, minDistance)
    #Extract
    w_kps = [KeyPoint(x=f[0][0], y=f[0][1], _size=20) for f in w_feats]
    w_kps, w_des = self.orb.compute(warp, w_kps)

    #Match: the query image is the pattern (warp),
    #the train image is the current frame
    ret = []
    matches = self.bfm.knnMatch(w_des, f_des, k=2)
    for m, n in matches:
      # if m.distance <= 1 * n.distance:
      ret.append((w_kps[m.queryIdx].pt, f_kps[m.trainIdx].pt))

    #Filter
    H, mask, warp_pts, orig_pts = None, None, None, None
    if len(ret) > 0:
        warp_pts = float32([r[0] for r in ret]).reshape(-1, 1, 2)
        orig_pts = float32([r[1] for r in ret]).reshape(-1, 1, 2)
        H, mask = findHomography(warp_pts, orig_pts, RANSAC, 5.0)

    return H, mask, warp_pts, orig_pts

  def project_matrix(self,homography):
    homography *= (-1)
    rot_and_transl = dot(self.Kinv, homography)
    col_1 = rot_and_transl[:, 0]
    col_2 = rot_and_transl[:, 1]
    col_3 = rot_and_transl[:, 2]
    # normalise vectors
    scale = sqrt(norm(col_1, 2) * norm(col_2, 2))
    rot_1 = col_1 / scale
    rot_2 = col_2 / scale
    translation = col_3 / scale
    # compute the orthonormal basis
    c = rot_1 + rot_2
    p = cross(rot_1, rot_2)
    d = cross(c, p)
    rot_1 = dot(c / norm(c, 2) + d / norm(d, 2), 1 / sqrt(2))
    rot_2 = dot(c / norm(c, 2) - d / norm(d, 2), 1 / sqrt(2))
    rot_3 = cross(rot_1, rot_2)
    # finally, compute the 3D projection matrix from the model to the current frame
    projection = stack((rot_1, rot_2, rot_3, translation)).T
    return dot(self.K, projection)
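
A usage sketch for KPExtractor, assuming grayscale video frames and a 3x3 intrinsics matrix K (both stand-ins here); skimage's ransac and EssentialMatrixTransform must be importable in the class's module:

import numpy as np
from cv2 import VideoCapture, cvtColor, COLOR_BGR2GRAY

K = np.array([[700, 0, 320],
              [0, 700, 240],
              [0, 0, 1]], dtype=float)  # stand-in intrinsics
kpe = KPExtractor(K)

cap = VideoCapture("video.mp4")  # hypothetical file
ok, frame = cap.read()
while ok:
    gray = cvtColor(frame, COLOR_BGR2GRAY)
    matches, Rt = kpe.extract(gray)
    if Rt is not None:
        print("estimated pose [R|t]:\n", Rt)
    ok, frame = cap.read()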
Example #14
from math import ceil

import numpy as np
import numpy.linalg as LA
from cv2 import SIFT_create, BFMatcher

# RANSAC, projektuj, and iseci are the author's own helpers;
# LOGUJ is a module-level logging flag.
def spoj(leva, desna, sleva=False, nove=None, orig=None):
    # If the point correspondences were not supplied, they must be found
    if nove is None or orig is None \
       or not nove or not orig \
       or len(nove) != len(orig) \
       or len(nove) < 4:
        # Log the action
        if LOGUJ:
            print()
            print('Searching for correspondences.')

        # Use the SIFT (scale-invariant feature transform)
        # algorithm to find interest points in the images
        sift = SIFT_create()
        kpl, desl = sift.detectAndCompute(leva, None)
        kpd, desd = sift.detectAndCompute(desna, None)

        # Match the obtained descriptors by
        # brute-force nearest neighbours
        parovi = BFMatcher().knnMatch(desd, desl, k=2)

        # Filter the pairs by discarding those that are too
        # distant; this is not strictly necessary, but it eases
        # RANSAC's job and speeds it up considerably
        bliski = [m for m, n in parovi if m.distance < 0.5 * n.distance]

        # At least four pairs are needed to
        # determine the projection
        if len(bliski) < 4:
            raise ValueError

        # Extract the originals (from the right image) and
        # their images (from the left image) for the projection
        orig = np.float32([kpd[m.queryIdx].pt for m in bliski]).reshape(-1, 2)
        nove = np.float32([kpl[m.trainIdx].pt for m in bliski]).reshape(-1, 2)

        # Log the action
        if LOGUJ:
            print('Correspondences selected successfully.')
    elif LOGUJ:
        print()

    # Log the action
    if LOGUJ:
        print('Determining the transformation.')

    # Compute the projection matrix
    M = RANSAC(nove, orig)
    if sleva:
        M = LA.inv(M)

    # Log the action
    if LOGUJ:
        print('Transformation determined successfully.')

    # Dimensions of the input images
    dim1 = leva.shape[1], leva.shape[0]
    dim2 = desna.shape[1], desna.shape[0]

    if sleva:
        dim1, dim2 = dim2, dim1

    # Find the corner points that fall outside the image
    cosk = np.array([[0, 0, 1], [dim2[0] - 1, 0, 1], [0, dim2[1] - 1, 1],
                     [dim2[0] - 1, dim2[1] - 1, 1]])
    cosk = np.array([*map(lambda x: M @ x, cosk)])
    cosk = np.array([*map(lambda x: [x[0] / x[2], x[1] / x[2], 1], cosk)])
    mini = cosk[:, 0].min(), cosk[:, 1].min()
    mini = [*map(lambda x: abs(ceil(min(x, 0))), mini)]

    # New matrix, augmented with a translation that brings
    # previously invisible elements onto the image
    M = np.array([[1, 0, mini[0]], [0, 1, mini[1]], [0, 0, 1]]) @ M

    # Dimensions of the non-fixed image
    cosk = np.array(
        [*map(lambda x: [x[0] + mini[0], x[1] + mini[1], 1], cosk)])
    dim = (ceil(max(cosk[:, 0].max() + 1, dim1[0] + mini[0])),
           ceil(max(cosk[:, 1].max() + 1, dim1[1] + mini[1])))

    # Bounding box of the non-fixed image, to save
    # time; if the new image's dimensions are much larger
    # than the original's, there is no need to look at
    # black pixels
    minx = int(ceil(cosk[:, 0].min()))
    maxx = int(ceil(cosk[:, 0].max())) + 1
    miny = int(ceil(cosk[:, 1].min()))
    maxy = int(ceil(cosk[:, 1].max())) + 1
    gran = (miny, minx), (maxy, maxx)

    # Store the fixed image and the image to be
    # transformed under more informative names
    fiksna = leva
    transf = desna

    if sleva:
        fiksna, transf = transf, fiksna

    # Log the action
    if LOGUJ:
        print(f'Transforming the {"left" if sleva else "right"} image.')

    # Transform the non-fixed image
    transf = projektuj(transf, M, dim, gran)

    # Log the action
    if LOGUJ:
        print('Transformation applied successfully.')

    # Log the action
    if LOGUJ:
        print('Stitching the images.')

    # Horizontal bounds of the overlap
    if sleva:
        lgran = mini[0]
        dgran = maxx
    else:
        lgran = minx
        dgran = dim1[0] + mini[0]

    # Put the fixed image into place; first handle
    # the parts before and after the boundary
    if sleva:
        transf[mini[1]:dim1[1]+mini[1],
                dgran :dim1[0]+mini[0]] = \
                     [[fiksna[i-mini[1],j-mini[0]]
               for j in range( dgran , dim1[0]+mini[0])]
               for i in range(mini[1], dim1[1]+mini[1])]
    else:
        transf[mini[1]:dim1[1]+mini[1],
               mini[0]:     lgran     ] = \
                     [[fiksna[i-mini[1],j-mini[0]]
               for j in range(mini[0],      lgran     )]
               for i in range(mini[1], dim1[1]+mini[1])]

    # Predicate for filtering black pixels
    crn = lambda p: all(map(lambda x: x == 0, p))

    # Pixel interpolation function
    duzina = dgran - lgran + 1
    if sleva:
        pros = lambda y, x, j: (dgran - j) / duzina * x + (j - lgran + 1
                                                           ) / duzina * y
    else:
        pros = lambda x, y, j: (dgran - j) / duzina * x + (j - lgran + 1
                                                           ) / duzina * y

    # Weighted averaging (interpolation) of the
    # non-black pixels inside the boundary strip
    transf[mini[1]:dim1[1] + mini[1], lgran:dgran] = [[
        transf[i, j] if crn(fiksna[i - mini[1],
                                   j - mini[0]]) else fiksna[i - mini[1],
                                                             j - mini[0]]
        if crn(transf[i, j]) else pros(fiksna[i - mini[1],
                                              j - mini[0]], transf[i, j], j)
        for j in range(lgran, dgran)
    ] for i in range(mini[1], dim1[1] + mini[1])]

    # Log the action
    if LOGUJ:
        print('Images stitched successfully.')

    # Crop the empty borders
    return iseci(transf)
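
A usage sketch for spoj ("stitch"): it warps one input onto the other and blends the overlap. RANSAC, projektuj, and iseci are the author's helpers from the same project, and the file names below are hypothetical:

from cv2 import imread, imwrite

leva = imread("left.jpg")  # hypothetical filenames
desna = imread("right.jpg")
panorama = spoj(leva, desna)  # warps the right image onto the left
# panorama = spoj(leva, desna, sleva=True)  # or warp the left one instead
imwrite("panorama.jpg", panorama)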