Ejemplo n.º 1
0
def main():
    """Entry point: parse CLI args, build the image and the bot, and run.

    If the bot raises NeedUserInteraction (e.g. a token challenge), the
    user is prompted and the bot is re-run once they confirm with 'y'.
    """
    args = parse_args()

    proxy = setup_proxy(args.proxy_url, args.proxy_auth)

    # A non-empty QR text overrides the input file with a generated QR code.
    if args.QR_text != "":
        args.file = "./img/QRcode.png"
        Image.create_QR_image(args.QR_text, args.QR_scale)

    image = Image(args.file, args.round_sensitive, args.image_brightness)

    bot = Bot(image, args.fingerprint, args.start_x, args.start_y,
              args.mode_defensive, args.colors_ignored,
              args.colors_not_overwrite, args.min_range, args.max_range,
              proxy, args.draw_strategy, args.xreversed, args.yreversed)

    bot.init()

    # Resolve the correct input function once (raw_input on Python 2,
    # input on Python 3) instead of catching NameError on every retry.
    try:
        prompt = raw_input
    except NameError:
        prompt = input

    def run():
        try:
            bot.run()
        except NeedUserInteraction as exception:
            alert(str(exception))
            if prompt(I18n.get('token_resolved')).strip() == 'y':
                run()

    run()
Ejemplo n.º 2
0
def classification_score():
    """
    Compute the classification metric of the basic CNN on 1000 samples.
    """
    basic = BasicCNN()
    # NOTE(review): `model` is never used below; get_model() may have
    # side effects needed by evaluate_model — confirm before removing.
    model = basic.get_model()
    data_training, labels = get_dataset_classification_only(1000)

    predictions = basic.evaluate_model(np.array(data_training))
    # Wrap both predictions and ground truth as Image records
    # (id, class, 0, 0, 0, 0) so the shared metric helper can compare them.
    predicted = [Image(idx, np.argmax(pred[0]), 0, 0, 0, 0)
                 for idx, pred in enumerate(predictions)]
    expected = [Image(idx, label, 0, 0, 0, 0)
                for idx, label in enumerate(labels)]
    return calculate_metric_classification(predicted, expected)
Ejemplo n.º 3
0
def generate_img_1b(C=400):
    '''Image "Electric field lines around a negative point charge"
    (orig.: "Linie pola elektr. wokol punktowego ladunku ujemnego").'''
    canvas = Image((C * 2, C * 2), line_width=3)
    # Blue charge circle with a horizontal minus sign inside it.
    canvas.draw_circle(Circle((C, C), int(C * 0.4)), fill=[0, 0, 190],
                       width=10)
    canvas.draw_line(
        Line((int(C + (C * 0.1)), C), (int(C - (C * 0.1)), C)),
        fill=[0, 0, 190])
    # Twelve field vectors evenly spaced around the charge, all pointing
    # inward (toward the negative charge).
    for deg in range(0, 360, 360 // 12):
        outward = Vector.from_rotation((C, C), deg, int(C * 0.95))
        inward = Vector.from_reversed(outward)
        inward.scale(0.45)
        inward.round()
        canvas.draw_vector(inward)
    canvas.save('img/img_1b.png')
Ejemplo n.º 4
0
    def run(self):
        """Main bot loop: generate a quote, pick a matching Unsplash photo,
        draw the quote onto it and post the result to reddit.

        Runs until KeyboardInterrupt; sleeps GENERATION_TIMEOUT between
        successful posts and 60 seconds after an unexpected error.
        """
        while True:
            try:
                quote = quotes_generator.generate()[0]

                quote = self.clean_quote(quote)

                self.logger.info('Quote: %s' % quote)

                # Skip quotes that would not fit on the image.
                if len(quote) > self.max_quote_length:
                    self.logger.warning('Quote too long!')
                    continue

                quote_vector = self.similarity.get_vector(text=quote)
                keywords = self.similarity.get_keywords(
                    text=quote, num=config.NUMBER_OF_KEYWORDS)
                self.logger.info('Keywords: %s' % ','.join(keywords))

                photos = self.unsplash.get_photos(
                    query=','.join(keywords),
                    num=config.UNSPLASH_PHOTOS_TO_ANALYSE)
                self.logger.info('Unsplash found photos: %s' % len(photos))

                if not photos:
                    self.logger.warning('No Unsplash photos found!')
                    continue

                # Pick the photo semantically closest to the quote vector.
                photo = self.get_best_photo(photos=photos,
                                            quote_vector=quote_vector)
                self.logger.info(photo)

                image = Image(url=photo[1])
                image.draw_text(text=quote)
                image_file_path = image.save(
                    file_path=config.QUOTES_IMAGES_PATH)

                self.reddit.post(text='<...>%s<...> %s, %s' %
                                 (quote, '\n', photo[3]),
                                 image_path=image_file_path)

                self.logger.info('Posted to reddit!')

                time.sleep(config.GENERATION_TIMEOUT)
            except KeyboardInterrupt:
                self.logger.info('Stopping!')
                break
            except Exception:
                # logger.exception already appends the traceback; no need
                # to format it manually. Back off before retrying so the
                # loop never crashes on transient failures.
                self.logger.exception('Unexpected error in bot loop')
                time.sleep(60)
Ejemplo n.º 5
0
    def create_randomImages(self, number):
        """Build `number` Image records with a random spot count (0-2)."""
        images = []
        for idx in range(number):
            spot_count = random.randint(0, 2)
            if spot_count == 0:
                # No spots at all.
                images.append(Image(idx, 0, 0, 0, 0, 0))
            elif spot_count == 1:
                # One random spot; second spot coordinates zeroed.
                images.append(Image(idx, 1,
                                    random.random() * 24,
                                    random.random() * 24, 0, 0))
            else:
                # Two random spots. NOTE(review): the class field is 1
                # here as well (not 2) — mirrors the original code.
                images.append(Image(idx, 1,
                                    random.random() * 24,
                                    random.random() * 24,
                                    random.random() * 24,
                                    random.random() * 24))

        return images
Ejemplo n.º 6
0
    def __preview_art(self, name: str, path: str, contrast: bool,
                      negative: bool, sharpen: bool, emboss: bool,
                      grayscale: str) -> None:
        """
        Qt slot for image previewing.

        Builds an Image from the given name, path and filter flags and
        forwards it to the on_preview_art callback.

        Args:
            name: str
                Image's name.
            path: str
                Path to image file.
            contrast: bool
                Image's contrast flag.
            negative: bool
                Image's negative flag.
            sharpen: bool
                Image's sharpen flag.
            emboss: bool
                Image's emboss flag.
            grayscale: str
                Image's grayscale level.

        Returns:
            None.
        """

        preview = Image(name, path, contrast, negative, sharpen, emboss,
                        grayscale)
        self.on_preview_art(self, preview)
Ejemplo n.º 7
0
 def create_1_images(self, number):
     """Create `number` single-spot Image records with random coordinates."""
     return [
         Image(idx, 1,
               random.random() * 24,
               random.random() * 24, 0, 0)
         for idx in range(number)
     ]
Ejemplo n.º 8
0
 def get_image(self):
     """Return the untouched source image while in the still phase;
     otherwise return a new Image rotated by self.angle and scaled by
     self.scale, keeping the source's offset."""
     if not self.still:
         rotated = pygame.transform.rotozoom(self.image.get_surface(),
                                             self.angle, self.scale)
         off_x, off_y = self.image.get_offset()
         return Image(rotated, (off_x, off_y))
     return self.image
Ejemplo n.º 9
0
def main():
    """Parse command-line arguments, set up the proxy and the source
    image, then start the bot."""
    args = parse_args()

    proxy = setup_proxy(args.proxy_url, args.proxy_auth)

    source_image = Image(args.file)

    bot = Bot(source_image, args.fingerprint, args.start_x, args.start_y,
              args.mode_defensive, args.colors_ignored, proxy,
              args.draw_strategy)

    bot.run()
Ejemplo n.º 10
0
 def screenshot_mutlfind(self, template_img_path):
     """
     Take a screenshot of the game window and find every position of the
     template image in it.

     :param template_img_path: path of the template image to search for
     :return: list of match positions on success, empty list on failure
         (delegates to search_mutlimg_zoom)
     """
     screenshot = self.window.hwd_screenshot()
     # Scale factor between the actual window width and the default one;
     # the template is resized by this zoom before matching.
     screenshot_width = screenshot.shape[1]
     zoom = screenshot_width / default_window_width
     template_img = Image.read_img(template_img_path, 0)
     return self.search_mutlimg_zoom(template_img, screenshot, zoom)
Ejemplo n.º 11
0
def factory():
    """Build an ArtFactory pre-populated with ten placeholder Lenna images."""
    from src.factory import ArtFactory
    from src.image import Image

    art_factory = ArtFactory()
    for index in range(10):
        # Empty path and all-off filter flags: pure placeholder entries.
        placeholder = Image(f"Lenna{index}", "",
                            False, False, False, False,
                            "")
        art_factory += placeholder
    return art_factory
Ejemplo n.º 12
0
    def render_isometric(
            self,
            scene: Base,
            origin: Vector3 = (0, 0, -5),
            direction: Vector3 = (0, 0, 1),
            width: int = 6,
            height: int = 6,
    ):
        """Render `scene` with parallel rays (isometric projection) and
        return the result as an ASCII black/white string."""
        origin = Vec3(origin)
        direction = Vec3(direction)

        image = Image(width, height)
        # Every pixel p casts a ray from p + origin, all sharing the same
        # direction — that parallelism is what makes the view isometric.
        raymarcher = Raymarcher(scene)
        raymarcher.render(image, lambda p: (p + origin, direction), aa=4)

        # Serialize to ASCII: "#" above the brightness threshold, "." below.
        buffer = StringIO()
        image.dump_bw(file=buffer, black=".", white="#", threshold=.3)
        buffer.seek(0)
        return buffer.read()
Ejemplo n.º 13
0
 def find_img_zoom(self, template_img, target_img, zoom):
     """
     Search for a template image inside a target image at a given zoom.

     :param template_img: image to look for
     :param target_img: image to search within
     :param zoom: scale factor applied to the template before matching
     :return: match position [x, y] on success, None on failure
     """
     scaled_template = Image.resize_by_zoom(zoom, template_img)
     search_threshold = config.getfloat("game", "imageSearchThreshold")
     debug_enabled = config.getboolean("game", "debug")
     return best_match(target_img, scaled_template, search_threshold,
                       debug_enabled)
Ejemplo n.º 14
0
    def search_mutlimg_zoom(self, template_img, target_img, zoom):
        """
        Search for every occurrence of a template image inside a target
        image at a given zoom.

        :param template_img: image to look for
        :param target_img: image to search within
        :param zoom: scale factor applied to the template before matching
        :return: list of match positions [x, y]; empty list when none found
        """
        scaled_template = Image.resize_by_zoom(zoom, template_img)
        search_threshold = config.getfloat("game", "imageSearchThreshold")
        return mutl_match(target_img, scaled_template, search_threshold)
Ejemplo n.º 15
0
    def __image_thread_func(index: int, image: Image,
                            signal: Signal[int]) -> None:
        """
        Function used in image converting background thread.

        Converts the image (in place) and then sends its index to the
        signal, so the UI thread can refresh that entry.

        Args:
            index: int
                Image's index in list.
                If image is not added to list yet, index should be set to -1.
            image: Image
                Image to convert.
            signal: Signal[int]
                Qt signal accepting converted image index.

        Returns:
            None.
        """

        # Conversion must finish before the index is emitted — listeners
        # read the converted result when the signal fires.
        image.convert_to_ascii_art()
        signal.emit(index)
Ejemplo n.º 16
0
    def hwd_screenshot(self):
        """Capture the whole window for this handle — including the
        non-client area (title bar, menu, borders) — and return it as an
        OpenCV image.

        GDI resources are always released, even when a step fails partway.
        """
        # Pre-bind every handle so the finally block never hits a
        # NameError (which would mask the original exception) when an
        # early step in the try body raises.
        _hwnd_dc = None
        _mfc_dc = None
        _save_dc = None
        _save_bit_map = None
        try:
            _left, _top, _right, _bot = self.get_window_rect()
            _width = _right - _left
            _height = _bot - _top
            # Device context for the whole window, not just the client area.
            _hwnd_dc = GetWindowDC(self.hwnd)
            _mfc_dc = CreateDCFromHandle(_hwnd_dc)
            # Memory DC to blit the window surface into.
            _save_dc = _mfc_dc.CreateCompatibleDC()
            # Bitmap that receives the screenshot pixels.
            _save_bit_map = CreateBitmap()
            _save_bit_map.CreateCompatibleBitmap(_mfc_dc, _width, _height)
            _save_dc.SelectObject(_save_bit_map)
            # Copy the window surface into the bitmap.
            _save_dc.BitBlt((0, 0), (_width, _height), _mfc_dc, (0, 0),
                            win32con.SRCCOPY)

            # Alternatives exist (PrintWindow, SaveBitmapFile, PIL's
            # Image.frombuffer); here the raw bits are handed to
            # OpenCV/numpy for conversion.
            signed_ints_array = _save_bit_map.GetBitmapBits(True)
            return Image.get_img_opencv(signed_ints_array, _width, _height)
        finally:
            # Release GDI objects; each guard skips handles that were
            # never created because the try body failed early.
            if _save_bit_map is not None:
                DeleteObject(_save_bit_map.GetHandle())
            if _save_dc is not None:
                _save_dc.DeleteDC()
            if _mfc_dc is not None:
                _mfc_dc.DeleteDC()
            if _hwnd_dc is not None:
                ReleaseDC(self.hwnd, _hwnd_dc)
Ejemplo n.º 17
0
 def find_imgs(self, template_img_paths):
     """
     Check whether any of the given template images appears in the
     current scene.

     :param template_img_paths: paths of the template images to look for
     :return: [index, position, path] of the first template found,
         or None when none matches
     """
     screenshot = self.screenshot()
     # Scale factor between the actual window width and the default one.
     zoom = screenshot.shape[1] / default_window_width
     for index, template_path in enumerate(template_img_paths):
         template_img = Image.read_img(template_path, 0)
         pos = self.find_img_zoom(template_img, screenshot, zoom)
         if pos is not None:
             return [index, pos, template_path]
     return None
Ejemplo n.º 18
0
def get_csv_training():
    """
    Load the annotations of every image listed in the training CSV.

    Each row yields (id, c, x/y of the first spot, x/y of the second
    spot), which is wrapped in an Image record.

    :return: list of Image annotation records.
    """
    annotations = []
    with open('DataChallenge/descriptions_training.csv', 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        for raw_row in reader:
            # Each row arrives as one comma-joined field; split manually.
            _id, c, xf, yf, xs, ys = raw_row[0].split(",")
            annotations.append(Image(_id, c, xf, yf, xs, ys))
    return annotations
Ejemplo n.º 19
0
def main():
    """Entry point: parse args, build the image and the bot, and run,
    retrying once the user confirms a token challenge is resolved."""
    args = parse_args()

    proxy = setup_proxy(args.proxy_url, args.proxy_auth)

    image = Image(args.file, args.round_sensitive, args.image_brightness)

    bot = Bot(image, args.fingerprint, args.start_x, args.start_y,
              args.mode_defensive, args.colors_ignored, proxy,
              args.draw_strategy)

    bot.init()

    # raw_input only exists on Python 2; fall back to input on Python 3.
    try:
        prompt = raw_input
    except NameError:
        prompt = input

    def run():
        try:
            bot.run()
        except NeedUserInteraction as exception:
            # BaseException.message was removed in Python 3; str() works
            # on both versions.
            alert(str(exception))
            if prompt(I18n.get('token_resolved')) == 'y':
                run()
    run()
Ejemplo n.º 20
0
def parse_current_images(current_image_points, index):
    """Collapse the detected candidate boxes of one image into an Image
    annotation with 0, 1 or 2 spots.

    :param current_image_points: candidate rows [score, x1, x2, y1, y2]
        in resized-image coordinates (divide by RESIZE_FACTOR for
        original coordinates); may be mutated (sorted in place)
    :param index: 1-based image index; the produced Image uses index - 1
    :return: Image(index - 1, n_spots, x1, y1, x2, y2) with unused
        coordinates zeroed
    """
    def _spot_from_row(row):
        """Return the spot centre (x, y) in original coordinates, or None
        when the candidate box is not a plausible spot."""
        if is_spot_possible(row[1] / float(RESIZE_FACTOR),
                            row[2] / float(RESIZE_FACTOR),
                            row[3] / float(RESIZE_FACTOR),
                            row[4] / float(RESIZE_FACTOR)):
            return ((row[1] + row[2]) / float(2 * RESIZE_FACTOR),
                    (row[3] + row[4]) / float(2 * RESIZE_FACTOR))
        return None

    if not current_image_points:
        return Image(index - 1, 0, 0, 0, 0, 0)

    if len(current_image_points) == 1:
        spot = _spot_from_row(current_image_points[0])
        if spot is None:
            return Image(index - 1, 0, 0, 0, 0, 0)
        return Image(index - 1, 1, spot[0], spot[1], 0, 0)

    # Two or more candidates: keep the two most likely (highest score
    # first; reverse sort in place, as before).
    current_image_points.sort(key=lambda x: -x[0])
    spots = [s for s in (_spot_from_row(current_image_points[0]),
                         _spot_from_row(current_image_points[1]))
             if s is not None]
    if not spots:
        return Image(index - 1, 0, 0, 0, 0, 0)
    if len(spots) == 1:
        return Image(index - 1, 1, spots[0][0], spots[0][1], 0, 0)
    return Image(index - 1, 2, spots[0][0], spots[0][1],
                 spots[1][0], spots[1][1])
Ejemplo n.º 21
0
 def import_key_points(self):
     """Build Image objects for both file names and load each image's
     key points from disk (first image fully, then the second)."""
     for attr, file_name in (('first_img', self.first_img_name),
                             ('second_img', self.second_img_name)):
         img = Image(file_name)
         img.import_key_points()
         setattr(self, attr, img)
Ejemplo n.º 22
0
 def test_is_dark(self):
     """A well-lit reference photo must not be classified as dark."""
     image = Image.open("tests/image/walking_person.jpg")
     self.assertFalse(image.is_dark())
Ejemplo n.º 23
0
 def test_superior_brightness(self):
     """Doubling brightness must yield an image brighter than its source."""
     original = Image.open("tests/image/walking_person.jpg")
     brightened = original.change_brightness(2)
     self.assertTrue(brightened.is_brighter_than(original))
Ejemplo n.º 24
0
 def create_0_images(self, number):
     """Create `number` Image records that contain no spots at all."""
     return [Image(idx, 0, 0, 0, 0, 0) for idx in range(number)]
Ejemplo n.º 25
0
def generate_img_2b(C=400):
    '''Image "Electric field lines around two like-signed point charges"
    (orig.: "Linie pola elektr. wokol 2 punktowych ladunkow
    jednoimiennych"). Saves the result to img/img_2b.png.'''
    image = Image((C * 4, C * 2), line_width=3)
    # left charge: red circle with a plus sign (vertical + horizontal bar)
    image.draw_circle(Circle((C, C), int(C * 0.4)), fill=[190, 0, 0], width=10)
    image.draw_line(Line((C, int(C + (C * 0.1))), (C, int(C - (C * 0.1)))),
                    fill=[190, 0, 0])
    image.draw_line(Line((int(C + (C * 0.1)), C), (int(C - (C * 0.1)), C)),
                    fill=[190, 0, 0])
    # right charge: same plus-marked red circle, centred at 3*C
    image.draw_circle(Circle((3 * C, C), int(C * 0.4)),
                      fill=[190, 0, 0],
                      width=10)
    image.draw_line(Line((3 * C, int(C + (C * 0.1))),
                         (3 * C, int(C - (C * 0.1)))),
                    fill=[190, 0, 0])
    image.draw_line(Line((int(3 * C + (C * 0.1)), C),
                         (int(3 * C - (C * 0.1)), C)),
                    fill=[190, 0, 0])
    # vertical symmetry line between the two charges
    image.draw_line(Line((2 * C, 0), (2 * C, 2 * C)), width=1)
    # field vectors bending away from the midline (like charges repel)
    image.draw_vector(
        Vector((int(C * 1.5), int(C * 1.1)), (int(C * 0.25), int(C * 0.3))))
    image.draw_vector(
        Vector((int(C * 1.5), int(C * 0.9)), (int(C * 0.25), -int(C * 0.3))))
    image.draw_vector(Vector((int(C * 1.8), int(C * 1.5)), (0, int(C * 0.4))))
    image.draw_vector(Vector((int(C * 1.8), int(C * 0.5)), (0, -int(C * 0.4))))
    image.draw_vector(
        Vector((int(C * 2.5), int(C * 1.1)), (-int(C * 0.25), int(C * 0.3))))
    image.draw_vector(
        Vector((int(C * 2.5), int(C * 0.9)), (-int(C * 0.25), -int(C * 0.3))))
    image.draw_vector(Vector((int(C * 2.2), int(C * 1.5)), (0, int(C * 0.4))))
    image.draw_vector(Vector((int(C * 2.2), int(C * 0.5)), (0, -int(C * 0.4))))
    # middle ring of vectors, curving more strongly outward
    image.draw_vector(
        Vector((int(C * 1.4), int(C * 1.25)), (int(C * 0.20), int(C * 0.40))))
    image.draw_vector(
        Vector((int(C * 1.4), int(C * 0.75)), (int(C * 0.20), -int(C * 0.40))))
    image.draw_vector(
        Vector((int(C * 2.6), int(C * 1.25)), (-int(C * 0.20), int(C * 0.40))))
    image.draw_vector(
        Vector((int(C * 2.6), int(C * 0.75)),
               (-int(C * 0.20), -int(C * 0.40))))
    # outermost vectors, nearly vertical close to each charge
    image.draw_vector(
        Vector((int(C * 1.2), int(C * 1.4)), (int(C * 0.1), int(C * 0.5))))
    image.draw_vector(
        Vector((int(C * 1.2), int(C * 0.6)), (int(C * 0.1), -int(C * 0.5))))
    image.draw_vector(
        Vector((int(C * 2.8), int(C * 1.4)), (-int(C * 0.1), int(C * 0.5))))
    image.draw_vector(
        Vector((int(C * 2.8), int(C * 0.6)), (-int(C * 0.1), -int(C * 0.5))))
    image.save('img/img_2b.png')
Ejemplo n.º 26
0
 def create_2_images(self, number):
     """Create `number` two-spot Image records with random coordinates."""
     return [
         Image(idx, 2,
               random.random() * 24, random.random() * 24,
               random.random() * 24, random.random() * 24)
         for idx in range(number)
     ]
Ejemplo n.º 27
0
from src.image import Image
import sys

if __name__ == '__main__':

    # Without an input path there is nothing to crop; exit instead of
    # crashing on sys.argv[1] below.
    if len(sys.argv) != 2:
        print('Argument error')
        sys.exit(1)

    img = Image(sys.argv[1])
    # Keep only the top-left quadrant (half width, half height).
    size = len(img.data) // 2
    img.data = img.data[:size, :size]
    img.save('img/penguin128.png')
Ejemplo n.º 28
0
def generate_img_2a(C=400):
    '''Image "Electric field lines around two opposite point charges"
    (orig.: "Linie pola elektr. wokol 2 punktowych ladunkow
    roznoimiennych"). Saves the result to img/img_2a.png.'''
    image = Image((C * 4, C * 2), line_width=3)
    # positive charge: red circle with a plus sign (two crossed bars)
    image.draw_circle(Circle((C, C), int(C * 0.4)), fill=[190, 0, 0], width=10)
    image.draw_line(Line((C, int(C + (C * 0.1))), (C, int(C - (C * 0.1)))),
                    fill=[190, 0, 0])
    image.draw_line(Line((int(C + (C * 0.1)), C), (int(C - (C * 0.1)), C)),
                    fill=[190, 0, 0])
    # negative charge: blue circle with a minus sign (horizontal bar only)
    image.draw_circle(Circle((3 * C, C), int(C * 0.4)),
                      fill=[0, 0, 190],
                      width=10)
    image.draw_line(Line((int(3 * C + (C * 0.1)), C),
                         (int(3 * C - (C * 0.1)), C)),
                    fill=[0, 0, 190])
    # straight vectors: field lines running directly from + to -
    image.draw_vector(Vector((int(C * 1.5), C), (int(C * 0.4), 0)))
    image.draw_vector(Vector((int(C * 1.5), int(C * 1.2)), (int(C * 0.4), 0)))
    image.draw_vector(Vector((int(C * 1.5), int(C * 0.8)), (int(C * 0.4), 0)))
    image.draw_vector(Vector((int(C * 2.1), C), (int(C * 0.4), 0)))
    image.draw_vector(Vector((int(C * 2.1), int(C * 1.2)), (int(C * 0.4), 0)))
    image.draw_vector(Vector((int(C * 2.1), int(C * 0.8)), (int(C * 0.4), 0)))
    # slightly curved vectors just above/below the straight bundle
    image.draw_vector(
        Vector((int(C * 1.5), int(C * 1.4)), (int(C * 0.4), int(C * 0.05))))
    image.draw_vector(
        Vector((int(C * 2.1), int(C * 1.45)), (int(C * 0.4), int(C * -0.05))))
    image.draw_vector(
        Vector((int(C * 1.5), int(C * 0.6)), (int(C * 0.4), int(C * -0.05))))
    image.draw_vector(
        Vector((int(C * 2.1), int(C * 0.55)), (int(C * 0.4), int(C * 0.05))))
    # outer curved lines drawn as 3 vector segments each (top and bottom)
    image.draw_vector(
        Vector((int(C * 1.1), int(C * 1.5)), (int(C * 0.4), int(C * 0.1))))
    image.draw_vector(Vector((int(C * 1.8), int(C * 1.65)), (int(C * 0.4), 0)))
    image.draw_vector(
        Vector((int(C * 2.5), int(C * 1.6)), (int(C * 0.4), int(C * -0.1))))
    image.draw_vector(
        Vector((int(C * 1.1), int(C * 0.5)), (int(C * 0.4), int(C * -0.1))))
    image.draw_vector(Vector((int(C * 1.8), int(C * 0.35)), (int(C * 0.4), 0)))
    image.draw_vector(
        Vector((int(C * 2.5), int(C * 0.4)), (int(C * 0.4), int(C * 0.1))))
    image.save('img/img_2a.png')
Ejemplo n.º 29
0
def lenna():
    """Return a default-configured test Image for the bundled Lenna picture
    (all filter flags off, default grayscale level)."""
    from src.image import Image
    grayscale_level = consts.uiConsts["DefaultGrayscaleLevel"]
    return Image("Lenna", relative_path("tests/data/lenna.png"),
                 False, False, False, False, grayscale_level)
Ejemplo n.º 30
0
class ImageAnalyzer:
    def __init__(
            self,
            first_img_name: str,
            second_img_name: str,
            neighbourhood_size: int,
            consistency_threshold: float,
            iterations: int,
            ransac_threshold: float,
            transformation: TransformationType = TransformationType.AFFINE,
            ransac_heuristic: RansacHeuristic = RansacHeuristic.NONE,
            iteration_heuristic_probability: float = 0.9):
        self.first_img_name = first_img_name
        self.second_img_name = second_img_name
        self.first_img: Optional[Image] = None
        self.second_img: Optional[Image] = None
        self.key_point_pairs: List[KeyPointPair] = []
        self.consistent_key_point_pairs: List[KeyPointPair] = []
        self.neighbourhood_size = neighbourhood_size
        self.consistency_threshold = consistency_threshold
        self.iterations = iterations
        self.ransac_threshold = ransac_threshold
        self.best_ransac_consensus: List[KeyPointPair] = []
        self.best_model: Optional[np.ndarray] = None
        self.ransac_heuristic = ransac_heuristic
        self.iteration_heuristic_probability = iteration_heuristic_probability
        self.transformation = transformation
        self.small_r: float = -1
        self.big_R: float = -1

    def run(self):
        print("Extracting")
        self.extract_files()
        print("Importing key points")
        self.import_key_points()
        print("Calculating pairs")
        self.calculate_key_point_pairs()
        print("Calculating neighbourhoods")
        self.calculate_neighbourhoods()
        print("Analyzing consistency")
        self.analyze_consistency()
        print("Running ransac")

        if self.ransac_heuristic == RansacHeuristic.ITERATIONS:
            self.estimate_ransac_iterations()

        if self.ransac_heuristic == RansacHeuristic.DISTANCE:
            self.init_distance_heuristic_params()

        if self.transformation == TransformationType.AFFINE:

            def get_sample(distribution):
                return self.get_random_sample(3, distribution)

            def transform_function(sample):
                return self.get_affine_transform(sample)
        else:

            def get_sample(distribution):
                return self.get_random_sample(4, distribution)

            def transform_function(sample):
                return self.get_perspective_transform(sample)

        self.ransac(get_sample, transform_function)
        print(len(self.key_point_pairs))
        print(len(self.consistent_key_point_pairs))
        print(len(self.best_ransac_consensus))
        self.show_images()

    def init_distance_heuristic_params(self):
        image1 = cv2.imread(self.first_img_name)
        size = max(image1.shape)
        self.small_r = (0.01 * size)**2
        self.big_R = (0.3 * size)**2

    def extract_files(self):
        self.extract_image(self.first_img_name)
        self.extract_image(self.second_img_name)

    def import_key_points(self):
        self.first_img = Image(self.first_img_name)
        self.first_img.import_key_points()
        self.second_img = Image(self.second_img_name)
        self.second_img.import_key_points()

    def calculate_key_point_pairs(self):
        self.first_img.calculate_closest_key_points(self.second_img)
        self.second_img.calculate_closest_key_points(self.first_img)
        for key_point in self.first_img.key_points:
            if key_point.closest.closest == key_point:
                self.key_point_pairs.append((key_point, key_point.closest))

    def analyze_consistency(self):
        pair_number_threshold = int(self.neighbourhood_size *
                                    self.consistency_threshold)
        for pair_one in self.key_point_pairs:
            neighbour_pairs_count = 0
            for pair_two in self.key_point_pairs:
                if pair_one[0].has_neighbour(
                        pair_two[0]) and pair_one[1].has_neighbour(
                            pair_two[1]):
                    neighbour_pairs_count += 1
                    if neighbour_pairs_count >= pair_number_threshold:
                        self.consistent_key_point_pairs.append(pair_one)
                        break

    def calculate_neighbourhoods(self):
        first_img_paired_key_points = [
            pair[0] for pair in self.key_point_pairs
        ]
        second_img_paired_key_points = [
            pair[1] for pair in self.key_point_pairs
        ]

        for pair in self.key_point_pairs:
            pair[0].calculate_neighbours(self.neighbourhood_size,
                                         first_img_paired_key_points)
            pair[1].calculate_neighbours(self.neighbourhood_size,
                                         second_img_paired_key_points)

    def show_images(self):
        image1 = cv2.imread(self.first_img_name)
        image2 = cv2.imread(self.second_img_name)
        stacked_image = np.vstack((image1, image2))
        offset = image1.shape[0]
        for pair in self.key_point_pairs:
            point1 = pair[0]
            point2 = pair[1]
            cv2.line(stacked_image, (int(point1.x), int(point1.y)),
                     (int(point2.x), int(point2.y + offset)),
                     (random.randint(0, 255), random.randint(
                         0, 255), random.randint(0, 255)), 1, cv2.LINE_AA)

        cv2.namedWindow("All pairs", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("All pairs", stacked_image.shape[1], 650)
        cv2.imshow("All pairs", stacked_image)

        cv2.waitKey(0)

        stacked_image2 = np.vstack((image1, image2))
        for pair in self.consistent_key_point_pairs:
            point1 = pair[0]
            point2 = pair[1]
            cv2.line(stacked_image2, (int(point1.x), int(point1.y)),
                     (int(point2.x), int(point2.y + offset)),
                     (random.randint(0, 255), random.randint(
                         0, 255), random.randint(0, 255)), 1, cv2.LINE_AA)

        cv2.namedWindow("Consistent pairs", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Consistent pairs", stacked_image2.shape[1], 650)
        cv2.imshow("Consistent pairs", stacked_image2)

        cv2.waitKey(0)

        stacked_image2 = np.vstack((image1, image2))
        for pair in self.best_ransac_consensus:
            point1 = pair[0]
            point2 = pair[1]
            cv2.line(stacked_image2, (int(point1.x), int(point1.y)),
                     (int(point2.x), int(point2.y + offset)),
                     (random.randint(0, 255), random.randint(
                         0, 255), random.randint(0, 255)), 1, cv2.LINE_AA)

        cv2.namedWindow("Transformed ransac pairs", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Transformed ransac pairs", stacked_image2.shape[1],
                         650)
        cv2.imshow("Transformed ransac pairs", stacked_image2)

        cv2.waitKey(0)

        if self.best_model is not None:
            if self.best_model[2][0] == 0 and self.best_model[2][1] == 0:
                image1 = cv2.warpAffine(image1, self.best_model[:2],
                                        (image1.shape[1], image1.shape[0]))
            else:
                image1 = cv2.warpPerspective(
                    image1, self.best_model,
                    (image1.shape[1], image1.shape[0]))

        cv2.namedWindow("Transformation", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Transformation", image1.shape[1], image1.shape[0])
        cv2.imshow("Transformation", image1)
        cv2.waitKey(0)

    def ransac(self, get_sample: Callable[[List[KeyPointPair]],
                                          List[KeyPointPair]],
               get_transformation: Callable[[List[KeyPointPair]], np.ndarray]):
        best_consensus: List[KeyPointPair] = []
        best_transformation = None

        distribution = list(self.key_point_pairs)

        for i in range(self.iterations):
            current_consensus = []
            sample = get_sample(distribution)
            A = get_transformation(sample)
            for pair in self.key_point_pairs:
                point = [[pair[0].x], [pair[0].y], [1]]
                transformed_point = A @ np.array(point)
                transformed_point = transformed_point / transformed_point[2]
                distance = pair[1].euclidean_distance(transformed_point[0],
                                                      transformed_point[1])
                if distance < self.ransac_threshold:
                    current_consensus.append(pair)

            if len(current_consensus) > len(best_consensus):
                best_consensus = current_consensus
                best_transformation = A
                if self.ransac_heuristic == RansacHeuristic.DISTRIBUTION:
                    distribution.extend(sample)
        self.best_ransac_consensus = best_consensus
        self.best_model = best_transformation

    def get_affine_transform(self, pairs: List[KeyPointPair]):
        pair1 = pairs[0]
        pair2 = pairs[1]
        pair3 = pairs[2]

        B = np.array([[pair1[0].x, pair1[0].y, 1, 0, 0, 0],
                      [pair2[0].x, pair2[0].y, 1, 0, 0, 0],
                      [pair3[0].x, pair3[0].y, 1, 0, 0, 0],
                      [0, 0, 0, pair1[0].x, pair1[0].y, 1],
                      [0, 0, 0, pair2[0].x, pair2[0].y, 1],
                      [0, 0, 0, pair3[0].x, pair3[0].y, 1]])

        C = np.array([[pair1[1].x], [pair2[1].x], [pair3[1].x], [pair1[1].y],
                      [pair2[1].y], [pair3[1].y]])

        result = np.linalg.inv(B) @ C
        return np.reshape(np.append(result, [[0], [0], [1]]), (3, 3))

    def get_perspective_transform(self, pairs: "List[KeyPointPair]"):
        """Compute the 3x3 perspective (homography) transform from the first
        four point pairs.

        Each entry of ``pairs`` is indexable: ``pair[0]`` is the source key
        point and ``pair[1]`` the destination; points expose ``.x``/``.y``.
        Uses the standard DLT formulation with the bottom-right element of
        the homography fixed to 1 (eight unknowns, eight equations).

        Returns:
            A (3, 3) numpy array; the bottom-right element is 1.

        Raises:
            numpy.linalg.LinAlgError: if the source configuration is
                degenerate (e.g. three of the four points collinear).
        """
        correspondences = [(pair[0], pair[1]) for pair in pairs[:4]]

        # One row per x' equation, then one row per y' equation, keeping the
        # same equation ordering as the column vector C below.
        rows = []
        for src, dst in correspondences:
            rows.append([src.x, src.y, 1, 0, 0, 0,
                         -dst.x * src.x, -dst.x * src.y])
        for src, dst in correspondences:
            rows.append([0, 0, 0, src.x, src.y, 1,
                         -dst.y * src.x, -dst.y * src.y])
        B = np.array(rows)

        C = np.array([[dst.x] for _, dst in correspondences] +
                     [[dst.y] for _, dst in correspondences])

        # Solve B @ h = C directly: np.linalg.solve is both faster and
        # numerically more stable than forming the explicit inverse.
        result = np.linalg.solve(B, C)
        return np.reshape(np.append(result, [[1]]), (3, 3))

    def get_random_sample(self, number_of_points: int,
                          key_point_pairs: List[KeyPointPair]):
        """Draw a sample of key-point pairs, dispatching to the sampler
        selected by the configured RANSAC heuristic (distance-constrained
        vs. plain rejection sampling)."""
        use_distance = self.ransac_heuristic == RansacHeuristic.DISTANCE
        sampler = (self.get_random_sample_heuristic
                   if use_distance else self.get_random_sample_normal)
        return sampler(number_of_points, key_point_pairs)

    def get_random_sample_normal(self, number_of_points: int,
                                 key_point_pairs: "List[KeyPointPair]"):
        """Draw ``number_of_points`` pairs at random such that no two chosen
        pairs share a source coordinate or a destination coordinate.

        Raises:
            IndexError: if ``key_point_pairs`` is empty (from random.choice).
            ValueError: if the attempt budget is exhausted — guards against
                the previous behaviour of looping forever when the input has
                fewer coordinate-distinct pairs than requested.
        """
        def _conflicts(a, b):
            # Two pairs conflict when they share the source point location
            # or the destination point location.
            return ((a[0].x == b[0].x and a[0].y == b[0].y)
                    or (a[1].x == b[1].x and a[1].y == b[1].y))

        result: "List[KeyPointPair]" = []
        # Generous retry budget: rejection sampling normally succeeds within
        # a handful of draws when the input is not degenerate.
        max_attempts = 1000 * max(1, number_of_points)
        attempts = 0
        while len(result) < number_of_points:
            attempts += 1
            if attempts > max_attempts:
                raise ValueError(
                    f"could not draw {number_of_points} "
                    "coordinate-distinct key point pairs")
            candidate = random.choice(key_point_pairs)
            if not any(_conflicts(candidate, chosen) for chosen in result):
                result.append(candidate)

        return result

    def get_random_sample_heuristic(self, number_of_points: int,
                                    key_point_pairs: List[KeyPointPair]):
        """Draw a sample where every pick satisfies the distance heuristic
        (see filter_incorrect_pairs) with respect to the shrinking candidate
        pool; restarts from scratch whenever the pool runs dry.

        NOTE(review): a pick is only kept if at least one compatible
        candidate remains afterwards — even for the final pick; this
        preserves the original behaviour exactly.
        """
        chosen: List[KeyPointPair] = []
        candidates = list(key_point_pairs)
        while True:
            if len(chosen) >= number_of_points:
                return chosen
            pick = random.choice(candidates)
            candidates = self.filter_incorrect_pairs(pick, candidates)
            if candidates:
                chosen.append(pick)
            else:
                # Dead end: no remaining pair is compatible with this pick,
                # so drop everything and retry with the full input.
                chosen = []
                candidates = list(key_point_pairs)

    def filter_incorrect_pairs(self, new_pair: KeyPointPair,
                               pairs_list: List[KeyPointPair]):
        """Return the pairs from ``pairs_list`` whose source and destination
        points both lie within the (small_r, big_R) square_distance band of
        ``new_pair`` (exclusive bounds on both sides)."""
        def in_band(candidate: KeyPointPair) -> bool:
            src_dist = new_pair[0].square_distance(candidate[0])
            if not (self.small_r < src_dist < self.big_R):
                # Short-circuit: destination distance is only checked when
                # the source distance already qualifies.
                return False
            dst_dist = new_pair[1].square_distance(candidate[1])
            return self.small_r < dst_dist < self.big_R

        return [candidate for candidate in pairs_list if in_band(candidate)]

    def estimate_ransac_iterations(self):
        """Estimate the number of RANSAC iterations needed to draw at least
        one all-inlier sample with probability
        ``iteration_heuristic_probability``, via the standard
        N = log(1 - p) / log(1 - w**n) formula (w = inlier ratio,
        n = sample size).

        Updates ``self.iterations`` and returns it. Previously the empty
        case returned 0 while leaving ``self.iterations`` stale, and a zero
        inlier ratio crashed on int(inf) from a division by log2(1) == 0.
        """
        if len(self.key_point_pairs) == 0:
            # No data: make the stored estimate consistent with the return.
            self.iterations = 0
            return self.iterations

        # Inlier ratio; computed before n so degenerate inputs bail out
        # without needing the transformation type.
        w = len(self.consistent_key_point_pairs) / len(self.key_point_pairs)
        if w <= 0.0:
            # Formula diverges (log2(1 - 0) == 0): keep the currently
            # configured iteration count instead of crashing.
            print(f"estimated: {self.iterations}")
            return self.iterations

        # Minimal sample size: 3 pairs for affine, 4 for perspective.
        n = 3 if self.transformation == TransformationType.AFFINE else 4
        self.iterations = int(
            np.log2(1 - self.iteration_heuristic_probability) /
            np.log2(1 - w**n))
        print(f"estimated: {self.iterations}")
        return self.iterations

    @staticmethod
    def extract_image(image_path: str):
        """Run the external feature extractor on ``image_path``.

        Invokes ``extract_features_32bit.exe`` — presumably Harris-Affine
        regions with SIFT descriptors, per the '-haraff'/'-sift' flags
        (TODO: confirm against the tool's docs). Best-effort: the exit
        status is not checked, matching the original fire-and-forget
        behaviour of subprocess.call.
        """
        # subprocess.run is the recommended modern API; list-form argv
        # (shell=False) keeps the path safe from shell interpretation.
        subprocess.run([
            "extract_features_32bit.exe", '-haraff', '-sift', '-i', image_path,
            '-DE'
        ], check=False)