Example #1
File: gui.py Project: rezkyd/twitter-event
    def action_start(self):
        data = Data(self.path)
        pref = self.preference.get()
        pref = pref if pref in ('median', 'max') else float(pref)
        param = [
            self.min_count.get(),
            self.min_occur.get(),
            self.segment_range.get(), pref,
            self.damping_factor.get(),
            self.max_iteration.get()
        ]
        if None not in param:
            mc.start(data,
                     min_count=param[0],
                     min_occur=param[1],
                     segment_range_ms=param[2] * 3600 * 1000,
                     preference=param[3],
                     damping_factor=param[4],
                     max_iteration=param[5],
                     new_data=self.new_data.get())
            segment_list = data.get_segment_list()
            if len(segment_list) > 0:
                option = self.segment_option['menu']
                option.delete(0, 'end')
                for index, string in enumerate(segment_list):
                    option.add_command(label=string,
                                       command=lambda idx=index, val=string:
                                       self.load_summary(idx, name=val))

            self.statistic_text.delete('1.0', 'end')
            statistic = data.get_statistic()
            for text in statistic:
                self.statistic_text.insert('insert', text + '\n')
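A side note on the fix above: the snippet originally read pref = pref if pref == 'median' or 'max' else float(pref), which parses as (pref == 'median') or 'max'; since 'max' is a non-empty (truthy) string, the condition always held and float(pref) could never run. A minimal illustration of the pitfall and the membership-test fix:

pref = '0.75'
print(pref == 'median' or 'max')   # 'max' -> truthy, so the original never converted
print(pref in ('median', 'max'))   # False -> float(pref) now runs as intended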
Example #2
File: gui.py Project: rezkyd/twitter-event
    def action_open_path(self):
        self.path = askdirectory()
        if not self.path:
            return
        data = Data(self.path)
        param = data.get_result('PARAMETER')
        if param is not None:
            self.min_count.set(param['min_count'])
            self.min_occur.set(param['min_occur'])
            self.segment_range.set(param['segment_range_ms'] / (3600 * 1000))
            self.preference.set(param['preference'])
            self.damping_factor.set(param['damping_factor'])
            self.max_iteration.set(param['max_iteration'])

            segment_list = data.get_segment_list()
            if len(segment_list) > 0:
                option = self.segment_option['menu']
                option.delete(0, 'end')
                for index, string in enumerate(segment_list):
                    option.add_command(label=string,
                                       command=lambda idx=index, val=string:
                                       self.load_summary(idx, name=val))

            self.statistic_text.delete('1.0', 'end')
            statistic = data.get_statistic()
            for text in statistic:
                self.statistic_text.insert('insert', text + '\n')
Example #3
    def __init__(self, master):
        self.master = master
        master.title("Digital Signal Processing")
        master.geometry("800x600")

        self.master.protocol("WM_DELETE_WINDOW", self.master.quit)
        self.data = Data()
        self.scalar = 1
        self.popup_return = ''
        self.menubar = tk.Menu(self.master)
        self.file_menu = tk.Menu(self.menubar, tearoff=0)
        self.operations_menu = tk.Menu(self.menubar, tearoff=0)
        self.init_menubar()
        self.master.config(menu=self.menubar)
        self.path = ''
        self.fig = plt.figure(1)
        canvas = FigureCanvasTkAgg(self.fig, master=self.master)
        self.plot_widget = canvas.get_tk_widget()
        self.nextBtn = tk.Button(self.master,
                                 text='Next',
                                 command=self.draw_next)
        self.prevBtn = tk.Button(self.master,
                                 text='Prev',
                                 command=self.draw_prev)
        self.prevBtn.pack()
        self.nextBtn.pack()
        self.plot_widget.pack(side=tk.BOTTOM)
        self.counter = 0
Example #4
def test_should_be_departure_header(browser, link):
    link_dep = urljoin(link, "/departure/msk")
    page = DeparturePage(browser, link_dep)
    page.open()
    header_text = page.get_header_text()
    data = Data()
    header_departure = data.get_departure('msk')
    expected_text = f'Летим {header_departure}'
    page.should_be_exact_text(expected_text.lower(), header_text.lower())
Example #5
File: gui.py Project: rezkyd/twitter-event
    def load_summary(self, segment, name=None, to_segment=0, keyword=None):
        if not self.path:
            return
        if name is not None:
            self.selected_segment.set(name)
        data = Data(self.path)
        self.summary_text.delete('1.0', 'end')
        summary = data.get_summary(segment, to_segment, keyword)
        for text in summary:
            self.summary_text.insert('insert', text + '\n')
Example #6
def test_tour_general_info(browser, link):
    link_tour = urljoin(link, "/tour/4")
    page = TourPage(browser, link_tour)
    page.open()
    data = Data()
    tour_page_title_text = page.get_tour_description_text()

    tour_description_text = f'{data.get_tour_duration_of_stay(id=4)} Ночей'
    page.should_be_exact_text(tour_description_text.lower(),
                              tour_page_title_text.lower())

    tour_page_title = data.get_departure(departure='msk')
    page.should_be_exact_text(tour_page_title.lower(),
                              tour_page_title_text.lower())
Example #7
File: bot.py Project: Pumpka-s/TelegramBot
def send_welcome(message):
    if message.text == '/start':
        if not Data().is_registered(message.from_user.id):
            Data().register_user(message.from_user.id,
                                 message.from_user.username)
            bot.send_message(message.chat.id,
                             config.HELLO_MESSAGE,
                             reply_markup=show_main_keyboard(0))
    if message.text == '/delete_account':
        # Account deletion
        Data().delete_user(message.from_user.id)
        bot.send_message(message.chat.id,
                         "Пока 😥",
                         reply_markup=types.ReplyKeyboardRemove())
Example #8
def test_tours_have_different_pictures_and_content(browser, link):
    for i in range(1, 3):
        link_tour = urljoin(link, f"/tour/{i}")
        page = TourPage(browser, link_tour)
        page.open()

        tour_picture_on_page = page.get_picture_src_on_tour_page()
        tour_content_on_page = page.get_content_on_tour_page_text()

        data = Data()
        tour_picture = data.get_tour_picture(id=i)
        assert tour_picture == tour_picture_on_page, f'Different pictures on tour page {i} and data'

        tour_content = data.get_tour_content(id=i)
        assert tour_content == tour_content_on_page, f'Different content on tour page {i} and data'
Example #9
class Predict:
    def __init__(self):
        self.data = Data(FLAGS)
        model = Seq2seq(self.data.vocab_size, FLAGS)
        estimator = tf.estimator.Estimator(model_fn=model.make_graph,
                                           model_dir=FLAGS.model_dir)

        def input_fn():
            inp = tf.placeholder(tf.int64, shape=[None, None], name='input')
            output = tf.placeholder(tf.int64,
                                    shape=[None, None],
                                    name='output')
            tf.identity(inp[0], 'source')
            tf.identity(output[0], 'target')
            tensors = {'input': inp, 'output': output}
            return tf.estimator.export.ServingInputReceiver(tensors, tensors)

        self.predictor = tf.contrib.predictor.from_estimator(
            estimator, input_fn)

    def infer(self, sentence):
        tokens = self.data.prepare(sentence)
        predictor_prediction = self.predictor({
            "input": tokens,
            "output": tokens
        })
        words = [
            self.data.rev_vocab.get(i, '<UNK>')
            for i in predictor_prediction['output'][0] if i > 2
        ]
        return ' '.join(words)
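A minimal usage sketch for this class, assuming FLAGS, the vocabulary files, and a trained checkpoint under FLAGS.model_dir are set up as in the training script (the input sentence is illustrative):

predictor = Predict()
print(predictor.infer('how are you'))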
Example #10
def main():
    data = Data(database_dir='w5_BBDD_random', query_dir='w5_devel_random')
    gt = ground_truth_text.get_text_gt()
    iou_list = []
    # loop over database_imgs without overloading memory
    for im, im_name in data.database_imgs:
        x1gt, y1gt, x2gt, y2gt = gt[im_name]
        h, w = im.shape[0:2]
        fixed_width = 1000
        ratio = fixed_width / w
        im = cv2.resize(im, (fixed_width, int(ratio * h)))

        lines_np, lines_im, edges_im = line_detection(im)
        w_msk = white_filter(im)
        b_msk = black_filter(im)

        w_box, w_score = detect_text(im, w_msk, '_white')
        b_box, b_score = detect_text(im, b_msk, '_black')

        # keep whichever color mask scores higher for text
        if w_score > b_score:
            text_box = w_box
            print('white')
        else:
            text_box = b_box
            print('black')

        text_box = posprocess_bbox(text_box, lines_np)
        x, y, w, h = text_box

        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # undo the resize and convert (x, y, w, h) to (x1, y1, x2, y2)
        rescaled_text_box = [int(i / ratio) for i in text_box]
        x, y, w, h = rescaled_text_box
        rescaled_text_box = [x, y, x + w, y + h]

        print(rescaled_text_box)
        print(gt[im_name])
        iou = intersection_over_union(rescaled_text_box, gt[im_name])
        print("iou: " + str(iou))
        iou_list.append(iou)

        cv2.imshow('input', im)

    print(iou_list)
    print(np.mean(iou_list))
Example #11
def experiment_fn(run_config, params):
    data = Data(FLAGS)
    data.initialize_word_vectors()

    model = Seq2seq(data.vocab_size, FLAGS, data.embeddings_mat)
    estimator = tf.estimator.Estimator(model_fn=model.make_graph,
                                       config=run_config,
                                       params=FLAGS)

    train_input_fn, train_feed_fn = data.make_input_fn('train')
    eval_input_fn, eval_feed_fn = data.make_input_fn('test')

    print_vars = ['source', 'predict']
    print_inputs = tf.train.LoggingTensorHook(print_vars,
                                              every_n_iter=FLAGS.print_every,
                                              formatter=data.get_formatter(
                                                  ['source', 'predict']))

    experiment = tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        train_steps=FLAGS.iterations,
        min_eval_frequency=FLAGS.print_every,
        train_monitors=[tf.train.FeedFnHook(train_feed_fn), print_inputs],
        eval_hooks=[tf.train.FeedFnHook(eval_feed_fn)],
        eval_steps=10)
    return experiment
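For context, an experiment_fn like this is presumably driven by tf.contrib.learn's learn_runner, mirroring the RunConfig pattern in Example #25 below; the schedule name here is an assumption:

from tensorflow.contrib.learn import learn_runner

run_config = tf.contrib.learn.RunConfig()
run_config = run_config.replace(model_dir=FLAGS.model_dir)
learn_runner.run(experiment_fn=experiment_fn,   # builds the Experiment above
                 run_config=run_config,
                 schedule='train_and_evaluate')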
Example #12
def test_departure_general_info(browser, link):
    link_dep = urljoin(link, "/departure/msk")
    page = DeparturePage(browser, link_dep)
    page.open()
    departure_description_text = page.get_departure_description_text()
    tour_cards_amount = page.get_amount_of_tour_cards()
    
    amount_expected_text = f'Найдено {tour_cards_amount} туров'
    page.should_be_exact_text(amount_expected_text.lower(), departure_description_text.lower())
        
    data = Data()
    departure_prices = data.get_tour_prices('msk')
    price_expected_text = f'от {min(departure_prices)} до {max(departure_prices)} и'
    page.should_be_exact_text(price_expected_text.lower(), departure_description_text.lower())

    departure_duration_of_stay = data.get_tours_duration_of_stay(departure="msk")
    days_expected_text = f'от {min(departure_duration_of_stay)} ночей до {max(departure_duration_of_stay)} ночей'
    page.should_be_exact_text(days_expected_text.lower(), departure_description_text.lower())
Example #13
def test_tour_price(browser, link):
    link_tour = urljoin(link, "/tour/4")
    page = TourPage(browser, link_tour)
    page.open()
    data = Data()
    tour_page_price_text = page.get_tour_price_text()
    tour_page_price = f'за {data.get_tour_price(id=4)}'
    page.should_be_exact_text(tour_page_price.lower(),
                              tour_page_price_text.lower())
Example #14
    def __init__(self):
        self.data = Data(FLAGS)
        model = Seq2seq(self.data.vocab_size, FLAGS)
        estimator = tf.estimator.Estimator(model_fn=model.make_graph,
                                           model_dir=FLAGS.model_dir)

        def input_fn():
            inp = tf.placeholder(tf.int64, shape=[None, None], name='input')
            output = tf.placeholder(tf.int64,
                                    shape=[None, None],
                                    name='output')
            tf.identity(inp[0], 'source')
            tf.identity(output[0], 'target')
            tensors = {'input': inp, 'output': output}
            return tf.estimator.export.ServingInputReceiver(tensors, tensors)

        self.predictor = tf.contrib.predictor.from_estimator(
            estimator, input_fn)
Example #15
def main(args):
    tf.logging.set_verbosity(logging.INFO)

    data = Data(FLAGS)
    model = Seq2seq(data.vocab_size, FLAGS)

    input_fn, feed_fn = data.make_input_fn()
    print_inputs = tf.train.LoggingTensorHook(
        ['source', 'target', 'predict'],
        every_n_iter=FLAGS.print_every,
        formatter=data.get_formatter(['source', 'target', 'predict']))

    estimator = tf.estimator.Estimator(model_fn=model.make_graph,
                                       model_dir=FLAGS.model_dir)
    estimator.train(input_fn=input_fn,
                    hooks=[tf.train.FeedFnHook(feed_fn), print_inputs],
                    steps=FLAGS.iterations)
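Presumably this entry point is launched through the standard TF 1.x flag-parsing wrapper; a minimal sketch:

if __name__ == '__main__':
    tf.app.run()   # parses flags, then invokes main(argv)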
Example #16
def main():
    data = Data(database_dir='w5_BBDD_random', query_dir='w5_devel_random')
    gt = ground_truth_text.get_text_gt()

    # loop over query_imgs without overloading memory
    for im, im_name in data.query_imgs:
        x1gt, y1gt, x2gt, y2gt = gt[im_name]
        cv2.imshow('im', im)
        divide_measure_colorfulness(im)
        print(image_colorfulness(im))

        cv2.waitKey()
Example #17
    def start_evaluation():
        data = Data('search')
        min_count = 5
        min_occur = 82
        segment_range_ms = 6 * 3600 * 1000
        preference_test = ['min', 'q1', 'median', 'q3']
        damping_factor_test = np.linspace(0, 1, 10, endpoint=False)
        changed_limit_test = range(1, 11)
        max_iteration_test = range(5, 101, 5)

        best_p = evaluation_by(data, 'preference', preference_test, min_count,
                               min_occur, segment_range_ms, None, 0.5, 2, 10)
        best_df = evaluation_by(data, 'damping_factor', damping_factor_test,
                                min_count, min_occur, segment_range_ms, best_p,
                                None, 2, 10)
        best_cl = evaluation_by(data, 'changed_limit', changed_limit_test,
                                min_count, min_occur, segment_range_ms, best_p,
                                best_df, None, 10)
        best_mi = evaluation_by(data, 'max_iteration', max_iteration_test,
                                min_count, min_occur, segment_range_ms, best_p,
                                best_df, best_cl, None)
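Each call to evaluation_by above tunes one hyperparameter while the previously found best values are held fixed, and the None argument marks the parameter currently being searched; the chain is effectively a coordinate-descent-style sweep over preference, damping factor, changed limit, and iteration count.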
Example #18
def main():
    database_dir = 'w5_BBDD_random'
    query_folder = "w5_test_random"

    data = Data(database_dir=database_dir, query_dir=query_folder)

    for q_im, q_name in data.query_imgs:
        get_painting_rotated(q_im, show=True, imname=q_name, save_fig=False)

    exit(0)
Example #19
def main(args):
    if args.test:
        query_folder = "query_test_random"
        ground_truth = ground_truth_test
    else:
        query_folder = "query_devel_random"
        ground_truth = ground_truth_val

    data = Data(database_dir='museum_set_random', query_dir=query_folder)

    eval_array = []

    query_imgs = [[im, name] for im, name in data.query_imgs]
    database_imgs = [[im, name] for im, name in data.database_imgs]
    query_hist = [get_hsv_hist(im) for im, name in query_imgs]
    database_hist = [get_hsv_hist(im) for im, name in database_imgs]

    database_hash = {}
    for im, name in data.database_imgs:
        database_hash[name] = get_hash(im)

    for [q_image, q_name], q_hist in zip(query_imgs, query_hist):
        if args.use_histogram:
            K = len(database_imgs)
        else:
            K = 10

        scores = retrieve_best_results(q_image,
                                       database_imgs,
                                       database_hash,
                                       K=K)

        if args.use_histogram:
            scores2 = retrieve_best_results_hsv(image_histH=q_hist,
                                                database_imgs=database_imgs,
                                                database_hist=database_hist,
                                                K=K)
            # sort by image name
            scores.sort(key=lambda s: s[0], reverse=False)
            scores2.sort(key=lambda s: s[0], reverse=False)

            # add the scores (assuming we are using cv2.HISTCMP_BHATTACHARYYA, as it outputs the best match as the
            # lowest score)
            combined_scores = [(score[0][0], score[1][1] + score[0][1])
                               for score in zip(scores, scores2)]

            combined_scores.sort(key=lambda s: s[1], reverse=False)
            combined_scores = combined_scores[:10]
            scores = combined_scores

        eval_result = evaluation(predicted=[s[0] for s in scores],
                                 actual=[ground_truth[q_name]])
        print(eval_result)
        eval_array.append(eval_result)
    global_eval = np.mean(eval_array)
    print("----------------\nEvaluation: " + str(global_eval))
Example #20
File: bot.py Project: Pumpka-s/TelegramBot
def mes(message):
    # Check that the user is registered
    if not Data().is_registered(message.from_user.id):
        bot.send_message(message.from_user.id,
                         "🚫Для начала, зарегистрируйся🚫\nНапиши /start")
        return
    # Fetch the user's info from the DB
    user = User(message.from_user.id)
    user.bot_status = str(Data().bot_status(user.id))
    # To-do creation flow
    if user.bot_status == 'creating_to_do':
        task = To_do()
        task.create_task(message.text)
        Data().create_task(user.id, task)
        Data().bot_status(user.id, 'waiting')
        bot.send_message(user.id,
                         'Туду успешно создано',
                         reply_markup=show_main_keyboard(0))
    # Processing with a parameter
    elif len(user.bot_status.split('_<>_')) > 4:
        task = To_do()
        button = Button(button_type='parameter')
        button.convert_to_button(user.bot_status)
        if button.parameter.values[button.parameter.now] == 'add':
            task = Data().get_task(user.id, int(button.task_id))
            task.text += '\n' + message.text
            task.build_task_out()
        elif button.parameter.values[button.parameter.now] == 'rewrite':
            task.id = button.task_id
            task.create_task(message.text)
        Data().update_task(user.id, task)
        bot.send_message(message.chat.id,
                         'Изменено',
                         reply_markup=show_main_keyboard(0))
        Data().bot_status(user.id, 'waiting')
    # Instructions for creating a to-do
    elif message.text == 'Новый туду':
        Data().bot_status(user.id, 'creating_to_do')
        bot.send_message(message.chat.id,
                         "В первой строке напиши название задачи, "
                         "после - описание\n"
                         "Например:",
                         reply_markup=types.ReplyKeyboardRemove())
        button = Button('creating_to_do', action='cancel')
        markup = types.InlineKeyboardMarkup(row_width=2)
        item = types.InlineKeyboardButton(
            'Отменить', callback_data=button.convert_to_string())
        markup.add(item)
        bot.send_message(message.chat.id, "Список покупок\n"
                         "молоко, хлеб, бананы",
                         reply_markup=markup)
    # Show existing to-dos
    elif message.text == 'Мои туду':
        if not Data().tasks_exist(user.id):
            bot.send_message(message.chat.id,
                             'У тебя нет ни одного туду',
                             reply_markup=show_main_keyboard(0))
        else:
            bot.send_message(message.chat.id,
                             'Твои туду:',
                             reply_markup=show_main_keyboard(0))
            for index in range(Data().get_index_of_the_last_task(user.id) + 1):
                task = Data().get_task(user.id, index)
                if not task:
                    continue
                if task.active:
                    markup = types.InlineKeyboardMarkup(row_width=2)
                    button = Button('showing_tasks',
                                    action='done',
                                    task_id=task.id)
                    item1 = types.InlineKeyboardButton(
                        "✅ Сделано", callback_data=button.convert_to_string())
                    button.action = 'delete'
                    item2 = types.InlineKeyboardButton(
                        "❌ Удалить", callback_data=button.convert_to_string())
                    button.action = 'edit'
                    item3 = types.InlineKeyboardButton(
                        "✏ Изменить", callback_data=button.convert_to_string())
                    markup.add(item1, item2, item3)

                    bot.send_message(message.chat.id,
                                     task.out,
                                     reply_markup=markup,
                                     parse_mode='html')
                else:
                    markup = types.InlineKeyboardMarkup(row_width=2)
                    button = Button('showing_tasks',
                                    action='backup',
                                    task_id=task.id)
                    item1 = types.InlineKeyboardButton(
                        "🔙 Вернуть", callback_data=button.convert_to_string())
                    button.action = "delete"
                    item2 = types.InlineKeyboardButton(
                        "❌ Удалить", callback_data=button.convert_to_string())
                    markup.add(item1, item2)
                    bot.send_message(message.chat.id,
                                     "🟢 " + task.title,
                                     reply_markup=markup,
                                     parse_mode='html')
Example #21
File: bot.py Project: Pumpka-s/TelegramBot
def callback_inline(call):
    try:
        if call.message:
            button = Button(button_type=call.data.split('_<>_')[3])
            button.convert_to_button(call.data)
            if button.type == 'simple':
                if button.status == 'showing_tasks':
                    task = Data().get_task(call.from_user.id, button.task_id)
                    if not task:
                        return
                    # "Done" button pressed
                    if button.action == 'done':
                        Data().change_task_activity(call.from_user.id, task.id,
                                                    False)
                        markup = types.InlineKeyboardMarkup(row_width=2)
                        button.action = 'backup'
                        item1 = types.InlineKeyboardButton(
                            "🔙 Вернуть",
                            callback_data=button.convert_to_string())
                        button.action = 'delete'
                        item2 = types.InlineKeyboardButton(
                            "❌ Удалить",
                            callback_data=button.convert_to_string())
                        markup.add(item1, item2)
                        bot.edit_message_text(
                            chat_id=call.message.chat.id,
                            message_id=call.message.message_id,
                            reply_markup=markup,
                            text="🟢 " + task.title,
                            parse_mode='html')
                    # "Delete" button pressed
                    elif button.action == 'delete':
                        Data().delete_task(call.from_user.id, task.id)
                        bot.delete_message(chat_id=call.message.chat.id,
                                           message_id=call.message.message_id)
                    # "Edit" button pressed
                    elif button.action == 'edit':
                        button.status = 'editing_to_do'
                        button.type = 'parameter'
                        button.parameter.values = ['rewrite', 'add']
                        markup = types.InlineKeyboardMarkup(row_width=2)
                        item = types.InlineKeyboardButton(
                            "Параметр: Перезаписать",
                            callback_data=button.convert_to_string())
                        markup.add(item)
                        bot.send_message(
                            call.message.chat.id,
                            "👇 Твое туду",
                            reply_markup=types.ReplyKeyboardRemove())
                        bot.send_message(call.message.chat.id,
                                         task.title + task.text,
                                         reply_markup=markup)
                        Data().bot_status(call.from_user.id,
                                          button.convert_to_string())
                    # Return the to-do to active
                    elif button.action == "backup":
                        markup = types.InlineKeyboardMarkup(row_width=2)
                        button.action = 'done'
                        item1 = types.InlineKeyboardButton(
                            "✅ Сделано",
                            callback_data=button.convert_to_string())
                        button.action = 'delete'
                        item2 = types.InlineKeyboardButton(
                            "❌ Удалить",
                            callback_data=button.convert_to_string())
                        button.action = 'edit'
                        item3 = types.InlineKeyboardButton(
                            "✏ Изменить",
                            callback_data=button.convert_to_string())
                        markup.add(item1, item2, item3)
                        Data().change_task_activity(call.from_user.id,
                                                    button.task_id, True)
                        bot.edit_message_text(
                            Data().get_task(call.from_user.id,
                                            button.task_id).out,
                            chat_id=call.from_user.id,
                            message_id=call.message.message_id,
                            reply_markup=markup,
                            parse_mode='html')
                # Button tied to cancelling to-do creation
                elif button.status == 'creating_to_do':
                    if button.action == 'cancel' and Data().bot_status(
                            call.from_user.id) == 'creating_to_do':
                        Data().bot_status(call.from_user.id, 'waiting')
                        bot.send_message(call.message.chat.id,
                                         "Отменено",
                                         parse_mode='html',
                                         reply_markup=show_main_keyboard(0))

            elif button.type == 'parameter' and Data().bot_status(
                    call.from_user.id).find('editing_to_do') != -1:
                # Button tied to editing the to-do
                if button.status == 'editing_to_do':
                    button.parameter.next_step()
                    bot.edit_message_reply_markup(
                        call.message.chat.id,
                        call.message.message_id,
                        reply_markup=button.build_parameter_button())
                    Data().bot_status(call.from_user.id,
                                      button.convert_to_string())
    except Exception as e:
        print(repr(e))
Example #22
def supervisedLearning(datasetPath,
                       datasetSize,
                       transferMethod=None,
                       transferVocabularyPath=None,
                       sourceCheckpointPath=None,
                       suffix=""):

    test_source = datasetPath + "test_source.txt"
    test_target = datasetPath + "test_target.txt"
    train_source = datasetPath + "train_source.txt"
    train_target = datasetPath + "train_target.txt"
    resultPath = datasetPath + "SL"

    if transferMethod is None:
        vocabulary = datasetPath + "quora_msr_vocabulary.txt"
    else:
        vocabulary = transferVocabularyPath

    data = Data(FLAGS, train_source, train_target, test_source, test_target,
                vocabulary)
    model = Seq2seq(data.vocab_size, FLAGS, transferMethod,
                    sourceCheckpointPath)
    size = datasetSize
    epoch = 5
    # iterations = int(round(size * epoch / FLAGS.batch_size))
    iterations = 4

    input_fn, feed_fn = data.make_input_fn()
    print_inputs = tf.train.LoggingTensorHook(
        ['source', 'target', 'predict'],
        every_n_iter=FLAGS.print_every,
        formatter=data.get_formatter(['source', 'target', 'predict']))

    estimator = tf.estimator.Estimator(
        model_fn=model.make_graph,
        model_dir="data/cps/checkpointsQuoraMSRinit")
    model.setLoadParameters(False)

    test_fn = data.make_test_fn()

    predictions = estimator.predict(test_fn)

    test_paraphrases = []
    for p in predictions:
        test_paraphrases.append(p)

    data.builtTranslationCorpus(test_paraphrases)
    scr = evaluate(data.reference_corpus, data.translation_corpus)
    print(data.translation_corpus)
    print(scr)
    saveResult(100, scr, resultPath)
Example #23
def trainWithPreviousKnowledge(datasetPath,
                               datasetSize,
                               transferMethod=None,
                               transferVocabularyPath=None,
                               sourceCheckpointPath=None,
                               suffix=""):
    percentages = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    checkpoint_filename = "checkpointsAdaptiveWithPrev" + suffix
    size = datasetSize
    epoch = 5
    test_source = datasetPath + "test_source.txt"
    test_target = datasetPath + "test_target.txt"
    resultPath = datasetPath + "AdaptiveWithPrev"

    if transferMethod is None:
        vocabulary = datasetPath + "v.txt"
    else:
        vocabulary = transferVocabularyPath

    data = Data(FLAGS, "", "", "", "", vocabulary)
    model = Seq2seq(data.vocab_size, FLAGS, transferMethod,
                    sourceCheckpointPath)

    for i in percentages:
        source_filename = datasetPath + "without-pool/" + format(
            i, '.1f') + "_source" + ".txt"
        target_filename = datasetPath + "without-pool/" + format(
            i, '.1f') + "_target" + ".txt"
        data = Data(FLAGS, source_filename, target_filename, test_source,
                    test_target, vocabulary)
        iterations = int(round(size * i * epoch / FLAGS.batch_size))
        # iterations = 1

        input_fn, feed_fn = data.make_input_fn()
        test_fn = data.make_test_fn()
        print_inputs = tf.train.LoggingTensorHook(
            ['source', 'target', 'predict'],
            every_n_iter=FLAGS.print_every,
            formatter=data.get_formatter(['source', 'target', 'predict']))

        estimator = tf.estimator.Estimator(model_fn=model.make_graph,
                                           model_dir=checkpoint_filename,
                                           params=FLAGS)
        print("Training with " + format(i, '.2f') + " percent of the dataset.")
        estimator.train(input_fn=input_fn,
                        hooks=[tf.train.FeedFnHook(feed_fn), print_inputs],
                        steps=iterations)

        model.setLoadParameters(False)

        predictions = estimator.predict(test_fn)

        test_paraphrases = []
        for p in predictions:
            test_paraphrases.append(p)

        data.builtTranslationCorpus(test_paraphrases)
        scr = evaluate(data.reference_corpus, data.translation_corpus)
        print(i, scr)
        saveResult(i, scr, resultPath)
Example #24
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from data_handler import Data

if __name__ == '__main__':
    data_handler = Data()
    data_handler.start_program()
Example #25
import tensorflow as tf
from seq2seq import Seq2seq
from data_handler import Data
from train import FLAGS

from tensorflow.python import debug as tf_debug
from tensorflow.contrib.learn import learn_runner

tf.logging.set_verbosity(tf.logging.INFO)

run_config = tf.contrib.learn.RunConfig()
run_config = run_config.replace(model_dir=FLAGS.experiment_dir)

data = Data(FLAGS)
data.initialize_word_vectors()

input_fn, _ = data.make_input_fn()
format_fn = lambda seq: ' '.join([data.rev_vocab.get(x, '<UNK>') for x in seq])

model = Seq2seq(data.vocab_size, FLAGS, data.embeddings_mat)
estimator = tf.estimator.Estimator(model_fn=model.make_graph,
                                   config=run_config,
                                   params=FLAGS)


def predict_feed_fn(phrase):
    tokens = data.tokenize_and_map(phrase, mode='test') + [data.END_TOKEN]

    def feed_fn():
        return {'source_in:0': [tokens]}

    return feed_fn  # assumed: the feed function is returned for use with tf.train.FeedFnHook
Example #26
def main(database_dir, query_folder, ground_truth, method_name, forTest=False):
    if method_name == 'orb':
        method = Orb(max_features=2000)
        # min_features = 25
        th = 0.02
    elif method_name == 'surf':
        method = Surf()
        # min_features = 100
        th = 0.24
    elif method_name == 'sift':
        method = Sift()
        # min_features = 70
        th = 0.0341796875
        print("SIFT threshold might need to be tuned again")
    elif method_name == 'root_sift':
        method = RootSift()
        # min_features = 70
        th = 0.1  # 0.0341796875
    else:
        exit(1)

    data = Data(database_dir=database_dir, query_dir=query_folder)

    query_imgs = [[None, name] for im, name in data.query_imgs]
    database_imgs = [[None, name] for im, name in data.database_imgs]
    database_feats, query_feats = get_features(method, data, query_folder, database_dir, method_name)

    eval_array = []

    res = []
    for _, q_name in query_imgs:

        scores = method.retrieve_best_results(None, database_imgs, database_feats, query_feats[q_name])

        if method_name == 'sift' or method_name == 'root_sift':
            features_num = len(query_feats[q_name][0])
        else:
            features_num = len(query_feats[q_name])
        features_num = max(features_num, 1)

        if scores[0][1] / features_num < th:  # minimum number of features matched allowed (-1 otherwise)
            scores = [(-1, 0)]
        eval_result = evaluation(predicted=[s[0] for s in scores], actual=ground_truth[q_name])
        eval_array.append(eval_result)

        res.append([score for score, _ in scores])

        print(scores[:3], "   ", ground_truth[q_name], "   ", scores[0][1] / features_num)
        print(eval_result)

    global_eval = np.mean(eval_array)
    print("----------------\nEvaluation: " + str(global_eval))

    q = [name for _, name in data.query_imgs]
    with open("result.pkl", "wb") as f:
        pickle.dump(res, f)
    with open("query.pkl", "wb") as f:
        pickle.dump(q, f)

    return 0
Example #27
def main():
    data = Data(database_dir='w5_BBDD_random', query_dir='w5_devel_random')

    gt = ground_truth_text.get_text_gt()

    #### parameters obtained by analyzing the ground truth ############
    mean_aspect_ratio = 7.996836472972114
    std_aspect_ratio = 1.8974561127167842
    mean_length = 125.89268292682927
    std_length = 29.238100687737802
    mean_area = 2080.360975609756
    std_area = 794.6253398125554
    mean_filling_ratio = 0.13717846010306994
    std_fillin_ratio = 0.07545651641355072
    mean_saturation = 37.55115543136576
    std_saturation = 28.178800884826995
    mean_centered_distance = 0.0069118142084640425
    std_centered_distance = 0.002423878582904023

    iou_list = []
    # loop over database_imgs without overloading memory
    for im, im_name in data.database_imgs:

        ################ Resize ########################################################

        FinalSize = 200
        shape_max = max(im.shape)
        ratio = FinalSize / shape_max
        hsv_image_big = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)

        image_white = cv2.inRange(hsv_image_big, (0, 0, 200), (180, 50, 255)) / 255
        image_black = cv2.inRange(hsv_image_big, (0, 0, 0), (180, 50, 50)) / 255

        size = max(hsv_image_big.shape)
        kernel_size = int(size / 100)
        kernel = np.ones((kernel_size, kernel_size), np.uint8)

        image_black = cv2.morphologyEx(image_black, cv2.MORPH_CLOSE, kernel)
        image_white = cv2.morphologyEx(image_white, cv2.MORPH_CLOSE, kernel)

        image_blackwhite = cv2.bitwise_or(image_black, image_white)

        integral_threshblack_big = cv2.integral(image_black)
        integral_threshwhite_big = cv2.integral(image_white)
        integral_blackwhite_big = cv2.integral(image_blackwhite)
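        # Note: for an integral image I = cv2.integral(img), the sum of img over the
        # inclusive window (x1, y1)-(x2, y2) is
        #     I[y2 + 1, x2 + 1] + I[y1, x1] - I[y1, x2 + 1] - I[y2 + 1, x1]
        # The window filtering and scoring below applies this identity to the
        # saturation, value, gradient, and letter-mask integral images.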

        im = cv2.resize(im, (0, 0), fx=ratio, fy=ratio)

        hsv_image = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        rgb_image = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        hue_im, sat_im, val_im = hsv_image[:, :, 0], hsv_image[:, :, 1], hsv_image[:, :, 2]

        bboxes_white = detect_letters.bboxes_white(hsv_image_big)
        bboxes_black = detect_letters.bboxes_black(hsv_image_big)

        ################ Smoothing (disabled) #########################################

        ################ Gradient ######################################################

        k_size = 3

        hue_sobelx = np.absolute(cv2.Sobel(hue_im, cv2.CV_64F, 1, 0, ksize=k_size))
        hue_sobely = np.absolute(cv2.Sobel(hue_im, cv2.CV_64F, 0, 1, ksize=k_size))
        hue_sobel = hue_sobelx + hue_sobely

        sat_sobelx = np.absolute(cv2.Sobel(sat_im, cv2.CV_64F, 1, 0, ksize=k_size))
        sat_sobely = np.absolute(cv2.Sobel(sat_im, cv2.CV_64F, 0, 1, ksize=k_size))
        sat_sobel = sat_sobelx + sat_sobely

        val_sobelx = np.absolute(cv2.Sobel(val_im, cv2.CV_64F, 1, 0, ksize=k_size))
        val_sobely = np.absolute(cv2.Sobel(val_im, cv2.CV_64F, 0, 1, ksize=k_size))
        val_sobel = val_sobelx + val_sobely

        mix_sobelx = (sat_sobelx + val_sobelx) / 510
        mix_sobely = (sat_sobely + val_sobely) / 510
        mix_sobel = sat_sobel + val_sobel

        integral_mix_sobelx = cv2.integral(mix_sobelx)
        integral_mix_sobely = cv2.integral(mix_sobely)

        mix_sobel = mix_sobel.astype(np.uint8)

        ############## Filter High Saturation ################

        mask = cv2.inRange(sat_im, 0, 70)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)/255
        mix_sobelx = np.multiply(mix_sobelx, mask)
        mix_sobely = np.multiply(mix_sobely, mask)



        ############# Edges Custom #############################
        edges_step = 2
        soft_threshold_multiplier = 2
        high_threshold_multiplier = 4

        # horizontal edges
        hedges = []
        (mu, sigma) = cv2.meanStdDev(mix_sobely)
        soft_threshold = mu + soft_threshold_multiplier * sigma
        strong_threshold = mu + high_threshold_multiplier * sigma
        edge_state = -1  # -1 no edge, 0 soft edge, 1 strong edge
        for y in range(1, im.shape[0] - 1, edges_step):
            for x in range(0, im.shape[1], edges_step):
                score = mix_sobely[y, x] + mix_sobely[y - 1, x] + mix_sobely[y + 1, x]
                if edge_state == -1:
                    if score > soft_threshold:
                        edge_state = 0
                        edge_start = (x, y)

                if edge_state == 0:
                    if score > strong_threshold:
                        edge_state = 1
                    if score < soft_threshold:
                        edge_state = -1

                if edge_state == 1:
                    if score < soft_threshold:
                        hedges.append((edge_start, (x, y)))
                        edge_state = -1

            if edge_state == 1:
                hedges.append((edge_start, (x, y)))
            edge_state = -1

        for edge in hedges:
            cv2.line(img=rgb_image, pt1=edge[0], pt2=edge[1], color=(0, 255, 0), thickness=1)

        # vertical edges
        vedges = []
        (mu, sigma) = cv2.meanStdDev(mix_sobelx)
        soft_threshold = mu + soft_threshold_multiplier * sigma
        strong_threshold = mu + high_threshold_multiplier * sigma
        edge_state = -1  # -1 no edge, 0 soft edge, 1 strong edge
        for x in range(1, im.shape[1] - 1, edges_step):
            for y in range(0, im.shape[0], edges_step):
                score = mix_sobelx[y, x] + mix_sobelx[y, x - 1] + mix_sobelx[y, x + 1]
                if edge_state == -1:
                    if score > soft_threshold:
                        edge_state = 0
                        edge_start = (x, y)

                if edge_state == 0:
                    if score > strong_threshold:
                        edge_state = 1
                    if score < soft_threshold:
                        edge_state = -1

                if edge_state == 1:
                    if score < soft_threshold:
                        vedges.append((edge_start, (x, y)))
                        edge_state = -1

            if edge_state == 1:
                vedges.append((edge_start, (x, y)))
            edge_state = -1

        for edge in vedges:
            cv2.line(img=rgb_image, pt1=edge[0], pt2=edge[1], color=(255, 0, 0), thickness=1)
        ############# "Candidate Windows from edge vertices ####################
        candidate_points = []
        for edge in hedges:
            candidate_points.append(edge[0])
            candidate_points.append(edge[1])
        for edge in vedges:
            candidate_points.append(edge[0])
            candidate_points.append(edge[1])
        #print("len candidate points")
        #print(len(candidate_points))

        integral_image_sat = cv2.integral(sat_im)
        integral_image_val = cv2.integral(val_im)

        ################ GT test ################################################

        x1_big, y1_big, x2_big, y2_big = gt[im_name]

        point1 = (int(x1_big * ratio), int(y1_big * ratio))
        point2 = (int(x2_big * ratio), int(y2_big * ratio))

        passed = False

        length = abs(point1[0] - point2[0])
        if 40 < length < 170:

            height = abs(point1[1] - point2[1])
            if height > 0:

                aspect_ratio = length / height
                if 3 < aspect_ratio < 14:

                    area = length * height
                    if 400 < area < 5000:

                        x1 = min(point1[0], point2[0])
                        y1 = min(point1[1], point2[1])
                        x2 = max(point1[0], point2[0])
                        y2 = max(point1[1], point2[1])

                        x2 = min(x2, im.shape[1] - 1)
                        y2 = min(y2, im.shape[0] - 1)

                        sum_sat = integral_image_sat[y2 + 1, x2 + 1] + integral_image_sat[y1, x1] - \
                                  integral_image_sat[y2 + 1, x1] - integral_image_sat[y1, x2 + 1]
                        mean_sat = sum_sat / area

                        if mean_sat < 150:

                            x1_big, y1_big, x2_big, y2_big = int(x1 / ratio), int(y1 / ratio), int(
                                x2 / ratio), int(y2 / ratio)

                            centered_distance = abs(x1_big - (hsv_image_big.shape[1] - x2_big)) / hsv_image_big.shape[1]

                            if centered_distance < 0.2:

                                sum_val = integral_image_val[y2 + 1, x2 + 1] + integral_image_val[y1, x1] - \
                                          integral_image_val[y2 + 1, x1] - integral_image_val[y1, x2 + 1]
                                mean_val = sum_val / area

                                if mean_val < 120:  # dark background -> white letters
                                    target_integral_image = integral_threshwhite_big
                                    is_dark_bg = True
                                else:
                                    is_dark_bg = False
                                if mean_val > 140:  # bright background -> dark letters
                                    target_integral_image = integral_threshblack_big
                                    is_bright_bg = True
                                else:
                                    is_bright_bg = False
                                if not (is_dark_bg or is_bright_bg):
                                    target_integral_image = integral_blackwhite_big


                                count_letters_big = target_integral_image[y2_big + 1, x2_big + 1] + \
                                                    target_integral_image[y1_big, x1_big] - \
                                                    target_integral_image[y2_big + 1, x1_big] - \
                                                    target_integral_image[y1_big, x2_big + 1]
                                area_big = area / ratio / ratio
                                filling_letters = count_letters_big / area_big

                                if 0.02 < filling_letters < 0.4:
                                    passed = True

                                    #################### SCORE WINDOWS AND RETRIEVE BEST ##########################

                                    distance = abs(aspect_ratio - mean_aspect_ratio) / std_aspect_ratio + \
                                               abs(length - mean_length) / std_length + \
                                               abs(area - mean_area) / std_area + \
                                               abs(filling_letters - mean_filling_ratio) / std_fillin_ratio + \
                                               abs(mean_sat - mean_saturation) / std_saturation

                                    x2 = min(x2, im.shape[1] - 2)
                                    y2 = min(y2, im.shape[0] - 2)
                                    x1 = max(1, x1)
                                    y1 = max(1, y1)

                                    count_gradient_htop = (integral_mix_sobely[y1 + 2, x2 + 1] + \
                                                           integral_mix_sobely[y1 - 1, x1] - \
                                                           integral_mix_sobely[y1 - 1, x2 + 1] - \
                                                           integral_mix_sobely[y1 + 2, x1]) / ((x2 - x1) * 3)

                                    count_gradient_hbot = (integral_mix_sobely[y2 + 2, x2 + 1] + \
                                                           integral_mix_sobely[y2 - 1, x1] - \
                                                           integral_mix_sobely[y2 - 1, x2 + 1] - \
                                                           integral_mix_sobely[y2 + 2, x1]) / ((x2 - x1) * 3)

                                    count_gradient_vleft = (integral_mix_sobelx[y2 + 1, x1 + 2] + \
                                                            integral_mix_sobelx[y1, x1 - 1] - \
                                                            integral_mix_sobelx[y1, x1 + 2] - \
                                                            integral_mix_sobelx[y2 + 1, x1 - 1]) / ((y2 - y1) * 3)

                                    count_gradient_right = (integral_mix_sobelx[y2 + 1, x2 + 2] + \
                                                            integral_mix_sobelx[y1, x2 - 1] - \
                                                            integral_mix_sobelx[y1, x2 + 2] - \
                                                            integral_mix_sobelx[y2 + 1, x2 - 1]) / ((y2 - y1) * 3)

                                    gradient_score = count_gradient_htop + count_gradient_hbot + count_gradient_vleft + count_gradient_right

                                    if is_dark_bg:
                                        bboxes = bboxes_white
                                    elif is_bright_bg:
                                        bboxes = bboxes_black
                                    else:
                                        bboxes = bboxes_white + bboxes_black

                                    box_cuts = 0
                                    intersections = 0
                                    for bbox in bboxes:
                                        intersect = intersection(bbox, (x1_big, y1_big, x2_big, y2_big))
                                        area_bbox = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
                                        cut = min(intersect, area_bbox - intersect)
                                        box_cuts += cut
                                        intersections += intersect

                                    intersect_score = intersections / area_big
                                    box_cuts = box_cuts / area_big

                                    gt_dict_score = {}
                                    gt_dict_score["gradient_score"] = gradient_score
                                    gt_dict_score["box_cuts"] = box_cuts
                                    gt_dict_score["dist_aspect_ratio"] = abs(aspect_ratio - mean_aspect_ratio) / std_aspect_ratio
                                    gt_dict_score["dist_length"] = abs(length - mean_length) / std_length
                                    gt_dict_score["area"] = abs(area - mean_area) / std_area
                                    gt_dict_score["filling_letters"] = abs(filling_letters - mean_filling_ratio) / std_fillin_ratio
                                    gt_dict_score["saturation"] = abs(mean_sat - mean_saturation) / std_saturation
                                    gt_dict_score["intersect score"] = intersect_score

                                    gt_score = distance/4 - gradient_score + 10*box_cuts - 5*intersect_score

        if passed and intersect_score > 0:
            print("Test gt ok: " + im_name)
        else:
            print("---------------")
            print("TEST GT not PASSED!!!: " + im_name)
            print("intersect_score", intersect_score)
            print("length", length)
            print("aspect_ratio", aspect_ratio)
            print("area", area)
            print("mean_sat", mean_sat)
            print("centered", centered_distance)
            print("filling ratio", filling_letters)
            print("is_dark", is_dark_bg)
            print("is_bright", is_bright_bg)
            print(mean_val)
            print("... Bboxes")
            print((x1_big, y1_big, x2_big, y2_big))
            print(bboxes)
            for bbox in bboxes:
                cv2.rectangle(image_black, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (1), 10)
                cv2.rectangle(image_white, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (1), 10)
            plt.subplot(331)
            plt.title("Image")
            plt.imshow(rgb_image)
            plt.subplot(332)
            plt.title("Saturation")
            plt.imshow(sat_im, cmap='gray')
            plt.subplot(333)
            plt.title("Value")
            plt.imshow(val_im, cmap='gray')

            plt.subplot(3, 3, 7)
            plt.title("Threshold_Black")
            plt.imshow(image_black, cmap='gray')

            plt.subplot(3, 3, 8)
            plt.title("Threshold_White")
            plt.imshow(image_white, cmap='gray')

            plt.subplot(3, 3, 5)
            plt.title("Saturation Gradient")
            plt.imshow(sat_sobel, cmap='gray')

            plt.subplot(3, 3, 6)
            plt.title("Value Gradient")
            plt.imshow(val_sobel, cmap='gray')

            plt.subplot(3, 3, 4)
            plt.title("Mask")
            plt.imshow(mask, cmap='gray')

            plt.show()

        cv2.rectangle(rgb_image, pt1=point1, pt2=point2, color=(0, 255, 0), thickness=2)

        count = 0
        window_candidates = []
        for point1 in candidate_points:
            for point2 in candidate_points:

                ######################################## FILTER WINDOWS ###############################################
                length = abs(point1[0] - point2[0])
                if 40 < length < 170:

                    height = abs(point1[1] - point2[1])
                    if height > 0:

                        aspect_ratio = length / height
                        if 3 < aspect_ratio < 14:

                            area = length * height
                            if 400 < area < 5000:

                                x1 = min(point1[0], point2[0])
                                y1 = min(point1[1], point2[1])
                                x2 = max(point1[0], point2[0])
                                y2 = max(point1[1], point2[1])

                                x2 = min(x2, im.shape[1] - 1)
                                y2 = min(y2, im.shape[0] - 1)

                                x1_big, y1_big, x2_big, y2_big = int(x1 / ratio), int(y1 / ratio), int(
                                    x2 / ratio), int(y2 / ratio)

                                sum_sat = integral_image_sat[y2 + 1, x2 + 1] + integral_image_sat[y1, x1] - \
                                          integral_image_sat[y2 + 1, x1] - integral_image_sat[y1, x2 + 1]
                                mean_sat = sum_sat / area

                                if mean_sat < 150:

                                    centered_distance = abs(x1_big - (hsv_image_big.shape[1] - x2_big)) / hsv_image_big.shape[1]

                                    if centered_distance < 0.2:

                                        sum_val = integral_image_val[y2 + 1, x2 + 1] + integral_image_val[y1, x1] - \
                                                  integral_image_val[y2 + 1, x1] - integral_image_val[y1, x2 + 1]
                                        mean_val = sum_val / area

                                        is_dark_bg = mean_val < 120    # dark background -> white letters
                                        is_bright_bg = mean_val > 140  # bright background -> dark letters
                                        if is_dark_bg:
                                            target_integral_image = integral_threshwhite_big
                                        elif is_bright_bg:
                                            target_integral_image = integral_threshblack_big
                                        else:
                                            # brightness is ambiguous: use both letter masks
                                            target_integral_image = integral_blackwhite_big


                                        count_letters_big = target_integral_image[y2_big + 1, x2_big + 1] + \
                                                            target_integral_image[y1_big, x1_big] - \
                                                            target_integral_image[y2_big + 1, x1_big] - \
                                                            target_integral_image[y1_big, x2_big + 1]
                                        area_big = area / ratio / ratio
                                        filling_letters = count_letters_big / area_big

                                        if 0.02 < filling_letters < 0.4:


                                            count += 1
                                            #cv2.rectangle(rgb_image, pt1=point1, pt2=point2, color=(0,0,255), thickness=1)

                                            #################### SCORE WINDOWS AND RETRIEVE BEST ##########################

                                            distance = abs(aspect_ratio-mean_aspect_ratio)/std_aspect_ratio + \
                                                abs(length-mean_length)/std_length + \
                                                abs(area-mean_area)/std_area + \
                                                abs(filling_letters-mean_filling_ratio)/std_fillin_ratio + \
                                                abs(mean_sat-mean_saturation)/std_saturation

                                            x2 = min(x2, im.shape[1] - 2)
                                            y2 = min(y2, im.shape[0] - 2)
                                            x1 = max(1, x1)
                                            y1 = max(1, y1)

                                            # Mean gradient magnitude in 3-pixel bands along the four
                                            # window borders (integral-image box sums over the Sobel maps).
                                            count_gradient_htop = (integral_mix_sobely[y1 + 2, x2 + 1] +
                                                                   integral_mix_sobely[y1 - 1, x1] -
                                                                   integral_mix_sobely[y1 - 1, x2 + 1] -
                                                                   integral_mix_sobely[y1 + 2, x1]) / ((x2 - x1) * 3)

                                            count_gradient_hbot = (integral_mix_sobely[y2 + 2, x2 + 1] + \
                                                                  integral_mix_sobely[y2 - 1, x1] - \
                                                                  integral_mix_sobely[y2 - 1, x2 + 1] - \
                                                                  integral_mix_sobely[y2 + 2, x1]) / ((x2-x1)*3)

                                            count_gradient_vleft = (integral_mix_sobelx[y2 + 1, x1 + 2] + \
                                                                  integral_mix_sobelx[y1, x1-1] - \
                                                                  integral_mix_sobelx[y1, x1 + 2] - \
                                                                  integral_mix_sobelx[y2 + 1, x1-1]) / ((y2-y1)*3)

                                            count_gradient_right = (integral_mix_sobelx[y2 + 1, x2 + 2] + \
                                                                   integral_mix_sobelx[y1, x2 - 1] - \
                                                                   integral_mix_sobelx[y1, x2 + 2] - \
                                                                   integral_mix_sobelx[y2 + 1, x2 - 1]) / ((y2-y1)*3)

                                            gradient_score = count_gradient_htop + count_gradient_hbot + count_gradient_vleft + count_gradient_right

                                            if is_dark_bg:
                                                bboxes = bboxes_white
                                            elif is_bright_bg:
                                                bboxes = bboxes_black
                                            else:
                                                bboxes = bboxes_white + bboxes_black

                                            box_cuts = 0
                                            intersections = 0
                                            for bbox in bboxes:
                                                intersect = intersection(bbox, (x1_big, y1_big, x2_big, y2_big))
                                                area_bbox = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
                                                cut = min(intersect, area_bbox-intersect)
                                                box_cuts += cut
                                                intersections += intersect

                                            intersect_score = intersections / area_big
                                            box_cuts = box_cuts / area_big
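                                            # intersect_score: fraction of the window covered by letter
                                            # boxes; box_cuts: penalty for letter boxes that the window
                                            # border slices through (min of inside/outside parts).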

                                            dict_score = {}
                                            dict_score["gradient_score"] = gradient_score
                                            dict_score["box_cuts"] = box_cuts
                                            dict_score["dist_aspect_ratio"] = abs(aspect_ratio-mean_aspect_ratio)/std_aspect_ratio
                                            dict_score["dist_length"] = abs(length-mean_length)/std_length/4
                                            dict_score["area"] = abs(area-mean_area)/std_area/4
                                            dict_score["filling_letters"] = abs(filling_letters-mean_filling_ratio)/std_fillin_ratio
                                            dict_score["saturation"] = abs(mean_sat - mean_saturation) / std_saturation
                                            dict_score["intersect score"] = intersect_score

                                            score = distance/4 - gradient_score + 10*box_cuts - 5*intersect_score

                                            window_candidates.append(((x1, y1, x2, y2), score, dict_score))


        #print("candidate windows")
        #print(count)
        if len(window_candidates) == 0:
            print("Empty window candidates!!!: " + im_name)
            winning_window = (0, 0, 0, 0)
            # Initialize these too, so the gt_score comparison below cannot raise a NameError.
            scoreBest, dict_scoreBest = float('inf'), {}
        else:
            # Lower score is better.
            winning_window, scoreBest, dict_scoreBest = min(window_candidates, key=lambda x: x[1])
        #print("WINNER")
        #print(winning_window)

        gt_window = gt[im_name]
        #print("winning window", winning_window)
        winning_window_big = ( int(winning_window[0]/ratio), int(winning_window[1]/ratio), int(winning_window[2]/ratio), int(winning_window[3]/ratio) )

        #print("winning window", winning_window_big)
        #print("gt window", gt_window)

        iou = intersection_over_union(winning_window_big, gt_window)
        print("iou: "+str(iou))
        iou_list.append(iou)


        # Ground-truth window scaled from full-resolution to the resized working image.
        point1 = (int(gt_window[0] * ratio), int(gt_window[1] * ratio))
        point2 = (int(gt_window[2] * ratio), int(gt_window[3] * ratio))

        p1 = winning_window[:2]
        p2 = winning_window[2:]
        #print(p1)
        #print(p2)
        cv2.rectangle(rgb_image, pt1=p1, pt2=p2, color=(0, 0, 255), thickness=2)
        #print("-----------------------------")
        if gt_score < scoreBest:
            print("GT WINS")
        else:
            print("GT Loses")
            print("-----------------------------------------")
            print("score winning window", scoreBest)
            print("scores winning window", dict_scoreBest)
            print("...................")
            print("gt score", gt_score)
            print("gt scores", gt_dict_score)
        # Visualize the failure cases.
        if iou < 0.4:
            plt.subplot(331)
            plt.title("Image")
            plt.imshow(rgb_image)
            plt.subplot(332)
            plt.title("Saturation")
            plt.imshow(sat_im, cmap='gray')
            plt.subplot(333)
            plt.title("Value")
            plt.imshow(val_im, cmap='gray')

            plt.subplot(3, 3, 7)
            plt.title("Threshold_Black")
            plt.imshow(image_black, cmap='gray')

            plt.subplot(3, 3, 8)
            plt.title("Threshold_White")
            plt.imshow(image_white, cmap='gray')

            plt.subplot(3, 3, 5)
            plt.title("Saturation Gradient")
            plt.imshow(sat_sobel, cmap='gray')

            plt.subplot(3, 3, 6)
            plt.title("Value Gradient")
            plt.imshow(val_sobel, cmap='gray')

            plt.subplot(3, 3, 4)
            plt.title("Mask")
            plt.imshow(mask, cmap='gray')

            plt.show()
        """"

        ############## Edges Canny ##############################


        (mu, sigma) = cv2.meanStdDev(mix_sobel)
        edges = cv2.Canny(mix_sobel, mu - 2*sigma, mu + sigma)
        edges = np.multiply(edges, mask)

        ############### Corners ###############################

        sat_corners = cv2.cornerHarris(sat_im, blockSize=2, ksize=5, k=0.01)
        val_corners = cv2.cornerHarris(val_im, blockSize=2, ksize=5, k=0.01)

        # Threshold for an optimal value, it may vary depending on the image.
        #rgb_image[dst > 0.2 * dst.max()] = [0, 0, 255]

        lines = cv2.HoughLines(edges, 1, np.pi / 180, 30)

        for line in lines:
            print (line)
            print("-------")

        for line in lines:
            rho, theta = line[0]
            if(theta==0):
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a * rho
                y0 = b * rho
                x1 = int(x0 + 500 * (-b))
                y1 = int(y0 + 500 * (a))
                x2 = int(x0 - 500 * (-b))
                y2 = int(y0 - 500 * (a))

                cv2.line(rgb_image, (x1, y1), (x2, y2), (0, 0, 255), 1)

        ret, thresh = cv2.threshold(edges, 127, 255, 0)
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)



        for cont in contours:
            x, y, w, h = cv2.boundingRect(cont)
            area1 = w*h
            area2 = cv2.contourArea(cont)
            if(area2 > 0):
                if (float(area1)/float(area2) <1.2 and float(area1)/float(area2) > 0.8


                    ):
                    cv2.rectangle(rgb_image, (x, y), (x + w, y + h), (0, 255, 0), 2)


        ############### Sliding window ##################################################
        # Values obtained analyzing ground truth

        minLength = FinalSize/2
        maxLength = FinalSize*1.2
        stepLength = FinalSize/20

        minAspectRatio = 4
        maxAspectRatio = 13
        stepAspectRatio = 0.5

        for len in range(minLength, maxLength, stepLength):
            for ar in range(minAspectRatio, maxAspectRatio, stepAspectRatio):
                for i in range(1, im.shape[0]*ratio, 2):
                    for j in range(1, im.shape[1] * ratio, 2):








        plt.subplot(331)
        plt.title("Image")
        plt.imshow(rgb_image)
        plt.subplot(332)
        plt.title("Saturation")
        plt.imshow(sat_im, cmap='gray')
        plt.subplot(333)
        plt.title("Value")
        plt.imshow(val_im, cmap='gray')

        plt.subplot(3, 3, 7)
        plt.title("Mix Gradient X")
        plt.imshow(mix_sobelx, cmap='gray')

        plt.subplot(3, 3, 8)
        plt.title("Mix Gradient Y")
        plt.imshow(mix_sobely, cmap='gray')

        plt.subplot(3, 3, 5)
        plt.title("Saturation Gradient")
        plt.imshow(sat_sobel, cmap='gray')

        plt.subplot(3, 3, 6)
        plt.title("Value Gradient")
        plt.imshow(val_sobel, cmap='gray')

        plt.subplot(3, 3, 4)
        plt.title("Mask")
        plt.imshow(mask, cmap='gray')

        plt.subplot(3, 4, 8)
        plt.title("Edges")
        plt.imshow(edges, cmap='gray')

        plt.subplot(3, 4, 9)
        plt.title("Corners Sat")
        plt.imshow(sat_corners, cmap='gray')

        plt.subplot(3, 4, 10)
        plt.title("Corners Val")
        plt.imshow(val_corners, cmap='gray')


        plt.show()
        """

    print("------------------------------------------")
    print("MEAN INTERSECTION OVER UNION")
    print(iou_list)
    print(np.mean(iou_list))
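
The scoring loop above relies throughout on integral-image box sums. A minimal, self-contained sketch of that identity (the helper name box_sum is illustrative, not from the snippet):

import cv2
import numpy as np

def box_sum(ii, x1, y1, x2, y2):
    # Sum of img[y1:y2+1, x1:x2+1] given ii = cv2.integral(img);
    # cv2.integral pads a zero row and column, hence the +1 offsets.
    return ii[y2 + 1, x2 + 1] + ii[y1, x1] - ii[y2 + 1, x1] - ii[y1, x2 + 1]

img = (np.random.rand(48, 64) * 255).astype(np.uint8)
ii = cv2.integral(img)
assert box_sum(ii, 3, 5, 10, 12) == img[5:13, 3:11].sum()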
Example #28
0
def main():
    data = Data(database_dir='w5_BBDD_random', query_dir='w5_devel_random')
    gt = get_text_gt()
    #print(gt)
    #test_gt(data, gt)
    analyze_gt(data, gt)
Example #29
0
class GUI:
    def __init__(self, master):
        self.master = master
        master.title("Digital Signal Processing")
        master.geometry("800x600")

        self.master.protocol("WM_DELETE_WINDOW", self.master.quit)
        self.data = Data()
        self.scalar = 1
        self.popup_return = ''
        self.menubar = tk.Menu(self.master)
        self.file_menu = tk.Menu(self.menubar, tearoff=0)
        self.operations_menu = tk.Menu(self.menubar, tearoff=0)
        self.init_menubar()
        self.master.config(menu=self.menubar)
        self.path = ''
        self.fig = plt.figure(1)
        canvas = FigureCanvasTkAgg(self.fig, master=self.master)  # use the passed-in master rather than a module-level root
        self.plot_widget = canvas.get_tk_widget()
        self.nextBtn = tk.Button(self.master,
                                 text='Next',
                                 command=self.draw_next)
        self.prevBtn = tk.Button(self.master,
                                 text='Prev',
                                 command=self.draw_prev)
        self.prevBtn.pack()
        self.nextBtn.pack()
        self.plot_widget.pack(side=tk.BOTTOM)
        self.counter = 0

    def draw_next(self):
        self.counter += 1
        if self.counter >= len(self.data.signals[0]):
            self.counter -= 1
        plt.clf()
        plt.gca().set_prop_cycle(None)  # set_color_cycle was removed in matplotlib 2.0
        for signal in self.data.signals:
            # x_axis = range(len(signal))
            # plt.xticks(list(signal.keys()))
            plt.xlim(
                (min(list(signal.keys())) - 1, max(list(signal.keys())) + 1))
            plt.ylim((min(list(signal.values())) - 1,
                      max(list(signal.values())) + 1))
            plt.scatter(
                list(signal.keys())[:self.counter],
                list(signal.values())[:self.counter])
        self.fig.canvas.draw()

    def draw_prev(self):
        self.counter -= 1
        if self.counter <= 0:
            self.counter += 1
        plt.clf()
        plt.gca().set_prop_cycle(None)  # set_color_cycle was removed in matplotlib 2.0
        for signal in self.data.signals:
            # x_axis = range(len(signal))
            plt.xlim(
                (min(list(signal.keys())) - 1, max(list(signal.keys())) + 1))
            plt.ylim((min(list(signal.values())) - 1,
                      max(list(signal.values())) + 1))
            plt.scatter(
                list(signal.keys())[:self.counter],
                list(signal.values())[:self.counter])
        self.fig.canvas.draw()

    def init_menubar(self):
        self.file_menu.add_command(label="Open Time signal",
                                   command=self.open_time_dialog)
        self.file_menu.add_command(label="Open Frequency signal",
                                   command=self.open_freq_dialog)
        self.file_menu.add_command(label="Append", command=self.on_append)
        self.file_menu.add_command(label="Save", command=self.on_save)

        self.file_menu.add_command(label="Generate Signal",
                                   command=self.on_generate)

        self.operations_menu.add_command(label="Add", command=self.on_add)
        self.operations_menu.add_command(label="Subtract",
                                         command=self.on_subtract)
        self.operations_menu.add_command(label="Scale", command=self.on_scale)
        self.operations_menu.add_command(label="Quantize",
                                         command=self.on_quantize)
        self.operations_menu.add_command(label="Delay", command=self.on_delay)
        self.operations_menu.add_command(label="Fold", command=self.on_fold)
        self.operations_menu.add_command(label="DFT", command=self.on_dft)
        self.operations_menu.add_command(label="FFT", command=self.on_fft)
        self.operations_menu.add_command(label="IDFT", command=self.on_idft)
        self.operations_menu.add_command(label="IFFT", command=self.on_ifft)
        self.file_menu.add_separator()
        self.file_menu.add_command(label="Exit", command=self.master.quit)
        self.menubar.add_cascade(label="File", menu=self.file_menu)
        self.menubar.add_cascade(label="Operations", menu=self.operations_menu)

        # self.filemenu.entryconfigure("Save", state=DISABLED)
    def open_freq_dialog(self):
        self.data.signals = []
        self.path = tk.filedialog.askopenfilename()
        freq, amp, phase = read_ds_file(self.path)
        self.data.frequency = (freq, amp, phase)
        self.draw_multi_axes(freq, amp, phase)

    def open_time_dialog(self):
        self.path = tk.filedialog.askopenfilename()
        self.data.signals = [read_ds_file(self.path)]
        self.draw_on_canvas(clear=True)

    def on_append(self):
        self.path = tk.filedialog.askopenfilename()
        self.data.signals.append(read_ds_file(self.path))
        self.draw_on_canvas()

    def on_add(self):
        self.path = tk.filedialog.askopenfilename()
        self.data.apply_operation(read_ds_file(self.path), op='+')
        self.draw_on_canvas(clear=True)

    def on_subtract(self):
        self.path = tk.filedialog.askopenfilename()
        self.data.apply_operation(read_ds_file(self.path), op='-')
        self.draw_on_canvas(clear=True)

    def on_scale(self):
        self.popupmsg('Enter your value')
        self.data.apply_operation(self.scalar, op='s')
        self.draw_on_canvas(clear=True)

    def on_quantize(self):
        self.popupmsg('Number of Levels/Bits for example 3L/3B:')
        encoding, sample_error = self.data.quantize(levels=self.scalar)
        self.draw_on_canvas(clear=True)
        self.popup_after_quantize(encoding, sample_error)

    def draw_on_canvas(self, clear=False):
        self.counter = len(self.data.signals[0])
        if clear:
            plt.clf()
        plt.gca().set_prop_cycle(None)  # set_color_cycle was removed in matplotlib 2.0
        for signal in self.data.signals:
            # x_axis = range(len(signal))
            plt.scatter(signal.keys(), signal.values())
        self.fig.canvas.draw()

    def draw_multi_axes(self, freq, amp, phase):
        plt.clf()
        plt.subplot(221), plt.title('Amplitudes')
        plt.scatter(freq, amp)
        plt.subplot(222), plt.title('Phase')
        plt.scatter(freq, phase)
        self.fig.canvas.draw()

    def on_save(self):
        self.path = tk.filedialog.asksaveasfilename()
        save_ds_file(self.path, self.data.signals[0])

    def on_delay(self):

        self.popupmsg("Enter Delay")
        self.data.delay(self.scalar)
        self.draw_on_canvas(True)

    def on_dft(self):
        s = list(self.data.signals[0].values())
        res, amp, phase = self.data.dft(s)

        self.popupmsg('Enter Sampling Frequency ')
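        # self.scalar now holds the sampling frequency Fs; convert it to the
        # angular-frequency step between DFT bins: 2*pi / (N * Ts) = 2*pi*Fs / N.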
        self.scalar = (2 * np.pi) / (len(amp) * (1 / self.scalar))
        freq = [self.scalar * (i + 1) for i in range(len(amp))]

        self.draw_multi_axes(freq, amp, phase)
        x_axis = list(self.data.signals[0].keys())
        save_ds_frequency('./data/outputFreq.ds', x_axis, amp, phase)
        self.data.frequency = (x_axis, amp, phase)
        self.data.signals = []
        print(res)
        print(self.data.fft(s))
        # print(freq)

    def on_fft(self):
        s = list(self.data.signals[0].values())
        res = self.data.fft(s)
        amp = np.sqrt(np.square(res.real) + np.square(res.imag))
        phase = np.angle(res)
        self.popupmsg('Enter Sampling Frequency ')
        self.scalar = (2 * np.pi) / (len(amp) * (1 / self.scalar))
        freq = [self.scalar * (i + 1) for i in range(len(amp))]

        self.draw_multi_axes(freq, amp, phase)
        x_axis = list(self.data.signals[0].keys())
        save_ds_frequency('./data/outputFreq.ds', x_axis, amp, phase)
        self.data.frequency = (x_axis, amp, phase)
        self.data.signals = []
        print(res)
        print(self.data.fft(s))
        # print(freq)
        # print(amp)
        # print(phase)

    def on_idft(self):

        x = self.data.frequency[1] * np.cos(self.data.frequency[2])
        y = self.data.frequency[1] * np.sin(self.data.frequency[2])
        # x = np.round(x, 4)
        # y = np.round(y, 4)
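        # Rebuild the complex spectrum from amplitude and phase:
        # amp*cos(phase) + 1j*amp*sin(phase) == amp * exp(1j*phase).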
        res = x + y * 1j
        print('ifft')
        res = self.data.dft(res, inverse=True)
        print(res)
        print(self.data.fft(res, inverse=True))
        print('ifft')
        # res = np.flip(res, 0)
        self.data.signals.append(dict(zip(range(len(res)), res.real.tolist())))
        print(self.data.signals[0])
        self.draw_on_canvas(clear=True)
        # res = self.data.dft(self.data.signals[0].values())
        # print(res)

    def on_ifft(self):

        x = self.data.frequency[1] * np.cos(self.data.frequency[2])
        y = self.data.frequency[1] * np.sin(self.data.frequency[2])
        # x = np.round(x, 4)
        # y = np.round(y, 4)
        res = x + y * 1j
        print('ifft')
        res = self.data.ifft(res)
        print(res)
        # print(self.data.fft(res, inverse=True))
        # print('ifft')
        # res = np.flip(res, 0)
        self.data.signals.append(dict(zip(range(len(res)), res.real.tolist())))
        print(self.data.signals[0])
        self.draw_on_canvas(clear=True)
        # res = self.data.dft(self.data.signals[0].values())
        # print(res)
    def on_fold(self):
        pass

    def on_generate(self):
        self.popupmsg('Sin/Cos,n_samples,Amplitude,theta,F,Fs')
        s_type, n, A, theta, F, Fs = self.popup_return
        n, A, theta, F, Fs = int(n), int(A), int(theta), int(F), int(Fs)
        self.data.generate_signal(s_type, n, A, theta, F, Fs)
        self.draw_on_canvas()

    def popupmsg(self, msg):
        popup = tk.Tk()
        popup.wm_title("")
        entry = ttk.Entry(popup)  # renamed from input to avoid shadowing the built-in

        def disable_event():
            pass

        popup.protocol("WM_DELETE_WINDOW", disable_event)

        def on_press():
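            # Parse the reply: "3B" -> 2**3 quantization levels, "5L" -> 5 levels,
            # "sin,..."/"cos,..." -> signal-generator parameters, anything else -> float.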
            self.popup_return = entry.get()
            if self.popup_return.endswith(('b', 'B')):
                self.scalar = int(math.pow(2, int(self.popup_return[:-1])))
            elif self.popup_return.endswith(('l', 'L')):
                self.scalar = int(self.popup_return[:-1])
            elif self.popup_return.startswith(('sin', 'cos')):
                self.popup_return = self.popup_return.split(',')
            else:
                self.scalar = float(self.popup_return)
            popup.destroy()
            self.master.quit()

        label = ttk.Label(popup, text=msg)
        label.pack(side="top", fill="x", padx=12)
        b = ttk.Button(popup, text="Submit", command=on_press)
        entry.pack()
        b.pack(side='bottom')
        popup.mainloop()

    def popup_after_quantize(self, encoding, sample_error):

        popup = tk.Tk()
        popup.wm_title("")

        def disable_event():
            popup.destroy()
            self.master.quit()

        popup.protocol("WM_DELETE_WINDOW", disable_event)

        encoding_list = tk.Listbox(popup)
        encoding_list.insert(0, "Encoding")
        sample_error_list = tk.Listbox(popup)
        sample_error_list.insert(0, "Error")
        for i in range(len(encoding)):
            encoding_list.insert(i + 1, encoding[i])
        for i in range(len(sample_error)):
            sample_error_list.insert(i + 1, sample_error[i])
        encoding_list.pack(side='left')
        sample_error_list.pack(side='right')
        popup.mainloop()
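
The on_dft/on_idft handlers above split the spectrum into amplitude and phase and rebuild the complex values before inverting. A minimal numpy-only sketch of that round trip (illustrative and standalone; it does not use the snippet's Data class):

import numpy as np

signal = np.array([1.0, 2.0, 3.0, 4.0])
spectrum = np.fft.fft(signal)
amp, phase = np.abs(spectrum), np.angle(spectrum)

# rebuild the complex spectrum the same way on_idft does
rebuilt = amp * np.cos(phase) + 1j * amp * np.sin(phase)   # == amp * exp(1j*phase)
recovered = np.fft.ifft(rebuilt).real
assert np.allclose(recovered, signal)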
Example #30
0
def main():

    data = Data(database_dir='w5_BBDD_random', query_dir='w5_devel_random')
    gt = ground_truth_text.get_text_gt()

    # loop over database_imgs without overloading memory
    for im, im_name in data.database_imgs:

        hsv_image = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        value_image = hsv_image[:, :, 2]

        x1, y1, x2, y2 = gt[im_name]

        bboxesblack = bboxes_black(hsv_image)
        bboxeswhite = bboxes_white(hsv_image)

        image_white = cv2.inRange(hsv_image, (0, 0, 200), (180, 50, 255))
        image_black = cv2.inRange(hsv_image, (0, 0, 0), (180, 50, 50))
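        # HSV ranges: near-white = any hue, low saturation (<=50), high value (>=200);
        # near-black = any hue, low saturation (<=50), low value (<=50).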

        size = max(hsv_image.shape)
        kernel_size = int(size / 100)
        kernel = np.ones((kernel_size, kernel_size), np.uint8)

        image_black = cv2.morphologyEx(image_black, cv2.MORPH_CLOSE, kernel)
        image_white = cv2.morphologyEx(image_white, cv2.MORPH_CLOSE, kernel)

        for bbox in bboxesblack:
            cv2.rectangle(image_black, (bbox[0], bbox[1]), (bbox[2], bbox[3]),
                          (150), 10)

        for bbox in bboxeswhite:
            cv2.rectangle(image_white, (bbox[0], bbox[1]), (bbox[2], bbox[3]),
                          (150), 10)

        ################### Merge Bboxs (letters) #################
        """"
        parameter_letters = im.shape[1]/80

        finished = False
        while(not finished):
            print(len(bboxes))
            newboxes = []
            merged = set()
            for bbox1 in bboxes:
                if bbox1 not in merged:
                    for bbox2 in bboxes:
                        if bbox2 not in merged:
                            if ( not bbox1 == bbox2 ):
                                dist = distance_rectangles(bbox1, bbox2)

                                if(dist < parameter_letters ):

                                    height_merged = max(bbox1[3], bbox2[3])-min(bbox1[1], bbox2[1])
                                    if (height_merged <0.15*im.shape[0]):
                                        merged.add(bbox1)
                                        merged.add(bbox2)
                                        newboxes.append( (min(bbox1[0], bbox2[0]), min(bbox1[1], bbox2[1]), max(bbox1[2], bbox2[2]), max(bbox1[3], bbox2[3]) ) )
                    if ( bbox1 not in merged):
                        newboxes.append(bbox1)
            bboxes = newboxes
            if(len(merged)== 0):
                finished = True
        for bbox in bboxes:
            x1, y1, x2, y2 = bbox
            cv2.rectangle(closing_bboxs, (x1, y1), (x2, y2), (150), 10)


        cv2.rectangle(threshblack, (x1, y1), (x2,y2), (150), 10)
        cv2.rectangle(threshwhite, (x1, y1), (x2, y2), (150), 10)

        x1, y1, x2, y2 = gt[im_name]
        integral_image_val = cv2.integral(value_image)
        x2 = min(x2, im.shape[1] - 2)
        y2 = min(y2, im.shape[0] - 2)
        sum_val = integral_image_val[y2 + 1, x2 + 1] + integral_image_val[y1, x1] - integral_image_val[y2 + 1, x1] - \
                  integral_image_val[y1, x2 + 1]

        area = (x2 - x1) * (y2 - y1)
        mean_val = sum_val / area
        if (mean_val < 120):  # dark background -> white letters
            target_integral_image = integral_threshwhite
            print("Dark background")
        if (mean_val > 120):  # bright background -> dark letters
            target_integral_image = integral_threshblack
            print("White background")
        """
        plt.subplot(131)
        plt.title("Image")
        plt.imshow(im)
        plt.subplot(132)
        plt.title("Black boxes")
        plt.imshow(image_black, cmap='gray')
        plt.subplot(133)
        plt.title("White Boxes")
        plt.imshow(image_white, cmap='gray')

        plt.show()
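
For reference, a minimal sketch of the HSV threshold + morphological closing step used above (synthetic input; values are illustrative):

import cv2
import numpy as np

canvas = np.full((100, 200, 3), 255, np.uint8)              # white background
cv2.putText(canvas, "TEXT", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 3)
hsv = cv2.cvtColor(canvas, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 0, 0), (180, 50, 50))           # dark, unsaturated pixels
kernel = np.ones((5, 5), np.uint8)
closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)    # bridge gaps between strokes
print(int(mask.sum() / 255), int(closed.sum() / 255))       # closing never shrinks the mask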