def __init__(self, solve_url, use_proxies=True, headless=False):
        options = Options()
        options.headless = headless

        profile = webdriver.FirefoxProfile()
        if use_proxies:
            proxy = load_proxy()
            profile.set_preference("network.proxy.type", 1)
            profile.set_preference("network.proxy.http", proxy["ip"])
            profile.set_preference("network.proxy.http_port", proxy["port"])
            if "username" in proxy:
                credentials = b64encode(
                    f'{proxy["username"]}:{proxy["password"]}'.encode(
                        "ascii")).decode()
                profile.set_preference("extensions.closeproxyauth.authtoken",
                                       credentials)

        profile.set_preference("dom.webdriver.enabled", False)
        profile.set_preference("useAutomationExtension", False)
        profile.update_preferences()

        try:
            self.driver = webdriver.Firefox(firefox_profile=profile,
                                            options=options)
        except WebDriverException:
            options.headless = True
            self.driver = webdriver.Firefox(firefox_profile=profile,
                                            options=options)

        self.image_handler = ImageHandler()
        self.solve_url = solve_url
        self.recaptcha_task = RecaptchaTask()
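
The proxy-auth token built above is nothing more than the base64 of `username:password`. A minimal standalone sketch (hypothetical credentials, standard library only):

from base64 import b64encode

proxy = {"username": "user", "password": "secret"}  # hypothetical values
credentials = b64encode(
    f'{proxy["username"]}:{proxy["password"]}'.encode("ascii")).decode()
print(credentials)  # dXNlcjpzZWNyZXQ=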
def command_get_white_black_img(update: Update, context: CallbackContext):
    """Process the image with the black-and-white filter."""
    img = ImageHandler()
    img.get_black_white_img()

    reply_markup = ReplyKeyboardRemove()  # Remove keyboard
    context.bot.send_message(chat_id=update.message.chat_id,
                             text="Upload new image",
                             reply_markup=reply_markup)
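
A hedged sketch of how this handler could be wired up, assuming the v13-style python-telegram-bot API; the Updater, the command name "black_white", and the token placeholder are assumptions, not from the original snippet:

from telegram.ext import Updater, CommandHandler

updater = Updater("BOT_TOKEN")  # placeholder token (assumption)
updater.dispatcher.add_handler(
    CommandHandler("black_white", command_get_white_black_img))
updater.start_polling()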
    def get_annotation_suggestions(self, n_predicts=0, n_samples=100):

        n_digits = len(repr(abs(n_samples)))

        predictions, save_names = self.predict(n_predicts=n_predicts)

        images = [[ImageHandler._tensor_to_image(img, mask=True) for img in imgs] for imgs in predictions]
        result_imgs = []
        for imgs in images:
            # Pixel-wise intersection (AND) and union (OR) over the ensemble;
            # AND must start from ones, otherwise it is identically zero.
            result_and = np.ones_like(imgs[0], dtype=bool)
            result_or = np.zeros_like(imgs[0], dtype=bool)
            for img in imgs:
                result_and = np.logical_and(img, result_and)
                result_or = np.logical_or(img, result_or)

            # Pixels predicted by some but not all models -> disagreement mask
            result_imgs.append(
                np.logical_xor(result_or, result_and).astype(np.uint8))

        uncertainties = []
        for idx, res_img in enumerate(result_imgs):
            uncertainties.append((np.sum(res_img), idx))

        uncertainties.sort(key=lambda tup: tup[0], reverse=True)

        # Never ask for more suggestions than there are images
        n_samples = min(n_samples, len(uncertainties))

        output_dir = self.options.output_path + "/" + self.options.name + "/Suggest/" + "Epoch_" + str(self.options.load_epoch)

        for i in range(n_samples):
            self.image_handler.save_image(result_imgs[uncertainties[i][1]], output_dir, save_names[uncertainties[i][1]])
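
A toy numpy-only illustration of the disagreement mask built above: pixels predicted by some ensemble members but not all come out as 1 (the input masks are hypothetical):

import numpy as np

preds = [np.array([[1, 0], [1, 1]]),
         np.array([[1, 0], [0, 1]])]
result_and = np.ones_like(preds[0], dtype=bool)   # AND starts from ones
result_or = np.zeros_like(preds[0], dtype=bool)   # OR starts from zeros
for p in preds:
    result_and = np.logical_and(p, result_and)
    result_or = np.logical_or(p, result_or)
disagreement = np.logical_xor(result_or, result_and)
print(disagreement.astype(int))  # [[0 0]
                                 #  [1 0]]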
    def __init__(self):

        self.classnames = [
            "background", "person", "crutches", "walking_frame", "wheelchair",
            "push_wheelchair"
        ]

        #read rosparams
        config_file = rospy.get_param('~model_config', "")
        self.fixed_frame = rospy.get_param('~fixed_frame', 'odom')
        self.tracking = rospy.get_param('~tracking', True)
        self.filter_detections = rospy.get_param('~filter_inside_boxes', True)
        self.inside_box_ratio = rospy.get_param('~inside_box_ratio', 0.8)
        camera_topic = rospy.get_param('~camera_topic',
                                       '/kinect2/qhd/image_color_rect')
        camera_info_topic = rospy.get_param('~camera_info_topic',
                                            '/kinect2/qhd/camera_info')

        #initialize subscribers
        rospy.Subscriber(camera_topic,
                         Image,
                         self.image_callback,
                         queue_size=1)
        rospy.Subscriber(camera_info_topic,
                         CameraInfo,
                         self.cam_info_callback,
                         queue_size=1)

        #detection model and tracker
        self.setup_model_and_tracker(config_file)

        #image queues
        self.last_received_image = None  #set from image topic
        self.last_processed_image = None  #set from image topic
        self.new_image = False

        self.cam_calib = None  #set from camera info
        self.camera_frame = None  #set from camera info

        #helpers
        Server(TrackingParamsConfig, self.reconfigure_callback)
        bridge = CvBridge()
        self.viz_helper = Visualizer(len(self.classnames))
        self.publisher = Publisher(self.classnames, bridge)
        self.image_handler = ImageHandler(bridge, cfg.TEST.MAX_SIZE,
                                          cfg.TEST.SCALE)
        self.tfl = tf.TransformListener()
Example #5
def posts_main(posts_list):
    res_list = []
    for post in posts_list:
        # Send to Text Module
        text_app = TextHandler().main(post.title, post.text)
        # Send to Image Module
        image_app = ImageHandler().main(post.images)
        res_list.append((post.id, text_app, image_app))
    return res_list
    def __init__(self, options, gpu_ids=[]):
        self.options = options
        self.model = NetworkBench(n_networks=options.n_networks,
                                  n_input_channels=options.input_nc,
                                  n_output_channels=options.output_nc,
                                  n_blocks=options.n_blocks,
                                  initial_filters=options.initial_filters,
                                  dropout_value=options.dropout_value,
                                  lr=options.lr,
                                  decay=options.decay,
                                  decay_epochs=options.decay_epochs,
                                  batch_size=options.batch_size,
                                  image_width=options.image_width,
                                  image_height=options.image_height,
                                  load_network=options.load_network,
                                  load_epoch=options.load_epoch,
                                  model_path=os.path.join(
                                      options.model_path, options.name),
                                  name=options.name,
                                  gpu_ids=gpu_ids,
                                  dont_care=options.dont_care,
                                  gan=options.gan,
                                  pool_size=options.pool_size,
                                  lambda_gan=options.lambda_gan,
                                  n_blocks_discr=options.n_blocks_discr)

        self.model.cuda()

        self.dont_care = options.dont_care
        self.gan = options.gan

        if self.gan:
            self.discriminator_datasets = DataLoaderDiscriminator(
                options).load_data()

        self.data_sets = CorrespondenceDataLoaderDontCare(options).load_data()
        self.image_handler = ImageHandler()
        self.loss_dir = self.options.output_path + "/" + self.options.name + "/Train"
        copyfile(
            os.path.relpath('seg_config.yaml'),
            os.path.join(self.options.model_path, self.options.name,
                         'seg_config.yaml'))
        self.writer = SummaryWriter(self.loss_dir)
Example #7
    def __init__(self, options, gpu_ids=[]):
        self.options = options
        self.model = NetworkBench(
            n_networks=options.n_networks,
            n_input_channels=options.input_nc,
            n_output_channels=options.output_nc * options.n_labels,
            n_blocks=options.n_blocks,
            initial_filters=options.initial_filters,
            dropout_value=options.dropout_value,
            lr=options.lr,
            decay=options.decay,
            decay_epochs=options.decay_epochs,
            batch_size=options.batch_size,
            image_width=options.image_width,
            image_height=options.image_height,
            load_network=options.load_network,
            load_epoch=options.load_epoch,
            model_path=os.path.join(options.model_path, options.name),
            name=options.name,
            gpu_ids=gpu_ids,
            gan=options.gan,
            pool_size=options.pool_size,
            lambda_gan=options.lambda_gan,
            n_blocks_discr=options.n_blocks_discr)

        # FIXME: Save Graph to tensorboardX

        self.model.cuda()
        self.n_labels = options.n_labels
        self.gan = options.gan

        self.data_sets = CorrespondenceDataLoaderMultiLabel(
            options).load_data()

        self.image_handler = ImageHandler()
        self.log_dir = self.options.output_path + "/" + self.options.name + "/Train"
        copyfile(
            os.path.relpath('seg_config.yaml'),
            os.path.join(self.options.model_path, self.options.name,
                         'seg_config.yaml'))
        self.writer = SummaryWriter(self.log_dir)
    def selecionar_imagem(self):
        item = self.ui.files_list.currentItem()
        arquivo_imagem = os.path.join(self.pasta, item.text())
        imagem_settings = ImagemSettings(item.text(), self.pasta_settings)

        self.image_handler = ImageHandler(arquivo_imagem, imagem_settings)

        # Make the HSV image available on the widget
        self.ui.imagem_original.cv_image = self.image_handler.hsv_image
        self.alterar_interface()

        self.show_image(self.image_handler.cv_image)
        self.show_image(self.image_handler.classificar_imagem(), destination='imagem_classificada')
Example #9
    def __init__(self):
        self.builder = Gtk.Builder()
        self.builder.add_from_file('main_gui.glade')
        self.builder.connect_signals(self)

        self.box = self.builder.get_object('inner_box')
        self.window = self.builder.get_object('main_window')
        self.window.set_default_size(600, 400)

        self.image_handler = ImageHandler()

        self.main_canvas = FigureCanvas(self.image_handler.read_main_image('lena.jpg'))
        self.box.pack_start(self.main_canvas, True, True, 0)
        self.filtered_canvas = None
        self.secondary_canvas = None
        self.window.show_all()
def main():
    app = QtWidgets.QApplication(sys.argv)
    main_view = MainView(app)
    controls = Controls()
    main_view.subscribe_controls(controls)

    metrics_engine = MetricsEngine()
    metrics_engine.subscribe_view(main_view)
    metrics_engine.load_metrics()
    controls.subscribe_view(main_view)
    image_handler = ImageHandler()
    image_handler.subscribe_view(main_view)
    modifications_provider = ModificationsProvider()
    modifications_provider.subscribe_image_handler(image_handler)
    controls.subscribe_image_handler(image_handler)
    controls.subscribe_modifications_provider(modifications_provider)
    image_handler.subscribe_metrics_engine(metrics_engine)

    main_view.show()
    sys.exit(app.exec_())
Example #11
class MainView:
    def __init__(self):
        self.builder = Gtk.Builder()
        self.builder.add_from_file('main_gui.glade')
        self.builder.connect_signals(self)

        self.box = self.builder.get_object('inner_box')
        self.window = self.builder.get_object('main_window')
        self.window.set_default_size(600, 400)

        self.image_handler = ImageHandler()

        self.main_canvas = FigureCanvas(self.image_handler.read_main_image('lena.jpg'))
        self.box.pack_start(self.main_canvas, True, True, 0)
        self.filtered_canvas = None
        self.secondary_canvas = None
        self.window.show_all()

    def set_main_image(self, figure):
        self.remove_current_images(all=True)
        self.main_canvas = FigureCanvas(figure)
        self.box.pack_start(self.main_canvas, True, True, 0)
        self.window.show_all()

    def set_resulted_image(self, figure, preserve_middle=False):
        self.remove_current_images(preserve_middle=preserve_middle)
        self.filtered_canvas = FigureCanvas(figure)
        self.box.pack_end(self.filtered_canvas, True, True, 0)
        self.window.show_all()

    def set_secondary_image(self, figure):
        self.remove_current_images()
        self.secondary_canvas = FigureCanvas(figure)
        self.box.pack_start(self.secondary_canvas, True, True, 0)
        self.window.show_all()

    def remove_current_images(self, all=False, preserve_middle=False):
        if not preserve_middle and self.secondary_canvas in self.box.get_children():
            self.box.remove(self.secondary_canvas)
        if self.filtered_canvas in self.box.get_children():
            self.box.remove(self.filtered_canvas)
        if all:
            self.box.remove(self.main_canvas)

#################################################################
#                   Signal Handlers                             #
    def on_delete_window(self, *args):
        Gtk.main_quit(*args)

    def on_open_file(self, widget):
        self.file_dialog = FileDialog(self.window)
        file_path = self.file_dialog.choose_file()
        if file_path:
            self.set_main_image(self.image_handler.read_main_image(file_path))

    def on_salt_and_pepper(self, widget):
        resulted_figure = self.image_handler.salt_and_pepper()
        self.set_resulted_image(resulted_figure)

    def on_gray_scale(self, widget):
        resulted_figure = self.image_handler.convert_current_to_gray()
        self.set_resulted_image(resulted_figure)

    def on_replace(self, widget):
        self.set_main_image(self.image_handler.replace_current_img())

    def on_recover(self, widget):
        self.set_main_image(self.image_handler.recover_original_img())

    def on_thresholding(self, widget):
        self.threshold_dialog = ThresholdDialog(self.window)
        threshold_value, is_adaptive = self.threshold_dialog.open_dialog()
        if threshold_value is not None:
            resulted_figure = self.image_handler.threshold(threshold_value, is_adaptive)
            self.set_resulted_image(resulted_figure)

    def on_average(self, widget):
        self.mask_dialog = MaskDialog(self.window)
        mask_value = self.mask_dialog.open_dialog()
        if mask_value is not None:
            resulted_figure = self.image_handler.average(mask_value)
            self.set_resulted_image(resulted_figure)

    def on_median(self, widget):
        self.mask_dialog = MaskDialog(self.window)
        mask_value = self.mask_dialog.open_dialog()
        if mask_value is not None:
            resulted_figure = self.image_handler.median(mask_value)
            self.set_resulted_image(resulted_figure)

    def on_high_pass(self, widget):
        resulted_figure = self.image_handler.high_pass()
        self.set_resulted_image(resulted_figure)

    def on_horizontal(self, widget):
        resulted_figure = self.image_handler.horizontal()
        self.set_resulted_image(resulted_figure)

    def on_vertical(self, widget):
        resulted_figure = self.image_handler.vertical()
        self.set_resulted_image(resulted_figure)

    def on_plus_45(self, widget):
        resulted_figure = self.image_handler.plus_45()
        self.set_resulted_image(resulted_figure)

    def on_minus_45(self, widget):
        resulted_figure = self.image_handler.minus_45()
        self.set_resulted_image(resulted_figure)

    def on_sobel(self, widget):
        resulted_figure = self.image_handler.sobel()
        self.set_resulted_image(resulted_figure)

    def on_prewitt(self, widget):
        resulted_figure = self.image_handler.prewitt()
        self.set_resulted_image(resulted_figure)

    def on_roberts(self, widget):
        resulted_figure = self.image_handler.roberts()
        self.set_resulted_image(resulted_figure)

    def on_hough_line(self, widget):
        self.config_dialog = ConfigDialog(self.window)
        config_value = self.config_dialog.open_dialog('Nivel de Aceitacao', 255)
        if config_value is not None:
            resulted_figure = self.image_handler.hough_line(config_value)
            self.set_resulted_image(resulted_figure)

    def on_seam_carving(self, widget):
        self.config_dialog = ConfigDialog(self.window)
        amount_value = self.config_dialog.open_dialog('Linhas a Retirar', self.image_handler.get_current_img_width())
        if amount_value is not None:
            resulted_figure = self.image_handler.apply_seam_carving(amount_value)
            self.set_resulted_image(resulted_figure)

    def on_color_extract(self, widget):
        self.colorDialog = ColorDialog(self.window)
        color = self.colorDialog.select()
        if color is not None:
            self.config_dialog = ConfigDialog(self.window)
            config_value = self.config_dialog.open_dialog('Taxa de Tolerancia', 255)
            if config_value is not None:
                resulted_figure = self.image_handler.color_extract(color, config_value)
                self.set_resulted_image(resulted_figure)

    def on_operand_file(self, widget):
        file_dialog = FileDialog(self.window)
        file_path = file_dialog.choose_file()
        if file_path:
            self.set_secondary_image(self.image_handler.read_secondary_image(file_path))

    def on_union(self, widget):
        resulted_figure = self.image_handler.union()
        self.set_resulted_image(resulted_figure, preserve_middle=True)

    def on_intersection(self, widget):
        resulted_figure = self.image_handler.intersection()
        self.set_resulted_image(resulted_figure, preserve_middle=True)

    def on_subtraction(self, widget):
        resulted_figure = self.image_handler.subtraction()
        self.set_resulted_image(resulted_figure, preserve_middle=True)

    def on_complement(self, widget):
        resulted_figure = self.image_handler.complement()
        self.set_resulted_image(resulted_figure)
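
A hedged bootstrap sketch for the class above: GTK 3 wants the gi version pinned before the import, and Gtk.main() runs the event loop; 'main_gui.glade' and 'lena.jpg' must sit next to the script.

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

if __name__ == '__main__':
    MainView()
    Gtk.main()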
"""
This module is reserved for arbitrary helper functions
"""
import os

from downloader import download
from image_handler import ImageHandler

images = ImageHandler('%s/SaturnServer/images/' % os.path.expanduser('~'))

Example #13
from image_handler import ImageHandler

import cv2
import os


if __name__ == '__main__':
    for i in range(100, 200):
        image_handler = ImageHandler()
        if image_handler.is_legal():
            print("legal")
            image = image_handler.get_gray_static_image()
            cv2.imwrite(os.path.join("origin", "{}.png".format(str(i))), image)



class SegmentationNetwork(object):
    def __init__(self, options, gpu_ids=[]):
        self.options = options
        self.model = NetworkBench(n_networks=options.n_networks,
                                  n_input_channels=options.input_nc,
                                  n_output_channels=options.output_nc,
                                  n_blocks=options.n_blocks,
                                  initial_filters=options.initial_filters,
                                  dropout_value=options.dropout_value,
                                  lr=options.lr,
                                  decay=options.decay,
                                  decay_epochs=options.decay_epochs,
                                  batch_size=options.batch_size,
                                  image_width=options.image_width,
                                  image_height=options.image_height,
                                  load_network=options.load_network,
                                  load_epoch=options.load_epoch,
                                  model_path=os.path.join(
                                      options.model_path, options.name),
                                  name=options.name,
                                  gpu_ids=gpu_ids,
                                  dont_care=options.dont_care,
                                  gan=options.gan,
                                  pool_size=options.pool_size,
                                  lambda_gan=options.lambda_gan,
                                  n_blocks_discr=options.n_blocks_discr)

        self.model.cuda()

        self.dont_care = options.dont_care
        self.gan = options.gan

        if self.gan:
            self.discriminator_datasets = DataLoaderDiscriminator(
                options).load_data()

        self.data_sets = CorrespondenceDataLoaderDontCare(options).load_data()
        self.image_handler = ImageHandler()
        self.loss_dir = self.options.output_path + "/" + self.options.name + "/Train"
        copyfile(
            os.path.relpath('seg_config.yaml'),
            os.path.join(self.options.model_path, self.options.name,
                         'seg_config.yaml'))
        self.writer = SummaryWriter(self.loss_dir)

    def train(self):
        """
        Function to train model from dataset
        :return: None
        """
        print("Started Training")
        batch_size = self.options.batch_size
        loss_file = self.loss_dir + "/losses.txt"

        if os.path.isfile(loss_file):
            if self.options.load_network:
                self.erase_loss_file(loss_file, self.options.load_epoch)
            else:
                self.erase_loss_file(loss_file, 0)

        if self.options.load_network:
            base_epoch = int(self.options.load_epoch)
        else:
            base_epoch = 0

        for epoch in range(
                1 + base_epoch, self.options.n_epochs +
                self.options.decay_epochs + 1 + base_epoch):

            epoch_start_time = time.time()

            steps = 0
            t = 0

            # Get iterators for each dataset
            data_iters = []
            for loader in self.data_sets:
                data_iters.append(iter(loader))

            discr_data_iters = []
            if self.gan:
                for loader in self.discriminator_datasets:
                    discr_data_iters.append(iter(loader))

            for i in range(len(self.data_sets[0])):
                iter_start_time = time.time()

                current_batch_imgs, current_batch_labels, dont_care_masks, discriminator_imgs = [], [], [], []

                for iterator in data_iters:
                    data = next(iterator)
                    current_batch_imgs.append(data['img'])
                    current_batch_labels.append(data['label'])
                    if self.dont_care:
                        dont_care_masks.append(data['dont_care'])
                    else:
                        dont_care_masks = None
                if self.gan:
                    for idx, iterator in enumerate(discr_data_iters):
                        try:
                            discriminator_imgs.append(next(iterator))
                        except StopIteration:
                            discr_data_iters[idx] = iter(
                                self.discriminator_datasets[idx])
                            discriminator_imgs.append(
                                next(discr_data_iters[idx]))

                self.model.set_inputs(current_batch_imgs, current_batch_labels,
                                      dont_care_masks, discriminator_imgs)

                self.model.optimize()

                if (steps + 1) % self.options.print_freq == 0:
                    errors = self.model.get_current_errors()
                    t = (time.time() - iter_start_time)

                    # with open(loss_file, 'a+') as f:
                    #     f.write(message + "\n")

                    message = '(epoch: %d, step: %d, time/step: %.3f)\n' % (
                        epoch, steps + 1, t)
                    for k, v in errors.items():
                        message += '%s: %.3f, ' % (k, float(v))

                    if not os.path.isdir(str(self.loss_dir)):
                        os.makedirs(str(self.loss_dir))

                    for k, v in errors.items():
                        self.writer.add_scalar(
                            'Model %d/%s' %
                            (float(k.split("_")[-1]), k.split("_")[0]),
                            float(v),
                            (epoch - 1) * self.options.steps_per_epoch + steps)

                    print(message)
                    # self.plot_losses(loss_file)

                steps += 1
                if steps >= self.options.steps_per_epoch:
                    break

            print('End of epoch %d / %d \t Time Taken: %d sec' %
                  (epoch, self.options.n_epochs + self.options.decay_epochs,
                   time.time() - epoch_start_time))

            if epoch % self.options.save_img_freq == 0:
                # self.model.predict()
                output_dir = self.options.output_path + "/" + self.options.name + "/Train/images"
                img_list = self.model.get_current_imgs()
                for idx, images in enumerate(img_list):
                    self.image_handler.save_image(
                        images['img'], output_dir,
                        'epoch_%03d_real_img_model_%d' % (epoch, idx))
                    self.image_handler.save_mask(
                        images['mask'], output_dir,
                        'epoch_%03d_fake_mask_model_%d' % (epoch, idx))
                    self.image_handler.save_mask(
                        images['gt'], output_dir,
                        'epoch_%03d_gt_model_%d' % (epoch, idx))
                    self.writer.add_image(
                        "Real Images", images['img'].data,
                        (epoch - 1) * self.options.steps_per_epoch + steps)
                    self.writer.add_image(
                        "fake Masks", images['mask'].data,
                        (epoch - 1) * self.options.steps_per_epoch + steps)
                    self.writer.add_image(
                        "Groundtruth Masks", images['gt'].data,
                        (epoch - 1) * self.options.steps_per_epoch + steps)

                self.create_html_file(epoch)

            if epoch % self.options.save_freq == 0:
                print('saving the model at the end of epoch %d' % epoch)
                self.model.save(str(epoch))

            if epoch > self.options.n_epochs:
                self.model.update_learning_rate()

    def predict_to_dir(self, n_predicts=0):
        """
        Function to predict Images from Dataroot to a subfolder of output_path
        :param n_predicts: number of Images to predict, set 0 to predict all images
        :return:
        """

        print("Started Prediction")
        if not n_predicts:
            n_predicts = max([len(dataset) for dataset in self.data_sets])

        for i, data in enumerate(self.data_sets[0]):
            self.model.set_inputs(
                [data['img'] for x in range(self.options.n_networks)])
            predicted_mask = self.model.predict()

            # FIXME: data['path_img'] gives list of strings instead of string
            save_name = (os.path.split(data['path_img'][0])[-1]).rsplit(
                '.', 1)[0]

            output_dir = self.options.output_path + "/" + self.options.name + "/Predict/" + "Epoch_" + str(
                self.options.load_epoch)
            self.image_handler.save_mask(predicted_mask[0], output_dir,
                                         save_name + '_pred')
            self.image_handler.save_image(
                self.model.get_current_imgs()[0]['img'], output_dir, save_name)

            if ((i + 1) % 10) == 0:
                print("Predicted %d of %d Images" % (i + 1, n_predicts))

            if (i + 1) >= n_predicts:
                break

        print("Finished Prediction")

    def predict(self, n_predicts=0):

        print("Started Prediction")

        predictions = []
        save_names = []

        if not n_predicts:
            n_predicts = max([len(dataset) for dataset in self.data_sets])

        for i, data in enumerate(self.data_sets[0]):
            self.model.set_inputs(
                [data['img'] for x in range(self.options.n_networks)])
            predicted_mask = self.model.predict()
            save_name = (os.path.split(data['path_img'][0])[-1]).rsplit(
                '.', 1)[0] + '_pred'
            save_names.append(save_name)

            predictions.append(predicted_mask)

            if ((i + 1) % 10) == 0:
                print("Predicted %d of %d Images" % (i + 1, n_predicts))

            if (i + 1) >= n_predicts:
                break

        print("Finished Prediction")
        return predictions, save_names

    def get_annotation_suggestions(self, n_predicts=0, n_samples=100):

        n_digits = len(repr(abs(n_samples)))

        predictions, save_names = self.predict(n_predicts=n_predicts)

        images = [[
            ImageHandler._tensor_to_image(img, mask=True) for img in imgs
        ] for imgs in predictions]
        result_imgs = []
        for imgs in images:
            # AND must start from ones, otherwise it is identically zero
            result_and = np.ones_like(imgs[0], dtype=bool)
            result_or = np.zeros_like(imgs[0], dtype=bool)
            for img in imgs:
                result_and = np.logical_and(img, result_and)
                result_or = np.logical_or(img, result_or)

            # Pixels predicted by some but not all models -> disagreement mask
            result_imgs.append(
                np.logical_xor(result_or, result_and).astype(np.uint8))

        uncertainties = []
        for idx, res_img in enumerate(result_imgs):
            uncertainties.append((np.sum(res_img), idx))

        uncertainties.sort(key=lambda tup: tup[0], reverse=True)

        # Never ask for more suggestions than there are images
        n_samples = min(n_samples, len(uncertainties))

        output_dir = self.options.output_path + "/" + self.options.name + "/Suggest/" + "Epoch_" + str(
            self.options.load_epoch)

        for i in range(n_samples):
            self.image_handler.save_image(result_imgs[uncertainties[i][1]],
                                          output_dir,
                                          save_names[uncertainties[i][1]])

    @staticmethod
    def erase_loss_file(loss_file, initial_epoch):
        """
        Function to erase all losses of future epochs
        Necessary for continued train with intermediate epoch or restart training with same name
        :param loss_file: file the losses are stored in
        :param initial_epoch: epoch to start training
        :return: None
        """

        new_content = []

        with open(loss_file, 'r') as f:
            content = f.readlines()

            for line in content:
                header, loss_data = line.split(")", maxsplit=1)

                header_value_paires = header.split(",")
                epoch = int(header_value_paires[0].split(":")[1])
                if epoch < initial_epoch:
                    new_content.append(line)

        with open(loss_file, 'w') as f:
            for line in new_content:
                f.write(line)
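
    # Worked example of the line format parsed above (it mirrors the `message`
    # string built in train()); the header sits before the first ')':
    #
    #     line = '(epoch: 3, step: 10, time/step: 0.123) seg_0: 0.456, '
    #     header, loss_data = line.split(")", maxsplit=1)
    #     int(header.split(",")[0].split(":")[1])   # -> 3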

    def plot_losses(self, loss_file):
        """
        Function to plot loss values
        :param loss_file: file to read loss values from
        :return: None
        """
        if not os.path.isfile(loss_file):
            raise ValueError('%s is not a file' % str(loss_file))

        seg_losses = []
        epochs = []
        steps = []

        with open(loss_file, 'r') as f:
            content = f.readlines()

        content = [x.strip(" ").strip("\n") for x in content]

        for line in content:
            header, loss_data = line.split(")", maxsplit=1)

            header_value_paires = header.split(",")
            epoch = int(header_value_paires[0].split(":")[1])
            step = int(header_value_paires[1].split(":")[1])

            step_total = (epoch - 1) * self.options.steps_per_epoch + step

            _tmp = str(loss_data).split(",")
            seg = _tmp[0]

            seg_losses.append(float(seg.split(":")[1]))
            epochs.append(epoch)
            steps.append(step_total)

        markers = {0: "o", 1: "s", 2: "^", 3: "D", 4: "*", 5: "x"}

        colors = {0: "b", 1: "g", 2: "r", 3: "c", 4: "m", 5: "k", 6: "y"}

        print("plotting Errors and save files to ", self.loss_dir)
        fig_losses_steps = plt.figure(1, figsize=(48, 27))
        fig_losses_epochs = plt.figure(2, figsize=(48, 27))

        figures = [fig_losses_steps, fig_losses_epochs]
        loss_labels = []
        for key, _ in self.model.get_current_errors().items():
            loss_labels.append("Loss " + str(key))
        # loss_labels = ["Loss Seg"]

        loss_list = [seg_losses]

        time_list = [steps]

        time_labels = ["Total Steps", "Epochs"]

        save_paths = [
            self.loss_dir + "/loss_plot_steps.png",
            self.loss_dir + "/loss_plot_epochs.png"
        ]

        max_epoch = max(epochs)

        for j in range(len(time_list)):
            plt.figure(j + 1)
            for i, loss in enumerate(loss_list):
                ax = figures[j].add_subplot(len(loss_list), 1, i + 1)
                style = markers[i % 6] + colors[i % 7] + "-"
                ax.plot(time_list[j],
                        loss_list[i],
                        style,
                        label=loss_labels[i],
                        markersize=3)
                ax.set_title(loss_labels[i])
                ax.set_xlabel(time_labels[j])
                ax.set_ylabel("Loss Values")
                if j == 0:
                    for ep in range(1, max_epoch + 1):
                        ax.axvline(ep * self.options.steps_per_epoch)

            figures[j].subplots_adjust(hspace=1.0)
            figures[j].savefig(save_paths[j])

    def create_html_file(self, current_epoch, width=400):
        """
        Function to create HTML file for better visualization
        :param current_epoch: current epoch (epoch shown at top of the HTML file)
        :param width: width of displayed images
        :return: None
        """
        print("Create HTML File")
        epoch_freq = self.options.save_img_freq

        web_dir = self.options.output_path + "/" + self.options.name + "/Train"
        self.image_handler.create_html_file(web_dir, "OverviewEpochs",
                                            "./images", current_epoch,
                                            epoch_freq,
                                            self.options.n_networks, width)
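
A hedged entry-point sketch for the class above; `parse_options` stands in for the project's own config parsing (argparse/YAML), which is not shown here:

if __name__ == '__main__':
    options = parse_options()          # hypothetical config helper
    net = SegmentationNetwork(options)
    net.train()
    net.predict_to_dir(n_predicts=0)   # 0 = predict the whole dataset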
class ControlMainWindow(QMainWindow):
    def __init__(self, parent=None):
        super(ControlMainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.settings = QSettings('Classificador', 'Independente')
        self._prepare_settings()
        self._abrir_pasta(self.settings.value('pasta_imagens'))
        # Path of the folder holding the parameter settings.
        self.pasta_settings = os.path.join(self.settings.value('pasta_imagens'), 'images_settings')

    def _prepare_settings(self):
        # Make sure the image directory setting exists
        if not self.settings.contains('pasta_imagens'):
            self.settings.setValue('pasta_imagens', '.')

    def _inicio_selecao(self):
        print ("oi")


    def show_image(self, cv_image, destination='imagem_original'):
        """Show image on the interface.

        :param np.ndarray cv_image: OpenCV image (numpy array).
        :param destination: Name of QLabel to display the image.
        """
        # Reorder the color channels
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # With or without alpha.
        alpha = False
        formato = QImage.Format_ARGB32 if alpha else QImage.Format_RGB888

        # When showing a cv_image read from disk, strides are required or the
        # image comes out distorted; strides are roughly the memory gaps
        # between rows.
        qimage = QImage(cv_image, cv_image.shape[1], cv_image.shape[0], cv_image.strides[0], formato)
        qpixmap = QPixmap.fromImage(qimage)
        imagem_widget = self.ui.centralwidget.findChild(QLabel, destination)
        imagem_widget.setPixmap(qpixmap)

    def _abrir_pasta(self, pasta):
        self.ui.caminho_pasta_imagens.setText(pasta)
        self.pasta = pasta
        # List the image files (jpg/png).
        arquivos = os.listdir(pasta)
        arquivos_imagens = []
        for arquivo in arquivos:
            print(os.path.splitext(arquivo)[1])
            if os.path.splitext(arquivo)[1] in ('.jpg', '.JPG', '.png', '.PNG'):
                arquivos_imagens.append(arquivo)
        self.ui.files_list.clear()
        self.ui.files_list.insertItems(0, arquivos_imagens)

    def selecionar_pasta(self):
        pasta = QFileDialog.getExistingDirectory(self, caption="Selecione uma pasta.")
        self.settings.setValue('pasta_imagens', pasta)
        # Path of the folder holding the parameter settings.
        self.pasta_settings = os.path.join(self.settings.value('pasta_imagens'), 'images_settings')
        self._abrir_pasta(pasta)

    def selecionar_imagem(self):
        item = self.ui.files_list.currentItem()
        arquivo_imagem = os.path.join(self.pasta, item.text())
        imagem_settings = ImagemSettings(item.text(), self.pasta_settings)

        self.image_handler = ImageHandler(arquivo_imagem, imagem_settings)

        # Make the HSV image available on the widget
        self.ui.imagem_original.cv_image = self.image_handler.hsv_image
        self.alterar_interface()

        self.show_image(self.image_handler.cv_image)
        self.show_image(self.image_handler.classificar_imagem(), destination='imagem_classificada')

    def alterar_interface(self):
        parametro = 'verde_upper'
        self.ui.hsv_h.setValue(self.image_handler.imagem_settings.param[parametro][0])
        self.ui.hsv_s.setValue(self.image_handler.imagem_settings.param[parametro][1])
        self.ui.hsv_v.setValue(self.image_handler.imagem_settings.param[parametro][2])
        parametro = 'verde_lower'
        self.ui.hsv_h_2.setValue(self.image_handler.imagem_settings.param[parametro][0])
        self.ui.hsv_s_2.setValue(self.image_handler.imagem_settings.param[parametro][1])
        self.ui.hsv_v_2.setValue(self.image_handler.imagem_settings.param[parametro][2])
        parametro = 'palha_upper'
        self.ui.hsv_h_3.setValue(self.image_handler.imagem_settings.param[parametro][0])
        self.ui.hsv_s_3.setValue(self.image_handler.imagem_settings.param[parametro][1])
        self.ui.hsv_v_3.setValue(self.image_handler.imagem_settings.param[parametro][2])
        parametro = 'palha_lower'
        self.ui.hsv_h_4.setValue(self.image_handler.imagem_settings.param[parametro][0])
        self.ui.hsv_s_4.setValue(self.image_handler.imagem_settings.param[parametro][1])
        self.ui.hsv_v_4.setValue(self.image_handler.imagem_settings.param[parametro][2])

    def alterar_parametro(self):
        # Read the parameters.
        # Green.
        parametro = 'verde_upper'
        h, s, v = self.ui.hsv_h.value(), self.ui.hsv_s.value(), self.ui.hsv_v.value()
        self.image_handler.imagem_settings.param[parametro] = [h, s, v]
        parametro = 'verde_lower'
        h, s, v = self.ui.hsv_h_2.value(), self.ui.hsv_s_2.value(), self.ui.hsv_v_2.value()
        self.image_handler.imagem_settings.param[parametro] = [h, s, v]
        self.image_handler.imagem_settings.salvar()

        # Straw.
        parametro = 'palha_upper'
        h, s, v = self.ui.hsv_h_3.value(), self.ui.hsv_s_3.value(), self.ui.hsv_v_3.value()
        self.image_handler.imagem_settings.param[parametro] = [h, s, v]
        parametro = 'palha_lower'
        h, s, v = self.ui.hsv_h_4.value(), self.ui.hsv_s_4.value(), self.ui.hsv_v_4.value()
        self.image_handler.imagem_settings.param[parametro] = [h, s, v]

        # Save.
        self.image_handler.imagem_settings.salvar()

        # Recompute.
        self.show_image(self.image_handler.classificar_imagem(), destination='imagem_classificada')
Example #16
def run_style_transfer(image_size,
                       content_image_path,
                       style_image_path,
                       content_layers_weights,
                       style_layers_weights,
                       variation_weight,
                       n_steps,
                       shifting_activation_value,
                       device_name,
                       preserve_colors):
    print('Transfer style to content image')
    print('Number of iterations: %s' % n_steps)
    print('Preserve colors: %s' % preserve_colors)
    print('--------------------------------')
    print('Content image path: %s' % content_image_path)
    print('Style image path: %s' % style_image_path)
    print('--------------------------------')
    print('Content layers: %s' % list(content_layers_weights.keys()))
    print('Content weights: %s' % list(content_layers_weights.values()))
    print('Style layers: %s' % list(style_layers_weights.keys()))
    print('Style weights: %s' % list(style_layers_weights.values()))
    print('Variation weight: %s' % variation_weight)
    print('--------------------------------')
    print('Shifting activation value: %s' % shifting_activation_value)
    print('--------------------------------\n\n')

    device = torch.device("cuda" if (torch.cuda.is_available() and device_name == 'cuda') else "cpu")

    image_handler = ImageHandler(image_size=image_size,
                                 content_image_path=content_image_path,
                                 style_image_path=style_image_path,
                                 device=device,
                                 preserve_colors=preserve_colors)
    content_layer_names = list(content_layers_weights.keys())
    style_layer_names = list(style_layers_weights.keys())
    layer_names = content_layer_names + style_layer_names

    last_layer = get_last_used_conv_layer(layer_names)
    model = transfer_vgg19(last_layer, device)

    print('--------------------------------')
    print('Model:')
    print(model)
    print('--------------------------------')
    content_features = model(image_handler.content_image, content_layer_names)
    content_losses = {layer_name: ContentLoss(weight=weight)
                      for layer_name, weight in content_layers_weights.items()}

    style_features = model(image_handler.style_image, style_layer_names)
    style_losses = {layer_name: StyleLoss(weight=weight,
                                          shifting_activation_value=shifting_activation_value)
                    for layer_name, weight in style_layers_weights.items()}

    variation_loss = VariationLoss(weight=variation_weight)

    combination_image = image_handler.content_image.clone()
    optimizer = optim.LBFGS([combination_image.requires_grad_()])
    run = [0]
    while run[0] <= n_steps:
        def closure():
            # correct the values of updated input image
            combination_image.data.clamp_(0, 1)

            optimizer.zero_grad()
            out = model(combination_image, layer_names)
            variation_score = variation_loss(combination_image)
            content_score = torch.sum(torch.stack([loss(out[layer_name], content_features[layer_name].detach())
                                                   for layer_name, loss in content_losses.items()]))
            style_score = torch.sum(torch.stack([loss(out[layer_name], style_features[layer_name].detach())
                                                 for layer_name, loss in style_losses.items()]))

            loss = style_score + content_score + variation_score
            loss.backward()

            run[0] += 1
            if run[0] % 50 == 0:
                print("run {}:".format(run))
                print('Style Loss : {:4f} Content Loss: {:4f} Variation Loss: {:4f}'.format(
                    style_score.item(), content_score.item(), variation_score.item()))

            return loss
        optimizer.step(closure)

    # a last correction...
    combination_image.data.clamp_(0, 1)

    plt.figure()
    image_handler.imshow(combination_image, title='Output Image')
    plt.show()
    return image_handler.image_unloader(combination_image)
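
A hedged invocation sketch: the VGG19-style layer names, paths, and weights below are illustrative assumptions, not values from the original project:

output_image = run_style_transfer(
    image_size=512,
    content_image_path='content.jpg',
    style_image_path='style.jpg',
    content_layers_weights={'conv_4': 1.0},
    style_layers_weights={'conv_1': 0.2, 'conv_2': 0.2, 'conv_3': 0.2,
                          'conv_4': 0.2, 'conv_5': 0.2},
    variation_weight=1e-5,
    n_steps=300,
    shifting_activation_value=0.0,
    device_name='cuda',
    preserve_colors=False)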
Example #17
class DetectorNode(object):
    def __init__(self):
        self.classnames = [
            "background", "person", "crutches", "walking_frame", "wheelchair",
            "push_wheelchair"
        ]

        detectron_ops_lib = net_helper.get_detectron_ops_lib()
        dyndep.InitOpsLibrary(detectron_ops_lib)

        model_path = rospy.get_param("~model_path")
        self.fixed_frame = rospy.get_param('~fixed_frame', 'odom')
        self.tracking = rospy.get_param('~tracking', True)
        self.filter_detections = rospy.get_param('~filter_inside_boxes', True)
        self.inside_box_ratio = rospy.get_param('~inside_box_ratio', 0.8)
        camera_topic = rospy.get_param('~camera_topic',
                                       '/camera/color/image_raw')
        camera_info_topic = rospy.get_param('~camera_info_topic',
                                            '/camera/color/camera_info')

        self.net = caffe2_pb2.NetDef()
        with open(os.path.join(model_path, "model.pb"), "rb") as f:
            self.net.ParseFromString(f.read())

        self.init_net = caffe2_pb2.NetDef()
        with open(os.path.join(model_path, "model_init.pb"), "rb") as f:
            self.init_net.ParseFromString(f.read())

        workspace.ResetWorkspace()
        workspace.RunNetOnce(self.init_net)
        for op in self.net.op:
            for blob_in in op.input:
                if not workspace.HasBlob(blob_in):
                    workspace.CreateBlob(blob_in)
        workspace.CreateNet(self.net)

        # initialize subscribers
        rospy.Subscriber(camera_topic,
                         Image,
                         self.image_callback,
                         queue_size=1)
        rospy.Subscriber(camera_info_topic,
                         CameraInfo,
                         self.cam_info_callback,
                         queue_size=1)

        # image queues
        self.last_received_image = None  # set from image topic
        self.last_processed_image = None  # set from image topic
        self.new_image = False

        self.cam_calib = None  # set from camera info
        self.camera_frame = None  # set from camera info

        bridge = CvBridge()
        self.publisher = Publisher(self.classnames, bridge)
        observation_model = np.loadtxt(os.path.join(model_path,
                                                    "observation_model.txt"),
                                       delimiter=',')
        ekf_sensor_noise = np.loadtxt(os.path.join(model_path, "meas_cov.txt"),
                                      delimiter=',')
        self.tracker = Tracker(ekf_sensor_noise,
                               observation_model,
                               use_hmm=True)
        self.tfl = tf.TransformListener()
        self.image_handler = ImageHandler(bridge, 540, 960)
        Server(TrackingParamsConfig, self.reconfigure_callback)
        thresholds = {}
        with open(os.path.join(model_path, "AP_thresholds.txt")) as f:
            for line in f:
                (key, val) = line.split(',')
                thresholds[key] = float(val)
        self.cla_thresholds = thresholds

    def reconfigure_callback(self, config, level):

        pos_cov_threshold = config["pos_cov_threshold"]
        mahalanobis_threshold = config["mahalanobis_max_dist"]
        euclidean_threshold = config["euclidean_max_dist"]

        accel_noise = config["accel_noise"]
        height_noise = config["height_noise"]
        init_vel_sigma = config["init_vel_sigma"]
        hmm_transition_prob = config["hmm_transition_prob"]

        use_hmm = config["use_hmm"]

        self.tracker.set_thresholds(pos_cov_threshold, mahalanobis_threshold,
                                    euclidean_threshold)

        self.tracker.set_tracking_config(accel_noise, height_noise,
                                         init_vel_sigma, hmm_transition_prob,
                                         use_hmm)

        return config

    def get_trafo_odom_in_cam(self):
        trafo_odom_in_cam = None

        if self.camera_frame is not None:

            try:
                time = self.last_processed_image.header.stamp
                self.tfl.waitForTransform(self.camera_frame, self.fixed_frame,
                                          time, rospy.Duration(0.5))
                pos, quat = self.tfl.lookupTransform(self.camera_frame,
                                                     self.fixed_frame, time)

                trans = tf.transformations.translation_matrix(pos)
                rot = tf.transformations.quaternion_matrix(quat)

                trafo_odom_in_cam = np.dot(trans, rot)

            except Exception as e:
                rospy.logerr(e)

        else:
            rospy.logerr(
                "camera frame not set, cannot get trafo between camera and fixed frame"
            )

        return trafo_odom_in_cam

    def run_model_pb(self, im):
        input_blobs = net_helper._prepare_blobs(
            im, [[[102.9801, 115.9465, 122.7717]]], 540, 960)

        gpu_blobs = ['data']

        for k, v in input_blobs.items():
            workspace.FeedBlob(
                core.ScopedName(k), v,
                net_helper.get_device_option_cuda()
                if k in gpu_blobs else net_helper.get_device_option_cpu())

        try:
            workspace.RunNet(self.net.name)
            scores = workspace.FetchBlob("score_nms")
            cls_prob = workspace.FetchBlob(
                core.ScopedName('cls_prob')).squeeze()
            classids = workspace.FetchBlob("class_nms")
            boxes = workspace.FetchBlob("bbox_nms")
            depths = workspace.FetchBlob("depth_pred").squeeze()
            pred_boxes = workspace.FetchBlob("pred_bbox").squeeze()

            # Get depth predictions per class
            num_classes = len(self.classnames)
            depths = net_helper.get_depth_nms_predictions(
                pred_boxes, depths, cls_prob, num_classes)

        except Exception as e:
            print("Running pb model failed.\n{}".format(e))
            # may not detect anything at all
            R = 0
            scores = np.zeros((R, ), dtype=np.float32)
            boxes = np.zeros((R, 4), dtype=np.float32)
            classids = np.zeros((R, ), dtype=np.float32)
            depths = np.zeros((R, ), dtype=np.float32)

        boxes = np.column_stack((boxes, scores))
        detections = []

        for i in range(len(classids)):
            detection = {}

            detection["bbox"] = list(map(int, boxes[i, :4]))
            detection["score"] = boxes[i, -1]
            detection["depth"] = depths[i]
            detection["category_id"] = int(classids[i])

            if detection["score"] > self.cla_thresholds[self.classnames[
                    detection["category_id"]]]:
                detections.append(detection)

        if self.filter_detections:
            filter_inside_boxes(detections,
                                inside_ratio_thresh=self.inside_box_ratio)

        return detections

    def update_tracker(self, detections, trafo_odom_in_cam, dt):
        if dt is not None:
            self.tracker.predict(dt)

        if (trafo_odom_in_cam is not None) and (self.cam_calib is not None):
            self.tracker.update(detections, trafo_odom_in_cam, self.cam_calib)

    def process_last_image(self):
        if self.new_image:

            dt = None
            if self.last_processed_image is not None:
                dt = (self.last_received_image.header.stamp -
                      self.last_processed_image.header.stamp).to_sec()
            self.last_processed_image = self.last_received_image

            image = self.image_handler.get_image(self.last_processed_image)

            detections = self.run_model_pb(image)

            trafo_odom_in_cam = self.get_trafo_odom_in_cam()

            if self.tracking:
                self.update_tracker(detections, trafo_odom_in_cam, dt)

            # publish messages
            self.publisher.publish_results(image,
                                           self.last_processed_image.header,
                                           detections,
                                           self.tracker,
                                           self.cam_calib,
                                           trafo_odom_in_cam,
                                           self.fixed_frame,
                                           tracking=self.tracking)
            self.new_image = False

    def get_cam_calib(self, camera_info):
        cam_calib = {}

        # camera calibration
        cam_calib["fx"] = camera_info.K[0]
        cam_calib["cx"] = camera_info.K[2]
        cam_calib["fy"] = camera_info.K[4]
        cam_calib["cy"] = camera_info.K[5]

        return cam_calib
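
    # With these pinhole intrinsics a pixel (u, v) at depth z back-projects
    # into the camera frame as (standard pinhole model, for reference):
    #
    #     x = (u - cam_calib["cx"]) / cam_calib["fx"] * z
    #     y = (v - cam_calib["cy"]) / cam_calib["fy"] * z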

    def cam_info_callback(self, camera_info):
        if self.cam_calib is None:
            rospy.loginfo("camera info received")
            self.cam_calib = self.get_cam_calib(camera_info)
            self.camera_frame = camera_info.header.frame_id

    def image_callback(self, image):
        self.last_received_image = image
        self.new_image = True
Example #18
    def __init__(self):
        self.classnames = [
            "background", "person", "crutches", "walking_frame", "wheelchair",
            "push_wheelchair"
        ]

        detectron_ops_lib = net_helper.get_detectron_ops_lib()
        dyndep.InitOpsLibrary(detectron_ops_lib)

        model_path = rospy.get_param("~model_path")
        self.fixed_frame = rospy.get_param('~fixed_frame', 'odom')
        self.tracking = rospy.get_param('~tracking', True)
        self.filter_detections = rospy.get_param('~filter_inside_boxes', True)
        self.inside_box_ratio = rospy.get_param('~inside_box_ratio', 0.8)
        camera_topic = rospy.get_param('~camera_topic',
                                       '/camera/color/image_raw')
        camera_info_topic = rospy.get_param('~camera_info_topic',
                                            '/camera/color/camera_info')

        self.net = caffe2_pb2.NetDef()
        with open(os.path.join(model_path, "model.pb"), "rb") as f:
            self.net.ParseFromString(f.read())

        self.init_net = caffe2_pb2.NetDef()
        with open(os.path.join(model_path, "model_init.pb"), "rb") as f:
            self.init_net.ParseFromString(f.read())

        workspace.ResetWorkspace()
        workspace.RunNetOnce(self.init_net)
        for op in self.net.op:
            for blob_in in op.input:
                if not workspace.HasBlob(blob_in):
                    workspace.CreateBlob(blob_in)
        workspace.CreateNet(self.net)

        # initialize subscribers
        rospy.Subscriber(camera_topic,
                         Image,
                         self.image_callback,
                         queue_size=1)
        rospy.Subscriber(camera_info_topic,
                         CameraInfo,
                         self.cam_info_callback,
                         queue_size=1)

        # image queues
        self.last_received_image = None  # set from image topic
        self.last_processed_image = None  # set from image topic
        self.new_image = False

        self.cam_calib = None  # set from camera info
        self.camera_frame = None  # set from camera info

        bridge = CvBridge()
        self.publisher = Publisher(self.classnames, bridge)
        observation_model = np.loadtxt(os.path.join(model_path,
                                                    "observation_model.txt"),
                                       delimiter=',')
        ekf_sensor_noise = np.loadtxt(os.path.join(model_path, "meas_cov.txt"),
                                      delimiter=',')
        self.tracker = Tracker(ekf_sensor_noise,
                               observation_model,
                               use_hmm=True)
        self.tfl = tf.TransformListener()
        self.image_handler = ImageHandler(bridge, 540, 960)
        Server(TrackingParamsConfig, self.reconfigure_callback)
        thresholds = {}
        with open(os.path.join(model_path, "AP_thresholds.txt")) as f:
            for line in f:
                (key, val) = line.split(',')
                thresholds[key] = float(val)
        self.cla_thresholds = thresholds
Example #19
from image_handler import ImageHandler

import cv2
import os
import time

if __name__ == '__main__':

    image_index = len(os.listdir(os.path.join("cache")))

    while True:
        for _ in range(10):
            image_handler = ImageHandler()
            if image_handler.is_legal():
                print("legal")

                image_list = image_handler.generate_uniform_image()
                image = image_handler.get_gray_static_image()
                cv2.imwrite(
                    os.path.join("origin",
                                 "{}.png".format(str(image_index).zfill(5))),
                    image)
                image_index += 1

                if image_handler.get_suffix() == "png":
                    dest_dir = "png"
                else:
                    dest_dir = "gif"
                for frame_index, image in enumerate(image_list):
                    # assumed completion: the source snippet is truncated here;
                    # frames are paired with the static image saved above
                    cv2.imwrite(
                        os.path.join(dest_dir, "{}_{}.png".format(
                            str(image_index - 1).zfill(5), frame_index)),
                        image)
Example #20
class DisplayHandler(wx.EvtHandler):
    __image_handler = None
    __video_handler = None
    __camera_handler = None
    __display_handler = None

    def __init__(self, gui):
        wx.EvtHandler.__init__(self)
        self.__gui = gui
        self._start_threads()

    def _start_threads(self):
        self.__image_handler = ImageHandler(self.__gui)
        self.__video_handler = VideoHandler(self.__gui)
        self.__camera_handler = CameraHandler(self.__gui)
        self.__display_handler = self.__image_handler

    # ########## Interface to GUI ##########
    def start_display(self):
        print(whoami())
        self.__display_handler.Start()

    def stop_display(self):
        print(whoami())
        self.__display_handler.Stop()

    def next_file(self):
        print(whoami())
        self.__display_handler.Stop()
        self.__display_handler.next_file()
        self.__display_handler.Start()

    def enable_processing(self, control=False):
        print(whoami())
        self.__display_handler.enable_processing(control)

    def save_file(self, control=False):
        print(whoami())
        self.__display_handler.save_file(control)

    def set_image(self):
        print(whoami())
        self.__display_handler.Stop()
        self.__display_handler = self.__image_handler
        self.__display_handler.reset()

    def set_video(self):
        print(whoami())
        self.__display_handler.Stop()
        self.__display_handler = self.__video_handler
        self.__display_handler.reset()

    def set_camera(self):
        print(whoami())
        self.__display_handler.Stop()
        self.__display_handler = self.__camera_handler
        self.__display_handler.reset()

    def load_files(self, files):
        print(whoami())
        self.__display_handler.set_files(files)

    def CleanUp(self):
        print(whoami())
        if self.__display_handler:
            self.__display_handler.stop()
        if self.__image_handler:
            self.__image_handler.stop()
        if self.__video_handler:
            self.__video_handler.stop()
        if self.__camera_handler:
            self.__camera_handler.stop()
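
A short usage sketch for DisplayHandler (hedged; gui stands in for whatever wx frame the application owns). The three handlers share one start/stop interface, so the GUI swaps display sources without tracking which thread is active:

handler = DisplayHandler(gui)
handler.set_video()        # route display through the video handler
handler.start_display()
# ... later, on shutdown ...
handler.CleanUp()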
Example #21
class Detector:
    def __init__(self):

        self.classnames = [
            "background", "person", "crutches", "walking_frame", "wheelchair",
            "push_wheelchair"
        ]

        #read rosparams
        config_file = rospy.get_param('~model_config', "")
        self.fixed_frame = rospy.get_param('~fixed_frame', 'odom')
        self.tracking = rospy.get_param('~tracking', True)
        self.filter_detections = rospy.get_param('~filter_inside_boxes', True)
        self.inside_box_ratio = rospy.get_param('~inside_box_ratio', 0.8)
        camera_topic = rospy.get_param('~camera_topic',
                                       '/kinect2/qhd/image_color_rect')
        camera_info_topic = rospy.get_param('~camera_info_topic',
                                            '/kinect2/qhd/camera_info')

        #initialize subscribers
        rospy.Subscriber(camera_topic,
                         Image,
                         self.image_callback,
                         queue_size=1)
        rospy.Subscriber(camera_info_topic,
                         CameraInfo,
                         self.cam_info_callback,
                         queue_size=1)

        #detection model and tracker
        self.setup_model_and_tracker(config_file)

        #image queues
        self.last_received_image = None  #set from image topic
        self.last_processed_image = None  #set from image topic
        self.new_image = False

        self.cam_calib = None  #set from camera info
        self.camera_frame = None  #set from camera info

        #helpers
        Server(TrackingParamsConfig, self.reconfigure_callback)
        bridge = CvBridge()
        self.viz_helper = Visualizer(len(self.classnames))
        self.publisher = Publisher(self.classnames, bridge)
        self.image_handler = ImageHandler(bridge, cfg.TEST.MAX_SIZE,
                                          cfg.TEST.SCALE)
        self.tfl = tf.TransformListener()

    def setup_model_and_tracker(self, config_file):

        detectron_root = os.path.join(
            os.path.dirname(inspect.getfile(detectron)), os.pardir)

        if not os.path.exists(config_file):
            rospy.logerr(
                "config file '{}' does not exist. ".format(config_file) +
                "Please specify a valid model config file for the " +
                "model_config ros param. See " +
                "https://github.com/marinaKollmitz/mobilityaids_detector " +
                "for setup instructions")
            exit(1)  #TODO throw exception instead of exiting

        merge_cfg_from_file(config_file)

        #absolute output dir path
        cfg.OUTPUT_DIR = os.path.join(detectron_root, cfg.OUTPUT_DIR)

        weights_file = os.path.join(detectron_root, cfg.TEST.WEIGHTS)
        val_dataset = cfg.TRACK.VALIDATION_DATASET

        assert_and_infer_cfg()
        self.model = infer_engine.initialize_model_from_cfg(weights_file)

        #initialize tracker
        class_thresh, obs_model, meas_cov = validate_tracking_params(
            weights_file, val_dataset)
        self.tracker = Tracker(meas_cov, obs_model, use_hmm=True)
        self.cla_thresholds = class_thresh

    def reconfigure_callback(self, config, level):

        pos_cov_threshold = config["pos_cov_threshold"]
        mahalanobis_threshold = config["mahalanobis_max_dist"]
        euclidean_threshold = config["euclidean_max_dist"]

        accel_noise = config["accel_noise"]
        height_noise = config["height_noise"]
        init_vel_sigma = config["init_vel_sigma"]
        hmm_transition_prob = config["hmm_transition_prob"]

        use_hmm = config["use_hmm"]

        self.tracker.set_thresholds(pos_cov_threshold, mahalanobis_threshold,
                                    euclidean_threshold)

        self.tracker.set_tracking_config(accel_noise, height_noise,
                                         init_vel_sigma, hmm_transition_prob,
                                         use_hmm)

        return config

    def get_trafo_odom_in_cam(self):

        trafo_odom_in_cam = None

        if self.camera_frame is not None:

            try:
                time = self.last_processed_image.header.stamp
                self.tfl.waitForTransform(self.camera_frame, self.fixed_frame,
                                          time, rospy.Duration(0.5))
                pos, quat = self.tfl.lookupTransform(self.camera_frame,
                                                     self.fixed_frame, time)

                trans = tf.transformations.translation_matrix(pos)
                rot = tf.transformations.quaternion_matrix(quat)

                trafo_odom_in_cam = np.dot(trans, rot)

            except Exception as e:
                print(e)

        else:
            rospy.logerr(
                "camera frame not set, cannot get trafo between camera and fixed frame"
            )

        return trafo_odom_in_cam

    def get_detections(self, image):

        cls_boxes, cls_depths, cls_segms, cls_keyps = infer_engine.im_detect_all(
            self.model, image, None)

        boxes, depths, _segms, _keyps, classes = convert_from_cls_format(
            cls_boxes, cls_depths, None, None)
        detections = []

        for i in range(len(classes)):
            detection = {}

            detection["bbox"] = boxes[i, :4]
            detection["score"] = boxes[i, -1]
            detection["depth"] = depths[i]
            detection["category_id"] = classes[i]

            if detection["score"] > self.cla_thresholds[self.classnames[
                    detection["category_id"]]]:
                detections.append(detection)

        if self.filter_detections:
            filter_inside_boxes(detections,
                                inside_ratio_thresh=self.inside_box_ratio)

        return detections

    def update_tracker(self, detections, trafo_odom_in_cam, dt):

        if dt is not None:
            self.tracker.predict(dt)

        if (trafo_odom_in_cam is not None) and (self.cam_calib is not None):
            self.tracker.update(detections, trafo_odom_in_cam, self.cam_calib)

    def process_detections(self, image, detections, trafo_odom_in_cam, dt):

        if self.tracking:
            self.update_tracker(detections, trafo_odom_in_cam, dt)

        #publish messages
        self.publisher.publish_results(image,
                                       self.last_processed_image.header,
                                       detections,
                                       self.tracker,
                                       self.cam_calib,
                                       trafo_odom_in_cam,
                                       self.fixed_frame,
                                       tracking=self.tracking)

    def process_last_image(self):

        if self.new_image:

            dt = None
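            # dt between consecutive processed frames drives the tracker's
            # EKF predict step (see update_tracker)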
            if self.last_processed_image is not None:
                dt = (self.last_received_image.header.stamp -
                      self.last_processed_image.header.stamp).to_sec()
            self.last_processed_image = self.last_received_image

            image = self.image_handler.get_image(self.last_processed_image)

            with c2_utils.NamedCudaScope(0):
                detections = self.get_detections(image)

            trafo_odom_in_cam = self.get_trafo_odom_in_cam()

            self.process_detections(image, detections, trafo_odom_in_cam, dt)
            self.new_image = False

    def get_cam_calib(self, camera_info):

        cam_calib = {}

        #camera calibration
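        #K of the CameraInfo message is the row-major 3x3 intrinsic matrix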
        cam_calib["fx"] = camera_info.K[0]
        cam_calib["cx"] = camera_info.K[2]
        cam_calib["fy"] = camera_info.K[4]
        cam_calib["cy"] = camera_info.K[5]

        return cam_calib

    def cam_info_callback(self, camera_info):

        if self.cam_calib is None:
            rospy.loginfo("camera info received")
            self.cam_calib = self.get_cam_calib(camera_info)
            self.camera_frame = camera_info.header.frame_id

    def image_callback(self, image):

        self.last_received_image = image
        self.new_image = True
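
For context, a hedged sketch (not part of the node) of how the intrinsics collected in get_cam_calib() are used: a pixel (u, v) with metric depth back-projects into the camera frame via the standard pinhole model.

def pixel_to_cam(u, v, depth, cam_calib):
    # invert the pinhole projection u = fx*x/z + cx, v = fy*y/z + cy
    x = (u - cam_calib["cx"]) * depth / cam_calib["fx"]
    y = (v - cam_calib["cy"]) * depth / cam_calib["fy"]
    return x, y, depth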
Example #22
# encoding: cp1252
import pygame, os
import pyautogui
from image_handler import ImageHandler
from Tkinter import Tk
from PIL import Image
from options_gui import OptionsGUI

pygame.init()

if __name__ == "__main__":
    im_handle = ImageHandler()
    pyautogui.screenshot('foo.png')
    input_loc = 'foo.png'
    output_loc = 'out.png'
    screen, px = im_handle.setup(input_loc)
    left, upper, right, lower = im_handle.mainLoop(screen, px)

    # ensure output rect always has positive width, height
    if right < left:
        left, right = right, left
    if lower < upper:
        lower, upper = upper, lower
    im = Image.open(input_loc)
    im = im.crop((left, upper, right, lower))
    pygame.display.quit()
    im.save(output_loc)
    os.remove(input_loc)

    # File options
    root = Tk()
Example #23
# reconstructed preamble (the snippet as scraped starts mid-statement)
import argparse
import logging
import time

from watchdog.observers import Observer
from image_handler import ImageHandler

logger = logging.getLogger(__name__)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Command to reduce image size and lossless compression')
    parser.add_argument('inputDir', help='Input directory to be monitored')
    parser.add_argument(
        'outputDir',
        help='Output directory where transformed images will be saved')
    parser.add_argument(
        '-r',
        '--ratio',
        type=float,
        default=1.0,
        help='Reduce size ratio, from 0.0 to 1.0, default: 1.0')
    args = parser.parse_args()

    # Image handler setup
    logger.info(
        'Process started. Monitoring directory: {inputDir}. Output directory: {outputDir}'
        .format(inputDir=args.inputDir, outputDir=args.outputDir))
    observer: Observer = Observer()
    event_handler: ImageHandler = ImageHandler(size_ratio=args.ratio,
                                               dest_path=args.outputDir)
    observer.schedule(event_handler, args.inputDir)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    observer.join()
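
A hedged sketch of what the ImageHandler used above might look like: a watchdog FileSystemEventHandler that shrinks newly created images with Pillow. The watchdog and Pillow calls are real API; the resize policy and output naming are assumptions.

import os
from watchdog.events import FileSystemEventHandler
from PIL import Image

class ImageHandler(FileSystemEventHandler):
    def __init__(self, size_ratio=1.0, dest_path="."):
        self.size_ratio = size_ratio
        self.dest_path = dest_path

    def on_created(self, event):
        if event.is_directory:
            return
        im = Image.open(event.src_path)
        w, h = im.size
        im = im.resize((int(w * self.size_ratio), int(h * self.size_ratio)))
        # optimize=True asks the encoder for lossless size reduction
        im.save(os.path.join(self.dest_path,
                             os.path.basename(event.src_path)),
                optimize=True)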
Example #24
class RecaptchaSolver:
    def __init__(self, solve_url, use_proxies=True, headless=False):
        options = Options()
        options.headless = headless

        profile = webdriver.FirefoxProfile()
        if use_proxies:
            proxy = load_proxy()
            profile.set_preference("network.proxy.type", 1)
            profile.set_preference("network.proxy.http", proxy["ip"])
            profile.set_preference("network.proxy.http_port", proxy["port"])
            if "username" in proxy:
                credentials = b64encode(
                    f'{proxy["username"]}:{proxy["password"]}'.encode(
                        "ascii")).decode()
                profile.set_preference("extensions.closeproxyauth.authtoken",
                                       credentials)

        profile.set_preference("dom.webdriver.enabled", False)
        profile.set_preference("useAutomationExtension", False)
        profile.update_preferences()

        try:
            self.driver = webdriver.Firefox(firefox_profile=profile,
                                            options=options)
        except WebDriverException:
            options.headless = True
            self.driver = webdriver.Firefox(firefox_profile=profile,
                                            options=options)

        self.image_handler = ImageHandler()
        self.solve_url = solve_url
        self.recaptcha_task = RecaptchaTask()

    def solve(self):
        self.load_captcha_url()
        self.switch_to_recap_iframe()
        self.trigger_captcha()
        self.switch_to_challenge_iframe()
        while True:
            self.check_challenge_type()
            self.find_image_grid()
            self.solve_image_grid()
            self.solve_new_images()
            success = self.verify_challenge()
            if success:
                recaptcha_token = self.get_recaptcha_token()
                self.driver.quit()
                return recaptcha_token

    def load_captcha_url(self):
        logger.debug(f"Load url: {self.solve_url}")
        self.driver.get(self.solve_url)

    def switch_to_recap_iframe(self):
        logger.debug("Searching for recaptcha iframe")
        recaptcha_iframe = WebDriverWait(self.driver, 25).until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, 'iframe[title="reCAPTCHA"]')))
        logger.debug("Found iframe, switching to it...")
        self.driver.switch_to.frame(recaptcha_iframe.get_attribute("name"))

    def trigger_captcha(self):
        logger.debug("Searching for recaptcha checkbox")
        recaptcha_checkbox = WebDriverWait(self.driver, 25).until(
            EC.presence_of_element_located(
                (By.CLASS_NAME, "recaptcha-checkbox")))
        logger.debug("Found recaptcha checkbox, delaying before click...")
        sleep_random(1.0, 3.0)

        ActionChains(self.driver).move_to_element(recaptcha_checkbox).perform()
        recaptcha_checkbox.click()

    def switch_to_challenge_iframe(self):
        logger.debug("Switching back to parent frame")
        self.driver.switch_to.parent_frame()
        logger.debug("Searching for challenge iframe")
        recaptcha_challenge_iframe = WebDriverWait(self.driver, 25).until(
            EC.visibility_of_element_located(
                (By.CSS_SELECTOR, 'iframe[title="recaptcha challenge"]')))
        logger.debug("Found challenge iframe, switching to it...")
        self.driver.switch_to.frame(
            recaptcha_challenge_iframe.get_attribute("name"))

    def check_challenge_type(self):
        while True:
            class_name = "rc-imageselect-desc-no-canonical"
            try:
                captcha_type = self.driver.find_element_by_class_name(
                    "rc-imageselect-desc-no-canonical").get_attribute(
                        "textContent")
            except NoSuchElementException:
                captcha_type = self.driver.find_element_by_class_name(
                    "rc-imageselect-desc").get_attribute("textContent")
                class_name = "rc-imageselect-desc"

            if "Select all squares with" in captcha_type:
                logger.debug("Fetching new challenge...")
                self.reload_captcha()
                continue
            elif "Select all images with" in captcha_type:
                desired_image_type = self.driver.find_element_by_class_name(
                    class_name).find_element_by_tag_name(
                        "strong").get_attribute("textContent")
                if desired_image_type in image_types_conversions:
                    logger.debug(f"Challenge type found: {desired_image_type}")
                    self.recaptcha_task.desired_image_type = desired_image_type
                    return
                else:
                    logger.error(
                        f"Unknown challenge type found ({desired_image_type}), reloading..."
                    )
                    self.reload_captcha()
                    continue
            else:
                raise Exception("Unknown challenge type")

    def find_image_grid(self):
        logger.debug("Searching for image grid")
        image_grid_url = self.driver.find_element_by_class_name(
            "rc-image-tile-wrapper").find_element_by_tag_name(
                "img").get_attribute("src")
        logger.debug(f"Found image grid: {image_grid_url}")
        self.recaptcha_task.image_grid_url = image_grid_url

    def solve_image_grid(self):
        while True:
            logger.debug("Processing images in grid")
            results = self.image_handler.process_grid(
                self.recaptcha_task.image_grid_url,
                self.recaptcha_task.desired_image_type)
            if len(results) == 0:
                logger.error("Failed to identify images, reloading")
                self.reload_captcha()
                time.sleep(1)
                continue
            for index in results:
                self.click_image_grid_elem(index)
            return

    def click_image_grid_elem(self, index):
        image_element = self.driver.find_elements_by_class_name(
            "rc-image-tile-target")[index]
        ActionChains(self.driver).move_to_element(image_element).perform()
        image_element.click()

    def solve_new_images(self):
        while True:
            logger.debug("Sleeping before checking new images")
            time.sleep(5)
            logger.debug("Processing new images")
            new_images = self.driver.find_elements_by_class_name(
                "rc-image-tile-11")
            new_images_urls = [
                new_image.get_attribute("src") for new_image in new_images
            ]

            results = self.image_handler.process_new_images(
                new_images_urls, self.recaptcha_task.desired_image_type)
            for i, result in enumerate(results):
                if result["matches"]:
                    self.click_new_image_elem(i)

            # both original branches reduce to "no tile matched this round"
            if not any(result["matches"] for result in results):
                logger.debug("All new images solved, proceeding")
                return

    def get_element_index(self, image_url, new_images_urls):
        for i, new_image_url in enumerate(new_images_urls):
            if new_image_url == image_url:
                return i

    def click_new_image_elem(self, index):
        image_element = self.driver.find_elements_by_class_name(
            "rc-image-tile-11")[index].find_element_by_xpath("..")
        ActionChains(self.driver).move_to_element(image_element).perform()
        image_element.click()

    def verify_challenge(self):
        logger.debug("Verifying challenge solution")
        self.driver.find_element_by_id("recaptcha-verify-button").click()
        time.sleep(1)

        self.driver.switch_to.parent_frame()
        self.switch_to_recap_iframe()
        try:
            self.driver.find_element_by_class_name(
                "recaptcha-checkbox-checked")
            logger.success("Successfully solved challenge")
            return True
        except NoSuchElementException:
            logger.error("Failed to solve challenge, retrying")
            self.switch_to_challenge_iframe()
            if self.driver.find_element_by_class_name(
                    "rc-imageselect-incorrect-response").get_attribute(
                        "style") != "":
                self.reload_captcha()
            return False

    def get_recaptcha_token(self):
        logger.debug("Searching for recaptcha token")
        self.driver.switch_to.parent_frame()
        recaptcha_token = self.driver.find_element_by_id(
            "g-recaptcha-response").get_attribute("value")
        logger.debug(f"Found recaptcha token: {recaptcha_token}")
        return recaptcha_token

    def reload_captcha(self):
        old_val = self.driver.find_element_by_id(
            "recaptcha-token").get_attribute("value")
        self.driver.find_element_by_id("recaptcha-reload-button").click()
        while True:
            if self.driver.find_element_by_id("recaptcha-token").get_attribute(
                    "value") != old_val:
                return
            time.sleep(0.01)
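
A usage sketch for RecaptchaSolver (hedged; the URL is a placeholder): solve() drives the whole flow above and returns the g-recaptcha-response token.

solver = RecaptchaSolver("https://example.com/page-with-recaptcha",
                         use_proxies=False, headless=True)
token = solver.solve()
print(token)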