def __init__(self, solve_url, use_proxies=True, headless=False):
        options = Options()
        options.headless = headless

        profile = webdriver.FirefoxProfile()
        if use_proxies:
            # route traffic through a manually configured HTTP proxy
            proxy = load_proxy()
            profile.set_preference("network.proxy.type", 1)
            profile.set_preference("network.proxy.http", proxy["ip"])
            profile.set_preference("network.proxy.http_port", proxy["port"])
            if "username" in proxy:
                # pre-encode credentials for the proxy-auth extension
                credentials = b64encode(
                    f'{proxy["username"]}:{proxy["password"]}'.encode(
                        "ascii")).decode()
                profile.set_preference("extensions.closeproxyauth.authtoken",
                                       credentials)

        profile.set_preference("dom.webdriver.enabled", False)
        profile.set_preference("useAutomationExtension", False)
        profile.update_preferences()

        try:
            self.driver = webdriver.Firefox(firefox_profile=profile,
                                            options=options)
        except WebDriverException:
            # fall back to headless mode if a visible browser cannot start
            options.headless = True
            self.driver = webdriver.Firefox(firefox_profile=profile,
                                            options=options)

        self.image_handler = ImageHandler()
        self.solve_url = solve_url
        self.recaptcha_task = RecaptchaTask()
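A minimal usage sketch for the constructor above. The enclosing class name is not shown in the snippet, so CaptchaSolver below is a placeholder:

# Hypothetical usage; CaptchaSolver stands in for whatever class owns
# the __init__ above.
solver = CaptchaSolver("https://example.com/solve", use_proxies=False,
                       headless=True)
try:
    solver.driver.get(solver.solve_url)
finally:
    solver.driver.quit()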
Example #2
def posts_main(posts_list):
    res_list = []
    for post in posts_list:
        # Send to Text Module
        text_app = TextHandler().main(post.title, post.text)
        # Send to Image Module
        image_app = ImageHandler().main(post.images)
        res_list.append((post.id, text_app, image_app))
    return res_list
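posts_main only needs objects exposing id, title, text, and images; a sketch of a matching input, where the Post type is assumed rather than taken from the project:

from collections import namedtuple

# Assumed input shape, inferred from the attributes posts_main() reads.
Post = namedtuple("Post", ["id", "title", "text", "images"])
posts = [Post(1, "A title", "Body text", ["a.png", "b.png"])]
results = posts_main(posts)  # -> [(1, <text result>, <image result>)]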
Example #3
def command_get_white_black_img(update: Update, context: CallbackContext):
    """Process the current image with the black-and-white filter."""
    img = ImageHandler()
    img.get_black_white_img()

    reply_markup = ReplyKeyboardRemove()  # remove the custom keyboard
    context.bot.send_message(chat_id=update.message.chat_id,
                             text="Upload new image",
                             reply_markup=reply_markup)
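One way this command might be wired into a bot, assuming python-telegram-bot v13; the token and command name are placeholders:

from telegram.ext import Updater, CommandHandler

# Placeholder token and command name; only the registration pattern matters.
updater = Updater("BOT_TOKEN")
updater.dispatcher.add_handler(
    CommandHandler("black_white", command_get_white_black_img))
updater.start_polling()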
Example #4
    def __init__(self):

        self.classnames = [
            "background", "person", "crutches", "walking_frame", "wheelchair",
            "push_wheelchair"
        ]

        # read ROS parameters
        config_file = rospy.get_param('~model_config', "")
        self.fixed_frame = rospy.get_param('~fixed_frame', 'odom')
        self.tracking = rospy.get_param('~tracking', True)
        self.filter_detections = rospy.get_param('~filter_inside_boxes', True)
        self.inside_box_ratio = rospy.get_param('~inside_box_ratio', 0.8)
        camera_topic = rospy.get_param('~camera_topic',
                                       '/kinect2/qhd/image_color_rect')
        camera_info_topic = rospy.get_param('~camera_info_topic',
                                            '/kinect2/qhd/camera_info')

        # initialize subscribers
        rospy.Subscriber(camera_topic,
                         Image,
                         self.image_callback,
                         queue_size=1)
        rospy.Subscriber(camera_info_topic,
                         CameraInfo,
                         self.cam_info_callback,
                         queue_size=1)

        # detection model and tracker
        self.setup_model_and_tracker(config_file)

        # image queues
        self.last_received_image = None  # set from image topic
        self.last_processed_image = None  # set from image topic
        self.new_image = False

        self.cam_calib = None  # set from camera info
        self.camera_frame = None  # set from camera info

        # helpers
        Server(TrackingParamsConfig, self.reconfigure_callback)
        bridge = CvBridge()
        self.viz_helper = Visualizer(len(self.classnames))
        self.publisher = Publisher(self.classnames, bridge)
        self.image_handler = ImageHandler(bridge, cfg.TEST.MAX_SIZE,
                                          cfg.TEST.SCALE)
        self.tfl = tf.TransformListener()
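The image_callback and cam_info_callback referenced above are not included in the snippet; a minimal sketch of the usual pattern that would fill the queue fields, assumed rather than taken from the project:

    # Assumed callback bodies, matching the queue fields initialized above.
    def image_callback(self, msg):
        self.last_received_image = msg
        self.new_image = True

    def cam_info_callback(self, msg):
        if self.camera_frame is None:
            self.camera_frame = msg.header.frame_id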
Example #5
    def __init__(self, options, gpu_ids=[]):
        self.options = options
        self.model = NetworkBench(n_networks=options.n_networks,
                                  n_input_channels=options.input_nc,
                                  n_output_channels=options.output_nc,
                                  n_blocks=options.n_blocks,
                                  initial_filters=options.initial_filters,
                                  dropout_value=options.dropout_value,
                                  lr=options.lr,
                                  decay=options.decay,
                                  decay_epochs=options.decay_epochs,
                                  batch_size=options.batch_size,
                                  image_width=options.image_width,
                                  image_height=options.image_height,
                                  load_network=options.load_network,
                                  load_epoch=options.load_epoch,
                                  model_path=os.path.join(
                                      options.model_path, options.name),
                                  name=options.name,
                                  gpu_ids=gpu_ids,
                                  dont_care=options.dont_care,
                                  gan=options.gan,
                                  pool_size=options.pool_size,
                                  lambda_gan=options.lambda_gan,
                                  n_blocks_discr=options.n_blocks_discr)

        self.model.cuda()

        self.dont_care = options.dont_care
        self.gan = options.gan

        if self.gan:
            self.discriminator_datasets = DataLoaderDiscriminator(
                options).load_data()

        self.data_sets = CorrespondenceDataLoaderDontCare(options).load_data()
        self.image_handler = ImageHandler()
        self.loss_dir = os.path.join(self.options.output_path,
                                     self.options.name, "Train")
        copyfile(
            os.path.relpath('seg_config.yaml'),
            os.path.join(self.options.model_path, self.options.name,
                         'seg_config.yaml'))
        self.writer = SummaryWriter(self.loss_dir)
Example #6
def main():
    app = QtWidgets.QApplication(sys.argv)
    main_view = MainView(app)
    controls = Controls()
    main_view.subscribe_controls(controls)

    metrics_engine = MetricsEngine()
    metrics_engine.subscribe_view(main_view)
    metrics_engine.load_metrics()
    controls.subscribe_view(main_view)
    image_handler = ImageHandler()
    image_handler.subscribe_view(main_view)
    modifications_provider = ModificationsProvider()
    modifications_provider.subscribe_image_handler(image_handler)
    controls.subscribe_image_handler(image_handler)
    controls.subscribe_modifications_provider(modifications_provider)
    image_handler.subscribe_metrics_engine(metrics_engine)

    main_view.show()
    sys.exit(app.exec_())
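The subscribe_* calls suggest a plain observer pattern; a minimal sketch of one participant, assumed rather than taken from the project:

# Assumed shape of one participant in the subscribe_* wiring above; the
# show_metrics method on the view is hypothetical.
class MetricsEngine:
    def __init__(self):
        self._view = None

    def subscribe_view(self, view):
        self._view = view

    def load_metrics(self):
        if self._view is not None:
            self._view.show_metrics([])  # notify the subscribed view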
Example #7
    def __init__(self, options, gpu_ids=[]):
        self.options = options
        self.model = NetworkBench(
            n_networks=options.n_networks,
            n_input_channels=options.input_nc,
            n_output_channels=options.output_nc * options.n_labels,
            n_blocks=options.n_blocks,
            initial_filters=options.initial_filters,
            dropout_value=options.dropout_value,
            lr=options.lr,
            decay=options.decay,
            decay_epochs=options.decay_epochs,
            batch_size=options.batch_size,
            image_width=options.image_width,
            image_height=options.image_height,
            load_network=options.load_network,
            load_epoch=options.load_epoch,
            model_path=os.path.join(options.model_path, options.name),
            name=options.name,
            gpu_ids=gpu_ids,
            gan=options.gan,
            pool_size=options.pool_size,
            lambda_gan=options.lambda_gan,
            n_blocks_discr=options.n_blocks_discr)

        # FIXME: Save Graph to tensorboardX

        self.model.cuda()
        self.n_labels = options.n_labels
        self.gan = options.gan

        self.data_sets = CorrespondenceDataLoaderMultiLabel(
            options).load_data()

        self.image_handler = ImageHandler()
        self.log_dir = os.path.join(self.options.output_path,
                                    self.options.name, "Train")
        copyfile(
            os.path.relpath('seg_config.yaml'),
            os.path.join(self.options.model_path, self.options.name,
                         'seg_config.yaml'))
        self.writer = SummaryWriter(self.log_dir)
Example #8
    def _start_threads(self):
        self.__image_handler = ImageHandler(self.__gui)
        self.__video_handler = VideoHandler(self.__gui)
        self.__camera_handler = CameraHandler(self.__gui)
        self.__display_handler = self.__image_handler
Example #9
"""
This module is reserved for arbitrary helper functions
"""
import os

from downloader import download
from image_handler import ImageHandler

images = ImageHandler('%s/SaturnServer/images/' % os.path.expanduser('~'))

Example #10
    parser = argparse.ArgumentParser(
        description='Command to reduce image size and lossless compression')
    parser.add_argument('inputDir', help='Input directory to be monitored')
    parser.add_argument(
        'outputDir',
        help='Output directory where transformed images will be saved')
    parser.add_argument(
        '-r',
        '--ratio',
        type=float,
        default=1.0,
        help='Reduce size ratio, from 0.0 to 1.0, default: 1.0')
    args = parser.parse_args()

    # Image handler setup
    logger.info(
        'Process started. Monitoring directory: {inputDir}. Output directory: {outputDir}'
        .format(inputDir=args.inputDir, outputDir=args.outputDir))
    observer: Observer = Observer()
    event_handler: ImageHandler = ImageHandler(size_ratio=args.ratio,
                                               dest_path=args.outputDir)
    observer.schedule(event_handler, args.inputDir)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    observer.join()
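observer.schedule() expects a watchdog event handler, so this ImageHandler presumably subclasses FileSystemEventHandler; a minimal sketch under that assumption:

from watchdog.events import FileSystemEventHandler

# Assumed skeleton; the real class also resizes and compresses the file.
class ImageHandler(FileSystemEventHandler):
    def __init__(self, size_ratio=1.0, dest_path="."):
        super().__init__()
        self.size_ratio = size_ratio
        self.dest_path = dest_path

    def on_created(self, event):
        if not event.is_directory:
            print("new image:", event.src_path)  # transform the file here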
Example #11
# encoding: cp1252
import pygame, os
import pyautogui
from image_handler import ImageHandler
from Tkinter import Tk
from PIL import Image
from options_gui import OptionsGUI

pygame.init()

if __name__ == "__main__":
    im_handle = ImageHandler()
    pyautogui.screenshot('foo.png')
    input_loc = 'foo.png'
    output_loc = 'out.png'
    screen, px = im_handle.setup(input_loc)
    left, upper, right, lower = im_handle.mainLoop(screen, px)

    # ensure output rect always has positive width, height
    if right < left:
        left, right = right, left
    if lower < upper:
        lower, upper = upper, lower
    im = Image.open(input_loc)
    im = im.crop((left, upper, right, lower))
    pygame.display.quit()
    im.save(output_loc)
    os.remove(input_loc)

    # File options
    root = Tk()
Example #12
from image_handler import ImageHandler

import cv2
import os
import time

if __name__ == '__main__':

    image_index = len(os.listdir("cache"))

    while True:
        for _ in range(10):
            image_handler = ImageHandler()
            if image_handler.is_legal():
                print("legal")

                image_list = image_handler.generate_uniform_image()
                image = image_handler.get_gray_static_image()
                cv2.imwrite(
                    os.path.join("origin",
                                 "{}.png".format(str(image_index).zfill(5))),
                    image)
                image_index += 1

                if image_handler.get_suffix() == "png":
                    dest_dir = "png"
                else:
                    dest_dir = "gif"
                for image in image_list:
                    cv2.imwrite(
                        os.path.join(
Example #13
    def __init__(self):
        self.classnames = [
            "background", "person", "crutches", "walking_frame", "wheelchair",
            "push_wheelchair"
        ]

        detectron_ops_lib = net_helper.get_detectron_ops_lib()
        dyndep.InitOpsLibrary(detectron_ops_lib)

        model_path = rospy.get_param("~model_path")
        self.fixed_frame = rospy.get_param('~fixed_frame', 'odom')
        self.tracking = rospy.get_param('~tracking', True)
        self.filter_detections = rospy.get_param('~filter_inside_boxes', True)
        self.inside_box_ratio = rospy.get_param('~inside_box_ratio', 0.8)
        camera_topic = rospy.get_param('~camera_topic',
                                       '/camera/color/image_raw')
        camera_info_topic = rospy.get_param('~camera_info_topic',
                                            '/camera/color/camera_info')

        self.net = caffe2_pb2.NetDef()
        with open(os.path.join(model_path, "model.pb"), "rb") as f:
            self.net.ParseFromString(f.read())

        self.init_net = caffe2_pb2.NetDef()
        with open(os.path.join(model_path, "model_init.pb"), "rb") as f:
            self.init_net.ParseFromString(f.read())

        workspace.ResetWorkspace()
        workspace.RunNetOnce(self.init_net)
        for op in self.net.op:
            for blob_in in op.input:
                if not workspace.HasBlob(blob_in):
                    workspace.CreateBlob(blob_in)
        workspace.CreateNet(self.net)

        # initialize subscribers
        rospy.Subscriber(camera_topic,
                         Image,
                         self.image_callback,
                         queue_size=1)
        rospy.Subscriber(camera_info_topic,
                         CameraInfo,
                         self.cam_info_callback,
                         queue_size=1)

        # image queues
        self.last_received_image = None  # set from image topic
        self.last_processed_image = None  # set from image topic
        self.new_image = False

        self.cam_calib = None  # set from camera info
        self.camera_frame = None  # set from camera info

        bridge = CvBridge()
        self.publisher = Publisher(self.classnames, bridge)
        observation_model = np.loadtxt(os.path.join(model_path,
                                                    "observation_model.txt"),
                                       delimiter=',')
        ekf_sensor_noise = np.loadtxt(os.path.join(model_path, "meas_cov.txt"),
                                      delimiter=',')
        self.tracker = Tracker(ekf_sensor_noise,
                               observation_model,
                               use_hmm=True)
        self.tfl = tf.TransformListener()
        self.image_handler = ImageHandler(bridge, 540, 960)
        Server(TrackingParamsConfig, self.reconfigure_callback)
        thresholds = {}
        with open(os.path.join(model_path, "AP_thresholds.txt")) as f:
            for line in f:
                (key, val) = line.split(',')
                thresholds[key] = float(val)
        self.cla_thresholds = thresholds
Example #14
def run_style_transfer(image_size,
                       content_image_path,
                       style_image_path,
                       content_layers_weights,
                       style_layers_weights,
                       variation_weight,
                       n_steps,
                       shifting_activation_value,
                       device_name,
                       preserve_colors):
    print('Transfer style to content image')
    print('Number of iterations: %s' % n_steps)
    print('Preserve colors: %s' % preserve_colors)
    print('--------------------------------')
    print('Content image path: %s' % content_image_path)
    print('Style image path: %s' % style_image_path)
    print('--------------------------------')
    print('Content layers: %s' % content_layers_weights.keys())
    print('Content weights: %s' % content_layers_weights.values())
    print('Style layers: %s' % style_layers_weights.keys())
    print('Style weights: %s' % style_layers_weights.values())
    print('Variation weight: %s' % variation_weight)
    print('--------------------------------')
    print('Shifting activation value: %s' % shifting_activation_value)
    print('--------------------------------\n\n')

    device = torch.device("cuda" if (torch.cuda.is_available() and device_name == 'cuda') else "cpu")

    image_handler = ImageHandler(image_size=image_size,
                                 content_image_path=content_image_path,
                                 style_image_path=style_image_path,
                                 device=device,
                                 preserve_colors=preserve_colors)
    content_layer_names = list(content_layers_weights.keys())
    style_layer_names = list(style_layers_weights.keys())
    layer_names = content_layer_names + style_layer_names

    last_layer = get_last_used_conv_layer(layer_names)
    model = transfer_vgg19(last_layer, device)

    print('--------------------------------')
    print('Model:')
    print(model)
    print('--------------------------------')
    content_features = model(image_handler.content_image, content_layer_names)
    content_losses = {layer_name: ContentLoss(weight=weight)
                      for layer_name, weight in content_layers_weights.items()}

    style_features = model(image_handler.style_image, style_layer_names)
    style_losses = {layer_name: StyleLoss(weight=weight,
                                          shifting_activation_value=shifting_activation_value)
                    for layer_name, weight in style_layers_weights.items()}

    variation_loss = VariationLoss(weight=variation_weight)

    combination_image = image_handler.content_image.clone()
    optimizer = optim.LBFGS([combination_image.requires_grad_()])
    run = [0]
    while run[0] <= n_steps:
        def closure():
            # correct the values of updated input image
            combination_image.data.clamp_(0, 1)

            optimizer.zero_grad()
            out = model(combination_image, layer_names)
            variation_score = variation_loss(combination_image)
            content_score = torch.sum(torch.stack([loss(out[layer_name], content_features[layer_name].detach())
                                                   for layer_name, loss in content_losses.items()]))
            style_score = torch.sum(torch.stack([loss(out[layer_name], style_features[layer_name].detach())
                                                 for layer_name, loss in style_losses.items()]))

            loss = style_score + content_score + variation_score
            loss.backward()

            run[0] += 1
            if run[0] % 50 == 0:
                print("run {}:".format(run))
                print('Style Loss : {:4f} Content Loss: {:4f} Variation Loss: {:4f}'.format(
                    style_score.item(), content_score.item(), variation_score.item()))

            return loss
        optimizer.step(closure)

    # a last correction...
    combination_image.data.clamp_(0, 1)

    plt.figure()
    image_handler.imshow(combination_image, title='Output Image')
    plt.show()
    return image_handler.image_unloader(combination_image)
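StyleLoss and ContentLoss are not defined in this snippet. A common Gram-matrix formulation that StyleLoss may correspond to, with the shift applied to the activations before the Gram product; this is an assumption, not the project's exact code:

import torch.nn as nn
import torch.nn.functional as F

def gram_matrix(features):
    # (batch, channels, h, w) -> normalized channel-correlation matrix
    b, c, h, w = features.size()
    flat = features.view(b * c, h * w)
    return flat @ flat.t() / (b * c * h * w)

class StyleLoss(nn.Module):
    def __init__(self, weight, shifting_activation_value=0.0):
        super().__init__()
        self.weight = weight
        self.shift = shifting_activation_value

    def forward(self, input_features, target_features):
        g_in = gram_matrix(input_features + self.shift)
        g_target = gram_matrix(target_features + self.shift)
        return self.weight * F.mse_loss(g_in, g_target)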