Example no. 1
    def run(self):
        """Consume payloads from the work queue, run style transfer on each
        one and save the resulting pastiche image to disk."""
        while True:
            # Block until the next payload arrives on the queue
            payload = self.queue.get()

            # Unpack payload and set up inputs for style transfer
            content_data, style_data, tag, settings = api.unpack_payload(
                payload)
            content_image = convert_image(content_data)
            style_image = convert_image(style_data)

            # Perform style transfer
            if self.verbose: print("[StyleWorker]: processing payload: ", tag)
            n_epochs = 100
            if api.SETTING_NUMBER_EPOCHS_KEY in settings:
                n_epochs = settings[api.SETTING_NUMBER_EPOCHS_KEY]
            pastiche_image = styleopt.transfer_style(content_image,
                                                     style_image,
                                                     n_epochs=n_epochs,
                                                     settings=settings,
                                                     verbose=self.verbose)

            # Save results of style transfer
            if self.verbose: print("[StyleWorker]: completed payload: ", tag)
            if not os.path.exists("static/pastiche"):
                os.mkdir("static/pastiche")
            pastiche_image.save("static/pastiche/{}.jpg".format(tag))
Example no. 2
def get_logo_id_new(logo_url, download_crawler, source, sourceId, catename):
    """Download the logo at ``logo_url``, convert it with util.convert_image
    and upload it to OSS under a freshly generated name.

    Returns a ``(name, width, height)`` tuple; ``name`` is None when the logo
    could not be downloaded.
    """
    mongo = db.connect_mongo()
    # imgfs = gridfs.GridFS(mongo.gridfs)
    name = None
    height = None
    width = None
    if logo_url is not None and len(logo_url.strip()) > 0:
        logger.info("Download logo: %s", logo_url)
        # (image_value, width, height) = download_crawler.get_image_size(logo_url)
        (image_file, width,
         height) = download_crawler.get_image_size_new(logo_url)
        if image_file is not None:
            # logo_id = imgfs.put(image_value, content_type='jpeg', filename='%s_%s_%s.jpg' % (catename, source, sourceId))
            # out = imgfs.get(ObjectId(logo_id))
            name = util.get_uuid()
            logger.info("%s->%s|%s", logo_url, name, image_file)
            if source in [13835, 13836, 13613]:
                # img, width, height = util.convert_image(out, out.name, size=1024)
                img, width, height = util.convert_image(image_file,
                                                        name,
                                                        size=1024)
            # NOTE: 13613 is already matched by the branch above, so this
            # branch effectively only applies to source 13803
            elif source in [13613, 13803]:
                img, width, height = util.convert_image(image_file,
                                                        name,
                                                        size=width)
            else:
                # img, width, height = util.convert_image(out, out.name)
                img, width, height = util.convert_image(image_file, name)
            headers = {"Content-Type": "image/jpeg"}
            # oss2put.put(str(logo_id), img, headers=headers)
            oss2put.put(name, img, headers=headers)
    # mongo.close()
    return (name, width, height)
Example no. 3
def get_page_image(workflow, seq_num, img_type, plugname):
    """ Return image for requested page. """
    if img_type not in ('raw', 'processed'):
        raise ApiException("Image type must be one of 'raw' or 'processed', "
                           "not '{0}'".format(img_type), 400)
    # Scale image if requested
    width = request.args.get('width', None)
    img_format = request.args.get('format', None)
    page = get_next(p for p in workflow.pages if p.sequence_num == seq_num)
    if not page:
        raise ApiException("Could not find page with sequence number {0}"
                           .format(seq_num), 404)
    if img_type == 'raw':
        fpath = page.raw_image
    elif plugname is None:
        fpath = page.get_latest_processed(image_only=True)
    else:
        fpath = page.processed_images[plugname]
    if width and fpath.suffix.lower() in ('.jpg', '.jpeg', '.tif', '.tiff',
                                          '.png'):
        return scale_image(fpath, width=int(width))
    elif fpath.suffix.lower() in ('.tif', '.tiff') and img_format:
        img_format = 'png' if img_format == 'browser' else img_format
        return convert_image(fpath, img_format)
    else:
        return send_file(unicode(fpath))
Example no. 4
def save_oss2_image(grid_id, size=None):
    """Copy an image from GridFS to OSS under the same id, converting it with
    util.convert_image first; ids already recorded in mongo are skipped."""
    if grid_id is None or grid_id.strip() == "":
        return

    item = mongo.temp.gridid.find_one({"gridid": grid_id})
    if item is not None:
        return

    out = grid.get(ObjectId(grid_id))
    logger.info("%s -> %s", grid_id, out.name)
    if size is None:
        img, xsize, ysize = util.convert_image(out, out.name)
    else:
        img, xsize, ysize = util.convert_image(out, out.name, size=size)
    headers = {"Content-Type": "image/jpeg"}
    oss2.put(grid_id, img, headers=headers)
    mongo.temp.gridid.insert({"gridid": grid_id})
Example no. 5
def get_page_image(fpath, page, workflow, number, img_type, plugname):
    """ Get image for requested page.

    :param workflow:    UUID or slug for a workflow
    :type workflow:     str
    :param number:      Capture number of requested page
    :type number:       int
    :param img_type:    Type of image
    :type img_type:     str, one of `raw` or `processed`
    :param plugname:    Only applicable if `img_type` is `processed`,
                        selects the desired processed file by its key in the
                        :py:attr:`spreads.workflow.Workflow.processed_images`
                        dictionary.
    :type plugname:     str
    :queryparam width:  Optionally scale down image to the desired width
    :type width:        int
    :queryparam format: Optionally convert image to desired format.
                        If `browser` is specified, non-JPG or PNG images will
                        be converted to PNG.
    :type format:       str, either `browser` or a format string recognized
                        by Pillow: http://pillow.readthedocs.org/en/latest/\\
                                   handbook/image-file-formats.html

    :resheader Content-Type:    Depends on value of `format`, by default
                                the mime-type of the original image.
    """
    width = request.args.get('width', None)
    img_format = request.args.get('format', None)
    # FIXME: This clearly sucks, rework convert_image and scale_image to allow
    #        for it.
    if width is not None and img_format is not None:
        raise ApiException("Can not scale and convert at the same time.", 400)
    transformable = fpath.suffix.lower() in ('.jpg', '.jpeg', '.tif', '.tiff',
                                             '.png')
    if (width is not None or img_format is not None) and not transformable:
        raise ApiException("Can only scale/convert JPG, TIF or PNG files.",
                           400)
    if width:
        # Scale image if requested
        return scale_image(fpath, width=int(width))
    elif img_format:
        # Convert to target format
        if fpath.suffix.lower() not in ('.tif', '.tiff', '.jpg', '.jpeg'):
            img_format = 'png' if img_format == 'browser' else img_format
        return convert_image(fpath, img_format)
    else:
        # Send unmodified if no scaling/converting is requested
        return send_file(unicode(fpath))
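
The width/format query parameters documented in the docstring above can be
exercised from a plain HTTP client. A minimal client-side sketch, assuming a
hypothetical route template and host (neither appears in this snippet):

import requests

# Both the host and the route template below are assumptions for
# illustration; the real URL layout is defined elsewhere in the app.
BASE_URL = "http://localhost:5000"
PAGE_IMAGE_ROUTE = BASE_URL + "/api/workflow/{workflow}/page/{number}/{img_type}"

def fetch_page_image(workflow_id, number, img_type="processed",
                     width=None, img_format=None):
    """Fetch a page image, optionally scaled *or* converted (not both),
    mirroring the `width`/`format` query parameters documented above."""
    params = {}
    if width is not None:
        params["width"] = width
    if img_format is not None:
        params["format"] = img_format
    resp = requests.get(
        PAGE_IMAGE_ROUTE.format(workflow=workflow_id, number=number,
                                img_type=img_type),
        params=params)
    resp.raise_for_status()
    # Raw image bytes; the Content-Type depends on the requested format.
    return resp.content

# e.g. a browser-friendly rendition of the third processed page:
# png_bytes = fetch_page_image("my-workflow", 3, img_format="browser")
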
Example no. 6
def get_page_image(workflow, number, img_type, plugname):
    """ Return image for requested page. """
    # Scale image if requested
    width = request.args.get('width', None)
    img_format = request.args.get('format', None)
    page = find_page(workflow, number)
    check_page_params(page, img_type, plugname)
    if img_type == 'raw':
        fpath = page.raw_image
    elif plugname is None:
        fpath = page.get_latest_processed(image_only=True)
    else:
        fpath = page.processed_images[plugname]
    if width and fpath.suffix.lower() in ('.jpg', '.jpeg', '.tif', '.tiff',
                                          '.png'):
        return scale_image(fpath, width=int(width))
    elif fpath.suffix.lower() in ('.tif', '.tiff') and img_format:
        img_format = 'png' if img_format == 'browser' else img_format
        return convert_image(fpath, img_format)
    else:
        return send_file(unicode(fpath))
Example no. 7
def test():
    """Evaluate a trained SRNs model: report PSNR/SSIM over the dataset and
    optionally save rendered images and ground-truth comparisons."""
    if opt.specific_observation_idcs is not None:
        specific_observation_idcs = list(
            map(int, opt.specific_observation_idcs.split(',')))
    else:
        specific_observation_idcs = None

    dataset = dataio.SceneClassDataset(
        root_dir=opt.data_root,
        max_num_instances=opt.max_num_instances,
        specific_observation_idcs=specific_observation_idcs,
        max_observations_per_instance=-1,
        samples_per_instance=1,
        img_sidelength=opt.img_sidelength)
    dataset = DataLoader(dataset,
                         collate_fn=dataset.collate_fn,
                         batch_size=1,
                         shuffle=False,
                         drop_last=False)

    model = SRNsModel(num_instances=opt.num_instances,
                      latent_dim=opt.embedding_size,
                      has_params=opt.has_params,
                      fit_single_srn=opt.fit_single_srn,
                      use_unet_renderer=opt.use_unet_renderer,
                      tracing_steps=opt.tracing_steps)

    assert (opt.checkpoint_path is not None), "Have to pass checkpoint!"

    print("Loading model from %s" % opt.checkpoint_path)
    util.custom_load(model,
                     path=opt.checkpoint_path,
                     discriminator=None,
                     overwrite_embeddings=False)

    model.eval()
    model.cuda()

    # directory structure: month_day/
    renderings_dir = os.path.join(opt.logging_root, 'renderings')
    gt_comparison_dir = os.path.join(opt.logging_root, 'gt_comparisons')
    util.cond_mkdir(opt.logging_root)
    util.cond_mkdir(gt_comparison_dir)
    util.cond_mkdir(renderings_dir)

    # Save command-line parameters to log directory.
    with open(os.path.join(opt.logging_root, "params.txt"), "w") as out_file:
        out_file.write('\n'.join(
            ["%s: %s" % (key, value) for key, value in vars(opt).items()]))

    print('Beginning evaluation...')
    with torch.no_grad():
        instance_idx = 0
        idx = 0
        psnrs, ssims = list(), list()
        for model_input, ground_truth in dataset:
            model_outputs = model(model_input)
            psnr, ssim = model.get_psnr(model_outputs, ground_truth)

            psnrs.extend(psnr)
            ssims.extend(ssim)

            instance_idcs = model_input['instance_idx']
            print("Object instance %d. Running mean PSNR %0.6f SSIM %0.6f" %
                  (instance_idcs[-1], np.mean(psnrs), np.mean(ssims)))

            if instance_idx < opt.save_out_first_n:
                output_imgs = model.get_output_img(model_outputs).cpu().numpy()
                comparisons = model.get_comparisons(model_input, model_outputs,
                                                    ground_truth)
                for i in range(len(output_imgs)):
                    prev_instance_idx = instance_idx
                    instance_idx = instance_idcs[i]

                    if prev_instance_idx != instance_idx:
                        idx = 0

                    img_only_path = os.path.join(renderings_dir,
                                                 "%06d" % instance_idx)
                    comp_path = os.path.join(gt_comparison_dir,
                                             "%06d" % instance_idx)

                    util.cond_mkdir(img_only_path)
                    util.cond_mkdir(comp_path)

                    pred = util.convert_image(output_imgs[i].squeeze())
                    comp = util.convert_image(comparisons[i].squeeze())

                    util.write_img(
                        pred, os.path.join(img_only_path, "%06d.png" % idx))
                    util.write_img(comp,
                                   os.path.join(comp_path, "%06d.png" % idx))

                    idx += 1

    with open(os.path.join(opt.logging_root, "results.txt"), "w") as out_file:
        out_file.write("%0.6f, %0.6f" % (np.mean(psnrs), np.mean(ssims)))

    print("Final mean PSNR %0.6f SSIM %0.6f" %
          (np.mean(psnrs), np.mean(ssims)))
Example no. 8
    if options["verbose"]:
        print("Sending style transfer request to server... ", end="")

    r = requests.post(api.SERVER_URL + "/api/style", json=payload)

    # Wait for success response from server
    if options["verbose"]: print("Waiting for response from server ", end="")
    has_pastiche = False
    while not has_pastiche:
        r = requests.get(api.SERVER_URL + "/api/pastiche/" + tag_id)
        if r.status_code == api.STATUS_OK:
            print(" Done!")
            has_pastiche = True
        elif r.status_code == api.STATUS_NOT_READY:
            print("#", end="", flush=True)
            time.sleep(1)
        else:
            raise ValueError("FATAL: unexpected status code {} from server"
                             .format(r.status_code))

    # Read pastiche image from server
    if options["verbose"]: print("Loading pastiche from server... ", end="")
    r = requests.get(api.SERVER_URL + "/api/pastiche/" + tag_id)
    if r.status_code == api.STATUS_OK:
        pastiche_image = convert_image(r.content)
    else:
        raise ValueError("FATAL: unexpected status code {} from server"
                         .format(r.status_code))

    # Show pastiche image
    plt.imshow(np.asarray(pastiche_image))
    plt.show()
Example no. 9
        if CAPTURE_MODE:
            # capture an image every second, and save as a training sample
            if frame_count % FPS == 0:
                ROI = depth[top_left[1] + 2:bottom_right[1],
                            top_left[0] + 2:bottom_right[0]]
                #cv2.imshow('img', ROI)
                util.capture_image(ROI, img_count, IMG_SAVE_PATH)
                img_count += 1
        else:
            # run a prediction every half second
            if frame_count % FPS == 0 or frame_count % FPS == 0.5 * FPS:
                ROI = depth[top_left[1] + 2:bottom_right[1],
                            top_left[0] + 2:bottom_right[0]]
                img = util.resize_and_smooth(ROI)
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
                img = util.convert_image(img)
                prediction = model.predict(img)
                arg = np.argmax(prediction)
                number = mapping[arg]
                if number != last_prediction:
                    last_prediction = number
                    label.config(text=str(number))
                    print("Predicted number: {}".format(str(number)))
        cv2.imshow("depth image", depth)
    if enable_rgb:
        cv2.imshow("color",
                   cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
    listener.release(frames)

    key = cv2.waitKey(delay=1)
    if key == ord('q'):
        break