Example 1
def process(inputs, ctx, **kwargs):
    image, is_video = load_image(inputs, 'inputs')
    if image is None:
        raise RuntimeError('Missing "inputs" key in inputs. Provide an image in "inputs" key')
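    # Encode the result: JPEG for 3-channel images, PNG when an alpha channel is present; video frames pass through as raw arrays.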
    def _return(result):
        encoding = ''
        if not is_video:
            if result.shape[2] == 3:
                # Reverse channel order (RGB -> BGR) for OpenCV encoding
                result = result[:, :, ::-1]
                result = cv2.imencode('.jpg', result)[1].tobytes()
                encoding = 'jpeg'
            else:
                # 4-channel result: keep the alpha channel, encode as PNG
                result = cv2.imencode('.png', result)[1].tobytes()
                encoding = 'png'
        return {'output': result, 'encoding': encoding}
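    # Downscale so the longer side is at most 1024 px.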
    ratio = 1.0
    w = float(image.shape[1])
    h = float(image.shape[0])
    if w > h:
        if w > 1024:
            ratio = w / 1024.0
    else:
        if h > 1024:
            ratio = h / 1024.0

    if ratio > 1:
        image = cv2.resize(image, (int(w / ratio), int(h / ratio)))


    # The model expects a 160x160 input normalized to [0, 1].
    model_input = cv2.resize(image, (160, 160))
    model_input = np.asarray(model_input, np.float32) / 255.0
    outputs = ctx.drivers[0].predict({'image': np.expand_dims(model_input, axis=0)})
    mask = outputs['output'][0]
    mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
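    # Feather the mask edges so the composite blends smoothly.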
    mask = cv2.GaussianBlur(mask, (21, 21), 11)
    mask = np.expand_dims(mask, 2)
    back_name = get_param(inputs, 'background', None)
    if back_name is not None:
        background = backgrounds.get(back_name)
    elif glob_background is not None:
        background = glob_background
    else:
        background = backgrounds.get('None')

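    # Alpha-composite: foreground * mask + background * (1 - mask).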
    image = image.astype(np.float32) * mask
    background = cv2.resize(background, (image.shape[1], image.shape[0]))
    background = background.astype(np.float32)
    background = background * (1 - mask)
    image = background + image
    image = image.astype(np.uint8)
    return _return(image)
Example 2
def process(inputs, ctx, **kwargs):
    style_name = helpers.get_param(inputs, 'style', None)
    default_model = ctx.global_ctx['default_model']
    if style_name == 'young' or default_model == 'young':
        return ctx.global_ctx['young'].process(inputs, ctx, **kwargs)
    img, is_video = helpers.load_image(inputs, 'image', rgb=False)
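    # 'style' may be '<model>_<style_name>'; the prefix selects which model to run.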
    if style_name is not None:
        p = style_name.split('_')
        model = default_model
        if len(p) > 1:
            model = p[0]
            style_name = '_'.join(p[1:])
        img = ctx.global_ctx[model].process(img, style_name, inputs)
    if not is_video:
        img = img[:, :, ::-1]
        img = cv2.imencode('.jpg', img)[1].tobytes()
    return {'output': img}
Example 3
def set_detection_params(inputs, ctx):
    detection_params = [
        'detect_faces',
        'detect_objects',
        'build_caption',
        'detect_poses',
    ]
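    # A request parameter overrides the service-level PARAMS value for each flag.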
    for param in detection_params:
        raw_value = helpers.get_param(inputs, param)
        if raw_value is not None:
            LOG.info('%s=%s', param, raw_value)
            value = raw_value
        else:
            # True by default
            value = PARAMS.get(param, True)

        setattr(ctx, param, value)
Example 4
def process(inputs, ctx, **kwargs):
    original_image, is_video = load_image(inputs, 'inputs')
    if original_image is None:
        raise RuntimeError(
            'Missing "inputs" key in inputs. Provide an image in "inputs" key')

    def _return(result):
        encoding = ''
        if not is_video:
            if result.shape[2] == 3:
                # Reverse channel order (RGB -> BGR) for OpenCV encoding
                result = result[:, :, ::-1]
                result = cv2.imencode('.jpg', result)[1].tobytes()
                encoding = 'jpeg'
            else:
                # 4-channel result: keep the alpha channel, encode as PNG
                result = cv2.imencode('.png', result)[1].tobytes()
                encoding = 'png'
        return {'output': result, 'encoding': encoding}

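    # Downscale so the longer side is at most 1024 px before detection.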
    ratio = 1.0
    w = float(original_image.shape[1])
    h = float(original_image.shape[0])
    if w > h:
        if w > 1024:
            ratio = w / 1024.0
    else:
        if h > 1024:
            ratio = h / 1024.0

    if ratio > 1:
        image = cv2.resize(original_image, (int(w / ratio), int(h / ratio)))
    else:
        image = original_image

    if not boolean_string(get_param(inputs, 'return_origin_size', False)):
        original_image = image

    try:
        area_threshold = int(get_param(inputs, 'area_threshold', 0))
    except (TypeError, ValueError):
        area_threshold = 0
    area_threshold = limit(area_threshold, 0, 100, 0)
    try:
        max_objects = int(get_param(inputs, 'max_objects', 1))
    except (TypeError, ValueError):
        max_objects = 1
    max_objects = limit(max_objects, 1, 10, 1)

    try:
        pixel_threshold = int(
            float(get_param(inputs, 'pixel_threshold', 0.5)) * 255)
    except (TypeError, ValueError):
        pixel_threshold = int(0.5 * 255)

    pixel_threshold = limit(pixel_threshold, 1, 254, int(0.5 * 255))

    object_classes = [
        obj_classes.get(get_param(inputs, 'object_class', 'Person'), 1)
    ]
    effect = get_param(inputs, 'effect',
                       'Remove background')  # Remove background, Mask, Blur, Grey

    try:
        blur_radius = int(get_param(inputs, 'blur_radius', 2))
    except (TypeError, ValueError):
        blur_radius = 2

    blur_radius = limit(blur_radius, 1, 10, 2)

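    # First stage: object detection with per-instance masks (TensorFlow Object Detection API-style outputs).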
    outputs = ctx.drivers[0].predict(
        {'inputs': np.expand_dims(image, axis=0)})
    num_detection = int(outputs['num_detections'][0])
    if num_detection < 1:
        return _return(original_image)

    process_width = image.shape[1]
    process_height = image.shape[0]
    image_area = process_width * process_height
    detection_boxes = outputs["detection_boxes"][0][:num_detection]
    detection_boxes = detection_boxes * [
        process_height, process_width, process_height, process_width
    ]
    detection_boxes = detection_boxes.astype(np.int32)
    detection_classes = outputs["detection_classes"][0][:num_detection]
    detection_masks = outputs["detection_masks"][0][:num_detection]
    masks = []
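    # Rescale each instance mask into image coordinates, padding to a 50 px margin around its box; skip masks below the area threshold.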
    for i in range(num_detection):
        if int(detection_classes[i]) not in object_classes:
            continue
        box = detection_boxes[i]
        mask_image = cv2.resize(detection_masks[i],
                                (box[3] - box[1], box[2] - box[0]),
                                interpolation=cv2.INTER_LINEAR)
        left = max(0, box[1] - 50)
        right = min(process_width, box[3] + 50)
        upper = max(0, box[0] - 50)
        lower = min(process_height, box[2] + 50)
        box_mask = np.pad(mask_image, ((box[0] - upper, lower - box[2]),
                                       (box[1] - left, right - box[3])),
                          'constant')
        area = int(np.sum(np.greater_equal(box_mask, 0.5).astype(np.int32)))
        if area * 100 / image_area < area_threshold:
            continue
        masks.append((area, box_mask, [upper, left, lower, right]))

    if len(masks) < 1:
        return _return(original_image)
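    # Merge up to max_objects of the largest masks and track their combined bounding box.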
    masks = sorted(masks, key=lambda row: -row[0])
    total_mask = np.zeros((process_height, process_width), np.float32)
    min_left = process_width
    min_upper = process_height
    max_right = 0
    max_lower = 0
    for i in range(min(len(masks), max_objects)):
        pre_mask = masks[i][1]
        box = masks[i][2]
        left = max(0, box[1])
        right = min(process_width, box[3])
        upper = max(0, box[0])
        lower = min(process_height, box[2])
        box_mask = np.pad(pre_mask, ((upper, process_height - lower),
                                     (left, process_width - right)),
                          'constant')
        total_mask = np.maximum(total_mask, box_mask)
        if left < min_left:
            min_left = left
        if right > max_right:
            max_right = right
        if upper < min_upper:
            min_upper = upper
        if lower > max_lower:
            max_lower = lower
    mask = np.uint8(total_mask[min_upper:max_lower, min_left:max_right] * 255)
    box = (min_upper, min_left, max_lower, max_right)
    if len(mask.shape) > 2:
        logging.warning('Mask shape is %s', mask.shape)
        mask = mask[:, :, 0]
    image = cv2.resize(image[box[0]:box[2], box[1]:box[3], :], (320, 320))
    mask = cv2.resize(mask, (320, 320))
    mask[np.less_equal(mask, pixel_threshold)] = 0
    mask[np.greater(mask, pixel_threshold)] = 255
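    # Second stage: alpha matting. The binary mask becomes a trimap (foreground / background / unknown band) that the matting model refines.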
    input_trimap = generate_trimap(mask)
    input_trimap = np.expand_dims(input_trimap.astype(np.float32), 2)
    image = image.astype(np.float32)
    input_image = image - g_mean
    outputs = ctx.drivers[1].predict({
        'input': np.expand_dims(input_image, axis=0),
        'trimap': np.expand_dims(input_trimap, axis=0),
    })
    mask = outputs.get('mask', None)
    if mask is None:
        mask = outputs['output'][0] * 255
        mask = np.reshape(mask, (320, 320))
        mask = np.clip(mask, 0, 255)
        mask = mask.astype(np.uint8)
        mask = cv2.resize(mask, (box[3] - box[1], box[2] - box[0]))
    mask = mask.astype(np.float32) / 255
    mask = np.pad(mask, ((box[0], process_height - box[2]),
                         (box[1], process_width - box[3])), 'constant')
    if mask.shape != original_image.shape:
        mask = cv2.resize(mask,
                          (original_image.shape[1], original_image.shape[0]))
    mask = cv2.GaussianBlur(mask, (21, 21), 11)
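    # Apply the selected effect using the refined, feathered alpha mask.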
    if effect == 'Remove background':
        background = None
        if 'background_img' in inputs:
            background, _ = load_image(inputs, 'background_img')
        if background is None:
            back_name = get_param(inputs, 'background', None)
            if back_name is not None:
                background = backgrounds.get(back_name)
            elif glob_background is not None:
                background = glob_background
            else:
                background = backgrounds.get('None')
        add_style = get_param(inputs, 'style', '')
        if len(add_style) > 0:
            image = apply_style(original_image, add_style).astype(np.float32)
        else:
            image = original_image.astype(np.float32)
        mask = np.expand_dims(mask, 2)
        if background is not None:
            image = image * mask
            background = cv2.resize(background,
                                    (image.shape[1], image.shape[0]))
            background = background.astype(np.float32)
            background = background * (1 - mask)
            image = background + image
            image = image.astype(np.uint8)
        else:
            if not is_video:
                mask = (mask * 255).astype(np.uint8)
                image = image[:, :, ::-1].astype(np.uint8)
                image = np.concatenate([image, mask], axis=2)
            else:
                image = image * mask
                image = image.astype(np.uint8)
    elif effect == 'Mask':
        mask = mask * 255
        image = mask.astype(np.uint8)
    else:
        image = original_image.astype(np.float32)
        mask = np.expand_dims(mask, 2)
        foreground = mask * image
        radius = min(max(blur_radius, 2), 10)
        if radius % 2 == 0:
            # cv2.GaussianBlur requires an odd kernel size
            radius += 1
        if effect == 'Grey':
            background = rgb2gray(original_image)
        else:
            background = cv2.GaussianBlur(original_image, (radius, radius), 10)
        background = (1.0 - mask) * background.astype(np.float32)
        image = foreground + background
        image = image.astype(np.uint8)

    return _return(image)
Example 5
    def process(self, inputs, ctx, **kwargs):
        alpha = int(helpers.get_param(inputs, 'alpha', self._alpha))
        original, is_video = helpers.load_image(inputs, 'image')
        if self._portret:
            original = np.transpose(original, (1, 0, 2))
        output_view = helpers.get_param(inputs, 'output_view',
                                        self._output_view)
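        # For split views, keep only the middle half of the frame; it is stacked with the original at the end.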
        if output_view in ('horizontal', 'h'):
            x0 = int(original.shape[1] / 4)
            x1 = int(original.shape[1] / 2) + x0
            original = original[:, x0:x1, :]
        elif output_view in ('vertical', 'v'):
            y0 = int(original.shape[0] / 4)
            y1 = int(original.shape[0] / 2) + y0
            original = original[y0:y1, :, :]
        boxes = self.face_detector.bboxes(original)
        boxes.sort(key=lambda box: abs((box[3] - box[1]) * (box[2] - box[0])),
                   reverse=True)

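        # Use the largest detected face; discard degenerate boxes.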
        box = None
        if len(boxes) > 0:
            box = boxes[0].astype(int)
            if box[3] - box[1] < 1 or box[2] - box[0] < 1:
                box = None
        image = original.copy()
        if box is not None and self.style_model is not None:
            inference_img, output, box = self.style_model.process(
                ctx, image, box)
            alpha = np.clip(alpha, 1, 255)
            if srt_2_bool(
                    helpers.get_param(inputs, 'color_correction',
                                      self._color_correction)):
                output = color_tranfer(output, inference_img)
            if helpers.get_param(inputs, 'transfer_mode',
                                 self._transfer_mode) == 'direct':
                output = (inference_img * self._mask_orig +
                          output * self._mask_face).astype(np.uint8)
                output = cv2.resize(output, (box[2] - box[0], box[3] - box[1]),
                                    interpolation=cv2.INTER_LINEAR)
                image[box[1]:box[3], box[0]:box[2], :] = output
            else:
                output = cv2.resize(np.array(output),
                                    (box[2] - box[0], box[3] - box[1]),
                                    interpolation=cv2.INTER_AREA)
                if helpers.get_param(inputs, 'transfer_mode',
                                     self._transfer_mode) == 'box_margin':
                    xmin = max(0, box[0] - 50)
                    wleft = box[0] - xmin
                    ymin = max(0, box[1] - 50)
                    wup = box[1] - ymin
                    xmax = min(image.shape[1], box[2])
                    ymax = min(image.shape[0], box[3])
                    out = image[ymin:ymax, xmin:xmax, :]
                    center = (wleft + output.shape[1] // 2,
                              wup + output.shape[0] // 2)
                    samples = int(helpers.get_param(inputs, 'samples', 0))
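                    # With samples > 1, emit a progression of blends s_0 .. s_{samples-1} at increasing alpha.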
                    if samples > 1:
                        results = {
                            's_0': cv2.imencode(
                                '.jpg', image[:, :, ::-1])[1].tobytes()
                        }
                        step_alpha = 255.0 / (samples - 1)
                        for si in range(samples - 1):
                            alpha = int(step_alpha * (si + 1))
                            s_image = image.copy()
                            out = cv2.seamlessClone(
                                output, out,
                                np.ones_like(output) * alpha, center,
                                cv2.NORMAL_CLONE)
                            s_image[ymin:ymax, xmin:xmax, :] = out
                            results[f's_{si + 1}'] = cv2.imencode(
                                '.jpg', s_image[:, :, ::-1])[1].tobytes()
                        # Return the strongest blend as the default output
                        results['output'] = results[f's_{samples - 1}']
                        return results
                    else:
                        out = cv2.seamlessClone(output, out,
                                                np.ones_like(output) * alpha,
                                                center, cv2.NORMAL_CLONE)
                        image[ymin:ymax, xmin:xmax, :] = out
                else:
                    center = (box[0] + output.shape[1] // 2,
                              box[1] + output.shape[0] // 2)
                    if not (center[0] >= output.shape[1] or
                            box[1] + output.shape[0] // 2 >= output.shape[0]):
                        image = cv2.seamlessClone(output, image,
                                                  np.ones_like(output) * alpha,
                                                  center, cv2.NORMAL_CLONE)
            if len(box) > 0:
                if srt_2_bool(
                        helpers.get_param(inputs, "draw_box", self._draw_box)):
                    image = cv2.rectangle(image, (box[0], box[1]),
                                          (box[2], box[3]), (0, 255, 0), 2, 8)

        result = {}
        image = self.maybe_mirror(image)
        if output_view in ('horizontal', 'h', 'fh'):
            image = np.hstack((self.maybe_mirror(original), image))
        elif output_view in ('vertical', 'v'):
            image = np.vstack((self.maybe_mirror(original), image))
        image = self.add_overlay(image)
        image = image[:, :, ::-1]
        if not is_video:
            image_bytes = cv2.imencode('.jpg', image)[1].tobytes()
        else:
            image_bytes = image[:, :, ::-1]
            h = 480
            w = int(480 * image.shape[1] / image.shape[0])
            result['status'] = cv2.resize(image, (w, h))

        result['output'] = image_bytes
        return result