def main():
    """
        Inference function to generate SR images.
    """
    nn.load_parameters(args.model)
    # Inference data loader
    inference_data = inference_data_loader(args.input_dir_lr)
    input_shape = [
        1,
    ] + list(inference_data.inputs[0].shape)
    output_shape = [1, input_shape[1] * 4, input_shape[2] * 4, 3]
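    # Remainders of the LR height/width modulo 8; used below when padding the estimated flow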
    oh = input_shape[1] - input_shape[1] // 8 * 8
    ow = input_shape[2] - input_shape[2] // 8 * 8

    # Build the computation graph
    inputs_raw = nn.Variable(input_shape)
    pre_inputs = nn.Variable(input_shape)
    pre_gen = nn.Variable(output_shape)
    pre_warp = nn.Variable(output_shape)

    transposed_pre_warp = space_to_depth(pre_warp)
    inputs_all = F.concatenate(inputs_raw, transposed_pre_warp)
    with nn.parameter_scope("generator"):
        gen_output = generator(inputs_all, 3, args.num_resblock)
    outputs = (gen_output + 1) / 2
    inputs_frames = F.concatenate(pre_inputs, inputs_raw)
    with nn.parameter_scope("fnet"):
        flow_lr = flow_estimator(inputs_frames)
    flow_lr = F.pad(flow_lr, (0, 0, 0, oh, 0, ow, 0, 0), "reflect")
    flow_hr = upscale_four(flow_lr * 4.0)
    pre_gen_warp = warp_by_flow(pre_gen, flow_hr)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    max_iter = len(inference_data.inputs)
    print('Frame evaluation starts!!')
    pre_inputs.d, pre_gen.d, pre_warp.d = 0, 0, 0
    for i in range(max_iter):
        inputs_raw.d = np.array([inference_data.inputs[i]]).astype(np.float32)
        if i != 0:
            pre_gen_warp.forward()
            pre_warp.data.copy_from(pre_gen_warp.data)
        outputs.forward()
        output_frame = outputs.d

        if i >= 5:
            name, _ = os.path.splitext(
                os.path.basename(str(inference_data.paths_lr[i])))
            filename = args.output_name + '_' + name
            print('saving image %s' % filename)
            out_path = os.path.join(args.output_dir,
                                    "%s.%s" % (filename, args.output_ext))
            save_img(out_path, output_frame[0])
        else:  # The first 5 frames are hard-coded warm-up padding; they are processed but not saved
            print("Warming up %d" % (5 - i))

        pre_inputs.data.copy_from(inputs_raw.data)
        pre_gen.data.copy_from(outputs.data)
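The graph above concatenates the low-resolution input with a space-to-depth view of the previously warped high-resolution output, so that both tensors share the LR spatial size before entering the generator. A minimal NumPy sketch of that rearrangement, assuming NHWC layout and the 4x factor used here (the repository's space_to_depth may order the resulting channels differently):

import numpy as np

def space_to_depth_sketch(x, block=4):
    # (N, H, W, C) -> (N, H // block, W // block, C * block * block)
    n, h, w, c = x.shape
    x = x.reshape(n, h // block, block, w // block, block, c)
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(n, h // block, w // block, block * block * c)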
Example #2
async def post_defect_detection(img: UploadFile = File(...)):
    """
The function first detects all objects present in an image 
& then predicts whether or not each of them is defective

**Parameters:**

1. `img`: image file uploaded to the API endpoint using the POST request
    """

    img_name = "temp-img.png"
    label_req = ["pump_impeller"]

    with open(img_name, "wb") as buffer:
        shutil.copyfileobj(img.file, buffer)
    """
It first stores the uploaded image temporarily in the local database 
of the server runs it through our Object Detection pipeline.
    """

    detections = object_detection_model.detect_objects(img_name)

    for i in range(len(detections)):
        if detections[i]["label"] in label_req:
            b_box = detections[i]["bounding_box"]
            pred = extract_region_and_predict(img_name, b_box)
            detections[i]["defect_pred"] = pred["prediction"]
    """
The bounding boxes of the detected parts are returned as a dictionary. 
For each detected part, the region of interest is selected using the 
bounding box & a prediction is made as to whether or not it contains 
any defect based on our Defect Detection Model.
    """
    final_img = utils.draw_bounding_boxes(img_name, detections)
    """
These predictions are used to draw colored bounding boxes around the detected parts
    """

    os.remove(img_name)

    with tempfile.NamedTemporaryFile(mode="w+b", suffix=".png",
                                     delete=False) as FOUT:
        utils.save_img(final_img, filename=FOUT.name)
        encoded_image_string = base64.b64encode(FOUT.read())
        return {
            "mime": "image/png",
            "image": encoded_image_string,
        }
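A minimal client-side sketch for exercising this endpoint; the route path is not shown above, so "/defect-detection" and the local server address are assumptions:

import base64
import requests

with open("sample.png", "rb") as f:  # any test image
    resp = requests.post("http://localhost:8000/defect-detection",  # hypothetical route
                         files={"img": ("sample.png", f, "image/png")})
with open("result.png", "wb") as out:
    out.write(base64.b64decode(resp.json()["image"]))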
    """
Example #3
def img_style_transfer(filename, filename2):

    img = get_img(filename)
    filename_dir = '/Users/tanya/PycharmProjects/image6/static/images/'
    content_filename = filename_dir + filename
    content_image = load_image(content_filename, max_size=None)

    style_filename = filename_dir + filename2
    style_image = load_image(style_filename, max_size=300)

    content_layer_ids = [4]
    style_layer_ids = list(range(13))
    res = style_transfer(content_image=content_image,
                         style_image=style_image,
                         content_layer_ids=content_layer_ids,
                         style_layer_ids=style_layer_ids,
                         weight_content=5,
                         weight_style=10.0,
                         weight_denoise=0.3,
                         num_iterations=60,
                         step_size=10.0)
    # Return the name of the processed file
    return save_img(res, filename)
def rotate(filename: str, sp: list, size: int, degree: int):
    if degree == 90:
        angle = Image.ROTATE_90
    elif degree == 180:
        angle = Image.ROTATE_180
    else:
        angle = Image.ROTATE_270
    # Load the image
    im = get_img(filename)
    width, height = im.size

    sp[0] = int(sp[0])
    sp[1] = int(sp[1])
    # Bail out if the region start point lies outside the image
    if sp[0] > width or sp[1] > height:
        return filename
    # Clamp the window size to the image bounds
    size_x = width - sp[0] if sp[0] + size > width else size
    size_y = height - sp[1] if sp[1] + size > height else size
    size = size_x if size_x < size_y else size_y
    # Build the rectangular box: left, upper, right, lower
    box = (sp[0], sp[1], sp[0] + size, sp[1] + size)
    # Crop the window, rotate it, and paste it back
    region = im.crop(box)
    region = region.transpose(angle)
    im.paste(region, box)
    return save_img(im, filename)
def mosaic(filename: str, sp: list, ep: list, granularity: int) -> tuple:
    olddata = '#'.join(sp + ep) + '#' + str(granularity)
    # Load the image
    im = get_img(filename)
    width, height = im.size
    # Create a Draw object
    draw = ImageDraw.ImageDraw(im=im)
    sp[0] = int(sp[0])
    sp[1] = int(sp[1])
    # Bail out if the start point lies outside the image or the granularity is invalid
    if sp[0] > width or sp[1] > height or granularity < 1:
        return filename, olddata
    # Clamp the end point to the image bounds
    ep[0] = width if int(ep[0]) > width else int(ep[0])
    ep[1] = height if int(ep[1]) > height else int(ep[1])

    # Apply the mosaic
    for x in range(int(sp[0]), ep[0], granularity):
        for y in range(int(sp[1]), ep[1], granularity):
            # Fill the whole block with the pixel sampled at its top-left corner
            r, g, b = im.getpixel((x, y))
            draw.rectangle([(x, y),
                            (x + granularity - 1, y + granularity - 1)],
                           fill=(r, g, b))
    # Save and return the file name
    return save_img(im, filename), olddata
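A hypothetical call, assuming the helpers above and an RGB image on disk; the start and end points arrive as strings because they come from a web form:

new_name, olddata = mosaic('portrait.png', ['100', '100'], ['300', '300'], 10)  # 10-pixel blocks between (100, 100) and (300, 300)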
Example #6
def img_filter(form, filename, **kwargs):
    mode = kwargs.get('mode', None)

    img = get_img(filename)

    # Read the filter mode from the form
    im_filter, olddata = get_filter(mode, form)

    # Apply the filter
    res = img.filter(im_filter)
    # Return the processed file name and the original parameters
    return save_img(res, filename), olddata
Example #7
def img_kernel(filename, size, kernel, scale=None, offset=None):
    im = get_img(filename)
    if scale is not None and scale.strip() != '':
        scale = float(scale)
    else:
        scale = None
    if offset is not None and offset.strip() != '':
        offset = float(offset)
    else:
        offset = 0
    im_filter = ImageFilter.Kernel(size, kernel, scale, offset)
    res = im.filter(im_filter)
    return save_img(res, filename)
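A hypothetical call with a 3x3 sharpen kernel. PIL's ImageFilter.Kernel accepts only (3, 3) or (5, 5) sizes, the kernel is a flattened sequence of weights, and passing None for scale lets PIL use the sum of the weights:

sharpen = [0, -1, 0,
           -1, 5, -1,
           0, -1, 0]  # weights sum to 1, so overall brightness is preserved
img_kernel('photo.png', size=(3, 3), kernel=sharpen, scale=None, offset=None)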
Example #8
def img_enhance(filename, enhance_type, factor):
    im = get_img(filename)
    if enhance_type == 'Color':
        enhancer = ImageEnhance.Color(im)
    elif enhance_type == 'Contrast':
        enhancer = ImageEnhance.Contrast(im)
    elif enhance_type == 'Brightness':
        enhancer = ImageEnhance.Brightness(im)
    elif enhance_type == 'Sharpness':
        enhancer = ImageEnhance.Sharpness(im)
    else:
        raise NotExistEnhanceError()
    res = enhancer.enhance(factor)
    return save_img(res, filename)
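ImageEnhance factors are multiplicative: 1.0 returns the original image, values below 1.0 reduce the property and values above it boost it. Hypothetical calls:

img_enhance('photo.png', 'Contrast', 1.5)    # boost contrast by 50%
img_enhance('photo.png', 'Brightness', 0.8)  # darken slightly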
def overlay(filename1, filename2, alpha: float):
    im1 = get_img(filename1)
    im2 = get_img(filename2)
    w = im1.size[0] if im1.size[0] > im2.size[0] else im2.size[0]
    h = im1.size[1] if im1.size[1] > im2.size[1] else im2.size[1]
    size = (w, h)
    base1 = Image.new('RGB', size, ImageColor.getrgb('white'))
    base2 = Image.new('RGB', size, ImageColor.getrgb('white'))
    base1.paste(im1, (0, 0))
    base2.paste(im2, (0, 0))
    base1.show()
    base2.show()

    res = Image.blend(base1, base2, alpha)
    res.show()
    return save_img(res, filename1)
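Image.blend computes im1 * (1 - alpha) + im2 * alpha, so alpha=0.0 keeps only the first image and alpha=1.0 only the second. A hypothetical call:

overlay('background.png', 'foreground.png', alpha=0.3)  # 70% first image, 30% second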
Example #10
def text_mask(filename, text, color, font_size, start_point):
    olddata = (text + '#' + color + '#' + font_size + '#' + start_point[0] +
               '#' + start_point[1])
    im = get_img(filename)
    draw = ImageDraw.ImageDraw(im)
    font = ImageFont.truetype(font=basedir +
                              '/static/font/STHeiti-Light-3.ttc',
                              size=int(font_size),
                              encoding='utf-8')
    if color in ImageColor.colormap:
        color = ImageColor.getrgb(color)
    else:
        color = ImageColor.getrgb('red')
    draw.text((int(start_point[0]), int(start_point[1])),
              text,
              fill=color,
              font=font)
    return save_img(im, filename), olddata
Example #11
def water_mask(filename: str, mask_filename: str):
    # Load the image
    im = get_img(filename)
    width, height = im.size

    # Load the watermark image, converted to match the base image's format
    mask_filename = convert_img(filename, mask_filename)
    mask_im = get_img(mask_filename)
    mw, mh = mask_im.size
    mw, mh = 250, int(mh * 250 / mw)
    mask_im = mask_im.resize((mw, mh))

    # Crop the corresponding region of the original image and blend it with the watermark
    region = im.crop((width - mw, height - mh, width, height))
    new = Image.blend(mask_im, region, 0.5)

    im.paste(new, (width - mw, height - mh))

    return save_img(im, filename)
Example #12
        print(log_str)

        model.seen += imgs.size(0)


    if epoch % 5 == 0:
        predictions = non_max_suppression(outputs, conf_thres=opt.conf_thres, nms_thres=opt.nms_thres)
        targets[:, 1:] = xywh2xyxy(targets[:, 1:])
        # Rescale to image dimensions
        targets[:, 1:] *= opt.img_size
        # Get batch statistics used to compute metrics
        statistics = get_batch_statistics(predictions, targets, iou_threshold=0.5)
        true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*statistics))]
        # Compute metrics
        precision, recall, AP, f1, ap_class = ap_per_class(
            true_positives, pred_scores, pred_labels,
            list(range(int(data_config["classes"]))))
        global_metrics = [
            ("F1", f1.mean()),
            ("Recall", recall.mean()),
            ("Precision", precision.mean()),
            ("mAP", AP.mean()),
        ]
        print(global_metrics)
        for img_i, (img_path, detections) in enumerate(zip(paths, predictions)):
            save_img(img_path, detections, classes, "output", str(epoch) + "_" + str(img_i), opt)


    if epoch % opt.checkpoint_interval == 0 or epoch == opt.epochs - 1:
        model.save_weights("%s/%d.weights" % (opt.checkpoint_dir, epoch))
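The xywh2xyxy helper used above converts centre-format boxes to corner format before they are rescaled to image coordinates. A minimal sketch of that standard conversion, assuming an (N, 4) NumPy array of (x_center, y_center, w, h) rows (the project's own helper may operate on torch tensors instead):

import numpy as np

def xywh2xyxy_sketch(boxes):
    # boxes: (N, 4) array of (x_center, y_center, w, h)
    out = boxes.copy()
    out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2  # x1
    out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2  # y1
    out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2  # x2
    out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2  # y2
    return out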
Example #13
    def save_img(self, fig, name):
        utils.save_img(fig, self.model_name, name, self.result_dir)
        return
Example #14
def convert_img(dst, src):
    dst_suffix = dst.split('.')[-1]
    new_filename = src.split('.')[0] + '.' + dst_suffix
    im = get_img(src)
    im = im.convert('RGB')
    return save_img(im, new_filename)