Code Example #1
    # create the output directory if it does not exist
    if not os.path.isdir(args.write_images):
        os.mkdir(args.write_images)
    
    openpose_server_url = "http://" + args.host + ":" + args.port + "/openpose"
    #openpose_server_url = "http://" + args.host + ":" + args.port + "/"
    if( args.debug ):
        print( "openpose_server_url : ", openpose_server_url )

    image_names = sorted( [f for f in os.listdir(args.image_dir) if f.endswith(IMG_EXTENSIONS)] )
    for img_name in tqdm(image_names):
        #----------------------------------
        # Set up the request payload
        #----------------------------------
        pose_img_pillow = Image.open( os.path.join(args.image_dir, img_name) )
        pose_img_base64 = conv_pillow_to_base64(pose_img_pillow)

        #----------------------------------
        # Send the request to the OpenPose server
        #----------------------------------
        oepnpose_msg = {'pose_img_base64': pose_img_base64 }
        oepnpose_msg = json.dumps(oepnpose_msg)     # serialize the dict into a JSON string
        try:
            openpose_responce = requests.post( openpose_server_url, json=oepnpose_msg )
            openpose_responce = openpose_responce.json()
            """
            if( args.debug ):
                print( "openpose_responce : ", openpose_responce )
            """

        except Exception as e:
            print( "Fail to connect to the openpose server." )
            print( "Exception : ", e )
            continue
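The examples on this page rely on small base64 conversion helpers (conv_pillow_to_base64, conv_base64_to_pillow) that are not shown. A minimal sketch of what they could look like, assuming PNG encoding (the original projects may use a different format):

import io
import base64
from PIL import Image

def conv_pillow_to_base64(img_pillow):
    # encode a PIL image into a base64 string (PNG is an assumption)
    buf = io.BytesIO()
    img_pillow.save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")

def conv_base64_to_pillow(img_base64):
    # decode a base64 string back into a PIL image
    return Image.open(io.BytesIO(base64.b64decode(img_base64)))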
Code Example #2
def responce():
    print('MMFashion')
    if (app.debug):
        print("flask.request.method : ", flask.request.method)
        print("flask.request.headers \n: ", flask.request.headers)

    if (flask.request.headers["User-Agent"].split("/")[0]
            in "python-requests"):
        json_data = json.loads(flask.request.json)
    else:
        json_data = flask.request.get_json()

    img_path = args.input

    global model
    if args.use_cuda:
        model = model.cuda()

    #------------------------------------------
    # Convert the image data sent from the browser
    #------------------------------------------
    img_cv = conv_base64_to_cv(json_data["img_base64"])
    cv2.imwrite(img_path, img_cv)

    img_tensor = get_img_tensor(args.input, args.use_cuda)

    query_feat = model(img_tensor, landmark=None, return_loss=False)
    query_feat = query_feat.data.cpu().numpy()

    retrieved_paths = retriever.show_retrieved_images(query_feat,
                                                      gallery_embeds)

    retrieved_imgs = []
    for retrieved_path in retrieved_paths:
        retrieved_path = retrieved_path.replace('data/In-shop/', '', 1)
        retrieved_img = Image.open(retrieved_path)
        retrieved_img_base64 = conv_pillow_to_base64(retrieved_img)
        retrieved_imgs.append(retrieved_img_base64)

    #------------------------------------------
    # Build the JSON response message
    #------------------------------------------
    #torch.cuda.empty_cache()
    http_status_code = 200
    response = flask.jsonify({
        'status': 'OK',
        'origin_img': conv_pillow_to_base64(Image.open(img_path)),
        'retrieved_imgs': retrieved_imgs
    })

    # Access-Control-Allow-Origin
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers',
                         'Content-Type,Authorization')
    if (app.debug):
        print("response.headers : \n", response.headers)

    # release gpu
    del img_tensor, query_feat, retrieved_paths
    if args.use_cuda:
        model = model.cpu()
        torch.cuda.empty_cache()

    return response, http_status_code
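For reference, a hypothetical client call to the MMFashion endpoint above, mirroring the request pattern of Code Example #1. The URL, port, and input file name are assumptions, and conv_pillow_to_base64 is the helper sketched after Code Example #1:

import json
import requests
from PIL import Image

# build the request payload in the format the server expects
img_pillow = Image.open("query.png")                        # hypothetical input image
msg = json.dumps({"img_base64": conv_pillow_to_base64(img_pillow)})

# the endpoint URL is an assumption; adjust host/port/path to the actual server
response = requests.post("http://localhost:5000/", json=msg)
retrieved_imgs = response.json()["retrieved_imgs"]          # list of base64-encoded images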
Code Example #3
File: app.py  Project: Yagami360/densepose_wrapper
def responce():
    print("リクエスト受け取り")
    if (app.debug):
        print "flask.request.method : ", flask.request.method
        print "flask.request.headers \n: ", flask.request.headers

    #------------------------------------------
    # Get the JSON data sent in the request
    #------------------------------------------
    if (flask.request.headers["User-Agent"].split("/")[0]
            in "python-requests"):
        json_data = json.loads(flask.request.json)
    else:
        json_data = flask.request.get_json()

    #------------------------------------------
    # Convert the image data sent in the request
    #------------------------------------------
    pose_img_pillow = conv_base64_to_pillow(json_data["pose_img_base64"])
    pose_img_pillow.save(os.path.join("tmp", "pose_img.png"))

    #------------------------------------------
    # Run DensePose
    #------------------------------------------
    in_img_path = os.path.join("tmp", "pose_img.png")
    iuv_pillow, inds_pillow = inference(cfg_path=args.cfg,
                                        weights=args.weights,
                                        img_pillow=pose_img_pillow,
                                        output_dir="tmp")

    #------------------------------------------
    # Extract the parsing image
    #------------------------------------------
    """
    iuv_np = cv2.cvtColor(np.asarray(iuv_pillow), cv2.COLOR_RGB2BGR)
    parse_np = iuv_np[:,:,0]
    parse_pillow = Image.fromarray(parse_np)
    """

    #------------------------------------------
    # Extract the contour image
    #------------------------------------------
    """
    fig, ax = plt.subplots(figsize=(iuv_np.shape[0]/10, iuv_np.shape[1]/10))
    ax.contour( iuv_np[:,:,1]/256., 10, linewidths = 1 )
    ax.contour( iuv_np[:,:,2]/256., 10, linewidths = 1 )
    ax.invert_yaxis()
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
    plt.axis('off')
    
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    enc = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    contour_np = cv2.imdecode(enc, 1)
    contour_np = contour_np[:,:,::-1]
    contour_np = cv2.resize(contour_np, dsize=(iuv_np.shape[0], iuv_np.shape[1]), interpolation=cv2.INTER_LANCZOS4)
    plt.clf()
    contour_pillow = Image.fromarray(contour_np)
    """

    #------------------------------------------
    # Convert the image data to send back
    #------------------------------------------
    iuv_img_base64 = conv_pillow_to_base64(iuv_pillow)
    inds_img_base64 = conv_pillow_to_base64(inds_pillow)
    #parse_img_base64 = conv_pillow_to_base64( parse_pillow )
    #contour_img_base64 = conv_pillow_to_base64( contour_pillow )

    #------------------------------------------
    # Set up the response message
    #------------------------------------------
    http_status_code = 200
    response = flask.jsonify({
        'status': 'OK',
        'iuv_img_base64': iuv_img_base64,
        'inds_img_base64': inds_img_base64,
        #            'parse_img_base64': parse_img_base64,
        #            'contour_img_base64': contour_img_base64,
    })

    # Add headers to the response (to work around Access-Control-Allow-Origin errors)
    #response.headers.add('Access-Control-Allow-Origin', '*')
    #response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    #response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    if (app.debug):
        print "response.headers : \n", response.headers

    return response, http_status_code
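The excerpts omit the route registration and server startup. A minimal sketch of how responce() could be wired up with Flask; the route path and the host/port values are assumptions, not the project's actual configuration:

import flask

app = flask.Flask(__name__)

@app.route("/densepose", methods=["POST"])   # route path is an assumption
def responce():
    # ... request handling as in Code Example #3 above ...
    return flask.jsonify({"status": "OK"}), 200

if __name__ == "__main__":
    # in the original projects the host and port presumably come from argparse
    app.run(host="0.0.0.0", port=5000, threaded=False)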
Code Example #4
File: app.py  Project: Sam1224/OutfitApp-AWS
def responce():
    if( app.debug ):
        print( "flask.request.method : ", flask.request.method )
        print( "flask.request.headers \n: ", flask.request.headers )

    if( flask.request.headers["User-Agent"].split("/")[0] in "python-requests" ):
        json_data = json.loads(flask.request.json)
    else:
        json_data = flask.request.get_json()

    # file paths
    poseA_img_path = os.path.join( args.dataset_dir, "test", "poseA", "1.png" )
    poseB_img_path = os.path.join( args.dataset_dir, "test", "poseB", "1.png" )
    cloth_img_path = os.path.join( args.dataset_dir, "test", "cloth", "1.png" )
    cloth_mask_img_path = os.path.join( args.dataset_dir, "test", "cloth_mask", "1.png" )
    poseA_keypoints_path = os.path.join( args.dataset_dir, "test", "poseA_keypoints", "1_keypoints.json" )
    poseB_keypoints_path = os.path.join( args.dataset_dir, "test", "poseB_keypoints", "1_keypoints.json" )

    #------------------------------------------
    # imgs: base64 -> rgb format
    #------------------------------------------
    pose_img_pillow = conv_base64_to_pillow( json_data["pose_img_base64"] )
    cloth_img_pillow = conv_base64_to_pillow( json_data["cloth_img_base64"] )
    pose_img_pillow = pose_img_pillow.resize( (args.image_width, args.image_height), resample = Image.LANCZOS )
    cloth_img_pillow = cloth_img_pillow.resize( (args.image_width, args.image_height), resample = Image.LANCZOS )
    pose_img_base64 = conv_pillow_to_base64( pose_img_pillow )
    cloth_img_base64 = conv_pillow_to_base64( cloth_img_pillow )

    pose_img_pillow.save( poseA_img_path )
    pose_img_pillow.save( poseB_img_path )
    cloth_img_pillow.save( cloth_img_path )

    #------------------------------------------
    # Graphonomy: Human parsing (separate each part, e.g. head, body, hands, etc.)
    #------------------------------------------
    graphonomy_msg = {'pose_img_base64': pose_img_base64 }
    graphonomy_msg = json.dumps(graphonomy_msg)
    try:
        graphonomy_responce = requests.post( args.graphonomy_server_url, json=graphonomy_msg )
        graphonomy_responce = graphonomy_responce.json()

    except Exception as e:
        print( "Fail to connect to the graphonomy server." )
        print( "Exception : ", e )
        #torch.cuda.empty_cache()

        http_status_code = 400
        response = flask.jsonify(
            {
                'status':'NG',
            }
        )
        return response, http_status_code

    pose_parse_img_base64 = graphonomy_responce["pose_parse_img_base64"]
    pose_parse_img_RGB_base64 = graphonomy_responce["pose_parse_img_RGB_base64"]
    pose_parse_img_pillow = conv_base64_to_pillow(pose_parse_img_base64)
    pose_parse_img_RGB_pillow = conv_base64_to_pillow(pose_parse_img_RGB_base64)

    pose_parse_img_pillow.save( os.path.join( args.dataset_dir, "test", "poseA_parsing", "1.png" ) )
    pose_parse_img_pillow.save( os.path.join( args.dataset_dir, "test", "poseB_parsing", "1.png" ) )
    pose_parse_img_RGB_pillow.save( os.path.join( args.dataset_dir, "test", "poseA_parsing", "1_vis.png" ) )
    pose_parse_img_RGB_pillow.save( os.path.join( args.dataset_dir, "test", "poseB_parsing", "1_vis.png" ) )

    #------------------------------------------
    # OpenPose: Extract keypoints of human pose
    #------------------------------------------
    # request
    oepnpose_msg = {'pose_img_base64': pose_img_base64 }
    oepnpose_msg = json.dumps(oepnpose_msg)
    try:
        openpose_responce = requests.post(args.openpose_server_url, json=oepnpose_msg)
        openpose_responce = openpose_responce.json()
        with open( poseA_keypoints_path, 'w') as f:
            json.dump( openpose_responce, f, ensure_ascii=False )
        with open( poseB_keypoints_path, 'w') as f:
            json.dump( openpose_responce, f, ensure_ascii=False )

    except Exception as e:
        print( "Fail to connect to the openpose server." )
        print( "Exception : ", e )
        #torch.cuda.empty_cache()

        http_status_code = 400
        response = flask.jsonify(
            {
                'status':'NG',
            }
        )
        return response, http_status_code

    #------------------------------------------
    # Preprocessing
    #------------------------------------------
    # Generate cloth mask
    cloth_mask_img_cv = create_binary_mask( cloth_img_path )
    cv2.imwrite( cloth_mask_img_path, cloth_mask_img_cv )

    #------------------------------------------
    # virtual try-on
    #------------------------------------------
    # Put a small batch of data onto gpu for calculating
    inputs = dloader_test.next_batch()
    cloth_tsr = inputs["cloth_tsr"].to(device)
    cloth_mask_tsr = inputs["cloth_mask_tsr"].to(device)
    grid_tsr = inputs["grid_tsr"].to(device)

    poseA_tsr = inputs["poseA_tsr"].to(device)
    poseA_cloth_tsr = inputs["poseA_cloth_tsr"].to(device)
    poseA_cloth_mask_tsr = inputs["poseA_cloth_mask_tsr"].to(device)
    poseA_bodyshape_mask_tsr = inputs["poseA_bodyshape_mask_tsr"].to(device)
    poseA_gmm_agnostic_tsr = inputs["poseA_gmm_agnostic_tsr"].to(device)
    poseA_tom_agnostic_tsr = inputs["poseA_tom_agnostic_tsr"].to(device)
    poseA_keypoints_tsr = inputs["poseA_keypoints_tsr"].to(device)
    poseA_keypoints_img_tsr = inputs["poseA_keypoints_img_tsr"].to(device)
    poseA_wuton_agnotic_tsr = inputs["poseA_wuton_agnotic_tsr"].to(device)
    poseA_wuton_agnotic_woErase_mask_tsr = inputs["poseA_wuton_agnotic_woErase_mask_tsr"].to(device)

    poseB_tsr = inputs["poseB_tsr"].to(device)
    poseB_cloth_tsr = inputs["poseB_cloth_tsr"].to(device)
    poseB_cloth_mask_tsr = inputs["poseB_cloth_mask_tsr"].to(device)
    poseB_bodyshape_mask_tsr = inputs["poseB_bodyshape_mask_tsr"].to(device)
    poseB_gmm_agnostic_tsr = inputs["poseB_gmm_agnostic_tsr"].to(device)
    poseB_tom_agnostic_tsr = inputs["poseB_tom_agnostic_tsr"].to(device)
    poseB_keypoints_tsr = inputs["poseB_keypoints_tsr"].to(device)
    poseB_keypoints_img_tsr = inputs["poseB_keypoints_img_tsr"].to(device)
    poseB_wuton_agnotic_tsr = inputs["poseB_wuton_agnotic_tsr"].to(device)
    poseB_wuton_agnotic_woErase_mask_tsr = inputs["poseB_wuton_agnotic_woErase_mask_tsr"].to(device)

    with torch.no_grad():
        poseA_warp_cloth, poseA_warp_cloth_mask, poseA_warped_grid, \
        poseB_warp_cloth, poseB_warp_cloth_mask, poseB_warped_grid, \
        poseA_rough, poseA_attention, poseA_gen, \
        poseB_rough, poseB_attention, poseB_gen \
        = model_G(
            cloth_tsr, cloth_mask_tsr, grid_tsr,
            poseA_tsr, poseA_bodyshape_mask_tsr, poseA_gmm_agnostic_tsr, poseA_tom_agnostic_tsr, poseA_keypoints_tsr, poseA_wuton_agnotic_tsr,
            poseB_tsr, poseB_bodyshape_mask_tsr, poseB_gmm_agnostic_tsr, poseB_tom_agnostic_tsr, poseB_keypoints_tsr, poseB_wuton_agnotic_tsr,
        )

        if( args.reuse_tom_wuton_agnotic ):
            poseA_gen = (1 - poseA_wuton_agnotic_woErase_mask_tsr) * poseA_gen + poseA_wuton_agnotic_woErase_mask_tsr * poseA_wuton_agnotic_tsr
            poseB_gen = (1 - poseB_wuton_agnotic_woErase_mask_tsr) * poseB_gen + poseB_wuton_agnotic_woErase_mask_tsr * poseB_wuton_agnotic_tsr


    tryon_img_pillow = conv_tensor_to_pillow( poseB_gen )
    tryon_img_base64 = conv_pillow_to_base64( tryon_img_pillow )

    #------------------------------------------
    # Send generated image back in the format of base64
    #------------------------------------------
    #torch.cuda.empty_cache()
    http_status_code = 200
    response = flask.jsonify(
        {
            'status':'OK',
            'tryon_img_base64': tryon_img_base64,
        }
    )

    # Cross-Domain Policy
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    #response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    if( app.debug ):
        print( "response.headers : \n", response.headers )

    return response, http_status_code
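create_binary_mask() is called above but not defined in the excerpt. A plausible minimal implementation using OpenCV grayscale thresholding; the threshold value and the white-background assumption are guesses, not the project's actual code:

import cv2

def create_binary_mask(img_path, threshold=240):
    # read the cloth image and convert it to grayscale
    img_gray = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2GRAY)
    # pixels darker than the (assumed) white background become foreground
    _, mask = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY_INV)
    return mask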
Code Example #5
def responce():
    print("リクエスト受け取り")
    if (app.debug):
        print("flask.request.method : ", flask.request.method)
        print("flask.request.headers \n: ", flask.request.headers)

    #------------------------------------------
    # Get the JSON data sent in the request
    #------------------------------------------
    if (flask.request.headers["User-Agent"].split("/")[0]
            in "python-requests"):
        json_data = json.loads(flask.request.json)
    else:
        json_data = flask.request.get_json()

    #------------------------------------------
    # Convert the image data sent in the request
    #------------------------------------------
    pose_img_pillow = conv_base64_to_pillow(json_data["pose_img_base64"])
    pose_img_pillow.save(os.path.join("tmp", "pose_img.png"))

    #------------------------------------------
    # Run Graphonomy
    #------------------------------------------
    in_img_path = os.path.join("tmp", "pose_img.png")
    pose_parse_img_np, pose_parse_img_RGB_pillow = inference(
        net=model, img_path=in_img_path, device=device)
    pose_parse_img_pillow = Image.fromarray(
        np.uint8(pose_parse_img_np.transpose(0, 1)), 'L')

    pose_parse_img_pillow.save(os.path.join("tmp", "pose_parse_img.png"))
    pose_parse_img_RGB_pillow.save(
        os.path.join("tmp", "pose_parse_img_vis.png"))

    #------------------------------------------
    # Convert the image data to send back
    #------------------------------------------
    pose_parse_img_base64 = conv_pillow_to_base64(pose_parse_img_pillow)
    pose_parse_img_RGB_base64 = conv_pillow_to_base64(
        pose_parse_img_RGB_pillow)

    #------------------------------------------
    # Set up the response message
    #------------------------------------------
    http_status_code = 200
    response = flask.jsonify({
        'status': 'OK',
        'pose_parse_img_base64': pose_parse_img_base64,
        'pose_parse_img_RGB_base64': pose_parse_img_RGB_base64,
    })

    # Add headers to the response (to work around Access-Control-Allow-Origin errors)
    #response.headers.add('Access-Control-Allow-Origin', '*')
    #response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    #response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    if (app.debug):
        print("response.headers : \n", response.headers)

    return response, http_status_code
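Two more helpers used in the examples are not defined in the excerpts: conv_base64_to_cv (Code Example #2) and conv_tensor_to_pillow (Code Example #4). Minimal sketches, assuming standard image payloads and generator output normalized to [-1, 1]:

import base64
import cv2
import numpy as np
from torchvision.transforms import functional as TF

def conv_base64_to_cv(img_base64):
    # decode a base64 string into an OpenCV (BGR) image
    buf = np.frombuffer(base64.b64decode(img_base64), dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)

def conv_tensor_to_pillow(img_tsr):
    # convert a (B, C, H, W) tensor to a PIL image; the [-1, 1] value range is an assumption
    img_tsr = img_tsr[0].detach().cpu().clamp(-1, 1)
    return TF.to_pil_image((img_tsr + 1.0) / 2.0)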