Example #1
def sketch(request):
    data = utils.parseRequest(request)
    url = data['src_img']
    image = utils.url_to_image(url)
    sketch = image_utils.sketch(image)
    utils.image_to_url("sketch.jpg", sketch)
    return Response('')
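utils and image_utils are project helpers that are not shown on this page. image_utils.sketch presumably produces a pencil-sketch effect; a minimal standalone sketch of one common way to get that with plain OpenCV (grayscale, invert, blur, colour-dodge divide) follows. The function name and parameters are assumptions for illustration, not the project's actual helper:

import cv2

def pencil_sketch(image, blur_ksize=21):
    # Hypothetical stand-in for image_utils.sketch: dodge-blend pencil effect.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    inverted = 255 - gray
    blurred = cv2.GaussianBlur(inverted, (blur_ksize, blur_ksize), 0)
    # Colour dodge: divide the grayscale image by the inverted blur.
    return cv2.divide(gray, 255 - blurred, scale=256)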
Example #2
def warmer(request):
    data = utils.parseRequest(request)
    url = data['image_url']
    # percentage = data['percentage']

    original = utils.url_to_image(url)
    image = np.copy(original)

    # Pivot points for X-Coordinates
    originalValue = np.array([0, 50, 100, 150, 200, 255])
    # Changed points on Y-axis for each channel
    # rCurve = np.array([0, 80 + 35, 150 + 35, 190 + 35, 220 + 35, 255])
    # bCurve = np.array([0, 20 ,  40 ,  75 , 150, 255])

    rCurve = np.array([0, 80, 150, 190, 220, 255])
    bCurve = np.array([0, 20,  40,  75, 150, 255])

    # Create a LookUp Table
    fullRange = np.arange(0, 256)
    rLUT = np.interp(fullRange, originalValue, rCurve)
    bLUT = np.interp(fullRange, originalValue, bCurve)

    bChannel = image[:, :, 0]
    bChannel = cv2.LUT(bChannel, bLUT)
    image[:, :, 0] = bChannel

    # Get the red channel and apply the mapping
    rChannel = image[:, :, 2]
    rChannel = cv2.LUT(rChannel, rLUT)
    image[:, :, 2] = rChannel

    url = utils.image_to_url("results/warmer.jpg", image)
    # cv2.imwrite("results/warmer_%s.jpg" % ('result'), image)
    # return Response('', status=status.HTTP_200_OK)
    return Response({"image_url": url})
Example #3
def darken(request):
    data = utils.parseRequest(request)
    url = data['image_url']
    percentage = data['percentage']
    # name = url.split('.')[0]
    image = utils.url_to_image(url)
    imdark = adjustBrightness(image, percentage, -1)
    url = utils.image_to_url("results/dark%s_%2.2f%%.jpg" %
                             ('result', percentage), imdark)
    return Response({"image_url": url})
Example #4
def brighten(request):
    data = utils.parseRequest(request)
    url = data['image_url']
    percentage = data['percentage']
    # name = url.split('.')[0]
    # image = cv2.imread(url)
    image = utils.url_to_image(url)

    imbright = adjustBrightness(image, percentage, 1)
    url = utils.image_to_url("results/bright_%s_%2.2f%%.jpg" %
                             ('result', percentage), imbright)
    return Response({"image_url": url})
Example #5
def ig_filter(request):
    data = utils.parseRequest(request)
    url = data['image_url']
    original = utils.url_to_image(url)
    height, width = original.shape[:2]
    scale = 0.5
    original = cv2.resize(original, (int(width * scale), int(height * scale)))
    image = np.copy(original)

    num_down = 2
    num_bilateral = 7

    for _ in range(num_down):
        image = cv2.pyrDown(image)

    for _ in range(num_bilateral):
        image = cv2.bilateralFilter(image, d=9, sigmaColor=9, sigmaSpace=7)

    for _ in range(num_down):
        image = cv2.pyrUp(image)

    img_gray = cv2.cvtColor(original, cv2.COLOR_RGB2GRAY)
    img_blur = cv2.medianBlur(img_gray, 7)

    img_edge = cv2.adaptiveThreshold(img_blur,
                                     255,
                                     cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY,
                                     blockSize=9,
                                     C=2)
    img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
    img_cartoon = cv2.bitwise_and(image, img_edge)

    img_cartoon = image_utils.sketchPencilUsingBlending(img_cartoon)

    src_img = cv2.imread('src/sketchpad_texture.jpg')

    # img_cartoon = image_utils.color_transfer(src_img, img_cartoon)
    # img_cartoon = image_utils.color_transfer(img_cartoon, src_img)
    img_cartoon = image_utils.alphablend(src_img, img_cartoon)

    # url = ''
    url = utils.image_to_url(
        "results/ig_filter_%d.jpg" % (random.randint(0, 10000)), img_cartoon)
    return Response({"image_url": url})
Example #6
def ig_filter2(request):
    data = utils.parseRequest(request)
    url = data['image_url']
    # percentage = data['percentage']

    original = utils.url_to_image(url)
    image = np.copy(original)

    originalValue = np.array([0, 28, 56, 85, 113, 141, 170, 198, 227, 255])

    # originalValue = np.array([0, 63, 126, 189, 255]);

    bCurve = np.array([0, 26, 62, 96, 104, 128, 153, 189, 219, 255])
    # bCurve = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    # bCurve = np.array([255, 255, 255, 255, 255, 255, 255, 255, 255, 255])
    # bCurve = np.array([128, 128, 128, 128, 128, 128, 128, 128, 128, 128])
    # bCurve = np.array([0, 28, 56, 85, 113, 141, 170, 198, 227, 255])
    # bCurve = np.array([0, 57, 128, 176, 255])

    # gCurve = np.array([0, 38, 66, 104, 139, 175, 206, 226, 245, 255])
    gCurve = np.array([0, 17, 57, 65, 75, 102, 146, 172, 232, 255])
    # gCurve = np.array([255, 255, 255, 255, 255, 255, 255, 255, 255, 255])
    # gCurve = np.array([128, 128, 128, 128, 128, 128, 128, 128, 128, 128])
    # gCurve = np.array([0, 47, 60, 166, 255])

    # rCurve = np.array([0, 24, 49, 98, 141, 174, 201, 223, 239, 255 ])
    # rCurve = np.array([0, 0, 0, 25, 45, 70, 100, 125, 220, 255])
    # rCurve = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    # rCurve = np.array([255, 255, 255, 255, 255, 255, 255, 255, 255, 255])
    rCurve = np.array([0, 0, 0, 10, 21, 41, 89, 150, 219, 255])

    # rCurve = np.array([0, 3, 53, 162, 255])

    # Create a LookUp Table
    fullRange = np.arange(0, 256)
    rLUT = np.interp(fullRange, originalValue, rCurve)
    bLUT = np.interp(fullRange, originalValue, bCurve)
    gLUT = np.interp(fullRange, originalValue, gCurve)

    # Get the blue channel and apply the mapping
    bChannel = image[:, :, 0]
    bChannel = cv2.LUT(bChannel, bLUT)
    image[:, :, 0] = bChannel

    # Get the green channel and apply the mapping
    gChannel = image[:, :, 1]
    gChannel = cv2.LUT(gChannel, gLUT)
    image[:, :, 1] = gChannel

    # Get the red channel and apply the mapping
    rChannel = image[:, :, 2]
    rChannel = cv2.LUT(rChannel, rLUT)
    image[:, :, 2] = rChannel

    # sharpen = np.array((
    #     [0, -1, 0],
    #     [-1, 5, -1],
    #     [0, -1, 0]), dtype="int")
    # image = cv2.filter2D(image, -1, sharpen)

    url = utils.image_to_url("results/ig_filter_%d.jpg" %
                             (random.randint(0, 10000)), image)
    # cv2.imwrite("results/warmer_%s.jpg" % ('result'), image)
    # return Response('', status=status.HTTP_200_OK)
    return Response({"image_url": url})
Example #7
def video_morph(request):
    data = utils.parseRequest(request)
    dst_url = data['dst_img']
    src_url = data['src_img']

    src_img = utils.url_to_image(src_url)
    dst_img = utils.url_to_image(dst_url)

    utils.image_to_url("results/face_morph/morph_orig_src.jpg", src_img)
    utils.image_to_url("results/face_morph/morph_orig_dst.jpg", dst_img)

    src_pts = fbc.getLandmarks(faceDetector, landmarkDetector,
                               cv2.cvtColor(src_img, cv2.COLOR_BGR2RGB))
    dst_pts = fbc.getLandmarks(faceDetector, landmarkDetector,
                               cv2.cvtColor(dst_img, cv2.COLOR_BGR2RGB))

    src_pts = np.array(src_pts)
    dst_pts = np.array(dst_pts)

    # Convert image to floating point in the range 0 to 1
    src_img = np.float32(src_img) / 255.0
    dst_img = np.float32(dst_img) / 255.0

    h = 300
    w = 300

    # Normalize image to output coordinates.
    srcNorm, src_pts = fbc.normalizeImagesAndLandmarks((h, w), src_img,
                                                       src_pts)
    dstNorm, dst_pts = fbc.normalizeImagesAndLandmarks((h, w), dst_img,
                                                       dst_pts)

    # Calculate average points. Will be used for Delaunay triangulation.
    pointsAvg = (src_pts + dst_pts) / 2.0

    # 8 Boundary points for Delaunay Triangulation
    boundaryPoints = fbc.getEightBoundaryPoints(h, w)
    src_pts = np.concatenate((src_pts, boundaryPoints), axis=0)
    dst_pts = np.concatenate((dst_pts, boundaryPoints), axis=0)
    pointsAvg = np.concatenate((pointsAvg, boundaryPoints), axis=0)
    # Calculate Delaunay triangulation.
    rect = (0, 0, w, h)
    dt = fbc.calculateDelaunayTriangles(rect, pointsAvg)

    # Start animation.
    alpha = 0

    frames = []

    while alpha < 1:
        # Compute landmark points based on morphing parameter alpha
        pointsMorph = (1 - alpha) * src_pts + alpha * dst_pts

        # Warp images such that normalized points line up with morphed points.
        imOut1 = fbc.warpImage(srcNorm, src_pts, pointsMorph.tolist(), dt)
        imOut2 = fbc.warpImage(dstNorm, dst_pts, pointsMorph.tolist(), dt)

        # Blend warped images based on morphing parameter alpha
        imMorph = (1 - alpha) * imOut1 + alpha * imOut2

        imMorph = np.uint8(imMorph * 255)
        utils.image_to_url("results/face_morph/morph_%1.2f.jpg" % alpha,
                           imMorph)
        frames.append(imMorph)

        alpha += 0.05

    path = "results/face_morph/face_average.avi"
    video_utils.video_write(path, 8, (300, 300), frames)
    # video_utils.video_write("results/face_morph/face_average.mp4", 8, (300, 300), frames)
    url = utils.video_to_url(path)
    # return Response("")
    return Response({"video_url": url})
Example #8
def face_average(request):
    data = utils.parseRequest(request)

    img1 = utils.url_to_image(data['src_img'])
    img2 = utils.url_to_image(data['dst_img'])
    p1 = fbc.getLandmarks(faceDetector, landmarkDetector, img1)
    p2 = fbc.getLandmarks(faceDetector, landmarkDetector, img2)
    output = image_utils.face_average(img1, img2, p1, p2)

    url = utils.image_to_url("results/face_to_average/face_average.jpg",
                             output)
    return Response({"image_url": url})
Example #9
def face_average2(request):
    data = utils.parseRequest(request)
    image_urls = data['image_urls']
    # image_urls = [data['src_img'], data['dst_img']]

    images = []
    allPoints = []

    for url in image_urls:
        try:
            im = utils.url_to_image(url)
            if im is None:
                print("Unable to load image url")
                continue
            else:
                utils.image_to_url(
                    "results/face_to_average/%d.jpg" %
                    random.randint(0, 10000), im)
                points = fbc.getLandmarks(faceDetector, landmarkDetector, im)
                if len(points) > 0:
                    allPoints.append(points)
                    im = np.float32(im) / 255.0
                    images.append(im)
                else:
                    print("No face detected")
        except Exception:
            print("Forbidden image")
    if len(images) == 0:
        print("No images loaded")
        return Response("no faces to average")

    w = 300
    h = 300

    boundaryPts = fbc.getEightBoundaryPoints(w, h)

    numImages = len(images)
    numLandmarks = len(allPoints[0])

    imagesNorm = []
    pointsNorm = []

    pointsAvg = np.zeros((numLandmarks, 2), dtype=np.float32)

    # Warp images and transform landmarks to output coordinate system,
    # and find average of transformed landmarks.
    for i, img in enumerate(images):

        points = allPoints[i]
        points = np.array(points)

        img, points = fbc.normalizeImagesAndLandmarks((h, w), img, points)

        # Calculate average landmark locations
        pointsAvg = pointsAvg + (points / (1.0 * numImages))

        # Append boundary points. Will be used in Delaunay Triangulation
        points = np.concatenate((points, boundaryPts), axis=0)

        pointsNorm.append(points)
        imagesNorm.append(img)

    # Append boundary points to average points.
    pointsAvg = np.concatenate((pointsAvg, boundaryPts), axis=0)

    # Delaunay triangulation
    rect = (0, 0, w, h)
    dt = fbc.calculateDelaunayTriangles(rect, pointsAvg)

    # Output image
    output = np.zeros((h, w, 3), dtype=np.float32)

    # Warp input images to average image landmarks
    for i in range(0, numImages):

        imWarp = fbc.warpImage(imagesNorm[i], pointsNorm[i],
                               pointsAvg.tolist(), dt)

        # Add image intensities for averaging
        output = output + imWarp

    # Divide by numImages to get average
    output = output / (1.0 * numImages)
    output = output * 255.0
    output = np.uint8(output)
    print(output)
    url = utils.image_to_url("results/face_to_average/face_average.jpg",
                             output)
    return Response({"image_url": url})