def triple_exposer(img1, img2, img3):

    te1 = resize_cartoon(img1)

    te2 = resize_shine2(img2, te1.shape)

    te3 = resize_shine2(img3, te1.shape)

    # Downscale the image so segmentation runs faster
    img_small = imutils.resize(te1, width=500, inter=cv2.INTER_CUBIC)

    # Run the segmentation parser on the smaller image to get the mask
    mask = human_parser.parse(img_small)

    # Upscale the mask to match the original image size
    parsed = cv2.resize(mask.astype(np.uint8), (te1.shape[1], te1.shape[0]))

    te1 = enhance(te1)

    img1 = cv2.addWeighted(te2, 0.7, te3, 0.4, 0)
    img2 = cv2.addWeighted(te1, 1, img1, 0.4, 0)

    img1[parsed != 0] = img2[parsed != 0]
    final = img1  # Just to follow the same returning pattern

    return final
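The heart of triple_exposer is a weighted blend followed by a mask-gated copy: cv2.addWeighted mixes the exposures, and the person mask decides where the subject-dominant blend wins. resize_cartoon, resize_shine2, enhance and human_parser are project helpers not shown in this listing; the sketch below reproduces only the blend-then-mask pattern on synthetic arrays, and all names in it are illustrative.

# Minimal sketch of the blend-then-mask pattern (illustrative names only).
import cv2
import numpy as np

def masked_double_exposure(subject, texture, mask, alpha=0.4):
    # Texture-dominant blend everywhere, subject-dominant blend inside the mask.
    background_blend = cv2.addWeighted(texture, 0.7, subject, alpha, 0)
    subject_blend = cv2.addWeighted(subject, 1.0, texture, alpha, 0)
    background_blend[mask != 0] = subject_blend[mask != 0]
    return background_blend

# Synthetic usage
subject = np.full((120, 120, 3), 180, dtype=np.uint8)
texture = np.random.randint(0, 255, (120, 120, 3), dtype=np.uint8)
mask = np.zeros((120, 120), dtype=np.uint8)
mask[30:90, 30:90] = 1
result = masked_double_exposure(subject, texture, mask)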
Example No. 2
def apply(img, news, words):
    # news = cv2.imread("./news.jpeg")
    # words = cv2.imread("./words.jpeg")
    img = resize_even(img, 1080)
    news = cv2.resize(news, (img.shape[1], img.shape[0]))
    words = cv2.resize(words, (img.shape[1], img.shape[0]))
    # Downscale the image so segmentation runs faster
    img_small = imutils.resize(img, width=500, inter=cv2.INTER_CUBIC)

    # Run the segmentation parser on the smaller image to get the mask
    mask = human_parser.parse(img_small)

    # Upscale the mask to match the original image size
    mask_big = cv2.resize(mask.astype(np.uint8), (img.shape[1], img.shape[0]))

    img1 = mask_big.copy()
    img1[img1 != 0] = 255
    img5 = img1.copy()
    new_mask = draw_outline(img5)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img[:, :, 0] = gray
    img[:, :, 1] = gray
    img[:, :, 2] = gray

    img4 = np.zeros((new_mask.shape[0], new_mask.shape[1], 3)).astype(np.uint8)
    img4[:, :, 0] = new_mask
    img4[:, :, 1] = new_mask
    img4[:, :, 2] = new_mask

    img4[new_mask == 0] = news[new_mask == 0]
    img4[mask_big != 0] = img[mask_big != 0]
    img4[np.isin(mask_big, [1, 5, 7])] = words[np.isin(mask_big, [1, 5, 7])]

    return img4
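draw_outline is defined elsewhere in the project. A plausible implementation, assumed here, thickens the 0/255 silhouette with a morphological dilation so the person carries a white border band when composited over the news image; the kernel size below is a guess.

# Hypothetical sketch of draw_outline: expand a 0/255 silhouette so a white
# border band surrounds the person (thickness is an assumption).
import cv2
import numpy as np

def draw_outline_sketch(mask, thickness=15):
    kernel = np.ones((thickness, thickness), np.uint8)
    return cv2.dilate(mask, kernel, iterations=1)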
Example No. 3
def shake_filter(img, img_back):
    # img_back = cv2.imread("./sprit-back.jpg")
    img = resize_even(img, 1080)
    img_back = cv2.resize(img_back, (img.shape[1], img.shape[0]))

    # Downscale the image so segmentation runs faster
    img_small = imutils.resize(img, width=500, inter=cv2.INTER_CUBIC)

    # Run the segmentation parser on the smaller image to get the mask
    mask = human_parser.parse(img_small)

    # Upscale the mask to match the original image size
    mask_big = cv2.resize(mask.astype(np.uint8), (img.shape[1], img.shape[0]))


    img2 = img.copy()
    img2 = enhance(img2, 0.8)
    img2[:, :, 0] = np.minimum(255, img2[:, :, 0] * 1.8)
    img2[:, :, 1] = np.minimum(255, img2[:, :, 1] * 1.2)
    img2[:, :, 2] = img2[:, :, 2] * 0.4
    img2[mask_big == 0] = img_back[mask_big == 0]
    img2 = shake(img2, 5)

    img3 = img.copy()
    img3[:, :, 0] = np.minimum(255, img3[:, :, 0] * 0.4)
    img3[:, :, 1] = np.minimum(255, img3[:, :, 1] * 1.8)
    img3[:, :, 2] = np.minimum(255, img3[:, :, 2] * 1.8)
    img3[mask_big == 0] = [255, 255, 255]
    img3 = shake(img3, 80)

    fore = np.where(mask_big != 0)
    topLeft = (np.min(fore[0]), np.min(fore[1]))
    rightBottom = (np.max(fore[0]), np.max(fore[1]))

    trans = mask_big.copy()

    # Build a per-row alpha ramp over the silhouette: more opaque towards the
    # top of the body and towards the right edge of each coloured strip.
    for i in range(topLeft[0], rightBottom[0]):
        row = trans[i, :]
        color_strip = np.where(row > 0)
        mx = np.max(color_strip)
        mn = np.min(color_strip)
        strip_width = mx - mn
        for j in range(mn, mx + 1):
            vertical = 1 - (i - topLeft[0]) / (rightBottom[0] - topLeft[0])
            horizontal = ((j - mn) / strip_width) ** 2
            row[j] = int(255 * vertical) * 0.1 + int(255 * horizontal) * 0.9

    trans.shape = (trans.shape[0], trans.shape[1], 1)
    img4 = np.concatenate((img3, trans), axis=2)
    shift = int(img4.shape[1] * 0.13)
    final = overlay_transparent(img2, img4[:, :-shift, :], shift, 0)
    return final
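shake is another project helper. Assuming it jitters the frame by up to n pixels (which would explain the small offset for the foreground copy and the large one for the ghosted copy), a minimal sketch could look like this; the helper name and border handling below are illustrative, not the project's implementation.

# Hypothetical sketch of shake(img, n): translate the frame by a random
# offset of at most n pixels in each direction.
import random
import cv2
import numpy as np

def shake_sketch(img, n):
    dx = random.randint(-n, n)
    dy = random.randint(-n, n)
    M = np.float32([[1, 0, dx], [0, 1, dy]])
    h, w = img.shape[:2]
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REPLICATE)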
def cloth_color_filter(main_img, back_img):
    # back_img = cv2.imread("spray-wall.jpg")
    main_img = resize_cartoon(main_img)
    back_img = resize_shine2(back_img, (main_img.shape[0], main_img.shape[1]))

    blue = main_img.copy()
    blue[:, :, 0] = np.minimum(255, blue[:, :, 0] * 5.5).astype(np.uint8)
    blue[:, :, 1] = np.minimum(255, blue[:, :, 1] * 1).astype(np.uint8)
    blue[:, :, 2] = np.minimum(255, blue[:, :, 2] * 1).astype(np.uint8)
    blue = bifi(blue)

    red = main_img.copy()
    red[:, :, 0] = np.minimum(255, red[:, :, 0] * 1).astype(np.uint8)
    red[:, :, 1] = np.minimum(255, red[:, :, 1] * 1).astype(np.uint8)
    red[:, :, 2] = np.minimum(255, red[:, :, 2] * 5.5).astype(np.uint8)
    red = bifi(red)

    sky = main_img.copy()
    sky[:, :, 0] = np.minimum(255, sky[:, :, 0] * 5.5).astype(np.uint8)
    sky[:, :, 1] = np.minimum(255, sky[:, :, 1] * 2.0).astype(np.uint8)
    sky[:, :, 2] = np.minimum(255, sky[:, :, 2] * 1).astype(np.uint8)
    sky = bifi(sky)

    # Downscale the image so segmentation runs faster
    img_small = imutils.resize(main_img, width=500, inter=cv2.INTER_CUBIC)

    parsed = human_parser.parse(img_small)

    # Upscale the mask to match the original image size
    parsed = cv2.resize(parsed.astype(np.uint8),
                        (main_img.shape[1], main_img.shape[0]))

    red[np.where(np.isin(parsed, [5, 6, 7, 9, 12],
                         invert=True))] = main_img[np.where(
                             np.isin(parsed, [5, 6, 7, 9, 12], invert=True))]
    red[parsed == 0] = back_img[parsed == 0]

    blue[np.where(np.isin(parsed, [5, 6, 7, 9, 12],
                          invert=True))] = main_img[np.where(
                              np.isin(parsed, [5, 6, 7, 9, 12], invert=True))]
    blue[parsed == 0] = back_img[parsed == 0]

    sky[np.where(np.isin(parsed, [5, 6, 7, 9, 12],
                         invert=True))] = main_img[np.where(
                             np.isin(parsed, [5, 6, 7, 9, 12], invert=True))]
    sky[parsed == 0] = back_img[parsed == 0]

    red = cv2.cvtColor(red, cv2.COLOR_BGR2RGB)
    blue = cv2.cvtColor(blue, cv2.COLOR_BGR2RGB)
    sky = cv2.cvtColor(sky, cv2.COLOR_BGR2RGB)

    images = [blue, red, sky]  # frames for the GIF
    fobj = io.BytesIO(b"")
    imageio.mimsave(fobj, images, 'GIF', duration=0.2)
    binData = fobj.getvalue()

    return binData
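Because imageio.mimsave writes the animated GIF into the BytesIO buffer, the function returns raw GIF bytes rather than an image array, so a caller can persist or stream them directly. A small usage example (input and output paths are illustrative):

# Example use of the returned GIF bytes (paths are illustrative).
gif_bytes = cloth_color_filter(cv2.imread("person.jpg"), cv2.imread("spray-wall.jpg"))
with open("cloth_color.gif", "wb") as f:
    f.write(gif_bytes)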
def invisible_filter(main_img, bg):
    body_parts = human_parser.parse(main_img)
    # Pixel coordinates of the clothing classes and of the background
    clothes = np.array(np.where(np.isin(body_parts,
                                        [1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 18, 19])))
    background = np.array(np.where(body_parts == 0))
    # bg = cv2.imread("background.jpg")
    bg = cv2.resize(bg, (main_img.shape[1], main_img.shape[0]))
    # Swap in the new background, blend the whole frame with it so the body
    # turns semi-transparent, then restore the clothes at full opacity.
    main_img[list(background)] = bg[list(background)]
    img = cv2.addWeighted(main_img, 0.6, bg, 0.4, 0)
    img[list(clothes)] = main_img[list(clothes)]
    return img
Example No. 6
def smoke_filter(img, smoke, smoke_frame):
    img = resize_even(img, 1080)
    # smoke = cv2.imread("smoke.png")
    smoke = cv2.resize(smoke, (img.shape[1], img.shape[0]))
    # smoke_frame = cv2.imread("smoke-frame.png", -1)
    smoke_frame = cv2.resize(smoke_frame, (img.shape[1], img.shape[0]))

    mask_big = human_parser.parse(img)
    smoke[mask_big!=0] = img[mask_big!=0]
    final = overlay_transparent(smoke, smoke_frame, 0, 0)
    return final
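overlay_transparent is shared by several filters here but defined elsewhere in the project. The sketch below assumes the common convention: alpha-blend a 4-channel BGRA overlay onto a BGR background at offset (x, y), cropping the overlay if it runs past the frame. Treat it as an assumption, not the project's actual helper.

# Hypothetical sketch of overlay_transparent(background, overlay, x, y).
import numpy as np

def overlay_transparent_sketch(background, overlay, x, y):
    h = min(overlay.shape[0], background.shape[0] - y)
    w = min(overlay.shape[1], background.shape[1] - x)
    overlay = overlay[:h, :w]
    alpha = overlay[:, :, 3:4].astype(float) / 255.0
    roi = background[y:y + h, x:x + w].astype(float)
    blended = alpha * overlay[:, :, :3].astype(float) + (1.0 - alpha) * roi
    background[y:y + h, x:x + w] = blended.astype(np.uint8)
    return background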
Example No. 7
def cloth_color_filter(main_img, back_img):
    # back_img = cv2.imread("spray-wall.jpg")
    main_img = resize_cartoon(main_img)
    back_img = resize_shine2(back_img, (main_img.shape[0], main_img.shape[1]))

    blue = main_img.copy()
    blue[:, :, 0] = np.minimum(255, blue[:, :, 0] * 5.5).astype(np.uint8)
    blue[:, :, 1] = np.minimum(255, blue[:, :, 1] * 1).astype(np.uint8)
    blue[:, :, 2] = np.minimum(255, blue[:, :, 2] * 1).astype(np.uint8)
    blue = bifi(blue)

    red = main_img.copy()
    red[:, :, 0] = np.minimum(255, red[:, :, 0] * 1).astype(np.uint8)
    red[:, :, 1] = np.minimum(255, red[:, :, 1] * 1).astype(np.uint8)
    red[:, :, 2] = np.minimum(255, red[:, :, 2] * 5.5).astype(np.uint8)
    red = bifi(red)

    sky = main_img.copy()
    sky[:, :, 0] = np.minimum(255, sky[:, :, 0] * 5.5).astype(np.uint8)
    sky[:, :, 1] = np.minimum(255, sky[:, :, 1] * 2.0).astype(np.uint8)
    sky[:, :, 2] = np.minimum(255, sky[:, :, 2] * 1).astype(np.uint8)
    sky = bifi(sky)

    parsed = human_parser.parse(main_img)

    red[np.where(np.isin(parsed, [5, 6, 7, 9, 12],
                         invert=True))] = main_img[np.where(
                             np.isin(parsed, [5, 6, 7, 9, 12], invert=True))]
    red[parsed == 0] = back_img[parsed == 0]

    blue[np.where(np.isin(parsed, [5, 6, 7, 9, 12],
                          invert=True))] = main_img[np.where(
                              np.isin(parsed, [5, 6, 7, 9, 12], invert=True))]
    blue[parsed == 0] = back_img[parsed == 0]

    sky[np.where(np.isin(parsed, [5, 6, 7, 9, 12],
                         invert=True))] = main_img[np.where(
                             np.isin(parsed, [5, 6, 7, 9, 12], invert=True))]
    sky[parsed == 0] = back_img[parsed == 0]

    red = cv2.cvtColor(red, cv2.COLOR_BGR2RGB)
    blue = cv2.cvtColor(blue, cv2.COLOR_BGR2RGB)
    sky = cv2.cvtColor(sky, cv2.COLOR_BGR2RGB)

    images = [blue, red, sky]  # frames for the GIF
    fobj = io.BytesIO(b"")
    imageio.mimsave(fobj, images, 'GIF', duration=0.2)
    binData = fobj.getvalue()

    return binData
Example No. 8
def smoke_filter(img, smoke, smoke_frame):
    img = resize_even(img, 1080)
    # smoke = cv2.imread("smoke.png")
    smoke = cv2.resize(smoke, (img.shape[1], img.shape[0]))
    # smoke_frame = cv2.imread("smoke-frame.png", -1)
    smoke_frame = cv2.resize(smoke_frame, (img.shape[1], img.shape[0]))

    # Downscale the image so segmentation runs faster
    img_small = imutils.resize(img, width=500, inter=cv2.INTER_CUBIC)

    # Run the segmentation parser on the smaller image to get the mask
    mask_big = human_parser.parse(img_small)

    # Upscale the mask to match the original image size
    mask_big = cv2.resize(mask_big.astype(np.uint8), (img.shape[1], img.shape[0]))

    smoke[mask_big!=0] = img[mask_big!=0]
    final = overlay_transparent(smoke, smoke_frame, 0, 0)
    return final
Example No. 9
def remove_background(img):

    # getting body parts
    body_parts = human_parser.parse(img)

    # getting background
    background = np.array(np.where(body_parts == 0))

    # converting the 2-D mask to 3 channels
    bg = np.stack((body_parts, ) * 3, axis=-1)

    # replacing the background of the image with black
    img[list(background)] = bg[list(background)]

    # converting to Pillow
    pilimg = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    # converting to RGBA
    pilimg = pilimg.convert("RGBA")

    # making the black background transparent
    datas = pilimg.getdata()

    newData = []

    for item in datas:
        if item[0] == 0 and item[1] == 0 and item[2] == 0:
            newData.append((255, 255, 255, 0))
        else:
            newData.append(item)
    pilimg.putdata(newData)

    # converting back to OpenCV (RGBA -> BGRA)
    open_cv_image = np.array(pilimg)
    open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_RGBA2BGRA)

    return open_cv_image
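Since the returned image keeps its alpha channel, writing it out as a PNG preserves the transparent background. A short example (the file paths are illustrative):

# Example: save the cut-out with its alpha channel intact.
cutout = remove_background(cv2.imread("person.jpg"))
cv2.imwrite("person_cutout.png", cutout)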
Example No. 10
def cartoon_tear(img, back_img, inner_img):
    # back_img = cv2.imread("cartoon-snake.png", -1)

    img2 = resize_cartoon(img)
    # Downscale the image so segmentation runs faster
    img_small = imutils.resize(img2, width=500, inter=cv2.INTER_CUBIC)

    # Run the segmentation parser on the smaller image to get the mask
    mask = human_parser.parse(img_small)

    # Upscale the mask to match the original image size
    parsed = cv2.resize(mask.astype(np.uint8), (img2.shape[1], img2.shape[0]))

    img2 = toonify.toonify(img2.astype(int))

    img2 = cv2.resize(img2, (parsed.shape[1], parsed.shape[0]))
    back_img = cv2.resize(back_img, (parsed.shape[1], parsed.shape[0]))
    inner_img = cv2.resize(inner_img, (parsed.shape[1], parsed.shape[0]))

    colored = np.where(parsed != 0)
    xmin = np.min(colored[1])
    xmax = np.max(colored[1])
    human_width = xmax - xmin
    human = img2[:, xmin:xmax, :]
    parsed = parsed[:, xmin:xmax]

    human2 = cv2.resize(
        human,
        (int(0.8 * back_img.shape[1]),
         int((0.8 * back_img.shape[1] / human.shape[1]) * human.shape[0])))
    human2 = cv2.resize(human2, (int(
        (0.8 * back_img.shape[0] / human.shape[0]) * human.shape[1]),
                                 int(0.8 * back_img.shape[0])))
    parsed2 = cv2.resize(parsed.astype(np.uint8),
                         (human2.shape[1], human2.shape[0]))

    back_img2 = back_img.copy()
    back_img2 = back_img2[:, :, :3]

    # back_img2[back_img[:, :, 3] < 250] = [18, 109, 242]
    back_img2[back_img[:, :, 3] < 250] = inner_img[:, :, :3][back_img[:, :,
                                                                      3] < 250]

    padx = int((back_img2.shape[1] - human2.shape[1]) / 2)
    pady = int((back_img2.shape[0] - human2.shape[0]) / 2)
    cut = back_img2[pady:pady + human2.shape[0],
                    padx:padx + human2.shape[1], :]
    cut[parsed2 != 0] = human2[parsed2 != 0]
    back_img2[pady:pady + human2.shape[0],
              padx:padx + human2.shape[1], :] = cut

    back_img3 = back_img.copy()

    top = int(back_img3.shape[0] * 0.5)
    for i in range(top, pady + human2.shape[0]):
        for j in range(padx, padx + human2.shape[1]):
            if back_img3[i, j, 3] < 250 and parsed2[i - pady, j - padx] != 0:
                back_img3[i, j, :3] = human2[i - pady, j - padx, :]
            elif back_img3[i, j, 3] < 250:
                # back_img3[i, j, :3] = [18, 109, 242]
                back_img3[i, j, :3] = inner_img[i, j, :3]
            back_img3[i, j, 3] = 255
    # back_img3[back_img3[:, :, 3] < 250] = [18, 109, 242, 255]
    back_img3[back_img3[:, :, 3] < 250] = inner_img[back_img3[:, :, 3] < 250]

    back_img2[top:, :, :3] = back_img3[top:, :, :3]
    return back_img2
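Note that both chained cv2.resize calls above compute their target size from the original human crop, so only the second (height-based) scaling determines the final dimensions. If the intent is to fit the crop inside 80% of the frame in both directions, a single aspect-preserving computation such as the sketch below would make that explicit; it is an illustrative alternative, not the project's helper.

# Sketch: fit a crop inside a fraction of the frame while preserving aspect.
import cv2

def fit_inside(src, frame_shape, fraction=0.8):
    max_w = int(frame_shape[1] * fraction)
    max_h = int(frame_shape[0] * fraction)
    scale = min(max_w / src.shape[1], max_h / src.shape[0])
    new_size = (int(src.shape[1] * scale), int(src.shape[0] * scale))
    return cv2.resize(src, new_size)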
Example No. 11
def apply(main_img):

    main_img = cv2.cvtColor(main_img, cv2.COLOR_BGR2RGB)
    overlay = cv2.imread("overlay8.png", cv2.IMREAD_UNCHANGED)
    left_overlay = cv2.imread("left_overlay2.png", cv2.IMREAD_UNCHANGED)
    right_overlay = cv2.imread("right_overlay2.png", cv2.IMREAD_UNCHANGED)
    stars = cv2.imread("stars.png", cv2.IMREAD_UNCHANGED)

    if main_img.shape[0] > 1080:
        main_img = imutils.resize(main_img, height=1080)
    if main_img.shape[1] > 1080:
        main_img = imutils.resize(main_img, width=1080)

    left_overlay = cv2.resize(left_overlay, (int(
        (main_img.shape[0] / left_overlay.shape[0]) * left_overlay.shape[1]),
                                             main_img.shape[0]),
                              interpolation=cv2.INTER_AREA)
    right_overlay = cv2.resize(right_overlay, (int(
        (main_img.shape[0] / right_overlay.shape[0]) * right_overlay.shape[1]),
                                               main_img.shape[0]),
                               interpolation=cv2.INTER_AREA)
    left_overlay = left_overlay[:, :main_img.shape[1], :]
    right_overlay = right_overlay[:, -main_img.shape[1]:, :]
    if right_overlay.shape[1] < main_img.shape[1]:
        right_overlay = np.concatenate((np.zeros(
            (main_img.shape[0], main_img.shape[1] - right_overlay.shape[1],
             4)), right_overlay),
                                       axis=1)
    ratio = main_img.shape[1] / stars.shape[1] if main_img.shape[
        1] < stars.shape[1] else 1

    ratio = main_img.shape[0] / stars.shape[0] if main_img.shape[
        0] < stars.shape[0] else 1

    stars = cv2.resize(
        stars, (int(stars.shape[1] * ratio), int(stars.shape[0] * ratio)),
        interpolation=cv2.INTER_AREA)

    features = extract(main_img)

    left_mean = np.mean(features["Right_Eye"], axis=0)
    right_mean = np.mean(features["Left_Eye"], axis=0)
    eyes = np.concatenate((features["Right_Eye"], features["Left_Eye"]))

    l = left_mean[0]
    r = right_mean[0]

    u = (left_mean[1] + right_mean[1]) / 2
    mid = (r + l) / 2
    a_w = r - l

    pos_x = l
    pos_y = u

    angle = math.degrees(math.atan((left_mean[1] - right_mean[1]) / (l - r)))

    # Downscale the image so segmentation runs faster
    img_small = imutils.resize(main_img, width=500, inter=cv2.INTER_CUBIC)

    # Run the segmentation parser on the smaller image to get the mask
    mask = human_parser.parse(img_small)

    # Upscale the mask to match the original image size
    body_parts = cv2.resize(
        mask.astype(np.uint8), (main_img.shape[1], main_img.shape[0]))

    no_clothes = np.array(
        np.where(np.isin(body_parts, [3, 5, 6, 7, 8, 9, 10, 11, 12], invert=True)))
    no_background = np.array(np.where(body_parts != [0]))

    (over, ratios) = resizeAndPad(overlay,
                                  org_shape=main_img.shape,
                                  padColor=0,
                                  sq_width=a_w,
                                  sq_height=0)

    pos_x = int(pos_x - 628 * ratios[0])
    pos_y = int(pos_y - 1240 * ratios[0])

    if pos_x < 0:
        over = over[:, -pos_x:, :]
        pos_x = 0
    if pos_y < 0:
        over = over[-pos_y:, :, :]
        pos_y = 0

    overlay_layer = np.zeros(
        (main_img.shape[0], main_img.shape[1], main_img.shape[2] + 1))

    overlay_layer = overlay_transparent(overlay_layer, over, pos_x, pos_y)

    overlay_layer = rotate_image(overlay_layer, angle, ((r + l) / 2, u))


    def filter_sides(side, no_back):
        no_backnew = [[], []]
        for i in range(len(no_back[0])):
            if no_back[0][i] < side.shape[0] and no_back[1][i] < side.shape[1]:
                no_backnew[0].append(no_back[0][i])
                no_backnew[1].append(no_back[1][i])
        return no_backnew

    stars_back = filter_sides(stars, no_clothes)
    stars[stars_back[0], stars_back[1], :] = 0

    left_back = filter_sides(left_overlay, no_background)
    right_back = filter_sides(right_overlay, no_background)
    left_overlay[left_back[0], left_back[1], :] = 0
    right_overlay[right_back[0], right_back[1], :] = 0

    # Assumed composition order: base image, then the rotated overlay layer,
    # then the left/right side overlays.
    final = overlay_transparent(main_img, overlay_layer, 0, 0)
    final = overlay_transparent(final, left_overlay, 0, 0)
    final = overlay_transparent(final, right_overlay, 0, 0)

    return final
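rotate_image and resizeAndPad are project helpers not shown in this listing. Assuming rotate_image turns the overlay layer around the supplied pivot (the midpoint between the eyes) by the computed angle, it most likely wraps cv2.getRotationMatrix2D; the sketch below is written under that assumption.

# Hypothetical sketch of rotate_image(img, angle, center): rotate around a
# pivot while keeping the original canvas size.
import cv2

def rotate_image_sketch(img, angle, center):
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    h, w = img.shape[:2]
    return cv2.warpAffine(img, M, (w, h))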
Example No. 12
def duck_gif(main_img):

    size_main = (main_img.shape[1], main_img.shape[0])

    body_parts = human_parser.parse(main_img)
    background = np.array(np.where(body_parts == 0))

    # plain background
    bg = Image.open("media/files/frame-leaves-Background-05.png"
                    )  # path to orange background
    bg = bg.resize(size_main)

    img1 = bg.copy()  #1.png

    #elements
    p1 = Image.open("media/files/frame-leaves-02.png")  #path to back leaves
    p2 = Image.open("media/files/frame-leaves-03.png")  #path to front leaves
    p1 = p1.resize(size_main)
    p2 = p2.resize(size_main)

    flowerup = Image.open("media/files/f_up.png")  #(180,170)
    flowerd = Image.open("media/files/f_d.png")  #(80,80)
    bird = Image.open("media/files/bird.png")  #(350,250)
    rainbow = Image.open("media/files/rainbow.png")  #(250,350)
    bubbles = Image.open("media/files/bubbles.png")
    org_flowerup = flowerup
    org_flowerd = flowerd
    org_bird = bird
    org_rainbow = rainbow
    org_bubbles = bubbles
    #pasting leaves on background
    bg.paste(p1, (0, 0), p1)
    img2 = bg.copy()  #2.png
    #changing background of image
    bg = tocv(bg)
    bg = cv2.resize(bg, (main_img.shape[1], main_img.shape[0]))
    main_img[list(background)] = bg[list(background)]

    bg = topil(main_img)
    #pasting second on the image
    bg.paste(p2, (0, 0), p2)
    img3 = bg.copy()  #3.png

    # Frame 1
    bg.paste(rainbow, (-10, size_main[1] - 250), rainbow)
    bg.paste(bubbles, (0, 0), bubbles)
    bg.paste(flowerd, (70, size_main[1] - 130), flowerd)
    bg.paste(bird, (size_main[0] - 180, size_main[1] - 230), bird)
    bg.paste(flowerup, (size_main[0] - 200, 10), flowerup)
    img4 = bg.copy()  #4.png
    flowerup = org_flowerup
    flowerd = org_flowerd
    bird = org_bird
    rainbow = org_rainbow
    bubbles = org_bubbles

    # FRAME 2
    bg = img3.copy()
    flowerup = flowerup.rotate(50, PIL.Image.NEAREST, expand=1)
    flowerd = flowerd.rotate(50, PIL.Image.NEAREST, expand=1)
    bird = bird.rotate(-10, PIL.Image.NEAREST, expand=1)
    bubbles = bubbles.rotate(10, PIL.Image.NEAREST, expand=1)
    rainbow = rainbow.rotate(-5, PIL.Image.NEAREST, expand=1)

    bg.paste(rainbow, (-25, size_main[1] - 250), rainbow)
    bg.paste(bubbles, (0, 0), bubbles)
    bg.paste(flowerd, (70, size_main[1] - 130), flowerd)
    bg.paste(bird, (size_main[0] - 180, size_main[1] - 230), bird)
    bg.paste(flowerup, (size_main[0] - 200, 10), flowerup)
    img5 = bg.copy()

    # FRAME 3
    flowerup = org_flowerup
    flowerd = org_flowerd
    bird = org_bird
    rainbow = org_rainbow
    bubbles = org_bubbles

    bg = img3.copy()

    flowerup = flowerup.rotate(80, PIL.Image.NEAREST, expand=1)
    flowerd = flowerd.rotate(80, PIL.Image.NEAREST, expand=1)
    bird = bird.rotate(10, PIL.Image.NEAREST, expand=1)
    bubbles = bubbles.rotate(15, PIL.Image.NEAREST, expand=1)
    rainbow = rainbow.rotate(5, PIL.Image.NEAREST, expand=1)

    bg.paste(rainbow, (-25, size_main[1] - 250), rainbow)
    bg.paste(bubbles, (0, 0), bubbles)
    bg.paste(flowerd, (70, size_main[1] - 130), flowerd)
    bg.paste(bird, (size_main[0] - 180, size_main[1] - 230), bird)
    bg.paste(flowerup, (size_main[0] - 200, 10), flowerup)

    img6 = bg.copy()

    img4 = tocv(img4, False)
    img5 = tocv(img5, False)
    img6 = tocv(img6, False)

    images = [img4, img5, img6]  # frames for the GIF
    fobj = io.BytesIO(b"")
    imageio.mimsave(fobj, images, 'GIF', duration=0.2)
    binData = fobj.getvalue()

    return binData
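tocv and topil convert between Pillow and OpenCV images. The sketches below assume the usual RGB/BGR channel swap; interpreting the boolean flag on tocv as "convert to BGR" is a guess, though it would explain why the GIF frames are converted with it set to False so they stay in RGB order for imageio.

# Hypothetical sketches of topil / tocv (channel-order handling is assumed).
import cv2
import numpy as np
from PIL import Image

def topil_sketch(cv_img):
    return Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))

def tocv_sketch(pil_img, to_bgr=True):
    arr = np.array(pil_img.convert("RGB"))
    return cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) if to_bgr else arr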