-
Notifications
You must be signed in to change notification settings - Fork 0
/
Main.py
100 lines (88 loc) · 3.39 KB
/
Main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import numpy as np, cv2
import transparency as tp
import superimpose as si
import cover as c
def play():
    """Play video/movie.mp4 (top) above a strip of five avatar streams (bottom),
    superimposing each avatar's predicted-emotion emoticon and label.

    Runs until frame 16200, end of stream, or the user presses 'q'.
    """
    avatar_path = ["Alpaca","Cheetah","Leopard","Suricate","Tiger"]
    avatars = [0, 0, 0, 0, 0]          # latest frame image for each avatar
    avatar_emotion = [0, 0, 0, 0, 0]   # per-avatar queue of [frame, label] pairs
    emotion = ["angry","disgusted","happy","nervous","neutral","sad","surprised"]
    emotion_img = loadEmotion(emotion)  # pre-load emoticon images once
    for i in range(len(avatars)):
        avatar_emotion[i] = parseArff(avatar_path[i])  # get correct answers
    cap = cv2.VideoCapture("video/movie.mp4")
    video_w = 800
    video_h = 400
    frame_number = 0
    view = np.zeros((560, 800, 4), np.uint8)
    while cap.isOpened():
        frame_number += 1
        if frame_number > 16200:
            break
        print(frame_number)  # parenthesized: valid on both Python 2 and 3
        ret, videoframe = cap.read()
        if not ret:
            # End of stream before frame 16200: the original crashed here by
            # passing None to cv2.resize.
            break
        videoframe = cv2.resize(videoframe, (video_w, video_h))  # upper part -- video
        if frame_number % 5 == 1:
            for i in range(len(avatars)):
                avatars[i] = prepareFrame(avatar_path[i], frame_number)  # lower part -- 5 avatars
        h1, w1 = videoframe.shape[:2]
        h2, w2 = avatars[0].shape[:2]
        # '//' keeps the gap integral; '/' yields a float on Python 3 and
        # breaks the slice arithmetic below.
        dif = video_w // 5 - w2
        view[:h1, :w1, :3] = videoframe  # attach video to the top of the view
        if frame_number % 5 == 1:
            for i in range(len(avatars)):  # superimpose emoticons
                ss = getEmotion(avatar_emotion, i, frame_number)  # get correct emotion
                avatars[i] = c.cover(avatars[i], getEmotionImg(emotion, emotion_img, ss), 0.5, 0.5, 0, 90)
                cv2.putText(avatars[i], avatar_path[i] + " " + ss, (0, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
                view[h1:h1 + h2, i * (w2 + dif):(i + 1) * (w2 + dif) - dif] = avatars[i]  # attach to view
        cv2.imshow("test", view)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def getEmotion(avatar_emotion, i, frame_number):
    """Return the emotion label active at frame_number for avatar i.

    avatar_emotion[i] is a queue of [start_frame, label] pairs ordered by
    start frame; the head entry is discarded (at most one per call) once
    the next entry's interval has begun, then the head label is returned.
    """
    queue = avatar_emotion[i]
    if len(queue) > 1:
        next_start = int(queue[1][0])
        # The head entry is out of date once the next interval has started.
        if frame_number > next_start - 1:
            queue.pop(0)
    return queue[0][1]
def loadEmotion(emotion):
    """Load one emoticon image per label in `emotion` and return them as a
    list (same order), each given an opaque alpha channel."""
    images = []
    for name in emotion:
        raw = cv2.imread("emotion/" + name + ".jpg", cv2.IMREAD_UNCHANGED)
        images.append(imgAddAlphaChannel(raw))
    return images
def getEmotionImg(emotion_arr, emotion_img, emotion):
    """Return the image from emotion_img whose parallel label in emotion_arr
    equals `emotion`, or None when no label matches."""
    for label, img in zip(emotion_arr, emotion_img):
        if label == emotion:
            return img
    return None
def parseArff(animal):
    """Parse arff/<animal>.predict.arff into a list of [start_frame, label]
    pairs, keeping only lines where the label changes.

    Each input line is comma-separated; the first field is the frame number
    and the last field is the predicted label.

    Returns:
        list of [first_field, stripped_label] pairs, one per label run.
    """
    path = "arff/" + animal + ".predict.arff"
    instance = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(path, "r") as f:
        for line in f:
            features = line.split(",")
            # Strip BEFORE comparing: the original compared "label\n" against
            # the stripped stored label, so the run-collapsing never fired and
            # every line was appended.
            label = features[-1].strip()
            if not instance or label != instance[-1][1]:
                instance.append([features[0], label])
    return instance
def imgAddAlphaChannel(img):
    """Append a fully opaque (255) alpha channel to `img` and return the
    stacked result; the original pixel data is unchanged."""
    height, width = img.shape[:2]
    alpha = np.full((height, width, 1), 255, dtype=np.uint8)
    return np.dstack((img, alpha))
def prepareFrame(animal, index):
    """Load avatar frame `index` for `animal` and return it as a 160x160
    4-channel image with the 120x160 avatar left-aligned.

    Falls back to earlier frames when the requested frame image is missing.

    Raises:
        IOError: when no frame exists for any index down to 0 (the original
            recursed on ever-more-negative indices forever in this case).
    """
    avatar_w = 210
    avatar_h = 210
    # Leopard's frame files use the prefix "frame" instead of "frames".
    path = "./users/" + animal + "/avatar1/frames" + str(index).rjust(6, '0') + ".jpg"
    if animal == "Leopard":
        path = "./users/" + animal + "/avatar1/frame" + str(index).rjust(6, '0') + ".jpg"
    avatar = cv2.imread(path)
    if avatar is None:
        if index <= 0:
            # Guard against unbounded recursion when no frames exist at all.
            raise IOError("no avatar frames found for " + animal)
        return prepareFrame(animal, index - 1)
    avatar = cv2.resize(avatar, (avatar_w, avatar_h))
    # tp.openFile presumably adds transparency/alpha — TODO confirm against
    # the transparency module.
    avatar = tp.openFile(avatar)
    avatar = cv2.resize(avatar, (120, 160))
    ret = np.zeros((160, 160, 4), np.uint8)
    ret[:160, :120] = avatar
    return ret
play()