def rf(self):
     """Rebuild the UI: stage, cover strip, fx, scrollbar and controls.

     Side effects only: sizes and colors the stage, wires key handling,
     instantiates the cover/fx/scrollbar/control helpers, then shows
     the widget.  NOTE(review): looks like a Clutter-style stage API
     ('key-press-event', set_color) -- confirm against the toolkit used.
     """
     self.stage = self.get_stage()
     self.stage.set_size(WIDTH,HEIGHT)
     self.stage.set_color(BLACK_BG) 
     self.stage.connect('key-press-event', self.parseKeyPress)
     # start with the cover at index 2 selected; fx and scrollbar are
     # initialised from this same index so they stay in sync
     self.index_cover_selected = 2
     self.cover = cover.cover(self)
     self.fx = fx_default.fx(self)
     self.fx.init_fx(self, self.get_covers(), self.get_index_cover_selected())
     self.scrollbar = scrollbar.scrollbar(self)
     self.scrollbar.init_scrollbar(self, self.get_index_cover_selected(), self.get_size_covers())
     self.control = control.control(self)
     self.show()
Example #2
0
def play():
    """Play the movie with five avatar streams and emotion overlays below it.

    Side effects only: opens ../video/movie.mp4, composites video + avatars +
    emoticon overlays into one window, and exits on 'q' or after frame 16200.

    Fixes vs. original: Python 2 `print` statement replaced with the
    version-portable `print(...)`; `cap.read()` success flag is now checked
    (resizing a failed-read `None` frame crashed at end of stream); slot-width
    arithmetic uses `//` so indices stay integers under Python 3; a dead
    `np.zeros` assignment to `view` (immediately overwritten) was removed.
    """
    avatar_path = ["Alpaca", "Cheetah", "Leopard", "Suricate", "Tiger"]
    avatars = [0] * len(avatar_path)  # current image per avatar, refreshed below
    # per-avatar emotion annotations parsed from the .arff ground-truth files
    avatar_emotion = [parseArff(path) for path in avatar_path]
    emotion = ["angry", "disgusted", "happy", "nervous", "neutral", "sad", "surprised"]
    emotion_img = loadEmotion(emotion)  # pre-load emoticon images once

    cap = cv2.VideoCapture("../video/movie.mp4")
    video_w = 800
    video_h = 400
    frame_number = 0
    avatarview = np.zeros((160, 800, 4), np.uint8)  # lower strip: 5 avatar slots
    view = cv2.resize(cv2.imread("images.jpg", cv2.IMREAD_UNCHANGED), (900, 660))
    while cap.isOpened():
        frame_number += 1
        if frame_number > 16200:
            break
        print(frame_number)
        ret, videoframe = cap.read()
        if not ret:
            # stream exhausted or read failed -- stop instead of crashing on None
            break
        videoframe = cv2.resize(videoframe, (video_w, video_h))  # upper part -- video

        if frame_number % 5 == 1:
            # refresh the 5 avatar frames every 5th video frame
            for i in range(len(avatars)):
                avatars[i] = prepareFrame(avatar_path[i], frame_number)

        h1, w1 = videoframe.shape[:2]
        h2, w2 = avatars[0].shape[:2]
        # integer gap so the 5 slots tile video_w exactly (// keeps Py3 ints)
        dif = video_w // 5 - w2
        view[50:h1 + 50, 50:w1 + 50, :3] = videoframe  # attach video to view
        if frame_number % 50 == 1:
            # superimpose emoticons every 50th frame
            for i in range(len(avatars)):
                ss = getEmotion(avatar_emotion, i, frame_number)  # annotated emotion
                avatars[i] = c.cover(avatars[i], getEmotionImg(emotion, emotion_img, ss), 0.5, 0.5, 0, 90)
                cv2.putText(avatars[i], avatar_path[i] + "   " + ss, (0, 150),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
                avatarview[:h2, i * (w2 + dif):(i + 1) * (w2 + dif) - dif] = avatars[i]

        si.superimpose(view, avatarview, 1, 1, 450, 50)
        cv2.imshow("test", view)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #3
0
def mdl_calc(codetable, dataset, singleton_table):
    """Compute the MDL encoded length of *dataset* under *codetable*.

    Arguments:
        codetable {dict} -- input codetable
        dataset {tuple} -- tuple of tuples containing data
        singleton_table {dict} -- dictionary containing singleton info

    Returns:
        [float] -- MDL length as a number
    """
    cover_ct, cover_data = cover(codetable, dataset)
    # codetable values are (support, total_length_pattern, timespan_of_pattern)
    cover_usage_total = sum(entry[0] for entry in cover_ct.values())
    singleton_total = sum(entry[0] for entry in singleton_table.values())

    total = ln(1)  # Ln(|D|)
    total += ln(len(cover_data))  # Ln(|A|)
    # Sigma {s[equence] in D}: Ln(|s|)
    total += sum(ln(len(seq)) for seq in cover_data)
    # L(C_p|CT)
    total += sum(entry[0] * x_st(entry[0], cover_usage_total)
                 for entry in cover_ct.values())
    # Sigma {s[equence] in D}: Ln(Omega_s) + lu(|D^s|, |Omega_s|)
    total += sum(ln(len(set(row))) + lu(len(row), len(set(row)))
                 for row in dataset)

    # only non-singleton entries (pattern length > 1) count as patterns P
    patterns = [(key, val) for key, val in cover_ct.items() if val[1] > 1]
    pattern_count = len(patterns)
    pattern_usage = sum(val[0] for _, val in patterns)
    total += ln(pattern_count + 1)  # Ln(|P|+1)
    total += ln(pattern_usage + 1)  # Ln(usage(P)+1)
    total += lu(pattern_usage, pattern_count)  # lu(usage(P),|P|)
    # Sigma {p[attern] in CT}: L(p given CT)
    total += sum(
        ln(val[2]) + sum(log2(col) for col in measure_columns(key)) +
        element_in__p(key, singleton_total, singleton_table)
        for key, val in patterns)

    return total
Example #4
0
# parse CLI arguments -- presumably `ap` is an argparse.ArgumentParser
# defined earlier in the file; TODO confirm in the full source
args = vars(ap.parse_args())

# lookup table loaded from the CSV given by --db: first column is the key,
# remaining columns are the record
db = {}

for l in csv.reader(open(args["db"])):
    db[l[0]] = l[1:]

# SIFT and Hamming matching are mutually exclusive here: Hamming is used
# exactly when --sift is 0
useSIFT = args["sift"] > 0
useHamming = args["sift"] == 0
ratio = 0.7  # ratio-test threshold passed to the matcher
minMatches = 40  # minimum keypoint matches required to accept a cover

if useSIFT:
    # SIFT produces more candidate matches, so require more agreement
    minMatches = 50

cd = cover(useSIFT=useSIFT)
cv = matchCover(cd,
                glob.glob(args['covers'] + "/*.png"),
                ratio=ratio,
                minMatches=minMatches,
                useHamming=useHamming)

# describe the query image (grayscale keypoints + descriptors) and search
queryImage = cv2.imread(args["query"])
gray = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)
(queryKps, queryDescs) = cd.describe(gray)

results = cv.search(queryKps, queryDescs)

cv2.imshow("Query", queryImage)

# NOTE(review): this snippet is truncated -- the body of this `if` is
# missing from the visible chunk
if len(results) == 0:
Example #5
0
def main():
    """Swap faces between two images (document1/document2) in both directions.

    Uses face landmarks to build a mask ("cover") for each face, warps face 1
    onto picture 2 (and vice versa) with a perspective transform, and blends
    with cv2.seamlessClone.  Displays intermediate windows and writes one
    composite to disk.  `document1`/`document2` and `face_character`/`cover`
    are defined elsewhere in the file.
    """
    pic1 = cv2.imread(document1)
    pic2 = cv2.imread(document2)
    h1, w1 = pic1.shape[:2]
    h2, w2 = pic2.shape[:2]
    # face landmark coordinates, then (face crop, landmark points, mask) per pic
    character_coordinate1 = face_character(pic1)
    character_coordinate2 = face_character(pic2)
    f1, p1, cover1 = cover(pic1, character_coordinate1)
    f2, p2, cover2 = cover(pic2, character_coordinate2)
    #cv2.imshow('face1', f1)
    #cv2.imshow('face2', f2)
    #cv2.imshow('cover1', cover1)
    #cv2.imshow('cover2', cover2)

    #cv2.imwrite('D://Python//facechange//pic//face1.jpg', f1)
    #cv2.imwrite('D://Python//facechange//pic//face2.jpg', f2)
    #cv2.imwrite('D://Python//facechange//pic//cover1.jpg', cover1)
    #cv2.imwrite('D://Python//facechange//pic//cover2.jpg', cover2)

    # --- direction 1: warp face 1 (and its mask) into picture 2's frame ---
    M1 = cv2.getPerspectiveTransform(p1, p2)
    d11 = cv2.warpPerspective(f1, M1, (w2, h2))
    cover1_2 = cv2.warpPerspective(cover1, M1, (w2, h2))
    # NOTE(review): 3-value unpacking matches OpenCV 3.x findContours;
    # OpenCV 4 returns only (contours, hierarchy) -- confirm the cv2 version
    c1, hier1, rr1 = cv2.findContours(cover1_2.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    # keeps the bounding rect of the LAST contour only -- assumes the warped
    # mask is a single blob; x11..h11 are undefined if no contour is found
    for i in hier1:
        x11, y11, w11, h11 = cv2.boundingRect(i)
    #cv2.rectangle(cover1_2, (x11,y11),(x11+w11, y11+h11), (255, 255, 255), 2)
    #cv2.line(cover1_2, (x11, y11), (x11+w11, y11+h11), (255, 255, 255), 1)
    #cv2.line(cover1_2, (x11+w11, y11), (x11, y11+h11), (255, 255, 255), 1)
    # center of the face bounding box -- seamlessClone's anchor point
    dot1 = (x11 + w11 // 2, y11 + h11 // 2)
    #cv2.circle(cover1_2, dot1, 3, (0, 0, 255), -1)

    #cv2.imshow('cover11', cover1_2)
    #cv2.imshow('cover2', d11)

    #cv2.imwrite('D://Python//facechange//pic//cover11.jpg', cover1_2)
    #cv2.imwrite('D://Python//facechange//pic//cover21.jpg', d11)

    # composite = warped face + (pic2 with the face region masked out)
    cover_in1_2 = cv2.bitwise_not(cover1_2)
    d12 = cv2.bitwise_and(pic2, pic2, mask=cover_in1_2)
    d1_2 = cv2.add(d11, d12)
    pic1_2 = cv2.seamlessClone(d1_2, pic2, cover1_2, dot1, cv2.NORMAL_CLONE)
    cv2.imshow('d11', pic1)
    cv2.imshow('d1', pic1_2)
    cv2.imshow('d12', pic2)

    #cv2.imwrite('D://Python//facechange//pic//d11.jpg', pic1)
    #cv2.imwrite('D://Python//facechange//pic//d1.jpg', pic1_2)
    cv2.imwrite('D://Python//facechange//pic//Donald_Trumpandzhu.jpg', pic1_2)

    # --- direction 2: warp face 2 (and its mask) into picture 1's frame ---
    M2 = cv2.getPerspectiveTransform(p2, p1)
    d21 = cv2.warpPerspective(f2, M2, (w1, h1))
    cover2_1 = cv2.warpPerspective(cover2, M2, (w1, h1))
    c2, hier2, rr2 = cv2.findContours(cover2_1.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    for j in hier2:
        x21, y21, w21, h21 = cv2.boundingRect(j)
    # NOTE(review): this rectangle is drawn onto cover2_1 BEFORE it is used
    # as the seamlessClone mask below, so it mutates the blend region --
    # likely leftover debug code; confirm whether it should be commented out
    # like its direction-1 counterparts above
    cv2.rectangle(cover2_1, (x21, y21), (x21 + w21, y21 + h21),
                  (255, 255, 255), 2)
    # NOTE(review): these two lines draw on cover1_2 (the direction-1 mask)
    # using direction-2 coordinates -- looks like a copy-paste slip; the
    # intended target was presumably cover2_1 (or removal) -- TODO confirm
    cv2.line(cover1_2, (x21, y21), (x21 + w21, y21 + h21), (255, 255, 255), 1)
    cv2.line(cover1_2, (x21 + w21, y21), (x21, y21 + h21), (255, 255, 255), 1)
    dot2 = (x21 + w21 // 2, y21 + h21 // 2)
    # NOTE(review): this circle also mutates cover2_1 before it is used as
    # the clone mask -- same debug-leftover concern as the rectangle above
    cv2.circle(cover2_1, dot2, 3, (0, 0, 255), -1)
    cover_in2_1 = cv2.bitwise_not(cover2_1)
    d22 = cv2.bitwise_and(pic1, pic1, mask=cover_in2_1)
    d2_1 = cv2.add(d21, d22)
    pic2_1 = cv2.seamlessClone(d2_1, pic1, cover2_1, dot2, cv2.NORMAL_CLONE)
    #cv2.imshow('d1', cover1_2)
    #cv2.imshow('d2', d2_1)
    #cv2.imshow('d2', pic2_1)

    #cv2.imwrite('D://Python//facechange//pic//cover1_2.jpg', cover1_2)
    #cv2.imwrite('D://Python//facechange//pic//d21.jpg', d2_1)
    #cv2.imwrite('D://Python//facechange//pic//d22.jpg', pic2_1)

    # block until a key press; close windows only on 'q'
    if cv2.waitKey(0) == ord('q'):
        cv2.destroyAllWindows()