Example no. 1
0
    def process_one(self):
        if not self.to_process:
            return

        # input is an ascii string.
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)

        open_cv_image = numpy.array(input_img)
        open_cv_image = open_cv_image[:, :, ::-1].copy()

        output_img_cv = self.makeup_artist.apply_makeup(open_cv_image)

        img = cv2.cvtColor(output_img_cv, cv2.COLOR_BGR2RGB)
        output_img = Image.fromarray(img)

        ################## where the hard work is done ############
        # # output_img is an PIL image
        # output_img = self.makeup_artist.apply_makeup(input_img)

        # output_str is a base64 string in ascii
        output_str = pil_image_to_base64(output_img)

        # convert the base64 string in ascii to base64 string in _bytes_
        self.to_output.append(binascii.a2b_base64(output_str))
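Every snippet below calls base64_to_pil_image and pil_image_to_base64 without defining them. A minimal sketch of what such helpers presumably look like, assuming only the standard base64 module and Pillow (the projects' actual implementations are not shown in these examples):

import base64
from io import BytesIO

from PIL import Image


def base64_to_pil_image(base64_img):
    # Decode a base64-encoded ASCII string into a PIL Image.
    return Image.open(BytesIO(base64.b64decode(base64_img)))


def pil_image_to_base64(pil_image):
    # Encode a PIL Image as a base64 ASCII string (JPEG payload).
    buf = BytesIO()
    pil_image.save(buf, format="JPEG")
    return base64.b64encode(buf.getvalue())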
Example no. 2
0
    def process_one(self):
        if not self.to_process:
            return

        # input is an ascii string.
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)

        # output_img is a PIL image
        output_img = input_img

        # output_str is a base64 string in ascii
        output_str = pil_image_to_base64(output_img)

        # convert the base64 string in ascii to base64 string in _bytes_
        self.to_output.append(binascii.a2b_base64(output_str))
Example no. 3
0
    def process_one(self):
        if not self.to_process:
            return

        # input is an ascii string.
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)

        ################## where the hard work is done ############
        data = self.vp.process(input_img, self.id)

        # output_str is a base64 string in ascii
        #output_str = pil_image_to_base64(output_img)

        # convert the base64 string in ascii to base64 string in _bytes_
        self.to_output.append(data)
Example no. 4
0
		def answer(videoStrings):

			sampleLength = 10
			firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
			firstGauss = buildGauss(firstFrame, levels+1)[levels]
			sample = np.zeros((sampleLength, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
		
			idx = 0
			
			respRate = []	

			#pipeline = PipeLine(videoFrameRate)
			for i in range(len(videoStrings)):
				input_img = base64_to_pil_image(videoStrings[i])

				input_img = input_img.resize((320,240)) 

				frame  = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)
		
				detectionFrame = frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):int(realWidth-int(videoWidth/2)), :]


				sample[idx] = buildGauss(detectionFrame, levels+1)[levels]
			
				freqs, signals = applyFFT(sample, videoFrameRate)
				signals = bandPass(freqs, signals, (0.2, 0.8))
				respiratoryRate = searchFreq(freqs, signals, sample, videoFrameRate)

				#frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):(realWidth-int(videoWidth/2)), :] = outFrame
				
				idx = (idx + 1) % 10 		

				respRate.append(respiratoryRate)

			l = []
			a = max(respRate)
			b = mean(respRate)
			if b < 0:
				b = 5
			l.append(a)
			l.append(b)


			return mean(l)	
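For reference, the (0.2, 0.8) band kept by bandPass() above maps to breaths per minute simply by multiplying by 60:

# 0.2-0.8 Hz kept by bandPass() corresponds to 12-48 breaths per minute
low_bpm, high_bpm = 0.2 * 60, 0.8 * 60
print(low_bpm, high_bpm)  # 12.0 48.0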
Example no. 5
0
    def process_one(self):
        if not self.to_process:
            return

        # input is an ascii string. 
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)

        ################## where the hard work is done ############
        # output_img is a PIL image
        output_img = self.makeup_artist.apply_makeup(input_img)

        # output_str is a base64 string in ascii
        output_str = pil_image_to_base64(output_img)

        # convert the base64 string in ascii to base64 string in _bytes_
        self.to_output.append(binascii.a2b_base64(output_str))
Example no. 6
0
    def process_one(self):
        if not self.to_process or not self.volume:
            print("empty list")
            return
        # input is an ascii string.
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)
        v = self.volume.pop(0)

        ################## where the hard work is done ############
        # output_img is a PIL image
        output_img, self.ikon = self.makeup_artist.apply_makeup(
            input_img, self.ikon, v)

        # output_str is a base64 string in ascii
        output_str = pil_image_to_base64(output_img)

        # convert the base64 string in ascii to base64 string in _bytes_
        self.to_output.append(binascii.a2b_base64(output_str))
        print("to output list: {}".format(len(self.to_output)))
Example no. 7
0
    def process_one(self):
        if not self.to_process:
            return

        # input is an ascii string.
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)

        # frame = np.array(input_img)
        print("Type of input image", input_img)
        data1 = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
        size = (224, 224)
        image = ImageOps.fit(input_img, size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent
        image_array = np.asarray(image)
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
        data1[0] = normalized_image_array

        ################## where the hard work is done ############
        # output_img is a PIL image
        #image_array = cv2.resize(image_array,(300,150))
        #cv2.imshow("frame",image_array)
        # ret, jpeg = cv2.imencode('.jpg', image_array)
        # image_array = cv2.resize(jpeg, (300, 150))
        PIL_image = Image.fromarray(
            np.uint8(image_array)).convert('RGB').resize(size=(300, 150))
        b = BytesIO()
        PIL_image.save(b, format="jpeg")
        PIL_image = Image.open(b)
        #Image.open(BytesIO(base64.b64decode(base64_img))
        #output_img = self.makeup_artist.apply_makeup(jpeg)

        # output_str is a base64 string in ascii
        output_str = pil_image_to_base64(PIL_image)

        # convert the base64 string in ascii to base64 string in _bytes_
        self.to_output.append(binascii.a2b_base64(output_str))
Example no. 8
0
def upload():
    # dict = request.form
    # for key in dict:
    #     print('form key ' + dict[key])

    target = os.path.join(APP_ROOT, "ids/")
    if not os.path.isdir(target):
        os.mkdir(target)

    user_id = str(request.form['username'])
    model = os.path.join(target, user_id + "/")
    if not os.path.isdir(model):
        os.mkdir(model)

    images_data = request.form['canvasImage']
    json_data = json.loads(images_data)

    # print(image_data)
    # for i in range(len(image_data)):
    #     print(i)
    for i in range(0, 4):
        name = "image_" + str(i)
        print("Image Name : " + name)
        image_data = json_data[name]
        if image_data:
            content = image_data.split(';')[1]
            image_encoded = content.split(',')[1]
            input_img = base64_to_pil_image(image_encoded)

            basename = user_id + name
            suffix = datetime.now().strftime("%y%m%d_%H%M%S.jpg")
            filename = "_".join([basename, suffix])
            print("Saving to: " + model + filename)
            input_img.save(model + filename)

    return render_template("task.html")
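The split logic in upload() assumes each canvas entry is a data URL of the form data:image/jpeg;base64,<payload>. A minimal sketch of the same parsing on a hypothetical value:

# Hypothetical data URL, e.g. as produced by canvas.toDataURL("image/jpeg")
image_data = "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQ=="

content = image_data.split(';')[1]       # "base64,/9j/4AAQSkZJRgABAQ=="
image_encoded = content.split(',')[1]    # "/9j/4AAQSkZJRgABAQ==" (raw base64 payload)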
Example no. 9
0
    def get(self, user_index):
        # frame_text = str(frame_text)
        #print
        # if user_index == '1':
        # 	frame_text =  '/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCACWASwDASIAAhEBAxEB/8QAHQAAAQUBAQEBAAAAAAAAAAAAAAMEBQYHAggBCf/EAEIQAAEEAQIDBgMFBgMGBwAAAAEAAgMRBAUhBhIxB0FRYXGBEyKRFCMyobEIFTNSYsEkctEXQlOS4fFDRIKissLw/8QAGwEAAgMBAQEAAAAAAAAAAAAAAgMAAQQFBgf/xAAoEQACAgEFAAICAgIDAAAAAAAAAQIRAwQSITFBE1EUYSJCBTIjUqH/2gAMAwEAAhEDEQA/AM04A4o/c2oRyyP/AMNNUUw6AC9ne36Wt2hma9jXtdbSLBvqvL+OJIHDka4B/X1Ww9mXE41DBOjZbwcjFFxk7c0Wwr2O3oR5rzX+OzJP42z0Ovw7lvRpMTyT1TuJwJBLq9VFQyUaH0T2KUfzWSu/FnEkTOPM2gA7opKCagC49dlX4ZCPKu9SEWRtV2nKQqXJP477IAqj4qQge0uDeb81B4UxNKVx3AuukaYsmsY0aFetKWxroUbUHiydB3qVx5RQItMiAyewjyuF9+3ip/CokEqtYZHy7qwYDxYF7HxTooTIsunsaSASPorLhMa4UD+aq+nv6U6uitGmuDwOYEV+aenSM8icxYwAKAT4CgmuLtunXcufkds06ZcWCEISzWCEIUICEIUICEIUICEIUICEIUICEIUICEIUICEIUICEIUICEIUIfh3NlAbx/UnYJXRtbn03UIs/GcfiQuBIs05veD5KKLea2O39Cu4cdzdxI4DzK8LhkoNSvk9nOKao9I6BrMGqafDm47xyStsd9eIPobCmWZJ6gg+aw3s54mfpmeNFyZQYMl1xOJ6P8Pf9a8VrceYT0cPYr1Glz/NFNHndTgeKdLosceRvun2PP/Uq5jZJd8rv1Uljz0Nh9FuTMcolnxskNHW7UrjTDqqtjZJ28QpbFyh0s/omJiHZacWYEDfdSuNkNFWqxi5Wwt2ylcbJLqNpkWA2WrDyO6/qp3AyP6gqhhzEUp/BnNi3A2nxdiZIu2mzmwC4+itWmSt23VE0+cfLvXurRpuSbHzUfVPg/DNMvOHIC0EHZPO5ROnShzR81qVaQRsQseaNMfpZWqZ9QhCSbQQhChAQvhIHUgJOTKxYRcuTEz/M8BWk30U5JdsVQmL9e0OMkP1nBaRubyGf6prNxjwtBfxNfwtjXyyh36Iljm+kwXlgu2iYQq1J2kcFRGn67H1raN5/RqZ5HaxwdAaZlzzDxjir/wCRCJYMr/qwPyMX/ZFxQs/yO2jhqMfcYuXIfAhrf7lMJe2/CG0OjOJ/qn/6Ivxsv0V+Ti+zT0LIJ+2/NN/Z9MxWD+tzj/cJlN2zcQSk/CGND/ljv9bRrSTfqAergvGbYhYQ/tS4qyNv3lyjp8rGt/MBfDxjxHkC36xkkHahIUS0b9kinq14jeCQOqQkz8GL+LmwM/zSALCZNV1GexNmTP7jb7SLpJ5NviO+tolpI+y/8Aesl5E3CbibQYP4mq4/s6/0TDI4/wCGcfrml/8AkasYcyQn5QfzST8eWr3TFpcS+wHqpvqkaxldq/D0A+5imlPpQUY/tkxA4hunkD/MSsoyopWn5vG7TRwfzdD7FX8ONf1Is2R+n5wPuI7RFw/m8E1yZgfmLwAOvik8nVzRbGbvvtQ+VqDnWJXO33a1fL4wVn0VyfhJR5jmytexxBaeZpGxvyW38EcTt13So5JX1kQ1HO3+ruPoR+djuXnL7RI94DQa9KU9w3xZPw9qkeSXn4Un3cwB2LfH1HX/ALrp6TI8U6+zHqcPyx65R6cx52gj5gD5KShzGDq4CvNZph8QyTRtfFLzNcAQ5psEHon8eq5Dt2v2XfjK+ThSjRpMOoQN3D6/VSEGr4zSC6VoWWt1HINffEgdd0s3UJ3H+MT7pykhLjZrkHEOI2iZApKDizTmCjKB7rF482S/4xF+aXblyf8AEN926YsiQlwNxg4506KrlHuncfaXgw/heFhUecdgX37p3HmOPQlGsoqWNm7w9rkUQAYfonjO2uRgAjYeqweLKdYDeb23KfwzTu/DG8+oTFmYt416bpB276rEfunHyF0nLO3ziXcRvoeaxPHZmOqsd+/kpTGwdSf+HHdaYs7YDxI1yPt04wk6ZQaTt6JcdsXGkw21Qt9FmmDouqyV9z9VZ9M4U1WeqjAPomrLYl40iyf7S+MpRvreQ2+4PKTk434qnaWya5mUev3p3SuD2eaxLRLavyU3j9lGqTii53/KjU2A1FdlQk4g1WV3NNqMzq/mkJTeTVskm35DzXi4q+u7HtQH4pH+VBcHsflAJe6TzTFkl9gPYjPTqsgPzSk+6Rk1iiR8Xb1WgO7J4IyfiNkOxuykh2Y6aPxxE+qpyl6y4uBnztbbyn7yvO1w7XWEfK+1pUfZzo7P/Li/MJzHwJpDK/w7NvJL3MYtplP75e7ZnM7w2SkedlS7Nx5T58pWuRcJaSwcogZuO4dQlY9H0SIiO4GluxbYtVufhLSMojGpyDbDmo+SeQafrUw2wn+61mPT9IiZ8R7og0D8TiAB7p/h4enyt54XRyAd7SCPcqJtkckjKsPQNekcP8PQ9VcNI4E1zKjaeQUeuyv+Fp+KacWN8LAVr0/HiiiAY0VSuc/jVsGP/JLajN8Ps01I/wAWRoKlIezOTb4mWB6BaB0QkflT8RrWkj6ykM7NscfjySV2zs4wRduu/Eq6IQ/lZAvxMZQ8rszxHttlEjoo1/ZrE11CAfVachEtXNdlPSx8Z+AjJCBRN30o9VzHG97rLj1TkYriRysG/encTIIgejiNt+i8HFJI95T9G7cSQNBvlb1PiU2mxTO4RwRucXEABu+6sGmaPqOtZTcXChc7xJvlb6lapwvwDi6Oxs0kYkySPmkIuj5eC2YNPKfLMufOsRCcD6NquLpkcGpuLXDeNhG7WdwKuePpjq3cbUti6WyMl1We8mlJY+E296XXxxcVRxss97shI9ILiN7HqncWhl1c1kDzU/DiCunXyT6HGYGixuOiZRnbRAwcPMfuW3XcpDH4fgHWJvpSm4sdlWGgJ/BA0dB1TEhTlZD4/D0AoiJvnspXF4dgsD4d+ykIIgpGANb1q0aVgNjPF4fxhX3bfoprE0GAEVE2/S0riNisBz22e5TmHDz0GAnyTVFCZSEMTRYNvuwPZTeFosII+QfRSOmaLn5RDYMW9+9wF/VXLSOzziTI5ScKOJp/33TMI/Ik/kmxj9iJTd0iv6dozHEDl7+4K98M8PxmVvxI9vRP8Ds6zsd7ftGTi8o6lrnE/oFbNO0aHTwKcHkd/LScpQir9FSxZcjpIUxNJ
xsdgAjA9E8bFGzo0L7zFHMVncpS7Zuhp4Q6R9LWnq0fRcOghd1YN/Jdh3ivqFWvRrxxfaIPWtPa2F0rG7AXdLDeO+2zhjhF+RiNjnzs+B5jfBG3lDXDrzOOw9rW+8Q6vHoWi5mqvAcceJzmNPRz6+UH1NLwP2hCfUdXzc/J/i5Uz5300NBc5xc6gOm5XS0mN5Yty6RyNZHHinUeyR4m/av4sJLdF0DTcNu4Jm55n+xBaB7grM+KP2hu1LiGMwScTT4MRN1gf4ck/wCdlO9rpQGtYrgXHlpVHOYQTv06rU4xj0hUXY4l4m1aXJflSallOnl2fIZnFzr62SbK5gznE/NXMohzg127vdOMaQBwFIHJjlFfRZsPLkNEvVs0HUMvGnZlYuTLDKKqSN5a4ehG4VJ09wcarvVv0VtlruUbb9UcZNi5xpG48D9q3aHpgZFFxJk5EYNlmXU9+XM8FwHkHBeley7tJyuKLwdaxYIcjl5o5IbDH+RBJo+9Hy7/ACJwuN224EGtivQPZW1zdQxTE0WHgGlpnpoZsct30YvmlgmpRPQfmhfG7tF+C+rzB6aLtWCEIULBCEKEPwVlyG/DpoA323T3h3SW6zn/AGfKyGxRM+Zxr5qvuCg3Nawin9/cLVr4K4U1jU9Ri1BkJhxoyfvH/wC/Xc0f36detUvIabHumuD1+eW2Lp8mraBBoOi4wx8KB9DcuoWfclTkesYrdhFIa8aUZiaA9rA1xdt5qSi0NpHTceK70Ft8OBNtu2Ls1uMfMMc34WlRr4BJbA0b9C61zHoTCAC205boELgA6hfmmqhLEW8UvDquIH3/ANUuziPMkb8rmj0aErBw1isqmN3UhDoMBFcooeCJfoB0Mma9mPpplcPQBLRaxmEkGaY9+xKk4dDhaBUd/wB09g0aBoosrwRqxTaIlmq5IcGuDyOpsqSxtQeaPK415KQi0aDb7oV5qRxtKgab+GPojQt8HOnankWLiNLQOFnfbmiSTNixqNESGiq1h4EbapgVhwII28u6dFiZu1wanoDdMgcx03EOORe4aHH+y0/Rdf0RkTcePUhIR38p/wBFgensiBbRqlbdKyvgkFrj9E9JSVGJuUJbrNsjminHNE8OC66bKlaJq2ZbWRAvvuJVmhhz5shmXLJHGGt5eTl5jR673t3fRVLFXNmnFq5S/i42/wBD9CEJRvQWvoK+L6OqhfBSO1HLazTBjP3aQXV5+P8A+8V5C49IM0hDfmsr0v2yao/GyDjSS8oEYLR6jr+v0K8xcY5jHmQ89r0enSx6WP75PK53LLqpt/ZkevCQ8/KQFSM/GkLj951V416QOc/5h7qn5TzzH81ky5HZuwYURDMMl1HoncGEAbG3uvomafl5q9E6ilYzci1kllaOlHDGiT03GaAAVcdHx2ktHKB6qm4mSGmxQVh0zUi2gXV6FFHJ+xc8KXhqvDsMbXN+bfxC3Ls11IabmQZLZPwHpfUd4/NebNF1inNId0HutL4W4gex7Ke5dLT51W183wcvUaa+UezsHNgzsdk8LrDxacLGODeOn4ZY2WUujJALT/Za9p2oY2p4rMvFkDmO/I+C5Os0ctNK1zF+m7R6v5VsnxIcoQhYToAhCFCH4l8HdlmRmSsz9eaWQkBzMa6cT/X4Dy6+lUdfwdJgxYWxRMY0NFNAFADwHknMGM1jQA3ZO42VuR9AuRhwxxrg7GbNKb5ZxHjNbyih6pyyEXsLXTG0QQPqEu1oJ2b060tK/Zkk7PkUW4JbScsiHghjD15U4Y0HarRqrEyZ9jZuPH0pPIWDvq+5JRQi+h+icsZR8a8UYpirWUe70S8YFVvsuGs7/ffdLsbRGyJAMWibv4eyeQ/is9yassH5u/zTqI2fGvJNSFNkjBI4VRv3UpjSHY0VEQuAI7lJwS9N0SFvksOBO8AG/alZ9KyHh45q7lTMPJjbXM9TeDqUMdOMhFeJTIOhEomscNZzIXtJI81fcfJjnYC1w6LENL4gx4yCXu28FbdO4whjDfmO3W1ocVNciYTnhluiaQhVzE4xwXtHOSApaDWcCdocJavokvFJeG+GsxS/2dD1JZeVDg478vIeGxs6k/RdsyIHi2yBV3tK5ncCay+GQB8OMZxv3RkPP5NKqEbmlIbLNHY3BqzEu3niGDUnNzoHtuBvw3cp6ts1+d/VeYOIdbLy8Nku1eOKeJhmQyxyTEh1g2Vi2uahyTPYXWAfFdieaMEoR6RxMeJye59kfqmol7yCSQq9kzF7jQ+pRqerxMceQBxHmq5m6tkucRGSAemyxTbl0dPHGMETQJB/EnUUgFNJ+h6KkSatqTSfnO3cVzHxHqMVAsB9Qs8oN+mhZYrw0SGZvNRdt3UpXDyA1wIdsO4rOcPiouI+M3lIUtFxJHykiUA+CFQkug98ZGq4GrNioAgHuV44f4ngg5S97QfArzvFxFkSO5Y5CPNTOn65k7femvMrTiUq7M2WmeqdI42jD2hmQz05qXoLsN4myNWl1DAle57GRRyss7Dcg/Xb6L8/9I13IDm/fUf869Ffs8cW8Vwapmt0nWtExIhhufJJqwe6JoDm97XNLdyNya6+K1OMsmNwsxNwxyU66PaKFiOX2/ajoHLDqY4X1yQbSP0TPlLGnqN3xlvf3Odv1Wm8HcXN4r4dw9el09+CctrnCF0nPQDiAQ6hYIAINdCFzJ6fJBW1wblqsXrosKEh9tx/+ID6L79sg/nStkvoP5sf2fmIyNuxrfxpLMYaBooY2zt3+SXYzlNOGywo6bdHyNn8zR5Gks0AfKPdfA0AUNko0XsBuoBYowb7j806iAAFBIxt8CE4YwN3FefiohcjtnuNvBOI9qJBSH4gB9ErGKO/1TU3QppD2MXv39Uq11lNY5QNrH1SrXm7tWmC0PoyK+YX7JxEN9gUwjlLW0T+adRTgfNe/qmKxbSJCMnawnsb22AG9OqimSkCubZKtm2q90aFtE1FKAQK6FPoMoA7tHuVXY5nVs40fFO4pwDRJRJi2i0QZoaQeWgpPG1QmgKVRinIO5KeRzlu4d6bpsZC3AuuPrD2uZ0G6sWJrdChI2vNZhHlyhwc2Wq7lJ4uquad/wAimxmKlis1TE4iljoNkZ9FQe0n9oGHgvUxw9laD+845sYPlccjkYWvLhylvIeYUN7rrSbM1rlr5iAvPP7ReoTw8UY2c4kQ5GG1sb+4ua93MPUW0+hCrJl2q0O0mkjmybJFY4p4n05+p5Z0/ClZiyTPdAJJLcI7PKD3bClQOJJ/tTfj20bdG7BMdU1iR127Zu4UZk6iJouUu6hLeqtdG16B45NWIZD4gwGNl7datRUoMzy0yFlC9wvv25schx53bE/I66HokcgtcSDzEHr3qvnvkFadrgY5GI4nm+07dbA6hRszoY5agc8gfzCv0UvL8MjkDDXdaZSMhiJe9pAHedgPdR6hURaed8sbvmDYyX2O4bKa0nThmxNklmIJG1Doq1PkjLmZjw24B3zuqxSt+i/dNa0PsCu9KnqmlwacelUn/LlHDS7S8l2LMOaxzMN1YT2DMnJ5mn5f
CzsneZpsWqMYWyiPIi+aN3W/IjvCjxkHHeY8rHkjeDuWNL2nz2sj3TMWquPPYGbR7Xa6LRpmdLTQ2Qg+Ztbb2G69p8fEuHp+tYUGXi5HMx0c0bXsLuU8uzgR1ruu6Xn7C1KBhH42jzieL/JXbhLiXF0/LizPjzNdjubI0MhfbiDYANVe3eQtWPO0zJk0txPY3Hk/Cuj8EaxmYHD+nYcjsX4Qkgx443Dmc0VbQDVkfRaFwBrzNO4L0PByHFssGn47JAeocI22PqsHficQceDDh1nAbpWgxStyJIJJmS5GcWgFocGFzGR2TfzFxroNiNIg1FsbQ3p5J3ybYu32YpYlKv0aiOKccEU/v7yuJOKIi7+M76f9VnB1QDpXlZSZ1VwNF49igeQtYTy+1jW1uduqXBYaoJIVVir9F1zgCq3vqvPKz0bX2KbF1bX6LoBoIDfmSTa/EUtGQ5wNeiNADuNgLQT18F3zVsO9ccx5dh/ZfWkEb/REmLaFWEXularcbJuCeg2Hiu2nfc/mrVi2hwym+vkUsx212m7HMb1d+SXYWGvE9ESAY4aQfBdscAd+7ySHM1o/Fuu2SC9nBFdFD5jiGhKijvaZtlBqj7WlGPPQHpurUgHEfMeALKcCVt2AFHNfRrpfkuzO1leXemJgbCUZk+O/unTMuhXMN/NQYyW9eYpduQwgUevirsqiaZlOFHmH1TqHMBO71ARzVfK60uzJo7lEmwaRPDNA2L7HqqP2ycNHi7gzI+x45kzsA/asblFudQ+dg7zzNuh3uDVPfaQN+YH0XbMxhG5CvcXF7JbkeH8/Km+FXNzNPiP7qJdqFgN5qXqDjHsD0biXWJtY0rW36Wcp7pZ4fs/xoy89S0czS2zZIsjfahssU7buyZnZz+7c/S8qfKwsthillk2Lchu56dA4GwP6XdeqVKF8o6MdRGdL0ocszZGcrwCPPokHRY7R92HM8mPc39Coo5oBovCUizGEi32R0vdYpuUGaI1Lhkg6Nrmcvxckt7wZ30fzTDJhxWgmOFlkdTuV1k57Y4rD9x13UPNqgfbmjp3lLWSQz4ojk54xoCyEMbL/AFdCnOka5qsUnNkQtDSdiw9ygpct0reZz2NC7xZ42Nv49PP4RRATITX9i3BLo0WLXZhCTAG85GwJpKMy8qVjZcg0/vI2tUrFzGx7yTG+tgFTONrLAADMyT3ohOpPoXy2XDCywQATt0JIU7gSgPZyna+9UfB1ZkjwW0K81ZtIndmZMOLCeeSV4jaALsk0PzVwT3JICdKLbPaHCuRO7hvSnvILnYUBJ67mMWpgTv73V7KuafOMTCgxWcwZFG1jQNhQFBOftl95F+a6Ep8nGUUTRyD0LyfZcid47/yUOMs728o+2OGwc+kO9el1RjzabTu7w819oP3o7FJgmrb1/RKtb4CvFchOjtSVn1ja2o9Uq2gbrYd9r4GV1O/kjpsBSNOxdC7ZgRZK7bIQaHgm4J6NC6Yd991dgVwOA9x3Psuuau4AjqkmV1Bo1SUDb67eKsW0dtlBBCXZKaoApqW13UPFdtId37ok/sGhx8Qk7+yUbNVnv6dU1DiD/ZdFx69PVXaYFDxkoI67BLRTUdhfqo3mobpVkgHW0SZVEo3IB6rozAt5SbCYska4dd/NDuYdHbq9xW0d/H5T3eFrr7U4dD+aZte8GiW0uy5tbV57K97Jt/Q5blHwvdLszZAbr3JCjg/wd08Eo+YtiMkhDWDq9+wHurUmVtX0TAyi+jytQcg9Byj0UC3VtPFNGo4oPUXkNG31UTqvaLwdokJn1LiXDijBNvEvOCR1aC27Pl1V7ybLLqMqhQcbHkqz2jcNR8e8IZ3Dkjo45pWh+NI7pHO3djvIXsf6XOVG1L9ojs5woftWPk6jqGOBbpcfEc1jfImXks+lrKdd/aW4r1/Ui7h1sOi6NA4uc6RrZMido/3bIIBd/SPl/mPfTm2mNhhdqjGNSdLp+ZNh5LSySF7mSMI6OBoj6pm3Umh4PMNvM/3SfE2onPz59QLyXTSOkdtZJJslQP7wAPQ34lYskkzqKFFmnzxPGRz13UoznmfzNjPKPH/umTc5rgQHV4Arr7Q4AHmpJumNST7F4oJPiEZGRIXDw6KTxcPHcQBnTt2s2QP7KIbIJjYcPcdUq2ASHmbkOFdd0aySGKCjzVlghxcZrgH5k7j0HzD+wTqXTtNm5fhumbJ13cq6zFbHTxO9wPWyQpPGJjc0NlddUEazOJT2tf6k/hudA8NBsDxO613sUwH6xxbiSujuLB/xMhvYFv4f/dX0KxTTxNJmsxmtfJLI4Na1jSS4k0AAOpXrvsn4Nfwbw81uTFWoZdS5Lv5evKz/ANIP1J7qWvBJy/n4jm6tqMdq7ZqDc5/Q8pNei+jOk6AWoLJ1bC0/kOo5mPi85phlkbHzHwFkWoh/afwPj5P2HI4lxIprDSHuLd78SKTN9dnPWKVWkXYZcrepG/da+jKlIsBv1Vci4x4UkDXxcTaS4Hpy5sZ/+ydDibQ3C/3xp58xkM/1RbkRRfiKa0ADy2S4BA5j39EIXOXR0bYo2MkWSuQ07WbQhTtgNnVHvAK6aKBsD2QhGkVbFg0AX/Zdctjba0IRC7Poogiz4IG9gbEIQi8J2fQPyShIrrVeSEIUUz6ARuKXXM4fiNhCEaFnQdXTvTHUtdx9IZz5TZnCifuwD09SEIRJWRclE1vt107TZZIcXh/JmEYsySTNZW38oBv/AJgs+139qDiXnMOjaLhYt/8AiygyO9Q26HvaEKM0bUimar20dpOrE5OVxdm4ke9jBAh28KZygqqzcTYOUXZOW/UsqfqHZEgeCfGrBH1KEI4pIl2hfH4w1VzaicyOBnzNga0Bl+Y7/e13kajqGsSjVNZyn5T27RtcbaweAHQDyGyEJWeTNGKEfojeJM6XL0mNpLmcstGnbEeigpcp0UYa26aBtaELM2/jTGwitzGGVkmaMg3Tt1DyNcAS00EIS0aavsbsynsAZ3k15J5FkPa7wA613oQo+wUlYvDnOlsjmHcOicwZk0Zq9iPEoQqpWX4KjPl+KGEnpflScw6i6raDXWvNCE1RQCbbJHRtY/eOdNjmMh0MRcCao13ELYW9pvFul6Dh42p6/qGW8FjWyNmczliAHK2mkAkbbu5j5oQtGFtRaRl1HM0ipal2harhR58WPJN9qyn8jsp8hMjWd7WnqL7/ACFLrhVmEzEl1zWGS5bWuEbIgatxv8R8PRCEWJtpyfaFTSjBJfZc+HeOYoRJKdLjaxp+SOMBrQPbqVOHtpy8cmGPS4WtaaADGoQieWfVgrFByfB//9k='
        # 	# frame_text = '/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCACWASwDASIAAhEBAxEB/8QAFQABAQAAAAAAAAAAAAAAAAAAAAn/xAAUEAEAAAAAAAAAAAAAAAAAAAAA/8QAFAEBAAAAAAAAAAAAAAAAAAAAAP/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/AJVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA//9k='
        # elif user_index == '0':
        # 	frame_text = '/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCACWASwDASIAAhEBAxEB/8QAHQAAAQUBAQEBAAAAAAAAAAAAAAMEBQYHAgEICf/EAEAQAAEDAwEFBgMFBgUEAwAAAAEAAgMEBREhBhIxQVEHEyJhcYEUkaEjMkKxwQgVUmJy0SQzQ4LxFjSS4bLC8P/EABsBAAIDAQEBAAAAAAAAAAAAAAIDAAEEBQYH/8QAKhEAAgIBBQABBAIBBQAAAAAAAAECEQMEEiExQRMUIlFhMkIFI3GBobH/2gAMAwEAAhEDEQA/AMz7PdqXWS4RunfilnxHPk6N6O9j9CVu8EgeBIHZBGmCvl6m7ynf4mkBx5DQLY+zXaj940BtFVJmel1jP8UWn5E49Mea83/jMyj/AKdnoNfgbXyL/k0eOQHBKdxOLjwBHmouKUOOAE9hkwdCvQQZxZImaaUAAYACkoJfPTHVV+GbXGSeqkIJzgDRNViZUT1O8OdoR6qRhfqBn3UJSTlSkD8n1Rpi3RNUrxhStPkYwTqoOnl4YGil6aXeAIPBNiBInaIkEZB9FP0mpAwBlVujcS4OBCnqF5BHDP1TomeRZbe0EjGT7Ky0cYOBn/2qxbpM4wMqz26QPAynp/aZ5k5TQtAGnzT5rQBgBNaUDA0Tvkudkds1aVcWCEISjaCEIUICEIUICEIUICEIUICEIUICEIUICEIUICEIUICEIUICEIUIfh5LWMA3WYyBrngErZ7/AFFsuEVxpzmSF2SDwcOY9CFEu35HEOA04jCUhhMZy5wIPXVeHwpQ+5vk9lNblTR9JWO8QXSggrqZ2Y5WBzfLqD58lLR1Q+7lYf2bbTvt9cbLUynuah2YSeT+nv8AmB1WtxVRc0EBen0udZYp2ee1On+KVFgiqBpkkqQglBIxgZVcpasHQnHupOCYn+2FujKzE4ljpJ/wnT0UxSyjA1VWp6jAAz8+Sl6WpAaDnX1RpoS0Welk4HKlqWZvXy1VXparUYdxUtS1BJBcfJOixUki1Uc/DxKeoajJA/NVCknGQFPW+Y6apyYmSZdLZNqA7/0rba5GDBJOqodvmG8DkK0Wyqw5vi4p8XaM0kXikfkA8invJRNulDmAEhSrSCNCsWaNSNGklxR6hCEk3AhCFCAhC5dLE370jR6uAUoq0jpCaS3e1QjeludKwAZy6Zo0+abSbUbORAmS+0Ixx+3af1RrHN9JgvJBdtEohV+Xb/Y+E4ffYD/SHH8gmVT2pbGwZ3Lg+YjX7OI/rhEsGV/1YD1GJf2RbUKiT9seysQ+yirJD/Q0D81Hy9t9raD3Vold0zMB+iNaXK/AXqsS9NLQskn7cZj/ANvaYW9N95P9kym7aL7IPsaelj/pYT+ZRLSZPaB+rx+WbQhYU7tV2qqDuitawE5O7G0fXCDtxtLP968T45YdhEtG/ZIF6xeRZup046JGSspIRmaqiYP5ngLCH3u6z5dLXzvLuOZD/dIuqKl+plefUlGtHH2QD1kn1E3KbaGxwDMl1ptOkgd+SZTbc7MQAl1zY7H8IJWJvbM4kguPVJPgkIzqepRrSY/Wwfq8n6Ncqu1TZynB7sSykcMAAFRr+2W2B2GUDyPNyySpZI06ZTB7Xb2qv6fGvCvnnL0/OV27GSWxOfjmBoPVNaid7gX7wA5+SSqbu0giIg9fJRNXWulO688fw4K+YwT9PozY/grHMlbJG4tcx281+eY6Lcdi9p477ao5nP8A8RGNydgPB3X0I1+Y5L5wdVSvkDWudgHOVYdmdrp9nrjHL3h7mXDJmgZyOvqF0tJleGX+5i1WL5Y8dn01BMMZycBSNPXNGm8B55WbUm0D54myMl3mOAIcDxCfx3Wofq13HzXoIZLOFOMvwaTDcIxj7Ro90/p7tA06zAdVlwuU5/1XfNKtuU+fFM756JqmjPKLNegv1KwjMg0PVSEG1NAwDxtHXVYzHcZS05kd80rHWvxpIfYpimLeNs3SHbW3R/jGgzlPIe0egicN14/ssHirseDvCT6p3FWg65PzTI5aFSxs3qHtbp4tGuB8k7j7azFgMHusEhqnk6ZJ8k/gnlcQQx/yRrMKeL8m8wdvVyiH2QPllOW/tAbRZ+zPpqsNp21btRC8+ylIKO4vI3Kd3umLO2LeCJsbe3va5/3JmN88ZSw7attZsYrgz0aFl1Habq/GICOSs1r2Xu84A7oZTFkvwW4UWs9qe2sx1vMrR/KcJOTb7a6UHfvtXjhpIQvKHYC8zY8GM+Smqfssu8wBfkf7UxTYtqK7KzNtVf5QTNdqp485Smct6rTrJWSnPVxV4d2P3IEEvfr5Lh/Y/UfjdIQmLJLxgP4yhOu8xBJmf80g68Y+9KR1yVfHdkscZPePefPKSPZfb26vG8VTm0WtnhQnXoc5vqkjfmt073h5rR4+zW1R4BpxnzTqPYK0RYxTM9wgc2Gtpln76ldo3ePsu47jUPziGU56NK1iPY60MGkDNeeEvDYrJA7uz3LXN4tJGUO5sloyiJ9xdqyjl/8AHVO4qO8ynLKOTXhla1HbLTG3ePdBo56BPqOitsw3qcxPbnB3XZCtNkckjKaSyX2VwIpMaq22jYu+1bW/ZtA55C0OjttJvDdY3TkrXbaaKOPdDG8Fcp/GrZUZOctqM1pOze6PI33hoP0UnF2Z1Jxv1IHXRaNw0CFmerl4jWtGvWUNnZowY36nPXCWHZtSc5T7lXZCr6vIEtHj/Zn1X2X0zhvMIcfRRknZqxriO6HyWqIwDxCJayfpHpI+M/AVkzi3xBwHRcsjfI7OTqcYynDac74bujVPImQx6ktc4LwCjE982NYqN5GCN1o14aptPTOllbFExziTpgHX2Cn7ba7he6gUtFC555nk0dSVqWyvZ9T2hoqJWCapIwZHjIb6Dktun0rm9z6MmfPHEueyJ2Dtd2gtEcVwO6R/ltOS5reQP/7hhXGmtkrhgvPuFLUtrjjdvaE+Y4eikoaMdBny5rsQjtRxsk1J2QsdpcRq45TqKxbxH3j55U9FSDA8Cfw0owNAmJGeU6ICDZ9nMH3KkINnYwdYwQpyGnbp4QcJ/BAzgQmpMS530Q1Ps7A7/SHyUrS7PRZA3R6YUnBDG3kpGma0eaYkvRbbGVNs9TjBEYz6KZo9n6fQd2PdL0rWEjLm+5U7QQb5G4N7rgJiS8EuTGtJYoNAIxjyCm6KywjA3Bn0UlbLLXVLmiCkc4k41wPzVxtnZ7tNKQTbmtaeDjNHj6OJTYx/ImU74K/bbLFkfZgFXzZnZ+N0jd5mh4jCfW/s8uVNuOmmpR/EA92f/irbbrNDQAEO3iOgwnKUIK7Eyw5sjpI7pbRTwMaAxowBoA
nrYYmDAYNF7vFG8VmlKUu2b8emx41wj3daeLR8lw6CJ41YAu94L1CrXo144vtENd6Ady5zGZyOOFi23fa9srsXNUUVT39RXQYBp4YzkEjIy44aNCDxJ8lvNzuEFqt9RcanJip43SOA4kAZwF8F9qE9Ver/AHC8VbWiWrnfK8MbhoJOcAdOS6WihLKm30jk63HDDNbe34Pdpv2s9om5bYtmrfTAEguqXvmOPLdLAD81mW1H7R/anf4XQC/ut8btSKBncO9njxjhyKrt6pntc7AHn1VRr2ObkkrW4KPSE42mL1O1l+qq51fV3atlqn/enfUPdI7rlxOSuIa+R5w7Ls9VCucd4knhzS8EhPMlC3IckvCz0lW9+PtCAOAVns1fURTsqIZ5I5GfdkY8hwPqNVS7e4HG6NOhVus2C5o3Qji2LmlRtuxHan2gWvcig2mrJoyQS2qIn9gZMuA8gQvpLss7Ua7aKZtuv1LTxySD7KaEOaCf4S0k68dc+y+StmWN3mtJGNOa3bs1Y/46mdEGjD28/Nafp4Z4NS/BhlmlgmpxPpTzQuYiTG0njhdLyz4PTwe6KYIQhQIEIQoQ/BWWoaGDwjhppqn+ztr/AH5WiKombHE3Vx4nHkoE+F2r/YK1bE7LXi6XCG4QxOipoySXuOjuWAOfrwH0XkdOpSmqVnr80tsHTNYsEFislO2CjiP8zg3Un1U4270WPAx5PLIUVRbPvjDQ4knHBSUdkYRqDkeq7mNbUcLJK3yxwy9wjBbTu8suSgv7WHIhHu5cx2JmNQE4Gz0R8LsYx0TkZ5UcM2nfvlrRE0j1KcM2lqnNBa5g5aNC6g2apW5PdtyOJxxT+OxQDwhvHyRqxT2jRm0FY/8A1SOpDR/ZLxXqrzjv5TnXiVIw2GnaABFhPIrNCNN0EnqExX+BMmiLZeKjeDX96RzO8dFJ0twkJGA8k8E+hs1OBkxZUjS2qBp/yx7olYD4PLfcp8gFjvzWibL7tW1sk9wipxwxJxVWoqCJpG6wY6lWKgp4mYzgZTY8diJu1waps8LNA5rptpIdNd1sbs/lhabZtoLE2JsEVyMjscXNcP0WCW6OEOaW6FW21VPc4IOCtCSkqMjbhLdZtcc8U4DonhwPBdKl2S71ejI2OfkqywxV89THWSvjjaxhZubu8SCcnXlwCqWLbzfBpw6xz+1xt/ofoXq8SToIF6CvF6oXwVPtHqu6s/cuBLX5PvhfIW3/AHffSlrcHJ1wvprtiupohHTveA10e+OXUfovl3bGsbIZHl4J816LSRWPSx/fJ5TVSeXVz/XBkO0DZcuLMDPEqiXKCQuPiKve0Eu8X+IceSpVY8bxydOpWbLldm7BiVWQ4oyXEu1CcwUG6QfySgnaPDvN15JeKVrceEfosk8sjoxxRZJ26nYHNJA05K32mBhIwAMY4qm0lUWnTCsNtuLgQCR81ccj9BnhXhqWzsLGluHY6cVtvZzWsoquGdj270ZBwc6r50st4Ld0Zx7rSdmdoXMkYWvOfVdPT6hLh+nK1OmtX0faNsuNPcaVk8Dshw4Hinaw/Yvbd9G9m9JlugLSeK2O03WlvFI2qpXhwOhHQ9CuVrtDLTPdHmLNmh1vyr48n8l/2PUIQuedMEIQoQ/EnY/stqa+RtffmPhhyHMpzo5/9X8I8uPHhz2KhtkNPE1kbA1rQAA0YAHQJzTU7GANDQMck7jYQSHEDK5eHCsSpHWy6iWR8icNNg66dE5bT8OGi6jYB97GvNLsbppg9VojZlcrPIomDXA1TmONp4jhyCGMDtA05wnETQdP0RciZUDGtxgDCdwx6ZK4jZjgE5jaB+LCNWKdCjG4GR9UvE3J4+ui4azLcAgj1S8TSD7ao0A6F4m666p5GNRxCbRZA5J1EAeevLKNCmP6d7m4LThStNK4aqIgyCBk+qk4JG+EZRoTJFgoJ36EkK0Wud4cDx9lTKSpjZrvfVTlDcYY93LvqmxYmUTV9ma1kL2lxGT1V9gqI5mAhw4LErXf6aEt8Z0Vtt22EMQA3nHHIlOcY5EKhOeCW6JoyFXaPbC3ytG88hS0F2oZ27zZcJLxTXh0IazFJfc6Hi5mljp4XzykBjBkkr1k8Lxlrwova4GbZW7tgO9J8FM6MNOpeGEtx7gIYr7kmNeWLi3FoxTt32hp7nTslp5RmlBxjTLTx/RfK+0d8JLvHnlqrztTtS6pjkjkmJJBByeKxTaCv7uokaXZB10K7csixxUI9I4WPG5yc5dkZdrll7jlVqqqnPcfNd3S7QxkhniPMqtV10neQY3OA/lasU530dTHHb2S4cQdSAnEczQ3AOdeqpMl1r8nD3e4IXDb9Xx5w8HXz0SJRbNCyxXFGiQ1DRjXClaOrbod76rNqTaaQECduD5qXg2jiDcGQDpgpeyS6CWSMjV7dd2xEeJvTirts/tPDAQ5729AMr55i2ilJxHIceXFTNBfKhxGZyCPNacakZ8rj0fVVo22ia5gbURg8tVu/YdtRU3O8z290m9C+jdMOmWvY3/7FfBVpvsrC37Ue7it/wD2etrNoo9p922Xm10bXU0gkkr2PkijYAHE7oe0jgNc+q07ZZIOG7wwzccbU0uj7fQsPr+3W87PzvbVXTY+/RtxhlpkqGvwfxFzt6PywHE/IrRdgtu27a2Bl9ktpomySPjYzvQ8PDdN4HA0zkcORXNnpskFu8N61eL10WtCR+Lg/iXnxlP/ABhJ2S/Az58b/sfmK1gODgE9Slg0j8PzXrAfvNA1+SWawnU59MLAjpOVnLW500A54S7BjyQGNBGfRdtAGgAH6ogHyKR+Y1TmMNbqBklJMGDw15JwxmNT/wAIkrFSZ20Y1OgS7CXHGfmUkDvDQeiUj8JOOP5o0LY9j9cpUOBIJwm0Ti3A+iU3yDpj5okwGPYic8z7JxEdeA+aYxyhg1Bz1TmObOugCNMW0SMTiceHTzT2KRoAGFFCfw6O9ClGzEjDTg+qYhTsmoZhx3cY1UhT1AB1aCByVcjldx3jr1TqOocMeIlEmA0WqCuDD9zgpOmuZyAOKqEM54g8fNO4p3txlxCZGTFuKLtT3l7XNaOqslFe3MaAJGY6arL462XIcJseSkqa6ub+I+xTY5BUsVmr0u0crBuiVo+apPaT2/M7P66ntklndcXVNOZnEVIiawFxaB9x29909OXHOkY29bmodqsG/aQuM4uttuLie4kpDC14P42vcSPk8fVSeXarGaXSRy5VGRVttdrbTV3erqLRbpKekqJXSRMfJkxgnO6ANMDOB5AcOCzraOoFZH3rnMaW8mjCjbpeZJGHGowoya597DguxolfU2joS0XxyqxtUSR93loJPM4Chqt+XHekc0HySklfHTSPZM7Eb9Wu5A9E3nfC8FzckO4Hio89qwVp6dEZO0Oy0TOAzoQ0JjN3Mco7mWR58+qlJYoHeHdOvkmr4o4RvPbkD8R0wheoii1p5WIOqHMhcZNMFS9mpW10LXSTObngAq5PV/FvbTweIZ8RHABWqylkTWs3gA3ognqGlwaIaWLfIs+GS1VQjkJex48Hm
ncNZLvbzBw8yndRRU12ibBJK5kjDvMe3iCmG9LSu7iqpn7zPxxDeDvPHH6K8WqbVSfJWbR7X9q4LHa7g8hrcuDuPEkBa92PbQUtJtNQRXWkhqaaaZscrJY2uY4OOBlrgQdccvqsOo7lG0g7s2Ovcv8A7K3bOX2lp5mvc6oBByA2F+T6EjH1C2Y9RyYsum4pn3BtFNsla9mrtcaTZy20r46CoDZIqWNjgTG4YBAB1zj3Vg7H70y39nVjp5pMOfTun9pHuePo4LE4jtF2i2ijpqynFpsdSI5qhz5WuqqyPAcA1rCWxsJ5l29w0HPTaOripIGU8DGsijYGMa3g1o0Ax0TlNxTt8mGWNNJGqjaeAcJMddUk/aWMu0lcPQrOTdBjw8ueVwbsc/5jR/uKH5CLCj5fjxnJ58k4aGYGCfQJBuQM8QfNdtcMYHNedX7PSM7OCeGnBdNGoaMJMAZwfXQpVhG+HAcOaJC3SHkbQWg4x6LvIHIpLe3gMHieWoXQ8uPqjQDF2ObzBC7Dmk8PokB15JQObwOfZEA7F2ZGTk/NLMfyd7JtG9rf+U4aGEZBAJVpgMXaWgacEox3MEJuJA0Y6rtj2ZyUSdgsfNcWjjoQlmOzzGT0TFrxoC7QBKskAOHEHKNMFxsfxyYGvtqlxJoMYUeyXBA1Shm4je9UW4Bxok46nAwW/onLavAABB9SoNlQ0c9eicNmY7gSrsGiZZWHiXgfVOYqwOwC8KCjkOpaScJZk+v3seiK2itqJ8Vm6fvkj1VV7T9nv+stjqy208QfVxj4ik69638I/qGW66eLPJP/AIlrScHK7ZWscdXAe6K21RcfsluR8SV1RNE10eXZB1a5uCFDOuTtWAkHzX1Pt32G2bbC6yXq3XY2upqXF9QBAJY5Hc3Bu80tcTqTnBOuM5Jxftt7Hmdntstl4tVXPVQTE09XI8AYm1c0gDgHAOGNcbg11SZwSVo6S1KnSZms1UH5DyCD1TZwpSPBE1pOpLTjPyUa6tLTuvfp+aI6yMHBIyOmqxzbRoilLseysgccO7x2um88kApjM2nY4lkDM44hoyvZ61rWEn81FTXVoBcRnjryQfI0uxiwr8Ds15pYi2LDZCdCW5XVvvN1jl35RG4Z4tdqoaWs71pJlDR6r2nniYd9khyTjOCUcW2W410zRKW9StZvMcN8jIHDXzTmKsrZ42vqsF4JORp7KkUVaI3Ynkc5p4breCm6a9MjwPig4YxukYITaFfdFluoawZDXEe/JT9FMMtLRrxzlUaiu0Urg7IGVZKCs33MaDkHzVxtNIqXKtn2P2c1Mkmxlnc7JPwsYGnAY05q0Cd+SS4D/bgqpbNNdabDQWwud/haaOHIGMlrQCpQ1pIzk+66MpcnCSvlk06qON0vPySZqM9fkogVzxod5d/GOPEuQ7guDG2uwPETheljSd4ZykwAXcckeXBLtYS3BJwuOpHaaZ40AaDJ9Eq04xgaLxrGjmvdG8NSjTsXJMXD8jeb+q6EmnH1PBN2nGTwXTf5evDKtNgbUOTIcghv1XQk1xjVJNAzzzwXe4Mga+6K2A0KCYA8slLNmdpjCaOaWHIaDlKMOBx+SK2A4joSudp+q7ZM4O3XDHVNQ7IxnRdb4znGVNxNo8bOCcc0rHNjUclHhwHHKVbIBgYPmrBokmVLeBwuzKB+JvzTBrm4JJwF76HTzRWVQ7bMQeXlhdCsI0BTRsjgcuaMrpz2Ea4BOqtNlbRy2qdnTU+eiXirJGkHdKjxJoAHJRj3kFwbloHHdV7inCyXbVvfjwNx5rl1QcYw0eyhzXUsX36uFmNTvSgafNRVy2/2Qs8PxFw2kooomuLS7vw5ocPwktzr5cVe8r4y4NqsHG8fkoTbOx0+2WzFw2arHBra2ItbIRnu5Acsfjnhwacc8Y5rP7j+0J2d0UJnpq2uuEQzvvpqRwDPUy7gPtlZTtF+0xtNtBcXQbJsis1qjJLqmYNknkaM6agtGRjQNJB/EULyeDIaeV30Y1tDTVdju9XabgCyopJ5IJWDgHtcWkDHHUKJFwc3ONNeqX20vRvd2q7u6QmarmfM854uc4k/mqu2tYSQSTr1WCe5s6kItdljlr2yM3cnOPZRsks02WxSbvQhMm13hcASRjnyXMdQ77zScEYJ/wCEvlGlNSVMcxQneHezPLjzAUhTU1K4n/HyDHHxBRrZGy5aCccea6bTiQ4bO8DmN5Mhkk+GWko+FiipKcYMldKW48tfdOjbbXLG0skmEmc7xcVW4qGRuvfucAdBvEqVpMwBre+IB5OOUzdXKZLg1TiT9F/hpGCOVzhjg46rT+yagkvm1lBTbu9HC8VEvMBjddfU4HushpstqGtOdTy5r617HNh37JWMV1xgDLlXNa6QOHiiZxDPXmfPTkFp08pTe59L/wBOdqdsI7Y9s1qOudgN8OiVFfJnDfyUBVXahtgYbhXQUwecMM0gZvHyydT6KEqO1fYS31XwdZtHDHMDgtcx/H13cBOeSznLFSLyKuZxOPquviqgaBufdV2n232Uqo2TQ7SWlweND8ZHn5b2QlnbV7OA67QUDfL4tg/VTeW8bKi3AJaM5GqcNDSN4jXGhQhc9K42b7dnrRkEn8K93ScuGgQhSINs8Gg0A0PNKMxz1yhCJFS7FRhnEfJe8RgE6oQiTsXJ0dE4Gpzjqgkg+qEIiLo7b4hnougMuGHEZ5cUIRLsBs7wQOOfVdMecYwChCsE6ONRjkkKu4R2+Nr5Gv3Trhup+qEKk20WUK89uVlttRNT0tkrZ3wf5jpHsjGcZ0xvZ+izu/ftR32Iuis2z9FCCNJJw57vXdBAz7lCEUOezTGEb6KfdO2vtOvDWvn2pmo2Y0FAwQHHqzB59VUrhtdHVTGS61d0rahwzvzy74J6nXP1QhDKTj0Xk4VI7pdsbkAPhiIKYHeEDMhrv6gDr75R+8Ky9ztuF1qH1Do27sTHaMjYODWtGgHkEIQ55yjC0yYv5DPaG5PqrQQC9gbKGkA6EeigfipGxNjYBujkUIWfI2kmbMSTmyOq6wkEbgweqhahpaRIDg8UIVR/JpqxBtW5vhIzrxTplS5pBDRjlxQhMuwKp8CwqnvycYGU4irnwuBb0QhIb5aDUnR226StkaH5y8aY6KQhuD3+JgAGcHIQhED32SFsvLam5fBPY7LGFzXDGMgcwteoe0bamx7JUNFW3msqYm7jafdmczuYMDEYa0gHGNM5wMAYwhC2aZumjFqEvkSK1cu0K60k9wkglmNZVkM+KfIXSRt4ndJOQToM9EhssygbDPerxFJUsgI+zY7G84nGp6IQrwTk5Nv9icvn7aLjYdu4onPlNrjEUbvs4mNDWj16qcPbXWRExxW5jGt0Aa1qEInknXYueOKm+D//2Q=='

        credentials_json = open("credentials.json", "rb")
        config = pickle.load(credentials_json)
        credentials_json.close()

        firebase = pyrebase.initialize_app(config)
        db = firebase.database()
        #print(db)

        #print(res['spbase64'])
        #print(res['rr'])
        count = 0
        i = 0
        A = 100
        B = 5
        bo = 0.0
        nm = db.child("Appointments").child(user_index).get()
        res = nm.val()
        video_frames = res['spbase64']
        video_strings = video_frames.split(';')
        video_strings = video_strings[1:]
        #print(video_strings[0]==video_strings[4])
        #video_strings = video_strings*2
        spresult = 0
        # convert it to a pil image
        spcount = 0
        result = 0
        length = len(video_strings)
        print("The no of video_strings: " + str(length))
        for w in range(len(video_strings)):

            input_img = base64_to_pil_image(video_strings[w])

            input_img = input_img.resize((640, 480))

            img = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            avg = 0
            count = 0
            sumres = 0
            c1 = 0
            res = 0
            rows, cols = gray.shape
            for i in range(rows):
                for j in range(cols):
                    l = gray[i, j]
                    avg = avg + l
                    c1 += 1
            for i in range(285, 386):
                for j in range(177, 351):
                    k = gray[i, j]
                    # if i >= 285 and j >= 180 and i<=385 and j<=350:
                    count += 1
                    sumres = sumres + k

            avg = avg / c1
            sumres = sumres / count
            avg = round(avg, 0)
            sumres = round(sumres, 0)
            print("photo avg: " + str(avg))
            print("Fin avg:" + str(sumres))
            diff = avg - sumres
            diff = abs(diff)
            #print(diff)

            # if diff >= 5:
            if avg >= 140:
                if sumres >= 140 and sumres <= 200:
                    #print("Hand detected")
                    res = 1

            elif avg >= 120 and avg <= 139:
                if sumres >= 130 and sumres <= 200:

                    #print("Hand detected")
                    res = 1
                else:

                    #print("no hand")
                    res = 0

            elif avg >= 90 and avg <= 119:
                if sumres >= 90 and sumres <= 180:
                    #print("hand detected")
                    res = 1

                else:
                    #print("no hand")
                    res = 0

            else:
                #print("no hand")
                res = 0

            # else:
            # 	print("No hand")
            # 	res=0

            print("res", str(res))
            #res = 1
            if res == 1:

                #Red channel operations
                red_channel = img[:, :, 2]
                mean_red = np.mean(red_channel)
                #print("RED MEAN", mean_red)
                std_red = np.std(red_channel)
                #print("RED STD", std_red)
                red_final = std_red / mean_red
                #print("RED FINAL",red_final)

                #Blue channel operations
                blue_channel = img[:, :, 0]
                mean_blue = np.mean(blue_channel)
                #print("BLUE MEAN", mean_blue)
                std_blue = np.std(blue_channel)
                #print("BLUE STD", std_blue)
                blue_final = std_blue / mean_blue
                #print("BLUE FINAL",blue_final)

                sp = A - (B * (red_final / blue_final))
                sp = round(sp, 2)
                spresult = spresult + sp
                spcount += 1

            else:
                sp = "No hand"

            result = result + res

        result = result / length
        print("final res value: " + str(result))
        print("positive hand counts: " + str(spcount))

        if result > 0.25:
            spresult = spresult / spcount
            spresult = round(spresult, 2)
        else:
            spresult = "Finger not recognised"

        db.child("Appointments").child(user_index).update({"spo2": spresult})
        db.child("Consultation").child(user_index).update({"fspo2": spresult})
        return (1)
Example no. 10
0
	def get(self, user_index):

		config = {
			"apiKey": "AIzaSyDlmyLHYxZYCdmlI-pzKwkudQ85jdydBJ4",
		    "authDomain": "televital-hack.firebaseapp.com",
		    "databaseURL": "https://televital-hack.firebaseio.com",
		    "projectId": "televital-hack",
		    "storageBucket": "televital-hack.appspot.com",
		    "messagingSenderId": "209026679607",
		    "appId": "1:209026679607:web:68ae56edcb1abae7f290b2",
		    "measurementId": "G-07L3WDKM0H"

		}

		firebase = pyrebase.initialize_app(config)
		db = firebase.database()
		
		count = 0
		i=0
		A=100
		B=5
		bo = 0.0
		nm = db.child(user_index).get()
		res= nm.val()
		frame_text = res['spbase64']
		# convert it to a pil image
		input_img = base64_to_pil_image(frame_text)


		input_img = input_img.resize((320,240))

		img  = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)

		#Red channel operations
		red_channel = img[:,:,2]
		mean_red = np.mean(red_channel)
		#print("RED MEAN", mean_red)
		std_red = np.std(red_channel)
		#print("RED STD", std_red)
		red_final = std_red/mean_red
		#print("RED FINAL",red_final)


		#Blue channel operations
		blue_channel = img[:,:,0]
		mean_blue = np.mean(blue_channel)
		#print("BLUE MEAN", mean_blue)
		std_blue = np.std(blue_channel)
		#print("BLUE STD", std_blue)
		blue_final = std_blue/mean_blue
		#print("BLUE FINAL",blue_final)


		sp = A-(B*(red_final/blue_final))
		#print("SP_VALUE",sp)
		bo = bo + sp



		#this is the value to be returned on the result screen
		bo = bo/100.0	

		
		db.child(user_index).update({"sp":sp})
		return (1)
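Both SpO2 snippets above appear to use the standard "ratio of ratios" estimate SpO2 ≈ A - B*R, with R = (std_red/mean_red) / (std_blue/mean_blue) and the constants A = 100, B = 5 taken from the code. A small numeric check with hypothetical channel statistics:

# Hypothetical channel statistics from a fingertip frame
mean_red, std_red = 180.0, 9.0      # red channel mean / standard deviation
mean_blue, std_blue = 60.0, 6.0     # blue channel mean / standard deviation

A, B = 100, 5
R = (std_red / mean_red) / (std_blue / mean_blue)   # 0.05 / 0.10 = 0.5
sp = A - B * R                                      # 100 - 5 * 0.5 = 97.5
print(round(sp, 2))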
Example no. 11
0
	def get(self, user_index):
		credentials_json = open("credentials.json","rb")
		config = pickle.load(credentials_json)
		credentials_json.close()

		firebase = pyrebase.initialize_app(config)
		db = firebase.database()
		nm = db.child("Appointments").child(user_index).get()
		res= nm.val()
		video_frames = res['hrbase64']
		video_strings = video_frames.split(';')
		#print(video_strings)
		video_strings = video_strings[1:]
		print(len(video_strings))
		video_strings = video_strings*10

		face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

		def buildGauss(frame, levels):
			pyramid = [frame]
			for level in range(levels):
				frame = cv2.pyrDown(frame)
				pyramid.append(frame)
			return pyramid
		def reconstructFrame(pyramid, index, levels):
			filteredFrame = pyramid[index]
			for level in range(levels):
				filteredFrame = cv2.pyrUp(filteredFrame)
			filteredFrame = filteredFrame[:videoHeight, :videoWidth]
			return filteredFrame


		def applyFFT(frames, fps):
			n = frames.shape[0]
			t = np.linspace(0,float(n)/fps, n)
			disp = frames.mean(axis = 0)
			y = frames - disp

			k = np.arange(n)
			T = n/fps
			frq = k/T # two sides frequency range
			freqs = frq[range(n//2)] # one side frequency range

			Y = np.fft.fft(y, axis=0)/n # fft computing and normalization
			signals = Y[range(n//2), :,:]
			
			return freqs, signals

		def bandPass(freqs, signals, freqRange):

			signals[freqs < freqRange[0]] *= 0
			signals[freqs > freqRange[1]] *= 0

			return signals


		def find(condition):
			res, = np.nonzero(np.ravel(condition))
			return res


		def freq_from_crossings(sig, fs):
			"""Estimate frequency by counting zero crossings
    
			"""
			#print(sig)
			# Find all indices right before a rising-edge zero crossing
			indices = find((sig[1:] >= 0) & (sig[:-1] < 0))
			x = sig[1:]
			x = mean(x)

			
			return x

		def searchFreq(freqs, signals, frames, fs):

			curMax = 0
			freMax = 0
			Mi = 0
			Mj = 0
			for i in range(10, signals.shape[1]):
				for j in range(signals.shape[2]):

					idxMax = abs(signals[:,i,j])
					idxMax = np.argmax(idxMax)
					freqMax = freqs[idxMax]
					ampMax = signals[idxMax,i,j]
					c, a = abs(curMax), abs(ampMax)
					if (c < a).any():
						curMax = ampMax
						freMax = freqMax
						Mi = i
						Mj = j
                # print "(%d,%d) -> Freq:%f Amp:%f"%(i,j,freqMax*60, abs(ampMax))

			y = frames[:,Mi, Mj]
			y = y - y.mean()
			fq = freq_from_crossings(y, fs)
			rate_fft = freMax*60
			
			rate_count = round(20+(fq*10))

			if np.isnan(rate_count):
				rate = rate_fft
			elif abs(rate_fft - rate_count) > 20:
				rate = rate_fft
			else:
				rate = rate_count

			return rate

		def answer(videoStrings):

			sampleLen = 10
			firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
			firstGauss = buildGauss(firstFrame, levels+1)[levels]
			sample = np.zeros((sampleLen, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
		
			idx = 0
			
			respRate = []	

			#pipeline = PipeLine(videoFrameRate)
			face_flag = 0 
			for i in range(len(videoStrings)):
				input_img = base64_to_pil_image(videoStrings[i])

				input_img = input_img.resize((320,240))
				gray = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2GRAY)
				
				faces = face_cascade.detectMultiScale(gray, 1.3, 5)
				#print(faces)
				#faces = [1,2,3]
				#print(len(faces))
				if len(faces) > 0:
					#print("FACE FOUND _ RR")

					face_flag = 1

					frame  = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)
			
					detectionFrame = frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):int(realWidth-int(videoWidth/2)), :]


					sample[idx] = buildGauss(detectionFrame, levels+1)[levels]
				
					freqs, signals = applyFFT(sample, videoFrameRate)
					signals = bandPass(freqs, signals, (0.2, 0.8))
					respiratoryRate = searchFreq(freqs, signals, sample, videoFrameRate)

					#frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):(realWidth-int(videoWidth/2)), :] = outputFrame
					
					idx = (idx + 1) % 10 		

					respRate.append(respiratoryRate)

				else:
					print("Face not found")

			if face_flag == 1:
				l = []
				a = max(respRate)
				b = mean(respRate)
				if b < 0:
					b = 5
				l.append(a)
				l.append(b)

			
				rr = mean(l)
				rr = round(rr,2)
			else:
				rr = "Face not recognised!"



			return(rr)	


		# Webcam Parameters
		realWidth = 320
		realHeight = 240
		videoWidth = 160
		videoHeight = 120
		videoChannels = 3
		videoFrameRate = 15


		# Color Magnification Parameters
		levels = 3
		alpha = 170
		minFrequency = 1.0
		maxFrequency = 2.0
		bufferSize = 150
		bufferIndex = 0

		# Output Display Parameters
		font = cv2.FONT_HERSHEY_SIMPLEX
		loadingTextLocation = (20, 30)
		bpmTextLocation = (videoWidth//2 + 5, 30)
		fontScale = 1
		fontColor = (0,0,0)
		lineType = 2
		boxColor = (0, 255, 0)
		boxWeight = 3

		# Initialize Gaussian Pyramid
		firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
		firstGauss = buildGauss(firstFrame, levels+1)[levels]
		videoGauss = np.zeros((bufferSize, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
		fourierTransformAvg = np.zeros((bufferSize))

		# Bandpass Filter for Specified Frequencies
		frequencies = (1.0*videoFrameRate) * np.arange(bufferSize) / (1.0*bufferSize)
		mask = (frequencies >= minFrequency) & (frequencies <= maxFrequency)

		# Heart Rate Calculation Variables
		bpmCalculationFrequency = 15
		bpmBufferIndex = 0
		bpmBufferSize = 10
		bpmBuffer = np.zeros((bpmBufferSize))
		i = 0
		bpm_values = []
		face_flag = 0
		for j in range(len(video_strings)):
			# convert it to a pil image
			input_img = base64_to_pil_image(video_strings[j])

			input_img = input_img.resize((320,240))

			img  = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)
			gray = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2GRAY)
			
			faces = face_cascade.detectMultiScale(gray, 1.3, 5)
			#faces = [1,2,3]

			if len(faces) > 0:
				face_flag = 1
				#print("FACE FOUND")
				detectionFrame = img[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):int(realWidth-int(videoWidth/2)), :]

				# Construct Gaussian Pyramid
				videoGauss[bufferIndex] = buildGauss(detectionFrame, levels+1)[levels]
				fourierTransform = np.fft.fft(videoGauss, axis=0)
				# Bandpass Filter
				fourierTransform[mask == False] = 0

				# Grab a Pulse
				if bufferIndex % bpmCalculationFrequency == 0:
					i = i + 1
					for buf in range(bufferSize):
						fourierTransformAvg[buf] = np.real(fourierTransform[buf]).mean()
					hz = frequencies[np.argmax(fourierTransformAvg)]
					bpm = 60.0 * hz
					bpmBuffer[bpmBufferIndex] = bpm
					# print("BPM Buffer List: ", bpmBuffer)
					bpmBufferIndex = (bpmBufferIndex + 1) % bpmBufferSize

				# Amplify
				filtered = np.real(np.fft.ifft(fourierTransform, axis=0))
				filtered = filtered * alpha

				# Reconstruct Resulting Frame
				filteredFrame = reconstructFrame(filtered, bufferIndex, levels)
				outputFrame = detectionFrame + filteredFrame
				outputFrame = cv2.convertScaleAbs(outputFrame)

				bufferIndex = (bufferIndex + 1) % bufferSize
				
				if i > bpmBufferSize:
					bpm_values.append(bpmBuffer.mean())
					#print(bpm_values)
			else:
				print("Face not found")

		if face_flag == 1:
			hr = max(bpm_values)
			hr = round(hr)

		else:
			hr = 'Face not found'

		print(hr)

		rr = answer(video_strings)
		print(rr)

		db.child("Appointments").child(user_index).update({"hr":hr,'rr':rr})
		db.child("Consultation").child(user_index).update({"fhr":hr,'frr':rr})
		return (1)
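As a sanity check on the heart-rate loop above (assuming the same videoFrameRate = 15 and bufferSize = 150), FFT bin k corresponds to k * fps / bufferSize Hz, so the 1.0-2.0 Hz mask keeps candidate rates between 60 and 120 BPM in 6-BPM steps:

import numpy as np

videoFrameRate, bufferSize = 15, 150
frequencies = (1.0 * videoFrameRate) * np.arange(bufferSize) / (1.0 * bufferSize)
mask = (frequencies >= 1.0) & (frequencies <= 2.0)
print(frequencies[mask] * 60)   # 60., 66., ..., 120. -- the possible BPM readings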
Example no. 12
0
		def answer(videoStrings):

			sampleLen = 10
			firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
			firstGauss = buildGauss(firstFrame, levels+1)[levels]
			sample = np.zeros((sampleLen, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
		
			idx = 0
			
			respRate = []	

			#pipeline = PipeLine(videoFrameRate)
			face_flag = 0 
			for i in range(len(videoStrings)):
				input_img = base64_to_pil_image(videoStrings[i])

				input_img = input_img.resize((320,240))
				gray = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2GRAY)
				
				faces = face_cascade.detectMultiScale(gray, 1.3, 5)
				#print(faces)
				#faces = [1,2,3]
				#print(len(faces))
				if len(faces) > 0:
					#print("FACE FOUND _ RR")

					face_flag = 1

					frame  = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)
			
					detectionFrame = frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):int(realWidth-int(videoWidth/2)), :]


					sample[idx] = buildGauss(detectionFrame, levels+1)[levels]
				
					freqs, signals = applyFFT(sample, videoFrameRate)
					signals = bandPass(freqs, signals, (0.2, 0.8))
					respiratoryRate = searchFreq(freqs, signals, sample, videoFrameRate)

					#frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):(realWidth-int(videoWidth/2)), :] = outputFrame
					
					idx = (idx + 1) % 10 		

					respRate.append(respiratoryRate)

				else:
					print("Face not found")

			if face_flag == 1:
				l = []
				a = max(respRate)
				b = mean(respRate)
				if b < 0:
					b = 5
				l.append(a)
				l.append(b)

			
				rr = mean(l)
				rr = round(rr,2)
			else:
				rr = "Face not recognised!"



			return(rr)	
Example no. 13
0
    def process_one(self):
        if not self.to_process:
            return

        # input is an ascii string.
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)

        ################## where the hard work is done ############
        # output_img is a PIL image
        output_img = self.makeup_artist.apply_makeup(input_img)
        frame = cv2.cvtColor(np.array(output_img), cv2.COLOR_RGB2BGR)
        cv2.resize(frame, (400, 560), interpolation=cv2.INTER_AREA)  # cv2.resize returns a new array; the result is not assigned, so frame is unchanged

        im = cv2.imread("newmask.png")

        # cv2.rectangle(image, (400, 300), (700, 500), (178, 190, 181), 5)

        # frame = cv2.flip(opencvImage, 2)

        gaze.refresh(frame)

        frame, x, y = gaze.annotated_frame()
        text = ""

        # cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

        left_pupil = gaze.pupil_left_coords()

        right_pupil = gaze.pupil_right_coords()
        print(right_pupil, left_pupil)

        points_cnt = (x, y)

        # print(points_cnt)
        if left_pupil is not None and right_pupil is not None:
            a = left_pupil
            b = right_pupil
            c = points_cnt
            # dist = [(a - c) ** 2 for a, c in zip(a, c)]
            # dist = math.sqrt(sum(dist))
            # print("new method",dist)

            dst_left = distance.euclidean(a, c)
            mm = 0.26458333  # 25.4 mm / 96 px, i.e. millimetres per pixel at an assumed 96 DPI
            dist_left_mm = (dst_left * mm) + 20

            # print(dist_left_mm)
            print("left:::", dist_left_mm)
            dst_right = distance.euclidean(b, c)
            # print(dst_right)
            # print(dst_left)

            dist_right_mm = (dst_right * mm) + 20
            total_pd = dist_right_mm + dist_left_mm
            print("total::", total_pd)
            print("right::", dist_right_mm)

            cv2.putText(frame, "Left PD:  " + str(dist_left_mm) + 'mm',
                        (80, 135), cv2.FONT_HERSHEY_DUPLEX, 0.9, (0, 0, 255),
                        1)
            cv2.putText(frame, "Right PD: " + str(dist_right_mm) + 'mm',
                        (85, 175), cv2.FONT_HERSHEY_DUPLEX, 0.9, (0, 0, 255),
                        1)
            # cv2.putText(frame, "Total PD: " + str(total_pd) + 'mm', (85, 200), cv2.FONT_HERSHEY_DUPLEX, 0.9,
            #             (0, 0, 255), 1)
        # print(frame.shape[1::-1])
        im = cv2.resize(im, frame.shape[1::-1], interpolation=cv2.INTER_AREA)
        dst = cv2.addWeighted(frame, 0.5, im, 0.5, 0)

        # cv2.imshow("Frame",dst)
        # cv2.waitKey(1)
        ##Convert opencv output to pillow image
        img = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
        im_pil = Image.fromarray(img)

        # output_str is a base64 string in ascii
        output_str = pil_image_to_base64(im_pil)

        # convert the base64 string in ascii to base64 string in _bytes_
        self.to_output.append(binascii.a2b_base64(output_str))
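A quick numeric check of the pixel-to-millimetre conversion used above, assuming the constant 0.26458333 is meant as 25.4 mm / 96 px and keeping the snippet's +20 offset (the coordinates below are hypothetical):

from scipy.spatial import distance

mm = 25.4 / 96                    # = 0.26458333..., the constant in the snippet
pupil = (350, 240)                # hypothetical pupil coordinates (px)
centre = (310, 240)               # hypothetical reference point (px)

dst = distance.euclidean(pupil, centre)   # 40.0 px
dist_mm = (dst * mm) + 20                 # 40 * 0.2646 + 20, about 30.58 mm
print(round(dist_mm, 2))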
Example no. 14
0
def process_frame(input):
    img = base64_to_pil_image(input)
    name, score = search(img,simNet.model,db='index.json')
    message = {'Name':name,'Score':score}
    print(message)
    socketio.emit(MESSAGE_TO_CLIENT,message,namespace=NAME_SPACE)
Example no. 15
0
	def get(self, user_index):
		config = {
			"apiKey": "AIzaSyDlmyLHYxZYCdmlI-pzKwkudQ85jdydBJ4",
		    "authDomain": "televital-hack.firebaseapp.com",
		    "databaseURL": "https://televital-hack.firebaseio.com",
		    "projectId": "televital-hack",
		    "storageBucket": "televital-hack.appspot.com",
		    "messagingSenderId": "209026679607",
		    "appId": "1:209026679607:web:68ae56edcb1abae7f290b2",
		    "measurementId": "G-07L3WDKM0H"

		}

		firebase = pyrebase.initialize_app(config)
		db = firebase.database()
		nm = db.child(user_index).get()
		res= nm.val()
		video_frames = res['hrbase64']

		video_strings = video_frames.split(';')
		video_strings = video_strings[3:]

		def buildGauss(frame, levels):
			pyramid = [frame]
			for level in range(levels):
				frame = cv2.pyrDown(frame)
				pyramid.append(frame)
			return pyramid
		def reconstructFrame(pyramid, index, levels):
			fiFrame = pyramid[index]
			for level in range(levels):
				fiFrame = cv2.pyrUp(fiFrame)
			fiFrame = fiFrame[:videoHeight, :videoWidth]
			return fiFrame

		def applyFFT(frames, fps):
			n = frames.shape[0]
			t = np.linspace(0,float(n)/fps, n)
			disp = frames.mean(axis = 0)
			y = frames - disp

			k = np.arange(n)
			T = n/fps
			frq = k/T # two sides frequency range
			freqs = frq[range(n//2)] # one side frequency range

			Y = np.fft.fft(y, axis=0)/n # fft computing and normalization
			signals = Y[range(n//2), :,:]
			
			return freqs, signals

		def bandPass(freqs, signals, freqRange):

			signals[freqs < freqRange[0]] *= 0
			signals[freqs > freqRange[1]] *= 0

			return signals

		def find(condition):
			res, = np.nonzero(np.ravel(condition))
			return res


		def freq_from_crossings(sig, fs):
			"""Estimate frequency by counting zero crossings
    
			"""
			#print(sig)
			# Find all indices right before a rising-edge zero crossing
			indices = find((sig[1:] >= 0) & (sig[:-1] < 0))
			x = sig[1:]
			x = mean(x)

			
			return x

		def searchFreq(freqs, signals, frames, fs):

			curMaximumval = 0
			freMax = 0
			Mi = 0
			Mj = 0
			for i in range(10, signals.shape[1]):
				for j in range(signals.shape[2]):

					idxMaximumval = abs(signals[:,i,j])
					idxMaximumval = np.argmax(idxMaximumval)
					freqMaximumval = freqs[idxMaximumval]
					ampMaximumval = signals[idxMaximumval,i,j]
					c, a = abs(curMaximumval), abs(ampMaximumval)
					if (c < a).any():
						curMaximumval = ampMaximumval
						freMax = freqMaximumval
						Mi = i
						Mj = j
                # print "(%d,%d) -> Freq:%f Amp:%f"%(i,j,freqMaximumval*60, abs(ampMaximumval))

			y = frames[:,Mi, Mj]
			y = y - y.mean()
			fq = freq_from_crossings(y, fs)
			rate_fft = freMax*60
			
			rate_count = round(20+(fq*10))

			if np.isnan(rate_count):
				rate = rate_fft
			elif abs(rate_fft - rate_count) > 20:
				rate = rate_fft
			else:
				rate = rate_count

			return rate

		def answer(videoStrings):

			sampleLength = 10
			firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
			firstGauss = buildGauss(firstFrame, levels+1)[levels]
			sample = np.zeros((sampleLength, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
		
			idx = 0
			
			respRate = []	

			#pipeline = PipeLine(videoFrameRate)
			for i in range(len(videoStrings)):
				input_img = base64_to_pil_image(videoStrings[i])

				input_img = input_img.resize((320,240)) 

				frame  = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)
		
				detectionFrame = frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):int(realWidth-int(videoWidth/2)), :]


				sample[idx] = buildGauss(detectionFrame, levels+1)[levels]
			
				freqs, signals = applyFFT(sample, videoFrameRate)
				signals = bandPass(freqs, signals, (0.2, 0.8))
				respiratoryRate = searchFreq(freqs, signals, sample, videoFrameRate)

				#frame[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):(realWidth-int(videoWidth/2)), :] = outFrame
				
				idx = (idx + 1) % 10 		

				respRate.append(respiratoryRate)

			l = []
			a = max(respRate)
			b = mean(respRate)
			if b < 0:
				b = 5
			l.append(a)
			l.append(b)


			return mean(l)	


		# Webcam Parameters
		realWidth = 320
		realHeight = 240
		videoWidth = 160
		videoHeight = 120
		videoChannels = 3
		videoFrameRate = 15


		# Color Magnification Parameters
		levels = 3
		alpha = 170
		minFrequency = 1.0
		maxFrequency = 2.0
		bufferSize = 150
		bufferIndex = 0

		# Output Display Parameters
		font = cv2.FONT_HERSHEY_SIMPLEX
		loadingTextLocation = (20, 30)
		bpmTextLocation = (videoWidth//2 + 5, 30)
		fontScale = 1
		fontColor = (0,0,0)
		lineType = 2
		boxColor = (0, 255, 0)
		boxWeight = 3

		# Initialize Gaussian Pyramid
		firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
		firstGauss = buildGauss(firstFrame, levels+1)[levels]
		videoGauss = np.zeros((bufferSize, firstGauss.shape[0], firstGauss.shape[1], videoChannels))
		fourierTransformAvg = np.zeros((bufferSize))

		# Bandpass Filter for Specified Frequencies
		frequencies = (1.0*videoFrameRate) * np.arange(bufferSize) / (1.0*bufferSize)
		mask = (frequencies >= minFrequency) & (frequencies <= maxFrequency)

		# Heart Rate Calculation Variables
		bpmCalculationFrequency = 15
		bpmBufferIndex = 0
		bpmBufferSize = 10
		bpmBuffer = np.zeros((bpmBufferSize))
		i = 0
		bpm_values = []
		for j in range(len(video_strings)):
			# convert it to a pil image
			input_img = base64_to_pil_image(video_strings[j])

			input_img = input_img.resize((320,240))

			img  = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)

			detectionFrame = img[int(videoHeight/2):int(realHeight-videoHeight/2), int(videoWidth/2):int(realWidth-int(videoWidth/2)), :]

			# Construct Gaussian Pyramid
			videoGauss[bufferIndex] = buildGauss(detectionFrame, levels+1)[levels]
			fourierTransform = np.fft.fft(videoGauss, axis=0)
			# Bandpass Filter
			fourierTransform[mask == False] = 0

			# Grab a Pulse
			if bufferIndex % bpmCalculationFrequency == 0:
				i = i + 1
				for buf in range(bufferSize):
					fourierTransformAvg[buf] = np.real(fourierTransform[buf]).mean()
				hz = frequencies[np.argmax(fourierTransformAvg)]
				bpm = 60.0 * hz
				bpmBuffer[bpmBufferIndex] = bpm
				# print("BPM Buffer List: ", bpmBuffer)
				bpmBufferIndex = (bpmBufferIndex + 1) % bpmBufferSize

			# Amplify
			filtered = np.real(np.fft.ifft(fourierTransform, axis=0))
			filtered = filtered * alpha

			# Reconstruct Resulting Frame
			fiFrame = reconstructFrame(filtered, bufferIndex, levels)
			outFrame = detectionFrame + fiFrame
			outFrame = cv2.convertScaleAbs(outFrame)

			bufferIndex = (bufferIndex + 1) % bufferSize
			
			if i > bpmBufferSize:
				bpm_values.append(bpmBuffer.mean())



		# take the maximum val of the calculated heart rates
		hr = max(bpm_values)
		print(hr)

		# call the function to calculate respiratory rate
		rr = answer(video_strings)
		print(rr)
		

		# push the data to database
		db.child(user_index).update({"hr":hr,'rr':rr})
		return (1)
Example no. 16
0
def hr():
    json_data = request.json
    video_frames = itemgetter("frames")(json_data)
    video_strings = video_frames.split(";")

    video_strings2 = video_strings[1:]
    print(len(video_strings))
    video_strings = video_strings2 * 20

    face_found_flag = 0
    for index, i in enumerate(video_strings2):
        imgdata = base64.b64decode(i)
        filename = str(index) + "some_image.jpg"
        with open(filename, "wb") as f:
            f.write(imgdata)
        face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

        img = cv2.imread(filename)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        if len(faces) > 0:
            face_found_flag = 1
            break

    face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

    def buildGauss(frame, levels):
        pyramid = [frame]
        for level in range(levels):
            frame = cv2.pyrDown(frame)
            pyramid.append(frame)
        return pyramid

    def reconstructFrame(pyramid, index, levels):
        filteredFrame = pyramid[index]
        for level in range(levels):
            filteredFrame = cv2.pyrUp(filteredFrame)
        filteredFrame = filteredFrame[:videoHeight, :videoWidth]
        return filteredFrame

    def applyFFT(frames, fps):
        n = frames.shape[0]
        t = np.linspace(0, float(n) / fps, n)
        disp = frames.mean(axis=0)
        y = frames - disp

        k = np.arange(n)
        T = n / fps
        frq = k / T
        freqs = frq[range(n // 2)]

        Y = np.fft.fft(y, axis=0) / n
        signals = Y[range(n // 2), :, :]

        return freqs, signals

    def bandPass(freqs, signals, freqRange):

        signals[freqs < freqRange[0]] *= 0
        signals[freqs > freqRange[1]] *= 0

        return signals

    def find(condition):
        (res,) = np.nonzero(np.ravel(condition))
        return res

    def freq_from_crossings(sig, fs):
        # locate rising zero crossings (result not used below); the function
        # actually returns the mean of sig[1:]
        indices = find((sig[1:] >= 0) & (sig[:-1] < 0))
        x = sig[1:]
        x = mean(x)

        return x

    def searchFreq(freqs, signals, frames, fs):

        curMax = 0
        freMax = 0
        Mi = 0
        Mj = 0
        for i in range(10, signals.shape[1]):
            for j in range(signals.shape[2]):

                idxMax = abs(signals[:, i, j])
                idxMax = np.argmax(idxMax)
                freqMax = freqs[idxMax]
                ampMax = signals[idxMax, i, j]
                c, a = abs(curMax), abs(ampMax)
                if (c < a).any():
                    curMax = ampMax
                    freMax = freqMax
                    Mi = i
                    Mj = j

        y = frames[:, Mi, Mj]
        y = y - y.mean()
        fq = freq_from_crossings(y, fs)
        rate_fft = freMax * 60

        rate_count = round(20 + (fq * 10))

        if np.isnan(rate_count):
            rate = rate_fft
        elif abs(rate_fft - rate_count) > 20:
            rate = rate_fft
        else:
            rate = rate_count

        return rate

    def answer(videoStrings):

        sampleLen = 10
        firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
        firstGauss = buildGauss(firstFrame, levels + 1)[levels]
        sample = np.zeros(
            (sampleLen, firstGauss.shape[0], firstGauss.shape[1], videoChannels)
        )

        idx = 0

        respRate = []

        face_flag = 0
        for i in range(len(videoStrings)):

            input_img = base64_to_pil_image(videoStrings[i])

            input_img = input_img.resize((320, 240))

            if face_found_flag == 1:
                # print("FACE FOUND _ RR")

                face_flag = 1

                frame = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)

                detectionFrame = frame[
                    int(videoHeight / 2) : int(realHeight - videoHeight / 2),
                    int(videoWidth / 2) : int(realWidth - int(videoWidth / 2)),
                    :,
                ]

                sample[idx] = buildGauss(detectionFrame, levels + 1)[levels]

                freqs, signals = applyFFT(sample, videoFrameRate)
                signals = bandPass(freqs, signals, (0.2, 0.8))
                respiratoryRate = searchFreq(freqs, signals, sample, videoFrameRate)

                idx = (idx + 1) % 10

                respRate.append(respiratoryRate)

            else:
                print("Face not found")

        if face_flag == 1:
            l = []
            a = max(respRate)
            b = mean(respRate)
            if b < 0:
                b = 5
            l.append(a)
            l.append(b)

            rr = mean(l)
            rr = round(rr, 2)
        else:
            rr = "Face not recognised!"

        return rr
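
    # Note: the 0.2–0.8 Hz pass band used in answer() above corresponds to
    # roughly 12–48 breaths per minute (rate in breaths/min = Hz * 60).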

    # Webcam Parameters
    realWidth = 320
    realHeight = 240
    videoWidth = 160
    videoHeight = 120
    videoChannels = 3
    videoFrameRate = 15

    # Color Magnification Parameters
    levels = 3
    alpha = 170
    minFrequency = 1.0
    maxFrequency = 2.0
    bufferSize = 150
    bufferIndex = 0

    # Initialize Gaussian Pyramid
    firstFrame = np.zeros((videoHeight, videoWidth, videoChannels))
    firstGauss = buildGauss(firstFrame, levels + 1)[levels]
    videoGauss = np.zeros(
        (bufferSize, firstGauss.shape[0], firstGauss.shape[1], videoChannels)
    )
    fourierTransformAvg = np.zeros((bufferSize))

    # Bandpass Filter for Specified Frequencies
    frequencies = (1.0 * videoFrameRate) * np.arange(bufferSize) / (1.0 * bufferSize)
    mask = (frequencies >= minFrequency) & (frequencies <= maxFrequency)

    # Heart Rate Calculation Variables
    bpmCalculationFrequency = 15
    bpmBufferIndex = 0
    bpmBufferSize = 10
    bpmBuffer = np.zeros((bpmBufferSize))
    i = 0
    bpm_values = []
    face_flag = 0
    for j in range(len(video_strings)):
        # convert it to a pil image
        input_img = base64_to_pil_image(video_strings[j])

        input_img = input_img.resize((320, 240))

        img = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)

        if face_found_flag == 1:
            face_flag = 1
            # print("FACE FOUND")
            detectionFrame = img[
                int(videoHeight / 2) : int(realHeight - videoHeight / 2),
                int(videoWidth / 2) : int(realWidth - int(videoWidth / 2)),
                :,
            ]

            # Construct Gaussian Pyramid
            videoGauss[bufferIndex] = buildGauss(detectionFrame, levels + 1)[levels]
            fourierTransform = np.fft.fft(videoGauss, axis=0)
            # Bandpass Filter
            fourierTransform[~mask] = 0

            # Grab a Pulse
            if bufferIndex % bpmCalculationFrequency == 0:
                i = i + 1
                for buf in range(bufferSize):
                    fourierTransformAvg[buf] = np.real(fourierTransform[buf]).mean()
                hz = frequencies[np.argmax(fourierTransformAvg)]
                bpm = 60.0 * hz
                bpmBuffer[bpmBufferIndex] = bpm

                bpmBufferIndex = (bpmBufferIndex + 1) % bpmBufferSize

            # Amplify
            filtered = np.real(np.fft.ifft(fourierTransform, axis=0))
            filtered = filtered * alpha

            # Reconstruct Resulting Frame
            filteredFrame = reconstructFrame(filtered, bufferIndex, levels)
            outputFrame = detectionFrame + filteredFrame
            outputFrame = cv2.convertScaleAbs(outputFrame)

            bufferIndex = (bufferIndex + 1) % bufferSize
            print(f"IIIIIIII is equal to {i} and bpmBufferSize is {bpmBufferSize}")
            if i > bpmBufferSize:
                bpm_values.append(bpmBuffer.mean())
                print("bpm_values: ")
                print(bpm_values)

        else:
            print("Face not found")

    if face_flag == 1:
        hr = max(bpm_values)
        hr = round(hr)

    else:
        hr = "Face not found"

    print(hr)

    rr = answer(video_strings)
    print(rr)

    result = str(hr) + " " + str(rr)
    return result
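
This route expects a JSON body with a single "frames" field: base64-encoded frames joined by ";". A minimal client sketch, assuming the view is exposed at /hr on a local Flask server (the URL, route path and frames/ directory are assumptions, not taken from the example); note that the handler drops the first split element, so the payload leads with a separator:

import base64
import glob
import requests

frames = []
for path in sorted(glob.glob("frames/*.jpg")):  # hypothetical frame dump
    with open(path, "rb") as f:
        frames.append(base64.b64encode(f.read()).decode("ascii"))

# lead with ";" because the handler discards the first element of the split
payload = {"frames": ";" + ";".join(frames)}
resp = requests.post("http://localhost:5000/hr", json=payload)  # URL is an assumption
print(resp.text)  # e.g. "72 16.43" – heart rate, then respiratory rate
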
Example no. 17
def spo():
    count = 0
    i = 0
    A = 100
    B = 5
    json_data = request.json
    video_frames = itemgetter("frames")(json_data)
    video_strings = video_frames.split(";")
    video_strings = video_strings[0:]
    spresult = 0
    spcount = 0
    result = 0
    length = len(video_strings)
    print("The no of video_strings: " + str(length))
    for w in range(len(video_strings)):

        input_img = base64_to_pil_image(video_strings[w])

        input_img = input_img.resize((640, 480))

        img = cv2.cvtColor(np.array(input_img), cv2.COLOR_BGR2RGB)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        avg = 0
        count = 0
        sumres = 0
        c1 = 0
        res = 0
        rows, cols = gray.shape
        for i in range(rows):
            for j in range(cols):
                temp = gray[i, j]
                avg = avg + temp
                c1 += 1
        # fixed region of interest assumed to cover the fingertip
        for i in range(285, 386):
            for j in range(177, 351):
                k = gray[i, j]
                count += 1
                sumres = sumres + k

        avg = avg / c1
        sumres = sumres / count
        avg = round(avg, 0)
        sumres = round(sumres, 0)
        print("photo avg: " + str(avg))
        print("Fin avg:" + str(sumres))
        diff = avg - sumres
        diff = abs(diff)

        if avg >= 140:
            if sumres >= 140 and sumres <= 200:
                # print("Hand detected")
                res = 1

        elif avg >= 120 and avg <= 139:
            if sumres >= 130 and sumres <= 200:
                res = 1
            else:
                res = 0

        elif avg >= 90 and avg <= 119:
            if sumres >= 90 and sumres <= 180:
                res = 1

            else:
                res = 0

        else:
            res = 0

        print("res", str(res))
        # res = 1
        if res == 1:

            # Red channel operations
            red_channel = img[:, :, 2]
            mean_red = np.mean(red_channel)

            std_red = np.std(red_channel)

            red_final = std_red / mean_red

            # Blue channel operations
            blue_channel = img[:, :, 0]
            mean_blue = np.mean(blue_channel)

            std_blue = np.std(blue_channel)

            blue_final = std_blue / mean_blue

            sp = A - (B * (red_final / blue_final))
            sp = round(sp, 2)
            spresult = spresult + sp
            spcount += 1

        else:
            sp = "Finger not found"

        result = result + res

    result = result / length

    if result > 0.25:
        spresult = spresult / spcount
        spresult = round(spresult, 2)
    else:
        spresult = "Finger not recognised"

    return spresult
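
The per-frame SpO2 estimate above is the ratio-of-ratios formula SpO2 ≈ A − B · (σ_red/μ_red) / (σ_blue/μ_blue), with A = 100 and B = 5 as in the example. A vectorized sketch of just that step, assuming a BGR-ordered frame like the one built above (the helper name is illustrative):

import numpy as np

def spo2_from_frame(frame_bgr, A=100.0, B=5.0):
    # ratio of the AC (std) to the DC (mean) component for each channel
    red = frame_bgr[:, :, 2].astype(np.float64)
    blue = frame_bgr[:, :, 0].astype(np.float64)
    red_ratio = red.std() / red.mean()
    blue_ratio = blue.std() / blue.mean()
    return round(A - B * (red_ratio / blue_ratio), 2)
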
Example no. 18
    def process_one(self):
        if not self.to_process:
            return

        # input is an ascii string.
        input_str = self.to_process.pop(0)

        # convert it to a pil image
        input_img = base64_to_pil_image(input_str)

        ################## where the hard work is done ############
        # output_img is a PIL image
        # output_img = input_img #self.makeup_artist.apply_makeup(input_img)

        # output_str is a base64 string in ascii
        # output_str = pil_image_to_base64(output_img)

        # convert the base64 string in ascii to a base64 string in _bytes_
        # self.to_output.append(binascii.a2b_base64(output_str))

        open_cv_image = np.array(input_img)
        # Convert RGB to BGR
        open_cv_image = open_cv_image[:, :, ::-1].copy()

        print("Processing frame...")

        face_patches, padded_bounding_boxes, landmarks = detect_and_align.align_image(
            open_cv_image, self.pnet, self.rnet, self.onet)
        matching_id = "Unknown"
        if len(face_patches) > 0:
            face_patches = np.stack(face_patches)
            feed_dict = {
                self.images_placeholder: face_patches,
                self.phase_train_placeholder: False
            }
            embs = self.sess.run(self.embeddings, feed_dict=feed_dict)

            print('Matches in frame:')
            for i in range(len(embs)):
                bb = padded_bounding_boxes[i]

                matching_id, dist = self.find_matching_id(embs[i, :])
                if matching_id:
                    print('Hi %s! Distance: %1.4f' % (matching_id, dist))
                else:
                    matching_id = 'Unknown'
                    print('Unknown! Couldn\'t find a match.')

                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(open_cv_image, matching_id, (bb[0], bb[3]), font,
                            1, (255, 255, 255), 1, cv2.LINE_AA)

                cv2.rectangle(open_cv_image, (bb[0], bb[1]), (bb[2], bb[3]),
                              (255, 0, 0), 2)
        else:
            print("No face patches")

        match_dict = {}
        match_dict[matching_id] = open_cv_image
        # add the matching_id => annotated frame mapping to the output queue
        self.to_output.append(match_dict)
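
find_matching_id is not shown in this snippet; in face-recognition pipelines of this kind it typically compares the new embedding against a gallery of enrolled embeddings by Euclidean distance and accepts the closest match under a threshold. A hypothetical sketch, assuming gallery attributes self.id_names / self.id_embeddings and a distance threshold of 1.0 (none of which appear in the example):

import numpy as np

def find_matching_id(self, embedding, threshold=1.0):
    # self.id_names / self.id_embeddings are assumed to hold the enrolled gallery
    best_id, best_dist = None, float("inf")
    for name, known_emb in zip(self.id_names, self.id_embeddings):
        dist = np.linalg.norm(embedding - known_emb)
        if dist < best_dist:
            best_id, best_dist = name, dist
    if best_dist > threshold:
        best_id = None  # the caller treats a falsy id as "Unknown"
    return best_id, best_dist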