Example #1
def remove():

    start = time.time()

    if 'file' not in request.files:
        return jsonify({'error': 'missing file'}), 400

    filename = request.files['file'].filename
    if '.' not in filename or filename.rsplit(
            '.', 1)[1].lower() not in ["jpg", "png", "jpeg"]:
        return jsonify({'error': 'invalid file format'}), 400

    data = request.files['file'].read()

    if len(data) == 0:
        return jsonify({'error': 'empty image'}), 400

    img = Image.open(io.BytesIO(data))

    # run the segmentation model, then scale the mask back to the input size
    output = detect.predict(net, np.array(img))
    output = output.resize(img.size, resample=Image.BILINEAR)

    # composite the original image over a plain white background
    # (the SCB brand background would be (83, 40, 130))
    empty_img = Image.new("RGBA", img.size, (255, 255, 255))
    new_img = Image.composite(img, empty_img, output.convert("L"))

    buffer = io.BytesIO()
    new_img.save(buffer, "PNG")
    buffer.seek(0)

    logging.info(f" Predicted in {time.time() - start:.2f} sec")

    return f"data:image/png;base64,{base64.b64encode(buffer.getvalue()).decode()}"
Example #2
def get_img():
    # grab the first image from the save directory
    files = os.listdir('./imgssave/')
    path = "./imgssave/" + files[0]
    frame = cv2.imread(path, cv2.IMREAD_COLOR)
    try:
        (locs, preds) = predict(frame, faceNet, model)
        for (box, pred) in zip(locs, preds):
            (startX, startY, endX, endY) = box
            cla = np.argmax(pred[0])
            label = "Mask" if cla == 0 else "No Mask"
            color = (0, 255, 0) if cla == 0 else (0, 0, 255)

            # include the probability in the label
            label = "{}: {:.2f}%".format(label, max(pred[0]) * 100)
            cv2.putText(frame, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
        # remove previously detected images, then save the annotated frame
        try:
            for i in os.listdir('./static/detectedimgs/'):
                os.remove('./static/detectedimgs/' + i)
        except OSError:
            pass

        cv2.imwrite('./static/detectedimgs/detect.jpg', frame)
    except Exception:
        # detection failures are silently ignored
        pass
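os.listdir returns entries in arbitrary order, so files[0] is not necessarily the latest upload. A small sketch of picking the most recently modified file instead (the assumption that the newest upload is the one wanted is mine; the directory name is taken from the example):

import os

save_dir = './imgssave/'
# choose the file with the latest modification time rather than an arbitrary one
newest = max((os.path.join(save_dir, name) for name in os.listdir(save_dir)),
             key=os.path.getmtime)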
Example #3
def process(filename):
    new_file = os.path.join(os.getcwd(), 'processed',
                            filename.split('.')[0] + '.wav')
    filename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    # convert the upload to 16 kHz mono PCM WAV; .read() blocks until ffmpeg
    # exits, otherwise new_file may not exist yet when it is used below
    os.popen(
        'ffmpeg -i %s -loglevel 0 -ac 1 -ar 16000 -acodec pcm_s16le -y %s' %
        (filename, new_file)).read()
    lang = detect.predict(new_file)
    text = transcribe.transcribe(model_path, alphabet_path, lm_path, trie_path,
                                 new_file)
    if text == "":
        return redirect(url_for('index1'))
    else:
        session['error'] = ''
        print(text)
        with open('input.txt', 'w') as f:
            f.write(text)
        cmd = 'python -m nmt.nmt.nmt --out_dir=../eng_hi_model --inference_input_file=input.txt --inference_output_file=translated.txt'
        #print(cmd)
        # run the English->Hindi NMT model, silencing its console output
        with open(os.devnull, 'w') as FNULL:
            retcode = subprocess.call(cmd.split(' '),
                                      stdout=FNULL,
                                      stderr=subprocess.STDOUT)
        with open('translated.txt', encoding='utf-8') as out_file:
            trans = out_file.readline()
        if '<unk>' in trans:
            return redirect(url_for('index1'))
    print(trans)
    save_hindi_audio(trans)
    return render_template("next.html", lang=lang, text=text, trans=trans)
Example #4
def pred():
    plot_url0 = None  # avoid a NameError when the request is a GET
    if request.method == 'POST':
        data = request.files['file'].read()
        img = Image.open(io.BytesIO(data))

        # run the segmentation model and scale the mask back to the input size
        output = detect.predict(net, np.array(img))
        output = output.resize(img.size, resample=Image.BILINEAR)

        # composite the original image over a transparent background
        empty_img = Image.new("RGBA", img.size, 0)
        new_img = Image.composite(img, empty_img, output.convert("L"))

        # render the result to a PNG and embed it as base64 for the template
        img_x0 = BytesIO()
        plt.imshow(new_img)
        plt.savefig(img_x0, format='png')
        plt.close()
        img_x0.seek(0)
        plot_url0 = base64.b64encode(img_x0.getvalue()).decode('utf8')

    return render_template('pred.html', plot_url0=plot_url0)
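Routing the composite through matplotlib adds figure margins and an extra rasterization step. If the goal is simply to hand the composited PNG to the template, the buffer approach from Example #1 works here as well (a sketch; new_img is the composite built above):

import base64
import io

# encode the PIL composite directly, with no matplotlib figure in between
buffer = io.BytesIO()
new_img.save(buffer, 'PNG')
plot_url0 = base64.b64encode(buffer.getvalue()).decode('utf8')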
Example #5
File: app.py  Project: Glasgow19/team-11
def image():
    global global_time
    if request.method == 'GET':
        # only POSTed images are handled
        return '', 405
    # assume the posted image is a jpg, as phones capture jpg;
    # throttle to at most one prediction every 3 seconds
    new_time = time.time()
    if (new_time - global_time) > 3:
        data = request.get_json()
        img = data['image']
        img = base64.b64decode(img)
        wh, detections = predict(img, is_file=True)
        global_time = new_time
        return jsonify({"response": describe(wh[0], wh[1], detections)})
    return jsonify({"response": " Too many requests..."})
Example #6
    def get_frame(self):
        try:
            ret, frame = self.video.read()
            if not ret:
                return None  # no frame available from the camera
            now = time.time()
            (locs, preds) = predict(frame, self.faceNet, self.model)
            for (box, pred) in zip(locs, preds):
                (startX, startY, endX, endY) = box
                cla = np.argmax(pred[0])
                label = "Mask" if cla == 0 else "No Mask"
                color = (0, 255, 0) if cla == 0 else (0, 0, 255)

                # include the probability in the label
                label = "{}: {:.2f}%".format(label, max(pred[0]) * 100)

                cv2.putText(frame, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.line(frame, (startX, startY), (startX, startY + 25), color,
                         2)
                cv2.line(frame, (startX, startY), (startX + 25, startY), color,
                         2)
                cv2.line(frame, (endX, startY), (endX, startY + 25), color, 2)
                cv2.line(frame, (endX, startY), (endX - 25, startY), color, 2)
                cv2.line(frame, (startX, endY), (startX, endY - 25), color, 2)
                cv2.line(frame, (startX, endY), (startX + 25, endY), color, 2)
                cv2.line(frame, (endX, endY), (endX, endY - 25), color, 2)
                cv2.line(frame, (endX, endY), (endX - 25, endY), color, 2)

            #  cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
            (hei, wid) = frame.shape[:2]
            # fps=cap.get(cv2.CAP_PROP_FPS)
            end = time.time()
            f = 1 / (end - now)
            FPS = 'FPS : ' + str(math.ceil(f))
            cv2.putText(frame, str(FPS), (0, hei - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 1)
            no_faces = 'No. of faces in video   : ' + str(len(locs))
            cv2.putText(frame, str(no_faces), (80, hei - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 1)
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tobytes()
        except Exception:
            # return None when the camera read or detection fails
            return None
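get_frame returns one JPEG-encoded frame at a time, which is the building block of the usual Flask MJPEG streaming pattern. A sketch of how such a camera class is typically wired into a route; VideoCamera stands for the class that defines get_frame above and, like the route name, is an assumption:

from flask import Flask, Response

app = Flask(__name__)


def gen(camera):
    # wrap each JPEG frame in a multipart boundary for continuous streaming
    while True:
        frame = camera.get_frame()
        if frame is None:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/video_feed')
def video_feed():
    # VideoCamera is assumed to be the class whose get_frame method is shown above
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')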