def home():
    """Flask index view: on POST, store an uploaded image and run the
    recognition models on it; on GET (or invalid POST), render the bare page.

    Returns a redirect back to the form when the upload is missing/empty,
    otherwise ``index.html`` populated with name/age/gender predictions.
    """
    if request.method == 'POST':
        # The browser may POST without a file part at all.
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # A form submitted with no selection arrives as an empty filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file:
            # Sanitize the client-supplied name before using it as a path.
            filename = secure_filename(file.filename)
            # Save once, then copy to the second folder (was: two joins of
            # the same path; dead commented-out save/read code removed).
            saved_path = os.path.join(app.config['UPLOAD_FOLDER2'], filename)
            file.save(saved_path)
            shutil.copy(saved_path,
                        os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # Run the three recognizers; each returns (label, probability).
            name, probability_name = main(filename)
            age_range, probability_age = age(filename)
            gender_range, probability_gender = gender(filename)
            text = name + ", " + str(probability_name)
            age_text = age_range + ", " + str(probability_age)
            gender_text = gender_range + ", " + str(probability_gender)
            return render_template('index.html', imgURL=filename,
                                   text=text, age_text=age_text,
                                   gender_text=gender_text)
    return render_template('index.html')
def attendance(t_id, c_id):
    """Mark every recognized student present for class ``c_id`` / teacher
    ``t_id`` on today's date, then redirect to the home page.

    Fixes vs. original: the cursor is now closed even if an insert raises
    (try/finally), all rows go in via one ``executemany`` with a single
    commit (atomic — previously each row was committed separately inside
    the loop), and the loop variable no longer shadows builtin ``id``.
    """
    students = recognize.main()
    print(students)
    # NOTE(review): `now` is a module-level value, so this date is fixed at
    # import time and goes stale across midnight — consider datetime.now()
    # here; TODO confirm intent before changing.
    formatted_date = now.strftime('%Y-%m-%d')
    cur = mysql.connection.cursor()
    try:
        rows = [(c_id, t_id, student_id, 'p', formatted_date)
                for student_id in students]
        cur.executemany(
            "insert into attendance values (%s,%s,%s,%s,%s)", rows)
        mysql.connection.commit()
    finally:
        cur.close()
    return redirect('/')
# NOTE(review): this chunk starts mid-expression — the call that opens before
# `dict(` (presumably an environ/config update) is outside this view, which is
# why there is an extra closing paren after the dict below.
dict(VOICEKIT_API_KEY=open(args.api_key).read().strip(),
     VOICEKIT_SECRET_KEY=open(args.secret_key).read().strip()))
# Batch-transcribe every utterance listed in the input manifest.
transcript = []
for t in json.load(open(args.input_path)):
    sample_rate, signal = scipy.io.wavfile.read(t['audio_path'])
    # Only 16-bit PCM at 8 kHz or 16 kHz is accepted downstream.
    assert signal.dtype == 'int16' and sample_rate in [8_000, 16_000]
    # recognize.main() is a CLI entry point, so drive it by faking argv.
    sys.argv = [
        'recognize.py', t['audio_path'], '--rate', str(sample_rate),
        '--do_not_perform_vad', '--encoding', 'LINEAR16',
        '--num_channels', '1'
    ]
    # Capture everything the recognizer prints instead of letting it hit
    # the real stdout.
    stdout = io.StringIO()
    with contextlib.redirect_stdout(stdout):
        recognize.main()
    # Scrape the hypothesis out of the printed "Transcription ..." lines.
    hyp = ' '.join(
        line.replace('Transcription ', '')
        for line in stdout.getvalue().splitlines()
        if line.startswith('Transcription '))
    transcript.append(dict(t, hyp=hyp))
# Write <input basename>.<vendor>.json next to the other outputs and report
# its path on stdout.
transcript_path = os.path.join(
    args.output_path,
    os.path.basename(args.input_path) + f'.{args.vendor}.json')
json.dump(transcript, open(transcript_path, 'w'),
          ensure_ascii=False, indent=2, sort_keys=True)
print(transcript_path)
def trans_compute(audio_list):
    """Run the speech recognizer over ``audio_list`` and return its prediction.

    Bundles the module-level TF session/graph handles into the init tuple
    that ``recognize.main`` expects.
    """
    session_bundle = (
        speech_sess, input_x, output_y, relu, LM_model, alphabet, datagen)
    return recognize.main(audio_list, session_bundle)
def dodaj_gestu(self):
    """Run the add-gesture workflow (module imported locally, as before)."""
    import add_gesture
    add_gesture.main()
def prepoznaj(self):
    """Run the gesture-recognition workflow (module imported locally, as before)."""
    import recognize
    recognize.main()