# Imports assumed from the enclosing module (this is a class-method excerpt):
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

import inference


def __init__(self):
    self.inference_model = inference.Model()
    # Default evaluation metrics, keyed by name.
    self.default_metrics_dict = {
        'accuracy': accuracy_score,
        'f1': f1_score,
        'recall': recall_score,
        'precision': precision_score,
    }
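# A minimal usage sketch (assumption, not in the original excerpt): a
# hypothetical `evaluate` method applying the metrics dict above to labels
# and predictions.
def evaluate(self, y_true, y_pred):
    results = {}
    for name, metric_fn in self.default_metrics_dict.items():
        if name == 'accuracy':
            results[name] = metric_fn(y_true, y_pred)
        else:
            # f1/recall/precision need an averaging mode for multi-class
            # labels; 'macro' is an assumption here.
            results[name] = metric_fn(y_true, y_pred, average='macro')
    return results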
import numpy as np
import matplotlib.pyplot as plt
import tkinter as Tk

import gui

# itfc, dataset and args (parsed command-line arguments) are assumed to be
# initialized earlier in the script.
cnt = 0
repeat_action = False
filename = None
data = None
cnn_model = None
last_operation = None
EMPTY = np.array([])

gui = gui.GUI(interface=itfc, dataset=dataset)

# Load the CNN model only when one is available and we are not in browser mode.
if dataset.model and not args.browser:
    import inference
    cnn_model = inference.Model(dataset)

root = Tk.Tk()
if args.fullscreen_mode:
    root.wm_title("")
else:
    root.wm_title("Oscilloscope")

# Browser mode uses a slightly narrower figure.
if args.browser:
    fig, ax = plt.subplots(1, 1, figsize=(10, 4))
else:
    fig, ax = plt.subplots(1, 1, figsize=(11, 4))
fig.subplots_adjust(bottom=0.15)

frame = Tk.Frame(master=root)
frame_row0 = Tk.Frame(master=frame)
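# Sketch (assumption, not in the original snippet): the figure created above is
# typically embedded into the Tk frame via matplotlib's TkAgg backend, roughly:
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

canvas = FigureCanvasTkAgg(fig, master=frame)
canvas.draw()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=True)
frame_row0.pack(side=Tk.TOP)
frame.pack()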
import argparse

from flask import Flask, jsonify, request

import inference

__author__ = 'Junior Teudjio'

app = Flask(__name__)
inference_model = inference.Model()


@app.route('/predict', methods=['POST', 'GET'])
def predict():
    request_args = request.args
    prediction = inference_model.predict(image_path=request_args['image_path'],
                                         model_type=request_args['model_type'])
    return jsonify(prediction)


def _setup_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=8383)
    parser.add_argument('--host', type=str, default='0.0.0.0')
    return parser.parse_args()


if __name__ == '__main__':
    args = _setup_args()
    app.run(port=args.port, host=args.host)
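# Example client call (sketch): assumes the server above is running on the
# default host/port; the image path and model type values are placeholders.
import requests

resp = requests.get('http://localhost:8383/predict',
                    params={'image_path': '/path/to/image.jpg',
                            'model_type': 'cnn'})
print(resp.json())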
import datetime
import queue
import sys

import numpy
import sounddevice

import yamnet


def main():
    parser, args = parse()

    if args.list_devices:
        print(sounddevice.query_devices())
        parser.exit(0)
    if args.samplerate is None:
        device_info = sounddevice.query_devices(args.device, 'input')
        args.samplerate = device_info['default_samplerate']
    mapping = [c - 1 for c in args.channels]  # Channel numbers start with 1

    q = queue.Queue()

    def audio_callback(indata, frames, time, status):
        """This is called (from a separate thread) for each audio block."""
        if status:
            print(status, file=sys.stderr)
        # Fancy indexing with mapping creates a (necessary!) copy:
        q.put(indata[:, mapping])

    stream = sounddevice.InputStream(
        device=args.device,
        channels=max(args.channels),
        samplerate=args.samplerate,
        callback=audio_callback)
    print('stream open', args.device, stream)

    # YAMNet operates on 0.960 s windows of 16 kHz mono audio; the buffer
    # keeps a small 5% margin on top of that.
    assert args.samplerate == 16000
    n_samples = int(args.samplerate * 0.960 * 1.05)
    hop_length = n_samples * (1 - args.overlap)
    new_samples = 0
    audio_buffer = numpy.zeros(shape=(n_samples,))

    model = yamnet.Model()

    with stream:
        while True:
            data = numpy.squeeze(q.get())

            # Slide the window: shift existing samples left, dropping the
            # oldest, then write the new block at the end. (The shift must be
            # negative; a positive shift would scramble the buffer whenever
            # the block is smaller than the buffer.)
            audio_buffer = numpy.roll(audio_buffer, -len(data), axis=0)
            audio_buffer[len(audio_buffer) - len(data):] = data

            # Classify once enough new samples have arrived for a new hop.
            new_samples += len(data)
            if new_samples >= hop_length:
                t = datetime.datetime.now()
                new_samples = 0
                waveform = audio_buffer

                with model.graph.as_default():
                    x = numpy.expand_dims(waveform, 0)
                    scores, _spec, embeddings = model.yamnet.predict(
                        x, steps=1, batch_size=1)

                # Report the highest-scoring classes and their scores.
                prediction = numpy.mean(scores, axis=0)
                top_n = 3
                top = numpy.argsort(prediction)[::-1][:top_n]

                def format_pred(i):
                    return '  {:12s}: {:.3f}'.format(model.class_names[i],
                                                     prediction[i])

                if numpy.max(prediction) > 0.3:
                    print('classify', t)
                    print('\n'.join(format_pred(i) for i in top))

    print('stopped')
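# Worked example of the sliding-window arithmetic above (sketch): at 16 kHz the
# buffer holds int(16000 * 0.960 * 1.05) = 16128 samples, so with overlap=0.5 a
# classification runs every 8064 new samples (about half a second of audio).
import numpy

buf = numpy.zeros(6)
for block in (numpy.array([1., 2.]), numpy.array([3., 4.])):
    buf = numpy.roll(buf, -len(block))   # drop the oldest samples
    buf[len(buf) - len(block):] = block  # append the newest block
print(buf)  # [0. 0. 1. 2. 3. 4.] -- oldest to newest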