def experiment():
    params = Params.Params()
    df_error = pickle.load(open('df_error.p', 'rb'))
    df_state = pickle.load(open('df_state.p', 'rb'))
    model = False

    # Train on every shape except circle_4, which is held out.
    trainFunctions = [x for x in params.functions if x.name not in ['circle_4']]
    # trainFunctions = [x for x in params.functions if x.name in ['elipse']]

    df_state, scaler_state = scale_data(df_state, trainFunctions)
    df_error, scaler_error = scale_data(df_error, trainFunctions)
    pickle.dump(scaler_state, open("scaler_state.p", "wb"))
    pickle.dump(scaler_error, open("scaler_error.p", "wb"))

    # Incrementally train one LSTM on the state data of every shape ...
    for function in trainFunctions:
        name = function.name
        value = df_state[name]
        print("Training State for {} shape".format(name))
        model = train_lstm(value.to_numpy(), 'state', model)

    # ... then reset and train a second LSTM on the error data.
    model = False
    for function in trainFunctions:
        name = function.name
        value = df_error[name]
        print("Training Error for {} shape".format(name))
        model = train_lstm(value.to_numpy(), 'error', model)
def main():
    import params
    import sys

    pa = params.Params()

    # pa.simu_len = 1000  # 1000
    # pa.num_ex = 100  # 100
    # pa.num_nw = 10
    # pa.num_seq_per_batch = 20
    # pa.output_freq = 50
    # # pa.max_nw_size = 5
    # # pa.job_len = 5
    # pa.new_job_rate = 0.3
    # pa.episode_max_length = 10000  # 2000
    # pa.compute_dependent_parameters()

    pg_resume = None
    # pg_resume = 'data/tmp_450.pkl'

    render = False

    launch(pa, pg_resume, render, repre='image', end='all_done')
def collect(self):
    p = params.Params()
    allShapes = [Shape(x) for x in p.functions]
    shapesData = pd.Series(dtype='object')
    # Trace every shape and collect its data, keyed by shape name.
    for shape in allShapes:
        data, _ = shape.trace()
        shapesData = shapesData.append(pd.Series({shape.name: data}))
    return shapesData
def train(self):
    p = params.Params()
    data = pkl.load(open('hessian_data.p', 'rb'))
    nfeatures = 21
    rootpath = "/tmp/"
    scaler = MinMaxScaler(feature_range=(-1, 1))

    # One stateful LSTM shared across all shapes: 21 input features in,
    # the 4 hessian entries out.
    model = Sequential()
    model.add(LSTM(nfeatures + 1,
                   batch_input_shape=(p.batch_size, 1, nfeatures),
                   stateful=True))
    model.add(Dense(4))
    model.compile(loss='mean_squared_error', optimizer='adam')

    for function in p.functions:
        scaler = MinMaxScaler(feature_range=(-1, 1))
        train = scaler.fit_transform(data[function.name])
        pkl.dump(scaler, open("hessian_scaler.p", "wb"))

        # Skip the last element, and upshift the hessian so each step is
        # trained to predict the next step's hessian.
        trainX, trainY = train[:-1, :], train[1:, -4:]
        # Align to batch_size of 25.
        trainX, trainY = trainX[24:, :], trainY[24:, :]
        trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))

        es = EarlyStopping(monitor='loss', mode='min', verbose=1, patience=20)
        rs = LambdaCallback(on_epoch_end=lambda epoch, logs: model.reset_states())
        mc = ModelCheckpoint(os.path.join(rootpath, "hessian.h5"),
                             monitor='loss', mode='min', verbose=1,
                             save_best_only=True)

        # Reuse the model across loop iterations - incremental training.
        history = model.fit(trainX, trainY, epochs=200, batch_size=p.batch_size,
                            verbose=2, callbacks=[es, mc, rs], shuffle=False)

        trainPredict = model.predict(trainX, batch_size=p.batch_size)
        # Calculate root mean squared error.
        trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
        print("Train Score: {} RMSE".format(trainScore))

    shutil.copyfile(os.path.join(rootpath, "hessian.h5"),
                    "/home/012532065/final/MotionControl/hessian.h5")
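# A minimal inference sketch for the model trained above (an assumption, not
# part of the original code). The stateful LSTM was built with
# batch_input_shape=(p.batch_size, 1, 21), so prediction batches must have
# exactly p.batch_size rows, and inputs must pass through the pickled scaler.
import pickle as pkl
import numpy as np
from tensorflow.keras.models import load_model

def predict_next_hessian(window):
    """window: (batch_size, 21) array of raw feature rows, one per sample."""
    scaler = pkl.load(open("hessian_scaler.p", "rb"))
    model = load_model("hessian.h5")
    x = scaler.transform(window)                    # scale to (-1, 1)
    x = np.reshape(x, (x.shape[0], 1, x.shape[1]))  # [samples, timestep, features]
    return model.predict(x, batch_size=x.shape[0])  # 4 predicted hessian entries per row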
def formationCenter(r_c, z_c, dz_c, hessian, x_2, y_2, mu_f, z_desired):
    # hessian = np.array([[2, 0], [2, 0]]) * np.random.rand(1)
    # z_c, dz_c, p = kalmanFilter(z_c, dz_c, r, z_r, r_c, r_c_old, p, hessian, numSensors)
    p = params.Params()
    rotateLeft = p.rotateLeft
    rotateRight = p.rotateRight
    K4 = p.K4
    dt = p.dt

    # Frame aligned with the measured field gradient.
    y_1 = dz_c / norm(dz_c)
    x_1 = rotateRight @ y_1

    # Angle between the current heading frame (x_2, y_2) and the gradient frame.
    theta = atan2(x_2[1], x_2[0]) - atan2(x_1[1], x_1[0])

    # Field curvatures along and across the level set.
    kappa_1 = (x_1.T @ hessian @ x_1) / norm(dz_c)
    kappa_2 = (x_1.T @ hessian @ y_1) / norm(dz_c)

    f_z = mu_f * (1 - (z_desired / z_c) ** 2)

    u_c = kappa_1 * cos(theta) + \
        kappa_2 * sin(theta) - \
        (2 * f_z * norm(dz_c) * (cos(theta / 2) ** 2)) + \
        K4 * sin(theta / 2)

    # print([x_2, y_2])
    # Steer the heading by u_c and advance the formation center one step.
    x_2 = x_2 + dt * u_c * y_2
    x_2 = x_2 / norm(x_2)
    y_2 = rotateLeft @ x_2
    pltVar.push(u_c)
    r_c = r_c + dt * x_2
    return r_c, x_2, y_2
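# A quick numeric sanity check of the curvature terms in formationCenter
# (an illustration; assumes rotateRight is the clockwise rotation
# [[0, 1], [-1, 0]]). For the field z = x^2 + y^2 the level sets are circles
# of radius R, so kappa_1 = x_1.T @ H @ x_1 / |dz_c| should equal 1/R.
import numpy as np

R = 3.0
hessian = np.array([[2.0, 0.0], [0.0, 2.0]])
dz_c = np.array([2.0 * R, 0.0])          # gradient of z at the point (R, 0)
y_1 = dz_c / np.linalg.norm(dz_c)
x_1 = np.array([[0, 1], [-1, 0]]) @ y_1  # unit tangent to the level set
kappa_1 = (x_1 @ hessian @ x_1) / np.linalg.norm(dz_c)
print(kappa_1, 1.0 / R)                  # both print 0.3333...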
def __init__(self, function):
    p = params.Params()
    self.params = p
    self.function = function
    self.name = self.function.name

    # Formation center, its previous position, and the sensor positions.
    self.r_c, self.r_c_old, self.r = p.r_c, p.r_c, p.r
    r_c = self.r_c

    # Field value and gradient at the center; heading frame (x_2, y_2).
    self.z_c = function.f(r_c[0], r_c[1])
    self.dz_c = function.dz_f(r_c[0], r_c[1])
    self.y_2 = self.dz_c / norm(self.dz_c)
    self.x_2 = p.rotateRight @ self.y_2

    self.q, self.dq, self.u_r, self.vel_q = p.q, p.dq, p.u_r, p.vel_q
    self.p = np.zeros((3, 3))

    # Field readings at each sensor position.
    z_r = np.array([self.function.f(*pt) for pt in self.r])

    self.init_state = [
        self.r_c, self.z_c, self.dz_c, self.r_c_old, self.p, self.r,
        self.q, self.dq, self.u_r, self.vel_q, self.x_2, self.y_2, z_r
    ]
    self.state = self.init_state
    self.old_state = self.init_state
    self.pltVarShape = False
def make_tf2_export(weights_path, export_dir):
    if os.path.exists(export_dir):
        log('TF2 export already exists in {}, skipping TF2 export'.format(
            export_dir))
        return

    # Create a TF2 Module wrapper around YAMNet.
    log('Building and checking TF2 Module ...')
    params = yamnet_params.Params()
    yamnet = YAMNet(weights_path, params)
    check_model(yamnet, yamnet.class_map_path(), params)
    log('Done')

    # Make TF2 SavedModel export.
    log('Making TF2 SavedModel export ...')
    tf.saved_model.save(yamnet, export_dir)
    log('Done')

    # Check export with TF-Hub in TF2.
    log('Checking TF2 SavedModel export in TF2 ...')
    model = tfhub.load(export_dir)
    check_model(model, model.class_map_path(), params)
    log('Done')

    # Check export with TF-Hub in TF1.
    log('Checking TF2 SavedModel export in TF1 ...')
    with tf.compat.v1.Graph().as_default(), tf.compat.v1.Session() as sess:
        model = tfhub.load(export_dir)
        sess.run(tf.compat.v1.global_variables_initializer())

        def run_model(waveform):
            return sess.run(model(waveform))

        check_model(run_model, model.class_map_path().eval(), params)
    log('Done')
def embedding(self, input_paths, output_paths, embed_paths=""):
    """Extract YAMNet features using a single process."""
    if embed_paths == "":
        embed_paths = [""] * len(input_paths)
        save_embedding = False
    else:
        save_embedding = True
    paths = list(zip(input_paths, embed_paths, output_paths))

    params = yamnet_params.Params(sample_rate=self.sample_rate,
                                  patch_hop_seconds=0.48)
    class_names = yamnet_model.class_names(self.class_names)
    yamnet = yamnet_model.yamnet_frames_model(params)
    yamnet.load_weights(self.model_checkpoint)

    func = partial(
        self._embed,
        yamnet=yamnet,
        params=params,
        class_names=class_names,
        save_embedding=save_embedding,
    )
    self.single_process(func, paths)
def main(argv):
    assert argv, 'Usage: inference.py <wav file> <wav file> ...'

    params = yamnet_params.Params()
    yamnet = yamnet_model.yamnet_frames_model(params)
    yamnet.load_weights('yamnet.h5')
    yamnet_classes = yamnet_model.class_names('yamnet_class_map.csv')

    for file_name in argv:
        # Decode the WAV file.
        wav_data, sr = sf.read(file_name, dtype=np.int16)
        assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
        waveform = wav_data / 32768.0  # Convert to [-1.0, +1.0]
        waveform = waveform.astype('float32')

        # Convert to mono and the sample rate expected by YAMNet.
        if len(waveform.shape) > 1:
            waveform = np.mean(waveform, axis=1)
        if sr != params.sample_rate:
            waveform = resampy.resample(waveform, sr, params.sample_rate)

        # Predict YAMNet classes.
        scores, embeddings, spectrogram = yamnet(waveform)
        # Scores is a matrix of (time_frames, num_classes) classifier scores.
        # Average them along time to get an overall classifier output for the clip.
        prediction = np.mean(scores, axis=0)

        # Report the highest-scoring classes and their scores.
        top5_i = np.argsort(prediction)[::-1][:5]
        print(file_name, ':\n' +
              '\n'.join('  {:12s}: {:.3f}'.format(yamnet_classes[i], prediction[i])
                        for i in top5_i))
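# Example invocation of the inference script above (file names are
# placeholders): each WAV file prints its top-5 YAMNet classes, scored by the
# mean over all time frames in the clip.
#
#   python inference.py speech.wav street_noise.wav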
def formationControl(r_c, r, dz_c, q, dq, u_r, vel_q, t):
    p = params.Params()
    a = p.a
    b = p.b
    dt = p.dt
    K2 = p.K2
    K3 = p.K3
    phi_inv = p.phi_inv

    # Moving opposite to gradient direction: source seeking
    # velocity_center = - dz_c / norm(dz_c)
    # Moving perpendicular to gradient direction: boundary tracking
    velocity_center = p.rotateLeft @ (dz_c / norm(dz_c))
    y1 = velocity_center
    x1 = p.rotateRight @ y1

    # e_1 = r[1] - r[0]
    # e_1 = e_1 / np.linalg.norm(e_1)
    # e_2 = r[2] - r[3]
    # e_2 = e_2 / np.linalg.norm(e_2)
    # q_0 = np.array([
    #     [0, 0],
    #     (a / sqrt(2)) * e_1,
    #     (b / sqrt(2)) * e_2,
    #     [0, 0]
    # ])

    # Desired formation offsets relative to the center.
    if p.numSensors == 4:
        q_0 = np.array([
            [0, 0],
            (a / sqrt(2)) * y1,
            (b / sqrt(2)) * x1,
            [0, 0]
        ])

    if p.numSensors == 2:
        sigma = 10.0
        q_0 = np.array([
            [0, 0],
            (a / sqrt(2)) * x1,
        ])
        # Periodically rotate the single offset so two sensors can still
        # sample the field in both directions.
        if floor(t / 4) % 2 == 0:
            q_0[1] = array([[sin(sigma / pi), cos(sigma / pi)],
                            [-cos(sigma / pi), sin(sigma / pi)]]) @ q_0[1]
        else:
            q_0[1] = array([[sin(sigma), -cos(sigma)],
                            [cos(sigma), sin(sigma)]]) @ q_0[1]

    # Second-order formation dynamics, disabled in favor of the
    # first-order law below:
    # dq[1:] = dq[1:] + dt * u_r[1:]
    # u_r[1:] = -K2 * (q[1:] - q_0[1:]) - K3 * dq[1:]
    # vel_q[1:] = vel_q[1:] + dt * u_r[1:]
    vel_q[1:] = -10 * (q[1:] - q_0[1:])
    q[1:] = q[1:] + dt * vel_q[1:]

    q_N = np.append([r_c], q[1:], axis=0)
    r = phi_inv @ q_N
    return r, q, dq, u_r, vel_q
def make_tflite_export(weights_path, model_path, export_dir):
    if os.path.exists(export_dir):
        log('TF-Lite export already exists in {}, skipping TF-Lite export'.format(
            export_dir))
        return

    # Create a TF-Lite compatible Module wrapper around YAMNet.
    log('Building and checking TF-Lite Module ...')
    params = yamnet_params.Params(tflite_compatible=True)
    yamnet = YAMNet(weights_path, params, model_path)
    check_model(yamnet, yamnet.class_map_path(), params)
    log('Done')

    # Make TF-Lite SavedModel export.
    log('Making TF-Lite SavedModel export ...')
    saved_model_dir = os.path.join(export_dir, 'saved_model')
    os.makedirs(saved_model_dir)
    tf.saved_model.save(yamnet, saved_model_dir)
    log('Done')

    # Check that the export can be loaded and works.
    log('Checking TF-Lite SavedModel export in TF2 ...')
    model = tf.saved_model.load(saved_model_dir)
    check_model(model, model.class_map_path(), params)
    log('Done')

    # Make a TF-Lite model from the SavedModel.
    log('Making TF-Lite model ...')
    tflite_converter = tf.lite.TFLiteConverter.from_saved_model(
        saved_model_dir)
    tflite_model = tflite_converter.convert()
    tflite_model_path = os.path.join(export_dir, 'yamnet.tflite')
    with open(tflite_model_path, 'wb') as f:
        f.write(tflite_model)
    log('Done')

    # Check the TF-Lite export.
    log('Checking TF-Lite model ...')
    interpreter = tf.lite.Interpreter(tflite_model_path)
    audio_input_index = interpreter.get_input_details()[0]['index']
    scores_output_index = interpreter.get_output_details()[0]['index']
    embeddings_output_index = interpreter.get_output_details()[1]['index']
    # spectrogram_output_index = interpreter.get_output_details()[2]['index']

    def run_model(waveform):
        interpreter.resize_tensor_input(audio_input_index, [len(waveform)],
                                        strict=True)
        interpreter.allocate_tensors()
        interpreter.set_tensor(audio_input_index, waveform)
        interpreter.invoke()
        return (interpreter.get_tensor(scores_output_index),
                interpreter.get_tensor(embeddings_output_index))  # ,
        # interpreter.get_tensor(spectrogram_output_index))

    check_model(run_model, 'yamnet_class_map.csv', params)
    log('Done')

    return saved_model_dir
def collect(self):
    p = params.Params()
    dataAll = pd.Series(dtype='object')
    for function in p.functions:
        data = self.stepFunction(function, collect=True)
        dataAll = dataAll.append(pd.Series({function.name: data}))
    pkl.dump(dataAll, open("hessian_data.p", "wb"))
    return dataAll
def create_trials(num_hhld, num_days, num_hours, num_min, trial_code, chad_activity_params, \
                  demographic, num_people, do_minute_by_minute, do_print=False):
    """
    This function creates the input data for each household in the simulation.

    :param int num_hhld: the number of households simulated
    :param int num_days: the number of days in the simulation
    :param int num_hours: the number of additional hours
    :param int num_min: the number of additional minutes
    :param int trial_code: the trial identifier
    :param chad_params.CHAD_params chad_activity_params: the activity parameters \
    used to sample "good" CHAD data
    :param int demographic: the demographic identifier
    :param int num_people: the number of people per household
    :param bool do_minute_by_minute: a flag for how the time steps progress in the scheduler
    :param bool do_print: flag whether to print messages to the console

    :returns: input data where each entry corresponds to the input \
    for the respective household in the simulation
    :rtype: list of :class:`trial.Trial`
    """

    # load the parameters necessary for the runs comparing the ABMHAP to CHAD
    # run the simulation using default parameters
    param_list = [params.Params(num_days=num_days, num_hours=num_hours, num_min=num_min,
                                num_people=num_people, do_minute_by_minute=do_minute_by_minute)
                  for _ in range(num_hhld)]

    # print message
    if do_print:
        print('initializing trials...')

    #
    # create the conditions for each trial
    #

    # start timing
    start = time.time()

    # initialize the simulation inputs
    trials = initialize_trials(param_list, trial_code, chad_activity_params, demographic)

    # end timing
    end = time.time()

    # calculate the elapsed time
    dt_elapsed = end - start

    # print message
    if do_print:
        print('elapsed time to initialize %d trials:\t%.3f [s]\n' % (num_hhld, dt_elapsed))

    return trials
def __enter__(self):
    self.params = params.Params()
    self.fridge = fridge.Fridge(self, self._nowait)
    self.uploader = uploader.Uploader(self)
    self.configwaiter = configwaiter.ConfigWaiter(self)
    self.params.load()
    self.set_sensors(sensor.make_sensor(self))
    # Reload parameters on SIGHUP.
    asyncio.get_event_loop().add_signal_handler(signal.SIGHUP,
                                                self.reload_signal)
    return self
def main():
    import params
    import sys

    pa = params.Params()

    # pg_resume = '/home/dell/testing_part2/880_10ex.pkl_'
    pg_resume = None
    test_only = True
    if len(sys.argv) == 2:
        pg_resume = sys.argv[1]  # give the path of the weights file
        test_only = True

    render = False

    launch(pa, pg_resume, render=render, repre='image', end='all_done',
           test_only=test_only)
def main():
    import params
    import sys

    pa = params.Params()

    pg_resume = '/home/dell/rl_sudoku/4x4sudoku_4_5_6_7_8_saved_weights/380.pkl_'
    # pg_resume = None
    test_only = False
    if len(sys.argv) == 2:
        pg_resume = sys.argv[1]  # give the path of the weights file
        test_only = True

    render = False

    launch(pa, pg_resume, render=render, repre='image', end='all_done',
           test_only=test_only)
def _setup_job(self, job):
    if not os.path.isdir(job['work_dir']):
        os.makedirs(job['work_dir'])
    # Collect the job_params.* entries, stripping the prefix.
    job_params = params.Params((p.replace('job_params.', ''), v)
                               for p, v in job.items()
                               if p.startswith('job_params'))
    job_params.name = job['job_name']
    job_scripts.write_job_script(job['job_file'], self.job_template,
                                 job_params)
    print(job['job_file'])
def buildpars(self):
    """
    Build the model parameter object.

    Refer to params.py for the data that self.p contains. In general, this
    contains the electrochemical parameters necessary for the simulation of
    the full Pseudo-2D Newman-style model.
    """
    self.p = params.Params()
    self.p.buildpars(self.V_init, self.Pdat)
    self.p.Ac = self.p.Area
    self.pars = self.p
def __init__(self, function):
    p = params.Params()
    self.params = p
    self.function = function
    self.name = self.function.name
    self.numSensors = p.numSensors

    # Formation center, its previous position, and the sensor positions.
    self.r_c, self.r_c_old, self.r = p.r_c, p.r_c, p.r
    r_c = self.r_c

    # Field value and gradient at the center; heading frame (x_2, y_2).
    self.z_c = function.f(r_c[0], r_c[1])
    self.dz_c = function.dz_f(r_c[0], r_c[1])
    self.y_2 = self.dz_c / norm(self.dz_c)
    self.x_2 = p.rotateRight @ self.y_2

    self.q, self.dq, self.u_r, self.vel_q = p.q, p.dq, p.u_r, p.vel_q
    self.p = np.zeros((3, 3))
def main():
    import params
    import sys

    pa = params.Params()

    pg_resume = None
    test_only = False
    if len(sys.argv) == 2:
        pg_resume = sys.argv[1]  # give the path of the weights file
        test_only = True

    render = False

    launch(pa, pg_resume, render=render, repre='image', end='all_done',
           test_only=test_only)
def test_params_basic(self):
    defparams = params.Params()
    self.assertEqual(defparams.overshoot_factor,
                     params._FIELD_DEFAULTS['overshoot_factor'])

    # fetching a bad parameter fails
    with self.assertRaises(KeyError):
        x = self.params.param_that_doesnt_exist

    # setting a parameter
    defparams.overshoot_factor = 8877
    self.assertEqual(defparams.overshoot_factor, 8877)

    # setting a bad parameter fails
    with self.assertRaises(KeyError):
        self.params.somewrongthing = 5
def __init__(self, config_path="./config.yaml"):
    """Init method for the Searcher."""
    super().__init__()

    # Load the configuration
    conf = OmegaConf.load(config_path)
    self.dataset_path = conf.dataset_path
    self.audio_path = os.path.join(conf.dataset_path, "podcasts-audio")
    self.es_url = conf.search_es_url  # URL of Elasticsearch to query
    self.es_num = conf.search_es_num  # Number of segments to request from Elasticsearch
    self.sample_rate = 44100  # Hardcoded sample rate of all podcast audio

    # Load the podcast metadata
    self.metadata = load_metadata(self.dataset_path)

    # Set up the reranking model
    self.rerank_tokenizer = AutoTokenizer.from_pretrained(
        conf.search_rerank_model,
        use_fast=True,
        cache_dir=conf.search_cache_dir)
    self.rerank_model = AutoModelForSequenceClassification.from_pretrained(
        conf.search_rerank_model, cache_dir=conf.search_cache_dir)
    self.rerank_model.to("cpu", non_blocking=True)
    self.rerank_max_seq_len = 512

    # Set up the openSMILE extractor
    self.smile = opensmile.Smile(
        feature_set=opensmile.FeatureSet.eGeMAPSv02,
        feature_level=opensmile.FeatureLevel.Functionals,
        options={
            "frameModeFunctionalsConf": os.path.join(
                os.getenv("PODCAST_PATH"),
                "data/custom_FrameModeFunctionals.conf.inc",
            )
        },
    )

    # Set up the YAMNet model
    params = yamnet_params.Params(sample_rate=self.sample_rate,
                                  patch_hop_seconds=0.48)
    self.yamnet_classes = yamnet_model.class_names(
        os.path.join(os.getenv("YAMNET_PATH"), "yamnet_class_map.csv"))
    self.yamnet_model = yamnet_model.yamnet_frames_model(params)
    self.yamnet_model.load_weights(
        os.path.join(os.getenv("PODCAST_PATH"), "data/yamnet.h5"))
def processData(self, df, learn=True):
    if isinstance(df, pd.DataFrame):
        raw_data = df.to_numpy()
    else:
        raw_data = df

    # Flatten each record ['s', 'hessian', 'r_c_delta', 'r_delta', 'z_r']
    # into a single 21-element feature row.
    px = params.Params()
    numSensors = px.numSensors
    unscaled_data = np.array([[
        x[0][0], x[0][1], x[0][2],
        x[1][0][0], x[1][0][1], x[1][1][0], x[1][1][1],
        x[2][0], x[2][1],
        x[3][0][0], x[3][0][1], x[4][0],
        x[3][1][0], x[3][1][1], x[4][1],
        x[3][2][0], x[3][2][1], x[4][2],
        x[3][3][0], x[3][3][1], x[4][3]
    ] for x in raw_data])

    # Retain only every nth data element
    # unscaled_data = unscaled_data[::10, :]

    lagWindowSize = self.lagWindowSize
    numFeaturesState = self.numFeaturesState
    numFeaturesStateAdded = self.numFeaturesStateAdded
    datalength = unscaled_data.shape[0]

    if learn:
        scaler = MinMaxScaler(feature_range=(-1, 1))
        scaled_data = scaler.fit_transform(unscaled_data)
        rootpath = "/tmp" if os.name == 'posix' else ""
        pkl.dump(scaler, open(os.path.join(rootpath, "scaler.p"), "wb"))
    else:
        # At inference time, slide the history window forward one step and
        # reuse the scaler fitted during training.
        self.history.append(unscaled_data[0])
        self.history = self.history[1:]
        scaled_data = self.scaler.transform(self.history)

    # LSTM needs input in [samples, timestep, features] format.
    if learn:
        X = np.array([
            scaled_data[i - lagWindowSize:i, :]
            for i in range(lagWindowSize, datalength)
        ])
        Y = scaled_data[lagWindowSize:, :]
        # We don't need the 4th parameter for prediction, so trim the targets.
        Y = Y[:, 0:numFeaturesState]
        # Y = [Y[:, 0:numFeaturesState], Y[:, numFeaturesStateAdded:]]
    else:
        X = np.array([scaled_data])
        # X = [X[:, :, 0:numFeaturesStateAdded], X[:, :, numFeaturesStateAdded:]]
        Y = False
    return X, Y
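# A toy illustration of the sliding-window reshape in processData (assumes a
# lag window of 3 over 6 scaled rows): every LSTM sample is the previous
# lagWindowSize rows, and its target is the row just after the window.
import numpy as np

scaled = np.arange(12, dtype=float).reshape(6, 2)  # 6 time steps, 2 features
lag = 3
X = np.array([scaled[i - lag:i, :] for i in range(lag, len(scaled))])
Y = scaled[lag:, :]
print(X.shape, Y.shape)  # (3, 3, 2) (3, 2) -> [samples, timesteps, features]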
def main(argv):
    assert argv, 'Usage: inference.py <wav file> <wav file> ...'

    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'yamnet.h5')
    classes_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'yamnet_class_map.csv')
    event_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'event.json')

    params = yamnet_params.Params()
    yamnet = yamnet_model.yamnet_frames_model(params)
    yamnet.load_weights(model_path)
    yamnet_classes = yamnet_model.class_names(classes_path)

    for file_name in argv:
        # Decode the WAV file.
        wav_data, sr = sf.read(file_name, dtype=np.int16)
        assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
        waveform = wav_data / 32768.0  # Convert to [-1.0, +1.0]
        waveform = waveform.astype('float32')

        # Convert to mono and the sample rate expected by YAMNet.
        if len(waveform.shape) > 1:
            waveform = np.mean(waveform, axis=1)
        if sr != params.sample_rate:
            waveform = resampy.resample(waveform, sr, params.sample_rate)

        # Predict YAMNet classes.
        scores, embeddings, spectrogram = yamnet(waveform)
        # Scores is a matrix of (time_frames, num_classes) classifier scores.
        # Average them along time to get an overall classifier output for the clip.
        prediction = np.mean(scores, axis=0)

        # Report the highest-scoring classes and their scores.
        top5_i = np.argsort(prediction)[::-1][:5]
        print(file_name, ':\n' +
              '\n'.join('  {:12s}: {:.3f}'.format(yamnet_classes[i], prediction[i])
                        for i in top5_i))

        # Save all classes, sorted by score, to event.json.
        b = prediction.tolist()  # nested lists with same data, indices
        pred = []
        for (i, cls) in enumerate(yamnet_classes):
            item = {}
            item['label'] = cls
            item['value'] = round(b[i], 6)
            pred.append(item)
        pred = sorted(pred, key=lambda x: x['value'], reverse=True)
        json.dump(pred, codecs.open(event_path, 'w', encoding='utf-8'),
                  separators=(',', ':'), sort_keys=True, indent=4)  # saves the array in .json format
def __init__(self, setType, transform=None, normalize=False,
             crop_size=(256, 512), root_dir=None):
    # setType: "train" or "test"
    p = params.Params()
    if root_dir is None:
        root_dir = p.DATA_PATH

    self.setType = setType
    self.normalize = normalize
    self.crop_size = crop_size

    if setType == "train":
        path_paths_img_left = root_dir + p.sceneflow_paths_train_img_left
        path_paths_img_right = root_dir + p.sceneflow_paths_train_img_right
        path_paths_disp_left = root_dir + p.sceneflow_paths_train_disp_left
        path_paths_disp_right = root_dir + p.sceneflow_paths_train_disp_right
    if setType == "test":
        path_paths_img_left = root_dir + p.sceneflow_paths_test_img_left
        path_paths_img_right = root_dir + p.sceneflow_paths_test_img_right
        path_paths_disp_left = root_dir + p.sceneflow_paths_test_disp_left
        path_paths_disp_right = root_dir + p.sceneflow_paths_test_disp_right

    # Load the pickled lists of image and disparity file paths.
    finl = open(path_paths_img_left, 'rb')
    finr = open(path_paths_img_right, 'rb')
    self.paths_img_left = pickle.load(finl)
    self.paths_img_right = pickle.load(finr)
    finl.close()
    finr.close()

    finl = open(path_paths_disp_left, 'rb')
    finr = open(path_paths_disp_right, 'rb')
    self.paths_disp_left = pickle.load(finl)
    self.paths_disp_right = pickle.load(finr)
    finl.close()
    finr.close()

    assert len(self.paths_img_left) == len(self.paths_img_right) == len(
        self.paths_disp_left) == len(self.paths_disp_right)

    self.transform = transform
def scaffold_model(model_file, force=True):
    scaff_out_file = model_file + '.scaffold_output'
    try:
        # Reuse cached scaffold output unless a rebuild is forced.
        assert not force
        scaff_params = params.read_params(scaff_out_file)
        print(scaff_out_file)
    except (IOError, AssertionError):
        net = caffe_util.Net(model_file, phase=caffe.TRAIN)
        scaff_params = params.Params()
        scaff_params['n_params'] = net.get_n_params()
        scaff_params['n_activs'] = net.get_n_activs()
        scaff_params['size'] = net.get_approx_size()
        scaff_params['min_width'] = net.get_min_width()
        params.write_params(scaff_out_file, scaff_params)
    return scaff_params
def make_tflite_export(weights_path, export_dir):
    if os.path.exists(export_dir):
        log('TF-Lite export already exists in {}, skipping TF-Lite export'.format(
            export_dir))
        return

    # Create a TF-Lite compatible Module wrapper around YAMNet.
    log('Building and checking TF-Lite Module ...')
    params = yamnet_params.Params(tflite_compatible=True)
    yamnet = YAMNet(weights_path, params)
    check_model(yamnet, yamnet.class_map_path(), params)
    log('Done')

    # Make TF-Lite SavedModel export.
    log('Making TF-Lite SavedModel export ...')
    saved_model_dir = os.path.join(export_dir, 'saved_model')
    os.makedirs(saved_model_dir)
    tf.saved_model.save(
        yamnet, saved_model_dir,
        signatures={'serving_default': yamnet.__call__.get_concrete_function()})
    log('Done')

    # Check that the export can be loaded and works.
    log('Checking TF-Lite SavedModel export in TF2 ...')
    model = tf.saved_model.load(saved_model_dir)
    check_model(model, model.class_map_path(), params)
    log('Done')

    # Make a TF-Lite model from the SavedModel.
    log('Making TF-Lite model ...')
    tflite_converter = tf.lite.TFLiteConverter.from_saved_model(
        saved_model_dir, signature_keys=['serving_default'])
    tflite_model = tflite_converter.convert()
    tflite_model_path = os.path.join(export_dir, 'yamnet.tflite')
    with open(tflite_model_path, 'wb') as f:
        f.write(tflite_model)
    log('Done')

    # Check the TF-Lite export.
    log('Checking TF-Lite model ...')
    interpreter = tf.lite.Interpreter(tflite_model_path)
    runner = interpreter.get_signature_runner('serving_default')
    check_model(runner, 'yamnet_class_map.csv', params)
    log('Done')

    return saved_model_dir
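# A minimal sketch of calling the exported TF-Lite model (assumptions: the
# model path is a placeholder, and the serving signature takes a single
# float32 argument named 'waveform', matching the export above).
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter('yamnet.tflite')
runner = interpreter.get_signature_runner('serving_default')
waveform = np.zeros(3 * 16000, dtype=np.float32)  # 3 s of silence at 16 kHz
outputs = runner(waveform=waveform)
print({name: tensor.shape for name, tensor in outputs.items()})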
def main(argv):
    global analysisdata, frame_counter
    log = open('/tmp/sound.log', 'w')

    # Set up yamnet
    params = yamnet_params.Params(sample_rate=ANALYSIS_SAMPLE_RATE,
                                  patch_hop_seconds=0.1)
    yamnet = yamnet_model.yamnet_frames_model(params)
    yamnet.load_weights('/home/pi/models/research/audioset/yamnet/yamnet.h5')
    yamnet_classes = yamnet_model.class_names(
        '/home/pi/models/research/audioset/yamnet/yamnet_class_map.csv')

    # Set up a live callback stream from the microphone
    stream = sd.InputStream(device=1, channels=1,
                            samplerate=RECORD_SAMPLE_RATE,
                            callback=audio_callback,
                            blocksize=BUFFER_SIZE_F)
    with stream:
        while True:
            update_analysis_window()
            if (frame_counter >= int(ANALYSIS_LENGTH_S * ANALYSIS_SAMPLE_RATE)):
                frame_counter = 0
                scores = yamnet.predict(analysisdata, steps=1)[0]
                if (len(scores)):
                    prediction = np.mean(scores, axis=0)
                    top5_i = np.argsort(prediction)[::-1][:1]
                    for x in top5_i:
                        if (prediction[x] > THRESHOLD):
                            top_class_str = yamnet_classes[x]
                            # Write any detected class (outside these noisy ones) to the log
                            if (not top_class_str in [
                                    "Fireworks", "Silence", "Inside, small room"
                            ]):
                                log.write("[%s] %s %0.4f\n" % (
                                    datetime.now().strftime("%m/%d/%Y %H:%M:%S"),
                                    top_class_str, prediction[x]))
                                log.flush()
                            # And if it's one of the doorbell ones, ping the homebridge server
                            if (top_class_str in [
                                    "Beep, bleep", "Doorbell", "Glass", "Ding"
                            ]):
                                trigger_homekit_motion()
def scale_data(dataset, trainFunctions):
    params = Params.Params()
    scaler = MinMaxScaler(feature_range=(-1, 1))
    training_functions = [function.name for function in trainFunctions]

    # A single scaler fit over all shapes, disabled in favor of per-shape scaling:
    # fit_data = pd.concat([dataset[name] for name in training_functions])
    # scaler = scaler.fit(fit_data)
    # for name in training_functions:
    #     dataset[name] = pd.DataFrame(scaler.transform(dataset[name]))

    scaler_return = False
    for name in training_functions:
        scaler = MinMaxScaler(feature_range=(-1, 1))
        # print(dataset[name].describe(include='all'))
        dataset[name] = pd.DataFrame(scaler.fit_transform(dataset[name]))
        # print(dataset[name].describe(include='all'))
        # Return the ellipse's scaler, as it works better than the rhombus one in general.
        if name == "elipse":
            scaler_return = scaler
    return dataset, scaler_return
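# A minimal illustration of the per-shape scaling above: because every shape
# gets its own MinMaxScaler fit, the same raw value can map to different
# scaled values in different shapes (the column min/max differ per shape).
import numpy as np
from sklearn.preprocessing import MinMaxScaler

a = np.array([[0.0], [5.0], [10.0]])
b = np.array([[0.0], [5.0], [20.0]])
print(MinMaxScaler((-1, 1)).fit_transform(a).ravel())  # [-1.   0.   1. ]
print(MinMaxScaler((-1, 1)).fit_transform(b).ravel())  # [-1.  -0.5  1. ]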
def estimate_rot(data_num=1):
    # Read the data
    imu_data = read_data_imu(data_num)
    q = quat.Quaternion(np.array([1, 0, 0, 0]))
    omega = np.array([0, 0, 0])
    curr_state = State(q, omega)
    param = params.Params()
    ts = np.squeeze(imu_data['ts'])
    real_measurement = np.squeeze(imu_data['vals']).transpose()
    size_ts = ts.shape[0]
    sigma = Sigma_pts()
    rpy = []

    # UKF loop: propagate the sigma points, then correct with the IMU measurement.
    for i in range(1, size_ts):
        dt = ts[i] - ts[i - 1]
        sigma.find_points(curr_state, dt)
        sigma.find_measurements()
        corrected_measurements = convert_measurements(real_measurement[i, :3],
                                                      real_measurement[i, 3:])
        curr_state.kalman_update(sigma, corrected_measurements)
        rpy.append(curr_state.q.quat2rpy())

    # Plot the UKF estimate against the Vicon ground truth.
    vicon_data = read_data_vicon(data_num)
    v_ts = np.squeeze(vicon_data['ts'])
    x, y, z = rotationMatrixToEulerAngles(vicon_data['rots'])
    # plt.plot(v_ts, x, 'r')

    plt.figure()
    vicon, = plt.plot(v_ts, x, label="Vicon")
    ukf, = plt.plot(ts[1:], np.asarray(rpy)[:, 0], label="UKF")
    plt.legend(handles=[vicon, ukf])
    plt.title("Roll")

    plt.figure()
    vicon, = plt.plot(v_ts, y, label="Vicon")
    ukf, = plt.plot(ts[1:], np.asarray(rpy)[:, 1], label="UKF")
    plt.legend(handles=[vicon, ukf])
    plt.title("Pitch")

    plt.figure()
    vicon, = plt.plot(v_ts, z, label="Vicon")
    ukf, = plt.plot(ts[1:], np.asarray(rpy)[:, 2], label="UKF")
    plt.legend(handles=[vicon, ukf])
    plt.title("Yaw")

    plt.show()