def calc_motion_capa_data(file_name, file_dir, motion_out_dir, capa_out_dir):
    """Extract motion and/or capacitance features for one recording.

    Reads <file_dir>/<file_name>.txt, derives a suitable end time from the
    recording's VAD chunks, then writes a feature file named after the task
    id into motion_out_dir and/or capa_out_dir, gated by the module-level
    flags ``calc_motion`` / ``calc_capa``.

    Args:
        file_name: base name of the recording (without extension).
        file_dir: directory containing the .txt recording.
        motion_out_dir: destination directory for motion features.
        capa_out_dir: destination directory for capacitance features.
    """
    d = data_reader.Data()
    d.read(os.path.join(file_dir, file_name + ".txt"))
    # BUG FIX: the original did `print(u, d.task_id)` where `u` is an
    # undefined name (NameError at runtime).
    print(d.task_id)
    t = get_vad_chunks(file_dir, file_name)
    # print(t)
    end = find_suitable_end(t, 0.4, 4.0)
    if calc_motion:
        out_file = os.path.join(motion_out_dir, d.task_id + '.txt')
        print(out_file)
        # FIX: `with` guarantees the file is closed even if extraction raises
        # (the original left the handle open on error).
        with open(out_file, 'w', encoding='utf-8') as output:
            # Four header lines precede the feature rows.
            output.write(d.user_pos + '\n')
            output.write(d.start_pos + '\n')
            output.write(d.description + '\n')
            output.write(d.hand + '\n')
            extract_motion_feature(0.05, end, d, output)
    if calc_capa:
        out_file = os.path.join(capa_out_dir, d.task_id + '.txt')
        with open(out_file, 'w', encoding='utf-8') as output:
            # feature = capa_feature.extract_time_feature(d, end * 1000, (end + 2.0) * 1000)
            # Window is [end, end + 2.0] seconds, expressed in milliseconds.
            feature = capa_feature.extract_time_feature_count_appearance_only(
                d, end * 1000, (end + 2.0) * 1000)
            for f in feature:
                output.write(str(f) + '\n')
def visualize_file(file_path):
    """Plot every sensor channel of a recording, one subplot per type.

    TOUCH and CAPACITY streams are excluded; the remaining types are shown
    alphabetically, one stacked subplot each, with the file path as the
    figure title.
    """
    data = data_reader.Data()
    data.read(file_path)
    type_list = data.get_types()
    type_list.remove('TOUCH')
    type_list.remove('CAPACITY')
    type_list.sort()
    plt.figure(figsize=(10, 40))
    for row, sensor_type in enumerate(type_list, start=1):
        plt.subplot(len(type_list), 1, row)
        plt.xlim(0, data.get_max_time())
        frame_list = data.get_list(sensor_type)
        plt.title(sensor_type)
        for axis in range(frame_list.get_data_way()):
            # Sparse series get marked points; dense series a plain line.
            plot_format = 'x:' if len(frame_list.value[axis]) <= 100 else '-'
            plt.plot(frame_list.time_stamp, frame_list.value[axis],
                     plot_format, label=axis)
        plt.legend()
    plt.suptitle(file_path, x=0.02, y=0.998, horizontalalignment='left')
    plt.show()
def read_data(file_path_list):
    """Load each path into a data_reader.Data object; return them in order."""
    def _load(path):
        record = data_reader.Data()
        record.read(path)
        return record

    return [_load(path) for path in file_path_list]
def calc_data(file_name, file_dir, out_dir):
    """Extract features for one recording and write them to out_dir.

    Reads <file_dir>/<file_name>.txt and writes <out_dir>/<task_id>.txt:
    four header lines (user position, start position, description, hand)
    followed by feature rows.  Tasks with id < 32 or described as '接听'
    use a single VAD-derived 2-second window ending at speech onset; other
    tasks slide a 2-second window over the whole recording.

    Args:
        file_name: base name of the recording (without extension).
        file_dir: directory containing the .txt/.wav recording pair.
        out_dir: destination directory for the feature file.
    """
    print(file_name)
    d = data_reader.Data()
    d.read(os.path.join(file_dir, file_name + ".txt"))
    out_file = os.path.join(out_dir, d.task_id + ".txt")
    print(out_file)
    # BUG FIX: the original never closed `output`; `with` guarantees it.
    with open(out_file, 'w', encoding='utf-8') as output:
        output.write(d.user_pos + '\n')
        output.write(d.start_pos + '\n')
        output.write(d.description + '\n')
        output.write(d.hand + '\n')
        task = int(d.task_id.split("_")[0])
        # (an older 1-second-window variant of this branch was removed;
        # see version control history if it is ever needed again)
        # 2 s window
        if task < 32 or d.description == '接听':  # '接听' = answering a call
            t = webrtcvad_utils.calc_vad(
                3, os.path.join(file_dir, file_name + ".wav"))
            print(t)
            if d.start_pos == '裤兜':  # '裤兜' = trouser pocket
                # Pocket recordings need a later, wider search window.
                end = find_suitable_end(t, 5.0, 10.0)
            else:
                end = find_suitable_end(t, 2.0, 4.0)
            extract_feature(end - 2.0, end, d, output)
        else:
            max_time = d.get_max_time() / 1000  # ms -> s
            start = 1.0
            # Slide a 2 s window in 2 s steps, keeping 0.5 s of margin.
            while start + 2.5 < max_time:
                extract_feature(start, start + 2.0, d, output)
                start += 2.0
def calc_motion_capa_data(file_name, file_dir):
    """Extract motion and/or capacitance features at multiple time points.

    NOTE(review): this redefines calc_motion_capa_data — whichever
    definition appears later in the module wins at import time; confirm
    which one is intended to be live.

    Args:
        file_name: base name of the recording (without extension).
        file_dir: directory containing the .txt recording.
    """
    d = data_reader.Data()
    d.read(os.path.join(file_dir, file_name + '.txt'))
    # BUG FIX: the original did `print(u, d.task_id)` where `u` is an
    # undefined name (NameError at runtime).
    print(d.task_id)
    # Renamed from `t` so the loop variable below no longer shadows it.
    vad_chunks = get_vad_chunks(file_dir, file_name)
    end, voice_end = find_suitable_end(vad_chunks, 0.4, 4.0)
    # `time_point` is presumably a module-level sequence of evaluation
    # times — TODO confirm against its definition.
    if calc_motion:
        for tp in time_point:
            extract_motion_feature(0.05, end, voice_end, d, tp)
    if calc_capa:
        for tp in time_point:
            extract_capa_feature(0.05, end, voice_end, d, tp)
def train():
    """Kernel train loop.

    Runs optimization over cfg.epoch passes of the dataset: logs the loss
    every iteration, evaluates accuracy every cfg.test_iter iterations,
    checkpoints every cfg.save_iter iterations, and saves a final model
    checkpoint under cfg.save_path when the loop ends.
    """
    with tf.Session(config=config) as sess:
        saver = tf.train.Saver()
        global_step = 0  # counts examples consumed, not iterations
        # IDIOM FIX: renamed `iter` -> `iteration`; the original shadowed
        # the builtin iter().
        iteration = 1
        current_learning_rate = cfg.init_learning_rate
        init_variable = tf.group(tf.global_variables_initializer(),
                                 tf.local_variables_initializer())
        sess.run(init_variable)
        data = dtrd.Data()
        data.load(data_filename=cfg.train_dataset)
        puts_debug('data size: {}'.format(data.size()))
        while global_step < data.size() * cfg.epoch:
            batch_x, _, _, batch_y = data.decode_and_fetch(
                batch_size=cfg.batch_size)
            _, loss_val = sess.run([train_op, loss_op],
                                   feed_dict={
                                       xs: batch_x,
                                       ys: batch_y,
                                       learning_rate: current_learning_rate
                                   })
            puts_info('iter: {}, loss: {}'.format(iteration, loss_val))
            global_step += cfg.batch_size
            if iteration % cfg.test_iter == 0:
                # Periodic evaluation on a fresh batch.
                batch_x, _, _, batch_y = data.decode_and_fetch(
                    batch_size=cfg.batch_size)
                accuracy_val, loss_val = sess.run(
                    [accuracy_op, loss_op],
                    feed_dict={
                        xs: batch_x,
                        ys: batch_y,
                        learning_rate: current_learning_rate
                    })
                puts_info('accuracy: {:.4f}, loss: {:.4f}'.format(
                    accuracy_val, loss_val))
            if iteration % cfg.save_iter == 0:
                # Checkpoint tagged with the example count, not the iteration.
                saver.save(
                    sess,
                    os.path.join(cfg.save_path,
                                 'model.ckpt-' + str(global_step)))
                puts_info(
                    'iter: {}, model has been saved under {}/model.ckpt-{}'.
                    format(iteration, cfg.save_path, global_step))
            iteration += 1
        # Final evaluation and unconditional save after training completes.
        batch_x, _, _, batch_y = data.decode_and_fetch(
            batch_size=cfg.batch_size)
        accuracy_val, loss_val, outputs_val = sess.run(
            [accuracy_op, loss_op, outputs_op],
            feed_dict={
                xs: batch_x,
                ys: batch_y,
                learning_rate: current_learning_rate
            })
        puts_info('final >> val: \n{}, accuracy: {:.4f}, loss: {:.4f}'.format(
            outputs_val, accuracy_val, loss_val))
        saver.save(sess, os.path.join(cfg.save_path, 'model.ckpt'))
        puts_info('final model has been saved under {}'.format(
            os.path.join(cfg.save_path, 'model.ckpt')))