def on_synthesize(self):
    curr_char = str(self.char_combbox.currentText())
    curr_mdl = self.data_mdls[curr_char]
    #sample from the rf model
    strk_num = curr_mdl.sample_strk_num()
    char_data = []
    sample_parms = []
    sample_noise = []
    if strk_num is not None:
        print 'Generate a sample consisting of {0} strokes'.format(strk_num+1)
        #select the model list
        mdl_lst = curr_mdl.model_[strk_num+1]
        for mdl in mdl_lst:
            #for each stroke...
            tmp_sample = curr_mdl.sample_from_rf_mdl(mdl)
            sample_data, tree_idx, leaf_idx, noise = tmp_sample[0]
            if (tree_idx, leaf_idx) in mdl['kinparms_dict']:
                parms = mdl['kinparms_dict'][tree_idx, leaf_idx]
            else:
                parms = -1
            char_data.append(sample_data)
            sample_parms.append(parms)
            sample_noise.append(noise)
        #prepare the char model
        self.char_mdl = []
        for strk_idx in range(len(char_data)):
            tmp_char_mdl = dict()
            tmp_char_data = np.reshape(char_data[strk_idx], (2, -1))
            tmp_char_mdl['char_sample'] = tmp_char_data
            #evaluate the model
            if sample_parms[strk_idx] == -1:
                #no valid parameters
                tmp_char_mdl['stroke'] = tmp_char_data
            else:
                tmp_char_mdl['start_pnt'] = sample_parms[strk_idx][0]
                tmp_char_mdl['opt_parms'] = sample_parms[strk_idx][1]
                t_array = np.linspace(0, 1.0, len(char_data[strk_idx])/2)
                eval_traj, eval_vel = pytkrxz.rxzero_traj_eval(tmp_char_mdl['opt_parms'], t_array,
                                                               sample_parms[strk_idx][0][0],
                                                               sample_parms[strk_idx][0][1])
                tmp_char_mdl['stroke'] = eval_traj
                tmp_char_mdl['vel_profile'] = np.sum(eval_vel**2, axis=1)**(1./2)
            self.char_mdl.append(tmp_char_mdl)
        self.strk_combbox.blockSignals(True)
        self.strk_combbox.clear()
        self.strk_combbox.addItems(map(str, range(len(char_data))))
        self.strk_combbox.blockSignals(False)
        self.clear_parm_sliders_layout()
        #self.on_update_strk_comb(None)
        self.populate_parm_sliders()
        self.plot_data()
    #empty lists are returned when no stroke number could be sampled
    return char_data, sample_parms, sample_noise
def traj_eval_helper(self, strk_idx, t_array, parms, x0, y0):
    """
    evaluate a trajectory with the current parameters and perturbations...
    """
    opt_parms = copy.copy(parms)
    #get noise: each slider ranges over 0..100, so (value-50)/100 maps it
    #linearly onto a perturbation ratio in [-0.5, 0.5]
    num_parm_per_comp = 5
    noise_ratio_array = []
    for slider_lst in self.parms_sliders[strk_idx]:
        for slider in slider_lst:
            noise_ratio_array.append(float(slider.value() - 50) / 100)
    noise_ratio_array = np.reshape(noise_ratio_array, (-1, num_parm_per_comp))
    for row in range(opt_parms.shape[0]):
        # opt_parms[row][0] += noise_ratio_array[row][0] * np.abs(opt_parms[row][0]) * 0.8
        # opt_parms[row][2] += noise_ratio_array[row][1] * np.abs(opt_parms[row][2]) * 0.5
        # opt_parms[row][3] += noise_ratio_array[row][2] * np.abs(opt_parms[row][3]) * 0.5
        opt_parms[row][0] += noise_ratio_array[row][0] * 5
        opt_parms[row][2] += noise_ratio_array[row][1] * 1.0
        opt_parms[row][3] += noise_ratio_array[row][2] * 1.0
        #theta_s & theta_e: noise is applied to delta_theta
        # opt_theta_s = opt_parms[row][4]
        # opt_theta_e = opt_parms[row][5]
        # opt_parms[row][4] = (opt_theta_s + opt_theta_e)/2 - (opt_theta_e-opt_theta_s) * (1 + noise_ratio_array[row][3]*2) / 2
        # opt_parms[row][5] = (opt_theta_s + opt_theta_e)/2 + (opt_theta_e-opt_theta_s) * (1 + noise_ratio_array[row][3]*2) / 2
        opt_parms[row][4] += noise_ratio_array[row][3] * 2*np.pi
        opt_parms[row][5] += noise_ratio_array[row][4] * 2*np.pi
    traj_opt, vel_vec_opt = pytkrxz.rxzero_traj_eval(opt_parms, t_array, x0, y0)
    return traj_opt, vel_vec_opt, opt_parms
def obj_func_theta(x, *args):
    """
    objective function to infer the latent parameters
    x:      unknown parameters theta
    args:   a tuple of (start_pnt, samples, mu_theta, sigma)
    """
    start_pnt, samples, mu_theta, sigma = args
    #evaluate theta
    parms = np.reshape(x, (-1, 6))
    t_array = np.arange(0.0, 1.0, 0.01)
    eval_traj, eval_vel = pytkrxz.rxzero_traj_eval(parms, t_array, start_pnt[0], start_pnt[1])
    #regularization term
    val = 0.5 * np.linalg.norm(sigma) + 0.5*np.sum((x-mu_theta)*(x-mu_theta)*1./(sigma+1e-5))
    #cost term: deviation of the samples from the evaluated reference trajectory
    ref_sample = eval_traj.transpose().flatten()
    err = samples - ref_sample  #broadcasting
    val = val + np.sum(err * err)
    return val
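# --- Usage sketch (added; not part of the original module) -----------------
# obj_func_theta follows scipy's fun(x, *args) convention, so it can be handed
# to scipy.optimize.minimize directly. Everything below is an illustrative,
# assumed setup: a single lognormal component (6 parameters) and a flattened
# 200-point reference trajectory; numpy is imported as np at module scope, as
# elsewhere in this file.
def _obj_func_theta_demo():
    from scipy.optimize import minimize
    start_pnt = np.array([0.0, 0.0])
    mu_theta = np.array([5.0, 0.0, -1.5, 0.3, 0.0, np.pi/2])  #prior mean (assumed values)
    sigma = np.ones(6) * 0.1                                  #prior variances (assumed)
    samples = np.zeros(200)  #placeholder for a flattened (x..., y...) reference trajectory
    res = minimize(obj_func_theta, x0=mu_theta,
                   args=(start_pnt, samples, mu_theta, sigma),
                   method='L-BFGS-B')
    return np.reshape(res.x, (-1, 6))    #MAP estimate of the component parameters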
def extend_data_with_lognormal_sampling_helper(char_traj, n_samples, shift_mean):
    #the input char_traj is flattened with the last entry as the time; get the 2D form
    data_len = (len(char_traj) - 1)/2
    t_idx = np.linspace(0, 1.0, data_len)
    #is it necessary to also have noise on this?
    x0 = char_traj[0]
    y0 = char_traj[data_len]
    pos_traj = np.array([char_traj[:data_len], char_traj[data_len:-1]]).T
    #estimate the lognormal parms
    lognorm_parms = np.array(pytk_rz.rxzero_train(pos_traj))
    if np.any(np.isinf(lognorm_parms)):
        print 'Unable to extract lognormal parameters. Only use the original trajectory.'
        return []
    n_comps = len(lognorm_parms)
    #generate noise for each component, considering amplitude (+-20%),
    #start angle (+-20 deg) and straightness (+-10% difference)
    ang_difference = lognorm_parms[:, 5] - lognorm_parms[:, 4]
    #white noise scaled by 1/3 so that 99.7% of the samples (3 sigma) stay within the specified ranges...
    noises = np.random.randn(n_samples, n_comps, 3) / 3
    parm_noises = np.array([np.array([noise[:, 0]*.2*lognorm_parms[:, 0],
                                      np.zeros(n_comps),
                                      np.zeros(n_comps),
                                      np.zeros(n_comps),
                                      noise[:, 1]*np.pi/9,
                                      noise[:, 1]*np.pi/9 + noise[:, 2]*.1*ang_difference]).T
                            for noise in noises])
    perturbed_parms = np.array([lognorm_parms + parm_noise for parm_noise in parm_noises])
    #apply the noise; remember to flatten and put back the phase scale...
    res_char_trajs = [np.concatenate([pytk_rz.rxzero_traj_eval(perturbed_parm, t_idx, x0, y0)[0].T.flatten(),
                                      [char_traj[-1]]])
                      for perturbed_parm in perturbed_parms]
    if shift_mean:
        mean_coords = [np.mean(np.reshape(traj[:-1], (2, -1)).T, axis=0) for traj in res_char_trajs]
        for d_idx in range(len(res_char_trajs)):
            data_len = (len(res_char_trajs[d_idx]) - 1)/2
            res_char_trajs[d_idx][0:data_len] -= mean_coords[d_idx][0]
            res_char_trajs[d_idx][data_len:-1] -= mean_coords[d_idx][1]
    return res_char_trajs
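# --- Sanity check (added sketch) --------------------------------------------
# Why divide the white noise by 3: for a standard normal, ~99.7% of the mass
# lies within 3 sigma, so randn()/3 keeps almost all draws inside [-1, 1] and
# the scaled perturbations inside the advertised +-20% / +-20deg / +-10% ranges.
def _three_sigma_demo():
    draws = np.random.randn(100000) / 3    #same scaling as in the helper above
    print np.mean(np.abs(draws) <= 1.0)    #~0.997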
def eval(self, t_idx, parms=None):
    """
    evaluate the model at the given time indices
    """
    if parms is None:
        parms = self.mdl_parms_
    res_pos = None
    res_vel = None
    if self.mdl_type_ == 'siglognormal':
        #for the siglognormal model, each parm should be a tuple:
        #(D, t0, mu, sigma, theta_s, theta_e)
        #The closed-form position implementation below is problematic: note the
        #division by zero, since one needs to evaluate the limit of
        #(sin(Phi(t)) - sin(theta_s))/(theta_e - theta_s) when theta_s -> theta_e.
        #So use the velocity-profile implementation instead, even if it is less compact...
        # x_comps = np.array([ comp_parm[0] / (comp_parm[5] - comp_parm[4]) * (np.sin(self.siglog_normal_Phi(comp_parm, t_idx)) - np.sin(comp_parm[4])) for comp_parm in self.mdl_parms_ ])
        # y_comps = np.array([ comp_parm[0] / (comp_parm[5] - comp_parm[4]) * (-np.cos(self.siglog_normal_Phi(comp_parm, t_idx)) + np.cos(comp_parm[4])) for comp_parm in self.mdl_parms_])
        # res = np.concatenate([ [np.sum(x_comps, axis=0)],
        #                        [np.sum(y_comps, axis=0)]], axis=0).transpose()
        # v_amp_array = np.array([self.siglog_vel_amp(parm, t_idx) for parm in parms])
        # phi_array = np.array([self.siglog_normal_Phi(parm, t_idx) for parm in parms])
        # v_x = np.sum(np.abs(v_amp_array) * np.cos(phi_array), axis=0)
        # v_y = np.sum(np.abs(v_amp_array) * np.sin(phi_array), axis=0)
        # v_vec = np.concatenate([[v_x], [v_y]], axis=0).transpose()
        # #more consideration is needed for this dt...
        # res_pos = np.array([self.x0, self.y0]) + np.cumsum(v_vec, axis=0) * self.dt
        # res_vel = v_vec
        res_pos, res_vel = pyrzx.rxzero_traj_eval(parms, t_idx, self.x0, self.y0)
    else:
        print 'Invalid or unsupported model type'
    return res_pos, res_vel
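# --- Reference sketch (added; assumptions flagged) ---------------------------
# A minimal standalone version of the velocity-profile evaluation the comments
# above describe, using the textbook sigma-lognormal definitions of the speed
# Lambda(t) and direction Phi(t). The actual rxzero_traj_eval may differ in
# details (e.g. time normalization); this is illustrative only.
def siglognormal_eval_sketch(parms, t, x0=0.0, y0=0.0):
    import numpy as np
    from scipy.special import erf
    v = np.zeros((len(t), 2))
    for D, t0, mu, sigma, theta_s, theta_e in parms:
        dt = np.maximum(t - t0, 1e-10)   #guard the log for t <= t0
        z = (np.log(dt) - mu) / sigma
        #lognormal speed profile of this component
        amp = D / (sigma * np.sqrt(2*np.pi) * dt) * np.exp(-0.5 * z**2)
        amp[t <= t0] = 0.0               #component not active yet
        #direction sweeps from theta_s to theta_e with the lognormal CDF
        phi = theta_s + 0.5*(theta_e - theta_s)*(1 + erf(z/np.sqrt(2)))
        v[:, 0] += amp * np.cos(phi)
        v[:, 1] += amp * np.sin(phi)
    #position by cumulative integration; uniform time step assumed
    pos = np.array([x0, y0]) + np.cumsum(v, axis=0) * (t[1] - t[0])
    return pos, v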
def trajkin_parm_exploration_to_fit_template(self, sample, template):
    #detect improvement suggestions in the span of the kinematics feature space
    #first extract the kinematics of the sample
    recons_sample = []
    strk_parms_lst = []
    fit_parms_lst = []
    adjusted_comp = []
    adjusted_comp_idx = []
    print sample
    for strk_idx, strk in enumerate(sample):
        print '================================='
        print 'For stroke ', strk_idx
        print '================================='
        t_array = np.linspace(0, 1.0, len(strk))
        strk_parms = pytkrxz.rxzero_train(strk, global_opt_itrs=1)
        strk_parms_lst.append(strk_parms)
        eval_traj, eval_vel = pytkrxz.rxzero_traj_eval(strk_parms, t_array, strk[0, 0], strk[0, 1])
        #<hyin/Jun-8th-2015> try another direction: fit the full parameters to the template
        # free_comp_idx = range(len(strk_parms))
        # fit_parms = pytkrxz.fit_parm_component_with_global_optimization(template[strk_idx], strk_parms, free_comp_idx=free_comp_idx, maxIters=1)
        # comp_modulation_lst = []
        # recons_parms_lst = []
        # for comp_idx, parm_comp in enumerate(strk_parms):
        #     print 'Examining component ', comp_idx
        #     recons_parms = [parm for parm in fit_parms[0]]
        #     # print recons_parms, comp_idx, parm_comp
        #     recons_parms[comp_idx] = parm_comp
        #     recons_parms_lst.append(recons_parms)
        #     comp_modulation_lst.append(np.sum(np.abs(parm_comp-fit_parms[0][comp_idx])))
        #     # recons_err_comp_lst.append(recons_err)
        # # find the smallest one
        # significant_comp_idx = np.argmax(comp_modulation_lst)
        # print 'The most significant component: ', significant_comp_idx
        # print comp_modulation_lst[significant_comp_idx]
        # print recons_parms_lst[significant_comp_idx]
        # recons_eval_traj, recons_eval_vel = pytkrxz.rxzero_traj_eval(recons_parms_lst[significant_comp_idx], t_array,
        #                                                              template[strk_idx][0, 0], template[strk_idx][0, 1])
        #                                                              # strk[0, 0], strk[0, 1])
        # recons_sample.append(recons_eval_traj)
        # adjusted_comp.append([recons_parms_lst[significant_comp_idx]])
        # adjusted_comp_idx.append(significant_comp_idx)
        # recons_sample.append(eval_traj)
        #call the global optimization in rxzero to see how we can fit the template by modulating the extracted parameters
        # fit_parms = pytkrxz.rxzero_global_optimization(template[strk_idx], strk_parms, dt=0.01, maxIters=1)
        # fit_parms_lst.append(fit_parms)
        # fit_eval_traj, fit_eval_vel = pytkrxz.rxzero_traj_eval(fit_parms, t_array, strk[0, 0], strk[0, 1])
        # # recons_sample.append(fit_eval_traj)
        #for this stroke, see which component leads to a better reconstruction towards the template
        recons_err_comp_lst = []
        comp_modulation_lst = []
        if len(strk_parms) == 1:
            #only one component...
            print 'Examining the only component'
            opt_parms, recons_err = pytkrxz.fit_parm_component_with_global_optimization(
                template[strk_idx], strk_parms, free_comp_idx=[[0]], maxIters=1)
            # opt_parms, recons_err = pytkrxz.fit_parm_scale_ang_component_with_global_optimization(template[strk_idx], strk_parms, free_comp_idx=[[0]], maxIters=1)
            comp_modulation_lst.append(opt_parms)
            recons_err_comp_lst.append(recons_err)
            significant_comp_idx = np.argmin(recons_err_comp_lst)
            print 'The only significant component: ', significant_comp_idx
            print comp_modulation_lst[significant_comp_idx]
            recons_eval_traj, recons_eval_vel = pytkrxz.rxzero_traj_eval(
                comp_modulation_lst[significant_comp_idx], t_array,
                template[strk_idx][0, 0], template[strk_idx][0, 1])
                # strk[0, 0], strk[0, 1])
            recons_sample.append(recons_eval_traj)
            adjusted_comp.append(comp_modulation_lst[significant_comp_idx])
            adjusted_comp_idx.append([significant_comp_idx])
        else:
            for comp_idx, parm_comp in enumerate(strk_parms[:-1]):
                print 'Examining components ', comp_idx, comp_idx+1
                free_comp_idx = [[comp_idx], [comp_idx+1]]
                opt_parms, recons_err = pytkrxz.fit_parm_component_with_global_optimization(
                    template[strk_idx], strk_parms, free_comp_idx=free_comp_idx, maxIters=1)
                # opt_parms, recons_err = pytkrxz.fit_parm_scale_ang_component_with_global_optimization(template[strk_idx], strk_parms, free_comp_idx=free_comp_idx, maxIters=1)
                comp_modulation_lst.append(opt_parms)
                recons_err_comp_lst.append(recons_err)
            #find the pair with the smallest reconstruction error
            significant_comp_idx = np.argmin(recons_err_comp_lst)
            print 'The most significant components: ', significant_comp_idx, significant_comp_idx+1
            print comp_modulation_lst[significant_comp_idx]
            recons_eval_traj, recons_eval_vel = pytkrxz.rxzero_traj_eval(
                comp_modulation_lst[significant_comp_idx], t_array,
                template[strk_idx][0, 0], template[strk_idx][0, 1])
                # strk[0, 0], strk[0, 1])
            recons_sample.append(recons_eval_traj)
            adjusted_comp.append(comp_modulation_lst[significant_comp_idx])
            adjusted_comp_idx.append([significant_comp_idx, significant_comp_idx+1])
    # #blend these parms to see if this would give us meaningful instructions...
    # comb_parms_lst = []
    # replace_strk_idx = [0]
    # replace_comp_idx = [0]
    # comb_recons_sample = []
    # for strk_idx, strk_parms in enumerate(strk_parms_lst):
    #     t_array = np.linspace(0, 1.0, len(sample[strk_idx]))
    #     comb_strk_parms = []
    #     for comp_idx, strk_parm_comp in enumerate(strk_parms):
    #         if strk_idx in replace_strk_idx and comp_idx in replace_comp_idx:
    #             print 'replace...'
    #             comb_strk_parms.append(fit_parms_lst[strk_idx][comp_idx])
    #         else:
    #             comb_strk_parms.append(strk_parm_comp)
    #     #evaluate the comb trajectory
    #     comb_eval_traj, comb_eval_vel = pytkrxz.rxzero_traj_eval(comb_strk_parms, t_array, strk[0, 0], strk[0, 1])
    #     comb_recons_sample.append(comb_eval_traj)
    #     comb_parms_lst.append(comb_strk_parms)
    return strk_parms_lst, recons_sample, adjusted_comp, adjusted_comp_idx
def plot_data(self):
    #plot data
    #evaluate base
    curr_data = [mdl['stroke'] for mdl in self.char_mdl]
    bFirstStroke = True
    last_stroke_end_t = 0.0
    if 'vel_profile' not in self.char_mdl[0]:
        print 'no velocity profile stored'
        return
    #currently, only consider one stroke
    for strk_idx, stroke in enumerate(curr_data):
        #vel profile
        vel_profile = self.char_mdl[strk_idx]['vel_profile']
        #t_array
        t_array = np.linspace(0, 1.0, len(stroke)) + last_stroke_end_t
        last_stroke_end_t = t_array[-1]
        #vel vec & theta
        vel_vec = np.diff(stroke, axis=0) / (t_array[1] - t_array[0])
        #theta = np.arctan2(vel_vec[:, 1], vel_vec[:, 0])
        theta = utils.get_continuous_ang(stroke)
        #plot the data only
        #char profile
        self.ax_char_prf.plot(stroke[:, 0], -stroke[:, 1], 'b', linewidth=4.0)
        self.ax_char_prf.set_title('Character Profile', fontsize=8)
        self.ax_char_prf.set_xlim([-1.5, 1.5])
        self.ax_char_prf.set_ylim([-1.5, 1.5])
        self.ax_char_prf.set_xticks([])
        self.ax_char_prf.set_yticks([])
        #vel_x & vel_y
        self.ax_xvel.plot(t_array[0:-1], vel_vec[:, 0], 'b', linewidth=4.0)
        self.ax_xvel.set_title('X Velocity', fontsize=8)
        self.ax_xvel.set_xlabel('Time (s)', fontsize=8)
        self.ax_xvel.set_ylabel('Velocity (Unit/s)', fontsize=8)
        self.ax_yvel.plot(t_array[0:-1], vel_vec[:, 1], 'b', linewidth=4.0)
        self.ax_yvel.set_title('Y Velocity', fontsize=8)
        self.ax_yvel.set_xlabel('Time (s)', fontsize=8)
        self.ax_yvel.set_ylabel('Velocity (Unit/s)', fontsize=8)
        #vel profile
        self.ax_vel_prf.plot(t_array, vel_profile, 'b', linewidth=4.0)
        self.ax_vel_prf.set_title('Velocity Magnitude', fontsize=8)
        self.ax_vel_prf.set_xlabel('Time (s)', fontsize=8)
        self.ax_vel_prf.set_ylabel('Magnitude (Unit/s)', fontsize=8)
        #ang profile
        self.ax_ang_prf.plot(t_array[0:-1], theta, 'b', linewidth=4.0)
        self.ax_ang_prf.set_title('Angular Position', fontsize=8)
        self.ax_ang_prf.set_xlabel('Time (s)', fontsize=8)
        self.ax_ang_prf.set_ylabel('Angular Position (rad)', fontsize=8)
        if bFirstStroke:
            self.ax_char_prf.hold(True)
            self.ax_xvel.hold(True)
            self.ax_yvel.hold(True)
            self.ax_vel_prf.hold(True)
            self.ax_ang_prf.hold(True)
            bFirstStroke = False
    colors = ['r', 'y', 'k', 'g', 'w']
    last_stroke_end_t = 0.0
    for curr_idx in range(len(self.char_mdl)):
        #hold the current drawings to add new curves
        #now only the first stroke
        #registration points...
        vel_profile = self.char_mdl[curr_idx]['vel_profile']
        t_array = np.linspace(0, 1.0, len(curr_data[curr_idx]))
        if 'start_pnt' in self.char_mdl[curr_idx] and 'opt_parms' in self.char_mdl[curr_idx]:
            x0 = self.char_mdl[curr_idx]['start_pnt'][0]
            y0 = self.char_mdl[curr_idx]['start_pnt'][1]
            opt_parms = np.array(self.char_mdl[curr_idx]['opt_parms'])
            traj_opt, vel_vec_opt, opt_parms = self.traj_eval_helper(curr_idx, t_array, opt_parms, x0, y0)
            theta_opt = utils.get_continuous_ang(traj_opt)
            self.ax_char_prf.plot(traj_opt[:, 0], -traj_opt[:, 1], 'r', linewidth=4.0)
            self.ax_vel_prf.plot(t_array[:]+last_stroke_end_t, np.sum(vel_vec_opt**2, axis=1)**(1./2), 'r', linewidth=4.0)
            self.ax_xvel.plot(t_array[:]+last_stroke_end_t, vel_vec_opt[:, 0], 'r', linewidth=4.0)
            self.ax_yvel.plot(t_array[:]+last_stroke_end_t, vel_vec_opt[:, 1], 'r', linewidth=4.0)
            self.ax_ang_prf.plot(t_array[1:]+last_stroke_end_t, theta_opt, 'r', linewidth=4.0)
            #for each component
            for parm in opt_parms:
                comp_traj, comp_vel_vec = pytkrxz.rxzero_traj_eval([parm], t_array, x0, y0)
                self.ax_vel_prf.plot(t_array[:]+last_stroke_end_t, np.sum(comp_vel_vec**2, axis=1)**(1./2), 'g', linewidth=2.5)
                self.ax_xvel.plot(t_array[:]+last_stroke_end_t, comp_vel_vec[:, 0], 'g', linewidth=2.5)
                self.ax_yvel.plot(t_array[:]+last_stroke_end_t, comp_vel_vec[:, 1], 'g', linewidth=2.5)
            last_stroke_end_t += t_array[-1]
        else:
            last_stroke_end_t += t_array[-1]
    self.ax_char_prf.hold(False)
    self.ax_xvel.hold(False)
    self.ax_yvel.hold(False)
    self.ax_vel_prf.hold(False)
    self.ax_ang_prf.hold(False)
    self.canvas.draw()
    return
def sample_from_rf_mdl_helper(from_kin=False):
    tree_idx = int(np.random.uniform(low=0, high=n_trees))
    tmp_tree = mdl['rf_mdl'].estimators_[tree_idx].tree_
    #print 'sampling from the {0}-th tree'.format(tree_idx)

    def recurse(tree, curr_node_idx):
        #check the left and right children
        left_child_node_idx = tree.children_left[curr_node_idx]
        right_child_node_idx = tree.children_right[curr_node_idx]
        if left_child_node_idx == -1 and right_child_node_idx == -1:
            #leaf, return it
            return curr_node_idx
        elif left_child_node_idx == -1 and right_child_node_idx != -1:
            #expand the right side
            return recurse(tree, right_child_node_idx)
        elif left_child_node_idx != -1 and right_child_node_idx == -1:
            #expand the left side
            return recurse(tree, left_child_node_idx)
        else:
            #make a random decision based on the number of samples
            n_samples_left = tree.n_node_samples[left_child_node_idx]
            n_samples_right = tree.n_node_samples[right_child_node_idx]
            #binomial
            decision = np.random.binomial(1, n_samples_left/float(n_samples_left+n_samples_right))
            if decision > 0.5:
                #expand the left side
                return recurse(tree, left_child_node_idx)
            else:
                #expand the right side
                return recurse(tree, right_child_node_idx)

    sample_leaf_idx = recurse(tmp_tree, 0)
    #local gaussian for the leaf samples
    samples = mdl['samples_dict'][tree_idx, sample_leaf_idx]
    if len(samples) == 1:
        #local perturbation...
        print 'only one at the leaf node...'
        sample = samples[0]
        perturb_parm = 0
        mean_parm = 1
    else:
        #mean+std
        print 'In the {0}-th tree, leaf node {1} contains {2} samples'.format(tree_idx, sample_leaf_idx, len(samples))
        if from_kin:
            if not mdl['kinparms_dict'][tree_idx, sample_leaf_idx]:
                #no kinematic parms for this leaf; fall back to the mean sample
                sample = np.mean(samples, axis=0)
                perturb_parm = 0
                mean_parm = 1
            else:
                #evaluate the kinematics model
                start_pnt = mdl['kinparms_dict'][tree_idx, sample_leaf_idx][0]
                parms = mdl['kinparms_dict'][tree_idx, sample_leaf_idx][1]
                mean_parm = np.concatenate([start_pnt, parms.flatten()])
                print mean_parm
                #apply a noise if necessary
                if 'kinparm_cov_dict' in mdl:
                    noise_scale = 0.02
                    # print len(mean_parm), len(mdl['kinparm_cov_dict'][tree_idx, sample_leaf_idx])
                    # print 'covar:', mdl['kinparm_cov_dict'][tree_idx, sample_leaf_idx]
                    perturb_parm = np.random.multivariate_normal(mean_parm,
                        noise_scale * np.diag(mdl['kinparm_cov_dict'][tree_idx, sample_leaf_idx]))
                    #apply the perturbed parm
                    applied_start_pnt = perturb_parm[0:2]
                    # applied_start_pnt = start_pnt
                    applied_parms = np.reshape(perturb_parm[2:], (-1, 6))
                    print 'Apply a noise:'
                    print perturb_parm - mean_parm
                    print 'to the parameters.'
                else:
                    applied_start_pnt = start_pnt
                    applied_parms = parms
                    perturb_parm = mean_parm
                t_array = np.arange(0.0, 1.0, 0.01)
                eval_traj, eval_vel = pytkrxz.rxzero_traj_eval(applied_parms, t_array,
                                                               applied_start_pnt[0], applied_start_pnt[1])
                sample = eval_traj.transpose().flatten()
        else:
            #use the mean...
            sample = np.mean(samples, axis=0)
            #no parm
            perturb_parm = 0
            mean_parm = 1
    return sample, tree_idx, sample_leaf_idx, perturb_parm - mean_parm
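# --- Standalone sketch (added) -----------------------------------------------
# recurse() above descends a fitted sklearn tree, branching in proportion to
# the per-child training-sample counts, so leaves are drawn roughly with their
# empirical frequency. The same idea against a toy RandomForestRegressor; all
# names below are illustrative and not part of the model code.
def _sample_leaf_demo():
    from sklearn.ensemble import RandomForestRegressor
    X = np.random.randn(200, 3)
    y = np.random.randn(200)
    rf = RandomForestRegressor(n_estimators=10).fit(X, y)
    tree = rf.estimators_[np.random.randint(len(rf.estimators_))].tree_
    node = 0
    while tree.children_left[node] != -1:    #-1 marks a leaf; sklearn splits are binary
        left, right = tree.children_left[node], tree.children_right[node]
        p_left = tree.n_node_samples[left] / float(tree.n_node_samples[left] + tree.n_node_samples[right])
        node = left if np.random.rand() < p_left else right
    return node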
def train_leaf_kin_mdl_var(self):
    if self.model_ is None:
        print 'Need pretrained kinematics models.'
        return
    #train the variability for each leaf kinematics model; this provides statistics
    #for the synthesis of char kin parameters
    for num_strk, mdls in self.model_.iteritems():
        for tmp_rf_mdl in mdls:
            #check if there is a field for the kinematic parms
            if 'kinparms_dict' in tmp_rf_mdl:
                kinparms_var_by_node = defaultdict(list)
                for k, d in tmp_rf_mdl['kinparms_dict'].iteritems():
                    tree_idx, leaf_idx = k
                    print 'Training Var model for tree {0} and leaf node {1}'.format(tree_idx, leaf_idx)
                    start_pnt = d[0]
                    parms = d[1]
                    #leaf samples
                    samples = tmp_rf_mdl['samples_dict'][tree_idx, leaf_idx]
                    #learn a local MaxEnt model concerning the variability of the kin parameters
                    #<hyin/Feb-22nd-2015> use a new way to infer the variability of the kinematics parameters;
                    #kinparm_mdl_var_inference is deprecated
                    #latent_parm, latent_sigma = self.kinparm_mdl_var_inference(start_pnt, parms, samples)
                    #covar in cartesian space; note the correlation between x and y is also taken into account
                    #use a block cov for each dimension if an independent analysis is expected
                    #extract diagonal covariance matrix
                    #cart_cov = [np.cov(np.array(samples)[:, col_idx]) for col_idx in range(len(samples[0]))]
                    #cart_cov = np.diag(cart_cov)
                    cart_cov = np.cov(np.array(samples).transpose())
                    # print np.diag(cart_cov)
                    #eval the trajectory at the mean parms
                    t_array = np.arange(0.0, 1.0, 0.01)
                    eval_traj, eval_vel = pytkrxz.rxzero_traj_eval(parms, t_array, start_pnt[0], start_pnt[1])
                    #gradient, use central finite differences
                    delta_p = 1e-4
                    grad_mat = []
                    parms_flatten = np.concatenate([start_pnt, parms.flatten()])
                    parms_flatten = np.nan_to_num(parms_flatten)
                    for dim_idx in range(len(parms_flatten)):
                        #evaluate the central difference
                        perturb_parm_1 = np.array(parms_flatten)
                        perturb_parm_2 = np.array(parms_flatten)
                        perturb_parm_1[dim_idx] += delta_p * 0.5
                        perturb_parm_2[dim_idx] -= delta_p * 0.5
                        perturb_traj_1, perturb_vel_1 = pytkrxz.rxzero_traj_eval(np.reshape(perturb_parm_1[2:], (-1, 6)),
                                                                                 t_array, perturb_parm_1[0], perturb_parm_1[1])
                        perturb_traj_2, perturb_vel_2 = pytkrxz.rxzero_traj_eval(np.reshape(perturb_parm_2[2:], (-1, 6)),
                                                                                 t_array, perturb_parm_2[0], perturb_parm_2[1])
                        delta_f = perturb_traj_1.transpose().flatten() - perturb_traj_2.transpose().flatten()
                        grad_mat.append(delta_f/delta_p)
                    grad_mat = np.array(grad_mat)
                    grad_mat = np.nan_to_num(grad_mat)
                    #svd, s is a 1-d array; note np.linalg.svd returns V^T as its third output
                    U, s, Vh = np.linalg.svd(grad_mat)
                    #print 'singular values:', s
                    #regularizer for the inversion
                    reg_lambda = 5
                    invS = np.zeros((Vh.shape[1], U.shape[0]))
                    invS[0:len(s), 0:len(s)] = np.diag(1./(s+reg_lambda))
                    #regularized pseudo-inverse V inv(S) U^T
                    grad_mat_reg_inverse = Vh.transpose().dot(invS).dot(U.transpose())
                    cov_parm = grad_mat_reg_inverse.transpose().dot(cart_cov).dot(grad_mat_reg_inverse)
                    # eig_cov, eig_vec = np.linalg.eig(cov_parm)
                    #tmp_rf_mdl['kinlat_dict'] = [np.reshape(latent_parm, (-1, 6)), latent_sigma]
                    kinparms_var_by_node[tree_idx, leaf_idx] = np.diag(cov_parm)
                    # print np.diag(cov_parm)
                    # <hyin/Feb-22nd-2015> TODO: need some careful investigation on the scale
                    # print 'variance:', latent_sigma
                tmp_rf_mdl['kinparm_cov_dict'] = kinparms_var_by_node
    return
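# --- Numerical sketch (added) ------------------------------------------------
# train_leaf_kin_mdl_var linearizes the parameter-to-trajectory map, x ~= J*theta,
# and pulls the Cartesian covariance back through a Tikhonov-regularized
# pseudo-inverse: Sigma_theta ~= J+ Sigma_x J+^T. The same step on a toy linear
# map, with illustrative values only:
def _cov_pullback_demo():
    n_f, n_p = 200, 8                       #trajectory dim, parameter dim
    J = np.random.randn(n_f, n_p)           #stand-in for the finite-difference Jacobian
    cart_cov = np.eye(n_f) * 0.01           #toy Cartesian covariance
    #regularized pseudo-inverse: J+ = V diag(1/(s + lambda)) U^T
    U, s, Vh = np.linalg.svd(J, full_matrices=False)
    reg_lambda = 5
    J_pinv = Vh.T.dot(np.diag(1./(s + reg_lambda))).dot(U.T)
    #propagate the covariance back into parameter space
    cov_parm = J_pinv.dot(cart_cov).dot(J_pinv.T)
    return np.diag(cov_parm)                #per-parameter variances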