def respond_to_demonstration_letter(self, demonstration, letter, mode='midway', mode_param=0.5):
    """Update the learned allograph for *letter* from a child's demonstration.

    Args:
        demonstration: sequence of x/y coordinates (flattened with np.reshape,
            xxyy order expected by Stroke.stroke_from_xxyy).
        letter: the letter whose generated shape is updated.
        mode: learning strategy — 'midway' (stroke.midway) or
            'simple' (stroke.weigthedSum).
        mode_param: blending weight forwarded to the chosen strategy.

    Returns:
        (shape_message, score): the shape message for *letter* and the
        euclidian distance between the demonstration and the reference
        allograph in self.refs.

    Raises:
        ValueError: if *mode* is neither 'midway' nor 'simple'.
    """
    demo_stroke = Stroke()
    demo_stroke.stroke_from_xxyy(np.reshape(demonstration, len(demonstration)))
    demo_stroke.normalize_wrt_max()

    if mode == 'midway':
        learned_stroke = stroke.midway(demo_stroke, self.generated_letters[letter], mode_param)
    elif mode == 'simple':
        # NOTE: 'weigthedSum' is the (misspelled) name actually exposed by the
        # stroke module — do not "fix" the spelling here.
        learned_stroke = stroke.weigthedSum(demo_stroke, self.generated_letters[letter], mode_param)
    else:
        # Previously an unknown mode crashed later with UnboundLocalError on
        # `score`; fail explicitly instead.
        raise ValueError("unknown learning mode: %r" % mode)

    # Common tail, previously duplicated verbatim in both branches.
    self.generated_letters[letter] = learned_stroke
    save_learned_allograph(self.child_path, letter, learned_stroke)
    _, score = stroke.euclidian_distance(demo_stroke, self.refs[letter])
    return self.shape_message(letter), score
def respond_to_demonstration_word(self, demonstrations, mode='midway'):
    """Learn from a demonstrated word: update each demonstrated letter.

    Args:
        demonstrations: iterable of (letter, demo_stroke) pairs, where
            demo_stroke is the child's demonstration for that letter.
        mode: learning strategy; only 'midway' is implemented.

    Returns:
        None. The per-letter score is computed but not yet propagated
        (kept for parity with respond_to_demonstration_letter).
    """
    # mutual_modeling will act here
    if mode == 'midway':
        # Fixes vs. original: the loop variable was named `stroke`, shadowing
        # the stroke module (breaking stroke.midway), and the score line
        # referenced an undefined `demo_stroke` (NameError).
        for letter, demo_stroke in demonstrations:
            learned_stroke = stroke.midway(demo_stroke, self.generated_letters[letter])
            self.generated_letters[letter] = learned_stroke
            save_learned_allograph(self.robot_data, letter, learned_stroke)
            # euclidian_distance returns a tuple whose second element is the
            # score (see respond_to_demonstration_letter); currently unused.
            _, score = stroke.euclidian_distance(demo_stroke, self.refs[letter])
def respond_to_demonstration_letter(self, demonstration, letter, grade, mode='midway'):
    """Update the robot's learned allograph for *letter* from a demonstration.

    Args:
        demonstration: sequence of x/y coordinates (flattened with np.reshape,
            xxyy order expected by Stroke.stroke_from_xxyy).
        letter: the letter whose generated shape is updated.
        grade: blending weight forwarded to stroke.midway.
        mode: learning strategy; only 'midway' is implemented.

    Returns:
        (shape_message, score): the shape message for *letter* and the
        euclidian distance between the demonstration and the reference
        allograph in self.refs.

    Raises:
        ValueError: if *mode* is not 'midway' (previously this fell through
        and crashed with UnboundLocalError on `score` at the return).
    """
    demo_stroke = Stroke()
    demo_stroke.stroke_from_xxyy(np.reshape(demonstration, len(demonstration)))
    #demo_stroke.uniformize()
    demo_stroke.normalize_wrt_max()

    if mode == 'midway':
        learned_stroke = stroke.midway(demo_stroke, self.generated_letters[letter], grade)
        self.generated_letters[letter] = learned_stroke
        save_learned_allograph(self.robot_path, letter, learned_stroke)
        _, score = stroke.euclidian_distance(demo_stroke, self.refs[letter])
    else:
        # Planned alternatives, not yet implemented:
        #if mode = 'PCA'
        #if mode = 'sigNorm' (mixture of sigma-log-normal)
        #if mode = 'CNN' (1-D convolutionnal neural networks)
        raise ValueError("unknown learning mode: %r" % mode)

    return self.shape_message(letter), score