def learnCurrentWinner(self, activation):
    self.SAL_WTA_Net.learnCurrentWinner(activation)
    self.resetWTA()


TurnOff = MotivationUnit("TurnOFF_PD", bias=1, group_name='cognitive_expansion')
TurnOff.addConnectionTo(TurnOff, 1)
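# --- Illustrative sketch (not part of the original code) ---
# TurnOff has bias=1 and a recurrent self-connection of weight 1. Assuming the
# usual piecewise-linear activation clamped to [0, 1], such a unit switches on
# by itself and latches: once active, only external inhibition of at least -2
# can reset it. A minimal stand-alone model of that dynamics:

def _clamp01(x):
    return max(0.0, min(1.0, x))

def _latch_demo():
    output = 0.0
    history = []
    # net input = bias (1) + recurrent self-connection (1 * output) + external
    for external in [0.0, 0.0, -1.0, -2.0]:
        output = _clamp01(1.0 + output + external)
        history.append(output)
    return history

# switches on immediately, survives -1, and is only reset by inhibition <= -2
assert _latch_demo() == [1.0, 1.0, 1.0, 0.0]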
def __init__(self, switchRobot, value=False):
    MotivationUnit.__init__(self, "switch", group_name='cognitive_layer')
    self.old_decoupled = value
    self.count_current_state = 0
    self.decoupled = value
    self.call_side_effect_at_iteration = -1
    self.switchRobot = switchRobot
def __init__(self, name, startValue=0, bias=0, group_name='', delay=5):
    MotivationUnit.__init__(self, name=name, startValue=startValue,
                            bias=bias, group_name=group_name)
    self.delay = delay
    self.current_count = 0
def __init__(self, name, startValue=0, bias=0, group_name=''):
    MotivationUnit.__init__(self, name=name, startValue=startValue,
                            bias=bias, group_name=group_name)
    self.__modulatedConnectedTo = []
    self.activation_threshold = 0.5
def __init__(self, name, startValue=0, bias=0, group_name=''):
    MotivationUnit.__init__(self, name=name, startValue=startValue,
                            bias=bias, group_name=group_name)
    WTAUnitFast.wtaUnitList.append(self)
    if len(WTAUnitFast.wtaUnitList) > 1:
        WTAUnitFast.weight_other = -1. / (len(WTAUnitFast.wtaUnitList) - 1)
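# --- Illustrative sketch (not part of the original code) ---
# Every newly constructed WTAUnitFast rescales the single shared inhibition
# weight so the total inhibition a unit can receive stays bounded as the net
# grows: weight_other = -1 / (N - 1) for N units. A stand-alone check:

def _wta_inhibition_weight(n_units):
    # mirrors the update in __init__ above; for a single unit the previous
    # (class-level default) weight is kept unchanged
    return -1.0 / (n_units - 1) if n_units > 1 else None

assert _wta_inhibition_weight(2) == -1.0
assert _wta_inhibition_weight(5) == -0.25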
def applyOutputFunction(self):
    MotivationUnit.applyOutputFunction(self)
    if self.output_value >= self.threshold:
        self.mod_receiver_function(
            self.name,
            self.modulated_target_function(self.output_value, self.weight))
        self.was_active_during_last_iteration = True
    elif self.was_active_during_last_iteration:
        self.mod_receiver_function(self.name, 0)
        self.was_active_during_last_iteration = False
def applyOutputFunction(self):
    MotivationUnit.applyOutputFunction(self)
    # Keep the unit active for at least min_activation_time after it was
    # last switched on.
    if self.last_activation_time + self.min_activation_time >= getCurrentTime():
        if self.output_value < self.threshold:
            self.output_value = self.threshold
    if self.output_value >= self.threshold:
        self.modulated_target_function(self.output_value, self.mod_function_param)
        if not self.preceding_activation:
            self.last_activation_time = getCurrentTime()
            self.preceding_activation = True
    else:
        self.preceding_activation = False
def __init__(self, name, startValue=0, bias=0, group_name='',
             time_window=100, threshold=0.5):
    MotivationUnit.__init__(self, name=name, startValue=startValue,
                            bias=bias, group_name=group_name)
    self.time_window = time_window
    self.current_count = 0
    self.threshold = threshold
def __init__(self, name, startValue=0, bias=0, group_name=''):
    MotivationUnit.__init__(self, name=name, startValue=startValue,
                            bias=bias, group_name=group_name)
    WTAUnit.wtaUnitList.append(self)
    self.weight_vector = []
    if len(WTAUnit.wtaUnitList) > 1:
        inhib_WTA_param = -1. / (len(WTAUnit.wtaUnitList) - 1)
    else:
        inhib_WTA_param = -1.
    # Rebuild every unit's weight vector: self-excitation (weight 1) on the
    # diagonal, uniform inhibition towards all competitors.
    for i in range(0, len(WTAUnit.wtaUnitList)):
        WTAUnit.wtaUnitList[i].weight_vector = \
            [inhib_WTA_param] * len(WTAUnit.wtaUnitList)
        WTAUnit.wtaUnitList[i].weight_vector[i] = 1.
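# --- Illustrative sketch (not part of the original code) ---
# The loop above amounts to building, for every unit, one row of a full WTA
# weight matrix: +1 on the unit itself, -1/(N-1) towards every competitor.
# Row sums are 0, so a uniform activation pattern receives no net drive, while
# any asymmetry is amplified until a single winner remains. Stand-alone check:

def _wta_weight_matrix(n):
    w = -1.0 / (n - 1) if n > 1 else -1.0
    return [[1.0 if i == j else w for j in range(n)] for i in range(n)]

assert _wta_weight_matrix(3)[0] == [1.0, -0.5, -0.5]
assert sum(_wta_weight_matrix(3)[0]) == 0.0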
def __init__(self, orig_MUs, cog_phases):
    self.Phase = cog_phases
    self.original_MUs = orig_MUs
    WTAUnitFast.foundWinner.addConnectionTo(self.Phase.MU_Sim, 1)
    WTAUnitFast.foundWinner.addConnectionTo(self.Phase.MU_WTA, -2)
    self.Phase.MU_SAL.addConnectionTo(WTAUnitFast.foundWinner, -1)
    # Modulate the input to the idea units after a solution is found.
    # This is done only for a short time window.
    self.modulate_idea_input = ConnectionModulatorUnit(
        "modulate_idea_input", group_name='cognitive_layer')
    self.modulate_idea_input_time_window = PhasicUnit(
        "modulate_idea_input_time_window", group_name='cognitive_layer',
        time_window=20)
    self.Phase.MU_Sim.addConnectionTo(self.modulate_idea_input_time_window, 1)
    self.Phase.MU_TestBeh.addConnectionTo(self.modulate_idea_input_time_window, 1)
    self.Phase.MU_SAL.addConnectionTo(self.modulate_idea_input, -1)
    self.modulate_idea_input_time_window.addConnectionTo(self.modulate_idea_input, 1)
    self.sal_weight = 0.3
    self.layer_SAL = []
    self.layer_WTA = []
    # Remember Tested Behavior
    self.layer_RTB = []
    for i in range(len(self.original_MUs)):
        # Spreading Activation Layer:
        # 1. Units activate neighboring units - this activation is modulated
        #    by a spreading_modulator motivation unit (only while that unit
        #    is active does activation spread inside the layer).
        # 2. A recurrent connection onto itself (weight = 1) means the units
        #    integrate their inputs.
        # 3. Units have a random component (noise).
        name_i = self.original_MUs[i].name
        short_name = name_i[0:2] + name_i[name_i.index("_") + 3] + name_i[-3:]
        self.layer_SAL.append(
            NoisyUnit("SAL_" + short_name, group_name='cognitive_expansion'))
        if i > 0:
            self.Phase.MU_SAL.addModulatedConnectionBetweenNeurons(
                self.layer_SAL[-1], self.layer_SAL[-2], self.sal_weight)
            self.Phase.MU_SAL.addModulatedConnectionBetweenNeurons(
                self.layer_SAL[-2], self.layer_SAL[-1], self.sal_weight)
        self.layer_WTA.append(
            WTAUnitFast("WTA_" + short_name, group_name='cognitive_expansion'))
        # The already active MotivationUnits (original, part of behavior
        # selection) shall not be selected and should therefore inhibit the
        # selection on the WTA layer.
        # Removed: first, testing a single behavior might still be sensible;
        # second, the WTA unit would get too much inhibition during simulation
        # and would not be active at the beginning of a behavior test.
        # self.original_MUs[i].addConnectionTo(self.layer_WTA[-1], -0.2)
        # The selected winner gets activated.
        self.modulate_idea_input.addModulatedConnectionBetweenNeurons(
            self.layer_WTA[i], self.original_MUs[i], 2.)
        self.Phase.MU_SAL.addModulatedConnectionBetweenNeurons(
            self.layer_SAL[-1], self.layer_WTA[-1], 0.5)  # was 0.5
        self.layer_RTB.append(MotivationUnit("RTB_" + short_name))
        WTAUnitFast.foundWinner.addModulatedConnectionBetweenNeurons(
            self.layer_WTA[-1], self.layer_RTB[-1], 1)
        self.layer_RTB[-1].addConnectionTo(self.layer_WTA[-1], -0.2)
        self.layer_RTB[-1].addConnectionTo(self.layer_RTB[-1], 1)
def __init__(self, name, input_unit, delay=5, group_name=''):
    self.delay = 1. * delay
    self.input_unit = input_unit
    # First unit: increases its activity over time (recurrent connection).
    self.circ_count = MotivationUnit(
        name="circ_counter_" + name,
        bias=-(self.delay / (self.delay + 1)),
        group_name=group_name)
    self.circ_count.addConnectionTo(self.circ_count, 1.)
    self.input_unit.addConnectionTo(self.circ_count, 1.)
    # Output unit: delayed signal.
    self.circ_delayed = MotivationUnit(
        name="circ_delayed_" + name, bias=-delay, group_name=group_name)
    self.circ_count.addConnectionTo(self.circ_delayed, (delay + 1))
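# --- Illustrative sketch (not part of the original code) ---
# Stand-alone model of the two-unit delay line above, assuming the usual
# piecewise-linear activation clamped to [0, 1]: circ_count integrates a
# constant input in steps of 1/(delay+1); circ_delayed (bias -delay, incoming
# weight delay+1) only becomes active once circ_count exceeds delay/(delay+1),
# i.e. about `delay` steps after input onset plus one step of inter-unit
# propagation delay.

def _clamp01(x):
    return max(0.0, min(1.0, x))

def _delay_line(delay=5, steps=10):
    count, trace = 0.0, []
    for _ in range(steps):
        new_count = _clamp01(count + 1.0 - delay / (delay + 1.0))
        # delayed unit reads the counter's output from the previous step
        delayed = _clamp01((delay + 1.0) * count - delay)
        count = new_count
        trace.append(delayed)
    return trace

# the delayed output stays at 0 for the first `delay` steps, then switches on
assert _delay_line(delay=5)[:6] == [0.0] * 6 and _delay_line(delay=5)[-1] > 0.0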
def propagate(self):
    MotivationUnit.propagate(self)
    # For the fast WTA: only once per timestep (while inhibition_sum still
    # holds its sentinel value of -1) all activations in the WTA net are
    # summed. From this sum the inhibition for each individual unit can be
    # derived without a second pass over all units.
    if WTAUnitFast.inhibition_sum == -1:
        WTAUnitFast.inhibition_sum = 0
        WTAUnitFast.net_sum = 0
        for i in range(0, len(WTAUnitFast.wtaUnitList)):
            WTAUnitFast.inhibition_sum += \
                WTAUnitFast.weight_other * WTAUnitFast.wtaUnitList[i].output_value
    self.input_sum += WTAUnitFast.inhibition_sum + \
        (1 - WTAUnitFast.weight_other) * self.output_value
    if self.input_sum > 0:
        # net_sum keeps track of all activations in the network for the next
        # timestep - it is used for normalization in the applyOutput step.
        WTAUnitFast.net_sum += self.input_sum
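# --- Illustrative sketch (not part of the original code) ---
# The summation trick above is O(N) per timestep instead of O(N^2):
# inhibition_sum contains weight_other * o_j for ALL units, including the unit
# itself, so adding (1 - weight_other) * own output restores the intended net
# input  o_i + weight_other * sum_{j != i} o_j.  A stand-alone check:

def _net_input(outputs, i, w_other):
    inhibition_sum = sum(w_other * o for o in outputs)    # one shared pass
    return inhibition_sum + (1.0 - w_other) * outputs[i]  # per-unit fixup

outputs, w = [0.2, 0.7, 0.1], -0.5
explicit = outputs[1] + w * (outputs[0] + outputs[2])
assert abs(_net_input(outputs, 1, w) - explicit) < 1e-12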
def __init__(self, name, mod_target, threshold=1.0, startValue=0, bias=0,
             fct_param=None, group_name=''):
    MotivationUnit.__init__(self, name=name, startValue=startValue,
                            bias=bias, group_name=group_name)
    # Source of the PEP shift is a CoordinationInfluencePepShiftCalculator.
    #self.source_activation = source_activation
    self.threshold = threshold
    self.modulated_target_function = mod_target
    self.mod_function_param = fct_param
def applyOutputFunction(self):
    MotivationUnit.applyOutputFunction(self)
    # Calls the side effect a number of steps after switching:
    # if (self.count_current_state == self.call_side_effect_at_iteration):
    #     print("SIDE EFFECT CALLED")
    if self.output_value == 1:
        self.decoupled = True
        #print('sending all angle velocity from switch object.')
        self.switchRobot.wRobot.sendAllAngleVelocity()
    else:
        self.decoupled = False
    if self.old_decoupled == self.decoupled:
        self.count_current_state += 1
    else:
        self.old_decoupled = self.decoupled
        self.count_current_state = 0
        self.callImmediateSideEffect(self.decoupled)
def applyOutputFunction(self):
    MotivationUnit.applyOutputFunction(self)
    if self.output_value >= self.threshold:
        self.modulated_target_function(self.output_value, self.mod_function_param)
def applyOutputFunction(self):
    MotivationUnit.applyOutputFunction(self)
    if self.output_value != self.old_output_value:
        if self.output_value >= self.threshold:
            self.modulated_target_function(self.output_value)
        self.old_output_value = self.output_value
def propagate(self):
    MotivationUnit.propagate(self)
    if self.output_value > self.activation_threshold:
        for item in self.__modulatedConnectedTo:
            #print("Modulate: ", item[0].name, " / ", item[0].output_value)
            item[1].addIncomingValue(item[0].output_value * item[2])
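# --- Illustrative sketch (not part of the original code) ---
# Each entry of __modulatedConnectedTo appears to be a triple
# (source_unit, target_unit, weight), so the modulator acts as a gate: the
# weighted source activation is only forwarded while the modulator's own
# output exceeds activation_threshold. Schematically:
#
#   if modulator.output_value > 0.5:
#       target.addIncomingValue(source.output_value * weight)
#   # otherwise the connection is silent, as if the weight were 0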
class MotivationNetLeg(object):

    def __init__(self, wleg, motivationNetRobot):
        self.wleg = wleg
        self.motivationNetRobot = motivationNetRobot
        self.aep = numpy.array([0.0, 0.0, 0.0], dtype=float)
        self.aep_init = numpy.array(self.aep)
        self.aep_temp = None  #numpy.array([0.0,0.0,0.0], dtype=float)
        # For one experiment the swing movements are pushed further to the
        # front. Usually, next_swing_mode is 0.
        # next_swing_mode in [1,9] counts while the leg is in a stable
        # stance_motivation and afterwards moves the swing target to the
        # temporary target. During the next swing movement next_swing_mode is
        # increased further, and in the next stance movement the old swing
        # target is reset.
        self.next_swing_mode = 0
        self.pep = numpy.array([0.0, 0.0, 0.0], dtype=float)
        self.dep = numpy.array([0.0, 0.0, WSTATIC.depz], dtype=float)
        self._pep_shift = numpy.array([0.0, 0.0, 0.0], dtype=float)
        self._pep_shift_is_up_to_date = True
        self._pep_shifts = dict()
        self._aep_shift = numpy.array([0.0, 0.0, 0.0], dtype=float)
        self._aep_shift_is_up_to_date = True
        self._aep_shifts = dict()
        self.mmcLegStance = mmcAngularLegModelStance(self)
        leg_name = self.wleg.leg.name[0] + self.wleg.leg.name[
            1 + str.find(self.wleg.leg.name, '_')]
        self.legMU = MotivationUnit("leg_" + leg_name, 1,
                                    group_name='cognitive_layer')
        self.legMU.addConnectionTo(self.legMU, 1)
        self.stand = MotivationUnit("stand_" + leg_name,
                                    group_name='cognitive_layer')
        self.walk = MotivationUnit("walk_" + leg_name,
                                   group_name='cognitive_layer')
        self.forward = MotivationUnit("forw_" + leg_name, bias=1,
                                      group_name='cognitive_layer')
        self.backward = MotivationUnit("backw_" + leg_name,
                                       group_name='cognitive_layer')
        #if WSTATIC.swing_trajectory_generator == 'bezier':
        #    self.swing_net = SwingMovementBezier(self)
        #    self.swing_motivation = ModulatingMotivationUnit(
        #        ("swing_" + leg_name), self.swing_net.moveToNextPoint,
        #        threshold=0.,
        #        group_name=self.wleg.leg.name + '_coordination_rules')
        if WSTATIC.swing_trajectory_generator == 'quadratic_spline':
            self.swing_net = SwingMovementDirectInverseKinematics(self)
            self.swing_motivation = MotivationUnit(
                ("swing_" + leg_name), group_name='behavior_layer')
            self.swing_motivation_toFront = ModulatingMotivationUnit(
                ("swing_toFront_" + leg_name),
                self.swing_net.modulatedRoutineFunctionCall,
                fct_param=self.get_aep_shifted,
                threshold=0.5,
                group_name='behavior_layer')
            self.swing_motivation_toBack = ModulatingMotivationUnit(
                ("swing_toBack_" + leg_name),
                self.swing_net.modulatedRoutineFunctionCall,
                fct_param=self.get_swing_backward_target,
                threshold=0.5,
                group_name='behavior_layer')
        else:
            raise Exception('No valid swing trajectory generator was chosen.')
        if WSTATIC.stance_trajectory_generator == 'bodymodel':
            self.stance_net = StanceMovementBodyModel(self)
        #elif WSTATIC.stance_trajectory_generator == 'springdampermass':
        #    self.stance_net = StanceMovementSpringDamperMassLeg(self)
        else:
            raise Exception('No valid stance trajectory generator was chosen.')
        self.stance_motivation = MotivationUnit(
            ("stance_" + leg_name), startValue=1, group_name='behavior_layer')
        self.stance_motivation_toBack = ModulatingMotivationUnit(
            ("stance_toBack_" + leg_name),
            self.stance_net.modulatedRoutineFunctionCall,
            startValue=1, threshold=0.5, group_name='behavior_layer')
        self.stance_motivation_toFront = ModulatingMotivationUnit(
            ("stance_toFront_" + leg_name),
            self.stance_net.modulatedRoutineFunctionCall,
            startValue=1, threshold=0.5, group_name='behavior_layer')
        rec_w = 0.6
        self.swing_motivation.addConnectionTo(self.swing_motivation, rec_w)
        self.stance_motivation.addConnectionTo(self.swing_motivation, -0.75)
        self.legMU.addConnectionTo(self.swing_motivation, 0.25)
        self.swing_motivation_toFront.addConnectionTo(self.swing_motivation, 0.8)
        self.swing_motivation_toBack.addConnectionTo(self.swing_motivation, 0.8)
        self.swing_motivation_toFront.addConnectionTo(
            self.swing_motivation_toFront, rec_w)
        self.swing_motivation.addConnectionTo(self.swing_motivation_toFront, 0.45)
        self.swing_motivation_toBack.addConnectionTo(
            self.swing_motivation_toFront, -0.6)
        self.backward.addConnectionTo(self.swing_motivation_toFront, -0.15)
        self.swing_motivation_toBack.addConnectionTo(
            self.swing_motivation_toBack, rec_w)
        self.swing_motivation.addConnectionTo(self.swing_motivation_toBack, 0.45)
        self.swing_motivation_toFront.addConnectionTo(
            self.swing_motivation_toBack, -0.6)
        self.forward.addConnectionTo(self.swing_motivation_toBack, -0.15)
        self.stance_motivation.addConnectionTo(self.stance_motivation, 0.25)
        self.swing_motivation.addConnectionTo(self.stance_motivation, -0.75)
        self.legMU.addConnectionTo(self.stance_motivation, 0.25)
        self.stance_motivation_toFront.addConnectionTo(self.stance_motivation, 0.8)
        self.stance_motivation_toBack.addConnectionTo(self.stance_motivation, 0.8)
        self.stance_motivation_toFront.addConnectionTo(
            self.stance_motivation_toFront, 0.25)
        self.stance_motivation.addConnectionTo(self.stance_motivation_toFront, 0.45)
        self.stance_motivation_toBack.addConnectionTo(
            self.stance_motivation_toFront, -0.6)
        self.forward.addConnectionTo(self.stance_motivation_toFront, -0.15)
        self.stance_motivation_toBack.addConnectionTo(
            self.stance_motivation_toBack, rec_w)
        self.stance_motivation.addConnectionTo(self.stance_motivation_toBack, 0.45)
        self.stance_motivation_toFront.addConnectionTo(
            self.stance_motivation_toBack, -0.6)
        self.backward.addConnectionTo(self.stance_motivation_toBack, -0.15)
        rule1_coordination_weights = WSTATIC.coordination_rules["rule1"]
        self.rule1 = CoordinationRules.Rule1(
            self,
            rule1_coordination_weights["swing_stance_pep_shift_ratio"],
            rule1_coordination_weights["velocity_threshold"],
            rule1_coordination_weights["velocity_threshold_delay_offset"],
            rule1_coordination_weights["lower_velocity_delay_factor"],
            rule1_coordination_weights["higher_velocity_delay_factor"],
            rule1_coordination_weights["max_delay"])
        rule2_coordination_weights = WSTATIC.coordination_rules["rule2"]
        self.rule2 = CoordinationRules.Rule2(
            self,
            rule2_coordination_weights["start_delay"],
            rule2_coordination_weights["duration"])
        rule3LinearThreshold_coordination_weights = \
            WSTATIC.coordination_rules["rule3LinearThreshold"]
        self.rule3LinearThreshold = CoordinationRules.Rule3LinearThreshold(
            self,
            rule3LinearThreshold_coordination_weights["active_distance"],
            rule3LinearThreshold_coordination_weights["threshold_offset"],
            rule3LinearThreshold_coordination_weights["threshold_slope"])
        rule3Ipsilateral_coordination_weights = \
            WSTATIC.coordination_rules["rule3Ipsilateral"]
        self.rule3Ipsilateral = CoordinationRules.Rule3Ipsilateral(
            self,
            rule3Ipsilateral_coordination_weights["active_distance"],
            rule3Ipsilateral_coordination_weights["threshold_offset"],
            rule3Ipsilateral_coordination_weights["threshold_slope"],
            rule3Ipsilateral_coordination_weights["threshold_2_offset"],
            rule3Ipsilateral_coordination_weights["threshold_2_slope"])
        self.rule4 = CoordinationRules.Rule4(self)
        self.behindPEP = BehindPEPSensorUnit(self, group_name='sensors')
        # Inhibition of stance not needed - due to swing starter inhibition.
        self.behindPEP.addConnectionTo(self.stance_motivation,
                                       -9 * WSTATIC.ws / (WSTATIC.ws + 1))
        self.behindPEP.addConnectionTo(self.swing_motivation,
                                       9 * WSTATIC.ws / (WSTATIC.ws + 1))  # was 9
        self.swing_starter = PhasicUnit(
            ("sw_starter_" + leg_name),
            group_name='sensors',
            time_window=(RSTATIC.controller_frequency * WSTATIC.minimum_swing_time),
            threshold=0.25)
        self.swing_motivation.addConnectionTo(self.swing_starter, 1)
        self.swing_starter.addConnectionTo(self.swing_motivation,
                                           9 * WSTATIC.ws / (WSTATIC.ws + 1))
        self.swing_starter.addConnectionTo(self.stance_motivation,
                                           -9 * WSTATIC.ws / (WSTATIC.ws + 1))
        self.gc = MNSTATIC.ground_contact_class_dict[
            MNSTATIC.groundContactMethod](self, group_name='sensors')
        self.gc.addConnectionTo(self.stance_motivation,
                                9 / (WSTATIC.ws + 1))  # was 3

    def __del__(self):
        pass

    def init_leg_mmc_model(self):
        """ As the MMC networks have to be initialised, this function has to
            be called at the beginning to drive the network into a valid
            configuration. This is important for the dual quaternion network;
            for the single leg network it might be dropped, as there the
            relaxation is very fast.
        """
        for _ in range(0, 10):
            self.mmcLegStance.set_joint_angles(self.wleg.leg.getInputPosition())
            self.mmcLegStance.mmc_kinematic_iteration_step()
        if __debug__:
            print(self.wleg.leg.name, " in init mmc")

    def update_leg_mmc_model(self):
        """ Update the leg network. Sensory data is fed back to the leg
            network and it is advanced one step, which is sufficient to keep
            it up to date. At the same time the network acts as a filter:
            when noise is applied (this can be done in the mmc leg class on
            all sensory data), the incoming sensory data is fused with the
            current configuration into a coherent state.
        """
        pass

    # =========== Shifting the PEP Methods ====================
    @property
    def pep_shifted(self):
        tempPep = self.pep + self.pep_shift
        if tempPep[0] >= self.aep_shifted[0] - WSTATIC.minimum_swing_distance:
            tempPep[0] = self.aep_shifted[0] - WSTATIC.minimum_swing_distance
        return tempPep

    @property
    def pep_shift(self):
        if not self._pep_shift_is_up_to_date:
            self._pep_shift = sum(self._pep_shifts.values())
            self._pep_shift_is_up_to_date = True
        return self._pep_shift

    def shiftPep(self, source_name, pep_shift):
        try:
            temp_pep_shift = numpy.array((float(pep_shift), 0, 0))
        except TypeError:
            if len(pep_shift) == 3:
                # A list comprehension is required here: numpy.array applied
                # to a generator yields a 0-d object array, not a vector.
                temp_pep_shift = numpy.array([float(i) for i in pep_shift])
        if temp_pep_shift.any():
            self._pep_shifts[source_name] = temp_pep_shift
        else:
            try:
                del self._pep_shifts[source_name]
            except Exception:
                pass
        self._pep_shift_is_up_to_date = False

    def resetPepShift(self):
        self._pep_shifts = dict()

    def getPepDict(self):
        return self._pep_shifts

    def get_aep_shifted(self):
        return self.aep_shifted

    def get_pep_shifted(self):
        return self.pep_shifted

    def get_swing_backward_target(self):
        # Both original branches (hind legs vs. other legs) returned the same
        # offset, so the distinction is dropped here.
        return self.pep + numpy.array([0.05, 0., 0.])

    @property
    def aep_shifted(self):
        return self.aep + self.aep_shift

    @property
    def aep_shift(self):
        if not self._aep_shift_is_up_to_date:
            self._aep_shift = sum(self._aep_shifts.values())
            self._aep_shift_is_up_to_date = True
        return self._aep_shift

    def shiftAep(self, source_name, aep_shift):
        try:
            temp_aep_shift = numpy.array((float(aep_shift), 0, 0))
        except TypeError:
            if len(aep_shift) == 3:
                # See shiftPep: a list comprehension is required here as well.
                temp_aep_shift = numpy.array([float(i) for i in aep_shift])
        if temp_aep_shift.any():
            self._aep_shifts[source_name] = temp_aep_shift
        else:
            try:
                del self._aep_shifts[source_name]
            except Exception:
                pass
        self._aep_shift_is_up_to_date = False

    def resetAepShift(self):
        self._aep_shifts = dict()

    def moveNextSwingToFront(self, targetShift):
        self.next_swing_mode = 1
        self.aep_init = numpy.array(self.aep)
        self.aep_temp = numpy.array(self.aep_init + targetShift)

    ## Answers whether the motivation unit for swing is active.
    def inSwingPhase(self):
        swing_test = ((self.swing_motivation_toFront.output_value >
                       self.swing_motivation_toFront.threshold)
                      or (self.swing_motivation_toBack.output_value >
                          self.swing_motivation_toBack.threshold))
        if swing_test:
            self.stance_net.resetStanceTrajectory()
            if 10 <= self.next_swing_mode:
                self.next_swing_mode += 1
        else:
            self.swing_net.resetSwingTrajectory()
            if 0 < self.next_swing_mode < 9:
                self.next_swing_mode += 1
            if self.next_swing_mode == 9:
                self.aep = self.aep_temp
                print("SET new Swing Target for ", self.wleg.leg.name)
                self.next_swing_mode += 1
            if self.next_swing_mode > 25:
                self.aep = self.aep_init
                print("RESET Swing Target for ", self.wleg.leg.name)
                self.next_swing_mode = 0
        return swing_test

    ## Answers whether the motivation unit for stance is active.
    def inStancePhase(self):
        return ((self.stance_motivation_toBack.output_value >
                 self.stance_motivation_toBack.threshold)
                or (self.stance_motivation_toFront.output_value >
                    self.stance_motivation_toFront.threshold))

    def isSwinging(self):
        return 0 < self.swing_motivation.output_value > self.stance_motivation.output_value

    def isStancing(self):
        return not self.isSwinging()

    # Provides relative phase information: 0 is the beginning of stance,
    # 0.5 the beginning of swing, 1 the end of swing.
    def getCurrentLegPhase(self):
        current_phase = 0.
        if WSTATIC.swing_trajectory_generator == 'quadratic_spline':
            if self.isSwinging():
                current_phase = 0.5 + 0.5 * self.swing_net.iteration \
                    / self.swing_net.iteration_steps_swing
                if current_phase > 1.:
                    current_phase = 1.
            else:
                current_phase = 0.5 * (
                    self.aep[0] - self.wleg.leg.input_foot_position[0]
                ) / (self.aep[0] - self.pep_shifted[0])
                if current_phase > 0.5:
                    current_phase = 0.5
                elif current_phase < 0.:
                    current_phase = 0.
        #print(self.wleg.leg.name + " in " + str(current_phase))
        return current_phase
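# --- Illustrative sketch (not part of the original code) ---
# getCurrentLegPhase maps the step cycle onto [0, 1]: stance covers [0, 0.5]
# (fraction of the AEP-to-PEP distance already travelled), swing covers
# [0.5, 1] (fraction of the planned swing iterations). A stand-alone version
# of the stance half, assuming scalar x-positions for the extreme positions:

def _stance_phase(aep_x, pep_x, foot_x):
    phase = 0.5 * (aep_x - foot_x) / (aep_x - pep_x)
    return min(0.5, max(0.0, phase))

assert _stance_phase(0.2, -0.2, 0.2) == 0.0    # just touched down at the AEP
assert _stance_phase(0.2, -0.2, 0.0) == 0.25   # halfway through stance
assert _stance_phase(0.2, -0.2, -0.2) == 0.5   # reached the PEP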