Example #1
    def on_received_get_commands(self, context: Context, data: GetCommands):
        # context.info(f'on_received_get_commands')

        if not self.agent.initialized:
            pwm_left, pwm_right = 0.0, 0.0
        else:
            # TODO: let's use a queue here. Performance suffers otherwise.
            # What you should do is: *get the last command*, if available;
            # otherwise, wait for one command.
            t0 = time.time()
            while not self.agent.updated:
                dt = time.time() - t0
                if dt > 2.0:
                    context.info(f"agent not ready after {dt:.1f} s")
                    time.sleep(0.5)
                if dt > 60:
                    msg = f"I have been waiting for commands from the ROS part for {int(dt)} s"
                    context.error(msg)
                    raise Exception(msg)
                time.sleep(0.02)
            dt = time.time() - t0
            if dt > 2.0:
                context.info(f"obtained agent commands after {dt:.1f} s")
                time.sleep(0.2)

            pwm_left, pwm_right = self.agent.action
            self.agent.updated = False

        # Dim grey on all five LEDs; pack the wheel PWMs and LEDs into one command.
        grey = RGB(0.5, 0.5, 0.5)
        led_commands = LEDSCommands(grey, grey, grey, grey, grey)
        pwm_commands = PWMCommands(motor_left=pwm_left, motor_right=pwm_right)
        commands = DB20Commands(pwm_commands, led_commands)

        context.write("commands", commands)
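
The TODO above suggests replacing the busy-wait with a queue. Below is a minimal sketch of that idea, assuming the ROS side can push each new command into a standard queue.Queue; the Agent class and the push_action and latest_action names are illustrative, not part of the original code.

import queue


class Agent:
    """Hypothetical command buffer between the ROS side and on_received_get_commands."""

    def __init__(self) -> None:
        self.action_queue: "queue.Queue[tuple]" = queue.Queue()

    def push_action(self, pwm_left: float, pwm_right: float) -> None:
        # Called from the ROS callback whenever a new command arrives.
        self.action_queue.put((pwm_left, pwm_right))

    def latest_action(self, timeout: float = 60.0):
        # Block until at least one command is available (raises queue.Empty
        # after the timeout, replacing the hand-rolled 60 s deadline)...
        action = self.action_queue.get(timeout=timeout)
        # ...then drain the queue so we always act on the most recent command.
        while True:
            try:
                action = self.action_queue.get_nowait()
            except queue.Empty:
                return action

This matches what the comment asks for: take the last command if several are queued, otherwise block until one arrives.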
Example #2
    def on_received_set_robot_commands(self, data: DB18SetRobotCommands,
                                       context: Context):
        l, r = data.commands.wheels.motor_left, data.commands.wheels.motor_right

        if max(math.fabs(l), math.fabs(r)) > 1:
            msg = ("Received invalid PWM commands; they should be between -1 and +1."
                   f" Received left = {l!r}, right = {r!r}.")
            context.error(msg)
            raise Exception(msg)
        self.last_commands = data.commands
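
As a quick illustration of the rule the handler enforces, the range check can be factored into a small standalone predicate; this pwm_in_range helper is illustrative and not part of the original code.

import math


def pwm_in_range(l: float, r: float) -> bool:
    """True iff both PWM values lie in the allowed range [-1, +1]."""
    return max(math.fabs(l), math.fabs(r)) <= 1


assert pwm_in_range(0.5, -0.5)      # valid command
assert not pwm_in_range(1.2, 0.0)   # rejected: left motor out of range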
Example #3
    def check_gpu_available(self, context: Context):
        import torch

        available = torch.cuda.is_available()
        req = os.environ.get('AIDO_REQUIRE_GPU', None)
        context.info(f'torch.cuda.is_available = {available!r} AIDO_REQUIRE_GPU = {req!r}')
        context.info('init()')
        # Fall back to the CPU when CUDA is not available.
        self.device = torch.device('cuda:0' if available else 'cpu')
        if available:
            i = torch.cuda.current_device()
            count = torch.cuda.device_count()
            name = torch.cuda.get_device_name(i)
            context.info(f'device {i} of {count}; name = {name!r}')
        elif req is not None:
            # A GPU was required via the environment but none was found: fail hard.
            msg = 'No GPU found, but AIDO_REQUIRE_GPU is set; bailing.'
            context.error(msg)
            raise RuntimeError(msg)
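
The failure mode above is gated by the AIDO_REQUIRE_GPU environment variable: when it is unset, running without a GPU is tolerated; when it is set to anything at all, a missing GPU is fatal. A minimal sketch of that gating logic in isolation (the require_gpu_or_fail helper is hypothetical, not part of the original code):

import os


def require_gpu_or_fail(available: bool) -> None:
    """Raise only when the environment requires a GPU but none is available."""
    req = os.environ.get('AIDO_REQUIRE_GPU', None)
    if not available and req is not None:
        raise RuntimeError('No GPU found, but AIDO_REQUIRE_GPU is set; bailing.')


require_gpu_or_fail(available=True)   # a GPU is present: always fine
os.environ.pop('AIDO_REQUIRE_GPU', None)
require_gpu_or_fail(available=False)  # tolerated: the environment does not require a GPU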