    def stim(self):
        # present the chosen stimulus
        params = exp.get_merged_params()
        if params["stimulus"].lower() == "led":
            self.led_stimulus()
        else:
            self.monitor_stimulus()
    def dispatch_reward(self):
        # when no explicit delay is configured, wait for the full LED blink
        # sequence (duration * number of blinks) before dispatching the reward.
        params = exp.get_merged_params()
        if params["reward_delay"] is None:
            self.reward_delay = params["led_duration"] * params["led_blinks"]
        else:
            self.reward_delay = params["reward_delay"]
        schedule.once(self.dispatch_reward_actual, self.reward_delay)
    def monitor_stimulus(self):
        params = exp.get_merged_params()
        monitor.chnage_color(params.get("monitor_color", "random"))
        # publish the monitor color over MQTT once the stimulus duration elapses
        self.stim_cancel = schedule.once(
            lambda: mqtt.client.publish(
                topic="monitor/color",
                payload=params.get("monitor_color", "black")),
            params.get("monitor_duration", 60),
        )
    def led_stimulus(self):
        params = exp.get_merged_params()
        if exp.state["arena", "signal_led"]:
            arena.signal_led(False)
        # toggle the signal LED on a schedule; each blink is an on and an off toggle
        self.stim_cancel = schedule.repeat(
            lambda: arena.signal_led(not exp.state["arena", "signal_led"]),
            params["led_duration"],
            2 * params.get("led_blinks", 1),
        )
    def check_detection(self, locations):
        params = exp.get_merged_params()
        # use the center of the detection bounding box (head) and check
        # whether it lies within the reinforced radius around the end point.
        center = ((locations[2] + locations[0]) / 2,
                  (locations[3] + locations[1]) / 2)
        dist = math.sqrt((center[0] - self.end_point[0])**2 +
                         (center[1] - self.end_point[1])**2)
        # accept only detections whose confidence is high enough
        return (dist < params["radius"]
                and locations[-1] >= params["min_confidence"])
    def end_logic_trial(self):
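        # end the current logic trial: cancel any active stimulus, log the
        # outcome, and schedule or start the next trial according to the
        # continuous / consecutive settings.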
        params = exp.get_merged_params()
        self.stim_cancel()  # canceling stimulus, if active.
        if params["stimulus"] == "monitor":
            monitor.chnage_color("black")
        timestamp = time.time()
        # logging trial data
        if self.in_trial and not self.got_detection:
            self.log.info("Logic trial ended, failure")
            exp.event_logger.log(
                "learn_exp/logical_trial_ended",
                {
                    "type": self.ex_type,
                    "success": False
                },
            )
        elif self.in_trial and self.got_detection:
            self.log.info("Logic trial ended, success")
            exp.event_logger.log("learn_exp/logical_trial_ended", {
                "type": self.ex_type,
                "success": True
            })
        else:
            self.log.info("Logic trial ended")

        # continuous trial: schedule the next.
        self.in_trial = False
        self.got_detection = False
        if params.get("continuous", False):
            if params["record_exp"] and not params["record_all"]:
                video_system.stop_record()
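            # cancel the pending schedule and re-run period_call for the remaining repetitions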
            self.cancel_trials()
            self.cancel_trials = schedule.repeat(self.period_call, interval,
                                                 self.cur_trial - 1)
        elif self.consecutive:
            if params["record_exp"] and not params["record_all"]:
                schedule.once(lambda: video_system.stop_record(),
                              params.get("record_overhead", 0))
            if self.cur_trial > 0:
                exp.next_trial()
        else:
            if params["record_exp"] and not params["record_all"]:
                schedule.once(lambda: video_system.stop_record(),
                              params.get("record_overhead", 0))
    def detectAruco(self):
        # detect an ArUco marker in the top camera image and use its center
        # as the reinforced end point.
        params = exp.get_merged_params()
        test_image, _ = exp.image_sources["top"].get_image()
        # currently using 4x4 ArUco markers
        arucoDict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
        arucoParams = cv.aruco.DetectorParameters_create()
        corners, ids, rejected = cv.aruco.detectMarkers(
            test_image, arucoDict, parameters=arucoParams)
        img_w_markers = cv.cvtColor(test_image, cv.COLOR_GRAY2BGR)
        if corners is not None and len(corners) > 0:
            detection = corners[0][0]
            mean_xy = np.mean(detection, axis=0)
            # cv.circle below expects integer coordinates
            self.end_point = (int(mean_xy[0]), int(mean_xy[1]))
            self.log.info("End point is " + str(self.end_point))
            img_w_markers = cv.aruco.drawDetectedMarkers(img_w_markers, corners)
        else:
            self.log.info("Did not detect any aruco markers!")
            self.end_point = tuple(params["default_end"])
        # save an annotated frame showing the reinforced area
        img_w_circle = cv.circle(
            img_w_markers,
            self.end_point,
            radius=params["radius"],
            color=(0, 255, 0),
            thickness=5,
        )
        cv.imwrite(
            os.path.join(
                self.data_dir,
                "arena_reinforced_area_" +
                datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".jpg",
            ),
            img_w_circle,
        )
    def on_yolo_detection(self, payload):
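        # handle a single YOLO detection message: log it and update the trial
        # state when the detected position enters or leaves the reinforced radius.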
        params = exp.get_merged_params()
        det = payload["detection"]
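        # det is assumed to hold bounding-box coordinates followed by a
        # confidence score (five values), matching check_detection() above.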
        if det is not None and len(det) != 0:
            self.yolo_log.log((payload["image_timestamp"], *det))
        else:
            self.yolo_log.log((payload["image_timestamp"], *((None, ) * 5)))
        if (det is not None and self.prev_det is not None and len(det) != 0
                and len(self.prev_det) != 0):
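            # only proceed when both the current and previous frames contain a detection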
            if self.check_detection(det):
                # detection matched criteria
                if self.in_trial and not self.prev_trial_detection:
                    if self.consecutive:
                        if not self.consq_end:
                            self.stim()
                            if (params["reward_detections"]
                                    and not params["bypass_detection"]):
                                self.dispatch_reward()
                            exp.event_logger.log(
                                "learn_exp/consecutive_trial_in_radius", None)
                            self.consq_end = True
                            self.got_detection = True
                            self.time = 0.0
                    else:
                        # during trial and object moved since last success
                        self.got_detection = True
                        if (params["reward_detections"]
                                and not params["bypass_detection"]):
                            self.dispatch_reward()

                        self.cancel_logic_trial()  # got detection, cancel the scheduled end
                        self.end_logic_trial()

                elif self.in_trial:
                    # during trial, object did not move since last success
                    # self.log.info("Ignored success, location did not change since last success")
                    pass
                self.prev_trial_detection = True
            else:
                if self.prev_trial_detection:
                    # object location does not match the criteria
                    self.prev_trial_detection = False
                    # during a consecutive trial, holding to start the next
                    if self.consq_end:
                        if self.time == 0.0:
                            self.time = time.time()
                            exp.event_logger.log(
                                "learn_exp/consecutive_trial_out_radius",
                                params.get("time_diff", 10),
                            )  # the remaining time
                        elif time.time() > self.time + params.get(
                                "time_diff", 10):
                            # exp.event_logger.log("learn_exp/trial",{"status": "consecutive: out of radius, ended trial"})
                            self.consq_end = False
                            self.end_logic_trial()
                        else:
                            pass

        self.prev_det = det