    def is_in_area_changed(self, old, new):
        # TODO: make sure area changed continuously
        if not old and new:
            exp.event_logger.log(
                "loclearn/entered_area", {"cooldown": session_state["cooldown"]}
            )
            if session_state["cooldown"]:
                self.log.info("Animal entered the reinforced area during cooldown.")
                self.cancel_cooldown()
                session_state["cooldown"] = False
            else:
                self.log.info("Animal entered the reinforced area.")
                schedule.once(
                    self.maybe_end_trial, exp.get_params()["area_stay_duration"]
                )

        elif old and not new:
            exp.event_logger.log("loclearn/left_area", None)
            self.log.info("Animal left the reinforced area.")

            if session_state["reward_scheduled"]:
                session_state["cooldown"] = True
                self.cancel_cooldown = schedule.once(
                    self.maybe_end_cooldown, exp.get_params()["cooldown_duration"]
                )
Example #2
 def dispense(self):
     if self.left_trial:
         arena.run_command("dispense",
                           exp.get_params()["left_feeder"], None, False)
     else:
         arena.run_command("dispense",
                           exp.get_params()["right_feeder"], None, False)
Example #3
    def update_is_in_area(self, det):
        if det is None:
            # later might take part in logic
            return

        centroid = bbox.xyxy_to_centroid(np.array(det))
        was_in_area = session_state["is_in_area"]

        loc = session_state["reinforced_location"]
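        # squared distance from the animal's centroid to the reinforced location;
        # it is compared against squared radii below, so no square root is needed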
        dist_to_location = (centroid[0] - loc[0])**2 + (centroid[1] -
                                                        loc[1])**2
        if session_state["cooldown_dist"] is True:
            if dist_to_location >= exp.get_params()["cooldown_radius"]**2:
                self.log.info(
                    "Distance cooldown off. Animal is far enough from reinforced area."
                )
                session_state["cooldown_dist"] = False

        is_in_area = (dist_to_location <=
                      exp.get_params()["reinforced_area"]["radius"]**2)
        if was_in_area != is_in_area:
            self.in_out_time = time.time()
            session_state["is_in_area"] = is_in_area

        return dist_to_location
Example #4
    def end_trial(self):

        if exp.get_params()["media_url"] is not None:
            mqtt.client.publish_json("event/command/hide_media", {})
        else:
            mqtt.client.publish_json("event/command/hide_bugs", {})

        if exp.get_params()["record_video"]:
            video_system.stop_record()
Example #5
 def run_trial(self):
     self.log.info("Trial " +
                   str(exp.get_params()["num_trials"] - self.cur_trial) +
                   " started " + str(datetime.datetime.now()))
     exp.event_logger.log(
         "simple_exp/trial_start",
         {"Trial": str(exp.get_params()["num_trials"] - self.cur_trial)},
     )
     self.stim()
     self.dispatch_reward()
     self.log.info("run trial procedure finished")
Example #6
    def maybe_end_trial(self):
        params = exp.get_params()
        if (session_state["is_in_area"] and self.in_out_time is not None and
                time.time() - self.in_out_time > params["area_stay_duration"]):
            self.log.info("Trial successful!")
            session_state.update(
                (),
                {
                    "reward_scheduled": True,
                    "cooldown_time": True,
                    "cooldown_dist": True,
                },
            )

            self.cancel_cooldown = schedule.once(
                self.end_time_cooldown,
                exp.get_params()["cooldown_duration"])

            if session_state["out_of_rewards"] is True:
                self.log.warning(
                    "Out of rewards. Can't reward successful trial.")
                return

            interface = params["cue"]["interface"]
            led_dur = params["cue"]["led_duration"]
            num_blinks = params["cue"]["num_blinks"]
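            # half of one blink period in milliseconds (each blink is one on phase and one off phase)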
            period_time = 1000 * led_dur / num_blinks // 2
            self.log.info(
                f"Starting blinking {interface}. {num_blinks} blinks in {led_dur}s (period {period_time}ms)"
            )
            start_blink(
                interface,
                period_time,
            )

            def stop():
                stop_blink(interface)
                self.log.info("Stopped blinking.")

            schedule.once(stop, led_dur)

            if random.random() <= params["reward"]["stochastic_delay_prob"]:
                delay = params["reward"]["stochastic_delay"]
                self.using_stochastic_delay = True
            else:
                delay = params["reward"]["delay"]
                self.using_stochastic_delay = False

            self.cancel_reward_delay = schedule.once(self.dispense_reward,
                                                     delay)
Example #7
 def to_idle_state(self):
     session_state["state"] = "idle"
     params = exp.get_params()
     min_t, max_t = params["min_idle_time"], params["max_idle_time"]
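     # draw the idle duration uniformly from [min_idle_time, max_idle_time)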
     idle_time = self.rng.random() * (max_t - min_t) + min_t
     self.log.info(f"Waiting {idle_time:.2f} seconds.")
     schedule.once(exp.next_trial, idle_time)
Example #8
 def dispatch_reward(self):
     params = exp.get_params()
     if params["stimulus"].lower() == "led":
         self.reward_delay = params["led_duration"] * params["led_blinks"]
     else:
         self.reward_delay = params.get("monitor_duration", 60)
     schedule.once(self.dispatch_reward_actual, self.reward_delay)
Example #9
    def find_reinforced_location(self):
        params = exp.get_params()
        if params["reinforced_area"]["use_aruco"] and self.aruco_img is not None:
            if "aruco" in session_state:
                session_state["reinforced_location"] = session_state["aruco"]
            else:
                session_state["reinforced_location"] = params["reinforced_area"][
                    "location"
                ]
        else:
            session_state["reinforced_location"] = params["reinforced_area"]["location"]

        if self.aruco_img is not None:
            img = np.copy(self.aruco_img)
        else:
            img, _ = image_sources[params["image_source_id"]].get_image()

        img = cv.circle(
            img,
            tuple(session_state["reinforced_location"]),
            radius=params["reinforced_area"]["radius"],
            color=(0, 255, 0),
            thickness=5,
        )
        now_str = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        area_image_path = session_state["data_dir"] / f"reinforced_area_{now_str}.jpg"
        self.log.info(f"Saving area image to {area_image_path}")
        cv.imwrite(str(area_image_path), img)
Example #10
 def playback_ended(self, timestamps):
     csv_path = exp.session_state["data_dir"] / "video_timestamps.csv"
     self.log.info(
         f"Video playback finished. Saving timestamps to: {csv_path}")
     df = pd.DataFrame(data=timestamps, columns=["frame", "time"])
     df.to_csv(csv_path, index=False)
     if exp.get_params()["block_duration"] is None:
         exp.next_block()
Example #11
    def end(self):
        if exp.get_params().get("record_exp", True):
            video_system.stop_record()
        if self.cancel_trials is not None:
            self.cancel_trials()

        schedule.cancel_all()
        mqtt.client.publish(topic="monitor/color", payload="black")

        self.log.info("exp ended")
Example #12
    def dispense_reward(self):
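        # dispense stochastically: with probability 1 - dispense_prob the reward is skipped
        # and the next trial starts immediately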
        if random.random() <= exp.get_params()["reward"]["dispense_prob"]:
            self.log.info("Trial ended. Dispensing reward.")
        else:
            self.log.info(
                "Trial ended. NOT dispensing reward (stochastic reward).")
            exp.next_trial()
            return

        rewards_count = session_state["rewards_count"] + 1

        session_state["reward_scheduled"] = False

        feeders = exp.get_params()["reward"]["feeders"]
        max_reward = sum(feeders.values())
        rewards_sum = 0

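        # walk the feeders in order and dispense from the first one whose
        # cumulative capacity covers the current reward count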
        for interface, rewards in feeders.items():
            rewards_sum += rewards

            if rewards_count <= rewards_sum:
                exp.event_logger.log(
                    "dispensing_reward",
                    {
                        "num": rewards_count,
                        "stochastic_delay": self.using_stochastic_delay,
                    },
                )

                self.log.info(
                    f"Dispensing reward #{rewards_count} from feeder {interface} (stochastic_delay={self.using_stochastic_delay})"
                )
                arena.run_command("dispense", interface, None, False)
                break

        if rewards_count >= max_reward:
            session_state["out_of_rewards"] = True
            self.log.info("Out of rewards!")

        session_state["rewards_count"] = rewards_count
        exp.next_trial()
Example #13
    def find_reinforced_location(self):
        params = exp.get_params()
        ra = params["reinforced_area"]
        if "aruco_id" in ra:
            if "aruco" in session_state:
                marker_id = ra["aruco_id"]
                # filter() returns an iterator in Python 3, so collect matches into a list
                ms = [m for m in session_state["aruco"] if m["id"] == marker_id]
                if len(ms) == 1:
                    session_state["reinforced_location"] = ms[0]["center"]
                else:
                    raise Exception(f"Aruco marker with id {marker_id} was not found.")
            else:
                raise Exception("Can't find aruco markers in session state.")
        elif "location" in ra:
            session_state["reinforced_location"] = ra["location"]
        else:
            raise Exception(
                "Expecting either 'aruco_id' or 'location' in the 'reinforced_area' params"
            )

        if self.aruco_img is not None:
            img = np.copy(self.aruco_img)
        else:
            img, _ = image_sources[params["image_source_id"]].get_image()
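            # the image source returns a single-channel image here; replicate it to
            # 3 channels so the colored circles below can be drawn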
            img = np.stack((img, ) * 3, axis=-1)

        loc = tuple(session_state["reinforced_location"])
        r1 = params["reinforced_area"]["radius"]
        r2 = params["cooldown_radius"]
        img = cv.circle(
            img,
            loc,
            radius=r1,
            color=(0, 255, 0),
            thickness=5,
        )

        img = cv.circle(
            img,
            loc,
            radius=r2,
            color=(255, 0, 0),
            thickness=5,
        )

        now_str = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        area_image_path = session_state["data_dir"] / f"area_{now_str}.jpg"
        self.log.info(f"Saving area image to {area_image_path}")
        cv.imwrite(str(area_image_path), img)
Example #14
    def run(self):
        self.rng = np.random.default_rng()

        self.left_feeding_pos = None
        self.right_feeding_pos = None

        params = exp.get_params()

        for a in self.aruco_markers:
            if a["id"] == params["left_aruco_id"]:
                self.left_feeding_pos = a["center"]
            elif a["id"] == params["right_aruco_id"]:
                self.right_feeding_pos = a["center"]

        if self.left_feeding_pos is None or self.right_feeding_pos is None:
            raise ValueError(
                "Could not find left and/or right feeding positions")
        else:
            self.log.info(f"Left feeding position: {self.left_feeding_pos}")
            self.log.info(f"Right feeding position: {self.right_feeding_pos}")

        self.radius = params["feeding_radius"]

        if self.aruco_img is not None:
            img = np.copy(self.aruco_img)
        else:
            img, _ = image_sources[params["image_source_id"]].get_image()

        for a in self.aruco_markers:
            img = cv.circle(
                img,
                tuple(a["center"]),
                radius=self.radius,
                color=(0, 255, 0),
                thickness=5,
            )

        now_str = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        area_image_path = session_state[
            "data_dir"] / f"feeding_areas_{now_str}.jpg"
        self.log.info(f"Saving feeding areas image to {area_image_path}")
        cv.imwrite(str(area_image_path), img)

        if params["record_video"]:
            video_system.start_record()

        self.bbox_collector.run(self.on_bbox_detection)
        self.shaping_mode = params["shaping_mode"]
        session_state["state"] = "idle"
Example #15
    def run(self):
        self.find_reinforced_location()
        self.bbox_collector.run(self.on_bbox_detection)
        session_state["is_in_area"] = False
        self.in_out_time = None
        session_state.add_callback("is_in_area", self.is_in_area_changed)
        session_state["cooldown"] = False
        session_state["reward_scheduled"] = False
        self.cancel_cooldown = None
        self.cancel_reward_delay = None
        self.cancel_blink = None
        self.using_stochastic_delay = None

        if exp.get_params()["record_video"]:
            video_system.start_record()
Example #16
    def show_cue(self, left):
        session_state["state"] = "cue"

        params = exp.get_params()
        blink_dur = params["blink_dur_left"] if left else params[
            "blink_dur_right"]
        arena.run_command("periodic", params["light"], [1, blink_dur], False)

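        # after cue_duration, stop the periodic light command and move to the feed state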
        def stop_blink():
            arena.run_command("periodic", params["light"], [0], True)
            self.to_feed_state()

        self.cancel_stop_blink = schedule.once(
            stop_blink,
            params["cue_duration"],
        )
Example #17
    def update_is_in_area(self, det):
        if det is None:
            # later might take part in logic
            return

        centroid = bbox.xyxy_to_centroid(np.array(det))
        was_in_area = session_state["is_in_area"]

        loc = session_state["reinforced_location"]
        dist_to_location = (centroid[0] - loc[0]) ** 2 + (centroid[1] - loc[1]) ** 2
        is_in_area = (
            dist_to_location <= exp.get_params()["reinforced_area"]["radius"] ** 2
        )
        if was_in_area != is_in_area:
            self.in_out_time = time.time()
            session_state["is_in_area"] = is_in_area

        return dist_to_location
Example #18
    def run_trial(self):
        if session_state["cur_trial"] == 0:
            return

        # Success
        if exp.get_params()["dispense_reward"]:
            self.log.info("Trial ended. Dispensing reward.")
            exp.event_logger.log(
                "loclearn/reward", {"stochastic_delay": self.using_stochastic_delay}
            )
            self.log.info(
                f"Dispensing reward (stochastic_delay={self.using_stochastic_delay})"
            )
            arena.run_command("dispense", "Left feeder", None, False)
        else:
            self.log.info("Trial ended.")

        session_state["reward_scheduled"] = False
Example #19
    def maybe_end_trial(self):
        params = exp.get_params()
        if (
            session_state["is_in_area"]
            and time.time() - self.in_out_time > params["area_stay_duration"]
        ):
            session_state["reward_scheduled"] = True
            self.cancel_blink = led_blink(
                params["cue"]["num_blinks"], params["cue"]["led_duration"]
            )

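            # with probability stochastic_delay_prob use stochastic_delay before the
            # next trial, otherwise use the regular reward_delay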
            if random.random() < params["stochastic_delay_prob"]:
                delay = params["stochastic_delay"]
                self.using_stochastic_delay = True
            else:
                delay = params["reward_delay"]
                self.using_stochastic_delay = False

            self.cancel_reward_delay = schedule.once(exp.next_trial, delay)
Example #20
    def is_in_area_changed(self, old, new):
        # TODO: make sure area changed continuously
        if not old and new:
            exp.event_logger.log(
                "loclearn/entered_area",
                {
                    "cooldown_time": session_state["cooldown_time"],
                    "cooldown_dist": session_state["cooldown_dist"],
                },
            )
            if session_state["cooldown_time"] or session_state["cooldown_dist"]:
                self.log.info(
                    "Animal entered the reinforced area during cooldown.")
            else:
                self.log.info("Animal entered the reinforced area.")
                schedule.once(self.maybe_end_trial,
                              exp.get_params()["area_stay_duration"])

        elif old and not new:
            exp.event_logger.log("loclearn/left_area", None)
            self.log.info("Animal left the reinforced area.")
Example #21
    def run_block(self):
        self.find_reinforced_location()
        self.in_out_time = None
        session_state.update(
            (),
            {
                "cooldown_time": False,
                "cooldown_dist": False,
                "reward_scheduled": False,
            },
        )

        self.cancel_cooldown_time = None
        self.cancel_reward_delay = None
        self.using_stochastic_delay = None

        rl = session_state["reinforced_location"]
        r = exp.get_params()["reinforced_area"]["radius"]
        self.log.info(
            f"Experiment started. Reinforced area at ({rl[0]}, {rl[1]}), radius: {r}."
        )
Example #22
    def run_trial(self):
        if exp.get_params()["record_video"]:
            video_system.start_record()

        if exp.get_params()["media_url"] is not None:
            schedule.once(
                lambda: mqtt.client.publish_json(
                    "event/command/init_media", {"url": exp.get_params()["media_url"]}
                ),
                exp.get_params()["start_delay"],
            )

        else:
            schedule.once(
                lambda: mqtt.client.publish_json(
                    "event/command/init_bugs", exp.get_params()
                ),
                exp.get_params()["start_delay"],
            )
Example #23
    def run(self):
        self.paths = list(
            Path(exp.get_params()["stimuli_path"]).rglob("*.jpg")) + list(
                Path(exp.get_params()["stimuli_path"]).rglob("*.JPG"))

        random.shuffle(self.paths)
        exp.session_state["image_list"] = self.paths

        self.log.info(f"Loaded {len(self.paths)} images.")

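        # one initial delay, then for every image a display interval followed by an inter-stimulus interval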
        intervals = [exp.get_params()["preseq_delay"]] + [
            exp.get_params()["stimuli_duration"],
            exp.get_params()["interstimuli_duration"],
        ] * len(self.paths)

        self.cur_index = 0
        self.clear(exp.get_params()["interstimuli_color"])

        self.cancel_sequence = schedule.sequence(self.display_stimuli,
                                                 intervals)
        video_system.start_record()
Example #24
    def run_block(self):
        interval = exp.get_params()["interval"]

        self.cancel_timer = schedule.repeat(
            self.timer_fn, interval,
            exp.get_params().get("$num_trials", True))
Example #25
 def led_stimulus(self):
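     # each blink is two toggles of the Signal LED (on, then off), hence 2 * led_blinks repetitions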
     self.stim_cancel = schedule.repeat(
         lambda: arena.run_command("toggle", "Signal LED"),
         exp.get_params()["led_duration"],
         2 * exp.get_params().get("led_blinks", 1),
     )
Example #26
 def monitor_stimulus(self):
     monitor.set_color(exp.get_params().get("monitor_color", "random"))
     # wrap the publish call in a lambda so it runs when the timer fires rather than immediately
     self.stim_cancel = schedule.once(
         lambda: mqtt.client.publish(topic="monitor/color", payload="black"),
         exp.get_params().get("monitor_duration", 60),
     )
Example #27
 def run(self):
     monitor.set_color(exp.get_params()["background_color"])
     if exp.get_params()["record_video"]:
         video_system.start_record()
Example #28
 def run_block(self):
     self.log.info("Playing video...")
     if len(exp.get_params()["vid_path"]) > 0:
         monitor.play_video(exp.get_params()["vid_path"])
     if exp.get_params()["block_duration"] is not None:
         schedule.once(exp.next_block, exp.get_params()["block_duration"])
Example #29
 def end(self):
     if exp.get_params()["record_video"]:
         video_system.stop_record()
Example #30
 def end(self):
     self.bbox_collector.end()
     if exp.get_params()["record_video"]:
         video_system.stop_record()