Esempio n. 1
0
    def _start_tracking(self, camera, result_writer, rois, M, TrackerClass,
                        tracker_kwargs, hardware_connection, StimulatorClass,
                        stimulator_kwargs):
        """Build one stimulator per ROI, construct the Monitor and run it.

        :param camera: frame source handed to the Monitor.
        :param result_writer: sink the tracking results are written to.
        :param rois: regions of interest; one stimulator is built per ROI.
        :param M: forwarded verbatim to ``Monitor.run`` -- presumably a
            transform matrix; TODO confirm against Monitor.run's signature.
        :param TrackerClass: tracker type instantiated by the Monitor.
        :param tracker_kwargs: extra keyword arguments for the tracker;
            merged over ``self._monit_kwargs``.
        :param hardware_connection: already-open connection (opened earlier
            as a thread) shared by all stimulators.
        :param StimulatorClass: stimulator type, one instance per ROI.
        :param stimulator_kwargs: keyword arguments for each stimulator.
        """
        # Here the stimulator passes args. Hardware connection was previously
        # opened as a thread and is shared by every stimulator.
        stimulators = [
            StimulatorClass(hardware_connection, **stimulator_kwargs)
            for _ in rois
        ]

        kwargs = self._monit_kwargs.copy()
        kwargs.update(tracker_kwargs)

        # todo: pickle hardware connection, camera, rois, tracker class, stimulator class,.
        # then rerun stimulators and Monitor(......)
        # BUG FIX: the merged `kwargs` above was computed but never used --
        # the call previously passed **self._monit_kwargs, silently dropping
        # tracker_kwargs.
        self._monit = Monitor(camera,
                              TrackerClass,
                              rois,
                              stimulators=stimulators,
                              *self._monit_args,
                              **kwargs)

        self._info["status"] = "running"
        logging.info("Setting monitor status as running: '%s'" %
                     self._info["status"])

        quality_controller = QualityControl(result_writer)

        self._monit.run(result_writer, self._drawer, quality_controller, M)
Esempio n. 2
0
def test_stimulator(StimulatorClass,
                    InterfaceClass,
                    remove_db_file=True,
                    *args,
                    **kwargs):
    """Exercise a stimulator class end-to-end on the test video.

    Builds ROIs from VIDEO, attaches one ``StimulatorClass`` instance per
    ROI (all sharing a ``HardwareConnection`` to ``InterfaceClass``), runs a
    Monitor into a temporary SQLite database, and cleans up afterwards.
    Extra ``*args``/``**kwargs`` are forwarded to each stimulator.
    """
    tmp = tempfile.mkstemp(suffix="_ethoscope_test.db")[1]
    print("Making a tmp db: " + tmp)

    camera = MovieVirtualCamera(VIDEO, drop_each=15)
    roi_builder = SleepMonitorWithTargetROIBuilder()
    regions = roi_builder.build(camera)
    camera.restart()

    connection = HardwareConnection(InterfaceClass)
    try:
        # One stimulator per ROI, all sharing the same hardware connection.
        stimulators = [StimulatorClass(connection, *args, **kwargs)
                       for _ in regions]
        monitor = Monitor(camera, AdaptiveBGModel, regions,
                          stimulators=stimulators)
        drawer = DefaultDrawer(draw_frames=DRAW_FRAMES)

        with SQLiteResultWriter(tmp, regions) as result_writer:
            monitor.run(result_writer=result_writer, drawer=drawer)
    finally:
        # Always stop the hardware connection thread; the temp db is kept
        # only when the caller asked for it.
        if remove_db_file:
            print("Removing temp db (" + tmp + ")")
            os.remove(tmp)
        else:
            print("db file lives in (" + tmp + ")")
        connection.stop()
Esempio n. 3
0
    def test_API(self):
        """Smoke-test the tracking API: build ROIs from the test video,
        attach mock stimulators, and run the Monitor into a temp SQLite db.
        Any exception during the run is reported as a test failure.
        """
        random.seed(1)
        cam = MovieVirtualCamera(VIDEO)
        rb = SleepMonitorWithTargetROIBuilder()
        rois = rb.build(cam)
        hc = HardwareConnection(MockInterface)
        stimulators = [MockStimulator(hc) for _ in rois]

        cam.restart()
        mon = Monitor(cam, AdaptiveBGModel, rois, stimulators)

        drawer = DefaultDrawer(draw_frames=DRAW_FRAMES)
        tmp = tempfile.mkstemp(suffix="_ethoscope_test.db")[1]
        try:
            print("Making a tmp db: " + tmp)
            with SQLiteResultWriter(tmp, rois) as rw:
                mon.run(result_writer=rw, drawer=drawer)
        except Exception as e:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; the old message also discarded
            # the actual error, making failures undiagnosable.
            self.fail("test_API raised an exception unexpectedly: %r" % e)
        finally:
            hc.stop()
            cam._close()
            print("Removing temp db (" + tmp + ")")
            os.remove(tmp)
Esempio n. 4
0
class MockSDInterface(MockLynxMotionInterface, SleepDepriverInterface):
    """Sleep-depriver interface backed by the mock LynxMotion board, so the
    sleep-deprivation stack can be exercised without real hardware.
    """
    pass


class MockSDStimulator(SleepDepStimulator):
    """SleepDepStimulator wired to the mock hardware interface above,
    for hardware-free testing.
    """
    # Overrides the hardware interface class used by SleepDepStimulator.
    _HardwareInterfaceClass = MockSDInterface


# Run a full mock sleep-deprivation session on the test video, writing
# results to a throwaway SQLite database that is removed afterwards.
tmp = tempfile.mkstemp(suffix="_ethoscope_test.db")[1]
print("Making a tmp db: " + tmp)

camera = MovieVirtualCamera(VIDEO, drop_each=15)
roi_builder = SleepMonitorWithTargetROIBuilder()
regions = roi_builder.build(camera)
camera.restart()

connection = HardwareConnection(MockSDInterface)
# One mock stimulator per ROI, all sharing the same connection.
stimulators = [MockSDStimulator(connection, min_inactive_time=10)
               for _ in regions]
monitor = Monitor(camera, AdaptiveBGModel, regions, stimulators=stimulators)
drawer = DefaultDrawer(draw_frames=DRAW_FRAMES)

try:
    with SQLiteResultWriter(tmp, regions) as result_writer:
        monitor.run(result_writer=result_writer, drawer=drawer)
finally:
    # Clean up the temp database and stop the hardware connection thread.
    print("Removing temp db (" + tmp + ")")
    os.remove(tmp)
    connection.stop()
    # Fragment of a CLI entry point (the enclosing function and the earlier
    # parser options are outside this view). Parses -p/--prefix, derives the
    # output .db path from the input video path, and runs tracking into it.
    parser.add_option("-p",
                      "--prefix",
                      dest="prefix",
                      help="The prefix for result dir")

    (options, args) = parser.parse_args()
    option_dict = vars(options)
    INPUT = option_dict["input"]
    OUTPUT = os.path.splitext(INPUT)[0] + ".db"
    OUTPUT = option_dict["prefix"] + "/" + OUTPUT
    try:
        os.makedirs(os.path.dirname(OUTPUT))
    except OSError:
        # Directory most likely exists already; best-effort creation.
        pass
    # BUG FIX: this was a Python 2 print statement (`print INPUT + ...`),
    # a SyntaxError under Python 3. The parenthesised single-argument form
    # behaves identically on both Python 2 and 3.
    print(INPUT + " ===> " + OUTPUT)

    cam = MovieVirtualCamera(INPUT)
    rois = SleepMonitorWithTargetROIBuilder().build(cam)
    drawer = DefaultDrawer(draw_frames=True)
    mon = Monitor(cam, AdaptiveBGModel, rois)

    #fixme
    # NOTE(review): hard-coded acquisition date; `ts` is computed but unused
    # in this fragment -- confirm whether it should feed the result writer.
    date = datetime.datetime.strptime("2016-05-03_08-25-02",
                                      "%Y-%m-%d_%H-%M-%S")
    ts = int(calendar.timegm(date.timetuple()))
    #todo parse metadata from filename
    # metadata = {}

    with SQLiteResultWriter(OUTPUT, rois) as rw:
        mon.run(rw, drawer)

class ABGMImageSaver(AdaptiveBGModel):
    """AdaptiveBGModel variant whose foreground model saves images.

    NOTE(review): ``fg_model`` is a class attribute created once at
    class-definition time, so the single ObjectModelImageSaver instance is
    shared by every ABGMImageSaver instance -- confirm this is intended.
    """

    fg_model = ObjectModelImageSaver()


# Input/output locations -- adjust these three paths to your own files.
INPUT_VIDEO = "/home/quentin/comput/ethoscope-git/src/ethoscope/tests/integration_server_tests/test_video.mp4"
OUTPUT_VIDEO = "/tmp/my_output.avi"
OUTPUT_DB = "/tmp/results.db"

# The movie file stands in for a live camera.
camera = MovieVirtualCamera(INPUT_VIDEO)

# Build ROIs automatically from the targets visible in the frames, then
# rewind the video to its first frame before tracking starts.
builder = SleepMonitorWithTargetROIBuilder()
regions = builder.build(camera)
camera.restart()

# The drawer shows each animal's inferred position, displays the frames and
# records them to a video file.
drawer = DefaultDrawer(OUTPUT_VIDEO, draw_frames=True)
monitor = Monitor(camera, ABGMImageSaver, regions)

# Everything is ready: run the monitor, streaming results to SQLite.
with SQLiteResultWriter(OUTPUT_DB, regions) as result_writer:
    monitor.run(result_writer, drawer)
Esempio n. 7
0
from ethoscope.utils.io import SQLiteResultWriter
from ethoscope.hardware.input.cameras import MovieVirtualCamera
from ethoscope.drawers.drawers import DefaultDrawer

# You can also load other types of ROI builder. This one is for 20 tubes (two columns of ten rows)
from ethoscope.roi_builders.roi_builders import DefaultROIBuilder
from ethoscope.roi_builders.target_roi_builder import TargetGridROIBuilder

# Input/output locations -- adjust these three paths to your own files.
INPUT_VIDEO = "/home/quentin/comput/ethoscope-git/src/ethoscope/tests/integration_server_tests/test_video.mp4"
OUTPUT_VIDEO = "/tmp/my_output.avi"
OUTPUT_DB = "/tmp/results.db"

# The movie file stands in for a live camera.
camera = MovieVirtualCamera(INPUT_VIDEO)

# Lay out a 1x2 grid of ROIs from the targets, then rewind the video to its
# first frame before tracking starts.
grid_builder = TargetGridROIBuilder(n_rows=1, n_cols=2)
regions = grid_builder.build(camera)
camera.restart()

# The drawer shows each animal's inferred position, displays the frames and
# records them to a video file.
drawer = DefaultDrawer(OUTPUT_VIDEO, draw_frames=True)
monitor = Monitor(camera, MultiFlyTracker, regions)

# No database output in this example; to record results, wrap the call in
# `with SQLiteResultWriter(OUTPUT_DB, regions) as rw:` and pass rw instead.
monitor.run(None, drawer)
Esempio n. 8
0
def main(argv):
    """Run the Ethoscope tracking machinery on a video of daphnia in wells.

    ROIs come from a mask image (non-black regions); results go to an
    SQLite database; optionally an annotated output video and a per-ROI
    debug video are produced.

    NOTE(review): ``argv`` is accepted but never used -- ``parse_args()``
    below reads ``sys.argv`` directly. Pass ``argv`` through if
    programmatic invocation matters.
    """

    parser = ArgumentParser(
        description='Runs an Ethoscope machinery on the given video file,' +
        ' which is meant to be a recording of single daphnia moving' +
        ' in a bunch of wells.' +
        ' The area of each well is determined by non-black regions' +
        ' in the supplied regions of interest (ROI) image file.' +
        ' Optionally an output video may be produced, documenting the ' +
        ' detection of animals.')
    parser.add_argument("-i",
                        "--input-video",
                        dest="inp_video_filename",
                        required=True,
                        help="The video file to be processed.",
                        metavar='<input video filename>')
    parser.add_argument("-o",
                        "--output-db",
                        dest="db_filename",
                        required=True,
                        help="Create Sqlite DB file  for storing results.",
                        metavar='<output DB filename>')
    parser.add_argument(
        "-r",
        "--roi_image",
        dest="roi_filename",
        required=True,
        help="Create Sqlite DB file DB_FILENAME for storing results.",
        metavar='<roi image>')
    parser.add_argument("-a",
                        "--output-video",
                        dest="outp_video_filename",
                        help="The annotated output video file.",
                        metavar='<output video filename>')
    parser.add_argument(
        "-b",
        "--single-roi-video",
        dest="single_roi_video_filename",
        help=
        "For debugging purpose a video file of a single roi may be produced.",
        metavar='<single roi video filename>')
    parser.add_argument(
        "-n",
        "--single-roi-video-roi-nbr",
        dest="single_roi_video_roi_nbr",
        type=int,
        default=0,
        help="Select the number of the roi to produce a debugging video from."
        + " If not specified, roi 0 is the default.",
        metavar='<single roi video roi number>')

    args = parser.parse_args()

    # change these variables according to how you name your input/output files
    # NOTE(review): most of the constants below (ROI_IMAGE, INPUT_VIDEO,
    # OUTPUT_VIDEO, OUTPUT_DB) are never used -- the args.* values are what
    # the rest of the function reads; only `logfile` and the window sizes
    # are consumed. TODO: prune the dead assignments.
    INPUT_DATA_DIR = "/home/lukas/tmp/AAA-Video/"
    OUTPUT_DATA_DIR = "/home/lukas/tmp/ethoscope/"

    #ROI_IMAGE = INPUT_DATA_DIR + "irbacklit_20200109_1525_4x4x6_squaremask_valued.png"
    ROI_IMAGE = INPUT_DATA_DIR + "4xWellplates4x6_registred_squaremask_tight.png"
    #INPUT_VIDEO = INPUT_DATA_DIR + "Basler_acA5472-17um__23065142__20200109_152536071.mp4"
    INPUT_VIDEO = INPUT_DATA_DIR + "Basler_acA5472-17um__23065142__20200205_172106124.mp4"

    logfile = OUTPUT_DATA_DIR + '20200205.log'
    OUTPUT_VIDEO = OUTPUT_DATA_DIR + "20200205.avi"
    OUTPUT_DB = OUTPUT_DATA_DIR + "results20200205.db"
    #logfile = OUTPUT_DATA_DIR + 'results.log'
    #OUTPUT_VIDEO = OUTPUT_DATA_DIR + "output.avi"
    #OUTPUT_DB = OUTPUT_DATA_DIR + "output.db"

    # Size of the debug-image window passed to the drawer below.
    dbgImgWinSizeX = 2200
    dbgImgWinSizeY = 1500

    # setup logging
    logging.basicConfig(filename=logfile, level=logging.INFO)
    #logging.basicConfig(filename=logfile, level=logging.DEBUG)
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # Make the ethoscope packages accessible
    package_path = os.path.join(os.path.dirname(sys.path[0]), '')
    logging.info("path of ethoscope package: %s" % package_path)
    sys.path.insert(0, package_path)

    # NOTE(review): cv2 is imported here but not referenced in this
    # function -- possibly needed as a side effect of the ethoscope
    # imports below; confirm before removing.
    import cv2

    # import the bricks from ethoscope package
    # Use a mask image to define rois. Mask image must have black background, every non-black
    # region defines a roi.
    from ethoscope.roi_builders.img_roi_builder import ImgMaskROIBuilder
    from ethoscope.core.monitor import Monitor
    from ethoscope.trackers.adaptive_bg_tracker import AdaptiveBGModel
    from ethoscope.utils.io import SQLiteResultWriter
    from ethoscope.hardware.input.cameras import MovieVirtualCamera
    from ethoscope.drawers.drawers import DefaultDrawer

    # Generate ROIs from the mask image
    logging.info("reading roi mask")
    roi_builder = ImgMaskROIBuilder(args.roi_filename)

    logging.info("building rois from \"%s\"" % args.roi_filename)
    roi_builder.build(
        None)  # use image already loaded by ImgMaskROIBuilder instance
    rois = roi_builder.gridSort(50, 50)

    #for r in rois:
    #  print("Roi %d: value: %d, (%d,%d)" % (r.idx, r._value, r._rectangle[0], r._rectangle[1]))

    # We use a video input file as if it were a camera
    cam = MovieVirtualCamera(args.inp_video_filename)
    logging.info("Loading \"%s\"-encoded movie with %d FPS of duration %d s." %
                 (cam.fourcc, cam.frames_per_sec,
                  cam._total_n_frames / cam.frames_per_sec))

    # we use a drawer to show inferred position for each animal, display frames and save them as a video
    # Annotated video is produced only when an output filename was given.
    do_draw_frames = False
    if args.outp_video_filename is not None:
        do_draw_frames = True
    drawer = DefaultDrawer(args.outp_video_filename,
                           draw_frames=do_draw_frames,
                           framesWinSizeX=dbgImgWinSizeX,
                           framesWinSizeY=dbgImgWinSizeY)

    # We build our monitor
    #monitor = Monitor(cam, AdaptiveBGModel, rois)
    # Optionally record a debug video of a single ROI.
    if args.single_roi_video_filename is not None:
        monitor = Monitor(
            cam,
            AdaptiveBGModel,
            rois,
            dbg_roi_value=args.single_roi_video_roi_nbr,
            dbg_roi_video_filename=args.single_roi_video_filename)
    else:
        monitor = Monitor(cam, AdaptiveBGModel, rois)

    # Now everything is ready, we run the monitor with a result writer and a drawer
    logging.info("run monitor with drawer")
    with SQLiteResultWriter(args.db_filename, rois) as rw:
        monitor.run(rw, drawer)
Esempio n. 9
0
    # We use the npy tracker to save data in a npy file
    rdw = rawdatawriter(basename=input_video + ".npy",
                        n_rois=len(rois),
                        entities=entities)
    #rdw = None

    #for multifly tracking using BS subtraction

    if ttype == "multi":
        monit = Monitor(camera,
                        MultiFlyTracker,
                        rois,
                        stimulators=None,
                        data={
                            'maxN': 50,
                            'visualise': False,
                            'fg_data': {
                                'sample_size': 400,
                                'normal_limits': (50, 200),
                                'tolerance': 0.8
                            }
                        })

    #For the haar tracking
    if ttype == "haar":
        monit = Monitor(camera,
                        HaarTracker,
                        rois,
                        stimulators=None,
                        data={
                            'maxN': entities,
Esempio n. 10
0
    def _start_tracking(self, camera, result_writer, rois, TrackerClass,
                        tracker_kwargs, hardware_connection, StimulatorClass,
                        stimulator_kwargs):
        """Build per-ROI stimulators and the Monitor, set up environmental
        condition logging (camera gain, humidity/temperature, light), then
        run the conditions monitor asynchronously and the tracking monitor
        (which blocks until the run ends).

        :param camera: frame source; its ``start_time`` anchors timestamps.
        :param result_writer: sink the tracking results are written to.
        :param rois: regions of interest; one stimulator is built per ROI.
        :param TrackerClass: tracker type instantiated by the Monitor.
        :param tracker_kwargs: tracker options; merged over
            ``self._monit_kwargs``.
        :param hardware_connection: already-open connection (opened earlier
            as a thread) shared by all stimulators.
        :param StimulatorClass: stimulator type, one instance per ROI.
        :param stimulator_kwargs: keyword arguments for each stimulator.
        """
        # Here the stimulator passes args. Hardware connection was previously
        # opened as a thread and is shared by every stimulator.
        stimulators = [
            StimulatorClass(hardware_connection, **stimulator_kwargs)
            for _ in rois
        ]

        kwargs = self._monit_kwargs.copy()
        kwargs.update(tracker_kwargs)

        # todo: pickle hardware connection, camera, rois, tracker class, stimulator class,.
        # then rerun stimulators and Monitor(......)
        # BUG FIX: `kwargs` was computed above but never passed, so tracker
        # options were ignored (the parallel `run` implementation passes
        # **kwargs to Monitor).
        self._monit = Monitor(camera,
                              TrackerClass,
                              rois,
                              stimulators=stimulators,
                              *self._monit_args,
                              **kwargs)

        # Collect whatever condition variables this machine supports; each
        # sensor is optional and failures are logged, not fatal.
        conditionVariables = []
        try:
            variable = camera.metadata(
                "analog_gain"
            )  # This might fail depending on the type of camera
            conditionVariables.append(
                ConditionVariable(variable, "camera_gain"))
        except KeyError:
            # analog_gain is not part of the meta data for this camera
            logging.warning(
                "Unable to get the camera gain to add to the condition database"
            )
        try:
            sensorChip = BufferedHIH6130()
            conditionVariables.append(
                ConditionVariableFunction(sensorChip.humidity, "humidity"))
            conditionVariables.append(
                ConditionVariableFunction(sensorChip.temperature,
                                          "temperature"))
        except Exception as error:
            logging.warning(
                "Unable to get the temperature or humidity sensor to add to the conditions database because: "
                + str(error))
        try:
            lightSensorChip = TSL2591()
            lightSensorChip.powerOn()
            conditionVariables.append(
                ConditionVariableFunction(lightSensorChip.lux, "illuminance"))
        except Exception as error:
            logging.warning(
                "Unable to get the light sensor to add to the conditions database because: "
                + str(error))
        self._conditionsMonitor = ConditionsMonitor(conditionVariables)
        dbConnectionString = "mysql://" + self._db_credentials[
            "user"] + ":" + self._db_credentials[
                "password"] + "@localhost/" + self._db_credentials["name"]

        self._info["status"] = "running"
        logging.info("Setting monitor status as running: '%s'" %
                     self._info["status"])

        # Set all times in the database to be relative to the start of the run. Usually this means setting
        # to zero, unless continuing a previous run (e.g. after power failure). self._monit handles this
        # internally. The "1000" is so that everything is in milliseconds.
        self._conditionsMonitor.setTime(
            (time.time() - camera.start_time) * 1000, 1000)
        self._conditionsMonitor.run(
            dbConnectionString)  # This runs asynchronously
        self._monit.run(result_writer, self._drawer)  # This blocks
Esempio n. 11
0
    def run(self):
        """Thread entry point: build camera, ROIs, tracker and interactors
        from ``self._option_dict``, then run the Monitor until it stops.

        Status transitions are recorded in ``self._info["status"]``
        ("initialising" -> "running"). Any exception stops the thread via
        ``self.stop(...)`` with the formatted traceback; the camera is
        always closed (best-effort) on the way out.
        """
        try:
            self._info["status"] = "initialising"
            logging.info("Starting Monitor thread")

            self._info["error"] = None

            self._last_info_t_stamp = 0
            self._last_info_frame_idx = 0
            try:

                CameraClass = self._option_dict["camera"]["class"]

                camera_kwargs = self._option_dict["camera"]["kwargs"]

                ROIBuilderClass = self._option_dict["roi_builder"]["class"]
                roi_builder_kwargs = self._option_dict["roi_builder"]["kwargs"]

                InteractorClass = self._option_dict["interactor"]["class"]
                interactor_kwargs = self._option_dict["interactor"]["kwargs"]
                HardWareInterfaceClass = InteractorClass.__dict__[
                    "_hardwareInterfaceClass"]

                TrackerClass = self._option_dict["tracker"]["class"]
                tracker_kwargs = self._option_dict["tracker"]["kwargs"]

                ResultWriterClass = self._option_dict["result_writer"]["class"]
                result_writer_kwargs = self._option_dict["result_writer"][
                    "kwargs"]

                cam = CameraClass(**camera_kwargs)

                roi_builder = ROIBuilderClass(**roi_builder_kwargs)
                rois = roi_builder.build(cam)

                logging.info("Initialising monitor")
                cam.restart()
                #the camera start time is the reference 0

                ExpInfoClass = self._option_dict["experimental_info"]["class"]
                exp_info_kwargs = self._option_dict["experimental_info"][
                    "kwargs"]
                self._info["experimental_info"] = ExpInfoClass(
                    **exp_info_kwargs).info_dic
                self._info["time"] = cam.start_time

                self._metadata = {
                    "machine_id": self._info["id"],
                    "machine_name": self._info["name"],
                    "date_time":
                    cam.start_time,  #the camera start time is the reference 0
                    "frame_width": cam.width,
                    "frame_height": cam.height,
                    "version": self._info["version"]["id"],
                    "experimental_info": str(self._info["experimental_info"]),
                    "selected_options": str(self._option_dict)
                }

                # One interactor per ROI, all sharing one hardware interface.
                hardware_interface = HardWareInterfaceClass()
                interactors = [
                    InteractorClass(hardware_interface, **interactor_kwargs)
                    for _ in rois
                ]
                kwargs = self._monit_kwargs.copy()
                kwargs.update(tracker_kwargs)

                self._monit = Monitor(cam,
                                      TrackerClass,
                                      rois,
                                      interactors=interactors,
                                      *self._monit_args,
                                      **kwargs)

                logging.info("Starting monitor")

                #fixme
                # NOTE(review): ResultWriter is hard-coded here even though
                # ResultWriterClass / result_writer_kwargs were extracted
                # above and go unused -- presumably this should use
                # ResultWriterClass; confirm before changing.
                with ResultWriter(self._db_credentials,
                                  rois,
                                  self._metadata,
                                  take_frame_shots=True) as rw:
                    self._info["status"] = "running"
                    logging.info("Setting monitor status as running: '%s'" %
                                 self._info["status"])
                    self._monit.run(rw, self._drawer)
                logging.info("Stopping Monitor thread")
                self.stop()

            finally:
                try:
                    cam._close()
                except Exception:
                    # BUG FIX: was a bare `except:`; camera teardown stays
                    # best-effort but no longer swallows SystemExit/
                    # KeyboardInterrupt.
                    logging.warning("Could not close camera properly")

        except EthoscopeException as e:
            if e.img is not None:
                cv2.imwrite(self._info["dbg_img"], e.img)
            # BUG FIX: traceback.format_exc() takes no exception argument;
            # passing `e` bound it to the `limit` parameter and could itself
            # raise while reporting the original error.
            self.stop(traceback.format_exc())
        except Exception:
            self.stop(traceback.format_exc())

        #for testing purposes
        if self._evanescent:
            import os
            del self._drawer
            self.stop()
            os._exit(0)
Esempio n. 12
0
                            self._crop_counter[0] += 1
        
                        except Exception as e:
                            print ("Error saving the positive file")
        
        print("Saved %s positives and %s negatives so far" % (self._crop_counter[0], self._crop_counter[1]))



# Input videos: an mp4 acquired with the Record function, plus an
# empty-arena recording used to harvest negative training samples.
positive_video = "/home/gg/Downloads/test_video.mp4"
negative_video = "/home/gg/Downloads/whole_2020-08-25_13-10-07_2020f8bceb334c1c84518f62359ddc76_emptyarena_1280x960@25_00000.mp4"

# The empty-arena movie stands in for a live camera.
camera = MovieVirtualCamera(negative_video)

# Instead of the default drawer (which would display/record frames), use a
# HaarCutter to crop Haar-cascade training material as tracking proceeds --
# here it collects negatives only, 30 per frame, every 10 frames.
#drawer = DefaultDrawer(draw_frames = True, video_out = output_video, video_out_fps=25)
drawer = HaarCutter(filepath="/home/gg/haar_training",
                    draw_frames=False,
                    create_positives=False,
                    create_negatives=True,
                    interval=10,
                    negatives_per_frame=30)

# A single big ROI covering the whole frame.
builder = DefaultROIBuilder()
regions = builder.build(camera)

# Run the tracking monitor with no result writer; only the drawer consumes
# the tracking output.
monit = Monitor(camera, MultiFlyTracker, regions, stimulators=None)
monit.run(result_writer=None, drawer=drawer)