Example #1
    def test_sort_y(self):
        """sorting on the Y coordinate in place"""
        targs = read_targets("testing_fodder/frame/cam1.", 333)
        revs = read_targets("testing_fodder/frame/cam1_reversed.", 333)
        revs.sort_y()

        for targ, rev in zip(targs, revs):
            self.assertEqual(targ.pos(), rev.pos())
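A minimal standalone sketch of the same call, outside a test case. The fixture path and frame number are the ones used above; the import path is inferred from the docstring in Example #5 ("tracking_framebuf.read_targets") and should be treated as an assumption rather than a verified recipe:

    from optv.tracking_framebuf import read_targets

    # read_targets() reads the targets file for the given base name and frame number.
    targs = read_targets("testing_fodder/frame/cam1.", 333)
    targs.sort_y()  # sort in place by the y coordinate

    for targ in targs:
        x, y = targ.pos()        # pixel position of the detected target
        print(targ.tnr(), x, y)  # tnr() > -1 marks linked targets (see Example #10)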
Example #2
    def test_sort_y(self):
        """sorting on the Y coordinate in place"""
        targs = read_targets("testing_fodder/frame/cam1.", 333)
        revs = read_targets("testing_fodder/frame/cam1_reversed.", 333)
        revs.sort_y()

        for targ, rev in zip(targs, revs):
            self.assertEqual(targ.pos(), rev.pos())
Example #3
    def test_write_targets(self):
        """Round-trip test of writing targets."""
        targs = read_targets("../../liboptv/tests/testing_fodder/sample_", 42)
        targs.write(b"testing_fodder/round_trip.", 1)
        tback = read_targets("testing_fodder/round_trip.", 1)

        self.assertEqual(len(targs), len(tback))
        self.assertEqual([targ.tnr() for targ in targs],
                         [targ.tnr() for targ in tback])
        self.assertEqual([targ.pos()[0] for targ in targs],
                         [targ.pos()[0] for targ in tback])
        self.assertEqual([targ.pos()[1] for targ in targs],
                         [targ.pos()[1] for targ in tback])
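If the on-disk targets format stores coordinates with limited precision (a plausible assumption for a text format, not confirmed here), exact equality holds only because the fixture values are already rounded; a tolerance-based variant of the position check would look like this (hypothetical alternative, not part of the original test):

    import numpy as np

    np.testing.assert_array_almost_equal(
        [targ.pos() for targ in targs],
        [targ.pos() for targ in tback], decimal=3)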
Example #4
    def test_write_targets(self):
        """Round-trip test of writing targets."""
        targs = read_targets("../../liboptv/tests/testing_fodder/sample_", 42)
        targs.write("testing_fodder/round_trip.", 1)
        tback = read_targets("testing_fodder/round_trip.", 1)

        self.assertEqual(len(targs), len(tback))
        self.assertEqual([targ.tnr() for targ in targs],
                         [targ.tnr() for targ in tback])
        self.assertEqual([targ.pos()[0] for targ in targs],
                         [targ.pos()[0] for targ in tback])
        self.assertEqual([targ.pos()[1] for targ in targs],
                         [targ.pos()[1] for targ in tback])
Example #5
    def detect_part_track(self, info):
        """ track detected particles is handled by 2 bindings:
            1) tracking_framebuf.read_targets(..)
            2) ptv.py_get_mark_track_c(..)
        """
        info.object.clear_plots(remove_background=False)  #clear everything
        info.object.update_plots(info.object.orig_image, is_float=False)

        prm = info.object.exp1.active_params.m_params
        seq_first = prm.Seq_First  #get sequence parameters
        seq_last = prm.Seq_Last
        base_names = [
            prm.Basename_1_Seq, prm.Basename_2_Seq, prm.Basename_3_Seq,
            prm.Basename_4_Seq
        ]

        info.object.load_set_seq_image(
            seq_first)  #load first seq image and set appropriate C array
        n_images = len(info.object.camera_list)
        print "Starting detect_part_track"
        x1_a, x2_a, y1_a, y2_a = [], [], [], []
        for i in range(n_images):  #initialize result arrays
            x1_a.append([])
            x2_a.append([])
            y1_a.append([])
            y2_a.append([])

        for i_seq in range(seq_first, seq_last + 1):  #loop over sequences
            for i_img in range(n_images):
                intx_green, inty_green, intx_blue, inty_blue = [], [], [], []
                imx, imy, zoomx, zoomy, zoomf = ptv.py_get_mark_track_c(i_img)
                targets = read_targets(base_names[i_img], i_seq)

                for h in range(len(targets)):
                    #get data from C
                    tx, ty = targets[h].pos()

                    if (targets[h].tnr() > -1):
                        intx_green.append(int(imx / 2 + zoomf * (tx - zoomx)))
                        inty_green.append(int(imy / 2 + zoomf * (ty - zoomy)))
#                    else:
#                        intx_blue.append(int(imx/2 + zoomf*(tx - zoomx)))
#                        inty_blue.append(int(imy/2 + zoomf*(ty - zoomy)))

                x1_a[i_img] = x1_a[i_img] + intx_green  # add current step to result array
                #                x2_a[i_img]=x2_a[i_img]+intx_blue
                y1_a[i_img] = y1_a[i_img] + inty_green
#                y2_a[i_img]=y2_a[i_img]+inty_blue
#                info.object.camera_list[i_img].drawcross(str(i_seq)+"x_tr_gr",str(i_seq)+"y_tr_gr",intx_green,inty_green,"green",3)
#                info.object.camera_list[i_img].drawcross(str(i_seq)+"x_tr_bl",str(i_seq)+"y_tr_bl",intx_blue,inty_blue,"blue",2)
#plot result arrays
        for i_img in range(n_images):
            info.object.camera_list[i_img].drawcross("x_tr_gr", "y_tr_gr",
                                                     x1_a[i_img], y1_a[i_img],
                                                     "green", 3)
            #            info.object.camera_list[i_img].drawcross("x_tr_bl","y_tr_bl",x2_a[i_img],y2_a[i_img],"blue",2)
            info.object.camera_list[i_img]._plot.request_redraw()

        print "Finished detect_part_track"
Example #6
    def test_read_targets(self):
        """Reading a targets file from Python."""
        targs = read_targets("../../liboptv/tests/testing_fodder/sample_", 42)

        self.assertEqual(len(targs), 2)
        self.assertEqual([targ.tnr() for targ in targs], [1, 0])
        self.assertEqual([targ.pos()[0] for targ in targs], [1127., 796.])
        self.assertEqual([targ.pos()[1] for targ in targs], [796., 809.])
Example #7
    def test_read_targets(self):
        """Reading a targets file from Python."""
        targs = read_targets("../../liboptv/tests/testing_fodder/sample_", 42)

        self.assertEqual(len(targs), 2)
        self.assertEqual([targ.tnr() for targ in targs], [1, 0])
        self.assertEqual([targ.pos()[0] for targ in targs], [1127., 796.])
        self.assertEqual([targ.pos()[1] for targ in targs], [796., 809.])
Example #8
    def image_targets(self, frame, cam):
        # Build this camera's file base name from the template: '%1' becomes the
        # camera number; '%2' is stripped, since read_targets() receives the frame
        # number separately.
        tmpl = re.sub('%1', '%d', self._tmpl)
        tmpl = re.sub('%2', '', tmpl)
        targets = read_targets(tmpl % (cam + 1), frame)

        return TargetListModel(self,
            [dict(zip(['x', 'y'], targets[t].pos()))
                for t in range(len(targets))])
Example #9
    def image_targets(self, frame, cam):
        tmpl = re.sub('%1', '%d', self._tmpl)
        tmpl = re.sub('%2', '', tmpl)
        targets = read_targets(tmpl % (cam + 1), frame)

        return TargetListModel(self,
            [dict(zip(['x', 'y'], targets[t].pos()))
                for t in range(len(targets))])
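A worked illustration of the template substitution above; the template string is made up, and only the '%1'/'%2' convention comes from the code:

    import re

    tmpl = "img/cam%1.%2"            # hypothetical template
    tmpl = re.sub('%1', '%d', tmpl)  # -> "img/cam%d.%2"
    tmpl = re.sub('%2', '', tmpl)    # -> "img/cam%d."
    base_name = tmpl % (0 + 1)       # camera 0 -> "img/cam1."
    # read_targets(base_name, frame) then reads that camera's targets for the frame.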
Example #10
    def detect_part_track(self, info):
        """ track detected particles is handled by 2 bindings:
            1) tracking_framebuf.read_targets(..)
            2) ptv.py_get_mark_track_c(..)
        """
        info.object.clear_plots(remove_background=False) #clear everything
        info.object.update_plots(info.object.orig_image,is_float=False)
        
        prm = info.object.exp1.active_params.m_params
        seq_first = prm.Seq_First #get sequence parameters
        seq_last = prm.Seq_Last
        base_names = [prm.Basename_1_Seq, prm.Basename_2_Seq, 
            prm.Basename_3_Seq, prm.Basename_4_Seq]
        
        info.object.load_set_seq_image(seq_first) #load first seq image and set appropriate C array
        n_images = len(info.object.camera_list)
        print("Starting detect_part_track")
        x1_a, x2_a, y1_a, y2_a = [], [], [], []
        for i in range(n_images):  # initialize result arrays
            x1_a.append([])
            x2_a.append([])
            y1_a.append([])
            y2_a.append([])
        
        for i_seq in range(seq_first, seq_last+1): #loop over sequences
            for i_img in range(n_images):
                intx_green, inty_green, intx_blue, inty_blue = [], [], [], []
                imx, imy, zoomx, zoomy, zoomf = ptv.py_get_mark_track_c(i_img)
                targets = read_targets(base_names[i_img], i_seq)
                
                for h in range(len(targets)):
                    #get data from C
                    tx, ty = targets[h].pos()
                    
                    if (targets[h].tnr() > -1):
                        intx_green.append(int(imx/2 + zoomf*(tx - zoomx)))
                        inty_green.append(int(imy/2 + zoomf*(ty - zoomy)))
                    else:
                        intx_blue.append(int(imx/2 + zoomf*(tx - zoomx)))
                        inty_blue.append(int(imy/2 + zoomf*(ty - zoomy)))
            
                x1_a[i_img] = x1_a[i_img] + intx_green  # add current step to result array
                x2_a[i_img] = x2_a[i_img] + intx_blue
                y1_a[i_img] = y1_a[i_img] + inty_green
                y2_a[i_img] = y2_a[i_img] + inty_blue
#                info.object.camera_list[i_img].drawcross(str(i_seq)+"x_tr_gr",str(i_seq)+"y_tr_gr",intx_green,inty_green,"green",3)
#                info.object.camera_list[i_img].drawcross(str(i_seq)+"x_tr_bl",str(i_seq)+"y_tr_bl",intx_blue,inty_blue,"blue",2)
        #plot result arrays
        for i_img in range(n_images):
            info.object.camera_list[i_img].drawcross("x_tr_gr","y_tr_gr",x1_a[i_img],y1_a[i_img],"green",3)
            info.object.camera_list[i_img].drawcross("x_tr_bl","y_tr_bl",x2_a[i_img],y2_a[i_img],"blue",2)
            info.object.camera_list[i_img]._plot.request_redraw()
                                     
        print "Finished detect_part_track"
Example #11
    def test_instantiate(self):
        """Creating a MatchedCoords object"""
        cal = Calibration()
        cpar = ControlParams(4)

        cal.from_file("testing_fodder/calibration/cam1.tif.ori",
                      "testing_fodder/calibration/cam2.tif.addpar")
        cpar.read_control_par("testing_fodder/corresp/control.par")
        targs = read_targets("testing_fodder/frame/cam1.", 333)

        mc = MatchedCoords(targs, cpar, cal)
        pos, pnr = mc.as_arrays()

        # x sorted?
        self.assertTrue(np.all(pos[1:, 0] > pos[:-1, 0]))

        # Manually verified order for the loaded data:
        np.testing.assert_array_equal(
            pnr, np.r_[6, 11, 10, 8, 1, 4, 7, 0, 2, 9, 5, 3, 12])
Example #12
    def test_instantiate(self):
        """Creating a MatchedCoords object"""
        cal = Calibration()
        cpar = ControlParams(4)

        cal.from_file(
            b"testing_fodder/calibration/cam1.tif.ori",
            b"testing_fodder/calibration/cam2.tif.addpar")
        cpar.read_control_par(b"testing_fodder/corresp/control.par")
        targs = read_targets("testing_fodder/frame/cam1.", 333)
        
        mc = MatchedCoords(targs, cpar, cal)
        pos, pnr = mc.as_arrays()
        
        # x sorted?
        self.assertTrue(np.all(pos[1:, 0] > pos[:-1, 0]))
        
        # Manually verified order for the loaded data:
        np.testing.assert_array_equal(
            pnr, np.r_[6, 11, 10,  8,  1,  4,  7,  0,  2,  9,  5,  3, 12])
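The pnr array returned by as_arrays() records which point number each row of the x-sorted pos array belongs to; Example #14 later uses get_by_pnrs() to pull specific rows back out. A small sketch of relating the two arrays, assuming (as the pnr values in the test above suggest) one row per detected target:

    mc = MatchedCoords(targs, cpar, cal)
    pos, pnr = mc.as_arrays()   # pos sorted by x, pnr holds the matching point numbers

    for row, point_number in zip(pos, pnr):
        print(point_number, row)   # flat (corrected) coordinates of that point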
Example #13
def py_detection_proc_c(list_of_images, cpar, tpar, cals):
    """ Detection of targets """

    pftVersionParams = par.PftVersionParams(path='./parameters')
    pftVersionParams.read()
    Existing_Target = bool(pftVersionParams.Existing_Target)

    detections, corrected = [], []
    for i_cam, img in enumerate(list_of_images):
        if Existing_Target:
            targs = read_targets(cpar.get_img_base_name(i_cam), 0)
        else:
            targs = target_recognition(img, tpar, i_cam, cpar)

        targs.sort_y()
        detections.append(targs)
        mc = MatchedCoords(targs, cpar, cals[i_cam])
        corrected.append(mc)

    return detections, corrected
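The detections/corrected pair returned here is exactly what the sequence loop in Example #14 feeds to correspondences(). A rough sketch of that hand-off, assuming cpar, tpar, vpar and cals are set up as in Example #14 and list_of_images holds one preprocessed image per camera:

    detections, corrected = py_detection_proc_c(list_of_images, cpar, tpar, cals)
    sorted_pos, sorted_corresp, num_targs = correspondences(
        detections, corrected, cals, vpar, cpar)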
Example #14
def py_sequence_loop(exp):
    """ Runs a sequence of detection, stereo-correspondence, determination and stores
        the data in the cam#.XXX_targets (rewritten) and rt_is.XXX files. Basically
        it is to run the batch as in pyptv_batch.py without tracking
    """
    n_cams, cpar, spar, vpar, tpar, cals = \
        exp.n_cams, exp.cpar, exp.spar, exp.vpar, exp.tpar, exp.cals

    pftVersionParams = par.PftVersionParams(path='./parameters')
    pftVersionParams.read()
    Existing_Target = bool(pftVersionParams.Existing_Target)

    # sequence loop for all frames
    for frame in range(spar.get_first(), spar.get_last() + 1):
        print("processing frame %d" % frame)

        detections = []
        corrected = []
        for i_cam in range(n_cams):
            if Existing_Target:
                targs = read_targets(spar.get_img_base_name(i_cam), frame)
            else:
                imname = spar.get_img_base_name(i_cam) + str(frame).encode()
                print(imname)
                if not os.path.exists(imname):
                    print(os.path.abspath(os.path.curdir))
                    print('{0} does not exist'.format(imname))

                img = imread(imname.decode())
                # time.sleep(.1) # I'm not sure we need it here
                hp = simple_highpass(img, cpar)
                targs = target_recognition(hp, tpar, i_cam, cpar)

            targs.sort_y()
            detections.append(targs)
            mc = MatchedCoords(targs, cpar, cals[i_cam])
            pos, pnr = mc.as_arrays()
            corrected.append(mc)

        #        if any([len(det) == 0 for det in detections]):
        #            return False

        # Corresp. + positions.
        sorted_pos, sorted_corresp, num_targs = correspondences(
            detections, corrected, cals, vpar, cpar)

        # Save targets only after they've been modified:
        for i_cam in range(n_cams):
            detections[i_cam].write(spar.get_img_base_name(i_cam), frame)


        print("Frame " + str(frame) + " had " \
              + repr([s.shape[1] for s in sorted_pos]) + " correspondences.")

        # Distinction between quad/trip irrelevant here.
        sorted_pos = np.concatenate(sorted_pos, axis=1)
        sorted_corresp = np.concatenate(sorted_corresp, axis=1)

        flat = np.array([corrected[i].get_by_pnrs(sorted_corresp[i]) \
                         for i in range(len(cals))])
        pos, rcm = point_positions(flat.transpose(1, 0, 2), cpar, cals, vpar)

        # if len(cals) == 1: # single camera case
        #     sorted_corresp = np.tile(sorted_corresp,(4,1))
        #     sorted_corresp[1:,:] = -1

        if len(cals) < 4:
            print_corresp = -1 * np.ones((4, sorted_corresp.shape[1]))
            print_corresp[:len(cals), :] = sorted_corresp
        else:
            print_corresp = sorted_corresp

        # Save rt_is
        print(default_naming['corres'])
        rt_is = open(default_naming['corres'] + b'.' + str(frame).encode(),
                     'w')
        rt_is.write(str(pos.shape[0]) + '\n')
        for pix, pt in enumerate(pos):
            pt_args = (pix + 1, ) + tuple(pt) + tuple(print_corresp[:, pix])
            rt_is.write("%4d %9.3f %9.3f %9.3f %4d %4d %4d %4d\n" % pt_args)
        rt_is.close()