def __init__(self, algorithm, target_color, stream_only, is_test):
    """Grab the first frame of the video and set up the tracker."""
    # self.video = cv2.VideoCapture(0)  # default backend
    self.video = cv2.VideoCapture(0, cv2.CAP_V4L)  # force the V4L backend
    ret, frame = self.video.read()
    video_prop = self._get_video_prop()
    self.tracking = tracking.Tracking(ret, frame, video_prop, algorithm,
                                      target_color, stream_only, is_test)
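# A minimal sketch of a main loop that could drive the capture/tracker pair
# initialised above. The run() name and the tracker's per-frame entry point
# self.tracking.track(frame) are assumptions for illustration only;
# VideoCapture.read()/release() are the real OpenCV calls.
def run(self):
    while True:
        ret, frame = self.video.read()
        if not ret:
            break  # stream ended or camera disconnected
        self.tracking.track(frame)  # hypothetical per-frame update
    self.video.release()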
def __init__(self, config):
    """Initialises the fitter."""
    self.config = config
    # object that handles the MAUS interface (geometry and tracking stuff)
    self.tracking = tracking.Tracking(config, self._get_configuration())
    # the measured transfer matrices, errors and tof12 (set on call to the
    # fitter; list of dictionaries as per the self.fit(...) docs)
    self.measured_tm_list = []
    # seed for the misalignment and scale factor (starting point for the
    # iterations)
    self.seed = {
        "dx": 0.0, "dy": 0.0, "dz": 0.0,
        "dxp": 0.0, "dyp": 0.0,
        "scale_factor": self.config.fc_current_seed
    }
    # Minuit is only allowed to vary parameters within seed +/- max_delta
    self.max_delta = {
        "dx": 20.0, "dy": 20.0, "dz": 50.0,
        "dxp": 0.02, "dyp": 0.02,
        "scale_factor": 10.0
    }
    # best guess, i.e. output from the latest fit iteration
    self.best_guess = copy.deepcopy(self.seed)
    # gets filled with the estimated error on each parameter after fitting
    self.estimated_error = {}
    # maximum number of iterations for each step of the fitter
    self.max_iterations = self.config.fit_max_iterations
    # fitter will attempt to minimise the summed chi2 to within this resolution
    self.resolution = self.config.fit_resolution
    # gets filled with the calculated transfer matrices on each iteration
    # of the fitter (one for each tof)
    self.calculated_tm_list = []
    # index of the upstream virtual plane
    self.plane_us = self.config.fit_plane_us
    # index of the downstream virtual plane
    self.plane_ds = self.config.fit_plane_ds
    # iteration counter
    self._iteration = 0
    # tof12 distance:
    #   8222.85 - straight tracks from run 7417
    #   8224.8  - fc data from run 7541
    self.measured_data = None
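# A sketch of how the seed/max_delta pair above maps to Minuit-style limits:
# each parameter is allowed to vary in [seed - max_delta, seed + max_delta].
# The helper name and return layout are assumptions for illustration; the
# original class does not show this step.
def _parameter_bounds(self):
    return {key: (self.seed[key] - self.max_delta[key],
                  self.seed[key] + self.max_delta[key])
            for key in self.seed}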
def start_tracking_thread(self):
    self.tracking_running = True
    self.tracking_thread = tracking.Tracking(name="Tracking",
                                             shared_variables=self)
    self.tracking_thread.start()
def start_tracking_thread(self, index=0):
    self.setting[index][SETTINGS.TRACKING.value] = True
    self.tracking_thread = tracking.Tracking(name="Tracking",
                                             shared_variables=self,
                                             index=index)
    self.tracking_thread.start()
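# A sketch of the matching shutdown for the two thread starters above,
# assuming tracking.Tracking subclasses threading.Thread and polls the
# shared run flag. stop_tracking_thread is an assumption for illustration;
# it does not appear in the original code.
def stop_tracking_thread(self, index=0):
    self.setting[index][SETTINGS.TRACKING.value] = False  # request exit
    self.tracking_thread.join()  # block until the tracking loop returns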
        self.lastimg = img
        return self.sp(img, box)

DLIB_NN = DlibNetAdapter()

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

DEF_ALIGN = openface.AlignDlib(DLIB_MODEL)
DEF_DETECTOR = DEF_ALIGN
#DEF_DETECTOR = opencv_detector
DEF_NET = openface.TorchNeuralNet(NN_MODEL, imgDim=IMG_DIM, cuda=CUDA)

if TRACKING_ENABLED:
    TRACKER = tracking.Tracking(dlib.correlation_tracker)
else:
    TRACKER = None

# scale rectangle to factor
def scaleRect(rect, factor):
    r_l = int(rect.left() * factor)
    r_t = int(rect.top() * factor)
    r_r = int(rect.right() * factor)
    r_b = int(rect.bottom() * factor)
    return dlib.rectangle(r_l, r_t, r_r, r_b)

# convert dlib rectangle to array
def bbToArray(bb):
    outArr = []
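# tracking.Tracking above is handed the dlib.correlation_tracker class.
# For reference, a minimal sketch of the underlying dlib API it presumably
# drives; start_track/update/get_position are real dlib calls, while the
# image paths and box coordinates are placeholders.
import dlib

tracker = dlib.correlation_tracker()
frame = dlib.load_rgb_image('frame0.jpg')         # placeholder path
tracker.start_track(frame, dlib.rectangle(10, 10, 100, 100))
next_frame = dlib.load_rgb_image('frame1.jpg')    # placeholder path
confidence = tracker.update(next_frame)           # peak-to-side-lobe ratio
position = tracker.get_position()                 # dlib.drectangle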
def FLS_Client():
    global img_list
    global range_list
    img_list = []

    def feedback_cb(feedback):
        #print ('[Feedback] img_ctr = %d'%(feedback.img_ctr()))
        br = CvBridge()
        cv_img = br.imgmsg_to_cv2(feedback.img_msg, "mono8")
        ctr = feedback.img_ctr
        filename = "Test%i.jpg" % ctr
        cv2.imwrite(filename, cv_img)
        img_list.append(cv_img)

    # Creates the SimpleActionClient, passing the type of the action
    # (FLSAction) to the constructor.
    client = actionlib.SimpleActionClient('FLS', FLSAction)

    # Waits until the action server has started up and started
    # listening for goals.
    client.wait_for_server()

    # Creates a goal to send to the action server.
    #goal = learning_image_transport.msg.FLSGoal(get_n_img=1)
    goal = FLSGoal()
    goal.get_n_img = 3

    # Sends the goal to the action server.
    client.send_goal(goal, feedback_cb=feedback_cb)
    print("goal sent")

    client.wait_for_result()
    print('[Result] State: %d' % (client.get_state()))
    print('[Result] Status: %s' % (client.get_goal_status_text()))

    # Vered
    t = tracking.Tracking()
    index = 0
    for cv_img in img_list:
        index += 1
        #cv2.imshow('tracking', cv_img)
        #cv2.waitKey(0)

        # reading the frame
        img = cv_img

        # grayscale image: replicate into three channels
        rows, cols = img.shape
        color_img = np.zeros((rows, cols, 3))
        color_img[:, :, 0] = img
        color_img[:, :, 1] = img
        color_img[:, :, 2] = img
        #cv2.waitKey(0)

        # clean and find ROIs in the frame
        rois = objDetect.ROIfind(img)
        mask, centroids, bboxes, tiny_masks = rois.apply()

        # cleaning by tracking
        t.add_next_frame_features(color_img, mask, centroids, bboxes, tiny_masks)
        t.detection_to_tracks_assignment()
        t.update_tracks()
        t.create_new_tracks()
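# wait_for_server() and wait_for_result() above block indefinitely. A sketch
# of the same calls with timeouts; rospy.Duration and the boolean returns
# are the real actionlib/rospy API, the 30-second values are arbitrary.
import rospy

if not client.wait_for_server(rospy.Duration(30.0)):
    raise RuntimeError('FLS action server not available')
client.send_goal(goal, feedback_cb=feedback_cb)
if not client.wait_for_result(rospy.Duration(30.0)):
    print('timed out waiting for the FLS result')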
import cv2

import objDetect
import tracking
# import matplotlib.pyplot as plt
# import time

# t1 = time.time()
t = tracking.Tracking()
i1 = 0
for i in range(250, 262):
    i1 = i1 + 1

    # reading the frame
    name = 'images/Try' + str(i) + '.jpg'
    # name = 'images/Swimmer/Test' + str(i) + '.jpg'
    img1 = cv2.imread(name)
    # cv2.imread returns BGR, so convert with COLOR_BGR2GRAY
    img = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

    # clean and find ROIs in the frame
    rois = objDetect.ROIfind(img)
    mask, centroids, bboxes, tiny_masks = rois.apply()

    # cleaning by tracking
    t.add_next_frame_features(img1, mask, centroids, bboxes, tiny_masks)
    t.detection_to_tracks_assignment()
    t.update_tracks()
    t.create_new_tracks()

    # t.show_tracks()
    # plt.show()
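# cv2.imread returns None for a missing or unreadable file, and cv2.cvtColor
# would then raise. A minimal sketch of guarding the read inside the same
# loop (the path pattern is taken from the snippet above):
for i in range(250, 262):
    name = 'images/Try' + str(i) + '.jpg'
    img1 = cv2.imread(name)
    if img1 is None:
        print('skipping missing frame: ' + name)
        continue
    # ... per-frame ROI and tracking pipeline as above ...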