Example #1
File: app.py  Project: LongNguyen1012/latte
from flask import request
import zipfile
import shutil

# FrameHandler and BoundingBoxPredictor are project-local classes from latte.

def upload_file():
    f = request.files['image_file']
    file_path = "./app/zip_dataset/{}".format(f.filename)
    f.save(file_path)

    output_path = "./app/unzip_dataset/{}".format(
        f.filename.replace(".zip", ""))
    with zipfile.ZipFile(file_path, 'r') as zip_ref:
        zip_ref.extractall(output_path)

    file_name = f.filename.replace(".zip", "")
    source = "./app/unzip_dataset/{}/{}".format(file_name, file_name)
    destination = "./app/test_dataset/{}".format(file_name)
    dest = shutil.move(source, destination)

    global fh
    global bp
    fh = FrameHandler()
    bp = BoundingBoxPredictor(fh)

    return 'file uploaded successfully'
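For context, a client-side call to this handler might look like the sketch below. The route path (/upload) and the host/port are assumptions, since the snippet does not show the @app.route decorator; the form field name 'image_file' and the success message come from the code above.

import requests

# Hypothetical client for the upload handler above; the /upload route path
# and the host/port are assumptions, not shown in the snippet.
with open("dataset.zip", "rb") as zf:
    resp = requests.post("http://127.0.0.1:2000/upload",
                         files={"image_file": zf})
print(resp.text)  # 'file uploaded successfully'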
Example #2
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt

# (x1, y1)..(x4, y4) and (dx1, dy1)..(dx4, dy4) are the source/destination
# corner coordinates chosen elsewhere in the project.
src = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.float32)
dst = np.array([[dx1, dy1], [dx2, dy2], [dx3, dy3], [dx4, dy4]], np.float32)

# Get the homography that maps src to dst (hence removes perspective distortion from the image)
H = cv2.getPerspectiveTransform(src, dst)
print("Homography : ", H)
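# For reference (not in the original snippet): the homography would typically
# be applied with cv2.warpPerspective to obtain the undistorted view, e.g.
#   warped = cv2.warpPerspective(frame, H, (cols, rows))
# where frame, cols and rows are assumed to match the variables used below.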
############################################################################
# FIXME: interactive mode is meant to keep plt.figure() from blocking, but it still hangs.
matplotlib.interactive(True)
#f = plt.figure()
# create the frame handler
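# The keyword arguments below look like binarisation thresholds for lane
# detection: HLS channel ranges (h_thresh, s_thresh), gradient-direction and
# gradient-magnitude ranges (dir_thresh, mag_thresh), an x-gradient range
# (xgrad_thresh), and the Sobel kernel size. This reading is inferred from
# the names; FrameHandler itself is project-local and not shown here.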
frame_handler = FrameHandler(
    H,
    rows,
    cols,
    h_thresh=(230, 255),
    s_thresh=(100, 255),  # (100, 255) initially
    dir_thresh=(0, 45),
    mag_thresh=(100, 140),
    xgrad_thresh=(40, 150),
    sobel_kernel=17)
lane_tracker = LaneTracker(H, rows, cols, nwindows=9)
#cv2.namedWindow("h-binary")
#cv2.namedWindow("original frame")
#cv2.namedWindow("undistorted frame")
cv2.namedWindow("s-binary")
cv2.namedWindow("xgrad-binary")
cv2.namedWindow("final binary")
cv2.namedWindow("warped")
cv2.namedWindow("Tracked Lines")

# necessary so the named windows do not get stuck:
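The snippet is cut off at this comment; the call it refers to is presumably the standard cv2.waitKey polling that services the window event queue. A generic sketch of such a loop, with a hypothetical cv2.VideoCapture source:

cap = cv2.VideoCapture("project_video.mp4")  # hypothetical input source
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    # ... per-frame processing with frame_handler / lane_tracker would go here ...
    cv2.imshow("Tracked Lines", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # pumps the GUI event loop; quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()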
Example #3

import os
import time
import numpy as np
from flask import Flask, request

# FrameHandler, BoundingBoxPredictor, NextFrameBBOX, get_pointcnn_labels and
# DIR_PATH are project-local names from latte.
app = Flask(__name__)


@app.route("/predictNextFrameBoundingBoxes", methods=['POST'])
def predictNextFrameBoundingBoxes():
    json_request = request.get_json()
    fname = json_request["fname"]
    drivename, fname = fname.split("/")
    frame = fh.load_annotation(drivename, fname)
    res = bp.predict_next_frame_bounding_boxes(frame)
    # Stringify the keys; a dict comprehension avoids mutating the dict
    # while iterating over its keys view.
    res = {str(key): value for key, value in res.items()}
    print(res)

    return str(res)
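# Example request (not part of the original code), assuming the server from
# the __main__ block below is running:
#   import requests
#   requests.post("http://127.0.0.1:2000/predictNextFrameBoundingBoxes",
#                 json={"fname": "<drivename>/<frame name>"})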


@app.route("/loadAnnotation", methods=['POST'])
def loadAnnotation():
    json_request = request.get_json()
    fname = json_request["fname"]
    drivename, fname = fname.split("/")  # load_annotation takes (drivename, fname), as used above
    frame = fh.load_annotation(drivename, fname)
    return str(frame.bounding_boxes)


if __name__ == "__main__":
    fh = FrameHandler()
    bp = BoundingBoxPredictor(fh)
    os.system("rm {}/*".format(os.path.join(DIR_PATH, "static/images")))
    app.run(host="127.0.0.1", port=2000)


    # Excerpt from the BoundingBoxPredictor class: the method behind the
    # /predictNextFrameBoundingBoxes route above.
    def predict_next_frame_bounding_boxes(self, frame, json_request):
        main_start = time.time()

        drivename, fname = frame.fname.split('.')[0].split("/")

        idx = self.frame_handler.drives[drivename].index(fname)
        next_fname = self.frame_handler.drives[drivename][idx + 1]
        current_fname = self.frame_handler.drives[drivename][idx]

        # Tracking hyper-parameters
        self.next_frame_idx = idx + 1
        _search_number = 30
        self.search_number = _search_number
        self.treshold_point_annotation_error = 1
        _padding = 0.1
        self.padding = _padding
        self.max_angle_changes_on_deg = 15

        
        ground_removed = json_request["settingsControls"]["GroundRemoval"]
        is_guided_tracking = json_request["settingsControls"]["GuidedTracking"]

        # Foreground mask from the point classifier
        car_points = get_pointcnn_labels(drivename + "/" + next_fname,
                                         json_request["settingsControls"],
                                         ground_removed=ground_removed)

        # Full point cloud for the next frame
        next_all_pc_fresh = self.frame_handler.get_pointcloud(
            drivename, next_fname, dtype=float, ground_removed=ground_removed)

        # Foreground points only
        next_pc = np.copy(next_all_pc_fresh[car_points])

        z_axis = np.max(next_pc[:, 2])  # ground already removed

        # Keep only points within 3 m below the highest foreground point
        next_all_pc = np.copy(next_all_pc_fresh[next_all_pc_fresh[:, 2] > z_axis - 3])
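        # For reference: the line above is plain NumPy boolean masking --
        # pc[pc[:, 2] > z] keeps exactly the points whose z coordinate exceeds z.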

        bounding_boxes = sorted(frame.bounding_boxes, key=lambda box: box.box_id)
        centers = {box.box_id: box.center for box in bounding_boxes}
        velocities = {box_id: np.zeros(2) for box_id in centers.keys()}

        next_bounding_boxes = []
        # Initialise the bounding boxes
        print("Init bounding boxes", self.padding, self.search_number,
              self.treshold_point_annotation_error, is_guided_tracking)

        self.prevBbox = {}
        self.boxHistoryLocations = {}
        F = frame.F_MATRIX  # state-transition matrix for the motion model
        for bounding_box in bounding_boxes:
            self.prevBbox[bounding_box.box_id] = {}
            self.prevBbox[bounding_box.box_id]["max_distance"] = 5
            self.boxHistoryLocations[bounding_box.box_id] = {}  # init history of locations

            start = time.time()
            self.padding = _padding
            self.search_number = _search_number
            _pc = next_pc

            print("box id", bounding_box.box_id, bounding_box.tracking_idx)
            if bounding_box.tracking_idx > 1:
                self.padding = 0.05
                self.search_number = 20

                # Motion-model prediction of the box state for the next frame
                x_hat_k = bounding_box.predicted_state
                center_old = bounding_box.center
                x_hat_k_prediction = np.matmul(F, x_hat_k)  # + process noise ~ N(0, Q)

                x_diff = np.abs(x_hat_k - x_hat_k_prediction)
                update_length = np.max(x_diff[:2])

                self.prevBbox[bounding_box.box_id]["max_distance"] = update_length + 1

                pc_annotated_bbox = bounding_box.filter_pointcloud(np.copy(next_pc), update_length * 2)[1]

                if pc_annotated_bbox.shape[0] <= 300:  # too few foreground points: fall back to the full cloud
                    _, self.prevBbox[bounding_box.box_id]["pcs"] = bounding_box.filter_pointcloud(np.copy(next_all_pc), 1.0)
                    bounding_box.center = x_hat_k_prediction[:2]
                    _, pred_pcs = bounding_box.filter_pointcloud(np.copy(next_all_pc), 1.0)

                    self.prevBbox[bounding_box.box_id]["pcs"] = np.concatenate(
                        [pred_pcs, np.copy(self.prevBbox[bounding_box.box_id]["pcs"])], axis=0)
                    bounding_box.center = center_old
                else:
                    self.prevBbox[bounding_box.box_id]["pcs"] = np.copy(next_pc)

                print("\t\t prev_pc_annotated_bbox", update_length,
                      pc_annotated_bbox.shape, self.prevBbox[bounding_box.box_id]["pcs"].shape)
            else:
                _, pc_annotated_bbox = bounding_box.filter_pointcloud(np.copy(_pc), 0.0)
                if pc_annotated_bbox.shape[0] > 100:
                    self.prevBbox[bounding_box.box_id]["pcs"] = np.copy(_pc)
                else:
                    _, self.prevBbox[bounding_box.box_id]["pcs"] = bounding_box.filter_pointcloud(np.copy(next_all_pc), 1.0)

            _pc = np.concatenate([np.copy(_pc), np.copy(self.prevBbox[bounding_box.box_id]["pcs"])], axis=0)

            new_bbox = self._predict_next_frame_bounding_box(frame, bounding_box, np.copy(_pc), is_guided_tracking)
            next_bounding_boxes.append(NextFrameBBOX(bounding_box.box_id, new_bbox[1], new_bbox[0], new_bbox[2], bounding_box.tracking_idx))

            print("time to predict bounding box: ", bounding_box.box_id, time.time() - start)

        self.next_bounding_boxes = next_bounding_boxes

        # Clean up overlapping boxes
        if is_guided_tracking:
            start = time.time()
            try:
                self.fixed_overlapping_boxes(False)
            except Exception:
                pass
            print("time to fixed_overlapping_boxes: ", time.time() - start)

        _padding = 0.2
        self.treshold_point_annotation_error = 1
        print("Retrack bounding boxes", self.padding, self.search_number,
              self.treshold_point_annotation_error)
        # Retrack box locations, shrinking the padding each pass
        is_bboxes_updated = np.ones([len(self.next_bounding_boxes)])
        is_bboxes_updated[:] = True
        self.search_number = 20
        if is_guided_tracking:
            while is_bboxes_updated.any() and _padding > 0.01:

                updated_bounding_boxes = []
                print("self.padding", self.padding)

                self.padding = _padding
                idx_box_status = 0
                for bbox in self.next_bounding_boxes:

                    
                    start = time.time()
                    print("box id", bbox.box_id)
                    box_state = bbox.get_bounding_box({})
                    all_corners_set = {}
                    all_corners_set[bbox.get_tracking_index()] = [bbox.get_corners(), bbox.get_center_dist()]  # initial location

                    _pc = next_pc

                    if bbox.tracking_idx > 1:
                        self.padding = _padding / bbox.tracking_idx
                        self.search_number = 20
                        _pc = np.concatenate([np.copy(_pc), np.copy(self.prevBbox[bbox.box_id]["pcs"])], axis=0)

                    if is_bboxes_updated[idx_box_status]:
                        _, all_corners_set = self.guided_search_bbox_location(
                            bbox.get_corners(), np.copy(_pc), bbox.get_tracking_index(), all_corners_set, bbox)

                    updated_bounding_boxes.append(NextFrameBBOX(bbox.box_id, all_corners_set, box_state, bbox.center, bbox.tracking_idx))

                    print("time to predict bounding box: ", bbox.box_id, time.time() - start)

                    idx_box_status = idx_box_status + 1

                print("Retrack box locations: ", time.time() - start)

                self.next_bounding_boxes = updated_bounding_boxes

                # Clean up overlapping boxes
                start = time.time()
                try:
                    self.fixed_overlapping_boxes(False)
                except Exception:
                    pass
                print("time to fixed_overlapping_boxes: ", time.time() - start)

                idx_box_status = 0
                for bbox_check in self.next_bounding_boxes:
                    is_bboxes_updated[idx_box_status] = bbox_check.is_bbox_updated
                    idx_box_status = idx_box_status + 1

                _padding = _padding * 0.1  # shrink the search padding each pass

                print("is_bboxes_updated", is_bboxes_updated, is_bboxes_updated.any())
       
        # Recompute the final bounding-box structures (and their angles)
        final_bounding_boxes = {}
        start = time.time()
        for bbox in self.next_bounding_boxes:
            bbox_structure, _, _ = self.corners_to_bounding_box(bbox.get_corners(), None, False)
            final_bounding_boxes[str(bbox.id)] = bbox.get_bounding_box(bbox_structure)

        print("Recalculate angle: ", time.time() - start)

        print("Total time for predict frame: ", time.time() - main_start)
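        # Returned value (for reference): a dict mapping str(box id) to the
        # bounding-box structure from get_bounding_box, plus the elapsed time
        # in seconds rounded to two decimals.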
        return final_bounding_boxes, round(time.time() - main_start, 2)