Code Example #1
import os
import os.path as osp
from datetime import datetime

import dask
import tifffile as tif

# get_cycle_composition, register and Warper are project-local helpers
# assumed to be importable from the surrounding package.


def main(in_path: str, ref_channel: str, out_dir: str, n_workers: int, tile_size: int, overlap: int):

    if not osp.exists(out_dir):
        os.makedirs(out_dir)

    if n_workers == 1:
        dask.config.set({'scheduler': 'synchronous'})
    else:
        dask.config.set({'num_workers': n_workers, 'scheduler': 'processes'})

    st = datetime.now()

    with tif.TiffFile(in_path, is_ome=True) as stack:
        ome = stack.ome_metadata

    cycle_size, ncycles, first_ref_position = get_cycle_composition(ome, ref_channel)
    block_width = tile_size
    block_height = tile_size

    warper = Warper()
    warper.block_w = block_width
    warper.block_h = block_height
    warper.overlap = overlap


    # perform registration of full stack
    register(in_path, out_dir, cycle_size, ncycles, first_ref_position, ome, warper, block_width, block_height, overlap)

    fin = datetime.now()
    print('time elapsed', fin - st)
Code Example #2
def __init__(self, gray_shape, objpoints, imgpoints):
    self.camera_undistort = CameraUndistort(gray_shape, objpoints, imgpoints)
    self.gradient_filter = GradientFilter()
    self.warper = Warper(gray_shape)
    self.window_searcher = WindowSearcher()
    self.cnt = 0
Code Example #3
def __init__(self, opts):
    super(Loss, self).__init__()
    self.ssim_wt = opts.ssim_wt
    self.l1_wt = opts.l1_wt
    self.smooth_wt = opts.smooth_wt
    self.ssim = SSIM(opts)
    self.warper = Warper(opts)
    # for debugging only
    self.recon_im = []
Code Example #4
    def process_level(self, level, u0, v0):

        I0_ = self.I0pyr[level]
        I1_ = self.I1pyr[level]

        # print(I1_.shape, u0.shape, self.I0pyr[level].shape)
        wrpr = Warper(I0_.shape, u0, v0, I0_, I1_,
                      train_function=self.class_train_function(u0, v0, **self.train_function_args), display=0)

        for _ in range(self.warps):
            wrpr.warp()
        self.u, self.v = wrpr.u, wrpr.v
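The example above handles a single pyramid level. For reference, a coarse-to-fine driver for process_level could look like the sketch below; run_pyramid and every name other than process_level, I0pyr, u and v is illustrative, not taken from the original project, and the coarsest level is assumed to be last in the pyramid list.

import cv2
import numpy as np

def run_pyramid(self):
    # start at the coarsest level (assumed last in the pyramid list)
    h, w = self.I0pyr[-1].shape[:2]
    u0 = np.zeros((h, w), dtype=np.float32)
    v0 = np.zeros((h, w), dtype=np.float32)
    for level in range(len(self.I0pyr) - 1, -1, -1):
        self.process_level(level, u0, v0)
        if level > 0:
            # upsample the estimated flow to initialise the next finer level,
            # rescaling its magnitude with the resolution change
            nh, nw = self.I0pyr[level - 1].shape[:2]
            scale = nw / float(self.u.shape[1])
            u0 = cv2.resize(self.u, (nw, nh)) * scale
            v0 = cv2.resize(self.v, (nw, nh)) * scale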
Code Example #5
import torch

# SSIM and Warper are project-local modules assumed to be importable.


class Loss:
    def __init__(self, opts):
        super(Loss, self).__init__()
        self.ssim_wt = opts.ssim_wt
        self.l1_wt = opts.l1_wt
        self.smooth_wt = opts.smooth_wt
        self.ssim = SSIM(opts)
        self.warper = Warper(opts)
        # for debugging only
        self.recon_im = []

    def __call__(self, in_data, disp):
        left_im = in_data['left_im']
        right_im = in_data['right_im']
        right_recon = self.warper.warp(left_im, disp)
        self.recon_im = right_recon 
        photo_loss = self.ssim_wt * self.ssim(right_im, right_recon).mean() + \
            self.l1_wt * (right_recon - right_im).abs().mean()
        smooth_loss = self.compute_smooth_loss(disp, left_im)
        net_loss = (1.0 - self.smooth_wt) * photo_loss + self.smooth_wt * smooth_loss 
        return {'photo_loss': photo_loss,
                'smooth_loss': smooth_loss}, net_loss

    def compute_smooth_loss(self, depth, frame):
        # Edge-aware smoothness: penalise disparity gradients, down-weighting
        # them where the image itself has strong gradients (likely edges).
        depth = depth.unsqueeze(1)
        mean_depth = depth.mean(2, True).mean(3, True)
        n_depth = depth / (mean_depth + 1e-7)  # scale-normalised disparity
        grad_depth_x = torch.abs(n_depth[:, :, :, :-1] - n_depth[:, :, :, 1:])
        grad_depth_y = torch.abs(n_depth[:, :, :-1, :] - n_depth[:, :, 1:, :])
        grad_fr_x = torch.mean(torch.abs(frame[:, :, :, :-1] - frame[:, :, :, 1:]), 1, keepdim=True)
        grad_fr_y = torch.mean(torch.abs(frame[:, :, :-1, :] - frame[:, :, 1:, :]), 1, keepdim=True)
        wt_grad_depth_x = grad_depth_x * torch.exp(-grad_fr_x)
        wt_grad_depth_y = grad_depth_y * torch.exp(-grad_fr_y)
        return torch.mean(wt_grad_depth_x) + torch.mean(wt_grad_depth_y)
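Since compute_smooth_loss never touches self, the edge-aware smoothness term can be exercised on its own with random tensors; the shapes below are illustrative (disparity is (N, H, W), the frame (N, C, H, W)).

import torch

disp = torch.rand(2, 192, 640)       # (N, H, W) disparity map
frame = torch.rand(2, 3, 192, 640)   # (N, C, H, W) RGB frame
# self is unused, so the method can be called unbound for a quick check
print(Loss.compute_smooth_loss(None, disp, frame))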
Code Example #6
import cv2

# Warper and process_image are project-local (a plausible process_image is
# sketched after this example).


def main():
    flag = False
    cap = cv2.VideoCapture("canny2.avi")

    while True:

        # capture a frame
        ret, img = cap.read()

        # stop when no frame could be captured
        if not ret:
            break
        if cv2.waitKey(0) & 0xFF == 27:
            break

        # create the Warper object (first frame only)
        if not flag:
            flag = True
            global warper
            warper = Warper(img)

        # run warper and slidewindow (see the process_image sketch after this example)
        slideImage, x_location = process_image(img)

        cv2.imshow("originImage", img)
        # cv2.imshow("warper", warper.warp(img))
        cv2.imshow("slidewindow", slideImage)
Code Example #7
    def process_level(self, level, u0, v0):

        I0_ = self.I0pyr[level]
        I1_ = self.I1pyr[level]

        # print(I1_.shape, u0.shape, self.I0pyr[level].shape)
        wrpr = Warper(I0_.shape,
                      u0,
                      v0,
                      I0_,
                      I1_,
                      train_function=self.class_train_function(
                          u0, v0, **self.train_function_args),
                      display=0)

        for _ in range(self.warps):
            wrpr.warp()
        self.u, self.v = wrpr.u, wrpr.v
Code Example #8
import os
import os.path as osp
from datetime import datetime

import dask
import tifffile as tif

# get_cycle_composition, register and Warper are project-local helpers
# assumed to be importable from the surrounding package.


def main(in_path: str, ref_channel: str, out_dir: str, n_workers: int, tile_size: int, overlap: int, method: str):

    if not osp.exists(out_dir):
        os.makedirs(out_dir)

    if n_workers == 1:
        dask.config.set({'scheduler': 'synchronous'})
    else:
        dask.config.set({'num_workers': n_workers, 'scheduler': 'processes'})

    avail_methods = ('farneback', 'denselk', 'deepflow', 'rlof', 'pcaflow')
    if method not in avail_methods:
        raise ValueError('Provided optical flow method is not recognised.\n'
                         'Available methods: ' + str(avail_methods))
    print('Using method', method)

    st = datetime.now()

    with tif.TiffFile(in_path, is_ome=True) as stack:
        ome = stack.ome_metadata

    cycle_size, ncycles, first_ref_position = get_cycle_composition(ome, ref_channel)
    block_width = tile_size
    block_height = tile_size

    warper = Warper()
    warper.block_w = block_width
    warper.block_h = block_height
    warper.overlap = overlap

    # perform registration of full stack
    register(in_path, out_dir,
             cycle_size, ncycles,
             first_ref_position, ome, warper,
             block_width, block_height, overlap, method)

    fin = datetime.now()
    print('time elapsed', fin - st)
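For reference, a hypothetical command-line entry point for this main() could look like the following; the flag names and defaults are illustrative, not taken from the original project.

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Optical-flow stack registration')
    parser.add_argument('--in_path', required=True)
    parser.add_argument('--ref_channel', required=True)
    parser.add_argument('--out_dir', required=True)
    parser.add_argument('--n_workers', type=int, default=1)
    parser.add_argument('--tile_size', type=int, default=1000)
    parser.add_argument('--overlap', type=int, default=100)
    parser.add_argument('--method', default='farneback',
                        choices=('farneback', 'denselk', 'deepflow', 'rlof', 'pcaflow'))
    args = parser.parse_args()
    main(args.in_path, args.ref_channel, args.out_dir,
         args.n_workers, args.tile_size, args.overlap, args.method)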
Code Example #9
import importlib
import os
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable

# opts, PidCal, Warper, SlideWindow, preprocess_img, postprocess_img and lpf
# are project-local helpers assumed to be importable.


def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # cudnn.benchmark = True
    pidcal = PidCal()
    opt = opts.parse()
    warper = Warper()
    slidewindow = SlideWindow()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################

    model.eval()

    cap = cv2.VideoCapture("input_video/test.avi")

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # out = cv2.VideoWriter('output_video/TEST_1.avi', fourcc, 20.0, (1280,480),0)

    prev_time = 0

    fps_list = []

    # fourcc =cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('input_video/processed_video.avi', fourcc, 40.0,
                          (480, 320), 0)

    steer_list = list()
    lpf_list = list()

    while True:
        ret, frame = cap.read()

        if ret:
            cur_time = time.time()
            frame_new = cv2.resize(frame, (320, 180))

            input_img = frame_new / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)
                output = torch.sigmoid(output)
                #output = F.softmax(output, dim=1)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                output = np.clip(output, 0, 1)
                output *= 255
                output = np.uint8(output)

                output = cv2.resize(output, (640, 360))
                output[output > 80] = 255
                output[output <= 80] = 0

                warper_img, point_img = warper.warp(output)
                ret, left_start_x, right_start_x, cf_img = slidewindow.w_slidewindow(
                    warper_img)

                if ret:
                    left_x_current, right_x_current, sliding_img, steer_theta = slidewindow.h_slidewindow(
                        warper_img, left_start_x, right_start_x)
                    cv2.imshow('sliding_img', sliding_img)
                    steer_list.append(steer_theta)
                    lpf_result = lpf(steer_theta, 0.5)
                    lpf_list.append(lpf_result)
                    print("steer theta:", steer_theta)
                    if steer_theta < -28 or steer_theta > 28:
                        continue
                    else:
                        pid = round(pidcal.pid_control(int(50 * steer_theta)),
                                    6)
                        print("pid :", pid)
                        '''
                        auto_drive(pid)
                        '''
                else:
                    pidcal.error_sum = 0

                end_time = time.time()
                sec = end_time - cur_time

                fps = 1 / sec
                fps_list.append(fps)

                print("Estimated fps {0} ".format(fps))

                # out.write(add_img)

                cv2.imshow("frame", frame)
                out.write(warper_img)
                # cv2.imshow("src", warper_img)
                # cv2.imshow("out_img", output)
                cv2.imshow("cf_img", cf_img)

                key = cv2.waitKey(1) & 0xFF
                if key == 27:
                    break
                elif key == ord('p'):
                    cv2.waitKey(-1)
        else:
            # the stream has ended; leave the loop instead of spinning forever
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()

    plt.figure(1)
    plt.plot(steer_list)
    plt.figure(2)
    plt.plot(lpf_list)
    plt.show()
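lpf above is project-local. Judging from the call lpf(steer_theta, 0.5), it appears to be a first-order low-pass filter; a minimal sketch of that kind of filter, purely as an assumption about the original, is:

_lpf_state = None

def lpf(x, alpha):
    # exponential smoothing: alpha weights the new sample
    global _lpf_state
    _lpf_state = x if _lpf_state is None else alpha * x + (1 - alpha) * _lpf_state
    return _lpf_state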
Code Example #10
File: ros_lane.py  Project: sjhaiitd/dlive
    def depth_callback(self, data):
        try:
            global depth
            depth = self.bridge.imgmsg_to_cv2(data, "32FC1")
            # cv_image.astype('uint8')
        except CvBridgeError as e:
            print(e)
        # out.write(cv_image)
        # print depth.shape
        # cv2.imshow('Depth', depth)
        # cv2.waitKey(1)


if __name__ == '__main__':
    rospy.init_node('image_display', anonymous=True)
    threshold_object = Thresholder()
    warp_object = Warper()
    fitt_object = Polyfitter()
    draw_object = Polydrawer()
    # fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('test.avi', fourcc, 30.0, (1080, 720))
    image_object = Image_process()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.signal_shutdown('KeyboardInterrupt')
        print('Shutting Down')
        out.release()
    cv2.destroyAllWindows()
Code Example #11
import importlib
import os
import time

import cv2
import numpy as np
import torch
from torch.autograd import Variable

# opts, PidCal, Warper, LineDetector, preprocess_img, postprocess_img and
# auto_drive are project-local helpers assumed to be importable.


def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # cudnn.benchmark = True

    opt = opts.parse()
    warper = Warper()
    #slidewindow  = SlideWindow()
    # slidewindow  = LineDetector()

    pidcal = PidCal()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')
    #trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################
    model.eval()

    cap = cv2.VideoCapture(
        "/home/foscar/ISCC_2019/src/race/src/my_lane_detection/input_video/0.avi"
    )
    ret, frame = cap.read()
    slidewindow = LineDetector(frame)
    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    video_name = time.time()
    out = cv2.VideoWriter('output_video/{}.avi'.format(video_name), fourcc,
                          25.0, (video_width, video_height), 0)

    prev_time = 0

    count = 0

    # not defined anywhere in the original excerpt; assumed initialisations
    fps_list = []
    pid_draw = []
    slidewindow_middle = 320  # assumed fallback target (centre of the 640px warp)

    while True:
        ret, frame = cap.read()
        count += 1
        if ret:
            cur_time = time.time()
            frame = cv2.resize(frame, (480, 360))

            input_img = frame / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)

                print("output.shape : ", output.shape)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                # cv2.imshow("203",output)
                output *= 255
                output = np.clip(output, 0, 255)
                output = np.uint8(output)

                # resize
                output = cv2.resize(output, (640, 480))
                # cv2.imshow('resize',output)
                # threshold
                ret, thr_img = cv2.threshold(output, 20, 255, 0)
                # cv2.imshow('threshold',thr_img)
                # warp
                warp_img = warper.warp(thr_img)

                # cv2.imshow('warped',warp_img)
                # cv2.imshow("new output", canny_like_output)

                #canny = cv2.Canny(warp_img, 40, 255)
                kernel1 = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
                kernel2 = np.ones((5, 5), np.uint8)

                #dilate = cv2.dilate(warp_img, kernel1, iterations=2)
                #closed = cv2.morphologyEx(dilate, cv2.MORPH_OPEN, kernel2)
                # x_start_L, x_start_R=slidewindow.find_sliding_point(warp_img)
                # img, x_location = slidewindow.slide_window(x_start_L,x_start_R,warp_img)
                slided_img, x_location, point_list_left, point_list_right = slidewindow.main(
                    warp_img)

                if x_location is not None:
                    # cv2.circle(img,(int(x_location),300),5,(0,0,255),3)
                    pid = round(pidcal.pid_control(int(x_location)), 6)
                    #print("pid rate : ", pid)
                    auto_drive(pid, x_location)
                else:
                    pid = pidcal.pid_control(slidewindow_middle)
                    print("pid rate : ", pid)
                    auto_drive(pid)

                end_time = time.time()
                sec = end_time - cur_time

                fps = 1 / sec
                fps_list.append(fps)

                print("Estimated fps {0} ".format(fps))

                out.write(output)

                cv2.imshow("src", frame)
                pid_draw.append(pid)

                # cv2.imshow("th_img", thr_img)
                # cv2.imshow("output", output)
                # img = cv2.imread('/home/foscar/Downloads/wapped_screenshot_12.08.2020.png',cv2.IMREAD_GRAYSCALE)
                # img = cv2.resize(img, (640, 480))
                # ret, thr_img = cv2.threshold(img,20,255,cv2.THRESH_BINARY)
                # img ,xloc, point_list_left, point_list_right = slidewindow.main(thr_img)
                # plt.xlim(0,640)
                # plt.ylim(0,480)
                # plt.plot(point_list_left[0], point_list_left[1])
                # plt.plot(point_list_right[0], point_list_right[1])
                # plt.show()
                # cv2.imshow('aa',img)
                # key = cv2.waitKey(1) & 0xFFx
                # if key == 27: break
                # elif key == ord('p'):
                #     cv2.waitKey(-1)
                cv2.imshow("ws", slided_img)
                print("x_loc :", x_location)
                key = cv2.waitKey(1) & 0xFF
                if key == 27:
                    break
                elif key == ord('p'):
                    cv2.waitKey(-1)
        else:
            # the stream has ended; leave the loop instead of spinning forever
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()
Code Example #12
File: inference_unet.py  Project: yxw027/ISCC_2020
import importlib
import os
import time

import cv2
import numpy as np
import torch
from torch.autograd import Variable

# opts, PidCal, Warper, SlideWindow, preprocess_img, postprocess_img and
# auto_drive are project-local helpers assumed to be importable.


def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # cudnn.benchmark = True

    opt = opts.parse()
    warper = Warper()
    slidewindow  = SlideWindow()
    pidcal = PidCal()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version",torch.__version__)
    print("cuda_version",torch.version.cuda)


    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')
    #trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################
    model.eval()

    cap = cv2.VideoCapture(0)

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    video_name = time.time()
    out = cv2.VideoWriter('output_video/{}.avi'.format(video_name), fourcc, 25.0, (video_width,video_height),0)

    prev_time = 0

    fps_list = []


    while True:
        ret, frame = cap.read()

        if ret:
            cur_time = time.time()
            frame = cv2.resize(frame, (480,360))

            input_img = frame / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)

                print("output.shape : ", output.shape)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                output *= 255
                output = np.clip(output, 0, 255)
                output = np.uint8(output)

                # resize
                output = cv2.resize(output, (640, 480))

                # threshold
                ret, thr_img = cv2.threshold(output, 180, 255, 0)
                # warp
                output, warp_img = warper.warp(output, thr_img)
                img, x_location = slidewindow.slidewindow(warp_img)

                if x_location is not None:
                    pid = round(pidcal.pid_control(int(x_location)), 6)
                    print("pid rate : ", pid)
                    auto_drive(pid, x_location)
                else:
                    pid = pidcal.pid_control(320)
                    print("pid rate : ", pid)
                    auto_drive(pid)


                end_time = time.time()
                sec = end_time - cur_time

                fps = 1/sec
                fps_list.append(fps)

                print("Estimated fps {0} " . format(fps))

                out.write(output)

                cv2.imshow("src", frame)


                cv2.imshow("output", output)
                cv2.imshow("thre", img)

                key = cv2.waitKey(1) & 0xFF
                if key == 27:
                    break
                elif key == ord('p'):
                    cv2.waitKey(-1)
        else:
            # the stream has ended; leave the loop instead of spinning forever
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()
Code Example #13
import datetime
import importlib
import os
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable

# opts, PidCal, Warper, SlideWindow, StopLine, SD, preprocess_img,
# postprocess_img, lpf and auto_drive are project-local helpers assumed to be
# importable.


def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # cudnn.benchmark = True
    pidcal = PidCal()
    opt = opts.parse()
    warper = Warper()
    slidewindow = SlideWindow()
    stopline = StopLine()
    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version",torch.__version__)
    print("cuda_version",torch.version.cuda)


    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################

    model.eval()

    cap = None

    if opt.video_idx == 0:
        cap = cv2.VideoCapture("input_video/720p.mp4")
    elif opt.video_idx == 1:
        cap = cv2.VideoCapture("input_video/straight.avi")
    elif opt.video_idx == 2:
        cap = cv2.VideoCapture("input_video/test.avi")
    elif opt.video_idx == 3:
        cap = cv2.VideoCapture("input_video/track.avi")
    elif opt.video_idx == 4:
        cap = cv2.VideoCapture("output_video/field.avi")
    elif opt.video_idx == 5:
        cap = cv2.VideoCapture("output_video/2020-08-23 19:20:01.166517.avi")
    else:
        cap = cv2.VideoCapture(0)
        # video test
        cap.set(3, 1280)
        cap.set(4, 720)

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))



    #fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
    # out = cv2.VideoWriter('output_video/TEST_1.avi', fourcc, 20.0, (1280,480),0)

    prev_time = 0

    fps_list = []

    now = datetime.datetime.now()

    fourcc = None
    out = None

    if opt.video_idx > 2:
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output_video/' + str(now) + '.avi', fourcc, 30.0, (1280, 720))

    pid_list = []
    steer_list = []
    lpf_list = []

    pid_old = None
    steer_theta = 0
    i = 0
    x_location = 240
    frame_cnt = 0
    while True:
        ret, frame = cap.read()
        if frame is None:
            break
        frame_height, frame_width, frame_channels = frame.shape

        print("Frame Info : (Height, Width, Channels) : ({}, {}, {})".format(frame_height, frame_width, frame_channels))

        record_frame = cv2.resize(frame, (1280,720))

        if ret:
            cur_time = time.time()
            frame_new = cv2.resize(frame, (320,180))

            input_img = frame_new / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)
                output = torch.sigmoid(output)
                #output = F.softmax(output, dim=1)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                output = np.clip(output, 0, 1)
                output *= 255
                output = np.uint8(output)


                output = cv2.resize(output, (640, 360))
                output[output>80] = 255
                output[output<=80] = 0

                # cv2.circle(output, (output.shape[1]/2, output.shape[0]), 9, (255,255,0), -1)
                cv2.imshow("output_img", output)

                print("shape_info", output.shape)
                # cv2.circle(output, (output.shape[0]/2, output.shape[1]/2), 9, (0,255,0), -1)
                #warper_img = warper.warp(output)
                warper_img = warper.warp_test(output)
                cv2.imshow("warp_img", warper_img)

                # warper_img_test = warper.warp_test(output)
                # cv2.imshow("warp_img_test",warper_img_test)
                ret, left_start_x, right_start_x, cf_img = slidewindow.w_slidewindow(warper_img, 180)

                if ret:
                    i += 1
                    left_x_current, right_x_current, sliding_img, steer_theta, center, length = slidewindow.h_slidewindow(warper_img, left_start_x, right_start_x)
                    #stop_test Lee youn joo
                    # if center != None:
                    #     locate_x, locate_y = center
                    #     if (warper_img[int(locate_y)][int(locate_x)] != 0):
                    #         stopFlag, id_L, id_R = stopline.findline(warper_img,locate_x,locate_y,length,left_x_current,right_x_current)
                    #         if stopFlag != None:
                    #             if frame_cnt == 0:
                    #                 print('STOP!')
                    #                 cv2.line(warper_img,id_L,id_R,(0,0,255),2)
                    #                 cv2.waitKey(-1)
                    #             frame += 1
                    #         if (frame_cnt > 0):
                    #             frame_cnt = 0
                    #         print(stopFlag,frame_cnt)
                    # SD.stop(warper_img)
                    SD.stoping_tmp(warper_img)
                    cv2.imshow('sliding_img', sliding_img)
                    steer_list.append(steer_theta)

                    x_location = (left_x_current + right_x_current) / 2

                    # low pass filter
                    steer_theta = lpf(steer_theta, 0.3)
                    lpf_list.append(steer_theta)

                    # steer theta : Degree
                    print("steer theta:" ,steer_theta)
                    #
                    # if steer_theta<-28.0 or steer_theta >28.0:
                    #     # auto_drive(pid_old)
                    #     auto_drive(steer_theta)

                    # else:
                        # degree angle
                    pid = round(pidcal.pid_control(steer_theta), 6)
                    pid_list.append(pid)

                    print("pid :", pid)

                    pid_old = pid
                    auto_drive(steer_theta)

                        # auto_drive(pid)
                else:
                    auto_drive(steer_theta)
                    # auto_drive(pid)
                    pidcal.error_sum = 0
                    pidcal.error_old = 0


                end_time = time.time()
                sec = end_time - cur_time

                fps = 1/sec
                fps_list.append(fps)

                print("Estimated fps {0} " . format(fps))

                # out.write(add_img)

                cv2.imshow("frame",frame)


                # out is only created when opt.video_idx > 2, so guard against None
                if opt.video_idx == -1 and out is not None:
                    print("frame.shape : {}".format(frame.shape))
                    out.write(frame)
                # cv2.imshow("src", warper_img)
                # cv2.imshow("out_img", output)
                cv2.imshow("cf_img", cf_img)

                key = cv2.waitKey(1) & 0xFF
                if key == 27: break
                elif key == ord('p'):
                    cv2.waitKey(-1)

    cap.release()
    cv2.destroyAllWindows()

    plt.plot(range(i), steer_list, label='steer')
    plt.plot(range(i), pid_list, label='pid')
    plt.plot(range(i), lpf_list, label='lpf')
    plt.legend()
    pid_info = pidcal.info_p()
    plt.savefig('output_video/video_idx:' + str(opt.video_idx) + ' ' + str(pid_info) + '.png', dpi=300)
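PidCal is project-local; the calls above (pid_control, error_sum, error_old, info_p) suggest a textbook PID controller. A minimal sketch under that assumption, with purely illustrative gains:

class PidCal:
    def __init__(self, kp=0.5, ki=0.0005, kd=0.05):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.error_sum = 0.0
        self.error_old = 0.0

    def pid_control(self, error):
        # accumulate the integral term, difference the derivative term
        self.error_sum += error
        d_error = error - self.error_old
        self.error_old = error
        return self.kp * error + self.ki * self.error_sum + self.kd * d_error

    def info_p(self):
        return 'kp={} ki={} kd={}'.format(self.kp, self.ki, self.kd)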
Code Example #14
import cv2
import numpy as np

# CameraUndistort, GradientFilter, Warper and WindowSearcher are
# project-local modules assumed to be importable.


class LaneFinder:
    def __init__(self, gray_shape, objpoints, imgpoints):
        self.camera_undistort = CameraUndistort(gray_shape, objpoints,
                                                imgpoints)
        self.gradient_filter = GradientFilter()
        self.warper = Warper(gray_shape)
        self.window_searcher = WindowSearcher()
        self.cnt = 0

    def run_pipeline(self, img, left_fit=None, right_fit=None):
        self.cnt += 1
        dst = self.camera_undistort.undistort(img)
        #cv2.imwrite('../output_images/undistort_images/undistort_' + file_name, dst)
        self.binary = self.gradient_filter.process(dst)
        #    cv2.imwrite('../output_images/gradient_filter/filter_' + file_name, binary.astype('uint8') * 255)

        #        cv2.imwrite('../output_images/debug/binary_%d.png' % self.cnt, self.binary.astype('uint8') * 255)
        #cv2.polylines(img, [np.int32(warper.src.reshape(-1,1,2))], True,(0,0,255), 3)
        #cv2.imwrite('../output_images/warped/src_' + file_name, img)
        #     warped = warper.warp(img)
        #     cv2.polylines(warped, [np.int32(warper.dst.reshape(-1,1,2))], True, (0,0,255), 3)
        #     cv2.imwrite('../output_images/warped/dst_' + file_name, warped)

        self.warped = self.warper.warp(self.binary)
        #       cv2.imwrite('../output_images/debug/warped_%d.png' % self.cnt, self.warped.astype('uint8') * 255)
        #    cv2.imwrite('../output_images/warped/dst_binary_' + file_name, warped.astype('uint8') * 255)
        out_img = self.window_searcher.fit_polynomial(self.warped, left_fit,
                                                      right_fit)
        # cv2.imwrite('../output_images/debug/out_%d.png' % self.cnt, out_img)
        # cv2.imwrite('../output_images/window_search/' + file_name, out_img)
        # unwarped = warper.unwarp(out_img)
        # cv2.imwrite('../output_images/unwarped/' + file_name, unwarped)
        # print(window_searcher.measure_curvature_real())
        # print(window_searcher.measure_center())

    def add_detect_lanes(self, img, left_line, right_line):
        ploty = left_line.ally
        left_fitx = left_line.allx
        right_fitx = right_line.allx
        dst = self.camera_undistort.undistort(img)
        warp_zero = np.zeros_like(self.warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array(
            [np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        newwarp = self.warper.unwarp(color_warp)
        # Combine the result with the original image
        result = cv2.addWeighted(dst, 1, newwarp, 0.3, 0)
        #        cv2.imwrite('../output_images/final/' + file_name, result)
        font = cv2.FONT_HERSHEY_SIMPLEX
        result = cv2.putText(
            result, 'radius of curvature = %d m' %
            ((left_line.radius_of_curvature + right_line.radius_of_curvature) /
             2.0), (50, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
        result = cv2.putText(
            result,
            'distance from the center = %.2f m' % left_line.line_base_pos,
            (50, 100), font, 1, (255, 255, 255), 2, cv2.LINE_AA)

        return result
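The Warper used across these lane-finding examples is project-local, and its constructor varies between projects (gray_shape, a sample frame, or no argument). Based on the members referenced above (src, dst, Minv, warp, unwarp), it is most likely a thin wrapper over OpenCV's perspective transform; a sketch of the core mechanics, with an assumed (src, dst) constructor:

import cv2
import numpy as np

class Warper:
    def __init__(self, src, dst):
        self.src = np.float32(src)   # four points in the camera image
        self.dst = np.float32(dst)   # their bird's-eye-view destinations
        self.M = cv2.getPerspectiveTransform(self.src, self.dst)
        self.Minv = cv2.getPerspectiveTransform(self.dst, self.src)

    def warp(self, img):
        h, w = img.shape[:2]
        return cv2.warpPerspective(img, self.M, (w, h))

    def unwarp(self, img):
        h, w = img.shape[:2]
        return cv2.warpPerspective(img, self.Minv, (w, h))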
Code Example #15
File: main.py  Project: parkhojun/KMUADC_2019
# Excerpt: earlier imports in the original file (rospy, cv2, Queue as que,
# Warper, SlideWindow, PidCal, etc.) are omitted here.
from std_msgs.msg import String
from sensor_msgs.msg import Image
from obstacle_detector.msg import Obstacles
from cv_bridge import CvBridge, CvBridgeError

from ackermann_msgs.msg import AckermannDriveStamped
from datetime import datetime # for record

from Stop_Counter import Stop_Counter
from CurveDetector import CurveDetector
from ObstacleDetector import ObstacleDetector, Position

x_location_old = None

warper = Warper()
slidewindow  = SlideWindow()
pidcal = PidCal()

q1 = que.Queue()
bridge = CvBridge()

now = datetime.now() # for record

cv_image = None
obstacles = None
ack_publisher = None
car_run_speed = 0.5

OBSTACLE_NUM = 3
Code Example #16
import cv2
import numpy as np
from thresholder import Thresholder
from warper import Warper
from polyfitter import Polyfitter
from polydrawer import Polydrawer

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    threshold_object = Thresholder()
    warp_object = Warper()
    fitt_object = Polyfitter()
    draw_object = Polydrawer()

    while cap.isOpened():
        ret, color = cap.read()
        if not ret:
            break
        print(color.shape)
        th = threshold_object.threshold(color)
        # cv2.imshow('Combined', threshold_object.threshold(color))
        # cv2.imshow('Warped', warp_object.warp(th))
        img = warp_object.warp(th)
        left_fit, right_fit = fitt_object.polyfit(img)
        print(left_fit, right_fit)
        img = draw_object.draw(color, left_fit, right_fit, warp_object.Minv)
        cv2.imshow('Lane Detection', img)
        if cv2.waitKey(1) & 0xFF == 27:
            break

cap.release()
cv2.destroyAllWindows()
Code Example #17
File: stopDetector.py  Project: jiyoony/kmu_autodrive
        # Excerpt: the start of this method, plus imports (cv2, time) and the
        # rest of the StopDetector class, appear earlier in the original file.
        nonzero = self.check_many_lines(warp_img)

        if nonzero > 1350 and self.check_time():
            self.on_detected_crosswallk()
            self.previous_time = time.time()
            return True

        return False


if __name__ == '__main__':
    stop_counter = StopDetector()
    warper = None

    cap = cv2.VideoCapture('../capture/origin18654.avi')
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        if warper is None:
            warper = Warper(frame)

        #  stop_counter.check_yellow_line(frame)
        warp_img = warper.warp(frame)
        stop_counter.check_crocss_walk(frame, warp_img)

        # cv2.imshow('frame', detect_img)
        if cv2.waitKey(0) == 27:
            break

    cap.release()
    cv2.destroyAllWindows()