Example 1
def visualize(img, seg_pred, exist_pred):
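    """Overlay fitted lane curves on a darkened BGR copy of the input frame.

    Each lane returned by getLane.prob2lines_CULane is fit with a quadratic
    and drawn pixel-by-pixel in its own color.
    """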
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    img = cv2.add(img, (-50, -50, -50, 0))
    lane_img = np.zeros_like(img)
    color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]],
                     dtype='uint8')  #b, g, r, y
    coord_mask = np.argmax(seg_pred, axis=0)
    exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]
    lines = []
    for i in getLane.prob2lines_CULane(seg_pred, exist):
        print(i)
        if len(i) < 8:
            continue
        i1_x = []
        i1_y = []
        for j in i:
            i1_x.append(j[0])
            i1_y.append(j[1])
        lines.append(np.polyfit(np.array(i1_x), np.array(i1_y), 2))

    yMax = img.shape[1]
    for li in range(len(lines)):
        #if exist_pred[0, li] > 0.5:
        #    lane_img[coord_mask == (li + 1)] = color[li]
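        # Note: "Y" below sweeps the image columns and "X" is the fitted row,
        # so the write is img[row, col] = img[int(X), int(Y)].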
        for Y in range(yMax):
            X = lines[li][0] * Y**2 + lines[li][1] * Y + lines[li][2]
            if (0 > Y) or (Y > 750):
                continue
            if (0 > X) or (X > 320):
                continue

            img[int(X), int(Y)] = color[li]

    #img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
    return img
Example 2
def post_processor(arg):
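    """Consumer process: pull (input, seg_pred, exist_pred) tuples from
    img_queue, print the detected lane coordinates, and optionally show the
    visualized result until 'q' is pressed.
    """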
    img_queue, arg_visualize = arg

    while True:
        if not img_queue.empty():
            x, seg_pred, exist_pred = img_queue.get()
            seg_pred = seg_pred.numpy()[0]
            exist_pred = exist_pred.numpy()

            exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]

            print(exist)
            for i in getLane.prob2lines_CULane(seg_pred, exist):
                print(i)

            if arg_visualize:
                frame = x.squeeze().permute(1, 2, 0).numpy()
                img = visualize(frame, seg_pred, exist_pred)
                cv2.imshow('input_video', frame)
                cv2.imshow("output_video", img)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        else:
            # The queue is empty: busy-wait until the producer pushes the next result.
            pass
Example 3
def main():
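    """Single-image demo: run the network on one image, overlay the predicted
    lane mask and polylines, and save/show the result.
    """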
    args = parse_args()
    img_path = args.img_path
    weight_path = args.weight_path

    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = transform_img({'img': img})['img']
    x = transform_to_net({'img': img})['img']
    x.unsqueeze_(0)

    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()

    seg_pred, exist_pred = net(x)[:2]
    seg_pred = seg_pred.detach().cpu().numpy()
    exist_pred = exist_pred.detach().cpu().numpy()
    seg_pred = seg_pred[0]
    exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]

    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    lane_img = np.zeros_like(img)
    color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
    coord_mask = np.argmax(seg_pred, axis=0)
    for i in range(0, 4):
        if exist_pred[0, i] > 0.5:
            lane_img[coord_mask == (i + 1)] = color[i]
    img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
    cv2.imwrite("demo/demo_result.jpg", img)

    print(" ")
    print("Lane Lines:")
    for x in getLane.prob2lines_CULane(seg_pred, exist):
        print(x)
        img = cv2.polylines(img, np.int32([np.array(x)]), 0, (0, 0, 255), thickness=3)
    #
    cv2.imwrite("demo/demo_result2.png", img)

    print(" ")
    print("exist: ", exist)
    print("exist_pred: ", exist_pred)
    if args.visualize:
        print([1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)])
        cv2.imshow("", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example 4
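# Batched inference over the test set: run the network, convert the per-lane
# probability maps to coordinate lists, and save them as "*.lines.txt" files
# under out_path, mirroring the input directory layout.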
with torch.no_grad():
    for batch_idx, sample in enumerate(test_loader):
        img = sample['img'].to(device)
        img_name = sample['img_name']

        seg_pred, exist_pred = net(img)[:2]
        seg_pred = F.softmax(seg_pred, dim=1)
        seg_pred = seg_pred.detach().cpu().numpy()
        exist_pred = exist_pred.detach().cpu().numpy()

        for b in range(len(seg_pred)):
            seg = seg_pred[b]
            exist = [1 if exist_pred[b, i] > 0.5 else 0 for i in range(4)]
            lane_coords = getLane.prob2lines_CULane(seg,
                                                    exist,
                                                    resize_shape=(590, 1640),
                                                    y_px_gap=20,
                                                    pts=18)

            path_tree = split_path(img_name[b])
            save_dir, save_name = path_tree[-3:-1], path_tree[-1]
            save_dir = os.path.join(out_path, *save_dir)
            save_name = save_name[:-3] + "lines.txt"
            save_name = os.path.join(save_dir, save_name)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)

            with open(save_name, "w") as f:
                for lane in lane_coords:
                    for (x, y) in lane:
                        print("{} {}".format(x, y), end=" ", file=f)
                    print(file=f)  # one lane per line, as in the CULane lines.txt format
Example 5
def main():
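    """Resolve the data root from the detected platform, then run lane
    detection on every frame of CCT007-Scene-045 and save the overlaid
    frames to the Lane/ output directory.
    """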
    #
    # determine which computer/platform we are running on
    pf_detected = None  # remains None if the host is not recognized below
    if (os.name == "posix"):
        os_list = os.uname()
        if (os_list[0] == "Darwin"):
            pf_detected = 'MAC'
        elif (os_list[0] == "Linux"):
            if (os_list[1] == 'en4119351l'):
                pf_detected = 'Quadro'
            elif (os_list[1] == '19fef43c2174'):
                pf_detected = 'Exxact'
            elif (os_list[1] == 'EN4113948L'):
                pf_detected = 'Kevin'
    else:
        pf_detected = 'PC'

    # set the root path based on the computer/platform
    #   rootPath is path to directory in which webots/ and imdata/ directories reside
    if (pf_detected == 'MAC'):
        rootPath = '/Users/mes/Documents/ASU-Classes/Research/Ben-Amor/code/'

    elif (pf_detected == 'Quadro'):
        rootPath = '/home/local/ASUAD/mestric1/Documents/AVCES/'

    elif (pf_detected == 'Exxact'):
        rootPath = '/home/dockeruser/Documents/AVCES/'

    elif (pf_detected == 'Kevin'):
        rootPath = '/home/local/ASUAD/mestric1/Documents/AVCES/'

    elif (pf_detected == 'PC'):
        # rootPath = 'C:\Users\cesar\Desktop\Furi\'
        print("PC platform detected.  Exiting.")
        exit()
    else:
        print("Computer/Platform not detected.  Exiting.")
        exit()
    #
    # args = parse_args()
    #  CCT007-Scene-005 has 153 frames
    #  CCT007-Scene-009 has 218 frames
    #  CCT007-Scene-045 has 665 frames
    # Load the trained weights once rather than reloading them for every frame.
    weight_path = "experiments/exp10/exp10_best.pth"
    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()

    for fno in range(1, 666):
        img_path = rootPath + "imdata/video/processed/CCT007/CCT007-Scene-045/Run/rgb{0:06d}.png".format(
            fno)

        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = transform_img({'img': img})['img']
        x = transform_to_net({'img': img})['img']
        x.unsqueeze_(0)

        seg_pred, exist_pred = net(x)[:2]
        seg_pred = seg_pred.detach().cpu().numpy()
        exist_pred = exist_pred.detach().cpu().numpy()
        seg_pred = seg_pred[0]
        exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        lane_img = np.zeros_like(img)
        color = np.array(
            [[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]],
            dtype='uint8')
        coord_mask = np.argmax(seg_pred, axis=0)
        for i in range(0, 4):
            if exist_pred[0, i] > 0.5:
                lane_img[coord_mask == (i + 1)] = color[i]
        img = cv2.addWeighted(src1=lane_img,
                              alpha=0.8,
                              src2=img,
                              beta=1.,
                              gamma=0.)
        # cv2.imwrite("demo/demo_result.jpg", img)

        # print(" ")
        # print("Lane Lines:")
        for x in getLane.prob2lines_CULane(seg_pred, exist):
            # print(x)
            img = cv2.polylines(img,
                                np.int32([np.array(x)]),
                                0, (0, 0, 255),
                                thickness=3)
        #
        img_rsz = cv2.resize(img, (1280, 720),
                             interpolation=cv2.INTER_LANCZOS4)
        cv2.imwrite(
            rootPath +
            "imdata/video/processed/CCT007/CCT007-Scene-045/Lane/lin{0:06d}.png"
            .format(fno), img_rsz)
        #
        print("frame {} is complete.".format(fno))
Example 6
def main():
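    """Video demo: either feed frames through a multiprocessing pre/post
    processing pipeline or read them directly with cv2.VideoCapture, running
    the network on each frame and printing per-frame timing.
    """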
    args = parse_args()
    video_path = args.video_path
    weight_path = args.weight_path
    if pipeline:
        input_queue = JoinableQueue()
        pre_process = Process(target=pre_processor,
                              args=((input_queue, video_path), ))
        pre_process.start()
        output_queue = SimpleQueue()
        post_process = Process(target=post_processor,
                               args=((output_queue, args.visualize), ))
        post_process.start()
    else:
        cap = cv2.VideoCapture(video_path)
    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()
    net.cuda()
    while True:
        if pipeline:
            loop_start = time.time()
            x = input_queue.get()
            input_queue.task_done()
            gpu_start = time.time()
            seg_pred, exist_pred = network(net, x)
            gpu_end = time.time()
            output_queue.put((x, seg_pred, exist_pred))
            loop_end = time.time()
        else:
            if not cap.isOpened():
                break
            ret, frame = cap.read()
            if ret:
                loop_start = time.time()
                frame = transform_img({'img': frame})['img']
                img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                x = transform_to_net({'img': img})['img']
                x.unsqueeze_(0)
                gpu_start = time.time()
                seg_pred, exist_pred = network(net, x)
                gpu_end = time.time()
                seg_pred = seg_pred.numpy()[0]
                exist_pred = exist_pred.numpy()
                exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]
                i2i2 = []

                # Flatten the per-lane coordinate lists into a single list of points.
                for i in getLane.prob2lines_CULane(seg_pred, exist):
                    i2i2 += i
                loop_end = time.time()
                if args.visualize:
                    img = visualize(img, seg_pred, exist_pred)
                    cv2.imshow('input_video', frame)
                    cv2.imshow("output_video", img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break
        print("gpu_runtime:", gpu_end - gpu_start, "FPS:",
              int(1 / (gpu_end - gpu_start)))
        print("total_runtime:", loop_end - loop_start, "FPS:",
              int(1 / (loop_end - loop_start)))
    cv2.destroyAllWindows()
Example 7
def main():
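    """Single-image demo for the SCNN / ENet-SAD models: load the experiment
    config, build the requested model, run inference, and save a lane-overlay
    result to image/demo_result.jpg.
    """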

    # ------------ Arguments parsing ------------
    args = parse_args()
    img_path = args.img_path
    weight_path = args.weight_path
    exp_dir = args.exp_dir
    exp_dir = exp_dir.rstrip('/')
    exp_name = exp_dir.split('/')[-1]

    with open(os.path.join(exp_dir, "model_config.json")) as f:
        exp_cfg = json.load(f)
    resize_shape = tuple(exp_cfg['dataset']['resize_shape'])

    # net = SCNN(input_size=(512, 288), pretrained=False)
    # ------------ preparation ------------
    if args.model == "scnn":
        net = SCNN(resize_shape, pretrained=False)
    elif args.model == "enet_sad":
        net = ENet_SAD(resize_shape, sad=True)
    else:
        raise ValueError("Unsupported model: 'model' in 'model_config.json' must be 'scnn' or 'enet_sad'.")

    mean=(0.3598, 0.3653, 0.3662) # TuSimple mean, std
    std=(0.2573, 0.2663, 0.2756)
    # Resize the image for TuSimple format
    transform_img = Resize(resize_shape)
    transform_to_net = Compose(ToTensor(), Normalize(mean=mean, std=std))

    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = transform_img({'img': img})['img']
    x = transform_to_net({'img': img})['img']
    x.unsqueeze_(0)

    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()

    seg_pred, exist_pred = net(x)[:2]
    seg_pred = seg_pred.detach().cpu().numpy()
    exist_pred = exist_pred.detach().cpu().numpy()
    seg_pred = seg_pred[0]
    exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]

    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    lane_img = np.zeros_like(img)
    color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
    coord_mask = np.argmax(seg_pred, axis=0)
    for i in range(0, 4):
        if exist_pred[0, i] > 0.5:
            lane_img[coord_mask == (i + 1)] = color[i]
    img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
    cv2.imwrite("image/demo_result.jpg", img)

    for x in getLane.prob2lines_CULane(seg_pred, exist):
        print(x)

    if args.visualize:
        print([1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)])
        cv2.imshow("", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example 8
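        # Preprocess one frame, run the network, and convert the per-lane
        # probability maps to pixel coordinates for drawing on img_vis.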
        img = transform({'img': img})['img']
        img = img.unsqueeze(0).to(device)

        with torch.no_grad():
            seg_pred, exist_pred = net(img)[:2]
        seg_pred = F.softmax(seg_pred, dim=1)
        seg_pred = seg_pred.cpu().numpy()
        exist_pred = exist_pred.cpu().numpy()

        b=0
        seg = seg_pred[b]
        exist = [1 if exist_pred[b, i] > 0.5 else 0 for i in range(4)]
        if dataset_name == 'Tusimple':
            lane_coords = getLane.prob2lines_tusimple(seg, exist, resize_shape=original_shape[::-1], y_px_gap=10, pts=56)
        elif dataset_name == 'CULane':
            lane_coords = getLane.prob2lines_CULane(seg, exist, resize_shape=original_shape[::-1], y_px_gap=20, pts=18)
        for i in range(len(lane_coords)):
            lane_coords[i] = sorted(lane_coords[i], key=lambda pair: pair[1])
            
        for lane in lane_coords:
            # Keep only points with a valid (non-negative) x coordinate.
            valid_pts = np.int32([lane])[:, np.int32([lane])[0, :, 0] >= 0]
            cv2.polylines(img_vis, valid_pts, isClosed=False, color=(0, 0, 255), thickness=2)

        #show segmentation map
        seg = seg.swapaxes(0,2).swapaxes(0,1)
        seg = cv2.resize(seg, dsize=original_shape, interpolation=cv2.INTER_CUBIC)

        lane_img = np.zeros_like(img_vis)
        color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
        coord_mask = np.argmax(seg, axis=-1)
        for i in range(0, 4):
            if exist_pred[b, i] > 0.5:
                lane_img[coord_mask == (i + 1)] = color[i]
Example 9
def main():
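    """Single-image demo: run the network (on GPU if available), overlay the
    predicted lane mask on the resized input, and show or save the result.
    """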
    args = parse_args()
    img_path = args.img_path
    weight_path = args.weight_path

    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    x = transform({'img': img})['img']
    x.unsqueeze_(0)

    save_dict = torch.load(weight_path, map_location='cpu')

    net.load_state_dict(save_dict['net'])
    net.eval()

    if torch.cuda.is_available():
        x = x.cuda()
    
    # start_time = time.time()
    # for i in tqdm(range(100)):
    #     seg_pred, exist_pred = net(x)[:2]
    # print("Spend time {} seconds for 100 times".format(time.time() - start_time))
    # CPU spend time 64.43067598342896 seconds for 100 times, --> 640ms
    # GPU spend time 7.734357118606567 seconds for 100 times, --> 70ms

    seg_pred, exist_pred = net(x)[:2]
    # pdb.set_trace()
    # (pdb) pp x.shape
    # torch.Size([1, 3, 288, 512])

    # (pdb) pp seg_pred.shape
    # torch.Size([1, 5, 288, 512])

    # (Pdb) pp seg_pred.min(), seg_pred.max()
    # (tensor(-13.8127, device='cuda:0', grad_fn=<MinBackward1>),
    # tensor(26.3522, device='cuda:0', grad_fn=<MaxBackward1>))

    # (Pdb) pp exist_pred.shape, exist_pred
    # (torch.Size([1, 4]),
    #  tensor([[0.9989, 0.9999, 0.9993, 0.9983]], device='cuda:0',
    #        grad_fn=<SigmoidBackward>))


    seg_pred = seg_pred.detach().cpu().numpy()
    exist_pred = exist_pred.detach().cpu().numpy()
    seg_pred = seg_pred[0]
    exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]

    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    img = cv2.resize(img, (image_resize_width, 288))
    lane_img = np.zeros_like(img)
    color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
    coord_mask = np.argmax(seg_pred, axis=0)
    for i in range(0, 4):
        if exist_pred[0, i] > 0.5:
            lane_img[coord_mask == (i + 1)] = color[i]
    cv2.imshow("lane", lane_img)

    # pdb.set_trace()
    # (Pdb) seg_pred.shape, coord_mask.shape
    # ((5, 288, 512), (288, 512))

    img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
    for x in getLane.prob2lines_CULane(seg_pred, exist):
        print(x)

    if args.visualize:
        print([1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)])
        cv2.imshow("", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        cv2.imwrite("demo/demo_result.jpg", img)
Example 10
def main():
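    """Video demo that builds the model from the experiment config and, in the
    non-pipeline path, writes the visualized frames to video/demo.avi.
    """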
    args = parse_args()
    video_path = args.video_path
    weight_path = args.weight_path

    # Loading configs
    exp_dir = args.exp_dir
    exp_dir = exp_dir.rstrip('/')
    exp_name = exp_dir.split('/')[-1]

    with open(os.path.join(exp_dir, "model_config.json")) as f:
        exp_cfg = json.load(f)
    resize_shape = tuple(exp_cfg['dataset']['resize_shape'])

    # Build the scnn/enet-sad model according to the argument
    if args.model == "scnn":
        net = SCNN(resize_shape, pretrained=True)
    elif args.model == "enet_sad":
        net = ENet_SAD(resize_shape, sad=True)
    else:
        raise ValueError(
            "Unsupported model: '--model' must be 'scnn' or 'enet_sad'."
        )

    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    # Resize the image for TuSimple format
    transform_img = Resize(resize_shape)
    transform_to_net = Compose(ToTensor(), Normalize(mean=mean, std=std))

    if pipeline:
        input_queue = JoinableQueue()
        pre_process = Process(target=pre_processor,
                              args=((input_queue, video_path), ))
        pre_process.start()

        output_queue = SimpleQueue()
        post_process = Process(target=post_processor,
                               args=((output_queue, args.visualize), ))
        post_process.start()
    else:
        cap = cv2.VideoCapture(video_path)

    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()
    net.cuda()

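    # cv2.VideoWriter expects the frame size as (width, height); this assumes
    # resize_shape from model_config.json is already in that order.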
    result = cv2.VideoWriter('video/demo.avi', cv2.VideoWriter_fourcc(*'MJPG'),
                             30, resize_shape)

    while True:
        if pipeline:
            loop_start = time.time()
            x = input_queue.get()
            input_queue.task_done()

            gpu_start = time.time()
            seg_pred, exist_pred = network(net, x)
            gpu_end = time.time()

            output_queue.put((x, seg_pred, exist_pred))

            loop_end = time.time()

        else:
            if not cap.isOpened():
                break

            ret, frame = cap.read()

            if ret:
                loop_start = time.time()
                #frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
                frame = transform_img({'img': frame})['img']
                img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                x = transform_to_net({'img': img})['img']
                x.unsqueeze_(0)

                gpu_start = time.time()
                seg_pred, exist_pred = network(net, x)
                gpu_end = time.time()

                seg_pred = seg_pred.numpy()[0]
                exist_pred = exist_pred.numpy()

                exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]

                print(exist)
                for i in getLane.prob2lines_CULane(seg_pred, exist):
                    print(i)

                loop_end = time.time()

                if args.visualize:
                    img = visualize(img, seg_pred, exist_pred)
                    #cv2.imshow('input_video', frame)
                    #cv2.imshow("output_video", img)
                    result.write(img)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break

        print("gpu_runtime:", gpu_end - gpu_start, "FPS:",
              int(1 / (gpu_end - gpu_start)))
        print("total_runtime:", loop_end - loop_start, "FPS:",
              int(1 / (loop_end - loop_start)))

    result.release()
    cv2.destroyAllWindows()