Example #1
def train(args):
    generator = Generator(patch_size=args.size, batch_size=args.batch, channels=args.channels, augment=args.augmentation)

    model, model_name = get_model(args.algorithm, args.size, args.classes, get_loss(args.loss), args.channels)

    if args.name:
        run_name = "{}_{}".format(model_name, args.name)
    else:
        timenow = datetime.datetime.now().strftime("%Y.%m.%d_%H.%M.%S")
        run_name = "{}_{}_{}channel_{}_{}".format(model_name, timenow, args.channels, args.loss, "augment" if args.augmentation else "noaugment")

    create_directories(run_name)

    # TODO: Update with ability to choose weights
    if os.path.isfile('weights/{}.hdf5'.format(model_name)):
        load = input("Saved weights found. Load? (y/n)")
        if load == "y":
            print("Loading saved weights")
            model.load_weights('weights/{}.hdf5'.format(model_name))

    if args.verbose:
        model.summary()

    # Some systems don't have graphviz installed. Skip the model plots if it's missing.
    try:
        plot_model(model, os.path.join('images', run_name, 'model.png'))
        plot_model(model, os.path.join('images', run_name, 'model_shapes.png'), show_shapes=True)
    except Exception as e:
        logger.warning("GraphViz missing. Skipping model plot")
        logger.warning(e)

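    # Save only the weights with the best validation loss seen so far.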
    model_checkpoint = ModelCheckpoint('weights/{}.hdf5'.format(run_name), monitor='val_loss', save_best_only=True)

    # Set up the TensorBoard logging callback
    tensorboard_callback = TensorBoard(log_dir='tensorboard_log/{}/'.format(run_name), histogram_freq=0, write_graph=True, write_images=False)

    val_x, val_y = generator.get_validation_data()

    logger.debug("Starting training")

    model.fit_generator(generator.generator(), steps_per_epoch=args.batch,
                        epochs=args.epochs, verbose=1 if args.verbose else 2,
                        callbacks=[model_checkpoint, tensorboard_callback],
                        validation_data=(val_x, val_y))
Example #2
    def __init__(self):
        # Get dataset
        self.loader = Generator()

        # Get Agent
        if p.model_path == "":
            lane_agent = agent.Agent()
        else:
            lane_agent = agent.Agent()
            lane_agent.load_weights(640, "tensor(0.2298)")

        # Check GPU
        print('Setup GPU mode')
        if torch.cuda.is_available():
            lane_agent.cuda()

        # testing
        print('Testing loop')
        lane_agent.evaluate_mode()

        self.lane_agent = lane_agent
Example #3
def Testing():
    print('Testing')

    #########################################################################
    ## Get dataset
    #########################################################################

    print("Check mode for dataset")
    # skip dataset loading when testing on a single image or video
    if (p.mode not in [1, 2]):
        loader = Generator()
        print("Dataset loaded")
    else:
        print("Skipped loading dataset")

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(640, "tensor(0.2298)")
    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0:  # check model with test data
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0)

    elif p.mode == 1:  # check model with video
        cap = cv2.VideoCapture("video_path")
        while (cap.isOpened()):
            ret, frame = cap.read()
            if not ret:  # stop when the video ends
                break
            prevTime = time.time()
            frame = cv2.resize(frame, (512, 256)) / 255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([frame]))
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1 / (sec)
            s = "FPS : " + str(fps)
            cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0))
            cv2.imshow('frame', ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    elif p.mode == 2:  # check model with a picture
        test_image = cv2.imread(p.test_root_url +
                                "clips/0530/1492720840345996040_0/20.jpg")
        test_image = cv2.resize(test_image, (512, 256)) / 255.0
        test_image = np.rollaxis(test_image, axis=2, start=0)
        _, _, ti = test(lane_agent, np.array([test_image]))
        cv2.imshow("test", ti[0])
        cv2.waitKey(0)

    elif p.mode == 3:  #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #4
File: test.py Project: wzhang1/PINet_new
def Testing():
    print('Testing')
    
    #########################################################################
    ## Get dataset
    #########################################################################
    if p.mode != 2:
        print("Get dataset")
        loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        # lane_agent.load_weights(804, "tensor(0.5786)")
        lane_agent.load_weights(32, "tensor(1.1001)")

	
    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0 : # check model with test data 
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0) 

    elif p.mode == 1: # check model with video
        cap = cv2.VideoCapture("/home/kym/research/autonomous_car_vision/lane_detection/code/Tusimple/git_version/LocalDataset_Day.mp4")
        while(cap.isOpened()):
            ret, ori_frame = cap.read()
            if not ret:  # stop when the video ends
                break
            torch.cuda.synchronize()
            prevTime = time.time()
            frame = cv2.resize(ori_frame, (512,256))/255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([frame])) 
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1/(sec)
            s = "FPS : "+ str(fps)
            ti[0] = cv2.resize(ti[0], (1280,800))
            # cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
            # cv2.imshow('frame',ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    elif p.mode == 2: # check model with a picture
        # test_image = cv2.imread(p.test_root_url+"clips/0530/1492720840345996040_0/20.jpg")
        # test_image = cv2.resize(test_image, (512,256))/255.0
        # test_image = np.rollaxis(test_image, axis=2, start=0)
        # _, _, ti = test(lane_agent, np.array([test_image]))
        # cv2.imshow("test", ti[0])
        # cv2.waitKey(0)   
        test_images = './test_curves'
        
        save_test_dir = './test_curves_result'
        if not os.path.exists(save_test_dir):
            os.makedirs(save_test_dir)

        img_list = os.listdir(test_images)
        img_list = [img for img in img_list if '.jpg' in img]
        use_ori = True
        print("image test")
        for img in img_list[3:4]:
            print("Now Dealing With:",img)

            ori_image = cv2.imread(test_images + '/' + img) #hw, cv2.IMREAD_UNCHANGED
            test_image = cv2.resize(ori_image, (p.x_size, p.y_size)) / 255.0
            test_image = np.rollaxis(test_image, axis=2, start=0)

            if not use_ori:
                _, _, ti = test(lane_agent, np.array([test_image]))
            if use_ori:
                w_ratio = p.x_size * 1.0 / ori_image.shape[1]
                h_ratio = p.y_size* 1.0 / ori_image.shape[0]
                _, _, ti = test_ori(lane_agent, ori_image, np.array([test_image]), w_ratio, h_ratio,draw_type= 'point',thresh=p.threshold_point)
            cv2.imwrite(save_test_dir + '/' + "{}_tested.jpg".format(img.split('.jpg')[0]), ti[0])

    elif p.mode == 3: #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #5
def Testing():
    print('Testing')

    ## Get dataset

    print("Get dataset")
    loader = Generator()

    ## Get agent and model

    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(804, "tensor(0.5786)")

    ## testing

    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0:  # check model with test data
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0)

    elif p.mode == 1:  # check model with video
        cap = cv2.VideoCapture(
            "/Users/minootaghavi/Desktop/GA/Capstone-Project-1/test/IMG_1398.mp4"
        )
        writer = cv2.VideoWriter('filename.avi',
                                 cv2.VideoWriter_fourcc(*'MJPG'), 10,
                                 (1280, 800))
        while (cap.isOpened()):
            ret, frame = cap.read()
            if not ret:  # stop when the video ends
                break
            #torch.cuda.synchronize()
            prevTime = time.time()
            frame = cv2.resize(frame, (512, 256)) / 255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([frame]))
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1 / (sec)
            s = "FPS : " + str(fps)
            ti[0] = cv2.resize(ti[0], (1280, 800))
            cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0))
            cv2.imshow('frame', ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            writer.write(ti[0])

        cap.release()
        writer.release()  # finalize the output video file
        cv2.destroyAllWindows()

    elif p.mode == 2:  # check model with a picture
        test_image = cv2.imread(
            "/Users/minootaghavi/Desktop/GA/tusimple-trained model/minoo/Deep Neural Networks/data/test_set/clips/0530/1492626047222176976_0/20.img"
        )
        test_image = cv2.resize(test_image, (512, 256)) / 255.0
        test_image = np.rollaxis(test_image, axis=2, start=0)
        _, _, ti = test(lane_agent, np.array([test_image]))
        cv2.imwrite(
            '/Users/minootaghavi/Desktop/GA/tusimple-trained model/minoo/Deep Neural Networks/save_test/image2_result.png',
            ti[0])
        cv2.imshow("test", ti[0])
        cv2.waitKey(0)

    elif p.mode == 3:  #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #6
File: test.py Project: timsean/PINet_new
def Testing():
    print('Testing')

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(804, "tensor(0.5786)")

    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    cudnn.benchmark = True
    cudnn.fastest = True

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0:  # check model with test data
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0)

    elif p.mode == 1:  # check model with video
        cap = cv2.VideoCapture(
            "/home/tim/Codes-for-Lane-Detection/ERFNet-CULane-PyTorch/data/day2.MOV"
        )
        while (cap.isOpened()):
            ret, frame = cap.read()
            if not ret:  # stop when the video ends
                break
            torch.cuda.synchronize()
            prevTime = time.time()
            #            frame = frame[:-489, :, :]
            frame = cv2.resize(frame, (512, 256)) / 255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([frame]))
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1 / (sec)
            s = "FPS : " + str(fps)
            ti[0] = cv2.resize(ti[0], (1280, 800))
            cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0))
            cv2.imshow('frame', ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    elif p.mode == 2:  # check model with a picture
        test_image = cv2.imread(p.test_root_url +
                                "clips/0530/1492720840345996040_0/20.jpg")
        test_image = cv2.resize(test_image, (512, 256)) / 255.0
        test_image = np.rollaxis(test_image, axis=2, start=0)
        _, _, ti = test(lane_agent, np.array([test_image]))
        cv2.imshow("test", ti[0])
        cv2.waitKey(0)

    elif p.mode == 3:  #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #7
def Testing():
    print('Testing')

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_assistant = Assistant.Assistant()
    else:
        lane_assistant = Assistant.Assistant()
        lane_assistant.load_weights(804, "tensor(0.5786)")

    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_assistant.cuda()

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_assistant.evaluate_mode()

    if p.mode == 0:  # check model with test data
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_assistant, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0)

    elif p.mode == 1:  # check model with video
        cap = cv2.VideoCapture("abc.mp4")
        while (cap.isOpened()):
            ret, frame = cap.read()
            if not ret:  # stop when the video ends
                break
            torch.cuda.synchronize()
            prevTime = time.time()
            frame = cv2.resize(frame, (512, 256)) / 255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_assistant, np.array([frame]))
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1 / (sec)
            s = "FPS : " + str(fps)
            ti[0] = cv2.resize(ti[0], (1280, 800))
            cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0))
            cv2.imshow('frame', ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    elif p.mode == 2:  # check model with a picture
        test_image = cv2.imread('20.jpg')
        test_image = cv2.resize(test_image, (512, 256)) / 255.0
        test_image = np.rollaxis(test_image, axis=2, start=0)
        _, _, ti = test(lane_assistant, np.array([test_image]))
        cv2.imshow("test", ti[0])
        cv2.waitKey(0)

    elif p.mode == 3:  #evaluation
        print("evaluate")
        evaluation(loader, lane_assistant)
Example #8
def Testing(filename=None):
    print('Testing')

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(640, "tensor(0.2298)")
    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0:  # check model with test data
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0)

    elif p.mode == 1:  # check model with video
        cap = cv2.VideoCapture("video_path")
        while (cap.isOpened()):
            ret, frame = cap.read()
            if not ret:  # stop when the video ends
                break
            prevTime = time.time()
            frame = cv2.resize(frame, (512, 256)) / 255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([frame]))
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1 / (sec)
            s = "FPS : " + str(fps)
            cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0))
            cv2.imshow('frame', ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    elif p.mode == 2:  # check model with a picture
        #test_image = cv2.imread(p.test_root_url+"clips/0530/1492720840345996040_0/20.jpg")
        test_image = cv2.imread(filename)
        assert test_image is not None, 'Test image {} not Found.'.format(
            filename)
        test_image = cv2.resize(test_image, (512, 256)) / 255.0
        test_image = np.rollaxis(test_image, axis=2, start=0)
        _, _, ti = test(lane_agent, np.array([test_image]))

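        # Write the result next to the input image as <name>_output<ext>.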
        folder, filename_ = os.path.split(filename)
        name, ext = os.path.splitext(filename_)
        output_filename = os.path.join(folder, name + '_output' + ext)  # ext already includes the dot

        cv2.imwrite(output_filename, ti[0])
        #cv2.imshow("test", ti[0])
        #cv2.waitKey(0)

    elif p.mode == 3:  #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #9
File: train.py Project: zhuMingXu/PINet
def Training():
    print('Training')

    ####################################################################
    ## Hyper parameter
    ####################################################################
    print('Initializing hyper parameter')

    vis = visdom.Visdom()
    loss_window = vis.line(X=torch.zeros((1, )).cpu(),
                           Y=torch.zeros((1)).cpu(),
                           opts=dict(xlabel='epoch',
                                     ylabel='Loss',
                                     title='Training Loss',
                                     legend=['Loss']))

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(4235, "tensor(0.2127)")

    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## Loop for training
    ##############################
    print('Training loop')
    step = 0
    for epoch in range(p.n_epoch):
        lane_agent.training_mode()
        for inputs, target_lanes, target_h, test_image in loader.Generate():
            #training
            print("epoch : " + str(epoch))
            print("step : " + str(step))
            loss_p = lane_agent.train(inputs, target_lanes, target_h, epoch,
                                      lane_agent)
            loss_p = loss_p.cpu().data

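            # Every 50 steps, append the current loss to the Visdom line plot.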
            if step % 50 == 0:
                vis.line(X=torch.ones((1, 1)).cpu() * int(step / 50),
                         Y=torch.Tensor([loss_p]).unsqueeze(0).cpu(),
                         win=loss_window,
                         update='append')

            if step % 100 == 0:
                lane_agent.save_model(int(step / 100), loss_p)
                testing(lane_agent, test_image, step, loss_p)
            step += 1

        #evaluation
        if epoch > 0 and epoch % 10 == 0:
            print("evaluation")
            lane_agent.evaluate_mode()
            th_list = [0.3, 0.5, 0.7]
            lane_agent.save_model(int(step / 100), loss_p)

            for th in th_list:
                print("generate result")
                print(th)
                test.evaluation(loader,
                                lane_agent,
                                thresh=th,
                                name="test_result_" + str(epoch) + "_" +
                                str(th) + ".json")

            for th in th_list:
                print("compute score")
                print(th)
                with open("eval_result_" + str(th) + "_.txt",
                          'a') as make_file:
                    make_file.write("epoch : " + str(epoch) + " loss : " +
                                    str(loss_p.cpu().data))
                    make_file.write(
                        evaluation.LaneEval.bench_one_submit(
                            "test_result_" + str(epoch) + "_" + str(th) +
                            ".json", "test_label.json"))
                    make_file.write("\n")

        if int(step) > 700000:
            break
Example #10
def test(args):
    prediction_cutoff = 0.5
    generator = Generator(patch_size=args.size, channels=args.channels)

    model, model_name = get_model(args.algorithm, args.size, args.classes, get_loss(args.loss), args.channels)

    weight_file = select_weights(model_name)
    logger.debug("Loading saved weights from weights/{}".format(weight_file))
    model.load_weights('weights/{}'.format(weight_file))

    create_directories(os.path.splitext(weight_file)[0])
    save_folder = os.path.join('images', os.path.splitext(weight_file)[0])

    test_images = ['6140_3_1', '6180_4_3']

    for test_image in test_images:
        logger.debug('Testing image {}'.format(test_image))
        test_x, test_y, new_size, splits, w, h = generator.get_test_patches(image=test_image, network_size=args.size)

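        # The constant cutoff channel is prepended below so that argmax later
        # returns background (class 0) unless a class probability exceeds prediction_cutoff.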
        cutoff_array = np.full((len(test_x), args.size, args.size, 1), fill_value=prediction_cutoff)

        test_y_result = model.predict(test_x, batch_size=1, verbose=1)
        test_y_result = np.append(cutoff_array, test_y_result, axis=3)

        out = np.zeros((new_size, new_size, args.classes + 1))

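        # Stitch the per-patch predictions back into one full-size (new_size x new_size) map.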
        for row in range(splits):
            for col in range(splits):
                out[args.size * row:args.size * (row + 1), args.size * col:args.size * (col + 1), :] = test_y_result[row * splits + col, :, :, :]

        result = np.argmax(np.squeeze(out), axis=-1).astype(np.uint8)
        result = result[:w, :h]

        palette = []

        palette.extend([255, 255, 255])

        for i in range(args.classes):
            palette.extend(list(webcolors.hex_to_rgb(COLOR_MAPPING[int(i + 1)])))

        # for i in range(len(test_x)):
        result_img = Image.fromarray(result, mode='P')
        result_img.putpalette(palette)
        result_img.save(os.path.join(save_folder, '{}_{}.png'.format(test_image, model_name)))

        if test_y is not None:
            y_train = np.load(os.path.join('data/cache/{}_y.npy'.format(test_image)))
            y_train[:, :, 6] = np.logical_or(y_train[:, :, 6], y_train[:, :, 7])

            y_train = y_train[:, :, :args.classes]

            y_mask = generator.flatten(y_train)
            result_img = Image.fromarray(y_mask, mode='P')
            result_img.putpalette(palette)
            result_img.save(os.path.join(save_folder, '{}_gt.png'.format(test_image)))

            y_mask_flat = y_mask.flatten()
            result_flat = result.flatten()

            mean_iou = calculate_mean_iou(y_mask_flat, result_flat, args.classes)

            print('Mean IoU: {}'.format(mean_iou))

            print_confusion_matrix(y_mask_flat, result_flat, args.classes)

    # Old plotting methods.
    # Maybe we need some of this later.
Example #11
def Testing():
    print('Testing')
    
    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(296, "tensor(1.6947)")
	
    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0 : # check model with test data 
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0) 

    elif p.mode == 1: # check model with video
        cap = cv2.VideoCapture("C:\\Users\\sprkzoff\\Desktop\\PINet_new\\CULane\\dataset\\test_set\\Driving.mp4")
        while(cap.isOpened()):
            ret, frame = cap.read()
            if not ret:  # stop when the video ends
                break
            torch.cuda.synchronize()
            prevTime = time.time()
            frame = cv2.resize(frame, (512,256))/255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([frame])) 
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1/(sec)
            s = "FPS : "+ str(fps)
            ti[0] = cv2.resize(ti[0], (1280,800))
            cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
            cv2.imshow('frame',ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    elif p.mode == 2: # check model with a picture
        #test_image = cv2.imread(p.test_root_url+"clips/0530/1492720840345996040_0/20.jpg")
        test_image = cv2.imread("./aa.png")
        test_image = cv2.resize(test_image, (512,256))/255.0
        test_image = np.rollaxis(test_image, axis=2, start=0)
        _, _, ti = test(lane_agent, np.array([test_image]))
        cv2.imshow("test", ti[0])
        cv2.waitKey(0)   

    elif p.mode == 3: #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #12
def Training():
    print('Training')

    # Hyper parameter

    print('Initializing hyper parameter')

    vis = visdom.Visdom()
    loss_window = vis.line(X=torch.zeros((1, )).cpu(),
                           Y=torch.zeros((1)).cpu(),
                           opts=dict(xlabel='epoch',
                                     ylabel='Loss',
                                     title='Training Loss',
                                     legend=['Loss']))

    ## Get dataset

    print("Get dataset")
    loader = Generator()

    ## Get agent and model

    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(1912, "tensor(0.9420)")

    ## Loop for training

    print('Training loop')
    step = 0
    sampling_list = None
    for epoch in range(p.n_epoch):
        lane_agent.training_mode()
        for inputs, target_lanes, target_h, test_image, data_list in loader.Generate(
                sampling_list):
            #training
            #util.visualize_points(inputs[0], target_lanes[0], target_h[0])
            print("epoch : " + str(epoch))
            print("step : " + str(step))
            loss_p = lane_agent.train(inputs, target_lanes, target_h, epoch,
                                      lane_agent, data_list)
            torch.cuda.synchronize()
            loss_p = loss_p.cpu().data

            if step % 50 == 0:
                vis.line(X=torch.ones((1, 1)).cpu() * int(step / 50),
                         Y=torch.Tensor([loss_p]).unsqueeze(0).cpu(),
                         win=loss_window,
                         update='append')

            if step % 100 == 0:
                lane_agent.save_model(int(step / 100), loss_p)
                testing(lane_agent, test_image, step, loss_p)
            step += 1

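        # Carry the agent's data list into the next epoch's Generate() call, then reset it.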
        sampling_list = copy.deepcopy(lane_agent.get_data_list())
        lane_agent.sample_reset()

        #evaluation
        if epoch >= 0 and epoch % 1 == 0:
            print("evaluation")
            lane_agent.evaluate_mode()
            th_list = [0.8]
            index = [3]
            lane_agent.save_model(int(step / 100), loss_p)

            for idx in index:
                print("generate result")
                test.evaluation(loader,
                                lane_agent,
                                index=idx,
                                name="test_result_" + str(epoch) + "_" +
                                str(idx) + ".json")

        if int(step) > 700000:
            break
Example #13
def Training():
    print('Training')

    ####################################################################
    ## Hyper parameter
    ####################################################################
    print('Initializing hyper parameter')

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
        lane_agent.load_weights(50, "tensor(0.2378)", False)  # quan model
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(50, "tensor(0.2378)")  # quan model

    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## Loop for training
    ##############################
    print('Training loop')
    step = 0
    sampling_list = None
    for epoch in range(p.n_epoch):
        lane_agent.training_mode()

        for inputs, target_lanes, target_h, test_image, data_list in loader.Generate(
                sampling_list):
            #training
            print("epoch : " + str(epoch))
            print("step : " + str(step))
            loss_p = lane_agent.train(inputs, target_lanes, target_h, epoch,
                                      lane_agent, data_list)
            torch.cuda.synchronize()
            loss_p = loss_p.cpu().data
            step += 1

        sampling_list = copy.deepcopy(lane_agent.get_data_list())
        lane_agent.sample_reset()

        #evaluation
        if epoch >= 0 and epoch % 1 == 0:
            print("evaluation")
            lane_agent.evaluate_mode()
            th_list = [0.8]
            index = [3]
            lane_agent.save_model(int(step / 100), loss_p)

            for idx in index:
                print("generate result")
                test.evaluation(loader,
                                lane_agent,
                                index=idx,
                                name="./eval_res/test_result_" + str(epoch) +
                                "_" + str(idx) + ".json")

            for idx in index:
                print("compute score")
                with open("./eval_res/eval_acc.txt", 'a') as make_file:
                    make_file.write("epoch : " + str(epoch) + " loss : " +
                                    str(loss_p.cpu().data))
                    make_file.write(
                        evaluation.LaneEval.bench_one_submit(
                            "./eval_res/test_result_" + str(epoch) + "_" +
                            str(idx) + ".json", "test_label.json"))
                    make_file.write("\n")

        if int(step) > 50000:
            break

        if epoch > 20:
            # Freeze quantizer parameters
            lane_agent.d_observer()
        if epoch > 20:
            # Freeze batch norm mean and variance estimates
            lane_agent.freeze_bn()
Example #14
def Testing():
    print('Testing')

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(36, "tensor(0.6383)")

    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0:  # check model with test data
        for _, _, _, test_image in loader.Generate():
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imshow("test", ti[0])
            cv2.waitKey(0)

    elif p.mode == 1:  # check model with video
        cap = cv2.VideoCapture(
            "/home/kym/research/autonomous_car_vision/lane_detection/code/Tusimple/git_version/LocalDataset_Day.mp4"
        )
        while (cap.isOpened()):
            ret, frame = cap.read()
            if not ret:  # stop when the video ends
                break
            torch.cuda.synchronize()
            prevTime = time.time()
            frame = cv2.resize(frame, (512, 256)) / 255.0
            frame = np.rollaxis(frame, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([frame]))
            curTime = time.time()
            sec = curTime - prevTime
            fps = 1 / (sec)
            s = "FPS : " + str(fps)
            ti[0] = cv2.resize(ti[0], (1280, 800))
            cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 0))
            cv2.imshow('frame', ti[0])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()

    elif p.mode == 2:  # check model with a picture
        file_folder = "0530/" + p.test_folder + "/"
        save_folder = "PI_Net_Results_epoch36_4h/" + file_folder
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)
        test_image = None
        with tf.io.gfile.GFile(
                p.test_root_url + "clips/" + file_folder + str(1) + ".jpg",
                'rb') as f:
            test_image = np.asarray(bytearray(f.read()), dtype="uint8")
        test_image = cv2.imdecode(test_image, cv2.IMREAD_COLOR)
        height, width, channels = test_image.shape

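        # Run inference on frames 1-20 of the clip and save each visualization.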
        for i in range(1, 21):
            with tf.io.gfile.GFile(
                    p.test_root_url + "clips/" + file_folder + str(i) + ".jpg",
                    'rb') as f:
                test_image = np.asarray(bytearray(f.read()), dtype="uint8")
            test_image = cv2.imdecode(test_image, cv2.IMREAD_COLOR)
            height, width, channels = test_image.shape
            # test_image = cv2.imread(p.test_root_url+"clips/0530/1492626047222176976_0/1.jpg")
            # cv2.imshow("test_11", test_image)
            test_image = cv2.resize(test_image, (512, 256)) / 255.0
            test_image = np.rollaxis(test_image, axis=2, start=0)
            _, _, ti = test(lane_agent, np.array([test_image]))
            # cv2.imshow("test", ti[0])
            cv2.imwrite(save_folder + str(i) + '_Result.png',
                        cv2.resize(ti[0], (width, height)))
            # cv2.waitKey(0)
            # img = cv2.imread(save_folder+str(i)+'_Result.png')
            # video.write(img)

        img = cv2.imread(save_folder + '1_Result.png')
        height, width, channels = img.shape

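        # Re-encode the 20 saved result images into an .mp4 at 10 fps.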
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video = cv2.VideoWriter(
            save_folder + file_folder.split('/')[-2] + '.mp4', fourcc, 10,
            (width, height))
        for i in range(1, 21):
            img = cv2.imread(save_folder + str(i) + '_Result.png')
            video.write(img)

        cv2.destroyAllWindows()
        video.release()

    elif p.mode == 3:  #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #15
def Testing():
    print('Testing')

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(50, "tensor(0.2378)")  # quan model
    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()

    ##############################
    ## testing
    ##############################
    print('Testing loop')
    lane_agent.evaluate_mode()

    if p.mode == 0:  # check model with test data
        for test_image, _, _, _, i in loader.Generate_Test(
        ):  #loader.Generate()
            _, _, ti = test(lane_agent, np.array([test_image]))
            cv2.imwrite("./eval/{}_test.png".format(i), ti[0])

    elif p.mode == 1:  # check model with video
        test_videos = 'test_videos.txt'
        save_dir = 'test_video_result'
        video_list = open(test_videos).readlines()
        use_ori = True
        for video in video_list:
            video = video.strip()

            save_video_dir = save_dir + "/".join(video.split("/")[-3:-1])
            if not os.path.exists(save_video_dir):
                os.makedirs(save_video_dir)

            cap = cv2.VideoCapture(video)
            fps = int(cap.get(cv2.CAP_PROP_FPS))

            size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            print(size)  # wh
            h, w = size[1], size[0]
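            # Drop roughly the top 2/5 of each frame before resizing for the network.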
            hcrop = h // 5
            tmp = hcrop * 2
            videoWriter = cv2.VideoWriter(
                save_video_dir + '/{}.avi'.format(video.split('/')[-1][:-4]),
                cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, size)
            count = 0
            while (cap.isOpened()):
                ret, ori_frame = cap.read()
                count += 1
                if ret == False:
                    break

                crop_frame = ori_frame[tmp:, :]
                prevTime = time.time()
                frame = cv2.resize(crop_frame, (256, 256)) / 255.0
                frame = np.rollaxis(frame, axis=2, start=0)
                if not use_ori:
                    _, _, ti = test(lane_agent, np.array([frame]))
                if use_ori:
                    ratio_w = p.x_size * 1.0 / (size[0])
                    ratio_h = p.y_size * 1.0 / (size[1] - tmp)
                    _, _, ti = test_ori(tmp,
                                        lane_agent,
                                        ori_frame,
                                        np.array([frame]),
                                        ratio_w,
                                        ratio_h,
                                        draw_type='point',
                                        thresh=p.threshold_point)
                curTime = time.time()
                sec = curTime - prevTime
                fps = 1 / (sec)
                s = "FPS : " + str(round(fps, 3))
                cv2.putText(ti[0], s, (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 255, 0))
                videoWriter.write(ti[0])
            cap.release()
            print("video writer finished")

    elif p.mode == 2:  # check model with a picture
        test_images = "./test_pic"
        save = "./test_pic_res/"
        img_list = os.listdir(test_images)
        img_list = [img for img in img_list if '.jpg' in img]
        use_ori = True
        print("image test")
        for img in img_list:
            ori_image = cv2.imread(test_images + '/' + img)  #hw
            h, w = ori_image.shape[0], ori_image.shape[1]
            hcrop = h // 5
            tmp = hcrop * 2
            crop_image = ori_image[tmp:, :]

            test_image = cv2.resize(crop_image, (256, 256)) / 255.0
            test_image = np.rollaxis(test_image, axis=2, start=0)

            if not use_ori:
                _, _, ti = test(lane_agent, np.array([test_image]))
            if use_ori:
                ratio_w = p.x_size * 1.0 / ori_image.shape[1]
                ratio_h = p.y_size * 1.0 / (ori_image.shape[0] - tmp)
                _, _, ti = test_ori(tmp,
                                    lane_agent,
                                    ori_image,
                                    np.array([test_image]),
                                    ratio_w,
                                    ratio_h,
                                    draw_type='point',
                                    thresh=p.threshold_point)
            cv2.imwrite(save + img, ti[0])

    elif p.mode == 3:  #evaluation
        print("evaluate")
        evaluation(loader, lane_agent)
Example #16
def Training():
    print('Training')

    ####################################################################
    ## Hyper parameter
    ####################################################################
    print('Initializing hyper parameter')

    vis = visdom.Visdom()
    loss_window = vis.line(X=torch.zeros((1, )).cpu(),
                           Y=torch.zeros((1)).cpu(),
                           opts=dict(xlabel='epoch',
                                     ylabel='Loss',
                                     title='Training Loss',
                                     legend=['Loss']))

    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')
    if p.model_path == "":
        lane_assistant = Assistant.Assistant()
    else:
        lane_assistant = Assistant.Assistant()
        lane_assistant.load_weights(1912, "tensor(0.9420)")

    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_assistant.cuda()
        #torch.backends.cudnn.benchmark=True

    ##############################
    ## Loop for training
    ##############################
    print('Training loop')
    step = 0
    sampling_list = None

    for epoch in range(p.n_epoch):
        lane_assistant.training_mode()
        for inputs, target_lanes, target_h, test_image, data_list in loader.Generate(
                sampling_list):
            #training
            #util.visualize_points(inputs[0], target_lanes[0], target_h[0])
            print("epoch : " + str(epoch))
            print("step : " + str(step))
            loss_p = lane_assistant.train(inputs, target_lanes, target_h,
                                          epoch, lane_assistant, data_list)
            torch.cuda.synchronize()
            loss_p = loss_p.cpu().data

            if step % 50 == 0:
                vis.line(X=torch.ones((1, 1)).cpu() * int(step / 50),
                         Y=torch.Tensor([loss_p]).unsqueeze(0).cpu(),
                         win=loss_window,
                         update='append')

            if step % 100 == 0:
                lane_assistant.save_model(int(step / 100), loss_p)
                testing(lane_assistant, test_image, step, loss_p)
            step += 1

        sampling_list = copy.deepcopy(lane_assistant.get_data_list())
        lane_assistant.sample_reset()

        #evaluation
        if epoch >= 0 and epoch % 1 == 0:
            print("evaluation")
            lane_assistant.evaluate_mode()
            th_list = [0.8]
            index = [3]
            lane_assistant.save_model(int(step / 100), loss_p)

            for idx in index:
                print("generate result")
                test.evaluation(loader,
                                lane_assistant,
                                index=idx,
                                name="test_result_" + str(epoch) + "_" +
                                str(idx) + ".json")

            for idx in index:
                print("compute score")
                with open(
                        "/home/kym/Dropbox/eval_result2_" + str(idx) + "_.txt",
                        'a') as make_file:
                    make_file.write("epoch : " + str(epoch) + " loss : " +
                                    str(loss_p.cpu().data))
                    make_file.write(
                        evaluation.LaneEval.bench_one_submit(
                            "test_result_" + str(epoch) + "_" + str(idx) +
                            ".json", "test_label.json"))
                    make_file.write("\n")
                with open("eval_result_" + str(idx) + "_.txt",
                          'a') as make_file:
                    make_file.write("epoch : " + str(epoch) + " loss : " +
                                    str(loss_p.cpu().data))
                    make_file.write(
                        evaluation.LaneEval.bench_one_submit(
                            "test_result_" + str(epoch) + "_" + str(idx) +
                            ".json", "test_label.json"))
                    make_file.write("\n")

        if int(step) > 700000:
            break
Example #17
File: train.py Project: wzhang1/PINet_new
def Training():
    print('Training')

    ####################################################################
    ## Hyper parameter
    ####################################################################
    print('Initializing hyper parameter')

    vis = visdom.Visdom()
    loss_window = vis.line(X=torch.zeros((1,)).cpu(),
                           Y=torch.zeros((1)).cpu(),
                           opts=dict(xlabel='epoch',
                                     ylabel='Loss',
                                     title='Training Loss',
                                     legend=['Loss']))
    
    #########################################################################
    ## Get dataset
    #########################################################################
    print("Get dataset")
    loader = Generator()

    ##############################
    ## Get agent and model
    ##############################
    print('Get agent')

    if p.model_path == "":
        lane_agent = agent.Agent()
    else:
        lane_agent = agent.Agent()
        lane_agent.load_weights(0, "tensor(1.3984)")

    ##############################
    ## Check GPU
    ##############################
    print('Setup GPU mode')
    if torch.cuda.is_available():
        lane_agent.cuda()
        #torch.backends.cudnn.benchmark=True

    ##############################
    ## Loop for training
    ##############################
    print('Training loop')
    step = 0
    sampling_list = None
    for epoch in range(p.n_epoch):
        lane_agent.training_mode()
        for inputs, target_lanes, target_h, test_image, data_list in loader.Generate(sampling_list):

            #util.visualize_points(inputs[0], target_lanes[0], target_h[0])
            #training
            print("epoch : " + str(epoch))
            print("step : " + str(step))
            loss_p = lane_agent.train(inputs, target_lanes, target_h, epoch, lane_agent, data_list)
            torch.cuda.synchronize()
            loss_p = loss_p.cpu().data
            
            if step%50 == 0:
                vis.line(
                    X=torch.ones((1, 1)).cpu() * int(step/50),
                    Y=torch.Tensor([loss_p]).unsqueeze(0).cpu(),
                    win=loss_window,
                    update='append')
                
            if step%100 == 0:
                lane_agent.save_model(int(step/100), loss_p)
                testing(lane_agent, test_image, step, loss_p)
            step += 1

        sampling_list = copy.deepcopy(lane_agent.get_data_list())
        lane_agent.sample_reset()

        #evaluation
        if epoch%1 == 0:
            print("evaluation")
            lane_agent.evaluate_mode()
            th_list = [0.9]
            index = [3]
            lane_agent.save_model(int(step/100), loss_p)

            for idx in index:
                print("generate result")
                test.evaluation(loader, lane_agent, index = idx, name="test_result_"+str(epoch)+"_"+str(idx)+".json")
                name = "epoch_idx_"+str(epoch) + str(idx) + str(step/100)
                os.system("sh /home/kym/research/autonomous_car_vision/lane_detection/code/ITS/CuLane/evaluation_code/SCNN_Pytorch/utils/lane_evaluation/CULane/Run.sh " + name)

        if int(step)>700000:
            break