import os
import time
import importlib
import queue as que              # assumed alias: q1 below is created via que.Queue()

import cv2
import numpy as np
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt

from std_msgs.msg import String
from sensor_msgs.msg import Image
from obstacle_detector.msg import Obstacles
from cv_bridge import CvBridge, CvBridgeError
from ackermann_msgs.msg import AckermannDriveStamped
from datetime import datetime  # for record

from Stop_Counter import Stop_Counter
from CurveDetector import CurveDetector
from ObstacleDetector import ObstacleDetector, Position
# Local project modules; the exact import paths below are assumptions and may need
# to be adjusted to the repository layout:
from warper import Warper
from slidewindow import SlideWindow
from pidcal import PidCal
import opts

x_location_old = None
warper = Warper()
slidewindow = SlideWindow()
pidcal = PidCal()
q1 = que.Queue()
bridge = CvBridge()
now = datetime.now()  # for record

cv_image = None
obstacles = None
ack_publisher = None
car_run_speed = 0.5
OBSTACLE_NUM = 3

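
# --- Hedged sketch: auto_drive() is called by the main() variants below but is not
# defined in this section. A minimal sketch, assuming it publishes an
# AckermannDriveStamped message on the global `ack_publisher`, using the steering
# command from the PID / sliding-window step and the global `car_run_speed` as a
# constant cruise speed. The real node may also use the optional x_location argument
# or adjust the speed; field usage here is an assumption, not the project's code.
def auto_drive(steer, x_location=None):
    msg = AckermannDriveStamped()
    msg.drive.steering_angle = steer   # steering command (PID output or raw angle)
    msg.drive.speed = car_run_speed    # constant forward speed
    if ack_publisher is not None:
        ack_publisher.publish(msg)
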
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # cudnn.benchmark = True
    opt = opts.parse()
    warper = Warper()
    slidewindow = SlideWindow()
    pidcal = PidCal()

    print("device id: {}".format(torch.cuda.current_device()))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')
    # trainLoader, valLoader = DataLoader.create(opt)
    # print('opt', opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()
    criterion = criterions.setup(opt, checkpoint, model)
    ##################################################################################
    model.eval()

    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))
        video_width = int(cap.get(3))
        video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    video_name = time.time()
    out = cv2.VideoWriter('output_video/{}.avi'.format(video_name), fourcc, 25.0,
                          (video_width, video_height), 0)

    prev_time = 0
    fps_list = []

    while True:
        ret, frame = cap.read()
        if ret:
            cur_time = time.time()
            frame = cv2.resize(frame, (480, 360))
            input_img = frame / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()
            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

            # inference
            output = model.forward(inputData_var)
            print("output.shape : ", output.shape)

            # gpu -> cpu, tensor -> numpy
            output = output.detach().cpu().numpy()
            output = output[0]
            output = postprocess_img(output)
            output *= 255
            output = np.clip(output, 0, 255)
            output = np.uint8(output)

            # resize
            output = cv2.resize(output, (640, 480))

            # threshold
            ret, thr_img = cv2.threshold(output, 180, 255, 0)

            # warp
            output, warp_img = warper.warp(output, thr_img)
            img, x_location = slidewindow.slidewindow(warp_img)

            if x_location is not None:
                pid = round(pidcal.pid_control(int(x_location)), 6)
                print("pid rate : ", pid)
                auto_drive(pid, x_location)
            else:
                pid = pidcal.pid_control(320)
                print("pid rate : ", pid)
                auto_drive(pid)

            end_time = time.time()
            sec = end_time - cur_time
            fps = 1 / sec
            fps_list.append(fps)
            print("Estimated fps {0}".format(fps))

            out.write(output)
            cv2.imshow("src", frame)
            cv2.imshow("output", output)
            cv2.imshow("thre", img)

            key = cv2.waitKey(1) & 0xFF
            if key == 27:
                break
            elif key == ord('p'):
                cv2.waitKey(-1)

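
# --- Hedged sketch: preprocess_img() / postprocess_img() are called above but not
# defined in this section. A minimal sketch, assuming they only move the channel
# axis between OpenCV's HWC layout and PyTorch's CHW layout; the project's real
# helpers may additionally normalize, crop, or pad.
def preprocess_img(img):
    """HWC (OpenCV) float image -> CHW array for torch.from_numpy()."""
    return np.transpose(img, (2, 0, 1))


def postprocess_img(output):
    """CHW network output -> HWC image; squeeze a single-channel mask to 2-D."""
    out = np.transpose(output, (1, 2, 0))
    if out.shape[2] == 1:
        out = out[:, :, 0]
    return out
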
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # cudnn.benchmark = True
    pidcal = PidCal()
    opt = opts.parse()
    warper = Warper()
    slidewindow = SlideWindow()

    print("device id: {}".format(torch.cuda.current_device()))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()
    criterion = criterions.setup(opt, checkpoint, model)
    ##################################################################################
    model.eval()

    cap = cv2.VideoCapture("input_video/test.avi")
    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))
        video_width = int(cap.get(3))
        video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # out = cv2.VideoWriter('output_video/TEST_1.avi', fourcc, 20.0, (1280, 480), 0)
    prev_time = 0
    fps_list = []

    # fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('input_video/processed_video.avi', fourcc, 40.0, (480, 320), 0)

    steer_list = list()
    lpf_list = list()

    while True:
        ret, frame = cap.read()
        if ret:
            cur_time = time.time()
            frame_new = cv2.resize(frame, (320, 180))
            input_img = frame_new / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()
            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

            # inference
            output = model.forward(inputData_var)
            output = torch.sigmoid(output)
            # output = F.softmax(output, dim=1)

            # gpu -> cpu, tensor -> numpy
            output = output.detach().cpu().numpy()
            output = output[0]
            output = postprocess_img(output)
            output = np.clip(output, 0, 1)
            output *= 255
            output = np.uint8(output)
            output = cv2.resize(output, (640, 360))
            output[output > 80] = 255
            output[output <= 80] = 0

            warper_img, point_img = warper.warp(output)
            ret, left_start_x, right_start_x, cf_img = slidewindow.w_slidewindow(warper_img)

            if ret:
                left_x_current, right_x_current, sliding_img, steer_theta = slidewindow.h_slidewindow(
                    warper_img, left_start_x, right_start_x)
                cv2.imshow('sliding_img', sliding_img)
                steer_list.append(steer_theta)

                lpf_result = lpf(steer_theta, 0.5)
                lpf_list.append(lpf_result)

                print("steer theta:", steer_theta)
                if steer_theta < -28 or steer_theta > 28:
                    continue
                else:
                    pid = round(pidcal.pid_control(int(50 * steer_theta)), 6)
                    print("pid :", pid)
                    # auto_drive(pid)
            else:
                pidcal.error_sum = 0

            end_time = time.time()
            sec = end_time - cur_time
            fps = 1 / sec
            fps_list.append(fps)
            print("Estimated fps {0}".format(fps))

            # out.write(add_img)
            cv2.imshow("frame", frame)
            out.write(warper_img)
            # cv2.imshow("src", warper_img)
            # cv2.imshow("out_img", output)
            cv2.imshow("cf_img", cf_img)

            key = cv2.waitKey(1) & 0xFF
            if key == 27:
                break
            elif key == ord('p'):
                cv2.waitKey(-1)

    plt.figure(1)
    plt.plot(steer_list)
    plt.figure(2)
    plt.plot(lpf_list)
    plt.show()

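
# --- Hedged sketch: lpf() is called above as lpf(steer_theta, 0.5) and below as
# lpf(x_location, 0.3) but is not defined in this section. A minimal sketch,
# assuming a first-order (exponential moving average) low-pass filter that keeps
# its previous output in a module-level variable; the name `lpf_old` is an
# illustration-only assumption.
lpf_old = None


def lpf(value, alpha):
    """y[k] = alpha * x[k] + (1 - alpha) * y[k-1]; first sample passes through."""
    global lpf_old
    if lpf_old is None:
        lpf_old = value
    lpf_old = alpha * value + (1.0 - alpha) * lpf_old
    return lpf_old
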
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    # cudnn.benchmark = True
    pidcal = PidCal()
    opt = opts.parse()
    warper = Warper()
    slidewindow = SlideWindow()

    print("device id: {}".format(torch.cuda.current_device()))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()
    criterion = criterions.setup(opt, checkpoint, model)
    ##################################################################################
    model.eval()

    # Select the input source by index (integer comparison with ==, not `is`)
    cap = None
    if opt.video_idx == 0:
        cap = cv2.VideoCapture("input_video/720p.mp4")
    elif opt.video_idx == 1:
        cap = cv2.VideoCapture("input_video/straight.avi")
    elif opt.video_idx == 2:
        cap = cv2.VideoCapture("input_video/test.avi")
    elif opt.video_idx == 3:
        cap = cv2.VideoCapture("input_video/track.avi")
    elif opt.video_idx == 4:
        cap = cv2.VideoCapture("output_video/field.avi")
    else:
        cap = cv2.VideoCapture(0)  # video test
        cap.set(3, 1280)
        cap.set(4, 720)

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))
        video_width = int(cap.get(3))
        video_height = int(cap.get(4))

    # fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # out = cv2.VideoWriter('output_video/TEST_1.avi', fourcc, 20.0, (1280, 480), 0)
    prev_time = 0
    fps_list = []

    now = datetime.now()
    fourcc = None
    out = None
    if opt.video_idx > 2:
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        # fourcc = cv2.VideoWriter_fourcc(*'MP4V')
        out = cv2.VideoWriter('output_warper_video/' + str(now) + '.avi', fourcc, 30.0, (480, 320))

    pid_list = list()
    steer_list = list()
    lpf_list = list()
    pid_old = None
    steer_theta = 0
    i = 0
    x_location = 440

    while True:
        ret, frame = cap.read()
        if frame is None:
            break

        frame_height, frame_width, frame_channels = frame.shape
        print("Frame Info : (Height, Width, Channels) : ({}, {}, {})".format(
            frame_height, frame_width, frame_channels))
        # record_frame = cv2.resize(frame, (1280, 720))

        if ret:
            cur_time = time.time()
            frame_new = cv2.resize(frame, (320, 180))
            input_img = frame_new / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()
            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

            # inference
            output = model.forward(inputData_var)
            output = torch.sigmoid(output)
            # output = F.softmax(output, dim=1)

            # gpu -> cpu, tensor -> numpy
            output = output.detach().cpu().numpy()
            output = output[0]
            output = postprocess_img(output)
            output = np.clip(output, 0, 1)
            output *= 255
            output = np.uint8(output)
            output = cv2.resize(output, (640, 360))
            output[output > 80] = 255
            output[output <= 80] = 0

            # mark the bottom-center of the binary mask (circle centers must be ints)
            cv2.circle(output, (output.shape[1] // 2, output.shape[0]), 9, (255, 255, 0), -1)
            cv2.imshow("output_img", output)
            print("shape_info", output.shape)
            # cv2.circle(output, (output.shape[0] // 2, output.shape[1] // 2), 9, (0, 255, 0), -1)

            # warper_img = warper.warp(output)
            warper_img = warper.warp_test(output)
            cv2.imshow("warp_img", warper_img)

            if opt.video_idx > 2:
                print("frame.shape : {}".format(frame.shape))
                tmp = cv2.cvtColor(warper_img, cv2.COLOR_GRAY2RGB)
                # print("warper_img.shape : {}".format(warper_img.shape))
                out.write(tmp)

            # warper_img_test = warper.warp_test(output)
            # cv2.imshow("warp_img_test", warper_img_test)

            ret, left_start_x, right_start_x, cf_img = slidewindow.w_slidewindow(warper_img, 180)

            if ret:
                i += 1
                left_x_current, right_x_current, sliding_img, steer_theta = slidewindow.h_slidewindow(
                    warper_img, left_start_x, right_start_x)
                cv2.imshow('sliding_img', sliding_img)
                steer_list.append(steer_theta)

                x_location = (left_x_current + right_x_current) / 2

                # low pass filter
                lpf_result = lpf(x_location, 0.3)
                lpf_list.append(lpf_result)

                # steer theta : degrees
                print("steer theta:", steer_theta)
                if steer_theta < -28.0 or steer_theta > 28.0:
                    # auto_drive(pid_old)
                    auto_drive(steer_theta)
                else:
                    # degree angle
                    pid = round(pidcal.pid_control(lpf_result), 6)
                    pid_list.append(pid)
                    print("pid :", pid)
                    pid_old = pid
                    auto_drive(steer_theta)
                    # auto_drive(pid)
            else:
                auto_drive(steer_theta)
                # auto_drive(pid)
                pidcal.error_sum = 0
                pidcal.error_old = 0

            end_time = time.time()
            sec = end_time - cur_time
            fps = 1 / sec
            fps_list.append(fps)
            print("Estimated fps {0}".format(fps))

            # out.write(add_img)
            cv2.imshow("frame", frame)
            # cv2.imshow("src", warper_img)
            # cv2.imshow("out_img", output)
            cv2.imshow("cf_img", cf_img)

            key = cv2.waitKey(1) & 0xFF
            if key == 27:
                break
            elif key == ord('p'):
                cv2.waitKey(-1)

    cap.release()
    cv2.destroyAllWindows()