def gen_webconfig():
    """Write the generated web config listing compiled and demo CSS files."""

    def _quoted_urls(paths):
        # Each entry becomes a quoted '/resource/css/<basename>' literal
        # suitable for embedding in the generated Python config file.
        return ["'/%s/%s'" % ('resource/css', helper.get_file_name(p))
                for p in paths]

    css_files = _quoted_urls(config.CSS_FILES)
    demo_css_files = _quoted_urls(config.DEMO_CSS_FILES)

    text = '\n'.join([
        '# Generated file. DO NOT EDIT',
        'COMPILED = %s' % helper.should_compile(config.DEFAULT_COMPILED),
        'CSS_FILES = [\n%s\n]' % ',\n'.join(css_files),
        'DEMO_CSS_FILES = [\n%s\n]' % ',\n'.join(demo_css_files)
    ])
    helper.write_text(config.WEB_CONFIG_FILE_PATH, text)
def gen_tpl(path):
    """Compile the Soy template at `path` with the template compiler jar.

    In dev (non-compiled) mode, unmodified templates are skipped. After a
    successful run, a build-info file is written so the next invocation can
    detect staleness.

    :param path: path to the template source file
    """
    # In dev mode, skip templates whose build info says they need no rebuild.
    if helper.is_modified(path) and not helper.should_compile(config.DEFAULT_COMPILED):
        # print statements converted to the function form so the script is
        # valid under both Python 2 and Python 3.
        print('skip %s' % path)
        return
    print('gen_tpl %s\n' % path)
    # Copy so the module-level GEN_FLAGS list is never mutated.
    gen_flags = list(GEN_FLAGS)
    if helper.should_compile(config.DEFAULT_COMPILED):
        gen_flags.append('--cssHandlingScheme REFERENCE')
    flags_str = ' '.join(str(soy_flag) for soy_flag in gen_flags)
    output_flag = '--outputPathFormat %s.js' % path
    cmd = 'java -jar %s %s %s %s' % (
        config.TPL_COMPILER_PATH, flags_str, output_flag, path)
    print(cmd)
    # NOTE(review): os.system with a shell string; inputs come from trusted
    # build config, but subprocess.run([...]) would be safer.
    os.system(cmd)
    if helper.should_compile(config.DEFAULT_COMPILED):
        helper.write_text(helper.get_build_info_file(path), 'build..')
    else:
        helper.write_text(helper.get_build_info_file(path),
                          str(os.path.getmtime(path)))
def gen_js_bin_deps(target_name, module_files):
    """Generate the Closure Compiler --js/--module flag file for a target.

    Runs the dependency calculator to produce an ordered file list, then
    rewrites that list as compiler flags. A '--module name:count[:parent]'
    line is emitted after the files of each module; `idx` counts files since
    the previous module boundary.

    :param target_name: name used for the output .txt flag file
    :param module_files: files that mark module boundaries in the dep list
    """
    output_path = '%s/%s.txt' % (config.JS_DEPS_OUTPUT_DIR, target_name)
    output_str = '-o list > %s ' % output_path
    cmd = gen_base_cmd(module_files) + output_str
    # print statement converted to function form for Python 2/3 compatibility.
    print(cmd)
    os.system(cmd)
    lines = helper.get_file_lines(output_path, True)
    new_lines = []
    idx = 0  # number of files accumulated for the current module
    last_module_name = None
    for line in lines:
        line = line.strip()
        new_lines.append('--js %s' % line)
        idx += 1
        if line in module_files:
            module_name = helper.get_module_name(line)
            if last_module_name is None:
                # First (root) module has no parent dependency.
                new_lines.append('--module %s:%s' % (module_name, idx))
            else:
                new_lines.append(
                    '--module %s:%s:%s' % (module_name, idx, last_module_name))
            last_module_name = module_name
            idx = 0
    helper.write_text(output_path, '\n'.join(new_lines))
def gen_mock_data():
    """Rewrite the FB API JS file so it goog.require()s every mock-data namespace."""
    requires = []
    for data_file in config.MOCK_DATA_FILES:
        # Turn the first goog.provide(...) of each mock file into a require.
        for line in helper.get_file_lines(data_file, True):
            if line.startswith('goog.provide('):
                requires.append('goog.require' + line[line.rfind('('):])
                break
    requires.sort()

    provide_line = None
    output_lines = []
    for line in helper.get_file_lines(config.JS_FBAPI_FILE_PATH):
        stripped = line.strip()
        if stripped.startswith('goog.provide('):
            provide_line = stripped
        elif stripped.startswith('goog.require('):
            # Merge the file's own requires with the mock-data requires.
            if stripped not in requires:
                requires.append(stripped)
        else:
            output_lines.append(line)
    requires.sort()

    # Layout: provide line, blank, sorted requires, blank, remaining body.
    helper.write_text(config.JS_FBAPI_FILE_PATH, '%s\n\n%s\n\n%s' % (
        provide_line, '\n'.join(requires), ''.join(output_lines)))
def build_css():
    """Concatenate, minify, and (in compiled mode) class-rename the CSS.

    Also emits the JS file mapping original CSS class names to renamed ones.
    """
    # Pick the file list for this build mode.
    # NOTE(review): called with no argument here but with
    # config.DEFAULT_COMPILED elsewhere in this file — confirm intended.
    if helper.should_compile():
        css_files = config.CSS_FILES
    else:
        css_files = config.DEMO_CSS_FILES
    # BUG FIX: the loop previously iterated config.CSS_FILES, silently
    # ignoring the mode-dependent selection above (dead variable).
    all_css_text = ''.join(helper.get_file_text(f) for f in css_files)

    output_raw_path = config.BUILD_INFO_DIR + '/.raw.all_css_text.css'
    output_min_path = config.BUILD_INFO_DIR + '/.min.all_css_text.css'
    helper.write_text(output_raw_path, all_css_text)
    cmd = 'java -jar %s %s > %s' % (
        config.CSS_COMPRESSOR_PATH, output_raw_path, output_min_path)
    os.system(cmd)
    min_css_text = helper.get_file_text(output_min_path)

    js_vars = []  # filled as a side effect of rename_css_name
    css_names = re.findall(CSS_NAME_PATTERN, min_css_text)
    old_new_css_names = [(cn, rename_css_name(cn, js_vars)) for cn in css_names]
    if helper.should_compile(config.DEFAULT_COMPILED):
        bin_css_text = min_css_text
        for old_name, new_name in old_new_css_names:
            bin_css_text = bin_css_text.replace(old_name, new_name)
        helper.write_text(config.CSS_BIN_DIR + '/all.css', bin_css_text)
    else:
        # print converted to function form for Python 2/3 compatibility.
        print('Skip generate compiled CSS.')
    js_code = JS_CODE_TEMPLATE % ''.join(js_vars)
    helper.write_text(config.JS_CSS_NAMES_PATH, js_code.strip())
def video(perspective_matrix_path, source="cam", save=False, save_path=None,
          file_name="out", cam_cal=None):
    """
    Apply lane detection on video frames.

    :param perspective_matrix_path: path to pickle file containing the
        perspective transform matrices M and Minv
    :param source: "cam" to use the default camera in real time, otherwise
        a path to a local video file
    :param save: True to save the output video (intended for file sources)
    :param save_path: directory for the output video when save is True
    :param file_name: output video name when save is True, default "out"
    :param cam_cal: path to pickle file with camera calibration [mtx, dist]
    :return: None
    :raises FileNotFoundError: when any given path does not exist
    """
    if not os.path.isfile(perspective_matrix_path):
        raise FileNotFoundError("Path to perspective matrix file not exist!")
    with open(perspective_matrix_path, "rb") as p:
        # NOTE(review): pickle.load is unsafe on untrusted files; these are
        # assumed to be locally generated artifacts.
        perspective_matrix = pickle.load(p)
    M = perspective_matrix["M"]
    Minv = perspective_matrix["Minv"]

    if source == "cam":
        cap = cv2.VideoCapture(0)
    else:
        if not os.path.isfile(source):
            raise FileNotFoundError(source, " not Exist!")
        cap = cv2.VideoCapture(source)

    # Camera calibration parameters [mtx, dist]; stay None when cam_cal
    # is not given, in which case no undistortion is applied.
    mtx = None
    dist = None

    out = None
    if save:
        if not os.path.isdir(save_path):
            raise FileNotFoundError(save_path, " Not Exist!")
        file_name += ".mp4"
        out = cv2.VideoWriter(save_path + file_name, -1, 20,
                              (int(cap.get(3)), int(cap.get(4))))

    if cam_cal:
        if not os.path.isfile(cam_cal):
            raise FileNotFoundError(cam_cal, " Not Exist!")
        with open(cam_cal, "rb") as p:
            calibration = pickle.load(p)
        mtx = calibration["mtx"]
        dist = calibration["dist"]

    # Lane-line trackers, smoothing over the last 5 frames.
    left_line = Line(5)
    right_line = Line(5)

    while True:
        # Capture frame-by-frame.
        ret, frame = cap.read()
        if not ret:
            # BUG FIX: was sys.exit(0), which killed the whole process and
            # skipped the cleanup below (capture/writer never released).
            print("Finished..")
            break

        # cv2 reads frames as BGR; convert to RGB for processing.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Undistort only when calibration data was loaded.
        if not (mtx is None or dist is None):
            frame = cv2.undistort(frame, mtx, dist, None, mtx)

        # Edge detection, then warp to bird's-eye view.
        edges = apply_edge_detection(frame)
        warped = warped_img(edges, M)

        # Blank image to draw the lane on; force it to 3 channels.
        out_img = np.zeros_like(warped)
        if not (len(warped.shape) == 3 and warped.shape[2] == 3):
            out_img = np.dstack((out_img, out_img, out_img))

        # If the lines were not detected last frame, run a full sliding-window
        # search; otherwise search around the previous polynomial fit.
        if not left_line.detected or not right_line.detected:
            leftx, lefty, rightx, righty = sliding_window(warped, 9, 200)
        else:
            leftx, lefty = search_around_poly(left_line, warped)
            rightx, righty = search_around_poly(right_line, warped)

        # y positions used to evaluate/plot the fitted polynomials.
        ploty = np.linspace(warped.shape[0] // 4, warped.shape[0] - 1,
                            warped.shape[0])

        # Accept the fit only when at least 100 pixels per line were found.
        if len(leftx) > 100 and len(rightx) > 100:
            left_line.detected = True
            right_line.detected = True
            left_line.current_x = leftx
            left_line.current_y = lefty
            right_line.current_x = rightx
            right_line.current_y = righty
            left_line.fit_polynomial(ploty)
            right_line.fit_polynomial(ploty)
        else:
            # Keep drawing the previous frame's fit; force a sliding-window
            # search next frame.
            print("Line not detected in this frame ")
            left_line.detected = False
            right_line.detected = False

        # Update per-line curvature and show the averaged radius.
        left_line.radius()
        right_line.radius()
        radius = (left_line.radius_of_curvature +
                  right_line.radius_of_curvature) // 2
        frame = write_text(frame, "Radius of Curvature = " + str(radius) + " M",
                           pos=(20, 50))

        # Vehicle offset from lane center (renamed from `dir`, which
        # shadowed the builtin).
        direction = "Left"
        left_line.car_offset(frame.shape)   # distance from left line
        right_line.car_offset(frame.shape)  # distance from right line
        distance = round(right_line.line_base_pos - left_line.line_base_pos, 2)
        if distance < 0:
            # Negative means the car is off toward the right line.
            distance = -distance
            direction = "Right"
        frame = write_text(frame,
                           "Vehicle is {}m {} of center".format(distance, direction),
                           pos=(20, 80))

        # Color the pixels that belong to each lane line.
        left_line.color_pixel(out_img, (255, 0, 0))
        right_line.color_pixel(out_img, (255, 100, 0))

        # Fill the area between the two fitted lines with green.
        pts_left = np.array(
            [np.transpose(np.vstack([left_line.bestx, ploty]))])
        pts_right = np.array(
            [np.flipud(np.transpose(np.vstack([right_line.bestx, ploty])))])
        pts = np.hstack((pts_left, pts_right))
        cv2.fillPoly(out_img, np.int_([pts]), (0, 255, 0))

        # Unwarp the overlay back to the camera view and blend with the frame.
        out_img_undit = warped_img(out_img, Minv)
        frame = cv2.addWeighted(out_img_undit, 0.5, frame, 1, 0)

        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        cv2.imshow("frame", frame)

        if save:
            out.write(frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture and writer.
    cap.release()
    if out is not None:
        # BUG FIX: the VideoWriter was never released, so the output file
        # could be left unfinalized.
        out.release()
    cv2.destroyAllWindows()