def gene_center_line(labels, data_list, rev_index, trace_list, debug=False):
    # Group clustered anchor points by label, estimate each cluster's mean
    # heading, rotate the cluster into an axis-aligned frame, and fit a
    # center line ("road") through it via center_road().
    # :param labels: cluster label per point (label -2 is skipped — presumably noise)
    # :param data_list: list of (x, y, angle) triples, one per point
    # :param rev_index: per-point back-reference, forwarded to collect_line_around
    # :param trace_list: original traces, forwarded to collect_line_around
    # :param debug: when True only the hard-coded label 21 is processed and
    #               intermediate points are drawn
    n = len(labels)
    x_list, y_list, angle_list = zip(*data_list)
    pts_dict = defaultdict(list)
    angle_dict = defaultdict(list)
    rev_dict = defaultdict(list)
    for i in range(n):
        pts_dict[labels[i]].append([x_list[i], y_list[i]])
        angle_dict[labels[i]].append(angle_list[i])
        rev_dict[labels[i]].append(rev_index[i])
    colors = ['pink', 'orange', 'y', 'blue', 'c', 'g', 'lime', 'red']
    for label, pt_list in pts_dict.items():
        rev = rev_dict[label]
        if label == -2:
            # label -2 intentionally skipped (drawing left disabled)
            # draw_points(pt_list, '+', 'k', 0.1, -3)
            pass
        else:
            if debug and label != 21:
                # debug mode inspects a single cluster only
                continue
            angle_list = angle_dict[label]
            a = mean_angle(angle_list)
            try:
                # print a
                # int() raises ValueError when mean_angle yields a
                # non-finite value (e.g. NaN string/NaN) — cluster is skipped
                idx = int(a / 45)
            except ValueError:
                continue
            # print a
            if len(pt_list) > 50:
                # only clusters with enough support (> 50 points) get a road
                a = 90 - a
                line_list = collect_line_around(pt_list, trace_list, rev, a)
                pt_list = rotate(pt_list, a)
                # NOTE(review): org_list rotates the already-rotated list back
                # and is never used afterwards — confirm it is dead code
                org_list = rotate(pt_list, -a)
                if debug:
                    draw_points(pt_list, 'o', colors[idx], .2, label)
                # else:
                #     draw_points(pt_list, 'o', colors[idx], .1, label)
                try:
                    print "label", label
                    road0, road1 = center_road(pt_list, line_list, debug)
                    # road1 supersedes road0 — TODO confirm intent
                    road0 = road1
                    if not debug:
                        # rotate the road back into the original frame
                        road0 = rotate(road0, -a)
                    draw_center(road0, 'k')
                    draw_line_idx(road0, label)
                except ValueError:
                    print label, "ValueError"
                except TypeError:
                    print label, "TypeError"
def collect_line_around(pt_list, trace_list, rev, rot):
    """
    MOST important to road segment generation.

    trace_list[ti][tj] is a point; collect all the points from beginning to
    end as the trace goes on, keeping only indices that belong to anchor
    points (plus each immediate successor, so consecutive anchors connect).
    Because a vehicle may turn around and trace back, a trace is split into
    separate lines whenever a gap of more than 3 indices occurs.

    :param pt_list: list of anchor points
    :param trace_list: original trace list, list of TaxiData
    :param rev: per-anchor (trace_index, point_index) back-reference
    :param rot: rotation angle applied to every produced line
    :return: list of lines, each a list of rotated [x, y] points
    """
    # Group anchor point indices by their source trace.
    # (Fix: replaced try/except-KeyError grouping with setdefault and
    # removed the unused local `fact_ort` from the original.)
    trace_detail = {}
    for i, _pt in enumerate(pt_list):
        ti, tj = rev[i]
        trace_detail.setdefault(ti, []).append(tj)
    for idx_list in trace_detail.values():
        idx_list.sort()

    line_list = []
    for ti, idx_list in trace_detail.items():
        # Include each index's successor so adjacent anchors form segments.
        iset = set(idx_list)
        for j in idx_list:
            if j + 1 < len(trace_list[ti]):
                iset.add(j + 1)
        fill_list = sorted(iset)

        line = []
        last_j = None
        for j in fill_list:
            pt = trace_list[ti][j]
            if last_j is not None and j - last_j > 3:
                # Gap too large: the vehicle left this area — start a new line.
                if len(line) > 1:
                    line_list.append(rotate(line, rot))
                line = [[pt.x, pt.y]]
            else:
                line.append([pt.x, pt.y])
            last_j = j
        # Flush the trailing line for this trace.
        if len(line) > 1:
            line_list.append(rotate(line, rot))
    return line_list
def run(self):
    """Re-read the source video, apply the enabled transforms to every
    frame, and write the result to `record`; resets the UI button text
    when finished.

    Relies on module-level state (video, record, fourcc, the *_mark flags,
    common, window) — assumed to be set before the thread starts; confirm.
    """
    cap = cv2.VideoCapture(video)
    out = cv2.VideoWriter(record, fourcc, cap.get(cv2.CAP_PROP_FPS),
                          (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                           int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))),
                          True)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # A mark value of -1 means the corresponding transform is enabled.
        if fliplr_mark == -1:
            frame = common.flipLeftToRight(frame)
        if updown_mark == -1:
            frame = common.upsidedown(frame)
        if rotate_mark == -1:
            frame = common.rotate(frame)
        if RGBToWhite_mark == -1:
            frame = common.RGBToWhite(frame)
        if histogram_mark == -1:
            frame = common.histogram_equalization_opti(frame)
        # Saturation adjustment is always applied with the current slider value.
        frame = common.adjustS(frame, adjustS_mark)
        out.write(frame)
    window.pushButton_11.setText("Record/Save")
    out.release()
    # Fix: the capture handle was previously leaked (never released).
    cap.release()
def run(self):
    """Play the source video into the Qt preview: read frames, apply the
    enabled transforms, convert BGR->RGB to a scaled QImage and emit it
    through self.changePixmap while image_or_video == 1.

    Relies on module-level state (video, the *_mark flags, image_or_video,
    common) — assumed to be set before the thread starts; confirm.
    """
    cap = cv2.VideoCapture(video)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # A mark value of -1 means the corresponding transform is enabled.
        if fliplr_mark == -1:
            frame = common.flipLeftToRight(frame)
        if updown_mark == -1:
            frame = common.upsidedown(frame)
        if rotate_mark == -1:
            frame = common.rotate(frame)
        if RGBToWhite_mark == -1:
            frame = common.RGBToWhite(frame)
        if histogram_mark == -1:
            frame = common.histogram_equalization_opti(frame)
        frame = common.adjustS(frame, adjustS_mark)
        # Qt expects RGB ordering; OpenCV delivers BGR.
        rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1],
                                   rgbImage.shape[0], QImage.Format_RGB888)
        v = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
        if image_or_video == 1:
            self.changePixmap.emit(v)
        # Throttle the loop so the UI thread can keep up.
        time.sleep(0.01)
    # Fix: the capture handle was previously leaked (never released).
    cap.release()
def run(self):
    """Apply every enabled transform to the still image, save the result
    to image_save, and reset the UI button text.

    Each transform is gated by a module-level mark flag: a value of -1
    means "enabled". Saturation adjustment is applied unconditionally
    with the current adjustS_mark value.
    """
    img = cv2.imread(image)
    # Table-driven dispatch: (enable-flag, transform) pairs, applied in
    # the same order as the original if-chain.
    pipeline = [
        (fliplr_mark, common.flipLeftToRight),
        (updown_mark, common.upsidedown),
        (rotate_mark, common.rotate),
        (RGBToWhite_mark, common.RGBToWhite),
        (histogram_mark, common.histogram_equalization_opti),
    ]
    for flag, transform in pipeline:
        if flag == -1:
            img = transform(img)
    img = common.adjustS(img, adjustS_mark)
    cv2.imwrite(image_save, img)
    window.pushButton_11.setText("Record/Save")
# Wrap every annotated polygon of this mark as a shapely Polygon so the
# randomly generated negatives below can be collision-tested against them.
shapelyPoligons = []
for poligon in mark.poligons:
    pol = Polygon(poligon)
    shapelyPoligons.append(pol)
# Generating random poligons (negative samples)
refLocalPoligons = []
for i in range(negativeNum):
    # Random orientation in [0, 90) and a random reference rectangle.
    negAngle = random.random() * 90
    negIndex = int(random.random() * len(refRects))
    # Random (x, y) shift inside the image; [::-1] swaps (row, col) -> (x, y).
    shift = (np.random.rand(2) * img.shape).astype(dtype=int)[::-1]
    # rotate and transpose
    negPoligon = refRects[negIndex].copy()
    negPoligon = common.rotate(negPoligon, negAngle)
    negPoligon = common.transpose(negPoligon, shift)
    negPolygonShapely = Polygon(negPoligon)
    # Check collision: a valid negative must lie fully inside the image,
    # must not intersect any annotated polygon, and (when masked) must be
    # contained by the mask.
    noCollisions = True
    if not imgPolygonShapely.contains(negPolygonShapely):
        noCollisions = False
    for shapelyPoligon in shapelyPoligons:
        if shapelyPoligon.intersects(negPolygonShapely):
            noCollisions = False
    if masked and not common.checkMaskContains(mask, negPoligon):
        noCollisions = False
    # NOTE(review): noCollisions is presumably consumed by code beyond this
    # excerpt (e.g. to accept/retry the candidate) — confirm before refactoring.
# Quantize every annotated polygon onto the closest reference rectangle
# (nearest perimeter) at the closest reference angle, keeping only
# candidates that fit inside the image (and the mask, when masked), then
# save the quantized marks to outListPath.
outParser = markParser.MarkParser()
refRects, refP, refAngles = common.loadParams(paramsPath)
for mark in parser.marks:
    refLocalPoligons = []
    for poligon in mark.poligons:
        # Estimate the polygon's orientation from its two longest edges.
        longLine1, longLine2 = common.getLongLines(poligon)
        angle = np.mean([common.getAngle(longLine1), common.getAngle(longLine2)])
        # Snap to the nearest reference angle and nearest reference perimeter.
        qAngle = min(refAngles, key=lambda x: abs(x - angle))
        meanCoord = poligon.mean(axis=0)
        perim = common.perim(poligon)
        rectIndex = refP.index(min(refP, key=lambda x: abs(x - perim)))
        refPoligon = refRects[rectIndex]
        # Place the reference rectangle at the polygon's centroid/orientation.
        refPoligonTr = common.rotate(refPoligon, qAngle)
        refPoligonTr = common.transpose(refPoligonTr, meanCoord)
        refPoligonTrShapely = Polygon(refPoligonTr)
        # Reject candidates that spill outside the image or the mask.
        if not imgPolygonShapely.contains(refPoligonTrShapely):
            continue
        if masked and not common.checkMaskContains(mask, refPoligonTr):
            continue
        refLocalPoligons.append(refPoligonTr)
    outParser.marks.append(markParser.Mark(mark.imageName, refLocalPoligons))
outParser.save(outListPath)
classifier = cPickle.load(fid) img = cv2.imread(imagePath, cv2.CV_LOAD_IMAGE_GRAYSCALE) mask = cv2.imread(maskPath, cv2.CV_LOAD_IMAGE_GRAYSCALE) refRects, refP, refAngles = common.loadParams(paramsPath) outPoligon = [(0, 0), (img.shape[1], 0), (img.shape[1], img.shape[0]), (0, img.shape[0])] imgPolygonShapely = Polygon(outPoligon) for rectIndex in range(len(refRects)): rect = refRects[rectIndex] for angleIndex in range(len(refAngles)): angle = refAngles[angleIndex] for x in range(0, img.shape[1] - 50, 8): for y in range(0, img.shape[0] - 50, 8): rotRect = common.rotate(rect, angle) trRotRect = common.transpose(rotRect, np.asarray((x, y))) if mask[y, x] != 255: continue polygonShapely = Polygon(trRotRect) if not imgPolygonShapely.contains(polygonShapely): continue crop = common.extractCrop(img, trRotRect, size) hist = hog.compute(crop) #print hist #print hist.shape pred = classifier.predict(np.transpose(hist)) print pred