Example #1
def do_fr(imgs_paths):

    modelname = basename(splitext(FR_netcfg)[0])
    weight = basename(splitext(FR_weights)[0])

    model_dir = join('output_txt/', modelname + '_' + valid_dataset)
    final_dir = join(model_dir, weight)

    if not isdir(model_dir):
        mkdir(model_dir)
    if not isdir(final_dir):
        mkdir(final_dir)
    for img_path in imgs_paths:
        print 'FRD processing on', img_path
        img = cv2.imread(img_path)
        frds, wh = dn.detect(FR_net, FR_meta, img, threshold)
        txt_file = open(
            join(final_dir,
                 basename(splitext(img_path)[0]) + '.txt'), 'w')
        if len(frds) == 0:
            txt_file.write('')
            txt_file.close()
            continue
        for frd in frds:
            cx, cy, w, h = (np.array(frd[2])).tolist()
            tl = np.array([cx - w / 2., cy - h / 2.])
            br = np.array([cx + w / 2., cy + h / 2.])
            label_FR = Label(tl=tl, br=br)
            tl = label_FR.tl().astype(int)
            br = label_FR.br().astype(int)
            txt_file.write(frd[0] + ' ' + str('%.2f' % frd[1]) + ' ' +
                           str(tl[0]) + ' ' + str(tl[1]) + ' ' + str(br[0]) +
                           ' ' + str(br[1]) + '\n')
            print '\twrote result to', join(
                final_dir,
                basename(splitext(img_path)[0]) + '.txt')
        txt_file.close()
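do_fr writes one detection per line in a 'class confidence x1 y1 x2 y2' layout. Below is a minimal standalone sketch of that formatting step, assuming a darknet-style detection tuple (name, prob, (cx, cy, w, h)) in pixel units as used above; detection_to_line is a hypothetical helper, not part of the project.

def detection_to_line(det):
    # det is a darknet-style tuple: (class name, probability, (cx, cy, w, h)) in pixels
    name, prob, (cx, cy, w, h) = det
    x1, y1 = int(cx - w / 2.), int(cy - h / 2.)
    x2, y2 = int(cx + w / 2.), int(cy + h / 2.)
    # same 'class confidence x1 y1 x2 y2' layout that do_fr writes per line
    return '%s %.2f %d %d %d %d' % (name, prob, x1, y1, x2, y2)

# detection_to_line(('front', 0.93, (120., 80., 60., 40.)))  -> 'front 0.93 90 60 150 100'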
def augment_sample(I,pts,dim):

	maxsum,maxangle = 120,np.array([80.,80.,45.])
	angles = np.random.rand(3)*maxangle
	if angles.sum() > maxsum:
		angles = (angles/angles.sum())*(maxangle/maxangle.sum())

	I = im2single(I)
	iwh = getWH(I.shape)

	whratio = random.uniform(2.,4.)
	wsiz = random.uniform(dim*.2,dim*1.)
	
	hsiz = wsiz/whratio

	dx = random.uniform(0.,dim - wsiz)
	dy = random.uniform(0.,dim - hsiz)

	pph = getRectPts(dx,dy,dx+wsiz,dy+hsiz)
	pts = pts*iwh.reshape((2,1))
	T = find_T_matrix(pts2ptsh(pts),pph)

	H = perspective_transform((dim,dim),angles=angles)
	H = np.matmul(H,T)

	Iroi,pts = project(I,H,pts,dim)
	
	hsv_mod = np.random.rand(3).astype('float32')
	hsv_mod = (hsv_mod - .5)*.3
	hsv_mod[0] *= 360
	Iroi = hsv_transform(Iroi,hsv_mod)
	Iroi = np.clip(Iroi,0.,1.)

	pts = np.array(pts)

	if random.random() > .5:
		Iroi,pts = flip_image_and_pts(Iroi,pts)

	tl,br = pts.min(1),pts.max(1)
	llp = Label(0,tl,br)

	return Iroi,llp,pts
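The crop geometry in augment_sample is driven by a random aspect ratio in [2, 4], a width between 0.2*dim and dim, and an offset that keeps the rectangle inside the dim x dim canvas. A self-contained sketch of just that sampling step in plain numpy; sample_target_rect is illustrative only and the project's getRectPts helper is not used here.

import random
import numpy as np

def sample_target_rect(dim):
    # random aspect ratio, width, and placement, mirroring the sampling in augment_sample
    whratio = random.uniform(2., 4.)
    wsiz = random.uniform(dim * .2, dim * 1.)
    hsiz = wsiz / whratio
    dx = random.uniform(0., dim - wsiz)
    dy = random.uniform(0., dim - hsiz)
    # corners (tl, tr, br, bl) as a 2x4 array: x coordinates on top, y coordinates below
    xs = np.array([dx, dx + wsiz, dx + wsiz, dx])
    ys = np.array([dy, dy, dy + hsiz, dy + hsiz])
    return np.stack([xs, ys])

# rect = sample_target_rect(208)   # e.g. a 208x208 training canvas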
def fr_detect(img):
	print '\t\t\tdetecting front and rear using FRD..., Model:', FR_netcfg
	results, wh = dn.detect(FR_net, FR_meta, img, threshold)

	# the results are sorted by probability, from high to low
	if len(results):
		print '\t\t\tFR detection completed'
		FRs = []
		category = []
		for i, result in enumerate(results):
			WH = np.array(img.shape[1::-1], dtype=float)
			cx, cy, w, h = (np.array(result[2]) / np.concatenate((WH, WH))).tolist()
			tl = np.array([cx - w / 2., cy - h / 2.])
			br = np.array([cx + w / 2., cy + h / 2.])
			print '\t\t\tFR number', i, 'position:', tl, br, 'prob:', result[1]
			FRs.append(Label(tl=tl, br=br))
			category.append(result[0])
		return np.array(FRs), category

	else:
		print '\t\t\tFR detection failed'
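The conversion from darknet's center-format (cx, cy, w, h) box to normalized top-left/bottom-right corners recurs throughout these examples. A minimal standalone version, assuming the box is given in pixels and the image shape is the usual (H, W, C); center_box_to_corners is a hypothetical name.

import numpy as np

def center_box_to_corners(box, img_shape):
    # box is (cx, cy, w, h) in pixels; img_shape is the (H, W, C) tuple from cv2.imread
    WH = np.array(img_shape[1::-1], dtype=float)   # (width, height)
    cx, cy, w, h = (np.array(box) / np.concatenate((WH, WH))).tolist()
    tl = np.array([cx - w / 2., cy - h / 2.])
    br = np.array([cx + w / 2., cy + h / 2.])
    return tl, br

# center_box_to_corners((320., 240., 100., 60.), (480, 640, 3))
# -> (array([0.421875, 0.4375]), array([0.578125, 0.5625]))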
Example #4
def do_fr_after_YOLO(imgs_paths):
    output_dir = 'output_txt/fr_after_YOLO_kr'
    # using YOLOv3 trained on COCO
    YOLO_weights = 'data/vehicle-detector/yolov3.weights'
    YOLO_netcfg = 'data/vehicle-detector/yolov3.cfg'
    YOLO_data = 'data/vehicle-detector/coco.data'

    print 'YOLOv3 weights pre-loading...'
    YOLO_net = dn.load_net(YOLO_netcfg, YOLO_weights, 0)
    YOLO_meta = dn.load_meta(YOLO_data)
    threshold = 0.5

    for img_path in imgs_paths:
        print 'detecting cars in', img_path
        img = cv2.imread(img_path)
        results, wh = dn.detect(YOLO_net, YOLO_meta, img, threshold)
        txt_file = open(
            join(output_dir,
                 basename(splitext(img_path)[0]) + '.txt'), 'w')
        if len(results) == 0:
            txt_file.write('')
            txt_file.close()
            continue
        for result in results:
            if result[0] in ['car', 'bus']:
                WH = np.array(img.shape[1::-1], dtype=float)
                cx, cy, w, h = (np.array(result[2]) / np.concatenate(
                    (WH, WH))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label_sub = Label(tl=tl, br=br)
                sub_img = crop_region(img, label_sub)

                # run FRD on the vehicle crop; only the highest-probability detection is used
                print '\tFRD processing...'
                frd, _ = dn.detect(FR_net, FR_meta, sub_img, threshold)
                if len(frd) == 0:
                    continue
                WH_sub = np.array(sub_img.shape[1::-1], dtype=float)
                cx, cy, w, h = (np.array(frd[0][2]) / np.concatenate(
                    (WH_sub, WH_sub))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label_FR = Label(tl=tl, br=br)
                label_scale_up = Label(
                    tl=label_sub.tl() * WH + label_FR.tl() * WH_sub,
                    br=label_sub.tl() * WH + label_FR.br() * WH_sub)
                tl = label_scale_up.tl().astype(int)
                br = label_scale_up.br().astype(int)
                txt_file.write(frd[0][0] + ' ' + str('%.2f' % frd[0][1]) +
                               ' ' + str(tl[0]) + ' ' + str(tl[1]) + ' ' +
                               str(br[0]) + ' ' + str(br[1]) + '\n')
                print '\twrote result to', join(
                    output_dir,
                    basename(splitext(img_path)[0]) + '.txt')
        txt_file.close()
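The label_scale_up step above maps a front/rear box detected inside the vehicle crop back to original-image pixels by anchoring it at the vehicle's top-left corner. A plain-numpy sketch of that mapping, with the project's Label class omitted; nested_box_to_image is illustrative only.

import numpy as np

def nested_box_to_image(vehicle_tl, fr_tl, fr_br, WH_img, WH_sub):
    # vehicle_tl: vehicle top-left, normalized to the full image
    # fr_tl, fr_br: front/rear corners, normalized to the vehicle crop
    # WH_img, WH_sub: (width, height) of the full image and of the crop, in pixels
    tl = vehicle_tl * WH_img + fr_tl * WH_sub
    br = vehicle_tl * WH_img + fr_br * WH_sub
    return tl.astype(int), br.astype(int)

# nested_box_to_image(np.array([.25, .25]),
#                     np.array([.1, .2]), np.array([.6, .8]),
#                     np.array([1280., 720.]), np.array([400., 300.]))
# -> (array([360, 240]), array([560, 420]))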
Example #5
	def __init__(self, cl, pts, prob):
		self.pts = pts
		tl = np.amin(pts, 1)
		br = np.amax(pts, 1)
		Label.__init__(self, cl, tl, br, prob)

	def getSquare(self):
		tl, br = self.pts.min(1), self.pts.max(1)
		return Label(-1, tl, br)
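getSquare reduces a 2xN array of corner points (x coordinates in the first row, y coordinates in the second) to its enclosing axis-aligned box. A tiny numpy illustration of that reduction, independent of the Label class.

import numpy as np

# corner points as a 2xN array: x coordinates in row 0, y coordinates in row 1
pts = np.array([[0.2, 0.8, 0.8, 0.2],
                [0.3, 0.3, 0.6, 0.6]])
tl = pts.min(1)   # array([0.2, 0.3]) -- top-left of the enclosing box
br = pts.max(1)   # array([0.8, 0.6]) -- bottom-right of the enclosing box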
Example #7
			# R = [r for r in R if r[0] in ['front', 'rear']]

			print '\t\t%d cars found' % len(R)

			if len(R):

				Iorig = cv2.imread(img_path)
				WH = np.array(Iorig.shape[1::-1],dtype=float)
				Lcars = []

				# each detected vehicle
				for i,r in enumerate(R):
					# normalize the bounding box coordinates to the original image size
					cx,cy,w,h = (np.array(r[2])/np.concatenate( (WH,WH) )).tolist()
					# tl-> top left, br-> bottom right
					tl = np.array([cx - w/2., cy - h/2.])
					br = np.array([cx + w/2., cy + h/2.])
					label = Label(0,tl,br)
					Icar = crop_region(Iorig,label)

					Lcars.append(label)

					cv2.imwrite('%s/%s_%dcar.png' % (output_dir,bname,i),Icar)

				lwrite('%s/%s_cars.txt' % (output_dir,bname),Lcars)

	except:
		traceback.print_exc()
		sys.exit(1)

	sys.exit(0)
def union_area(bb_1, bb_2):
    final_tl = np.minimum.reduce([bb_1[0], bb_1[1], bb_2[0], bb_2[1]])
    final_br = np.maximum.reduce([bb_1[0], bb_1[1], bb_2[0], bb_2[1]])
    return Label(tl=final_tl, br=final_br)
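union_area returns the smallest box enclosing two boxes given as (tl, br) pairs. The same reduction in plain numpy, without the project's Label class; union_box is illustrative only.

import numpy as np

def union_box(bb_1, bb_2):
    # each box is a (tl, br) pair of 2-vectors; the result encloses both
    tl = np.minimum.reduce([bb_1[0], bb_1[1], bb_2[0], bb_2[1]])
    br = np.maximum.reduce([bb_1[0], bb_1[1], bb_2[0], bb_2[1]])
    return tl, br

# union_box((np.array([0., 0.]), np.array([2., 2.])),
#           (np.array([1., 1.]), np.array([3., 4.])))
# -> (array([0., 0.]), array([3., 4.]))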
Example #9
        R = [r for r in R if r[0] in ['car', 'bus']]

        print '\t\t%d cars found' % len(R)

        if len(R):

            WH = np.array(img.shape[1::-1], dtype=float)
            Lcars = []

            for i, r in enumerate(R):
                cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                    (WH, WH))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label = Label(0, tl, br)
                Lcars.append(label)

            lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)

    # license plate detection
    try:

        # colors are BGR in opencv
        YELLOW = (0, 255, 255)
        RED = (0, 0, 255)
        PINK = (232, 28, 232)

        input_dir = output_dir
        lp_threshold = 0.5