Code Example #1
File: tool.py  Project: huzhenhong/100known
import cv2

def cost_time(func):
    # Decorator that prints how long `func` takes using OpenCV's tick counters.
    def measure_cost_time(*args, **kwargs):
        start = cv2.getCPUTickCount()
        result = func(*args, **kwargs)
        end = cv2.getCPUTickCount()
        # Note: getTickFrequency() matches getTickCount(), so this is approximate.
        cost_time = (end - start) / cv2.getTickFrequency()

        print(f"{func.__name__} cost: {cost_time:.5f} s")
        return result

    return measure_cost_time
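
A possible usage sketch for the example above. The outer wrapper name cost_time is the reconstructed name used here, and the decorated function and its workload are illustrative assumptions, not part of the original project:

import cv2
import numpy as np

@cost_time
def blur_image(img):
    # Any measurable workload will do; Gaussian blur is just an example.
    return cv2.GaussianBlur(img, (31, 31), 0)

blur_image(np.zeros((1080, 1920, 3), dtype=np.uint8))
# Prints something like: blur_image cost: 0.01234 s
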
Code Example #2
    def get_dual_frame_batch(self, seq_id, call_id, f1, f2, dwn_factor=1.0):
        # Build a batch for the frame pair (f1, f2): per-frame line data,
        # positive/negative match labels, and formatted images, optionally
        # downscaled by dwn_factor.
        self.call_id = call_id
        self.f1 = f1
        self.f2 = f2
        t0 = cv2.getCPUTickCount()
        lines_1, negs_1, pos_1 = self.compose_frame_data(seq_id, call_id, f1)
        lines_2, negs_2, pos_2 = self.compose_frame_data(seq_id, call_id, f2)
        t1 = cv2.getCPUTickCount()
        pos_matches = []

        # Pair positive matches index-wise; pad with [] where pos_2 is shorter.
        for p1i in range(len(pos_1)):
            p1 = pos_1[p1i]
            p2 = pos_2[p1i] if p1i < len(pos_2) else []
            pos_matches.append([p1, p2])

        if dwn_factor != 1.0:
            # Scale line coordinates to match the downsampled images.
            lines_1 = dwn_factor * lines_1
            lines_2 = dwn_factor * lines_2
        lines = [lines_1, lines_2]
        neg_matches = [negs_1, negs_2]

        t2 = cv2.getCPUTickCount()
        self.img_ids = []
        self.pair_ids = []
        self.seq_id = seq_id
        im1 = self.get_image_gs(call_id, seq_id, f1)
        im2 = self.get_image_gs(call_id, seq_id, f2)
        t3 = cv2.getCPUTickCount()
        dsize = (0, 0)  # (0, 0) lets OpenCV derive the output size from fx/fy.
        im1 = cv2.resize(im1,
                         dsize,
                         fx=dwn_factor,
                         fy=dwn_factor,
                         interpolation=cv2.INTER_LINEAR)
        im2 = cv2.resize(im2,
                         dsize,
                         fx=dwn_factor,
                         fy=dwn_factor,
                         interpolation=cv2.INTER_LINEAR)
        # ims, im_sizes, lus = self.format_images(im1, im2)
        ims1, im_size1, lu1 = self.format_image(im1)
        ims2, im_size2, lu2 = self.format_image(im2)
        ims = np.stack((ims1, ims2), axis=0)
        im_sizes = im_size1
        lus = [lu1, lu2]
        lines = self.format_lines(lines, im_sizes, lus)
        lines = [
            normalize_lines_for_gridsampler(ims[0], lines[0]),
            normalize_lines_for_gridsampler(ims[1], lines[1])
        ]
        if self.is_report_dist:
            return ims, lines, neg_matches, pos_matches, f2 - f1, self.img_ids, self.pair_ids
        return ims, lines, neg_matches, pos_matches
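
A side note on the resize calls above: passing dsize=(0, 0) makes OpenCV compute the output size from the fx/fy scale factors. A minimal self-contained sketch (the input array here is an assumption):

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)
small = cv2.resize(img, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
print(small.shape)  # (240, 320, 3)
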
Code Example #3
# while success and numFramesRemaining>0:
# 	cameraWriter.write(frame)
# 	success,frame=cameraCapture.read()
# 	numFramesRemaining-=1
# cameraCapture.release()

# live photos
import cv2

clicked = False

def onMouse(event, x, y, flags, param):
    # Stop the preview loop when the left mouse button is released.
    global clicked
    if event == cv2.EVENT_LBUTTONUP:
        clicked = True
        print("mouse left button up!")
cameraCapture = cv2.VideoCapture(0)
cv2.namedWindow("MyFace")
cv2.setMouseCallback("MyFace", onMouse)
print("Showing camera captures. Click the window or press any key to stop.")
success, frame = cameraCapture.read()
t = cv2.getTickCount()
while success and cv2.waitKey(1) == -1 and not clicked:
    # Per-frame FPS from the tick counter that matches getTickFrequency().
    now = cv2.getTickCount()
    fps = cv2.getTickFrequency() / (now - t)
    t = now
    cv2.flip(frame, 1, frame)  # Mirror the frame horizontally.
    cv2.putText(frame, "FPS: %.1f" % fps, (20, 20),
                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))
    cv2.imshow("MyFace", frame)
    success, frame = cameraCapture.read()
cv2.destroyWindow("MyFace")
cameraCapture.release()
print("Camera has been released.")

Code Example #4
import cv2
import numpy as np

# Start timing; getTickCount pairs with getTickFrequency for seconds.
start = cv2.getTickCount()

# Read an image and adjust its contrast and brightness.
img = cv2.imread('test1.jpg')
res = np.uint8(np.clip(0.8 * img + 80, 0, 255))

# Stop timing and print the elapsed time in seconds.
end = cv2.getTickCount()

print((end - start) / cv2.getTickFrequency())
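
cv2.getTickFrequency() reports the tick rate of cv2.getTickCount(); cv2.getCPUTickCount() counts CPU cycles and has no matching frequency helper in OpenCV. A minimal sketch of both idioms, with cv2.blur standing in as an arbitrary workload:

import cv2
import numpy as np

img = np.zeros((1000, 1000, 3), dtype=np.uint8)

# Wall-clock seconds: getTickCount ticks divided by getTickFrequency.
t0 = cv2.getTickCount()
cv2.blur(img, (15, 15))
t1 = cv2.getTickCount()
print('seconds:', (t1 - t0) / cv2.getTickFrequency())

# Raw CPU cycles: report the tick count itself rather than dividing
# by getTickFrequency, whose units do not match.
c0 = cv2.getCPUTickCount()
cv2.blur(img, (15, 15))
c1 = cv2.getCPUTickCount()
print('cpu ticks:', c1 - c0)
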
Code Example #5
File: cv_3 np数组.py  Project: by777/python_pratice
import cv2 as cv
import numpy as np


def create_image():
    # img[:, :, 0] = np.ones([400, 400]) * 127
    # Single-channel grayscale; equivalent to:
    # img = np.ones([400, 400, 1], np.uint8) * 127
    # cv.imshow('new image', img)
    # Initialize a 2-D array.
    m1 = np.ones([3, 3], dtype=np.uint8)

    m1.fill(122.388)  # Stored as uint8, so the value is truncated to 122.
    print(m1.reshape([1, 9]))


def inverse(img):
    # Invert every pixel (bitwise NOT).
    dst = cv.bitwise_not(img)
    cv.imshow('inverse', dst)


src = cv.imread(r'C:\Users\1900\Pictures\demo.jpg')
# cv.namedWindow('input image', cv.WINDOW_AUTOSIZE)
# cv.imshow('input image', src)
t1 = cv.getTickCount()
# create_image()
inverse(src)
# access_pixels(src)
t2 = cv.getTickCount()  # Must match the counter used for t1.
print('time (ms):', 1000 * (t2 - t1) / cv.getTickFrequency())
cv.waitKey(0)
cv.destroyAllWindows()
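
The fill(122.388) call above silently truncates the float because the array dtype is uint8, and bitwise_not inverts every bit (255 - value for uint8). A minimal NumPy-only sketch of both behaviors:

import numpy as np

m = np.ones((3, 3), dtype=np.uint8)
m.fill(122.388)
print(m[0, 0])                        # 122: the float is truncated on store
print(np.bitwise_not(np.uint8(122)))  # 133: same as 255 - 122
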
Code Example #6
File: run.py  Project: songyy14/3D_surveillance
def main():

    # Output settings.
    DRAW = True
    SAVE_TO_FILE = False
    SAVE_FRAMES = False

    font = cv2.FONT_HERSHEY_DUPLEX
    font_scale = 0.8
    thickness = 2

    # Tracking settings.
    HOG = True
    FIXEDWINDOW = True
    MULTISCALE = True
    LAB = False

    fe = FeatureExtractor('trinet/checkpoint/checkpoint.ckpt-25000')
    gallery = Gallery()

    PATH_TOP = 'C:/E/Matlab/Object Tracking/dataset'
    cams = ['cam2', 'cam3']

    queried_pids = dict()

    trackers = dict()
    outputs = dict()
    fdetects = dict()
    camera_matrices = dict()
    for cam in cams:
        queried_pids[cam] = []
        trackers[cam] = []
        fdetects[cam] = open(os.path.join(PATH_TOP, cam, 'detect.txt'), 'r')
        camera_matrices[cam] = read_camera_matrix(
            os.path.join(PATH_TOP, cam, 'camera_matrix_our.txt'))
        if DRAW:
            cv2.namedWindow(str(cam), 0)
        if SAVE_TO_FILE:
            outputs[cam] = open(os.path.join(PATH_TOP, cam, 'track23.txt'),
                                'w')

    start_count = cv2.getCPUTickCount()
    num_frames = len(os.listdir(os.path.join(PATH_TOP, 'cam1', 'img')))

    # Main loop.
    for i in range(1, num_frames + 1):
        print('\n#frame %d' % i)
        for cam in cams:
            print('')
            print('       ', cam)
            print('')
            frame = cv2.imread(
                os.path.join(PATH_TOP, cam, 'img', '%04d.jpg' % i))

            # Update tracking positions and delete some bad trackers.
            j = 0
            while j < len(trackers[cam]):
                trackers[cam][j].update(frame)
                if trackers[cam][j].out_of_sight:
                    print("Delete tracker %d: out of sight" %
                          trackers[cam][j].pid)
                    del trackers[cam][j]
                elif trackers[cam][j].occluded_so_long:
                    print("Delete tracker %d: occluded for too long" %
                          trackers[cam][j].pid)
                    del trackers[cam][j]
                else:
                    j = j + 1

            # Add new trackers every 10 frames, including the first frame.
            # time_to_detect (a per-camera frame offset) is defined elsewhere.
            if i % 10 == time_to_detect[cam]:
                # Note we have not yet deleted the model-drifted trackers.
                # Good trackers are sometimes flagged as model drift because the
                # criterion is imperfect, so deleting and re-adding them keeps
                # the tracking result looking consistent.

                # First, delete model-drifted trackers.
                j = 0
                while j < len(trackers[cam]):
                    if trackers[cam][j].model_drift:
                        print("Delete tracker %d due to model drift" %
                              trackers[cam][j].pid)
                        del trackers[cam][j]
                    else:
                        j = j + 1

                # Then, add new trackers.

                # Read detection results of current frame.
                # Locate current frame.
                while fdetects[cam].readline() != ('#frame %d\n' % i):
                    pass
                num_detect = int(fdetects[cam].readline().split()[0])
                detect_pos = np.zeros((num_detect, 4), dtype=np.int32)
                for j in range(num_detect):
                    line = fdetects[cam].readline()
                    splits = line.split()
                    tmp = [int(k) for k in splits]
                    # Detections are (x1, y1, x2, y2); convert to (x, y, w, h).
                    detect_pos[j, :] = [
                        tmp[0], tmp[1], tmp[2] - tmp[0], tmp[3] - tmp[1]
                    ]

                # Put tracking results together.
                track_pos = np.zeros((len(trackers[cam]), 4))
                for j in range(len(trackers[cam])):
                    track_pos[j, :] = trackers[cam][j].get_roi()

                # Determine which detection boxes are used to initialize new trackers.
                indices = detection_query(detect_pos, track_pos)

                if len(indices) > 0:
                    # Person re-identification.
                    feature_list = []
                    blacklist = []
                    for j in range(len(indices)):
                        x, y, w, h = detect_pos[indices[j], :]
                        person_img = frame[y:y + h, x:x + w, :]
                        person_feature = fe.feature(adjust_image(person_img))
                        feature_list.append(person_feature)
                        # Get blacklist for this person.
                        one_blacklist = get_blacklist(x + w / 2, y + h, cam,
                                                      trackers,
                                                      camera_matrices)
                        blacklist.append(one_blacklist)

                    features_p = np.vstack(feature_list)
                    gallery_pids = gallery.get_pids()
                    pids_this_cam = [tracker.pid for tracker in trackers[cam]]
                    queried_pids[cam] = gallery.query(features_p, cam, i,
                                                      pids_this_cam, blacklist)

                    # Initialize new trackers with the queried pids.
                    # frame_b = frame.copy()
                    # new_persons = False
                    for j in range(len(indices)):
                        tracker = KCF.kcftracker(queried_pids[cam][j], HOG,
                                                 FIXEDWINDOW, MULTISCALE, LAB)
                        tracker.init(list(detect_pos[indices[j], :]), frame)
                        trackers[cam].append(tracker)
                        if queried_pids[cam][j] in gallery_pids:
                            print('---------------- Resume tracker %d' %
                                  queried_pids[cam][j])
                        else:
                            print('---------------- Add tracker %d' %
                                  queried_pids[cam][j])
                            # new_persons = True
                        # x, y, w, h = detect_pos[indices[j], :]
                        # cv2.rectangle(frame_b, (x, y), (x + w, y + h), get_color(cam, -1, queried_pids), thickness, 8)
                        # cv2.putText(frame_b, str(queried_pids[cam][j]), (x, y), font, font_scale, get_color(cam, -1, queried_pids), thickness)

                    # if new_persons:
                    # cv2.imwrite(os.path.join(PATH_TOP, 'cam1', 'gallery', '%d.jpg' % i), frame_b)

            # Draw and save trackers positions to file.
            if SAVE_TO_FILE:
                outputs[cam].write('#frame\t%d\n' % i)
                outputs[cam].write('%d\n' % len(trackers[cam]))

            for j in range(len(trackers[cam])):
                x, y, w, h = trackers[cam][j].get_roi()
                if SAVE_TO_FILE:
                    outputs[cam].write('%d\t%d\t%d\t%d\t%d\n' %
                                       (trackers[cam][j].pid, x, y, w, h))

                if DRAW:
                    cv2.rectangle(
                        frame, (x, y), (x + w, y + h),
                        get_color(cam, trackers[cam][j].pid, queried_pids),
                        thickness, 8)
                    cv2.putText(
                        frame, str(trackers[cam][j].pid), (x, y), font,
                        font_scale,
                        get_color(cam, trackers[cam][j].pid,
                                  queried_pids), thickness)

            if DRAW:
                cv2.imshow(str(cam), frame)
                cv2.waitKey(1)
                if i == num_frames:
                    # Hold the last frame until a key is pressed.
                    cv2.waitKey(0)

            if SAVE_FRAMES:
                # Save frames with tracking results.
                cv2.imwrite(
                    os.path.join(PATH_TOP, cam, 'img_tracking_reid',
                                 '%04d.jpg' % i), frame)

    # Release resources.
    fe.close()
    for cam in cams:
        fdetects[cam].close()
    if SAVE_TO_FILE:
        for cam in cams:
            outputs[cam].close()

    # Caveat: getTickFrequency() matches getTickCount(), so dividing CPU
    # ticks by it only approximates seconds.
    elapsed_time_s = float(cv2.getCPUTickCount() -
                           start_count) / cv2.getTickFrequency()
    fps = num_frames / elapsed_time_s
    print('%f fps' % fps)
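
The detect.txt format implied by the parsing code above is a '#frame N' header line, a detection count, then one 'x1 y1 x2 y2' box per line. A minimal standalone parser sketch under that assumption (the function name and return convention are illustrative):

def read_frame_detections(f, frame_idx):
    # Skip ahead to the requested frame header.
    while f.readline() != '#frame %d\n' % frame_idx:
        pass
    num_detect = int(f.readline().split()[0])
    boxes = []
    for _ in range(num_detect):
        x1, y1, x2, y2 = (int(v) for v in f.readline().split())
        boxes.append((x1, y1, x2 - x1, y2 - y1))  # (x, y, w, h)
    return boxes
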
Code Example #7
File: ElapsedTimeTest.py  Project: zhy29563/Learning
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv

imgRaw = np.zeros((2000, 2000), dtype=np.uint8)

time1 = cv.getCPUTickCount()
rect = cv.rectangle(imgRaw, (1251, 50), (1216, 1307), (255, 255, 255), -1)
time2 = cv.getCPUTickCount()
# Caveat: getTickFrequency() corresponds to getTickCount(), so this ratio
# only approximates seconds.
print((time2 - time1) / cv.getTickFrequency())
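
For portable wall-clock timing without the tick/frequency pairing, Python's standard time.perf_counter() is an alternative. A minimal sketch using the same rectangle workload:

import time

import cv2 as cv
import numpy as np

img = np.zeros((2000, 2000), dtype=np.uint8)

t0 = time.perf_counter()
cv.rectangle(img, (1251, 50), (1216, 1307), 255, -1)
t1 = time.perf_counter()
print('%.6f s' % (t1 - t0))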