Example #1
def tryload():

    # -- Load cascades and print OpenCV/CPU info
    print("-"*100)
    print("Threads:", cv2.getNumThreads())
    print("Cores:", cv2.getNumberOfCPUs())
    print("OpenCV optimized:", cv2.useOptimized())
    print("-"*100)
    file = frontalface_cascades.read()
    if not frontalface_cascade.load(cv2.samples.findFile(file)):
        print(' -- (!)Error loading "face cascade"')
        exit(0)
    file = profileface_cascades.read()
    if not profileface_cascade.load(cv2.samples.findFile(file)):
        print(' -- (!)Error loading "profile face cascade"')
        exit(0)
    file = eyes_cascades.read()
    if not eyes_cascade.load(cv2.samples.findFile(file)):
        print(' -- (!)Error loading "eyes cascade"')
        exit(0)
    file = smile_cascades.read()
    if not smile_cascade.load(cv2.samples.findFile(file)):
        print(' -- (!)Error loading "smile cascade"')
        exit(0)
    playvideo()
Example #2
def process_video(bgsubImpl,
                  vidreader,
                  vidwriter,
                  num_blocks=6,
                  threaded=False):
    vidwriter.build()
    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()
    N = vidreader.frames
    frameIdx = num_blocks
    while True:
        while len(pending) > 0 and pending[0].ready():
            task = pending.popleft()
            frame, idx = task.get()
            vidwriter.write(frame)
            print('Processing ... {0}%\r'.format(idx * 100 // N), end='')
        if len(pending) < threadn and frameIdx < N:
            (cnt, frames) = vidreader.read(frameIdx - num_blocks, num_blocks)
            if cnt == num_blocks:
                if threaded:
                    task = pool.apply_async(bgsub_process,
                                            [bgsubImpl, frames, frameIdx])
                else:
                    task = DummyTask(bgsub_process(bgsubImpl, frames,
                                                   frameIdx))
                pending.append(task)
            frameIdx += 1
        if len(pending) == 0:
            break
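Several examples on this page push either a ThreadPool AsyncResult or a DummyTask onto the same pending deque. DummyTask is never defined here; a minimal sketch (modelled on the helper from OpenCV's video_threaded.py sample) providing the required ready()/get() interface would be:

class DummyTask:
    """Wrap an already-computed result so it mimics an AsyncResult."""
    def __init__(self, data):
        self.data = data

    def ready(self):
        # The work was done synchronously, so the result is always ready.
        return True

    def get(self):
        return self.data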
Example #3
    def run(self):
        threadn = cv2.getNumberOfCPUs() - 1
        pool = ThreadPool(processes=threadn)
        process_frame = Joint_View()
        pending = deque()

        cv2.namedWindow('Threaded Video', cv2.WINDOW_NORMAL)
        for _ in range(self.duration):

            frame = next(self)
            task = pool.apply_async(process_frame.transform, (frame, ))
            pending.append(task)
        print("processing done")
        for _ in range(self.duration):

            if len(pending) > 0 and pending[0].ready():
                result = pending.popleft().get()
                if self.isLabeling:
                    data, label = next(self.label)
                    cv2.polylines(result, [data], False, (0, 0, 255), 2)
                    cv2.polylines(result, [label], False, (180, 105, 255), 2)

                self.combine_writer.write(result)
                if (self.isViz):
                    cv2.imshow('Threaded Video', result)
                    cv2.waitKey(100)
Example #4
    def detect_video(self, inputFname, outputFname=None, skip_frames=1):
        """Detects FEX from a video file.

        Args:
            inputFname (str): Path to video file
            outputFname (str, optional): Path to output file. Defaults to None.
            skip_frames (int, optional): Process only every skip_frames-th frame, e.g. to trade completeness for speed when not every frame needs to be processed. Defaults to 1.

        Returns:
            dataframe: Prediction results dataframe if outputFname is None. Returns True if outputFname is specified.
        """
        self.info['inputFname'] = inputFname
        self.info['outputFname'] = outputFname 
        init_df = pd.DataFrame(columns=self["output_columns"])
        if outputFname:
            init_df.to_csv(outputFname, index=False, header=True)

        cap = cv.VideoCapture(inputFname)

        # Determine whether to use multiprocessing.
        n_jobs = self['n_jobs']
        if n_jobs==-1:
            thread_num = cv.getNumberOfCPUs() # get available cpus
        else: 
            thread_num = n_jobs
        pool = ThreadPool(processes=thread_num)
        pending_task = deque()
        counter = 0
        processed_frames = 0
        frame_got = True
        detected_faces = []
        print("Processing video.")
        while True:
            # Consume the queue.
            while len(pending_task) > 0 and pending_task[0].ready():
                df = pending_task.popleft().get()
                # Save to output file.
                if outputFname:
                    df.to_csv(outputFname, index=True, header=False, mode='a')
                else:
                    init_df = pd.concat([init_df, df], axis=0)
                processed_frames = processed_frames + 1
         
            if not frame_got:
                break
         
            # Populate the queue.
            if len(pending_task) < thread_num:
                frame_got, frame = cap.read()
                # Process only every skip_frames-th frame.
                if counter%skip_frames == 0:
                    if frame_got:
                        task = pool.apply_async(self.process_frame, (frame.copy(), counter))
                        pending_task.append(task)
                counter = counter + 1
        cap.release() 
        if outputFname:
            return True 
        else:
            return init_df
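Assuming this method lives on a detector object exposing "output_columns", "n_jobs" and a process_frame() helper (the class name below is a placeholder, not taken from this page), a call might look like:

# Hypothetical usage sketch
detector = Detector(n_jobs=-1)  # -1 -> spread work over cv.getNumberOfCPUs() threads
fex_df = detector.detect_video("clip.mp4", skip_frames=5)          # keep results in memory
detector.detect_video("clip.mp4", outputFname="clip_fex.csv")      # or stream them to CSV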
Example #5
    def run(self):
        self.results = np.array([])

        # Let subscriber know the total
        self.startProgress.emit(config.NISSL_COUNT)

        # For updating the progress in the UI
        self.total = 1

        # Compute SIFT for region before starting
        self.kp1, self.des1 = feature.extract_sift(self.im)

        # Set multithreading if capable
        if config.MULTITHREAD:
            pool = ThreadPool(processes = cv2.getNumberOfCPUs())
        else:
            pool = ThreadPool(processes = 1)

        # Total number of images to compare against
        nissl = range(1, config.NISSL_COUNT + 1)

        # Begin mapping process
        pool.map(self.process_level, nissl)
        pool.close()
        pool.join()

        # Tell UI the results
        self.endProgress.emit(self.results)
Example #6
    def __init__(self, templates, ratio=0.75):
        self.templates = templates
        self.ratio = ratio

        flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})
        self.pool = ThreadPool(processes=cv2.getNumberOfCPUs())
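FLANN_INDEX_KDTREE is assumed to be defined at module level; the conventional values (also used in Example #17 further down) and the ratio test such a matcher is typically paired with are sketched here:

FLANN_INDEX_KDTREE = 1  # KD-tree index, for float descriptors such as SIFT/SURF
FLANN_INDEX_LSH = 6     # LSH index, for binary descriptors such as ORB/BRISK

# Hedged sketch of how the matcher and self.ratio might be combined:
# raw = self.matcher.knnMatch(desc, template_desc, k=2)
# good = [m for m, n in raw if m.distance < self.ratio * n.distance]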
Example #7
    def detect_video(self,
                     inputFname,
                     outputFname=None,
                     skip_frames=1,
                     verbose=False):
        """Detects FEX from a video file.
        Args:
            inputFname (str): Path to video file
            outputFname (str, optional): Path to output file. Defaults to None.
            skip_frames (int, optional): Process only every skip_frames-th frame, e.g. to trade completeness for speed when not every frame needs to be processed. Defaults to 1.
        Returns:
            dataframe: Prediction results dataframe if outputFname is None. Returns True if outputFname is specified.
        """
        self.info["inputFname"] = inputFname
        self.info["outputFname"] = outputFname
        init_df = pd.DataFrame(columns=self["output_columns"])
        if outputFname:
            init_df.to_csv(outputFname, index=False, header=True)

        cap = cv2.VideoCapture(inputFname)
        length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frames_to_process = int(np.ceil(length / skip_frames))

        # Determine whether to use multiprocessing.
        n_jobs = self["n_jobs"]
        if n_jobs == -1:
            thread_num = cv2.getNumberOfCPUs()  # get available cpus
        else:
            thread_num = n_jobs
        if verbose:
            print(f"Using {thread_num} cpus")
        pool = ThreadPool(processes=thread_num)
        pending_task = deque()
        counter = 0
        processed_frames = 0
        frame_got = True
        detected_faces = []
        if verbose:
            print("Processing video.")
        #  single core (the ThreadPool created above is not used in this variant)
        while True:
            frame_got, frame = cap.read()
            if not frame_got:
                break
            if counter % skip_frames == 0:
                df = self.process_frame(frame, counter=counter)
                df["input"] = inputFname
                if outputFname:
                    df[init_df.columns].to_csv(outputFname,
                                               index=False,
                                               header=False,
                                               mode="a")
                else:
                    init_df = pd.concat([init_df, df[init_df.columns]], axis=0)
            counter = counter + 1
        cap.release()
        if outputFname:
            return True
        else:
            return init_df
 def init__threads(self):
     if self.threadn == 0 :
         from multiprocessing.pool import ThreadPool
         self.threadn = cv2.getNumberOfCPUs()
         self.pool = ThreadPool(processes = self.threadn)
         from collections import deque
         self.pending = deque()
Example #9
def dump_matching_result(fn2, testset_full_path):
    # Write the results out to external files
    # Output data for each individual file
    fn, ext = os.path.splitext(fn2)
    testcase_full_path = os.path.join(testset_full_path, fn2)
    imgT = cv2.imread(testcase_full_path, 0)
    if imgT is None:
        logger.info('Failed to load fn2: %s', testcase_full_path)
        raise ValueError('Not found the file')
    logger.info("Using Training: {}".format(fn2))

    pool = ThreadPool(processes=cv2.getNumberOfCPUs())
    with Timer('Detection'):
        kpT, descT = spla.affine_detect(detector,
                                        imgT,
                                        pool=pool,
                                        simu_param='test')
    logger.info('imgQ - %d features, imgT - %d features' %
                (spla.count_keypoints(splt_kpQ), len(kpT)))

    with Timer('matching'):
        mesh_pQ, mesh_pT, mesh_pairs = spla.match_with_cross(
            matcher, splt_descQ, splt_kpQ, descT, kpT)

    index_mesh_pairs = format4pickle_pairs(mesh_pairs)
    import joblib
    dump_match_testcase_dir = myfsys.setup_output_directory(dump_match_dir, fn)
    joblib.dump(mesh_pQ,
                os.path.join(dump_match_testcase_dir, 'mesH_pQ.pikle'),
                compress=True)
    joblib.dump(mesh_pT,
                os.path.join(dump_match_testcase_dir, 'mesH_pT.pikle'),
                compress=True)
    import pickle
    with open(os.path.join(dump_match_testcase_dir, 'mesh_pairs.pickle'),
              'wb') as f:
        pickle.dump(index_mesh_pairs, f)
        f.close()
    # for i, mesh_pair in enumerate(index_mesh_pairs):
    #     joblib.dump(mesh_pair, os.path.join(dump_detected_testcase_dir, "mesh_pairs_{0:02d}.pikle".format(i)),
    #                 compress=True)

    with Timer('estimation'):
        Hs, statuses, pairs = spla.calclate_Homography4splitmesh(mesh_pQ,
                                                                 mesh_pT,
                                                                 mesh_pairs,
                                                                 median=median)
    joblib.dump(Hs,
                os.path.join(dump_match_testcase_dir, 'Hs.pikle'),
                compress=True)
    joblib.dump(statuses,
                os.path.join(dump_match_testcase_dir, 'statuses.pikle'),
                compress=True)
    index_pairs = tuple(
        tuple((p.pt, p.size, p.angle, p.response, p.octave, p.class_id)
              for p in pair) for pair in pairs)
    with open(os.path.join(dump_match_testcase_dir, 'pairs.pickle'),
              'wb') as f:
        pickle.dump(index_pairs, f)
Example #10
def compute_fgbg_masks(vidreader,
                       sal,
                       bg,
                       num_prev_frames=1,
                       threaded=False,
                       num_blocks=20):
    start = time.time()

    def compute_mask(frameIdx, frames):
        num_frames = len(frames)
        _idx = num_prev_frames
        fgMasks = []
        bgMasks = []
        while _idx < num_frames:
            prev_frames = frames[_idx - num_prev_frames:_idx]
            bg_variation = bg.process(frames[_idx], prev_frames)
            saliency = sal.process(frames[_idx])
            _idx += 1
            saliency_prob = normalize(6 * bg_variation + 4 * saliency)
            _, fgmask = cv2.threshold(saliency_prob, 0.6, 1, cv2.THRESH_BINARY)
            _, bgmask = cv2.threshold(bg_variation, 0.1, 1,
                                      cv2.THRESH_BINARY_INV)
            fgMasks.extend([fgmask])
            bgMasks.extend([bgmask])
        return (frameIdx, fgMasks, bgMasks)

    frameFgMasks = []
    frameBgMasks = []
    frameIdx = num_prev_frames + num_blocks
    N = vidreader.frames
    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn // 2)
    pending = deque()
    N = vidreader.frames
    while True:
        while len(pending) > 0 and pending[0].ready():
            task = pending.popleft()
            idx, fgmask, bgmask = task.get()
            frameFgMasks.extend(fgmask)
            frameBgMasks.extend(bgmask)
            print('Computing Mask ... {0}%\r'.format(idx * 100 // N), end='')
        if len(pending) < threadn and frameIdx - num_prev_frames - num_blocks < N:
            (cnt,
             frames) = vidreader.read(frameIdx - num_prev_frames - num_blocks,
                                      num_prev_frames + num_blocks)
            if cnt >= num_prev_frames:
                if threaded:
                    task = pool.apply_async(compute_mask,
                                            [min(frameIdx, N), frames])
                else:
                    task = DummyTask(compute_mask(min(frameIdx, N), frames))
                pending.append(task)
            frameIdx += num_blocks
        if len(pending) == 0:
            break
    time_taken = time.time() - start
    print "Computing Mask ... [DONE] in ", time_taken, " seconds"
    return (frameFgMasks, frameBgMasks)
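The normalize() helper is not shown in any of these snippets; a plausible min-max implementation, consistent with the fixed thresholds of 0.6 and 0.1 applied to its output, is sketched below (this is an assumption, not the original code):

import numpy as np

def normalize(img):
    # Rescale an arbitrary float map into [0, 1] so fixed thresholds make sense.
    img = np.asarray(img, dtype=np.float32)
    lo, hi = float(img.min()), float(img.max())
    if hi - lo < 1e-12:
        return np.zeros_like(img)
    return (img - lo) / (hi - lo)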
Example #11
class ThreadedReader:
    thread_num = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=thread_num)
    pending = deque()

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
Example #12
    def run_jobs(self, f, jobs):
        if self.usecloud:
            jids = cloud.map(f, jobs, _env=self.cloud_env, _profile=True, _depends_on=self.preprocess_job)
            ires = cloud.iresult(jids)
        else:
            pool = ThreadPool(processes=cv2.getNumberOfCPUs())
            ires = pool.imap_unordered(f, jobs)
        return ires
Example #13
def main():
    # Index of the background subtractor to evaluate
    algo_index = 0
    subtractor = ALGORITHMS_TO_EVALUATE[algo_index][0]
    videoPath = "./video/vtest.avi"
    show_fgmask = False

    # Number of CPU cores available in this environment
    nthreads = cv2.getNumberOfCPUs()
    # Set the number of threads OpenCV may use
    cv2.setNumThreads(nthreads)

    # Open the video
    capture = cv2.VideoCapture(videoPath)

    # Current frame number
    frame_num = 0
    # Total processing time
    sum_Time = 0.0

    while True:
        ret, frame = capture.read()
        if not ret:
            return
        begin_time = time()
        fgmask = subtractor.apply(frame)
        end_time = time()
        run_time = end_time - begin_time
        sum_Time = sum_Time + run_time
        # Average processing time per frame
        average_Time = sum_Time / (frame_num + 1)

        if show_fgmask:
            segm = fgmask
        else:
            segm = (frame * 0.5).astype('uint8')
            cv2.add(frame, (100, 100, 0, 0), segm, fgmask)

        # Show the current method name
        cv2.putText(segm, ALGORITHMS_TO_EVALUATE[algo_index][1], (10, 30), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 0, 255),
                    2,
                    cv2.LINE_AA)
        # Show the current thread count
        cv2.putText(segm, str(nthreads) + " threads", (10, 60), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 0, 255), 2,
                    cv2.LINE_AA)
        # Show the average per-frame processing time
        cv2.putText(segm, "averageTime {} s".format(average_Time), (10, 90), cv2.FONT_HERSHEY_PLAIN, 2.0,
                    (255, 0, 255), 2, cv2.LINE_AA)

        cv2.imshow('some', segm)
        key = cv2.waitKey(1) & 0xFF
        frame_num = frame_num + 1

        # Press 'q' to exit the loop
        if key == ord('q'):
            break

    cv2.destroyAllWindows()
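ALGORITHMS_TO_EVALUATE is referenced above but never defined; a sketch of the (subtractor, label) list this code expects, built from stock OpenCV factories, could be:

import cv2

# Assumed structure: (background subtractor instance, display label).
ALGORITHMS_TO_EVALUATE = [
    (cv2.createBackgroundSubtractorMOG2(detectShadows=True), "MOG2"),
    (cv2.createBackgroundSubtractorKNN(detectShadows=True), "KNN"),
]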
Example #14
    def __init__(self, camera, cameraName, cameraDevice, initCmd=None):
        threading.Thread.__init__(self)
        self._stop_event = threading.Event()
        self.camera = camera
        self.cameraName = cameraName
        self.cameraDevice = cameraDevice
        self.threadn = cv2.getNumberOfCPUs()
        self.pools = {}
        self.initCmd = initCmd
Example #15
def asift_detect(detector, fn):
    img = read_image(fn)
    pool = ThreadPool(processes=cv2.getNumberOfCPUs())
    with Timer('Detection with [ ASIFT ]'):
        splt_kp, splt_desc = affine_detect(detector,
                                           img,
                                           pool=pool,
                                           simu_param='asift')
    return img, splt_kp, splt_desc
Example #16
def split_asift_detect(detector, fn, split_num):
    img = emod.read_image(fn)
    pool = ThreadPool(processes=cv2.getNumberOfCPUs())
    with Timer('Detection with [ ASIFT ]'):
        splt_kp, splt_desc = saf.affine_detect_into_mesh(detector,
                                                         split_num,
                                                         img,
                                                         simu_param='asift')
    return img, splt_kp, splt_desc
Example #17
def affine_func(folder_number, base_image, image_sequence, rand_img):

	starttime = timeit.default_timer()

	FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
	FLANN_INDEX_LSH = 6

	feature_name = 'sift'
	detector = cv2.SIFT()
	norm = cv2.NORM_L2
	flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
	matcher = cv2.FlannBasedMatcher(flann_params, {})
	img1 = cv2.imread(base_image, 0)
	features = []
	inliers = []
	matches = []
	base_features = []
	img_features = []
	print('Processing image sequence using ASIFT')
	for i in range(0,len(image_sequence)):
		print('Processing image %d of %d images' % (i, len(image_sequence)-1))
		fn2 = image_sequence[i]
		img2 = cv2.imread(fn2, 0)
		pool=ThreadPool(processes = cv2.getNumberOfCPUs())
		kp1, desc1 = affine_detect(detector, img1, pool=pool)
		kp2, desc2 = affine_detect(detector, img2, pool=pool)
		base_features.append(len(kp1))
		img_features.append(len(kp2))

		raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2)
		p1, p2, kp_pairs = rect.filter_matches(kp1, kp2, raw_matches)
		if len(p1) >= 4:
			H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
			a = np.sum(status)
			matches.append(len(status))
			inliers.append(a)
			print('%d / %d  inliers/matched' % (np.sum(status), len(status)))

			# Save randomly selected feature mapping
			if i == rand_img:
				print('Random image selected...')
				img_draw = img2
				kp_pairs_draw = kp_pairs
				status_draw = status
				H_draw = H
				filename = "Image"+str(i)+"_asift.png"
				vis = rect.explore_match(folder_number, filename, img1, img_draw, kp_pairs_draw, status_draw, H_draw)
		else:
			H, status = None, None
			print('%d matches found, not enough for homography estimation' % len(p1))
			matches.append(len(p1))
			inliers.append(0)

	stoptime = timeit.default_timer()
	run_time = stoptime - starttime
	return (base_features, img_features, matches, inliers, run_time)
Example #18
    def __init__(self):
        # Pool configuration
        self.threadn = cv2.getNumberOfCPUs()
        self.pool = ThreadPool(processes=self.threadn)
        self.pending = deque()

        # Clocks used for timing
        self.latency = StatValue()
        self.frame_interval = StatValue()
        self.last_frame_time = clock()
Example #19
def detect_and_match(detector, matcher, set_fn, splt_num=64, simu_type="default"):
    """
    SplitA experiment
    set_fn:
    """
    fnQ, testcase, fnT = set_fn
    def get_expt_names():
        tmpf, tmpext = os.path.splitext(fnT)
        return (os.path.basename(__file__), testcase, tmpf)
    expt_names = get_expt_names()
    logger = setup(expt_names)
    logger.info(__doc__)

    full_fnQ = myfsys.getf_template((fnQ,))
    full_fnT = myfsys.getf_input(testcase, fnT)
    imgQ, imgT = read_images(full_fnQ, full_fnT, logger)

    pool = ThreadPool(processes=cv2.getNumberOfCPUs())
    with Timer('Detection with SPLIT-ASIFT', logger):
        splt_kpQ, splt_descQ = spltA.affine_detect_into_mesh(detector, splt_num, imgQ, simu_param=simu_type)
    with Timer('Detection with SIFT', logger):
        kpT, descT = affine_detect(detector, imgT, pool=pool, simu_param='test')
    logger.info('imgQ - {0} features, imgT - {1} features'.format(spltA.count_keypoints(splt_kpQ), len(kpT)))

    with Timer('matching', logger):
        mesh_pQ, mesh_pT, mesh_pairs = spltA.match_with_cross(matcher, splt_descQ, splt_kpQ, descT, kpT)

    Hs = []
    statuses = []
    kp_pairs_long = []
    Hs_stable = []
    kp_pairs_long_stable = []
    for pQ, pT, pairs in zip(mesh_pQ, mesh_pT, mesh_pairs):
        pairs, H, status = calclate_Homography(pQ, pT, pairs)
        Hs.append(H)
        statuses.append(status)
        if status is not None and not len(status) == 0 and np.sum(status)/len(status) >= 0.4:
            Hs_stable.append(H)
        else:
            Hs_stable.append(None)
        for p in pairs:
            kp_pairs_long.append(p)
            if status is not None and not len(status) == 0 and np.sum(status)/len(status) >= 0.4:
                kp_pairs_long_stable.append(p)

    vis = draw_matches_for_meshes(imgQ, imgT, Hs=Hs)
    cv2.imwrite(myfsys.getf_output(expt_names, 'meshes.png'), vis)

    visS = draw_matches_for_meshes(imgQ, imgT, Hs=Hs_stable)
    cv2.imwrite(myfsys.getf_output(expt_names, 'meshes_stable.png'), visS)

    viw = explore_match_for_meshes('affine find_obj', imgQ, imgT, kp_pairs_long_stable, Hs=Hs_stable)
    cv2.imwrite(myfsys.getf_output(expt_names, 'meshes_and_keypoints_stable.png'), viw)

    return vis, visS, viw
Example #20
def video_parse(input_video = None, output_video = None, display = False):
    #Get capture object and read frame
    if input_video is not None:
        cap = cv2.VideoCapture(input_video)
    else:
        cap = cv2.VideoCapture(0)

    # Space for opencv setup

    # Read one frame up front so the output frame size is known
    ret, previous_frame = cap.read()

    # Generate output objects if required
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    out_v = None
    if output_video is not None:
        out_v = cv2.VideoWriter(output_video, fourcc, 20.0, (int(previous_frame.shape[1]), int(previous_frame.shape[0])))

    ## Generate a queue for the processes
    n_threads = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes = n_threads)

    pending_frames = deque()

    ## While the video is running
    while(cap.isOpened()):
        # Unload the ready frames
        while len(pending_frames) > 0 and pending_frames[0].ready():
            #Get the completed job
            res = pending_frames.popleft().get()

            #Write out the file to video if requested
            if out_v is not None:
                out_v.write(res)

            #Display ready frame
            if display:
                cv2.imshow(' Frame ', res)

        #If there are approx threads free, get more jobs
        if len(pending_frames) < n_threads:
            ## Generate the new job
            ret, frame = cap.read()
            if ret == False:
                break

            # draw components onto the frame - only the components not culled
            task = pool.apply_async(process_frame, (frame.copy(),))
            pending_frames.append(task)

    # Clean up the used elements
    if out_v is not None:
        out_v.release()

    cap.release()
    cv2.destroyAllWindows()
Example #21
def main():
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)
    feature_name = opts.get('--feature', 'brisk-flann')
    try:
        fn1, fn2 = args
    except:
        fn1 = 'data/aero1.jpg'
        fn2 = 'data/aero3.jpg'

    img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
    img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
    detector, matcher = init_feature(feature_name)

    if img1 is None:
        print('Failed to load fn1:', fn1)
        sys.exit(1)

    if img2 is None:
        print('Failed to load fn2:', fn2)
        sys.exit(1)

    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)

    print('using', feature_name)

    pool = ThreadPool(processes=cv.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2,
                                           k=2)  #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' %
                  len(p1))

        explore_match(win, img1, img2, kp_pairs, None, H)

    match_and_draw('affine find_obj')
    cv.waitKey()
    print('Done')
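filter_matches() and explore_match() come from the find_obj.py helper in the OpenCV samples and are not reproduced on this page. A sketch of the ratio-test filtering step, returning the same (p1, p2, kp_pairs) triple used above, looks roughly like this:

import numpy as np

def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Lowe's ratio test: keep a match only if the best candidate is clearly
    # better than the second best, then collect the matched point coordinates.
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            mkp1.append(kp1[m[0].queryIdx])
            mkp2.append(kp2[m[0].trainIdx])
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = list(zip(mkp1, mkp2))
    return p1, p2, kp_pairs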
Example #22
def main():
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)
    feature_name = opts.get('--feature', 'brisk-flann')
    try:
        fn1, fn2 = args
    except:
        fn1 = 'aero1.jpg'
        fn2 = 'aero3.jpg'

    img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
    img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
    detector, matcher = init_feature(feature_name)

    if img1 is None:
        print('Failed to load fn1:', fn1)
        sys.exit(1)

    if img2 is None:
        print('Failed to load fn2:', fn2)
        sys.exit(1)

    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)

    print('using', feature_name)

    pool=ThreadPool(processes = cv.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        explore_match(win, img1, img2, kp_pairs, None, H)


    match_and_draw('affine find_obj')
    cv.waitKey()
    print('Done')
Example #23
def matching_image(fn1='aero1.jpg', fn2='aero3.jpg', feature='sift'):
    import sys
    
    feature_name = feature
    
    
    detector, matcher = init_feature(feature_name)
    try:
        img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
        img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
    except:
        img1 = fn1
        img2 = fn2
        if img1 is None:
            print('Failed to load fn1:', fn1)
            sys.exit(1)

        if img2 is None:
            print('Failed to load fn2:', fn2)
            sys.exit(1)
        

    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)

    print('using', feature_name)

    pool=ThreadPool(processes = cv.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))
        explore_match(win, img1, img2, kp_pairs, None, H)
        return blending(img1, img2, H)


    final = match_and_draw('affine find_obj')
    cv.imwrite('{}_panorama.jpg'.format(feature), final)
    cv.waitKey()
    print('Done')
Example #24
def affine_detect_into_mesh(detector,
                            split_num,
                            img1,
                            mask=None,
                            simu_param='default'):
    pool = ThreadPool(processes=cv2.getNumberOfCPUs())
    kp, desc = affine_detect(detector,
                             img1,
                             mask,
                             pool=pool,
                             simu_param=simu_param)
    return split_kd(kp, desc, split_num)
Example #25
    def run_jobs(self, f, jobs):
        if self.usecloud:
            jids = cloud.map(f,
                             jobs,
                             _env=self.cloud_env,
                             _profile=True,
                             _depends_on=self.preprocess_job)
            ires = cloud.iresult(jids)
        else:
            pool = ThreadPool(processes=cv2.getNumberOfCPUs())
            ires = pool.imap_unordered(f, jobs)
        return ires
Example #26
def main():
    import sys

    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)

    def process_frame(frame, t0):
        # some intensive computation...
        frame = cv.medianBlur(frame, 19)
        frame = cv.medianBlur(frame, 19)
        return frame, t0

    threadn = cv.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40),
                     "latency        :  %.1f ms" % (latency.value * 1000))
            draw_str(
                res, (20, 60),
                "frame interval :  %.1f ms" % (frame_interval.value * 1000))
            cv.imshow('threaded video', res)
        if len(pending) < threadn:
            _ret, frame = cap.read()
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t))
            else:
                task = DummyTask(process_frame(frame, t))
            pending.append(task)
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    print('Done')
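clock(), StatValue and draw_str() are imported from the common.py module that ships with the OpenCV Python samples; minimal equivalents behave roughly like this:

import cv2 as cv

def clock():
    # Wall-clock time in seconds based on OpenCV's tick counter.
    return cv.getTickCount() / cv.getTickFrequency()

class StatValue:
    """Exponentially smoothed running value (latency / frame interval)."""
    def __init__(self, smooth_coef=0.5):
        self.value = None
        self.smooth_coef = smooth_coef

    def update(self, v):
        if self.value is None:
            self.value = v
        else:
            c = self.smooth_coef
            self.value = c * self.value + (1.0 - c) * v

def draw_str(dst, target, s):
    # Shadowed text overlay so the stats stay readable on any background.
    x, y = target
    cv.putText(dst, s, (x + 1, y + 1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), 2, cv.LINE_AA)
    cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), 1, cv.LINE_AA)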
Example #27
def compute_fgbg_masks(vidreader,gtreader,sal,bg,num_prev_frames=1,skip_frames=0,
							last_frame=1000,threaded=False,num_blocks=10,):
	start = time.time();
	def compute_mask(frameIdx,frames,gtframes):
		num_frames = len(frames);	_idx = num_prev_frames
		fgMasks = []; bgMasks = []; gtMasks = [];
		while _idx<num_frames:
			prev_frames = frames[_idx-num_prev_frames:_idx]
			gtmask = getMask(gtframes[_idx-num_prev_frames]);
			bg_variation = bg.process(frames[_idx],prev_frames);
			saliency = sal.process(frames[_idx]);	
			_idx += 1;	saliency_prob =  normalize(7*bg_variation  + 3*saliency);
			_,fgmask = cv2.threshold(saliency_prob ,0.5,1,cv2.THRESH_BINARY)
			fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, KERNEL)
			EL_KERNEL = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2))
			fgmask = cv2.dilate(np.uint8(fgmask),EL_KERNEL,iterations = 2)
			fgmask = cv2.medianBlur(fgmask,3)
			fgmask = binary_fill_holes(fgmask)
			fgmask = cv2.dilate(np.uint8(fgmask),EL_KERNEL,iterations = 2)
			fgmask = cv2.morphologyEx(np.uint8(fgmask), cv2.MORPH_CLOSE,KERNEL)	
			_,bgmask = cv2.threshold(bg_variation,0.1,1,cv2.THRESH_BINARY_INV)
			zero_frame= np.zeros(frames[0].shape,dtype = np.uint8);
			zero_frame[:,:,2]=fgmask*255;
			zero_frame[:,:,1]=bgmask*255;
			_prob = cv2.cvtColor(saliency_prob*255,cv2.COLOR_GRAY2RGB);
			fgMasks.extend([fgmask]); bgMasks.extend([bgmask]); gtMasks.extend([gtmask])
		return (frameIdx,fgMasks,bgMasks,gtMasks);
	frameFgMasks = []; frameBgMasks = []; frameGTMasks = [];
	frameIdx = skip_frames+num_prev_frames+num_blocks; N = vidreader.frames;
	threadn = cv2.getNumberOfCPUs();	pool = ThreadPool(processes = threadn//2);
	pending = deque();	N = min(vidreader.frames,last_frame);
	while True:
		while len(pending) > 0 and pending[0].ready():
			task = pending.popleft()
			idx,fgmask,bgmask,gtmask  = task.get()
			frameFgMasks.extend(fgmask); frameBgMasks.extend(bgmask); frameGTMasks.extend(gtmask);
			print('Computing Mask ... {0}%\r'.format(idx*100//N), end='')
		if len(pending) < threadn and frameIdx-num_prev_frames-num_blocks < N:
			(cnt,frames) = vidreader.read(frameIdx-num_prev_frames-num_blocks,num_prev_frames+num_blocks);
			(_,gtframes) = gtreader.read(frameIdx-num_blocks,num_blocks);
			if cnt >= num_prev_frames:
				if threaded:
					task = pool.apply_async(compute_mask,[min(frameIdx,N),frames,gtframes]);
				else:
					task = DummyTask(compute_mask(min(frameIdx,N),frames,gtframes));
				pending.append(task)
			frameIdx += num_blocks;	
		if len(pending) == 0:
			break;
	time_taken = time.time()-start;	
	print "Computing Mask ... [DONE] in ",time_taken," seconds"
	return (frameFgMasks,frameBgMasks,frameGTMasks)
Example #28
    def __init__(self, stream):
        self.stream = stream
        self.numThread = cv2.getNumberOfCPUs()
        #self.numThread = 1
        self.workerPool = ThreadPool(processes = self.numThread)
        self.pendingWorker = deque()

        self.latency = StatValue()
        self.frameInterval = StatValue()
        self.lastFrameTime = clock()

        self.outFrames = deque(maxlen = self.numThread)
        self.faces = []
Example #29
    def __init__(self, stream):
        self.stream = stream
        self.numThread = cv2.getNumberOfCPUs()
        #self.numThread = 5
        self.workerPool = ThreadPool(processes=self.numThread)
        self.pendingWorker = deque()

        self.latency = StatValue()
        self.frameInterval = StatValue()
        self.lastFrameTime = clock()

        self.outFrames = deque(maxlen=self.numThread)
        self.faces = []
Example #30
def check_build_info():
    success = True

    print("OpenCV Version: {}".format(cv2.__version__))
    if (cv2.getVersionMajor() != CURRENT_OPENCV_BUILD_VERSION[0]) or (
            cv2.getVersionMinor() != CURRENT_OPENCV_BUILD_VERSION[1]) or (
            cv2.getVersionRevision() != CURRENT_OPENCV_BUILD_VERSION[2]):
        print("ERROR: OpenCV version is different than the expected.")
        success = False

    print("Available CPUs: ", cv2.getNumberOfCPUs())
    print("Available threads: ", cv2.getNumThreads())
    if cv2.getNumThreads() < cv2.getNumberOfCPUs():
        print("ERROR: TBB is not enabled.")
        success = False

    cv2.CPU_NEON = 100  # Value taken from OpenCV doc. CPU labels don't work correctly in Python
    print("Cpu NEON support: ", cv2.checkHardwareSupport(cv2.CPU_NEON))
    if not cv2.checkHardwareSupport(cv2.CPU_NEON):
        print("ERROR: NEON is not enabled.")
        success = False

    return success
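CURRENT_OPENCV_BUILD_VERSION is defined elsewhere in that module; a small driver that turns the check into a process exit code might be:

# Assumed shape of the expected-version constant (the value here is illustrative only).
CURRENT_OPENCV_BUILD_VERSION = (4, 5, 1)

if __name__ == "__main__":
    import sys
    sys.exit(0 if check_build_info() else 1)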
Example #31
def find_asift_homography(fn1, fn2, scale=1.0):
    feature_name = 'orb-flann'

    img1 = cv2.imread(fn1, 0)
    img2 = cv2.imread(fn2, 0)
    detector, matcher = init_feature(feature_name, n=400)

    if img1 is None:
        print('Failed to load fn1:', fn1)
        return

    if img2 is None:
        print('Failed to load fn2:', fn2)
        return

    if detector is None:
        print('unknown feature:', feature_name)
        return

    print('using', feature_name)

    # cv2.INTER_LINEAR
    # img1 = cv2.resize(img1, (0, 0), fx=fx, fy=fy, interpolation=cv2.INTER_AREA)
    w, h = get_size(img1)
    img1 = cv2.resize(img1, (int(w * scale), int(h * scale)))

    w, h = get_size(img2)
    img2 = cv2.resize(img2, (int(w * scale), int(h * scale)))

    pool = ThreadPool(processes=cv2.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    with Timer('matching'):
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  #2

    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))

        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' %
              len(p1))

    return H, kp_pairs
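get_size() is assumed to return (width, height); a one-liner consistent with how its result feeds cv2.resize above:

def get_size(img):
    # OpenCV images are indexed (rows, cols) = (height, width); resize wants (width, height).
    h, w = img.shape[:2]
    return w, h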
Example #32
    def __init__(self, queue, cmd, user_queue, cct):
        multiprocessing.Process.__init__(self)
        CameraServer.queue = queue

        self.trainor_queue = multiprocessing.Queue(64)
        self.svm_queue = multiprocessing.Queue(64)

        self.cmd = cmd
        from bvps.camera.camera import Camera
        self.user_queue = user_queue
        self.cct = cct
        self.detector = detector()
        self.recognizer = recognizer(None)
        self.threadn = cv2.getNumberOfCPUs()
        self.pools = {}
Example #33
def detect_image(img1, img2):
    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)
    feature_name = opts.get('--feature', 'brisk-flann')

    detector, matcher = init_feature(feature_name)

    if img1 is None:
        print('Failed to load img1')
        sys.exit(1)

    if img2 is None:
        print('Failed to load img2')
        sys.exit(1)

    if detector is None:
        print('unknown feature:', feature_name)
        sys.exit(1)

    print('using', feature_name)

    img1 = cv.GaussianBlur(img1, (5, 5), sigmaX=5)

    pool = ThreadPool(processes=cv.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2,
                                           k=2)  #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' %
                  len(p1))

        img, corners = explore_match(win, img1, img2, kp_pairs, None, H)
        return img, corners, kp_pairs

    return match_and_draw('affine find_obj')
Example #34
    def run(self):
        """Run the main loop."""

        threadn = cv2.getNumberOfCPUs()
        pool = ThreadPool(processes=threadn)
        pending = deque()

        # latency = StatValue()
        # frame_interval = StatValue()
        # last_frame_time = clock()

        # TODO: Camera Calibration, Video Stabilization

        self._windowManager.create_window()

        while self._windowManager.is_window_created:
            self._captureManager.enter_frame()
            original = self._captureManager.original
            self._captureManager.frame = original

            # if original is not None:
            #    output = self.process_and_detect(original)
            #    self._captureManager.frame = output

            while len(pending) > 0 and pending[0].ready():
                output = pending.popleft().get()
                # latency.update(clock() - t0)
                cv2.putText(output, "threaded      :  " + str(self._thread_mode),
                            (15, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                # draw_str(res, (20, 40), "latency        :  %.1f ms" % (latency.value * 1000))
                # draw_str(res, (20, 60), "frame interval :  %.1f ms" % (frame_interval.value * 1000))
                self._captureManager.frame = output
                self._captureManager.exit_frame()

            if len(pending) < threadn:
                # ret, frame = cap.read()
                # t = clock()
                # frame_interval.update(t - last_frame_time)
                # last_frame_time = t
                if self._thread_mode:
                    task = pool.apply_async(self.process_and_detect, (original.copy(),))
                else:
                    task = DummyTask(self.process_and_detect(original))
                pending.append(task)

            self._captureManager.exit_frame()
            self._windowManager.process_events()
Example #35
def extract_sift(im):
    if im.dtype != np.uint8:
        im = (im * 255).astype(np.uint8)

    if len(im.shape) == 3:
        im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)

    # Use multithreading to split the affine detection work
    pool = ThreadPool(processes=cv2.getNumberOfCPUs())

    #kp, des = affine_detect(SIFT, im, pool=pool)
    kp, des = SIFT.detectAndCompute(im, None)

    pool.close()
    pool.join()

    return kp, des
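The module-level SIFT detector used above is assumed; with OpenCV >= 4.4 it could be created as follows (older builds need opencv-contrib-python and the xfeatures2d namespace):

import cv2

SIFT = cv2.SIFT_create()                 # OpenCV >= 4.4
# SIFT = cv2.xfeatures2d.SIFT_create()   # older builds with opencv-contrib-python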
Example #36
def compute_fgbg_masks(vidreader,sal,bg,num_prev_frames=1,threaded=False,num_blocks=20):
	start = time.time();
	def compute_mask(frameIdx,frames):
		num_frames = len(frames);	_idx = num_prev_frames
		fgMasks = []; bgMasks = [];
		while _idx<num_frames:
			prev_frames = frames[_idx-num_prev_frames:_idx]
			bg_variation = bg.process(frames[_idx],prev_frames);
			saliency = sal.process(frames[_idx]);	
			_idx += 1;	saliency_prob =  normalize(7*bg_variation  + 3*saliency);
			_,fgmask = cv2.threshold(saliency_prob ,0.5,1,cv2.THRESH_BINARY)
			_,bgmask = cv2.threshold(bg_variation,0.1,1,cv2.THRESH_BINARY_INV)
			name = "test_results/smoothening/{0}_sal.png".format(frameIdx-num_blocks-num_prev_frames+_idx);
			zero_frame= np.zeros(frames[0].shape,dtype = np.uint8);
			zero_frame[:,:,2]=fgmask*255;
			zero_frame[:,:,1]=bgmask*255;
			_prob = cv2.cvtColor(saliency_prob*255,cv2.COLOR_GRAY2RGB);
			out = cv2.addWeighted(np.uint8(_prob),0.6,zero_frame,0.4,0);
			cv2.imwrite(name,out)
		
			fgMasks.extend([fgmask]); bgMasks.extend([bgmask])
		return (frameIdx,fgMasks,bgMasks);
	frameFgMasks = []; frameBgMasks = []; 
	frameIdx = num_prev_frames+num_blocks; N = vidreader.frames;
	threadn = cv2.getNumberOfCPUs();	pool = ThreadPool(processes = threadn//2);
	pending = deque();	N = vidreader.frames;
	while True:
		while len(pending) > 0 and pending[0].ready():
			task = pending.popleft()
			idx,fgmask,bgmask  = task.get()
			frameFgMasks.extend(fgmask); frameBgMasks.extend(bgmask);
			print('Computing Mask ... {0}%\r'.format(idx*100//N), end='')
		if len(pending) < threadn and frameIdx-num_prev_frames-num_blocks < N:
			(cnt,frames) = vidreader.read(frameIdx-num_prev_frames-num_blocks,num_prev_frames+num_blocks);
			if cnt >= num_prev_frames:
				if threaded:
					task = pool.apply_async(compute_mask,[min(frameIdx,N),frames]);
				else:
					task = DummyTask(compute_mask(min(frameIdx,N),frames));
				pending.append(task)
			frameIdx += num_blocks;	
		if len(pending) == 0:
			break;
	time_taken = time.time()-start;	
	print "Computing Mask ... [DONE] in ",time_taken," seconds"
	return (frameFgMasks,frameBgMasks)
Example #37
    def test_result(self):
        pool = ThreadPool(processes=cv2.getNumberOfCPUs())
        meshList_kpT, meshList_descT = splta2.affine_detect_into_mesh(
            self.detector, self.splt_num, self.img1, pool=pool)
        kpQ, descQ = affine_detect(self.detector, self.img2, pool=pool)

        def count_keypoints():
            c = 0
            for s_kpT in meshList_kpT:
                for kpT in s_kpT:
                    c += len(kpT)
            return c

        print('imgQ - %d features, imgT - %d features' %
              (count_keypoints(), len(kpQ)))

        mesh_pT, mesh_pQ, mesh_pairs = splta2.match_with_cross(
            self.matcher, meshList_descT, meshList_kpT, descQ, kpQ)
        self.assertTrue(True)
Example #38
def process_video(bgsubImpl,vidreader,vidwriter,num_blocks=6, threaded = False):
	vidwriter.build();
	threadn = cv2.getNumberOfCPUs();	pool = ThreadPool(processes = threadn);
	pending = deque();	N = vidreader.frames; frameIdx = num_blocks;
	while True:
		while len(pending) > 0 and pending[0].ready():
			task = pending.popleft()
			frame, idx = task.get()
			vidwriter.write(frame);
			print('Processing ... {0}%\r'.format(idx*100//N), end='')
		if len(pending) < threadn and frameIdx < N:
			(cnt,frames) = vidreader.read(frameIdx-num_blocks,num_blocks);
			if cnt == num_blocks:
				if threaded:
					task = pool.apply_async(bgsub_process,[bgsubImpl,frames,frameIdx]);
				else:
					task = DummyTask(bgsub_process(bgsubImpl,frames,frameIdx));
				pending.append(task)
			frameIdx += 1;	
		if len(pending) == 0:
			break;
Example #39
def compute_fgbg_masks(vidreader,sal,bg,num_prev_frames=1,threaded=False,num_blocks=20):
	start = time.time();
	def compute_mask(frameIdx,frames):
		num_frames = len(frames);	_idx = num_prev_frames
		fgMasks = []; bgMasks = [];
		while _idx<num_frames:
			prev_frames = frames[_idx-num_prev_frames:_idx]
			bg_variation = bg.process(frames[_idx],prev_frames);
			saliency = sal.process(frames[_idx]);	
			_idx += 1;	saliency_prob =  normalize(6*bg_variation  + 4*saliency);
			_,fgmask = cv2.threshold(saliency_prob ,0.6,1,cv2.THRESH_BINARY)
			_,bgmask = cv2.threshold(bg_variation,0.1,1,cv2.THRESH_BINARY_INV)
			fgMasks.extend([fgmask]); bgMasks.extend([bgmask])
		return (frameIdx,fgMasks,bgMasks);
	frameFgMasks = []; frameBgMasks = []; 
	frameIdx = num_prev_frames+num_blocks; N = vidreader.frames;
	threadn = cv2.getNumberOfCPUs();	pool = ThreadPool(processes = threadn//2);
	pending = deque();	N = vidreader.frames;
	while True:
		while len(pending) > 0 and pending[0].ready():
			task = pending.popleft()
			idx,fgmask,bgmask  = task.get()
			frameFgMasks.extend(fgmask); frameBgMasks.extend(bgmask);
			print('Computing Mask ... {0}%\r'.format(idx*100//N), end='')
		if len(pending) < threadn and frameIdx-num_prev_frames-num_blocks < N:
			(cnt,frames) = vidreader.read(frameIdx-num_prev_frames-num_blocks,num_prev_frames+num_blocks);
			if cnt >= num_prev_frames:
				if threaded:
					task = pool.apply_async(compute_mask,[min(frameIdx,N),frames]);
				else:
					task = DummyTask(compute_mask(min(frameIdx,N),frames));
				pending.append(task)
			frameIdx += num_blocks;	
		if len(pending) == 0:
			break;
	time_taken = time.time()-start;	
	print "Computing Mask ... [DONE] in ",time_taken," seconds"
	return (frameFgMasks,frameBgMasks)
Example #40
    feature_name = opts.get('--feature', 'sift-flann')
    try: fn1, fn2 = args
    except:
        fn1 = 'C:/opencv-2.4.9/sources/samples/python2/data/aero1.jpg'  # input image
        fn2 = 'C:/opencv-2.4.9/sources/samples/python2/data/aero3.jpg'

    img1 = cv2.imread(fn1, 0)
    img2 = cv2.imread(fn2, 0)
    detector, matcher = init_feature(feature_name)
    if detector is not None:
        print('using', feature_name)
    else:
        print('unknown feature:', feature_name)
        sys.exit(1)

    pool=ThreadPool(processes = cv2.getNumberOfCPUs())
    kp1, desc1 = affine_detect(detector, img1, pool=pool)
    kp2, desc2 = affine_detect(detector, img2, pool=pool)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
Example #41
def imrestore(images, **opts):
    """
    Restore images by merging and stitching techniques.

    :param images: list of images, or a path string (glob patterns are supported).
            Loads image array from path, url, server, string
            or directly from numpy array (supports databases)
    :param debug: (0) flag to print debug messages
            0 -> do not print messages.
            1 -> print messages.
            2 -> print messages and show results
                (consumes significantly more memory).
            3 -> print messages, show results and additional data
                (consumes significantly more memory).
    :param feature: (None) feature instance. It contains the configured
            detector and matcher.
    :param pool: (None) use pool Ex: 4 to use 4 CPUs.
    :param cachePath: (None) saves memoization to specified path and
            downloaded images.
    :param clearCache: (0) clear cache flag.
            * 0 do not clear.
            * 1 All CachePath is cleared before use.
            * 2 re-compute data but other cache data is left intact.
            Notes: using cache can result in unexpected behaviour
                if some configurations do not match the cached data.
    :param loader: (None) custom loader function used to load images
            to merge. If None it loads the original images in color.
    :param pshape: (400,400) process shape, used to load pseudo images
            to process features and then results are converted to the
            original images. If None it loads the original images to
            process the features, but this can incur performance penalties
            if images are too big and RAM memory is scarce.
    :param baseImage: (None) First image to merge to.
            * None -> takes first image from raw list.
            * True -> selects image with most features.
            * Image Name.
    :param selectMethod: (None) Method to sort images when matching. This
            way the merging order can be controlled.
            * (None) Best matches.
            * Histogram Comparison: Correlation, Chi-squared,
                Intersection, Hellinger or any method found in hist_map
            * Entropy.
            * custom function of the form: rating,fn <-- selectMethod(fns)
    :param distanceThresh: (0.75) filter matches by distance ratio.
    :param inlineThresh: (0.2) filter homography by inlineratio.
    :param rectangularityThresh: (0.5) filter homography by rectangularity.
    :param ransacReprojThreshold: (5.0) maximum allowed reprojection error
            to treat a point pair as an inlier.
    :param centric: (False) tries to attach as many images as possible to
            each matching. It is quicker since it does not have to process
            too many match computations.
    :param hist_match: (False) apply histogram matching to foreground
            image with merge image as template
    :param mergefunc: (None) function used to merge foreground with
            background image using the given transformation matrix.
            The structure is as follows:

            merged, H_back, H_fore= mergefunc(back,fore,H)

            ..where::

                back: background image
                fore: foreground image
                H: calculated Transformation Matrix
                merged: new image of fore in back image
                H_back: transformation matrix that modifies
                        background key-points
                H_fore: transformation matrix that modifies
                        foreground key-points

    :param postfunc: (None) function used for post processing
            the merging result. The function is called with the merging
            image and must return the processed image.
    :param save: (False)
            * True, saves in path with name restored_{base_image}
            * False, does not save
            * Image name used to save the restored image.
    :return: restored image
    """
    # for debug
    FLAG_DEBUG = opts.get("debug",1)

    # for multiprocessing
    pool = opts.get("pool",None)
    if pool is not None: # convert pool count to pool class
        NO_CPU = cv2.getNumberOfCPUs()
        if pool <= NO_CPU:
            pool = Pool(processes = pool)
        else:
            raise Exception("pool of {} exceeds the number of processors {}".format(pool,NO_CPU))
    # for features
    feature = opts.get("feature",None)
    if feature is None:
        feature = Feature(pool=pool,debug=FLAG_DEBUG)
        feature.config(name='a-sift-flann') # init detector and matcher to compute descriptors
    else:
        feature.pool = pool
        feature.debug = FLAG_DEBUG

    # select method to order images to feed in superposition
    selectMethod = opts.get("selectMethod",None)
    best_match_list = ("bestmatches", "best matches")
    entropy_list = ("entropy",)
    if callable(selectMethod):
        orderValue = 3
    elif selectMethod in hist_map:
        orderValue = 2
    elif selectMethod in entropy_list:
        orderValue = 1
    elif selectMethod in best_match_list or selectMethod is None:
        orderValue = 0
    else:
        raise Exception("selectMethod {} not recognized".format(selectMethod))

    # distance threshold to filter best matches
    distanceThresh = opts.get("distanceThresh",0.75) # filter ratio

    # threshold for inlineRatio
    inlineThresh = opts.get("inlineThresh",0.2) # filter ratio
    assert inlineThresh<=1 and inlineThresh>=0 # ensures adequate value [0,1]

    # threshold for rectangularity
    rectangularityThresh = opts.get("rectangularityThresh",0.5) # filter ratio
    assert rectangularityThresh<=1 and rectangularityThresh>=0 # ensures adequate value [0,1]

    # threshold to for RANSAC reprojection
    ransacReprojThreshold = opts.get("ransacReprojThreshold",5.0)

    centric = opts.get("centric",False) # tries to attach as many images as possible
    pshape = opts.get("process_shape",(400,400))# it is not safe to compute descriptors from big images
    usepshape = False # output is as process_shape if True, else process with process_shape but output is as loader
    minKps = 3 # minimum len of key-points to find Homography
    histMatch = opts.get("hist_match",False)
    mergefunc = opts.get("mergefunc",None)
    if mergefunc is None:
        mergefunc = superpose
    assert callable(mergefunc)
    postfunc = opts.get("postfunc",None)
    assert postfunc is None or callable(postfunc)

    ############################## OPTIMIZATION MEMOIZEDIC #############################
    cachePath = opts.get("cachePath",None)
    if cachePath is not None:
        feature_dic = MemoizedDict(cachePath + "descriptors")
        if FLAG_DEBUG: print("Cache path is in {}".format(feature_dic._path))
        clearCache = opts.get("clearCache",0)
        if clearCache==1:
            feature_dic.clear()
            if FLAG_DEBUG: print("Cache path cleared")
    else:
        feature_dic = {}

    expert = opts.get("expert",None)
    if expert is not None:
        expert = MemoizedDict(expert) # convert path

    ################################## LOADING IMAGES ###################################
    if images is None or len(images)==0: # if images is empty use demonstration
        test = MANAGER.TESTPATH
        if FLAG_DEBUG: print("Looking in DEMO path {}".format(test))
        fns = glob(test + "*")
    elif isinstance(images,basestring):
        # if string assume it is a path
        if FLAG_DEBUG:print("Looking as {}".format(images))
        fns = glob(images)
    elif not isinstance(images,basestring) and len(images) == 1 and "*" in images[0]:
        images = images[0] # get string
        if FLAG_DEBUG:print("Looking as {}".format(images))
        fns = glob(images)
    else: # iterator containing data
        fns = images # list file names

    # check images
    if not len(fns)>1:
        raise Exception("list of images must be greater than 1, got {}".format(len(fns)))

    # to select base image ahead of any process
    baseImage = opts.get("baseImage",None)
    if isinstance(baseImage,basestring):
        base_old = baseImage
        try: # tries user input
            if baseImage not in fns:
                base, path, name, ext = getData(baseImage)
                if not path: # if name is incomplete look for it
                    base, path, _,_ = getData(fns[0])
                baseImage = lookinglob(baseImage, "".join((base, path)))
                # selected image must be in fns
                if baseImage is None:
                    raise IndexError
        except IndexError: # tries to find image based in user input
            # generate informative error for the user
            raise Exception("{} or {} is not in image list"
                            "\n A pattern is {}".format(
                            base_old,baseImage,fns[0]))

    if FLAG_DEBUG: print("No. images {}...".format(len(fns)))

    # make loader
    loader = opts.get("loader",None) # BGR loader
    if loader is None: loader = loadFunc(1)

    ######################## Local features: Key-points and descriptors ####################
    with TimeCode("Computing features...\n",
                  endmsg="Computed feature time was {time}\n", enableMsg=FLAG_DEBUG):
        feature_list = [] # list of key points and descriptors
        index = 0
        tries = 0 # prevents infinite loop
        while index < len(fns):
            try:
                path = fns[index]
                try:
                    if cachePath is None or clearCache==2 and path in feature_dic:
                        raise KeyError # force re-computation; the entry is overwritten below
                    kps, desc, shape = feature_dic[path] # thread safe
                    # check memoized is the same
                    if pshape is None:
                        sh = loader(path).shape # and checks that image exists
                    else:
                        sh = pshape

                    # compare element-wise instead of "sh != shape" to avoid differences in sequence type
                    for ii,jj in zip(sh,shape):
                        if ii!=jj: raise KeyError

                except (KeyError, ValueError) as e: # not memoized
                    if FLAG_DEBUG: print("Processing features for {}...".format(path))
                    img = loader(path) #cv2.imread(path)
                    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
                    if pshape is None: # get features directly from original
                        kps, desc = feature.detectAndCompute(img) # get features
                    else: # optimize by computing features on a scaled image and then rescaling
                        oshape = img.shape # cache original shape
                        img = cv2.resize(img, pshape) # pseudo image
                        kps, desc = feature.detectAndCompute(img) # get features
                        # re-scale keypoints to original image
                        if not usepshape:
                            # rescaling the key-points does not necessarily reproduce the result of computing on the original image
                            """
                            # METHOD 1: using Transformation Matrix
                            H = getSOpointRelation(process_shape, oshape, True)
                            for kp in kps:
                                kp["pt"]=tuple(cv2.perspectiveTransform(
                                    np.array([[kp["pt"]]]), H).reshape(-1, 2)[0])
                            """
                            # METHOD 2:
                            rx,ry = getSOpointRelation(pshape, oshape)
                            for kp in kps:
                                x,y = kp["pt"]
                                kp["pt"] = x*rx,y*ry
                    shape = img.shape
                    feature_dic[path] = kps, desc, shape # to memoize

                # add paths to key-points
                for kp in kps:
                    kp["path"] = path

                # number of key-points, index, path, key-points, descriptors
                feature_list.append((len(kps),index,path,kps,desc))
                if FLAG_DEBUG: print("\rFeatures {}/{}...".format(index+1,len(fns)), end=' ')
                index += 1
                tries = 0
            except Exception as e:
                tries += 1
                warnings.warn("caught error 139")
                if tries > 2:
                    raise e

    ############################## Pre-selection from a set ###############################
    # initialization and base image selection
    if baseImage is None:
        _,_,path,kps_base,desc_base = feature_list[0] # select first for most probable
    elif isinstance(baseImage,basestring):
        kps_base, desc_base, _ = feature_dic[baseImage] # stored as (kps, desc, shape)
        path = baseImage
    elif baseImage is True: # sort images
        feature_list.sort(reverse=True) # descending: from most key-points to fewest
        _,_,path,kps_base,desc_base = feature_list[0] # select first for most probable
    else:
        raise Exception("baseImage must be None, True or String")

    if FLAG_DEBUG: print("baseImage is", path)
    used = [path] # select first image path
    restored = loader(path) # load first image for merged image
    if usepshape:
        restored = cv2.resize(restored,pshape)
    failed = [] # registry for failed images

    ############################# Order set initialization ################################
    if orderValue: # obtain comparison with structure (value, path)
        if orderValue == 1: # entropy
            comparison = list(zip(*entropy(fns,loadfunc=loadFunc(1,pshape),invert=False)[:2]))
            if FLAG_DEBUG: print("Configured to sort by entropy...")
        elif orderValue == 2: # histogram comparison
            comparison = hist_comp(fns,loadfunc=loadFunc(1,pshape),method=selectMethod)
            if FLAG_DEBUG: print("Configured to sort by {}...".format(selectMethod))
        elif orderValue == 3:
            comparison = selectMethod(fns)
            if FLAG_DEBUG: print("Configured to sort by Custom Function...")
        else:
            raise Exception("DEBUG: orderValue {} does "
                            +"not correspond to {}".format(orderValue,selectMethod))
    elif FLAG_DEBUG: print("Configured to sort by best matches")

    with TimeCode("Restoring ...\n",
                  endmsg= "Restoring overall time was {time}\n", enableMsg= FLAG_DEBUG):
        while True:
            with TimeCode("Matching ...\n",
                          endmsg= "Matching overall time was {time}\n", enableMsg= FLAG_DEBUG):
                ####################### remaining keypoints to match ##########################
                kps_remain,desc_remain = [],[] # lists of key-points and descriptors not yet merged into the base
                for _,_,path,kps,desc in feature_list:
                    if path not in used: # append only those which are not in the base image
                        kps_remain.extend(kps)
                        desc_remain.extend(desc)

                if not kps_remain: # if there are no images left to stitch, stop
                    if FLAG_DEBUG: print("All images used")
                    break

                desc_remain = np.array(desc_remain) # convert descriptors to array

                ################################## Matching ###################################
                # select only those with good distance (hamming, L1, L2)
                raw_matches = feature.matcher.knnMatch(desc_remain,
                                                       trainDescriptors = desc_base, k = 2) #2
                # k=2 returns the two best matches per descriptor, as needed for the ratio test below
                classified = {}
                for m in raw_matches:
                    # Lowe's ratio test on the match distance (Hamming, L1 or L2 depending on the descriptor)
                    if m[0].distance < m[1].distance * distanceThresh:
                        m = m[0]
                        kp1 = kps_remain[m.queryIdx]  # keypoint in query image
                        kp2 = kps_base[m.trainIdx]  # keypoint in train image

                        key = kp1["path"] # ensured that key is not in used
                        if key in classified:
                            classified[key].append((kp1,kp2))
                        else:
                            classified[key] = [(kp1,kp2)]

                ############################ Order set ########################################
                if orderValue: # use only those in classified of histogram or entropy comparison
                    ordered = [(val,path) for val,path in comparison if path in classified]
                else: # order with best matches
                    ordered = sorted([(len(kps),path) for path,kps in list(classified.items())],reverse=True)


            for rank, path in ordered: # feed key-points in order according to order set

                ########################### Calculate Homography ##########################
                mkp1,mkp2 = list(zip(*classified[path])) # probably good matches
                if len(mkp1)>minKps and len(mkp2)>minKps:
                    p1 = np.float32([kp["pt"] for kp in mkp1])
                    p2 = np.float32([kp["pt"] for kp in mkp2])
                    if FLAG_DEBUG > 1: print('Calculating Homography for {}...'.format(path))
                    H, status = cv2.findHomography(p1, p2, cv2.RANSAC, ransacReprojThreshold)
                else:
                    if FLAG_DEBUG > 1: print('Not enough key-points for {}...'.format(path))
                    H = None

                if H is not None: #first test
                    fore = loader(path) # load fore image
                    if usepshape:
                        fore = cv2.resize(fore,pshape)
                    """
                    else:
                        # METHOD 3 for rescaling points. FIXME
                        #shapes = fore.shape,process_shape,fore.shape,process_shape
                        #H = sh2oh(H, *shapes) #### sTM to oTM
                        H = getSOpointRelation(process_shape,fore.shape, True)*H
                        for kp in feature_dic[path][0]:
                            kp["pt"]=tuple(cv2.perspectiveTransform(
                                np.array([[kp["pt"]]]), H).reshape(-1, 2)[0])"""
                    if histMatch: # apply histogram matching
                        fore = hist_match(fore, restored)

                    h,w = fore.shape[:2] # image shape

                    # get corners of fore projection over back
                    projection = getTransformedCorners((h,w),H)
                    c = ImCoors(projection) # class to calculate statistical data
                    lines, inlines = len(status), np.sum(status)

                    # ratio to determine how good fore is in back
                    inlineratio = inlineRatio(inlines,lines)

                    text = "inlines/lines: {}/{}={} and rectangularity {}".format(
                        inlines, lines, inlineratio, c.rotatedRectangularity)

                    if FLAG_DEBUG>1: print(text)

                    if FLAG_DEBUG > 2: # show matches
                        MatchExplorer("Match " + text, fore,
                                      restored, classified[path], status, H)

                    ######################### probability test ############################
                    if inlineratio>inlineThresh \
                            and c.rotatedRectangularity>rectangularityThresh: # second test

                        if FLAG_DEBUG>1: print("Test succeeded...")
                        while path in failed: # clean path in fail registry
                            try: # race-conditions safe
                                failed.remove(path)
                            except ValueError:
                                pass

                        ###################### merging and stitching #######################
                        if FLAG_DEBUG > 1: print("Merging...")
                        restored, H_back, H_fore= mergefunc(restored,fore,H)
                        if H_fore is None: # H was not modified by mergefunc; keep using H itself
                            H_fore = H

                        if FLAG_DEBUG > 1: # show merging result
                            Plotim("Last added with " + text, restored).show()

                        ####################### update base features #######################
                        # make projection to test key-points inside it
                        if FLAG_DEBUG > 1: print("Updating key-points...")
                        projection = getTransformedCorners((h,w),H_fore)
                        newkps, newdesc = [], []
                        for _,_,p,kps,desc in feature_list:
                            # append all points in the base image and update their position
                            if p in used: # transform points in back
                                for kp,dsc in zip(kps,desc): # kps,desc
                                    pt = kp["pt"] # get point
                                    if H_back is not None: # update point
                                        pt = tuple(transformPoint(pt,H_back))
                                        kp["pt"] = pt
                                    # include only those points outside foreground
                                    if cv2.pointPolygonTest(projection, pt, False) == -1:
                                        newkps.append(kp)
                                        newdesc.append(dsc)
                            elif p == path: # transform points in fore
                                # include only those points inside foreground
                                for kp,dsc in zip(kps,desc): # kps,desc
                                    kp["pt"] = tuple(transformPoint(kp["pt"],H_fore))
                                    newkps.append(kp)
                                    newdesc.append(dsc)
                        # update kps_base and desc_base
                        kps_base = newkps
                        desc_base = np.array(newdesc)

                        if FLAG_DEBUG > 2: # show key-points in the merging result
                            Plotim("merged Key-points",  # draw key-points in image
                                   cv2.drawKeypoints(
                                       im2shapeFormat(restored,restored.shape[:2]+(3,)),
                                              [dict2keyPoint(index) for index in kps_base],
                                              flags=4, color=(0,0,255))).show()
                        if FLAG_DEBUG: print("This image has been merged: {}...".format(path))
                        used.append(path) # update used
                        if not centric:
                            break
                    else:
                        failed.append(path)
                else:
                    failed.append(path)

            # if all classified have failed then end
            if set(classified.keys()) == set(failed):
                if FLAG_DEBUG:
                    print("Ended, these images do not fit: ")
                    for index in list(classified.keys()):
                        print(index)
                break

        if postfunc is not None:
            if FLAG_DEBUG: print("Applying post function...")
            restored = postfunc(restored)
        ####################################### Save image ####################################
        save = opts.get("save",False)
        if save:
            base, path, name, ext = getData(used[0])
            if isinstance(save,basestring):
                fn = save.format(path="".join((base, path)), ext=ext,)
            else:
                fn = "".join((base,path,"restored_"+ name,ext))

            mkPath(getPath(fn))
            r = cv2.imwrite(fn,restored)
            if r:
                if FLAG_DEBUG: print("Saved: {}".format(fn))
            else:
                print("{} could not be saved".format(fn))

    return restored # return merged image
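# --- added illustration (not part of the original example) -----------------
# A minimal sketch of a "postfunc" callable and of the "save"/"debug" options
# documented in the docstring above. The call is left commented out because
# the enclosing function's real name and exact signature are not shown in this
# excerpt; "imrestore" is only a placeholder.
def sharpen_result(img):
    # hypothetical post-processing callable: it receives the merged image and
    # must return the processed image
    blur = cv2.GaussianBlur(img, (0, 0), 3)
    return cv2.addWeighted(img, 1.5, blur, -0.5, 0)

# restored = imrestore("/data/retina/*.jpg",    # glob pattern, as supported above
#                      postfunc=sharpen_result, # applied to the merged result
#                      save=True,               # writes restored_<base image name>
#                      debug=1)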
    def _setup_multithreaded(self):
        self.thread_num = cv2.getNumberOfCPUs()
        self.thread_pool = ThreadPool(self.thread_num)
Пример #43
0
def extract_ball_from_capture_threaded(cap, max_frames_count=-1, skip_count=0,
                                       carpet_mask=None, get_mask=None,
                                       carpet_lowerb=None,
                                       carpet_upperb=None,
                                       ball_lowerb=None,
                                       ball_upperb=None):
    """

    Read frames from capture until we detect motion of the ball

    Return tuple of (original frames, ball mask frames)

    :param carpet_lowerb:
    :rtype : (list(np.ndarray), list(np.ndarray))
    """
    frames = []
    mask_frames = []
    # mog = cv2.BackgroundSubtractorMOG(history=5, nmixtures=4,backgroundRatio=0.7)
    mog = cv2.BackgroundSubtractorMOG()
    motion_started = False
    for _ in xrange(skip_count):
        if cap.isOpened():
            cap.read()

    prev_mask = None

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()

    def process_frame(frame, prev_mask, carpet_mask):
        if carpet_mask is None:
            carpet_mask = img_util.green_carpet_mask(frame,
                                                     carpet_lowerb,
                                                     carpet_upperb)
        mask = get_mask(frame, lowerb=ball_lowerb, upperb=ball_upperb,
                        carpet_mask=carpet_mask)
        if prev_mask is None:
            move_mask = np.zeros_like(mask)
        else:
            move_mask = cv2.bitwise_not(prev_mask, mask=mask)

        prev_mask = mask
        is_ball, move_mask = img_util.detect_ball(move_mask)
        return is_ball, move_mask, prev_mask, frame

    while cap.isOpened():
        while len(pending) > 0 and pending[0].ready():
            is_ball, move_mask, prev_mask, frame = pending.popleft().get()
            if is_ball:
                if not motion_started:
                    logger.debug('ball appeared')
                    motion_started = True
                frames.append(frame)
                mask_frames.append(move_mask)
            else:
                if motion_started:
                    logger.debug(
                        'ball disappeared. Frames count: {}'.format(
                            len(frames)))
                    break
                else:
                    continue

            if 0 < max_frames_count < len(frames):
                logger.debug('max frames count reached')
                break

        ret, frame = cap.read()
        if ret:
            task = pool.apply_async(process_frame,
                                    (frame, prev_mask, carpet_mask))
            pending.append(task)
        else:
            logger.debug('Cannot read more frames')
            break
    return frames, mask_frames
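# --- added illustration (not part of the original example) -----------------
# Usage sketch, assuming an opened cv2.VideoCapture and a caller-supplied
# get_mask function (both required by the parameters documented above); the
# file name and frame limits are hypothetical.
# cap = cv2.VideoCapture('bounce.avi')
# frames, mask_frames = extract_ball_from_capture_threaded(
#     cap, max_frames_count=120, skip_count=30, get_mask=get_mask)
# for frame, mask in zip(frames, mask_frames):
#     cv2.imshow('ball', cv2.bitwise_and(frame, frame, mask=mask))
#     cv2.waitKey(30)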
Пример #44
0
    def __init__(self, context, poller):
        super(NewClient, self).__init__(context, poller)

        self._threadn = cv2.getNumberOfCPUs()
        self._pool = ThreadPool(processes = self._threadn)
        self._pending = deque()
Пример #45
0
def multi_thread(f, jobs):
    pool = ThreadPool(processes=cv2.getNumberOfCPUs())
    ires = pool.imap_unordered(f, jobs)
    return ires
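# --- added illustration (not part of the original example) -----------------
# multi_thread returns the lazy iterator from imap_unordered: jobs run on the
# thread pool and results arrive in completion order as the iterator is
# consumed. A small sketch with hypothetical file names:
def _blur_job(path):
    img = cv2.imread(path)
    return path, cv2.medianBlur(img, 5)

# for path, blurred in multi_thread(_blur_job, ['a.jpg', 'b.jpg']):
#     cv2.imwrite('blur_' + path, blurred)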
Пример #46
0
                cv2.CPU_NEON: "NEON",
                cv2.CPU_POPCNT: "POPCNT",
                cv2.CPU_SSE: "SSE",
                cv2.CPU_SSE2: "SSE2",
                cv2.CPU_SSE3: "SSE3",
                cv2.CPU_SSE4_1: "SSE4.1",
                cv2.CPU_SSE4_2: "SSE4.2",
                cv2.CPU_SSSE3: "SSSE3"};

        for feat in featDict:
            res = cv2.checkHardwareSupport(feat);
            print("%s = %d" % (featDict[feat], res));
        #cv2.setUseOptimized(onoff)!!!!

    # "Returns the number of logical CPUs available for the process."
    common.DebugPrint("cv2.getNumberOfCPUs() (#logical CPUs) is %s" % str(cv2.getNumberOfCPUs()));
    common.DebugPrint("cv2.getTickFrequency() is %s" % str(cv2.getTickFrequency()));

    """
    Available only in C++:
    # "getNumThreads - Returns the number of threads used by OpenCV for parallel regions."
    common.DebugPrint("cv2.getNumThreads() (#logical CPUs) is %s" % str(cv2.getNumThreads()));
    """


    videoPathFileNameQ = sys.argv[1]; # input/current video
    videoPathFileNameR = sys.argv[2]; # reference video


    #!!!!TODO: use getopt() to run Evangelidis' or "Alex's" algorithm, etc
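# --- added note (not part of the original example) -------------------------
# Recent OpenCV Python bindings do expose the thread controls, so the queries
# above can be paired with explicit settings; a short sketch (values are
# arbitrary):
# cv2.setUseOptimized(True)
# cv2.setNumThreads(cv2.getNumberOfCPUs())  # let OpenCV use every logical CPU
# print(cv2.useOptimized(), cv2.getNumThreads(), cv2.getNumberOfCPUs())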
Пример #47
0
    def init__threads(self):
        if self.threadn == 0:
            self.threadn = cv2.getNumberOfCPUs()
            self.pool = ThreadPool(processes=self.threadn)
            self.pending = deque()
Пример #48
0
def main():
    opts, args = getopt.getopt(sys.argv[1:], "", ["video="])
    opts = dict(opts)
    video = int(opts.get("--video", "0"))
    vsrc = None

    index = 0
    lastFn = None
    cmode = "rgb"
    mainImg = None
    value = 0
    algostate = {}

    print(
        "keys:\n"
        "  ESC: exit\n\n"
        "  c: rgb\n"
        "  r: red\n"
        "  g: green\n"
        "  b: blue,\n"
        "  h: hue\n"
        "  s: sat\n"
        "  v: val\n"
        "  0: adaptive threshold\n"
        "  1: threshold\n"
        "  2: huerange*valuerange\n"
        "  3: canny edges\n"
        "  4: simple blobs\n"
        "\n"
        "  <home>:           reset algorithm input\n"
        "  <up arrow>:       increase algorithm input\n"
        "  <down arrow>:     decrease algorithm input\n"
        "\n"
        "  <right arrow>:    increase img seq frame\n"
        "  <left arrow>:     decrease img seq frame\n"
    )

    if video:
        vsrc = cv2.VideoCapture(0)
        if not vsrc or not vsrc.isOpened():
            print("Problem opening video source")
            vsrc = None
            video = 0
        if 0:
            vsrc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, w)
            vsrc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, h)
    else:
        vsrc = None

    def processFrame(frame, t0):
        keypoints = None
        if cmode != "rgb":
            if cmode in ["h", "s", "v"]:
                # mode = (cv2.COLOR_BGR2HLS, cv2.COLOR_HLS2BGR)
                mode = (cv2.COLOR_BGR2HSV, cv2.COLOR_HSV2BGR)
                hsv = cv2.cvtColor(frame, mode[0])
                if cmode == "h":
                    if mode[0] == cv2.COLOR_BGR2HSV:
                        hsv[:, :, 1] = 255  # s = 1
                        hsv[:, :, 2] = 128  # v = .5
                    else:
                        hsv[:, :, 1] = 128  # l = .5
                        hsv[:, :, 2] = 255  # s = 1
                    frame = cv2.cvtColor(hsv, mode[1])
                elif cmode == "s":
                    # extract the s as grayscale
                    if mode[0] == cv2.COLOR_BGR2HSV:
                        h, frame, v = cv2.split(hsv)
                    else:
                        h, l, frame = cv2.split(hsv)
                elif cmode == "v":
                    if mode[0] == cv2.COLOR_BGR2HSV:
                        h, s, frame = cv2.split(hsv)
                    else:
                        h, frame, s = cv2.split(hsv)
            elif cmode in ["r", "g", "b"]:
                if cmode == "r":
                    b, g, frame = cv2.split(frame)
                elif cmode == "g":
                    b, frame, r = cv2.split(frame)
                elif cmode == "b":
                    frame, g, r = cv2.split(frame)
            elif cmode == "0":
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                frame = cv2.adaptiveThreshold(
                    gray, 200, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, value  # value to draw
                )
            elif cmode == "1":
                # simple thresholding
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                (ret, frame) = cv2.threshold(gray, 75 + value, 200, cv2.THRESH_BINARY)  # value to draw
            elif cmode == "2":
                # threshold value (want bright areas)
                # multiply result by hue in range
                mode = cv2.COLOR_BGR2HSV  # cv2.COLOR_HSV2BGR)
                hsv = cv2.cvtColor(frame, mode)
                h, s, v = cv2.split(hsv)
                vimg = cv2.inRange(v, 65, 255)
                himg = cv2.inRange(h, 50, 90)
                frame = cv2.multiply(vimg, himg, scale=1.0 / 255)
            elif cmode == "3":
                t1 = 2000 + value * 20
                t2 = t1 + 2000
                frame = cv2.Canny(frame, t1, t2, apertureSize=5)
            elif cmode == "4":
                if not "blobdetector" in algostate:
                    bp = cv2.SimpleBlobDetector_Params()
                    bp.filterByColor = True
                    bp.blobColor = 0

                    # Change thresholds
                    bp.minThreshold = 50
                    bp.maxThreshold = 150
                    bp.thresholdStep = 5

                    # Filter by Area.
                    bp.filterByArea = True
                    bp.minArea = 500
                    bp.maxArea = 10000

                    # Filter by Circularity
                    bp.filterByCircularity = False
                    bp.minCircularity = 0.1

                    # Filter by Convexity
                    bp.filterByConvexity = False
                    bp.minConvexity = 0.87

                    # Filter by Inertia
                    bp.filterByInertia = False
                    bp.minInertiaRatio = 0.01

                    algostate["blobdetector"] = cv2.SimpleBlobDetector(bp)

                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                keypoints = algostate["blobdetector"].detect(frame)

            else:
                print("unknown cmode: " + cmode)
        return frame, t0, keypoints

    def showImg(frame, keypoints):
        if keypoints:
            frame = cv2.drawKeypoints(
                frame, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
            )
        cv2.imshow("img", frame)

    # cv2.namedWindow("img")
    # cv2.createTrackbar("value", "img", 0, 100, trackbarChange)

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    pending = deque()
    threadedMode = True
    latency = common.StatValue()
    frameT = common.StatValue()
    lastFrameTime = common.clock()

    while True:
        while len(pending) > 0 and pending[0].ready():
            frame, t0, keypoints = pending.popleft().get()
            latency.update(common.clock() - t0)
            putStr(frame, (20, 20), "latency       : %.1f ms" % (latency.value * 1000))
            putStr(frame, (20, 40), "frame interval: %.1f ms" % (frameT.value * 1000))
            showImg(frame, keypoints)

        if video:
            if len(pending) < threadn:
                ret, mainImg = vsrc.read()
                t = common.clock()
                frameT.update(t - lastFrameTime)
                lastFrameTime = t
                task = pool.apply_async(processFrame, (mainImg.copy(), t))
                pending.append(task)

            done, update, cmode, index, value = checkKey(1, cmode, index, value)
            if done:
                break
        else:
            if index < 0:
                # index < 0 signals user's intent to go backward in framelist
                goback = True
                index = abs(index)
            else:
                goback = False
            fn = fnpat % index
            base = os.path.basename(fn)
            if not os.path.isfile(fn) and index >= 0:
                sys.stdout.write(base + " not found\r")
                sys.stdout.flush()
                if goback:
                    index = -(index - 1)
                else:
                    index = index + 1
                continue

            if fn != lastFn:
                mainImg = cv2.imread(fn, cv2.IMREAD_ANYCOLOR)
                (img, t0, keypts) = processFrame(mainImg.copy(), common.clock())
                frameT.update(common.clock() - t0)
                if cmode == "rgb":
                    str = "%s     (%.2f ms)" % (base, frameT.value * 1000)
                else:
                    str = "%s[%s] (%.2f ms)" % (base, cmode, frameT.value * 1000)
                putStr(img, (20, 20), str)
                showImg(img, keypts)
                lastFn = fn

            done, update, cmode, index, value = checkKey(10, cmode, index, value)
            if done:
                break
            elif update:
                lastFn = None
Пример #49
0
if __name__ == '__main__':
	source = 'V:\\Video AG\\archiv\\14ws-infin-141014\\14ws-infin-141014-dozent.mp4'
	# 0:12:40
	source = 0

	if sys.argv[1:]:
		source = sys.argv[1]

	framescale = 0.25 #0.5
	prevframe = None
	curframe = None
	tracks = []
	tracklen = 10
	flowmethod = 0

	numthreads = cv2.getNumberOfCPUs()

	scalerpool = ThreadPool(processes=1)
	qscaling = deque()

	flowpool = ThreadPool(processes=1) # must be serial!
	qflow = deque()

	vid = cv2.VideoCapture(source)
	#vid.set(cv2.CAP_PROP_FPS, 15)
	#if isinstance(source, str):
	#	vid.set(cv2.CAP_PROP_POS_MSEC, 1000 * (12*60 + 40))

	vid.set(cv2.CAP_PROP_POS_MSEC, 1000 * (10*60 + 0))

	if source == 0:
Пример #50
0
    def run(self):
        # init big image for stitching
        ret, frame = self.cam.read()
        frame=cv2.resize(frame,(320,240))
        h,w,d=frame.shape
        big_image = np.zeros((h*12,w*3,3), np.uint8)
        starty=h*11
        startx=w
        total_transl_x=0
        total_transl_y=0

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.prev_gray=frame_gray
        self.prev_frame=frame

        detector, matcher = init_feature('sift-flann')
        pool=ThreadPool(processes = cv2.getNumberOfCPUs())

        while True:
            for i in range(skip_frames):
                ret, frame = self.cam.read()
            ret, frame = self.cam.read()
            frame=cv2.resize(frame,(320,240))

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()

            img0, img1 = self.prev_gray, frame_gray

            kp1, desc1 = affine_detect(detector, img0[10:h-50,10:w-10], pool=pool)
            kp2, desc2 = affine_detect(detector, img1[10:h-50,10:w-10], pool=pool)
            print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))

            with Timer('matching'):
                raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
            p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
            if len(p1) >= 4:
                H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
                print '%d / %d  inliers/matched' % (np.sum(status), len(status))
                # do not draw outliers (there will be a lot of them)
                kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

                warp = cv2.warpPerspective(img0[10:h-50,10:w-10], H, (w, h*2))
                cv2.imshow("warped",warp)

            else:
                H, status = None, None
                print '%d matches found, not enough for homography estimation' % len(p1)

            vis = explore_match('affine find_obj', img0, img1, kp_pairs, None, H)




            # stitching-----------------------
            translation = np.zeros((3,1))  #3,1
            if len(p1)>4 and len(p2)>4:

                # temp1=[]
                # # temp2=[]

                # for i in range(len(kp1)):
                #     print kp1[i].pt+ (0,)
                #     temp1.append(kp1[i].pt+ (0,))
                # # for i in range(len(kp2)):
                # #     temp2.append(kp2[i].pt)
                # points1.astype(np.uint8)

                # points1 = np.array(temp1)
                # print points1
                # # points2 = np.array(temp2)

                # Hr=cv2.estimateRigidTransform(points1, points1,False)

                translation[:,0] = H[:,2] #Hr[:,2]
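                # --- added note (not part of the original example) ---------
                # Caveat: H[:, 2] equals the pixel translation only when H is
                # nearly affine (H[2, 2] ~= 1). A safer read-out, as a sketch,
                # normalises by the projective term first:
                #     tx = H[0, 2] / H[2, 2]
                #     ty = H[1, 2] / H[2, 2]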

                # rotation = np.zeros((3,3))
                # rotation[:,0] = H[:,0]
                # rotation[:,1] = H[:,1]
                # rotation[:,2] = np.cross(H[0:3,0],H[0:3,1])

                # print "x translation:",translation[0]
                # print "y translation:",translation[1]

                draw_str(vis, (20, 40), 'x-axis translation: %.1f' % translation[0])
                draw_str(vis, (20, 60), 'y-axis translation: %.1f' % translation[1])

                if translation[0]<60 and translation[1]<60:  #check for bad H
                    total_transl_x+=int(translation[0])
                    total_transl_y+=int(translation[1])

                    draw_str(vis, (20, 80), 'tot x-axis translation: %.1f' % total_transl_x)
                    draw_str(vis, (20, 100), 'tot y-axis translation: %.1f' % total_transl_y)

                    #h,w,d=frame.shape

                    frame_over=self.prev_frame[10:h-50,10:w-10].copy()
                    overlay = cv2.warpPerspective(frame_over, H, (w, h))
                    frame_h,frame_w,d=frame_over.shape

                    cv2.imshow('overlay',overlay)
                    #vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0)
                    big_image[starty-int(total_transl_y):starty-int(total_transl_y)+frame_h,startx-int(total_transl_x):startx-int(total_transl_x)+frame_w]=overlay[0:frame_h,0:frame_w].copy()

            #small_image=big_image.copy()
            big_h,big_w,d=big_image.shape
            small_image=cv2.resize(big_image,(big_w/4,big_h/4))
            cv2.imshow('stitching', small_image)
            #cv2.imwrite("result.jpg",big_image);



            self.frame_idx += 1
            self.prev_gray = frame_gray
            self.prev_frame=frame

            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
Пример #51
0
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
import itertools as it
from multiprocessing.pool import ThreadPool as Pool
# third-party
import cv2
import numpy as np
# custom
from RRtool.RRtoolbox.lib import plotter, cache, config, image
from RRtool.RRtoolbox import filter,basic as ar
from RRtool.RRtoolbox.lib.arrayops import convert

# ----------------------------    GLOBALS    ---------------------------- #
cpc = cv2.getNumberOfCPUs()
print("configured to use {} cpus".format(cpc))
pool = Pool(processes = cpc) # careful: creating this Pool at import time from another module can deadlock
feature_name = 'sift-flann'
paths = config.ConfigFile()
# ----------------------------    DECORATORS    ---------------------------- #

def getalfa(foregray,backgray,window = None):
    """ get alfa transparency for merging to retinal images
    :param foregray: image on top
    :param backgray: image at bottom
    :param window: window used to customize alfa; values go from 0 (transparent) to any value,
                    where the maximum is fully visible, i.e. a window with all the same values
                    changes nothing. A binary image can be used, where 0 is transparent and
                    1 is visible. If no window is given, alfa is left unmodified.
    :return: float window modified by alfa
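# --- added illustration (not part of the original example) -----------------
# Sketch of how an alfa window like the one described above is typically
# applied once it is computed: per-pixel weights in [0, 1] blend the
# foreground over the background (equal image sizes assumed; this helper is
# not part of the library).
def blend_with_alfa(fore, back, alfa):
    # broadcast a 2-D alfa window over the colour channels of a BGR image
    if fore.ndim == 3 and alfa.ndim == 2:
        alfa = alfa[..., None]
    return (fore * alfa + back * (1.0 - alfa)).astype(back.dtype)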
Пример #52
0
    def run_jobs(self, f, jobs):
        pool = ThreadPool(processes=cv.getNumberOfCPUs())
        ires = pool.imap_unordered(f, jobs)
        return ires
Пример #53
0
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.linear_model.logistic import LogisticRegression

from utils import *

import warnings
warnings.filterwarnings("ignore")

TEST = False
SAVE = False
LOWE = False

train_images, train_labels, test_images, test_labels = get_train_test(TEST)

pool = Pool(cv2.getNumberOfCPUs()-2)

if LOWE:
    print " [!] Lowe's SIFT"
    train_sift_with_null = pool.map(get_sift_lowe, train_images)
    test_sift_with_null = pool.map(get_sift_lowe, test_images)
else:
    print " [!] OpenCV2's SIFT"
    train_sift_with_null = pool.map(get_sift, train_images)
    test_sift_with_null = pool.map(get_sift, test_images)

pool.terminate()                                                                                        

train_sift = removing_null(train_sift_with_null, train_labels)
reduced_train_sift = np.concatenate(train_sift, axis = 0)
descriptor = "SIFT"
descriptor = "spSIFT"

if TEST:
    prefix = "%s_%s_" % (descriptor, "test")
else:
    prefix = "%s_%s_" % (descriptor, "full")

train_images, train_labels, test_images, test_labels = get_train_test(TEST)

if descriptor == "SIFT":
    if os.path.isfile(prefix % "kmeans"):
        kmeans = load_pickle(prefix + "kmeans.pkl")
    else:
        pool = Pool(cv2.getNumberOfCPUs() - 2)

        if LOWE:
            print " [!] Lowe's SIFT"
            train_sift_with_null = pool.map(get_sift_lowe, train_images)
            test_sift_with_null = pool.map(get_sift_lowe, test_images)
        else:
            print " [!] OpenCV2's SIFT"
            train_sift_with_null = pool.map(get_sift, train_images)
            test_sift_with_null = pool.map(get_sift, test_images)

        pool.close()
        pool.join()
        pool.terminate()

        del pool
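# --- added illustration (not part of the original example) -----------------
# Sketch of the vocabulary step the imports above point to: cluster the
# stacked SIFT descriptors with MiniBatchKMeans, then histogram each image's
# descriptors over the cluster centres (the cluster count is arbitrary, and
# the snippet's own kmeans pickle would normally replace the fit).
# kmeans = MiniBatchKMeans(n_clusters=256).fit(reduced_train_sift)
# def bow_histogram(desc):
#     words = kmeans.predict(desc)
#     return np.bincount(words, minlength=kmeans.n_clusters)
# train_features = np.array([bow_histogram(d) for d in train_sift])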
Пример #55
0
                        blank += 1
                else:
                    if mx and mx.number.value() > 9 and blank >= _wait(5):
                        puts(mx)
                        wait = _wait(5)
                    blank += 1

if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-s", "--start", type="int", default=0, help="set the start time offset")
    parser.add_option("-l", "--length", type="int", default=99999999, help="set the length")
    parser.add_option("-X", "--offset_x", type="int", help="horizontal offset of the crop")
    parser.add_option("-Y", "--offset_y", type="int", help="vertical offset of the crop")
    parser.add_option("--crop_size", help="width and height of the crop")
    parser.add_option("-p", "--processes", type="int", default=cv2.getNumberOfCPUs(), help="num of processes")
    parser.add_option("--gui", action="store_true", help="use the GUI")
    parser.add_option("--profile", action="store_true", help="enable profiling")

    (op, args) = parser.parse_args()

    crop_size = [None, None]
    if op.crop_size:
        crop_size = [int(e) for e in op.crop_size.split('x')]

    if op.profile:
        import cProfile
        cProfile.run('Scout(args[0], op.start, op.length, op.offset_x, op.offset_y, *crop_size, op.processes, op.gui)')
    else:
        Scout(args[0], op.start, op.length, op.offset_x, op.offset_y, *crop_size, op.processes, op.gui)
Пример #56
0
    except:
        fn = 0
    '''camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCap= PiRGBArray(camera, size = (640, 480))'''
    cap =  PiVideoStream().start()


    def process_frame(frame, t0):
        # some intensive computation...
        frame = cv2.medianBlur(frame, 19)
        # frame = cv2.medianBlur(frame, 19)
        return frame, t0

    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes = threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            '''   '''
            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "Latency: %.1f ms" % (latency.value*1000))
            draw_str(res, (100, 20), "Frame interval: %.1f ms" % (frame_interval.value*1000))
Пример #57
0
    def init(self):
        # args is argparser.Namespace: an object whose members are
        # accessed as:  self.args.cannedimages

        self.robotCnx = robotCnx.RobotCnx(self.args.fakerobot)

        self.vsrc = None
        if self.args.cannedimages:
            self.fnpat = '../../pictures/RealFullField/%d.jpg'
        else:
            self.fnpat = None
        self.index = 0
        self.lastFn = None
        self.mainImg = None
        self.algostate = {}  # stores persistent objects depending on cmode
        self.threadn = cv2.getNumberOfCPUs()
        self.pool = ThreadPool(processes = self.threadn)
        self.pending = deque()
        self.threadedMode = True
        self.update = False
        self.latency = common.StatValue()
        self.frameT = common.StatValue()
        self.lastFrameTime = common.clock()
        self.lastStashTime = 0
        self.stashFilename = "/var/tmp/imgServer.home/currentImage.jpg"
        self.stashParams = [int(cv2.IMWRITE_JPEG_QUALITY), 50]
        self.zeros = (0,0,0,0,0,0)
        self.LUT =  np.array(range(0,256)).astype('uint8')

        self.indent = ' ' * 50
        self.lastStatus = ""

        # cmodelist maps a number [0, len] to an cmode/algorithm
        self.cmodelist = [
            'rgb',
            'adaptiveThreshold',
            'threshold',
            'huerange*valrange',
            'canny',
            'simpleblob',
            'houghlines',
            'contours',
            'ORB',
            'dance1',
            'gamma',

            'b',
            'g',
            'h',
            'r',
            's',
            'v',
        ]

        # cmodeValueCache stores the edited values associated with each cmode.
        # It starts empty, then builds up state as cmode changes.
        self.cmodeValueCache = {
            'adaptiveThreshold': [-4, 0, 0, 0, 0, 0],
            'threshold':         [75, 0, 0, 0, 0, 0],
            'huerange*valrange': [55, 100, 255, 255, 0, 0], # works for g LED
            'canny':             [10, 200, 0, 0, 0, 0],
            'simpleblob':        [75, 150, 20, 40**2, 0, 0],
                          # minThresh
                          # maxThresh
                          # thresStep
                          # minSize (maxsize is 'large')
            'houghlines':        [2, 5, 10, 30, 2, 0],
                          # rho is distance resolution in pixels
                          # theta is angle in degress (larger -> more lines)
                          # threshold is measured in 'votes', higher -> fewer
                          # minLineLength
                          # maxLineGap
            'contours':          [1, 0, 0, -1, 1, 0],
                          # mode: [0-3]
                          # method: [CHAIN_APPROX_NONE,_SIMPLE,_TC89_L1, KOS]
                          # offset shift for contour
                          # unused
                          # which contour to draw, -1 is all
                          # which level of the contours to draw
            'ORB':              [40, 10, 8, 31, 0, 0],
                          # nfeatures
                          # scaleFactor [1 -> 2  (0->255)]
                          # nlevels
                          # patchSizea == edgeThreshold
            'dance1':		[20,20,0,0,0,0],
            			  #X(degrees)
            			  #Y(degrees)
            			  #Not rotating correctly (not interpreting correctly)
            'gamma':		[34,0,0,0,0,0],
            			  #alpha
            			  #beta
            'r': self.zeros,
            'g': self.zeros,
            'b': self.zeros,
            'h':            [0, 61, 0, 0, 0, 0],
            's': self.zeros,
            'v': self.zeros,
            'rgb': self.zeros
        }

        #keyToCmode maps a key to a cmode
        self.keyToCmode = {
            49: self.cmodelist[1], # adaptiveThreshold on '1' key
            50: self.cmodelist[2],
            51: self.cmodelist[3],
            52: self.cmodelist[4],
            53: self.cmodelist[5],
            54: self.cmodelist[6], # houghlines
            55: self.cmodelist[7], # contours
            56: self.cmodelist[8], # ORB
            57: self.cmodelist[9], # dance1
            48: self.cmodelist[10],# 0 key: gamma

            98: 'b',
            99: 'rgb',
            103: 'g',
            104: 'h',
            114: 'r',
            115: 's',
            118: 'v',
        }

        self.keyToFrameChange = {
            81: 'b',
            83: 'n',
        }

        self.algoValueChange = {
            127: ("reset",  self.zeros),  # keypad clear

            190: ("v1Down",  (-1, 0, 0, 0, 0, 0)), # f1
            191: ("v1Up  ",  (1, 0, 0, 0, 0, 0)),  # f2

            84: ("v1Down",  (-1, 0, 0, 0, 0, 0)), # downarrow
            82: ("v1Up  ",  (1, 0, 0, 0, 0, 0)),  # uparrow

            191: ("v1Up  ",  (1, 0, 0, 0, 0, 0)),  # downarrow

            192: ("v2Down",  (0, -1, 0, 0, 0, 0)), # f3
            193: ("v2Up  ",  (0, 1, 0, 0, 0, 0)),  # f4

            194: ("v3Down",  (0, 0, -1, 0, 0, 0)), # f5
            195: ("v3Up  ",  (0, 0, 1, 0, 0, 0)),  # f6

            196: ("v4Down",  (0, 0, 0, -1, 0, 0)), # f7
            197: ("v4Up  ",  (0, 0, 0, 1, 0, 0)), # f8

            198: ("v5Down",  (0, 0, 0, 0, -1, 0)), # f9
            199: ("v5Up  ",  (0, 0, 0, 0, 1, 0)), # f10

            200: ("v6Down",  (0, 0, 0, 0, 0, -1)), # f11
            201: ("v6Up  ",  (0, 0, 0, 0, 0, 1)),  # f12
        }

        self.cmode = self.getCmode(self.args.algorithm)
        self.values = self.getCmodeValues(self.cmode)