Example No. 1
def main():
    # intrinsic parameters of Intel RealSense SR300
    fx, fy, ux, uy = 628.668, 628.668, 311.662, 231.571
    depth_scale = 0.0010000000474974513
    # parameters
    dataset = 'hands17'
    if len(sys.argv) == 2:
        dataset = sys.argv[1]
    print(dataset)
    lower_ = 1
    upper_ = 435

    # init hand pose estimation model
    try:
        hand_model = ModelPoseREN(
            dataset,
            lambda img: get_center(img, lower=lower_, upper=upper_),
            param=(fx, fy, ux, uy),
            use_gpu=True)
    except Exception:
        print('Model not found')
        return
    # for msra dataset, use the weights for first split
    if dataset == 'msra':
        hand_model.reset_model(dataset, test_id=0)
    # realtime hand pose estimation loop
    frames = read_images(img_array, depth_scale)
    # preprocessing depth
    # training samples are left hands in icvl dataset,
    # right hands in nyu dataset and msra dataset,
    # for this demo you should use your right hand
    if dataset == 'icvl':
        frames = frames[:, ::-1]  # flip
    # get hand pose
    predicted = []
    f = open('results.txt', 'w')
    for idx, depth in enumerate(frames):
        depth[depth == 0] = depth.max()
        results, cropped_image = hand_model.detect_image(depth)
        img_show = show_results(depth, results, cropped_image, dataset)
        # cv2.imshow('result', img_show)
        cv2.imwrite('result_{}.png'.format(idx), img_show)
        f.write('image_{}.png'.format(idx))
        for r in results:
            for i in r:
                f.write(' %s' % i)
        f.write('\n')
        predicted.append(results)
    f.close()
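
read_images and img_array are not defined in this snippet. A minimal sketch of what read_images might look like, assuming img_array is a stack of raw 16-bit depth frames and the model expects depth in millimeters:

import numpy as np

def read_images(img_array, depth_scale):
    # hypothetical helper: convert raw depth units to float millimeters
    # (depth_scale is the device's meters-per-unit factor)
    frames = np.asarray(img_array, dtype=np.float32)
    return frames * depth_scale * 1000.0
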
Example No. 2
def main():
    if len(sys.argv) < 4:
        print_usage()

    dataset = sys.argv[1]
    base_dir = sys.argv[2]
    out_file = sys.argv[3]
    names = util.load_names(dataset)
    centers = []
    for idx, name in enumerate(names):
        if dataset == 'nyu':  # use synthetic image to compute center
            name = name.replace('depth', 'synthdepth')
        img = util.load_image(dataset, os.path.join(base_dir, name))
        if dataset == 'icvl':
            center = util.get_center(img, upper=500, lower=0)
        elif dataset == 'nyu':
            center = util.get_center(img, upper=1300, lower=500)
        elif dataset == 'msra':
            center = util.get_center(img, upper=1000, lower=10)
        else:
            raise ValueError('unknown dataset: {}'.format(dataset))
        centers.append(center.reshape((1, 3)))
        if idx % 500 == 0:
            print('{}/{}'.format(idx + 1, len(names)))
    util.save_results(centers, out_file)
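
util.get_center is not shown; given center.reshape((1, 3)) above, it is assumed to return an (x, y, depth) triple. A minimal sketch under that assumption (the empty-mask fallback is an assumption, not the project's behavior):

import numpy as np

def get_center(img, upper=650, lower=1):
    # mean (x, y, depth) over pixels whose depth lies inside (lower, upper)
    mask = np.logical_and(img > lower, img < upper)
    ys, xs = np.nonzero(mask)
    if xs.size == 0:
        return np.zeros(3)  # fallback for an empty hand region (assumed)
    return np.array([xs.mean(), ys.mean(), img[mask].mean()])
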
Example No. 3
def main():
    args = parse_args()
    dataset_model = args.dataset_model
    dataset_image = args.dataset_image
    if dataset_image is None:
        dataset_image = dataset_model

    hand_model = HandModel(dataset_model, args.model_prefix,
            lambda img: util.get_center(img, lower=args.lower, upper=args.upper),
            param=(args.fx, args.fy, args.ux, args.uy))
    with open(args.in_file) as f:
        names = [line.strip() for line in f]
    results = hand_model.detect_files(args.base_dir, names, dataset=dataset_image,
            is_flip=args.is_flip)
    util.save_results(results, args.out_file)
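
parse_args is not shown; the attributes read above imply a parser roughly like the following sketch (flag names match the attributes used, but defaults and which flags are required are assumptions):

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='hand pose detection on a list of images')
    parser.add_argument('--dataset_model', required=True)
    parser.add_argument('--dataset_image', default=None)
    parser.add_argument('--model_prefix', required=True)
    parser.add_argument('--lower', type=int, default=1)
    parser.add_argument('--upper', type=int, default=650)
    parser.add_argument('--fx', type=float, required=True)
    parser.add_argument('--fy', type=float, required=True)
    parser.add_argument('--ux', type=float, required=True)
    parser.add_argument('--uy', type=float, required=True)
    parser.add_argument('--in_file', required=True)
    parser.add_argument('--base_dir', required=True)
    parser.add_argument('--out_file', required=True)
    parser.add_argument('--is_flip', action='store_true')
    return parser.parse_args()
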
Example No. 4
def main():
    # intrinsic parameters of Intel RealSense D415
    fx, fy, ux, uy = 628.668, 628.668, 311.662, 231.571

    # parameters
    dataset = 'icvl'
    if len(sys.argv) == 2:
        dataset = sys.argv[1]
    print(dataset)
    lower_ = 1
    upper_ = 650

    # init realsense
    pipeline, depth_scale = init_device()
    # init hand pose estimation model
    try:
        hand_model = ModelPoseREN(
            dataset,
            lambda img: get_center(img, lower=lower_, upper=upper_),
            param=(fx, fy, ux, uy),
            use_gpu=True)
    except Exception:
        print('Model not found')
        return
    # for msra dataset, use the weights for first split
    if dataset == 'msra':
        hand_model.reset_model(dataset, test_id=0)
    # realtime hand pose estimation loop
    while True:
        depth = read_frame_from_device(pipeline, depth_scale)
        # preprocessing depth
        depth[depth == 0] = depth.max()
        # training samples are left hands in icvl dataset,
        # right hands in nyu dataset and msra dataset,
        # for this demo you should use your right hand
        if dataset == 'icvl':
            depth = depth[:, ::-1]  # flip
        # get hand pose
        results, cropped_image = hand_model.detect_image(depth)
        img_show = show_results(depth, results, cropped_image, dataset)
        cv2.imshow('result', img_show)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    stop_device(pipeline)
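
init_device, read_frame_from_device, and stop_device are not shown. A minimal sketch of what they might look like, assuming the pyrealsense2 SDK; the 640x480@30 stream settings and the millimeter conversion are assumptions:

import numpy as np
import pyrealsense2 as rs

def init_device():
    # start a depth stream and report the device's depth scale (meters/unit)
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    profile = pipeline.start(config)
    depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
    return pipeline, depth_scale

def read_frame_from_device(pipeline, depth_scale):
    # block for the next frame and convert raw units to millimeters
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    depth = np.asanyarray(depth_frame.get_data()).astype(np.float32)
    return depth * depth_scale * 1000.0

def stop_device(pipeline):
    pipeline.stop()
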
Example No. 5
def test1(): 

  import time
  
  cal_id = 3
  dep_id = 105
  t_start = 1407452400 
  t_end = 1407455985 - (59 * 60) 

  db_con = util.get_db('reader')
  sv = signal1.SteeringVectors(db_con, cal_id)
  signal = signal1.Signal(db_con, dep_id, t_start, t_end)

  sites = util.get_sites(db_con)
  (center, zone) = util.get_center(db_con)
  assert zone == util.get_utm_zone(db_con)
  
  start = time.time()
  pos = PositionEstimator(dep_id, sites, center, signal, sv, 
    method=signal1.Signal.MLE)
  print "Finished in {0:.2f} seconds.".format(time.time() - start)
 
  print compute_conf(pos.p, pos.num_sites, sites, pos.splines)
Example No. 6
    def analyze(self):
        '''
        Pre-define the parameters that will later be passed into the tracker
        '''
        parameters_pupil = {
            'blur': (20, 20),
            'canny': (40, 50),
            'stare_posi': None
        }
        parameters_glint = {
            'blur': (1, 1),
            'canny': (40, 50),
            'H_count': 8,
            'stare_posi': None
        }
        '''
        We need both CPI and ROI: ROI is the displacement, while CPI is
        the new position, for both x and y
        '''
        ROI_pupil = get_ROI(self.cropping_factor_pupil)
        ROI_glint = get_ROI(self.cropping_factor_glint)
        CPI_pupil = self.cropping_factor_pupil
        CPI_glint = self.cropping_factor_glint
        # We also need the center of pupil and glint based on user-chosen area
        center_pupil = get_center(ROI_pupil)
        center_glint = get_center(ROI_glint)

        # check the user has drawn ROI boxes for both pupil and glint;
        # without these we cannot continue and will hit exceptions below
        for cntr in [center_pupil, center_glint]:
            if cntr[0] == 0 and cntr[1] == 0:
                mkmessage('Draw ROI boxes for both pupil and glint!')
                return
        '''
        Toggle the interface buttons: disable Analyze, enable Plotting
        '''
        self.Analyze.setEnabled(False)
        self.Plotting.setEnabled(True)
        '''
        This is for pre-processing
        '''

        # Precompute the best threshold for glint detection
        self.threshold_range_glint = self.glint_threshold(
            center_glint, 1, CPI_glint, parameters_glint)
        parameters_glint['threshold'] = self.threshold_range_glint

        print("first pass pass parameters")
        print(f"  pupil: {parameters_pupil}")
        print(f"  glint: {parameters_glint}")

        # Precompute the blurring factor for the pupil
        t1 = threading.Thread(target=self.get_blur,
                              args=(4, CPI_pupil, parameters_pupil, ROI_pupil,
                                    ROI_glint))
        # Get the count for the Hough transform
        t2 = threading.Thread(target=self.get_count,
                              args=(1, ROI_glint, CPI_glint, parameters_glint))

        # Run both threads and wait for them to finish
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        '''
        This is for pre-processing as well
        '''
        # 4 is the shrinking factor that speeds up the computation
        th_range_pupil = self.pupil_threshold(center_pupil, 4, CPI_pupil,
                                              parameters_pupil)
        # Add the best blurring factor for the pupil
        parameters_pupil['blur'] = self.pupil_blur
        #Add the perfect threshold value
        parameters_pupil['threshold'] = th_range_pupil

        # Add the best H_count value for the glint; the pupil doesn't need it
        parameters_glint['H_count'] = self.H_count
        #Put in the ideal staring position that might be used in the tracker portion
        parameters_pupil['stare_posi'] = self.stare_posi
        parameters_glint['stare_posi'] = self.stare_posi

        # useful to know for e.g. ./tracker.py
        print("second pass parameters")
        print(f"  pupil: {parameters_pupil}")
        print(f"  glint: {parameters_glint}")

        # Create the threads for both pupil and glint tracking.
        # No join here: we don't want the user interface to freeze
        t2 = threading.Thread(target=self.pupil_tracking,
                              args=(ROI_pupil, parameters_pupil, ROI_glint))
        t3 = threading.Thread(target=self.glint_tracking,
                              args=(ROI_glint, CPI_glint, parameters_glint))
        #Start the thread for final calculation
        t2.start()
        t3.start()
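
get_ROI and get_center are not shown. Purely as an illustration of the contract the code relies on (a center of (0, 0) means the user never drew a box), assuming a cropping factor of the form (x, y, width, height):

def get_ROI(cropping_factor):
    # hypothetical: keep only the box size, i.e. the displacement
    x, y, w, h = cropping_factor
    return (w, h)

def get_center(roi):
    # hypothetical: center of a (width, height) box; (0, 0) if no box drawn
    w, h = roi
    return (w // 2, h // 2)
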
Example No. 7
    def draw(self):
        xa = self.dxfprocessor.xa
        xi = self.dxfprocessor.xi
        ya = self.dxfprocessor.ya
        yi = self.dxfprocessor.yi
        line = self.dxfprocessor.line
        arc = self.dxfprocessor.arc
        ellipse = self.dxfprocessor.ellipse
        lwpline = self.dxfprocessor.lwpline
        plt.clf()
        plt.close('all')
        figure, ax = plt.subplots(figsize=((xa - xi) / 10000,
                                           (ya - yi) / 10000),
                                  dpi=100)
        # set the x and y axis ranges
        ax.set_xlim(left=xi, right=xa, auto=False)
        ax.set_ylim(bottom=yi, top=ya, auto=False)

        # draw the base map
        line_xs = [0] * len(line)
        line_ys = [0] * len(line)
        for i in range(len(line)):
            (line_xs[i], line_ys[i]) = zip(*line[i])

        for i in range(len(line_xs)):
            ax.add_line(
                Line2D(line_xs[i], line_ys[i], linewidth=1, color='black'))
        print('base map 1/4')
        for i in arc:
            ax.add_patch(Arc(i[0], i[1], i[1], theta1=i[2], theta2=i[3]))
        print('base map 2/4')
        for i in ellipse:
            ax.add_patch(
                Arc(i[0], i[1], i[2], angle=i[3], theta1=i[4], theta2=i[5]))
        print('base map 3/4')
        lwpl_xs = [0] * len(lwpline)
        lwpl_ys = [0] * len(lwpline)
        for i in range(len(lwpline)):
            (lwpl_xs[i], lwpl_ys[i]) = zip(*lwpline[i])
        for i in range(len(lwpl_xs)):
            ax.add_line(
                Line2D(lwpl_xs[i], lwpl_ys[i], linewidth=1, color='black'))
        print('base map 4/4')
        print('Done: base map drawn, starting heat fill...')

        # heat fill
        self.df_store_heat = self.get_df_store_heat()
        dates = util.get_dates_within_duration(self.duration)

        self.heat = self.df_store_heat[dates].sum(axis=1)

        boundaries = list(self.df_store_heat['boundary'])
        names = list(self.df_store_heat['name'])
        patches = []
        for boundary, name, heat in zip(boundaries, names, self.heat):
            text = name + '\n' + str(int(heat))
            center = util.get_center(boundary)
            polygon = Polygon(np.array(boundary), closed=True)
            patches.append(polygon)
            plt.text(center[0], center[1], text, ha='center', va='center')
        p = PatchCollection(patches, cmap=plt.get_cmap('rainbow'), alpha=0.4)
        p.set_array(np.array(self.heat))
        p.set_clim([0, max(list(self.heat))])
        ax.add_collection(p)
        ax.set_aspect(1)
        figure.colorbar(p, ax=ax, orientation='horizontal')
        print('Done: heat fill complete, saving...')

        # save the figure
        plt.savefig(self.path + '/heat' + str(self.duration[0]) + '-' +
                    str(self.duration[-1]) + '.jpg',
                    dpi=100)

        #plt.show()
        print('Saved as ' + self.path + '/heat' + str(self.duration[0]) + '-' +
              str(self.duration[-1]) + '.jpg')
        return self.path + '/heat' + str(self.duration[0]) + '-' + str(
            self.duration[-1]) + '.jpg'
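
util.get_center(boundary) above only positions the text label; a sketch assuming it returns the centroid of the polygon's vertices:

import numpy as np

def get_center(boundary):
    # hypothetical: vertex centroid of the store boundary polygon,
    # adequate for placing a text label
    pts = np.asarray(boundary, dtype=float)
    return pts.mean(axis=0)
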
Example No. 8
def main():
    # intrinsic parameters of Intel RealSense SR300
    # fx, fy, ux, uy = 463.889, 463.889, 320, 240
    fx, fy, ux, uy = 385.13, 385.13, 316.802, 241.818
    # parameters
    dataset = 'hands17'
    if len(sys.argv) == 2:
        dataset = sys.argv[1]

    lower_ = 200
    upper_ = 450

    # init realsense
    pipeline, depth_scale = init_device()
    # init hand pose estimation model
    hand_model = ModelPoseREN(
        dataset,
        lambda img: get_center(img, lower=lower_, upper=upper_),
        param=(fx, fy, ux, uy),
        use_gpu=True)
    # for msra dataset, use the weights for first split
    if dataset == 'msra':
        hand_model.reset_model(dataset, test_id=0)

    is_recording = False
    is_testing = False
    d, f = read_trainset()
    rec = []

    # realtime hand pose estimation loop
    while True:
        depth = read_frame_from_device(pipeline, depth_scale)
        # preprocessing depth
        depth[depth == 0] = depth.max()
        # training samples are left hands in icvl dataset,
        # right hands in nyu dataset and msra dataset,
        # for this demo you should use your right hand
        if dataset == 'icvl':
            depth = depth[:, ::-1]  # flip
        # get hand pose
        results, cropped_image = hand_model.detect_image(depth)
        img_show = show_results(depth, results, cropped_image, dataset)
        cv2.imshow('result', img_show)

        if is_recording or is_testing:
            rec.append(results)
        readkey = cv2.waitKey(1) & 0xFF

        if readkey == ord('q'):
            break

        elif readkey == ord('r'):
            if is_testing:
                print('*** cannot record ***')
            elif not is_recording:
                print("--- is recording ---")
                is_recording = True
            else:
                print("--- recording stopped ---")
                is_recording = False
                filename = input('filename: ')
                np.save(filename + '.npy', rec)
                rec = []

        elif readkey & 0xFF == ord('t'):
            if is_recording:
                print('*** cannot test ***')
            if not is_testing:
                print('--- is testing ---')
                is_testing = True
            else:
                is_testing = False
                t_feat = get_feature(np.array(rec))
                rec = []
                test(t_feat, f)

    stop_device(pipeline)
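
A short usage note on the recordings: assuming every frame carries the same joint layout, the list saved with np.save above loads back as a regular (n_frames, n_joints, 3) array (the filename here is illustrative):

import numpy as np

rec = np.load('my_gesture.npy')  # written by np.save(filename + '.npy', rec)
print(rec.shape)                 # e.g. (n_frames, n_joints, 3)
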
Example No. 9
def main():
    if len(sys.argv) < 3:
        print_usage()

    dataset = sys.argv[1]
    out_file = sys.argv[2]
    batch_size = 64
    if len(sys.argv) == 4:
        batch_size = int(sys.argv[3])

    # generate deploy prototxt
    make_baseline_net(os.path.join(ROOT_DIR, '../models'), dataset)
    make_pose_ren_net(os.path.join(ROOT_DIR, '../models'), dataset)

    names = util.load_names(dataset)
    # centers = util.load_centers(dataset)
    centers = None
    fx, fy, ux, uy = 587.270, 587.270, 326.548, 230.419
    # fx, fy, ux, uy = util.get_param(dataset)
    lower_ = 1
    upper_ = 650
    hand_model = ModelPoseREN(
        dataset,
        lambda img: get_center(img, lower=lower_, upper=upper_),
        param=(fx, fy, ux, uy),
        use_gpu=True)

    if dataset == 'msra':
        hand_model.reset_model(dataset, test_id=0)

    base_dir = "/media/reborn/Others/Study/Reborn/Github/Pose-REN/test"
    # depthName = os.path.join(base_dir, "000000_depth.bin")
    depthName = os.path.join(base_dir, "0.img")
    imgName = os.path.join(base_dir, "image_0000.png")

    if (dataset == "msra"):
        with open(depthName, "rb") as file:
            data = np.fromfile(file, dtype=np.uint16)
            height = 480
            width = 640
            depth = np.reshape(data, (height, width)).astype(np.float32)
            print("min = {}, max = {}".format(depth.min(), depth.max()))

            # zero out pixels outside the [lower_, upper_] depth range
            out_of_range = (depth < lower_) | (depth > upper_)
            depth[out_of_range] = 0

            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
            depth = cv2.morphologyEx(depth, cv2.MORPH_CLOSE, kernel)
            depth[depth == 0] = 10000
            # depth[depth == 0] = depth.max()

    elif (dataset == "icvl"):
        depth = cv2.imread(imgName, 2)
        depth[depth == 0] = depth.max()  # invalid pixel
        depth = depth.astype(float)

    print("np.shape(depth) = {}".format(np.shape(depth)))
    results, cropped_image = hand_model.detect_image(depth)
    print("results = {}".format(results))
    print(np.shape(results))
    img_show = show_results(depth, results, cropped_image, dataset)
    cv2.imwrite('result.jpg', img_show)
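
The MSRA branch's inline preprocessing (range mask, morphological close, invalid-pixel fill) can be factored into a reusable helper; this sketch performs exactly the steps used above:

import cv2
import numpy as np

def preprocess_depth(depth, lower, upper, invalid=10000):
    # zero out-of-range pixels, close small holes, then mark the
    # remaining holes with a large 'invalid' depth value
    depth = depth.astype(np.float32).copy()
    depth[(depth < lower) | (depth > upper)] = 0
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    depth = cv2.morphologyEx(depth, cv2.MORPH_CLOSE, kernel)
    depth[depth == 0] = invalid
    return depth
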