Code Example #1
    def __init__(self, screen: Surface):
        """
        Creates the game
        @param screen: The screen surface (created with pygame.display.set_mode) to draw on. This screen is used both
        for drawing and for creating the camera.
        @note To pave our way to Rust projects, this project uses type hints in function signatures.
        """

        # Load sprite sheets (aka costumes)
        assets_path = os.path.dirname(os.path.realpath(__file__))
        bear_spritesheet = image.load(os.path.join(
            assets_path, "..", "assets", "bears.png"))
        berry_spritesheet = image.load(os.path.join(
            assets_path, "..", "assets", "berry.png"))

        # Definition of game objects
        self.world = World()
        self.bear = Bear(320, 180, bear_spritesheet)
        self.berry_group = Group()
        for _ in range(100):
            x, y = randint(0, 1000), randint(0, 1000)
            berry = Berry(x, y, berry_spritesheet)
            berry.rect = berry.rect.clamp(self.world.rect)
            self.berry_group.add(berry)
        self.screen = screen
        self.camera = Camera(320, 180, screen)
        self.text = TextUI(0, 0)

        # Gameplay-related data
        self.score = 0
        self.remaining = 100
        self.timer = 0

        # State management. You might want to use a state machine for complex games.
        self.running = True
Code Example #2
File: PoseNet3D.py Project: malli1983/rgbd-pose3d
    def __init__(self, ope_depth=5, gpu_id=0, gpu_memory_limit=None, vpn_type=None, K=None):
        self._session = None  # tensorflow session
        self._image = None  # input to the 2D network
        self._scoremaps_kp = None  # output of the 2D network
        self._scoremaps_paf = None  # output of the 2D network
        self._depth_vox = None  # input to the 3D network
        self._kp_uv = None  # input to the 3D network
        self._voxel_root_xyz = None  # input to the 3D network
        self._voxel_scale = None  # input to the 3D network
        self._cam_mat = None  # input to the 3D network
        self._kp_vox = None  # output of the 3D network

        # parameters
        self._intermediate_scoremap_size = (100, 100)  # map size used for warping 2D->3D
        self.conf2d_thresh = 0.5  # minimal confidence of 2d detection
        self.cam = Camera(K)

        self.ope_depth = ope_depth
        self.gpu_id = gpu_id
        self.gpu_memory_limit = gpu_memory_limit

        if vpn_type == 'fast':
            self.use_fast_vpn = True
        else:
            self.use_fast_vpn = False

        # create network (this sets some member variables)
        self._setup_network()  # creates the tensorflow graph
        self._init_open_pose()  # loads weights
Code Example #3
    def __init__(self, game_manager):
        # type: (GameManager.GameManager) -> None
        self.game_manager = game_manager  # type: GameManager.GameManager

        # self.rendered_image = self.world_gen.get_current_room()  # type: pygame.Surface
        # self.current_room_rendered = self.world_gen.get_current_room()  # type: RoomRenderer

        self.cam = Camera.Camera(self.game_manager)

        self.world_gen = WorldGenerator.WorldGenerator(
            self.game_manager,
            "game_data/config/world.json",
            "game_data/worlds/test_world",
            camera=self.cam)

        self.player = Player.PlayerObject(
            self.game_manager,
            self.world_gen,
            position=Vector2D(1280 / 2, 720 / 2),
            camera=self.cam,
            obj_data=self.world_gen.load_player())

        self.cam.position = self.player.position.copy()

        self.cam.target(self.player.position)
Code Example #4
File: sprites.py Project: mechbear14/bear-berries
    def draw(self, surface: Surface, camera: Camera = None):
        """
        Same as bear.draw()
        @note Probably should create a shared base class to provide this method rather than having it twice
        """
        if camera is None:
            surface.blit(self.image, self.rect)
        else:
            position_in_camera = camera.from_world(self.position)
            rect = self.image.get_rect(center=position_in_camera)
            surface.blit(self.image, rect)
Code Example #5
File: Main.py Project: smurpheus/DartDetector
    def __init__(self, c1=0, camera=None):
        Thread.__init__(self)
        self.threadLock = Lock()
        print("BackgroundSubtractor called with capture %s" % c1)
        if not isinstance(camera, Camera):
            self.camera = Camera(device=c1)
        else:
            self.camera = camera
        # c1.release()re('test.avi')

        self.storage = ContourStorage()
        self._initialize_substractor()
Code Example #6
File: sprites.py Project: mechbear14/bear-berries
    def draw(self, surface: Surface, camera: Camera = None):
        """
        Draws the bear onto a surface based on camera position
        @param surface: the surface to draw on
        @param camera: if not set, this function draws the bear onto the surface treating the *world coordinate* of the
        bear's rect as its position on the surface. If set, this function uses *camera coordinate* instead.
        @return: None
        """
        self.image = self.costumes[self.costume_number]
        if camera is None:
            surface.blit(self.image, self.rect)
        else:
            position_in_camera = camera.from_world(self.position)
            rect = self.image.get_rect(center=position_in_camera)
            surface.blit(self.image, rect)
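
A short usage sketch for the two drawing modes described in the docstring above; the bear, screen, and camera names are placeholders for objects created as in Code Example #12, and the lines are illustrative only, not part of the project source.

# Illustrative usage only, not from the project.
bear.draw(screen)          # no camera: blit at the bear's world-coordinate rect
bear.draw(screen, camera)  # with camera: position is converted via camera.from_world()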
Code Example #7
def main():
    #2x2 binning
    camera = Camera(sensor_width=512,
                    sensor_height=512,
                    pixel_size=0.62,
                    source_to_detector_distance=1200,
                    isocenter_distance=450)
    #4x4 binning
    # camera = Camera(sensor_width=620, sensor_height=480, pixel_size=0.62, source_to_detector_distance=1200,isocenter_distance=800)

    ####
    #define the path to your dicoms here or use the simple phantom from the code above
    ####
    CT_volume_path = r".\your_dicom_directory\\"

    #save_path = r".\generated_data\test"
    save_path = r"./test"
    min_theta = 75
    max_theta = 105
    min_phi = 75
    max_phi = 105
    spacing_theta = 30
    spacing_phi = 30
    photon_count = 100000
    #origin [0,0,0] corresponds to the center of the volume
    origin = [0, 0, 0]
    spectrum = spectrum_generator.SPECTRUM90KV_AL40

    if not os.path.isdir(save_path):
        os.makedirs(save_path)

    generate_projections_on_sphere(CT_volume_path,
                                   save_path,
                                   min_theta,
                                   max_theta,
                                   min_phi,
                                   max_phi,
                                   spacing_theta,
                                   spacing_phi,
                                   photon_count,
                                   camera,
                                   spectrum,
                                   origin=origin,
                                   scatter=False)
Code Example #8
File: MainGame.py Project: C3RV1/DarkProject
    def __init__(self, game_manager):
        # type: (GameManager.GameManager) -> None
        self.game_manager = game_manager  # type: GameManager.GameManager

        self.cam = Camera.Camera(self.game_manager)

        self.world_gen = WorldGenerator.WorldGenerator(self.game_manager, "game_data/config/world.json",
                                                       "game_data/worlds/test_world", camera=self.cam)

        self.player = Player.PlayerObject(self.game_manager,
                                          self.world_gen,
                                          position=Vector2D(1280 / 2,
                                                            720 / 2),
                                          camera=self.cam,
                                          obj_data=self.world_gen.load_player())

        self.cam.position = self.player.position.copy()

        self.cam.target(self.player.position)
Code Example #9
File: Main.py Project: smurpheus/DartDetector
class MainApplikacation(object):
    detected = []
    detected2 = []
    detected3 = []
    detected4 = []
    detected5 = []
    real = []
    was_covert = []
    frame_no = []
    Calibrated = None
    Substractor = None
    input = None
    camera = None
    board_config_load = True
    date = datetime.now().strftime("%Y-%m-%d-%H-%M")
    fname = "data%s"%date
    imgpoint = None
    boardpoint = None
    def write_data(self):
        print "Writing data to file"
        i = 0
        fname = self.fname
        fname += ".csv"
        while os.path.isfile(fname):
            i += 1
            fname = self.fname + '-' + str(i)+".csv"
        with open(fname, 'wb') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=',',dialect='excel',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            spamwriter.writerow(['Detected','Detected2','Detected3','Detected4','Detected5', 'Reality', 'Was Covert', 'Frameno', 'Diff', 'Diff2', 'Diff3', 'Diff4', 'Diff5'])
            def calc_diff(a,b):
                i = 0
                diff = []
                for each in a:
                    try:
                        if each == b[i]:
                            diff.append(True)
                        else:
                            diff.append(False)
                    except IndexError:
                        print "For some reason there was an error in diff calc"
                    i += 1
                percentage = float(len([x for x in diff if x is True])) / float(len(diff))
                return diff, percentage
            diff, percentage = calc_diff(self.detected, self.real)
            print "Tip Version 1 was %s Percent correct."%(percentage * 100)
            diff2, percentage = calc_diff(self.detected2, self.real)
            print "Tip Version 2 was %s Percent correct." % (percentage * 100)
            diff3, percentage = calc_diff(self.detected3, self.real)
            print "Tip Version 3 was %s Percent correct." % (percentage * 100)
            diff4, percentage = calc_diff(self.detected4, self.real)
            print "Tip Version 4 was %s Percent correct." % (percentage * 100)
            diff5, percentage = calc_diff(self.detected5, self.real)
            print "Tip Version 5 was %s Percent correct." % (percentage * 100)
            datas = zip(self.detected, self.detected2, self.detected3, self.detected4, self.detected5, self.real,
                        self.was_covert, self.frame_no, diff,diff2, diff3, diff4, diff5)
            for each in datas:
                entry = list(each)
                # if each[0] == each[1]:
                #     entry.append(True)
                # else:
                #     entry.append(False)
                spamwriter.writerow(entry)

    
    def __init__(self, inp):
        if isinstance(inp,str):
            self.fname = inp.split('.')[0]
        self.camera = Camera(device=inp, output=self.fname)
        self.board = Board()
        camconf = "camera_config.json"
        baord_conf = "boardconfig.json"
        if os.path.isfile(camconf):
            self.camera.load_config(filename=camconf)
        else:
            self.camera.do_calibration(img=True)
            self.camera.save_config(camconf)
        if self.board_config_load and os.path.isfile(baord_conf):
            with open(baord_conf, 'r') as bc:
                imgps = json.loads(bc.readline())

            self.Calibrated = BoardCalibrator(camera=self.camera, imgpts=imgps, board=self.board)
        else:
            self.Calibrated = BoardCalibrator(camera=self.camera)
            with open("boardconfig.json", 'w') as bc:
                imgps = self.Calibrated.imgpoints
                bc.write(json.dumps(imgps))
        self.Substractor = BackgroundSubtractor(c1=inp,camera=self.camera)
        plt.ion()
        self.figure = plt.figure()
        self.plt1 = self.figure.add_subplot(111)
        self.line1, = self.plt1.plot(range(200), [0] * 200, 'r.-')
        self.plt1.axis([0, 200, 0, 10000])
        cv2.namedWindow("Current", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Current", 20,20)
        cv2.namedWindow("Original", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Original", 20, 500)
        cv2.namedWindow("Points", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Current", 1000, 20)
        cv2.namedWindow("Blobimg", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("Points", self._click)
        mixer.init()
        mixer.music.load('beep.mp3')
        # cv2.namedWindow("FG Substraction", cv2.WINDOW_NORMAL)
        # cv2.createTrackbar("History", "Current", self.history, 1000, self._set_history)
        # cv2.createTrackbar("Shadow Treshold", "Current", int(self.shad_tresh * 100), 100, self._set_shad_tresh)
        # cv2.createTrackbar("VarThreshold", "Current", self.var_tresh, 100, self._set_var_tresh)
        # cv2.createTrackbar("VarMax", "Current", self.var_max, 100, self._set_var_max)
        # cv2.createTrackbar("VarMin", "Current", self.var_min, 100, self._set_var_min)

        self.Substractor.start()

        realboard = np.zeros((self.camera.height, self.camera.width, 3), np.uint8)
        # self.frame = self.camera.undistort_image(img)
        for i in self.Calibrated.imp:
            try:
                realboard[i[0][1], i[0][0]] = [0,0,255]
            except IndexError:
                pass

        added = 0
        while True:
            img = self.Substractor.get_image()
            if img is not None:

                img = cv2.add(realboard,img)
                cv2.imshow("Original", img)
                cv2.imshow("Points", self.board.draw_board())
                if self.Substractor.stopped:
                    self.write_data()
                    exit()
                cv2.imshow("Current", self.Substractor.get_substracted())
                storage, unaltered = self.Substractor.get_storage()
                y = [x[2] for x in storage]
                y = unaltered
                self.line1.set_xdata(range(len(y)))
                self.line1.set_ydata(y)
                k = cv2.waitKey(1)
                if k == ord('a'):
                    self.add_dart(frame_no=self.camera.read_frame_no)
                if k == ord('s'):
                    self.write_data()
                if k == ord('w'):
                    pass
                    self.figure.savefig(r"thesisimages/plot.jpg")
                if k == ord('f'):
                    added = 0
                    self.Substractor.clear_arrows()
                if k == 27:
                    self.Substractor.stopped = True
                    break
                if k == 119:
                    print "Pressed w Key so Waiting"
                    cv2.waitKey(-1)
            arrows = self.Substractor.get_arrows()
            i = 1
            for each in arrows:
                tip = each.tip
                frame_no = each.frame_no
                points = self.Calibrated.calculate_points(tip)
                if i > added:
                    self.add_dart(arrow=each, detected=points, frame_no=frame_no)
                    added += 1
                i += 1
            if added >= 3:
                added = 0
                self.Substractor.clear_arrows()
        self.write_data()

    def _click(self, event, x, y, flags, param):

        if event == cv2.EVENT_LBUTTONDOWN:
            nx = x - self.board.size / 2
            ny = self.board.size / 2 - y
            print("Clicked %s " % self.board.calculate_field([[nx],[ny]]))
            self.imgpoint = [x, y]
            self.boardpoint = [[nx],[ny]]
            # self.imgpoints.append([x, y])

    def add_dart(self, arrow=None, detected="N/D", frame_no=0):
        self.Substractor.paused = True
        # mixer.music.play()
        if arrow is not None:
            print "Ratio is %s"%arrow.ratio
            print [cv2.contourArea(x) for x in arrow.contours]
            points = self.Calibrated.calculate_points(arrow.tip)
            pimg = self.board.draw_field(points)
            cv2.imshow("Points", pimg)
            cv2.imshow("Blobimg", arrow.img)
            k = -1
            while k not in [13, 32] or self.boardpoint is None:
                k = cv2.waitKey(-1)
            if k == 13:
                print "Enter"
                print len(self.detected)
                self.was_covert.append(False)
            if k == 32:
                self.was_covert.append(True)
            self.real.append(self.board.calculate_field(self.boardpoint))
            self.boardpoint = None
Code Example #10
File: PoseNet3D.py Project: malli1983/rgbd-pose3d
class PoseNet3D(object):
    def __init__(self, ope_depth=5, gpu_id=0, gpu_memory_limit=None, vpn_type=None, K=None):
        self._session = None  # tensorflow session
        self._image = None  # input to the 2D network
        self._scoremaps_kp = None  # output of the 2D network
        self._scoremaps_paf = None  # output of the 2D network
        self._depth_vox = None  # input to the 3D network
        self._kp_uv = None  # input to the 3D network
        self._voxel_root_xyz = None  # input to the 3D network
        self._voxel_scale = None  # input to the 3D network
        self._cam_mat = None  # input to the 3D network
        self._kp_vox = None  # output of the 3D network

        # parameters
        self._intermediate_scoremap_size = (100, 100)  # map size used for warping 2D->3D
        self.conf2d_thresh = 0.5  # minimal confidence of 2d detection
        self.cam = Camera(K)

        self.ope_depth = ope_depth
        self.gpu_id = gpu_id
        self.gpu_memory_limit = gpu_memory_limit

        if vpn_type == 'fast':
            self.use_fast_vpn = True
        else:
            self.use_fast_vpn = False

        # create network (this sets some member variables)
        self._setup_network()  # creates the tensorflow graph
        self._init_open_pose()  # loads weights

    def detect(self, image, depth_w, mask):
        """ Given RGBD input, predicts the 3D human pose. """
        ## A) 2D network on the RGB input
        # 1. Preprocessing RGB: Get image to the right size
        image_proc, image_s, scale_ratio = self._preproc_color(image)
        #self._show_input(image_s, depth_w, block=False)

        # 2. Run 2D network
        scoremaps_kp_v, scoremaps_paf_v = self._session.run([self._scoremaps_kp, self._scoremaps_paf],
                                                            {self._image: image_proc})

        # 3. Postprocessing: Detect keypoints and use PAF to work out person instances
        keypoint_det = detect_keypoints(np.squeeze(scoremaps_kp_v, 0))  # detect keypoints in the one scoremap
        paf_u, paf_v = self._split_paf_scoremap(np.squeeze(scoremaps_paf_v, 0))  # split representation
        pairwise_scores = calculate_pair_scores(keypoint_det, paf_u, paf_v)  # Calculate matching scores with the pafs

        # upscale because we don't upsample the scoremaps anymore
        keypoint_det_fs = list()
        for x in keypoint_det:
            x[:, :2] *= 8.0
            keypoint_det_fs.append(x)
        person_det = group_keypoints(keypoint_det_fs, pairwise_scores)  # Use detections and pairwise scores to get final estimation
        # print('Found %d persons' % len(person_det))  # coords in: person_det['person0']['kp'] = None,  person_det['person1']['kp'] = (u, v)
        # self._show_openpose_det(image_s, person_det, block=False)

        ## B) 3D network: VOXELPOSENET
        coord_uv, coord2d_conf = self._trafo_dict2array(person_det)
        coord_vis = coord2d_conf > self.conf2d_thresh
        coord_uv_fs = coord_uv / scale_ratio

        coord_xyz, det_conf = list(), list()
        for pid in range(len(person_det)):
            # Check if neck keypoint is visible
            if coord_vis[pid, 1] == 1.0:
                root_id = 1  # neck keypoint
                coord2d_root = coord_uv_fs[pid, root_id, :]

                #asymmetric grid
                grid_size_m = np.array([[-1.1, -0.4, -1.1],
                                        [1.1, 1.8, 1.1]])

            # if not try R-hip
            elif coord_vis[pid, 8] == 1.0:
                root_id = 8  # R-hip keypoint
                coord2d_root = coord_uv_fs[pid, root_id, :]

                #symmetric grid
                grid_size_m = np.array([[-1.1, -1.1, -1.1],
                                        [1.1, 1.1, 1.1]])

            # if not try L-hip
            elif coord_vis[pid, 11] == 1.0:
                root_id = 11  # L-hip keypoint
                coord2d_root = coord_uv_fs[pid, root_id, :]

                #symmetric grid
                grid_size_m = np.array([[-1.1, -1.1, -1.1],
                                        [1.1, 1.1, 1.1]])
            else:
                continue

            # find approx. depth for root
            z_value = self._get_depth_value(depth_w / 1000.0, coord2d_root[0], coord2d_root[1])
            # self._show_sparse_depth(depth_w, coord_uv_fs[pid, 1, :], block=False)

            if z_value == 0.0:
                print("Could not extract depth value. Skipping sample.")
                continue

            # create voxel occupancy grid from the warped depth map
            voxelgrid, coord2d_s, trafo_params = vu.voxelize_person(self.cam, depth_w, mask,
                                                            coord_uv_fs[pid, :, :], coord_vis[pid, :],
                                                            z_value, (64, 64, 64), f=1.2,
                                                            grid_size_m=grid_size_m,
                                                            root_id=root_id, coord2d_root=coord2d_root)

            # 5. Run VoxelPoseNet
            feed_dict_vpn = {self._depth_vox: voxelgrid, self._kp_uv: coord2d_s, self._kp_vis: coord_vis[pid, :]}
            kp_scorevol_v = self._session.run(self._kp_vox, feed_dict_vpn)

            # 6. Postprocessing: Detect keypoints, transform back to XYZ
            keypoints_xyz_vox, det_conf_vox = self._detect_scorevol(kp_scorevol_v)
            keypoints_xyz_pred = vu.trafo_vox_coords_to_xyz_new(keypoints_xyz_vox, trafo_params)  # xyz from voxel
            keypoints_xyz_pred_proj = self.cam.backproject(coord_uv_fs[pid, :, :], keypoints_xyz_pred[:, -1:])  # xyz from backprojected uv

            # assemble solution from voxel result and backprojected solution
            cond = coord2d_conf[pid, :] > det_conf_vox  # use backproj only when 2d was visible and 2d/3d roughly matches
            keypoints_xyz_pred[cond, :] = keypoints_xyz_pred_proj[cond, :]
            coord_xyz.append(keypoints_xyz_pred)
            det_conf.append(det_conf_vox)
            # self._show_voxelposenet_det(voxelgrid, keypoints_xyz_vox)

        # output numpy arrays
        if len(coord_xyz) > 0:
            coord_xyz = np.stack(coord_xyz)
        else:
            coord_xyz = np.zeros((0, 18, 3))

        if len(det_conf) > 0:
            det_conf = np.stack(det_conf)
        else:
            det_conf = np.zeros((0, 18))

        return coord_xyz, det_conf

    def _setup_network(self):
        """ Creates the tensorflow graph structure. """
        # input placeholder
        self._image = tf.placeholder(tf.float32, (1, 376, 656, 3), 'image')
        self._depth_vox = tf.placeholder(tf.float32, (64, 64, 64), 'depth_vox')
        self._kp_uv = tf.placeholder(tf.float32, (18, 2), 'kp_uv')
        self._kp_vis = tf.placeholder(tf.float32, (18), 'kp_vis')

        self._voxel_root_xyz = tf.placeholder(tf.float32, (1, 3), 'voxel_root_xyz') # only for warped
        self._voxel_scale = tf.placeholder(tf.float32, (1, 3), 'voxel_scale') # only for warped
        self._cam_mat = tf.placeholder(tf.float32, (3, 3), 'cam_mat') # only for warped
        evaluation = tf.placeholder_with_default(True, shape=(), name='evaluation')

        # OpenPose colornet
        color_net = OpenPoseCoco()
        scoremaps_kp_list, scoremaps_paf_list = color_net.inference_pose(self._image, train=False, upsample=False,
                                                                         gpu_id=self.gpu_id)
        self._scoremaps_kp, self._scoremaps_paf = scoremaps_kp_list[self.ope_depth], scoremaps_paf_list[self.ope_depth]

        # VoxelPoseNet for Person Kp
        net = VoxelPoseNet(use_slim=self.use_fast_vpn)

        # calculate scoremap
        kp_map_uv = self._create_multiple_gaussian_map(self._kp_uv, (64, 64), 3.0, tf.expand_dims(self._kp_vis, -1))

        # tile 2D scoremap into a scorevolume
        det2d_vox = tf.tile(tf.expand_dims(tf.expand_dims(kp_map_uv, 0), 3), [1, 1, 1, 64, 1])

        self._kp_vox = net.inference(tf.expand_dims(tf.expand_dims(self._depth_vox, 0), -1),
                                     det2d_vox,
                                     evaluation, gpu_id=self.gpu_id)

        # start session load weights
        if self.gpu_memory_limit is None:
            self._session = tf.Session()
        else:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)
            self._session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        self._init_voxelposenet()

    def _init_open_pose(self):
        """ Loads weights for the OpenPose network. """
        # create dict for renaming the weights
        name_dict = {'conv1_1': 'conv1_1',
                     'conv1_2': 'conv1_2',
                     'conv2_1': 'conv2_1',
                     'conv2_2': 'conv2_2',
                     'conv3_1': 'conv3_1',
                     'conv3_2': 'conv3_2',
                     'conv3_3': 'conv3_3',
                     'conv3_4': 'conv3_4',
                     'conv4_1': 'conv4_1',
                     'conv4_2': 'conv4_2',
                     'conv4_3_CPM': 'conv4_3',
                     'conv4_4_CPM': 'conv4_4'}

        for type_old, type_new in [('L1', 'paf'), ('L2', 'kp')]:
            for rep_id in range(1, 8):
                name_dict['conv5_%d_CPM_%s' % (rep_id, type_old)] = 'conv5_%d_%s' % (rep_id, type_new)

            for stage_id in range(2, 7):
                for rep_id in range(1, 8):
                    name_dict['Mconv%d_stage%d_%s' % (rep_id, stage_id, type_old)] = 'conv%d_%d_%s' % (stage_id + 4, rep_id, type_new)

        weight_dict = dict()
        with open('./weights/openpose-coco.pkl', 'rb') as fi:
            if sys.version_info[0] == 3:
                weight_dict_raw = pickle.load(fi, encoding='latin1')  # for python3
            else:
                weight_dict_raw = pickle.load(fi)  # for python2

            for k, v in weight_dict_raw.items():
                if k in name_dict.keys():
                    new_name = name_dict[k]
                    weight_dict['CocoPoseNet/' + new_name + '/weights'] = v[0]
                    weight_dict['CocoPoseNet/' + new_name + '/biases'] = v[1]
                else:
                    print('Skipping: ', k)

            init_op, init_feed = tf.contrib.framework.assign_from_values(weight_dict)
            self._session.run(init_op, init_feed)
            print('Initialized 2D network with %d variables' % len(weight_dict))

    def _init_voxelposenet(self):
        """ Initializes the VoxelPoseNet from a snapshot. """
        if self.use_fast_vpn:
            checkpoint_path = './weights/snapshots_pose_run194/'
        else:
            checkpoint_path = './weights/snapshots_pose_run191/'
        rename_dict = {}
        discard_list = ['Adam', 'global_step', 'beta']
        self._load_all_variables_from_snapshot(checkpoint_path, rename_dict, discard_list)

    def _load_all_variables_from_snapshot(self, checkpoint_path, rename_dict=None, discard_list=None):
        """ Initializes certain tensors from a snapshot. """
        last_cpt = tf.train.latest_checkpoint(checkpoint_path)
        assert last_cpt is not None, "Could not locate snapshot to load."
        reader = pywrap_tensorflow.NewCheckpointReader(last_cpt)
        var_to_shape_map = reader.get_variable_to_shape_map()  # var_to_shape_map

        # for name in var_to_shape_map.keys():
        #     print(name, reader.get_tensor(name).shape)

        # Remove everything from the discard list
        num_disc = 0
        var_to_shape_map_new = dict()
        for k, v in var_to_shape_map.items():
            good = True
            for dis_str in discard_list:
                if dis_str in k:
                    good = False

            if good:
                var_to_shape_map_new[k] = v
            else:
                num_disc += 1
        var_to_shape_map = dict(var_to_shape_map_new)
        print('Discarded %d items' % num_disc)
        # print('Vars in checkpoint', var_to_shape_map.keys(), len(var_to_shape_map))

        # rename everything according to rename_dict
        num_rename = 0
        if rename_dict is not None:
            var_to_shape_map_new = dict()
            for name in var_to_shape_map.keys():
                rename = False
                for rename_str in rename_dict.keys():
                    if rename_str in name:
                        new_name = name.replace(rename_str, rename_dict[rename_str])
                        var_to_shape_map_new[new_name] = reader.get_tensor(name)
                        rename = True
                        num_rename += 1
                        break
                if not rename:
                    var_to_shape_map_new[name] = reader.get_tensor(name)
            var_to_shape_map = dict(var_to_shape_map_new)
        print('Renamed %d items' % num_rename)
        # print('(Possibly) renamed vars', var_to_shape_map.keys(), len(var_to_shape_map))

        init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
        self._session.run(init_op, init_feed)
        print('Initialized %d variables from %s.' % (len(var_to_shape_map), last_cpt))

    @staticmethod
    def _preproc_color(image):
        """ Preprocesses the color image. """
        output_shape = np.array([376.0, 656.0], np.float32)

        # reshape by trafo
        ratio = np.min(output_shape / np.array(image.shape[:2], dtype=np.float32))
        M = np.array([[ratio, 0.0, 0.0], [0.0, ratio, 0.0]])
        image_s = cv2.warpAffine(image, M, (int(output_shape[1]), int(output_shape[0])), flags=cv2.INTER_AREA)
        
        # subtract mean and rgb -> bgr
        image = image_s[:, :, 0:3].astype('float32')
        image = image[:, :, ::-1]
        image = image / 256.0 - 0.5
        image = np.expand_dims(image, 0)
        return image, image_s, ratio

    def _detect_keypoints(self, scoremap):
        """
        Takes a scoremap and finds locations for keypoints.
        Returns a KxNx2 matrix with the (u, v) coordinates of the N maxima found for the K keypoints.
        """
        assert len(scoremap.shape) == 3, "Needs to be a 3D scoremap."

        keypoint_loc = list()
        for kid in range(scoremap.shape[2]):
            num_kp, maxima = self._find_maxima(scoremap[:, :, kid])
            if num_kp > 0:
                keypoint_loc.append(maxima)
            else:
                keypoint_loc.append(None)
        return keypoint_loc

    @staticmethod
    def _find_maxima(scoremap):
        """
        Takes a scoremap and detect the peaks using the local maximum filter.
        Returns a Nx2 matrix with the (u, v) coordinates of the N maxima found.
        """
        assert len(scoremap.shape) == 2, "Needs to be a 2D scoremap."

        # apply the local maximum filter; all pixel of maximal value
        local_max = maximum_filter(scoremap, size=3)
        mask_max = scoremap == local_max

        # mask out background
        mask_bg = ((np.max(scoremap) - np.min(scoremap)) * 0.25) > scoremap
        mask_max[mask_bg] = False

        # find distinct objects in map
        labeled, num_objects = ndimage.label(mask_max)
        slices = ndimage.find_objects(labeled)

        # create matrix of found objects with their location
        maxima = np.zeros((num_objects, 3), dtype=np.float32)
        for oid, (dy, dx) in enumerate(slices):
            maxima[oid, :2] = [(dx.start + dx.stop - 1)/2, (dy.start + dy.stop - 1)/2]
            u, v = int(maxima[oid, 0] + 0.5), int(maxima[oid, 1] + 0.5)
            maxima[oid, 2] = scoremap[v, u]

        return num_objects, maxima

    @staticmethod
    def _split_paf_scoremap(scoremap_paf):
        """ The network outputs u1, v1, u2, v2, ... """
        paf_u, paf_v = scoremap_paf[:, :, ::2], scoremap_paf[:, :, 1::2]
        return paf_u, paf_v

    @staticmethod
    def _show_input(image, depth, block=True):
        """ Shows the input to the pipeline. """
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        ax1.imshow(image)
        ax2.imshow(depth)
        plt.show(block=block)

    @staticmethod
    def _show_sparse_depth(depth, coord_uv, block=True):
        """ Shows where in the sparse depth map the value is taken from. """
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax1.imshow(depth)
        ax1.plot(coord_uv[0], coord_uv[1], 'ro')
        print('coord_uv', coord_uv)
        plt.show(block=block)

    @staticmethod
    def _show_openpose_det(image_s, person_det, block=True):
        """ Shows the detections of openpose in the color image. """
        import matplotlib.pyplot as plt
        from utils.DrawUtil import draw_person_limbs_2d_coco
        fig = plt.figure()
        ax1 = fig.add_subplot(111)
        ax1.imshow(image_s)
        fmt_list = ['ro', 'go', 'co', 'mo']
        for anno, fmt in zip(person_det.values(), fmt_list):
            coords = np.zeros((18, 2))
            vis = np.zeros((18, ))
            for i, kp in enumerate(anno['kp']):
                if (kp is not None) and (kp[2] > 0.5):
                    # ax1.plot(kp[0], kp[1], fmt)
                    coords[i, :] = np.array([kp[0], kp[1]])
                    vis[i] = 1.0

            draw_person_limbs_2d_coco(ax1, coords, vis, color='sides', order='uv')
        plt.show(block=block)

    @staticmethod
    def _show_voxelposenet_det(voxelgrid, keypoints_xyz_vox, block=True):
        """ Shows the detections of VoxelPoseNet in the input voxelgrid. """
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        from utils.DrawUtil import draw_person_limbs_3d_coco

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        X, Y, Z = np.where(voxelgrid)
        ax.scatter(X, Y, Z, c='g')
        draw_person_limbs_3d_coco(ax, keypoints_xyz_vox, color='r')
        plt.show(block=block)

    def _get_depth_value(self, map, u, v, crop_size=25):
        """ Extracts a depth value from a map.
            Checks for the closest value in a given neighborhood. """
        coord = np.array([[u, v]])

        while True:
            # get crop
            map_c, min_c, _ = self._crop_at(map, coord, crop_size)
            center = coord - min_c

            # find valid depths
            X, Y = np.where(np.not_equal(map_c, 0.0))

            if not X.shape[0] == 0:
                break
            crop_size *= 2  # if not successful use larger crop

        # calculate distance
        grid = np.stack([X, Y], 1) - center
        dist = np.sqrt(np.sum(np.square(grid), 1))

        # find element with minimal distance
        nn_ind = np.argmin(dist)
        x, y = X[nn_ind], Y[nn_ind]
        z_val = map_c[x, y]
        return z_val

    @staticmethod
    def _crop_at(map, center_coord, size):
        """ Crop a given map at the given center coordinate with the crop size specified.
            If the cropped area would partially reside outside of the map it is translated accordingly. """
        expand = False
        s = map.shape
        if len(s) == 2:
            map = np.expand_dims(map, 2)
            expand = True
            s = map.shape
        assert len(s) == 3, "Map has to be of Dimension 2 or 3."

        size = np.round(size).astype('int')

        # make sure crop size cant exceed image dims
        if s[0] <= size:
            size = s[0]
        elif s[1] <= size:
            size = s[1]

        center_coord = np.array(center_coord)
        center_coord = np.reshape(center_coord, [2])

        # work out the coords to actually lie in the crop
        c_min = np.round(center_coord - size // 2).astype('int')
        c_max = c_min + size

        # check if we left map
        for dim in [0, 1]:
            if c_min[dim] < 0.0:
                c_max[dim] -= c_min[dim]
                c_min[dim] = 0.0
            if c_max[dim] > s[1-dim]:
                c_min[dim] -= (c_max[dim]-s[1-dim])
                c_max[dim] = s[1-dim]

        # perform crop
        map_crop = map[c_min[1]:c_max[1], c_min[0]:c_max[0], :]

        if expand:
            map_crop = np.squeeze(map_crop)

        return map_crop, c_min, c_max

    @staticmethod
    def _trafo_dict2array(person_det, kp_conf_thresh=0.25):
        """ Transforms the dictionary returned from openpose into an array. """
        coord_uv = np.zeros((len(person_det), 18, 2))
        coord_vis = np.zeros((len(person_det), 18))

        for pid, anno in enumerate(person_det.values()):
            for kid, kp in enumerate(anno['kp']):
                if (kp is not None) and (kp[2] > kp_conf_thresh):
                    coord_vis[pid, kid] = kp[2]
                    coord_uv[pid, kid, :] = np.array([kp[0], kp[1]])
                else:
                    coord_vis[pid, kid] = 0.0
        return coord_uv, coord_vis

    @staticmethod
    def _create_multiple_gaussian_map(coords_uv, output_size, sigma, valid_vec=None):
        """ Creates a map of size (output_size[0], output_size[1]) with a Gaussian of variance sigma
            at each of the given coords_uv coordinates."""
        with tf.name_scope('create_multiple_gaussian_map'):
            sigma = tf.cast(sigma, tf.float32)
            assert len(output_size) == 2
            s = coords_uv.get_shape().as_list()
            coords_uv = tf.cast(coords_uv, tf.int32)
            if valid_vec is not None:
                valid_vec = tf.cast(valid_vec, tf.float32)
                valid_vec = tf.squeeze(valid_vec)
                cond_val = tf.greater(valid_vec, 0.5)
            else:
                cond_val = tf.ones_like(coords_uv[:, 0], dtype=tf.float32)
                cond_val = tf.greater(cond_val, 0.5)

            cond_1_in = tf.logical_and(tf.less(coords_uv[:, 0], output_size[0]-1), tf.greater(coords_uv[:, 0], 0))
            cond_2_in = tf.logical_and(tf.less(coords_uv[:, 1], output_size[1]-1), tf.greater(coords_uv[:, 1], 0))
            cond_in = tf.logical_and(cond_1_in, cond_2_in)
            cond = tf.logical_and(cond_val, cond_in)

            coords_uv = tf.cast(coords_uv, tf.float32)

            # create meshgrid
            x_range = tf.expand_dims(tf.range(output_size[0]), 1)
            y_range = tf.expand_dims(tf.range(output_size[1]), 0)

            X = tf.cast(tf.tile(x_range, [1, output_size[1]]), tf.float32)
            Y = tf.cast(tf.tile(y_range, [output_size[0], 1]), tf.float32)

            X.set_shape((output_size[0], output_size[1]))
            Y.set_shape((output_size[0], output_size[1]))

            X = tf.expand_dims(X, -1)
            Y = tf.expand_dims(Y, -1)

            X_b = tf.tile(X, [1, 1, s[0]])
            Y_b = tf.tile(Y, [1, 1, s[0]])

            X_b -= coords_uv[:, 0]
            Y_b -= coords_uv[:, 1]

            dist = tf.square(X_b) + tf.square(Y_b)

            scoremap = tf.exp(-dist / tf.square(sigma)) * tf.cast(cond, tf.float32)

            return scoremap

    @staticmethod
    def _detect_scorevol(scorevolume):
        """ Finds maximum volumewise. Tensor scorevolume is [1, D, H, W, C]. """
        scorevolume = np.squeeze(scorevolume)
        s = scorevolume.shape
        assert len(s) == 4, "Tensor must be 4D"

        coord_det = list()
        coord_conf = list()
        for i in range(s[3]):
            max_val = np.amax(scorevolume[:, :, :, i])

            ind = np.where(scorevolume[:, :, :, i] == max_val)
            ind = [np.median(x) for x in ind]  # this handles the case where there are multiple maxima
            ind = [int(x) for x in ind]
            coord_conf.append(max_val)
            coord_det.append(ind)
        coord_det = np.reshape(np.array(coord_det), [-1, 3])
        coord_conf = np.array(coord_conf)
        return coord_det, coord_conf
Code Example #11
# Actually, it turns out that, whatever the scale of t is, a 3D point will still end up at the same 2D location.

# TO BE PROVED
# Take three cameras placed arbitrarily.
# Take three more cameras with the same config, except that their t is unit-length.

# Verify that you get the distance correctly in the 2nd camera system.

# These will be the centres.
c1 = np.asarray([0, 0, 0]).reshape(3, -1)
c2 = np.asarray([70, 70, 70]).reshape(3, -1)
c3 = np.asarray([30, 15, 40]).reshape(3, -1)
c4 = np.asarray([25, 45, 25]).reshape(3, -1)

# Let these be the first set of cameras
Cam1 = Camera(K, Rc=utils.rotate(), center=c1)
pts2d_1 = utils.hom_to_euc(utils.project(Cam1.P, pts3d_11))
Cam2 = Camera(K, Rc=utils.rotate(thetay=169), center=c2)
pts2d_2 = utils.hom_to_euc(utils.project(Cam2.P, pts3d_11))
Cam3 = Camera(K, Rc=utils.rotate(thetaz=90), center=c3)
pts2d_3 = utils.hom_to_euc(utils.project(Cam3.P, pts3d_11))
Cam4 = Camera(K, Rc=utils.rotate(thetax=90), center=c4)
pts2d_4 = utils.hom_to_euc(utils.project(Cam4.P, pts3d_11))

# Let these be the second set of cameras at just unit-distance from world
UnitCam1 = Camera(K, Rc=utils.rotate(), center=utils.norm(c1))
unit_pts2d_1 = utils.hom_to_euc(utils.project(UnitCam1.P, pts3d_11))
UnitCam2 = Camera(K, Rc=utils.rotate(thetay=169), center=utils.norm(c2))
unit_pts2d_2 = utils.hom_to_euc(utils.project(UnitCam2.P, pts3d_11))
UnitCam3 = Camera(K, Rc=utils.rotate(thetaz=90), center=utils.norm(c3))
unit_pts2d_3 = utils.hom_to_euc(utils.project(UnitCam3.P, pts3d_11))
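
A minimal numeric check of the claim above, as an illustrative sketch that assumes the utils helpers behave as used in this snippet, is to compare the projections from the original cameras with those from their unit-translation counterparts:

# Illustrative check only: compare projections of the two camera sets defined above.
for pair in [(pts2d_1, unit_pts2d_1), (pts2d_2, unit_pts2d_2), (pts2d_3, unit_pts2d_3)]:
    print(np.allclose(pair[0], pair[1], atol=1e-6), np.abs(pair[0] - pair[1]).max())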
Code Example #12
class Game:
    """
    Class representing the game.
    @note Since this example has only one scene, all the timing and sprite sheets are managed in this class.
    If there's a requirement for multiple scenes, you might want to create a Scene class separately, and decide which
    objects are shared between scenes (for example, player's backpack) and which are in one specific scene only
    (for example, timer).
    """
    def __init__(self, screen: Surface):
        """
        Creates the game
        @param screen: The screen surface (created with pygame.display.set_mode) to draw on. This screen is used both
        for drawing and for creating the camera.
        @note To pave our way to Rust projects, this project uses type hints in function signatures.
        """

        # Load sprite sheets (aka costumes)
        assets_path = os.path.dirname(os.path.realpath(__file__))
        bear_spritesheet = image.load(os.path.join(
            assets_path, "..", "assets", "bears.png"))
        berry_spritesheet = image.load(os.path.join(
            assets_path, "..", "assets", "berry.png"))

        # Definition of game objects
        self.world = World()
        self.bear = Bear(320, 180, bear_spritesheet)
        self.berry_group = Group()
        for _ in range(100):
            x, y = randint(0, 1000), randint(0, 1000)
            berry = Berry(x, y, berry_spritesheet)
            berry.rect = berry.rect.clamp(self.world.rect)
            self.berry_group.add(berry)
        self.screen = screen
        self.camera = Camera(320, 180, screen)
        self.text = TextUI(0, 0)

        # Gameplay-related data
        self.score = 0
        self.remaining = 100
        self.timer = 0

        # State management. You might want to use a state machine for complex games.
        self.running = True

    def update(self, ticks: int):
        """
        Updates game state with timing information provided by PyGame
        @param ticks: number of milliseconds passed since pygame.init() was called
        (obtained with pygame.time.get_ticks())
        @return: None
        """
        self.bear.update(ticks)
        self.bear.clamp(self.world.rect)
        self.camera.move_to(self.bear.position)
        self.camera.clamp(self.world.rect)
        collide_list = spritecollide(self.bear, self.berry_group, True)
        for _ in collide_list:
            self.score += 1
            self.remaining = len(self.berry_group)
            if self.remaining == 0:
                self.running = False
        if self.running:
            self.timer = ticks
            self.text.set_text(
                "Collected: {0}. Remaining: {1}. Elapsed time: {2:.1f} s".format(self.score, self.remaining, self.timer / 1000.0))
        else:
            self.text.set_text(
                "You collected all berries in {0:.1f} seconds".format(self.timer / 1000.0))

    def on_key_down(self, keys: List[bool]):
        """
        Event listener for pygame.KEYDOWN event. All key press checks should be done in this function and on_key_up().
        @bug Only checks the first pressed key in the list. Not able to handle multiple keys pressed at the same time
        @param keys: a list of key status obtained with pygame.key.get_pressed().
        @return: None
        """
        directions = [Vector2(0, -1), Vector2(-1, 0),
                      Vector2(0, 1), Vector2(1, 0)]
        accepted_keys = [K_UP, K_LEFT, K_DOWN, K_RIGHT, K_w, K_a, K_s, K_d]
        for index, key in enumerate(accepted_keys):
            if keys[key]:
                self.bear.set_direction(directions[index % 4])
                self.bear.set_walking(True)

    def on_key_up(self, keys: List[bool]):
        """
        Event listener for pygame.KEYUP event. All key press checks should be done in this function and on_key_down().
        @note As required by Python syntax, the positional argument keys must be present even if it's not used anywhere
        in the function.
        @param keys: a list of key status obtained with pygame.key.get_pressed().
        @return: None
        """
        self.bear.set_walking(False)

    def render(self):
        """
        Draws all game objects onto the screen based on camera position.
        @note Order matters.
        @return: None
        """
        self.world.draw(self.screen, self.camera)
        for berry in self.berry_group:
            if berry.rect.colliderect(self.camera.rect):
                berry.draw(self.screen, self.camera)
        self.bear.draw(self.screen, self.camera)
        self.text.render(self.screen)
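
A minimal driver loop for this Game class might look like the sketch below; the window size, frame rate, and event wiring are illustrative assumptions, not part of the project source.

# Illustrative main loop only (assumptions: 640x360 window, 60 FPS cap).
import pygame

pygame.init()
screen = pygame.display.set_mode((640, 360))
game = Game(screen)
clock = pygame.time.Clock()
while game.running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game.running = False
        elif event.type == pygame.KEYDOWN:
            game.on_key_down(pygame.key.get_pressed())
        elif event.type == pygame.KEYUP:
            game.on_key_up(pygame.key.get_pressed())
    game.update(pygame.time.get_ticks())
    game.render()
    pygame.display.flip()
    clock.tick(60)
pygame.quit()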
Code Example #13
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import time
import cv2

import serial

from utils.Camera import *

camera = Camera()

arduino = serial.Serial(port="/dev/tty.usbmodem143101",
                        baudrate=9600)  # create usb link
time.sleep(2)  #time recommended by arduino

classes = ["verre", "plastique et metal", "carton", "rien"]  #object classes

# Disable scientific notation
np.set_printoptions(suppress=True)

# Import the model generated with Teachable Machine
model = tensorflow.keras.models.load_model('keras_model.h5')

# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)


def predict_object(image_cv2):
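    # NOTE: the original example is truncated after the signature above. The body
    # below is an illustrative sketch of the usual Teachable Machine inference steps
    # (resize, normalize to [-1, 1], predict, pick the best class); it is an
    # assumption, not the project's actual code.
    image = Image.fromarray(cv2.cvtColor(image_cv2, cv2.COLOR_BGR2RGB))
    image = ImageOps.fit(image, (224, 224), Image.ANTIALIAS)
    image_array = np.asarray(image, dtype=np.float32)
    data[0] = (image_array / 127.0) - 1  # normalization used by Teachable Machine exports
    prediction = model.predict(data)
    return classes[int(np.argmax(prediction))]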
Code Example #14
File: env.py Project: AdenosHermes/OdometryCNN
    lines = []
    for i in range(N_poly):
        lines.extend(random_poly().lines)

    world = engine.render_world(lines, 75)
    save_image(os.path.join(args.output_directory, 'world.png'), world)

    times = []

    cam_pos = np.array((0, 0))
    cam_angle = 0
    max_angle_jump = np.radians(10)

    data = np.empty((N_steps, 3 + cam_size))
    for i in range(N_steps):
        cam = Camera(cam_pos, cam_angle, cam_fov, cam_size)

        start_time = time.time()
        img = engine.render(lines, cam)
        times.append(time.time() - start_time)

        data[i, 0:2] = cam_pos
        data[i, 2] = cam_angle
        data[i, 3:] = img

        save_image(os.path.join(args.output_directory, 'im_%03d.png' % i), img)

        cam_angle += np.random.uniform(-max_angle_jump, max_angle_jump)

    np.savetxt(os.path.join(args.output_directory, 'data.txt'),
               data,
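
The example above is cut off mid-call. Assuming the np.savetxt call completes with its default settings, a small illustrative sketch for reading the dataset back and splitting it into the columns filled above would be:

# Illustrative only: recover the per-step camera pose and rendering from data.txt.
loaded = np.loadtxt(os.path.join(args.output_directory, 'data.txt'))
cam_positions = loaded[:, 0:2]  # (x, y) camera position per step
cam_angles = loaded[:, 2]       # camera angle per step
images = loaded[:, 3:]          # flattened 1D rendering of length cam_size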
Code Example #15
File: Main.py Project: smurpheus/DartDetector
class BackgroundSubtractor(Thread):
    ###Default ######
    history = 500
    shad_tresh = 0.5
    var_tresh = 16
    var_max = 75
    var_min = 4
    #################
    history = 200
    shad_tresh = 0.55
    var_tresh = 16
    var_max = 75
    var_min = 1
    arrows = []
    fgbg = None
    threadLock = None
    storage = None
    image = None
    stopped = False
    paused = False
    def _initialize_substractor(self):
        self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
        self.fgbg.setHistory(self.history)
        self.fgbg.setShadowThreshold(self.shad_tresh)
        self.fgbg.setVarThreshold(self.var_tresh)
        # self.fgbg.setVarMax(self.var_max)
        # self.fgbg.setVarMin(self.var_min)
        return self.fgbg

    def get_image(self):
        self.threadLock.acquire()
        img = copy.copy(self.image)
        self.threadLock.release()
        return img

    def set_image(self, img):
        self.threadLock.acquire()
        self.image = img
        self.threadLock.release()

    def get_substracted(self):
        self.threadLock.acquire()
        img = copy.copy(self.substracted)
        self.threadLock.release()
        return img

    def set_substracted(self, img):
        self.threadLock.acquire()
        self.substracted = img
        self.threadLock.release()

    def _set_arrow(self, arrow):
        self.threadLock.acquire()
        self.arrows.append(arrow)
        self.threadLock.release()

    def clear_arrows(self):
        self.threadLock.acquire()
        self.arrows = []
        self.threadLock.release()

    def get_arrows(self):
        self.threadLock.acquire()
        return_list = list(self.arrows)
        self.threadLock.release()
        return return_list

    def get_storage(self):
        self.threadLock.acquire()
        return_list = list(self.storage.storage)
        unaltered = list(self.storage.unaltered)
        self.threadLock.release()
        return return_list, unaltered

    def _add_to_storage(self, contours, f1, no_of_frame):
        self.threadLock.acquire()
        self.storage.add_to_storage(contours, f1, no_of_frame)
        self.threadLock.release()

    def __init__(self, c1=0, camera=None):
        Thread.__init__(self)
        self.threadLock = Lock()
        print("BackgroundSubtractor called with capture %s" % c1)
        if not isinstance(camera, Camera):
            self.camera = Camera(device=c1)
        else:
            self.camera = camera
        # c1.release()re('test.avi')

        self.storage = ContourStorage()
        self._initialize_substractor()

    def run(self):
        try:
            self.run_substraction()
        except NoImage:
            self.stopped = True

    def run_substraction(self):

        while not self.stopped:
            if not self.paused:
                res = self.camera.get_image()
                if res is not None:
                    f1,reseted = res
                else:
                    raise NoImage()
                no_of_frame = self.camera.read_frame_no
                if reseted:
                    self._initialize_substractor()


                fgmask1 = self.fgbg.apply(f1)
                fgmask1 = cv2.inRange(fgmask1, 250, 255)
                kernel = np.ones((6, 6), np.uint8)
                closed = cv2.morphologyEx(fgmask1, cv2.MORPH_CLOSE, kernel)
                kernel = np.ones((4, 4), np.uint8)
                opened = cv2.morphologyEx(closed, cv2.MORPH_OPEN, kernel)
                closed2 = np.array(closed)
                self.set_substracted(closed)
                im2, contours, hierarchy = cv2.findContours(closed2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                colored = cv2.cvtColor(closed2, cv2.COLOR_GRAY2BGR)
                self._add_to_storage(contours, f1, no_of_frame)
                arrow = self.storage.get_arrow(self.history)
                for a in arrow:
                    self._set_arrow(a)
                # print str("Arrows: %s"%self.get_arrows())
                stdout.flush()
                for arrow in self.get_arrows():
                    cv2.drawContours(f1, arrow.contours, -1, (0, 255, 0), -1)
                    cv2.drawContours(f1, [arrow.aproximated], 0, (255, 255, 0), 2)

                    cv2.circle(f1, (arrow.tip[0], arrow.tip[1]), 3, [255, 0, 0], 2)
                    f1[arrow.tip[1], arrow.tip[0]] = [255, 0, 0]

                    cv2.circle(f1, (arrow.tip2[0], arrow.tip2[1]), 3, [101,8,108], 2)
                    f1[arrow.tip2[1], arrow.tip2[0]] = [101,8,108]

                    cv2.circle(f1, (arrow.tip3[0], arrow.tip3[1]), 3, [225,97,53], 2)
                    f1[arrow.tip2[1], arrow.tip2[0]] = [225,97,53]

                    cv2.circle(f1, (arrow.tip4[0], arrow.tip4[1]), 3, [81,106,37], 2)
                    f1[arrow.tip2[1], arrow.tip2[0]] = [81,106,37]

                    rows, cols = f1.shape[:2]
                    cv2.line(f1, (cols - 1, arrow.line[1]), (0, arrow.line[0]), (255, 255, 0), 1)
                    cv2.drawContours(f1, [np.int0(arrow.bbox)], 0, (0, 0, 255), 2)

                self.set_image(f1)
            else:
                time.sleep(1)
        #
        #     cv2.imshow("Current", closed)
        #     cv2.imshow("FG Substraction", colored)
        #     cv2.imshow("Original", f1)
        #
        #     k = cv2.waitKey(1) & 0xFF
        #     if k == ord('f') or len(arrows) >= 3:
        #         arrows = []
        #     if k == 27:
        #         break
        #     if k == 119:
        #         cv2.waitKey(-1)
        # cv2.destroyAllWindows()
    def _set_history(self, val):
        self.history = val
        self.fgbg.setHistory(self.history)

    def _set_shad_tresh(self, val):
        self.shad_tresh = val/100.
        self.fgbg.setShadowThreshold(self.shad_tresh)


    def _set_var_tresh(self, val):
        self.var_tresh = val
        self.fgbg.setVarThreshold(self.var_tresh)

    def _set_var_min(self, val):
        self.var_min = val
        self.fgbg.setVarMin(self.var_min)

    def _set_var_max(self, val):
        self.var_max = val
        self.fgbg.setVarMax(self.var_max)
Code Example #16
# airSphere = Sphere()
# airSphere.transform = scaling(0.5, 0.5, 0.5)
# airSphere.material = Material()
# airSphere.material.color = Color.white()
# airSphere.material.diffuse = 0
# airSphere.material.shininess = 300.0
# airSphere.material.reflective = 0.9
# airSphere.material.transparency = 0.9
# airSphere.material.ambient = 0
# airSphere.material.specular = 0.9
# airSphere.material.refractive_index = 1.0000034

world = World()
world.Objects = [Floor, _sphere, _sphere2]
world.light = PointLight(Point(-10, 10, -10), Color(1, 1, 1))
cam = Camera(width, height, pi / 3)
cam.transform = view_transform(Point(0, 1.5, -5), Point(0, 1, 0),
                               Vector3(0, 1, 0))

# cam.transform = view_transform(Point(0,0, -5), Point(0, 0, 0), Vector3(0, 1, 0))


def PixelRender(x, y):
    global cam, world
    ray = ray_for_pixel(cam, x, y)
    color = color_at(world, ray)
    pixel(screen, x, y, color.ConvertColor())
    pygame.display.flip()


def render(camera, world):
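    # NOTE: the original example is truncated after the signature above. A plausible
    # body, mirroring PixelRender() and using the global width, height, and screen
    # from this snippet, would be (an assumption, not the project's actual code):
    for y in range(height):
        for x in range(width):
            ray = ray_for_pixel(camera, x, y)
            color = color_at(world, ray)
            pixel(screen, x, y, color.ConvertColor())
    pygame.display.flip()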
Code Example #17
import utils
from utils import Camera

# np.random.seed(79)
np.set_printoptions(precision=3)

thetaX = np.random.randint(0, 360)
thetaY = np.random.randint(0, 360)
thetaZ = np.random.randint(0, 360)

pts3d_11 = np.random.randint(11, 50, size=(8, 3)).astype(np.float32) # 8-points
K = np.load("../camMatrix_720p.npy")
dist = np.zeros(shape=5)

# This is the first camera, with the same orientation and position as the world origin.
Cam11 = Camera(K)
pts2d_11 = utils.project(Cam11.P, pts3d_11)
pts2d_11 = utils.hom_to_euc(pts2d_11)

# This is second camera.
Cam12 = Camera(K, Rc=utils.rotate(thetax=thetaX), center=np.asarray([10, 25, 7]).reshape(3, -1))
Cam12_R = Cam12.R
Cam12_c = Cam12.center
Cam12_t = Cam12.t
pts3d_12 = np.matmul(Cam12.Rt, utils.euc_to_hom(pts3d_11).T).T
pts2d_12 = utils.project(Cam12.P, pts3d_11)
pts2d_12 = utils.hom_to_euc(pts2d_12)

# This is third camera.
Cam13 = Camera(K, Rc=utils.rotate(thetay=thetaY), center=np.asarray([15, 25, 10]).reshape(3, -1))
Cam13_R = Cam13.R
Code Example #18
from flask import Flask, render_template, Response
import numpy as np
import cv2
from utils import Camera
import torch

app = Flask(__name__)

camera = Camera()


@app.route('/home', methods=['GET'])
def home():
    return render_template('home.html')


@app.route('/streamCam', methods=['GET'])
def streamCam():
    return Response(camera.get_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == "__main__":
    app.run(debug=True)
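
The utils.Camera class is not shown in this example. A minimal sketch of a get_frames() generator compatible with the multipart/x-mixed-replace response above, assuming an OpenCV capture and JPEG encoding, could look like this (the real utils.Camera may differ):

# Illustrative sketch only; not the project's utils.Camera implementation.
class SketchCamera:
    def __init__(self, device=0):
        self.capture = cv2.VideoCapture(device)

    def get_frames(self):
        while True:
            ok, frame = self.capture.read()
            if not ok:
                break
            ok, jpeg = cv2.imencode('.jpg', frame)
            if not ok:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')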
Code Example #19
File: Main.py Project: smurpheus/DartDetector
    def __init__(self, inp):
        if isinstance(inp,str):
            self.fname = inp.split('.')[0]
        self.camera = Camera(device=inp, output=self.fname)
        self.board = Board()
        camconf = "camera_config.json"
        baord_conf = "boardconfig.json"
        if os.path.isfile(camconf):
            self.camera.load_config(filename=camconf)
        else:
            self.camera.do_calibration(img=True)
            self.camera.save_config(camconf)
        if self.board_config_load and os.path.isfile(baord_conf):
            with open(baord_conf, 'r') as bc:
                imgps = json.loads(bc.readline())

            self.Calibrated = BoardCalibrator(camera=self.camera, imgpts=imgps, board=self.board)
        else:
            self.Calibrated = BoardCalibrator(camera=self.camera)
            with open("boardconfig.json", 'w') as bc:
                imgps = self.Calibrated.imgpoints
                bc.write(json.dumps(imgps))
        self.Substractor = BackgroundSubtractor(c1=inp,camera=self.camera)
        plt.ion()
        self.figure = plt.figure()
        self.plt1 = self.figure.add_subplot(111)
        self.line1, = self.plt1.plot(range(200), [0] * 200, 'r.-')
        self.plt1.axis([0, 200, 0, 10000])
        cv2.namedWindow("Current", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Current", 20,20)
        cv2.namedWindow("Original", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Original", 20, 500)
        cv2.namedWindow("Points", cv2.WINDOW_NORMAL)
        cv2.moveWindow("Current", 1000, 20)
        cv2.namedWindow("Blobimg", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("Points", self._click)
        mixer.init()
        mixer.music.load('beep.mp3')
        # cv2.namedWindow("FG Substraction", cv2.WINDOW_NORMAL)
        # cv2.createTrackbar("History", "Current", self.history, 1000, self._set_history)
        # cv2.createTrackbar("Shadow Treshold", "Current", int(self.shad_tresh * 100), 100, self._set_shad_tresh)
        # cv2.createTrackbar("VarThreshold", "Current", self.var_tresh, 100, self._set_var_tresh)
        # cv2.createTrackbar("VarMax", "Current", self.var_max, 100, self._set_var_max)
        # cv2.createTrackbar("VarMin", "Current", self.var_min, 100, self._set_var_min)

        self.Substractor.start()

        realboard = np.zeros((self.camera.height, self.camera.width, 3), np.uint8)
        # self.frame = self.camera.undistort_image(img)
        for i in self.Calibrated.imp:
            try:
                realboard[i[0][1], i[0][0]] = [0,0,255]
            except IndexError:
                pass

        added = 0
        while True:
            img = self.Substractor.get_image()
            if img is not None:

                img = cv2.add(realboard,img)
                cv2.imshow("Original", img)
                cv2.imshow("Points", self.board.draw_board())
                if self.Substractor.stopped:
                    self.write_data()
                    exit()
                cv2.imshow("Current", self.Substractor.get_substracted())
                storage, unaltered = self.Substractor.get_storage()
                y = [x[2] for x in storage]
                y = unaltered
                self.line1.set_xdata(range(len(y)))
                self.line1.set_ydata(y)
                k = cv2.waitKey(1)
                if k == ord('a'):
                    self.add_dart(frame_no=self.camera.read_frame_no)
                if k == ord('s'):
                    self.write_data()
                if k == ord('w'):
                    pass
                    self.figure.savefig(r"thesisimages/plot.jpg")
                if k == ord('f'):
                    added = 0
                    self.Substractor.clear_arrows()
                if k == 27:
                    self.Substractor.stopped = True
                    break
                if k == 119:
                    print "Pressed w Key so Waiting"
                    cv2.waitKey(-1)
            arrows = self.Substractor.get_arrows()
            i = 1
            for each in arrows:
                tip = each.tip
                frame_no = each.frame_no
                points = self.Calibrated.calculate_points(tip)
                if i > added:
                    self.add_dart(arrow=each, detected=points, frame_no=frame_no)
                    added += 1
                i += 1
            if added >= 3:
                added = 0
                self.Substractor.clear_arrows()
        self.write_data()
Code Example #20
File: main.py Project: MrGrayCode/Ant-Bot
        #sum_error += error
        motor_speed = kp * error + kd * (error - last_error)
        last_error = error
        left_motor_speed = base_speed - motor_speed
        right_motor_speed = base_speed + motor_speed
        print(left_motor_speed, right_motor_speed)
        bot.forward(left_motor_speed, right_motor_speed)
    except Exception as e:
        if serial_output == 'N' and prev_reading != 'N':
            print("[NODE]")
            node_count += 1
        #else:
        #print(e)
    prev_reading = serial_output

cam = Camera()

cameraThread = Thread(target=cam.getArucoID)
motionThread = Thread(target=bot.right, args=(20, 20))
cameraThread.start()
motionThread.start()

cameraThread.join()
motionThread.join()
'''
serial_output = str(ser.readline())
serial_output = int(serial_output[2:-5])

while serial_output>=-1 and serial_output <=50:
    bot.right(40,40)
    serial_output = str(ser.readline())
Code Example #21
    color = scipy.misc.imread('./color.png')  # color image
    color = scipy.misc.imresize(color, (1080, 1920))
    depth_w = scipy.misc.imread('./depth.png').astype(
        'float32')  # depth map warped into the color frame

    # intrinsic calibration data
    ratio = np.array([1920.0 / 512.0, 1080.0 / 424.0])
    K = np.array([[
        3.7132019636619111e+02 * ratio[0], 0.0,
        2.5185416982679811e+02 * ratio[0]
    ],
                  [
                      0.0, 3.7095047063504268e+02 * ratio[1],
                      2.1463524817996452e+02 * ratio[1]
                  ], [0.0, 0.0, 1.0]])
    cam = Camera(K)

    # create algorithm
    poseNet = PoseNet3D(ope_depth=OPE_DEPTH,
                        vpn_type=VPN_TYPE,
                        gpu_id=GPU_ID,
                        gpu_memory_limit=GPU_MEMORY,
                        K=K)

    # loop
    mask = np.logical_not(depth_w == 0.0)

    # run algorithm
    coords_pred, det_conf = poseNet.detect(color, depth_w, mask)

    # visualize