Example #1
0
 def collect_state(self):
     """Collect one observation: near-grid encoding, target direction, own pose.

     Returns a dict with keys 'grid_vec', 'target', 'pos'.
     Raises DeadException if the agent dies while waiting for an observation.
     """
     mc = self.mc
     # Fixed lapis-block target; the position is the x/y/z triple.
     target_pos = ['lapis_block', 4.5, 46, 12.5][1:4]
     mc.observeProc()
     aPos = mc.getAgentPos()
     logging.debug(aPos)
     # Poll until a position reading arrives; give up if the agent died.
     while aPos is None:
         time.sleep(0.05)
         mc.observeProc()
         aPos = mc.getAgentPos()
         if not all(mc.isAlive):
             raise DeadException()
     # Nearby-block grid, encoded for the walking policy (first 27 cells).
     grid_enc = torch.as_tensor(grid_to_vec_walking(mc.getNearGrid()[:27]))
     # Direction to the target: pitch, yaw, distance.
     pitch, yaw, dist = direction_to_target(mc, target_pos)
     target_enc = torch.as_tensor([pitch, yaw, dist])
     # Own pose: pitch/yaw normalized to radians, position modulo one block.
     # aPos layout: 'XPos', 'YPos', 'ZPos', 'Pitch', 'Yaw'.
     self_pitch = toRadAndNorm(aPos[3])
     self_yaw = toRadAndNorm(aPos[4])
     xpos, ypos, zpos = (coord % 1 for coord in aPos[0:3])
     logging.debug("%.2f %.2f %.2f ", xpos, ypos, zpos)
     pose_enc = torch.as_tensor([self_pitch, self_yaw, xpos, ypos, zpos])
     return {'grid_vec': grid_enc, 'target': target_enc, 'pos': pose_enc}
Example #2
0
    def collect_state(self):
        """Block until an image and agent pose are available, then return the state.

        Returns a dict with keys 'image' (CHW float tensor in [0, 1]),
        'pitch_yaw', 'pitch_yaw_raw', 'coordinates', 'visible'.
        """
        while True:
            self.mc.observeProc()
            frame = self.mc.getImage()
            pos = self.mc.getAgentPos()
            if frame is None or pos is None:
                # Observation not ready yet -- retry shortly.
                time.sleep(0.05)
                continue
            raw_pitch = degree2rad(pos[3])
            raw_yaw = degree2rad(pos[4])
            norm_pitch = toRadAndNorm(pos[3])
            norm_yaw = toRadAndNorm(pos[4])
            # HWC -> CHW, scaled to [0, 1]; one extra channel when depth is on.
            frame = frame.reshape(
                (240, 320, 3 + self.want_depth)).transpose(2, 0, 1) / 255.
            pitch_yaw = torch.as_tensor([norm_pitch, norm_yaw])
            pitch_yaw_raw = torch.as_tensor([raw_pitch, raw_yaw])
            x, y, z = pos[0:3]
            # 1.6025 -- presumably the agent eye-height offset; TODO confirm.
            y += 1.6025
            visible = self.observe_by_line()
            return dict(image=torch.as_tensor(frame).float(),
                        pitch_yaw=pitch_yaw,
                        pitch_yaw_raw=pitch_yaw_raw,
                        coordinates=[x, y, z],
                        visible=visible)
Example #3
0
    def collect_state(self):
        """Wait for image + pose, then return {'image': CHW tensor, 'position': [pitch, yaw]}."""
        while True:
            self.mc.observeProc()
            frame = self.mc.getImage()
            pos = self.mc.getAgentPos()
            if frame is None or pos is None:
                # Nothing to observe yet; poll again shortly.
                time.sleep(0.05)
                continue
            pitch_yaw = torch.as_tensor([toRadAndNorm(pos[3]), toRadAndNorm(pos[4])])
            # HWC -> CHW, scaled to [0, 1]; extra channel when depth is enabled.
            frame = frame.reshape((240, 320, 3 + self.want_depth)).transpose(2, 0, 1) / 255.
            return dict(image=torch.as_tensor(frame).float(), position=pitch_yaw)
Example #4
0
    def collect_state(self):
        """
        state has format:
            image, prev_action, prev_prev_action,
            pitch, prev_pitch,
            yaw - prev_yaw, prev_yaw - prev_prev_yaw

        Returns a dict with keys 'image', 'images', 'state', 'reward',
        'yaw', 'pitch'. Raises DeadException if the agent fell too low.
        """
        mc = self.mc
        mc.observeProc()
        aPos = mc.getAgentPos()
        img_data = self.mc.getImage()
        logging.debug(aPos)
        # Poll until both the position and the camera frame are available.
        while aPos is None or (img_data is None):
            time.sleep(0.05)
            mc.observeProc()
            aPos = mc.getAgentPos()
            img_data = self.mc.getImage()
        height = aPos[1]
        if height < 30: # avoid ponds and holes
            raise DeadException()
        pitch = aPos[3]
        yaw = aPos[4]
        data = dict()
        # The client delivers a 4x-resolution frame (960x1280); downsample to 240x320.
        img_data = img_data.reshape((240 * 4, 320 * 4, 3 + self.want_depth))
        img_data = cv2.resize(img_data, (320, 240))
        # HWC -> CHW, scaled to [0, 1].
        img = img_data.reshape((240, 320, 3 + self.want_depth)).transpose(2, 0, 1) / 255.

        img = torch.as_tensor(img).float()
        data['image'] = img
        actions = []

        imgs = [torch.as_tensor(img)]
        yaws = [yaw]
        pitches = [pitch]
        # Walk the history newest-first: prev, then prev_prev, ...
        for item in reversed(self.state_queue):
            actions.append(item['action'])
            yaws.append(item['yaw'])
            pitches.append(item['pitch'])
        # Pad with a "no action" marker (-1) and the current pose until we
        # have a history of length 3.
        while len(yaws) < 3:
            actions.append(torch.as_tensor(-1).to(img))
            yaws.append(torch.as_tensor(yaw).to(img))
            pitches.append(torch.as_tensor(pitch).to(img))

        # use relative change for yaws
        for i in range(len(yaws) - 1):
            yaws[i] =  toRadAndNorm(yaws[i] - yaws[i + 1])
        # Drop the oldest entry: only the two relative yaw deltas and the two
        # most recent pitches go into the state vector.
        yaws.pop()
        pitches.pop()
        state = torch.as_tensor(actions + pitches + yaws)
        data.update(dict(state=state,
                         images=torch.stack(imgs),
                         image=img,
                         reward=torch.as_tensor(0),
                         yaw=torch.as_tensor(yaw),
                         pitch=torch.as_tensor(pitch)
                         ))
        return data
Example #5
0
 def lookAt(self, pitch_new, yaw_new):
     """Turn the agent toward the given pitch/yaw (radians).

     Proportional control loop: each tick (20 ms) it sends turn/pitch
     commands scaled to half the remaining angular error, stopping once
     both errors drop below 0.02 rad or after 2000 ticks. Always sends
     "turn 0" / "pitch 0" afterwards to stop residual rotation.
     """
     mc = self.mc
     # Was a bare print(); use the module logger like the rest of the file.
     logging.debug('look at')
     for _ in range(2000):
         time.sleep(0.02)
         mc.observeProc()
         aPos = mc.getAgentPos()
         if aPos is None:
             # No observation yet -- try again next tick.
             continue
         current_pitch = toRadAndNorm(aPos[3])
         current_yaw = toRadAndNorm(aPos[4])
         # Remaining error, angle-normalized.
         pitch = normAngle(normAngle(pitch_new) - current_pitch)
         yaw = normAngle(normAngle(yaw_new) - current_yaw)
         if abs(pitch) < 0.02 and abs(yaw) < 0.02:
             break
         # Proportional gain 0.5, then repeatedly damp oversized commands
         # toward unit magnitude (presumably the client's accepted range
         # for turn/pitch is [-1, 1] -- TODO confirm).
         yaw = yaw * 0.5
         while abs(yaw) > 1:
             yaw *= 0.8
         pitch = pitch * 0.5
         while abs(pitch) > 1:
             pitch *= 0.8
         mc.sendCommand("turn " + str(yaw))
         mc.sendCommand("pitch " + str(pitch))
     mc.sendCommand("turn 0")
     mc.sendCommand("pitch 0")
Example #6
0
    def collect_state(self):
        """Collect one navigation state: current image plus a short history of
        yaw / distance / relative-height / action values.

        Returns a dict with keys 'image', 'images', 'state', 'dist', 'yaw',
        'ypos'. Raises DeadException if the agent dies while waiting.
        """
        mc = self.mc
        target =  ['lapis_block', self.target_x, 30, self.target_y]
        target_pos = target[1:4]
        mc.observeProc()
        aPos = mc.getAgentPos()
        img_data = self.mc.getImage()
        logging.debug(aPos)
        # Poll until position and frame are available; abort if the agent died.
        while aPos is None or (img_data is None):
            time.sleep(0.05)
            mc.observeProc()
            aPos = mc.getAgentPos()
            img_data = self.mc.getImage()
            if not all(mc.isAlive):
                raise DeadException()

        # target
        pitch, yaw, dist = direction_to_target(mc, target_pos)
        # 'XPos', 'YPos', 'ZPos', 'Pitch', 'Yaw'
        # take pitch, yaw
        # take XPos, YPos, ZPos modulo 1
        self_pitch = toRadAndNorm(aPos[3])
        self_yaw = toRadAndNorm(aPos[4])
        xpos, ypos, zpos = aPos[0:3]
        logging.debug("%.2f %.2f %.2f ", xpos, ypos, zpos)
        # use relative height
        ypos = 30 - aPos[1]
        data = dict()

        # The client delivers a 4x frame (960x1280); downsample to 240x320,
        # then HWC -> CHW scaled to [0, 1].
        img_data = img_data.reshape((240 * 4, 320 * 4, 3 + self.want_depth))
        img_data = cv2.resize(img_data, (320, 240))
        img = img_data.reshape((240, 320, 3 + self.want_depth)).transpose(2, 0, 1) / 255.
        img = torch.as_tensor(img).float()
        data['image'] = img

        actions = []
        imgs = [torch.as_tensor(img)]
        yaws = [torch.as_tensor(yaw)]
        dists = [torch.as_tensor(dist)]
        heights = [torch.as_tensor(ypos)]
        # first prev, then prev_prev etc..
        for item in reversed(self.state_queue):
            actions.append(item['action'])
            imgs.append(item['image'])
            yaws.append(item['yaw'])
            dists.append(item['dist'])
            heights.append(item['ypos'])
        # Pad with the current observation and a "no action" marker (-1)
        # until the history is 3 steps long.
        while len(imgs) < 3:
            imgs.append(img.to(img))
            actions.append(torch.as_tensor(-1).to(img))
            yaws.append(torch.as_tensor(yaw).to(img))
            dists.append(torch.as_tensor(dist).to(img))
            heights.append(torch.as_tensor(ypos).to(img))
        state = torch.as_tensor(yaws + dists + heights + actions)
        data.update(dict(state=state,
                         images=torch.stack(imgs),
                         image=img,
                         dist=torch.as_tensor(dist),
                         yaw=torch.as_tensor(yaw),
                         ypos=torch.as_tensor(ypos)
                         ))

        # depth
        # NOTE(review): coords1 is never used afterwards; collect_visible is
        # presumably called for its side effect on `data` -- confirm.
        coords1 = self.collect_visible(data, aPos[:3])
        if self.write_visualization:
            visualize(yaw, dist)
        # Sanity check: no NaNs in any tensor handed to the model.
        for key, value in data.items():
            if isinstance(value, torch.Tensor):
                assert not torch.isnan(value).any()
        return data
Example #7
0
    def collect_state(self):
        """Collect the walking state: near-grid vector, target direction,
        own pose, camera image, and (when available) the block under the
        crosshair with its distance.

        Returns a dict with keys 'grid_vec', 'target', 'state', 'pos',
        'image' and optionally 'visible'. Raises DeadException if the agent
        dies while waiting for an observation.
        """
        mc = self.mc
        target = ['lapis_block', self.target_x, 30, self.target_y]
        target_pos = target[1:4]
        mc.observeProc()
        aPos = mc.getAgentPos()
        img_data = self.mc.getImage()
        logging.debug(aPos)
        # Poll until position and frame are available; abort if the agent died.
        while aPos is None or (img_data is None):
            time.sleep(0.05)
            mc.observeProc()
            aPos = mc.getAgentPos()
            img_data = self.mc.getImage()
            if not all(mc.isAlive):
                raise DeadException()
        # grid
        grid = mc.getNearGrid()

        grid_vec = grid_to_vec_walking(grid[:36])
        # position encoding
        grid_enc = torch.as_tensor(grid_vec)
        # target
        pitch, yaw, dist = direction_to_target(mc, target_pos)
        target_enc = torch.as_tensor([pitch, yaw, dist])
        # 'XPos', 'YPos', 'ZPos', 'Pitch', 'Yaw'
        # take pitch, yaw
        # take XPos, YPos, ZPos modulo 1
        self_pitch = toRadAndNorm(aPos[3])
        self_yaw = toRadAndNorm(aPos[4])
        xpos, ypos, zpos = [_ % 1 for _ in aPos[0:3]]
        # use relative height
        ypos = 30 - aPos[1]
        logging.debug("%.2f %.2f %.2f ", xpos, ypos, zpos)
        self_pos_enc = torch.as_tensor(
            [self_pitch, self_yaw, xpos, ypos, zpos])
        data = dict(grid_vec=grid_enc,
                    target=target_enc,
                    state=target_enc,
                    pos=self_pos_enc)

        # HWC -> CHW, scaled to [0, 1]; extra channel when depth is enabled.
        img = img_data.reshape(
            (240, 320, 3 + self.want_depth)).transpose(2, 0, 1) / 255.
        data['image'] = torch.as_tensor(img).float()
        # depth
        visible = self.mc.getLineOfSight('type')
        if visible is not None:
            coords = [
                self.mc.getLineOfSight('x'),
                self.mc.getLineOfSight('y'),
                self.mc.getLineOfSight('z')
            ]
            # NOTE(review): 1.6025 looks like the agent eye-height offset --
            # confirm. Also: if aPos is a numpy array, aPos[:3] is a *view*
            # and the += below mutates aPos itself; verify aPos is a list.
            height = 1.6025
            coords1 = aPos[:3]
            coords1[1] += height
            # Euclidean distance from the eye to the crosshair block.
            dist = numpy.linalg.norm(
                numpy.asarray(coords) - numpy.asarray(coords1), 2)
            data['visible'] = [visible] + coords + [dist]
        if self.write_visualization:
            # Debug overlay: distance text plus a compass-style yaw indicator,
            # written to episodes/imgNNN.png.
            import cv2
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (40, 40)
            fontScale = 0.9
            fontColor = (15, 15, 15)
            lineType = 2
            img_draw = (img * 255).astype(numpy.uint8)
            img_draw = cv2.putText(img_draw.transpose(1, 2, 0),
                                   'distance {0:.1f}'.format(dist),
                                   bottomLeftCornerOfText, font, fontScale,
                                   fontColor, lineType)
            #img_draw = cv2.putText(img_draw, 'yaw {0:.1f}'.format(yaw),
            #                       (10, 80),
            #                       font,fontScale,fontColor,lineType)
            c_x = 260
            c_y = 200
            r = 20
            img_draw = cv2.circle(img_draw, (c_x, c_y), r, (0, 255, 255), 2)
            cos_x = numpy.cos(yaw + numpy.pi / 2) * r
            sin_y = numpy.sin(yaw + numpy.pi / 2) * r
            img_draw = cv2.line(img_draw, (c_x, c_y),
                                (round(c_x - cos_x), round(c_y - sin_y)),
                                (0, 255, 255), 2)
            cv2.imwrite('episodes/img{0}.png'.format(self.img_num), img_draw)
            self.img_num += 1
            #cv2.imshow('1', img_draw)
            #cv2.waitKey(100)

        #if self.want_depth:
        #    depth = data['image'][-1]
        #    h, w = [_ // 2 for _ in depth.shape]
        #    img_depth = img[-1][h, w]
        #    norm_depth = (depth * (dist / img_depth))
        #    data['image'][-1] = norm_depth
        # Sanity check: no NaNs in anything we return.
        for key, value in data.items():
            if isinstance(value, torch.Tensor):
                assert not torch.isnan(value).any()
        return data