Example #1
    def __init__(self, path_to_conf_file):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.transform = torchvision.transforms.ToTensor()
        self.converter = ConverterTorch().to(self.device)

        self.target_index = 65
        self.speed_mult = 2.5

        path_to_conf_file = Path(path_to_conf_file)
        config = load_yaml(path_to_conf_file.parent / 'config.yaml')

        self.net = Network(**config['model_args']).to(self.device)
        self.net.load_state_dict(torch.load(path_to_conf_file))
        self.net.eval()
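
A minimal usage sketch, assuming this __init__ belongs to the Planner class shown in Example #6 and that the checkpoint sits next to its config.yaml (the path below is hypothetical):

# Hypothetical checkpoint path; config.yaml is read from the same directory.
agent = Planner('/path/to/run/model_100.t7')
# agent.net is now on the chosen device and in eval mode.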
Example #2
    def __init__(self, teacher_path):
        super().__init__()

        teacher_yaml = teacher_path.parent / 'config.yaml'
        teacher_args = load_yaml(teacher_yaml)['model_args']
        teacher = Network(**teacher_args)
        teacher.load_state_dict(torch.load(str(teacher_path)))

        self.teacher = teacher
        self.converter = ConverterTorch()
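
The teacher is used as a frozen reference here, so a natural follow-up (an assumption; the source only loads the weights) is to put it in eval mode and disable its gradients:

# Assumed freezing step, not shown in the source:
self.teacher.eval()
for p in self.teacher.parameters():
    p.requires_grad_(False)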
Example #3
@torch.no_grad()  # evaluation only; also allows .numpy() on the network outputs below
def net_eval(net, data, config):
    net.eval()
    MAE, RMSE, EVS = [], [], []
    converter = ConverterTorch()
    iterator = tqdm.tqdm(data, desc='val', total=len(data), position=1, leave=None)
    # wandb.run.summary['step'] = 0
    num_theta, total = 0, 0
    for i, (rgb_forward, rgb, mapview, waypoints, _) in enumerate(iterator):
        waypoints[..., 0] = AUG_MAP_SIZE - (waypoints[..., 0] + 1) * mapview.shape[-1] / 2
        waypoints[..., 1] = (waypoints[..., 1] + 1) * mapview.shape[-2] / 2

        points_cam = converter(waypoints)
        points_cam[..., 0] = (points_cam[..., 0] / converter.W) * 2 - 1
        points_cam[..., 1] = (points_cam[..., 1] / converter.H) * 2 - 1

        rgb_forward = rgb_forward.to(config['device'])
        rgb = rgb.to(config['device'])
        model_input = torch.cat((rgb_forward, rgb), 1)
        points_cam = points_cam.to(config['device'])
        _waypoints = net(model_input)

        mae = torch.abs(points_cam - _waypoints)
        mae_mean = mae.sum((1, 2)).mean()
        MAE.append(mae_mean.item())

        rmse = torch.pow((points_cam - _waypoints), 2)
        rmse_mean = rmse.sum((1, 2)).mean()
        RMSE.append(rmse_mean.item())

        y_true, y_pred = points_cam.cpu().numpy(), _waypoints.cpu().numpy()
        explained_variance_score = 1 - np.var(y_true - y_pred) / np.var(y_true)
        EVS.append(explained_variance_score)

        # points_cam = points_cam.cpu().numpy()
        # _waypoints = _waypoints.cpu().numpy()
        # for i in range(len(points_cam)):
        #     theta1 = np.degrees(np.arctan2(points_cam[i][4][0], points_cam[i][4][1]))
        #     theta2 = np.degrees(np.arctan2(_waypoints[i][4][0], _waypoints[i][4][1]))
        #     if abs(theta1-theta2) < 3:
        #         num_theta += 1
        # total += len(points_cam)

        # metrics = dict()
        # metrics['images'] = _log_visuals(rgb, mapview, loss,
        #             waypoints, points_cam, _waypoints)
        # wandb.run.summary['step'] += 1
        # wandb.log(
        #     {('%s/%s' % ('val', k)): v for k, v in metrics.items()},
        #     step=wandb.run.summary['step'])
    # print('-------------------')
    # print(num_theta, '   ', total)
    # print(num_theta * 1.0 / total)
    # print('-------------------')
    return np.mean(MAE), np.sqrt(np.mean(RMSE)), np.mean(EVS)
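
A sketch of how net_eval might be called; only config['device'] is confirmed by the code above, and the DataLoader is assumed to yield the 5-tuple batches unpacked in the loop:

loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=False)
config = {'device': 'cuda' if torch.cuda.is_available() else 'cpu'}
mae, rmse, evs = net_eval(net, loader, config)
print('MAE %.4f  RMSE %.4f  EVS %.4f' % (mae, rmse, evs))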
Example #4
if __name__ == '__main__':
    """
        run arg path of dataset
    """

    import sys
    import cv2
    from PIL import ImageDraw
    # from ..utils import visualize_birdview
    # from ..converter import ConverterTorch
    from utils.common import visualize_birdview
    from utils.converter import ConverterTorch

    print(sys.argv[1])
    data = CarlaDataset(sys.argv[1])
    convert = ConverterTorch()

    for i in range(len(data)):
        rgb, birdview, meta = data[i]
        canvas = np.uint8(birdview.detach().cpu().numpy().transpose(1, 2, 0) * 255).copy()
        canvas = visualize_birdview(canvas)
        canvas = Image.fromarray(canvas)
        draw = ImageDraw.Draw(canvas)

        origin = np.array([birdview.shape[-1] // 2, birdview.shape[-1] - 5])
        offsets = np.array([[0, 0], [0, -10], [-5, -20]])
        points = origin + offsets
        points_cam = convert(torch.FloatTensor(points))
        points_reproject = convert.cam_to_map(points_cam)

        for x, y in points:
            # Completion sketch (the loop body is cut off in this listing):
            # draw the original map-space points ...
            draw.ellipse((x - 2, y - 2, x + 2, y + 2), fill=(255, 0, 0))
        for x, y in points_reproject.numpy():
            # ... and the cam->map round trip, which should land on top of them.
            draw.ellipse((x - 1, y - 1, x + 1, y + 1), fill=(0, 0, 255))
Example #5
        return rgb_forward, rgb, target_forward, target, waypoints, '%s %s' % (path.stem, frame)

if __name__ == '__main__':
    """ 
        run arg path of dataset
    """

    import sys
    import cv2
    from PIL import ImageDraw
    from utils.common import visualize_birdview
    from utils.converter import ConverterTorch

    print(sys.argv[1])
    data = CarlaDataset(sys.argv[1])
    convert = ConverterTorch()

    for i in range(len(data)):
        print(i)
        _, rgb, birdview, _, waypoints, meta = data[i]
        canvas = np.uint8(birdview.detach().cpu().numpy().transpose(1, 2, 0) * 255).copy()
        canvas = visualize_birdview(canvas)
        canvas = Image.fromarray(canvas)
        draw = ImageDraw.Draw(canvas)

        # if i == 167 or i == 177 or i == 175:
        #     print('save')
        #     cv2.imwrite(r'C:\Users\纪泽锋\Desktop\tmp\map_%d.jpg' % i, cv2.cvtColor(np.array(canvas), cv2.COLOR_BGR2RGB))

        for x, y in waypoints.squeeze():
            x = int(AUG_MAP_SIZE - (x + 1) / 2 * canvas.width) + 1
            # Completion sketch (the listing is truncated here): denormalize y
            # the same way as in training and draw the waypoint.
            y = int((y + 1) / 2 * canvas.height) + 1
            draw.ellipse((x - 2, y - 2, x + 2, y + 2), fill=(255, 0, 0))
Example #6
class Planner(object):
    def __init__(self, path_to_conf_file):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.transform = torchvision.transforms.ToTensor()
        self.converter = ConverterTorch().to(self.device)

        self.target_index = 65
        self.speed_mult = 2.5

        path_to_conf_file = Path(path_to_conf_file)
        config = load_yaml(path_to_conf_file.parent / 'config.yaml')

        self.net = Network(**config['model_args']).to(self.device)
        self.net.load_state_dict(torch.load(path_to_conf_file))
        self.net.eval()

    @torch.no_grad()
    def run_step(self, rgb, rgb_forward, viz=None):
        if Modular:
            # Modular pipeline: run the segmentation front-end first.
            # ('Modular', 'model', 'input_transform_cityscapes', 'Colorize'
            # and 'ToPILImage' are assumed to be module-level globals.)
            rgb = Image.fromarray(rgb).convert('RGB')
            img = input_transform_cityscapes(rgb)
            img = img.cuda().unsqueeze(0)
            rgb_forward = Image.fromarray(rgb_forward).convert('RGB')
            img_forward = input_transform_cityscapes(rgb_forward)
            img_forward = img_forward.cuda().unsqueeze(0)

            output = model(img)
            label = output[0].max(0)[1].byte().cpu().data
            label_color = Colorize()(label.unsqueeze(0))
            rgb = ToPILImage()(label_color)
            rgb.save('./seg.jpg')

            output = model(img_forward)
            label = output[0].max(0)[1].byte().cpu().data
            label_color = Colorize()(label.unsqueeze(0))
            rgb_forward = ToPILImage()(label_color)
            rgb_forward.save('./seg_2.jpg')

        img = self.transform(rgb).to(self.device).unsqueeze(0)
        img_forward = self.transform(rgb_forward).to(self.device).unsqueeze(0)

        # print(img_forward.shape)
        model_input = torch.cat((img_forward, img), 1)

        cam_coords = self.net(model_input)
        # denormalize from [-1, 1] back to image pixel coordinates
        cam_coords[..., 0] = (cam_coords[..., 0] + 1) / 2 * img.shape[-1]
        cam_coords[..., 1] = (cam_coords[..., 1] + 1) / 2 * img.shape[-2]

        map_coords = self.converter.cam_to_map(
            cam_coords).cpu().numpy().squeeze()
        world_coords = self.converter.cam_to_world(
            cam_coords).cpu().numpy().squeeze()

        target_speed = np.sqrt(
            ((world_coords[:2] - world_coords[1:3])**2).sum(1).mean())
        target_speed *= self.speed_mult

        theta1 = np.degrees(np.arctan2(world_coords[0][0], world_coords[0][1]))
        theta2 = np.degrees(np.arctan2(world_coords[4][0], world_coords[4][1]))
        # print(abs(theta2 - theta1))
        if abs(theta2 - theta1) < 2:
            target_speed *= self.speed_mult
        else:
            target_speed *= 1.2

        curve = spline(map_coords + 1e-8 * np.random.rand(*map_coords.shape),
                       100)
        target = curve[self.target_index]

        curve_world = spline(
            world_coords + 1e-8 * np.random.rand(*world_coords.shape), 100)
        target_world = curve_world[self.target_index]

        if viz:
            viz.planner_draw(cam_coords.cpu().numpy().squeeze(), map_coords,
                             curve, target)

        return target_world, target_speed
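
A per-frame usage sketch, assuming rgb and rgb_forward are HxWx3 uint8 camera frames (viz is optional and only used for drawing):

planner = Planner('/path/to/run/model_100.t7')  # hypothetical path
target_world, target_speed = planner.run_step(rgb, rgb_forward)
# target_world: the spline point at self.target_index in world coordinates;
# target_speed: scalar setpoint for a downstream speed controller.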
Example #7
def train_or_eval(teacher, net, data, optim, is_train, config):
    if is_train:
        desc = 'train'
        net.train()
    else:
        desc = 'val'
        net.eval()

    tick = time.time()
    losses = list()
    converter = ConverterTorch()
    iterator = tqdm.tqdm(data,
                         desc=desc,
                         total=len(data),
                         position=1,
                         leave=None)

    for i, (rgb_forward, rgb, mapview_forward, mapview, waypoints,
            _) in enumerate(iterator):
        rgb_forward = rgb_forward.to(config['device'])
        rgb = rgb.to(config['device'])
        model_input = torch.cat((rgb_forward, rgb), 1)

        waypoints[..., 0] = AUG_MAP_SIZE - (waypoints[..., 0] + 1) * mapview.shape[-1] / 2
        waypoints[..., 1] = (waypoints[..., 1] + 1) * mapview.shape[-2] / 2
        points_cam = converter(waypoints)
        points_cam[..., 0] = (points_cam[..., 0] / converter.W) * 2 - 1
        points_cam[..., 1] = (points_cam[..., 1] / converter.H) * 2 - 1

        _waypoints = net(model_input)

        loss = torch.abs(points_cam - _waypoints)
        loss_mean = loss.sum((1, 2)).mean()
        losses.append(loss_mean.item())

        if is_train:
            loss_mean.backward()
            optim.step()
            optim.zero_grad()

            wandb.run.summary['step'] += 1

        metrics = dict()
        metrics['loss'] = loss_mean.item()
        metrics['images_per_second'] = mapview.shape[0] / (time.time() - tick)

        if i % 500 == 0:
            metrics['images'] = _log_visuals(rgb, mapview, loss, waypoints,
                                             points_cam, _waypoints)
        if i % 100 == 0:
            print(np.mean(losses))

        wandb.log({('%s/%s' % (desc, k)): v
                   for k, v in metrics.items()},
                  step=wandb.run.summary['step'])

        tick = time.time()

    return np.mean(losses)
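
A sketch of the surrounding epoch loop. The Adam optimizer, the lr and max_epoch keys, and the data splits are assumptions; the wandb step counter is initialised as in the commented-out line of Example #3:

optim = torch.optim.Adam(net.parameters(), lr=config['lr'])  # assumed optimizer/key
wandb.run.summary['step'] = 0
for epoch in range(config['max_epoch']):  # assumed key
    train_or_eval(teacher, net, data_train, optim, True, config)
    with torch.no_grad():
        train_or_eval(teacher, net, data_val, optim, False, config)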