def training_pipeline(cls, **kwargs):
     imitate_steps = int(75000000)
     lr = 3e-4
     num_mini_batch = 1
     update_repeats = 3
     num_steps = 30
     save_interval = 5000000
     log_interval = 10000 if torch.cuda.is_available() else 1
     gamma = 0.99
     use_gae = True
     gae_lambda = 0.95
     max_grad_norm = 0.5
     return TrainingPipeline(
         save_interval=save_interval,
         metric_accumulate_interval=log_interval,
         optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
         num_mini_batch=num_mini_batch,
         update_repeats=update_repeats,
         max_grad_norm=max_grad_norm,
         num_steps=num_steps,
         named_losses={"imitation_loss": Imitation()},
         gamma=gamma,
         use_gae=use_gae,
         gae_lambda=gae_lambda,
         advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
         pipeline_stages=[
             PipelineStage(
                 loss_names=["imitation_loss"],
                 max_stage_steps=imitate_steps,
                 # teacher_forcing=LinearDecay(steps=int(1e5), startp=1.0, endp=0.0,),
             ),
         ],
         lr_scheduler_builder=Builder(
             LambdaLR, {"lr_lambda": LinearDecay(steps=imitate_steps)}),
     )
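
A note on the scheduler used above: `LambdaLR` multiplies the optimizer's base learning rate by whatever `lr_lambda` returns, and AllenAct's `LinearDecay` (as used here) is a callable mapping the current step to that multiplier. A minimal self-contained sketch of the behavior, assuming the `(steps, startp, endp)` semantics visible in the commented-out `teacher_forcing` line; this is an illustration, not AllenAct's actual class:

# Sketch of a LinearDecay-style lr_lambda; assumed semantics, not the real class.
class LinearDecaySketch:
    def __init__(self, steps: int, startp: float = 1.0, endp: float = 0.0):
        self.steps, self.startp, self.endp = steps, startp, endp

    def __call__(self, step: int) -> float:
        # LambdaLR multiplies the optimizer's base lr by this return value.
        frac = min(max(step / self.steps, 0.0), 1.0)
        return self.startp + (self.endp - self.startp) * frac

decay = LinearDecaySketch(steps=100)
assert (decay(0), decay(50), decay(100)) == (1.0, 0.5, 0.0)

With `lr = 3e-4` and `steps=imitate_steps`, the effective rate would decay linearly from 3e-4 to zero over the imitation stage.
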
 def training_pipeline(cls, **kwargs):
     ppo_steps = int(250000000)
     lr = 3e-4
     num_mini_batch = 1
     update_repeats = 3
     num_steps = 30
     save_interval = 5000000
     log_interval = 1000
     gamma = 0.99
     use_gae = True
     gae_lambda = 0.95
     max_grad_norm = 0.5
     return TrainingPipeline(
         save_interval=save_interval,
         metric_accumulate_interval=log_interval,
         optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
         num_mini_batch=num_mini_batch,
         update_repeats=update_repeats,
         max_grad_norm=max_grad_norm,
         num_steps=num_steps,
         named_losses={"ppo_loss": PPO(**PPOConfig)},
         gamma=gamma,
         use_gae=use_gae,
         gae_lambda=gae_lambda,
         advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
         pipeline_stages=[
             PipelineStage(loss_names=["ppo_loss"],
                           max_stage_steps=ppo_steps)
         ],
         lr_scheduler_builder=Builder(
             LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}),
     )
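
`Builder(optim.Adam, dict(lr=lr))` defers construction: the optimizer cannot be created at config time because the model parameters do not exist yet on the training worker. A rough sketch of that deferred-construction pattern, assuming `Builder` simply stores a class plus default kwargs (AllenAct's real `Builder` is more general):

import torch.nn as nn
import torch.optim as optim

class BuilderSketch:
    """Illustrative stand-in for AllenAct's Builder: store now, construct later."""
    def __init__(self, class_type, kwargs=None):
        self.class_type = class_type
        self.kwargs = kwargs or {}

    def __call__(self, **extra_kwargs):
        return self.class_type(**{**self.kwargs, **extra_kwargs})

optimizer_builder = BuilderSketch(optim.Adam, dict(lr=3e-4))
model = nn.Linear(4, 2)
optimizer = optimizer_builder(params=model.parameters())  # built at train time
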
 def training_pipeline(cls, **kwargs) -> TrainingPipeline:
     ppo_steps = int(150000)
     return TrainingPipeline(
         named_losses=dict(ppo_loss=PPO(**PPOConfig)),  # type:ignore
         pipeline_stages=[
             PipelineStage(loss_names=["ppo_loss"],
                           max_stage_steps=ppo_steps)
         ],
         optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam),
                                   dict(lr=1e-4)),
         num_mini_batch=4,
         update_repeats=3,
         max_grad_norm=0.5,
         num_steps=16,
         gamma=0.99,
         use_gae=True,
         gae_lambda=0.95,
         advance_scene_rollout_period=None,
         save_interval=10000,
         metric_accumulate_interval=1,
         lr_scheduler_builder=Builder(
             LambdaLR,
             {"lr_lambda": LinearDecay(steps=ppo_steps)}  # type:ignore
         ),
     )
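
For intuition on how `num_steps`, `num_mini_batch`, and `update_repeats` interact in these configs: each rollout collects `num_steps` frames per sampler, after which the learner makes `update_repeats` passes over that data, each split into `num_mini_batch` gradient steps. A quick back-of-the-envelope check under that assumption:

# Gradient updates per collected rollout for the config above (assumed loop
# structure: update_repeats passes, each split into num_mini_batch batches).
num_steps, num_mini_batch, update_repeats = 16, 4, 3
updates_per_rollout = num_mini_batch * update_repeats
print(f"{updates_per_rollout} gradient updates per {num_steps}-step rollout")  # 12
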
Example 4
 def training_pipeline(cls, **kwargs) -> TrainingPipeline:
     ppo_steps = int(1.2e6)
     return TrainingPipeline(
         named_losses=dict(
             ppo_loss=PPO(
                 clip_param=0.2,
                 value_loss_coef=0.5,
                 entropy_coef=0.0,
             )
         ),  # type:ignore
         pipeline_stages=[
             PipelineStage(loss_names=["ppo_loss"],
                           max_stage_steps=ppo_steps),
         ],
         optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam),
                                   dict(lr=1e-3)),
         num_mini_batch=1,
         update_repeats=80,
         max_grad_norm=100,
         num_steps=2000,
         gamma=0.99,
         use_gae=False,
         gae_lambda=0.95,
         advance_scene_rollout_period=None,
         save_interval=200000,
         metric_accumulate_interval=50000,
         lr_scheduler_builder=Builder(
             LambdaLR,
             {"lr_lambda": LinearDecay(steps=ppo_steps)},  # type:ignore
         ),
     )
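
Unlike the previous configs, this one sets `use_gae=False`, so advantage targets come from plain discounted returns rather than generalized advantage estimation (`gae_lambda` is then unused). A minimal sketch of the two estimators, using the standard definitions and bootstrapping with zero at the end of the fragment:

import torch

def discounted_returns(rewards: torch.Tensor, gamma: float) -> torch.Tensor:
    # use_gae=False: targets are plain discounted reward sums.
    returns = torch.zeros_like(rewards)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns

def gae_advantages(rewards, values, gamma, lam):
    # use_gae=True: exponentially weighted TD residuals (Schulman et al., 2016).
    advantages = torch.zeros_like(rewards)
    last = 0.0
    for t in reversed(range(len(rewards))):
        next_value = values[t + 1] if t + 1 < len(rewards) else 0.0
        delta = rewards[t] + gamma * next_value - values[t]
        last = delta + gamma * lam * last
        advantages[t] = last
    return advantages
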
    def training_pipeline(cls, **kwargs):
        ppo_steps = int(1e6)
        lr = 2.5e-4
        num_mini_batch = 6 if torch.cuda.is_available() else 2
        update_repeats = 4
        num_steps = 128
        metric_accumulate_interval = cls.MAX_STEPS * 10  # Log every 10 max length tasks
        save_interval = 10000
        gamma = 0.99
        use_gae = True
        gae_lambda = 1.0
        max_grad_norm = 0.5

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={
                "ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig),
            },
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,),
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
            ),
        )
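
This variant passes `clip_decay=LinearDecay(ppo_steps)` into the PPO loss, so the clipping threshold shrinks as training progresses instead of staying fixed. A hypothetical illustration of the resulting clip range (the base value comes from `PPOConfig`, which is not shown, so `0.1` here is an assumption):

def effective_clip(step: int, base_clip: float = 0.1,
                   total_steps: int = int(1e6)) -> float:
    # Assumed schedule: the clip parameter is scaled by a linear decay factor.
    frac = min(step / total_steps, 1.0)
    return base_clip * (1.0 - frac)

for s in (0, 500_000, 1_000_000):
    print(s, effective_clip(s))  # 0.1, 0.05, 0.0
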
Example 6
 def training_pipeline(cls, **kwargs) -> TrainingPipeline:
     ppo_steps = int(150000)
     return TrainingPipeline(
         named_losses=dict(
             imitation_loss=Imitation(
                 cls.SENSORS[1]
             ),  # 0 is Minigrid, 1 is ExpertActionSensor
             ppo_loss=PPO(**PPOConfig, entropy_method_name="conditional_entropy"),
         ),  # type:ignore
         pipeline_stages=[
             PipelineStage(
                 teacher_forcing=LinearDecay(
                     startp=1.0, endp=0.0, steps=ppo_steps // 2,
                 ),
                 loss_names=["imitation_loss", "ppo_loss"],
                 max_stage_steps=ppo_steps,
             )
         ],
         optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=1e-4)),
         num_mini_batch=4,
         update_repeats=3,
         max_grad_norm=0.5,
         num_steps=16,
         gamma=0.99,
         use_gae=True,
         gae_lambda=0.95,
         advance_scene_rollout_period=None,
         save_interval=10000,
         metric_accumulate_interval=1,
         lr_scheduler_builder=Builder(
             LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}  # type:ignore
         ),
     )
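
The `teacher_forcing=LinearDecay(startp=1.0, endp=0.0, steps=ppo_steps // 2)` argument anneals how often the expert action (from the `ExpertActionSensor`) overrides the policy's own sample: always at the start, never after the first half of training. A minimal sketch of that mechanism, with hypothetical `policy_action`/`expert_action` placeholders:

import random

def choose_action(step: int, policy_action: int, expert_action: int,
                  decay_steps: int = 150_000 // 2) -> int:
    # Probability of forcing the expert action decays linearly from 1 to 0
    # over decay_steps, then stays at 0 (pure on-policy PPO afterwards).
    p_force = max(0.0, 1.0 - step / decay_steps)
    return expert_action if random.random() < p_force else policy_action
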
    def training_pipeline(cls, **kwargs):
        ppo_steps = int(75000000)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 4
        num_steps = 128
        save_interval = 5000000
        log_interval = 10000 if torch.cuda.is_available() else 1
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5

        action_strs = PointNavTask.class_action_names()
        non_end_action_inds_set = {
            i for i, a in enumerate(action_strs) if a != robothor_constants.END
        }
        end_action_ind_set = {action_strs.index(robothor_constants.END)}

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={
                "ppo_loss": PPO(**PPOConfig),
                "grouped_action_imitation": GroupedActionImitation(
                    nactions=len(PointNavTask.class_action_names()),
                    action_groups=[non_end_action_inds_set, end_action_ind_set],
                ),
            },
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                PipelineStage(
                    loss_names=["ppo_loss", "grouped_action_imitation"],
                    max_stage_steps=ppo_steps,
                )
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}),
        )
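
The two action groups split the space into `END` versus everything else, so the grouped imitation loss only supervises *when to stop*, not *which* navigation action to take. A tiny standalone check of the set construction, with a hypothetical action list standing in for `PointNavTask.class_action_names()`:

END = "End"  # hypothetical stand-in for robothor_constants.END
action_strs = ["MoveAhead", "RotateLeft", "RotateRight", END]

non_end_action_inds_set = {i for i, a in enumerate(action_strs) if a != END}
end_action_ind_set = {action_strs.index(END)}

assert non_end_action_inds_set == {0, 1, 2}
assert end_action_ind_set == {3}
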
Example 8
 def training_pipeline(cls, **kwargs):
     ppo_steps = int(10000000)
     lr = 3e-4
     num_mini_batch = 1
     update_repeats = 3
     num_steps = 30
     save_interval = 1000000
     log_interval = 100
     gamma = 0.99
     use_gae = True
     gae_lambda = 0.95
     max_grad_norm = 0.5
     return TrainingPipeline(
         save_interval=save_interval,
         metric_accumulate_interval=log_interval,
         optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
         num_mini_batch=num_mini_batch,
         update_repeats=update_repeats,
         max_grad_norm=max_grad_norm,
         num_steps=num_steps,
         named_losses={
             "ppo_loss": PPO(**PPOConfig),
             "nie_loss": NIE_Reg(
                 agent_pose_uuid="agent_pose_global",
                 pose_uuid="object_pose_global",
                 local_keypoints_uuid="3Dkeypoints_local",
                 global_keypoints_uuid="3Dkeypoints_global",
                 obj_update_mask_uuid="object_update_mask",
                 obj_action_mask_uuid="object_action_mask",
             ),
             "yn_im_loss": YesNoImitation(
                 yes_action_index=ObjectPlacementTask.class_action_names().index(END)
             ),
         },
         gamma=gamma,
         use_gae=use_gae,
         gae_lambda=gae_lambda,
         advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
         pipeline_stages=[
             PipelineStage(
                 loss_names=["ppo_loss", "nie_loss", "yn_im_loss"],
                 max_stage_steps=ppo_steps)
         ],
         lr_scheduler_builder=Builder(
             LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}),
     )
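
When a `PipelineStage` lists several losses without explicit `loss_weights` (contrast Example 11 below, which sets them), each named loss contributes to the stage objective with weight 1.0, to the best of our understanding of the defaults. A schematic of that combination with dummy scalars:

import torch

# Dummy scalars standing in for the three stage losses above; with no
# loss_weights given, each is assumed to contribute with weight 1.0.
stage_losses = {"ppo_loss": torch.tensor(0.8),
                "nie_loss": torch.tensor(0.3),
                "yn_im_loss": torch.tensor(0.1)}
total = sum(stage_losses.values())  # the tensor the optimizer backprops through
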
Example 9
 def training_pipeline(cls, **kwargs) -> TrainingPipeline:
     lr = 1e-4
     ppo_steps = int(8e7)  # convergence may require ~1e8 steps
     clip_param = 0.1
     value_loss_coef = 0.5
     entropy_coef = 0.0
     num_mini_batch = 4  # optimal 64
     update_repeats = 10
     max_grad_norm = 0.5
     num_steps = 2048
     gamma = 0.99
     use_gae = True
     gae_lambda = 0.95
     advance_scene_rollout_period = None
     save_interval = 200000
     metric_accumulate_interval = 50000
     return TrainingPipeline(
         named_losses=dict(
             ppo_loss=PPO(
                 clip_param=clip_param,
                 value_loss_coef=value_loss_coef,
                 entropy_coef=entropy_coef,
             )
         ),  # type:ignore
         pipeline_stages=[
             PipelineStage(loss_names=["ppo_loss"],
                           max_stage_steps=ppo_steps),
         ],
         optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam),
                                   dict(lr=lr)),
         num_mini_batch=num_mini_batch,
         update_repeats=update_repeats,
         max_grad_norm=max_grad_norm,
         num_steps=num_steps,
         gamma=gamma,
         use_gae=use_gae,
         gae_lambda=gae_lambda,
         advance_scene_rollout_period=advance_scene_rollout_period,
         save_interval=save_interval,
         metric_accumulate_interval=metric_accumulate_interval,
         lr_scheduler_builder=Builder(
             LambdaLR,
             {"lr_lambda": LinearDecay(steps=ppo_steps, startp=1, endp=1)
              },  # constant learning rate
         ),
     )
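
Here the scheduler is deliberately a no-op, as the inline comment says: with `startp=1` and `endp=1` the linear interpolation is constant, so `LambdaLR` always multiplies the base rate by 1.0. A one-function check of that claim under the assumed `LinearDecay` semantics:

def constant_lr_lambda(step: int, steps: int = int(8e7),
                       startp: float = 1.0, endp: float = 1.0) -> float:
    frac = min(step / steps, 1.0)
    return startp + (endp - startp) * frac  # always 1.0 when startp == endp

assert all(constant_lr_lambda(s) == 1.0 for s in (0, 40_000_000, 80_000_000))
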
Example 10
    def _training_pipeline(
        cls,
        named_losses: Dict[str, Union[Loss, Builder]],
        pipeline_stages: List[PipelineStage],
        num_mini_batch: int,
        update_repeats: int,
        total_train_steps: int,
        lr: Optional[float] = None,
    ):
        lr = cls.DEFAULT_LR if lr is None else lr

        num_steps = cls.ROLLOUT_STEPS
        metric_accumulate_interval = cls.METRIC_ACCUMULATE_INTERVAL()  # Log every 10 max-length tasks
        save_interval = int(cls.TOTAL_RL_TRAIN_STEPS / cls.NUM_CKPTS_TO_SAVE)
        gamma = 0.99

        use_gae = "reinforce_loss" not in named_losses
        gae_lambda = 0.99
        max_grad_norm = 0.5

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam),
                                      dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses=named_losses,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=None,
            should_log=cls.SHOULD_LOG,
            pipeline_stages=pipeline_stages,
            lr_scheduler_builder=Builder(
                LambdaLR,
                {"lr_lambda": LinearDecay(steps=cls.TOTAL_RL_TRAIN_STEPS)
                 }  # type: ignore
            ),
        )
Example 11
    def training_pipeline(self, **kwargs):
        # PPO
        ppo_steps = int(75000000)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 4
        num_steps = 128
        save_interval = 5000000
        log_interval = 10000 if torch.cuda.is_available() else 1
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5
        PPOConfig["normalize_advantage"] = self.NORMALIZE_ADVANTAGE

        named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
        named_losses = self._update_with_auxiliary_losses(named_losses)

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={key: val[0] for key, val in named_losses.items()},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                PipelineStage(
                    loss_names=list(named_losses.keys()),
                    max_stage_steps=ppo_steps,
                    loss_weights=[val[1] for val in named_losses.values()],
                )
            ],
            lr_scheduler_builder=Builder(
                LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}),
        )
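
Note how the `(loss, weight)` tuples above are split: the pipeline's `named_losses` keeps only the loss objects, while the stage's `loss_weights` is built from the same dict, so names and weights stay aligned because Python dicts preserve insertion order. A standalone check of that unzip with dummy values:

# Dummy (loss, weight) pairs standing in for PPO plus auxiliary losses.
named_losses = {"ppo_loss": ("ppo", 1.0), "aux_loss": ("aux", 0.05)}

losses = {key: val[0] for key, val in named_losses.items()}
loss_names = list(named_losses.keys())
loss_weights = [val[1] for val in named_losses.values()]

assert loss_names == ["ppo_loss", "aux_loss"]
assert loss_weights == [1.0, 0.05]
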
    def training_pipeline(cls, **kwargs) -> TrainingPipeline:
        info = cls._training_pipeline_info()

        return TrainingPipeline(
            gamma=info.get("gamma", 0.99),
            use_gae=info.get("use_gae", True),
            gae_lambda=info.get("gae_lambda", 0.95),
            num_steps=info["num_steps"],
            num_mini_batch=info["num_mini_batch"],
            update_repeats=info["update_repeats"],
            max_grad_norm=info.get("max_grad_norm", 0.5),
            save_interval=cls.SAVE_INTERVAL,
            named_losses=info["named_losses"],
            metric_accumulate_interval=(
                cls.num_train_processes() * max(*cls.MAX_STEPS.values())
                if torch.cuda.is_available()
                else 1
            ),
            optimizer_builder=Builder(optim.Adam, dict(lr=info["lr"])),
            advance_scene_rollout_period=None,
            pipeline_stages=info["pipeline_stages"],
            lr_scheduler_builder=cls.get_lr_scheduler_builder(
                use_lr_decay=info["use_lr_decay"]),
        )
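
This config inverts control: subclasses implement `_training_pipeline_info()` returning a dict, and the base method assembles the pipeline, falling back to defaults via `info.get(...)` and failing loudly (`KeyError`) on required keys. A minimal pure-Python sketch of the pattern, with hypothetical class names and no AllenAct dependency:

class BaseConfigSketch:
    @classmethod
    def _training_pipeline_info(cls) -> dict:
        raise NotImplementedError

    @classmethod
    def build(cls) -> dict:
        info = cls._training_pipeline_info()
        return {
            "gamma": info.get("gamma", 0.99),  # optional keys get defaults
            "use_gae": info.get("use_gae", True),
            "num_steps": info["num_steps"],    # required keys raise KeyError
        }

class MyConfigSketch(BaseConfigSketch):
    @classmethod
    def _training_pipeline_info(cls) -> dict:
        return {"num_steps": 128, "gamma": 0.95}

assert MyConfigSketch.build() == {"gamma": 0.95, "use_gae": True, "num_steps": 128}
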
    def training_pipeline(self, **kwargs):
        # These params are identical to the baseline configuration for 60 samplers (1 machine)
        ppo_steps = int(300e6)
        lr = 3e-4
        num_mini_batch = 1
        update_repeats = 4
        num_steps = 128
        save_interval = 5000000
        log_interval = 10000 if torch.cuda.is_available() else 1
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        max_grad_norm = 0.5

        # We add 30 million steps for small batch learning
        small_batch_steps = int(30e6)
        # And a short transition phase towards the larger learning rate
        # (see comment in the `lr_scheduler` helper method)
        transition_steps = int(2 / 3 * self.distributed_nodes * 1e6)

        # Find exact number of samplers per GPU
        assert (
            self.num_train_processes % len(self.train_gpu_ids) == 0
        ), "Expected a uniform number of samplers per GPU"
        samplers_per_gpu = self.num_train_processes // len(self.train_gpu_ids)

        # Multiply num_mini_batch by the largest divisor of
        # samplers_per_gpu to keep all batches of same size:
        num_mini_batch_multiplier = [
            i for i in reversed(
                range(1,
                      min(samplers_per_gpu // 2, self.distributed_nodes) + 1))
            if samplers_per_gpu % i == 0
        ][0]

        # Multiply update_repeats so that the product of this factor and
        # num_mini_batch_multiplier is >= self.distributed_nodes:
        update_repeats_multiplier = int(
            math.ceil(self.distributed_nodes / num_mini_batch_multiplier))

        return TrainingPipeline(
            save_interval=save_interval,
            metric_accumulate_interval=log_interval,
            optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            named_losses={"ppo_loss": PPO(**PPOConfig, show_ratios=False)},
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
            pipeline_stages=[
                # We increase the number of batches in the first stage to reach an
                # equivalent number of updates per collected rollout as in the
                # 1-node/60-sampler setting
                PipelineStage(
                    loss_names=["ppo_loss"],
                    max_stage_steps=small_batch_steps,
                    num_mini_batch=num_mini_batch * num_mini_batch_multiplier,
                    update_repeats=update_repeats * update_repeats_multiplier,
                ),
                # Then we proceed with the base configuration (leading to larger
                # batches due to the increased number of samplers)
                PipelineStage(
                    loss_names=["ppo_loss"],
                    max_stage_steps=ppo_steps - small_batch_steps,
                ),
            ],
            # We use the MultiLinearDecay curve defined by the helper function,
            # setting the learning-rate scaling to the square root of the number
            # of nodes. Linear scaling might also work, but we leave that
            # check to the reader.
            lr_scheduler_builder=Builder(
                LambdaLR,
                {
                    "lr_lambda":
                    self.lr_scheduler(
                        small_batch_steps=small_batch_steps,
                        transition_steps=transition_steps,
                        ppo_steps=ppo_steps,
                        lr_scaling=math.sqrt(self.distributed_nodes),
                    )
                },
            ),
        )
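
The distributed config above stitches three learning-rate regimes together: the base rate during `small_batch_steps`, a ramp over `transition_steps` up to the scaled rate (`sqrt(nodes)` times the base), and a linear decay to zero over the remaining steps. A rough piecewise sketch of such an `lr_lambda` under those assumptions; the actual curve is built by the config's `lr_scheduler` helper, which is not shown here:

import math

def multi_phase_lr_lambda(step: int,
                          small_batch_steps: int = int(30e6),
                          transition_steps: int = int(2e6),
                          ppo_steps: int = int(300e6),
                          lr_scaling: float = math.sqrt(3)) -> float:
    # Defaults assume 3 nodes, hence lr_scaling = sqrt(3); all values illustrative.
    if step < small_batch_steps:
        # Phase 1: base learning rate while mini-batches are still small.
        return 1.0
    if step < small_batch_steps + transition_steps:
        # Phase 2: linear ramp from 1.0 up to lr_scaling.
        frac = (step - small_batch_steps) / transition_steps
        return 1.0 + (lr_scaling - 1.0) * frac
    # Phase 3: linear decay from lr_scaling down to 0 over the remaining steps.
    remaining = ppo_steps - small_batch_steps - transition_steps
    frac = min((step - small_batch_steps - transition_steps) / remaining, 1.0)
    return lr_scaling * (1.0 - frac)
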