def _build_computation_graph(self) -> None:
        """
        Build the Policy_theta computation graph with theta as multi-layer perceptron

        """
        """ ---- Placeholder ---- """
        observation_ph, action_ph, Q_values_ph = bloc.gym_playground_to_tensorflow_graph_adapter(
            self.playground,
            obs_shape_constraint=None,
            action_shape_constraint=None)
        self.obs_t_ph = observation_ph
        self.action_ph = action_ph
        self.Q_values_ph = Q_values_ph
        """ ---- The policy and is neural net theta ---- """
        reinforce_policy = REINFORCE_policy(observation_ph, action_ph,
                                            Q_values_ph, self.exp_spec,
                                            self.playground)
        (policy_action_sampler, theta_mlp, pseudo_loss) = reinforce_policy
        self.policy_pi = policy_action_sampler
        self.theta_mlp = theta_mlp
        self.pseudo_loss = pseudo_loss
        """ ---- Optimizer ---- """
        self.policy_optimizer_op = bloc.policy_optimizer(
            self.pseudo_loss, self.exp_spec.learning_rate)
        return None
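# --- Hedged usage sketch (my addition, not part of the source) --------------------------------
# How the ops built by _build_computation_graph above could be driven in a TF1 session.
# `agent` stands for a hypothetical instance of the (unshown) class owning the method; the
# attribute names (obs_t_ph, policy_pi, action_ph, Q_values_ph, policy_optimizer_op) are the
# ones assigned above.
#
#     with tf_cv1.Session() as sess:
#         sess.run(tf_cv1.global_variables_initializer())
#         obs_batch = np.ones((10, *agent.playground.OBSERVATION_SPACE.shape))
#         sampled_act = sess.run(agent.policy_pi, feed_dict={agent.obs_t_ph: obs_batch})
#         sess.run(agent.policy_optimizer_op,
#                  feed_dict={agent.obs_t_ph: obs_batch,
#                             agent.action_ph: sampled_act,
#                             agent.Q_values_ph: np.ones((10,))})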
def test_gym_env_to_tf_graph_adapter_DISCRETE_PASS(gym_discrete_setup):
    _, playground = gym_discrete_setup
    input_placeholder, output_placeholder, Q_values_ph = bloc.gym_playground_to_tensorflow_graph_adapter(
        playground, action_shape_constraint=(1, ))
    assert input_placeholder.shape[-1] == playground.OBSERVATION_SPACE.shape[0]
    print(output_placeholder.shape)
    assert output_placeholder.shape.rank == 1
def test_build_MLP_computation_graph_with_DISCRETE_adapter(gym_discrete_setup):
    _, playground = gym_discrete_setup
    input_placeholder, out_placeholder, Q_values_ph = bloc.gym_playground_to_tensorflow_graph_adapter(
        playground, action_shape_constraint=(1, ))
    bloc.build_MLP_computation_graph(input_placeholder,
                                     playground.ACTION_CHOICES,
                                     hidden_layer_topology=(2, 2))
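# --- Hedged sketch (my addition, not the library code) -----------------------------------------
# A minimal TF1 MLP builder in the spirit of what bloc.build_MLP_computation_graph is assumed to
# do: stack dense hidden layers on the observation placeholder and emit one output unit per
# action choice (logits for a discrete policy). Names and defaults here are illustrative only.
def minimal_mlp_sketch(input_placeholder, output_dim, hidden_layer_topology=(2, 2),
                       hidden_activation=tf_cv1.nn.tanh, output_activation=None):
    x = input_placeholder
    for size in hidden_layer_topology:
        x = tf_cv1.layers.dense(x, units=size, activation=hidden_activation)
    # final layer: one unit per discrete action choice
    return tf_cv1.layers.dense(x, units=output_dim, activation=output_activation)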
@pytest.fixture  # note: decorator assumed -- this yield-style setup/teardown is consumed as a pytest fixture
def gym_and_tf_discrete_setup():
    """
    :return: (obs_p, act_p, exp_spec, playground)
    :rtype: (tf.Tensor, tf.Tensor, ExperimentSpec, GymPlayground)
    """
    exp_spec = bloc.ExperimentSpec(batch_size_in_ts=1000, max_epoch=2, theta_nn_hidden_layer_topology=(2, 2))
    playground = bloc.GymPlayground('LunarLander-v2')
    obs_p, act_p, Q_values_ph = bloc.gym_playground_to_tensorflow_graph_adapter(playground,
                                                                                action_shape_constraint=(1,))
    yield obs_p, act_p, exp_spec, playground
    tf_cv1.reset_default_graph()
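# --- Hedged usage example (my addition): how a test could consume the fixture above. -----------
# The shape assertion mirrors the checks already done in the adapter tests; the test name is
# hypothetical.
def test_example_consuming_discrete_setup(gym_and_tf_discrete_setup):
    obs_p, act_p, exp_spec, playground = gym_and_tf_discrete_setup
    assert obs_p.shape[-1] == playground.OBSERVATION_SPACE.shape[0]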
@pytest.fixture  # note: decorator assumed -- this yield-style setup/teardown is consumed as a pytest fixture
def gym_and_tf_SAC_Brain_continuous_setup():
    """
    :return: obs_t_ph, act_ph, obs_t_prime_ph, reward_t_ph, trj_done_t_ph, exp_spec, playground
    """
    exp_spec = bloc.ExperimentSpec()
    exp_spec.set_experiment_spec(unit_test_hparam)
    
    playground = bloc.GymPlayground('LunarLanderContinuous-v2')
    obs_t_ph, act_ph, _ = bloc.gym_playground_to_tensorflow_graph_adapter(playground)
    obs_t_prime_ph = bloc.continuous_space_placeholder(space=playground.OBSERVATION_SPACE,
                                                       name=vocab.obs_tPrime_ph)
    reward_t_ph = tf_cv1.placeholder(dtype=tf.float32, shape=(None,), name=vocab.rew_ph)
    trj_done_t_ph = tf_cv1.placeholder(dtype=tf.float32, shape=(None,), name=vocab.trj_done_ph)

    yield obs_t_ph, act_ph, obs_t_prime_ph, reward_t_ph, trj_done_t_ph, exp_spec, playground
    tf_cv1.reset_default_graph()
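# --- Hedged usage example (my addition): a test skeleton consuming the SAC fixture above. ------
# It only checks ranks/shapes that follow directly from how the fixture builds its placeholders;
# the test name is hypothetical.
def test_example_consuming_SAC_continuous_setup(gym_and_tf_SAC_Brain_continuous_setup):
    obs_t_ph, act_ph, obs_t_prime_ph, reward_t_ph, trj_done_t_ph, exp_spec, playground = \
        gym_and_tf_SAC_Brain_continuous_setup
    assert obs_t_ph.shape[-1] == playground.OBSERVATION_SPACE.shape[0]
    assert reward_t_ph.shape.rank == 1
    assert trj_done_t_ph.shape.rank == 1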
def test_integration_Playground_to_adapter_to_build_graph(
        gym_continuous_setup):
    exp_spec, playground = gym_continuous_setup

    # (!) fake input data
    input_data = np.ones((20, *playground.OBSERVATION_SPACE.shape))

    input_placeholder, out_placeholder, Q_values_ph = bloc.gym_playground_to_tensorflow_graph_adapter(
        playground, action_shape_constraint=(1, ))
    """Build a Multi Layer Perceptron (MLP) as the policy parameter theta using a computation graph"""
    theta = bloc.build_MLP_computation_graph(input_placeholder,
                                             playground.ACTION_CHOICES,
                                             exp_spec.theta_nn_h_layer_topo)

    writer = tf_cv1.summary.FileWriter('./graph', tf_cv1.get_default_graph())
    with tf_cv1.Session() as sess:
        # initialize the variables in the computation graph
        sess.run(tf_cv1.global_variables_initializer())

        # execute mlp computation graph with input data
        a = sess.run(theta, feed_dict={input_placeholder: input_data})

        # print("\n\n>>>run theta:\n{}\n\n".format(a))
    writer.close()
def test_gym_env_to_tf_graph_adapter_CONTINUOUS_PASS(gym_continuous_setup):
    _, playground = gym_continuous_setup
    input_placeholder, output_placeholder, Q_values_ph = bloc.gym_playground_to_tensorflow_graph_adapter(
        playground, action_shape_constraint=(1, ))
    assert input_placeholder.shape[-1] == playground.OBSERVATION_SPACE.shape[0]
    assert output_placeholder.shape.rank == 2
def test_gym_env_to_tf_graph_adapter_WRONG_IMPORT_TYPE():
    with pytest.raises(AssertionError):
        bloc.gym_playground_to_tensorflow_graph_adapter(gym, (1, ))
    def _build_computation_graph(self):
        """
        Build the Policy_theta & V_phi computation graph with theta and phi as multi-layer perceptron
        """
        assert isinstance(
            self.exp_spec['Network'],
            NetworkType), ("exp_spec['Network'] must be explicitely defined "
                           "with a NetworkType enum")

        if self.exp_spec.random_seed == 0:
            print(":: Random seed control is turned OFF")
        else:
            tf_cv1.random.set_random_seed(self.exp_spec.random_seed)
            np.random.seed(self.exp_spec.random_seed)
            print(":: Random seed control is turned ON")
        """ ---- Placeholder ---- """
        self.obs_t_ph, self.action_ph, _ = bloc.gym_playground_to_tensorflow_graph_adapter(
            self.playground, Q_name=vocab.Qvalues_ph)

        self.obs_tPrime_ph = bloc.continuous_space_placeholder(
            space=self.playground.OBSERVATION_SPACE, name=vocab.obs_tPrime_ph)

        self.reward_t_ph = tf_cv1.placeholder(dtype=tf.float32,
                                              shape=(None, ),
                                              name=vocab.rew_ph)

        if self.exp_spec['Network'] is NetworkType.Split:
            # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
            # *                                                                                                       *
            # *                                         Critic computation graph                                      *
            # *                                              (Split network)                                          *
            # *                                                                                                       *
            # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
            self.V_phi_estimator, self.V_phi_estimator_tPrime = build_two_input_critic_graph(
                self.obs_t_ph, self.obs_tPrime_ph, self.exp_spec)

            # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
            # *                                                                                                       *
            # *                                         Actor computation graph                                       *
            # *                                             (Split network)                                           *
            # *                                                                                                       *
            # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
            self.policy_pi, log_pi, _ = build_actor_policy_graph(
                self.obs_t_ph, self.exp_spec, self.playground)

            print(":: SPLIT network (two input advantage) constructed")

        elif self.exp_spec['Network'] is NetworkType.Shared:
            # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
            # *                                                                                                       *
            # *                                   Shared Actor-Critic computation graph                               *
            # *                                                                                                       *
            # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *

            raise NotImplementedError  # todo: implement

            # self.policy_pi, log_pi, _, self.V_phi_estimator = build_actor_critic_shared_graph(
            #     self.obs_t_ph, self.exp_spec, self.playground)
            #
            # print(":: SHARED network constructed")

        # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        # *                                                                                                           *
        # *                                                 Advantage                                                 *
        # *                                                                                                           *
        # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *

        # # alternate architecture with element wise computed advantage
        # self.Advantage_ph = tf_cv1.placeholder(tf.float32, shape=self.Qvalues_ph.shape, name=vocab.advantage_ph)

        with tf_cv1.name_scope(vocab.Advantage):
            # (!) note: Advantage computation
            #       |       no squeeze      ==>     SLOWER computation
            #       |               eg: Advantage = self.Qvalues_ph - self.V_phi_estimator
            #       |
            #       |       with squeeze    ==>     RACING CAR FAST computation
            #
            # (Nice to have) todo: investigate why it's so much faster. Hypothesis: broadcasting slows down the computation
            self.Q_estimate = self.reward_t_ph + self.exp_spec.discout_factor * tf_cv1.squeeze(
                self.V_phi_estimator_tPrime)
            Advantage = self.Q_estimate - tf_cv1.squeeze(self.V_phi_estimator)

        # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        # *                                                                                                           *
        # *                                           Actor & Critic Train                                            *
        # *                                                                                                           *
        # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
        self.actor_loss, self.actor_policy_optimizer = actor_shared_train(
            self.action_ph,
            log_pi=log_pi,
            advantage=Advantage,
            experiment_spec=self.exp_spec,
            playground=self.playground)

        self.V_phi_loss, self.V_phi_optimizer = critic_shared_train(
            Advantage, self.exp_spec)

        # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ** * * * *
        # *                                                                                                            *
        # *                                                 Summary ops                                                *
        # *                                                                                                            *
        # * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ** * * * *
        """ ---- By Epoch summary ---- """
        self.summary_stage_avg_trjs_actor_loss_ph = tf_cv1.placeholder(
            tf.float32, name='Actor_loss_ph')
        self.summary_stage_avg_trjs_critic_loss_ph = tf_cv1.placeholder(
            tf.float32, name='Critic_loss_ph')
        tf_cv1.summary.scalar('Actor_loss',
                              self.summary_stage_avg_trjs_actor_loss_ph,
                              family=vocab.loss)
        tf_cv1.summary.scalar('Critic_loss',
                              self.summary_stage_avg_trjs_critic_loss_ph,
                              family=vocab.loss)

        self.summary_stage_avg_trjs_return_ph = tf_cv1.placeholder(
            tf.float32, name='summary_stage_avg_trjs_return_ph')
        tf_cv1.summary.scalar('Batch average return',
                              self.summary_stage_avg_trjs_return_ph,
                              family=vocab.G)

        self.summary_epoch_op = tf_cv1.summary.merge_all()
        """ ---- By Trajectory summary ---- """
        self.Summary_trj_return_ph = tf_cv1.placeholder(
            tf.float32, name='Summary_trj_return_ph')
        self.summary_trj_return_op = tf_cv1.summary.scalar(
            'Trajectory return', self.Summary_trj_return_ph, family=vocab.G)

        self.Summary_trj_lenght_ph = tf_cv1.placeholder(
            tf.float32, name='Summary_trj_lenght_ph')
        self.summary_trj_lenght_op = tf_cv1.summary.scalar(
            'Trajectory lenght',
            self.Summary_trj_lenght_ph,
            family=vocab.Trajectory_lenght)

        self.summary_trj_op = tf_cv1.summary.merge(
            [self.summary_trj_return_op, self.summary_trj_lenght_op])

        return None
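# --- Hedged worked example (my addition): the Advantage block above, in plain NumPy. -----------
# It makes the squeeze/broadcasting remark concrete: critic outputs typically have shape
# (batch, 1), and without the squeeze the (batch,) reward vector broadcasts against (batch, 1)
# into a (batch, batch) matrix -- wrong shape and much more work, which would explain the
# slowdown hypothesised in the comment above.
import numpy as np

def numpy_advantage_sketch(rewards, V_t, V_tPrime, discount_factor=0.99):
    """rewards: shape (batch,);  V_t, V_tPrime: shape (batch, 1) critic outputs."""
    Q_estimate = rewards + discount_factor * np.squeeze(V_tPrime, axis=-1)  # shape (batch,)
    advantage = Q_estimate - np.squeeze(V_t, axis=-1)                       # shape (batch,)
    return advantage

# Without the squeeze, `rewards + discount_factor * V_tPrime - V_t` broadcasts to (batch, batch).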
def train(env_name='CartPole-v0',
          hidden_sizes=[32],
          lr=1e-2,
          epochs=50,
          batch_size=5000,
          render=False):

    # make environment, check spaces, get obs / act dims
    # env = gym.make(env_name)                                                             # ////// Original bloc //////

    REINFORCE_integration_test = {                                                         # \\\\\\    My bloc    \\\\\\
        'prefered_environment': env_name,
        'paramameter_set_name': 'REINFORCE integration test on CartPole-v0',
        'batch_size_in_ts': batch_size,
        'max_epoch': epochs,
        'discounted_reward_to_go': False,
        'discout_factor': 0.999,
        'learning_rate': lr,
        'theta_nn_h_layer_topo': tuple(hidden_sizes),
        'random_seed': 42,
        'theta_hidden_layers_activation': tf.nn.tanh,  # tf.nn.relu,
        'theta_output_layers_activation': None,
        'render_env_every_What_epoch': 100,
        'print_metric_every_what_epoch': 5,
    }
    playground = BLOC.GymPlayground(env_name)  # \\\\\\    My bloc    \\\\\\
    env = playground.env  # \\\\\\    My bloc    \\\\\\
    exp_spec = BLOC.ExperimentSpec()  # \\\\\\    My bloc    \\\\\\
    exp_spec.set_experiment_spec(REINFORCE_integration_test)  # \\\\\\    My bloc    \\\\\\
    consol_print_learning_stats = ConsolPrintLearningStats(   # \\\\\\    My bloc    \\\\\\
        exp_spec, exp_spec.print_metric_every_what_epoch)     # \\\\\\    My bloc    \\\\\\

    assert isinstance(env.observation_space, Box), \
        "This example only works for envs with continuous state spaces."
    assert isinstance(env.action_space, Discrete), \
        "This example only works for envs with discrete action spaces."

    obs_dim = env.observation_space.shape[0]
    n_acts = env.action_space.n

    # make core of policy network
    # obs_ph = tf.placeholder(shape=(None, obs_dim), dtype=tf.float32)                          # ////// Original bloc //////
    obs_ph, act_ph, weights_ph = BLOC.gym_playground_to_tensorflow_graph_adapter(
        playground)  # \\\\\\    My bloc    \\\\\\

    # logits = mlp(obs_ph, sizes=hidden_sizes+[n_acts])                                    # ////// Original bloc //////
    # logits = BLOC.build_MLP_computation_graph(obs_ph, playground,                        # \\\\\\    My bloc    \\\\\\
    #                                           hidden_layer_topology=tuple(hidden_sizes)) # \\\\\\    My bloc    \\\\\\

    # make action selection op (outputs int actions, sampled from policy)
    # actions = tf.squeeze(tf.multinomial(logits=logits,num_samples=1), axis=1)            # ////// Original bloc //////
    # actions, log_p_all = BLOC.policy_theta_discrete_space(logits, playground)            # \\\\\\    My bloc    \\\\\\

    # make loss function whose gradient, for the right data, is policy gradient
    # weights_ph = tf.placeholder(shape=(None,), dtype=tf.float32)                         # ////// Original bloc //////
    # act_ph = tf.placeholder(shape=(None,), dtype=tf.int32)                               # ////// Original bloc //////
    # action_masks = tf.one_hot(act_ph, n_acts)                                            # ////// Original bloc //////
    # log_probs = tf.reduce_sum(action_masks * tf.nn.log_softmax(logits), axis=1)          # ////// Original bloc //////
    # loss = -tf.reduce_mean(weights_ph * log_probs)                                       # ////// Original bloc //////

    # (!) First silent error caused by uneven batch size                                  # \\\\\\    My bloc    \\\\\\
    # loss = BLOC.discrete_pseudo_loss(log_p_all, act_ph, weights_ph, playground)          # \\\\\\    My bloc    \\\\\\

    reinforce_policy = REINFORCEbrain.REINFORCE_policy(obs_ph, act_ph, weights_ph,        # \\\\\\    My bloc    \\\\\\
                                                       exp_spec, playground)              # \\\\\\    My bloc    \\\\\\
    (actions, _, loss) = reinforce_policy  # \\\\\\    My bloc    \\\\\\

    # make train op
    # train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)                   # ////// Original bloc //////
    train_op = BLOC.policy_optimizer(loss, learning_rate=exp_spec.learning_rate)  # \\\\\\    My bloc    \\\\\\

    # \\\\\\    My bloc    \\\\\\
    date_now = datetime.now()
    run_str = "Run--{}h{}--{}-{}-{}".format(date_now.hour, date_now.minute,
                                            date_now.day, date_now.month,
                                            date_now.year)
    # writer = tf_cv1.summary.FileWriter("./graph/{}".format(run_str), tf_cv1.get_default_graph())
    writer = tf_cv1.summary.FileWriter(
        "test_Z_integration/test_integrationREINFORCE/graph/{}".format(
            run_str), tf_cv1.get_default_graph())

    the_TRAJECTORY_COLLECTOR = TrajectoryCollector(exp_spec, playground)        # \\\\\\    My bloc    \\\\\\
    the_UNI_BATCH_COLLECTOR = UniformBatchCollector(exp_spec.batch_size_in_ts)  # \\\\\\    My bloc    \\\\\\

    # ////// Original bloc //////
    # sess = tf.InteractiveSession()
    # sess.run(tf.global_variables_initializer())

    # \\\\\\    My bloc    \\\\\\
    tf_cv1.set_random_seed(exp_spec.random_seed)
    np.random.seed(exp_spec.random_seed)
    with tf_cv1.Session() as sess:
        # initialize the variables in the computation graph
        sess.run(tf_cv1.global_variables_initializer())
        consol_print_learning_stats.start_the_crazy_experiment()

        # for training policy
        def train_one_epoch():
            consol_print_learning_stats.next_glorious_epoch()  # \\\\\\    My bloc    \\\\\\

            # ////// Original bloc //////
            # # make some empty lists for logging.
            # batch_obs = []          # for observations
            # batch_acts = []         # for actions
            # batch_weights = []      # for reward-to-go weighting in policy gradient
            # batch_rets = []         # for measuring episode returns
            # batch_lens = []         # for measuring episode lengths
            # ep_rews = []            # list for rewards accrued throughout ep

            # reset episode-specific variables
            obs = env.reset()  # first obs comes from starting distribution
            done = False  # signal from environment that episode is over

            # render first episode of each epoch
            finished_rendering_this_epoch = False

            consol_print_learning_stats.next_glorious_trajectory()  # \\\\\\    My bloc    \\\\\\

            # collect experience by acting in the environment with current policy
            while True:

                # rendering
                if (not finished_rendering_this_epoch) and render:
                    env.render()

                # save obs
                # batch_obs.append(obs.copy())  # <-- (!) (Critical) append S_t not S_{t+1} ////// Original bloc //////

                # # act in the environment
                # act = sess.run(actions, {obs_ph: obs.reshape(1,-1)})[0]                # ////// Original bloc //////
                # obs, rew, done, _ = env.step(act)                                      # ////// Original bloc //////

                step_observation = BLOC.format_single_step_observation(obs)              # \\\\\\    My bloc    \\\\\\
                action_array = sess.run(actions, feed_dict={obs_ph: step_observation})   # \\\\\\    My bloc    \\\\\\
                act = blocAndTools.tensorflowbloc.to_scalar(action_array)                # \\\\\\    My bloc    \\\\\\
                # obs, rew, done, _ = playground.env.step(act)   <-- (!) mistake          # \\\\\\    My bloc    \\\\\\
                # (!) Solution to silent error 2: don't overwrite S_t                       \\\\\\    My bloc    \\\\\\
                obs_prime, rew, done, _ = playground.env.step(act)  # <-- (!) Solution      \\\\\\    My bloc    \\\\\\

                # ////// Original bloc //////
                # # save action, reward
                # batch_acts.append(act)
                # ep_rews.append(rew)

                # (Critical) | Appending the observation S_t that triggered the action A_t is critical.   \\\\\\    My bloc    \\\\\\
                #            | If the observation appended is the one at S_{t+1}, the agent won't learn.  \\\\\\    My bloc    \\\\\\
                the_TRAJECTORY_COLLECTOR.collect_OAR(obs, act, rew)  # <-- (!) Silent error 2             \\\\\\    My bloc    \\\\\\
                obs = obs_prime                                      # <-- (!) Solution to silent error 2 \\\\\\    My bloc    \\\\\\

                if done:
                    # ////// Original bloc //////
                    # # if episode is over, record info about episode
                    # ep_ret, ep_len = sum(ep_rews), len(ep_rews)
                    # batch_rets.append(ep_ret)
                    # batch_lens.append(ep_len)

                    trj_return = the_TRAJECTORY_COLLECTOR.trajectory_ended()               # \\\\\\    My bloc    \\\\\\
                    the_TRAJECTORY_COLLECTOR.compute_Qvalues_as_rewardToGo()
                    trj_container = the_TRAJECTORY_COLLECTOR.pop_trajectory_and_reset()    # \\\\\\    My bloc    \\\\\\
                    the_UNI_BATCH_COLLECTOR.collect(trj_container)                         # \\\\\\    My bloc    \\\\\\

                    consol_print_learning_stats.trajectory_training_stat(
                        the_trajectory_return=trj_return,
                        timestep=len(trj_container))                                       # \\\\\\    My bloc    \\\\\\

                    # the weight for each logprob(a_t|s_t) is reward-to-go from t
                    # batch_weights += list(reward_to_go(ep_rews))                        # ////// Original bloc //////
                    # batch_weights += BLOC.reward_to_go(ep_rews)                        # \\\\\\    My bloc    \\\\\\

                    # reset episode-specific variables
                    obs, done, ep_rews = env.reset(), False, []

                    consol_print_learning_stats.next_glorious_trajectory()  # \\\\\\    My bloc    \\\\\\

                    # won't render again this epoch
                    finished_rendering_this_epoch = True

                    # ////// Original bloc //////
                    # # end experience loop if we have enough of it
                    # if len(batch_obs) > batch_size:
                    #     break

                    if not the_UNI_BATCH_COLLECTOR.is_not_full():  # \\\\\\    My bloc    \\\\\\
                        break                                      # \\\\\\    My bloc    \\\\\\

            # ////// Original bloc //////
            # # take a single policy gradient update step
            # batch_loss, _ = sess.run([loss, train_op],
            #                          feed_dict={
            #                             obs_ph: np.array(batch_obs),
            #                             act_ph: np.array(batch_acts),
            #                             weights_ph: np.array(batch_weights)
            #                          })

            batch_container = the_UNI_BATCH_COLLECTOR.pop_batch_and_reset()  # \\\\\\    My bloc    \\\\\\
            (batch_rets, batch_lens) = batch_container.get_basic_metric()    # \\\\\\    My bloc    \\\\\\
            batch_obs = batch_container.batch_observations                   # \\\\\\    My bloc    \\\\\\
            batch_acts = batch_container.batch_actions                       # \\\\\\    My bloc    \\\\\\
            batch_weights = batch_container.batch_Qvalues                    # \\\\\\    My bloc    \\\\\\

            # \\\\\\    My bloc    \\\\\\
            feed_dictionary = blocAndTools.tensorflowbloc.build_feed_dictionary(
                [obs_ph, act_ph, weights_ph],
                [batch_obs, batch_acts, batch_weights])
            batch_loss, _ = sess.run([loss, train_op], feed_dict=feed_dictionary)  # \\\\\\    My bloc    \\\\\\

            return batch_loss, batch_rets, batch_lens

        # training loop
        for i in range(epochs):
            batch_loss, batch_rets, batch_lens = train_one_epoch()
            mean_return = np.mean(batch_rets)
            average_len = np.mean(batch_lens)

            # ////// Original bloc //////
            # print('epoch: %3d \t loss: %.3f \t return: %.3f \t ep_len: %.3f' %
            #       (i, batch_loss, mean_return, average_len))

            # \\\\\\    My bloc    \\\\\\
            consol_print_learning_stats.epoch_training_stat(
                epoch_loss=batch_loss,
                epoch_average_trjs_return=mean_return,
                epoch_average_trjs_lenght=average_len,
                number_of_trj_collected=0,
                total_timestep_collected=0)

            yield (i, batch_loss, mean_return, average_len)

    print("\n>>> Close session\n")
    writer.close()
    playground.env.close()
    tf_cv1.reset_default_graph()
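# --- Hedged usage sketch (my addition): driving the train() generator above. -------------------
# train() yields one (epoch_index, batch_loss, mean_return, average_len) tuple per epoch, so an
# integration test (or a __main__ entry point) can iterate it and assert on the learning curve.
if __name__ == '__main__':
    epoch_returns = []
    for i, batch_loss, mean_return, average_len in train(env_name='CartPole-v0', epochs=5):
        epoch_returns.append(mean_return)
    assert len(epoch_returns) == 5  # e.g. a smoke check that every epoch produced a batch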