Example #1
    def make_obs_ph(name):
        """
        makes the observation placeholder

        :param name: (str) the placeholder name
        :return: (TensorFlow Tensor) the placeholder
        """
        return ObservationInput(observation_space_shape, name=name)
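For orientation, here is a minimal sketch of how a factory like make_obs_ph is wired into deepq.build_train, mirroring the fuller examples below. The env, the 'mlp' q-function, and the hyperparameters are assumptions added for illustration and are not part of the original snippet.

import gym
import tensorflow as tf
import baselines.common.tf_util as U
from baselines import deepq
from baselines.deepq.models import build_q_func
from baselines.deepq.utils import ObservationInput

with U.make_session(8):
    env = gym.make("CartPole-v0")
    # Despite the name used above, ObservationInput expects a gym Space object.
    observation_space_shape = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space_shape, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=build_q_func('mlp'),          # any registered q-function builder works here
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
    )
    U.initialize()
    update_target()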
Example #2
 def make_obs_ph(name):
     return ObservationInput(observation_space, name=name)
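This fragment captures observation_space in an outer variable instead of closing over env; as the comment in Example #5 below explains, that keeps the whole env object from being serialized by cloudpickle when make_obs_ph is saved. A minimal sketch of the pattern (env is assumed to already exist):

from baselines.deepq.utils import ObservationInput

# Capture only the space, not the env, so pickling make_obs_ph
# (e.g. inside act_params) does not drag the environment along.
observation_space = env.observation_space

def make_obs_ph(name):
    return ObservationInput(observation_space, name=name)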
Example #3
        mode = input("Press 1 for NN expert mode\nPress 2 for Human expert mode\nAnything else for no expert mode\n--> ")
        if mode == "1":
            with open('expert_demonstrations_NN.csv', 'r', newline='') as csvfile:
                            data_reader = csv.reader(csvfile, delimiter=',')
                            exp_demo = ([r for r in data_reader])
        elif mode == "2":
            with open('expert_demonstrations_Human.csv', 'r', newline='') as csvfile:
                            data_reader = csv.reader(csvfile, delimiter=',')
                            exp_demo = ([r for r in data_reader])
        else:
            exp_demo = []

        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
            gamma=0.99,
        )
        # Create the replay buffer
        replay_buffer = ReplayBuffer(50000)
        # Create the schedule for exploration starting from 1 (every action is random) down to
        # 0.02 (98% of actions are selected according to values predicted by the model).
        exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()
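csv.reader yields rows of strings, so exp_demo needs to be converted before it can be replayed. Below is a hedged sketch, assuming each row stores a flattened (obs, action, reward, next_obs, done) transition; the real CSV layout is not shown above, so the slicing would have to be adjusted to the actual format.

import numpy as np

obs_dim = env.observation_space.shape[0]   # assumes a flat Box observation space
for row in exp_demo:
    row = np.asarray(row, dtype=np.float32)             # strings -> floats
    demo_obs = row[:obs_dim]
    demo_action = int(row[obs_dim])
    demo_rew = row[obs_dim + 1]
    demo_new_obs = row[obs_dim + 2:obs_dim + 2 + obs_dim]
    demo_done = float(row[-1])
    replay_buffer.add(demo_obs, demo_action, demo_rew, demo_new_obs, demo_done)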
Example #4
def train_policy(arglist):
    with U.single_threaded_session():
        # Create the environment
        if arglist.use_dense_rewards:
            print("Will use env MineRLNavigateDense-v0")
            env = gym.make("MineRLNavigateDense-v0")
            env_name = "MineRLNavigateDense-v0"
        else:
            print("Will use env MineRLNavigate-v0")
            env = gym.make('MineRLNavigate-v0')
            env_name = "MineRLNavigate-v0"

        if arglist.force_forward:
            env = MineCraftWrapperSimplified(env)
        else:
            env = MineCraftWrapper(env)

        if not arglist.use_demonstrations:
            # Use stack of last 4 frames as obs
            env = FrameStack(env, 4)

        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space,
                                                      name=name),
            q_func=build_q_func('conv_only', dueling=True),
            num_actions=env.action_space.n,
            gamma=0.9,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )

        # Create the replay buffer(s) (TODO: Use prioritized replay buffer)
        if arglist.use_demonstrations:
            replay_buffer = ReplayBuffer(int(arglist.replay_buffer_len / 2))
            demo_buffer = load_demo_buffer(env_name,
                                           int(arglist.replay_buffer_len / 2))
        else:
            replay_buffer = ReplayBuffer(arglist.replay_buffer_len)

        # Create the schedule for exploration starting from 1 (every action is random) down to
        # 0.02 (98% of actions are selected according to values predicted by the model).
        exploration = LinearSchedule(
            schedule_timesteps=arglist.num_exploration_steps *
            arglist.num_episodes * arglist.max_episode_steps,
            initial_p=1.0,
            final_p=arglist.final_epsilon)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()

        episode_rewards = [0.0]
        n_episodes = 0
        n_steps = 0
        obs = env.reset()
        log_path = "./learning_curves/minerl_" + str(date.today()) + "_" + str(
            time.time()) + ".dat"
        log_file = open(log_path, "a")
        for episode in range(arglist.num_episodes):
            print("Episode: ", str(episode))
            done = False
            episode_steps = 0
            while not done:

                # Take action and update exploration to the newest value
                action = act(obs[None],
                             update_eps=exploration.value(n_steps))[0]
                new_obs, rew, done, _ = env.step(action)
                n_steps += 1
                episode_steps += 1

                # Break episode
                if episode_steps > arglist.max_episode_steps:
                    done = True

                # Store transition in the replay buffer.
                replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                # Store rewards
                episode_rewards[-1] += rew
                if done:
                    obs = env.reset()
                    episode_rewards.append(0)
                    n_episodes += 1

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if (n_steps > arglist.learning_starts_at_steps) and (n_steps %
                                                                     4 == 0):
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        32)
                    train(obses_t, actions, rewards, obses_tp1, dones,
                          np.ones_like(rewards))

                if arglist.use_demonstrations:
                    if (n_steps < arglist.learning_starts_at_steps) and (
                            n_steps % 4 == 0):
                        obses_t, actions, rewards, obses_tp1, dones = demo_buffer.sample(
                            32)
                        train(obses_t, actions, rewards, obses_tp1, dones,
                              np.ones_like(rewards))
                    if (n_steps > arglist.learning_starts_at_steps) and (
                            n_steps % 4 == 0):
                        obses_t, actions, rewards, obses_tp1, dones = demo_buffer.sample(
                            32)
                        train(obses_t, actions, rewards, obses_tp1, dones,
                              np.ones_like(rewards))

                # Update target network periodically.
                if n_steps % arglist.target_net_update_freq == 0:
                    update_target()

                # Log data for analysis
                if done and len(episode_rewards) % 10 == 0:
                    logger.record_tabular("steps", n_steps)
                    logger.record_tabular("episodes", len(episode_rewards))
                    logger.record_tabular(
                        "mean episode reward",
                        round(np.mean(episode_rewards[-101:-1]), 1))
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * exploration.value(n_steps)))
                    logger.dump_tabular()

                # Save checkpoints periodically
                if n_steps % arglist.checkpoint_rate == 0:
                    checkpoint_path = "./checkpoints/minerl_" + str(
                        episode) + "_" + str(date.today()) + "_" + str(
                            time.time()) + ".pkl"
                    save_variables(checkpoint_path)
                    print("%s,%s,%s,%s" %
                          (n_steps, episode,
                           round(np.mean(episode_rewards[-101:-1]),
                                 1), int(100 * exploration.value(n_steps))),
                          file=log_file)
        log_file.close()
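load_demo_buffer is not defined in the excerpt above. Here is a hedged, hypothetical sketch of what it could look like, assuming demonstrations are stored as pickled (obs, action, reward, next_obs, done) tuples under ./demonstrations/<env_name>.pkl; both the path and the file format are assumptions.

import pickle
from baselines.deepq.replay_buffer import ReplayBuffer

def load_demo_buffer(env_name, buffer_len):
    """Fill a ReplayBuffer with recorded expert transitions (hypothetical format)."""
    demo_buffer = ReplayBuffer(buffer_len)
    with open("./demonstrations/" + env_name + ".pkl", "rb") as f:
        transitions = pickle.load(f)        # list of (obs, action, reward, next_obs, done)
    for obs, action, reward, next_obs, done in transitions[:buffer_len]:
        demo_buffer.add(obs, action, reward, next_obs, float(done))
    return demo_buffer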
Example #5
 def make_obs_ph(name):
     #return ObservationInput(ob_space, name=name)
     return ObservationInput(Box(low=0.0,
                                 high=screen_dim[0],
                                 shape=(screen_dim[0], screen_dim[1], 1)),
                             name=name)
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=3000,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=3000,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.
    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.
    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)


    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,  # use the factory defined above so env is not captured
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=0.99,
        double_q=False
        #grad_norm_clipping=10,
        # param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(10000),
                                 initial_p=1.0,
                                 final_p=0.02)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()


    old_state = None

    formula_LTLf_1 = "!d U(g)"
    monitoring_RightToLeft = MonitoringSpecification(
        ltlf_formula=formula_LTLf_1,
        r=0,
        c=-0.01,
        s=10,
        f=-10
    )

    formula_LTLf_2 = "F(G(bb)) "  # break brick
    monitoring_BreakBrick = MonitoringSpecification(
        ltlf_formula=formula_LTLf_2,
        r=10,
        c=-0.01,
        s=10,
        f=0
    )

    monitoring_specifications = [monitoring_BreakBrick, monitoring_RightToLeft]

    def RightToLeftConversion(observation) -> TraceStep:

        done=False
        global old_state
        if arrays_equal(observation[-9:], np.zeros((len(observation[-9:])))):  ### Checking if all Bricks are broken
            # print('goal reached')
            goal = True  # all bricks are broken
            done = True
        else:
            goal = False

        dead = False
        if done and not goal:
            dead = True


        order = check_ordered(observation[-9:])
        if not order:
            # print('wrong order', state[5:])
            dead=True
            done = True

        if old_state is not None:  # if not the first state
            if not arrays_equal(old_state[-9:], observation[-9:]):
                brick_broken = True
                # check_ordered(state[-9:])
                # print(' a brick is broken')
            else:
                brick_broken = False
        else:
            brick_broken = False

        dictionary = {'g': goal, 'd': dead, 'o': order, 'bb': brick_broken}
        #print(dictionary)
        return dictionary

    multi_monitor = MultiRewardMonitor(
        monitoring_specifications=monitoring_specifications,
        obs_to_trace_step=RightToLeftConversion
    )


    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    # initialize
    done = False
    #monitor.get_reward(None, False)  # add first state in trace

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        episodeCounter=0
        num_episodes=0
        for t in itertools.count():
            
            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(t))[0]
            #print(action)
            #print(action)
            new_obs, rew, done, _ = env.step(action)

            done=False
            #done=False ## FOR FIRE ONLY

            #print(new_obs)

            #new_obs.append()

            start_time = time.time()
            rew, is_perm = multi_monitor(new_obs)
            #print("--- %s seconds ---" % (time.time() - start_time))
            old_state=new_obs
            #print(rew)

            done = done or is_perm

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew


            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            if episodeCounter % 100 == 0 or episodeCounter<1:
                # Show off the result
                #print("coming here Again and Again")
                env.render()


            if done:
                episodeCounter += 1
                num_episodes += 1
                obs = env.reset()
                old_state = None
                episode_rewards.append(0)
                multi_monitor.reset()
                #monitor.get_reward(None, False)
            else:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(64)
                    train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))

                # Update target network periodically.
                if t % 1000 == 0:
                    update_target()
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("currentEpisodeReward", episode_rewards[-1])
                logger.record_tabular("mean 100 episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    act.save_act()
                    #save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        # if model_saved:
        #     if print_freq is not None:
        #         logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
        #     load_variables(model_file)

    return act
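A hedged usage sketch for this learn() variant. It assumes a wrapped Breakout-style environment whose observation ends with the 9 brick flags that RightToLeftConversion inspects; the wrapper itself comes from the surrounding project and is only hinted at here.

# Hypothetical invocation; make_breakout_features_env is a stand-in for the
# project-specific wrapper that exposes the 9 brick flags at the end of obs.
env = make_breakout_features_env()
act = learn(env, network='mlp', lr=5e-4, total_timesteps=100000)
act.save_act()   # ActWrapper can persist the learned policy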
Example #7
def cart(target_update_steps=1000, minibatch_size=32):
    with U.make_session(8):
        # Create the environment
        env = gym.make("CartPole-v0")
        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space,
                                                      name=name),
            q_func=model,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )
        # Create the replay buffer
        replay_buffer = ReplayBuffer(50000)
        # Create the schedule for exploration starting from 1 (every action is random) down to
        # 0.02 (98% of actions are selected according to values predicted by the model).
        exploration = LinearSchedule(schedule_timesteps=10000,
                                     initial_p=1.0,
                                     final_p=0.02)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()

        episode_rewards = [0.0]
        obs = env.reset()
        for t in itertools.count():
            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(t))[0]
            new_obs, rew, done, _ = env.step(action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0)

            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            if is_solved:
                pass
                # Show off the result
                #env.render()
            else:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        minibatch_size)
                    train(obses_t, actions, rewards, obses_tp1, dones,
                          np.ones_like(rewards))
                # Update target network periodically.
                if t % target_update_steps == 0:
                    update_target()

            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular(
                    "mean episode reward",
                    round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
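cart() relies on a module-level model for its Q-network, which is not shown above. A hedged sketch of one plausible setup (the mlp builder is an assumption, not necessarily the original's choice):

from baselines import deepq

model = deepq.models.mlp([64])   # simple fully connected Q-network for CartPole

if __name__ == '__main__':
    cart(target_update_steps=1000, minibatch_size=32)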
Example #8
 def make_obs_ph(name):
     return ObservationInput(observation_space,
                             name=name,
                             extra_channels=extra_channels)
 def make_bel_ph(name):
     return ObservationInput(belief_space, name=name)
Example #10
 def make_obs_ph(name):
     print("ENV.OBSERVATION_SPACE: {}".format(env.observation_space))
     return ObservationInput(env.observation_space, name=name)
Example #11
def main():
    # configure logger, disable logging in child MPI processes (with rank > 0)
    arg_parser = common_arg_parser()
    args, unknown_args = arg_parser.parse_known_args()
    extra_args = parse_cmdline_kwargs(unknown_args)

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        logger.configure()
    else:
        logger.configure(format_strs=[])
        rank = MPI.COMM_WORLD.Get_rank()

    model, env, debug = train(args, extra_args) # Get the trained model
    env.close()

    if args.save_path is not None and rank == 0:
        save_path = osp.expanduser(args.save_path)
        model.save(save_path)

    if args.adv_alg: # If attack is applied, build the function for crafting adversarial observations
        g = tf.Graph()
        with g.as_default():
            with tf.Session() as sess:
                q_func = build_q_func(network='conv_only')
                craft_adv_obs = build_adv(
                    make_obs_tf=lambda name: ObservationInput(env.observation_space, name=name),
                    q_func=q_func, num_actions=env.action_space.n, epsilon=args.epsilon,
                    attack=args.adv_alg
                )

    if args.save_info: # Save all the information in a csv file
        name = args.info_name
        csv_file = open('/Users/harry/Documents/info/' + name, mode='a' )
        fieldnames = ['episode', 'diff_type', 'diff', 'epsilon', 'steps', 'attack rate', 'success rate', 'score']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()

    if args.play:
        logger.log("Running trained model")
        env = build_env(args)
        obs = env.reset()
        action_meanings = env.unwrapped.get_action_meanings()
        def initialize_placeholders(nlstm=128,**kwargs):
            return np.zeros((args.num_env or 1, 2*nlstm)), np.zeros((1))
        state, dones = initialize_placeholders(**extra_args)

        num_episodes = 0
        num_moves = 0
        num_success_attack = 0
        num_attack = 0
        step = 0
        q_value_dict = {}
        old_diff = 0

        diff_type = args.diff_type
        print("Type of diff: {}. Threshold to launch attack: {}".format(diff_type, args.diff))
        print('-------------------------Episode 0 -------------------------')
        while True:
            step = step + 1 # Overall steps. Does not reset to 0 when an episode ends
            num_moves = num_moves + 1
            q_values = debug['q_values']([obs])
            q_values = np.squeeze(q_values)

            minus_diff = np.max(q_values) - np.min(q_values)
            div_diff = np.max(q_values) / np.min(q_values)
            sec_ord_diff = minus_diff - old_diff
            old_diff = minus_diff

            if args.save_q_value: # Save the q value to a file
                with open('/Users/harry/Documents/q_value_pong_ep' + str(num_episodes+1) + '_diff' + str(args.diff) + '.csv', 'a') as q_value_file:
                    q_value_writter = csv.writer(q_value_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    q_value_writter.writerow(q_values)

            if args.adv_alg:
                diff = minus_diff if args.diff_type == 'diff' else div_diff \
                                  if args.diff_type == 'div_diff' else sec_ord_diff \
                                  if args.diff_type == 'sec_ord_diff' else minus_diff

                if diff >= args.diff:
                    num_attack = num_attack + 1
                    with g.as_default():
                        with tf.Session() as sess:
                            sess.run(tf.global_variables_initializer())
                            adv_obs = craft_adv_obs([obs])[0] # Get the adversarial observation
                            adv_obs = np.rint(adv_obs)
                            adv_obs = adv_obs.astype(np.uint8)

                    if args.preview_image: # Show a few adversarial images on the screen
                        if num_attack >= 2 and num_attack <= 10:
                            adv_img = Image.fromarray(np.asarray(adv_obs[:,:,0]), mode='L')
                            adv_img.show()

                    if args.save_image: # Save one episode of adversarial images in a folder
                        if num_episodes == 0:
                            img = Image.fromarray(np.asarray(adv_obs[:,:,0]), mode='L')
                            img.save('/Users/harry/Documents/adv_19_99/adv_image_' + str(num_moves) + '.png')

                    prev_state = np.copy(state)
                    action, _, _, _ = model.step(obs,S=prev_state, M=dones)
                    adv_action, _, state, _ = model.step(adv_obs,S=prev_state, M=dones)
                    if (adv_action != action): # Count as a successful attack
                        # print('Action before: {}, Action after: {}'.format(
                        #       action_meanings[action[0]], action_meanings[adv_action[0]]))
                        num_success_attack = num_success_attack + 1
                    obs, rew, done, info = env.step(adv_action)
                else:
                    action, _, state, _ = model.step(obs,S=state, M=dones)
                    obs, rew, done, info = env.step(action)
                    if args.save_image:
                        img = Image.fromarray(np.asarray(obs[:,:,0]), mode='L')
                        img.save('/Users/harry/Documents/adv_images_ep' + str(num_episodes+1) + '/' + str(num_moves) + '.png')
            else:
                if args.save_image: # Save one episode of normal images in a folder
                    if num_episodes == 0:
                        img = Image.fromarray(np.asarray(obs[:,:,0]), mode='L')
                        img.save('/Users/harry/Documents/normal_obs' + str(num_moves) + '.png')
                action, _, state, _ = model.step(obs,S=state, M=dones)
                obs, _, done, info = env.step(action)
            env.render()
            done = done.any() if isinstance(done, np.ndarray) else done

            if done:
                npc_score = info['episode']['r']
                score = 21 if npc_score < 0 else 21 - npc_score
                obs = env.reset()
                print('Episode {} takes {} time steps'.format(num_episodes, num_moves))
                print('NPC Score: {}'.format(npc_score))
                if args.adv_alg:
                    attack_rate = float(num_attack) / num_moves
                    success_rate = float(num_success_attack) / num_attack
                    print('Percentage of attack: {}'.format(100 * attack_rate))
                    print('Percentage of successful attacks: {}'.format(100 * success_rate))
                    info_dict = {'episode': num_episodes+1,'diff_type': args.diff_type, 'diff': args.diff, 'epsilon': args.epsilon,
                             'steps': num_moves, 'attack rate': attack_rate, 'success rate': success_rate, 'score': score}
                    writer.writerow(info_dict)

                num_moves = 0
                num_transfer = 0
                num_episodes = num_episodes + 1
                num_attack = 0
                num_success_attack = 0
                print(f'-------------------------Episode {num_episodes}-------------------------')

        env.close()
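build_adv is a project-specific helper whose implementation is not shown; it is known only from its call site (make_obs_tf, q_func, num_actions, epsilon, attack) and the fact that the returned craft_adv_obs maps a batch of observations to adversarial ones. Below is a hedged, hypothetical FGSM-style sketch consistent with that call site, using standard baselines/TF1 utilities; everything beyond the signature is an assumption.

import tensorflow as tf
import baselines.common.tf_util as U

def build_adv(make_obs_tf, q_func, num_actions, epsilon, attack='fgsm'):
    """Return a function obs_batch -> adversarial obs_batch (hypothetical sketch)."""
    obs_input = make_obs_tf("adv_observation")
    obs_t = obs_input.get()
    q_values = q_func(obs_t, num_actions, scope="adv_q_func")
    # Untargeted FGSM-style step: perturb pixels in the direction that lowers
    # the Q-value of the currently preferred action (assumes 8-bit image obs).
    best_q = tf.reduce_max(q_values, axis=1)
    grad = tf.gradients(best_q, obs_t)[0]
    adv_obs = tf.clip_by_value(obs_t - epsilon * tf.sign(grad), 0.0, 255.0)
    return U.function(inputs=[obs_input], outputs=adv_obs)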
Example #12
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=3000,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=3000,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):


    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)


    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,  # use the factory defined above so env is not captured
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        # gamma=gamma,
        # grad_norm_clipping=10,
        # param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(100000),
                                 initial_p=1.0,
                                 final_p=0.02)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    old_state = None

    formula_LTLf_1 = "!F(die)"
    monitoring_RightToLeft = MonitoringSpecification(
        ltlf_formula=formula_LTLf_1,
        r=1,
        c=-10,
        s=1,
        f=-10
    )



    monitoring_specifications = [monitoring_RightToLeft]

    stepCounter = 0
    done = False

    def RightToLeftConversion(observation) -> TraceStep:

        print(stepCounter)


        if(done and not(stepCounter>=199)):
            die=True
        else:
            die=False


        dictionary={'die': die}
        print(dictionary)
        return dictionary

    multi_monitor = MultiRewardMonitor(
        monitoring_specifications=monitoring_specifications,
        obs_to_trace_step=RightToLeftConversion
    )

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True


    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        episodeCounter=0
        num_episodes=0

        for t in itertools.count():
            
            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(t))[0]
            #print(action)
            new_obs, rew, done, _ = env.step(action)
            stepCounter+=1

            rew, is_perm = multi_monitor(new_obs)
            old_state = new_obs

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew


            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            if episodeCounter % 100 == 0 or episodeCounter<1:
                # Show off the result
                #print("coming here Again and Again")
                env.render()


            if done:
                episodeCounter+=1
                num_episodes+=1
                obs = env.reset()
                episode_rewards.append(0)
                multi_monitor.reset()
                stepCounter=0
            else:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
                    train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))

                # Update target network periodically.
                if t % 1000 == 0:
                    update_target()
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("mean 100 episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 500 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    act.save_act()
                    #save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        # if model_saved:
        #     if print_freq is not None:
        #         logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
        #     load_variables(model_file)

    return act
Example #13
 def make_obs_ph(name):
     if aug_type == 'constraint_state':
         return ConstraintStateAugmentedInput(observation_space,
                                              constraints,
                                              name=name)
     return ObservationInput(observation_space, name=name)
Example #14
    def learn(self):

        with U.make_session(8):
            # Create the environment
            env = gym.make(self._args.env)
            # Create all the functions necessary to train the model
            act, train, update_target, debug = deepq.build_train(
                make_obs_ph=lambda name: ObservationInput(
                    env.observation_space, name=name),
                q_func=self.model,
                num_actions=env.action_space.n,
                optimizer=tf.train.AdamOptimizer(
                    learning_rate=self._args.learning_rate),
            )
            # Create the replay buffer
            replay_buffer = ReplayBuffer(self._args.replay_buffer_size)
            # Create the schedule for exploration starting from 1 till min_exploration_rate.
            exploration = LinearSchedule(
                schedule_timesteps=self._args.exploration_duration,
                initial_p=1.0,
                final_p=self._args.min_exploration_rate)

            # Initialize the parameters and copy them to the target network.
            U.initialize()
            update_target()

            episode_rewards = [0.0]
            obs = env.reset()
            for t in itertools.count():
                # Take action and update exploration to the newest value
                action = act(obs[None], update_eps=exploration.value(t))[0]
                new_obs, rew, done, _ = env.step(action)
                # Store transition in the replay buffer.
                replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                episode_rewards[-1] += rew
                if done:
                    obs = env.reset()
                    episode_rewards.append(0)

                mean_episode_reward = np.mean(episode_rewards[-101:-1])
                # Show learned agent:
                if mean_episode_reward >= self._render_reward_threshold:
                    env.render()

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        32)
                    train(obses_t, actions, rewards, obses_tp1, dones,
                          np.ones_like(rewards))
                # Update target network periodically.
                if t % 1000 == 0:
                    update_target()

                if done and len(episode_rewards) % 10 == 0:
                    self._reward_buffer_mutex.acquire()
                    self._reward_buffer.append(mean_episode_reward)

                    logger.record_tabular("steps", t)
                    logger.record_tabular("episodes", len(episode_rewards))
                    logger.record_tabular("mean episode reward",
                                          round(mean_episode_reward, 1))
                    logger.record_tabular("% time spent exploring",
                                          int(100 * exploration.value(t)))
                    logger.dump_tabular()

                    self._reward_buffer_changed = True
                    self._reward_buffer_mutex.release()
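For reference, this method reads several fields from self._args; here is a hedged sketch of the minimal argument object it expects (field names are taken from the code above, the values are illustrative assumptions).

import argparse

# Minimal stand-in for the parsed arguments consumed by learn().
args = argparse.Namespace(
    env="CartPole-v0",            # gym id passed to gym.make
    learning_rate=5e-4,           # Adam learning rate
    replay_buffer_size=50000,     # ReplayBuffer capacity
    exploration_duration=10000,   # timesteps over which epsilon is annealed
    min_exploration_rate=0.02,    # final epsilon
)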