Code Example #1
File: train.py Project: miaojinshuai/rl-attack
def maybe_save_model(savedir, container, state):
    """This function checkpoints the model and state of the training algorithm."""
    if savedir is None:
        return
    start_time = time.time()
    model_dir = "model-{}".format(state["num_iters"])
    U.save_state(os.path.join(savedir, model_dir, "saved"))
    if container is not None:
        container.put(os.path.join(savedir, model_dir), model_dir)
    relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)
    if container is not None:
        container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')
    relatively_safe_pickle_dump(state["monitor_state"], os.path.join(savedir, 'monitor_state.pkl'))
    if container is not None:
        container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')
    logger.log("Saved model in {} seconds\n".format(time.time() - start_time))
Code Example #2
File: train.py Project: miaojinshuai/rl-attack
def maybe_load_model(savedir, container):
    """Load model if present at the specified path."""
    if savedir is None:
        return

    state_path = os.path.join(savedir, 'training_state.pkl.zip')
    if container is not None:
        logger.log("Attempting to download model from Azure")
        found_model = container.get(savedir, 'training_state.pkl.zip')
    else:
        found_model = os.path.exists(state_path)
    if found_model:
        state = pickle_load(state_path, compression=True)
        model_dir = "model-{}".format(state["num_iters"])
        if container is not None:
            container.get(savedir, model_dir)
        U.load_state(os.path.join(savedir, model_dir, "saved"))
        logger.log("Loaded models checkpoint at {} iterations".format(state["num_iters"]))
        return state
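The two helpers pair naturally at startup: try to restore, and fall back to a fresh state if nothing was found. A sketch of that resume pattern, under the same local-run assumption (container=None; the fields in the default state dict beyond num_iters and monitor_state are illustrative):

# Hypothetical resume-at-startup logic.
savedir = "./checkpoints"
state = maybe_load_model(savedir, container=None)
if state is None:
    state = {"num_iters": 0, "monitor_state": {}}
start_iter = state["num_iters"]
# ... continue the training loop from start_iter ...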
Code Example #3
File: train.py Project: limin24kobe/cleverhans
            if done:
                steps_left = args.num_steps - info["steps"]
                completion = np.round(info["steps"] / args.num_steps, 1)
                mean_ep_reward = np.mean(info["rewards"][-100:])
                logger.record_tabular("% completion", completion)
                logger.record_tabular("steps", info["steps"])
                logger.record_tabular("iters", num_iters)
                logger.record_tabular("episodes", len(info["rewards"]))
                logger.record_tabular("reward (100 epi mean)",
                                      np.mean(info["rewards"][-100:]))
                if not args.noisy:
                    logger.record_tabular("exploration",
                                          exploration.value(num_iters))
                if args.prioritized:
                    logger.record_tabular("max priority",
                                          replay_buffer._max_priority)
                # steps_per_iter has no value until its first update, so
                # fps_estimate falls back to a string; the ETA line below
                # assumes a numeric value by the time an episode finishes.
                fps_estimate = (float(steps_per_iter) /
                                (float(iteration_time_est) + 1e-6)
                                if steps_per_iter._value is not None else
                                "calculating:")
                logger.dump_tabular()
                logger.log()
                logger.log("ETA: " +
                           pretty_eta(int(steps_left / fps_estimate)))
                logger.log()
                # add summary for one episode
                ep_stats.add_all_summary(writer, [mean_ep_reward, ep_length],
                                         num_iters)
                ep_length = 0
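steps_per_iter and iteration_time_est above are smoothed running averages that support float() conversion (baselines-style RunningAvg objects). A minimal sketch of that pattern and of the FPS/ETA arithmetic, with the class written from scratch here as an assumption rather than imported, and the sample values made up:

class RunningAvg:
    """Exponentially smoothed running average that supports float()."""

    def __init__(self, gamma, init_value=None):
        self._gamma = gamma
        self._value = init_value

    def update(self, new_val):
        # Blend the new sample into the running estimate.
        if self._value is None:
            self._value = new_val
        else:
            self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val

    def __float__(self):
        return float(self._value)


steps_per_iter = RunningAvg(0.999)
iteration_time_est = RunningAvg(0.999)
steps_per_iter.update(4)         # e.g. 4 env frames advanced this iteration
iteration_time_est.update(0.01)  # e.g. 10 ms wall-clock per iteration

fps_estimate = float(steps_per_iter) / (float(iteration_time_est) + 1e-6)
steps_left = 1000000
print("ETA in seconds:", int(steps_left / fps_estimate))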
Code Example #4
def learn(env,
          q_func,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16,
          param_noise=False,
          callback=None):
    """Train a deepq model.

    Parameters
    ----------
    env: gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is
        restored at the end of training. If you do not wish to restore the
        best version at the end of training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of rlattack/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = U.make_session(num_cpu=num_cpu)
    sess.__enter__()

    def make_obs_ph(name):
        return U.BatchInput(env.observation_space.shape, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    with tempfile.TemporaryDirectory() as td:
        model_saved = False
        model_file = os.path.join(td, "model")
        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                eps = exploration.value(t)
                update_param_noise_threshold = -np.log(
                    1. - eps + eps / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = (
                    update_param_noise_threshold)
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps,
                         **kwargs)[0]
            reset = False
            new_obs, rew, done, _ = env.step(action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if (done and print_freq is not None
                    and num_episodes % print_freq == 0):
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            U.load_state(model_file)

    return ActWrapper(act, act_params)
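A minimal sketch of calling learn, assuming a Gym environment and a q_func built with the repository's deepq.models helpers; the rlattack package path follows the docstring's reference, and CartPole-v0 plus the hyperparameters are purely illustrative:

# Hypothetical invocation; hyperparameters are illustrative only.
import gym
from rlattack import deepq  # package name taken from the docstring reference

env = gym.make("CartPole-v0")
q_func = deepq.models.mlp([64])  # a small MLP Q-network

act = learn(
    env,
    q_func=q_func,
    lr=1e-3,
    max_timesteps=100000,
    exploration_fraction=0.1,
    exploration_final_eps=0.02,
    print_freq=10,
)
act.save("cartpole_model.pkl")  # ActWrapper adds save/load, per the docstring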
Code Example #5
def play(env, act, craft_adv_obs, craft_adv_obs2, stochastic, video_path,
         attack, m_target, m_adv):
    num_episodes = 0
    num_moves = 0
    num_transfer = 0
    obs = env.reset()

    actiondict = {0: "NOOP", 1: "NOOP", 2: "UP", 3: "DOWN", 4: "UP", 5: "DOWN"}
    titles = ["original", "perturbation", "adversarial"]

    while num_episodes < 100:

        #V: Attack #
        if attack is not None and random.random() <= args.attack_prob:
            # Craft adv. examples
            with m_adv.get_session().as_default():
                adv_obs = craft_adv_obs(np.array(obs)[None],
                                        stochastic_adv=stochastic)[0]

            with m_target.get_session().as_default():
                action = act(np.array(adv_obs)[None], stochastic=stochastic)[0]
                action2 = act(np.array(obs)[None], stochastic=stochastic)[0]
                num_moves += 1
                if (action != action2):
                    num_transfer += 1
            """ 
            org_str = "action: " + actiondict[action] #Seaquest
            adv_str = "action: " + actiondict[action2] #Seaquest
            
            diff = np.array(adv_obs)[None][0][:, :, 0] - np.array(obs)[None][0][:, :, 0]#Seaquest
            diff = np.abs(diff)#Seaquest
            print("maximum value in perturbation: ", np.max(diff))#Seaquest


            #print(env.unwrapped.get_action_meanings())
            if actiondict[action] != actiondict[action2]:#Seaquest
                
                fig = pyplot_image.pair_visual(np.array(obs)[None][0][:, :, 0], np.array(adv_obs)[None][0][:, :, 0])#Seaquest
                #Seaquest
                for index, ax in enumerate(fig.axes):
                    if index == 0:
                        ax.text(10, 100, org_str)
                        ax.set_title("original")
                    elif index == 1:
                        ax.set_title("perturbation")
                    elif index == 2:
                        ax.set_title("perturbed image")
                        ax.text(10, 100, adv_str)
                        
                fig.savefig('pics/cwl2-eps1-' + str(num_moves) +'.png')
             """
        else:
            # Normal
            action = act(np.array(obs)[None], stochastic=stochastic)[0]

        obs, rew, done, info = env.step(action)

        if done:
            obs = env.reset()
            if len(info["rewards"]) > num_episodes:
                episode_rewards = info["rewards"]
                num_episodes = len(episode_rewards)
                if args.attack_prob > 0:
                    success = float(num_transfer) / num_moves * 100.0
                    logger.log("Percentage of successful attacks: " +
                               str(success))

            if num_episodes == 100:
                mean_reward = np.mean(info["rewards"][-100:])
                logger.log('Reward: ' + str(mean_reward))

            num_moves = 0
            num_transfer = 0
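The transfer-rate metric logged above is the fraction of attacked steps on which the adversarial observation changed the agent's action. A self-contained restatement of that arithmetic, with made-up counts for illustration:

# Illustrative numbers only: 500 attacked moves, 430 action flips.
num_moves = 500      # steps on which an adversarial observation was used
num_transfer = 430   # steps where the action differed from the clean one

success = float(num_transfer) / num_moves * 100.0
print("Percentage of successful attacks: " + str(success))  # 86.0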