From b2e7126840259ccc7bda13cf96eb939e40689e78 Mon Sep 17 00:00:00 2001 From: Scott Brownlie Date: Wed, 8 Sep 2021 11:54:50 +0100 Subject: [PATCH] Train/Eval Mode Support (#39) * switch models between train and eval mode * update changelog * update release in change log * Update dependency Co-authored-by: Antonin Raffin --- docs/misc/changelog.rst | 8 +- sb3_contrib/qrdqn/policies.py | 10 ++ sb3_contrib/qrdqn/qrdqn.py | 2 + sb3_contrib/tqc/policies.py | 13 ++ sb3_contrib/tqc/tqc.py | 2 + sb3_contrib/version.txt | 2 +- setup.py | 2 +- tests/test_train_eval_mode.py | 225 ++++++++++++++++++++++++++++++++++ 8 files changed, 260 insertions(+), 4 deletions(-) create mode 100644 tests/test_train_eval_mode.py diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst index 33991d0..07a42a0 100644 --- a/docs/misc/changelog.rst +++ b/docs/misc/changelog.rst @@ -4,14 +4,18 @@ Changelog ========== -Release 1.2.0a0 (WIP) +Release 1.2.0 (2021-09-08) ------------------------------- +**Train/Eval mode support** + Breaking Changes: ^^^^^^^^^^^^^^^^^ +- Upgraded to Stable-Baselines3 >= 1.2.0 Bug Fixes: ^^^^^^^^^^ +- QR-DQN and TQC updated so that their policies are switched between train and eval mode at the correct time (@ayeright) Deprecations: ^^^^^^^^^^^^^ @@ -152,4 +156,4 @@ Stable-Baselines3 is currently maintained by `Antonin Raffin`_ (aka `@araffin`_) Contributors: ------------- -@ku2482 @guyk1971 @minhlong94 +@ku2482 @guyk1971 @minhlong94 @ayeright diff --git a/sb3_contrib/qrdqn/policies.py b/sb3_contrib/qrdqn/policies.py index 5776159..ba42ecf 100644 --- a/sb3_contrib/qrdqn/policies.py +++ b/sb3_contrib/qrdqn/policies.py @@ -167,6 +167,7 @@ class QRDQNPolicy(BasePolicy): self.quantile_net = self.make_quantile_net() self.quantile_net_target = self.make_quantile_net() self.quantile_net_target.load_state_dict(self.quantile_net.state_dict()) + self.quantile_net_target.set_training_mode(False) # Setup optimizer with initial learning rate self.optimizer = 
self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) @@ -199,6 +200,15 @@ class QRDQNPolicy(BasePolicy): ) return data + def set_training_mode(self, mode: bool) -> None: + """ + Put the policy in either training or evaluation mode. + This affects certain modules, such as batch normalisation and dropout. + :param mode: if true, set to training mode, else set to evaluation mode + """ + self.quantile_net.set_training_mode(mode) + self.training = mode + MlpPolicy = QRDQNPolicy diff --git a/sb3_contrib/qrdqn/qrdqn.py b/sb3_contrib/qrdqn/qrdqn.py index 001f550..ad6016e 100644 --- a/sb3_contrib/qrdqn/qrdqn.py +++ b/sb3_contrib/qrdqn/qrdqn.py @@ -155,6 +155,8 @@ class QRDQN(OffPolicyAlgorithm): self.logger.record("rollout/exploration rate", self.exploration_rate) def train(self, gradient_steps: int, batch_size: int = 100) -> None: + # Switch to train mode (this affects batch norm / dropout) + self.policy.set_training_mode(True) # Update learning rate according to schedule self._update_learning_rate(self.policy.optimizer) diff --git a/sb3_contrib/tqc/policies.py b/sb3_contrib/tqc/policies.py index d77b4c0..d02601c 100644 --- a/sb3_contrib/tqc/policies.py +++ b/sb3_contrib/tqc/policies.py @@ -376,6 +376,9 @@ class TQCPolicy(BasePolicy): self.critic_target = self.make_critic(features_extractor=None) self.critic_target.load_state_dict(self.critic.state_dict()) + # Target networks should always be in eval mode + self.critic_target.set_training_mode(False) + self.critic.optimizer = self.optimizer_class(critic_parameters, lr=lr_schedule(1), **self.optimizer_kwargs) def _get_constructor_parameters(self) -> Dict[str, Any]: @@ -423,6 +426,16 @@ class TQCPolicy(BasePolicy): def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor: return self.actor(observation, deterministic) + def set_training_mode(self, mode: bool) -> None: + """ + Put the policy in either training or evaluation mode. 
+ This affects certain modules, such as batch normalisation and dropout. + :param mode: if true, set to training mode, else set to evaluation mode + """ + self.actor.set_training_mode(mode) + self.critic.set_training_mode(mode) + self.training = mode + MlpPolicy = TQCPolicy diff --git a/sb3_contrib/tqc/tqc.py b/sb3_contrib/tqc/tqc.py index adead01..05dbd7a 100644 --- a/sb3_contrib/tqc/tqc.py +++ b/sb3_contrib/tqc/tqc.py @@ -175,6 +175,8 @@ class TQC(OffPolicyAlgorithm): self.critic_target = self.policy.critic_target def train(self, gradient_steps: int, batch_size: int = 64) -> None: + # Switch to train mode (this affects batch norm / dropout) + self.policy.set_training_mode(True) # Update optimizers learning rate optimizers = [self.actor.optimizer, self.critic.optimizer] if self.ent_coef_optimizer is not None: diff --git a/sb3_contrib/version.txt b/sb3_contrib/version.txt index 0816bf0..26aaba0 100644 --- a/sb3_contrib/version.txt +++ b/sb3_contrib/version.txt @@ -1 +1 @@ -1.2.0a0 +1.2.0 diff --git a/setup.py b/setup.py index 87fedc3..1b75a55 100644 --- a/setup.py +++ b/setup.py @@ -62,7 +62,7 @@ setup( packages=[package for package in find_packages() if package.startswith("sb3_contrib")], package_data={"sb3_contrib": ["py.typed", "version.txt"]}, install_requires=[ - "stable_baselines3>=1.1.0", + "stable_baselines3>=1.2.0", ], description="Contrib package of Stable Baselines3, experimental code.", author="Antonin Raffin", diff --git a/tests/test_train_eval_mode.py b/tests/test_train_eval_mode.py new file mode 100644 index 0000000..cbab03f --- /dev/null +++ b/tests/test_train_eval_mode.py @@ -0,0 +1,225 @@ +import gym +import numpy as np +import pytest +import torch as th +import torch.nn as nn +from stable_baselines3.common.preprocessing import get_flattened_obs_dim +from stable_baselines3.common.torch_layers import BaseFeaturesExtractor + +from sb3_contrib import QRDQN, TQC + + +class FlattenBatchNormDropoutExtractor(BaseFeaturesExtractor): + """ + Feature 
extractor that flattens the input and applies batch normalization and dropout. + Used as a placeholder when feature extraction is not needed. + :param observation_space: + """ + + def __init__(self, observation_space: gym.Space): + super(FlattenBatchNormDropoutExtractor, self).__init__( + observation_space, + get_flattened_obs_dim(observation_space), + ) + self.flatten = nn.Flatten() + self.batch_norm = nn.BatchNorm1d(self._features_dim) + self.dropout = nn.Dropout(0.5) + + def forward(self, observations: th.Tensor) -> th.Tensor: + result = self.flatten(observations) + result = self.batch_norm(result) + result = self.dropout(result) + return result + + +def clone_batch_norm_stats(batch_norm: nn.BatchNorm1d) -> (th.Tensor, th.Tensor): + """ + Clone the bias and running mean from the given batch norm layer. + :param batch_norm: + :return: the bias and running mean + """ + return batch_norm.bias.clone(), batch_norm.running_mean.clone() + + +def clone_qrdqn_batch_norm_stats(model: QRDQN) -> (th.Tensor, th.Tensor, th.Tensor, th.Tensor): + """ + Clone the bias and running mean from the quantile network and quantile-target network. + :param model: + :return: the bias and running mean from the quantile network and quantile-target network + """ + quantile_net_batch_norm = model.policy.quantile_net.features_extractor.batch_norm + quantile_net_bias, quantile_net_running_mean = clone_batch_norm_stats(quantile_net_batch_norm) + + quantile_net_target_batch_norm = model.policy.quantile_net_target.features_extractor.batch_norm + quantile_net_target_bias, quantile_net_target_running_mean = clone_batch_norm_stats(quantile_net_target_batch_norm) + + return quantile_net_bias, quantile_net_running_mean, quantile_net_target_bias, quantile_net_target_running_mean + + +def clone_tqc_batch_norm_stats( + model: TQC, +) -> (th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor, th.Tensor): + """ + Clone the bias and running mean from the actor and critic networks and critic-target networks. 
+ :param model: + :return: the bias and running mean from the actor and critic networks and critic-target networks + """ + actor_batch_norm = model.actor.features_extractor.batch_norm + actor_bias, actor_running_mean = clone_batch_norm_stats(actor_batch_norm) + + critic_batch_norm = model.critic.features_extractor.batch_norm + critic_bias, critic_running_mean = clone_batch_norm_stats(critic_batch_norm) + + critic_target_batch_norm = model.critic_target.features_extractor.batch_norm + critic_target_bias, critic_target_running_mean = clone_batch_norm_stats(critic_target_batch_norm) + + return (actor_bias, actor_running_mean, critic_bias, critic_running_mean, critic_target_bias, critic_target_running_mean) + + +CLONE_HELPERS = { + QRDQN: clone_qrdqn_batch_norm_stats, + TQC: clone_tqc_batch_norm_stats, +} + + +def test_qrdqn_train_with_batch_norm(): + model = QRDQN( + "MlpPolicy", + "CartPole-v1", + policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor), + learning_starts=0, + seed=1, + tau=0, # do not clone the target + ) + + ( + quantile_net_bias_before, + quantile_net_running_mean_before, + quantile_net_target_bias_before, + quantile_net_target_running_mean_before, + ) = clone_qrdqn_batch_norm_stats(model) + + model.learn(total_timesteps=200) + + ( + quantile_net_bias_after, + quantile_net_running_mean_after, + quantile_net_target_bias_after, + quantile_net_target_running_mean_after, + ) = clone_qrdqn_batch_norm_stats(model) + + assert ~th.isclose(quantile_net_bias_before, quantile_net_bias_after).all() + assert ~th.isclose(quantile_net_running_mean_before, quantile_net_running_mean_after).all() + + assert th.isclose(quantile_net_target_bias_before, quantile_net_target_bias_after).all() + assert th.isclose(quantile_net_target_running_mean_before, quantile_net_target_running_mean_after).all() + + +def test_tqc_train_with_batch_norm(): + model = TQC( + "MlpPolicy", + "Pendulum-v0", + policy_kwargs=dict(net_arch=[16, 16], 
features_extractor_class=FlattenBatchNormDropoutExtractor), + learning_starts=0, + tau=0, # do not copy the target + seed=1, + ) + + ( + actor_bias_before, + actor_running_mean_before, + critic_bias_before, + critic_running_mean_before, + critic_target_bias_before, + critic_target_running_mean_before, + ) = clone_tqc_batch_norm_stats(model) + + model.learn(total_timesteps=200) + + ( + actor_bias_after, + actor_running_mean_after, + critic_bias_after, + critic_running_mean_after, + critic_target_bias_after, + critic_target_running_mean_after, + ) = clone_tqc_batch_norm_stats(model) + + assert ~th.isclose(actor_bias_before, actor_bias_after).all() + assert ~th.isclose(actor_running_mean_before, actor_running_mean_after).all() + + assert ~th.isclose(critic_bias_before, critic_bias_after).all() + assert ~th.isclose(critic_running_mean_before, critic_running_mean_after).all() + + assert th.isclose(critic_target_bias_before, critic_target_bias_after).all() + assert th.isclose(critic_target_running_mean_before, critic_target_running_mean_after).all() + + +@pytest.mark.parametrize("model_class", [QRDQN, TQC]) +def test_offpolicy_collect_rollout_batch_norm(model_class): + if model_class in [QRDQN]: + env_id = "CartPole-v1" + else: + env_id = "Pendulum-v0" + + clone_helper = CLONE_HELPERS[model_class] + + learning_starts = 10 + model = model_class( + "MlpPolicy", + env_id, + policy_kwargs=dict(net_arch=[16, 16], features_extractor_class=FlattenBatchNormDropoutExtractor), + learning_starts=learning_starts, + seed=1, + gradient_steps=0, + train_freq=1, + ) + + batch_norm_stats_before = clone_helper(model) + + model.learn(total_timesteps=100) + + batch_norm_stats_after = clone_helper(model) + + # No change in batch norm params + for param_before, param_after in zip(batch_norm_stats_before, batch_norm_stats_after): + assert th.isclose(param_before, param_after).all() + + +@pytest.mark.parametrize("model_class", [QRDQN, TQC]) +@pytest.mark.parametrize("env_id", ["Pendulum-v0", 
"CartPole-v1"]) +def test_predict_with_dropout_batch_norm(model_class, env_id): + if env_id == "CartPole-v1": + if model_class in [TQC]: + return + elif model_class in [QRDQN]: + return + + model_kwargs = dict(seed=1) + clone_helper = CLONE_HELPERS[model_class] + + if model_class in [QRDQN, TQC]: + model_kwargs["learning_starts"] = 0 + else: + model_kwargs["n_steps"] = 64 + + policy_kwargs = dict( + features_extractor_class=FlattenBatchNormDropoutExtractor, + net_arch=[16, 16], + ) + model = model_class("MlpPolicy", env_id, policy_kwargs=policy_kwargs, verbose=1, **model_kwargs) + + batch_norm_stats_before = clone_helper(model) + + env = model.get_env() + observation = env.reset() + first_prediction, _ = model.predict(observation, deterministic=True) + for _ in range(5): + prediction, _ = model.predict(observation, deterministic=True) + np.testing.assert_allclose(first_prediction, prediction) + + batch_norm_stats_after = clone_helper(model) + + # No change in batch norm params + for param_before, param_after in zip(batch_norm_stats_before, batch_norm_stats_after): + assert th.isclose(param_before, param_after).all()