import json


def load_agent_config(config_path):
    """
    Load an agent configuration from file, with inheritance.

    :param config_path: path to a JSON config file
    :return: the configuration dict
    """
    with open(config_path) as f:
        agent_config = json.loads(f.read())
    if "base_config" in agent_config:
        # Load the parent configuration recursively, then overlay this config
        # on top of it; Configurable.rec_update performs a recursive dict
        # merge in which the second argument's values take precedence.
        base_config = load_agent_config(agent_config["base_config"])
        del agent_config["base_config"]
        agent_config = Configurable.rec_update(base_config, agent_config)
    return agent_config
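
# A minimal usage sketch for the inheritance mechanism above. The file names
# and contents are hypothetical, and it assumes Configurable.rec_update merges
# nested dicts recursively, as described in the comment above.
import json
import os
import tempfile


def _demo_agent_config_inheritance():
    tmp = tempfile.mkdtemp()
    base_path = os.path.join(tmp, "base_agent.json")
    child_path = os.path.join(tmp, "dqn.json")
    with open(base_path, "w") as f:
        json.dump({"model": {"type": "MLP", "layers": [64, 64]}, "gamma": 0.99}, f)
    with open(child_path, "w") as f:
        json.dump({"base_config": base_path, "model": {"layers": [256, 256]}}, f)
    config = load_agent_config(child_path)
    # Child values override the base, untouched base keys are preserved, and
    # "base_config" has been removed before the merge.
    assert config == {"model": {"type": "MLP", "layers": [256, 256]}, "gamma": 0.99}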
def __init__(self, config):
    super().__init__()
    Configurable.__init__(self, config)
    self.activation = activation_factory(self.config["activation"])
    self.conv1 = nn.Conv2d(self.config["in_channels"], 32, kernel_size=3, stride=1, padding=1)
    self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
    self.maxpool1 = nn.MaxPool2d(3, stride=2)
    self.bn1 = nn.BatchNorm2d(32)

    # MLP head.
    # The number of input features of the first Linear layer depends on the
    # output of the conv2d layers, and therefore on the input image size, so
    # compute it.
    def conv2d_size_out(size, kernel_size=3, stride=1, padding=1):
        return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1

    def maxpool_size_out(size, kernel_size=3, stride=2, padding=0):
        return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1

    # Mirror the forward pass, conv1 -> conv2 -> maxpool1; batch norm
    # preserves spatial sizes.
    convw = maxpool_size_out(conv2d_size_out(conv2d_size_out(self.config["in_width"])))
    convh = maxpool_size_out(conv2d_size_out(conv2d_size_out(self.config["in_height"])))
    assert convw > 0 and convh > 0
    self.config["head_mlp"]["in"] = convw * convh * 32
    self.config["head_mlp"]["out"] = self.config["out"]
    self.head = model_factory(self.config["head_mlp"])
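
# A worked example of the size computation above, for an assumed (not
# source-specified) 64x64 input. Each 3x3 convolution with stride 1 and
# padding 1 preserves the spatial size, so only the pooling layer shrinks the
# feature maps.
def _head_in_features(in_width, in_height):
    def conv2d_size_out(size, kernel_size=3, stride=1, padding=1):
        return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1

    def maxpool_size_out(size, kernel_size=3, stride=2, padding=0):
        return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1

    convw = maxpool_size_out(conv2d_size_out(conv2d_size_out(in_width)))
    convh = maxpool_size_out(conv2d_size_out(conv2d_size_out(in_height)))
    return convw * convh * 32

# 64 stays 64 through both convolutions, then (64 - 3) // 2 + 1 = 31 after the
# pool, so the head receives 31 * 31 * 32 = 30752 input features.
assert _head_in_features(64, 64) == 30752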
def load_environment_config(env_config):
    """
    Load an environment configuration, with inheritance.

    :param env_config: the configuration dict, or path to a JSON environment
        configuration file
    :return: the environment configuration dict
    """
    if not isinstance(env_config, dict):
        with open(env_config) as f:
            env_config = json.loads(f.read())
    if "base_config" in env_config:
        base_config = load_environment_config(env_config["base_config"])
        # Unlike load_agent_config, the "base_config" key is not deleted here,
        # so it remains in the merged configuration.
        # del env_config["base_config"]
        env_config = Configurable.rec_update(base_config, env_config)
    return env_config
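
# A minimal sketch of the two call forms (hypothetical values). The loader
# accepts either an in-memory dict, returned as-is when it declares no base,
# or a path to a JSON file; inheritance then works as in load_agent_config,
# except that "base_config" survives the merge.
env_config = load_environment_config({"id": "highway-v0", "duration": 40})
assert env_config == {"id": "highway-v0", "duration": 40}

# With a file "fast_highway.json" containing
#     {"base_config": "highway.json", "policy_frequency": 5}
# the result is highway.json's contents overridden by "policy_frequency": 5,
# with "base_config" still present in the returned dict.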
def __init__(self, config):
    super().__init__()
    Configurable.__init__(self, config)
    self.activation = activation_factory(self.config["activation"])
    self.conv1 = nn.Conv3d(self.config["in_channels"], 32, kernel_size=(1, 7, 7),
                           stride=(1, 1, 1), padding=(0, 3, 3))
    self.conv2 = nn.Conv3d(32, 32, kernel_size=(3, 5, 5), stride=(1, 1, 1), padding=(1, 2, 2))
    self.conv3 = nn.Conv3d(32, 64, kernel_size=(3, 5, 5), stride=(1, 1, 1))
    self.conv4 = nn.Conv3d(64, 64, kernel_size=(1, 5, 5), stride=(1, 1, 1))
    self.maxpool1 = nn.MaxPool3d(kernel_size=(3, 5, 5), stride=(1, 2, 2))
    self.bn1 = nn.BatchNorm3d(64)

    # MLP head.
    # The number of input features of the first Linear layer depends on the
    # output of the conv3d layers, and therefore on the input volume size, so
    # compute it one dimension at a time with the helpers below.
    def conv2d_size_out(size, kernel_size=5, stride=1, padding=0):
        return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1

    def maxpool_size_out(size, kernel_size=5, stride=2, padding=0):
        return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1

    # Mirror the forward pass, conv1 -> conv2 -> maxpool1 -> conv3 -> conv4,
    # along each dimension; batch norm preserves sizes.
    convw = conv2d_size_out(self.config["in_width"], kernel_size=7, stride=1, padding=3)
    convw = conv2d_size_out(convw, kernel_size=5, stride=1, padding=2)
    convw = maxpool_size_out(convw, kernel_size=5, stride=2)
    convw = conv2d_size_out(convw, kernel_size=5, stride=1)
    convw = conv2d_size_out(convw, kernel_size=5, stride=1)
    convh = conv2d_size_out(self.config["in_height"], kernel_size=7, stride=1, padding=3)
    convh = conv2d_size_out(convh, kernel_size=5, stride=1, padding=2)
    convh = maxpool_size_out(convh, kernel_size=5, stride=2)
    convh = conv2d_size_out(convh, kernel_size=5, stride=1)
    convh = conv2d_size_out(convh, kernel_size=5, stride=1)
    convd = conv2d_size_out(self.config["in_depth"], kernel_size=1, stride=1, padding=0)
    convd = conv2d_size_out(convd, kernel_size=3, stride=1, padding=1)
    convd = maxpool_size_out(convd, kernel_size=3, stride=1)
    convd = conv2d_size_out(convd, kernel_size=3, stride=1)
    convd = conv2d_size_out(convd, kernel_size=1, stride=1)
    assert convw > 0 and convh > 0 and convd > 0
    self.config["head_mlp"]["in"] = convw * convh * convd * 64
    self.config["head_mlp"]["out"] = self.config["out"]
    self.fc1 = nn.Linear(self.config["head_mlp"]["in"], 256)
    self.fc2 = nn.Linear(256, self.config["head_mlp"]["out"])
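
# A worked example of the per-dimension size computation above, for an assumed
# (not source-specified) input of depth 6 and 84x84 frames. conv1 and conv2
# are padded to preserve all sizes; only the pool and the unpadded conv3/conv4
# shrink them.
def _volume_out(in_depth, in_height, in_width):
    def size_out(size, kernel_size, stride=1, padding=0):
        return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1

    # Width and height: the pool halves them, conv3 and conv4 each remove 4.
    w = size_out(size_out(size_out(in_width, 5, stride=2), 5), 5)
    h = size_out(size_out(size_out(in_height, 5, stride=2), 5), 5)
    # Depth: the pool (stride 1) and conv3 each remove 2; conv4 preserves it.
    d = size_out(size_out(in_depth, 3), 3)
    return d, h, w

# 84 -> 40 after the pool, then 36 and 32 after conv3 and conv4; the depth
# goes 6 -> 4 -> 2, so the head receives 32 * 32 * 2 * 64 = 131072 features.
assert _volume_out(6, 84, 84) == (2, 32, 32)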