def __init__(self, config):
    super().__init__()
    Configurable.__init__(self, config)
    self.activation = activation_factory(self.config["activation"])
    self.conv1 = nn.Conv2d(self.config["in_channels"], 16, kernel_size=2, stride=2)
    self.conv2 = nn.Conv2d(16, 32, kernel_size=2, stride=2)
    self.conv3 = nn.Conv2d(32, 64, kernel_size=2, stride=2)

    # MLP head.
    # The number of linear input connections depends on the output of the
    # conv2d layers, and therefore on the input image size, so compute it.
    def conv2d_size_out(size, kernel_size=2, stride=2):
        return (size - (kernel_size - 1) - 1) // stride + 1

    convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(self.config["in_width"])))
    convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(self.config["in_height"])))
    assert convh > 0 and convw > 0
    self.config["head_mlp"]["in"] = convw * convh * 64
    self.config["head_mlp"]["out"] = self.config["out"]
    self.head = model_factory(self.config["head_mlp"])
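# A minimal example configuration for the convolutional network above. The
# top-level keys are taken from the constructor; the contents of the
# "head_mlp" sub-config (here "type", "layers") are an assumption about what
# model_factory expects, not confirmed by this file.
example_conv_config = {
    "activation": "RELU",
    "in_channels": 4,
    "in_width": 84,
    "in_height": 84,
    "out": 6,
    "head_mlp": {"type": "MultiLayerPerceptron", "layers": [256], "activation": "RELU"},
}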
def __init__(self, config): super().__init__() Configurable.__init__(self, config) self.config["base_module"]["in"] = self.config["in"] self.base_module = model_factory(self.config["base_module"]) self.advantage = nn.Linear(self.config["base_module"]["layers"][-1], self.config["out"]) self.value = nn.Linear(self.config["base_module"]["layers"][-1], 1)
def __init__(self, config):
    super().__init__()
    Configurable.__init__(self, config)
    sizes = [self.config["in"]] + self.config["layers"]
    self.activation = activation_factory(self.config["activation"])
    layers_list = [nn.Linear(sizes[i], sizes[i + 1]) for i in range(len(sizes) - 1)]
    self.layers = nn.ModuleList(layers_list)
    if self.config.get("out", None):
        self.predict = nn.Linear(sizes[-1], self.config["out"])
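# A plausible forward pass for the MLP above, assuming the activation is
# applied after every hidden layer and the optional "predict" head is purely
# linear; the real forward() is not shown here.
def mlp_forward_sketch(self, x):
    for layer in self.layers:
        x = self.activation(layer(x))
    # "predict" only exists when the config sets an "out" size.
    if getattr(self, "predict", None) is not None:
        x = self.predict(x)
    return x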
def __init__(self, config):
    super().__init__()
    Configurable.__init__(self, config)
    # Assumes feature_size is divisible by heads; int() silently truncates otherwise.
    self.features_per_head = int(self.config["feature_size"] / self.config["heads"])
    self.value_all = nn.Linear(self.config["feature_size"], self.config["feature_size"], bias=False)
    self.key_all = nn.Linear(self.config["feature_size"], self.config["feature_size"], bias=False)
    self.query_all = nn.Linear(self.config["feature_size"], self.config["feature_size"], bias=False)
    self.attention_combine = nn.Linear(self.config["feature_size"], self.config["feature_size"], bias=False)
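# A minimal sketch of the multi-head self-attention these projections support,
# assuming inputs of shape (batch, entities, feature_size). The real forward()
# may differ (e.g. masking, ego-centric queries), so this is illustrative only.
def attention_forward_sketch(self, x):
    batch, entities, _ = x.shape
    heads = self.config["heads"]

    def split_heads(tensor):
        # (batch, entities, feature_size) -> (batch, heads, entities, features_per_head)
        return tensor.view(batch, entities, heads, self.features_per_head).transpose(1, 2)

    q = split_heads(self.query_all(x))
    k = split_heads(self.key_all(x))
    v = split_heads(self.value_all(x))
    # Scaled dot-product attention per head.
    scores = q @ k.transpose(-2, -1) / self.features_per_head ** 0.5
    attended = scores.softmax(dim=-1) @ v
    # Merge heads back and mix them with the output projection.
    attended = attended.transpose(1, 2).reshape(batch, entities, -1)
    return self.attention_combine(attended)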
def __init__(self, config):
    super().__init__()
    Configurable.__init__(self, config)
    # sizes = [self.config["in"]] + self.config["layers"] + [self.config["out"]]
    # self.activation = activation_factory(self.config["activation"])
    # layers_list = [GCNConv(sizes[i], sizes[i + 1]) for i in range(len(sizes) - 1)]
    # self.layers = nn.ModuleList(layers_list)
    # Note: unlike the commented-out generic version, config["layers"] is used
    # here as a single hidden width, not a list of layer sizes.
    self.conv1 = GCNConv(self.config["in"], self.config["layers"])
    self.conv2 = GCNConv(self.config["layers"], self.config["out"])
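# Example usage of the two-layer GCN above, assuming torch_geometric's GCNConv:
# it takes node features x of shape (num_nodes, in) and an edge_index of shape
# (2, num_edges). The ReLU between the two layers is an assumption.
def gcn_forward_sketch(self, x, edge_index):
    import torch.nn.functional as F
    x = F.relu(self.conv1(x, edge_index))
    return self.conv2(x, edge_index)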
def __init__(self, config):
    super().__init__()
    Configurable.__init__(self, config)
    self.config = config
    if not self.config["embedding_layer"]["in"]:
        self.config["embedding_layer"]["in"] = self.config["in"]
    self.config["output_layer"]["in"] = self.config["attention_layer"]["feature_size"]
    self.config["output_layer"]["out"] = self.config["out"]
    self.embedding = model_factory(self.config["embedding_layer"])
    self.attention_layer = SelfAttention(self.config["attention_layer"])
    self.output_layer = model_factory(self.config["output_layer"])
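# A plausible end-to-end pass for the attention network above: embed each
# entity, attend over the entity dimension, then decode. Only the constructor
# is shown in this file, so this forward is an assumption.
def attention_network_forward_sketch(self, x):
    x = self.embedding(x)        # (batch, entities, feature_size)
    x = self.attention_layer(x)  # self-attention over entities
    return self.output_layer(x)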
def __init__(self, config): super().__init__() Configurable.__init__(self, config) self.activation = activation_factory(self.config["activation"]) self.conv1 = nn.Conv3d(self.config["in_channels"], 32, kernel_size=(1, 8, 8), stride=(1, 4, 4)) self.conv2 = nn.Conv3d(32, 64, kernel_size=(3, 4, 4), stride=(1, 2, 2)) self.conv3 = nn.Conv3d(64, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1)) # MLP Head # Number of Linear input connections depends on output of conv2d layers # and therefore the input image size, so compute it. def conv2d_size_out(size, kernel_size=3, stride=1): return (size - (kernel_size - 1) - 1) // stride + 1 convw = conv2d_size_out( conv2d_size_out(conv2d_size_out(self.config["in_width"], kernel_size=8, stride=4), kernel_size=4, stride=2)) convh = conv2d_size_out( conv2d_size_out(conv2d_size_out(self.config["in_height"], kernel_size=8, stride=4), kernel_size=4, stride=2)) convd = conv2d_size_out( conv2d_size_out(conv2d_size_out(self.config["in_depth"], kernel_size=3, stride=1), kernel_size=1, stride=1)) assert convh > 0 and convw > 0 self.config["head_mlp"]["in"] = convw * convh * convd * 64 self.config["head_mlp"]["out"] = self.config["out"] self.fc1 = nn.Linear(self.config["head_mlp"]["in"], 512) self.fc2 = nn.Linear(512, self.config["head_mlp"]["out"])
def __init__(self, config): super().__init__() Configurable.__init__(self, config) self.activation = activation_factory(self.config["activation"]) self.conv1 = nn.Conv2d(self.config["in_channels"], 32, kernel_size=3, stride=1) self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1) self.bn1 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1) self.conv4 = nn.Conv2d(32, 32, kernel_size=3, stride=1) self.bn2 = nn.BatchNorm2d(32) self.conv5 = nn.Conv2d(32, 32, kernel_size=3, stride=1) self.maxpool1 = nn.MaxPool2d(3, stride=2) # MLP Head # Number of Linear input connections depends on output of conv2d layers # and therefore the input image size, so compute it. def conv2d_size_out(size, kernel_size=3, stride=1): return (size - (kernel_size - 1) - 1) // stride + 1 def maxpool_size_out(size, kernel_size=3, stride=2, padding=0): return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1 convw = conv2d_size_out( conv2d_size_out( maxpool_size_out( conv2d_size_out( conv2d_size_out( conv2d_size_out(self.config["in_width"])))))) convh = conv2d_size_out( conv2d_size_out( maxpool_size_out( conv2d_size_out( conv2d_size_out( conv2d_size_out(self.config["in_height"])))))) assert convh > 0 and convw > 0 self.config["head_mlp"]["in"] = convw * convh * 32 self.config["head_mlp"]["out"] = self.config["out"] self.head = model_factory(self.config["head_mlp"])
def __init__(self, config): super().__init__() Configurable.__init__(self, config) self.activation = activation_factory(self.config["activation"]) self.conv1 = nn.Conv3d(self.config["in_channels"], 32, kernel_size=(1, 7, 7), stride=(1, 1, 1), padding=(0, 3, 3)) self.conv2 = nn.Conv3d(32, 32, kernel_size=(3, 5, 5), stride=(1, 1, 1), padding=(1, 2, 2)) self.conv3 = nn.Conv3d(32, 64, kernel_size=(3, 5, 5), stride=(1, 1, 1)) self.conv4 = nn.Conv3d(64, 64, kernel_size=(1, 5, 5), stride=(1, 1, 1)) self.maxpool1 = nn.MaxPool3d(kernel_size=(3, 5, 5), stride=(1, 2, 2)) self.bn1 = nn.BatchNorm3d(64) # MLP Head # Number of Linear input connections depends on output of conv2d layers # and therefore the input image size, so compute it. def conv2d_size_out(size, kernel_size=5, stride=1, padding=0): return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1 def maxpool_size_out(size, kernel_size=5, stride=2, padding=0): return (size + 2 * padding - (kernel_size - 1) - 1) // stride + 1 convw = conv2d_size_out( conv2d_size_out( maxpool_size_out( conv2d_size_out(conv2d_size_out(self.config["in_width"], kernel_size=7, stride=1, padding=3), kernel_size=5, stride=1, padding=2)))) convh = conv2d_size_out( conv2d_size_out( maxpool_size_out( conv2d_size_out(conv2d_size_out(self.config["in_height"], kernel_size=7, stride=1, padding=3), kernel_size=5, stride=1, padding=2)))) convd = conv2d_size_out(conv2d_size_out(maxpool_size_out( conv2d_size_out(conv2d_size_out(self.config["in_depth"], kernel_size=1, stride=1, padding=0), kernel_size=3, stride=1, padding=1), kernel_size=3, stride=1), kernel_size=3, stride=1), kernel_size=1, stride=1) assert convh > 0 and convw > 0 self.config["head_mlp"]["in"] = convw * convh * convd * 64 self.config["head_mlp"]["out"] = self.config["out"] self.fc1 = nn.Linear(self.config["head_mlp"]["in"], 256) self.fc2 = nn.Linear(256, self.config["head_mlp"]["out"])