def conv_out_size(self, h, w, c=None):
    """Return the output shape ``(h, w, c)`` for an input of spatial size
    ``(h, w)`` without actually performing a forward pass through the model.

    Walks the children of ``self.block``, updating the spatial size for
    every Conv2d/MaxPool2d layer and the channel count for every Conv2d,
    and recursing into nested ResidualBlocks.
    """
    for layer in self.block.children():
        if isinstance(layer, ResidualBlock):
            # Nested block: delegate to its own shape computation.
            h, w, c = layer.conv_out_size(h, w, c)
            continue
        if isinstance(layer, (torch.nn.Conv2d, torch.nn.MaxPool2d)):
            # Both layer types shrink the spatial dims the same way.
            h, w = conv2d_output_shape(
                h, w, layer.kernel_size, layer.stride, layer.padding)
            if isinstance(layer, torch.nn.Conv2d):
                # Only convolutions change the channel count.
                c = layer.out_channels
    return h, w, c
def conv_out_size(self, h, w, c=None):
    """Return the flattened output size ``h * w * c`` for an input of
    spatial size ``(h, w)``, without running a forward pass.

    Uses EAFP: any child of ``self.conv`` lacking the relevant attributes
    (e.g. activations) simply leaves the running shape untouched.
    """
    for module in self.conv.children():
        try:
            h, w = conv2d_output_shape(
                h, w, module.kernel_size, module.stride, module.padding)
        except AttributeError:
            pass  # No spatial parameters — not a conv or maxpool layer.
        try:
            c = module.out_channels
        except AttributeError:
            pass  # Channel count unchanged — not a conv layer.
    return h * w * c
def conv_out_size(self, h, w, c=None):
    """Return the output shape ``(h, w, c)`` for an input of spatial size
    ``(h, w)``, without actually performing a forward pass through the model.

    Uses EAFP: children of ``self.block`` that lack conv-style attributes
    (e.g. activations or norm layers) leave the running shape untouched.
    """
    for module in self.block.children():
        try:
            h, w = conv2d_output_shape(
                h, w, module.kernel_size, module.stride, module.padding)
        except AttributeError:
            pass  # No spatial parameters — not a conv or maxpool layer.
        try:
            c = module.out_channels
        except AttributeError:
            pass  # Channel count unchanged — not a conv layer.
    return h, w, c
def __init__(self, h, w, in_channels=3, out_channels=16, kernel=3,
             stride=1, maxstride=2):
    """Build an IMPALA-style conv block: conv + maxpool, followed by two
    residual blocks.

    :param h: input height.
    :param w: input width.
    :param in_channels: number of input channels.
    :param out_channels: number of output channels of the conv layer.
    :param kernel: kernel size shared by the conv and the maxpool.
    :param stride: stride of the conv layer.
    :param maxstride: stride of the maxpool layer.
    """
    super().__init__()
    p = int((kernel - 1) // 2)  # "Same" padding (for odd kernel sizes).
    base_conv = torch.nn.Sequential(
        torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel,
                        stride=stride, padding=p),
        torch.nn.MaxPool2d(kernel, maxstride, p),
    )
    # BUG FIX: track the spatial size through BOTH layers.  The original
    # computed a single step with stride=maxstride, which is only correct
    # when the conv has stride 1 (same padding then preserves h, w); for
    # stride != 1 the recorded output size was wrong.
    h, w = conv2d_output_shape(
        h, w, kernel_size=kernel, stride=stride, padding=p)
    self.output_size = conv2d_output_shape(
        h, w, kernel_size=kernel, stride=maxstride, padding=p)
    # NOTE(review): assumes the residual blocks preserve spatial size,
    # as IMPALA residual blocks conventionally do — confirm.
    self.conv = torch.nn.Sequential(
        base_conv,
        IMPALAResidualBlock(out_channels),
        IMPALAResidualBlock(out_channels),
    )