def __new__(cls, n_seq=4096, n_channels=1, n_outputs=84, legacy=True):
    k = 3 if legacy else 6  # sigh, a mistake... had to add `legacy` flag

    # per-block specs (channels in/out, kernel, ...) consumed by `cls.one_block`
    param = [
        (1, 16, k, 2, True),  # Trabelsi et al. (2017) has kernel=6 (L57)
        (16, 32, 3, 2, True),
        (32, 64, 3, 1, True),
        (64, 64, 3, 1, True),
        (64, 128, 3, 1, False),
        (128, 128, 3, 1, True),
    ]

    named_blocks = []
    for j, par in enumerate(param):
        # each block also updates the running sequence length `n_seq`
        n_seq, blk = cls.one_block(n_seq, *par)
        named_blocks.append((f"bk{j:02d}", blk))

    return torch.nn.Sequential(OrderedDict([
        # B x C x L float -> B x C/2 x L cplx
        ("cplx", ConcatenatedRealToCplx(copy=False, dim=-2)),
        *named_blocks,
        ("fltn", CplxToCplx[Flatten](-2)),
        ("lin1", cls.Linear(n_seq * 128, 2048)),
        ("relu", CplxToCplx[torch.nn.ReLU]()),
        ("lin2", cls.Linear(2048, n_outputs)),
        ("real", CplxReal()),
    ]))
def __new__(cls, vgg_name='VGG16', n_outputs=10, n_channels=3, upcast=False, half=False):
    if upcast:
        layers = [AsTypeCplx()]
    else:
        layers = [ConcatenatedRealToCplx(copy=False, dim=-3)]

    for x in cfg[vgg_name]:
        if x == 'M':
            layers.append(CplxToCplx[torch.nn.MaxPool2d](kernel_size=2, stride=2))
        else:
            x = (x // 2) if half else x
            layers.extend([
                cls.Conv2d(n_channels, x, kernel_size=3, padding=1),
                CplxBatchNorm2d(x),
                CplxToCplx[torch.nn.ReLU](),
            ])
            n_channels = x

    # the last integer x was 512 (or 256).
    return torch.nn.Sequential(
        *layers,
        CplxToCplx[Flatten](-3, -1),
        cls.Linear(256 if half else 512, n_outputs),
        CplxReal(),
    )
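# The loop above reads its layer spec from a module-level `cfg` dict that is not
# shown in this excerpt. A hedged sketch of what it presumably contains, following
# the common VGG convention (integers are conv widths, 'M' marks a 2x2 max-pool);
# the actual dict may list more variants, and only 'VGG16' is assumed here:
#
#   cfg = {
#       'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
#                 512, 512, 512, 'M', 512, 512, 512, 'M'],
#   }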
def __new__(cls, n_seq=4096, n_channels=1, n_outputs=84):
    return torch.nn.Sequential(OrderedDict([
        # B x C x L float -> B x C/2 x L cplx
        ("cplx", ConcatenatedRealToCplx(copy=False, dim=-2)),
        ("fltn", CplxToCplx[Flatten](1, -1)),
        ("lin1", cls.Linear(n_seq * n_channels, 2048)),
        ("relu", CplxToCplx[torch.nn.ReLU]()),
        ("lin2", cls.Linear(2048, n_outputs)),
        ("real", CplxReal()),
    ]))
def __new__(cls, n_outputs=10, n_inputs=1, upcast=False, half=False):
    if upcast:
        layers = [("cplx", AsTypeCplx())]
    else:
        layers = [("cplx", ConcatenatedRealToCplx(copy=False, dim=-3))]

    n_features = 2048 if half else 4096
    layers.extend([
        ("flat_", CplxToCplx[Flatten](-3, -1)),
        ("lin_1", cls.Linear(n_inputs * 28 * 28, n_features)),
        ("relu2", CplxToCplx[torch.nn.ReLU]()),
        ("lin_2", cls.Linear(n_features, n_outputs)),
        ("real", CplxReal()),
    ])

    return torch.nn.Sequential(OrderedDict(layers))
def __new__(cls, n_seq=4096, n_channels=1, n_outputs=84):
    n_seq, conv = one_conv(n_seq, cls.Conv1d, n_channels, 32, 512, 16, bias=True)
    n_seq, pool = one_pool(n_seq, torch.nn.AvgPool1d, 4, 2)

    return torch.nn.Sequential(OrderedDict([
        ("cplx", ConcatenatedRealToCplx(copy=False, dim=-2)),
        ("conv", conv),
        ("relu1", CplxToCplx[torch.nn.ReLU]()),
        ("pool", pool),
        ("fltn", CplxToCplx[Flatten](-2)),
        ("lin1", cls.Linear(n_seq * 32, 2048)),
        ("relu2", CplxToCplx[torch.nn.ReLU]()),
        ("lin2", cls.Linear(2048, n_outputs)),
        ("real", CplxReal()),
    ]))
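# `one_conv` and `one_pool` are module-level helpers (not shown here) that return
# the updated sequence length together with the constructed layer. A hedged sketch
# consistent with the call sites above, using the usual output-length formula
# L_out = (L_in - kernel) // stride + 1 (no padding, no dilation); the real helpers
# may differ in signature or details:
#
#   def one_conv(n_seq, conv, n_in, n_out, kernel, stride, **kwargs):
#       return (n_seq - kernel) // stride + 1, conv(n_in, n_out, kernel, stride, **kwargs)
#
#   def one_pool(n_seq, pool, kernel, stride):
#       return (n_seq - kernel) // stride + 1, pool(kernel, stride)
#
# With the defaults above: 4096 -> conv(512, stride 16) -> 225 -> pool(4, stride 2) -> 111,
# so "lin1" receives 111 * 32 complex features.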
def __new__(cls, n_outputs=10, n_inputs=1, upcast=False, half=False):
    if upcast:
        layers = [("cplx", AsTypeCplx())]
    else:
        layers = [("cplx", ConcatenatedRealToCplx(copy=False, dim=-3))]

    n_features = [10, 25, 250] if half else [20, 50, 500]
    layers.extend([
        ("conv1", cls.Conv2d(n_inputs, n_features[0], 5, 1)),
        ("relu1", CplxToCplx[torch.nn.ReLU]()),
        ("pool1", CplxToCplx[torch.nn.AvgPool2d](2, 2)),
        ("conv2", cls.Conv2d(n_features[0], n_features[1], 5, 1)),
        ("relu2", CplxToCplx[torch.nn.ReLU]()),
        ("pool2", CplxToCplx[torch.nn.AvgPool2d](2, 2)),
        ("flat_", CplxToCplx[Flatten](-3, -1)),
        ("lin_1", cls.Linear(4 * 4 * n_features[1], n_features[2])),
        ("relu3", CplxToCplx[torch.nn.ReLU]()),
        ("lin_2", cls.Linear(n_features[2], n_outputs)),
        ("real", CplxReal()),
        # ("real", CplxToConcatenatedReal(dim=-1)),
        # ("lin_3", torch.nn.Linear(20, 10)),
    ])

    return torch.nn.Sequential(OrderedDict(layers))
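# Shape bookkeeping behind the `4 * 4 * n_features[1]` flatten size above, for a
# 28 x 28 input as in MNIST: conv1 (5x5, stride 1, no padding) -> 24 x 24,
# pool1 (2x2) -> 12 x 12, conv2 (5x5) -> 8 x 8, pool2 (2x2) -> 4 x 4.
# Input conventions (shared by the other constructors here): without `upcast` the
# real and imaginary parts are expected concatenated along the channel dim
# (dim=-3), i.e. a real tensor of shape (batch, 2 * n_inputs, 28, 28); with
# `upcast=True` AsTypeCplx promotes a plain real tensor of shape
# (batch, n_inputs, 28, 28) to a complex-valued one.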