Example #1
    def __init__(self):
        super().__init__()
        # Earlier variant (no interleaved Slide layers, no bias/dim
        # arguments), kept commented out:
        # self.fc1 = nn.Sequential(
        #     gn.Spread(2),
        #     torch.nn.Softmax(),
        #     gn.Spread(4),
        #     torch.nn.Softmax(),
        #     gn.Spread(6),
        #     torch.nn.Softmax(),
        #     gn.Spread(8),
        #     torch.nn.Softmax(),
        #     gn.Slide(10),
        #     torch.nn.Softmax(),
        #     gn.Slide(10),
        #     torch.nn.Softmax(),
        #     gn.Gather(10),
        #     torch.nn.Softmax(),
        #     gn.Gather(8),
        #     torch.nn.Softmax(),
        #     gn.Gather(6),
        #     torch.nn.Softmax(),
        #     gn.Gather(4),
        #     torch.nn.Softmax(),
        #     gn.Gather(2),
        # )

        # Glia "pyramid" (judging from how the sizes chain): Spread widens
        # the representation by two units, Slide keeps its width, and
        # Gather narrows it by two, so the stack expands 2 -> 10 and then
        # contracts back to 2.
        self.fc1 = nn.Sequential(
            gn.Spread(2, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Slide(4, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Spread(4, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Slide(6, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Spread(6, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Slide(8, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Spread(8, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Slide(10, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Gather(10, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Gather(8, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Gather(6, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Gather(4, bias=False),
            torch.nn.Softmax(dim=1),
            gn.Gather(2, bias=False),
        )
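
A minimal usage sketch for the pattern above. Assumptions not taken from the snippet: the layers come from `from glia import gn` (the import path is a guess), and, judging only from how the sizes chain in the stack, gn.Spread(n) maps n -> n + 2 features, gn.Slide(n) keeps n, and gn.Gather(n) maps n -> n - 2.

import torch
import torch.nn as nn
from glia import gn  # assumed import path for the Spread/Slide/Gather layers

# Shortened stand-in for the fc1 stack above (same Spread -> Slide -> Gather shape)
fc1 = nn.Sequential(
    gn.Spread(2, bias=False),  # 2 -> 4 features (inferred from the sizes above)
    nn.Softmax(dim=1),
    gn.Slide(4, bias=False),   # 4 -> 4 features
    nn.Softmax(dim=1),
    gn.Gather(4, bias=False),  # 4 -> 2 features
)

x = torch.rand(8, 2)  # batch of 8 samples, 2 input features
y = fc1(x)            # expected shape: (8, 2)
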
Example #2
    def __init__(self, z_features=20, activation_function='Softmax', sigma=1):
        # --------------------------------------------------------------------
        # Init
        super().__init__()
        if z_features < 12:
            raise ValueError("z_features must be >= 12.")

        self.z_features = z_features
        self.sigma = sigma

        # Look up the activation class by name; it must accept a `dim`
        # argument (e.g. nn.Softmax), since it is instantiated as
        # AF(dim=1) below.
        AF = getattr(nn, activation_function)

        # --------------------------------------------------------------------
        # Build the glia1 stack:
        glia1 = []
        for s in reversed(range(12, self.z_features + 2, 2)):
            glia1.append(gn.Gather(s))
            glia1.append(gn.Leak(s - 2, sigma=sigma))
            glia1.append(gn.Slide(s - 2))
            glia1.append(gn.Leak(s - 2, sigma=sigma))

            # Add the activation between blocks, but not after the final
            # block (s == 12): its 10-unit output is left linear for the
            # digit decode.
            if s > 12:
                glia1.append(AF(dim=1))
        self.glia1 = nn.Sequential(*glia1)
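
To see what the loop builds, here is the glia1 stack unrolled by hand for z_features=16, sigma=1, activation_function='Softmax' (s takes the values 16, 14, 12), assuming the same `nn` and `gn` imports as in the snippets:

# Unrolled equivalent of the loop above for z_features=16, sigma=1:
glia1 = nn.Sequential(
    # s == 16
    gn.Gather(16), gn.Leak(14, sigma=1), gn.Slide(14), gn.Leak(14, sigma=1),
    nn.Softmax(dim=1),                 # s > 12, so the activation is appended
    # s == 14
    gn.Gather(14), gn.Leak(12, sigma=1), gn.Slide(12), gn.Leak(12, sigma=1),
    nn.Softmax(dim=1),                 # s > 12
    # s == 12 (final block): no trailing activation; the 10-unit output
    # feeds the digit decode
    gn.Gather(12), gn.Leak(10, sigma=1), gn.Slide(10), gn.Leak(10, sigma=1),
)
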
Example #3
    def __init__(self, in_features, num_actions):
        super().__init__()

        # -------------------------------------------------------------------
        # Vision
        self.conv = nn.Sequential(
            nn.Conv2d(in_features[0], 32, kernel_size=8, stride=4), nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU())
        conv_features = self.conv_layer_size(in_features)

        # -------------------------------------------------------------------
        # Decision

        # TODO: shrink d before the AGN layer; the neural decode should
        # not be there.

        # Build glia decision layer
        glia1 = []
        for s in reversed(range(512 + 2, conv_features, 2)):
            glia1.append(gn.Gather(s, bias=False))
            glia1.append(torch.nn.ELU())
        self.fc1 = nn.Sequential(*glia1)

        # Linear neurons: decode the 512-unit glia output into action values
        self.fc2 = nn.Linear(512, num_actions)
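
The snippet stops at __init__ and does not show a forward method. A minimal sketch of how these pieces would typically be chained (the method below is an assumption, not taken from the source):

    def forward(self, x):
        # Hypothetical forward pass: vision -> flatten -> glia decision
        # stack -> linear action readout.
        x = self.conv(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, conv_features)
        x = self.fc1(x)            # Gather stack narrows the features toward 512
        return self.fc2(x)         # (batch, num_actions)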