Example #1
    def __init__(self,
                 minimap_channels,
                 screen_channels,
                 screen_resolution,
                 nonspatial_obs_dim,
                 num_action):
        super(AtariNet, self).__init__()

        # spatial features
        # no 'same' padding here: each conv shrinks the spatial size,
        # which _conv_output_shape recomputes below
        self.minimap_conv_layers = nn.Sequential(
            nn.Conv2d(minimap_channels, 16, 8, stride=4),  # shape (N, 16, (m - 8)//4 + 1, ...)
            nn.ReLU(),
            nn.Conv2d(16, 32, 4, stride=2),  # shape (N, 32, (m' - 4)//2 + 1, ...)
            nn.ReLU(),
            Flatten()
        )

        self.screen_conv_layers = nn.Sequential(
            nn.Conv2d(screen_channels, 16, 8, stride=4),  # shape (N, 16, (m - 8)//4 + 1, ...)
            nn.ReLU(),
            nn.Conv2d(16, 32, 4, stride=2),  # shape (N, 32, (m' - 4)//2 + 1, ...)
            nn.ReLU(),
            Flatten()
        )

        # non-spatial features
        self.nonspatial_dense = nn.Sequential(
            nn.Linear(nonspatial_obs_dim, 256),
            nn.Tanh()
        )

        # compute the conv output shape for the given screen resolution
        shape_conv = self._conv_output_shape(screen_resolution, kernel_size=8, stride=4)
        shape_conv = self._conv_output_shape(shape_conv, kernel_size=4, stride=2)

        # state representations
        self.layer_hidden = nn.Sequential(nn.Linear(32 * shape_conv[0] * shape_conv[1] + 256, 256),
                                          nn.ReLU()
                                         )
        # output layers
        self.layer_value = nn.Linear(256, 1)
        self.layer_action = nn.Linear(256, num_action)
        self.layer_screen1_x = nn.Linear(256, screen_resolution[0])
        self.layer_screen1_y = nn.Linear(256, screen_resolution[1])
        self.layer_screen2_x = nn.Linear(256, screen_resolution[0])
        self.layer_screen2_y = nn.Linear(256, screen_resolution[1])

        self.apply(init_weights)  # weight initialization
        self.train()  # train mode
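
Example #1 leans on two helpers that are not shown: a Flatten module and a _conv_output_shape method. A minimal sketch of what they could look like, assuming standard unpadded convolution arithmetic (names and signatures are guesses, not the project's actual code):

import torch.nn as nn


class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension."""
    def forward(self, x):
        return x.view(x.size(0), -1)


def conv_output_shape(hw, kernel_size=1, stride=1, padding=0, dilation=1):
    """Spatial (H, W) produced by a Conv2d; mirrors what the
    _conv_output_shape method in Example #1 presumably computes."""
    h = (hw[0] + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
    w = (hw[1] + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1
    return h, w
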
Example #2
    def __init__(self, config):
        super(BranchedCNN, self).__init__()
        self.stem = nn.Sequential(
            Conv_BN_Relu(3, 32, kernel_size=1, stride=1, padding=0),
            Conv_BN_Relu(32, 64, kernel_size=3, stride=1, padding=1),
            Conv_BN_Relu(64, 128, kernel_size=1, stride=1, padding=0))

        self.conv1 = nn.Sequential(
            Conv_BN_Relu(128, 128, kernel_size=1, stride=1, padding=0),
            Conv_BN_Relu(128, 256, kernel_size=3, stride=1, padding=1),
            Conv_BN_Relu(256, 128, kernel_size=1, stride=1, padding=0),
            nn.MaxPool2d(2, stride=2),
            Conv_BN_Relu(128, 64, kernel_size=5, stride=1, padding=1),
            nn.MaxPool2d(2, stride=2))

        self.conv2 = nn.Sequential(
            nn.UpsamplingNearest2d(scale_factor=2),
            Conv_BN_Relu(64, 128, kernel_size=1, stride=1, padding=0),
            Conv_BN_Relu(128, 128, kernel_size=3, stride=1, padding=1),
            Conv_BN_Relu(128, 256, kernel_size=1, stride=1, padding=0),
            nn.UpsamplingNearest2d(scale_factor=2),
        )

        self.feats_decoder = nn.Sequential(
            Conv_BN_Relu(128, 1, kernel_size=1, stride=1, padding=0),
            nn.MaxPool2d(4, stride=2), Flatten(), nn.Dropout(0.5),
            nn.Linear(3025, 1024),
            nn.BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True),
            nn.ReLU(), nn.Dropout(0.2), nn.Linear(1024, NUM_CLASSES))

        self.conv1_decoder = nn.Sequential(
            Conv_BN_Relu(64, 1, kernel_size=1, stride=1, padding=0), Flatten(),
            nn.Dropout(0.5), nn.Linear(729, 512),
            nn.BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True),
            nn.ReLU(), nn.Dropout(0.2), nn.Linear(512, NUM_CLASSES))

        self.conv2_decoder = nn.Sequential(
            nn.MaxPool2d(2, stride=2),
            Conv_BN_Relu(256, 3, kernel_size=1, stride=1, padding=0),
            Flatten(), nn.Dropout(0.5), nn.Linear(8748, 1024),
            nn.BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True),
            nn.ReLU(), nn.Dropout(0.2), nn.Linear(1024, NUM_CLASSES))

        self.conv1_conv2_short = nn.Sequential(
            Conv_BN_Relu(64, 256, kernel_size=1, stride=1, padding=0),
            nn.UpsamplingNearest2d(scale_factor=2),
            Conv_BN_Relu(256, 256, kernel_size=1, stride=1, padding=0),
            nn.UpsamplingNearest2d(scale_factor=2),
        )
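
Example #2 builds every branch from a Conv_BN_Relu block that is defined elsewhere in that project. A plausible minimal version, assuming the obvious Conv2d + BatchNorm2d + ReLU composition (the real definition may differ in details such as bias handling):

import torch.nn as nn


class Conv_BN_Relu(nn.Module):
    """Conv2d followed by BatchNorm2d and ReLU."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(Conv_BN_Relu, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size,
                      stride=stride, padding=padding, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.block(x)
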
Example #3
    def __init__(self):
        super().__init__()

        # Seg Net
        # self.crop_seq = [256, 128, 32, 18]
        self.filter_seq = [128, 256, 512, 1024]

        self.conv1 = UNetConvModule(3, self.filter_seq[0])
        self.pc1 = UNetPoolCropModule(crop_size=None)  # 128

        self.conv2 = UNetConvModule(self.filter_seq[0], self.filter_seq[1])
        self.pc2 = UNetPoolCropModule(crop_size=None)  # 64

        self.conv3 = UNetConvModule(self.filter_seq[1], self.filter_seq[2])
        self.pc3 = UNetPoolCropModule(crop_size=None)

        # Aggregate Net
        self.agg1 = UNetConvModule(self.filter_seq[0] * 2, self.filter_seq[0])
        self.agg2 = UNetConvModule(self.filter_seq[1] * 2,
                                   self.filter_seq[0],
                                   upsample=True)
        self.agg3 = UNetConvModule(self.filter_seq[2],
                                   self.filter_seq[1],
                                   upsample=True)

        # Output Layer
        self.out_conv = nn.Conv2d(self.filter_seq[0], 1, 3, padding=1)
        self.flatten = Flatten()
        self.out_dense = nn.Linear(12544, NUM_CLASSES)
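
Example #3 depends on UNetConvModule and UNetPoolCropModule from its own codebase. Purely as orientation, a U-Net style double-convolution block often looks like the sketch below; the upsample flag and the rest of this class are assumptions, not the project's actual implementation:

import torch.nn as nn


class UNetConvModule(nn.Module):
    """Two 3x3 conv + ReLU layers, optionally preceded by 2x nearest upsampling."""
    def __init__(self, in_channels, out_channels, upsample=False):
        super().__init__()
        layers = []
        if upsample:
            layers.append(nn.UpsamplingNearest2d(scale_factor=2))
        layers += [
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
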
Example #4
    def __init__(self, input_size):
        """
        Parameters
        ----------
        input_size : (int, int, int)
            Input size.
        """
        super(BayesianConv5Dense1, self).__init__(input_size)

        self.layers = nn.ModuleList([
            BayesianConv2d(input_size[0], 10, kernel_size=(10, 1), padding=(5, 0)),
            nn.Sigmoid(),
            BayesianConv2d(10, 10, kernel_size=(10, 1), padding=(4, 0)),
            nn.Sigmoid(),
            BayesianConv2d(10, 10, kernel_size=(10, 1), padding=(5, 0)),
            nn.Sigmoid(),
            BayesianConv2d(10, 10, kernel_size=(10, 1), padding=(4, 0)),
            nn.Sigmoid(),
            BayesianConv2d(10, 1, kernel_size=(3, 1), padding=(1, 0)),
            nn.Softplus(),
            Flatten(1 * input_size[1] * input_size[2]),
            BayesianLinear(1 * input_size[1] * input_size[2], 100),
            nn.Softplus(),
            BayesianLinear(100, 1),
            nn.Softplus()
        ])
Example #5
    def __init__(self):
        super(CriticNet, self).__init__()
        # process observation
        # spatial features
        # apply padding as 'same', padding = (kernel - 1)/2
        self.minimap_conv_layers = conv_minimap
        self.screen_conv_layers = conv_screen

        # non-spatial features
        self.nonspatial_dense = dense_nonspatial

        # process action
        # spatial action
        self.conv_action = nn.Sequential(
            nn.Conv2d(2, 16, 5, stride=1, padding=2),  # shape (N, 16, m, m)
            nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=1, padding=1),  # shape (N, 32, m, m)
            nn.ReLU())

        # non-spatial action
        self.action_dense = nn.Sequential(nn.Linear(arglist.NUM_ACTIONS, 32),
                                          nn.ReLU(), Dense2Conv())

        # state representations
        # screen + minimap + nonspatial_obs + spatial_act + nonspatial_act
        self.layer_hidden = nn.Sequential(
            nn.Conv2d(32 * 5, 64, 3, stride=1, padding=1), nn.ReLU(),
            nn.Conv2d(64, 1, 1), nn.ReLU(), Flatten())
        # output layers
        self.layer_value = nn.Linear(arglist.FEAT2DSIZE * arglist.FEAT2DSIZE,
                                     1)
        self.apply(init_weights)  # weight initialization
        self.train()  # train mode
Example #6
    def __init__(self):
        super(FullyConvNet, self).__init__()

        # spatial features
        self.minimap_conv_layers = nn.Sequential(
            nn.Conv2d(minimap_channels, 16, 5, stride=1,
                      padding=2),  # shape (N, 16, m, m)
            nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=1, padding=1),  # shape (N, 32, m, m)
            nn.ReLU())
        self.screen_conv_layers = nn.Sequential(
            nn.Conv2d(screen_channels, 16, 5, stride=1,
                      padding=2),  # shape (N, 16, m, m)
            nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=1, padding=1),  # shape (N, 32, m, m)
            nn.ReLU())

        # non-spatial features
        self.nonspatial_dense = nn.Sequential(
            nn.Linear(arglist.NUM_ACTIONS, 32), nn.ReLU(), Dense2Conv())

        # state representations
        self.layer_hidden = nn.Sequential(
            nn.Conv2d(32 * 3, 64, 3, stride=1, padding=1), nn.ReLU())
        # output layers: policy
        self.layer_action = nn.Sequential(
            nn.Conv2d(64, 1, 1), nn.ReLU(), Flatten(),
            nn.Linear(arglist.FEAT2DSIZE * arglist.FEAT2DSIZE,
                      arglist.NUM_ACTIONS))
        self.layer_screen1 = nn.Conv2d(64, 1, 1)
        self.layer_screen2 = nn.Conv2d(64, 1, 1)

        # output layers: Q-values
        self.layer_q_action = nn.Sequential(
            nn.Conv2d(64, 1, 1), nn.ReLU(), Flatten(),
            nn.Linear(arglist.FEAT2DSIZE * arglist.FEAT2DSIZE,
                      arglist.NUM_ACTIONS))
        self.layer_q_screen1 = nn.Conv2d(64, 1, 1)
        self.layer_q_screen2 = nn.Conv2d(64, 1, 1)

        self.apply(init_weights)  # weight initialization
        self.train()  # train mode
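
Examples #5 and #6 funnel non-spatial features into the convolutional stream through a Dense2Conv helper that is not shown. A sketch of the usual trick, assuming the vector is tiled to the shared arglist.FEAT2DSIZE resolution used throughout these networks:

import torch.nn as nn


class Dense2Conv(nn.Module):
    """Broadcast a (N, C) vector to a (N, C, H, W) map so it can be stacked
    with the screen/minimap feature maps (H = W = arglist.FEAT2DSIZE)."""
    def forward(self, x):
        out = x.unsqueeze(-1).unsqueeze(-1)    # (N, C, 1, 1)
        size = arglist.FEAT2DSIZE              # assumed project-wide config value
        return out.expand(-1, -1, size, size)
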
Example #7
    def _initialize_spatial_actions(self, in_channels):
        '''Initialize spatial action operations'''

        out = {}
        for name, arg_type in actions.TYPES._asdict().items():
            if name in ['screen', 'screen2', 'minimap']:
                out[arg_type.id] = nn.Sequential(
                    nn.Conv2d(in_channels, 1, 1, stride=1, padding=0),
                    Flatten(), nn.Softmax(dim=1))

        return out
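
A practical note on Example #7: returning a plain Python dict of modules means the caller must register them explicitly, otherwise their parameters are invisible to .parameters() and .to(device). A hedged variant that wraps the heads in nn.ModuleDict (whose keys must be strings), with Flatten assumed as defined earlier:

import torch.nn as nn
from pysc2.lib import actions   # assumed import in the original project


def _initialize_spatial_actions(self, in_channels):
    '''Build one 1x1-conv softmax head per spatial argument type and register it.'''
    heads = {}
    for name, arg_type in actions.TYPES._asdict().items():
        if name in ['screen', 'screen2', 'minimap']:
            heads[str(arg_type.id)] = nn.Sequential(
                nn.Conv2d(in_channels, 1, 1, stride=1, padding=0),
                Flatten(), nn.Softmax(dim=1))
    return nn.ModuleDict(heads)
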
Example #8
    def __init__(self, input_size):
        """
        Parameters
        ----------
        input_size : (int, int, int)
            Input size.
        """
        super(FrequentistConv5Dense1, self).__init__(input_size)

        self.layers = nn.ModuleList([
            nn.Conv2d(input_size[0],
                      10,
                      kernel_size=(10, 1),
                      padding=(5, 0),
                      bias=False),
            nn.Tanh(),
            nn.Conv2d(10, 10, kernel_size=(10, 1), padding=(4, 0), bias=False),
            nn.Tanh(),
            nn.Conv2d(10, 10, kernel_size=(10, 1), padding=(5, 0), bias=False),
            nn.Tanh(),
            nn.Conv2d(10, 10, kernel_size=(10, 1), padding=(4, 0), bias=False),
            nn.Tanh(),
            nn.Conv2d(10, 1, kernel_size=(3, 1), padding=(1, 0), bias=False),
            nn.Tanh(),
            Flatten(1 * input_size[1] * input_size[2]),
            nn.Dropout(0.5),
            nn.Linear(1 * input_size[1] * input_size[2], 100, bias=False),
            nn.Linear(100, 1, bias=False)
            #nn.Tanh()
        ])

        def weights_init(m):
            """Xavier initialization.

            Parameters
            ----------
            m : Module
                Layer.
            """
            classname = m.__class__.__name__
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
                # Xavier initialization not defined for scalar values
                #nn.init.xavier_normal_(m.bias.data)

        self.apply(weights_init)
Example #9
    def __init__(self, input_size):
        """
        Parameters
        ----------
        input_size : (int, int, int)
            Input size.
        """
        super(FrequentistConv2Pool2, self).__init__(input_size)

        self.layers = nn.ModuleList([
            nn.Conv2d(input_size[0], 8, kernel_size=(5, 14), bias=False),
            nn.Sigmoid(),
            nn.AvgPool2d(kernel_size=(2, 1)),
            nn.Conv2d(8, 14, kernel_size=(2, 1), bias=False),
            nn.Sigmoid(),
            nn.AvgPool2d(kernel_size=(2, 1)),
            Flatten(14 * int((((input_size[1] - 4) / 2) - 1) / 2) *
                    (input_size[2] - 13)),
            nn.Linear(14 * int((((input_size[1] - 4) / 2) - 1) / 2) *
                      (input_size[2] - 13), 1, bias=False)
        ])
Example #10
    def __init__(self, input_size):
        """
        Parameters
        ----------
        input_size : (int, int, int)
            Input size.
        """
        super(BayesianDense3, self).__init__(input_size)

        self.layers = nn.ModuleList([
            Flatten(input_size[0] * input_size[1] * input_size[2]),
            BayesianLinear(input_size[0] * input_size[1] * input_size[2], 100),
            nn.Sigmoid(),
            BayesianLinear(100, 100),
            nn.Sigmoid(),
            BayesianLinear(100, 100),
            nn.Sigmoid(),
            BayesianLinear(100, 1),
            nn.Softplus()
        ])
Example #11
    def __init__(self, input_size):
        """
        Parameters
        ----------
        input_size : (int, int, int)
            Input size.
        """
        super(FrequentistDense3, self).__init__(input_size)

        self.layers = nn.ModuleList([
            Flatten(input_size[0] * input_size[1] * input_size[2]),
            nn.Linear(input_size[0] * input_size[1] * input_size[2],
                      100,
                      bias=False),
            nn.Sigmoid(),
            nn.Linear(100, 100, bias=False),
            nn.Sigmoid(),
            nn.Linear(100, 100, bias=False),
            nn.Sigmoid(),
            nn.Linear(100, 1, bias=False)
        ])
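
The Bayesian and Frequentist models above keep their layers in an nn.ModuleList, which registers parameters but, unlike nn.Sequential, has no forward of its own; the shared base class presumably chains the layers itself. A minimal sketch of such a forward, assuming a plain feed-forward chain:

    def forward(self, x):
        # nn.ModuleList only registers the layers; apply them in order here.
        for layer in self.layers:
            x = layer(x)
        return x
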
Example #12
    def __init__(self):
        super(CriticNet, self).__init__()
        # process observation
        # spatial features
        # apply padding as 'same', padding = (kernel - 1)/2
        self.minimap_conv_layers = conv_minimap
        self.screen_conv_layers = conv_screen

        # non-spatial features
        self.nonspatial_dense = dense_nonspatial

        # state representations
        # screen + minimap + nonspatial_obs
        self.layer_hidden = nn.Sequential(nn.Conv2d(32 * 3, 64, 3, stride=1, padding=1),
                                          nn.ReLU(),
                                          nn.Conv2d(64, 1, 1),
                                          nn.ReLU(),
                                          Flatten())
        # output layers
        self.layer_value = nn.Linear(arglist.FEAT2DSIZE * arglist.FEAT2DSIZE, 1)
        self.apply(init_weights)  # weight initialization
        self.train()  # train mode
Example #13
    def __init__(self):
        super(ActorNet, self).__init__()
        # spatial features
        self.minimap_conv_layers = conv_minimap
        self.screen_conv_layers = conv_screen

        # non-spatial features
        self.nonspatial_dense = dense_nonspatial

        # state representations
        self.layer_hidden = nn.Sequential(nn.Conv2d(32 * 3, 64, 3, stride=1, padding=1),
                                          nn.ReLU())
        # output layers
        self.layer_action = nn.Sequential(nn.Conv2d(64, 1, 1),
                                          nn.ReLU(),
                                          Flatten(),
                                          nn.Linear(arglist.FEAT2DSIZE * arglist.FEAT2DSIZE, arglist.NUM_ACTIONS))
        self.layer_screen1 = nn.Conv2d(64, 1, 1)
        self.layer_screen2 = nn.Conv2d(64, 1, 1)

        self.apply(init_weights)  # weight initialization
        self.train()  # train mode
Example #14
    def __init__(self, input_size):
        """
        Parameters
        ----------
        input_size : (int, int, int)
            Input size.
        """
        super(BayesianConv2Pool2, self).__init__(input_size)

        self.layers = nn.ModuleList([
            BayesianConv2d(input_size[0], 8, kernel_size=(5, 14)),
            nn.Sigmoid(),
            nn.AvgPool2d(kernel_size=(2, 1)),
            BayesianConv2d(8, 14, kernel_size=(2, 1)),
            nn.Sigmoid(),
            nn.AvgPool2d(kernel_size=(2, 1)),
            Flatten(14 * int(
                (((input_size[1] - 4) / 2) - 1) / 2) * (input_size[2] - 13)),
            BayesianLinear(
                14 * int((((input_size[1] - 4) / 2) - 1) / 2) *
                (input_size[2] - 13), 1),
            nn.Softplus()
        ])
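
The flatten sizes in Examples #9 and #14 follow from the layer shapes: the (5, 14) convolution with no padding shrinks an (H, W) input to (H - 4, W - 13), each (2, 1) average pool halves the height, and the (2, 1) convolution trims one more row, leaving 14 channels of size (((H - 4)/2 - 1)/2, W - 13). A quick check of that arithmetic (the input size below is made up purely for illustration):

H, W = 100, 20                 # hypothetical input spatial size
h, w = H - 4, W - 13           # Conv2d kernel (5, 14), no padding
h = h // 2                     # AvgPool2d kernel (2, 1)
h = h - 1                      # Conv2d kernel (2, 1)
h = h // 2                     # AvgPool2d kernel (2, 1)
print(14 * h * w)              # equals 14 * int((((H - 4) / 2) - 1) / 2) * (W - 13)
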
Example #15
    def __init__(self, config):
        super(BaselineCNN, self).__init__()
        self.model = nn.Sequential(
            # Conv_Relu_BatchNorm --> 32 x 32
            nn.Conv2d(3, 32, kernel_size=7, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True),
            nn.MaxPool2d(4, stride=2),

            # Conv_Relu_BatchNorm_Maxpool --> 32 x 14 x 14
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True),
            nn.MaxPool2d(2, stride=2),

            # Aggregation Layers
            Flatten(),  # see above for explanation
            nn.Linear(131072, 2048),  # affine layer
            nn.ReLU(inplace=False),
            nn.Dropout(p=0.55, inplace=False),
            nn.Linear(2048, NUM_CLASSES),  # affine layer
        )
        self.model = self.model.type(config.dtype)
Example #16
def createModel(config):
    model = nn.Sequential(
        # Conv_Relu_BatchNorm --> 32 x 32
        nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=2),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True),
        nn.MaxPool2d(4, stride=2),

        # Conv_Relu_BatchNorm_Maxpool --> 32 x 14 x 14
        nn.Conv2d(32, 32, kernel_size=7, stride=2, padding=2),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True),
        nn.MaxPool2d(2, stride=2),

        # Aggregation Layers
        Flatten(),  # see above for explanation
        nn.Linear(1152, 512),  # affine layer
        nn.ReLU(inplace=False),
        #nn.Dropout(p=0.45, inplace = False), #don't use dropout until I overfit..
        nn.Linear(512, NUM_CLASSES),  # affine layer
    )
    if config.use_gpu:
        model = model.cuda()
    return model
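
The 1152-unit linear layer in Example #16 implies an input of roughly 112x112: with the two 7x7, stride-2, padding-2 convolutions and the two max pools, any side length from 105 to 120 pixels flattens to 32 x 6 x 6 = 1152 features. A hedged usage sketch (the config object here is a stand-in, and Flatten and NUM_CLASSES are assumed to be defined as in the other examples):

from types import SimpleNamespace
import torch

config = SimpleNamespace(use_gpu=torch.cuda.is_available())
model = createModel(config)

x = torch.randn(8, 3, 112, 112)   # 112x112 RGB batch -> 32 x 6 x 6 = 1152 features
if config.use_gpu:
    x = x.cuda()
scores = model(x)                 # shape (8, NUM_CLASSES)
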
Example #17
    def _spatial_outputs(self, in_):
        return nn.DataParallel(
            nn.Sequential(nn.Conv2d(in_, 1, 1, stride=1), Flatten(),
                          nn.Softmax(dim=1)))