# Imports assumed by all snippets below. BaseN (aliased B in QFunction_S)
# refers to the repo's base-network helper module.
import numpy as np
import torch
import torch.nn as nn
from torch import optim


def __init__(self, input_shape, owner_name=""):
    super(SRNetS, self).__init__(input_shape, 1, owner_name)
    self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8), nn.Softplus(),
                               BaseN.conv3_2(8, 16))]
    # Argument order matches the (conv, input_shape) convention used by the
    # sibling constructors below.
    x = BaseN.output_shape(self.conv[0], input_shape)
    self.model1 = nn.Sequential(self.conv[0], BaseN.Flatten(),
                                nn.Linear(np.prod(x), 256),
                                nn.Linear(256, 64), nn.Tanh())
    self.model2 = nn.Sequential(BaseN.conv3_2(input_shape[0], 8), nn.Softplus(),
                                BaseN.conv3_2(8, 16), BaseN.Flatten(),
                                nn.Linear(np.prod(x), 256),
                                nn.Linear(256, 64), nn.Tanh())
    # model3 consumes the concatenated 64+64 features from model1 and model2.
    self.model3 = nn.Sequential(nn.Linear(128, 64), nn.Linear(64, 1))
    self.compile()
    self.loss = nn.MSELoss()
    self.optimizer = torch.optim.RMSprop(
        filter(lambda p: p.requires_grad, self.parameters()), lr=1e-3)
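# Every constructor here relies on BaseN.output_shape to size the first
# nn.Linear layer after the conv stack. A minimal sketch of such a helper,
# assuming it forwards a dummy batch through the layer and returns the
# per-sample output shape (the name and call signature are taken from this
# file; the body is an assumption, not the repo's actual implementation):
def output_shape(layer, input_shape):
    """Return the shape (without batch dim) that `layer` produces for `input_shape`."""
    with torch.no_grad():
        dummy = torch.zeros(1, *input_shape)   # batch of one zero-filled input
        return tuple(layer(dummy).shape[1:])   # drop the batch dimension
# With this, np.prod(output_shape(conv, input_shape)) gives the flattened
# feature count fed into nn.Linear, as done throughout these constructors.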
def __init__(self, input_shape, output_shape, **kwargs):
    # Pass input_shape/output_shape through to the base class, consistent with
    # the sibling constructors; self.output_shape is referenced below.
    super(ConvNetMNIST, self).__init__(input_shape, output_shape, **kwargs)
    self.n = output_shape
    self.conv = [BaseN.ResNetBlock(1, 32), BaseN.conv3_2(32, 64)]
    x = BaseN.output_shape(self.conv[0], input_shape)
    self.model = nn.Sequential(self.conv[0], nn.Softplus(),
                               BaseN.Flatten(),
                               nn.Linear(np.prod(x), 512),
                               nn.Linear(512, 256), nn.Tanh(),
                               BaseN.EigenLayer(256, self.output_shape[0]))
    self.compile()
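# BaseN.Flatten bridges the conv feature maps to the linear layers. In PyTorch
# versions that predate nn.Flatten, the usual hand-rolled equivalent looks like
# this (a sketch of the assumed behaviour, not the repo's actual module):
class Flatten(nn.Module):
    def forward(self, x):
        # Collapse all dimensions except the batch dimension.
        return x.view(x.size(0), -1)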
def __init__(self, input_shape, output_shape, owner_name=""):
    super(ConvNet, self).__init__(input_shape, output_shape, owner_name)
    # The third conv block takes 16 input channels, matching the 16-channel
    # output of the second block.
    self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8), nn.ReLU(),
                               BaseN.conv3_2(8, 16), nn.ReLU(),
                               BaseN.conv3_2(16, 8))]
    x = BaseN.output_shape(self.conv[0], input_shape)
    self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),
                               nn.Linear(np.prod(x), 512),
                               BaseN.AdaptiveTanh(),
                               nn.Linear(512, 256),
                               BaseN.EigenLayer(256, self.output_shape[0], bias=False))
    self.compile()
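# conv3_2 is the basic conv building block in all of these networks. Judging
# by the name, a plausible reading is a 3x3 convolution with stride 2; only
# the in/out channel arguments are confirmed by this file, so the kernel,
# stride, and padding below are assumptions:
def conv3_2(in_channels, out_channels):
    # 3x3 kernel, stride 2, padding 1: roughly halves each spatial dimension.
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1)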
def __init__(self, input_shape, output_shape, owner_name=""):
    super(ConvNetSimple, self).__init__(input_shape, output_shape, owner_name)
    self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4), nn.Softplus())]
    x = BaseN.output_shape(self.conv[0], input_shape)
    self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),
                               nn.Linear(np.prod(x), 512),
                               nn.Linear(512, 256), nn.Tanh(),
                               nn.Linear(256, self.output_shape[0]))
    self.compile()
def __init__(self, input_shape, output_shape, **kwargs):
    super(QFunction_S, self).__init__(input_shape, output_shape, **kwargs)
    self.conv = [nn.Sequential(nn.Conv2d(self.input_shape[0], 8, kernel_size=8, stride=4),
                               nn.ReLU(),
                               nn.Conv2d(8, 16, kernel_size=4, stride=2), nn.Tanh(),
                               nn.Conv2d(16, 32, kernel_size=3, stride=2), nn.Tanh())]
    # output_shape expects a module, not the list, so index into self.conv;
    # self.output_shape is indexed as in the sibling constructors.
    x = B.output_shape(self.conv[0], self.input_shape)
    self.model = nn.Sequential(self.conv[0], B.Flatten(),
                               nn.Linear(np.prod(x), 512), nn.Tanh(),
                               nn.Linear(512, self.output_shape[0]))
    self.compile()
    self.loss = torch.nn.SmoothL1Loss()
    self.optimizer = optim.RMSprop(self.parameters(), lr=0.00025,
                                   alpha=0.95, eps=0.01, momentum=0.95)
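# QFunction_S pairs SmoothL1Loss (Huber loss, robust to outlier TD errors)
# with RMSprop at the classic DQN hyperparameters (lr=0.00025, alpha=0.95,
# eps=0.01, momentum=0.95). A self-contained sketch of one training step
# under that setup; the dummy network and tensors here are illustrative
# stand-ins, not part of the repo:
dummy_q = nn.Sequential(nn.Linear(4, 32), nn.Tanh(), nn.Linear(32, 2))
loss_fn = torch.nn.SmoothL1Loss()
opt = optim.RMSprop(dummy_q.parameters(), lr=0.00025, alpha=0.95,
                    eps=0.01, momentum=0.95)

states = torch.randn(8, 4)     # batch of 8 four-dimensional states
targets = torch.randn(8, 2)    # e.g. bootstrapped TD targets per action
opt.zero_grad()
loss = loss_fn(dummy_q(states), targets)
loss.backward()
opt.step()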
def __init__(self, input_shape, output_shape, owner_name=""):
    super(ConvNetBigAtari, self).__init__(input_shape, output_shape, owner_name)
    self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8), nn.Softplus(),
                               BaseN.conv3_2(8, 16),
                               BaseN.conv3_2(16, 32))]
    x = BaseN.output_shape(self.conv[0], input_shape)
    self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),
                               nn.Linear(np.prod(x), 512),
                               nn.Linear(512, 512), nn.Tanh(),
                               nn.Linear(512, 1024),
                               BaseN.EigenLayer(1024, self.output_shape[0]))
    self.compile()