def _build_model(self, blur, brightness, need_ray, need_speed):
    """Build the camera + ray + vector encoder with optional image augmentations.

    Args:
        blur: Gaussian-blur kernel/sigma; 0 disables the blurrer entirely.
        brightness: fixed brightness factor applied via ColorJitter.
        need_ray: stored flag; consumed elsewhere in the class.
        need_speed: stored flag; consumed elsewhere in the class.
    """
    assert self.obs_shapes[0] == (84, 84, 3)
    assert self.obs_shapes[1] == (2,)  # ray
    assert self.obs_shapes[2] == (8,)  # vector

    self.need_ray = need_ray
    self.need_speed = need_speed

    # Visual encoder -> dense -> recurrent core over continuous actions.
    self.conv = m.ConvLayers(84, 84, 3, 'simple',
                             out_dense_n=64, out_dense_depth=2)
    self.dense = m.LinearLayers(self.conv.output_size,
                                dense_n=64, dense_depth=1)
    self.rnn = m.GRU(64 + self.c_action_size, 64, 1)

    # A zero blur radius means "no blurring" rather than an identity transform.
    self.blurrer = m.Transform(T.GaussianBlur(blur, sigma=blur)) if blur != 0 else None
    self.brightness = m.Transform(T.ColorJitter(brightness=(brightness, brightness)))

    crop_then_resize = torch.nn.Sequential(
        T.RandomCrop(size=(50, 50)),
        T.Resize(size=(84, 84)),
    )
    # One of these augmentations is chosen at random per application.
    self.random_transformers = T.RandomChoice([
        m.Transform(SaltAndPepperNoise(0.2, 0.5)),
        m.Transform(GaussianNoise()),
        m.Transform(T.GaussianBlur(9, sigma=9)),
        m.Transform(crop_then_resize),
    ])
def _build_model(self):
    """Dense head mapping the state to obs 0 plus the (possibly trimmed) vector obs."""
    vec_size = self.obs_shapes[1][0]
    if not self.use_extra_data:
        # The extra-data segment is excluded from the prediction target.
        vec_size -= EXTRA_SIZE
    self.dense = m.LinearLayers(self.state_size,
                                dense_n=64,
                                dense_depth=2,
                                output_size=self.obs_shapes[0][0] + vec_size)
def _build_model(self, blur, brightness, ray_random, need_speed):
    """Build the camera + 360-degree ray + vector encoder with augmentations.

    Args:
        blur: Gaussian-blur kernel/sigma; 0 disables the blurrer entirely.
        brightness: fixed brightness factor applied via ColorJitter.
        ray_random: stored value; consumed elsewhere in the class.
        need_speed: stored flag; consumed elsewhere in the class.
    """
    assert self.obs_shapes[0] == (84, 84, 3)
    assert self.obs_shapes[1] == (1442, )  # ray (1 + 360 + 360) * 2
    assert self.obs_shapes[2] == (6, )  # vector

    self.ray_random = ray_random
    self.need_speed = need_speed

    # A zero blur radius means "no blurring" rather than an identity transform.
    self.blurrer = m.Transform(T.GaussianBlur(blur, sigma=blur)) if blur != 0 else None
    self.brightness = m.Transform(
        T.ColorJitter(brightness=(brightness, brightness)))

    # Permutation over the flat ray observation: entries for positions
    # 1, 3, 5, ... taken in reverse order, then positions 2, 4, 6, ...,
    # with two consecutive values (pair, pair + 1) per position.
    index = []
    half = RAY_SIZE // 2
    for i in range(half - 1, -1, -1):
        pair = (i * 2 + 1) * 2
        index.extend((pair, pair + 1))
    for i in range(half):
        pair = (i * 2 + 2) * 2
        index.extend((pair, pair + 1))
    self.ray_index = index

    self.conv = m.ConvLayers(84, 84, 3, 'simple',
                             out_dense_n=64, out_dense_depth=2)
    self.ray_conv = m.Conv1dLayers(RAY_SIZE, 2, 'default',
                                   out_dense_n=64, out_dense_depth=2)
    self.vis_ray_dense = m.LinearLayers(
        self.conv.output_size + self.ray_conv.output_size,
        dense_n=64, dense_depth=1)
    self.rnn = m.GRU(64 + self.c_action_size, 64, 1)

    crop_then_resize = torch.nn.Sequential(
        T.RandomCrop(size=(50, 50)),
        T.Resize(size=(84, 84)),
    )
    # One of these augmentations is chosen at random per application.
    self.random_transformers = T.RandomChoice([
        m.Transform(SaltAndPepperNoise(0.2, 0.5)),
        m.Transform(GaussianNoise()),
        m.Transform(T.GaussianBlur(9, sigma=9)),
        m.Transform(crop_then_resize),
    ])
def _build_model(self):
    """Small visual encoder whose features are concatenated with the 2-dim vector obs.

    The dense layer's input width is derived from ``obs_shapes`` instead of a
    hard-coded constant, so it stays in sync with the asserts above.
    """
    assert self.obs_shapes[0] == (30, 30, 3)
    assert self.obs_shapes[1] == (2, )

    self.conv = m.ConvLayers(30, 30, 3, 'simple',
                             out_dense_n=64, out_dense_depth=2)
    # Was a hard-coded `+ 2`; use the declared vector-obs size directly
    # (same value, guaranteed by the assert — matches the sibling builders).
    self.dense = m.LinearLayers(self.conv.output_size + self.obs_shapes[1][0],
                                dense_n=64, dense_depth=1)
def _build_model(self):
    """Visual encoder feeding a GRU, fused with the vector obs (extra data trimmed)."""
    assert self.obs_shapes[0] == (30, 30, 3)
    assert self.obs_shapes[1] == (6, )

    self.conv = m.ConvLayers(30, 30, 3, 'simple',
                             out_dense_n=64, out_dense_depth=2)
    self.rnn = m.GRU(self.conv.output_size + self.c_action_size, 64, 1)

    # The extra-data portion of the vector obs is not fed to the dense head.
    vec_size = self.obs_shapes[1][0] - EXTRA_SIZE
    self.dense = m.LinearLayers(input_size=64 + vec_size,
                                dense_n=128,
                                dense_depth=2,
                                output_size=64)
def _build_model(self):
    """Encoder for three 84x84 camera observations plus an 11-dim vector obs.

    The dense layer's input width is derived from ``obs_shapes`` instead of a
    hard-coded constant, so it stays in sync with the asserts above.
    """
    assert self.obs_shapes[0] == (84, 84, 3)
    assert self.obs_shapes[1] == (84, 84, 3)
    assert self.obs_shapes[2] == (84, 84, 3)
    assert self.obs_shapes[3] == (11, )

    # 3 * 3 input channels — presumably the three camera obs stacked
    # channel-wise before the conv; confirm against the forward pass.
    self.conv = m.ConvLayers(84, 84, 3 * 3, 'simple',
                             out_dense_n=64, out_dense_depth=2)
    # Was a hard-coded `11`; derive from the declared vector-obs shape
    # (same value, guaranteed by the assert).
    self.dense = m.LinearLayers(
        self.conv.output_size + self.obs_shapes[3][0] - EXTRA_SIZE,
        dense_n=64, dense_depth=1)
    self.rnn = m.GRU(64 + self.c_action_size, 64, 1)
def _build_model(self):
    """Transposed-conv image decoder plus a dense head for the extra vector data."""
    # Upsampling stack: 32 -> 32 -> 16 -> 3 channels, LeakyReLU between layers.
    decoder = nn.Sequential(
        nn.ConvTranspose2d(32, 32, 4, 2),
        nn.LeakyReLU(),
        nn.ConvTranspose2d(32, 16, 8, 4),
        nn.LeakyReLU(),
        nn.ConvTranspose2d(16, 3, 3, 1),
        nn.LeakyReLU(),
    )
    self.conv_transpose = m.ConvTransposeLayers(self.state_size, 64, 1,
                                                2, 2, 32,
                                                conv_transpose=decoder)
    self.vec_dense = m.LinearLayers(self.state_size,
                                    dense_depth=2,
                                    output_size=EXTRA_SIZE)
def _build_model(self):
    """Dense head reconstructing obs 0, optionally excluding the extra-data segment."""
    target_size = self.obs_shapes[0][0]
    if not self.use_extra_data:
        # The extra-data segment is excluded from the reconstruction target.
        target_size -= EXTRA_SIZE
    # Keyword arguments replace the bare positionals (8, 2, ...) for
    # consistency with every sibling _build_model's LinearLayers call.
    self.dense = m.LinearLayers(self.state_size,
                                dense_n=8,
                                dense_depth=2,
                                output_size=target_size)
def _build_model(self):
    """Dense head whose output spans both observation vectors concatenated."""
    combined_size = self.obs_shapes[0][0] + self.obs_shapes[1][0]
    self.dense = m.LinearLayers(self.state_size,
                                dense_n=64,
                                dense_depth=2,
                                output_size=combined_size)