Code example #1
def get_patch_dimensions(sample_data, patch_size=16, overlap=2):
    """
    Gets the dimensions of the patched inputs. This is used for the initialization
    of GreedyInfoMaxBlocks.
    """
    # Only the patch-grid dimensions are needed; the patched tensor is unused
    _, n_patches_x, n_patches_y = patchify_inputs(sample_data, patch_size,
                                                  overlap)
    return n_patches_x, n_patches_y
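
These examples all delegate patch extraction to patchify_inputs. For reference, here is a minimal sketch of what such a helper could look like, assuming NCHW image batches and a stride of patch_size // overlap; the signature matches the call sites in these examples, but the body is an assumption rather than code from this repository.

import torch

def patchify_inputs(x, patch_size, overlap):
    # Assumed implementation sketch, not taken from the repository above.
    # x: (batch, channels, height, width). The stride is assumed to be
    # patch_size // overlap, so overlap=2 yields half-overlapping patches.
    step = patch_size // overlap
    x = x.unfold(2, patch_size, step).unfold(3, patch_size, step)
    # Now (batch, channels, n_patches_x, n_patches_y, patch_size, patch_size)
    n_patches_x, n_patches_y = x.shape[2], x.shape[3]
    # Fold the patch grid into the batch dimension so each patch becomes its
    # own sample: (batch * n_patches_x * n_patches_y, channels, patch, patch)
    x = x.permute(0, 2, 3, 1, 4, 5).reshape(
        -1, x.shape[1], patch_size, patch_size)
    return x, n_patches_x, n_patches_y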
Code example #2
def forward(self, x):
    # Patchify inputs
    x, n_patches_x, n_patches_y = data_utils.patchify_inputs(
        x, self.patch_size, self.overlap)
    x = self.encoder[0](x)
    # Save positive/contrastive samples for each encoder block
    log_f_module_list, true_f_module_list, z = self.encoder[1](x,
                                                               n_patches_x,
                                                               n_patches_y)
    # Lists of lists: each list has num_modules internal lists, with each
    # internal list containing k_predictions elements
    return log_f_module_list, true_f_module_list
Code example #3
def encode(self, x):
    # Patchify inputs
    x, n_patches_x, n_patches_y = data_utils.patchify_inputs(
        x, self.patch_size, self.overlap)
    x = self.encoder[0](x)
    # Compute encoded patch-level representation for each encoder block
    for module in self.encoder[1:]:
        # No need to detach between modules, as .encode() will only be called
        # under a torch.no_grad() scope
        x, out = module.encode(x, n_patches_x, n_patches_y)
    # Return patch-level representation from the last block
    return out
Code example #4
def forward(self, x):
    # Patchify inputs
    x, n_patches_x, n_patches_y = data_utils.patchify_inputs(
        x, self.patch_size, self.overlap)
    x = self.encoder[0](x)
    # Save positive/contrastive samples for each encoder block
    log_f_module_list = []
    for module in self.encoder[1:]:
        # log_f_list has k_predictions elements
        log_f_list, z = module(x, n_patches_x, n_patches_y)
        log_f_module_list.append(log_f_list)
        # Detach x to make sure no gradients flow between modules
        x = z.detach()
    # List of lists: log_f_module_list has num_modules internal lists, each
    # containing k_predictions elements
    return log_f_module_list
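
This forward pass returns num_modules lists, each holding k_predictions log-score entries. A hedged sketch of how such a structure might be reduced to one scalar loss per module, so each block can be trained greedily; the reduction below is an assumption, not the repository's actual loss code:

import torch

def module_losses(log_f_module_list):
    # Assumed reduction: average the negative log scores across the
    # k_predictions entries of each module's list, one loss per module.
    return [
        torch.stack([-log_f.mean() for log_f in log_f_list]).mean()
        for log_f_list in log_f_module_list
    ]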
Code example #5
def forward(self, x):
    # Patchify inputs and return only the patched tensor; the patch-grid
    # counts are discarded here
    x, n_patches_x, n_patches_y = data_utils.patchify_inputs(
        x, self.patch_size, self.overlap)
    return x
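
Putting the pieces together, a small smoke test for get_patch_dimensions from Code example #1 (the 64x64 input size is an assumption, and the expected 7x7 grid holds only under the assumed stride of patch_size // overlap from the sketch above):

import torch

# Assumed 64x64 RGB inputs; with patch_size=16 and an assumed stride of 8,
# the grid is (64 - 16) // 8 + 1 = 7 patches per side.
sample = torch.randn(4, 3, 64, 64)
n_patches_x, n_patches_y = get_patch_dimensions(sample, patch_size=16, overlap=2)
print(n_patches_x, n_patches_y)  # 7 7 under the stride assumption above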