def create_mask_montage(self, image, predictions): """ Create a montage showing the probability heatmaps for each one one of the detected objects Arguments: image (np.ndarray): an image as returned by OpenCV predictions (BoxList): the result of the computation by the model. It should contain the field `mask`. """ masks = predictions.get_field("mask") masks_per_dim = self.masks_per_dim masks = L.interpolate( masks.float(), scale_factor=1 / masks_per_dim ).byte() height, width = masks.shape[-2:] max_masks = masks_per_dim ** 2 masks = masks[:max_masks] # handle case where we have less detections than max_masks if len(masks) < max_masks: masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8) masks_padded[: len(masks)] = masks masks = masks_padded masks = masks.reshape(masks_per_dim, masks_per_dim, height, width) result = torch.zeros( (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8 ) for y in range(masks_per_dim): start_y = y * height end_y = (y + 1) * height for x in range(masks_per_dim): start_x = x * width end_x = (x + 1) * width result[start_y:end_y, start_x:end_x] = masks[y, x] return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
def forward(self, x):
    x = self.kps_score_lowres(x)
    x = layers.interpolate(
        x, scale_factor=self.up_scale, mode="bilinear", align_corners=False
    )
    return x
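# A minimal sketch of a module the forward above could belong to: a keypoint
# head that first predicts low-resolution keypoint scores with a transposed
# convolution and then upsamples them bilinearly. The layer shapes, the
# `up_scale` value, and the use of torch.nn.functional.interpolate in place
# of `layers.interpolate` are assumptions, not the original implementation.
import torch
from torch import nn
from torch.nn import functional as F


class KeypointPredictorSketch(nn.Module):
    def __init__(self, in_channels=256, num_keypoints=17):
        super().__init__()
        # 4x4 deconv with stride 2 doubles the spatial resolution
        self.kps_score_lowres = nn.ConvTranspose2d(
            in_channels, num_keypoints, kernel_size=4, stride=2, padding=1
        )
        self.up_scale = 2  # assumed additional upsampling factor

    def forward(self, x):
        x = self.kps_score_lowres(x)
        return F.interpolate(
            x, scale_factor=self.up_scale, mode="bilinear", align_corners=False
        )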
def forward(self, ft):
    ft = self.bo_input_xy(ft)
    ft_2x = self.conv5_bo_xy(ft)
    ft_2x = layers.interpolate(ft_2x, size=(48, 48), mode='bilinear', align_corners=True)
    x = self.bo_input_1_1(ft_2x)
    y = self.bo_input_2_1(ft_2x)
    x = self.conv5_bo_x(x)
    y = self.conv5_bo_y(y)
    return x, y
def forward(self, ft):
    ft = self.ke_input_xy(ft)
    ft = self.conv5_ke_xy(ft)
    ft_2x = layers.interpolate(
        ft, scale_factor=self.up_scale, mode='bilinear', align_corners=True
    )
    x = self.conv5_ke_x_shrink(ft_2x)
    y = self.conv5_ke_y_shrink(ft_2x)
    assert (
        x.size()[2:] == torch.Size([1, self.resol])
        and y.size()[2:] == torch.Size([self.resol, 1])
    ), "x y: " + str(x.size()) + ' ' + str(y.size())

    # mty
    # mty = torch.cat((x_tc, y_tc), dim=1)
    mty = self.cat_trans(ft)
    mty = self.mty(mty)
    assert mty.size()[1:] == torch.Size(
        [24, 1, 1]
    ), "mty w h should be 1, but got {}".format(str(mty.size()))
    return x, y, mty
def forward(self, x):
    return interpolate(
        x, scale_factor=self.scale, mode=self.mode, align_corners=self.align_corners
    )
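# A minimal sketch of the kind of wrapper module the one-line forward above
# suggests: an nn.Module that stores its interpolation settings and applies
# them in forward. The class name, the defaults, and the use of
# torch.nn.functional.interpolate in place of the module-level `interpolate`
# are assumptions for illustration.
import torch
from torch import nn
from torch.nn.functional import interpolate


class InterpolateSketch(nn.Module):
    def __init__(self, scale=2, mode="nearest", align_corners=None):
        super().__init__()
        self.scale = scale
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        return interpolate(
            x, scale_factor=self.scale, mode=self.mode, align_corners=self.align_corners
        )


# Usage: upsample a feature map by 2x with bilinear interpolation.
up = InterpolateSketch(scale=2, mode="bilinear", align_corners=False)
out = up(torch.randn(1, 8, 16, 16))  # -> shape (1, 8, 32, 32)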