def append(self, d):
    """
    Add a single image (and its annotations) to the blob.

    :param d: dict for one image with keys ``img``, ``img_size`` (h, w, scale),
        ``scale``, ``gt_boxes``, ``gt_classes``; optionally ``gt_relations``
        (consumed when ``self.is_rel``), ``proposals``, and ``depth_img``
        (consumed when ``self.use_depth``).
    :return: None (mutates the blob's per-image lists in place)
    """
    i = len(self.imgs)  # index of this image within the batch
    self.imgs.append(d['img'])
    h, w, scale = d['img_size']

    # all anchors
    self.im_sizes.append((h, w, scale))

    # Boxes are stored in the scaled-image coordinate frame.
    gt_boxes_ = d['gt_boxes'].astype(np.float32) * d['scale']
    self.gt_boxes.append(gt_boxes_)
    # Prepend the image index so per-image rows can still be told apart
    # after the whole batch is concatenated.
    self.gt_classes.append(np.column_stack((
        np.full(d['gt_classes'].shape[0], i, dtype=np.int64),
        d['gt_classes'],
    )))

    # Add relationship info
    if self.is_rel:
        self.gt_rels.append(np.column_stack((
            np.full(d['gt_relations'].shape[0], i, dtype=np.int64),
            d['gt_relations'],
        )))

    # Augment with anchor targets (training only).
    if self.is_train:
        train_anchors_, train_anchor_inds_, train_anchor_targets_, train_anchor_labels_ = \
            anchor_target_layer(gt_boxes_, (h, w))
        self.train_anchors.append(
            np.hstack((train_anchors_, train_anchor_targets_)))
        self.train_anchor_labels.append(np.column_stack((
            np.full(train_anchor_inds_.shape[0], i, dtype=np.int64),
            train_anchor_inds_,
            train_anchor_labels_,
        )))

    if 'proposals' in d:
        # The image index must be float32 here so it shares a dtype with the
        # scaled proposal boxes inside a single stacked array.
        self.proposals.append(np.column_stack((
            np.full(d['proposals'].shape[0], i, dtype=np.float32),
            d['scale'] * d['proposals'].astype(np.float32),
        )))

    # -- Add depth images to the blob
    if self.use_depth:
        self.depth_imgs.append(d['depth_img'])
        # NOTE(review): this is the tail of a classmethod whose header is not
        # visible in this chunk; presumably it builds the train/val loader
        # pair -- confirm against the full file.
        train_load = cls(
            dataset=train_data,
            # One batch per step spans all GPUs; each GPU sees `batch_size`.
            batch_size=batch_size * num_gpus,
            shuffle=True,
            num_workers=num_workers,
            collate_fn=lambda x: coco_collate(
                x, num_gpus=num_gpus, is_train=True),
            drop_last=True,
            # pin_memory=True,
            **kwargs,
        )
        val_load = cls(
            dataset=val_data,
            batch_size=batch_size * num_gpus,
            # Validation keeps dataset order deterministic.
            shuffle=False,
            num_workers=num_workers,
            collate_fn=lambda x: coco_collate(
                x, num_gpus=num_gpus, is_train=False),
            drop_last=True,
            # pin_memory=True,
            **kwargs,
        )
        return train_load, val_load


if __name__ == '__main__':
    # Smoke test: build the splits and run the anchor-target layer on the
    # first training example.
    train, val = CocoDetection.splits()
    gtbox = train[0]['gt_boxes']
    img_size = train[0]['img_size']
    # NOTE(review): anchor_target_layer is unpacked into 3 values here but
    # into 4 values in the append() methods above -- verify which signature
    # is current; one of the call sites looks stale.
    anchor_strides, labels, bbox_targets = anchor_target_layer(gtbox, img_size)
def append(self, d):
    """
    Append one image's data (object boxes, human boxes, class / human /
    HOI labels and, in training mode, anchor targets) to this blob.

    :param d: per-image dict; must provide ``img``, ``img_size`` (h, w,
        scale), ``scale``, ``gt_boxes``, ``gt_boxes_human``,
        ``gt_classes``, ``gt_human_classes`` and ``gt_hoi_classes``.
    :return: None (mutates the blob's per-image lists in place)
    """
    img_idx = len(self.imgs)
    self.imgs.append(d['img'])
    height, width, im_scale = d['img_size']

    # all anchors
    self.im_sizes.append((height, width, im_scale))

    def tag(rows):
        # Prefix each row with this image's index within the batch so the
        # rows stay attributable after batch concatenation.
        return np.column_stack((
            img_idx * np.ones(rows.shape[0], dtype=np.int64),
            rows,
        ))

    scaled_boxes = d['gt_boxes'].astype(np.float32) * d['scale']
    scaled_human_boxes = d['gt_boxes_human'].astype(np.float32) * d['scale']
    self.gt_boxes.append(scaled_boxes)
    self.gt_boxes_human.append(scaled_human_boxes)

    # Insert the image index into every label table here.
    self.gt_classes.append(tag(d['gt_classes']))
    self.gt_human_classes.append(tag(d['gt_human_classes']))
    self.gt_hoi_classes.append(tag(d['gt_hoi_classes']))

    # Training mode: compute RPN anchor targets for both box sets.
    if self.is_train:
        for boxes, anchor_list, label_list in (
                (scaled_boxes, self.train_anchors,
                 self.train_anchor_labels),
                (scaled_human_boxes, self.train_anchors_human,
                 self.train_anchor_labels_human)):
            anchors, inds, targets, labels = anchor_target_layer(
                boxes, (height, width))
            anchor_list.append(np.hstack((anchors, targets)))
            label_list.append(np.column_stack((
                img_idx * np.ones(inds.shape[0], dtype=np.int64),
                inds,
                labels,
            )))
def append(self, d):
    """
    Push one image and all of its optional annotations onto the blob.

    Which entries of ``d`` are consumed depends on the blob's flags
    (``is_sal``, ``need_depth``, ``need_caption``, ``is_rel``,
    ``is_keyrel``, ``need_cocoid``, ``is_train``) and on whether ``d``
    carries a ``proposals`` key.

    :param d: dict describing a single image (``img``, ``img_size``,
        ``scale``, ``gt_boxes``, ``gt_classes`` plus flag-dependent keys).
    :return: None (mutates the blob's per-image lists in place)
    """
    idx = len(self.imgs)
    self.imgs.append(d['img'])
    h, w, scale = d['img_size']

    if self.is_sal:
        self.sal_maps.append(d['sal_map'])
    if self.need_depth:
        self.depth_maps.append(d['depth_map'])

    # all anchors
    self.im_sizes.append((h, w, scale))

    # Boxes live in the scaled-image coordinate frame.
    boxes = d['gt_boxes'].astype(np.float32) * d['scale']
    self.gt_boxes.append(boxes)
    # Each label row is prefixed with this image's batch index.
    self.gt_classes.append(np.column_stack((
        idx * np.ones(d['gt_classes'].shape[0], dtype=np.int64),
        d['gt_classes'],
    )))

    if self.need_caption:
        self.seq_labels.append(d['seq_labels'])
        self.mask_labels.append(d['mask_labels'])

    # Relationship annotations, each row tagged with the image index.
    if self.is_rel:
        self.gt_rels.append(np.column_stack((
            idx * np.ones(d['gt_relations'].shape[0], dtype=np.int64),
            d['gt_relations'],
        )))
    if self.is_keyrel:
        self.key_rels.append(np.column_stack((
            idx * np.ones(d['key_rels'].shape[0], dtype=np.int64),
            d['key_rels'],
        )))
    if self.need_cocoid:
        self.coco_ids.append(d['coco_id'])

    # Training mode: RPN anchor targets for this image.
    if self.is_train:
        anchors_, anchor_inds_, anchor_targets_, anchor_labels_ = \
            anchor_target_layer(boxes, (h, w))
        self.train_anchors.append(np.hstack((anchors_, anchor_targets_)))
        self.train_anchor_labels.append(np.column_stack((
            idx * np.ones(anchor_inds_.shape[0], dtype=np.int64),
            anchor_inds_,
            anchor_labels_,
        )))

    if 'proposals' in d:
        # float32 index so it stacks with the scaled float proposal boxes.
        self.proposals.append(np.column_stack((
            idx * np.ones(d['proposals'].shape[0], dtype=np.float32),
            d['scale'] * d['proposals'].astype(np.float32),
        )))