Code example #1
def symbolic(g, single_level_grid_anchors, base_anchors, base_size, scales,
             ratios, anchor_stride, feat, img_tensor, target_stds):
    # Unused arguments keep the signature of the anchor-generation
    # function this symbolic replaces during ONNX export.
    min_size = base_size
    max_sizes = []
    ars = []
    # SSD convention: each extra scale encodes sqrt(max_size / min_size),
    # so the corresponding max size is scale**2 * min_size.
    for scale in scales[1:]:
        max_sizes.append(scale * scale * min_size)
    # With flip_i=1 the op adds reciprocal ratios itself, so only
    # ratios > 1 are passed (ratio 1 is implicit).
    for ar in ratios:
        if ar > 1:
            ars.append(ar)
    return g.op(add_domain("PriorBox"),
                feat,
                img_tensor,
                min_size_f=[min_size],
                max_size_f=max_sizes,
                aspect_ratio_f=ars,
                flip_i=1,
                clip_i=0,
                variance_f=list(target_stds),
                step_f=anchor_stride[0],
                offset_f=0.5,
                step_h_f=0,
                step_w_f=0,
                img_size_i=0,
                img_h_i=0,
                img_w_i=0)
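
All five snippets call an add_domain helper that is not shown here. A minimal sketch of what it likely looks like, assuming the custom operations live in OpenVINO's org.openvinotoolkit extension domain (the domain string is an assumption, not taken from this code):

def add_domain(op_name):
    # Assumed helper: torch.onnx namespaces custom ops as
    # '<domain>::<op_name>'. 'org.openvinotoolkit' is an assumption
    # based on OpenVINO's extension-operation naming convention.
    return 'org.openvinotoolkit' + '::' + op_name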
Code example #2
def symbolic(g, cls_scores, bbox_preds, img_metas, cfg, rescale, priors,
             cls_out_channels, use_sigmoid_cls, target_means, target_stds):
    # Map mmdetection's test-time NMS settings onto a DetectionOutput
    # node. The attribute suffixes tell g.op the ONNX attribute type:
    # _i = int, _f = float, _s = string.
    return g.op(add_domain("DetectionOutput"),
                bbox_preds,
                cls_scores,
                priors,
                num_classes_i=cls_out_channels,
                background_label_id_i=cls_out_channels - 1,
                top_k_i=cfg['max_per_img'],
                keep_top_k_i=cfg['max_per_img'],
                confidence_threshold_f=cfg['score_thr'],
                nms_threshold_f=cfg['nms']['iou_thr'],
                eta_f=1,
                share_location_i=1,
                code_type_s="CENTER_SIZE",
                variance_encoded_in_target_i=0)
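
The cfg argument follows mmdetection's test_cfg layout; the keys read above are the only ones this symbolic needs. A minimal illustrative value (the numbers are placeholders, not tuned settings):

cfg = dict(
    score_thr=0.02,           # -> confidence_threshold_f
    nms=dict(iou_thr=0.45),   # -> nms_threshold_f
    max_per_img=200,          # -> top_k_i and keep_top_k_i
)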
Code example #3
def symbolic(g, single_level_grid_anchors, base_anchors, anchors_heights,
             anchors_widths, anchor_stride, feat, img_tensor, target_stds):
    # Unlike PriorBox, PriorBoxClustered takes explicit per-anchor
    # heights and widths instead of min/max sizes and aspect ratios.
    return g.op(add_domain("PriorBoxClustered"),
                feat,
                img_tensor,
                height_f=anchors_heights,
                width_f=anchors_widths,
                flip_i=0,
                clip_i=0,
                variance_f=list(target_stds),
                step_f=anchor_stride[0],
                offset_f=0.5,
                step_h_f=0,
                step_w_f=0,
                img_size_i=0,
                img_h_i=0,
                img_w_i=0)
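
The anchors_heights / anchors_widths lists would typically be derived from the generator's base anchors before this symbolic is called. A hedged sketch, assuming base_anchors holds (x1, y1, x2, y2) boxes as in mmdetection's anchor generators:

# Hypothetical pre-computation; not part of the snippet above.
anchors_widths = [float(x2 - x1) for x1, y1, x2, y2 in base_anchors]
anchors_heights = [float(y2 - y1) for x1, y1, x2, y2 in base_anchors]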
Code example #4
def symbolic(g,
             input,
             offset,
             weight,
             stride=1,
             padding=0,
             dilation=1,
             groups=1,
             deformable_groups=1,
             im2col_step=64):
    # Only group-free deformable convolution is exportable here.
    assert groups == 1
    # Kernel spatial shape is read statically from the traced weight tensor.
    kh, kw = weight.type().sizes()[2:]
    return g.op(add_domain('DeformableConv2D'),
                input,
                offset,
                weight,
                strides_i=stride,
                # Duplicate each symmetric padding value into a begin/end pair.
                pads_i=[p for pair in zip(padding, padding) for p in pair],
                dilations_i=dilation,
                groups_i=groups,
                deformable_groups_i=deformable_groups,
                kernel_shape_i=[kh, kw])
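
The pads_i comprehension expands a symmetric per-dimension padding pair into a flat list, duplicating each value into a (begin, end) pair. In eager Python:

padding = (1, 2)                                    # (pad_h, pad_w)
pads = [p for pair in zip(padding, padding) for p in pair]
assert pads == [1, 1, 2, 2]                         # each dim's pad duplicated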
Code example #5
def roi_feature_extractor_symbolics(g,
                                    rois,
                                    *feats,
                                    output_size=1,
                                    featmap_strides=1,
                                    sample_num=1):
    from torch.onnx.symbolic_helper import _slice_helper
    # mmdetection rois are (batch_idx, x1, y1, x2, y2); drop the batch
    # index column before handing the boxes to the extractor.
    rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
    roi_feats = g.op(
        add_domain('ExperimentalDetectronROIFeatureExtractor'),
        rois,
        *feats,
        output_size_i=output_size,
        pyramid_scales_i=featmap_strides,
        sampling_ratio_i=sample_num,
        image_id_i=0,
        distribute_rois_between_levels_i=1,
        preserve_rois_order_i=0,
        aligned_i=1,
        outputs=1,
    )
    return roi_feats
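
In eager terms, the _slice_helper call keeps columns 1..4 of the rois tensor. An equivalent plain-PyTorch view, for illustration only:

import torch
rois = torch.tensor([[0., 10., 20., 50., 60.]])  # (batch_idx, x1, y1, x2, y2)
boxes = rois[:, 1:5]                             # what the graph slice produces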