def _single_gpu_build_func(model):
    """Builds the model on a single GPU. Can be called in a loop over GPUs
    with name and device scoping to create a data parallel model.

    When training, first constructs a teacher network (for knowledge
    distillation) in inference mode under a named teacher scope, then
    constructs the student network and attaches both the RetinaNet losses
    and the distillation loss. Returns the merged loss-gradient dict in
    training mode, otherwise None.
    """
    if model.train:
        # Build the teacher graph with training disabled, saving and then
        # restoring the model's flags so the student build is unaffected.
        saved_train = model.train
        switch_to_teacher()
        model.train = False
        saved_init_params = model.init_params
        with c2_utils.NamedTeacherScope():
            t_blobs, t_dim, t_scales = get_func(cfg.MODEL.CONV_BODY)(model)
            retinanet_heads.add_fpn_retinanet_outputs(
                model, t_blobs, t_dim, t_scales
            )
        model.train = saved_train
        model.init_params = saved_init_params
        switch_to_student()

    # Student network.
    blobs, dim, spatial_scales = add_conv_body_func(model)
    if not model.train:
        # Expose a standalone conv body net for inference-time feature use.
        model.conv_body_net = model.net.Clone('conv_body_net')
    retinanet_heads.add_fpn_retinanet_outputs(model, blobs, dim, spatial_scales)

    if model.train:
        loss_gradients = retinanet_heads.add_fpn_retinanet_losses(model)
        loss_gradients_distill = retinanet_heads.add_distill_loss(
            model, '', 'teacher/'
        )
        loss_gradients.update(loss_gradients_distill)
    return loss_gradients if model.train else None
def _single_gpu_build_func(model):
    """Builds the model on a single GPU. Can be called in a loop over GPUs
    with name and device scoping to create a data parallel model.

    Adds the conv body and RetinaNet output heads; in training mode also
    adds the RetinaNet losses and returns their loss-gradient dict,
    otherwise returns None.
    """
    body_blobs, body_dim, body_scales = add_conv_body_func(model)
    retinanet_heads.add_fpn_retinanet_outputs(
        model, body_blobs, body_dim, body_scales
    )
    # Inference mode has no losses to return.
    if not model.train:
        return None
    return retinanet_heads.add_fpn_retinanet_losses(model)
def _single_gpu_build_func(model):
    """Builds the model on a single GPU. Can be called in a loop over GPUs
    with name and device scoping to create a data parallel model.

    Wires the conv body into the RetinaNet output heads and, when the model
    is in training mode, attaches the RetinaNet losses. Returns the
    loss-gradient dict during training and None during inference.
    """
    blobs, dim, spatial_scales = add_conv_body_func(model)
    retinanet_heads.add_fpn_retinanet_outputs(model, blobs, dim, spatial_scales)
    loss_gradients = None
    if model.train:
        loss_gradients = retinanet_heads.add_fpn_retinanet_losses(model)
    return loss_gradients