def __init__(self, backbone='resnet', output_stride=16, num_classes=21,
             sync_bn=False, freeze_bn=False, mc_dropout=False):
    """DeepLab segmentation network: backbone -> ASPP -> decoder.

    Args:
        backbone: backbone name forwarded to build_backbone; 'drn'
            forces output_stride to 8 (the DRN backbone is built for it).
        output_stride: ratio of input resolution to final feature map.
        num_classes: number of segmentation classes for the decoder.
        sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
        freeze_bn: if True, freeze batch-norm layers after construction.
        mc_dropout: forwarded to the backbone (Monte-Carlo dropout).
    """
    super(DeepLab, self).__init__()

    # DRN backbones only support an output stride of 8.
    if backbone == 'drn':
        output_stride = 8

    # FIX: idiomatic truthiness test instead of `sync_bn == True`,
    # which silently picked the wrong norm for truthy non-True values.
    BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

    self.backbone = build_backbone(backbone, output_stride, BatchNorm,
                                   mc_dropout=mc_dropout)
    self.aspp = build_aspp(backbone, output_stride, BatchNorm)
    self.decoder = build_decoder(num_classes, backbone, BatchNorm)
    # When True, forward() also returns intermediate features
    # (presumably toggled by callers — confirm against forward()).
    self.return_features = False

    if freeze_bn:
        self.freeze_bn()
def __init__(self, args, cfg=None, net='resnet', output_stride=32,
             num_classes=21, img_size=512, pretrained=True, freeze_bn=False):
    """DSSD detector: backbone -> ASPP -> decoder -> detection head.

    Args:
        args: runtime namespace; `args.device` is used to place priors.
        cfg: detection config; supplies anchor_number for the head.
        net: backbone network name.
        output_stride: backbone output stride.
        num_classes: number of detection classes.
        img_size: input image size (stored as `image_size`).
        pretrained: load pretrained backbone weights.
        freeze_bn: if True, freeze batch-norm layers after construction.
    """
    super(DSSD, self).__init__()
    self.args = args
    self.cfg = cfg
    self.net = net
    self.num_classes = num_classes
    self.image_size = img_size

    # Prior (anchor) boxes are generated once and moved to the device.
    self.priorbox = PriorBox(cfg, net, output_stride)
    self.priors = self.priorbox().to(args.device)

    self.backbone = build_backbone(net, output_stride, pretrained)
    self.aspp = build_aspp(net, output_stride)
    self.decoder = build_decoder(net)
    self.head = build_head(inplances=self.decoder.plances,
                           num_classes=num_classes,
                           num_anchor=cfg.anchor_number)

    # BUG FIX: the original wrote `self.freeze_bn` — a bare attribute
    # reference with no effect. The method must be CALLED to actually
    # freeze the batch-norm layers (matches sibling models in this file).
    if freeze_bn:
        self.freeze_bn()

    # For detect
    self.softmax = nn.Softmax(dim=-1)
    self.detect = Detect(self.args, self.cfg, self.num_classes)
def __init__(self, output_stride=16, num_classes=21):
    """Minimal DeepLab: ResNet backbone -> ASPP -> decoder.

    Args:
        output_stride: ratio of input resolution to final feature map.
        num_classes: number of segmentation classes for the decoder.
    """
    super(DeepLab, self).__init__()
    # FIX: removed the unused local `BatchNorm = nn.BatchNorm2d` —
    # none of the builders below accept a norm layer, so it was dead code.
    self.backbone = build_resnet(output_stride, pretrained=True)
    self.aspp = build_aspp(output_stride)
    self.decoder = build_decoder(num_classes)
def __init__(self, output_stride=16, num_classes=21, freeze_bn=False):
    """Assemble DeepLab: backbone, ASPP head, and decoder.

    Args:
        output_stride: ratio of input resolution to final feature map.
        num_classes: number of segmentation classes for the decoder.
        freeze_bn: if True, freeze batch-norm layers after construction.
    """
    super(DeepLab, self).__init__()
    # Plain (non-synchronized) batch norm is used throughout this model.
    norm_layer = nn.BatchNorm2d
    self.backbone = build_backbone(output_stride, norm_layer)
    self.aspp = build_aspp(output_stride, norm_layer)
    self.decoder = build_decoder(num_classes, norm_layer)
    if freeze_bn:
        self.freeze_bn()
def __init__(self, args):
    """Build DeepLab from an argparse-style namespace.

    Reads from `args`: backbone, output_stride, sync_bn, num_classes,
    freeze_bn.
    """
    super(DeepLab, self).__init__()

    # DRN backbones only support an output stride of 8.
    output_stride = 8 if args.backbone == 'drn' else args.output_stride

    # `== True` kept to preserve the original's exact comparison.
    norm = SynchronizedBatchNorm2d if args.sync_bn == True else nn.BatchNorm2d

    self.backbone = build_backbone(args.backbone, output_stride, norm)
    self.aspp = build_aspp(args.backbone, output_stride, norm)
    self.decoder = build_decoder(args.num_classes, args.backbone, norm)

    # NOTE(review): this stores the flag as an attribute, which would
    # shadow any `freeze_bn()` method on this class — confirm intended.
    self.freeze_bn = args.freeze_bn
def compute_loss(source_ids, target_ids, sequence_mask, embeddings,
                 enc_num_layers, enc_num_units, enc_cell_type, enc_bidir,
                 dec_num_layers, dec_num_units, dec_cell_type, state_pass,
                 infer_batch_size, infer_type="greedy", beam_size=None,
                 max_iter=20, attn_wrapper=None, attn_num_units=128,
                 l2_regularize=None, name="Seq2seq"):
    """
    Creates a Seq2seq model and returns cross entropy loss.

    Builds encoder and decoder graphs under `name`, then computes the
    sparse-softmax cross entropy of the training logits against the
    padded targets, masked by `sequence_mask`.

    Returns:
        (CE, loss, train_logits, infer_outputs) where CE is the summed
        cross entropy, and loss is the mean cross entropy plus an
        optional L2 penalty when `l2_regularize` is given.
    """
    with tf.name_scope(name):
        # build encoder
        encoder_outputs, encoder_states = build_encoder(
            embeddings, source_ids, enc_num_layers, enc_num_units,
            enc_cell_type, bidir=enc_bidir, name="%s_encoder" % name)

        # build decoder: logits, [batch_size, max_time, vocab_size]
        train_logits, infer_outputs = build_decoder(
            encoder_outputs, encoder_states, embeddings, dec_num_layers,
            dec_num_units, dec_cell_type, state_pass, infer_batch_size,
            attn_wrapper, attn_num_units, target_ids, infer_type,
            beam_size, max_iter, name="%s_decoder" % name)

        # compute loss
        with tf.name_scope('loss'):
            # Append one column of 1s to the targets — presumably the
            # EOS token id, matching the decoder's extra step; confirm
            # against the vocabulary used by build_decoder.
            final_ids = tf.pad(target_ids, [[0, 0], [0, 1]],
                               constant_values=1)
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=train_logits, labels=final_ids)
            # Drop the appended position and keep only real (unmasked)
            # timesteps before reducing.
            losses = tf.boolean_mask(losses[:, :-1], sequence_mask)
            reduced_loss = tf.reduce_mean(losses)
            CE = tf.reduce_sum(losses)

            if l2_regularize is None:
                return CE, reduced_loss, train_logits, infer_outputs
            else:
                # L2 penalty over all trainable weights except biases.
                l2_loss = tf.add_n([
                    tf.nn.l2_loss(v) for v in tf.trainable_variables()
                    if not ('bias' in v.name)])
                total_loss = reduced_loss + l2_regularize * l2_loss
                return CE, total_loss, train_logits, infer_outputs
def __init__(self, backbone='xception', num_classes=3):
    """3-D GNet: backbone encoder followed by a segmentation decoder.

    Args:
        backbone: backbone name forwarded to build_backbone.
        num_classes: number of output classes for the decoder.
    """
    super(GNet, self).__init__()
    # Per-stage channel widths shared by encoder and decoder.
    channel_plan = [32, 64, 128, 256, 512]
    # 3-D instance norm throughout (volumetric data).
    norm3d = nn.InstanceNorm3d
    self.backbone = build_backbone(backbone, channel_plan, norm3d)
    self.decoder = build_decoder(num_classes, channel_plan, norm3d)