Code Example #1
File: network.py  Project: hezhujun/MyDetection
    def __init__(self, scales, ratios, fg_threshold, bg_threshold,
                 batch_size_per_image, positive_fraction,
                 pre_nms_top_n_in_train, post_nms_top_n_in_train,
                 pre_nms_top_n_in_test, post_nms_top_n_in_test, nms_thresh,
                 **kwargs):
        super(RegionProposalNetwork, self).__init__(**kwargs)
        self.scales = scales
        self.ratios = ratios
        self.max_num_anchors_per_position = 0
        for k, _scales in scales.items():
            num_anchors_per_position = 0
            _ratios = ratios[k]
            for i, scale in enumerate(_scales):
                num_anchors_per_position += len(_ratios[i])
            if num_anchors_per_position > self.max_num_anchors_per_position:
                self.max_num_anchors_per_position = num_anchors_per_position

        with self.name_scope():
            self.head = nn.Conv2D(256, 3, padding=1, activation="relu")
            self.object_cls = nn.Conv2D(self.max_num_anchors_per_position, 1)
            self.object_reg = nn.Conv2D(self.max_num_anchors_per_position * 4,
                                        1)

        self._anchors = dict()
        self.fg_threshold = fg_threshold
        self.bg_threshold = bg_threshold
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        self.pre_nms_top_n_in_train = pre_nms_top_n_in_train
        self.post_nms_top_n_in_train = post_nms_top_n_in_train
        self.pre_nms_top_n_in_test = pre_nms_top_n_in_test
        self.post_nms_top_n_in_test = post_nms_top_n_in_test
        self.nms_thresh = nms_thresh
        self.object_cls_loss = gloss.SigmoidBCELoss()
        self.object_reg_loss = gloss.HuberLoss()
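A note on the two losses registered above: SigmoidBCELoss (with its default from_sigmoid=False) consumes the raw objectness logits produced by object_cls, while HuberLoss smooths the box-regression output of object_reg. A minimal standalone sketch of their behavior, with hypothetical shapes and values not taken from the project:

import mxnet as mx
from mxnet.gluon import loss as gloss

cls_loss = gloss.SigmoidBCELoss()  # expects raw logits by default
reg_loss = gloss.HuberLoss()       # smooth-L1-style loss, rho=1 by default

logits = mx.nd.array([[2.0, -1.0], [0.5, 0.0]])  # per-anchor objectness logits
labels = mx.nd.array([[1.0, 0.0], [1.0, 0.0]])   # 1 = foreground, 0 = background
print(cls_loss(logits, labels))                  # one averaged value per image

deltas = mx.nd.array([[0.1, 0.2, -0.1, 0.0]])    # predicted box offsets
targets = mx.nd.zeros((1, 4))                    # matching regression targets
print(reg_loss(deltas, targets))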
Code Example #2
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        # Set the default inputs.
        # The default prediction shape (32, 1000, 1) mimics batch_size=32 on a 1000-class problem.
        default_parameters = {
            "pred": (32, 1000, 1),
            "pred_initializer": nd.ones,
            "label": (32, 1000, 1),
            "label_initializer": nd.zeros,
            "weight": 1.0,
            "run_backward": False,
            "dtype": "float32"
        }

        super().__init__(ctx=ctx,
                         warmup=warmup,
                         runs=runs,
                         default_parameters=default_parameters,
                         custom_parameters=inputs)

        # Create a random prediction and label tensor
        self.pred = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=self.inputs["pred"],
                                   dtype=self.inputs["dtype"],
                                   initializer=self.inputs["pred_initializer"],
                                   attach_grad=self.inputs["run_backward"])
        self.label = get_mx_ndarray(
            ctx=self.ctx,
            in_tensor=self.inputs["label"],
            dtype=self.inputs["dtype"],
            initializer=self.inputs["label_initializer"],
            attach_grad=self.inputs["run_backward"])

        self.block = loss.SigmoidBCELoss(from_sigmoid=False,
                                         weight=self.inputs["weight"],
                                         batch_axis=0)

        self.block.initialize(ctx=self.ctx)
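For reference, one timed iteration of this benchmark boils down to the following self-contained sketch, using the default shapes and initializers from default_parameters above (the timing harness itself is omitted):

import mxnet as mx
from mxnet import autograd
from mxnet.gluon import loss

blk = loss.SigmoidBCELoss(from_sigmoid=False, weight=1.0, batch_axis=0)
blk.initialize()

pred = mx.nd.ones((32, 1000, 1))
pred.attach_grad()                 # only needed when run_backward is True
label = mx.nd.zeros((32, 1000, 1))

with autograd.record():
    out = blk(pred, label)
out.backward()
mx.nd.waitall()  # wait for the async engine so a timer would measure real work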
Code Example #3
    def build_model(self):
        # DataLoader
        train_transform = transforms.Compose([
            transforms.RandomFlipLeftRight(),
            transforms.Resize((self.img_size + 30, self.img_size + 30)),
            transforms.RandomResizedCrop(self.img_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])
        test_transform = transforms.Compose([
            transforms.Resize((self.img_size, self.img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])

        self.trainA = ImageFolder(
            os.path.join('dataset', self.dataset, 'trainA'), train_transform)
        self.trainB = ImageFolder(
            os.path.join('dataset', self.dataset, 'trainB'), train_transform)
        self.testA = ImageFolder(
            os.path.join('dataset', self.dataset, 'testA'), test_transform)
        self.testB = ImageFolder(
            os.path.join('dataset', self.dataset, 'testB'), test_transform)
        self.trainA_loader = DataLoader(self.trainA,
                                        batch_size=self.batch_size,
                                        shuffle=True,
                                        num_workers=self.num_workers)
        self.trainB_loader = DataLoader(self.trainB,
                                        batch_size=self.batch_size,
                                        shuffle=True,
                                        num_workers=self.num_workers)
        self.testA_loader = DataLoader(self.testA, batch_size=1, shuffle=False)
        self.testB_loader = DataLoader(self.testB, batch_size=1, shuffle=False)
        """ Define Generator, Discriminator """
        self.genA2B = ResnetGenerator(input_nc=3,
                                      output_nc=3,
                                      ngf=self.ch,
                                      n_blocks=self.n_res,
                                      img_size=self.img_size,
                                      light=self.light)
        self.genB2A = ResnetGenerator(input_nc=3,
                                      output_nc=3,
                                      ngf=self.ch,
                                      n_blocks=self.n_res,
                                      img_size=self.img_size,
                                      light=self.light)
        self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disGB = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
        self.disLB = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)

        self.whole_model = nn.HybridSequential()
        self.whole_model.add(*[
            self.genA2B, self.genB2A, self.disGA, self.disGB, self.disLA,
            self.disLB
        ])

        self.whole_model.hybridize(static_alloc=False, static_shape=False)
        """ Define Loss """
        self.L1_loss = gloss.L1Loss()
        self.MSE_loss = gloss.L2Loss(weight=2)
        self.BCE_loss = gloss.SigmoidBCELoss()
        """ Initialize Parameters"""
        params = self.whole_model.collect_params()
        block = self.whole_model
        if not self.debug:
            force_init(block.collect_params('.*?_weight'), KaimingUniform())
            force_init(block.collect_params('.*?_bias'),
                       BiasInitializer(params))
            block.collect_params('.*?_rho').initialize()
            block.collect_params('.*?_gamma').initialize()
            block.collect_params('.*?_beta').initialize()
            block.collect_params('.*?_state_.*?').initialize()
        block.collect_params().reset_ctx(self.dev)
        """ Trainer """
        self.G_params = param_dicts_merge(
            self.genA2B.collect_params(),
            self.genB2A.collect_params(),
        )
        self.G_optim = gluon.Trainer(
            self.G_params,
            'adam',
            dict(learning_rate=self.lr,
                 beta1=0.5,
                 beta2=0.999,
                 wd=self.weight_decay),
        )
        self.D_params = param_dicts_merge(self.disGA.collect_params(),
                                          self.disGB.collect_params(),
                                          self.disLA.collect_params(),
                                          self.disLB.collect_params())
        self.D_optim = gluon.Trainer(
            self.D_params,
            'adam',
            dict(learning_rate=self.lr,
                 beta1=0.5,
                 beta2=0.999,
                 wd=self.weight_decay),
        )
        """ Define Rho clipper to constraint the value of rho in AdaILN and ILN"""
        self.Rho_clipper = RhoClipper(0, 1)
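One detail worth noting in the loss definitions above: gluon's L2Loss computes weight/2 * (pred - label)^2, so weight=2 recovers a plain squared error, which is presumably why the attribute is named MSE_loss. A quick check:

import mxnet as mx
from mxnet.gluon import loss as gloss

pred, label = mx.nd.array([2.0]), mx.nd.array([0.0])
print(gloss.L2Loss()(pred, label))          # 0.5 * (2 - 0)^2 = 2.0
print(gloss.L2Loss(weight=2)(pred, label))  # (2 - 0)^2 = 4.0, i.e. plain MSE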
Code Example #4
    def __init__(self):
        super(MyLoss3, self).__init__()
        self.loss1 = loss.SoftmaxCELoss()
        self.loss2 = loss.SigmoidBCELoss()
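The excerpt shows only the constructor of MyLoss3. A hypothetical hybrid_forward combining the two losses could look like the following; the argument names and the plain sum are assumptions, not taken from the project:

    def hybrid_forward(self, F, cls_pred, cls_label, bin_pred, bin_label):
        # Hypothetical: softmax cross-entropy on the multi-class head plus
        # sigmoid BCE on the binary head, combined by simple addition.
        return self.loss1(cls_pred, cls_label) + self.loss2(bin_pred, bin_label)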
Code Example #5
    def __init__(self):
        super(MyLoss, self).__init__()
        self.loss = loss.SigmoidBCELoss()
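As a reminder of the semantics: with the default from_sigmoid=False, SigmoidBCELoss applies the sigmoid internally and evaluates -(y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x))) in a numerically stable form. A one-line sanity check:

import math
import mxnet as mx
from mxnet.gluon import loss

l = loss.SigmoidBCELoss()      # from_sigmoid=False by default
x, y = mx.nd.array([0.0]), mx.nd.array([1.0])
print(l(x, y))                 # -log(sigmoid(0)) = log(2)
print(math.log(2))             # ≈ 0.6931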
Code Example #6
# NOTE: the snippet starts mid-statement; the dataset constructor preceding
# `transform=` is truncated in the original excerpt.
                                      transform=transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64),
                                                                    transforms.ToTensor(),
                                                                    transforms.Normalize((0.5, 0.5, 0.5),
                                                                                         (0.5, 0.5, 0.5))]))
data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=4)

netG = generator.Generator()
netD = discriminator.Discriminator()

netG.initialize(init=init.Normal(sigma=0.02))
netD.initialize(init=init.Normal(sigma=0.02))

# Adam does not accept a `momentum` argument; it is parameterized by beta1/beta2
# (beta1=0.5 below is the usual DCGAN choice, substituted for the invalid kwarg).
trainerG = Trainer(netG.collect_params(), 'adam', {'learning_rate': lr, 'beta1': 0.5})
trainerD = Trainer(netD.collect_params(), 'adam', {'learning_rate': lr, 'beta1': 0.5})

loss = gloss.SigmoidBCELoss()

for epoch in range(epoches):
    for i, batch in enumerate(data_loader):
        data_real = batch[0]
        labels_real = nd.ones(shape=(batch_size, ))

        z = nd.random.randn(batch_size, 100, 1, 1)
        data_fake = netG(z)
        labels_fake = nd.zeros(shape=(batch_size, ))
        with autograd.record():
            preds_real = netD(data_real)
            lo_real = loss(preds_real, labels_real)
            preds_fake = netD(data_fake)
            lo_fake = loss(preds_fake, labels_fake)
            err_D = lo_real + lo_fake  # D loss is the sum of the two BCE terms, not the raw predictions
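The excerpt is cut off at this point. A typical completion of the loop, assuming the standard DCGAN recipe (the update steps below are not part of the original source):

        err_D.backward()
        trainerD.step(batch_size)

        # Generator update: train G so that D classifies its output as real.
        with autograd.record():
            data_fake = netG(z)
            preds_fake = netD(data_fake)
            err_G = loss(preds_fake, labels_real)
        err_G.backward()
        trainerG.step(batch_size)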