Example #1
    def train_first_stage(self):
        """
        The backbone feature-extraction network learns generic features, so freezing it
        speeds up training and also protects the pretrained weights from being destroyed early on.
        Init_Epoch is the starting epoch.
        Freeze_Epoch is the last epoch of frozen training.
        Epoch is the total number of training epochs.
        If you hit OOM or run out of GPU memory, reduce Batch_size.
        :return:
        """
        optimizer_stage1 = optim.Adam(self.net.parameters(), self.lr_first, weight_decay=self.opt_weight_decay)
        if self.Cosine_lr:
            lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
                optimizer_stage1, T_max=self.CosineAnnealingLR_T_max, eta_min=self.CosineAnnealingLR_eta_min)
        else:
            lr_scheduler = optim.lr_scheduler.StepLR(
                optimizer_stage1, step_size=self.StepLR_step_size, gamma=self.StepLR_gamma)

        if self.Use_Data_Loader:
            train_dataset = EfficientdetDataset(self.lines[:self.num_train], (self.input_shape[0], self.input_shape[1]))
            val_dataset = EfficientdetDataset(self.lines[self.num_train:], (self.input_shape[0], self.input_shape[1]))
            gen = DataLoader(train_dataset, batch_size=self.Batch_size_first, num_workers=self.num_workers,
                             pin_memory=True,
                             drop_last=True, collate_fn=efficientdet_dataset_collate)
            gen_val = DataLoader(val_dataset, batch_size=self.Batch_size_first, num_workers=self.num_workers,
                                 pin_memory=True,
                                 drop_last=True, collate_fn=efficientdet_dataset_collate)
        else:
            gen = Generator(self.Batch_size_first, self.lines[:self.num_train],
                            (self.input_shape[0], self.input_shape[1])).generate()
            gen_val = Generator(self.Batch_size_first, self.lines[self.num_train:],
                                (self.input_shape[0], self.input_shape[1])).generate()

        epoch_size = max(1, self.num_train // self.Batch_size_first)
        epoch_size_val = max(1, self.num_val // self.Batch_size_first)
        # ------------------------------------#
        #   Freeze part of the network for the first stage
        # ------------------------------------#
        for param in self.model.backbone_net.parameters():
            param.requires_grad = False

        for epoch in range(self.Init_Epoch, self.Freeze_Epoch):
            self.fit_one_epoch(self.net, self.model, optimizer_stage1, self.efficient_loss, epoch, epoch_size,
                               epoch_size_val, gen, gen_val, self.Freeze_Epoch, self.Cuda)
            lr_scheduler.step()
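
The freeze in this example works through `requires_grad`: once a parameter is flagged `False`, the backward pass gives it no gradient and the optimizer skips it, even though `self.net.parameters()` handed every parameter to Adam. A self-contained sketch of that mechanic (the toy two-layer model is illustrative, not the project's network):

import torch
import torch.nn as nn
import torch.optim as optim

# Toy stand-ins: model[0] plays the "backbone", model[1] the detection head.
model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))

# Freeze the "backbone": its parameters receive no gradients, so Adam
# leaves them untouched even though they were passed to the optimizer.
for param in model[0].parameters():
    param.requires_grad = False

optimizer = optim.Adam(model.parameters(), lr=1e-3)
x, y = torch.randn(4, 8), torch.randn(4, 2)

loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()
# model[0] is unchanged; only model[1] has been updated.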
Example #2
    def train_second_stage(self):
        """
        Update the parameters of the whole network together.
        :return:
        """
        optimizer_stage2 = optim.Adam(self.net.parameters(), self.lr_second, weight_decay=self.opt_weight_decay)
        if self.Cosine_lr:
            lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
                optimizer_stage2, T_max=self.CosineAnnealingLR_T_max, eta_min=self.CosineAnnealingLR_eta_min)
        else:
            lr_scheduler = optim.lr_scheduler.StepLR(
                optimizer_stage2, step_size=self.StepLR_step_size, gamma=self.StepLR_gamma)

        if self.Use_Data_Loader:
            train_dataset = EfficientdetDataset(self.lines[:self.num_train], (self.input_shape[0], self.input_shape[1]))
            val_dataset = EfficientdetDataset(self.lines[self.num_train:], (self.input_shape[0], self.input_shape[1]))
            gen = DataLoader(train_dataset, batch_size=self.Batch_size_second, num_workers=self.num_workers,
                             pin_memory=True,
                             drop_last=True, collate_fn=efficientdet_dataset_collate)
            gen_val = DataLoader(val_dataset, batch_size=self.Batch_size_second, num_workers=self.num_workers,
                                 pin_memory=True,
                                 drop_last=True, collate_fn=efficientdet_dataset_collate)
        else:
            gen = Generator(self.Batch_size_second, self.lines[:self.num_train],
                            (self.input_shape[0], self.input_shape[1])).generate()
            gen_val = Generator(self.Batch_size_second, self.lines[self.num_train:],
                                (self.input_shape[0], self.input_shape[1])).generate()

        epoch_size = max(1, self.num_train // self.Batch_size_second)
        epoch_size_val = max(1, self.num_val // self.Batch_size_second)
        # ------------------------------------#
        #   Train after unfreezing the backbone
        # ------------------------------------#
        for param in self.model.backbone_net.parameters():
            param.requires_grad = True

        for epoch in range(self.Freeze_Epoch, self.Unfreeze_Epoch):
            self.fit_one_epoch(self.net, self.model, optimizer_stage2, self.efficient_loss, epoch, epoch_size,
                               epoch_size_val, gen, gen_val, self.Unfreeze_Epoch, self.Cuda)
            lr_scheduler.step()
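
Note that stage two builds a fresh Adam, which resets the optimizer's moment estimates. A common alternative to the hard freeze/unfreeze split is discriminative learning rates via parameter groups, so the backbone trains throughout but at a smaller step. A sketch of that variant, not what the class above does; the 10x ratio is an illustrative choice:

import torch.optim as optim

# Split parameters into backbone vs. the rest, matching the
# `model.backbone_net` attribute used in the examples above.
backbone_params = list(model.backbone_net.parameters())
backbone_ids = {id(p) for p in backbone_params}
head_params = [p for p in model.parameters() if id(p) not in backbone_ids]

# One optimizer for the whole run: the backbone learns 10x slower.
optimizer = optim.Adam([
    {'params': backbone_params, 'lr': 1e-5},
    {'params': head_params,     'lr': 1e-4},
], weight_decay=5e-4)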
Example #3
        Batch_size = 4
        Init_Epoch = 0
        Freeze_Epoch = 50

        optimizer = optim.Adam(net.parameters(), lr, weight_decay=5e-4)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2, verbose=True)

        if Use_Data_Loader:
            train_dataset = EfficientdetDataset(lines[:num_train], (input_shape[0], input_shape[1]))
            val_dataset = EfficientdetDataset(lines[num_train:], (input_shape[0], input_shape[1]))
            gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                             drop_last=True, collate_fn=efficientdet_dataset_collate)
            gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                                 drop_last=True, collate_fn=efficientdet_dataset_collate)
        else:
            gen = Generator(Batch_size, lines[:num_train],
                            (input_shape[0], input_shape[1])).generate()
            gen_val = Generator(Batch_size, lines[num_train:],
                                (input_shape[0], input_shape[1])).generate()

        epoch_size = max(1, num_train // Batch_size)
        epoch_size_val = max(1, num_val // Batch_size)
        # ------------------------------------#
        #   Freeze part of the network for training
        # ------------------------------------#
        for param in model.backbone_net.parameters():
            param.requires_grad = False

        for epoch in range(Init_Epoch, Freeze_Epoch):
            val_loss = fit_one_epoch(net, efficient_loss, epoch, epoch_size, epoch_size_val, gen, gen_val, Freeze_Epoch,
                                     Cuda)
            lr_scheduler.step(val_loss)
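
Unlike the StepLR and CosineAnnealingLR schedules in the first two examples, ReduceLROnPlateau is metric-driven: `step()` must be given the monitored value, which is why `fit_one_epoch` returns `val_loss` here. A self-contained sketch of the plateau behaviour:

import torch
import torch.optim as optim

param = torch.nn.Parameter(torch.zeros(1))
optimizer = optim.Adam([param], lr=1e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2)

# A validation loss that stops improving after the third epoch:
for epoch, val_loss in enumerate([1.0, 0.9, 0.8, 0.8, 0.8, 0.8]):
    scheduler.step(val_loss)
    print(epoch, optimizer.param_groups[0]['lr'])
# Once val_loss has failed to improve for more than `patience` epochs,
# the learning rate is halved from 1e-3 to 5e-4.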
Example #4
    #   Freeze_Epoch is the last epoch of frozen training
    #   Epoch is the total number of training epochs
    #------------------------------------------------------#
    for i in range(freeze_layers[phi]):
        model.layers[i].trainable = False

    if True:
        #--------------------------------------------#
        #   Don't make BATCH_SIZE too small, or training quality will suffer
        #--------------------------------------------#
        BATCH_SIZE = 4
        Lr = 1e-3
        Init_Epoch = 0
        Freeze_Epoch = 50
        gen = Generator(bbox_util, BATCH_SIZE, lines[:num_train],
                        lines[num_train:],
                        (image_sizes[phi], image_sizes[phi]), NUM_CLASSES)
        model.compile(loss={
            'regression': smooth_l1(),
            'classification': focal()
        },
                      optimizer=keras.optimizers.Adam(Lr))
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, BATCH_SIZE))
        model.fit(gen.generate(True, eager=False),
                  steps_per_epoch=max(1, num_train // BATCH_SIZE),
                  validation_data=gen.generate(False, eager=False),
                  validation_steps=max(1, num_val // BATCH_SIZE),
                  epochs=Freeze_Epoch,
                  verbose=1,
                  initial_epoch=Init_Epoch)
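
On the Keras side, freezing happens per layer through the `trainable` flag, and the flag is baked in at the next `compile()`, which is why the example toggles the flags before compiling. A minimal sketch of that mechanic (the toy two-layer model is illustrative):

from tensorflow import keras

# Toy stand-ins: the first Dense plays the frozen "backbone" layer.
model = keras.Sequential([
    keras.Input(shape=(4,)),
    keras.layers.Dense(8),
    keras.layers.Dense(1),
])

model.layers[0].trainable = False  # freeze before compiling
model.compile(loss='mse', optimizer=keras.optimizers.Adam(1e-3))

# Only the second layer's variables remain trainable.
print(len(model.trainable_weights))  # 2: the head's kernel and bias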
Example #5
    #   Epoch is the total number of training epochs
    #------------------------------------------------------#
    for i in range(freeze_layers[phi]):
        model.layers[i].trainable = False

    if True:
        #--------------------------------------------#
        #   Don't make Batch_size too small, or training quality will suffer
        #--------------------------------------------#
        Batch_size = 8
        Lr = 1e-3
        Init_Epoch = 0
        Freeze_Epoch = 50

        gen = Generator(bbox_util, Batch_size, lines[:num_train],
                        lines[num_train:],
                        (image_sizes[phi], image_sizes[phi]), num_classes)
        model.compile(loss={
            'regression': smooth_l1(),
            'classification': focal()
        },
                      optimizer=keras.optimizers.Adam(Lr))
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, Batch_size))
        model.fit(gen.generate(True),
                  steps_per_epoch=max(1, num_train // Batch_size),
                  validation_data=gen.generate(False),
                  validation_steps=max(1, num_val // Batch_size),
                  epochs=Freeze_Epoch,
                  verbose=1,
                  initial_epoch=Init_Epoch)
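
Both Keras examples stop at the frozen stage. The unfreeze stage that typically follows flips the flags back, recompiles with a smaller learning rate, and resumes from Freeze_Epoch. A sketch of that continuation, reusing the names above; `Unfreeze_Epoch` and the 1e-4 rate are illustrative assumptions, not taken from the snippet:

        # Unfreeze every previously frozen layer, then recompile so the
        # change takes effect.
        for i in range(freeze_layers[phi]):
            model.layers[i].trainable = True

        Unfreeze_Epoch = 100  # illustrative total epoch count
        model.compile(loss={
            'regression': smooth_l1(),
            'classification': focal()
        },
                      optimizer=keras.optimizers.Adam(1e-4))
        model.fit(gen.generate(True),
                  steps_per_epoch=max(1, num_train // Batch_size),
                  validation_data=gen.generate(False),
                  validation_steps=max(1, num_val // Batch_size),
                  epochs=Unfreeze_Epoch,
                  verbose=1,
                  initial_epoch=Freeze_Epoch)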