示例#1
0
        net = net.cuda()

    # Training annotation list: one line per image (path + boxes) — VOC2007 style.
    annotation_path = '2007_train.txt'
    with open(annotation_path) as f:
        lines = f.readlines()
    # Shuffle with a fixed seed so the sample order is reproducible across runs,
    # then reset the seed so later random ops are nondeterministic again.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_train = len(lines)

    # Build the training data pipeline: either a torch DataLoader over an
    # SSDDataset, or a hand-rolled Python generator.
    if Use_Data_Loader:
        train_dataset = SSDDataset(lines[:num_train], (Config["min_dim"], Config["min_dim"]))
        gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=8, pin_memory=True,
                                drop_last=True, collate_fn=ssd_dataset_collate)
    else:
        gen = Generator(Batch_size, lines,
                        (Config["min_dim"], Config["min_dim"]), Config["num_classes"]).generate()

    # NOTE(review): positional args presumably map to (num_classes, overlap_thresh,
    # prior_for_matching, bkg_label, neg_mining, neg_pos_ratio, neg_overlap,
    # encode_target, use_gpu) — confirm against the MultiBoxLoss definition.
    criterion = MultiBoxLoss(Config['num_classes'], 0.5, True, 0, True, 3, 0.5,
                             False, Cuda)
    # Number of optimizer steps per epoch (drop_last semantics: floor division).
    epoch_size = num_train // Batch_size

    if True:
        # ------------------------------------#
        #   Freeze part of the network (the VGG
        #   backbone) for the first training phase.
        # ------------------------------------#
        for param in model.vgg.parameters():
            param.requires_grad = False

        # Adam still receives all parameters; frozen ones simply get no gradient.
        optimizer = optim.Adam(net.parameters(), lr=lr)
        # Exponential-style decay: multiply the LR by 0.95 after every epoch.
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)
        for epoch in range(Start_iter,Freeze_epoch):
示例#2
0
    # Freeze the first `freeze_layer` layers (typically the pretrained backbone).
    for i in range(freeze_layer):
        model.layers[i].trainable = False
    #-------------------------------------#
    #   NOTE: TF2 quirk — after freezing,
    #   GPU memory usage actually increases.
    #-------------------------------------#
    if True:
        #--------------------------------------------#
        #   Do not make BATCH_SIZE too small,
        #   otherwise training quality suffers badly.
        #--------------------------------------------#
        BATCH_SIZE = 4
        Lr = 5e-4
        Init_Epoch = 0
        Freeze_Epoch = 50

        # Generator yields (images, targets) for both the train split
        # (lines[:num_train]) and the validation split (lines[num_train:]).
        generator = Generator(bbox_util, BATCH_SIZE, lines[:num_train],
                              lines[num_train:],
                              (input_shape[0], input_shape[1]), NUM_CLASSES)

        if Use_Data_Loader:
            # Wrap the Python generators as tf.data pipelines; output signature
            # is (inputs: float32, targets: float32).
            gen = partial(generator.generate, train=True)
            gen = tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))

            gen_val = partial(generator.generate, train=False)
            gen_val = tf.data.Dataset.from_generator(gen_val,
                                                     (tf.float32, tf.float32))

            # NOTE(review): a shuffle buffer of only BATCH_SIZE elements gives
            # very weak shuffling — presumably intentional to bound memory.
            gen = gen.shuffle(buffer_size=BATCH_SIZE).prefetch(
                buffer_size=BATCH_SIZE)
            gen_val = gen_val.shuffle(buffer_size=BATCH_SIZE).prefetch(
                buffer_size=BATCH_SIZE)
示例#3
0
    #   It also prevents the pretrained weights from being destroyed
    #   early in training.
    #   Init_Epoch is the starting epoch.
    #   Freeze_Epoch is the epoch up to which the backbone stays frozen.
    #   Epoch is the total number of training epochs.
    #   If you hit OOM / run out of GPU memory, reduce Batch_size.
    #------------------------------------------------------#
    # Freeze the first 21 layers (the backbone) during this phase.
    for i in range(21):
        model.layers[i].trainable = False
    if True:
        Init_epoch = 0
        Freeze_epoch = 50
        Batch_size = 16
        learning_rate_base = 5e-4

        # Generator serves both splits: train = lines[:num_train],
        # validation = lines[num_train:].
        gen = Generator(bbox_util, Batch_size, lines[:num_train],
                        lines[num_train:], (input_shape[0], input_shape[1]),
                        NUM_CLASSES)

        # Steps per epoch for train/val (floor division drops the last
        # partial batch).
        epoch_size = num_train // Batch_size
        epoch_size_val = num_val // Batch_size

        # Guard: with fewer samples than one batch there is nothing to train on.
        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("数据集过小,无法进行训练,请扩充数据集。")

        # NOTE(review): `lr=` is the legacy Keras Adam kwarg (renamed to
        # `learning_rate` in later versions) — matches the TF1/early-TF2 API
        # this script targets.
        model.compile(optimizer=Adam(lr=learning_rate_base),
                      loss=MultiboxLoss(NUM_CLASSES,
                                        neg_pos_ratio=3.0).compute_loss)
        # generate(True) -> training stream, generate(False) -> validation stream.
        model.fit_generator(gen.generate(True),
                            steps_per_epoch=epoch_size,
                            validation_data=gen.generate(False),
                            validation_steps=epoch_size_val,
示例#4
0
                             drop_last=True,
                             collate_fn=ssd_dataset_collate)

            # Validation pipeline: same dataset type, augmentation disabled
            # (the trailing False flag).
            val_dataset = SSDDataset(lines[num_train:],
                                     (Config["min_dim"], Config["min_dim"]),
                                     False)
            gen_val = DataLoader(val_dataset,
                                 shuffle=True,
                                 batch_size=Batch_size,
                                 num_workers=4,
                                 pin_memory=True,
                                 drop_last=True,
                                 collate_fn=ssd_dataset_collate)
        else:
            # Fallback: plain Python generators; generate(True) yields the
            # training stream, generate(False) the validation stream.
            gen = Generator(Batch_size, lines[:num_train],
                            (Config["min_dim"], Config["min_dim"]),
                            Config["num_classes"]).generate(True)
            gen_val = Generator(Batch_size, lines[num_train:],
                                (Config["min_dim"], Config["min_dim"]),
                                Config["num_classes"]).generate(False)

        # Freeze the VGG backbone for this training phase.
        for param in model.vgg.parameters():
            param.requires_grad = False

        # Steps per epoch (floor division drops the last partial batch).
        epoch_size = num_train // Batch_size
        epoch_size_val = num_val // Batch_size

        # Frozen-phase training loop: one fit_one_epoch per epoch, then step
        # the LR scheduler once per epoch.
        for epoch in range(Init_Epoch, Freeze_Epoch):
            fit_one_epoch(net, criterion, epoch, epoch_size, epoch_size_val,
                          gen, gen_val, Freeze_Epoch, Cuda)
            lr_scheduler.step()