Example #1
        n_train_post_nms=300 in ProposalCreator of nets.rpn;
        pos_ratio=0.5 in ProposalTargetCreator of utils.utils;
    '''
    if True:
        lr = 1e-4
        Init_Epoch = 0
        Freeze_Epoch = 25

        optimizer = optim.Adam(model.parameters(), lr, weight_decay=5e-4)
        # optimizer = optim.SGD(model.parameters(),lr,weight_decay=5e-4,momentum=0.9)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                 step_size=1,
                                                 gamma=0.95)

        if Use_Data_Loader:
            train_dataset = FRCNNDataset(lines[:num_train],
                                         (IMAGE_SHAPE[0], IMAGE_SHAPE[1]))
            val_dataset = FRCNNDataset(lines[num_train:],
                                       (IMAGE_SHAPE[0], IMAGE_SHAPE[1]))
            gen = DataLoader(train_dataset,
                             batch_size=1,
                             num_workers=4,
                             pin_memory=True,
                             drop_last=True,
                             collate_fn=frcnn_dataset_collate)
            gen_val = DataLoader(val_dataset,
                                 batch_size=1,
                                 num_workers=4,
                                 pin_memory=True,
                                 drop_last=True,
                                 collate_fn=frcnn_dataset_collate)
        else:
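The StepLR scheduler in this example uses step_size=1 and gamma=0.95, so every call to lr_scheduler.step() multiplies the learning rate by 0.95. The per-epoch loop itself is cut off in this excerpt; a minimal sketch of how the pieces above are typically driven (fit_one_epoch is a hypothetical placeholder for the train/validate pass over gen and gen_val, not a function shown in the original code):

        for epoch in range(Init_Epoch, Freeze_Epoch):
            fit_one_epoch(model, optimizer, epoch, gen, gen_val)  # hypothetical: one training + validation pass
            lr_scheduler.step()  # decay the learning rate by gamma=0.95 after every epoch (step_size=1)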
Example #2
    #   It also helps prevent the pretrained weights from being destroyed early in training.
    #   Init_Epoch is the starting epoch.
    #   Freeze_Epoch is the last epoch of the frozen (backbone) training stage.
    #   Epoch is the total number of training epochs.
    #   If you hit OOM / run out of GPU memory, reduce Batch_size.
    #------------------------------------------------------#
    if True:
        lr              = 1e-4
        Batch_size      = 2
        Init_Epoch      = 0
        Freeze_Epoch    = 50
        
        optimizer       = optim.Adam(net.parameters(), lr, weight_decay=5e-4)
        lr_scheduler    = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.95)

        train_dataset   = FRCNNDataset(lines[:num_train], (input_shape[0], input_shape[1]), is_train=True)
        val_dataset     = FRCNNDataset(lines[num_train:], (input_shape[0], input_shape[1]), is_train=False)
        gen             = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                                drop_last=True, collate_fn=frcnn_dataset_collate)
        gen_val         = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                                drop_last=True, collate_fn=frcnn_dataset_collate)
                        
        epoch_size      = num_train // Batch_size
        epoch_size_val  = num_val // Batch_size

        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("The dataset is too small to train on; please add more data.")

        # ------------------------------------#
        #   Freeze part of the network for the first training stage (see the sketch below)
        # ------------------------------------#
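The comment above refers to the freeze stage: between Init_Epoch and Freeze_Epoch the backbone feature extractor is kept fixed and only the RPN and detection head are updated. A minimal sketch of what that usually looks like, assuming the backbone is exposed as net.extractor (common in Faster R-CNN implementations, but not shown in this excerpt):

        # freeze the backbone so its weights are not updated during the first stage
        for param in net.extractor.parameters():
            param.requires_grad = False

        # ... train from Init_Epoch to Freeze_Epoch, then unfreeze for the second stage:
        for param in net.extractor.parameters():
            param.requires_grad = True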
Example #3
    #   Epoch is the total number of training epochs.
    #   If you hit OOM / run out of GPU memory, reduce Batch_size.
    #------------------------------------------------------#
    if True:
        lr = 1e-4
        Batch_size = 2
        Init_Epoch = 0
        Freeze_Epoch = 50

        optimizer = optim.Adam(net.parameters(), lr, weight_decay=5e-4)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                 step_size=1,
                                                 gamma=0.95)  # learning-rate decay schedule

        train_dataset = FRCNNDataset(lines[:num_train],
                                     (input_shape[0], input_shape[1]),
                                     is_train=True)  # apply data augmentation
        val_dataset = FRCNNDataset(
            lines[num_train:], (input_shape[0], input_shape[1]),
            is_train=False)  # no data augmentation; images are resized keeping the aspect ratio, the empty area is padded with 128 (gray)
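        # --- Illustration only (not part of the original example): the "resize keeping the
        # aspect ratio, pad the empty area with 128" step mentioned above is commonly
        # implemented as a letterbox resize like the following PIL-based sketch
        # (hypothetical helper name; requires `from PIL import Image`):
        #     def letterbox_image(image, size):
        #         iw, ih = image.size
        #         w, h = size
        #         scale = min(w / iw, h / ih)
        #         nw, nh = int(iw * scale), int(ih * scale)
        #         image = image.resize((nw, nh), Image.BICUBIC)
        #         new_image = Image.new('RGB', (w, h), (128, 128, 128))
        #         new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
        #         return new_image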
        gen = DataLoader(train_dataset,
                         shuffle=True,
                         batch_size=Batch_size,
                         num_workers=0,
                         pin_memory=True,
                         drop_last=True,
                         collate_fn=frcnn_dataset_collate)
        # pin_memory: puts the fetched data tensors into page-locked (pinned) memory, which speeds up transfer to a CUDA-capable GPU.
        # collate_fn: merges a list of samples into a mini-batch of Tensors; used when loading data in batches.
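        # --- Illustration only (not part of the original example): a collate function like
        # frcnn_dataset_collate typically stacks the images into one array while keeping the
        # per-image targets in a plain Python list, because each image can contain a different
        # number of ground-truth boxes (assumes `import numpy as np`):
        #     def frcnn_dataset_collate(batch):
        #         images  = np.array([item[0] for item in batch])
        #         targets = [item[1] for item in batch]
        #         return images, targets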
        gen_val = DataLoader(val_dataset,
                             shuffle=True,