def main():
    """Entry point: load config, prepare Stanford40 data, build a ResNet-34
    with pretrained weights from disk, then run training."""
    # Optionally override defaults from an external config file, then freeze
    # the config so it cannot be mutated later.
    if cfg.SYSTEM.UPDATE_CFG:
        cfg.merge_from_file(cfg.SYSTEM.CFG_FILE)
    cfg.freeze()  # freeze parameters
    print(cfg)

    vis.log('configs:\n {}'.format(cfg.clone()))

    # ---- data ----
    training_data, validation_data = prepare_stanford40(cfg.clone())
    #training_data, validation_data = prepare_pascal(cfg.clone())

    # ---- model ----
    net = resnet34(pretrained=False).cuda()
    # Weights are loaded from the path given in the config.
    net.load_state_dict(t.load(cfg.MODEL.PRETRAINED))

    # ---- optimizer & LR schedule ----
    opt = optim.Adam(net.parameters(), lr=cfg.OPTIM.LR)
    lr_sched = ReduceLROnPlateau(
        opt,
        mode='min',
        factor=cfg.OPTIM.LR_DECAY,
        patience=1,
        verbose=True,
        threshold=1e-4,
        threshold_mode='rel',
        cooldown=1,
        min_lr=0.00005,
        eps=1e-8,
    )

    # ---- train ----
    train(net, training_data, validation_data, opt, lr_sched, cfg.clone())
def main():
    """Entry point: load config, prepare Stanford40 data, build the
    `multi_two` model, then run training with cross-entropy loss."""
    # Optionally merge an external config file, then freeze the config.
    if cfg.SYSTEM.UPDATE_CFG:
        cfg.merge_from_file(cfg.SYSTEM.CFG_FILE)
    cfg.freeze()  # freeze parameters
    vis_env.log('configs:\n {}'.format(cfg.clone()))

    # Data.
    traindata, valdata = stanford40(cfg)

    # Model (pretrained backbone), moved to the GPU.
    net = model.multi_two(True).cuda()

    # Optimizer, cosine-annealed learning rate over 300 steps, and loss.
    optimizer = t.optim.Adagrad(net.parameters(), lr=cfg.OPTIM.LR)
    lr_strategy = t.optim.lr_scheduler.CosineAnnealingLR(optimizer, 300)
    lossfunc = t.nn.CrossEntropyLoss()

    train(net, traindata, valdata, optimizer, lossfunc, lr_strategy)
# Beispiel #3
# 0
def main():
    """Entry point: load config, prepare Stanford40 data, build the
    `twokey_copy` model, then run training with smooth-L1 loss."""
    # Optionally merge an external config file, then freeze the config.
    if cfg.SYSTEM.UPDATE_CFG:
        cfg.merge_from_file(cfg.SYSTEM.CFG_FILE)
    cfg.freeze()  # freeze parameters
    vis_env.log('configs:\n {}'.format(cfg.clone()))

    # Data.
    traindata, valdata = stanford40(cfg)

    # Model (pretrained); could also be resolved via getattr(model, cfg.MODEL.NAME).
    net = model.twokey_copy(True)
    #for param in net.layer1.parameters():
    #    param.requires_grad = False
    net = net.cuda()

    # Only optimize parameters that still require gradients.
    trainable = (p for p in net.parameters() if p.requires_grad)
    optimizer = t.optim.Adagrad(trainable, lr=cfg.OPTIM.LR)
    lr_strategy = t.optim.lr_scheduler.CosineAnnealingLR(optimizer, 50)
    lossfunc = t.nn.SmoothL1Loss()

    train(net, traindata, valdata, optimizer, lossfunc, lr_strategy)
# Beispiel #4
# 0
def _build_stanford_dataset(ann_file, proposal_file, img_norm_cfg):
    """Build a StanfordDataset for one split; only the annotation list and
    the proposal file differ between train and test."""
    return StanfordDataset(
        ann_file=ann_file,
        img_prefix='/home/share/LabServer/DATASET/stanford40/',
        img_scale=(224, 224),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        proposal_file=proposal_file,
        # NOTE(review): test_mode=False even for the test split — confirm intended.
        test_mode=False,
    )


def _build_loader(dataset, shuffle):
    """Wrap *dataset* in a DataLoader with the settings shared by both splits."""
    return DataLoader(dataset,
                      batch_size=10,
                      shuffle=shuffle,
                      sampler=None,
                      collate_fn=partial(collate, samples_per_gpu=2),
                      num_workers=4)


def main():
    """Main function: load config, build Stanford40 train/test loaders,
    build the proposal model, then run training."""
    # Optionally merge an external config file, then freeze the config.
    if cfg.SYSTEM.UPDATE_CFG:
        cfg.merge_from_file(cfg.SYSTEM.CFG_FILE)
    cfg.freeze()  # freeze parameters
    print(cfg)

    vis.log('configs:\n {}'.format(cfg.clone()))

    # Per-channel normalization constants applied to both splits.
    img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                        std=[58.395, 57.12, 57.375],
                        to_rgb=True)

    # Datasets and loaders for the Stanford40 test/train splits.
    val_dataset = _build_stanford_dataset(
        '/home/share/LabServer/DATASET/stanford40/ImageSplits/test.txt',
        '/home/share/LabServer/GLnet/stanford_test_bbox_new.pkl',
        img_norm_cfg)
    train_dataset = _build_stanford_dataset(
        '/home/share/LabServer/DATASET/stanford40/ImageSplits/train.txt',
        '/home/share/LabServer/GLnet/stanford_train_bbox_new.pkl',
        img_norm_cfg)
    train_loader = _build_loader(train_dataset, shuffle=True)
    val_loader = _build_loader(val_dataset, shuffle=False)

    # Model on the first GPU.
    model = proposalModel()
    model = model.to("cuda:0")
    #submodel = resnet50(True)
    #submodel = submodel.to("cuda:1")

    # Optimizer and plateau-based LR schedule.
    optimizer = optim.Adam(
        model.parameters(),
        lr=cfg.OPTIM.LR,
    )
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='min',
                                  factor=cfg.OPTIM.LR_DECAY,
                                  patience=1,
                                  verbose=True,
                                  threshold=1e-4,
                                  threshold_mode='rel',
                                  cooldown=1,
                                  min_lr=0.00005,
                                  eps=1e-8)

    train(model, train_loader, val_loader, optimizer, scheduler, cfg.clone())