def train(args):
    """Train a classification network using the global ``opt`` configuration.

    Builds the data loaders, network, optimizer, loss, and LR scheduler from
    ``opt``, wraps the model in DataParallel on GPU, and delegates the training
    loop to ``Solver.fit``.

    Args:
        args: parsed command-line arguments; logged for reproducibility.
    """
    # BUG FIX: pprint() writes to stdout and returns None, so the previous
    # logging.info(pprint(...)) recorded the literal string "None" in the log.
    # pformat() returns the formatted text so the config actually gets logged.
    from pprint import pformat

    logging.info('======= user config ======')
    logging.info(pformat(opt))
    logging.info(pformat(args))
    logging.info('======= end ======')

    train_data, valid_data = get_data_provider(opt)

    # Resolve the network class by name from the config.
    net = getattr(network, opt.network.name)(classes=opt.dataset.num_classes)

    # Resolve the optimizer class (e.g. SGD) by name from torch.optim.
    optimizer = getattr(torch.optim, opt.train.optimizer)(
        net.parameters(),
        lr=opt.train.lr,
        weight_decay=opt.train.wd,
        momentum=opt.train.momentum,
    )
    ce_loss = nn.CrossEntropyLoss()
    lr_scheduler = LRScheduler(
        base_lr=opt.train.lr,
        step=opt.train.step,
        factor=opt.train.factor,
        warmup_epoch=opt.train.warmup_epoch,
        warmup_begin_lr=opt.train.warmup_begin_lr,
    )

    net = nn.DataParallel(net)
    net = net.cuda()

    mod = Solver(opt, net)
    mod.fit(train_data=train_data, test_data=valid_data, optimizer=optimizer,
            criterion=ce_loss, lr_scheduler=lr_scheduler)
def train(args):
    """Train a re-identification network using the global ``opt`` configuration.

    Builds the data loaders, network, optimizer, and the loss function selected
    by ``opt.train.loss_fn`` (softmax, triplet, or their sum), then delegates
    the training loop to ``Solver.fit``.

    Args:
        args: parsed command-line arguments; logged for reproducibility.

    Raises:
        ValueError: if ``opt.train.loss_fn`` is not one of the supported names.
    """
    # BUG FIX: pprint() writes to stdout and returns None, so the previous
    # logging.info(pprint(...)) recorded the literal string "None" in the log.
    # pformat() returns the formatted text so the config actually gets logged.
    from pprint import pformat

    logging.info('======= user config ======')
    logging.info(pformat(opt))
    logging.info(pformat(args))
    logging.info('======= end ======')

    train_data, test_data, num_query = get_data_provider(opt)

    net = getattr(network, opt.network.name)(opt.dataset.num_classes,
                                             opt.network.last_stride)
    net = nn.DataParallel(net).cuda()

    # NOTE: no momentum is passed here (unlike the classification trainer);
    # the configured optimizer must accept (params, lr, weight_decay).
    optimizer = getattr(torch.optim, opt.train.optimizer)(
        net.parameters(), lr=opt.train.lr, weight_decay=opt.train.wd)

    ce_loss = nn.CrossEntropyLoss()
    triplet_loss = TripletLoss(margin=opt.train.margin)

    # Each candidate loss takes (scores, feat, labels) so Solver can call them
    # uniformly; TripletLoss returns a tuple whose first element is the loss.
    def ce_loss_func(scores, feat, labels):
        return ce_loss(scores, labels)

    def tri_loss_func(scores, feat, labels):
        return triplet_loss(feat, labels)[0]

    def ce_tri_loss_func(scores, feat, labels):
        return ce_loss(scores, labels) + triplet_loss(feat, labels)[0]

    if opt.train.loss_fn == 'softmax':
        loss_fn = ce_loss_func
    elif opt.train.loss_fn == 'triplet':
        loss_fn = tri_loss_func
    elif opt.train.loss_fn == 'softmax_triplet':
        loss_fn = ce_tri_loss_func
    else:
        raise ValueError('Unknown loss func {}'.format(opt.train.loss_fn))

    lr_scheduler = LRScheduler(
        base_lr=opt.train.lr,
        step=opt.train.step,
        factor=opt.train.factor,
        warmup_epoch=opt.train.warmup_epoch,
        warmup_begin_lr=opt.train.warmup_begin_lr,
    )

    mod = Solver(opt, net)
    mod.fit(train_data=train_data, test_data=test_data, num_query=num_query,
            optimizer=optimizer, criterion=loss_fn, lr_scheduler=lr_scheduler)
def test(args):
    """Evaluate a trained re-identification network on the test split.

    Restores model weights from ``args.load_model`` (a checkpoint with a
    ``'state_dict'`` key) and runs ``Solver.test_func`` on the test loader.

    Args:
        args: parsed command-line arguments; must provide ``load_model``.
    """
    # BUG FIX: pprint() writes to stdout and returns None, so the previous
    # logging.info(pprint(...)) recorded the literal string "None" in the log.
    # pformat() returns the formatted text so the config actually gets logged.
    from pprint import pformat

    logging.info('======= user config ======')
    logging.info(pformat(opt))
    logging.info(pformat(args))
    logging.info('======= end ======')

    train_data, test_data, num_query = get_data_provider(opt)

    net = getattr(network, opt.network.name)(opt.dataset.num_classes,
                                             opt.network.last_stride)
    # Load weights BEFORE wrapping in DataParallel so the checkpoint keys
    # (saved without the 'module.' prefix) line up with the bare model.
    net.load_state_dict(torch.load(args.load_model)['state_dict'])
    net = nn.DataParallel(net).cuda()

    mod = Solver(opt, net)
    mod.test_func(test_data, num_query)
def train(args):
    """Train a ResNet18 classifier (10 classes) driven entirely by ``args``.

    Wires up the CIFAR-style data loaders, SGD optimizer, and cross-entropy
    loss, optionally moves the model to GPU, and hands control to
    ``Solver.fit`` for the actual training loop.

    Args:
        args: parsed command-line arguments supplying batch size, optimizer
            hyper-parameters, GPU flag, and checkpoint/printing cadence.
    """
    train_loader, val_loader, _combined_loader = get_data_provider(args.bs)

    model = network.ResNet18(num_classes=10)

    sgd = torch.optim.SGD(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.wd,
        momentum=args.momentum,
    )
    criterion = nn.CrossEntropyLoss()

    # Wrap for multi-GPU; move to CUDA only when the user asked for it.
    model = nn.DataParallel(model)
    if args.use_gpu:
        model = model.cuda()

    solver = Solver(model, args.use_gpu)
    solver.fit(
        train_data=train_loader,
        test_data=val_loader,
        optimizer=sgd,
        criterion=criterion,
        num_epochs=args.epochs,
        print_interval=args.print_interval,
        eval_step=args.eval_step,
        save_step=args.save_step,
        save_dir=args.save_dir,
    )