# Example 1
 def __init__(self, model):
     """Assemble the loss criteria and the optimizers for `model`."""
     super(Loss, self).__init__()

     self.model = model

     # Criteria reused by the individual loss terms.
     self.tanh = Tanh()
     self.l1_loss = L1Loss()
     self.bce_loss = BCELoss()
     self.cross_entropy_loss = CrossEntropyLoss()

     # Two optimizers come back from get_optimizer: the main one and a
     # second one (presumably for a discriminator, given the _D suffix).
     self.optimizer, self.optimizer_D = get_optimizer(model)
# Example 2
 def __init__(self, model, loss, data):
     """Cache loaders/datasets and prepare the parallel model for training."""
     # Loaders and datasets provided by the `data` container.
     self.train_loader = data.train_loader
     self.test_loader = data.test_loader
     self.query_loader = data.query_loader
     self.testset, self.queryset = data.testset, data.queryset

     # Wrap for multi-GPU. NOTE: the optimizer is built from the wrapped
     # module (the original rebound `model` before calling get_optimizer).
     wrapped = nn.DataParallel(model)
     self.model = wrapped.to('cuda')
     self.loss = loss
     self.optimizer = get_optimizer(wrapped)
     # Step the learning rate down by 10x at each milestone in opt.lr_scheduler.
     self.scheduler = lr_scheduler.MultiStepLR(
         self.optimizer, milestones=opt.lr_scheduler, gamma=0.1)
# Example 3
    def __init__(self, model):
        """Build the loss module: batch bookkeeping, criteria and optimizers."""
        super(Loss, self).__init__()

        # Effective mini-batch size: identities per batch x images per identity.
        self.batch_size = opt.batchid * opt.batchimage
        # NOTE(review): 8 granularities — confirm against the model definition.
        self.num_gran = 8

        # Criteria reused by the individual loss terms.
        self.tanh = Tanh()
        self.l1_loss = L1Loss()
        self.bce_loss = BCELoss()
        self.cross_entropy_loss = CrossEntropyLoss()

        self.model = model
        # Three optimizers are returned here (main net plus two auxiliaries,
        # presumably discriminators, given the _D/_DC suffixes).
        self.optimizer, self.optimizer_D, self.optimizer_DC = get_optimizer(model)
# Example 4
 def __init__(self, model, loss, data, start_epoch=-1):
     """Bundle loaders, model, loss and optimizer state for a training run.

     start_epoch is kept as given (default -1); its interpretation is up to
     the training loop.
     """
     # Loaders and datasets provided by the `data` container.
     self.train_loader = data.train_loader
     self.test_loader = data.test_loader
     self.query_loader = data.query_loader
     self.testset, self.queryset = data.testset, data.queryset

     self.model = model.cuda()
     self.loss = loss
     self.flag = True
     self.start_epoch = start_epoch

     # Learning rate decays by 10x at each milestone in opt.lr_scheduler.
     self.optimizer = get_optimizer(model)
     self.scheduler = lr_scheduler.MultiStepLR(
         self.optimizer, milestones=opt.lr_scheduler, gamma=0.1)
# Example 5
    def __init__(self, model, loss, data, optimizer=None, scheduler=None):
        """Set up loaders, the (optionally parallel) model, loss and optimization.

        optimizer/scheduler may be injected by the caller; otherwise project
        defaults are constructed.
        """
        # Loaders and datasets provided by the `data` container.
        self.train_loader = data.train_loader
        self.test_loader = data.test_loader
        self.query_loader = data.query_loader
        self.testset = data.testset
        self.queryset = data.queryset

        # Use DataParallel whenever at least one GPU id is configured.
        if len(opt.gpu_ids) > 0:
            model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
        self.model = model.to(opt.base_device)

        self.loss = loss
        # `or` matches the original truthiness fallback to project defaults.
        self.optimizer = optimizer or get_optimizer(model)
        self.scheduler = scheduler or lr_scheduler.MultiStepLR(
            self.optimizer, milestones=opt.lr_scheduler, gamma=opt.lr_gamma)
# Example 6
def train_normal(args, num_epoch, model_save=None, model_load=None, imgSaver=None, EvaluatorIn=None):
    """Run a standard training session and return its logs and chosen metric.

    args        -- configuration mapping (reads 'valid_dataset',
                   'train_with_metric', 'save_best_loss_name',
                   'save_on_best_total_training_loss')
    num_epoch   -- number of epochs forwarded to train()
    model_save  -- optional path to write the final state dict to
    model_load  -- optional path of a state dict to warm-start from
    imgSaver    -- optional image-saving hook forwarded to train()
    EvaluatorIn -- optional Evaluator class used instead of the default

    Returns (valid_logs, train_logs, valid_metric).
    """
    evaluator = Evaluator(args) if EvaluatorIn is None else EvaluatorIn(args)

    model = get_model(args)
    losses = get_losses(args)
    optimizer = get_optimizer(args, model)
    scheduler = get_scheduler(args, optimizer)
    valid_metric = None
    main_metric = evaluator.main_metric

    # Warm-start when a checkpoint path is given; strict=False tolerates
    # missing/extra keys in the loaded state dict.
    if model_load is not None:
        model.load_state_dict(torch.load(model_load), strict=False)

    train_dataset, valid_dataset = get_train_val_datasets(args)

    valid_logs, train_logs, best_loss_valid_metric = train(
        model, optimizer, scheduler, losses, train_dataset,
        valid_dataset, num_epoch, args, evaluator, imgSaver)

    # Select the reported metric. The branches below intentionally override
    # one another in this order — do not reorder them.
    if args['valid_dataset'] and args['train_with_metric']:
        valid_metric = valid_logs['metrics'][main_metric]
    if args['save_best_loss_name'] is not None or args['save_on_best_total_training_loss']:
        valid_metric = best_loss_valid_metric
    if not args['valid_dataset']:
        # No validation set: report training metrics/logs instead.
        valid_metric = train_logs['metrics'][main_metric]
        valid_logs = train_logs

    if model_save is not None:
        torch.save(model.state_dict(), model_save)

    return valid_logs, train_logs, valid_metric
# Example 7
        ######################### evaluation without rank##########################
        # Pairwise distance matrix between query features (qf) and gallery
        # features (gf) — assumes both are 2-D feature arrays; TODO confirm.
        dist = cdist(qf, gf)

        # Per the print below: r holds cumulative match rates indexed by rank
        # (r[0] = rank-1, r[2] = rank-3, ...), m_ap is the mean average precision.
        r, m_ap = rank(dist)

        print('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
              .format(m_ap, r[0], r[2], r[4], r[9]))


if __name__ == '__main__':

    # Build the data pipeline, network, loss and optimization objects.
    data = Data()
    model = REID_NET()
    #model = model.to('cuda')
    loss_function = Loss()
    optimizer = get_optimizer(model)
    # Learning rate decays by 10x at each milestone in opt.lr_scheduler.
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=opt.lr_scheduler, gamma=0.1)

    if opt.mode == 'train':

        for epoch in range(1, opt.epoch + 1):
            print('\nepoch', epoch)
            train(model,data.train_loader,scheduler,optimizer,loss_function)
            # Evaluate and checkpoint every 50 epochs.
            if epoch % 50 == 0:
                print('\nstart evaluate')
                evaluate(model,data.query_loader,data.test_loader,data.queryset,data.testset)
                os.makedirs('weights', exist_ok=True)
                torch.save(model.state_dict(), ('weights/model_{}.pt'.format(epoch)))

    # NOTE(review): this branch continues beyond the visible chunk.
    if opt.mode == 'evaluate':
        print('start evaluate')