Example #1
        if args.multi_gpu:
            # wrap both student and teacher models for multi-GPU training
            model = DataParallel(model)
            teacher_model = DataParallel(teacher_model)

        model = model.cuda()
        teacher_model = teacher_model.cuda()
       
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0005)

    # load the pre-trained teacher model weights
    if args.teacher_resume and args.teacher_resume_folder is not None:
        teacher_model_path = os.path.join(args.teacher_resume_folder, ('_'.join(['teacher', args.train_data, args.test_data, args.teacher_backbone, 'max_acc']) + '.pt'))
        teacher_state = torch.load(teacher_model_path)
        if args.multi_gpu:
            teacher_model.module.load_state_dict(teacher_state)
        else:
            teacher_model.load_state_dict(teacher_state)
    
    # resume training from a checkpoint
    if args.resume and args.resume_folder is not None:
        # load checkpoint
        checkpoint_path = os.path.join(args.resume_folder, ('_'.join([args.model_name, args.train_data, args.test_data, args.backbone, 'max_acc']) + '_checkpoint.pt.tar'))    # tag='max_acc' can be changed
        state = torch.load(checkpoint_path)
        if args.multi_gpu:
            model.module.load_state_dict(state['model'])
        else:
            model.load_state_dict(state['model'])
    
        train_log = state['train_log']
        optimizer.load_state_dict(state['optimizer'])
        initial_lr = optimizer.param_groups[0]['lr']
        # all_task_count: total number of tasks trained before the current epoch
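For reference, a minimal sketch (not part of the original example) of the save side that would produce the teacher weights loaded by the teacher_resume branch above. The save_folder argument is an assumption; the filename pattern and the plain state_dict format mirror what the loading code reads.

# Hedged sketch: save teacher weights in the format the teacher_resume branch expects.
# args.save_folder is an assumed argument name, not taken from the example above.
teacher_state = (teacher_model.module.state_dict()
                 if args.multi_gpu else teacher_model.state_dict())
teacher_model_path = os.path.join(
    args.save_folder,
    '_'.join(['teacher', args.train_data, args.test_data,
              args.teacher_backbone, 'max_acc']) + '.pt')
torch.save(teacher_state, teacher_model_path)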
Example #2
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=0.0005)

    # resume training from a checkpoint
    if args.resume and args.resume_folder is not None:
        # load checkpoint
        checkpoint_path = os.path.join(args.resume_folder, ('_'.join([
            args.model_name, args.train_data, args.test_data, args.backbone,
            'max_acc'
        ]) + '_checkpoint.pt.tar'))  # tag='max_acc' can be changed
        state = torch.load(checkpoint_path)
        if args.multi_gpu:
            model.module.load_state_dict(state['model'])
        else:
            model.load_state_dict(state['model'])

        train_log = state['train_log']
        optimizer.load_state_dict(state['optimizer'])
        initial_lr = optimizer.param_groups[0]['lr']
        # all_task_count: total number of tasks trained before the current epoch
        all_task_count = state['all_task_count']

        print('all_task_count: {}, initial_lr: {}'.format(
            str(all_task_count), str(initial_lr)))

    # training from scratch
    else:
        train_log = {}
        train_log['args'] = vars(args)
        train_log['train_loss'] = []
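As a companion to the resume logic in both examples, a minimal sketch of how a matching checkpoint could be written. The checkpoint_folder variable is an assumption; the dict keys ('model', 'optimizer', 'train_log', 'all_task_count') and the filename mirror what the resume branch reads back.

# Hedged sketch: write a checkpoint that the resume branch above can load.
# checkpoint_folder is an assumed variable name; the keys match the loading code.
state = {
    'model': (model.module.state_dict() if args.multi_gpu
              else model.state_dict()),
    'optimizer': optimizer.state_dict(),
    'train_log': train_log,
    'all_task_count': all_task_count,
}
checkpoint_path = os.path.join(
    checkpoint_folder,
    '_'.join([args.model_name, args.train_data, args.test_data,
              args.backbone, 'max_acc']) + '_checkpoint.pt.tar')
torch.save(state, checkpoint_path)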