Code example #1
# Assumed imports for this excerpt; the project-specific pieces
# (se_resnet101_xuelang, train, data_set, dataloader, save_dir) are
# defined elsewhere in the repository.
import logging
import torch
from torch import optim
from torch.nn import CrossEntropyLoss
from torch.optim import lr_scheduler

# ---- model ----

model = se_resnet101_xuelang(num_classes=160)
# model =resnet50(pretrained=False)
# model.avgpool =  torch.nn.AdaptiveAvgPool2d(output_size=1)
# model.fc = torch.nn.Linear(model.fc.in_features,2)
# model = inceptionv4(num_classes=160)
model = torch.nn.DataParallel(model)
base_lr = 0.001
resume = None

if resume:
    logging.info('resuming finetune from %s' % resume)
    model.load_state_dict(torch.load(resume))
model = model.cuda()

optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=1e-5)
criterion = CrossEntropyLoss()
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)

train(model,
      epoch_num=50,
      start_epoch=0,
      optimizer=optimizer,
      criterion=criterion,
      exp_lr_scheduler=exp_lr_scheduler,
      data_set=data_set,
      data_loader=dataloader,
      save_dir=save_dir,
      print_inter=50,
      val_inter=400)
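Note that in this snippet the checkpoint is loaded after the model has been wrapped in torch.nn.DataParallel, so the state dict on disk is expected to carry "module."-prefixed keys. If a checkpoint saved from an unwrapped model has to be resumed instead, the prefixes need reconciling first; a minimal sketch (the remap_keys helper is hypothetical, not part of the snippet above):

import torch

def remap_keys(state_dict, wrapped=True):
    """Add or strip the 'module.' prefix so a checkpoint matches the target model.

    wrapped=True  -> target model is wrapped in DataParallel (add the prefix)
    wrapped=False -> target model is a plain module (strip the prefix)
    """
    new_state = {}
    for key, value in state_dict.items():
        if wrapped and not key.startswith('module.'):
            new_state['module.' + key] = value
        elif not wrapped and key.startswith('module.'):
            new_state[key[len('module.'):]] = value
        else:
            new_state[key] = value
    return new_state

# usage sketch:
# state = torch.load(resume, map_location='cpu')
# model.load_state_dict(remap_keys(state, wrapped=isinstance(model, torch.nn.DataParallel)))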
Code example #2
        # (excerpt starts mid-function: the preceding lines build `model` and
        # check opt.resume before restoring the saved weights)
        model.load_state_dict(torch.load(opt.resume))
    model = torch.nn.DataParallel(model)
    model = model.cuda()
    if opt.optim == "sgd":
        optimizer = optim.SGD(model.parameters(),
                              lr=opt.learning_rate,
                              momentum=0.9,
                              weight_decay=1e-5)
    elif opt.optim == "adam":
        optimizer = optim.Adam(model.parameters(),
                               lr=opt.learning_rate,
                               weight_decay=1e-5)

    if opt.loss == "CrossEntropyLoss":
        criterion = CrossEntropyLoss()
    else:
        criterion = FocalLoss(class_num=61)

    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)

    train(model,
          epoch_num=opt.epochs,
          start_epoch=opt.start_epoch,
          optimizer=optimizer,
          criterion=criterion,
          exp_lr_scheduler=exp_lr_scheduler,
          data_set=data_set,
          data_loader=dataloader,
          save_dir=save_dir,
          print_inter=opt.print_interval,
          val_inter=opt.save_checkpoint_val_interval)
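FocalLoss(class_num=61) above comes from the project itself and its definition is not shown here. As a point of reference, a minimal multi-class focal loss in the spirit of Lin et al. (2017) could look like the sketch below; the class name mirrors the call site, but the gamma/alpha defaults and the implementation are assumptions, not the repository's code.

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    """Minimal multi-class focal loss: (1 - p_t)^gamma * cross entropy."""

    def __init__(self, class_num, gamma=2.0, alpha=None):
        super().__init__()
        self.class_num = class_num
        self.gamma = gamma
        self.alpha = alpha  # optional per-class weights, shape (class_num,)

    def forward(self, logits, targets):
        # per-sample cross entropy, no reduction
        ce = F.cross_entropy(logits, targets, weight=self.alpha, reduction='none')
        # p_t is the probability the model assigns to the true class
        pt = torch.exp(-ce)
        loss = (1.0 - pt) ** self.gamma * ce
        return loss.mean()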
Code example #3
def main(args):
    # --------------------------------------------------------------------------
    # DATA
    logger.info('-' * 100)
    logger.info('Load data files')
    train_exs = util.load_data(args, args.train_file)
    logger.info('Num train examples = %d' % len(train_exs))
    dev_exs = util.load_data(args, args.dev_file)
    logger.info('Num dev examples = %d' % len(dev_exs))

    #test_exs = util.load_data(args, args.test_file)
    #logger.info('Num test examples = %d' % len(test_exs))

    # --------------------------------------------------------------------------
    # MODEL
    logger.info('-' * 100)
    start_epoch = 0
    if args.checkpoint and os.path.isfile(args.model_file + '.checkpoint'):
        # Just resume training, no modifications.
        logger.info('Found a checkpoint...')
        checkpoint_file = args.model_file + '.checkpoint'
        model, start_epoch = DocReader.load_checkpoint(checkpoint_file, args)
    else:
        # Training starts fresh. But the model state is either pretrained or
        # newly (randomly) initialized.
        if args.pretrained:
            logger.info('Using pretrained model...')
            model = DocReader.load(args.pretrained, args)
            if args.expand_dictionary:
                logger.info('Expanding dictionary for new data...')
                # Add words in training + dev examples
                words = util.load_words(args, train_exs + dev_exs)
                added_words = model.expand_dictionary(words)
                # Load pretrained embeddings for added words
                if args.words_embedding_file:
                    model.load_embeddings(added_words,
                                          args.words_embedding_file)
                logger.info('Expanding char dictionary for new data...')
                # (the corresponding char-dictionary expansion code is not
                # included in this excerpt)
        else:
            logger.info('Training model from scratch...')
            model = util.init_from_scratch(args, train_exs, dev_exs)
            # Set up partial tuning of embeddings
            if args.tune_partial > 0:
                logger.info('-' * 100)
                logger.info('Counting %d most frequent question words' %
                            args.tune_partial)
                top_words = util.top_words(args, train_exs, model.word_dict)
                for word in top_words[:5]:
                    logger.info(word)
                logger.info('...')
                for word in top_words[-6:-1]:
                    logger.info(word)
                model.tune_embeddings([w[0] for w in top_words])

            # Set up optimizer
            model.init_optimizer()
    # Use the GPU?
    if args.cuda:
        model.cuda()

    # Use multiple GPUs?
    if args.parallel:
        model.parallelize()
    # --------------------------------------------------------------------------
    # DATA ITERATORS
    # Two datasets: train and dev. If we sort by length it's faster.
    logger.info('-' * 100)
    logger.info('Make data loaders')
    train_dataset = data.ReaderDataset(train_exs, model)

    if args.sort_by_len:
        train_sampler = data.SortedBatchSampler(train_dataset.lengths(),
                                                args.batch_size,
                                                shuffle=True)
    else:
        train_sampler = torch.utils.data.sampler.RandomSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.data_workers,
        collate_fn=vector.batchify,
        # pin_memory= args.cuda,
    )
    dev_dataset = data.ReaderDataset(dev_exs, model)
    if args.sort_by_len:
        dev_sampler = data.SortedBatchSampler(dev_dataset.lengths(),
                                              args.dev_batch_size,
                                              shuffle=False)
    else:
        dev_sampler = torch.utils.data.sampler.SequentialSampler(dev_dataset)
    dev_loader = torch.utils.data.DataLoader(
        dev_dataset,
        batch_size=args.dev_batch_size,
        sampler=dev_sampler,
        num_workers=args.data_workers,
        collate_fn=vector.batchify,
        # pin_memory=args.cuda,
    )
    # -------------------------------------------------------------------------
    # PRINT CONFIG
    logger.info('-' * 100)
    logger.info('CONFIG:\n%s' %
                json.dumps(vars(args), indent=4, sort_keys=True))

    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # TRAIN/VALID LOOP
    logger.info('-' * 100)
    logger.info('Starting training...')
    stats = {
        'timer': tool.Timer(),
        'epoch': 0,
        'best_f1_score': 0,
        'best_em_score': 0
    }
    train_saver = tool.DataSaver(args.model_name, "train")
    dev_saver = tool.DataSaver(args.model_name, "dev")
    for epoch in range(start_epoch, args.num_epochs):
        stats['epoch'] = epoch
        # Train
        train(args, train_loader, model, stats, train_saver)

        # Validate unofficial (train)
        validate_official(args, train_loader, model, stats, train_saver)

        # Validate unofficial (dev)
        result = validate_official(args, dev_loader, model, stats, dev_saver)

        # Save best valid
        if args.valid_metric is None or args.valid_metric == 'no':
            model.save(args.model_file)
        # {'exact_match': exact_match.avg,"f1_score":f1_score_avg.avg}
        if result['exact_match'] > stats['best_em_score']:
            stats['best_em_score'] = result['exact_match']
        if result['f1_score'] > stats['best_f1_score']:
            stats['best_f1_score'] = result['f1_score']
        logger.info(
            'Best f1_score = %.2f Best em_score: = %.2f (epoch %d, %d updates)'
            % (stats['best_f1_score'], stats['best_em_score'], stats['epoch'],
               model.updates))
        model.save(args.model_file)
    # save trained data
    train_saver.save(args.model_dir)
    dev_saver.save(args.model_dir)
    return model
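data.SortedBatchSampler above is project code: it orders examples so that each batch contains items of similar length, which keeps padding overhead low. A simplified stand-in with the same idea, sorting indices by length, cutting them into batches and optionally shuffling the batch order, might look like this (the class name and constructor signature mirror the call site above, but the body is an assumption, not the project's implementation):

import numpy as np
import torch
from torch.utils.data.sampler import Sampler

class SortedBatchSampler(Sampler):
    """Yield dataset indices so that each run of batch_size consecutive
    indices has similar lengths; the order of the batches can be shuffled."""

    def __init__(self, lengths, batch_size, shuffle=True):
        self.lengths = list(lengths)
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __iter__(self):
        order = np.argsort(self.lengths)  # short -> long
        batches = [order[i:i + self.batch_size]
                   for i in range(0, len(order), self.batch_size)]
        if self.shuffle:
            np.random.shuffle(batches)  # batches stay homogeneous, only their order changes
        return iter(int(i) for batch in batches for i in batch)

    def __len__(self):
        return len(self.lengths)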
Code example #4
# (excerpt starts mid-call: the optimizer construction, e.g.
#  optim.SGD(model.parameters(), ...), is cut off above)
                          lr=base_lr,
                          weight_decay=weight_decay,
                          momentum=0.9)
criterion = CrossEntropyLoss()
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.33)
#exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.5,patience=4)
#exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones = [10,20,30,40], gamma=0.1)
#exp_lr_scheduler = lr_scheduler.CosineAnnealingLR(optimizer,T_max=5,eta_min=4e-08)

iter_per_epoch = len(data_set['train']) // opt.batchsize
#print (iter_per_epoch)

train(model,
      model_name=opt.model + '_' + str(opt.width) + '_' + str(opt.start) +
      '_' + str(opt.cn),
      epoch_num=100,
      start_epoch=start_epoch,
      optimizer=optimizer,
      criterion=criterion,
      exp_lr_scheduler=exp_lr_scheduler,
      data_set=data_set,
      data_loader=dataloader,
      save_dir=opt.save_dir,
      cls_number=cls_number,
      print_inter=iter_per_epoch // 4,
      val_inter=iter_per_epoch,
      mixup=opt.mixup,
      label_smoothing=opt.label_smoothing,
      focal_loss=opt.focal_loss)
torch.cuda.empty_cache()
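The mixup=opt.mixup flag passed to train() is handled inside the project's training loop, which is not shown in these excerpts. For reference, the core of mixup (Zhang et al., 2018) applied to a single batch is small; a hedged sketch, with mixup_batch being a hypothetical helper name:

import numpy as np
import torch

def mixup_batch(inputs, targets, alpha=0.2):
    """Blend each sample with a randomly permuted partner.

    Returns the mixed inputs, both target sets and the mixing weight lam;
    the loss is then lam * CE(out, targets_a) + (1 - lam) * CE(out, targets_b).
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(inputs.size(0), device=inputs.device)
    mixed = lam * inputs + (1.0 - lam) * inputs[index]
    return mixed, targets, targets[index], lam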
Code example #5
def train_main(x_train, x_test, y_train, y_test, model_times=0):
    if opt.process:
        #x_train = x_train[:1000]
        #x_test = x_test[:100]
        print(np.mean(x_train), np.mean(x_test), np.min(x_train), np.min(x_test), np.max(x_train), np.max(x_test))
        x_train, x_test = process_data(x_train, x_test)
        print(np.mean(x_train), np.mean(x_test), np.min(x_train), np.min(x_test), np.max(x_train), np.max(x_test))

    if opt.cn == 3:
        data_transforms = {
            'train': transforms.Compose([
                transforms.ToPILImage(),
                transforms.RandomRotation(degrees=45, resample=Image.BICUBIC),
                #transforms.RandomRotation(degrees=30, resample=Image.BICUBIC),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
                #transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
                transforms.RandomResizedCrop(target_size, scale=(0.64, 1.0)),
                #transforms.RandomResizedCrop(target_size, scale=(0.36, 1.0)),
                transforms.ToTensor(),
                #transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
            ]),
            'val': transforms.Compose([
                transforms.ToPILImage(),
                #transforms.Resize(org_size),
                #transforms.CenterCrop(target_size),
                transforms.ToTensor(),
                #transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
            ])
        }
    else:
        data_transforms = {
            'train': Compose([
                #RandomRotate((0, 45), bound=True),
                RandomRotate((0, 45)),
                RandomHVShift(),
                RandomHflip(),
                RandomVflip(),
                RandomErasing(),
                #Resize((target_size, target_size)),
                RandomResizedCrop((target_size, target_size)),
                #Normalize()
            ]),
            'val': Compose([
                Resize((target_size, target_size)),
                #Normalize()
                #CenterCrop((target_size, target_size)),
            ])
        }


    #traindir = r'/media/disk1/fordata/web_server/multiGPU/cccccc/cloud/train/' ##train_dir
    #train_dataset = datasets.ImageFolder(traindir,data_transforms['train'])
    #test_dataset = datasets.ImageFolder(traindir,data_transforms['val'])

    train_x = torch.stack([torch.Tensor(i) for i in x_train])
    train_y = torch.Tensor(y_train)
    #train_y = torch.stack([torch.Tensor(i) for i in y_train])

    val_x = torch.stack([torch.Tensor(i) for i in x_test])
    val_y = torch.Tensor(y_test)
    #val_y = torch.stack([torch.Tensor(i) for i in y_test])


    #train_dataset = torch.utils.data.TensorDataset(train_x,train_y)
    #valid_dataset = torch.utils.data.TensorDataset(val_x,val_y)
    train_dataset = myTensorDataset(train_x,train_y,data_transforms['train'])
    valid_dataset = myTensorDataset(val_x,val_y,data_transforms['val'])

    data_set = {}
    data_set['train'] = train_dataset
    data_set['val'] = valid_dataset

    dataloader = {}
    dataloader['train'] = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                                   shuffle=True, num_workers=16)
    dataloader['val'] = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size,
                                                   shuffle=False, num_workers=16)
    ####print the mean and std of dataset
    #print (get_mean_and_std(train_dataset,cn=opt.cn))
    #print (get_mean_and_std(valid_dataset,cn=opt.cn))

    model,start_epoch = load_model(model_name=opt.model,resume=opt.resume,start_epoch=opt.start_epoch,cn=opt.cn, \
                   save_dir=opt.save_dir,width=opt.width,start=opt.start,cls_number=cls_number,avg_number=opt.avg_number, \
                   gpus=opt.gpus,model_times=model_times,kfold=opt.kfold)

    base_lr = opt.baselr
    weight_decay = opt.wd

    load_model_flag = False
    if load_model_flag:
        conv1_params = list(map(id, model.conv1.parameters()))
        fc_params = list(map(id, model.fc.parameters()))
        base_params = filter(lambda p: id(p) not in conv1_params + fc_params,model.parameters())
        optimizer = optim.Adam([{'params': base_params},{'params': model.conv1.parameters(), 'lr': base_lr * 10}, \
                                {'params': model.fc.parameters(), 'lr': base_lr * 10}
                               ], lr=base_lr, weight_decay=weight_decay, amsgrad=True)
    else:
        #optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay, amsgrad=True)
        if opt.optimizer == 'Adam':
            optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay, amsgrad=True)
        elif opt.optimizer == 'SGD':
            optimizer = optim.SGD(model.parameters(), lr=base_lr, weight_decay=weight_decay,momentum=0.9)

    criterion = CrossEntropyLoss()
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.33)
    #exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.5,patience=4)
    #exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones = [10,20,30,40], gamma=0.1)
    #exp_lr_scheduler = lr_scheduler.CosineAnnealingLR(optimizer,T_max=5,eta_min=4e-08)

    iter_per_epoch = len(data_set['train']) // opt.batchsize
    #print (iter_per_epoch)
    model_name = opt.model + '_' + str(opt.width) + '_' + str(opt.start) + '_' + str(opt.cn)
    if opt.kfold > 1:
        model_name = str(model_times) + '_' + model_name
    
    train(model,
          model_name=model_name,
          end_epoch=opt.end_epoch,
          start_epoch=start_epoch,
          optimizer=optimizer,
          criterion=criterion,
          exp_lr_scheduler=exp_lr_scheduler,
          data_set=data_set,
          data_loader=dataloader,
          save_dir=opt.save_dir,
          cls_number=cls_number,
          print_inter=iter_per_epoch // 4,
          val_inter=iter_per_epoch,
          mixup=opt.mixup,
          label_smoothing=opt.label_smoothing,
          focal_loss=opt.focal_loss
          )
    torch.cuda.empty_cache()
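Likewise, label_smoothing=opt.label_smoothing is consumed inside train() and the excerpt does not show how. A minimal label-smoothing cross entropy, as one common way this is implemented (the class and its eps default are assumptions, not the repository's code):

import torch
import torch.nn as nn
import torch.nn.functional as F

class LabelSmoothingCrossEntropy(nn.Module):
    """Cross entropy against a smoothed target distribution:
    (1 - eps) weight on the true class, eps spread uniformly over all classes."""

    def __init__(self, eps=0.1):
        super().__init__()
        self.eps = eps

    def forward(self, logits, targets):
        log_probs = F.log_softmax(logits, dim=-1)
        # negative log-likelihood of the true class per sample
        nll = -log_probs.gather(dim=-1, index=targets.unsqueeze(1)).squeeze(1)
        # uniform component: average negative log-probability over all classes
        smooth = -log_probs.mean(dim=-1)
        return ((1.0 - self.eps) * nll + self.eps * smooth).mean()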
Code example #6
File: train.py  Project: Chen94yue/Test
# (excerpt starts mid-call: the optimizer is being built from several
#  parameter groups; the classifier, classifier_swap and Convmask heads
#  listed below train at 10x the base learning rate)
        'lr': base_lr * 10
    },
    {
        'params': model.module.classifier_swap.parameters(),
        'lr': base_lr * 10
    },
    {
        'params': model.module.Convmask.parameters(),
        'lr': base_lr * 10
    },
],
                      lr=base_lr,
                      momentum=cfg['momentum'])
criterion = CrossEntropyLoss()
exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                       step_size=cfg['step_size'],
                                       gamma=cfg['gamma'])
train(
    cfg,
    model,
    epoch_num=cfg['epoch'],
    start_epoch=0,
    optimizer=optimizer,
    criterion=criterion,
    exp_lr_scheduler=exp_lr_scheduler,
    data_set=data_set,
    data_loader=dataloader,
    save_dir=save_dir,
    val_inter=int(cfg['numimage'] / cfg['batch_size']),
)
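The truncated call above illustrates per-parameter-group learning rates: newly added head modules get 10x the base rate while the backbone keeps lr=base_lr. A self-contained sketch of the same pattern on a generic torchvision model (the head_param_ids/base_params names are illustrative, not from the project):

import torch
from torch import optim
import torchvision

model = torchvision.models.resnet18(num_classes=10)

# the freshly initialised classification head trains at 10x the base lr
head_param_ids = set(id(p) for p in model.fc.parameters())
base_params = [p for p in model.parameters() if id(p) not in head_param_ids]

base_lr = 0.001
optimizer = optim.SGD(
    [
        {'params': base_params},                              # uses the default lr below
        {'params': model.fc.parameters(), 'lr': base_lr * 10},
    ],
    lr=base_lr,
    momentum=0.9,
)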
Code example #7
# model.fc = torch.nn.Linear(model.fc.in_features,100)
model = multiscale_resnet(num_class=100)
base_lr = 0.001
resume = None
if resume:
    logging.info('resuming finetune from %s' % resume)
    model.load_state_dict(torch.load(resume))
model = model.cuda()

optimizer = optim.SGD(model.parameters(),
                      lr=base_lr,
                      momentum=0.9,
                      weight_decay=1e-5)
#optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0)
#optimizer = optim.Adam(model.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
criterion = CrossEntropyLoss()
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.1)
best_acc, best_model_wts = train(
    model,
    epoch_num=150,
    start_epoch=0,
    optimizer=optimizer,
    criterion=criterion,
    exp_lr_scheduler=exp_lr_scheduler,
    data_set=data_set,
    data_loader=dataloader,
    save_dir=save_dir,
    print_inter=50,
    val_inter=200,
)
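All of these snippets drive the learning rate with lr_scheduler.StepLR. With the settings of this last example (base_lr=0.001, step_size=15, gamma=0.1) the schedule is simply 1e-3 for epochs 0-14, 1e-4 for epochs 15-29, 1e-5 for epochs 30-44, and so on, assuming scheduler.step() is called once per epoch inside train(). A quick standalone check of that behaviour (the dummy parameter is only there so the optimizer has something to hold):

import torch
from torch import optim
from torch.optim import lr_scheduler

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = optim.SGD(params, lr=0.001, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.1)

for epoch in range(35):
    if epoch in (0, 14, 15, 29, 30):
        print('epoch %d  lr %g' % (epoch, optimizer.param_groups[0]['lr']))
    optimizer.step()   # stands in for one training epoch
    scheduler.step()
# epoch 0   lr 0.001
# epoch 14  lr 0.001
# epoch 15  lr 0.0001
# epoch 29  lr 0.0001
# epoch 30  lr 1e-05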