Example #1
        else:
            # if we are using an asymmetric BiLSTM, the word-level dim is the sum of the per-direction hidden dims
            options.word_level_dim = sum(options.hidden_dim)


    # ===-----------------------------------------------------------------------===
    # Set up logging
    # ===-----------------------------------------------------------------------===

    if not os.path.exists(options.log_dir):
        os.mkdir(options.log_dir)
    if options.log_to_stdout:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(filename=os.path.join(options.log_dir, "log.txt"), filemode="w", format="%(message)s", level=logging.INFO)
    train_dev_cost = utils.CSVLogger(os.path.join(options.log_dir, "train_dev.log"), ["Train.cost", "Dev.cost"])


    # ===-----------------------------------------------------------------------===
    # Log run parameters
    # ===-----------------------------------------------------------------------===
    logging.info(options)
    logging.info(
    """
    Dataset: {dataset}
    Num Epochs: {epochs}
    LSTM: {layers} layers, {hidden} hidden dim, {word} word level dim
    Training set size limit: {sent} sentences or {tokens} tokens
    Initial Learning Rate: {lr}
    Dropout: {dropout}
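
Both examples depend on a project-local utils.CSVLogger. A minimal sketch consistent with Example #1's CSVLogger(filename, fieldnames) call follows; the writerow/close methods mirror how Example #2 uses its logger. This is an illustrative stand-in, not the project's actual implementation (Example #2's variant also accepts args and a separate filename keyword).

import csv


class CSVLogger:
    """Sketch of a CSV logger: writes a header row, then one dict-row per call."""

    def __init__(self, filename, fieldnames):
        self.file = open(filename, 'w', newline='')
        self.writer = csv.DictWriter(self.file, fieldnames=fieldnames)
        self.writer.writeheader()  # e.g. "Train.cost,Dev.cost"

    def writerow(self, row):
        self.writer.writerow(row)  # row is a dict keyed by the fieldnames
        self.file.flush()

    def close(self):
        self.file.close()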
Example #2
import os

import torch
import torch.nn as nn
import torchvision.transforms as transforms

# Project-local dependencies (import paths assumed, not shown in this excerpt):
# `parser` (argparse), `utils` (CSVLogger, adjust_learning_rate, save_checkpoint),
# `RN` (ResNet builder), and the `Cub2011` dataset wrapper.


def main():
    global args, start_epoch
    args = parser.parse_args()
    test_id = args.net_type + str(args.depth) + '_' + args.expname + '_CAM'
    print('test_id : ', test_id)

    csv_path = os.path.join('logs', test_id)
    checkpoint_path = os.path.join('save_models', test_id)

    os.makedirs(csv_path, exist_ok=True)
    os.makedirs(checkpoint_path, exist_ok=True)

    print('csv_path : ', csv_path)
    print('models_path : ', checkpoint_path)

    # Preprocessing
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Dataloader
    train_transforms = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    val_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    train_dataset = Cub2011(root='../datas/',
                            train=True,
                            transform=train_transforms)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers)

    val_dataset = Cub2011(root='../datas/',
                          train=False,
                          transform=val_transform)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers)

    numberofclass = 200  # CUB-200-2011 contains 200 bird categories

    model = RN.ResNet(args.dataset, args.depth, numberofclass,
                      args.bottleneck)  # for ResNet

    model.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    criterion = nn.CrossEntropyLoss().cuda()

    start_epoch = 0
    best_acc1 = 0.0  # fix: must be initialized before the is_best comparison below
    best_acc5 = 0.0
    filename = os.path.join(csv_path, test_id + '.csv')
    csv_logger = utils.CSVLogger(csv_path,
                                 args=args,
                                 fieldnames=[
                                     'epoch', 'train_loss', 'train_acc1',
                                     'test_loss', 'test_acc1', 'test_acc5'
                                 ],
                                 filename=filename)

    if args.resume:
        # Load the best checkpoint written by utils.save_checkpoint under checkpoint_path
        # (the original concatenated checkpoint_path + csv_path, which produced an invalid path).
        checkpoint = torch.load(os.path.join(checkpoint_path, 'model_best.pth.tar'))
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch'] + 1
        best_acc1 = checkpoint['best_acc1']
        best_acc5 = checkpoint['best_acc5']
        filename = os.path.join(csv_path, 'log.csv')
        csv_logger = utils.CSVLogger(csv_path,
                                     args=args,
                                     fieldnames=[
                                         'epoch', 'train_loss', 'train_acc1',
                                         'test_loss', 'test_acc1', 'test_acc5'
                                     ],
                                     filename=filename)

    for epoch in range(start_epoch, start_epoch + args.epochs):

        utils.adjust_learning_rate(args, optimizer, epoch)

        # train for one epoch
        train_acc1, train_loss = train(train_loader, model, criterion,
                                       optimizer, epoch)

        # evaluate on validation set
        acc1, acc5, val_loss = validate(val_loader, model, criterion, epoch)

        # format metrics as strings for CSV logging
        train_loss = '%.4f' % train_loss
        train_acc1 = '%.4f' % train_acc1

        val_loss = '%.4f' % val_loss
        test_acc1 = '%.4f' % acc1
        test_acc5 = '%.4f' % acc5

        # remember best prec@1 and save checkpoint
        is_best = acc1 >= best_acc1
        best_acc1 = max(acc1, best_acc1)
        if is_best:
            best_acc5 = acc5

        print('Current best accuracy (top-1 {0:.3f}, top-5 {1:.3f})'.format(
            best_acc1, best_acc5))
        print(' ')
        utils.save_checkpoint(
            {
                'epoch': epoch,
                'arch': args.net_type,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'best_acc5': best_acc5,
                'optimizer': optimizer.state_dict(),
            }, is_best, test_id)

        row = {
            'epoch': str(epoch),
            'train_loss': train_loss,
            'train_acc1': train_acc1,
            'test_loss': val_loss,
            'test_acc1': test_acc1,
            'test_acc5': test_acc5,
        }
        csv_logger.writerow(row)

    print('Best accuracy (top-1 {0:.3f}, top-5 {1:.3f})'.format(
        best_acc1, best_acc5))
    csv_logger.close()
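
Example #2 also calls two other project-local helpers, utils.adjust_learning_rate and utils.save_checkpoint. Below is a minimal sketch consistent with the call sites above; the step-decay schedule and the save_models/<test_id>/ file layout are assumptions, not the project's actual implementation.

import os
import shutil

import torch


def adjust_learning_rate(args, optimizer, epoch):
    # Assumed schedule: decay the base LR by 10x at 50% and again at 75% of training.
    lr = args.lr * (0.1 ** (epoch // (args.epochs * 0.5))) \
                 * (0.1 ** (epoch // (args.epochs * 0.75)))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def save_checkpoint(state, is_best, test_id):
    # Assumed layout: checkpoints live under save_models/<test_id>/, with the best
    # model copied to model_best.pth.tar (the file the resume branch loads).
    directory = os.path.join('save_models', test_id)
    os.makedirs(directory, exist_ok=True)
    filename = os.path.join(directory, 'checkpoint.pth.tar')
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join(directory, 'model_best.pth.tar'))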