import torch
import torch.utils.data

import dataset  # project-local data helpers: listDataset, randomSequentialSampler, alignCollate, resizeNormalize
import model    # project-local encoder/decoder network definitions
import utils    # project-local label-conversion helpers

# `opt` (parsed command-line options) and `alphabet` are assumed to be defined
# earlier in the script.
transform = None
train_dataset = dataset.listDataset(list_file=opt.trainList,
                                    transform=transform)
assert train_dataset
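# Without random sampling, each batch is drawn as a run of consecutive samples
# starting at a random offset (randomSequentialSampler).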
if not opt.random_sample:
    sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
else:
    sampler = None
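# shuffle stays False: DataLoader rejects shuffle=True combined with a custom
# sampler. alignCollate resizes every image in a batch to a common size; with
# keep_ratio it derives the target width from the images' aspect ratios.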
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=opt.batchSize,
                                           shuffle=False,
                                           sampler=sampler,
                                           num_workers=int(opt.workers),
                                           collate_fn=dataset.alignCollate(
                                               height=opt.height,
                                               width=opt.width,
                                               keep_ratio=opt.keep_ratio))
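# Validation images are resized to a fixed size below, so the default collate suffices.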

val_dataset = dataset.listDataset(list_file=opt.valList,
                                  transform=dataset.resizeNormalize(
                                      (opt.width, opt.height)))

nclass = len(alphabet) + 3  # classes the decoder predicts: the alphabet plus 3 special symbols (SOS, EOS, blank)
nc = 1

converter = utils.strLabelConverterForAttention(alphabet)
image = torch.FloatTensor(opt.batchSize, nc, opt.height, opt.width)  # NCHW buffer matching the single-channel (nc=1) input
criterion = torch.nn.NLLLoss()  # expects log-probabilities, so the model's final output must go through log_softmax

encoder = model.encoder(opt.height, nc=nc, nh=256)
decoder = model.decoder(nh=256, nclass=nclass, dropout_p=0.1)
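# The encoder turns each image into a feature sequence; the decoder (paired with
# strLabelConverterForAttention above) predicts one of nclass symbols per step.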

# Example 2
# Fragment from inside a training function: it resumes after train_dataset has
# been created, and assumes os, json, torch, dataset, h5dataset, and utils are
# imported.
    print(" -- Saving parameters json:")
    with open(opt.outdir + os.sep + "params.json", "w") as p:
        p.write(json.dumps(vars(opt)))

    assert train_dataset
    if not opt.random_sample:
        sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)
    else:
        sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=False,
                                               sampler=sampler,
                                               num_workers=int(opt.workers),
                                               collate_fn=dataset.alignCollate(
                                                   imgH=opt.imgH,
                                                   imgW=opt.imgW,
                                                   keep_ratio=opt.keep_ratio))

    if opt.vallist.endswith(".h5"):
        # HDF5 validation set: read the /test/image and /test/prof datasets
        test_dataset = h5dataset.H5Dataset(opt.vallist,
                                           datasetImage='/test/image',
                                           datasetProf='/test/prof',
                                           transform=dataset.resizeNormalize(
                                               (opt.imgW, opt.imgH)))
    else:
        test_dataset = dataset.listDataset(list_file=opt.vallist,
                                           transform=dataset.resizeNormalize(
                                               (opt.imgW, opt.imgH)))
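    # In both cases resizeNormalize rescales images to a fixed (imgW, imgH)
    # and normalizes the pixel values.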

    alphabet = utils.getAlphabetStr(opt.alphabet)