Example #1
def filter():
    # ImageNet-style preprocessing for the ICDAR patch images
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    dataset = ImageFolder(root=args.icdar_patches, transform=transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=True)

    # frozen feature extractor: inference only, no gradient updates
    model = Extractor()
    for params in model.parameters():
        params.requires_grad = False
    acc = clustering(loader=loader, model=model)
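Every example on this page assumes the project's `Extractor` class, which is not shown here. As a rough sketch, assuming a frozen torchvision ResNet-18 backbone with the classification head removed (illustrative only, not the repository's actual definition):

import torch
import torch.nn as nn
from torchvision import models

class Extractor(nn.Module):
    # hypothetical stand-in: ResNet-18 up to global average pooling
    def __init__(self):
        super().__init__()
        backbone = models.resnet18(pretrained=True)
        self.features = nn.Sequential(*list(backbone.children())[:-1])

    def forward(self, x):
        feats = self.features(x)        # (N, 512, 1, 1)
        return torch.flatten(feats, 1)  # (N, 512) feature vectors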
Example #2
    # Part 2: train F1t, F2t with pseudo labels
    logger.info("#################### Part2 ####################")
    extractor.train()
    s1_classifier.train()
    s2_classifier.train()
    s3_classifier.train()
    t_pse_label = os.path.join(data_root, args.t,
                               "pseudo/pse_label_" + str(step) + ".txt")
    t_pse_set = OfficeImage(t_root, t_pse_label, split="train")
    t_pse_loader_raw = torch.utils.data.DataLoader(t_pse_set,
                                                   batch_size=batch_size,
                                                   shuffle=shuffle,
                                                   num_workers=num_workers)
    logger.info("Length of pseudo-label dataset:{}".format(len(t_pse_set)))

    optim_extract = optim.Adam(extractor.parameters(),
                               lr=lr,
                               betas=(beta1, beta2))
    optim_s1_cls = optim.Adam(s1_classifier.parameters(),
                              lr=lr,
                              betas=(beta1, beta2))
    optim_s2_cls = optim.Adam(s2_classifier.parameters(),
                              lr=lr,
                              betas=(beta1, beta2))
    optim_s3_cls = optim.Adam(s3_classifier.parameters(),
                              lr=lr,
                              betas=(beta1, beta2))

    for cls_epoch in range(cls_epoches):
        s1_loader, s2_loader, s3_loader, t_pse_loader = iter(
            s1_loader_raw), iter(s2_loader_raw), iter(s3_loader_raw), iter(
                t_pse_loader_raw)
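Since the extractor and all three source classifiers are updated jointly, each inner step has to zero and step all four optimizers. A minimal sketch of that update, with the per-classifier loss names assumed for illustration:

optimizers = (optim_extract, optim_s1_cls, optim_s2_cls, optim_s3_cls)
for opt in optimizers:
    opt.zero_grad()
loss = s1_cls_loss + s2_cls_loss + s3_cls_loss  # assumed per-classifier losses
loss.backward()
for opt in optimizers:
    opt.step()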
Example #3
# Parameters
Batch_size_s = int(length_source_training / 25) + 1
Batch_size_t = int(length_target / 25) + 1
num_epochs = 200
m = 1
n = 10**(-7)
# build the three networks and initialize their weights
E = Extractor()
D = Discriminator()
R = Regressor()
E.apply(weights_init)
D.apply(weights_init)
R.apply(weights_init)
e_learning_rate = 0.00003
d_learning_rate = 0.00015
r_learning_rate = 0.0000001
e_optimizer = optim.RMSprop(E.parameters(), lr=e_learning_rate, alpha=0.9)
d_optimizer = optim.RMSprop(D.parameters(), lr=d_learning_rate, alpha=0.9)
r_optimizer = optim.RMSprop(R.parameters(), lr=r_learning_rate, alpha=0.9)
e_steps = 1
d_steps = 1
r_steps = 1

# SAMPLING
source_training_dataset = Data.TensorDataset(source_training_features,
                                             source_training_liquid_labels)
source_validation_dataset = Data.TensorDataset(
    source_validation_features, source_validation_liquid_labels)
target_dataset = Data.TensorDataset(target_features, target_liquid_labels)

source_training_loader_d = Data.DataLoader(dataset=source_training_dataset,
                                           batch_size=Batch_size_s)
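`weights_init` is applied to all three networks above but is not defined in this excerpt. A common DCGAN-style initializer would fit the calls; this version is an assumption, not the project's code:

import torch.nn as nn

def weights_init(m):
    # normal(0, 0.02) weights and zero biases for linear/conv layers
    if isinstance(m, (nn.Linear, nn.Conv1d, nn.Conv2d)):
        nn.init.normal_(m.weight, mean=0.0, std=0.02)
        if m.bias is not None:
            nn.init.zeros_(m.bias)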
Example #4
File: svm.py Project: sayano-lee/mmc_ocr
        pbar.update(1)
        # batch = (images, annotations, image filenames)
        im, ann, im_fns = data[0], data[1], data[2]
        im = im.cuda()
        feat = model(im)  # deep features from the frozen extractor
        x = feat.cpu().numpy()
        y = ann.numpy()

        # clf.score returns mean accuracy for this batch of features
        precision = precision + clf.score(x, y)
    pbar.close()

    print("\nAverage Precision is {}".format(precision / len(loader)))


if __name__ == '__main__':

    icdar_patches = "./data/icdar2015/patches"

    # ImageNet-style preprocessing (same pipeline as Example #1)
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    dataset = ImageFolder(root=icdar_patches, transform=transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=20, shuffle=True)

    # extractor for deep features
    model = Extractor()
    model = model.cuda()
    for params in model.parameters():
        params.requires_grad = False

    # vanilla svm
    clf = svm.SVC(kernel="rbf", gamma=10)

    train(loader=loader, model=model, clf=clf)
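The excerpt shows only the scoring loop; clf.score presupposes a fitted SVC, so the features must be fitted first. A sketch of that fitting step under the same setup (the function name and body here are illustrative, not the project's actual train):

import numpy as np
import torch

def fit_svm(loader, model, clf):
    # collect frozen deep features and labels, then fit the SVC once
    xs, ys = [], []
    with torch.no_grad():
        for im, ann in loader:
            feat = model(im.cuda())
            xs.append(feat.cpu().numpy())
            ys.append(ann.numpy())
    clf.fit(np.concatenate(xs), np.concatenate(ys))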
Example #5
            # keep only predictions above the confidence threshold as pseudo labels
            if t_pred[j, ids[j]] >= threshold:
                fout.write(data[0] + " " + str(ids[j]) + "\n")
    fin.close()
    fout.close()
    # Part 2: train F1t, F2t with pseudo labels
    print ("#################### Part2 ####################")
    extractor.train()
    s1_classifier.train()
    s2_classifier.train()
    t_pse_label = os.path.join(data_root, args.t, "pseudo/pse_label_" + str(step) + ".txt")
    t_pse_set = OfficeImage(t_root, t_pse_label, split="train")
    t_pse_loader_raw = torch.utils.data.DataLoader(t_pse_set, batch_size=batch_size,
                           shuffle=shuffle, num_workers=num_workers)
    print ("Length of pseudo-label dataset: ", len(t_pse_set))

    optim_extract = optim.Adam(extractor.parameters(), lr=lr, betas=(beta1, beta2))
    optim_s1_cls = optim.Adam(s1_classifier.parameters(), lr=lr, betas=(beta1, beta2))
    optim_s2_cls = optim.Adam(s2_classifier.parameters(), lr=lr, betas=(beta1, beta2))

    for cls_epoch in range(cls_epoches):
        s1_loader, s2_loader, t_pse_loader = iter(s1_loader_raw), iter(s2_loader_raw), iter(t_pse_loader_raw)
        for i, (t_pse_imgs, t_pse_labels) in tqdm.tqdm(enumerate(t_pse_loader)):
            try:
                s1_imgs, s1_labels = next(s1_loader)
            except StopIteration:
                # source loader exhausted before the pseudo-label loader: restart it
                s1_loader = iter(s1_loader_raw)
                s1_imgs, s1_labels = next(s1_loader)
            try:
                s2_imgs, s2_labels = next(s2_loader)
            except StopIteration:
                s2_loader = iter(s2_loader_raw)
                s2_imgs, s2_labels = next(s2_loader)
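The try/except StopIteration blocks exist only to restart the shorter source loaders each time they run dry. In Python 3 a small generator gives the same behavior with less noise; a sketch under the same variable names:

def cycle(loader):
    # endlessly re-iterate a DataLoader, reshuffling on each pass
    while True:
        for batch in loader:
            yield batch

s1_loader = cycle(s1_loader_raw)
s2_loader = cycle(s2_loader_raw)
for t_pse_imgs, t_pse_labels in t_pse_loader_raw:
    s1_imgs, s1_labels = next(s1_loader)
    s2_imgs, s2_labels = next(s2_loader)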