Example #1
        target_encoder_name))
    target_encoder.load_state_dict(torch.load(target_encoder_pth))
else:
    print("Error: target encoder not loaded")

# Move to GPU
if cuda:
    target_encoder = target_encoder.cuda()

# ----------
#  Features
# ----------

print("---> generating features...")

target_encoder.eval()

# Load data from source
with torch.no_grad():  # gradients are not needed during evaluation
    for idx, (imgs, label) in enumerate(dataloader_source_test):

        images = make_variable(imgs)
        if cuda:
            images = images.cuda()

        # tensor -> numpy array, shape = (batch size, 500); each row is one image's feature vector
        source_features = target_encoder(images).cpu().numpy()
        source_class = label.cpu().numpy()  # numpy vector, length = batch size

        if idx == 0:
            break  # keep only the first batch of source features

# Load data from target
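# The snippet ends here; a minimal sketch of the target-side loop, mirroring the
# source loop above (dataloader_target_test is an assumed name, not from the snippet):
with torch.no_grad():
    for idx, (imgs, label) in enumerate(dataloader_target_test):
        images = make_variable(imgs)
        if cuda:
            images = images.cuda()
        target_features = target_encoder(images).cpu().numpy()
        target_class = label.cpu().numpy()
        if idx == 0:
            break  # again, only the first batch is kept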
Example #2
# init weights
src_encoder.load_state_dict(
    torch.load(output_src_encoder_path, map_location=device))

tgt_encoder.load_state_dict(
    torch.load(output_tgt_encoder_path, map_location=device))


# get latent space
latents = []
labels = []
domains = []

src_encoder.eval()
tgt_encoder.eval()

for sources in tqdm(source_dataloader):
    source_images, label = sources[0].to(device), sources[1]
    with torch.no_grad():
        features = src_encoder(source_images)
        latents.append(features.cpu())
    labels.append(label)
    domains.append(torch.zeros(label.shape[0]))  # tag source samples with domain 0

for targets in tqdm(target_dataloader):
    target_images, label = targets[0].to(device), targets[1]
    with torch.no_grad():
        features = tgt_encoder(target_images)
        latents.append(features.cpu())
    labels.append(label)
    domains.append(torch.ones(label.shape[0]))  # tag target samples with domain 1
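# A common next step (not shown in the snippet) is projecting the collected
# latents to 2-D for inspection, e.g. with scikit-learn's t-SNE; a minimal
# sketch, assuming sklearn and matplotlib are available:
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

all_latents = torch.cat(latents).numpy()
all_domains = torch.cat(domains).numpy()

embedded = TSNE(n_components=2).fit_transform(all_latents)
plt.scatter(embedded[:, 0], embedded[:, 1], c=all_domains, s=2)
plt.savefig("latent_space.png")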
Example #3
def main(args):
    # read from args
    test_path = args.test_path
    d_target = args.d_target
    output_predict_path = args.output_predict_path

    ########## Arguments ##########
    batch_size = 128

    # svhn, usps, mnistm
    if d_target == "mnistm":
        d_source = "usps"
    elif d_target == "svhn":
        d_source = "mnistm"
    else:
        d_source = "svhn"

    output_src_classifier_path = "./hw3-4/models/src_classifier_{}_{}.pth".format(
        d_source, d_target)
    output_tgt_encoder_path = "./hw3-4/models/tgt_encoder_{}_{}.pth".format(
        d_source, d_target)

    #############################

    dataset = ReproduceDataset(test_path)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # init models
    tgt_encoder = LeNetEncoder()
    src_classifier = LeNetClassifier()

    # to device
    tgt_encoder.to(device)
    src_classifier.to(device)

    # init weights
    tgt_encoder.load_state_dict(torch.load(
        output_tgt_encoder_path, map_location=device))

    src_classifier.load_state_dict(torch.load(
        output_src_classifier_path, map_location=device))

    tgt_encoder.eval()
    src_classifier.eval()

    all_pred = []

    for targets in dataloader:
        target_images = targets.to(device)

        with torch.no_grad():
            preds = src_classifier(tgt_encoder(target_images))

        # take the class with the highest score as the predicted label
        _, pred_labels = torch.max(preds, 1)
        all_pred.append(pred_labels)

    # save to predict
    pred = torch.cat(all_pred).cpu().numpy()
    image_names = ['{:05}.png'.format(i) for i in range(len(pred))]

    pd.DataFrame({
        'image_name': image_names,
        'label': pred
    }).to_csv(output_predict_path, index=False)
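
# A minimal sketch of the entry point main() expects; only test_path, d_target and
# output_predict_path are read from args, so the flag names below are assumptions:
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--test_path", type=str, required=True)
    parser.add_argument("--d_target", type=str, choices=["svhn", "usps", "mnistm"])
    parser.add_argument("--output_predict_path", type=str, default="./predict.csv")
    main(parser.parse_args())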