inv_normalize)
# Validation split of CACD2000 stored as HDF5; inv_normalize undoes the
# normalization applied by val_transform (both defined earlier in the file).
val_set = CACDDataset("./data/CACD2000_val.hdf5", val_transform, inv_normalize)

# The two loaders share every setting except shuffling: shuffle the training
# split each epoch, keep the validation split in a fixed order.
_loader_kwargs = dict(batch_size=BATCH_SIZE, num_workers=4)
train_dataloader = DataLoader(train_set, shuffle=True, **_loader_kwargs)
val_dataloader = DataLoader(val_set, shuffle=False, **_loader_kwargs)

# Restore the pretrained base model and put it in inference mode.
base_model = BaseModel(IF_PRETRAINED=True)
base_model.to(device)
# map_location keeps the load working when the checkpoint was saved on a
# different device (e.g. a GPU checkpoint loaded on a CPU-only machine);
# matches the map_location usage in the other checkpoint load in this file.
checkpoint = torch.load(MODEL_LOAD_PATH, map_location=device)
base_model.load_state_dict(checkpoint['model'])
base_model.eval()

# ------------------------- Loss loading --------------------------------
# Camera pose used for rendering (look-at mode).
camera_distance = 2.732
elevation = 0
azimuth = 0

# Differentiable soft rasterizer: perspective look-at camera, hard RGB
# aggregation, ambient-only lighting (no directional light).
_renderer_cfg = {
    'image_size': 250,
    'sigma_val': 1e-4,
    'aggr_func_rgb': 'hard',
    'camera_mode': 'look_at',
    'viewing_angle': 30,
    'fill_back': False,
    'perspective': True,
    'light_intensity_ambient': 1.0,
    'light_intensity_directionals': 0,
}
renderer = sr.SoftRenderer(**_renderer_cfg)
# Example #2 (scraped-snippet separator; was bare text "Пример #2" / "0",
# which is not valid Python — a new, unrelated script fragment starts below)
# Pad the variable-length token-id sequences into one (max_len, batch) tensor.
# torch.tensor(..., dtype=torch.long) builds the integer tensor directly; the
# original torch.Tensor(sentence).to(torch.long) round-trips the ids through
# float32, which silently corrupts any id above 2**24.
tokens = pad_sequence(
    [torch.tensor(sentence, dtype=torch.long) for sentence in tokens],
    padding_value=PAD)

# Checkpoint produced by our own pretraining run.
pretrained_model_name = 'pretrained_final.pth'

# A model pretrained for 200 epochs by the TA is also available; if you use
# that saved model, you must mention it in the report.
#
# pretrained_model_name = 'pretrained_byTA.pth'

model = BaseModel(token_num=len(processor))
state_dict = torch.load(pretrained_model_name, map_location='cpu')
# strict=False tolerates missing/unexpected keys between checkpoint and model.
model.load_state_dict(state_dict, strict=False)

model.eval()

# Run the model, then project the per-token features onto their top-7
# principal components (PCA via SVD of the empirical covariance).
output = model(tokens)
# Recover per-sequence lengths by counting non-PAD positions along dim 0
# (assumes time-major layout and that feature 0 equals PAD only at padded
# positions — TODO confirm against how `tokens` was padded above).
output = pack_padded_sequence(output, (output[..., 0] != PAD).sum(0),
                              enforce_sorted=False)
# Center the packed (flattened, non-padded) features and form the
# covariance matrix over the feature dimension.
temp = (output.data - output.data.mean(dim=0))
covariance = 1.0 / len(output.data) * temp.T @ temp
U, S, V = covariance.svd()
# Keep the first 7 principal directions; rewrap as a PackedSequence using the
# original batch bookkeeping so it can be padded back out below.
output = PackedSequence_(temp @ U[:, :7], output.batch_sizes,
                         output.sorted_indices, output.unsorted_indices)
output, _ = pad_packed_sequence(output, batch_first=True, padding_value=PAD)

# Lay the sentences out on a 2 x (len(sentences) // 2) grid of subplots,
# then flatten the grid row by row into a single flat list of panels.
_, axes_grid = plt.subplots(nrows=2, ncols=len(sentences) // 2)
ax = [panel for row in axes_grid for panel in row]
for i, sentence, state in zip(range(len(sentences)), sentences, output):
    state = state[:len(sentence), :]