Example #1
import os
import time

import PIL.Image
import torch
from torch.utils.data import DataLoader

SoftMax = torch.nn.Softmax(dim=1).cuda()


class Linear_Classifier(torch.nn.Module):
    def __init__(self, num_classes):
        super(Linear_Classifier, self).__init__()
        self.fc = torch.nn.Linear(feature_size, num_classes)

    def forward(self, x):
        return self.fc(x)
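
A minimal usage sketch of this linear head, assuming `feature_size` matches the backbone's feature dimensionality; the random batch below is made up purely for illustration:

# Hypothetical usage: score a batch of pre-extracted feature vectors (random data here).
clf = Linear_Classifier(num_classes=number_of_known_classes).cuda()
features = torch.randn(8, feature_size).cuda()  # stand-in for backbone features
logits = clf(features)                          # shape: (8, number_of_known_classes)
probs = SoftMax(logits)                         # per-class probabilities, rows sum to 1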


t1 = time.time()

cnn_model_supervised = net_model_from_lib(num_classes=number_of_known_classes)  # timm library
cnn_model_moco_places = net_model_from_lib(num_classes=number_of_known_classes)  # timm library
linear_model = Linear_Classifier(num_classes=number_of_known_classes)

assert os.path.isfile(cnn_path_supervised)
checkpoint = torch.load(cnn_path_supervised)
if "epoch" in checkpoint.keys():
    state_dict_model = checkpoint["state_dict"]
else:
    state_dict_model = checkpoint
from collections import OrderedDict

new_state_dict_model = OrderedDict()
for k, v in state_dict_model.items():
    if "module." == k[:7]:
        name = k[7:]  # remove the `module.` prefix added by DataParallel
    else:
        name = k
    new_state_dict_model[name] = v
# load the cleaned weights into the supervised backbone
cnn_model_supervised.load_state_dict(new_state_dict_model)


class csv_data_class(torch.utils.data.Dataset):
    # Only the tail of __getitem__ survives in this snippet; __init__ and the
    # CSV/image loading above these lines are cut off.
    def __getitem__(self, index):
        # ... (CSV lookup and image loading cut off in the snippet) ...
        img_pil = PIL.Image.fromarray(img)
        x = self.transform(img_pil)
        i = A.find("ImageNet/")
        address = A[i:]
        # y = int(L)
        # return (x, y)
        return (address, x)


dataset_test = csv_data_class(path=test_csv_path, transform=image_transform_val)
test_loader = DataLoader(dataset=dataset_test, batch_size=batch_size, shuffle=False, num_workers=n_cpu)
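
The evaluation loop is not part of this snippet; a hedged sketch of how the loader, model, and SoftMax above might be combined (each dataset item is a path string plus an image tensor):

# Hypothetical evaluation pass over the test split (not in the original snippet).
cnn_model_supervised = cnn_model_supervised.cuda().eval()
with torch.no_grad():
    for addresses, images in test_loader:    # each item is (ImageNet-relative path, tensor)
        images = images.cuda(non_blocking=True)
        logits = cnn_model_supervised(images)
        scores = SoftMax(logits)             # softmax scores over the known classes
        preds = scores.argmax(dim=1)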


t1 = time.time()

cnn_model = net_model_from_lib(num_classes=number_of_known_classes)  # timm library

assert os.path.isfile(cnn_path)
checkpoint = torch.load(cnn_path, map_location="cpu")
# rename moco pre-trained keys
print("keys = ", checkpoint.keys())
state_dict = checkpoint["state_dict"]
for k in list(state_dict.keys()):
    # retain only encoder_q up to before the embedding layer
    if k.startswith("module.encoder_q") and not k.startswith("module.encoder_q.fc"):
        # remove prefix
        state_dict[k[len("module.encoder_q.") :]] = state_dict[k]
    # delete renamed or unused k
    del state_dict[k]
msg = cnn_model.load_state_dict(state_dict, strict=False)
assert set(msg.missing_keys) == {"classifier.weight", "classifier.bias"}


class csv_data_class(torch.utils.data.Dataset):
    # Only the tail of __getitem__ survives here; the CSV/image loading is cut off.
    def __getitem__(self, index):
        # ... (image loading cut off in the snippet) ...
        x_moco = self.transform_moco(img_pil)
        i = A.find("ImageNet/")
        address = A[i:]
        return (address, x_moco)


dataset_test = csv_data_class(path=test_csv_path,
                              transform_moco=image_transform_moco)
test_loader = DataLoader(dataset=dataset_test,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=n_cpu)
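
Because the backbone comes from timm, penultimate features (rather than logits) can be pulled with the library's generic `forward_features` hook; a sketch under the assumption of a CNN backbone that returns an NCHW feature map:

# Hypothetical feature extraction with the MoCo-initialised timm backbone.
cnn_model = cnn_model.cuda().eval()
with torch.no_grad():
    for addresses, images in test_loader:
        images = images.cuda(non_blocking=True)
        feats = cnn_model.forward_features(images)  # unpooled feature map from the timm model
        feats = feats.mean(dim=[2, 3])              # global average pool (assumes NCHW CNN output)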

t1 = time.time()

cnn_model_moco_imagenet = net_model_from_lib(
    num_classes=number_of_known_classes)  # timm library
cnn_model_moco_places = net_model_from_lib(
    num_classes=number_of_known_classes)  # timm library

assert os.path.isfile(cnn_path_moco_imagenet)
checkpoint = torch.load(cnn_path_moco_imagenet, map_location="cpu")
# rename moco pre-trained keys
print("keys = ", checkpoint.keys())
state_dict = checkpoint["state_dict"]
for k in list(state_dict.keys()):
    # retain only encoder_q up to before the embedding layer
    if k.startswith(
            "module.encoder_q") and not k.startswith("module.encoder_q.fc"):
        # remove prefix
        state_dict[k[len("module.encoder_q."):]] = state_dict[k]
    # delete renamed or unused k
    del state_dict[k]

Example #4
# The opening of this snippet is cut off; the Resize line and the variable name
# `image_transform_supervised` below are reconstructed/assumed, not original.
image_transform_supervised = transforms.Compose([
    transforms.Resize(size=(image_size, image_size),
                      interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

image_transform_moco = transforms.Compose([
    transforms.Resize(size=(image_size, image_size),
                      interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
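
As a quick sanity check, both pipelines can be applied to the same PIL image, one per model; the file path is made up and `image_transform_supervised` is the assumed name used for the reconstructed transform above:

# Hypothetical example: one image, two normalisation schemes (made-up path).
import PIL.Image

img_pil = PIL.Image.open("/path/to/sample.jpg").convert("RGB")
x_supervised = image_transform_supervised(img_pil)  # channels roughly in [-1, 1]
x_moco = image_transform_moco(img_pil)              # ImageNet mean/std normalisation
print(x_supervised.shape, x_moco.shape)             # both: (3, image_size, image_size)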

t1 = time.time()

model_supervised = net_model_from_lib(
    num_classes=number_of_known_classes)  # timm library
model_moco_imagenet = net_model_from_lib(
    num_classes=number_of_known_classes)  # timm library

assert os.path.isfile(cnn_path_supervised)
checkpoint = torch.load(cnn_path_supervised)
if "epoch" in checkpoint.keys():
    state_dict_model = checkpoint["state_dict"]
else:
    state_dict_model = checkpoint
from collections import OrderedDict

new_state_dict_model = OrderedDict()
for k, v in state_dict_model.items():
    if "module." == k[:7]:
        name = k[7:]  # remove `module.`