model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size


# List every regular file under data_dir (expected to be the pickled data files).
pickle_files = [
    path
    for path in (os.path.join(data_dir, entry) for entry in os.listdir(data_dir))
    if os.path.isfile(path)
]
print(pickle_files)

# Build the training / validation dataloaders and bundle them for the trainer.
data_loader_train, data_loader_val = get_dataloader(
    batch_size=batch_size, VG_DIR=vg_dir)
dataloaders_dict = {
    'train': data_loader_train,
    'val': data_loader_val,
}

# One output class per object category in the dataset.
num_classes = data_loader_train.dataset.num_objects

# Instantiate the (optionally pretrained) model for this run.
model_ft, input_size = initialize_model(
    model_name, num_classes, feature_extract, use_pretrained=True)

# Print the model we just instantiated
# print(model_ft)

# Detect if we have a GPU available
# ===== Example #2 (示例 #2) =====
        _, z_rand_rec, _ = self.crop_encoder(crops_rand, objs)

        crops_input_rec = crop_bbox_batch(img_rec, boxes, obj_to_img,
                                          self.obj_size)

        return crops_input, crops_input_rec, crops_rand, img_rec, img_rand, mu, logvar, z_rand_rec


if __name__ == '__main__':
    # Smoke-test entry point: build a Visual Genome dataloader and (optionally)
    # push batches through CropEncoder on the first CUDA device.
    from data.vg_custom_mask import get_dataloader

    device = torch.device('cuda:0')
    batch_size = 4
    z_dim = 8

    train_loader, _ = get_dataloader(batch_size=batch_size)
    vocab_num = train_loader.dataset.num_objects

    # test CropEncoder (kept for reference; uncomment to run the forward pass)
    # model = CropEncoder(class_num=vocab_num).to(device)
    #
    # for batch in train_loader:
    #     imgs, objs, boxes, masks, obj_to_img = batch
    #     imgs, objs, boxes, masks, obj_to_img = (
    #         t.to(device) for t in (imgs, objs, boxes, masks, obj_to_img))
    #
    #     crops = crop_bbox_batch(imgs, boxes, obj_to_img, 32)
    #     outputs = model(crops, objs)
    #
    #     for output in outputs:
    #         print(output.shape)
    #
# ===== Example #3 (示例 #3) =====
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size


# Build the dataloaders (106-dim attribute embedding) and bundle them.
data_loader_train, data_loader_val = get_dataloader(
    batch_size=batch_size, attribute_embedding=106)

# NOTE(review): len(dataloader) is usually already the number of batches —
# dividing by batch_size again looks suspicious; confirm against get_dataloader.
iters_per_epoch = {
    "train": len(data_loader_train) // batch_size,
    "val": len(data_loader_val) // batch_size,
}

# Create training and validation dataloaders
dataloaders_dict = {
    'train': data_loader_train,
    'val': data_loader_val,
}

# One output class per object category in the dataset.
num_classes = data_loader_train.dataset.num_objects

# Initialize the model for this run
model_ft, input_size = initialize_model(
    model_name, num_classes, feature_extract, use_pretrained=True)

# Print the model we just instantiated
print(model_ft)

# Detect if we have a GPU available