help="Path to the file with attributes") parser.add_argument('--device', type=str, default='cuda', help="Device: 'cuda' or 'cpu'") args = parser.parse_args() start_epoch = 1 N_epochs = 50 batch_size = 16 num_workers = 8 # number of processes to handle dataset loading device = torch.device("cuda" if torch.cuda.is_available() and args.device == 'cuda' else "cpu") # attributes variable contains labels for the categories in the dataset and mapping between string names and IDs attributes = AttributesDataset(args.attributes_file) # specify image transforms for augmentation during training train_transform = transforms.Compose([ transforms.RandomHorizontalFlip(p=0.5), transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0), transforms.RandomAffine(degrees=20, translate=(0.1, 0.1), scale=(0.8, 1.2), shear=None, resample=False, fillcolor=(255, 255, 255)), transforms.ToTensor(),
# Restore the fitted label binarizer (maps class indices back to label names).
# NOTE(review): pickle is unsafe on untrusted input — LABEL_PATH must be a trusted file.
with open(LABEL_PATH, "rb") as f:
    mlb = pickle.load(f)

# Score the (already prepared) input image and print the two most likely labels.
proba = model.predict(image)[0]
idxs = np.argsort(proba)[::-1][:2]  # indices of the two highest-probability classes
for j in idxs:
    label = "{}".format(mlb.classes_[j])
    print(label)

if __name__ == '__main__':
    checkpoint = '/checkpoint-000050.pth'
    attributes_file = '/styles.csv'
    device = 'cpu'

    attributes = AttributesDataset(attributes_file)
    model = MultiOutputModel(
        n_color_classes=attributes.num_colors,
        n_gender_classes=attributes.num_genders,
        n_article_classes=attributes.num_articles).to(device)

    # Path to the input image.
    res = DATASET_PATH
    val_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])
    img = Image.open(res)
    img = val_transform(img)
    # ToTensor() yields a (3, H, W) tensor, so the spatial dims are shape[1]/shape[2];
    # the original indexed shape[0]/shape[1], which mixed the channel dim into H.
    # Reshape to a single-image batch: (1, 3, H, W).
    img = img.view(-1, 3, img.shape[1], img.shape[2])

    name = checkpoint
    print('Restoring checkpoint: {}'.format(name))