import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Resize, ToTensor

# MovieSuccessDataset, Dictionary, get_class_weights, MOVIE_DATA_FILE,
# POSTERS_DIR and DATA_DIR are project-local and imported elsewhere in the repo.

cuda_available: bool = torch.cuda.is_available()
if cuda_available:
    torch.cuda.init()
    print("CUDA available")
else:
    print("CUDA not available")
device = torch.device("cuda:0" if cuda_available else "cpu")

# DATA
SPLIT_RATIO: float = 0.7
BATCH_SIZE: int = 32
NUM_EPOCHS: int = 15

movie_data_set: MovieSuccessDataset = MovieSuccessDataset(
    MOVIE_DATA_FILE,
    POSTERS_DIR,
    Dictionary(DATA_DIR / 'dict2000.json'),
    Compose([Resize((299, 299)), ToTensor()]))
data_set_size: int = len(movie_data_set)
print(f'Size of the data-set: {data_set_size}')

# 70/30 train/validation split.
train_data_set_size: int = int(data_set_size * SPLIT_RATIO)
val_data_set_size: int = data_set_size - train_data_set_size
train_dataset, val_dataset = torch.utils.data.random_split(
    movie_data_set, [train_data_set_size, val_data_set_size])

# Rebalance classes by sampling the training set with per-sample weights.
weights: np.ndarray = get_class_weights(train_dataset)
weighted_sampler = torch.utils.data.sampler.WeightedRandomSampler(
    weights, len(weights))
train_data_set_loader: DataLoader = DataLoader(
    train_dataset,
    batch_size=BATCH_SIZE,
    sampler=weighted_sampler)  # sampler argument assumed; the original call is truncated here
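
# For reference, a minimal sketch of what the project-local get_class_weights
# helper is assumed to compute for WeightedRandomSampler: one weight per
# training sample, equal to the inverse frequency of that sample's class.
# This is an assumption, not the repo's actual implementation; the label
# position (dataset[i][-1]) and the integer label encoding are hypothetical.
def get_class_weights_sketch(dataset) -> np.ndarray:
    labels = np.array([int(dataset[i][-1]) for i in range(len(dataset))])
    class_counts = np.bincount(labels)        # number of samples per class
    per_class_weight = 1.0 / class_counts     # rarer class -> larger weight
    return per_class_weight[labels]           # one weight per sample

# With per-sample weights of length len(train_dataset),
# WeightedRandomSampler(weights, len(weights)) draws len(weights) indices per
# epoch with replacement, so each batch is approximately class-balanced.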