if __name__ == '__main__':
    args = parse_args()

    # Save the CLI values we still need after `args` is overwritten below
    # with the args stored in the checkpoint.
    show_report = args.show_report
    private_set = args.private_set
    index = args.index

    # List 'best*' checkpoints for this run, reverse-lexicographic so the
    # first entry is the one loaded below.
    ckpts = sorted(glob.glob(os.path.join(args.output, args.name, 'best*')),
                   reverse=True)

    # Restore the args the model was trained with, then derive computed fields.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    args = torch.load(ckpts[0])['args']
    args = compute_args(args)

    # Splits to evaluate; optionally append a private split if one was given.
    evaluation_sets = ['valid', 'test'] + ([private_set] if private_set is not None else [])

    # Build the train set first so its vocabulary (token_to_ix) is shared
    # with every evaluation split.
    # NOTE(review): eval(args.dataloader) executes an arbitrary expression
    # from args — a dict mapping names to dataset classes would be safer.
    train_dset = eval(args.dataloader)('train', args)

    # Fix: loop variable renamed from `set` to `split` — the original
    # shadowed the builtin `set` inside the comprehension.
    loaders = {
        split: DataLoader(eval(args.dataloader)(split, args, train_dset.token_to_ix),
                          args.batch_size,
                          num_workers=8,
                          pin_memory=True)
        for split in evaluation_sets
    }
type=str, choices=['MELD', 'MOSEI', 'MOSI', 'IEMOCAP', 'VGAF'], default='MOSEI') parser.add_argument('--task', type=str, choices=['sentiment', 'emotion'], default='sentiment') parser.add_argument('--task_binary', type=bool, default=False) args = parser.parse_args() return args if __name__ == '__main__': # Base on args given, compute new args args = compute_args(parse_args()) # Seed torch.manual_seed(args.seed) np.random.seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # DataLoader train_dset = eval(args.dataloader)('train', args) eval_dset = eval(args.dataloader)('valid', args, train_dset.token_to_ix) train_loader = DataLoader(train_dset, args.batch_size, shuffle=True, num_workers=8,