# NOTE(review): whitespace-mangled chunk — the original newlines were lost and this
# single line is TRUNCATED mid-expression (the QConcatCNN2DLSTM(...) constructor call
# is never closed), so the code is left byte-identical rather than reformatted.
# What the visible code does: loads the train/val split ids and the labels from JSON,
# builds train/val VNQADataset objects, wraps both in shuffled DataLoaders (batch size
# and worker count taken from `args`), prints the dataset sizes, and — when
# args.model == 'concat2d' — begins constructing a QConcatCNN2DLSTM model with
# batch_size/q_embedding_size/nb_classes arguments. TODO: recover the untruncated
# text (e.g. from VCS history) and restore proper formatting.
q_dir = QUESTIONS_DIR v_dir = VIDEOS_DIR labels_file = LABELS_FILE split_file = SPLIT_FILE with open(split_file, 'r') as f: split = json.load(f) train_file_ids = split['train'] val_file_ids = split['val'] with open(labels_file, 'r') as f: labels = json.load(f) # Initialize datasets for training and validation train_data = VNQADataset(q_dir=q_dir, v_dir=v_dir, filenames=train_file_ids, labels=labels) val_data = VNQADataset(q_dir=q_dir, v_dir=v_dir, filenames=val_file_ids, labels=labels) print('%d train examples, %d validation examples' % (len(train_data), len(val_data))) # Create DataLoader objects for training and validation sets train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) val_loader = DataLoader(dataset=val_data, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) # Initialize the model num_classes = args.num_classes if args.model == 'concat2d': model = QConcatCNN2DLSTM(batch_size=args.batch_size, q_embedding_size=args.embed_size, nb_classes=num_classes,
# NOTE(review): whitespace-mangled chunk — newlines lost, and the line is TRUNCATED
# mid-expression (the QConcatCNN2DLSTM(...) call is never closed); code left
# byte-identical. Visible logic: loads the train/TEST split and labels from JSON,
# records MAX_ALLOWED_NUM_FRAMES_DROPPING in max_num_frames (not used in the visible
# span), builds a train VNQADataset and a test VNQADataset (the test one with
# q_metadata=True, presumably to carry question metadata through evaluation —
# confirm against VNQADataset), wraps both in shuffled DataLoaders, and begins
# constructing the 'concat2d' model. TODO: recover the untruncated text and
# restore formatting.
q_dir = QUESTIONS_DIR v_dir = VIDEOS_DIR labels_file = LABELS_FILE split_file = SPLIT_FILE with open(split_file, 'r') as f: split = json.load(f) train_file_ids = split['train'] test_file_ids = split['test'] max_num_frames = MAX_ALLOWED_NUM_FRAMES_DROPPING with open(labels_file, 'r') as f: labels = json.load(f) # Initialize datasets train_data = VNQADataset(q_dir=q_dir, v_dir=v_dir, filenames=train_file_ids, labels=labels) test_data = VNQADataset(q_dir=q_dir, v_dir=v_dir, filenames=test_file_ids, labels=labels, q_metadata=True) # Create DataLoader objects train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) test_loader = DataLoader(dataset=test_data, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) # Initialize the model num_classes = args.num_classes if args.model == 'concat2d': model = QConcatCNN2DLSTM(batch_size=args.batch_size, q_embedding_size=args.embed_size, nb_classes=num_classes,
# NOTE(review): whitespace-mangled chunk — newlines lost, and the line is TRUNCATED
# mid-expression (the second DataLoader(...) call for test_loader is never closed);
# code left byte-identical. Visible logic: a script __main__ section that loads the
# train/test split ids and labels from JSON, prints the split sizes, builds
# video-only (v_only=True) train/test VNQADataset objects, and starts building the
# two DataLoaders (train_loader complete with shuffle=True; test_loader cut off).
# TODO: recover the untruncated text and restore formatting.
if __name__ == '__main__': with open(SPLIT_FILE, 'r') as f: split = json.load(f) train_file_ids = split['train'] test_file_ids = split['test'] print('%d train examples, %d test examples' % (len(train_file_ids), len(test_file_ids))) with open(LABELS_FILE, 'r') as f: labels = json.load(f) # Initialize datasets for training and testing train_data = VNQADataset(q_dir=QUESTIONS_DIR, v_dir=VIDEOS_DIR, v_only=True, filenames=train_file_ids, labels=labels) test_data = VNQADataset(q_dir=QUESTIONS_DIR, v_dir=VIDEOS_DIR, v_only=True, filenames=test_file_ids, labels=labels) # Create DataLoader objects for training and test datasets train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) test_loader = DataLoader(dataset=test_data, batch_size=args.batch_size,
if __name__ == '__main__':
    # Entry point: prepare video-only train/validation pipelines and the 3D-CNN model.

    # Load the train/validation split of example ids from the split file.
    with open(SPLIT_FILE, 'r') as f:
        split = json.load(f)
    train_file_ids = split['train']
    val_file_ids = split['val']
    print('%d train examples, %d validation examples' % (len(train_file_ids), len(val_file_ids)))

    # Load the answer-label mapping.
    with open(LABELS_FILE, 'r') as f:
        labels = json.load(f)

    # Build the datasets in video-only mode (questions are not consumed here).
    num_classes = args.num_classes
    train_data = VNQADataset(q_dir=QUESTIONS_DIR, v_dir=VIDEOS_DIR, v_only=True,
                             num_classes=num_classes, filenames=train_file_ids,
                             labels=labels)
    val_data = VNQADataset(q_dir=QUESTIONS_DIR, v_dir=VIDEOS_DIR, v_only=True,
                           num_classes=num_classes, filenames=val_file_ids,
                           labels=labels)

    # Wrap both datasets in shuffled, multi-worker DataLoaders.
    train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_workers)
    val_loader = DataLoader(dataset=val_data, batch_size=args.batch_size,
                            shuffle=True, num_workers=args.num_workers)

    # Instantiate the video-only 3D CNN classifier.
    model = VideoOnlyCNN3D(nb_classes=num_classes)
if __name__ == '__main__':
    # Entry point: prepare question-only train/test datasets and a test DataLoader.

    # Read the train/test split of example ids.
    with open(args.split_file, 'r') as f:
        split = json.load(f)
    train_file_ids = split['train']
    test_file_ids = split['test']
    print('%d test examples' % len(test_file_ids))

    # Read the answer-label mapping.
    with open(args.labels_file, 'r') as f:
        labels = json.load(f)
    num_classes = args.num_classes

    # Build question-only datasets (q_only=True; v_dir is still passed through).
    train_data = VNQADataset(q_dir=args.q_dir, v_dir=args.v_dir, q_only=True,
                             filenames=train_file_ids, labels=labels,
                             num_classes=num_classes)
    test_data = VNQADataset(q_dir=args.q_dir, v_dir=args.v_dir, q_only=True,
                            filenames=test_file_ids, labels=labels,
                            num_classes=num_classes)

    # NOTE(review): only the test loader is constructed in this span (train_data is
    # built but not wrapped here), and shuffle=True on a test loader is unusual for
    # evaluation — confirm both are intentional before changing.
    test_loader = DataLoader(dataset=test_data, batch_size=args.batch_size,
                             shuffle=True, num_workers=args.num_workers)