# Evaluate the trained model: `train_val_split=1.` routes every sample into the
# validation split, so the val generator yields the whole dataset. Collect the
# ground-truth labels and the thresholded (0/1) predictions side by side.
reader = ReadData(dataset=args.dataset,
                  text_embedding_path=args.text_embedding,
                  video_feature_path=args.video_features,
                  data_shape=inputs,
                  train_val_split=1.)
results = []
labels = []
num_batches = int(reader.val_size / args.batch_size)
prog_bar = tqdm(total=num_batches)
i = 0
for x, y in reader.generator_val(batch_size=args.batch_size):
    # First element of each label row is the binary ground-truth class.
    for row in list(y):
        labels.append(row[0])
    # Threshold the sigmoid output at 0.5 to obtain a hard 0/1 prediction.
    for score in list(model.predict(x)):
        if score > 0.5:
            results.append(1)
        else:
            results.append(0)
    prog_bar.update(1)
    i += 1
    # BUG FIX: `i` was never incremented in the original, so this break could
    # never fire and the generator was consumed indefinitely. Using `>=` also
    # stops after exactly `num_batches` batches (the original `>` would have
    # processed two extra batches even with the increment in place).
    if i >= num_batches:
        break
prog_bar.close()
model = RecurrentCNN(no_filters=hidden_size, no_classes=args.no_classes) elif args.model == 'bilstm_rcnn': inputs = [(args.no_comments, 512), (30, 1536)] model = BiLSTMRecurrentCNN(hidden_size, no_classes=args.no_classes) model = model.build(inputs) model.compile(loss='binary_crossentropy', optimizer=Adam(lr=args.learning_rate), metrics=['accuracy']) model.summary() if args.check_build: exit() reader = ReadData(dataset=args.dataset, text_embedding_path=args.text_embedding, video_feature_path=args.video_features, data_shape=inputs, train_val_split=args.train_val_split) train_generator = reader.generator(batch_size=args.batch_size) val_generator = reader.generator_val(batch_size=args.batch_size) log_dir = args.logs + '_' + args.model logging = TrainValTensorBoard(log_dir=log_dir) checkpoint = ModelCheckpoint(os.path.join(log_dir, 'ep{epoch:03d}-val_loss{val_loss:.3f}-val_acc{val_acc:.3f}.h5'), monitor='val_acc', save_weights_only=True, save_best_only=True, period=1) reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=3, verbose=1) early_stopping = EarlyStopping(monitor='val_acc', min_delta=0, patience=5, verbose=1) print('Training on {} samples and Validating on {} samples.'.format(reader.train_size, reader.val_size)) model.fit_generator(generator=train_generator, steps_per_epoch=int(reader.train_size/args.batch_size), validation_data=val_generator, epochs=args.epochs, validation_steps=int(reader.val_size/args.batch_size), callbacks=[logging, checkpoint, reduce_lr, early_stopping])