# NOTE(review): this chunk is whitespace-mangled (original newlines lost) and is
# truncated at both ends: it opens mid-call — presumably the tail of a
# `benset_test = BatchLoader(benset, benset.get_dataset_keys(), ...)` assignment
# whose head lies before this view (TODO: confirm against the full file) — and the
# final `else:` branch of the evaluation loop is cut off (by symmetry with the
# validation section below it would likely be `predictions.append(0)`, but that is
# not visible here). Left byte-identical rather than reconstructed. As a single
# physical line, everything after the embedded `#benset_seq` marker is inert.
# The visible logic: collect ground-truth vs. predicted action classes
# (argmax of model output index 11 vs. argmax of y[0]) into y_actu / y_pred and
# a 0/1 correctness list `predictions`; `wrong` is initialized but not used in
# the visible span.
benset.get_dataset_annotations(), batch_size, num_frames) #benset_seq = BatchLoader(benset, benset.get_dataset_keys(),benset.get_dataset_annotations(),batch_size,num_frames, mode=1, random_hflip=1, random_brightness=1, random_channel_shift=0, random_zoom=1, random_subsampling=1, random_rot=1, random_blur=1) from sklearn.metrics import confusion_matrix y_actu = [] y_pred = [] from collections import Counter predictions = [] wrong = [0, 0, 0, 0] while True: x, y = benset_test.__next__() if x is None: break prediction = full_model.predict(x) pred_action = np.argmax(prediction[11]) annot_action = np.argmax(y[0]) y_actu.append(annot_action) y_pred.append(pred_action) if pred_action == annot_action: predictions.append(1) else:
# --- Train, then validate on Benset ---------------------------------------
# NOTE(review): flat script code. MultiModelTrainer, models,
# benset_train_batchloader, benset_dataloader, benset_test_batchloader,
# full_model, steps_per_epoch, fcallback, printcn and OKBLUE are defined
# elsewhere in the file.

# Train the second model up to epoch 40, resuming from epoch 3.
trainer = MultiModelTrainer([models[1]], [benset_train_batchloader],
                            workers=6, print_full_losses=True)
trainer.train(40, steps_per_epoch=steps_per_epoch, initial_epoch=3,
              end_of_epoch_callback=fcallback)

# Per-sample ground-truth and predicted action classes, plus a 0/1
# correctness flag per sample.
y_actu = []
y_pred = []
predictions = []

printcn(OKBLUE, 'Validation on Benset')

# One batch per test key; the loop index itself is unused (fix: was
# `for i in range(len(...))` with `i` never read).
for _ in range(len(benset_dataloader.get_test_data_keys())):
    x, y = next(benset_test_batchloader)

    prediction = full_model.predict(x)
    # Output index 11 is the action head — presumably the final action
    # softmax; TODO confirm against the model definition.
    pred_action = np.argmax(prediction[11])
    annot_action = np.argmax(y[0])

    y_actu.append(annot_action)
    y_pred.append(pred_action)

    # 1 for a correct action prediction, 0 otherwise.
    predictions.append(1 if pred_action == annot_action else 0)

# Accuracy in percent. `sum` over the 0/1 list replaces the original
# `Counter(predictions)[1]`, and the guard fixes a ZeroDivisionError when the
# test split is empty (original: 100.0 / len(predictions) * ...).
accuracy = 100.0 * sum(predictions) / len(predictions) if predictions else 0.0