# -----------------------------------------------------------------------------
# Evaluate the spatial stream.
# NOTE(review): this section was collapsed onto a single physical line in the
# original file (a SyntaxError as written); it has been re-flowed into valid
# Python with the code tokens unchanged.
# -----------------------------------------------------------------------------
print("Spatial stream")

# Restore the trained spatial model from disk. The custom loss and metrics
# used at training time must be re-registered via custom_objects for Keras
# deserialization to succeed.
spatial_model_restored = legacy_load_model(
    filepath="spatial.h5",
    custom_objects={
        'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss,
        "acc_top_1": acc_top_1,
        "acc_top_5": acc_top_5})
spatial_model_restored.summary()

# evaluate
# Build the test loader sized from the restored model's own input tensor
# (inputs[0].shape[1]/[2] are taken as width/height — assumes channels-last
# input layout, TODO confirm). 19 test samples are drawn per video.
_, spatial_test_loader, test_video_level_label = frame_dataloader.SpatialDataLoader(
    width=int(spatial_model_restored.inputs[0].shape[1]),
    height=int(spatial_model_restored.inputs[0].shape[2]),
    batch_size=get_batch_size(spatial_model_restored, spatial=True),
    testing_samples_per_video=19).run()

# Video-level evaluation: per-frame predictions (19 per video) are aggregated
# into one prediction per video inside eval_model.
video_level_loss, video_level_accuracy_1, video_level_accuracy_5, test_video_level_preds = eval_model(
    spatial_model_restored, spatial_test_loader, test_video_level_label, 19)

print("Spatial Model validation",
      "prec@1", video_level_accuracy_1,
      "prec@5", video_level_accuracy_5,
      "loss", video_level_loss)

"""
Evaluate motion stream
"""
# download
# drive_manager = DriveManager("heavy-mot-xception-adam-1e-05-imnet")
# drive_manager.download_file('1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ', "motion.zip")  # the id of the zip file contains my network
# load into ram
print("Motion stream")
# NOTE(review): the statements below were collapsed onto one physical line by a
# formatting/extraction accident; the code tokens are preserved byte-for-byte.
# The fragment starts mid-call — it is the keyword-argument tail of an unseen
# checkpoint-lookup call (presumably the one that produced `checkpoint_found`
# and `zip_file_name`; TODO confirm against the original file).
# What the visible code does:
#   * binds a partial SpatialDataLoader factory with the run-invariant args
#     (testing_samples_per_video, augmenter_level, log_stream);
#   * if a checkpoint was found: parses the best prec@1 from the checkpoint
#     zip file name ("<epoch>-<best_acc>-..."), restores the spatial model via
#     legacy_load_model with the custom loss/metrics registered, builds
#     train/test loaders sized from the restored model's input tensor, and
#     resumes fit_generator training (use_multiprocessing=True) from the epoch
#     encoded in the file name, with a SpatialValidationCallback and
#     ReduceLROnPlateau;
#   * otherwise falls through to the from-scratch branch (truncated here).
pred_file=pred_file, h5py_file=h5py_file, drive_manager=drive_manager, log_file=log_file) data_loader = partial(frame_dataloader.SpatialDataLoader, testing_samples_per_video=testing_samples_per_video, augmenter_level=augmenter_level, log_stream=log_stream) if checkpoint_found: # restore the model print("Model restored") eval_globals.best_video_level_accuracy_1 = float(zip_file_name.split("-")[1]) print("Current Best", eval_globals.best_video_level_accuracy_1) spatial_model_restored = legacy_load_model(filepath=h5py_file, custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5}) # init data loader train_loader, test_loader, test_video_level_label = data_loader(width=int(spatial_model_restored.inputs[0].shape[1]), height=int(spatial_model_restored.inputs[0].shape[2]), batch_size=get_batch_size(spatial_model_restored, spatial=True)).run() # training spatial_model_restored.fit_generator(train_loader, steps_per_epoch=len(train_loader), # generates a batch per step epochs=epochs, use_multiprocessing=True, workers=workers, # validation_data=gen_test(), validation_steps=len(test_loader.dataset) callbacks=[SpatialValidationCallback(model=spatial_model_restored, test_loader=test_loader, test_video_level_label=test_video_level_label), # returns callback instance keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=validate_every, verbose=1)], initial_epoch=int(zip_file_name.split("-")[0])) # get epoch number else: # init the model print("Starting from scratch")
# -----------------------------------------------------------------------------
# Motion-stream training driver (resume-from-checkpoint or start-from-scratch).
# NOTE(review): this section was collapsed onto a single physical line in the
# original file; it has been re-flowed into valid Python with the code tokens
# unchanged. Indentation depth is assumed top-level — confirm against the
# original file.
# -----------------------------------------------------------------------------

# Factory for motion data loaders with the run-invariant settings pre-bound;
# width/height/batch_size are supplied later, once the model (and therefore
# its expected input shape) is known.
data_loader = partial(frame_dataloader.MotionDataLoader,
                      testing_samples_per_video=testing_samples_per_video,
                      augmenter_level=augmenter_level,
                      log_stream=log_stream,
                      stacked_frames=stacked_frames)

if checkpoint_found:
    # restore the model from the checkpoint
    log("Model restored")

    # Checkpoint zip names encode "<epoch>-<best_prec@1>-..."; recover the
    # best video-level accuracy so the validation callback compares against it.
    eval_globals.best_video_level_accuracy_1 = float(zip_file_name.split("-")[1])
    log("Current Best", eval_globals.best_video_level_accuracy_1)

    # Custom loss/metrics must be re-registered for Keras deserialization.
    motion_model_restored = legacy_load_model(
        filepath=h5py_file,
        custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss,
                        "acc_top_1": acc_top_1,
                        "acc_top_5": acc_top_5})

    # init data loader, sized from the restored model's own input tensor
    # (shape[1]/shape[2] taken as width/height — assumes channels-last input;
    # TODO confirm).
    train_loader, test_loader, test_video_level_label = data_loader(
        width=int(motion_model_restored.inputs[0].shape[1]),
        height=int(motion_model_restored.inputs[0].shape[2]),
        batch_size=get_batch_size(motion_model_restored, spatial=False)).run()

    # training: resume from the epoch encoded in the checkpoint file name.
    motion_model_restored.fit_generator(
        train_loader,
        steps_per_epoch=len(train_loader),  # generates a batch per step
        epochs=epochs,
        use_multiprocessing=True, workers=workers,
        # validation_data=gen_test(), validation_steps=len(test_loader.dataset)
        callbacks=[
            MotionValidationCallback(model=motion_model_restored,
                                     test_loader=test_loader,
                                     test_video_level_label=test_video_level_label),  # returns callback instance
            keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=validate_every, verbose=1)],
        initial_epoch=int(zip_file_name.split("-")[0]))  # get epoch number
else:
    # init the model from scratch
    log("Starting from scratch")
# NOTE(review): this fragment was collapsed onto one physical line and is
# truncated at BOTH edges — it begins mid-expression (the tail of what is
# presumably `eval_globals.best_video_level_accuracy_1 = float(zip_file_name
# .split("-")[1])`; TODO confirm) and ends inside an unterminated
# `callbacks=[...]` list. Code tokens are preserved byte-for-byte.
# What the visible code does: restores the spatial model with its custom
# loss/metrics registered, builds train/test loaders sized from the model's
# input tensor, and resumes fit_generator training — here with
# use_multiprocessing=False, unlike the parallel variant elsewhere in this
# file which passes True; presumably a deliberate difference, verify which
# is intended.
zip_file_name.split("-")[1]) print("Current Best", eval_globals.best_video_level_accuracy_1) spatial_model_restored = legacy_load_model( filepath=h5py_file, custom_objects={ 'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5 }) # init data loader train_loader, test_loader, test_video_level_label = data_loader( width=int(spatial_model_restored.inputs[0].shape[1]), height=int(spatial_model_restored.inputs[0].shape[2]), batch_size=get_batch_size(spatial_model_restored, spatial=True)).run() # training spatial_model_restored.fit_generator( train_loader, steps_per_epoch=len(train_loader), # generates a batch per step epochs=epochs, use_multiprocessing=False, workers=workers, # validation_data=gen_test(), validation_steps=len(test_loader.dataset) callbacks=[ SpatialValidationCallback( model=spatial_model_restored, test_loader=test_loader, test_video_level_label=test_video_level_label ), # returns callback instance