################################################################################
"""Files, paths & identifier"""
# Optional personal tag prepended to the experiment identifier (may be empty).
suffix = "hater"
# Identifier encodes: tag, augmentation level, stream (motion), backbone name,
# optimizer, learning rate and weight init (ImageNet pretrained vs scratch).
experiment_identifier = (
    suffix
    + ("" if suffix == "" else "-")
    + get_augmenter_text(augmenter_level)
    + "-mot-" + model_name
    + "-" + ("adam" if is_adam else "SGD")
    + "-" + str(lr)
    + "-" + ("imnet" if pretrained else "scrat")
)
log_file = "motion.log"
# Reuse the configured name instead of repeating the "motion.log" literal
# (the fusion fragment below already follows this pattern). Append mode so
# restarts keep earlier history.
log_stream = open(log_file, "a")
training_log = open("motion_training.log", "a")
validation_log = open("motion_validation.log", "a")
h5py_file = "motion.h5"
pred_file = "motion.preds"
################################################################################
"""Checking latest"""
# Echo the identifier so the run is recognizable in console output.
print(experiment_identifier)
# Number of target action classes for the classifier head.
num_actions = 10
# Each experiment stores its snapshots under its own Drive folder named
# after the identifier.
drive_manager = DriveManager(experiment_identifier)
# Resume support: fetch the newest checkpoint archive, if one was uploaded.
checkpoint_found, zip_file_name = drive_manager.get_latest_snapshot()
################################################################################
# Must be registered as a callback ahead of Keras' reduce-on-plateau so
# validation metrics exist before the scheduler reads them.
MotionValidationCallback = partial(
    eval_globals.get_validation_callback,
    drive_manager=drive_manager,
    log_file=log_file,
    log_stream=log_stream,
    training_log=training_log,
    validation_log=validation_log,
    h5py_file=h5py_file,
    pred_file=pred_file,
    validate_every=validate_every,
    testing_samples_per_video=testing_samples_per_video,
)

# Pre-configured loader factory for the stacked optical-flow frames.
data_loader = partial(
    frame_dataloader.MotionDataLoader,
    stacked_frames=stacked_frames,
    augmenter_level=augmenter_level,
    log_stream=log_stream,
    testing_samples_per_video=testing_samples_per_video,
)
from tensorflow.python.keras import Model, Input

import frame_dataloader
from evaluation import legacy_load_model
from evaluation.evaluation import *
from utils.drive_manager import DriveManager

#####################################################
feature_field_size = 2048  # length of one extracted feature vector (presumably the backbone's pooled output — confirm)
testing_samples_per_video = 19  # samples drawn per video at test time
#####################################################
"""Managed"""
# Manually-managed switches for which phases to run.
evaluate = False
generate_test = False

# Drive folder holding the pre-extracted motion feature dataset.
drive_manager = DriveManager("motion_feature_dataset")
# Fetch the archived trained network by its Drive file id.
drive_manager.download_file(
    '1O8OM6Q01az_71HdMQmWM3op1qJhfsQoI',
    "motion.zip")  # id of the zip file containing the trained network

# Deserializing the saved model needs the custom loss/metric callables
# resolved by the same names they were saved under.
restore_objects = {
    "sparse_categorical_cross_entropy_loss": sparse_categorical_cross_entropy_loss,
    "acc_top_1": acc_top_1,
    "acc_top_5": acc_top_5,
}
motion_model_restored = legacy_load_model(
    filepath="motion.h5", custom_objects=restore_objects)
motion_model_restored.summary()
# xception here is a layer
# The architecture summary is
# ---- Example #3 (scrape boundary; original marker was "예제 #3" / "0") ----
# A new, unrelated snippet (spatial-stream restore) begins below.
from tensorflow.python.keras import Input

import frame_dataloader
from evaluation import legacy_load_model
from evaluation.evaluation import *
from utils.drive_manager import DriveManager

#####################################################
feature_field_size = 2048  # extracted feature vector length — assumed to match the backbone's pooled output; confirm
testing_samples_per_video = 19  # samples taken from each video for testing
#####################################################
"""Managed"""
# Hand-toggled flags selecting which phases of the pipeline execute.
evaluate = False
generate_test = False

# Pull the archived spatial-stream network from its shared Drive folder.
drive_manager = DriveManager("spatial_feature_dataset")
drive_manager.download_file('17O8JdvaSNJFmbvZtQPIBYNLgM9Um-znf', "spatial.zip")

# Map the names of the custom loss/metrics back to their implementations so
# the serialized model can be reconstructed.
spatial_restore_objects = {
    "sparse_categorical_cross_entropy_loss": sparse_categorical_cross_entropy_loss,
    "acc_top_1": acc_top_1,
    "acc_top_5": acc_top_5,
}
spatial_model_restored = legacy_load_model(
    filepath="spatial.h5", custom_objects=spatial_restore_objects)

spatial_model_restored.summary()

# NOTE(review): the Model(...) call below is truncated in this scrape — the
# closing brackets are missing, so this fragment does not parse as-is.
# Restore the full call from the original file before running.
spatial_model_with_2_outputs = Model(
    spatial_model_restored.inputs,  # input image
    [layer.output for layer in spatial_model_restored.layers[-2:]
# An unrelated fragment begins here: when both streams feed the fusion
# network, the fused feature width doubles. (feature_field is presumably
# the per-stream width defined upstream — confirm against the full file.)
if is_spatial and is_motion:
    feature_field *= 2
################################################################################
"""Files, paths & identifier"""
suffix = ""  # optional personal tag, prepended verbatim to the identifier :D
experiment_identifier = suffix + "recurrent_fusion_selu_atten_simple" + str(lr)
################
log_file = experiment_identifier + ".log"
log_stream = open(log_file, "a")  # append so restarts keep earlier history
checkpoint_dir = "./fusion/"
checkpoints = checkpoint_dir + "fusion_chk"
# Start from a clean checkpoint directory. ignore_errors=True covers the
# directory-not-there case; the previous bare `except: pass` also silently
# hid every other failure, which this narrows away.
shutil.rmtree(checkpoint_dir, ignore_errors=True)
drive_manager = DriveManager(experiment_identifier)
checkpoint_found, zip_file_name = drive_manager.get_latest_snapshot()
################################################################################
"""sanity check"""
# Nothing to fuse unless at least one input stream is enabled.
if not (is_motion or is_spatial):
    exit()
################################################################################
"""Download the feature files; skip any that are already present so repeated runs don't re-download."""
if is_spatial:
    # Features live in a shared Drive folder, one pickle per split.
    drive_manager_spatial = DriveManager("spatial_feature_dataset")
    test_spatial = drive_manager_spatial.search_file(
        "test_features_spatial.pickle")
    train_spatial = drive_manager_spatial.search_file(
        "train_features_spatial.pickle")

    # NOTE(review): truncated here in this scrape — the body of this `if`
    # (presumably the download of the missing test features) is cut off.
    if len(test_spatial) == 0:
"""
********************************
*   Created by mohammed-alaa   *
********************************
Evaluate motion and spatial streams
"""
import frame_dataloader
from evaluation import legacy_load_model, get_batch_size
from evaluation.evaluation import *
from utils.drive_manager import DriveManager
"""
Evaluate spatial stream
"""
# Fetch the trained spatial network snapshot from its experiment folder.
drive_manager = DriveManager("spa-xception-adam-5e-06-imnet")
drive_manager.download_file('1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK', "spatial.zip")

# Rebuild the model in memory, resolving the custom loss/metrics by name.
print("Spatial stream")
deserializers = {
    "sparse_categorical_cross_entropy_loss": sparse_categorical_cross_entropy_loss,
    "acc_top_1": acc_top_1,
    "acc_top_5": acc_top_5,
}
spatial_model_restored = legacy_load_model(
    filepath="spatial.h5", custom_objects=deserializers)
spatial_model_restored.summary()

# evaluate
_, spatial_test_loader, test_video_level_label = frame_dataloader.SpatialDataLoader(