コード例 #1
0
    # NOTE(review): fragment of a larger training script — tf, X_feature,
    # batch_size, tscale, DBG_Train, data_loader and dbg_config are all
    # defined outside this excerpt (TF 1.x graph-mode code).
    # Ground-truth placeholders: action / start / end label tensors, each of
    # shape (batch_size, tscale).
    Y_action = tf.placeholder(tf.float32, shape=(batch_size, tscale))
    Y_start = tf.placeholder(tf.float32, shape=(batch_size, tscale))
    Y_end = tf.placeholder(tf.float32, shape=(batch_size, tscale))
    # IoU target map over all (start, end) index pairs — one channel.
    Y_iou = tf.placeholder(tf.float32, shape=(batch_size, tscale, tscale, 1))
    # Scalar learning rate and train/eval switch, fed at session-run time.
    LR = tf.placeholder(tf.float32)
    train = tf.placeholder(tf.bool)
    # Build the DBG training graph; returns the train op, total loss and the
    # trainable-variable list (exact semantics live in DBG_Train, not shown
    # here).
    optimizer, loss, DBG_trainable_variables = \
        DBG_Train(X_feature, Y_action, Y_start, Y_end, Y_iou, LR, train)
    """ init tf"""
    # Keep up to 80 checkpoints; let GPU memory grow on demand rather than
    # reserving it all at startup.
    model_saver = tf.train.Saver(max_to_keep=80)
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=tf_config)
    tf.global_variables_initializer().run()
    """ load dataset """
    # Split the annotation file into train/val/test dicts, then load the
    # full data for train and val (val is built with training=False —
    # presumably skipping train-only augmentation; confirm in data_loader).
    train_dict, val_dict, test_dict = data_loader.getDatasetDict(
        dbg_config.video_info_file, dbg_config.video_filter)
    train_data_dict, train_video_mean_len = data_loader.getFullData(
        train_dict, dbg_config)
    val_data_dict = data_loader.getFullData(val_dict,
                                            dbg_config,
                                            training=False)

    # Running histories for the total cost and each loss component,
    # appended to during training (appending code not in this excerpt).
    train_info = {
        "cost": [],
        "loss_action": [],
        "loss_start": [],
        "loss_end": [],
        "l2": [],
        'loss_iou': [],
        'loss_st': []
    }
コード例 #2
0
    # NOTE(review): fragment of a larger inference script — tf, os, tqdm,
    # prop_start, prop_end, tf_mask, batch_size, checkpoint_dir, test_mode
    # and data_loader come from outside this excerpt.
    # Zero out boundary-map entries that fall outside the valid-proposal
    # mask.
    prop_start = prop_start * tf_mask
    prop_end = prop_end * tf_mask

    # boundary map fusion
    # Collapse each masked 2-D map to a 1-D score by a masked mean along one
    # axis; tf.maximum(..., 1) guards the division against an all-zero mask
    # row/column.
    pstart = tf.reduce_sum(prop_start, 2) / tf.maximum(tf.reduce_sum(tf_mask, 2), 1)
    pend = tf.reduce_sum(prop_end, 1) / tf.maximum(tf.reduce_sum(tf_mask, 1), 1)

    # Fresh session with on-demand GPU memory growth; restore the best
    # checkpoint saved by the training script.
    model_saver = tf.train.Saver()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=tf_config)
    tf.global_variables_initializer().run()
    model_saver.restore(sess, os.path.join(checkpoint_dir, 'dbg_model_best'))

    # Choose the evaluation split; anything other than 'validation' falls
    # through to the test set.
    train_dict, val_dict, test_dict = data_loader.getDatasetDict()
    if test_mode == 'validation':
        video_dict = val_dict
    else:
        video_dict = test_dict

    # Deterministic (unshuffled) batching for evaluation.
    batch_video_list = data_loader.getBatchListTest(video_dict, batch_size, shuffle=False)

    # Per-batch output accumulators, filled inside the loop below.
    batch_result_xmin = []
    batch_result_xmax = []
    batch_result_iou = []
    batch_result_pstart = []
    batch_result_pend = []

    print('Runing DBG model ...')
    # NOTE(review): the loop body is truncated in this excerpt.
    for idx in tqdm.tqdm(range(len(batch_video_list))):
コード例 #3
0
ファイル: test.py プロジェクト: apulis/ActionDetection-DBG
 # NOTE(review): fragment of test.py (ActionDetection-DBG project) — tf, os,
 # prop_start, prop_end, tf_mask, batch_size, checkpoint_dir,
 # video_info_file, test_mode and data_loader are defined outside this
 # excerpt; the excerpt also ends mid-docstring.
 # Mask out invalid proposal positions in both boundary maps.
 prop_start = prop_start * tf_mask
 prop_end = prop_end * tf_mask
 # Fuse each masked 2-D boundary map into a 1-D score via a masked mean;
 # tf.maximum(..., 1) prevents division by zero where a mask row/column is
 # entirely zero.
 pstart = tf.reduce_sum(prop_start, 2) / tf.maximum(
     tf.reduce_sum(tf_mask, 2), 1)
 pend = tf.reduce_sum(prop_end, 1) / tf.maximum(tf.reduce_sum(tf_mask, 1),
                                                1)
 """ Load model weights """
 # Fresh session (GPU memory grown on demand); restore the best checkpoint
 # produced by training.
 model_saver = tf.train.Saver()
 tf_config = tf.ConfigProto()
 tf_config.gpu_options.allow_growth = True
 sess = tf.InteractiveSession(config=tf_config)
 tf.global_variables_initializer().run()
 model_saver.restore(sess, os.path.join(checkpoint_dir, 'dbg_model_best'))
 """ Get test or validation video list 
 """
 # Any test_mode other than 'validation' evaluates on the test split.
 train_dict, val_dict, test_dict = data_loader.getDatasetDict(
     video_info_file)
 if test_mode == 'validation':
     video_dict = val_dict
 else:
     video_dict = test_dict
 """ load test or validation data
 """
 batch_video_list = data_loader.getBatchListTest(video_dict, batch_size)
 """ init result list
 """
 # Per-batch output accumulators, filled by the (truncated) run loop.
 batch_result_xmin = []
 batch_result_xmax = []
 batch_result_iou = []
 batch_result_pstart = []
 batch_result_pend = []
 """ Run DBG model