# ----- Example 1 (예제 #1) -----
                             shape=(config.batch_size, config.input_steps))
    # Per-step "end" labels; same (batch_size, input_steps) layout as the
    # Y_start placeholder defined just above this (truncated) snippet.
    Y_end = tf.placeholder(tf.float32,
                           shape=(config.batch_size, config.input_steps))
    # Scalar learning-rate placeholder so the LR can be changed per step via feed_dict.
    LR = tf.placeholder(tf.float32)
    # Build the TEM training graph; returns the train op, the total loss tensor,
    # and the list of variables to checkpoint (presumably only TEM-owned
    # variables — confirm in TEM_Train).
    optimizer, loss, TEM_trainable_variables = TEM_Train(
        X_feature, Y_action, Y_start, Y_end, LR, config)
    """ Init tf"""
    # Saver restricted to the TEM variables; retains up to 80 checkpoints.
    model_saver = tf.train.Saver(var_list=TEM_trainable_variables,
                                 max_to_keep=80)
    tf_config = tf.ConfigProto()
    # Grow GPU memory on demand instead of reserving it all up front.
    tf_config.gpu_options.allow_growth = True
    # NOTE(review): log_device_placement=True is extremely verbose — looks like
    # a debugging leftover; consider disabling for normal training runs.
    tf_config.log_device_placement = True
    sess = tf.InteractiveSession(config=tf_config)
    tf.global_variables_initializer().run()

    # Dataset split metadata plus fully materialized train/val features and labels.
    train_dict, val_dict, test_dict = TEM_load_data.getDatasetDict()
    train_data_dict = TEM_load_data.getFullData("train")
    val_data_dict = TEM_load_data.getFullData("val")

    # Running metric histories appended to during training.
    # "cost" is presumably the total objective and "l2" the weight-decay
    # term — TODO confirm against the loss construction in TEM_Train.
    train_info = {
        "cost": [],
        "loss_action": [],
        "loss_start": [],
        "loss_end": [],
        "l2": []
    }
    # Same metric keys tracked for the validation split (dict truncated in this view).
    val_info = {
        "cost": [],
        "loss_action": [],
        "loss_start": [],
        "loss_end": [],
# ----- Example 2 (예제 #2) -----
        self.input_steps=100

# TF1 TEM inference entry point (truncated in this view): restores the best
# TEM checkpoint and runs every test batch through the graph, collecting
# per-anchor action/start/end probability scores.
if __name__ == "__main__":
    config = Config()
    # Fixed-shape input: (batch_size, input_steps, n_inputs) snippet features.
    X_feature = tf.placeholder(tf.float32, shape=(config.batch_size,config.input_steps,config.n_inputs))
    # Builds the inference graph; tem_scores is fed to sess.run below and is
    # indexed like a dict with keys "anchors_action"/"anchors_start"/"anchors_end".
    tem_scores=TEM_inference(X_feature,config)

    # Restore ALL trainable variables (not a TEM-only subset) from the best checkpoint.
    model_saver=tf.train.Saver(var_list=tf.trainable_variables(),max_to_keep=80)
    tf_config = tf.ConfigProto()
    # Grow GPU memory on demand instead of reserving it all up front.
    tf_config.gpu_options.allow_growth = True
    # NOTE(review): device-placement logging is very noisy — likely a debug leftover.
    tf_config.log_device_placement =True
    sess=tf.InteractiveSession(config=tf_config)
    tf.global_variables_initializer().run()
    model_saver.restore(sess,"models/TEM/tem_model_best")

    # ActivityNet annotation JSON — presumably keyed by video id; verify the
    # schema against TEM_load_data.load_json's callers.
    video_dict= TEM_load_data.load_json("./data/activitynet_annotations/anet_anno_action.json")

    # Accumulators for per-batch outputs; xmin/xmax collect anchor temporal
    # bounds (appended further down, past this truncated view).
    batch_result_action=[]
    batch_result_start=[]
    batch_result_end=[]
    batch_result_xmin=[]
    batch_result_xmax=[]

    # shuffle=False keeps batch order deterministic so results stay aligned
    # with the video list when post-processed.
    batch_video_list=TEM_load_data.getBatchListTest(video_dict,config.batch_size,shuffle=False)

    for idx in range(len(batch_video_list)):
        # Anchor temporal bounds plus input features for this batch of videos.
        batch_anchor_xmin,batch_anchor_xmax,batch_anchor_feature=TEM_load_data.getProposalDataTest(batch_video_list[idx],video_dict)
        out_scores=sess.run(tem_scores,feed_dict={X_feature:batch_anchor_feature})
        batch_result_action.append(out_scores["anchors_action"])
        batch_result_start.append(out_scores["anchors_start"])
        batch_result_end.append(out_scores["anchors_end"])
# ----- Example 3 (예제 #3) -----
# PyTorch TEM inference entry point (truncated in this view): resolves model
# and output directories from CLI options, loads ground-truth annotations and
# precomputed features, builds the test batch list, and restores the best TEM
# weights onto the GPU.
if __name__ == '__main__':

    opt = parse_arguments()
    batch_size = opt.batchsize
    # NOTE(review): `== None` should be `is None` per PEP 8 — flagged only,
    # left unchanged in this documentation pass.
    if opt.experiment == None:
        # Default locations when no experiment name is supplied.
        opt.experiment = './pytorch_models'
        output_root = '../../output'
    else:
        # Per-experiment subdirectories for both outputs and saved models.
        output_root = os.path.join('../../output/', opt.experiment)
        opt.experiment = os.path.join('./pytorch_models', opt.experiment)

    # video_dict = TEM_load_data.load_json("./data/activitynet_annotations/anet_anno_action.json")
    # Ground-truth annotations for the VIRAT/BSN dataset, stored as a pickle.
    # NOTE(review): pickle.load is only safe on trusted local files — fine here,
    # but never point gt_path at untrusted input.
    gt_path = '../../datasets/virat/bsn_dataset/stride_100_interval_300/gt_annotations.pkl'
    with open(gt_path, 'rb') as input_file:
        video_dict = pickle.load(input_file)
    # Precomputed features for every video — schema defined in TEM_load_data.
    feat_dict = TEM_load_data.load_whole_features()

    # Accumulators for per-batch outputs; xmin/xmax presumably collect anchor
    # temporal bounds later in the (truncated) inference loop.
    batch_result_action = []
    batch_result_start = []
    batch_result_end = []
    batch_result_xmin = []
    batch_result_xmax = []

    # shuffle=False keeps batch order deterministic so results stay aligned
    # with the video list when post-processed.
    batch_video_list = TEM_load_data.getBatchListTest(video_dict,
                                                      batch_size,
                                                      shuffle=False)

    # Instantiate the TEM network, load the best checkpoint, and move to GPU.
    tem = TEM(embedsize=opt.embedsize, hiddensize=opt.hiddensize)
    model_path = os.path.join(opt.experiment, 'TEM/tem_model_best.pth')
    tem.load_state_dict(torch.load(model_path))
    tem.cuda()