# Assumed imports, shared by the examples on this page: NumPy, Keras'
# load_model, and the project's DataSet class (module paths follow the
# original project layout).
import numpy as np
from keras.models import load_model
from data import DataSet


def predict(data_type, seq_length, saved_model, image_shape, video_name, class_limit, config):

    model = load_model(saved_model)

    feature_file_path = config.featureFileName
    work_dir = config.workDir
    classlist = config.classes
    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit,
                       feature_file_path=feature_file_path,
                       repo_dir=config.repoDir,
                       work_dir=work_dir, classlist=classlist)
    else:
        data = DataSet(seq_length=seq_length, image_shape=image_shape,
                       class_limit=class_limit,
                       feature_file_path=feature_file_path,
                       repo_dir=config.repoDir,
                       work_dir=work_dir, classlist=classlist)
    
    # Extract the sample from the data.
    sample = data.get_frames_by_filename(video_name, data_type)

    # Predict!
    prediction = model.predict(np.expand_dims(sample, axis=0))
    print(prediction)
    data.print_class_from_prediction(np.squeeze(prediction, axis=0))
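For orientation, a call to this config-driven variant might look like the sketch below; the config attribute values, model path, and video name are illustrative assumptions, not taken from the example above.

# Hypothetical invocation; every value here is an assumption for illustration.
from types import SimpleNamespace

config = SimpleNamespace(
    featureFileName='data_file.csv',        # assumed feature/metadata CSV
    workDir='./work',                       # assumed working directory
    repoDir='./repo',                       # assumed repository root
    classes=['ApplyEyeMakeup', 'Archery'],  # assumed class whitelist
)

predict(data_type='features',               # use pre-extracted sequence features
        seq_length=40,
        saved_model='checkpoints/lstm-features.hdf5',  # assumed model file
        image_shape=None,                   # None selects the feature branch
        video_name='v_Archery_g01_c01',     # assumed video identifier
        class_limit=None,
        config=config)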
Example #2
def predict(data_type, seq_length, saved_model, image_shape, video_name, class_limit):
    model = load_model(saved_model)

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length, image_shape=image_shape, class_limit=class_limit)
    
    # Extract the sample from the data.
    sample = data.get_frames_by_filename(video_name, data_type)

    # Predict!
    prediction = model.predict(np.expand_dims(sample, axis=0))
    print(prediction)
    data.print_class_from_prediction(np.squeeze(prediction, axis=0))
Example #3
def predict(data_type, seq_length, saved_model, image_shape, video_name, class_limit):
    model = load_model(saved_model)

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length, image_shape=image_shape,
            class_limit=class_limit)
    
    # Extract the sample from the data.
    sample = data.get_frames_by_filename(video_name, data_type)

    # Predict!
    prediction = model.predict(np.expand_dims(sample, axis=0))
    print(prediction)
    data.print_class_from_prediction(np.squeeze(prediction, axis=0))
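Examples #2 and #3 share the same signature; whether DataSet works on pre-extracted features or on raw frames is controlled by image_shape together with data_type. An illustrative pair of calls follows; the model paths, shape, video name, and the 'features'/'images' strings are assumptions.

# Feature sequences: image_shape is None and data_type is 'features'.
predict('features', 40, 'checkpoints/lstm-features.hdf5', None,
        'v_Archery_g01_c01', None)

# Raw frames: pass an image_shape and data_type 'images'.
predict('images', 40, 'checkpoints/lrcn-images.hdf5', (80, 80, 3),
        'v_Archery_g01_c01', None)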
Example #4
# Example #4 also needs the standard-library datetime module, and it caches
# the loaded model in a module-level variable so repeated calls skip reloading.
import datetime

model = None

def predict(data_type, seq_length, saved_model, image_shape, video_name,
            class_limit):
    print("**********************************")
    print("\nstart loading model...")
    print(datetime.datetime.now())
    print("**********************************")
    global model
    if not model:
        model = load_model(saved_model)
    print("**********************************")
    print("model loaded successfully...")
    print(datetime.datetime.now())
    print("**********************************")

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:
        data = DataSet(seq_length=seq_length,
                       image_shape=image_shape,
                       class_limit=class_limit)

    # Extract the sample from the data.
    sample = data.get_frames_by_filename(video_name, data_type)

    # Predict!
    print("**********************************")
    print(datetime.datetime.now())
    print("**********************************")
    prediction = model.predict(np.expand_dims(sample, axis=0))
    print("**********************************")
    print(datetime.datetime.now())
    print("**********************************")
    #print(keras.np_utils.probas_to_classes(prediction))
    print(prediction)
    print("**********************************")
    print(datetime.datetime.now())
    print("**********************************")
    return data.print_class_from_prediction(np.squeeze(prediction, axis=0))
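Because the model is cached in the module-level `model` variable, only the first call pays the loading cost; a usage sketch with assumed arguments:

# First call loads the model from disk and caches it in `model`
# (all paths and video names below are assumptions for illustration).
predict('features', 40, 'checkpoints/lstm-features.hdf5', None,
        'v_Archery_g01_c01', None)

# Subsequent calls (e.g. from a web handler) reuse the cached model.
predict('features', 40, 'checkpoints/lstm-features.hdf5', None,
        'v_Archery_g02_c03', None)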
Example #5
# Example #5 targets TensorFlow 1.x; lstm_inference is assumed to be the
# project's own module providing inference().
import tensorflow as tf
import lstm_inference

def predict(seq_length, class_limit, feature_length, saved_model, video_name):

    # Get the data.
    data = DataSet(seq_length=seq_length, class_limit=class_limit)

    # Use the video name to extract the sample's sequence of feature values.
    sample = data.get_frames_by_filename(video_name)
    sample = np.reshape(sample, [-1, seq_length, feature_length])

    # Determine the number of classes used in training.
    if class_limit is None:
        class_num = 101
    else:
        class_num = class_limit

    # Set up the input placeholder.
    input_x = tf.placeholder(tf.float32, [None, seq_length, feature_length],
                             name="input-x")

    # Forward-pass computation.
    _, output_y = lstm_inference.inference(input_x,
                                           class_num,
                                           None,
                                           train=False)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        model = tf.train.get_checkpoint_state(saved_model)
        if model and model.model_checkpoint_path:
            saver.restore(sess, model.model_checkpoint_path)
            prediction = sess.run(output_y, feed_dict={input_x: sample})
            print("the prediction of the video %s is:" % video_name)
            data.print_class_from_prediction(np.squeeze(
                prediction, axis=0))  # drop axis 0 of prediction (its size is 1)
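An illustrative call for this TensorFlow 1.x variant; note that saved_model is a checkpoint directory scanned by tf.train.get_checkpoint_state(), not a single model file. The concrete values below, including the 2048 feature length, are assumptions.

predict(seq_length=40,
        class_limit=None,                  # None falls back to 101 classes
        feature_length=2048,               # assumed per-frame feature size
        saved_model='./checkpoints/lstm',  # assumed checkpoint directory
        video_name='v_Archery_g01_c01')    # assumed video identifier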
Example #6
# Example #6 additionally uses os, glob, and random from the standard library,
# plus the project's ResearchModels class (module path assumed); the helper
# read_remove_list() is assumed to be defined elsewhere in the project.
import glob
import os
import random

from models import ResearchModels  # assumed import path

class Predict:
    def __init__(self,
                 epoch=25,
                 val_loss=1.174,
                 model_type='lstm',
                 seq_length=40,
                 data_type='features'):
        # model can be one of lstm, lrcn, mlp, conv_3d, c3d
        self.model_type = model_type
        self.seq_length = seq_length
        self.data_type = data_type
        filepath = os.path.join(
            '..', 'data', 'checkpoints', model_type + '-' + data_type +
            '.{:03d}-{:.3f}.hdf5'.format(epoch, val_loss))
        # print('Loading the model:', filepath)
        # model = load_model(filepath)

        # Get the data and process it.
        self.data = DataSet(seq_length=seq_length, class_limit=None)

        # Get the model.
        print("Model Type:", model_type)
        self.rm = ResearchModels(len(self.data.classes), self.model_type,
                                 self.seq_length, filepath)

        # read the video IDs
        # self.all_video_ids = sorted([os.path.basename(name).split('.webm')[0] for name in glob.glob('../*/*/*.webm')])
        self.all_video_ids = sorted([
            os.path.basename(name).split('.webm')[0]
            for name in glob.glob('../*/videos_safe_viewing/*.webm')
        ])
        assert len(self.all_video_ids) != 0
        remove_list = read_remove_list()

        # Not needed any more
        # for item in remove_list:
        #     try:
        #         self.all_video_ids.remove(item)
        #     except:
        #         print(item, 'not in list')

        self.showing_ids = []
        print('Ready to accept ReST calls!')

    def get_videos_ids(self, count=12):
        # pick some at random to display
        self.showing_ids = random.sample(self.all_video_ids, count)
        # self.showing_ids = self.all_video_ids[2000:2000+count]
        return self.showing_ids

    def predict_video_id(self, video_id, top_N=2):
        sample = self.data.get_frames_by_filename(video_id, self.data_type)
        if sample is not None:
            prediction = self.rm.model.predict(np.expand_dims(sample, axis=0))
            return self.data.get_top_N_from_prediction(
                np.squeeze(prediction, axis=0))
        else:
            return []

    def predict_all_showing_ids(self):
        predictions = []
        for video_id in self.showing_ids:
            prediction = self.predict_video_id(video_id)
            predictions.append({"id": video_id, "label": prediction})
        return predictions

    def get_all_video_ids(self):
        return self.all_video_ids

    def get_GT_from_id(self, video_id):
        return video_id.split('_')[1]  # sample format, v_RopeClimbing_g01_c01
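A short driver showing how the class above might be exercised; the constructor arguments mirror its defaults, and the rest is illustrative.

# Build the predictor once (loads the checkpointed model via ResearchModels).
p = Predict(epoch=25, val_loss=1.174, model_type='lstm',
            seq_length=40, data_type='features')

# Choose a random batch of videos, then classify each of them.
p.get_videos_ids(count=12)
for result in p.predict_all_showing_ids():
    print(result['id'], result['label'])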