Esempio n. 1
0
def infer(data_type):
    proposal_data = {'results': {}, 'version': "VERSION 1.0"}
    json_data = load_json("data/meta.json")
    database = json_data['database']
    dict_dim = 2048
    class_num = 2 
    window_lens = [30]
    window_stride = 30 
    model_path = "model/video_lstm30.tar.gz"
    prob_layer = lstm_net(dict_dim, class_num, is_infer=True)

    # initialize PaddlePaddle
    paddle.init(use_gpu=False, trainer_count=1)

    # load the trained models
    if os.path.exists(model_path):
        with gzip.open(model_path, 'r') as f:
            parameters = paddle.parameters.Parameters.from_tar(f)
    index = 0
    for video in database.keys():
        dataSet = database[video]["subset"]
        if dataSet != data_type:
            continue
        with open("data/" + dataSet + "/" + str(video) + ".pkl", 'rb') as f:
            video_fea = cPickle.load(f)
        print index, video
        index += 1
        video_len = np.shape(video_fea)[0]
        this_vid_proposals = []
        for pos in range(0, video_len, window_stride):
            inputs = []
            for window_len in window_lens:
                if pos + window_len < video_len:
                    inputs.append([video_fea[pos:pos + window_len]])
            probs = paddle.infer(
                output_layer=prob_layer, parameters=parameters, input=inputs, field="value")
            if len(probs) <= 0:
                continue
            max_probs = np.max(probs[0])
            window_index = 0 
            label = np.argmax(probs[0])
            if label == 0:
                continue
            proposal = {
                    'label': label, 
                    'score': int(max_probs * 100) / 100.0,
                    'segment': [pos, pos + window_lens[window_index]],
                   }
            this_vid_proposals += [proposal]
        proposal_data['results'][video] = this_vid_proposals
    with open("res/" + data_type + "_len.json", 'w') as fobj:
        json.dump(proposal_data, fobj)
Esempio n. 2
0
def stat():
    file_path = 'data/meta.json'
    json_data = load_json(file_path)
    database = json_data['database']
    data = []
    for video in database.keys():
        vd = []
        if database[video]['subset'] != 'validation':
            continue
        for i in database[video]['annotations']:
            st, ed = i['segment']
            vd.append(int((ed - st) / 10.0) * 10)
        mlen = int(np.mean(vd) / 10) * 10
        data.append(mlen)
    print Counter(data).keys()
Esempio n. 3
0
def plot(dataset):
    """Save, for every validation video in data/meta.json, a figure of
    ground-truth vs. predicted segments to image/<dataset>/<video>.jpg.
    """
    json_data = load_json("data/meta.json")
    database = json_data['database']
    for video in database.keys():
        if database[video]['subset'] != 'validation':
            continue
        fig, ax = plt.subplots()
        gt_x, gt_y = get_gt_data(video)
        pr_x, pr_y = get_pr_data(video, dataset)
        ax.plot(gt_x, gt_y)
        ax.plot(pr_x, pr_y, '--', linewidth=1)
        plt.savefig('image/' + dataset + '/' + video + '.jpg')
        # close each figure: matplotlib otherwise keeps every figure
        # alive and memory grows with the number of videos
        plt.close(fig)
Esempio n. 4
0
def get_gt_data(video):
    """Return step-function coordinates (x, y) that outline every
    ground-truth annotation segment of *video* as a 0->1->0 pulse.
    """
    file_path = 'data/meta.json'
    json_data = load_json(file_path)
    database = json_data['database']
    xs = []
    ys = []
    for annotation in database[video]['annotations']:
        start, end = annotation['segment']
        # rising edge at `start`, falling edge at `end`
        xs.extend([start, start, end, end])
        ys.extend([0, 1, 1, 0])
    return xs, ys
Esempio n. 5
0
def get_bd_data(video, dataset):
    """Return step-function coordinates (x, y) that outline each
    predicted segment of *video* from res/<dataset>.json as a
    0->1->0 pulse.
    """
    file_path = 'res/' + dataset + '.json'
    json_data = load_json(file_path)
    database = json_data['results']
    xs = []
    ys = []
    for proposal in database[video]:
        start, end = proposal['segment']
        # rising edge at `start`, falling edge at `end`
        xs.extend([start, start, end, end])
        ys.extend([0, 1, 1, 0])
    return xs, ys
Esempio n. 6
0
def infer(data_type):
    """Refine raw proposals from res/<data_type>.json.

    Merges windows that start within 30 of the previous end (up to 6
    merges per run), drops proposals scoring below 0.5 or shorter than
    30, expands mid-length segments (< 150) to 160 around their center,
    and writes the result to res/<data_type>_refine.json.
    """
    proposal_data = {'results': {}, 'version': "VERSION 1.0"}
    json_data = load_json("res/" + data_type + ".json")
    database = json_data['results']
    for video in database.keys():
        new_seg = []
        new_score = []
        this_vid_proposals = []
        last_ed = 0
        last_st = 0
        width = 0
        for index, i in enumerate(database[video]):
            st, ed = i['segment']
            score = i['score']
            if score < 0.5:
                # discard low-confidence windows
                continue
            if st - last_ed < 30 and width <= 6 and last_ed != 0:
                # close enough to the running segment: absorb it
                width += 1
                last_ed = ed
                continue
            if last_ed != 0:
                # flush the previous merged segment
                # NOTE(review): the score recorded here is the *current*
                # window's score, not the merged segment's — confirm intended
                new_seg.append([last_st, last_ed])
                new_score.append(i['score'])
            last_st = st
            last_ed = ed
            width = 0
        # NOTE(review): the final running segment is never flushed into
        # new_seg — confirm whether that is intended

        for seg, score in zip(new_seg, new_score):
            last_st, last_ed = seg
            if last_ed - last_st < 150:
                if last_ed - last_st <= 30:
                    # too short to keep
                    continue
                # expand mid-length segments to 160 around their center.
                # Bug fix: the original overwrote last_st and then reused
                # it to compute last_ed, shifting the segment's center.
                center = (last_st + last_ed) / 2
                last_st = center - 80
                last_ed = center + 80

            proposal = {
                'score': score,
                'segment': [last_st, last_ed],
            }
            this_vid_proposals += [proposal]
        proposal_data['results'][video] = this_vid_proposals
    with open("res/" + data_type + "_refine.json", 'w') as fobj:
        json.dump(proposal_data, fobj)
Esempio n. 7
0
def infer(video, st, ed):
    """Run the trained LSTM on one [st, ed) feature slice of *video*
    and print the resulting class probabilities.

    Loads precomputed frame features from data/validation/<video>.pkl
    and model parameters from model/video_lstm30.tar.gz.
    """
    # NOTE(review): proposal_data / json_data / database are built but
    # never used in this function — likely copied from the batch variant.
    proposal_data = {'results': {}, 'version': "VERSION 1.0"}
    json_data = load_json("data/meta.json")
    database = json_data['database']
    dict_dim = 2048  # feature dimension per frame
    class_num = 2    # binary: background vs. action
    dataSet = 'validation'
    model_path = "model/video_lstm30.tar.gz"
    prob_layer = lstm_net(dict_dim, class_num, is_infer=True)

    # initialize PaddlePaddle
    paddle.init(use_gpu=False, trainer_count=1)

    # load the trained models
    # NOTE(review): if model_path is missing, `parameters` is never bound
    # and paddle.infer below raises NameError.
    if os.path.exists(model_path):
        with gzip.open(model_path, 'r') as f:
            parameters = paddle.parameters.Parameters.from_tar(f)
    with open("data/" + dataSet + "/" + str(video) + ".pkl", 'rb') as f:
        video_fea = cPickle.load(f)
    probs = paddle.infer(output_layer=prob_layer,
                         parameters=parameters,
                         input=[[video_fea[int(st):int(ed)]]],
                         field="value")
    print probs