# Example #1
def generatedata(path, feature_extractor):
    """Build a training set of bagged clip features from video files.

    Parameters
    ----------
    path : str
        Glob pattern matching the training video files.
    feature_extractor : model
        Model whose ``predict`` maps one preprocessed clip to a feature
        vector (index [0] of the prediction is kept per clip).

    Returns
    -------
    tuple
        ``(x_train, y_train)``: ``x_train`` is an ndarray of interpolated
        feature bags (one per usable video); ``y_train`` is a list of
        labels derived from the filename prefix (0 = "Normal", 1 = other).
    """
    x_train = []
    y_train = []
    for video_file in tqdm(glob.glob(path)):
        video_name = os.path.basename(video_file).split('.')[0]
        video_clips, num_frames = get_video_clips(video_file)

        rgb_features = []
        for clip in video_clips:
            clip = np.array(clip)
            # Drop trailing clips shorter than a full frame window.
            if len(clip) < params.frame_count:
                continue
            clip = preprocess_input(clip)
            rgb_features.append(feature_extractor.predict(clip)[0])

        # BUG FIX: previously the label was appended before feature
        # extraction, so a video with zero usable clips either crashed
        # interpolate() on an empty array or left y_train longer than
        # x_train. Skip such videos and only then record the label.
        if not rgb_features:
            continue
        y_train.append(0 if video_name[0:6] == "Normal" else 1)

        rgb_feature_bag = interpolate(np.array(rgb_features),
                                      params.features_per_bag)
        x_train.append(rgb_feature_bag)
    return np.array(x_train), y_train
# Example #2
def run_demo():
    """Classify every video under ./testing as normal/anomaly and report accuracy.

    Relies on module-level globals: ``feature_extractor`` (clip feature
    model), ``model`` (trained classifier), ``params``, and the helpers
    ``get_video_clips``, ``preprocess_input``, ``interpolate``.

    Confusion counters (ground truth comes from the filename prefix,
    "Normal..." = normal; prediction > 0.5 = anomaly):
      * score_TT - normal video predicted normal
      * score_TF - normal video predicted anomaly
      * score_FF - anomaly video predicted anomaly
      * score_FT - anomaly video predicted normal
    """
    sample_video_path = './testing/*.mp4'
    # read video
    score_TT = 0
    score_TF = 0
    score_FF = 0
    score_FT = 0
    step = 0
    real_n = 0  # count of ground-truth normal videos
    real_a = 0  # count of ground-truth anomaly videos

    for video_file in glob.glob(sample_video_path):
        step += 1
        print("\nStep : ", step)
        video_name = os.path.basename(video_file).split('.')[0]
        print("Video_name : ", video_name)
        if video_name[0:6] == "Normal":
            real_n += 1
        else:
            real_a += 1
        video_clips, num_frames = get_video_clips(video_file)

        print("Number of clips in the video : ", len(video_clips))

        rgb_features = []
        print("\tprocessing clip ...")
        for i, clip in enumerate(video_clips):
            clip = np.array(clip)
            # Skip trailing clips shorter than a full frame window.
            if len(clip) < params.frame_count:
                continue

            clip = preprocess_input(clip)
            rgb_feature = feature_extractor.predict(clip)[0]
            rgb_features.append(rgb_feature)

        rgb_features = np.array(rgb_features)

        # bag features
        rgb_feature_bag = interpolate(rgb_features, params.features_per_bag)

        # classify using the trained classifier model
        leng, widt = rgb_feature_bag.shape
        predictions = model.predict(rgb_feature_bag.reshape(1, leng, widt))
        predictions = np.array(predictions).squeeze()
        if predictions > 0.5:
            print("*** anomaly video ***")
            if video_name[0:6] == "Normal":
                score_TF += 1
            else:
                score_FF += 1
        else:
            if video_name[0:6] == "Normal":
                score_TT += 1
            else:
                score_FT += 1
            print(' ** Normal video ** ')

    print("Number of Video files = ", step)
    print("Number of Actual-Normal-Videos = ", real_n,
          "Number of Predicted Normal Videos = ", score_TT + score_FT)
    print("Number of Actual-Anomaly Videos = ", real_a,
          "Number of Predicted Anomaly Videos = ", score_TF + score_FF)
    # BUG FIX: message was misspelled "Accuaracy".
    print(" ** Accuracy of Prediction ** ")
    if real_n != 0:
        print(" TT = ", round(score_TT / real_n, 2),
              "TF = ", round(score_TF / real_n, 2))
    if real_a != 0:
        print(" FT = ", round(score_FT / real_a, 2),
              "FF = ", round(score_FF / real_a, 2))
    # BUG FIX: guard the final division — with no matching videos all
    # counters are zero and the original raised ZeroDivisionError.
    total = score_TT + score_TF + score_FT + score_FF
    if total != 0:
        print("Accuracy = ", (score_TT + score_FF) / total)
    else:
        print("Accuracy = N/A (no videos processed)")
# Extract, L2-normalize, and persist clip features for each normal
# training video (processed in sorted filename order).
normal_videos.sort()

print("Processing normal videos...")
for video in normal_videos:
    print("Processing {}".format(video))
    src = os.path.join(cfg.normal_videos_path, video)
    # Output name strips the last 9 characters of the filename
    # (presumably the extension plus a suffix — TODO confirm).
    dst = os.path.join(cfg.raw_normal_train_features,
                       video[:-9] + ".npy")

    clip_list, n_frames = video_util.get_video_clips(src)

    # Drop the final partial clip when the frame count is not a
    # multiple of 16 (the extractor consumes fixed 16-frame clips).
    if n_frames % 16 != 0:
        clip_list = clip_list[:-1]

    batches = []
    for one_clip in clip_list:
        batches.append(c3d.preprocess_input(np.array(one_clip)))
    stacked = np.vstack(batches)

    feats = feature_extractor.predict(stacked)
    feats = sklearn.preprocessing.normalize(feats, axis=1)

    with open(dst, "wb") as out:
        np.save(out, feats)
# Same pipeline, now for the abnormal training videos (sorted order).
abnormal_videos = sorted(os.listdir(cfg.abnormal_videos_path))
print("Processing abnormal videos...")
for vid_name in abnormal_videos:
    print("Processing {}".format(vid_name))
    vid_path = os.path.join(cfg.abnormal_videos_path, vid_name)
    feats_path = os.path.join(cfg.raw_abnormal_train_features,