def rebuild_load_data():
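    """Rebuild the UCFCrime2Local index file All_data.txt.

    Collects the video folder name, a binary label (1 = violence, 0 = normal
    clips taken from anomaly videos) and the frame count of each video,
    saves them as a multi-column file and returns the three lists. Note that
    normal_videos.txt and normal_videos_from_anormal.txt are read here but
    not used further.
    """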
    normal_videos = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'normal_videos.txt'))
    normal_videos_from_anormal = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'normal_videos_from_anormal.txt'))
    anomaly_normal = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'anomaly_normal.txt'))
    violence_videos = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'violence_videos.txt'))
    X, y, numFrames = [], [], []
    ll = [violence_videos, anomaly_normal]
    llabels = [1, 0]
    for i, videos_list in enumerate(ll):
        for v_video in videos_list:
            if llabels[i] == 1:
                v_path = os.path.join(
                    constants.PATH_UCFCRIME2LOCAL_FRAMES_VIOLENCE, v_video)
            else:
                v_path = os.path.join(
                    constants.PATH_UCFCRIME2LOCAL_FRAMES_NONVIOLENCE, v_video)
            l_frames = os.listdir(v_path)
            X.append(v_video)
            y.append(llabels[i])
            numFrames.append(len(l_frames))

    save_csvfile_multicolumn(
        zip(X, y, numFrames),
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README, 'All_data.txt'))
    return X, y, numFrames
def crime2localgGetSplit(X, y, numFrames, splits=5):
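    """Yield (train_idx, test_idx) index lists for a k-fold split of UCFCrime2Local.

    The folds are generated once with KFold and cached as fold-{i}-train.txt /
    fold-{i}-test.txt under the README folder; subsequent calls re-read the
    cached files. y and numFrames are accepted but not used here.
    """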
    # print(X)
    kfold = KFold(splits, shuffle=True)

    if not os.path.exists(
            os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                         'fold-1-train.txt')):
        for i, (train_idx, test_idx) in enumerate(kfold.split(X)):
            save_file(
                train_idx,
                os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                             'fold-{}-train.txt'.format(i + 1)))
            save_file(
                test_idx,
                os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                             'fold-{}-test.txt'.format(i + 1)))

    for i in range(splits):
        train_idx = read_file(
            os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                         'fold-{}-train.txt'.format(i + 1)))
        test_idx = read_file(
            os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                         'fold-{}-test.txt'.format(i + 1)))
        train_idx = list(map(int, train_idx))
        test_idx = list(map(int, test_idx))
        yield train_idx, test_idx
def get_Fold_Data(fold):
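    """Read the cached fold-{fold}-train/test index files (fold is 1-based)."""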
    train_idx = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'fold-{}-train.txt'.format(fold)))
    test_idx = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'fold-{}-test.txt'.format(fold)))
    train_idx = list(map(int, train_idx))
    test_idx = list(map(int, test_idx))
    return train_idx, test_idx
def load_fold_data(dataset, fold):
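    """Read the cached fold_{fold}_train/test indices for the given dataset."""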
    if dataset not in ('hockey', 'ucfcrime2local', 'vif'):
        raise ValueError('Unknown dataset: {}'.format(dataset))
    if dataset == 'vif':
        folder = constants.PATH_VIF_README
    elif dataset == 'ucfcrime2local':
        folder = constants.PATH_UCFCRIME2LOCAL_README
    else:
        folder = constants.PATH_HOCKEY_README
    train_idx = read_file(
        os.path.join(folder, 'fold_{}_train.txt'.format(fold)))
    test_idx = read_file(
        os.path.join(folder, 'fold_{}_test.txt'.format(fold)))
    train_idx = list(map(int, train_idx))
    test_idx = list(map(int, test_idx))
    return train_idx, test_idx
def hockeyTrainTestSplit(split_type, datasetAll, labelsAll, numFramesAll):
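    """Split the hockey dataset into train/test using a cached fold.

    The fold number is taken from the last character of split_type; the
    corresponding fold_{n}_train/test.txt index files are used to select
    entries from datasetAll, labelsAll and numFramesAll.
    """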
    train_idx = read_file(
        os.path.join(
            constants.PATH_HOCKEY_README,
            'fold_{}_train.txt'.format(int(split_type[-1]))))
    test_idx = read_file(
        os.path.join(
            constants.PATH_HOCKEY_README,
            'fold_{}_test.txt'.format(int(split_type[-1]))))
    train_idx = list(map(int, train_idx))
    test_idx = list(map(int, test_idx))

    train_x = list(itemgetter(*train_idx)(datasetAll))
    train_y = list(itemgetter(*train_idx)(labelsAll))
    train_numFrames = list(itemgetter(*train_idx)(numFramesAll))
    test_x = list(itemgetter(*test_idx)(datasetAll))
    test_y = list(itemgetter(*test_idx)(labelsAll))
    test_numFrames = list(itemgetter(*test_idx)(numFramesAll))

    return train_x, train_y, train_numFrames, test_x, test_y, test_numFrames
def k_folds(n_splits, subjects, splits_folder):
    """
    Generates folds for cross validation.
    Args:
        n_splits: number of folds
        subjects: number of patients
        splits_folder: folder where the fold index files are cached
    """
    indices = np.arange(subjects).astype(int)
    if n_splits == 1:
        # Single split: presumably a project-local train_test_split helper
        # taking (train percentage, test percentage, number of subjects).
        train, test = train_test_split(80, 20, subjects)
        yield train, test
    else:
        if not os.path.exists(os.path.join(splits_folder, 'fold_1_train.txt')):
            # if True:
            for fold, test_idx in enumerate(get_indices(n_splits, subjects)):
                train_idx = np.setdiff1d(indices, test_idx)
                save_file(
                    train_idx,
                    os.path.join(splits_folder,
                                 'fold_' + str(fold + 1) + '_train.txt'))
                save_file(
                    test_idx,
                    os.path.join(splits_folder,
                                 'fold_' + str(fold + 1) + '_test.txt'))
                yield train_idx, test_idx
        else:
            for fold in range(n_splits):
                train_idx = read_file(
                    os.path.join(splits_folder,
                                 'fold_' + str(fold + 1) + '_train.txt'))
                test_idx = read_file(
                    os.path.join(splits_folder,
                                 'fold_' + str(fold + 1) + '_test.txt'))
                train_idx = list(map(int, train_idx))
                test_idx = list(map(int, test_idx))
                yield train_idx, test_idx
def plot_bbox_annotations():
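    """Visualize the ground-truth bounding boxes of UCFCrime2Local frame by frame.

    For every video listed in anomaly_normal.txt, the matching annotation
    file is parsed and each frame is displayed with its box drawn when the
    annotation flag is 0. Press any key to advance to the next frame.
    """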
    anomaly_normal = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'anomaly_normal.txt'))
    for av in anomaly_normal:
        v_path = os.path.join(constants.PATH_UCFCRIME2LOCAL_FRAMES_NONVIOLENCE,
                              av)
        print(v_path)
        l_frames = os.listdir(v_path)
        l_frames.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
        av = av[:-8]  # strip the trailing suffix so the name matches the annotation .txt file
        bdx_file_path = os.path.join(
            constants.PATH_UCFCRIME2LOCAL_Txt_ANNOTATIONS, av + '.txt')
        data = []
        with open(bdx_file_path, 'r') as file:
            for row in file:
                data.append(row.split())
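        # Each annotation row is indexed below as: [1..4] -> x0, y0, x1, y1,
        # [5] -> frame number, [6] -> flag (0 appears to mark frames with a
        # valid annotated box).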
        # data = np.array(data)
        gt_bboxes = []

        for i, frame_path in enumerate(l_frames):
            # print('------frame_path=',frame_path)
            pth, frame_name = os.path.split(frame_path)
            splits = re.split(r'(\d+)', frame_name)
            frame_number = int(splits[1])

            if frame_number >= len(data):
                break
            frame_data = data[frame_number]
            # print('video={}, frame={}, frame_number={}, gt={}'.format(video_name, frame_name, frame_number, frame_data))
            if frame_number != int(frame_data[5]):
                print('=========*********** Ground truth error!')
                break
            x0 = int(frame_data[1])
            y0 = int(frame_data[2])
            w = int(frame_data[3]) - int(frame_data[1])
            h = int(frame_data[4]) - int(frame_data[2])
            gt_bboxes.append([x0, y0, w, h])
            frame = cv2.imread(os.path.join(v_path, frame_path))
            flac = int(frame_data[6])
            if flac == 0:
                cv2.rectangle(frame, (x0, y0), (x0 + w, y0 + h), (0, 255, 0),
                              2)

            cv2.imshow("frame", frame)
            key = cv2.waitKey(0)
def temporal_cut_long_videos():
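    """Cut long anomaly videos into clips of consecutive annotated frames.

    Consecutive frames whose annotation flag is 0 are collected into a clip;
    when the flag changes, the collected frames are copied into a new
    '<video>-VSplit-<n>' folder under the non-violence frames directory.
    """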
    anomaly_normal = read_file(
        os.path.join(constants.PATH_UCFCRIME2LOCAL_README,
                     'anomaly_normal.txt'))
    for av in anomaly_normal:
        v_path = os.path.join(constants.PATH_UCFCRIME2LOCAL_FRAMES_NONVIOLENCE,
                              av)
        print(v_path)
        l_frames = os.listdir(v_path)
        l_frames.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
        bdx_file_path = os.path.join(
            constants.PATH_UCFCRIME2LOCAL_Txt_ANNOTATIONS, av + '.txt')
        data = []
        with open(bdx_file_path, 'r') as file:
            for row in file:
                data.append(row.split())
        # data = np.array(data)
        gt_bboxes = []
        anomaly_clips = []
        clip_frames = []
        counter = 0
        start = False
        end = False
        for i, frame_path in enumerate(l_frames):
            # print('------frame_path=',frame_path)
            pth, frame_name = os.path.split(frame_path)
            splits = re.split(r'(\d+)', frame_name)
            frame_number = int(splits[1])

            if frame_number >= len(data):
                break
            frame_data = data[frame_number]
            # print('video={}, frame={}, frame_number={}, gt={}'.format(video_name, frame_name, frame_number, frame_data))
            if frame_number != int(frame_data[5]):
                print('=========*********** Ground truth error!')
                break
            x0 = int(frame_data[1])
            y0 = int(frame_data[2])
            w = int(frame_data[3]) - int(frame_data[1])
            h = int(frame_data[4]) - int(frame_data[2])
            gt_bboxes.append([x0, y0, w, h])
            # frame = cv2.imread(os.path.join(v_path,frame_path))
            flac = int(frame_data[6])
            if flac == 0:
                # cv2.rectangle(frame, (x0, y0),(x0+w, y0+h), (0,255,0), 2)
                clip_frames.append(os.path.join(v_path, frame_path))
                if not start:
                    # print('================')
                    # clip_frames.append(os.path.join(v_path,frame_path))
                    start = True
            elif start:
                end = True
                start = False
                # for f in clip_frames:
                #     print(f)
                anomaly_clips.append(clip_frames)
                counter += 1
                f_name = av + '-VSplit-' + str(counter)
                out_dir = os.path.join(
                    constants.PATH_UCFCRIME2LOCAL_FRAMES_NONVIOLENCE, f_name)
                if not os.path.isdir(out_dir):
                    os.mkdir(out_dir)
                for fimg in clip_frames:
                    newPath = shutil.copy(fimg, out_dir)

                clip_frames = []
def customize_kfold(n_splits, dataset, X, y, shuffle=True):
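    """Yield (train_idx, test_idx) lists for each fold of the requested dataset.

    hockey / ucfcrime2local: stratified folds generated once and cached as
    fold_{i}_train/test.txt. vif: folds follow the dataset's own split
    directories, holding one split out per fold. rwf-2000: the dataset has a
    predefined split, so a single pair of empty index lists is yielded.
    """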
    if dataset == 'hockey' or dataset == 'ucfcrime2local':
        kfold = StratifiedKFold(n_splits, shuffle=shuffle)
        folder = constants.PATH_UCFCRIME2LOCAL_README if dataset == 'ucfcrime2local' else constants.PATH_HOCKEY_README
        if not os.path.exists(os.path.join(folder, 'fold_1_train.txt')):
            # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
            for i, (train_idx, test_idx) in enumerate(kfold.split(X, y)):
                save_file(
                    train_idx,
                    os.path.join(folder, 'fold_{}_train.txt'.format(i + 1)))
                save_file(
                    test_idx,
                    os.path.join(folder, 'fold_{}_test.txt'.format(i + 1)))

        for i in range(n_splits):
            train_idx = read_file(
                os.path.join(folder, 'fold_{}_train.txt'.format(i + 1)))
            test_idx = read_file(
                os.path.join(folder, 'fold_{}_test.txt'.format(i + 1)))
            train_idx = list(map(int, train_idx))
            test_idx = list(map(int, test_idx))
            yield train_idx, test_idx
    elif dataset == 'vif':
        splitsLen = []
        folder = os.path.join(constants.PATH_VIF_README)
        if not os.path.exists(os.path.join(folder, 'fold_1_train.txt')):
            if not os.path.exists(os.path.join(folder, 'lengths.txt')):
                for fold in range(n_splits):
                    violence_path = os.path.join(constants.PATH_VIF_FRAMES,
                                                 str(fold + 1), 'Violence')
                    non_violence_path = os.path.join(constants.PATH_VIF_FRAMES,
                                                     str(fold + 1),
                                                     'NonViolence')
                    violence_videos = os.listdir(violence_path)
                    non_violence_videos = os.listdir(non_violence_path)
                    splitsLen.append(
                        len(violence_videos) + len(non_violence_videos))
                save_file(splitsLen, os.path.join(folder, 'lengths.txt'))
            else:
                splitsLen = read_file(os.path.join(folder, 'lengths.txt'))
                splitsLen = list(map(int, splitsLen))

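            # Each VIF split directory contributes a contiguous block of video
            # indices; fold i holds out its own block as the test set and uses
            # the remaining indices for training.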
            for i, l in enumerate(splitsLen):
                end = np.sum(splitsLen[:(i + 1)])
                start = end - splitsLen[i]
                test_idx = np.arange(start, end)
                train_idx = np.arange(0, start).tolist() + np.arange(
                    end, len(X)).tolist()
                if shuffle:
                    random.shuffle(train_idx)
                    random.shuffle(test_idx)
                save_file(
                    train_idx,
                    os.path.join(folder, 'fold_{}_train.txt'.format(i + 1)))
                save_file(
                    test_idx,
                    os.path.join(folder, 'fold_{}_test.txt'.format(i + 1)))
        for i in range(n_splits):
            train_idx = read_file(
                os.path.join(folder, 'fold_{}_train.txt'.format(i + 1)))
            test_idx = read_file(
                os.path.join(folder, 'fold_{}_test.txt'.format(i + 1)))
            train_idx = list(map(int, train_idx))
            test_idx = list(map(int, test_idx))
            yield train_idx, test_idx
    elif dataset == 'rwf-2000':
        # RWF-2000 ships with a predefined train/test split, so no fold
        # indices are generated here; a single pair of empty index lists
        # is yielded.
        train_idx, test_idx = [], []
        yield train_idx, test_idx