Example #1
def get_sample_list(self):
    if 'orig' in self.split:
        index = int(self.split.split('.')[1])
        if 'UCF101' in self.source_folder:
            split_dpath = osp.join(self.data_root, 'ucfTrainTestlist')
            class_list = [
                k.strip().split() for k in baseio.read_txt_lines2list(
                    osp.join(split_dpath, 'classInd.txt'))
            ]
            split_fname = 'trainlist0%d.txt' % index if self.is_training else 'testlist0%d.txt' % index
            sample_list = baseio.read_txt_lines2list(
                osp.join(split_dpath, split_fname))
            sample_list = [
                k.strip().replace('.avi', '').split() for k in sample_list
            ]
            # class_list MUST be in the 1 -> 101 order
            if [int(i[0]) for i in class_list] != \
                    [i + 1 for i in range(len(class_list))]:
                raise ValueError('class_list not in an appropriate order')
            # UCF101 test splits carry no labels, so look each label up
            # from class_list via the sample's class-folder name
            if not self.is_training:
                class_names = [k[1] for k in class_list]
                for j in sample_list:
                    j.append(class_list[class_names.index(osp.dirname(j[0]))][0])
        else:
            raise NameError('Unknown DataSet Name in %s' %
                            self.source_folder)
    else:
        raise NameError('Unknown Split Type in %s' % self.split)
    # class_list[i]  = ['1', 'ApplyEyeMakeup']
    # sample_list[i] = ['ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01', '1']
    return class_list, sample_list
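# Hypothetical usage sketch (not from the source); attribute names follow
# those referenced above and the values are illustrative only:
#   dataset.split = 'orig.1'   # -> trainlist01.txt / testlist01.txt
#   dataset.source_folder = '/absolute/datasets/UCF101'
#   class_list, sample_list = dataset.get_sample_list()
#   class_list[0]  -> ['1', 'ApplyEyeMakeup']
#   sample_list[0] -> ['ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01', '1']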
Example #2
def main(results_draw=RESULTS_DRAW,
         gt_folder_path=GT_FOLDER_PATH,
         save_dir_path=SAVE_DIR_PATH):
    # collect the frame images of one video, sorted by integer frame index
    to_draw_list = basepy.get_1tier_file_path_list(
        results_draw, suffix=get_charactor(results_draw)[1])
    to_draw_list = sorted(to_draw_list,
                          key=lambda x: int(osp.basename(x).split('.')[0]))
    video_gt_path = osp.join(gt_folder_path, osp.basename(results_draw))
    save_img_path = basepy.check_or_create_path(
        osp.join(save_dir_path, osp.basename(results_draw)))
    image_size = get_charactor(results_draw)[2]
    for frame in to_draw_list:
        img = cv2.imread(frame)
        # one ground-truth .txt per annotated frame, named by frame index
        img_gt_file = osp.join(video_gt_path,
                               osp.basename(frame).split('.')[0] + '.txt')
        if osp.exists(img_gt_file):
            spatial_annotations = [
                yoloLine2Shape(image_size, k[1], k[2], k[3], k[4])
                for k in basepy.read_txt_lines2list(img_gt_file, ' ')
            ]
            # corner order follows yoloLine2Shape's return convention
            for y0, x0, y1, x1 in spatial_annotations:
                cv2.rectangle(img, (y0, x0), (y1, x1), (53, 134, 238), 1)

        save_frame = osp.join(save_img_path, osp.basename(frame))
        cv2.imwrite(save_frame, img)
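# A minimal sketch of the helper used throughout these examples, assuming
# the usual YOLO line format (class, x_center, y_center, width, height,
# normalized to [0, 1]); the real yoloLine2Shape may differ in argument
# or return order, as the (y0, x0, y1, x1) unpacking above suggests.
def yolo_line_to_shape_sketch(image_size, xc, yc, w, h):
    height, width = image_size
    xc, yc, w, h = float(xc), float(yc), float(w), float(h)
    x0, y0 = int((xc - w / 2) * width), int((yc - h / 2) * height)
    x1, y1 = int((xc + w / 2) * width), int((yc + h / 2) * height)
    return y0, x0, y1, x1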
Example #3
def get_iou_one(c3d_list_in_one_frame, video_spatial_annotation_path):
    # wei_shu (位数, "digit count") is the zero-padding width of the
    # per-frame annotation file names
    if 'ucsdped2' in video_spatial_annotation_path.lower():
        image_size, wei_shu = (240, 360), 3
    else:
        image_size, wei_shu = (240, 320), 5
    # index -12 of each C3D line holds the clip's frame index
    iou_list_in_one_frames, frame_index = [], c3d_list_in_one_frame[0][-12]
    spatial_annotation_txt = osp.join(
        video_spatial_annotation_path,
        str(int(frame_index)).zfill(wei_shu) + '.txt')
    spatial_annotations = [
        yoloLine2Shape(image_size, k[1], k[2], k[3], k[4])
        for k in basepy.read_txt_lines2list(spatial_annotation_txt, ' ')
    ]
    for i in c3d_list_in_one_frame:
        # indices -10..-7 hold the proposal's x, y, width, height
        event_proposal = (int(i[-10]), int(i[-9]), int(i[-10] + i[-8]),
                          int(i[-9] + i[-7]))
        # best IoU of this proposal against any annotated box
        iou_list_in_one_frames.append(
            max(compute_iou(event_proposal, bx) for bx in spatial_annotations))

    print(max(iou_list_in_one_frames))  # debug output
    if max(iou_list_in_one_frames) > 0.1:
        # keep the best-overlapping proposal; max-fuse its two 4096-d halves
        select_one = iou_list_in_one_frames.index(max(iou_list_in_one_frames))
        # return (c3d_list_in_one_frame[select_one][:4096] + c3d_list_in_one_frame[select_one][4096:8192])/2
        return np.maximum(c3d_list_in_one_frame[select_one][:4096],
                          c3d_list_in_one_frame[select_one][4096:8192])
    else:
        return False
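# A minimal IoU sketch for axis-aligned boxes in (x0, y0, x1, y1) form,
# assuming that is the convention compute_iou expects; the project's own
# compute_iou may handle degenerate boxes differently.
def compute_iou_sketch(box_a, box_b):
    ax0, ay0, ax1, ay1 = box_a
    bx0, by0, bx1, by1 = box_b
    inter_w = max(0, min(ax1, bx1) - max(ax0, bx0))
    inter_h = max(0, min(ay1, by1) - max(ay0, by0))
    inter = inter_w * inter_h
    union = (ax1 - ax0) * (ay1 - ay0) + (bx1 - bx0) * (by1 - by0) - inter
    return inter / union if union > 0 else 0.0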
Example #4
def network_eval(save_file_path, set_gpu):
    model_folder = save_file_path if osp.isdir(
        save_file_path) else osp.dirname(save_file_path)
    json_file_path = basepy.get_1tier_file_path_list(model_folder,
                                                     suffix='.json')[0]
    d = basepy.DictCtrl(
        zdefault_dict.EXPERIMENT_KEYS).read4path(json_file_path)
    d['batch_size'], d['set_gpu'] = 1, set_gpu

    print('EVALUATING...... D values:')
    for key in d:
        print('    ', key, ":", d[key])

    feature_path_list = basepy.get_1tier_file_path_list(d['npy_file_path'],
                                                        suffix='.npy')

    # test_txt = '/absolute/datasets/Anomaly-Detection-Dataset/Temporal_Anomaly_Annotation_for_Testing_Videos.txt'
    test_list = basepy.read_txt_lines2list(d['testing_list'], sep='  ')
    test_list = base.reform_train_list(test_list,
                                       feature_path_list,
                                       if_print=False)
    feature_dict = base.read_npy_file_path_list(test_list)

    merged_keys, merged_features = merge_keys_and_features_in_one(feature_dict)
    # trailing False acts as an end-of-list sentinel for the eval loop
    merged_keys.append(False)

    print('Evaluating %s...' % save_file_path)
    print('Testing list %s.' % d['testing_list'])
    if not osp.isdir(save_file_path):
        _ = eval_one_ckpt(merged_keys,
                          merged_features,
                          d,
                          ckpt_file=save_file_path)
    else:
        # each line of the 'checkpoint' index file looks like:
        # model_checkpoint_path: "/absolute/tensorflow_models/190516014752/190516014752.ckpt-16001"
        # strip the surrounding quotes; skip line 0, which just repeats the
        # latest checkpoint
        ckpt_check_list = [
            i[1][1:-1] for i in basepy.read_txt_lines2list(
                osp.join(model_folder, 'checkpoint'), sep=': ')
        ][1:]

        for one_ckpt in ckpt_check_list:
            eval_one_ckpt(merged_keys, merged_features, d, ckpt_file=one_ckpt)
    print('EVALUATION DONE ------ Debug Symbol ------ %s ------' %
          time.asctime(time.localtime(time.time())))
    return None
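# Hypothetical invocation (path illustrative, not from the source): pass a
# single .ckpt file to evaluate one model, or a model folder to evaluate
# every checkpoint listed in its 'checkpoint' index:
#   network_eval('/absolute/tensorflow_models/190516014752', set_gpu='0')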
Example #5
def get_np_from_txt(txt_file_path, renum=1001):
    feature = basepy.read_txt_lines2list(txt_file_path)
    try:
        # downsample: draw renum lines without replacement
        feature = random.sample(feature, renum)
    except ValueError:
        # fewer than renum lines available: repeat the whole list, then
        # top it up with a random remainder to reach exactly renum lines
        quotient, remainder = divmod(renum, len(feature))
        feature = feature * quotient + random.sample(feature, remainder)
    return np.array([i[0] for i in feature], dtype='float32')
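# Worked check of the oversampling branch (illustrative): with 4 source
# lines and renum=10, divmod(10, 4) gives quotient 2 and remainder 2, so
# every line appears twice and 2 extra lines are drawn at random, yielding
# exactly 10 lines.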
Example #6
def get_spatial_pr_curve(results_all_in_one,
                         annotation_folder_path,
                         temporal_annotation_file,
                         inflate,
                         iou_threshold=0.143):
    annotation_in_all = basepy.read_txt_lines2list(temporal_annotation_file,
                                                   '  ')
    if 'Anomaly-Detection-Dataset' in temporal_annotation_file:
        image_size, wei_shu = (240, 320), 5
    else:
        image_size, wei_shu = (240, 360), 3
    spatial_annotation, all_annotation_num = {}, 0
    for i in annotation_in_all:
        video_name_temp = i[0].split('.')[0]
        # the two annotated anomalous intervals: [start1, final1) and [start2, final2)
        index_list = list(range(int(i[2]), int(i[3]))) + \
            list(range(int(i[4]), int(i[5])))
        in_one_video = {}
        for j in index_list:
            # only every inflate-th frame carries a spatial annotation .txt
            if j % inflate == 0:
                spatial_annotation_txt = osp.join(
                    annotation_folder_path, video_name_temp,
                    str(j).zfill(wei_shu) + '.txt')
                if not osp.exists(spatial_annotation_txt):
                    raise ValueError('Not Exists: annotation txt path %s' %
                                     spatial_annotation_txt)
                # each entry is [box, covered_flag], with the flag starting at 0
                in_one_video[j] = [
                    [yoloLine2Shape(image_size, k[1], k[2], k[3], k[4]), 0]
                    for k in basepy.read_txt_lines2list(
                        spatial_annotation_txt, ' ')
                ]
                all_annotation_num += len(in_one_video[j])
        spatial_annotation[video_name_temp] = in_one_video

    spatial_groud_truth, covered_num = get_spatial_groud_truth(
        results_all_in_one,
        spatial_annotation,
        scale_id=2,
        iou_threshold=iou_threshold)
    spatial_anomaly_score = [i[-1] for i in results_all_in_one]
    # note: despite the 'pr_curve' name, this computes ROC curve points
    spatial_fpr, spatial_tpr, spatial_thresholds = metrics.roc_curve(
        spatial_groud_truth, spatial_anomaly_score)

    return spatial_fpr, spatial_tpr, spatial_thresholds, covered_num / all_annotation_num
Example #7
def recall_iou_all(temporal_annotation_file, video_spatial_annotation_path,
                   event_proposal_json_path):
    # accumulate the per-video multi-region, single-region, and whole-frame
    # IoU lists over every line of the temporal annotation file
    annotation_in_all = basepy.read_txt_lines2list(temporal_annotation_file)
    multi_all, single_all, frame_all = [], [], []
    for each_line in annotation_in_all:
        multi_region, one_region, iou_frame = recall_iou_video(
            each_line, video_spatial_annotation_path, event_proposal_json_path)
        multi_all.extend(multi_region)
        single_all.extend(one_region)
        frame_all.extend(iou_frame)

    return multi_all, single_all, frame_all
Example #8
def collect_features(temporal_annotation_file,
                     original_c3d_path,
                     event_proposal_c3d_path,
                     spatial_annotation_path,
                     max_num=None):
    annotation_in_all = basepy.read_txt_lines2list(temporal_annotation_file)
    original_c3d_anomaly_all, original_c3d_normal_all = [], []
    event_proposal_c3d_anomaly_all, event_proposal_c3d_normal_all = [], []
    for each_line in annotation_in_all:
        (original_c3d_anomaly, original_c3d_normal,
         event_proposal_c3d_anomaly, event_proposal_c3d_normal) = \
            collect_each_video(each_line, original_c3d_path,
                               event_proposal_c3d_path,
                               spatial_annotation_path, max_num=max_num)
        original_c3d_anomaly_all.extend(original_c3d_anomaly)
        original_c3d_normal_all.extend(original_c3d_normal)
        event_proposal_c3d_anomaly_all.extend(event_proposal_c3d_anomaly)
        event_proposal_c3d_normal_all.extend(event_proposal_c3d_normal)

    return (np.array(original_c3d_anomaly_all),
            np.array(original_c3d_normal_all),
            np.array(event_proposal_c3d_anomaly_all),
            np.array(event_proposal_c3d_normal_all))
Example #9
def npy_reform(npy_file_folder_path, multiscale, multiregion, reform_type,
               reform_num, if_multiprocessing, test_file):
    try:
        results_folder_path = npy_file_folder_path.replace('_motion_', '_motion_reformed_') \
            .replace('_pyramid_', '_%s_' % multiscale) \
            .replace('_c3d_npy', '_%dregion_c3d_npy' % multiregion)
    except TypeError:
        # multiregion is not an int (e.g. None): fall back to the plain rename
        results_folder_path = npy_file_folder_path.replace(
            '_motion_', '_motion_reformed_')
    results_folder_path = results_folder_path.replace(
        '_c3d_npy', '_%s_%d_c3d_npy' % (reform_type, reform_num))
    # stringified test list, presumably used downstream for substring checks
    test_str = str(basepy.read_txt_lines2list(test_file, sep='  '))

    print('Converting %s to %s :' %
          (npy_file_folder_path, results_folder_path))
    multiprocessing_num = int(mp.cpu_count() / 4)
    remaining_list, split_list = basepy.get_remaining_to_multi(
        basepy.get_1tier_file_path_list(npy_file_folder_path, '.npy'),
        basepy.get_1tier_file_path_list(
            basepy.check_or_create_path(results_folder_path), suffix='.npy'),
        divide_num=multiprocessing_num,
        if_print=True)
    # npy_list_preprocessing(remaining_list, EVAL_RESULT_FOLDER, MULTISCALE, MULTIREGION)
    if if_multiprocessing:
        p = mp.Pool(multiprocessing_num)
        for em in split_list:
            p.apply_async(npy_list_preprocessing,
                          args=(em, results_folder_path, multiscale,
                                multiregion, reform_type, reform_num,
                                test_str))
        p.close()
        p.join()
    else:
        npy_list_preprocessing(remaining_list, results_folder_path, multiscale,
                               multiregion, reform_type, reform_num, test_str)
    # END
    print('Converting DONE ------ Debug Symbol ------ %s ------' %
          time.asctime(time.localtime(time.time())))
    return results_folder_path
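# Design note on the pool above: apply_async submits each split of the
# remaining .npy list as an independent task, and close() + join() waits for
# all of them; exceptions raised inside npy_list_preprocessing are silently
# swallowed unless the AsyncResult objects are kept and .get() is called.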
Example #10
import shutil
import data_io.basepy as basepy

test_list_txt = '/absolute/datasets/Anomaly-Detection-Dataset/Temporal_Anomaly_Annotation_for_Testing_Videos.txt'
data_set_path = '/absolute/datasets/anoma'
to_test_path = '/absolute/datasets/anoma_all_test'

video_folder_list = basepy.get_2tier_folder_path_list(data_set_path)

test_video_list = basepy.read_txt_lines2list(test_list_txt, sep='  ')
# test_video_list = [i[0].split('.')[0] for i in test_video_list]

frame_in_all = 0
anoma_in_all = 0

for one_line_test_video in test_video_list:
    a_test_video, class_name, an_start_1, an_end_1, an_start_2, an_end_2 = one_line_test_video
    a_test_video = a_test_video.split('.')[0]
    # print(a_test_video, an_start_1, an_end_1, an_start_2, an_end_2)
    a_test_video_folder_path_list = [
        j for j in video_folder_list if a_test_video in j
    ]
    if len(a_test_video_folder_path_list) != 1:
        raise ValueError('NOT only %s in %s' %
                         (a_test_video, a_test_video_folder_path_list))
    a_test_video_folder_path = a_test_video_folder_path_list[0]

    frame_in_all += len(
        basepy.get_1tier_file_path_list(a_test_video_folder_path))
    anoma_in_all += (int(an_end_1) - int(an_start_1) +
                     int(an_end_2) - int(an_start_2))
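# The totals above are never reported in the original snippet; a minimal
# way to surface them (added here for illustration):
print('frames in all test videos: %d' % frame_in_all)
print('annotated anomalous frames: %d' % anoma_in_all)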
Example #11
def network_train(tf_flags, npy_reformed_file_path, top_k=20):
    # decode flags
    d, ckpt_file_path = decode_flags(tf_flags, npy_reformed_file_path)
    # make example list
    feature_path_list = basepy.get_1tier_file_path_list(d['npy_file_path'], suffix='.npy')
    test_list = basepy.read_txt_lines2list(d['testing_list'], sep=' ')
    test_list = base.reform_train_list(test_list, feature_path_list, if_print=False)
    train_list = [i for i in feature_path_list if i not in test_list]
    print('TRAINING: %d training examples in all' % len(train_list))
    anomaly_npy_list = [i for i in train_list if 'normal' not in i.lower()]
    normal_npy_list = [i for i in train_list if 'normal' in i.lower()]

    anomaly_npy_reformed = list2np_array(anomaly_npy_list, reform_num=d['segment_num'])
    normal_npy_reformed = list2np_array(normal_npy_list, reform_num=d['segment_num'])

    # set saving for every saving_interval epochs
    samples_in_one_epoch = min(len(anomaly_npy_list), len(normal_npy_list))
    step2show = [int(i * samples_in_one_epoch / d['batch_size'])
                 for i in range(d['epoch_num']) if i % int(d['saving_interval'] / 3) == 0]
    step2save = [int(i * samples_in_one_epoch / d['batch_size'])
                 for i in range(d['epoch_num']) if i % d['saving_interval'] == 0]
    # trailing False ends the while-loop below once all steps are consumed
    step_in_all = list(range(step2save[-1] + 1)) + [False]

    # NET SETTING
    with tf.name_scope('input'):
        input_anom = tf.placeholder(tf.float32, [d['batch_size'], d['segment_num'], d['feature_len']], name='anom')
        input_norm = tf.placeholder(tf.float32, [d['batch_size'], d['segment_num'], d['feature_len']], name='norm')

    with tf.name_scope('forward-propagation'):
        score_anomaly = base.network_fn(input_anom,
                                        fusion=d['fusion'], feature_len=d['feature_len'], segment_num=d['segment_num'])
        score_normal = base.network_fn(input_norm,
                                       fusion=d['fusion'], feature_len=d['feature_len'], segment_num=d['segment_num'])

    with tf.name_scope('loss'):
        # MIL ranking hinge: the k-th highest anomaly score should exceed
        # the highest normal score by a margin of 1
        top_max_anomaly, _ = tf.nn.top_k(score_anomaly, top_k)
        mil_loss = tf.maximum(0., 1 - tf.reduce_min(top_max_anomaly, axis=1) + tf.reduce_max(score_normal, axis=1))
        regu = tf.contrib.layers.apply_regularization(
            tf.contrib.layers.l2_regularizer(d['regularization_scale']), tf.trainable_variables())
        mean_mil = tf.reduce_mean(mil_loss)
        loss = mean_mil + regu

    global_step = tf.Variable(0, trainable=False)
    with tf.name_scope('moving_average'):
        variable_averages = tf.train.ExponentialMovingAverage(d['moving_average_decay'], global_step)
        variable_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.name_scope('train_step'):
        train_step = tf.train.AdamOptimizer(learning_rate=d['learning_rate_base']
                                            ).minimize(loss, global_step=global_step)
        with tf.control_dependencies([train_step, variable_averages_op]):
            train_op = tf.no_op(name='train')

    print(('* ' + 'Variables to be trained' + ' *').center(60, '*'))
    pprint(tf.trainable_variables())
    print('Model .ckpt save path: %s' % ckpt_file_path)

    saver = tf.train.Saver(max_to_keep=100)

    init_op = tf.global_variables_initializer()
    os.environ["CUDA_VISIBLE_DEVICES"] = d['set_gpu']
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = '1'
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        print('program begins, timestamp %s' % time.asctime(time.localtime(time.time())))
        if d['lasting']:
            restore_ckpt = basetf.get_ckpt_path(d['lasting'])
            saver_goon = tf.train.Saver()
            saver_goon.restore(sess, restore_ckpt)

        step = 0
        while step_in_all[step] is not False:
            time1 = time.time()
            anomaly_in = anomaly_npy_reformed[np.random.choice(anomaly_npy_reformed.shape[0], d['batch_size'])]
            normal_in = normal_npy_reformed[np.random.choice(normal_npy_reformed.shape[0], d['batch_size'])]

            time2 = time.time()
            loss_, _, mean_mil_, regu_ = sess.run([loss, train_op, mean_mil, regu],
                                                  feed_dict={input_anom: anomaly_in, input_norm: normal_in})
            if step in step2show:
                print('After %5d steps, loss = %.5e, mil = %.5e, regu = %.5e, feed: %.3fsec, train: %.3fsec' %
                      (step, loss_, mean_mil_, regu_, time2 - time1, time.time() - time2))
            if step in step2save:
                print('Save .ckpt at step %5d / %4d epochs.'
                      % (step, d['saving_interval'] * step2save.index(step)))
                saver.save(sess, ckpt_file_path, global_step=global_step)
            step += 1

    print('Model .ckpt save path: %s' % ckpt_file_path)
    print('TRAINING Finish ------ Debug Symbol ------ %s ------' % time.asctime(time.localtime(time.time())))
    return osp.dirname(ckpt_file_path)
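# Hedged numeric illustration of the top-k MIL hinge in the loss scope above
# (not from the source): with top_k = 2, anomaly scores [0.9, 0.7, 0.1] have
# a 2nd-highest value of 0.7; if the normal scores peak at 0.3, the loss is
# max(0, 1 - 0.7 + 0.3) = 0.6, which training drives toward 0.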
Example #12
import data_io.basepy as basepy
import os.path as osp
import json

crime2local_videos = '/absolute/datasets/UCFCrime2Local/Videos_from_UCFCrime.txt'
datasets_path = '/absolute/datasets/anoma'
json_path = '/absolute/datasets/UCFCrime2Local_motion_all_json'

# keep only the Normal_* videos; sep='.' splits off the file extension
normal_list = [
    i[0] for i in basepy.read_txt_lines2list(crime2local_videos, sep='.')
    if 'normal' in i[0].lower()
]

video_folder_list = basepy.get_2tier_folder_path_list(datasets_path)

normal_video_list = []
for normal_video_name in normal_list:
    normal_video_list.extend(
        [i for i in video_folder_list if normal_video_name in i])

for one_video_path in normal_video_list:
    video_name = osp.basename(one_video_path)
    class_name = osp.basename(osp.dirname(one_video_path))

    tfrecord_name = '%s@%s' % (class_name, video_name)
    tfrecord_path = osp.join(json_path, tfrecord_name + '.json')

    frames_path = basepy.get_1tier_file_path_list(one_video_path,
                                                  suffix='.jpg')
    frame_list = sorted(frames_path,
                        key=lambda x: int(osp.basename(x).split('.')[0]))
Example #13
def recall_iou_video(each_line, video_spatial_annotation_path,
                     event_proposal_json_path):
    print(each_line[0])
    video_name, video_class, start1, final1, start2, final2 = \
        each_line[0].split('  ')
    start1, final1, start2, final2 = \
        int(start1), int(final1), int(start2), int(final2)
    if '.mp4' in video_name:
        video_name = video_name.replace('.mp4', '')
        video_class = video_class.replace('Normal', 'normal_test')
        inflate = 16
    else:
        inflate = 8

    # pair each event-proposal frame index with the mid-clip annotation index
    index_ep_vs_annotation = [[i, int(i + inflate / 2)]
                              for i in list(range(start1, final1)) +
                              list(range(start2, final2)) if i % inflate == 0]
    ep_json_file = osp.join(event_proposal_json_path,
                            video_class + '@' + video_name + '.json')
    with open(ep_json_file, 'r') as f:
        info = json.load(f)
    annotation_path = osp.join(video_spatial_annotation_path, video_name)
    if 'ucsdped2' in video_spatial_annotation_path.lower():
        image_size, frame_region, wei_shu = (240, 360), (0, 0, 360, 240), 3
    else:
        image_size, frame_region, wei_shu = (240, 320), (0, 0, 320, 240), 5
    multi_all, single_all, frame_all = [], [], []
    for ep_index, at_index in index_ep_vs_annotation:
        # prefer the mid-clip annotation file; fall back to the clip-start
        # frame when that .txt does not exist
        try:
            spatial_annotation_txt = osp.join(
                annotation_path,
                str(int(at_index)).zfill(wei_shu) + '.txt')
            spatial_annotations = [
                yoloLine2Shape(image_size, k[1], k[2], k[3], k[4])
                for k in basepy.read_txt_lines2list(spatial_annotation_txt, ' ')
            ]
        except FileNotFoundError:
            spatial_annotation_txt = osp.join(
                annotation_path,
                str(int(ep_index)).zfill(wei_shu) + '.txt')
            spatial_annotations = [
                yoloLine2Shape(image_size, k[1], k[2], k[3], k[4])
                for k in basepy.read_txt_lines2list(spatial_annotation_txt, ' ')
            ]

        ep_in_frame = [i for i in info if i[2] == ep_index]

        if ep_in_frame:
            # IoU of every proposal box against every annotated box
            ep_iou_on_at = [[
                compute_iou((int(i[-9]), int(i[-8]),
                             int(i[-9] + i[-7]), int(i[-8] + i[-6])), j)
                for i in ep_in_frame
            ] for j in spatial_annotations]
            # best IoU over all proposals, per annotation
            multi_region = [max(k) for k in ep_iou_on_at]
            # IoU of only the highest-scoring proposal, per annotation
            scores = [k[-1] for k in ep_in_frame]
            best_index = scores.index(max(scores))
            one_region = [s[best_index] for s in ep_iou_on_at]
            # the whole frame as a trivial baseline region
            iou_frame = [
                compute_iou(j, frame_region) for j in spatial_annotations
            ]

            multi_all.extend(multi_region)
            single_all.extend(one_region)
            frame_all.extend(iou_frame)

    return multi_all, single_all, frame_all
Example #14
def get_temporal_duration(json_file, inflate, temporal_annotation_file):
    video_name = json_file.split('@')[-1].split('.')[0]
    # print('getting temporal duration: %s' % video_name)
    # json_file = '/absolute/tensorflow_models/191007174553_anoma_motion_reformed_single_180_127_4region_maxtop_256_c3d_npy/191007174553.ckpt-15188_eval_json/normal_test@Normal_Videos_876_x264.json'
    with open(json_file, 'r') as f:
        info = json.load(f)
    # max frame index present in the .json
    last_at_start_index = max([i[2] for i in info])
    zero_to_last_index = range(int(last_at_start_index + 1))
    annotation_in_all = basepy.read_txt_lines2list(temporal_annotation_file)
    video_in_annotation = [
        i[0] for i in annotation_in_all if video_name in i[0]
    ]
    if len(video_in_annotation) != 1:
        raise ValueError('Expect exactly one %s in %s' %
                         (video_name, video_in_annotation))
    else:
        video_in_annotation = video_in_annotation[0]
    video_mp4_name, video_class, start1, final1, start2, final2 = \
        video_in_annotation.split('  ')
    start1, final1, start2, final2 = \
        int(start1), int(final1), int(start2), int(final2)
    # get annotation in all frames (max is max in info)
    frame_annotation = [
        1 if start1 <= index <= final1 or start2 <= index <= final2 else 0
        for index in zero_to_last_index
    ]

    temporal_truth = [
        i for j, i in enumerate(frame_annotation) if j % inflate == 0
    ]
    # -1 marks deflated indices not yet covered by any clip in info
    temporal_score = [-1] * len(temporal_truth)
    for line in info:
        frame_index, anomaly_score = line[2], line[-1]
        if frame_index % inflate == 0:
            index_deflated = int(frame_index // inflate)
            temporal_score[index_deflated] = max(
                temporal_score[index_deflated], anomaly_score)
    if -1 in temporal_score:
        print(temporal_score)
        raise ValueError('Missing index in %s' % json_file)
    if len(temporal_score) != len(temporal_truth):
        raise ValueError(
            'temporal_score and temporal_truth do not match in number in %s' %
            json_file)

    temporal_score_select = [0] * len(temporal_truth)
    # the selection size is encoded in the folder name, e.g. ..._maxtop_256_...
    select_num = int(osp.dirname(osp.dirname(json_file)).split('_')[-3])
    select_num = select_num * 4 if '4region' in json_file else select_num
    info2 = sorted(info, key=lambda x: x[-3], reverse=True)[:select_num]
    for line in info2:
        frame_index, anomaly_score = line[2], line[-1]
        if frame_index % inflate == 0:
            index_deflated = int(frame_index // inflate)
            temporal_score_select[index_deflated] = max(
                temporal_score_select[index_deflated], anomaly_score)

    # odd-length moving-average window, capped at 13 taps
    window_length = min(2 * int((len(temporal_score) - 1) / 2 / 2) + 1, 13)
    window = [1 / window_length] * window_length
    temporal_score_select_smooth = np.convolve(temporal_score_select,
                                               window,
                                               mode='same')
    temporal_score_select_smooth = np.minimum(temporal_score_select_smooth,
                                              0.9999)
    # note: info_smooth aliases info, so these edits mutate info in place
    info_smooth = info
    for j, one_clip in enumerate(info_smooth):
        index = int(one_clip[2] // inflate)
        if one_clip[-1] == 0:
            one_clip[-1] = temporal_score_select_smooth[index] / 100
        elif temporal_score_select[index] == 0:
            one_clip[-1] = temporal_score_select_smooth[index]
        else:
            # rescale the clip score by the smoothed / raw frame maximum
            one_clip[-1] = (one_clip[-1] / temporal_score_select[index]
                            ) * temporal_score_select_smooth[index]
    info_smooth = sorted(info_smooth, key=lambda x: x[-3],
                         reverse=True)[:select_num]

    return (video_name, len(temporal_score), temporal_score,
            temporal_score_select_smooth, temporal_truth, info)
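# Worked example of the smoothing-window choice above (illustrative): a
# 100-point score sequence gives 2 * int(99 / 4) + 1 = 49, so the cap
# applies and a 13-tap moving average is used; a 9-point sequence gives
# 2 * int(8 / 4) + 1 = 5 taps.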
Example #15
import numpy as np
import data_io.basepy as basepy

# scratch round-trip test of basepy's txt helpers
sample_txt = './guinea/test.txt'

array_list = np.random.rand(2, 3).tolist()

sep = ',,'

# pieces of one output line: stringified array, separator, two fields, newline
make_line = (str(array_list), ',,', '1', ',,', '32', '\n')

basepy.write_txt_add_lines(sample_txt, 'a', '1', 'name')
print(basepy.read_txt_lines2list(sample_txt))