Example 1
def EM(model, DATA, epochs, n_iter, update_var=0, extra=None):

    batch_size = model['kwargs']['batch_size']
    if model['model'] == 'VAE':
        extra['assign'](*model['params']())
        L = [NLL(extra, DATA).mean()]
        LL = []
        for e in range(epochs):
            LL.append([])
            for i in range(len(DATA) // batch_size):
                LL[-1].append(model['train'](DATA[i * batch_size: (i + 1) * batch_size]))
            extra['assign'](*model['params']())
            LL[-1].append(np.mean(LL[-1]))
            L.append(NLL(extra, DATA).mean())
        return np.array(L), np.array(LL)


    S, D, R = model['S'], model['D'], model['R']
    z = np.random.randn(S)/10

    m0 = np.zeros((DATA.shape[0], R))
    m1 = np.zeros((DATA.shape[0], R, S))
    m2 = np.zeros((DATA.shape[0], R, S, S))

    m_loss = [NLL(model, DATA).mean()]
    for e in range(epochs):
        output, A, b, inequalities, signs = model['input2all'](z)
        regions = utils.search_region(model['signs2ineq'], model['signs2Ab'],
                                      signs, model['input2signs'])
        V = []
        batch_signs = np.pad(np.array(list(regions.keys())),
                             [[0, R - len(regions)], [0, 0]])

        varx = np.eye(D) * model['varx']()
        varz = np.eye(S) * model['varz']()
   
        m0 *= 0
        m1 *= 0
        m2 *= 0
        m0[:, :len(regions)], m1[:, :len(regions)], m2[:, :len(regions)] = \
            utils.marginal_moments(DATA, regions, varx, varz)[1:]


        for i in tqdm(range(n_iter), ascii=True, desc='M step'):
            if e > update_var:
                model['update_var'](batch_signs, DATA, m0, m1, m2)
            for l in np.random.permutation(2 * model['L']) % model['L']:
                if np.random.randn() < 0:  # random 50/50 choice between v and W updates
                    model['update_vs'](0.05, l, batch_signs, DATA, m0, m1, m2)
                else:
                    model['update_Ws'](0.05, l, batch_signs, DATA, m0, m1, m2)
        m_loss.append(NLL(model, DATA).mean())
        print('after M step', m_loss[-1])
    return np.array(m_loss)
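
A minimal usage sketch for EM, assuming the model dict is built by networks.create_fns as in Example 4; BS, R, Ds, DATA and the epoch counts below are illustrative, not taken from the original script.

# Hedged usage sketch (assumptions: networks.create_fns returns a compatible
# model dict; DATA is an (N, D) numpy array; all values are illustrative).
model = networks.create_fns(BS, R, Ds, 0, var_x=np.ones(2) * 0.25)
m_loss = EM(model, DATA, epochs=10, n_iter=5, update_var=2)
print('NLL trace (initial, then after each M step):', m_loss)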
Example 2
def NLL(model, DATA):

    S, D, R = model['S'], model['D'], model['R']
    z = np.random.randn(S)/10

    output, A, b, inequalities, signs = model['input2all'](z)
    regions = utils.search_region(model['signs2ineq'], model['signs2Ab'], signs)
    cov_x = np.eye(D) * model['varx']()
    cov_z = np.eye(S) * model['varz']()
    log_px = utils.marginal_moments(DATA, regions, cov_x, cov_z)[0]

    return - log_px + model['prior']()
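
For reference, NLL returns one value per data point (Example 1 averages it with .mean()); a one-line monitoring sketch, reusing the names above:

# Hedged sketch: per-sample negative log-likelihood, averaged for monitoring.
print('mean NLL on DATA:', NLL(model, DATA).mean())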
Example 3
        fig = plt.figure(figsize=(5, 5))
        f, g, h, all_g, train_f = utils.create_fns(input,
                                                   in_signs,
                                                   Ds,
                                                   x,
                                                   m0,
                                                   m1,
                                                   m2,
                                                   batch_in_signs,
                                                   sigma=2)

        z = np.random.randn(Ds[0])
        output, A, b, inequalities, signs = f(z)
        outpute = output + np.random.randn(Ds[-1]) * np.sqrt(0.05)

        regions = utils.search_region(all_g, g, signs)
        print(len(regions))

        As = np.array([regions[s]['Ab'][0] for s in regions])
        Bs = np.array([regions[s]['Ab'][1] for s in regions])

        predictions = np.array(
            [f(np.random.randn(Ds[0]))[0] for _ in range(200)])
        noise = np.random.randn(*predictions.shape) * np.sqrt(sigma_x[0, 0])

        p1 = utils.posterior(xx, regions, output, As, Bs, mu_z, sigma_z,
                             sigma_x)
        p2 = utils.posterior(xx, regions, outpute, As, Bs, mu_z, sigma_z,
                             sigma_x)
        print(p1, p2)
        print((p1 * 8 / 500).sum(), (p2 * 8 / 500).sum())
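        # Hedged reading of the print above: it looks like a Riemann-sum check
        # that each posterior roughly integrates to one over the xx grid, with
        # 8 / 500 matching the spacing of a 500-point grid over a length-8
        # interval. A generic 1-D version of the same check, with illustrative
        # grid parameters, would be:
        lo, hi, K = -4.0, 4.0, 500
        spacing = (hi - lo) / K
        print('approx integral of p1:', (p1 * spacing).sum())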
Example 4
    w = list(range(int(np.ceil(Y0)), int(Y1) + 1))
    plt.yticks(w, [str(t) for t in w])


for ss in [0.5, 0.2, 0.5]:
    np.random.seed(int(sys.argv[-1]) + 10)
    sigma_x = np.eye(Ds[-1]) * ss
    for i in range(5):
        fig = plt.figure(figsize=(5, 5))
        model = networks.create_fns(BS, R, Ds, 0, var_x=np.ones(2) * ss**2)

        output, A, b, inequalities, signs = model['input2all'](np.random.randn(
            Ds[0]))

        regions = utils.search_region(model['signs2ineq'], model['signs2Ab'],
                                      signs)

        As = np.array([regions[s]['Ab'][0] for s in regions])
        Bs = np.array([regions[s]['Ab'][1] for s in regions])

        predictions = np.array([
            model['input2all'](np.random.randn(Ds[0]))[0] for _ in range(200)
        ])

        noise = np.random.randn(*predictions.shape) * ss
        plt.scatter(predictions[:, 0] + noise[:, 0],
                    predictions[:, 1] + noise[:, 1],
                    color='blue',
                    label=r'$g(\mathbf{z})+\epsilon$',
                    alpha=0.5,
                    edgecolors='k')
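        # Hedged sketch: possible finishing touches for this figure; the
        # filename pattern is illustrative and not from the original script.
        plt.legend()
        plt.gca().set_aspect('equal')
        plt.tight_layout()
        plt.savefig('samples_ss{}_run{}.png'.format(ss, i))
        plt.close(fig)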
Example 5
def EM2(model, DATA, epochs, n_iter, update_var=False, pretrain=False):

    batch_size = model['kwargs']['batch_size']
    if model['model'] == 'VAE':
        L = []
        for e in range(epochs):
            for i in range(len(DATA) // batch_size):
                L.append(model['train'](DATA[i * batch_size: (i + 1) * batch_size]))
        return L


    # PRETRAIN WITH GLO
    if pretrain:
        glo = create_glo(**model['kwargs'])
        error = 1
        cpt = 0
        Z = np.random.randn(DATA.shape[0], model['kwargs']['Ds'][0])
        while True:
            cpt += 1
            II = np.random.permutation(len(DATA))[:batch_size]
            glo['reset'](Z[II])
            bat = DATA[II]
            for j in range(10):
                z = glo['estimate'](bat)
            Z[II] = z
            for j in range(4):
                glo['train'](bat)
            error = glo['loss'](bat)
            if cpt % 200 == 0:
                print(cpt, error)
            if cpt > 2000:
                break
     
        # THEN SET IT UP
        print('Setting up the weights')
        model['assign'](*glo['params']())

    S, D, R = model['S'], model['D'], model['R']
    z = np.random.randn(S)/10

    m0 = np.zeros((DATA.shape[0], R))
    m1 = np.zeros((DATA.shape[0], R, S))
    m2 = np.zeros((DATA.shape[0], R, S, S))

    m_loss = []
    for e in range(epochs):
        output, A, b, inequalities, signs = model['input2all'](z)
        regions = utils.search_region(model['signs2ineq'], model['signs2Ab'],
                                      signs, model['input2signs'])
#        others = utils.search_region_sample(model['input2signs'])
#        print('regions', len(regions), len(others))
#        print('Equal ?', regions.keys() == others)
#        print(regions.keys())
#        print(utils.search_region_sample(model['input2signs']))
        V = []
        for r in regions:
            V.append(utils.get_vertices(regions[r]['ineq'][:, :-1], regions[r]['ineq'][:,-1]))
        print('VERTICES',np.sort(np.unique(V)).round(2))
#        print(model['params']())
        if len(regions) > R:
            print('WARNING: found more regions than R')
            print(model['params']())
        batch_signs = np.pad(np.array(list(regions.keys())),
                             [[0, R - len(regions)], [0, 0]])

        varx = np.eye(D) * model['varx']()
        varz = np.eye(S) * model['varz']()
        print('varx', np.diag(varx))
#        print('varz', np.diag(varz))
   
        m0 *= 0
        m1 *= 0
        m2 *= 0
        m0[:, :len(regions)], m1[:, :len(regions)], m2[:, :len(regions)] = \
            utils.marginal_moments(DATA, regions, varx, varz)[1:]

#        m_loss.append(NLL(model, DATA).mean())
#        print('after E step', m_loss[-1])

        for i in range(n_iter):
            if update_var:
                model['update_var'](batch_signs, DATA, m0, m1, m2)
#            m_loss.append(model['train'](batch_signs, DATA, m0, m1, m2))
#            if np.isnan(m_loss[-1]):
#                return [None]
#            if i %10 == 0:
#                params = model['params']()
#                print('here?',np.max(params[0]), np.max(params[1]), np.max(params[2]), np.max(params[3]),
#                        model['loss'](batch_signs, DATA, m0, m1, m2))
            for l in np.random.permutation(2 * model['L']) % model['L']:
                if np.random.randn() < 0:  # random 50/50 choice between v and W updates
                    model['update_vs'](0.05, l, batch_signs, DATA, m0, m1, m2)
#                    m_loss.append(NLL(model, DATA).mean())
                else:
                    model['update_Ws'](0.05, l, batch_signs, DATA, m0, m1, m2)
#                    m_loss.append(NLL(model, DATA).mean())
#                    print('after W', m_loss[-1])
        m_loss.append(NLL(model, DATA).mean())
        print('after M step', m_loss[-1])
#        if n_iter > 1:
#            print('strictly decreasing M step ?:', np.diff(m_loss).max())
    return m_loss
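
EM2 differs from EM mainly in its optional GLO pretraining branch; a minimal call sketch, again assuming a model dict built as in Example 4 (BS, R, Ds and DATA are illustrative):

# Hedged usage sketch: EM2 with GLO pretraining enabled (create_glo is called
# internally with model['kwargs']); model construction assumed as in Example 4.
model = networks.create_fns(BS, R, Ds, 0, var_x=np.ones(2) * 0.25)
m_loss = EM2(model, DATA, epochs=20, n_iter=5, update_var=True, pretrain=True)
print('NLL after each M step:', m_loss)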
Example 6
def get_anomalies_sequential(video_reader,
                             reid_model_path,
                             fbf_results_dict,
                             static_results_dict,
                             ignore_matrix_gen=None,
                             reid_model_name="resnet50",
                             start_frame=1,
                             frame_interval=20,
                             abnormal_duration_thresh=60,
                             detect_thresh=5,
                             undetect_thresh=8,
                             score_thresh=0.3,
                             light_thresh=0.8,
                             anomaly_score_thresh=0.7,
                             similarity_thresh=0.95,
                             suspicious_time_thresh=18,
                             verbose=False,
                             anomaly_nms_thresh=0.8):
    """
    Performs the anomaly detection (sequential version).

    video_reader: VideoReader object for raw video
    reid_model_path: path to re-ID model checkpoint
    fbf_results_dict: ResultsDict object for frame-by-frame/raw video detection results
    static_results_dict: ResultsDict object for static/background detection results
    ignore_matrix_gen: generator yielding ignore matrix, must have the same interval as frame_interval.
        Or single numpy array, or path to .npy file.
    reid_model_name: backbone used for reid model
    start_frame: video frame to start from
    frame_interval: interval between frames to do calculations on
    abnormal_duration_thresh: duration (in seconds) to consider an object abnormal
    detect_thresh: duration (in frames) to consider an object for tracking
    undetect_thresh: duration (in frames) to stop considering an object for tracking
    score_thresh: detection score threshold for bounding boxes
    light_thresh: brightness threshold (not sure what it does)
    anomaly_score_thresh: threshold to consider an object an anomaly
    similarity_thresh: threshold for object re-ID
    suspicious_time_thresh: duration (in seconds) for an object to be considered suspicious
    verbose: verbose printing
    anomaly_nms_thresh: IoU threshold for anomaly NMS.
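    (A hedged usage sketch follows this function definition.)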


    """
    def get_ignore_gen(ign_matrix):
        """
        Handles different inputs for ignore matrix

        :param ign_matrix:
        :return:
        """

        if isinstance(ign_matrix, types.GeneratorType):
            return ign_matrix

        # load/create matrix
        if ign_matrix is None:
            matrix = np.ones((h, w), dtype=bool)  # don't ignore anything

        elif isinstance(ign_matrix, np.ndarray):  # single matrix
            matrix = ign_matrix.astype(bool)

        elif isinstance(ign_matrix, str):  # filename of a .npy file
            matrix = np.load(ign_matrix).astype(bool)

        else:
            raise TypeError("Invalid ignore matrix type:", type(ign_matrix))

        return (matrix for _ in iter(int, 1))  # infinite generator

    # Get video data
    num_frames, framerate, image_shape = video_reader.nframes, video_reader.framerate, video_reader.img_shape

    # load model
    reid_model = ReidExtractor(reid_model_name, reid_model_path)

    # Set up information matrices
    h, w, _ = image_shape

    ignore_matrix_gen = get_ignore_gen(ignore_matrix_gen)

    detect_count_matrix = np.zeros((h, w))
    undetect_count_matrix = np.zeros((h, w))
    start_time_matrix = np.zeros((h, w))
    end_time_matrix = np.zeros((h, w))
    score_matrix = np.zeros((h, w))
    state_matrix = np.zeros(
        (h, w), dtype=bool
    )  # State matrix, 0/1 distinguishes suspicious candidate states

    if verbose:
        print(
            f"total frames: {num_frames}, framerate: {framerate}, height: {h}, width: {w}"
        )
        print("-------------------------")

    ### Main loop
    start = False
    tmp_start = False
    all_results = []
    anomaly_now = {}
    for frame in range(start_frame, num_frames, frame_interval):
        try:
            ignore_matrix = next(ignore_matrix_gen)

            # if frame % (10*30) == 0:
            #     plt.imshow(ignore_matrix)
            #     plt.show()
        except StopIteration:
            pass  # keep same ignore matrix

        # Comment out if not using crop boxes, not needed
        # if fbf_results_dict.max_frame < static_results_dict.max_frame:
        #     fbf_results_dict.gen_next()

        # create tmp_score, tmp_detect
        static_results = static_results_dict[frame]
        if static_results is not None:
            boxes = static_results.loc[
                static_results["score"] > score_thresh,
                ["x1", "y1", "x2", "y2", "score"]].values
        else:
            boxes = []

        tmp_score, tmp_detect = add_boxes(boxes, ignore_matrix)

        ### plotting
        # img = video_reader.get_frame(frame)
        # cmap = plt.get_cmap("viridis")
        # for x1, y1, x2, y2, score in boxes:
        #     x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
        #     col = tuple(int(c * 255) for c in cmap(score)[:3])
        #     cv.rectangle(img, (x1, y1), (x2, y2), col, thickness=2)
        #
        # if frame % 12 == 0:
        #     plt.imshow(img)
        #     plt.show()
        ###

        if verbose:
            print(f"frame: {frame}")

            if len(boxes) > 0:
                print("\tboxes:", len(boxes))

        score_matrix += tmp_score  # add running totals
        detect_count_matrix += tmp_detect

        # Update detection matrices
        undetect_count_matrix += ~tmp_detect
        undetect_count_matrix[tmp_detect] = 0

        # Update time matrices
        start_time_matrix[detect_count_matrix == 1] = \
            -600 if frame == 1 else frame  # why -600 for frame 1?
        end_time_matrix[detect_count_matrix > 0] = frame

        # Update state matrices
        state_matrix[detect_count_matrix > detect_thresh] = True

        # Detect anomaly
        time_delay = utils.mask(end_time_matrix - start_time_matrix,
                                state_matrix)
        delay_max_idx = np.unravel_index(time_delay.argmax(), time_delay.shape)

        #         print(f"\tmax delay: {time_delay.max()}, start: {start_time_matrix[delay_max_idx]}, end: {end_time_matrix[delay_max_idx]}, state: {state_matrix[delay_max_idx]}")
        if (not start and
                time_delay.max() / framerate > abnormal_duration_thresh):
            # and score_matrix[delay_max_idx]/detect_count_matrix[delay_max_idx]>0.8:

            delay_max_idx = np.unravel_index(time_delay.argmax(),
                                             time_delay.shape)

            # backtrack the start time
            time_frame = int(start_time_matrix[delay_max_idx] /
                             5) * 5  # + 1  # why 5s and 1?

            G = np.where(
                detect_count_matrix < detect_count_matrix[delay_max_idx] - 2,
                0, 1)  # What does G represent?, why -2?
            region = utils.search_region(G, delay_max_idx)

            # vehicle reid
            if 'start_time' in anomaly_now and (
                    time_frame / framerate -
                    anomaly_now['end_time']) < 30:  # why 30?
                f1_frame_num = max(1, anomaly_now['start_time'] * framerate)
                f2_frame_num = max(1, time_frame)

                similarity = reid_model.similarity(
                    video_reader.get_frame(f1_frame_num),
                    video_reader.get_frame(f2_frame_num),
                    anomaly_now["region"], region)

                if similarity > similarity_thresh:
                    time_frame = int(anomaly_now['start_time'] * framerate /
                                     5) * 5  # + 1  # why 5s and 1?
                else:
                    anomaly_now['region'] = region

            else:
                anomaly_now['region'] = region

            # IoU stuff
            max_iou = 1
            count = 1
            start_time = time_frame
            tmp_len = 1
            ratio = 1
            while (max_iou > 0.1 or tmp_len < 40
                   or ratio > 0.6) and time_frame > 1:  # why 0.1, 40, 0.6?
                ratio = count / tmp_len

                print("time frame:", time_frame)
                fbf_results = fbf_results_dict[time_frame]
                if fbf_results is not None:
                    bboxes = fbf_results[["x1", "y1", "x2", "y2",
                                          "score"]].values
                    max_iou = utils.compute_iou(anomaly_now['region'], bboxes)

                else:
                    max_iou = 0

                time_frame -= 5  # why 5?
                if max_iou > 0.3:  # why 0.3?
                    count += 1
                    if max_iou > 0.5:  # why 0.5?  # they mention 0.5 IoU in the paper for NMS, might not be this
                        start_time = time_frame

                tmp_len += 1

            # back track start_time, until brightness at that spot falls below a threshold
            for time_frame in range(start_time, 1, -5):
                #                 print(f"\ttimeframe: {time_frame}")
                tmp_im = video_reader.get_frame(time_frame)
                if utils.compute_brightness(
                        tmp_im[region[1]:region[3],
                               region[0]:region[2]]) <= light_thresh:
                    break

                start_time = time_frame

            anomaly_now['start_time'] = max(0, start_time / framerate)
            anomaly_now['end_time'] = max(
                0, end_time_matrix[delay_max_idx] / framerate)
            start = True

        elif (not tmp_start and
              time_delay.max() > suspicious_time_thresh * framerate):
            time_frame = start_time_matrix[delay_max_idx]

            G = np.where(
                detect_count_matrix < detect_count_matrix[delay_max_idx] - 2,
                0, 1)  # what does G represent?
            region = utils.search_region(G, delay_max_idx)

            # vehicle reid
            if 'start_time' in anomaly_now and (
                    time_frame / framerate -
                    anomaly_now['end_time']) < 30:  # why 30?
                f1_frame_num = max(1, anomaly_now['start_time'] * framerate)
                f2_frame_num = max(1, time_frame)

                similarity = reid_model.similarity(
                    video_reader.get_frame(f1_frame_num),
                    video_reader.get_frame(f2_frame_num),
                    anomaly_now["region"], region)

                if similarity > similarity_thresh:
                    time_frame = int(
                        anomaly_now['start_time'] * framerate / 5) * 5 + 1
                    region = anomaly_now['region']

            anomaly_now['region'] = region
            anomaly_now['start_time'] = max(0, time_frame / framerate)
            anomaly_now['end_time'] = max(
                0, end_time_matrix[delay_max_idx] / framerate)

            tmp_start = True

        if start and time_delay.max() / framerate > abnormal_duration_thresh:

            delay_max_idx = np.unravel_index(time_delay.argmax(),
                                             time_delay.shape)

            if undetect_count_matrix[delay_max_idx] > undetect_thresh:
                anomaly_score = score_matrix[
                    delay_max_idx] / detect_count_matrix[delay_max_idx]

                print("\t", anomaly_now, anomaly_score)
                if anomaly_score > anomaly_score_thresh:
                    anomaly_now['end_time'] = end_time_matrix[
                        delay_max_idx] / framerate
                    anomaly_now['score'] = anomaly_score

                    all_results.append(anomaly_now)
                    anomaly_now = {}

                start = False

        elif (tmp_start and
              time_delay.max() > suspicious_time_thresh * framerate):
            if undetect_count_matrix[delay_max_idx] > undetect_thresh:

                anomaly_score = score_matrix[
                    delay_max_idx] / detect_count_matrix[delay_max_idx]
                if anomaly_score > anomaly_score_thresh:
                    anomaly_now['end_time'] = end_time_matrix[
                        delay_max_idx] / framerate
                    anomaly_now['score'] = anomaly_score

                tmp_start = False

        # a long undetected run clears the candidate state
        state_matrix[undetect_count_matrix > undetect_thresh] = False
        undetect_count_matrix[undetect_count_matrix > undetect_thresh] = 0

        # update matrix
        tmp_detect |= state_matrix
        detect_count_matrix = utils.mask(detect_count_matrix, tmp_detect)
        score_matrix = utils.mask(score_matrix, tmp_detect)

    # Add all anomalies to the results list
    print("---", start, time_delay.max(), score_matrix[delay_max_idx],
          detect_count_matrix[delay_max_idx])
    if start and time_delay.max() > abnormal_duration_thresh * framerate:
        anomaly_score = score_matrix[delay_max_idx] / detect_count_matrix[
            delay_max_idx]
        if anomaly_score > anomaly_score_thresh:
            anomaly_now[
                'end_time'] = end_time_matrix[delay_max_idx] / framerate
            anomaly_now['score'] = anomaly_score

            all_results.append(anomaly_now)
            anomaly_now = {}
            start = False

    # Apply Non-Maximal Supression to the results
    if all_results:
        nms_out = utils.anomaly_nms(all_results, anomaly_nms_thresh)

        #         final_result = {'start_time': 892, 'score': 0} # why 892?
        #         for nms_start_time, nms_end_time in nms_out[:, 5:7]:
        #             if nms_start_time < final_result["start_time"]:
        #                 final_result["start_time"] = max(0, int(nms_start_time - 1))
        #                 final_result["score"] = 1
        #                 final_result["end_time"] = nms_end_time

        final_results = pd.DataFrame(nms_out,
                                     columns=[
                                         "x1", "y1", "x2", "y2", "score",
                                         "start_time", "end_time"
                                     ])

        return final_results

    return None
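
A minimal call sketch for get_anomalies_sequential, following the docstring above. Only the class names VideoReader and ResultsDict appear in the source, so their constructor arguments below are assumptions, and all paths are placeholders.

# Hedged usage sketch; constructor arguments and paths are assumptions.
video_reader = VideoReader("video.mp4")                 # hypothetical constructor call
fbf_results = ResultsDict("fbf_detections.csv")         # hypothetical constructor call
static_results = ResultsDict("static_detections.csv")   # hypothetical constructor call

anomalies = get_anomalies_sequential(video_reader,
                                     "checkpoints/reid_resnet50.pth",
                                     fbf_results,
                                     static_results,
                                     ignore_matrix_gen="ignore.npy",
                                     verbose=True)
if anomalies is not None:
    print(anomalies[["start_time", "end_time", "score"]])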
Example 7
Ds = [2, 6, 1]
input = T.Placeholder((Ds[0],), 'float32')
in_signs = T.Placeholder((np.sum(Ds[1:-1]),), 'bool')

f, g, h, all_g = create_fns(input, in_signs, Ds)

x = np.ones(Ds[0])
output, A, b, inequalities, signs = f(x)
K = 200
X, Y = np.meshgrid(np.linspace(-2, 2, K), np.linspace(-2, 2, K))
xx = np.vstack([X.flatten(), Y.flatten()]).T


############################################################
regions = utils.search_region(all_g, g, signs)
ds = [0, 1, 2, 3, 4]

grey = matplotlib.cm.get_cmap('gray')
cmap, norm = sj.utils.create_cmap([0, 1] + list(range(2, len(ds) + 1)),
                ['w', (0.1, 0.1, 0.1)] + [grey(i) for i in np.linspace(0.3, 0.85, len(ds)-1)])
dregions = []
FINAL = np.zeros(len(xx))
fig = plt.figure(figsize=(len(ds)*4, 4.2))

for k, d in enumerate(ds):
    dregions.append(utils.search_region(all_g, g, signs, max_depth=ds[k]))
 
    ax = plt.subplot(1, len(ds) + 1, 2 + k)
    final = np.zeros(len(xx))
    if k > 0: 
Example 8
    all_g = sj.function(in_signs, outputs=inequalities_code)
    h = sj.function(input, outputs=maps[-1])

    return f, g, h, all_g


for Ds in [[2, 4, 1], [2, 8, 1], [2, 3, 3, 2, 1]]:
    input = T.Placeholder((Ds[0], ), 'float32')
    in_signs = T.Placeholder((np.sum(Ds[1:-1]), ), 'bool')

    f, g, h, all_g = create_fns(input, in_signs, Ds)

    x = np.random.randn(Ds[0]) / 10
    output, A, b, inequalities, signs = f(x)
    regions = []
    utils.search_region(all_g, regions, signs)

    K = 200
    xx = np.meshgrid(np.linspace(-10, 10, K), np.linspace(-10, 10, K))
    xx = np.vstack([xx[0].flatten(), xx[1].flatten()]).T

    yy = np.zeros((K * K, 1))
    yy2 = np.zeros((K * K, 1))

    allA, allB = [], []

    for flips in regions:

        A_w, b_w = g(flips)
        allA.append(A_w)
        allB.append(b_w)
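
    # Hedged sanity check (assumption: the network is affine within the region
    # containing x, so the A, b returned by f(x) should reproduce its output).
    print('affine-map residual:', np.abs(output - (A @ x + b)).max())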