Example #1
    def __init__(self, hps, vgg, sess):

        self.sample_list = ['cam1', 'cam2', 'cam3', 'cam4', 'cam5', 'cam6']

        self.frame_container = []
        self.bbox_container = {}

        self.sess = sess
        self.vgg = vgg
        self.nn_frame = 5
        self.vlader = VLAD(hps.vlad_k)


        mpath = self.make_model_path(hps)
        print('Loading model: ', mpath)

        self.svc_model = pickle.load(open(mpath, 'rb'))
        self.hps = hps

        vgg_level = 'vgg' + str(hps.cnn_ver)
        clip_level = 'clip'
        pca_level = 'pca' + str(hps.pca_k)
        vlad_level = pca_level + '_vlad' + str(hps.vlad_k)

        vgg_bpath = fm.mkdir(os.path.join(var.GIST_VIOLENCE_PATH, vgg_level))
        self.clip_bpath = os.path.join(vgg_bpath, clip_level)
        self.pca_bpath = fm.mkdir(os.path.join(vgg_bpath, pca_level))

        self.vlad_bpath = fm.mkdir(os.path.join(vgg_bpath, vlad_level))
        self.vlad_maker = VLAD(self.hps)
Example #2
    def visualize_predictions(self):
        for sample_nxt in self.sample_list[3:4]:  # for each video
            print('Visualizing ', sample_nxt)
            bboxX = get_bbox(var.GIST_VIOLENCE_PATH, sample_nxt)  # bounding-box of video

            frames, prop = im.read_video_as_list_by_path(var.GIST_VIOLENCE_PATH, sample_nxt + '.avi')
            path = os.path.join(var.GIST_VIOLENCE_PATH, 'predicted', sample_nxt + '.avi')


            fpath = os.path.join(var.GIST_VIOLENCE_PATH, 'vgg16', 'clip30_vlad9', 'nf6', 'cam4')
            feax = pickle.load(open(fpath + '/fea_nameX', 'rb'))

            model_level = 'svc_nf6_' + 'v' + str(self.hps.cnn_ver) + '_c' + str(self.hps.clip_n) + '_v' + str(
                self.hps.vlad_k)
            tmp = fm.mkdir(os.path.join(var.GIST_VIOLENCE_PATH, 'models'))
            mpath = os.path.join(tmp, model_level)
            model = pickle.load(open(mpath, 'rb'))

            for i in range(1000):

                name = feax[i]
                _, fid, oid = name.split('.')[0].split('_')

                fid = int(fid)
                oid = int(oid)
                print(name, ' >> ', fid, oid)
                frame = frames[fid]
                bbox_ = bboxX[fid]
                bbox = bbox_[oid]

                x = np.load(fpath + '/nf6_' + name)
                pred = model.predict(x.reshape(1, -1))[0]
                # print('prediction: ', pred)

                color = var.green if pred == 0 else var.red
                msg = 'Walk' if pred == 0 else '~Walk'


                cv2.rectangle(frame, (bbox[0], bbox[2]), (bbox[1], bbox[3]), var.black, 2)
                # cv2.rectangle(dp, (bbox[0], bbox[2]), (bbox[0] + txtSize[0], bbox[2] + txtSize[1]), color, -1)

                # msg0 = 'L:' + str(y)
                # txtSize, baseLine = cv2.getTextSize(msg0, var.FONT_FACE, var.FONT_SCALE, var.thickness)
                # cv2.rectangle(dp, (bbox[0], bbox[2]), (bbox[0] + txtSize[0], bbox[2] + txtSize[1]), color, -1)
                # cv2.putText(dp, msg0, (bbox[0], bbox[2] + txtSize[1]), cv2.FONT_HERSHEY_COMPLEX, var.FONT_SCALE,
                #             var.white, var.thickness)

                msg1 = 'P:' + str(pred)
                txtSize, baseLine = cv2.getTextSize(msg1, var.FONT_FACE, var.FONT_SCALE, var.thickness)
                cv2.rectangle(frame, (bbox[0], bbox[2] + 20), (bbox[0] + txtSize[0], bbox[2] + txtSize[1] + 20), color, -1)
                cv2.putText(frame, msg1, (bbox[0], bbox[2] + txtSize[1] + 20), cv2.FONT_HERSHEY_COMPLEX, var.FONT_SCALE,
                            var.white, var.thickness)


            tmp_path = fm.mkdir(os.path.join(var.GIST_VIOLENCE_PATH, 'predicted', 'tmp'))
            im.write_video(frames[0:1000], prop['fps'], tmp_path, 'bbbb', prop)
Example #3
    def run_gist_case(self):
        chdir('..')
        self.bpath = files.mkdir(getcwd(), 'gist')
        self.res_path = files.mkdir(self.bpath, 'res')
        self.param_path = files.mkdir(self.res_path, 'params')
        self.graph_path = files.mkdir(self.res_path, 'graphs')
        self.model_path = files.mkdir(self.res_path, 'models')
        prepare = Prepare(self.res_path)
        self.FEATURE = 'featureset.npy'
        self.GT = 'groundtruth.npy'

        fgset, dp_color, dp_mask = prepare.prepare_gist()
Example #4
def get_bbox(src_path, cam_nxt):
    if fm.isExist(src_path, 'bbox_' + cam_nxt, pr=1):
    # if False:
        return pickle.load(open(src_path + '/bbox_' + cam_nxt, 'rb'))
    else:
        gt = read_txt2(src_path, cam_nxt)
        pickle.dump(gt, open(src_path + '/bbox_' + cam_nxt, 'wb'))
        return gt
Example #5
    def run_pets_case(self):
        chdir('..')
        self.bpath = files.mkdir(getcwd(), 'S1L1')
        self.res_path = files.mkdir(self.bpath, 'res')
        self.param_path = files.mkdir(self.res_path, 'params')
        self.graph_path = files.mkdir(self.res_path, 'graphs')
        self.model_path = files.mkdir(self.res_path, 'models')
        prepare = Prepare(self.res_path)
        self.FEATURE = 'featureset.npy'
        self.GT = 'groundtruth.npy'

        a1357, b1357, a1359, b1359, weight = prepare.prepare()
        fg1357 = a1357[0]
        dpcolor1357 = b1357[0]
        dpmask1357 = b1357[1]
        version = 4
        # v2: only K
        # v3: only K, E, T
        # v4: K, E, T, P, S, S2

        self.create_feature_set(prepare, fg1357, dpcolor1357, weight, version)
        features = np.load(self.param_path + '/v' + str(version) + '_' +
                           self.FEATURE)
        labels = ['K', 'E', 'T', 'P', 'S', 'S2']
        #K,E,T,P,S,S2
        K = features[0]
        S = features[4]
        KS = []
        for i in range(len(K)):
            KS.append(np.hstack((K[i], S[i])))
        KS = np.array(KS)
        self.test(KS, version, 'KS')
        self.test_trainset_test_same(KS, version, 'KS')
        quit('stopping after the KS experiment')

        for i in range(len(labels)):
            self.test(features[i], version, labels[i])

        for i in range(len(labels)):
            self.test_trainset_test_same(features[i], version, labels[i])
Example #6
    def read_count_groundtruth(self):
        """
        reads text file which contains groundtruth.
        :return:
        """

        lines = files.read_text(self.res_path, 'count_gt')
        res = []
        for line in lines:
            tmp = line.split(',')
            tmp = tools.int2round(tmp)
            res.append(tmp)
        return res
Example #7
    def prepare(self):
        if files.isExist(self.param_path, self.FG1357):
            fg1357 = np.load(self.param_path + '/' + self.FG1357)
            fg1359 = np.load(self.param_path + '/' + self.FG1359)
            dp1357 = np.load(self.param_path + '/' + self.DP1357)
            dp1359 = np.load(self.param_path + '/' + self.DP1359)

        else:
            path_time_13_57 = files.mkdir(self.DATASET,
                                          'S1/L1/Time_13-57/View_001')
            path_time_13_59 = files.mkdir(self.DATASET,
                                          'S1/L1/Time_13-59/View_001')
            data1357 = self.create_parameters(path_time_13_57)
            data1359 = self.create_parameters(path_time_13_59)
            # each data is ((foreground masks, gray images), (color images, mask visualizations))

            fg1357 = data1357[0]
            dp1357 = data1357[1]
            fg1359 = data1359[0]
            dp1359 = data1359[1]

            np.save(self.param_path + '/' + self.FG1357, fg1357)
            np.save(self.param_path + '/' + self.DP1357, dp1357)
            np.save(self.param_path + '/' + self.FG1359, fg1359)
            np.save(self.param_path + '/' + self.DP1359, dp1359)

        if files.isExist(self.param_path, self.WEIGHT):
            weight = np.load(self.param_path + '/' + self.WEIGHT)
        else:
            shape = fg1357[0][0].shape
            weight = self.create_weight_matrix(shape)
            np.save(self.param_path + '/' + self.WEIGHT, weight)

        # self.draw_weight_map(weight)

        return fg1357, dp1357, fg1359, dp1359, weight
Example #8
    def test(self, features, version, label):
        """
        Learns GPR and KNR model from given training set and test on test set.

        Here, the training set consists of the features from every other frame (indices 0, 2, 4, ...) and the test set consists of the features from the remaining frames (indices 1, 3, 5, ...).

        :param features:
        :param version:
        :param label:
        :return:
        """

        groundtruth = np.load(self.param_path + '/v' + str(version) + '_' +
                              self.GT)

        _trainX = np.concatenate(features[0:features.shape[0]:2])
        _trainY = np.concatenate(groundtruth[0:groundtruth.size:2])
        testX = features[1:features.shape[0]:2]
        testY = groundtruth[1:groundtruth.size:2]

        print 'features.shape: ', features.shape, ', groundtruth.shape: ', groundtruth.shape
        print '_trainX.shape: ', _trainX.shape, ', _trainY.shape: ', _trainY.shape

        trainX, trainY = self.exclude_label(_trainX, _trainY, c=0)

        PYGPR = 'gpr_' + label
        KNR = 'knr_' + label
        if files.isExist(self.model_path, PYGPR):
            gprmodel = self.loadf(self.model_path, PYGPR)
            knrmodel = self.loadf(self.model_path, KNR)

        else:
            print 'Learning GPR model'
            gprmodel = pyGPs.GPR()
            gprmodel.getPosterior(trainX, trainY)
            gprmodel.optimize(trainX, trainY)
            self.savef(self.model_path, PYGPR, gprmodel)

            print 'Learning KNR model'
            knrmodel = knr(trainX, trainY)
            self.savef(self.model_path, KNR, knrmodel)

            print 'Learning both GPR and KNR model is DONE.'

        self.plot_gpr(gprmodel, testX, testY, label, 'odd_feature')
        self.plot_knr(knrmodel, testX, testY, label, 'odd_feature')
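
A minimal sketch of the even/odd frame split described in the docstring above, on stand-alone toy data; the `features` and `groundtruth` arrays here are hypothetical stand-ins for the saved per-frame arrays:

# Sketch only: per-frame features go alternately to the training and test sets.
import numpy as np

# Hypothetical stand-ins: 10 frames, each with a (3 blobs x 2 features) block
# and a matching list of per-blob counts.
features = np.array([np.random.rand(3, 2) for _ in range(10)])
groundtruth = np.array([[1, 2, 3]] * 10)

trainX = np.concatenate(features[0::2])    # frames 0, 2, 4, ... -> pooled training samples
trainY = np.concatenate(groundtruth[0::2])
testX = features[1::2]                     # frames 1, 3, 5, ... -> kept per frame for testing
testY = groundtruth[1::2]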
Example #9
    def prepare_gist(self):
        if files.isExist(self.param_path, self.FG2GIST):
            fgset = np.load(self.param_path + '/' + self.FG2GIST)
            dpset = np.load(self.param_path + '/' + self.DP2GIST)
            dp_color = dpset[0]
            dp_mask = dpset[1]

        else:
            fgset, dp_color, dp_mask = self.create_gist_parameters(
                self.res_path, 'gist2.avi')
            # create_gist_parameters returns the foreground masks, color frames, and mask visualizations

            np.save(self.param_path + '/' + self.FG2GIST, fgset)
            np.save(self.param_path + '/' + self.DP2GIST,
                    np.array([dp_color, dp_mask]))

        return fgset, dp_color, dp_mask
Example #10
    def test_trainset_test_same(self, features, version, label):
        """
        Learns GPR and KNR model from given training set and test on test set.

        Here, training set is equal to test set.

        :param features:
        :param version:
        :param label:
        :return:
        """
        groundtruth = np.load(self.param_path + '/v' + str(version) + '_' +
                              self.GT)

        _trainX = np.concatenate(features)
        _trainY = np.concatenate(groundtruth)

        trainX, trainY = self.exclude_label(_trainX, _trainY, c=0)
        testX = features
        testY = groundtruth

        PYGPR = 'gpr_all_' + label
        KNR = 'knr_all_' + label
        if files.isExist(self.model_path, PYGPR):
            gprmodel = self.loadf(self.model_path, PYGPR)
            knrmodel = self.loadf(self.model_path, KNR)

        else:
            print 'Learning GPR model'
            gprmodel = pyGPs.GPR()
            gprmodel.getPosterior(trainX, trainY)
            gprmodel.optimize(trainX, trainY)
            self.savef(self.model_path, PYGPR, gprmodel)

            print 'Learning KNR model'
            knrmodel = knr(trainX, trainY)
            self.savef(self.model_path, KNR, knrmodel)

            print 'Learning both GPR and KNR model is DONE.'

        self.plot_gpr(gprmodel, testX, testY, label, 'all_feature')
        self.plot_knr(knrmodel, testX, testY, label, 'all_feature')
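
Both test methods drive the same pyGPs workflow (getPosterior, optimize, predict). A minimal self-contained sketch of that workflow on toy data, in the same Python 2 style as the surrounding code:

# Sketch only: the pyGPs regression calls used above, on toy data.
import numpy as np
import pyGPs

trainX = np.random.rand(20, 2)         # 20 samples, 2 features
trainY = trainX.sum(axis=1)            # toy regression target
testX = np.random.rand(5, 2)

gprmodel = pyGPs.GPR()
gprmodel.getPosterior(trainX, trainY)  # fit posterior with the default mean/cov/likelihood
gprmodel.optimize(trainX, trainY)      # optimize hyperparameters
ym, ys2, fmu, fs2, lp = gprmodel.predict(testX)
print 'predictive mean:', ym.reshape(ym.size)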
Example #11
    def create_parameters(self, selected_path):
        """
        Learns background model from data S0 and creates foreground mask of selected data S1.
        This returns four parameters which are
        foreground mask set, gray images, color images, visualized images of foreground mask.

        :param selected_path:
        :return:
        """

        path_s0 = files.mkdir(self.DATASET, 'S0/Background/View_001/')
        path_s0_dir = listdir(str(path_s0))

        background_train_set = []
        for selected_dir in path_s0_dir[2:3]:
            background_train_set += images.read_images_by_path(
                path_s0 + '/' + selected_dir, False)

        dp_color = images.read_images_by_path(selected_path)
        dp_gray = images.read_images_by_path(selected_path, False)

        background_model = cv2.BackgroundSubtractorMOG2(
            len(background_train_set), varThreshold=266, bShadowDetection=True)
        for frame in background_train_set:
            background_model.apply(
                frame, len(background_train_set))  # given frame, learning-rate

        th = 150
        fgmask_set = []
        dp_mask = []
        for frame in dp_gray:
            forward = background_model.apply(
                frame
            )  # create foreground mask which is gray-scale(0~255) image.
            tmp = cv2.cvtColor(forward, cv2.COLOR_GRAY2BGR)  # convert to color
            dp_mask.append(tmp)
            #convert gray-scale foreground mask to binary image.
            a = stats.threshold(forward, threshmin=th, threshmax=255, newval=0)
            a = stats.threshold(a, threshmin=0, threshmax=th, newval=1)
            fgmask_set.append(a)

        return (fgmask_set, dp_gray), (dp_color, dp_mask)
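
The snippet above uses the OpenCV 2.4 constructor (cv2.BackgroundSubtractorMOG2) and scipy.stats.threshold, which was removed in SciPy 1.0. A minimal sketch of the same mask-building step with the OpenCV 3+ factory function and plain NumPy; the image lists here are hypothetical stand-ins for background_train_set and dp_gray built above:

# Sketch only: OpenCV 3+ / NumPy equivalent of the mask-building step.
import cv2
import numpy as np

# Hypothetical stand-ins for the image lists built in create_parameters.
background_train_set = [np.random.randint(0, 256, (576, 768), dtype=np.uint8) for _ in range(5)]
dp_gray = [np.random.randint(0, 256, (576, 768), dtype=np.uint8) for _ in range(3)]

background_model = cv2.createBackgroundSubtractorMOG2(
    history=len(background_train_set), varThreshold=266, detectShadows=True)
for frame in background_train_set:
    background_model.apply(frame, learningRate=-1)  # -1 lets OpenCV pick the learning rate

th = 150
fgmask_set, dp_mask = [], []
for frame in dp_gray:
    forward = background_model.apply(frame)                    # 0-255 foreground mask
    dp_mask.append(cv2.cvtColor(forward, cv2.COLOR_GRAY2BGR))  # visualization copy
    fgmask_set.append(np.where(forward >= th, 1, 0).astype(np.uint8))  # binary mask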
Example #12
    def __init__(self, rpath):
        self.res_path = rpath
        self.param_path = files.mkdir(self.res_path, 'params')

        self.FG1357 = 'fg1357.npy'
        self.FG1359 = 'fg1359.npy'
        self.DP1357 = 'dp1357.npy'
        self.DP1359 = 'dp1359.npy'

        self.FG2GIST = 'fg_gist2.npy'
        self.DP2GIST = 'dp_gist2.npy'

        self.GT = 'groundtruth.npy'
        self.WEIGHT = 'weightmatrix'
        self.DATASET = 'F:/DATASET/6.PETS2009/Crowd_PETS09'

        gt_tree, gt_list = self.parse_xml()
        np_gt = np.array(gt_list)
        self.width = min(np_gt[:, 2])
        self.height = min(np_gt[:, 3])
        self.y = min(np_gt[:, 1])
Example #13
    def make_model_path(self, hps):
        model_level = 'svc_' + hps.fl + '_v' + str(hps.cnn_ver) \
                      + '_c' + str(hps.clip_n) + '_v' + str(hps.vlad_k)
        tmp = fm.mkdir(os.path.join(var.GIST_VIOLENCE_PATH, 'models'))
        return os.path.join(tmp, model_level)
Example #14
    def create_feature_set(self, prepare, fgset, dpcolor, weight, version):
        """
        Extracts features (e.g., K, S, P, E, T) from each image.

        K.shape = n_frames * 2
        S.shape = n_frames * 2
        P.shape = n_frames * 4
        E.shape = n_frames * 6
        T.shape = n_frames * 4

        :param prepare:
        :param fgset:
        :param dpcolor:
        :param weight:
        :param version:
        :return:
        """

        if files.isExist(self.param_path,
                         'v' + str(version) + '_' + self.FEATURE):
            return

        print 'making feature set sequence.'

        contours_tree = []
        rectangles_tree = []
        param = prepare.min_width_height()
        groundtruth_tree = self.read_count_groundtruth()

        for f in fgset:
            rect, cont = self.segmentation_blob(f, param)
            contours_tree.append(cont)
            rectangles_tree.append(rect)

        size = len(fgset)
        K = []
        groundtruth = []
        E = []
        T = []
        S = []
        S2 = []
        P = []
        for i in range(1, size - 1):
            print 'extracting at ', i, ', ', round(float(i) / size * 100, 1), '%'
            groundtruth.append(groundtruth_tree[i])

            ks = directs.run_SURF_v4(dpcolor[i], weight, rectangles_tree[i])
            kf = directs.run_FAST_v4(dpcolor[i], weight, rectangles_tree[i])
            K.append(np.vstack((ks, kf)).T)

            e = directs.get_canny_edges(dpcolor[i], weight, rectangles_tree[i])
            E.append(e)

            t = directs.get_texture_T(dpcolor[i - 1:i + 2, :, :],
                                      rectangles_tree[i])
            T.append(t)

            l = indirects.get_size_L(fgset[i], weight, contours_tree[i])
            s = indirects.get_size_S(fgset[i], weight, contours_tree[i])
            s2 = indirects.get_size_S_v2(fgset[i], weight, rectangles_tree[i])
            S.append(np.vstack((s, l)).T)
            S2.append(np.vstack((s2, l)).T)

            p = indirects.get_shape_P(fgset[i], weight, contours_tree[i])
            P.append(p)

        K = np.array(K)
        np.save(self.param_path + '/v' + str(version) + '_' + self.FEATURE,
                [K, E, T, P, S, S2])
        np.save(self.param_path + '/v' + str(version) + '_' + self.GT,
                groundtruth)
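
A minimal sketch of reading the saved arrays back and stacking two of the per-frame blocks, mirroring what run_pets_case does in Example #5; param_path and version here are hypothetical stand-ins for the values used above:

# Sketch only: reload the saved feature blocks in K, E, T, P, S, S2 order.
import numpy as np

param_path = './res/params'   # hypothetical; same directory as self.param_path above
version = 4

features = np.load(param_path + '/v' + str(version) + '_featureset.npy')
# (newer NumPy versions need allow_pickle=True for this object array)
K, E, T, P, S, S2 = features
groundtruth = np.load(param_path + '/v' + str(version) + '_groundtruth.npy')

# Per-frame horizontal stack of the K and S blocks, as in Example #5.
KS = np.array([np.hstack((K[i], S[i])) for i in range(len(K))])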
Example #15
    def visualize_video(self, features, version, label, _fgset, _colordp,
                        param):
        groundtruth = np.load(self.param_path + '/v' + str(version) + '_' +
                              self.GT)

        _trainX = np.concatenate(features[0:features.shape[0]:2])
        _trainY = np.concatenate(groundtruth[0:groundtruth.size:2])
        testX = features[1:features.shape[0]:2]
        testY = groundtruth[1:groundtruth.size:2]

        np.savetxt(self.res_path + '/feature_' + label + '.txt',
                   np.hstack((_trainX, _trainY.reshape(-1, 1))),
                   fmt='%d')
        print 'features.shape: ', features.shape, ', groundtruth.shape: ', groundtruth.shape
        print '_trainX.shape: ', _trainX.shape, ', _trainY.shape: ', _trainY.shape

        trainX, trainY = self.exclude_label(_trainX, _trainY, c=0)

        PYGPR = 'gpr_' + label
        KNR = 'knr_' + label
        if files.isExist(self.res_path, PYGPR):
            gprmodel = self.loadf(self.res_path, PYGPR)
            knrmodel = self.loadf(self.res_path, KNR)

        else:
            print 'Learning GPR model'
            gprmodel = pyGPs.GPR()
            gprmodel.getPosterior(trainX, trainY)
            gprmodel.optimize(trainX, trainY)
            self.savef(self.res_path, PYGPR, gprmodel)

            print 'Learning KNR model'
            knrmodel = knr(trainX, trainY)
            self.savef(self.res_path, KNR, knrmodel)

            print 'Learning both GPR and KNR model is DONE.'

        Y_pred = np.array([])
        Y_sum_pred = []
        Y_pred_frame = []
        for x in testX:
            ym, ys2, fm, fs2, lp = gprmodel.predict(np.array(x))
            Y_pred = np.hstack((Y_pred, ym.reshape(ym.size)))
            ym = ym.reshape(ym.size)
            Y_sum_pred.append(sum(ym))
            Y_pred_frame.append(ym)

        Y_label = []
        Y_sum_label = []

        for y in testY:
            Y_label += y
            Y_sum_label.append(sum(y))

        imgset = []
        fgset = _fgset[1:len(_fgset) - 1]
        colordp = _colordp[1:len(_colordp) - 1]
        for i in range(len(fgset)):
            rect, cont = self.segmentation_blob(fgset[i], param)
            tmp = colordp[i].copy()

            pred = Y_pred_frame[i]
            gt = groundtruth[i]
            for j in range(len(rect)):
                r = rect[j]
                cv2.rectangle(tmp, (r[0], r[2]), (r[1], r[3]), tools.green, 1)

                msg_pred = 'Pred: ' + str(pred[j])
                msg_gt = 'GT: ' + str(gt[j])
                cv2.putText(tmp, msg_pred, (r[0], r[2]),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.0, tools.blue)
                cv2.putText(tmp, msg_gt, (r[0] + 10, r[2]),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.0, tools.red)

            imgset.append(tmp)
        images.display_img(imgset, 300)