Example #1
    def validateFace(self, dataFile, labelsFile, n, width, height, itemNo = -1):
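        """Validate the perceptron's face weights; return accuracy over n items, or the prediction for a single image when itemNo is given."""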
        items = readData.loadDataFile(dataFile, n, width, height)
        labels = readData.loadLabelsFile(labelsFile, n)

        function = 0
        guess = 0
        correct = 0

        if itemNo != -1:
            image = items[itemNo]
            features = getFeatures.getFeatures(image.pixels, width, height)
            for j in range(len(features)):
                function += (self.weightsFace[j] * features[j])
            function += self.weightsFace[len(features)]
            if function >= 0: return 1
            else: return 0

        for i in range(len(items)):
            image = items[i]
            features = getFeatures.getFeatures(image.pixels, width, height)
            for j in range(len(features)):
                function += (self.weightsFace[j] * features[j])
            function += self.weightsFace[len(features)]
            if function >= 0: guess = 1
            else: guess = 0
            if guess == labels[i]: correct += 1
            function = 0
            
        return correct/n
Example #2
def objectTracking():
    numObjects = 2
    maxCorners = 20
    qualityLevel = 0.01
    minDistance = 8
    filename = "Easy.mp4"
    numFrames = 2

    #load video
    frameData, length, h, w, fps = loadVideo(filename, 0, numFrames)

    #output video
    output = np.zeros([numFrames, h, w, 3])

    #get bounding boxes
    init_img = frameData[0, :, :, :]
    init_img_gray = cv2.cvtColor(frameData[0, :, :, :], cv2.COLOR_BGR2GRAY)
    bbox_list = []
    bbox_pts = []
    for i in range(0, numObjects):
        bbox_list, bbox_pts, init_img = getBoundingBox(init_img, bbox_list,
                                                       bbox_pts)
    startXs, startYs, _ = getFeatures(init_img_gray, bbox_list, maxCorners,
                                      qualityLevel, minDistance)

    output[0, :, :, :] = init_img
    cnt = 0
    i = 0
    curr_bbox = bbox_pts

    while i < length - 1:

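        # when the current chunk of frames is exhausted, load the next chunk and re-detect features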
        if cnt == numFrames - 1:
            frameData, _, _, _, _ = loadVideo(filename, cnt + 1, numFrames)
            startXs, startYs, _ = getFeatures(init_img_gray, bbox_list,
                                              maxCorners, qualityLevel,
                                              minDistance)
            cnt = 0

        img1 = frameData[i, :, :, :]
        img2 = frameData[i + 1, :, :, :]
        img1_gray = cv2.cvtColor(frameData[i, :, :, :], cv2.COLOR_BGR2GRAY)
        img2_gray = cv2.cvtColor(frameData[i + 1, :, :, :], cv2.COLOR_BGR2GRAY)
        [newXs, newYs] = estimateAllTranslation(startXs, startYs, img1, img2)
        [Xs, Ys,
         new_bbox] = applyGeometricTransformation(startXs, startYs, newXs,
                                                  newYs, curr_bbox)

        plotBbox(img2, new_bbox)

        output[i + 1, :, :, :] = img2

        # carry the tracked box forward and advance to the next frame pair
        startXs = newXs
        startYs = newYs
        curr_bbox = new_bbox
        cnt += 1
        i += 1
Example #3
    def validateFace(self, dataFile, labelsFile, n, width, height, itemNo=-1):
        items = readData.loadDataFile(dataFile, n, width, height)
        labels = readData.loadLabelsFile(labelsFile, n)

        like = 0.0
        guess = 0
        correct = 0

        if itemNo != -1:
            image = items[itemNo]
            features = getFeatures.getFeatures(image.pixels, width, height)
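            # start the running products at a huge constant so repeated
            # multiplication by tiny probabilities does not underflow to zero;
            # the constants cancel in the likelihood ratio computed below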
            productTrue = pow(10, 300.0)
            productFalse = pow(10, 300.0)
            for j in range(len(self.featurepd)):
                if features[j] > 0:
                    productTrue *= self.featurepd[j][1]
                    productFalse *= self.featurepd[j][0]
                else:
                    productTrue *= (1 - self.featurepd[j][1])
                    productFalse *= (1 - self.featurepd[j][0])

            like = (productTrue * self.faceProb) / (productFalse *
                                                    (1 - self.faceProb))

            if like >= 1:
                return 1
            else:
                return 0

        for i in range(len(items)):
            image = items[i]
            features = getFeatures.getFeatures(image.pixels, width, height)
            productTrue = pow(10, 300.0)
            productFalse = pow(10, 300.0)
            for j in range(len(self.featurepd)):
                if features[j] > 0:
                    productTrue *= self.featurepd[j][1]
                    productFalse *= self.featurepd[j][0]
                else:
                    productTrue *= (1 - self.featurepd[j][1])
                    productFalse *= (1 - self.featurepd[j][0])

            like = (productTrue * self.faceProb) / (productFalse *
                                                    (1 - self.faceProb))

            if like >= 1:
                guess = 1
            else:
                guess = 0
            if guess == labels[i]:
                correct += 1

        return correct / n
Example #4
	def track_object(self):
		template = cv2.cvtColor(self.img1,cv2.COLOR_BGR2GRAY)
		out = cv2.VideoWriter('Results/'+self.folder_name+'_results.avi',0,cv2.VideoWriter_fourcc('M','J','P','G'),20.0,(template.shape[1],template.shape[0]))
		self.startXs, self.startYs = getFeatures(template,self.bbox[0],use_shi=False)
		for i in range(1,self.no_frames):
			print('Tracking for frame: %s'%str(i))
			self.frame1 = cv2.imread(self.folder_name+'/%s.png'%str(i-1))
			self.frame2 = cv2.imread(self.folder_name+'/%s.png'%str(i))
			newXs, newYs = estimateAllTranslation(self.startXs,self.startYs, self.frame1, self.frame2)
			ag = applyGeometricTransformations(self.startXs, self.startYs, newXs, newYs, self.bbox[i-1])
			Xs, Ys, self.bbox[i] = ag.transform()
			self.center[i]= np.empty((self.no_object,1,2), dtype=int)
			self.center2[i]= np.empty((self.no_object,1,2), dtype=int)

			# updating coordinates
			self.startXs = Xs
			self.startYs = Ys

			# updating feature points
			no_features_left = np.sum(Xs!=-1)
			print('No. of features: %d'%no_features_left)
			if no_features_left<15:
				print('Generate new features')
				self.startXs, self.startYs = getFeatures(cv2.cvtColor(self.frame2,cv2.COLOR_BGR2GRAY),self.bbox[i])

			# drawing bounding box and visualising feature point for each object
			frames_draw = self.frame2.copy()
			
			for j in range(self.no_object):
				x,y,w,h = cv2.boundingRect(self.bbox[i][j,:,:].astype(int))
				self.center[i][j,:,:]= np.array([x,y])
				self.center2[i][j,:,:]= np.array([x,y+h])
				frames_draw = cv2.rectangle(frames_draw, (x,y),(x+w,y+h), (255,0,0), 2)
				for k in range(self.startXs.shape[0]):
					frames_draw= cv2.circle(frames_draw, (int(self.startXs[k,j]),int(self.startYs[k,j])),3,(0,255,0),thickness=2)
				center_points1=[]
				center_points2=[]
				for l in range(i+1):
					center_points1.append(self.center[l][j,:,:])
					center_points2.append(self.center2[l][j,:,:])
				if(j==1):
					frames_draw = cv2.drawContours(frames_draw, np.array(center_points1), -1, (0,0,255), 3)
					frames_draw = cv2.drawContours(frames_draw, np.array(center_points2), -1, (0,0,255), 3)
				else:
					frames_draw = cv2.drawContours(frames_draw, np.array(center_points1), -1, (255,255,255), 3)
					frames_draw = cv2.drawContours(frames_draw, np.array(center_points2), -1, (255,255,255), 3)
			cv2.imshow("window", frames_draw)
			cv2.waitKey(10)
			out.write(frames_draw)

		out.release()
Example #5
def train(features):

    train_data = None
    for i in range(10):
        feature, tempo = getFeatures("alda-ml/samples/" + str(i+1) + "/out.wav")
        feature = np.reshape(feature[-1], -1).astype('float32')
        if train_data is not None:
            train_data = np.vstack([train_data, feature])
        else:
            train_data = np.array(feature)
    # print(train_data.shape)

    train_labels = None
    for i in range(10):
        with open("alda-ml/samples/" + str(i+1) + "/score.json") as f:
            score = json.loads(f.read())
            note = score['events'][0]['midi-note']
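            # one-hot encode the MIDI note over the 88 piano keys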
            label = [0 for x in range(88)]
            label[note] = 1
        if train_labels is not None:
            train_labels = np.vstack([train_labels, label])
        else:
            train_labels = np.array(label)
    # print(train_labels.shape)

    clf = tf.estimator.Estimator(
        model_fn=cnn, model_dir="tmp/coolModel")

    tensorTrain(train_data, train_labels, clf)

    return clf
Example #6
    def trainFace(self, dataFile, labelsFile, n, width, height, randomList):
        items = readData.loadDataFile(dataFile, n, width, height)
        labels = readData.loadLabelsFile(labelsFile, n)
        self.featurepd = []
        numberFaces = 0

        # featureTimes is a list of tuples where the first number is the number of times the feature occurs when the image is not a face
        # and the second number is the number of times the feature occurs when the image is a face
        featureTimes = []
        numberFeatures = (math.ceil(height / getFeatures.DIM)) * math.ceil(
            (width / getFeatures.DIM))
        for i in range(numberFeatures):
            featureTimes.append([0.0, 0.0])

        for count in randomList:
            image = items[count]
            if labels[count] == 1:
                numberFaces += 1.0
            features = getFeatures.getFeatures(image.pixels, width, height)
            for i in range(len(features)):
                if features[i] > 0:
                    if labels[count] == 1:
                        featureTimes[i][1] += 1
                    else:
                        featureTimes[i][0] += 1

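        # Laplace-smoothed estimates: [P(feature | not face), P(feature | face)]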
        for i in range(len(featureTimes)):
            self.featurepd.append([
                (featureTimes[i][0] + self.k) /
                (len(randomList) - numberFaces + self.k),
                (featureTimes[i][1] + self.k) / (numberFaces + self.k)
            ])

        self.faceProb = numberFaces / len(randomList)
Example #7
def main():
    # --------------------------------------Get Preprocessed Data------------------------------------------------------
    a = GetDirs()
    f1 = a.getDirs('DataFolder/CGMSeriesLunch')
    f2 = a.getDirs('DataFolder/CGMTimeSeries')
    zippedTups = a.zipFiles(f1, f2)
    allArraysLunch = np.array([])
    allArraysdateNum = np.array([])
    allArraysLunch = np.concatenate(
        [a.returnArrays(i[0], i[1], 'Lunch') for i in zippedTups])
    allArraysdateNum = np.concatenate(
        [a.returnArrays(i[0], i[1]) for i in zippedTups])

    # ------------------------------------------Get Features-----------------------------------------------------------

    getF = getFeatures(allArraysLunch)
    F1 = getF.fft()
    F2 = getF.entropy()
    F3 = getF.skewness()
    F4 = getF.movingStd()

    # ----------------------------------------------Get PCA------------------------------------------------------------

    PCs = getPCA(np.concatenate((F1, F2[:, None], F3[:, None], F4),
                                axis=1)).pca()

    # -----------------------------Get PCA Results in decreasing order of accuracy--------------------------------------
    # print(getPCA(np.concatenate((F1, F2[:, None], F3[:, None], F4), axis=1)).results())

    # --------------------------------------------Plot PCA--------------------------------------------------------------
    plotDiag("Principal Component Analysis", 'Red', PCs['PC1'], 'CGM Data',
             'PCA Values', 'PC1').plot()
Example #8
def load_letter(folder):
    """Load the data for a single letter label."""
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), 41), dtype=np.float32)
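    # one row per image: getFeatures is expected to return a 41-dimensional feature vector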
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            im = cv2.imread(image_file)
            if im.shape != (image_size, image_size, 3):
                raise Exception('%s\nUnexpected image shape: %s' %
                                (image_file, str(im.shape)))
            features = getFeatures(im)
            dataset[num_images, :] = features
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e,
                  '- it\'s ok, skipping.')

    dataset = dataset[:num_images, :]
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
Example #9
def addNewClass(jsonGMM, newClassName, newClassData=None):
    """
    Incorporate a new class into an existing GMM JSON model (i.e. list not dict!)
    @param jsonGMM: List containing GMMs, see createJSON.py for exact structure
    @param newClassName: Name of the new class. Has to match the name of the folder where the sound files are located
    @param newClassData: Already extracted MFCC Features for the given newClassName. If not provided, a feature extraction will be performed
    @return: List containing the added class as the last element
    """
    
    jGMM = copy.deepcopy(jsonGMM)

    if newClassData is None:
        newClassData = getFeatures(newClassName)

    scaler = preprocessing.StandardScaler()
    scaler.mean_ = jGMM[0]["scale_means"]
    scaler.std_ = jGMM[0]["scale_stddevs"]
    
    X_train = scaler.transform(newClassData)

    n_train_new = X_train.shape[0]
    n_features = X_train.shape[1]
    n_components = 16

    print("Start training new class " + newClassName)

    newClf = GMM(n_components = n_components, covariance_type='full', n_iter=100) # set n_iter to 100 to speed up the training
    newClf.fit(X_train)

    """ Update the dict containing mapping of class names: """
    newClassDict = dict(jGMM[0]['classesDict'])
    newClassDict[newClassName] = len(newClassDict.values())

    new_n_classes = jGMM[0]["n_classes"] + 1
    
    """ Update classesDict and n_classes for all old classes: """
    for i in range(len(jGMM)):
        jGMM[i]["classesDict"] = newClassDict
        jGMM[i]["n_classes"] = new_n_classes

    """ Add the new class to the list: """
    jGMM.append({})   
    
    jGMM[-1]["classesDict"] = newClassDict
    jGMM[-1]["n_classes"] = new_n_classes
    jGMM[-1]["scale_means"] = scaler.mean_
    jGMM[-1]["scale_stddevs"] = scaler.std_
    
    jGMM[-1]["n_components"] = n_components
    jGMM[-1]["n_features"] = n_features
    jGMM[-1]["n_train"] = n_train_new    
    
    jGMM[-1]["weights"] = newClf.weights_.tolist()
    jGMM[-1]["means"] = newClf.means_.tolist()
    jGMM[-1]["covars"] = newClf.covars_.tolist()

    return jGMM
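
A minimal usage sketch (hypothetical variable names; it assumes jsonGMM holds the list produced by createJSON.py and that a folder of sound files named "door_knock" exists):

    updatedGMM = addNewClass(jsonGMM, "door_knock")
    print(updatedGMM[-1]["n_classes"], updatedGMM[-1]["classesDict"])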
Example #10
def train():
    features=[]
    
    for i in range(1,35):
        if i==6 or i==8:
            continue
        filename='SSM'+str(i)
        features.append(GF.getFeatures(filename))
        print(i)
        
    numpyfeatures=np.array(features)
    
    np.savetxt('TrainingData', numpyfeatures)
    
    print(numpyfeatures.shape)
Example #12
def getDataSet():
    X, Y = getFeatures()
    features = len(X[0])
    cases = len(X)

    DS = SupervisedDataSet(features, 1)

    i = 0
    while (i < cases):
        DS.addSample(X[i], Y[i])
        i += 1

    TrainDS, TestDS = DS.splitWithProportion(0.7)

    return TrainDS, TestDS
Example #13
def BasicPCAGraphs(vecnumber):
    countyvecs, diagvecs, diagdict = getFeatures.getFeatures()
    pca = PCA(n_components=3)
    standardscaler = preprocessing.StandardScaler()
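    # for each payment method: standardize the vectors, project onto 3 principal components, and plot the first two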

    veclist = getVecList(countyvecs, vecnumber, "Medicare")
    vecs = standardscaler.fit_transform(veclist)
    Medicareprincipalcomponents = pca.fit_transform(vecs)
    Medicarevar = pca.explained_variance_ratio_
    x1, y1, z1 = Medicareprincipalcomponents.T

    veclist = getVecList(countyvecs, vecnumber, "Medicaid")
    vecs = standardscaler.fit_transform(veclist)
    Medicaidprincipalcomponents = pca.fit_transform(vecs)
    Medicaidvar = pca.explained_variance_ratio_
    x2, y2, z2 = Medicaidprincipalcomponents.T

    veclist = getVecList(countyvecs, vecnumber, "Uninsured")
    vecs = standardscaler.fit_transform(veclist)
    Uninsuredprincipalcomponents = pca.fit_transform(vecs)
    Uninsuredvar = pca.explained_variance_ratio_
    x3, y3, z3 = Uninsuredprincipalcomponents.T

    plt.plot(x1, y1, 'ro', x2, y2, 'bo', x3, y3, 'go')
    plt.title("PCA of Procedure Based Vectors, Colored By Payment Method")
    plt.show()

    diagveclist = getVecList(diagvecs, -1, "Medicare")
    vecs = standardscaler.fit_transform(diagveclist)
    Medicareprincipalcomponents = pca.fit_transform(vecs)
    Medicarevar = pca.explained_variance_ratio_
    x1, y1, z1 = Medicareprincipalcomponents.T

    diagveclist = getVecList(diagvecs, -1, "Medicaid")
    vecs = standardscaler.fit_transform(diagveclist)
    Medicaidprincipalcomponents = pca.fit_transform(vecs)
    Medicaidvar = pca.explained_variance_ratio_
    x2, y2, z2 = Medicaidprincipalcomponents.T

    diagveclist = getVecList(diagvecs, -1, "Uninsured")
    vecs = standardscaler.fit_transform(diagveclist)
    Uninsuredprincipalcomponents = pca.fit_transform(vecs)
    Uninsuredvar = pca.explained_variance_ratio_
    x3, y3, z3 = Uninsuredprincipalcomponents.T
    plt.plot(x1, y1, 'ro', x2, y2, 'bo', x3, y3, 'go')
    plt.title("PCA of Diagnosis Based Vectors, Colored By Payment Method")

    plt.show()
Example #14
def displayFeatures(filename):
    numObjects = 2
    maxCorners = 20
    qualityLevel = 0.01
    minDistance = 8

    im = cv2.imread(filename)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    bbox_list = []
    bbox_pts = []
    for i in range(0, numObjects):
        bbox_list, bbox_pts, newImage = getBoundingBox(im, bbox_list, bbox_pts)
    print(bbox_pts)
    x, y, _ = getFeatures(im, bbox_list, maxCorners, qualityLevel, minDistance)
    print(x)
    print(y)
Example #15
def test_detect(vidfname, outfname):
    #vid1 = imageio.get_reader("shifted.mp4", 'ffmpeg')
    vid1 = imageio.get_reader(vidfname, 'ffmpeg')

    prevframe = None
    prevbboxes = None
    prevXs, prevYs = np.zeros((1, 1)), np.zeros((1, 1))
    allimg = []
    for i, frame in enumerate(vid1):
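        # re-detect the face every 30 frames, or sooner if fewer than 20 features survive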
        print(i)
        if i % 30 == 0 or count_minfeats(prevXs) < 20:
            print("RECALCULATING")
            try:
                newbboxes = detectFace(frame)
            except AttributeError:
                print("NO NEW FACE FOUND! Trying again on next frame")
                continue
            greyframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            Xs, Ys = getFeatures(greyframe, newbboxes)
            print(Xs.shape)
            prevXs, prevYs = Xs, Ys
        else:
            Xs, Ys = estimateAllTranslation(prevXs, prevYs, prevframe, frame)
            Xs, Ys, newbboxes = applyGeometricTransformation(
                prevXs, prevYs, Xs, Ys, prevbboxes)
            #plot_all(frame, newbboxes, Xs, Ys, display=True)

        allimg.append(plot_all(frame, newbboxes, Xs, Ys, display=False))
        # if i==0 or (i > 9 and not i % 10):
        # plot_features(frame, Xs, Ys, display=True)
        #     print Xs-prevXs

        prevbboxes = newbboxes
        prevframe = frame
        prevXs, prevYs = Xs.copy(), Ys.copy()


#        if i > 60:
#            print "Quitting early for testing purposes"
#            break

    imageio.mimsave(outfname, allimg)
    return
Example #16
    def trainFace(self, dataFile, labelsFile, n, width, height, rList):
        items = readData.loadDataFile(dataFile, n, width, height)
        old_labels = readData.loadLabelsFile(labelsFile, n)
        self.weightsFace = []

        numberFeatures = (math.ceil(height / getFeatures.DIM)) * math.ceil((width / getFeatures.DIM))
        for i in range(numberFeatures + 1):
            self.weightsFace.append(0)

        labels = []
        for r in rList:
            labels.append(old_labels[r])

        # this array contains the features for every image
        features = []
        for i in rList:
            features.append(getFeatures.getFeatures(items[i].pixels, width, height))
        loop = 0
        index = 0
        # traverses all the items and updates the weights accordingly
        # stops when a full loop is completed without updates
        iterations = 0
        while loop < len(rList) and iterations < 10 * len(rList):
            iterations += 1
            function = 0
            for i in range(numberFeatures):
                function += self.weightsFace[i] * features[index][i]
            function += self.weightsFace[numberFeatures]
            if (function >= 0 and labels[index] == 1) or (function < 0 and labels[index] == 0):
                loop += 1
            elif function < 0 and labels[index] == 1:
                for i in range(numberFeatures):
                    self.weightsFace[i] += features[index][i]
                self.weightsFace[numberFeatures] += 1
                loop = 0
            else:
                for i in range(numberFeatures):
                    self.weightsFace[i] -= features[index][i]
                self.weightsFace[numberFeatures] -= 1
                loop = 0
            if index == len(rList) - 1: index = 0
            else: index += 1
Example #17
def learnFeatureExistance(busImportantFeatures, userImportantFeatures, trainReviews, path):
    logger = logging.getLogger('signature.lFE.learnFE')
    logger.info('starting learnFeatureExistance from %d reviews'%len(trainReviews))
    fsw = featureStructureWorker()
    modelDict = dict()
    trainAveragesDict = dict()
    
    
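    # fit one existence model per active feature indicator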
    for f, feature in enumerate(fsw.featureIdicator):
        if not fsw.featureIdicator[feature]:
            continue
        logger.debug('Start working with %s'%feature)
        
        #get data
        X, Y, trainAveragesDict[feature] = getFeatures(logger, feature, trainReviews, busImportantFeatures, userImportantFeatures,
                                          trainAverages = {}, is_train = True)
        logger.debug('Got features for %d reviews'%len(X))
#        #cross validation
#        indicator = range(len(X))
#        random.shuffle(indicator)
#        thres = int(len(indicator)*0.8)
#        trainX = np.array([X[i] for i in indicator[:thres]])
#        trainY = np.array([Y[i] for i in indicator[:thres]])
#        testX = np.array([X[i] for i in indicator[thres:]])
#        testY = np.array([Y[i] for i in indicator[thres:]])
        
        #Logistic Regression
        #bestThres,bestF1,logmodel = getBestLogModel(logger, feature, trainX, trainY, testX, testY, X, Y, path)
        bestThres,bestF1,logmodel = getBestLogModel(logger, feature, X, Y, path)
        #bestThresSVM,bestF1SVM,svmmodel = getBestSVMModel(logger, feature, X, Y, path)
        
#       crossValidation(logger, np.array(X), np.array(Y))
        
        
        modelDict[feature] = [bestThres,bestF1,logmodel]
        
#        print f
#        if f > 6:
#            break
        
    return trainAveragesDict, modelDict
Example #18
def learnTopicExistence(busImportantFeatures, userImportantFeatures, trainReviews, path):
    logger = logging.getLogger('signature.lTE.learnTopicExistence')
    logger.info('starting learnTopicExistence from %d reviews'%len(trainReviews))
    fsw = featureStructureWorker()
    modelDict = dict()
    
    for f, topic in enumerate(fsw.featureIdicator):
        if not fsw.featureIdicator[topic]:
            continue
        logger.debug('Start working with %s'%topic)
        
        #get data
        X, Y = getFeatures(logger, topic, trainReviews, busImportantFeatures, userImportantFeatures,
                                          trainAverages = {}, is_train = True)
        logger.debug('Got features for %d reviews'%len(X))
        
        
        modelDict[topic] = getBestModel(logger, topic, X, Y, path)
    
    #print modelDict
    return modelDict
Example #19
def objectTracking(rawVideo,
                   draw_bb=False,
                   play_realtime=False,
                   save_to_file=False):
    # initialize
    n_frame = 400
    frames = np.empty((n_frame, ), dtype=np.ndarray)
    frames_draw = np.empty((n_frame, ), dtype=np.ndarray)
    bboxs = np.empty((n_frame, ), dtype=np.ndarray)
    for frame_idx in range(n_frame):
        _, frames[frame_idx] = rawVideo.read()

    # draw rectangle ROI for target objects, or use the default object initialization
    if draw_bb:
        n_object = int(input("Number of objects to track:"))
        bboxs[0] = np.empty((n_object, 4, 2), dtype=float)
        for i in range(n_object):
            (xmin, ymin, boxw, boxh) = cv2.selectROI("Select Object %d" % (i),
                                                     frames[0])
            cv2.destroyWindow("Select Object %d" % (i))
            bboxs[0][i, :, :] = np.array([[xmin, ymin], [xmin + boxw, ymin],
                                          [xmin, ymin + boxh],
                                          [xmin + boxw,
                                           ymin + boxh]]).astype(float)
    else:
        n_object = 1
        bboxs[0] = np.array([[[291, 187], [405, 187], [291, 267],
                              [405, 267]]]).astype(float)

    if save_to_file:
        out = cv2.VideoWriter('output.avi', 0,
                              cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20.0,
                              (frames[0].shape[1], frames[0].shape[0]))

    # Start from the first frame, do optical flow for every two consecutive frames.
    startXs, startYs = getFeatures(cv2.cvtColor(frames[0], cv2.COLOR_RGB2GRAY),
                                   bboxs[0],
                                   use_shi=False)
    for i in range(1, n_frame):
        print('Processing Frame', i)
        newXs, newYs = estimateAllTranslation(startXs, startYs, frames[i - 1],
                                              frames[i])
        Xs, Ys, bboxs[i] = applyGeometricTransformation(
            startXs, startYs, newXs, newYs, bboxs[i - 1])

        # update coordinates
        startXs = Xs
        startYs = Ys

        # update feature points as required
        n_features_left = np.sum(Xs != -1)
        print('# of Features: %d' % n_features_left)
        if n_features_left < 15:
            print('Generate New Features')
            startXs, startYs = getFeatures(
                cv2.cvtColor(frames[i], cv2.COLOR_RGB2GRAY), bboxs[i])

        # draw bounding box and visualize feature point for each object
        frames_draw[i] = frames[i].copy()
        for j in range(n_object):
            (xmin, ymin, boxw,
             boxh) = cv2.boundingRect(bboxs[i][j, :, :].astype(int))
            frames_draw[i] = cv2.rectangle(frames_draw[i], (xmin, ymin),
                                           (xmin + boxw, ymin + boxh),
                                           (255, 0, 0), 2)
            for k in range(startXs.shape[0]):
                frames_draw[i] = cv2.circle(
                    frames_draw[i], (int(startXs[k, j]), int(startYs[k, j])),
                    3, (0, 0, 255),
                    thickness=2)

        # imshow if to play the result in real time
        if play_realtime:
            cv2.imshow("win", frames_draw[i])
            cv2.waitKey(10)
        if save_to_file:
            out.write(frames_draw[i])

    if save_to_file:
        out.release()
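
A possible invocation, assuming rawVideo is an already-opened cv2.VideoCapture on a clip with at least 400 frames (the file name is hypothetical):

    cap = cv2.VideoCapture("medium.mp4")
    objectTracking(cap, draw_bb=True, play_realtime=True, save_to_file=True)
    cap.release()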
Example #20
def applyFeatureExistance(busImportantFeatures, userImportantFeatures, testReviews, modelDict, trainAveragesDict, path):
    logger = logging.getLogger('signature.aFE.applyFE')
    logger.info('starting applyFeatureExistance from %d reviews'%len(testReviews))
    fsw = featureStructureWorker()
    featureWeights = dict()
    featureF1 = dict()
    
    for i, feature in enumerate(fsw.featureIdicator):
        if not fsw.featureIdicator[feature]:
            continue
        logger.debug('Start working with %s'%feature)
        #get data
        X, Y = getFeatures(logger, feature, testReviews, busImportantFeatures, userImportantFeatures,
                                          trainAverages = trainAveragesDict[feature], is_train = False)
        
        #weight = frequency
        featureWeights[feature] = float(list(Y).count(1))/len(Y)
        
        Ypred = [x[1] for x in modelDict[feature][2].predict_proba(np.array(X))]
        Yreal = np.array(Y)
        
        Ybus = []
        for review in testReviews:
            busID = review['business_id']
            if busID in busImportantFeatures:
                pfreq = busImportantFeatures[busID]['featureFreq'].get(feature,0.0)
            else:
                pfreq = featureWeights[feature]
            Ybus.append(pfreq)
        
        featureF1[feature] = drawPR(feature,Yreal,Ypred,Ybus, modelDict[feature][0], path)
        
        for r, review in enumerate(testReviews):
            #reviewFeatures = fsw.getReviewFeaturesExistence(review['features'])
            review['exPredFeatures'] = review.get('exPredFeatures', {})
        
            existence = Yreal[r]
            #print Yreal[r], Ypred[r], modelDict[feature][0]
            if Ypred[r] >= modelDict[feature][0]:
                predictedExistence = 1
            else:
                predictedExistence = 0
                
            #check if feature important
            if existence + predictedExistence > 0.5:
                review['exPredFeatures'][feature] = [existence, predictedExistence]
                
            #print review['exPredFeatures']
            if not r%10000:
                logger.debug('%d reviews processed'%r)
        
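    # per-review Jaccard overlap between predicted and observed aspect sets, plus
    # TP/FP/FN counters for the signature method and three baselines
    # (all features, business aspects, business/user intersection)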
    Jaccard = list()
    Jaccard_weighted = list()
    Jaccard_baseline = list()
    Jaccard_baseline_weighted = list()
    TP = 0
    FP = 0
    FN = 0
    
    TP_all = 0
    FP_all = 0
    FN_all = 0
    
    TP_bus = 0
    FP_bus = 0
    FN_bus = 0
    
    TP_int = 0
    FP_int = 0
    FN_int = 0
    
    
    for r, review in enumerate(testReviews):
        Jaccard_intersection = 0.0
        Jaccard_union = 0.0
        
        Jaccard_intersection_weighted = 0.0
        Jaccard_union_weighted = 0.0
        
        Jaccard_intersection_baseline = 0.0
        Jaccard_union_baseline = 0.0
        
        Jaccard_intersection_baseline_weighted = 0.0
        Jaccard_union_baseline_weighted = 0.0
        
        busID = review['business_id']
        if busID in busImportantFeatures:
            busAspects = set([f for f in busImportantFeatures[busID]['featureFreq'] if busImportantFeatures[busID]['featureFreq'][f] > 10 and
                                       busImportantFeatures[busID]['sentiment'][f][1] > 1])
        else:
            busAspects = set([f for f in fsw.featureIdicator if fsw.featureIdicator[f]])
            
            
#        userID = review['user_id']
#        if userID in userImportantFeatures:
#            userAspects = set([f for f in userImportantFeatures[userID]['featureFreq'] if userImportantFeatures[userID]['featureFreq'][f] > 10 and
#                                       userImportantFeatures[userID]['sentiment'][f][1] > 1])
#        else:
#            userAspects = set([f for f in fsw.featureIdicator if fsw.featureIdicator[feature]])
        
            
        #interBU = userAspects.intersection(busAspects)
        #building INTERSECTION
        busID = review['business_id']
        if busID in busImportantFeatures:
            busImpAspects = set([f for f in busImportantFeatures[busID]['featureFreq'] if busImportantFeatures[busID]['featureFreq'][f] > 50 and
                                       busImportantFeatures[busID]['sentiment'][f][1] > 1])
            busIntAspects = set([f for f in busImportantFeatures[busID]['featureFreq'] if busImportantFeatures[busID]['featureFreq'][f] > 10 and
                                       busImportantFeatures[busID]['sentiment'][f][1] > 1])
        else:
            busImpAspects = set([f for f in fsw.featureIdicator if fsw.featureIdicator[f]])
            busIntAspects = set([f for f in fsw.featureIdicator if fsw.featureIdicator[f]])
            
            
        userID = review['user_id']
        if userID in userImportantFeatures:
            userAspects = set([f for f in userImportantFeatures[userID]['featureFreq'] if userImportantFeatures[userID]['featureFreq'][f] > 10 and
                                       userImportantFeatures[userID]['sentiment'][f][1] > 1])
        else:
            userAspects = set([f for f in fsw.featureIdicator if fsw.featureIdicator[f]])
        
        interBU = busImpAspects.union(userAspects.intersection(busIntAspects))
        
        
        for feature in review['exPredFeatures']:
            if review['exPredFeatures'][feature] == [1,1]:
                TP += 1
            elif review['exPredFeatures'][feature] == [0,1]:
                FP += 1
            if review['exPredFeatures'][feature] == [1,0]:
                FN += 1
            
            #baseline all
            if review['exPredFeatures'][feature][0] == 1:
                TP_all += 1
           
            #baseline business
            if feature in busAspects and review['exPredFeatures'][feature][0] == 1:
                TP_bus += 1
            elif feature in busAspects and review['exPredFeatures'][feature][0] == 0:
                FP_bus += 1
            elif feature not in busAspects and review['exPredFeatures'][feature][0] == 1:
                FN_bus += 1
            
            
            #baseline intersection
            if feature in interBU and review['exPredFeatures'][feature][0] == 1:
                TP_int += 1
            elif feature in interBU and review['exPredFeatures'][feature][0] == 0:
                FP_int += 1
            elif feature not in interBU and review['exPredFeatures'][feature][0] == 1:
                FN_int += 1
            #print TP_int, FP_int, FN_int
            
            
            if review['exPredFeatures'][feature] == [1,1]:
                Jaccard_intersection += 1.0
                Jaccard_intersection_weighted += featureWeights[feature]
            Jaccard_union += 1.0
            Jaccard_union_weighted += featureWeights[feature]
            
            if review['exPredFeatures'][feature][0] == 1:
                Jaccard_intersection_baseline  += 1.0
                Jaccard_intersection_baseline_weighted += featureWeights[feature]
        
        for feature in fsw.featureIdicator:
            if fsw.featureIdicator[feature]:
                FP_all += 1
                
                Jaccard_union_baseline += 1
                Jaccard_union_baseline_weighted += featureWeights[feature]
                
        
        if Jaccard_union:
            Jaccard.append(Jaccard_intersection/Jaccard_union)       
        if Jaccard_union_weighted:
            Jaccard_weighted.append(Jaccard_intersection_weighted/Jaccard_union_weighted)
        if Jaccard_union_baseline:
            Jaccard_baseline.append(Jaccard_intersection_baseline/Jaccard_union_baseline)
        if Jaccard_union_baseline_weighted:
            Jaccard_baseline_weighted.append(Jaccard_intersection_baseline_weighted/Jaccard_union_baseline_weighted)
    
    #SIGNATURE METHOD
    Precision = float(TP)/(TP+FP)
    Recall = float(TP)/(TP+FN)
    F1 = 2*Precision*Recall/(Precision+Recall)
    PreRec = [Precision,Recall,F1]

    #baseline ALL
    Precision_all = float(TP_all)/(TP_all+FP_all)
    Recall_all = float(TP_all)/(TP_all+FN_all)
    F1_all = 2*Precision_all*Recall_all/(Precision_all+Recall_all)
    PreRec_all = [Precision_all,Recall_all,F1_all]

    #baseline BUSINESS
    Precision_bus = float(TP_bus)/(TP_bus+FP_bus)
    Recall_bus = float(TP_bus)/(TP_bus+FN_bus)
    F1_bus = 2*Precision_bus*Recall_bus/(Precision_bus+Recall_bus)
    PreRec_bus = [Precision_bus,Recall_bus,F1_bus]

    #print TP_int, FP_int
    #baseline INTERSECTION
    Precision_int = float(TP_int)/(TP_int+FP_int)
    Recall_int = float(TP_int)/(TP_int+FN_int)
    F1_int = 2*Precision_int*Recall_int/(Precision_int+Recall_int)
    PreRec_int = [Precision_int,Recall_int,F1_int]
    
    
    return testReviews, featureWeights, [[np.average(Jaccard), np.average(Jaccard_weighted)],
                         [np.average(Jaccard_baseline), 
                          np.average(Jaccard_baseline_weighted)]], featureF1, [PreRec,PreRec_all,
                                                                               PreRec_bus, PreRec_int]
Example #21
def faceTracking(rawVideo):
    cap = cv2.VideoCapture(rawVideo)
    output = None
    pre_img = None

    # first frame
    ret, cur_img = cap.read()
    bbox = detectFace(cur_img)
    startXs, startYs = getFeatures(cur_img, bbox)

    # initialize video writer
    h, w, l = cur_img.shape
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    # change tracked_video name for each run
    tracked_video = './Output_Video/tracked_video.avi'
    output = cv2.VideoWriter(tracked_video, fourcc, 20, (w, h), True)

    # draw box on first frame
    imgwbox = drawBox(cur_img, bbox)
    output.write(imgwbox)
    pre_img = cur_img

    count = 0
    while (cap.isOpened()):
        ret, cur_img = cap.read()

        if not ret:
            break

        newXs, newYs = estimateAllTranslation(startXs, startYs, pre_img,
                                              cur_img)
        Xs, Ys, newbbox = applyGeometricTransformation(startXs, startYs, newXs,
                                                       newYs, bbox)

        box_features = np.array([])
        for i in range(len(Xs)):
            box_features = np.append(box_features, len(Xs[i]))

        print(sum(box_features))
        if sum(box_features) < 10:
            newbbox = detectFace(cur_img)
            Xs, Ys = getFeatures(cur_img, newbbox)

        pre_img = cur_img
        startXs = Xs
        startYs = Ys
        bbox = newbbox

        imgwbox = drawBox(cur_img, bbox)
        output.write(imgwbox)

        # print video record
        print('{} frame finished'.format(count))
        count += 1

    # close video writer
    cv2.destroyAllWindows()
    cap.release()
    output.release()

    return tracked_video
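
A possible call, assuming the path can be opened by cv2.VideoCapture and that the ./Output_Video/ directory exists (the clip path is hypothetical):

    tracked = faceTracking('Data/Easy/TheMartian.mp4')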
Example #22
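	# fragment of a loop body: parse each <script> item with BeautifulSoup,
	# extract features from the cleaned script text, and build a labelled row in currVal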
	currVal=[]
	#print str(len(item))
	#print
	#if d >=int(start) and d< int(end):
		
	if d>=0:
		#print item
		print
		soup = BeautifulSoup(item)
		script = soup.find("script")
		ret ={}
		#condition for top level script
		if script is not None:
			if script.parent.name == "head":
				actItem = script.text
				ret = getFeatures.getFeatures(cleanString(actItem))
			else:
				ret = getFeatures.getFeatures(cleanString(item))
		else:
			ret = getFeatures.getFeatures(cleanString(item))

		#print ".....................................................SCRIPT COUNT"  + str(d+1)
		#skip this entry if could not be parsed
		if len(ret.keys())!=0:
			valid+=1
			for key in ret.keys():
				currVal.append(ret[key])
				#print "current key is " + str(key)  + " and value is "  + str(ret[key])
			#append the label ("0") for this script
			currVal.append("0")
			
Example #23
def faceTracking(rawVideo):
    #TODO: Your code here
    import numpy as np
    import cv2
    import scipy
    import matplotlib.pyplot as plt
    import matplotlib
    from detectFace import detectFace
    from getFeatures import getFeatures
    from estimateAllTranslation import estimateAllTranslation
    from applyGeometricTransformation import applyGeometricTransformation
    frameSet = []  #list to store all frames of the input video
    newFrameSet = []  #list to store all frames of the output video
    tf = True
    plt.ioff()
    while tf:  #read each frame in the input video
        tf, frame = rawVideo.read()
        frameSet.append(frame)
    frameSet = frameSet[:-1]

    bbox = detectFace(frameSet[0])  #detect bounding box in the first frame
    gray = cv2.cvtColor(
        frameSet[0],
        cv2.COLOR_BGR2GRAY)  #convert first frame to gray scale image
    x, y = getFeatures(
        gray,
        bbox)  #extract feature points from gray scale image and bounding box

    #drawing
    plt.imshow(cv2.cvtColor(frameSet[0], cv2.COLOR_BGR2RGB))
    [r1b, c1b, d1b] = np.asarray(bbox.shape)
    for i in range(r1b):  #plot bounding box and feature points
        b = bbox[i, :, :]
        xloc = x[:, i]
        yloc = y[:, i]
        facebb = matplotlib.patches.Polygon(b, closed=True, fill=False)
        #      facebb.set_edgecolor('w')#uncomment if bounding box color blends with black background
        features = plt.plot(xloc, yloc, 'w.', ms=1)
        plt.gca().add_patch(facebb)
    plt.axis('off')
    plt.savefig("temp.png", dpi=300, bbox_inches="tight")
    img = cv2.imread("temp.png")
    plt.close()
    newFrameSet.append(img)

    #getting features and transforming
    for k in range(
            1, len(frameSet)):  #iterate through all frames of the input video
        newXs, newYs = estimateAllTranslation(x, y, frameSet[k - 1],
                                              frameSet[k])
        Xs, Ys, newbbox = applyGeometricTransformation(x, y, newXs, newYs,
                                                       bbox)
        plt.imshow(cv2.cvtColor(frameSet[k], cv2.COLOR_BGR2RGB))
        print(len(Xs))
        for j in range(r1b):
            b = newbbox[j, :, :]
            xloc = Xs[:, j]
            yloc = Ys[:, j]
            facebb = matplotlib.patches.Polygon(b, closed=True, fill=False)
            #          facebb.set_edgecolor('w')#uncomment if bounding box color blends with black background
            features = plt.plot(xloc, yloc, 'w.', ms=1)
            plt.gca().add_patch(facebb)
        plt.axis('off')
        plt.savefig("temp.png", dpi=300, bbox_inches="tight")
        img = cv2.imread("temp.png")
        plt.close()
        newFrameSet.append(img)
        x = Xs
        y = Ys
        bbox = newbbox

    [height, width, layer] = np.asarray(newFrameSet[0].shape)
    trackedVideo = cv2.VideoWriter(
        'output.mp4', cv2.VideoWriter_fourcc(*'MP4V'), 30,
        (width, height))  #write frames to a video in mp4 format and 30fps
    for m in range(len(newFrameSet)):
        trackedVideo.write(newFrameSet[m].astype('uint8'))
        cv2.destroyAllWindows()
    trackedVideo.release()

    return trackedVideo
Example #24
def begin(filename):
    illumination_correction.illu_Correct(filename)
    Otsu_Segmentation.segment(filename)
    getFeatures.getFeatures(filename)
Example #25
def objectTracking(rawVideo):

    video_arr = get_video_as_numpy(rawVideo)

    img1 = video_arr[0]
    img1_grey = img1.dot([0.299, 0.587, 0.114])
    h, w = img1_grey.shape

    raw_bbox, class_names = mrcnn_detect(img1)

    bbox = np.zeros((len(raw_bbox), 4, 2), dtype=int)
    for k in range(len(raw_bbox)):
        x_low, y_low, x_high, y_high = raw_bbox[k]
        bbox[k, 0, :] = (x_low, y_low)
        bbox[k, 1, :] = (x_high, y_low)
        bbox[k, 2, :] = (x_high, y_high)
        bbox[k, 3, :] = (x_low, y_high)

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=500,
                          qualityLevel=0.0001,
                          minDistance=3,
                          blockSize=3)
    # Parameters for Lucas-Kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    p0 = cv2.goodFeaturesToTrack(img1_grey.astype('uint8'),
                                 mask=None,
                                 **feature_params)

    bg_mask = bgdMask(bbox, img1_grey)
    plt.figure()
    plt.imshow(bg_mask)
    plt.show()

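    # keep only the detected corners that fall inside the background mask;
    # these background points feed the camera-motion check in IsMoving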
    pxs = np.clip(p0[:, :, 1], 0, h - 1).astype(int)
    pys = np.clip(p0[:, :, 0], 0, w - 1).astype(int)

    is_bg = bg_mask[pxs, pys]
    bg_p0 = p0[is_bg].reshape(-1, 1, 2)

    bbox_h_threshold = abs(bbox[0][1][0] - bbox[0][0][0]) * 2.5
    bbox_w_threshold = abs(bbox[0][3][1] - bbox[0][0][1]) * 2.5

    startXs, startYs = getFeatures(img1_grey, bbox)

    starting_num_features = startXs.shape[0]

    oldXs, oldYs = startXs, startYs
    oldFrame = img1.copy()
    for obj in bbox:

        x1, y1 = int(np.round(obj[0][1])), int(np.round(obj[0][0]))
        x2, y2 = int(np.round(obj[2][1])), int(np.round(obj[2][0]))
        cv2.rectangle(img1, (x1, y1), (x2, y2), (255, 150, 150), 1)
    n, f = startXs.shape
    for i in range(n):
        for j in range(f):
            if startXs[i][j] == -1:
                continue

    newVideoName = 'output_videos/' + rawVideo.split('/')[1].split(
        '.')[-2] + '_result' + '.avi'

    _, h, w, _ = video_arr.shape
    writer = cv2.VideoWriter(newVideoName,
                             cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                             25, (w, h),
                             isColor=True)
    writer.write(img1[:, :, [2, 1, 0]])
    traj_x = []
    traj_y = []

    for idx in range(1, len(video_arr)):
        newFrame = video_arr[idx]
        newXs, newYs = estimateAllTranslation(oldXs, oldYs, oldFrame, newFrame)
        new_grey = newFrame.dot([0.299, 0.587, 0.114])
        old_gray = oldFrame.dot([0.299, 0.587, 0.114])

        bg_p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray.astype('uint8'),
                                                  new_grey.astype('uint8'),
                                                  bg_p0, None, **lk_params)

        restXs, restYs, newbbox = applyGeometricTransformation(
            oldXs, oldYs, newXs, newYs, bbox)

        if len(bg_p0) == 0:
            break
        movingBox = IsMoving(bg_p0[st == 1], bg_p1[st == 1], bbox, newbbox,
                             0.5, new_grey)

        traj_x += list(restXs.flatten())
        traj_y += list(restYs.flatten())
        if len(restXs) < 2:
            break

        bg_mask = bgdMask(newbbox, new_grey)
        p0 = cv2.goodFeaturesToTrack(new_grey.astype('uint8'),
                                     mask=None,
                                     **feature_params)

        try:
            pxs = np.clip(p0[:, :, 1], 0, h - 1).astype(int)
            pys = np.clip(p0[:, :, 0], 0, w - 1).astype(int)

            is_bg = bg_mask[pxs, pys]
            bg_p0 = p0[is_bg].reshape(-1, 1, 2)
        except:
            break

        oldFrame = newFrame.copy()
        oldXs, oldYs = restXs, restYs

        bbox = newbbox.copy()
        old_class_names = class_names
        if idx % 50 == 0:
            raw_bbox, class_names = mrcnn_detect(video_arr[idx])
            bbox = np.zeros((len(raw_bbox), 4, 2), dtype=int)
            for k in range(len(raw_bbox)):
                x_low, y_low, x_high, y_high = raw_bbox[k]
                bbox[k, 0, :] = (x_low, y_low)
                bbox[k, 1, :] = (x_high, y_low)
                bbox[k, 2, :] = (x_high, y_high)
                bbox[k, 3, :] = (x_low, y_high)
            oldXs, oldYs = getFeatures(new_grey, bbox)

        for i in range(len(newbbox)):
            obj = newbbox[i]

            if np.isnan(obj).any():
                continue
            if outOfBounds(obj, new_grey):

                continue

            x1, y1 = int(np.round(obj[0][1])), int(np.round(obj[0][0]))
            x2, y2 = int(np.round(obj[2][1])), int(np.round(obj[2][0]))

            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = ((x1, y1))
            fontScale = 0.6
            lineType = 2

            if movingBox[i]:
                cv2.rectangle(newFrame, (x1, y1), (x2, y2), (255, 150, 150), 2)
                cv2.putText(newFrame, 'moving ' + old_class_names[i],
                            bottomLeftCornerOfText, font, fontScale,
                            (255, 150, 150), lineType)
            else:
                cv2.rectangle(newFrame, (x1, y1), (x2, y2), (0, 150, 150), 2)
                cv2.putText(newFrame, 'still ' + old_class_names[i],
                            bottomLeftCornerOfText, font, fontScale,
                            (0, 150, 150), lineType)

        n, f = oldXs.shape

        writer.write(newFrame[:, :, [2, 1, 0]])

    return newVideoName
Example #26
def faceTracking(rawVideo):

    #process the video here, convert into frames of images
    #assuming that 'rawVideo' is a video path
    cap = cv2.VideoCapture(rawVideo)
    #ie, test with rawVideo = 'Data/Easy/TheMartian.mp4'
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    num_frames = int(cap.get(7))
    #create an array that holds all the frames in the video, format: frame_number x height x width x 3
    frames = np.zeros([num_frames, frame_height, frame_width, 3], np.uint8)
    f = 0

    while (cap.isOpened()):
        ret, frame = cap.read()
        #cv2.imshow('fr ame')
        frames[f, :, :, :] = frame

        f += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if f == num_frames - 1:
            break

    cap.release()
    cv2.destroyAllWindows()

    #need to detect face only on the first frame of the video
    #if "good" face not found, then try other frames, until a good face is found
    face_found = False
    f = 0
    face = None
    while not face_found:
        #face is a Fx4x2 bounding box
        face = detectFace(frames[f, :, :, :])
        f += 1
        if face is not None:
            face_found = True
            f -= 1

    #find the start features
    init_frame = f
    init_img = frames[init_frame, :, :, :]
    init_img_gray = cv2.cvtColor(init_img, cv2.COLOR_BGR2GRAY)
    #there will always be 1000 xy's, because we have padded with (0,0)
    #make sure to ignore the (0,0) points later in the code
    startXs, startYs = getFeatures(init_img_gray, face)

    #initialize the the output matrix of tracked images
    outputMatrix = np.zeros((num_frames - f, frame_height, frame_width, 3),
                            np.uint8)

    #draw rectangles of all the faces on the current image
    initImgWithBBox = init_img
    [numFaces, _, _] = face.shape
    for i in range(0, numFaces):
        bboxOfCurrFace = face[i, :, :]
        # get the position of the corners of the bounding box for the current face
        first = bboxOfCurrFace[0, :]
        second = bboxOfCurrFace[3, :]
        # add a bounding box to the initial image
        cv2.rectangle(initImgWithBBox, (first[0], first[1]),
                      (second[0], second[1]), (255, 0, 0))
        initImgWithBBox = plotPoints(initImgWithBBox, startYs[:, i],
                                     startXs[:, i])

    #add the initial image as the first image
    outputMatrix[0, :, :, :] = initImgWithBBox

    #actually do the transform and find the new bounding box
    for frame in range(f, num_frames - 1):  #this should probably not be -1
        #get the two consecutive frames at the index
        img1 = frames[frame, :, :, :]
        img2 = frames[frame + 1, :, :, :]

        #find new feature points every 10th frame
        if (frame % 10 == 0):
            faceCurr = detectFace(img1)
            if faceCurr is not None:
                face = faceCurr
            #else
            #just use the last face since it couldn't detect a new face

            #convert first image to grey
            img1grey = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            #find the starting features on the first image
            startXs, startYs = getFeatures(img1grey, face)

        [newXs, newYs] = estimateAllTranslation(startXs, startYs, img1, img2)
        [Xs, Ys,
         newbbox] = applyGeometricTransformation(startXs, startYs, newXs,
                                                 newYs, face)
        #print newXs
        #now add a rectangle of newbbox to img2 and add the feature points
        img2WithBoundingBox = img2
        for facei in range(0, numFaces):
            #get the bounding box for the current face
            bboxOfCurrFace = newbbox[facei, :, :]
            #get the positions of the two corners for the bounding box of the current face
            first = bboxOfCurrFace[0, :].astype(int)
            second = bboxOfCurrFace[3, :].astype(int)
            #draw the bounding box
            img2WithBoundingBox = cv2.rectangle(img2WithBoundingBox,
                                                (first[0], first[1]),
                                                (second[0], second[1]),
                                                (255, 0, 0))
            #draw the feature points
            if numFaces == 1:
                img2WithBoundingBox = plotPoints(img2WithBoundingBox, Ys, Xs)
            else:
                img2WithBoundingBox = plotPoints(img2WithBoundingBox,
                                                 Ys[:, facei], Xs[:, facei])

        #add img2 to the output matrix
        outputMatrix[frame + 1, :, :, :] = img2WithBoundingBox
        #set the new bbox to the face for the next iteration
        face = newbbox

        #set the xs and ys of the features for the new features
        startXs = newXs
        startYs = newYs

    #output the final video
    imageio.mimwrite('finalVideo.avi', outputMatrix, fps=30)
    trackedVideo = []
    return trackedVideo
    #     plt.axis('off')
    # plt.show()

    return Xs, Ys, newbbox


Example #27
if __name__ == '__main__':
    cap = cv2.VideoCapture("./Datasets/Difficult/StrangerThings.mp4")
    ret, img1 = cap.read()
    ret, img2 = cap.read()
    cap.release()
    tmpimg1 = img1.copy()
    tmpimg2 = img2.copy()

    bbox = detectFace(img1)
    startXs, startYs = getFeatures(img1, bbox)
    newXs, newYs = estimateAllTranslation(startXs, startYs, img1, img2)
    Xs, Ys, newbbox = applyGeometricTransformation(startXs, startYs, newXs,
                                                   newYs, bbox)

    for box in bbox:
        cv2.rectangle(tmpimg1, (int(box[0][1]), int(box[0][0])),
                      (int(box[-1][1]), int(box[-1][0])), (0, 255, 0), 3)
    plt.figure()
    plt.imshow(tmpimg1)
    for j in range(len(startYs)):
        plt.plot(startYs[j], startXs[j], 'w+')
    plt.axis('off')
    plt.show()

    for newbox in newbbox:
Example #28
import pickle
from getFeatures import getFeatures
import numpy as np
from sklearn.model_selection import train_test_split
###accuracy over iterations

##infant state
if __name__ == '__main__':

    #filename=raw_input("file name: ")
    #_file=open(filename,'r')
    #file_reader=csv.reader(_file)

    states = [2, 3, 5, 6]

    for s in range(0, len(states)):

        X, Y, pca, temp_dict = getFeatures(windowSize=1, numStates=states[s])
        print(states[s], "states")
        print(pca)

        X = np.array(X).T
        Y = np.array(Y)
        dictionary = {v: k for k, v in temp_dict.items()}
        labels = []
        for i in range(0, len(dictionary)):
            labels.append(dictionary[i])

        X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                            Y,
                                                            test_size=0.2,
                                                            random_state=1)
        """
Example #29
def objectTracking(rawVideo):

    video = cv2.VideoCapture(rawVideo)

    success, prevframe = video.read()
    prev_grayframe = cv2.cvtColor(prevframe, cv2.COLOR_BGR2GRAY)
    ''' Get contour image '''
    num_objects = int(input("How many objects to track? : "))
    all_bbox_corners = np.zeros((num_objects, 4, 2))
    for f in range(num_objects):
        contour_image = maskImage(prev_grayframe).astype(np.uint8)
        image, bbox_corners = getBoundingBox(prev_grayframe, contour_image)
        all_bbox_corners[f, :, :] = bbox_corners

    xs, ys = gf.getFeatures(prev_grayframe, all_bbox_corners)

    # first_frame = prevframe.copy()
    # first_frame[xs, ys, :] = np.array([0, 0, 255])
    ''' Open up output file '''
    w = int(video.get(3))
    h = int(video.get(4))
    fps = video.get(cv2.CAP_PROP_FPS)
    out = cv2.VideoWriter("out.avi", cv2.VideoWriter_fourcc(*'MJPG'), fps,
                          (w, h))

    count = 0  # to count out the iteration
    while 1:
        prev_grayframe = cv2.cvtColor(prevframe, cv2.COLOR_BGR2GRAY)

        success, frame = video.read()
        if not success: break
        grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Every 6 frames, get new features
        if count % 6 == 0:
            print("iter", count)
            more_xs, more_ys = gf.getFeatures(grayframe, all_bbox_corners)
            if more_xs is not None:
                xs = np.concatenate((xs, more_xs), axis=0)
                ys = np.concatenate((ys, more_ys), axis=0)
            #print(xs.shape, ys.shape)

        newXs, newYs = eat.estimateAllTranslation(xs, ys, prevframe, frame)
        finalXs, finalYs, final_bbox = applyGeometricTransformation(
            xs, ys, newXs, newYs, all_bbox_corners)
        if finalXs is None:
            xs, ys = gf.getFeatures(grayframe, all_bbox_corners)
            newXs, newYs = eat.estimateAllTranslation(xs, ys, prevframe, frame)
            finalXs, finalYs, final_bbox = applyGeometricTransformation(
                xs, ys, newXs, newYs, all_bbox_corners)

        output = frame.copy()

        for f in range(num_objects):
            x = int(np.round(final_bbox[f, 0, 0]))
            xw = int(np.round(final_bbox[f, 3, 0]))
            y = int(np.round(final_bbox[f, 0, 1]))
            yh = int(np.round(final_bbox[f, 3, 1]))
            cv2.rectangle(output, (x, y), (xw, yh), (0, 255, 0), 2)
        out.write(output)

        prevframe = frame.copy()
        xs, ys, all_bbox_corners = finalXs, finalYs, final_bbox

        count += 1
    print("done loop")

    video.release()
    out.release()

    return out
Example #30
0
frame1 = generate_output_frame(np.copy(img1), bbox)
frame1 = Image.fromarray(frame1)
frame1.save("easy_frame1.jpg")
# plt.imshow(frame1)
# plt.show()

# For debugging: Show the bounding box we've chosen
# plt.imshow(img1)
# for box in bbox:
# 	for i in range(3):
# 		plt.plot(box[i: i+2, 0], box[i: i+2, 1], color="red")
# 	plt.plot([box[0, 0], box[3, 0]], [box[0, 1], box[3, 1]], color="red")
# plt.show()

# Get the features from inside the bounding box
x, y = getFeatures(rgb2gray(img1), bbox)

# For debugging: Show the bounding box and the features inside
# plt.imshow(img1)
# for box in bbox:
# 	for i in range(3):
# 		plt.plot(box[i: i+2, 0], box[i: i+2, 1], color="red")
# 	plt.plot([box[0, 0], box[3, 0]], [box[0, 1], box[3, 1]], color="red")
# for i in range(x.shape[1]):
# 	plt.scatter(x[i], y[i][:], color="blue")
# plt.show()

nextframe = np.copy(img2)
warped = np.copy(img1)
newXs = np.copy(x)
newYs = np.copy(y)
Example #31
0
def objectTracking(rawVideo):
    folder_name = str(datetime.now()).replace('-',
                                              '_').replace(':', '_').replace(
                                                  '.', '_').replace(' ', '_')
    os.mkdir(folder_name + '_bounding_box')
    os.mkdir(folder_name + '_trajectory')
    n = 2  # Number of bounding boxes

    medium = 0
    nof = 5  # number of features

    if 'medium' in rawVideo:
        medium = 1
        nof = 10
        n = 1
    frameExtractor(folder_name=folder_name, video_path=rawVideo, medium=medium)

    noframes = len(os.listdir(folder_name))

    filename1 = folder_name + '/0.jpg'

    img1 = Image.open(filename1).convert('RGB')
    img1 = np.array(img1)
    gray1 = rgb2gray(img1).astype('float32')

    # to draw your own bounding box, comment out the next four lines and
    # uncomment the get_bbox call below
    if 'easy' in rawVideo:
        bbox = bboxgen.get_bbox(img1, n)
    else:
        bbox = np.array([[[276, 457], [276, 518], [350, 518], [347, 456]]])

    # bbox = bboxgen.get_bbox(img1, n)
    x_index, y_index = getFeatures(gray1, bbox, nof)
    bbox = bbox.astype(float)

    for i in range(1, noframes):

        filename2 = folder_name + '/' + str(i) + '.jpg'
        img2 = Image.open(filename2).convert('RGB')
        img2 = np.array(img2)

        for b in range(len(bbox)):
            x_min = np.min(bbox[b, :, 0])
            x_max = np.max(bbox[b, :, 0])
            y_min = np.min(bbox[b, :, 1])
            y_max = np.max(bbox[b, :, 1])

            if len(x_index[x_index[:, b] != -1, b]) <= 3:
                if x_min < 0 or y_min < 0 or x_max >= img2.shape[
                        1] or y_max >= img2.shape[0]:
                    bbox[b] = 0 * bbox[b] - 1
                else:
                    print('recomputing....')
                    size = x_index[x_index[:, b] != -1, b].shape[0]
                    x_old_index = x_index[x_index[:, b] != -1, b]
                    y_old_index = y_index[y_index[:, b] != -1, b]
                    gray1 = rgb2gray(img1).astype('float32')
                    x_index, y_index = getFeatures(gray1, bbox.astype(int),
                                                   nof)
                    x_index[0:size, b] = x_old_index
                    y_index[0:size, b] = y_old_index

        newXs, newYs = estimateAllTranslation(x_index, y_index, img1, img2)
        x_index[np.where(newXs == -1)] = -1
        y_index[np.where(newXs == -1)] = -1
        Xs, Ys, newbbox = applyGeometricTransformation(x_index, y_index, newXs,
                                                       newYs, bbox)

        xmin = []
        ymin = []
        xmax = []
        ymax = []

        for b in range(len(newbbox)):
            max_x_y = newbbox[b].max(axis=0)
            min_x_y = newbbox[b].min(axis=0)

            Xs[Xs[:, b] < min_x_y[0], b] = -1

            Xs[Xs[:, b] > max_x_y[0], b] = -1
            Ys[Ys[:, b] < min_x_y[1], b] = -1
            Ys[Ys[:, b] > max_x_y[1], b] = -1

            Ys[Xs[:, b] == -1, b] = -1
            Xs[Ys[:, b] == -1, b] = -1

            xmin.append(min_x_y[0])
            xmax.append(max_x_y[0])
            ymin.append(min_x_y[1])
            ymax.append(max_x_y[1])

        x_index = np.copy(Xs)
        y_index = np.copy(Ys)
        img1 = np.copy(img2)
        bbox = np.copy(newbbox)

        plot_box(xmin, ymin, xmax, ymax, img2, i, Xs[Xs != -1], Ys[Ys != -1],
                 folder_name + '_bounding_box')
        #plot_trajectory(Xs[Xs != -1], Ys[Ys != -1], img2, i, folder_name + '_trajectory')

    makeMovie(folder_name + '_bounding_box', folder_name + '_bb_output_gif',
              noframes)
Example #32
0
import numpy
import getFeatures

#Assemble features in train set
#==================================
# This is the section that will take the longest time running since it will actually be extracting the features of
# the malware for each file. This could be a lengthy process.
#==================================
CNN_accuracy = 0
NGRAM_accuracy = 0
combAccPercent = 0
numCrossVals = 10

data_cnn = []
data_nGram = []
base = [0, 0, 0, 0, 0, 0, 0, 0, 0]
labels = []
for fileStub in mySet:
    featureListNGRAM = getFeatures.getFeatures(fileStub, "NGRAM", info)
    featureList = getFeatures.getFeatures(MALWARE_FILE_PATH + fileStub, "CNN", info)

    #check for error
    if (len(featureList) == 0 or len(featureListNGRAM) == 0):
        #print("FEATURE GRAB ERROR")
        #print("Not adding file: " + fileStub + "to the train set")
        continue

    #Add values to data matrices (one matrix per classifier)
    data_nGram.append(numpy.asarray(featureListNGRAM))
    data_cnn.append(numpy.asarray(featureList))

    #Add classes to class matrix
    malwareClass = classDictionary[fileStub]
    base2 = base[:]
Example #33
0
    # advance the capture ten frames to reach the comparison frame
    for _ in range(10):
        ret, frame2 = cap.read()
    frame1_gray = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
    frame2_gray = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)

    n_object = 1
    bbox = np.array([[[291, 187], [405, 187], [291, 267], [405, 267]]])
    startXs, startYs = getFeatures(frame1_gray, bbox)
    y = bbox[0][0][1]
    x = bbox[0][0][0]
    # pdb.set_trace()  # debugging breakpoint; disabled so the script runs through
    img_req_1 = frame1_gray[y:bbox[0][3][1], x:bbox[0][3][0]]
    corners = cv2.goodFeaturesToTrack(img_req_1, 25, 0.1, 5)
    corners = np.int0(corners.squeeze())
    trans_x = corners[:, 0].copy() + bbox[0][0][0]
    trans_y = corners[:, 1].copy() + bbox[0][0][1]

    newXs, newYs = estimateAllTranslation(startXs, startYs, frame1, frame2)

    import matplotlib.pyplot as plt
    from matplotlib.patches import Rectangle
    fig, ax = plt.subplots()
    diff = np.subtract(frame1_gray.astype(int), frame2_gray.astype(int))
Example #34
0
import numpy as np
from sklearn import neighbors, preprocessing
from sklearn.model_selection import train_test_split

import getFeatures


def basicPredictor(vecnumber):
    procvec, diagvec, diagdict = getFeatures.getFeatures()
    procvec1 = procvec["Medicare"]
    diagvec1 = diagvec["Medicare"]
    procvec2 = procvec["Medicaid"]
    diagvec2 = diagvec["Medicaid"]
    procvec3 = procvec["Uninsured"]
    diagvec3 = diagvec["Uninsured"]
    keylist = sorted(procvec1.keys())
    veclist = []
    diaglist = []
    statenamelist = []
    i = vecnumber
    for key in keylist:
        vecs = procvec1[key]
        countvec = vecs[i]
        veclist.append(countvec)
        diaglist.append(diagvec1[key])

        try:
            veclist.append(procvec2[key][i])
            diaglist.append(diagvec2[key])
        except (KeyError, IndexError):
            pass
        try:
            veclist.append(procvec3[key][i])
            diaglist.append(diagvec3[key])
        except (KeyError, IndexError):
            pass

        statenamelist.append(key)
    veclist = np.asarray(veclist)
    diaglist = np.asarray(diaglist)
    '''
    #encodes only top 5 diagnoses for each input vector, does not give good accuracy!!
    diagindex=np.argpartition(diaglist,-5,axis=1)[:,-5:]
    hotlist=[]
    for i in range(len(diaglist)):
        hot=np.zeros((len(diaglist[0]),),dtype=float)
        hot[diagindex[i]]=1.0
        hotlist.append(hot)
    hotlist=np.asarray(hotlist)
    hotlist=preprocessing.normalize(hotlist)
    
    '''
    #print diagindex
    hotlist = preprocessing.normalize(diaglist)
    standardscaler = preprocessing.StandardScaler()
    vecs = standardscaler.fit_transform(veclist)

    #  for testing how size of training data affects accuracy
    sizevec = [0.11, 0.22, 0.33, 0.44, 0.55, 0.66]
    error = []
    for i in sizevec:
        xtrain, xtest, ytrain, ytest = train_test_split(vecs,
                                                        hotlist,
                                                        test_size=i,
                                                        random_state=17)
        knn = neighbors.KNeighborsRegressor()
        knn.fit(xtrain, ytrain)
        predictedys = knn.score(xtest, ytest)
        error.append(predictedys)
    print(error)  # knn.score returns R^2, so larger is better despite the name 'error'
Example #35
0
    return newX, newY


if __name__ == '__main__':
    # setup video capture
    cap = cv2.VideoCapture("./Datasets/Easy/MarquesBrownlee.mp4")
    ret, img1 = cap.read()
    ret, img2 = cap.read()
    cap.release()

    img1 = np.array(img1)
    img2 = np.array(img2)

    bbox = detectFace(img1)
    x, y = getFeatures(img1, bbox)
    img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # caution: np.gradient returns (d/d-rows, d/d-cols), so these names are
    # swapped relative to the usual (Ix, Iy) image convention
    Ix, Iy = np.gradient(img1_gray)
    Ix = np.array(Ix)
    Iy = np.array(Iy)

    newXs = []
    newYs = []
    for i in range(len(x)):
        for j in range(len(x[i])):
            startX = x[i][j]
            startY = y[i][j]
            newX, newY = estimateFeatureTranslation(startX, startY, Ix, Iy,
                                                    img1_gray, img2_gray)
            newXs.append(newX)
Example #36
0
import cv2
import numpy as np
from PIL import Image

from getFeatures import getFeatures
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation
from helper import rgb2gray    # module names assumed, following the import style of Example #38
# generate_output_frame, gen_video, draw_box and the refPt list are
# project-local helpers not shown in this snippet


def main(video_file, output_filename):
    imgs = np.array([])
    cap = cv2.VideoCapture(video_file)
    ret, img1 = cap.read()
    img1 = img1[..., ::-1]
    h, w, d = img1.shape

    display_img = img1.copy()
    display_img = cv2.cvtColor(display_img, cv2.COLOR_BGR2RGB)
    cv2.namedWindow("Start Frame")
    cv2.setMouseCallback("Start Frame", draw_box)

    # Loop until the user is done drawing boxes
    while True:
        cv2.imshow("Start Frame", display_img)
        key = cv2.waitKey(0)

        if key == ord('q'):
            break

    # Destroy the drawing window
    cv2.destroyAllWindows()

    # Show the result
    for i in range(int(len(refPt) / 2)):
        cv2.rectangle(display_img, refPt[2 * i], refPt[(2 * i) + 1],
                      (0, 255, 0), 2)

    cv2.imshow("Result", display_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    bbox = []
    for i in range(int(len(refPt) / 2)):

        # Top Left and bottom right
        box_corners = np.array([refPt[2 * i], refPt[(2 * i) + 1]])
        start_x, start_y, width, height = cv2.boundingRect(box_corners)

        # Create the four coordinates for the box and reshape
        box = np.array([[start_x, start_y], [start_x + width, start_y],
                        [start_x + width, start_y + height],
                        [start_x, start_y + height]])

        bbox.append(box)

    # Turn it into a numpy array
    bbox = np.array(bbox)

    orig_box = np.copy(bbox)
    centers = np.zeros((len(bbox), 2))
    trajectory_indexer = np.zeros((h, w), dtype=bool)

    # Get the features from inside the bounding box
    x, y = getFeatures(rgb2gray(img1), bbox)

    newXs = np.copy(x)
    newYs = np.copy(y)

    f = 0
    frame = generate_output_frame(np.copy(img1), bbox,
                                  np.copy(trajectory_indexer), np.copy(newXs),
                                  np.copy(newYs))
    frame = Image.fromarray(frame)

    # Store the processed frames so we can turn it into a video later
    all_frames = []
    all_frames.append(frame)

    a = 0
    while ret:
        f += 1
        a += 1
        if not f % 8:
            print("Frame: ", f)
            a = 1
            for i in range(len(bbox)):
                # xmin = np.sort(bbox[i, :, 0])[0]
                # xmax = np.sort(bbox[i, :, 0])[3]
                # ymin = np.sort(bbox[i, :, 1])[0]
                # ymax = np.sort(bbox[i, :, 1])[3]
                # bbox[i, ...] = np.array([xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]).reshape(4,2)
                orig_box = np.copy(bbox)
            x, y = getFeatures(rgb2gray(img1), bbox)
            newXs = np.copy(x)
            newYs = np.copy(y)

        thresh = .1 + .02 * a

        ret, img2 = cap.read()
        if not ret:
            break
        img2 = img2[..., ::-1]

        iterations = 1

        # Get the new feature locations in the next frame
        updatex, updatey, x, y = estimateAllTranslation(
            newXs, newYs, np.copy(x), np.copy(y), np.copy(img1), np.copy(img2),
            np.copy(bbox))

        for k in range(len(bbox)):
            centers[k] = np.array(
                [np.mean(bbox[k, :, 0]),
                 np.mean(bbox[k, :, 1])]).astype(int)

        # Warp the image for the next iteration
        newXs, newYs, bbox, warped = applyGeometricTransformation(
            np.copy(x), np.copy(y), updatex, updatey, np.copy(orig_box),
            np.copy(img1), np.copy(img2), thresh)

        for k in range(len(bbox)):
            xcen = int(np.mean(bbox[k, :, 0]))
            ycen = int(np.mean(bbox[k, :, 1]))
            num = int(
                max([abs(xcen - centers[k, 0]),
                     abs(ycen - centers[k, 1])]))
            centerx = np.linspace(centers[k, 0], xcen + 1, num).astype(int)
            centery = np.linspace(centers[k, 1], ycen + 1, num).astype(int)
            if centerx.size > 0 and centery.size > 0:
                trajectory_indexer[centery, centerx] = True
                trajectory_indexer[centery + 1, centerx] = True
                trajectory_indexer[centery, centerx + 1] = True
                trajectory_indexer[centery + 1, centerx + 1] = True
            else:
                trajectory_indexer[ycen, xcen] = True
                trajectory_indexer[ycen + 1, xcen] = True
                trajectory_indexer[ycen, xcen + 1] = True
                trajectory_indexer[ycen + 1, xcen + 1] = True

        frame = generate_output_frame(np.copy(img2), bbox,
                                      np.copy(trajectory_indexer),
                                      np.copy(newXs), np.copy(newYs))
        frame = Image.fromarray(frame)
        # frame.save("medium_frame%d.jpg" % f)

        img1 = np.copy(img2)
        all_frames.append(frame)

    cap.release()

    np_frames = np.array(
        [cv2.cvtColor(np.array(f), cv2.COLOR_BGR2RGB) for f in all_frames])
    gen_video(np.array(np_frames), "{0}.avi".format(output_filename))
Example #37
0
def faceTracking(rawVideo):

    ind = 0
    trackedVideo = []
    nFrames = len(rawVideo)

    # detect faces
    bbox = detectFace(rawVideo[0])

    # detect features
    gray_im = rgb2gray(rawVideo[0])
    x, y = getFeatures(gray_im, bbox)

    for f in range(0, x.shape[1]):
        x[:, f] = x[:, f] + bbox[f][0][1]  # row coords
        y[:, f] = y[:, f] + bbox[f][0][0]  # col coords

    for i in range(0, nFrames - 1):

        if x.shape[0] < 15:
            # too few features survive: re-detect faces on the current frame
            bbox = detectFace(rawVideo[ind])

            # detect features
            gray_im = rgb2gray(rawVideo[ind])
            x, y = getFeatures(gray_im, bbox)

            for f in range(0, x.shape[1]):
                x[:, f] = x[:, f] + bbox[f][0][1]  # row coords
                y[:, f] = y[:, f] + bbox[f][0][0]  # col coords

        # get frames
        img1 = rawVideo[ind]
        img2 = rawVideo[ind + 1]

        # track features from first frame to second using KLT procedure

        newX, newY = estimateAllTranslation(x, y, img1, img2)

        # apply resulting transformation
        newXs, newYs, bbox = applyGeometricTransformation(
            x, y, newX, newY, bbox)
        # apply tracked features and bounding box to frames, update output array
        for f in range(0, newXs.shape[1]):
            im = cv2.rectangle(img2, (int(bbox[f][0][0]), int(bbox[f][0][1])),
                               (int(bbox[f][2][0]), int(bbox[f][2][1])),
                               (0, 0, 0), 3)
            for j in range(0, len(newXs)):
                im = cv2.circle(im, (int(newYs[j][f]), int(newXs[j][f])), 1,
                                (0, 0, 255), 2)

        trackedVideo.append(im)

        ind = ind + 1

        x = newXs
        y = newYs

    trackedVideo = np.array(trackedVideo)

    return trackedVideo
Example #38
0
from estimateAllTranslation import estimateAllTranslation
from applyGeometricTransformation import applyGeometricTransformation
from detectFace import detectFace    # assumed module names, matching the
from getFeatures import getFeatures  # import style of the lines above
from helper import rgb2gray
import numpy as np
import cv2

ind = 0
trackedVideo = []
nFrames = len(rawVideo)

# detect faces
bbox = detectFace(rawVideo[0])

# detect features
gray_im = rgb2gray(rawVideo[0])
x, y = getFeatures(gray_im, bbox)

for f in range(0, x.shape[1]):
    x[:, f] = x[:, f] + bbox[f][0][1]  # row coords
    y[:, f] = y[:, f] + bbox[f][0][0]  # col coords
