Example no. 1
def main(args):

    basedir = args.test_imgs_path
    num_histograms = args.num_test_pics
    channel = 0
    classifiers = load_classifiers()
    max_textons = args.max_textons
    n_clusters = args.num_textons
    weights = 1

    texton_hists = []

    for i in range(num_histograms):
        img_dir = basedir + "/" + str(i) + ".png"
        img = treXton.imread_opponent(img_dir)

        if args.local_standardize:
            for channel in range(args.channels):
                img = standardize(img, channel)

        hists_per_channel = []
        for channel in range(args.channels):
            classifier = classifiers[channel]

            texton_hist = treXton.img_to_texton_histogram(
                img[:, :, channel], classifier, max_textons,
                n_clusters, weights, args, channel)
            hists_per_channel.append(texton_hist)
            
        hists_per_channel = np.ravel(np.array(hists_per_channel)).astype(np.float32)
        color_histogram = False
        all_hists = hists_per_channel
        if color_histogram:
            # reorder data into a form suitable for histogramming
            data = np.vstack((img[:, :, 0].flat, 
                              img[:, :, 1].flat,
                              img[:, :, 2].flat)).astype(np.uint8).T

            m = 4  # size of 3d histogram cube
            color_hist, edges = np.histogramdd(data, bins=m)
#            print np.ravel(color_hist / (640 * 480))
            #print hists_per_channel
            #print np.ravel(color_hist)
            all_hists = np.concatenate((hists_per_channel, np.ravel(color_hist)))

        texton_hists.append(all_hists)

    np.savetxt("mat_train_hists_cross.csv", texton_hists, delimiter=",", fmt='%d')

def display_textons(args):

    mean, stdv = np.load("mean_stdv.npy")

    print(mean, stdv)
    
    img = cv2.imread(args.image, 0)

    print(args.image)


    img = img - mean
    img = img / stdv


    print(img[0:30])
    
    
    # Load classifier from file
    classifier = joblib.load('classifiers/kmeans.pkl') 
    
    hist = treXton.img_to_texton_histogram(img, classifier, args.max_textons, args.num_textons, 1, args)
    for i, t in enumerate(hist):
        print(i, t)
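
Example 1 builds the per-channel texton histograms through treXton.img_to_texton_histogram, whose implementation is not shown here. The following is a minimal sketch of the underlying idea, assuming a fitted scikit-learn k-means model over small grayscale patches; the function name and patch size are illustrative, not treXton's API.

import numpy as np

def sketch_texton_histogram(img, kmeans_model, max_textons=500, patch_size=5):
    # Sample random patches, assign each one to its nearest k-means centroid
    # (its "texton"), and count how often each texton occurs.
    h, w = img.shape
    ys = np.random.randint(0, h - patch_size, max_textons)
    xs = np.random.randint(0, w - patch_size, max_textons)
    patches = np.array([img[y:y + patch_size, x:x + patch_size].ravel()
                        for y, x in zip(ys, xs)], dtype=np.float32)
    assignments = kmeans_model.predict(patches)
    return np.bincount(assignments, minlength=kmeans_model.n_clusters)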
Example no. 3
def main(args):

    cap = cv2.VideoCapture(args.dev)

    kmeans = []

    for channel in range(3):
    
        kmean = joblib.load('classifiers/kmeans' + str(channel) + '.pkl')
        kmeans.append(kmean)

    if args.do_separate:
        # Load classifier
        clf_x = joblib.load('classifiers/clf_x.pkl')
        clf_y = joblib.load('classifiers/clf_y.pkl') 

    else:
        clf = joblib.load('classifiers/clf_multi.pkl')

    # Load tfidf
    tfidf = joblib.load('classifiers/tfidf.pkl') 

    # Feature importances
    #for a in zip(range(150), clf_x.feature_importances_):
    #    print a
    #print clf_y.feature_importances_

    fp = open("predictions_cross.csv", "w")

    for i in range(args.num_test_pics):

        query_file = args.test_imgs_path + str(i) + ".png"
        query_image = treXton.imread_opponent(query_file)

        if args.local_standardize:

            mymean, mystdv = cv2.meanStdDev(query_image)
            mymean = mymean.reshape(-1)
            mystdv = mystdv.reshape(-1)

            query_image = (query_image - mymean) / mystdv      

        # Get texton histogram of picture
        query_histograms = np.zeros((args.channels, args.num_textons))

        for channel in range(args.channels):
            kmean = kmeans[channel]        
            histogram = img_to_texton_histogram(query_image[:, :, channel], 
                                                  kmean,
                                                  args.max_textons,
                                                  args.num_textons,
                                                  1, args, channel)
            query_histograms[channel] = histogram
            
        query_histograms = query_histograms.reshape(1, args.num_textons * args.channels)  

        if args.tfidf:
            query_histograms = tfidf.transform(query_histograms).todense()
            query_histograms = np.ravel(query_histograms)

            query_histograms = query_histograms.reshape(1, args.num_textons * args.channels)  

        if args.use_xgboost:
            dtest = xgb.DMatrix(query_histograms)
            pred_x = clf_x.predict(dtest)[0]
            pred_y = clf_y.predict(dtest)[0]
        elif not(args.do_separate):
            print "Not separate"
            pred = clf.predict(query_histograms)[0]
            pred_x = pred[0]
            pred_y = pred[1]
        else:                
            pred_x = clf_x.predict(query_histograms)[0]
            pred_y = clf_y.predict(query_histograms)[0]
        
        if args.prediction_variance:
            pred_var_x = prediction_variance(clf_x, query_histograms)
            pred_var_y = prediction_variance(clf_y, query_histograms)
            fp.write("%f,%f,%f,%f\n" % (pred_x, pred_y, pred_var_x, pred_var_y)) 
        else:
            fp.write("%f,%f\n" % (pred_x, pred_y)) 

    fp.close()
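
prediction_variance is called above but not defined in this example. A common way to approximate it for a random-forest regressor is the spread of the individual tree predictions; a minimal sketch under the assumption that clf_x and clf_y are scikit-learn RandomForestRegressor instances:

import numpy as np

def prediction_variance(forest, X):
    # Variance of the per-tree predictions for one sample: a rough proxy
    # for the model's uncertainty at that query point.
    tree_preds = np.array([tree.predict(X)[0] for tree in forest.estimators_])
    return tree_preds.var()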
Example no. 4
def main(args):

    labels = np.load("labels.npy")
    histograms = np.load("histograms_logos.npy")

    cap = cv2.VideoCapture(args.dev)

    kmeans = []

    for channel in range(3):
    
        kmean = joblib.load('classifiers/kmeans' + str(channel) + '.pkl')
        kmeans.append(kmean)

        
    minidrone = read_png("img/minidrone.png")
    background_map = plt.imread("../image_recorder/general_images/mosaic.png")
    imagebox = OffsetImage(minidrone, zoom=1)
    ax = plt.gca()
    ax.imshow(background_map)
    k = 0

    while True:

        if k != 0:
            drone_artist.remove()
        
        distances = []

        ret, pic_bgr = cap.read()
        pic = cv2.cvtColor(pic_bgr, cv2.COLOR_BGR2RGB)
        pic = RGB2Opponent(pic)

        cv2.imshow("Capture", pic_bgr)

        gray = cv2.cvtColor(pic_bgr, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

        thresholding = False

        if thresholding:
        
            # noise removal
            kernel = np.ones((3,3),np.uint8)
            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations = 2)

            # sure background area
            sure_bg = cv2.dilate(opening, kernel, iterations=3)

            # Finding sure foreground area
            dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2,5)
            ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)

            # Finding unknown region
            sure_fg = np.uint8(sure_fg)
            unknown = cv2.subtract(sure_bg, sure_fg)

            ret, markers = cv2.connectedComponents(sure_fg)

            # Add one to all labels so that sure background is not 0, but 1
            markers = markers+1

            # Now, mark the region of unknown with zero
            markers[unknown==255] = 0

            markers = cv2.watershed(pic_bgr, markers)
            pic_bgr[markers == -1] = [255,0,0]
        
        #cv2.imshow("Threshold", pic_bgr)
        
        if args.local_standardize:
            for channel in range(args.channels):
                mymean = np.mean(np.ravel(pic[:, :, channel]))
                mystdv = np.std(np.ravel(pic[:, :, channel]))

                pic[:, :, channel] = pic[:, :, channel] - mymean
                pic[:, :, channel] = pic[:, :, channel] / mystdv
        

        # Get texton histogram of picture
        query_histograms = np.zeros((args.channels, args.num_textons))

        for channel in range(args.channels):
            histogram = img_to_texton_histogram(pic[:, :, channel],
                                                kmeans[channel],
                                                args.max_textons,
                                                args.num_textons,
                                                1,
                                                args,
                                                channel)
            query_histograms[channel] = histogram

        query_histograms = query_histograms.reshape(1, -1)

        for hist in histograms:
            hist = hist.reshape(1, -1)
            dist = np.linalg.norm(hist[0] - query_histograms[0])
            distances.append(dist)

        distances = np.array(distances)
        min_dist = distances.min()
        arg_min = distances.argmin()

        #print(min_dist)

        sorted_dists = np.sort(distances)

        #print(sorted_dists[:2])
        #print("")

        sorted_labels = [x for (y,x) in sorted(zip(distances, labels))]

        clf = joblib.load("classifiers/logo_clf.pkl")

        pred = clf.predict(query_histograms.reshape(1, -1))
        probs = clf.predict_proba(query_histograms.reshape(1, -1))

        signs = ['linux', 'camel', 'firefox']
        
        for i in zip(signs, probs[0]):
            print(i)
        print("")

        xy = bayes.maximum_a_posteriori(*list(probs[0]))

        #if any(i >= 0.75 for i in probs[0]):
        #    print("pred is", pred[0])
        #else:
        #    print("pred is", "background")

        xy = (xy[0], xy[1])

        print(xy)
        
        ab = AnnotationBbox(imagebox, xy,
                            xycoords='data',
                            pad=0.0,
                            frameon=False)

        drone_artist = ax.add_artist(ab)
        k += 1
        plt.pause(1e-10)        
        cv2.waitKey(1)
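
The position estimate comes from bayes.maximum_a_posteriori, which is not part of this example. A hypothetical sketch of such a lookup is shown below; the sign coordinates are made-up placeholders, not values from the project.

# Hypothetical (x, y) map positions of the three signs (placeholders only).
SIGN_POSITIONS = {'linux': (100, 200), 'camel': (400, 150), 'firefox': (250, 350)}

def maximum_a_posteriori(p_linux, p_camel, p_firefox):
    # Pick the sign with the highest posterior probability and return its
    # assumed (x, y) position on the map.
    posteriors = {'linux': p_linux, 'camel': p_camel, 'firefox': p_firefox}
    best_sign = max(posteriors, key=posteriors.get)
    return SIGN_POSITIONS[best_sign]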
Example no. 5
def validate(args):

    # Load k-means

    kmeans = []
    for channel in range(3):
    
        kmean = joblib.load('classifiers/kmeans' + str(channel) + '.pkl')
        kmeans.append(kmean)
        
    # Load random forest
    if args.do_separate:
        clf_x = joblib.load('classifiers/clf_x.pkl')
        clf_y = joblib.load('classifiers/clf_y.pkl') 
    
    else:
        clf0 = joblib.load('classifiers/clf0.pkl')
        clf1 = joblib.load('classifiers/clf1.pkl')
        clfs = [clf0, clf1]

    # Load tfidf
    tfidf = joblib.load('classifiers/tfidf.pkl') 
        
    path = args.test_imgs_path
    # Laptop
    labels = pd.read_csv("../orthomap/imgs/sift_targets.csv", index_col=0)
    # PC
    #labels = pd.read_csv("../datasets/imgs/sift_targets.csv", index_col=0)    

    if args.standardize:
        mean, stdv = np.load("mean_stdv.npy")

    if args.filter:
        my_filter = init_tracker()

    test_on_the_fly = True
    if test_on_the_fly:
        xs = []
        ys = []
        
    errors = []
    errors_x = []
    errors_y = []

    times = []
    
    for i in labels.index:
        start = time.time()
        img_path = path + str(i) + ".png"
        start_reading = time.time()                        
        pic = imread_opponent(img_path)
        end_reading = time.time()
        if args.measure_time:        
            print("reading", end_reading - start_reading)        
        

        if args.color_standardize:

            mymean = np.mean(np.ravel(pic[:, :, 0]))
            mystdv = np.std(np.ravel(pic[:, :, 0]))


            pic[:, :, 0] = pic[:, :, 0] - mymean
            pic[:, :, 0] = pic[:, :, 0] / mystdv
            pic[:, :, 1] = pic[:, :, 1] / mystdv
            pic[:, :, 2] = pic[:, :, 2] / mystdv


        start_ls = time.time()
        if args.local_standardize:

            mymeans, mystdvs = cv2.meanStdDev(pic)
            mymeans = mymeans.reshape(-1)
            mystdvs = mystdvs.reshape(-1)            
            
            #mymeans = np.mean(pic, axis=(0, 1))
            #mystdvs = np.std(pic, axis=(0, 1))
            
            pic = (pic - mymeans) / mystdvs
        end_ls = time.time()            
        if args.measure_time:
            print("local standardize", int(1000 * (end_ls - start_ls)))        
            
        if args.histogram_standardize:
            # create a CLAHE object (arguments are optional)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            for channel in range(args.channels):

                pic[:, :, channel] = clahe.apply(pic[:, :, channel])
            
                
        if args.standardize:
            for channel in range(args.channels):
                mean, stdv = np.load("mean_stdv_" + str(channel) + ".npy")
                pic[:, :, channel] = pic[:, :, channel] - mean
                pic[:, :, channel] = pic[:, :, channel] / stdv
            

        # Get texton histogram of picture
        query_histograms = np.empty((args.channels, args.num_textons))

        start_histo = time.time()
                
        for channel in range(args.channels):
            histogram = img_to_texton_histogram(pic[:, :, channel],
                                                    kmeans[channel],
                                                    args.max_textons,
                                                    args.num_textons,
                                                    1,
                                                    args,
                                                    channel)
            query_histograms[channel] = histogram

        end_histo = time.time()
        if args.measure_time:
            print("histograms", end_histo - start_histo)
            
        start_tfidf = time.time()
        if args.tfidf:
            histogram = tfidf.transform(query_histograms.reshape(1, args.num_textons * args.channels)).todense()
            histogram = np.ravel(histogram)
        else:
            # Without tf-idf, fall back to the raw concatenated histograms;
            # otherwise `histogram` would still hold only the last channel's histogram.
            histogram = np.ravel(query_histograms)
        end_tfidf = time.time()
        if args.measure_time:        
            print("tfidf", end_tfidf - start_tfidf)        
        
        preds = []
        start_prediction = time.time()        
        if args.do_separate:
            pred_x = clf_x.predict(histogram.reshape(1, args.num_textons * args.channels))
            pred_y = clf_y.predict(histogram.reshape(1, args.num_textons * args.channels))
            pred  = np.array([(pred_x[0], pred_y[0])])
            #err_down, err_up = pred_ints(clf_x, [histogram], percentile=75)
            #print(err_down)
            #print(pred[0][0])
            #print(err_up)
            #print("")
            
        else:
            for i, clf in enumerate(clfs):
                print(i)
                pred = clf.predict(histogram.reshape(1, -1))
                err_down, err_up = pred_ints(clf, histogram.reshape(1, -1), percentile=90)
                print(err_down)
                print(err_up)
                print("")
                preds.append(pred)

            pred = np.mean(preds, axis=0)
        end_prediction = time.time()        
        if args.measure_time:        
            print("prediction (clf)", end_prediction - start_prediction)        
            

#        if args.filter:
#            my_filter.update(pred.T)
#            filtered_pred = (my_filter.x[0][0], my_filter.x[2][0])
#            my_filter.predict()


        #print("Ground truth (x, y)", xs_opti[i], ys_opti[i])
        #print("Prediction (x, y)", pred[0][0], pred[0][1])

        if test_on_the_fly:
            if args.do_separate:
                xy = (pred_x[0], pred_y[0])
            else:
                xy = (pred[0][0], pred[0][1])

        else:
            xy = (xs[i], ys[i])

        start_error_stats = time.time()                                
        ground_truth =  (labels.x[i], labels.y[i])
        diff =  np.subtract(ground_truth, xy)
        abs_diff = np.fabs(diff)
        errors_x.append(abs_diff[0] ** 2)
        errors_y.append(abs_diff[1] ** 2)
        error = np.linalg.norm(abs_diff)
        errors.append(error ** 2)
        end = time.time()
        times.append(end - start)
        end_error_stats = time.time()
        if args.measure_time:        
            print("error stats", end_error_stats - start_error_stats)        

        

    val_errors = np.mean(errors)
    val_errors_x = np.mean(errors_x)
    val_errors_y = np.mean(errors_y)
    val_times = np.mean(times)
    print("times", val_times)
    print("frequency", int(1 / val_times))
    
    print("errors", np.sqrt(val_errors))
    print("errors x", np.sqrt(val_errors_x))
    print("errors y", np.sqrt(val_errors_y))

    all_errors = np.array([val_errors,
                           val_errors_x,
                           val_errors_y])
    
    np.save("all_errors.npy", all_errors)



    return val_errors, val_errors_x, val_errors_y
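
pred_ints, used in the non-separate branch above, is also external to this example. A common heuristic builds prediction intervals from empirical percentiles of the per-tree predictions; a minimal sketch for a single-output random-forest regressor:

import numpy as np

def pred_ints(model, X, percentile=90):
    # Empirical prediction interval from the spread of the individual trees.
    tree_preds = np.array([tree.predict(X)[0] for tree in model.estimators_])
    lower = np.percentile(tree_preds, (100.0 - percentile) / 2.0)
    upper = np.percentile(tree_preds, 100.0 - (100.0 - percentile) / 2.0)
    return lower, upper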
Example no. 6
def train_classifier_draug(path,
                           max_textons=None,
                           n_clusters=20,
                           args=None):

    classifiers = []


    for channel in range(args.channels):

        # Load classifier from file
        classifier = joblib.load('classifiers/kmeans' + str(channel) + '.pkl')
        classifiers.append(classifier)


    histograms = []
    labels = []

    base_dir = "/home/pold/Documents/Internship/image_recorder/"

    for symbol in symbols:

        for i in range(args.max_imgs_per_class):

            genimg_path = base_dir + symbol + '/' + str(i) + '.png'
            if os.path.exists(genimg_path):
            
                query_image = treXton.imread_opponent(genimg_path)
                labels.append(symbol)
                query_histograms = []


                if args.local_standardize:
                    for channel in range(args.channels):
                        mymean = np.mean(np.ravel(query_image[:, :, channel]))
                        mystdv = np.std(np.ravel(query_image[:, :, channel]))

                        query_image[:, :, channel] = query_image[:, :, channel] - mymean
                        query_image[:, :, channel] = query_image[:, :, channel] / mystdv



                for channel in range(args.channels):

                    classifier = classifiers[channel]

                    if args.use_dipoles:
                        query_histogram = treXton.img_to_texton_histogram(query_image,
                                                                  classifier,
                                                                  max_textons,
                                                                  n_clusters,
                                                                  1,
                                                                  args,
                                                                  channel)
                    else:
                        query_histogram = treXton.img_to_texton_histogram(
                            query_image[:, :, channel],
                            classifier,
                            max_textons,
                            n_clusters,
                            1,
                            args,
                            channel)
                    query_histograms.append(query_histogram)

                query_histograms = np.ravel(query_histograms)

                histograms.append(query_histograms)

            np.savetxt(symbol + ".csv", histograms, delimiter=",", fmt='%d')

        np.save("histograms_logos.npy", np.array(histograms))
        np.save("labels.npy", np.array(labels))
        clf = RandomForestClassifier(n_estimators=300,
                                     max_depth=15)
        clf.fit(np.array(histograms), np.array(labels))
        joblib.dump(clf, 'classifiers/logo_clf.pkl')
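
A quick sanity check of the logo classifier could reuse the arrays saved by this script; a minimal sketch, assuming histograms_logos.npy and labels.npy were written as above:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

hists = np.load("histograms_logos.npy")
labels = np.load("labels.npy")

# 5-fold cross-validated accuracy with the same hyperparameters as the training script.
clf = RandomForestClassifier(n_estimators=300, max_depth=15)
scores = cross_val_score(clf, hists, labels, cv=5)
print("mean accuracy: %.3f" % scores.mean())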
Example no. 7
def main(args):

    labels = np.load("labels.npy")
    histograms = np.load("histograms_logos.npy")

    cap = cv2.VideoCapture(args.dev)

    kmeans = []

    for channel in range(3):
    
        kmean = joblib.load('classifiers/kmeans' + str(channel) + '.pkl')
        kmeans.append(kmean)

        
    minidrone = read_png("img/minidrone.png")
    background_map = plt.imread("../image_recorder/general_images/mosaic.png")
    imagebox = OffsetImage(minidrone, zoom=1)
    ax = plt.gca()
    ax.imshow(background_map)
    k = 0

    while True:

        if k != 0:
            drone_artist.remove()
        
        distances = []

        ret, pic_bgr = cap.read()
        pic = cv2.cvtColor(pic_bgr, cv2.COLOR_BGR2RGB)
        pic = RGB2Opponent(pic)

        #cv2.imshow("Capture", pic_bgr)

        gray = cv2.cvtColor(pic_bgr, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

        
        if args.local_standardize:
            for channel in range(args.channels):
                mymean = np.mean(np.ravel(pic[:, :, channel]))
                mystdv = np.std(np.ravel(pic[:, :, channel]))

                pic[:, :, channel] = pic[:, :, channel] - mymean
                pic[:, :, channel] = pic[:, :, channel] / mystdv
        

        # Get texton histogram of picture
        query_histograms = np.zeros((args.channels, args.num_textons))

        for channel in range(args.channels):
            histogram = img_to_texton_histogram(pic[:, :, channel],
                                                kmeans[channel],
                                                args.max_textons,
                                                args.num_textons,
                                                1,
                                                args,
                                                channel)
            query_histograms[channel] = histogram

        query_histograms = query_histograms.reshape(1, -1)

        for hist in histograms:
            hist = hist.reshape(1, -1)
            dist = np.linalg.norm(hist[0] - query_histograms[0])
            distances.append(dist)

        distances = np.array(distances)
        min_dist = distances.min()
        arg_min = distances.argmin()

        #print(min_dist)

        sorted_dists = np.sort(distances)

        #print(sorted_dists[:2])
        #print("")

        sorted_labels = [x for (y,x) in sorted(zip(distances, labels))]

        clf = joblib.load("classifiers/logo_clf.pkl")

        pred = clf.predict(query_histograms)
        probs = clf.predict_proba(query_histograms)

        print("pred is", pred[0])

        for i in zip(clf.classes_, probs[0]):
            print(i)
        print("")


        if min_dist < 200 and sorted_labels[0] == sorted_labels[1] == sorted_labels[2] == sorted_labels[3] == sorted_labels[4]:
            pass
            #print(sorted_dists[0])
            #print(labels[arg_min])

        else:
            print("Background")
Example no. 8
def show_graphs(v, f):
    

    plt.ion()
    predictions = np.load(args.predictions)
    path = args.test_imgs_path
    background_map = plt.imread(mymap)
    y_width, x_width, _ = background_map.shape
    
    # First set up the figure, the axis, and the plot element we want to animate
    ax = plt.subplot2grid((2,2), (0, 0))
    ax.set_xlim([0, x_width])
    ax.set_ylim([0, y_width])

    line, = ax.plot([], [], lw=2)

    ax.set_title('Position prediction based on textons')

    xs = predictions[:, 0]
    ys = predictions[:, 1]

    minidrone = read_png("img/minidrone.png")
    minidrone_f = read_png("img/minidrone_f.png")
    minidrone_s = read_png("img/minisift.png")
    imagebox = OffsetImage(minidrone, zoom=1)
    filter_imagebox = OffsetImage(minidrone_f, zoom=0.6)
    sift_imagebox = OffsetImage(minidrone_s, zoom=0.7)
    ax.imshow(background_map, zorder=0, extent=[0, x_width, 0, y_width])


    if args.mode == 0:
        ax_opti = plt.subplot2grid((2,2), (1, 0), colspan=2)
        ax_opti.set_title('Texton histogram')
        line_opti, = ax_opti.plot([], [], lw=2)

    elif args.mode == 1:
        ax_opti = plt.subplot2grid((2,2), (1, 0), colspan=2)
        ax_opti.set_title('Texton histogram')
        line_opti, = ax_opti.plot([], [], lw=2)

        #optitrack = pd.read_csv("../draug/targets.csv")
        #xs_opti = optitrack.x
        #ys_opti = optitrack.y
        
        
    elif args.mode == 2:
        optitrack = np.load("optitrack_coords.npy")
        ax_opti = plt.subplot2grid((2,2), (1, 0), colspan=2)
        ax_opti.set_title('OptiTrack ground truth')
        line_opti, = ax_opti.plot([], [], lw=2)
        ax_opti.set_xlim([-10, 10])
        ax_opti.set_ylim([-10, 10])
        xs_opti = optitrack[:, 0]
        ys_opti = optitrack[:, 1]
        ys_opti, xs_opti = rotate_coordinates(xs_opti, ys_opti, np.radians(37))


    ax_inflight = plt.subplot2grid((2,2), (0, 1))
    ax_inflight.set_title('Pictures taken during flight')

    # Load k-means

    kmeans = []
    for channel in range(3):
    
        kmean = joblib.load('classifiers/kmeans' + str(channel) + '.pkl')
        kmeans.append(kmean)

    # Load random forest
    if args.do_separate:
        clf_x = joblib.load('classifiers/clf_x.pkl')
        clf_y = joblib.load('classifiers/clf_y.pkl') 
    
    else:
        clf0 = joblib.load('classifiers/clf0.pkl')
        clf1 = joblib.load('classifiers/clf1.pkl')
        clfs = [clf0, clf1]

    # Load tfidf
    tfidf = joblib.load('classifiers/tfidf.pkl') 

    if args.mode == 0:
        # Initialize camera
        cap = cv2.VideoCapture(args.dev)

    labels = pd.read_csv("handlabeled/playingmat.csv", index_col=0)

    xs = []
    ys = []
        
    i = 0

    if args.filter:
        my_filter = init_tracker()
            
        
    xs = []
    ys = []

    errors = []
    errors_x = []
    errors_y = []

    # Use SIFT relocalizer from OpenCV/C++
    if args.use_sift:
        rel = relocalize.Relocalizer(args.mymap)


    labels = pd.read_csv("handlabeled/playingmat.csv", index_col=0)

    if args.use_ground_truth:
        truth = pd.read_csv("../datasets/imgs/sift_targets.csv")
        truth.set_index(['id'], inplace=True)

    if args.use_particle_filter:
        mydrone = pf.robot()
        N = 80  # Number of particles
        p = pf.init_particles(N)
        dt = 1

    while True:

        start = time.time()

        while v.value != 0:
            pass
        
        if args.mode == 0:
            # Capture frame-by-frame
            ret, pic = cap.read()
            pic = cv2.cvtColor(pic, cv2.COLOR_BGR2RGB)
            pic = RGB2Opponent(pic)

        else:
            img_path = path + str(i) + ".png"
            pic_c = imread_opponent(img_path)
            pic = imread_opponent(img_path)


        if args.standardize:
            for channel in range(args.channels):
                mean, stdv = np.load("mean_stdv_" + str(channel) + ".npy")
                pic[:, :, channel] = pic[:, :, channel] - mean
                pic[:, :, channel] = pic[:, :, channel] / stdv

        if args.local_standardize:

            mymean, mystdv = cv2.meanStdDev(pic)
            mymean = mymean.reshape(-1)
            mystdv = mystdv.reshape(-1)

            pic = (pic - mymean) / mystdv         
            

        # Get texton histogram of picture
        query_histograms = np.zeros((args.channels, args.num_textons))

        if args.color_standardize:

            mymean = np.mean(np.ravel(pic[:, :, 0]))
            mystdv = np.std(np.ravel(pic[:, :, 0]))

            pic[:, :, 0] = pic[:, :, 0] - mymean
            pic[:, :, 0] = pic[:, :, 0] / mystdv
            pic[:, :, 1] = pic[:, :, 1] / mystdv
            pic[:, :, 2] = pic[:, :, 2] / mystdv

        if args.use_dipoles:
            histogram = img_to_texton_histogram(pic,
                                                kmeans[0],
                                                args.max_textons,
                                                args.num_textons,
                                                1,
                                                args,
                                                0)
            query_histograms = histogram.reshape(1, args.num_textons * 4)
                
        else:
            for channel in range(args.channels):
                histogram = img_to_texton_histogram(pic[:, :, channel],
                                                        kmeans[channel],
                                                        args.max_textons,
                                                        args.num_textons,
                                                        1,
                                                        args,
                                                        channel)
                query_histograms[channel] = histogram

            query_histograms = query_histograms.reshape(1, args.num_textons * args.channels)            
                             

        if args.tfidf:
            query_histograms = tfidf.transform(query_histograms).todense()
            histogram = np.ravel(histogram)


        preds = []
        if args.do_separate:

            if args.use_xgboost:
                dtest = xgb.DMatrix(query_histograms)
                pred_x = clf_x.predict(dtest)
                pred_y = clf_y.predict(dtest)
            else:                
                pred_x = clf_x.predict(query_histograms)
                pred_y = clf_y.predict(query_histograms)


            #err_down_x, err_up_x = pred_ints(clf_x, [histogram])
            #err_down_y, err_up_y = pred_ints(clf_y, [histogram])

            #err_x = pred_x - err_down_x
            #err_y = pred_y - err_down_y

            pred = np.array([[pred_x[0], pred_y[0]]])
            #print("pred x is", pred_x)
            #print("classifier is", clf_x)
            xy = (pred_x[0], pred_y[0])
        else:
            for clf in clfs:
                pred = clf.predict(query_histograms)
                #print "Pred is",  pred
                preds.append(pred)

                pred = np.mean(preds, axis=0)
                #print "Averaged pred is", pred
            xy = (pred[0][0], pred[0][1])

        # Print the prediction that is used for plotting
        print(xy)


        # Get particle positions
        if args.use_particle_filter:
            p = pf.move_all(p, xy, dt)
            plt_xs, plt_ys = pf.get_x_y(p)        

        if args.use_sift:
            # Use the SIFT relocalizer so sift_xy is defined; the ground-truth
            # alternative is kept commented out below.
            sift_loc = rel.calcLocationFromPath(img_path)
            sift_loc[1] = y_width - sift_loc[1]
            sift_xy = tuple(sift_loc)
            #sift_x = truth.ix[i, "x"]
            #sift_y = truth.ix[i, "y"]

            sift_ab = AnnotationBbox(sift_imagebox, sift_xy,
                                     xycoords='data',
                                     pad=0.0,
                                     frameon=False)


        if args.use_normal:
            ab = AnnotationBbox(imagebox, xy,
                                xycoords='data',
                                pad=0.0,
                                frameon=False)


        if args.filter:
            my_filter.update(pred.T)
            filtered_pred = (my_filter.x[0][0], my_filter.x[2][0])
            my_filter.predict()
            
            filtered_ab = AnnotationBbox(filter_imagebox, filtered_pred,
                                         xycoords='data',
                                         pad=0.0,
                                         frameon=False)
            

        if args.use_ground_truth:
            ground_truth =  (truth.ix[i, "x"], truth.ix[i, "y"])
            diff =  np.subtract(ground_truth, xy)
            abs_diff = np.fabs(diff)
            errors_x.append(abs_diff[0])
            errors_y.append(abs_diff[1])
            error = np.linalg.norm(abs_diff)
            errors.append(error)
            
            
            # Update predictions graph
            line.set_xdata(xs[max(0, i - 13):i]) 
            line.set_ydata(ys[max(0, i - 13):i])
            
            ab = AnnotationBbox(imagebox, xy,
                                xycoords='data',
                                pad=0.0,
                                frameon=False)

        
        if i == 0:
            if args.show_histogram:
                query_flat = np.ravel(query_histograms)                
                histo_bar = ax_opti.bar(np.arange(len(query_flat)), query_flat)
            img_artist = ax_inflight.imshow(pic[:,:,0])
        else:
            img_artist.set_data(pic[:,:,0])
            if args.use_sift: sift_drone_artist.remove()
            if args.use_particle_filter: particle_plot.remove()
            if args.use_normal:
                drone_artist.remove()
                
                #ebars[0].remove()
                #for line in ebars[1]:
                #    line.remove()
                #for line in ebars[2]:
                #    line.remove()
            if args.filter: filtered_drone_artist.remove()

            if args.show_histogram:
                query_flat = np.ravel(query_histograms)
                for rect, h in zip(histo_bar, query_flat):
                    rect.set_height(h)

        if args.use_particle_filter:
            particle_plot = ax.scatter(plt_xs, plt_ys)
        if args.use_normal:
            drone_artist = ax.add_artist(ab)
            # Plot particle positions
            #ax.add_artist(particle_plot)

            #ebars = ax.errorbar(xy[0], xy[1], xerr=err_x, yerr=err_y, ecolor='b')
        if args.filter: filtered_drone_artist = ax.add_artist(filtered_ab)
        if args.use_sift: sift_drone_artist = ax.add_artist(sift_ab)

        plt.pause(1e-10)

        # Particle filter
        if args.use_particle_filter:
            ws, w_sum = pf.get_weights(p, xy, dt, i)
            new_p = pf.resample_wheel(p, ws, N)

        
        i += 1
        
    # Note: an `else` attached to `while True:` never runs, since the loop is
    # never exited normally; the unknown-mode message presumably belongs with
    # the `args.mode` checks above.
    else:
        print("Unknown mode; Please specify a mode (0, 1, 2)")