Code example #1
def main():
    measurement = np.zeros((2,1)) # Measurement vector
    state = np.zeros((2,1))       # Initial state estimate [x, y]
    pose  = Pose2D()
    kalman = Kalman()
    navdata = navdata_info()

    # conversions
    x_conversion = 0.64
    y_conversion = 0.36

    rospy.init_node('filtered_tag_data')
    pub_kalman = rospy.Publisher('/filtered_tag_pose', Pose2D, queue_size=100)

    rate = rospy.Rate(200) # 200Hz

    while not rospy.is_shutdown():
        measurement[0, 0] = navdata.tag_x * x_conversion
        measurement[1, 0] = navdata.tag_y * y_conversion

        kalman.state_callback()
        kalman.measurement_callback(measurement)

        state = kalman.x

        pose.x = state[0, 0]
        pose.y = state[1, 0]

        pub_kalman.publish(pose)

        rate.sleep()
Code example #2
	def __init__(self):
		self.depthSub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.depth_callback)
		self.pose = rospy.Subscriber('/pose', PoseStamped, self.coordinate_callback)
		
		self.d1 = None # Distance from robot to landmark 1. 
		self.d2 = None # Distance from robot to landmark 2.

		self.odom_initial_x = None
		self.odom_initial_y = None

		self.linear_velocity = 0
		self.angular_velocity = 0

		self.pose_x = 0
		self.pose_y = 0
		self.angular_displacement = 0

		self.delta_t = 0 # Change in time between pose messages.
		self.delta_d = 0 # Change in distance between pose messages.
		self.delta_angle = 0

		self.current_time = rospy.get_time()

		# Initial State: [x, y, phi, linear velocity]
		self.starting_state = np.array([[START_X, START_Y, 0, 0]]).reshape(4, 1) # Convert to 4x1 vector.

		# Initial velocity: [linear velocity, angular velocity]
		self.control_state = np.array([[0, 0]]).reshape(2, 1)

		self.filter = Kalman(self.starting_state, self.control_state)

		rospy.sleep(2)
Code example #3
def main():
    measurement = np.zeros((2, 1))  # Measurement vector
    state = np.zeros((2, 1))  # Initial state estimate [x, y]
    pose = Pose2D()
    kalman = Kalman()
    navdata = navdata_info()

    rospy.init_node('filtered_tag_data')
    pub_kalman = rospy.Publisher('/kalman', Pose2D, queue_size=100)

    rate = rospy.Rate(200)  # 200Hz

    while not rospy.is_shutdown():
        # measurement[0, 0] = navdata.norm_tag_x
        # measurement[1, 0] = navdata.norm_tag_y
        # measurement[2, 0] = navdata.tag_vx
        # measurement[3, 0] = navdata.tag_vy

        measurement[0, 0] = navdata.tag_x
        measurement[1, 0] = navdata.tag_y

        kalman.state_callback()
        kalman.measurement_callback(measurement)

        state = kalman.x

        pose.x = state[0, 0]
        pose.y = state[1, 0]

        pub_kalman.publish(pose)

        rate.sleep()
Code example #4
    def doKalman(self, X, Y):
        self.Kfilter = Kalman(X, Y)
        Mu, Sigma = self.Kfilter.runKalman(self.bzrc)

        enemyX = Mu.item((0, 0))
        enemyY = Mu.item((3, 0))

        distance = Point(self.tank.x,
                         self.tank.y).distance(Point(enemyX, enemyY))

        if distance > 450:
            return

        pred = (distance / float(self.constants['shotspeed'])) * (1.0 / t)  # 't' is defined outside this excerpt
        pred = int(pred + 1)
        MuPred, garbage = self.Kfilter.predictiveKalman(Mu, Sigma, pred)
        targetX, targetY = MuPred.item((0, 0)), MuPred.item((3, 0))
        deltaX, deltaY = self.getDeltaXY(self.tank, Point(targetX, targetY))

        theta = math.atan2(deltaY, deltaX)
        theta = self.normalize_angle(theta - self.tank.angle)

        #distance = Point(tank.x, tank.y).distance(Point(targetX,targetY))

        if distance <= 390 and (theta < 0.2 and theta > -0.2):
            shoot = True
        else:
            shoot = False

        self.kalmanCommand(deltaX, deltaY, theta, shoot, self.tank)
Code example #5
    def __init__(self, historyKal, dimKal, historyBayes, distType=None):

        # First check Kalman related parameters
        if historyKal < 1 or dimKal < 1:
            print('Hybrid module reporting: ')
            raise ValueError(
                'Improper value entered for history length and/or dimension of observation matrix (Kalman)...'
            )

        if dimKal < 2:
            print('Hybrid module reporting: ')
            print(
                'Caution! Low accuracy due to the size of the observation matrix. (Affecting mostly Kalman)'
            )

        # Now check for Bayes related parameters
        if historyBayes < 1:
            print('Hybrid module reporting: ')
            raise ValueError('History length is too small (Bayes)...')

        # Final check to ensure smooth operations
        if historyBayes < historyKal:
            print('Hybrid module reporting: ')
            raise ValueError(
                'History length for the Bayesian system must be greater than or equal to that of Kalman.'
            )

        # Check if the distribution is supported
        if distType is None:
            correctionType = 'none'  # Initialise type of data to be handled/corrected (will be detected)
        else:
            if distType.lower() in distAllowed:
                # Initialise type of data to be handled/corrected (user defined)
                correctionType = distAllowed[distType.lower()]
            else:
                print('Hybrid module reporting: ')
                raise ValueError(
                    'Distribution not currently implemented for the system.')

        # Now ready to use the input (could defer to classes but thought better to do it here as well)
        self.KF = Kalman(historyKal, dimKal)
        self.BS = Bayes(historyBayes, correctionType)
Code example #6
    def __init__(self, unique_name, refresh_rate):
        self.unique_name = unique_name
        self.source_frame = "/map"
        self.target_frame = "/{}/base_footprint".format(self.unique_name)
        self.state = Kalman(1, 0.2)

        self.route = []
        self.last_route_timestamp = rospy.Time().now()
        self.route_timeout = rospy.Duration.from_sec(5)
        # use only every Xth point in the path; this saves a lot of computing
        # power, good values may change with different configs
        self.route_skip_points = 25
        self.refresh_rate = refresh_rate

        self.tflistener = tf.TransformListener()

        self.subscriber = rospy.Subscriber(
            "{}/move_base/DWAPlannerROS/global_plan".format(self.unique_name),
            Path,
            self.handle_robot_path,
            queue_size=1)
Code example #7
    def init_kalman(self):
        deltaT = 1 / self.fps
        print(deltaT)

        F = np.matrix([[1, 0, deltaT, 0], [0, 1, 0, deltaT], [0, 0, 1, 0],
                       [0, 0, 0, 1]])
        B = np.matrix([[deltaT**2 / 2, 0], [0, deltaT**2 / 2], [deltaT, 0],
                       [0, deltaT]])
        H = np.matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

        model_noise = 1
        measurement_noise = 0.1

        R = measurement_noise**2 * np.matlib.identity(4) / deltaT
        Q = model_noise**2 * np.matlib.identity(4) * deltaT

        init_mu = np.matrix([[0.], [0.]])  # This is the acceleration estimate
        init_P = np.matlib.identity(4)

        init_x = np.matrix([[0], [0], [0], [0]])
        new_kalman = Kalman(F, B, Q, H, R, init_x, init_mu, init_P, deltaT)

        return new_kalman, F, B, Q, H, R, init_mu, init_P, deltaT
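
The matrices above (F, B, H, Q, R) are only constructed here; the filtering itself happens inside the projects' Kalman classes, which are not shown in these excerpts. As a point of reference, the sketch below walks through one generic predict/update cycle that such matrices would drive, written against plain NumPy. It is a textbook illustration under assumed names (kalman_step, x, P, u, z), not the API of any Kalman class used in these examples.

# Minimal sketch: one generic Kalman predict/update cycle for a constant-velocity
# model with an acceleration control input, like the matrices built above.
# Illustrative only; not taken from any of the listed projects.
import numpy as np

def kalman_step(x, P, u, z, F, B, H, Q, R):
    # Predict: propagate the state and its covariance through the motion model.
    x_pred = F @ x + B @ u
    P_pred = F @ P @ F.T + Q

    # Update: correct the prediction with the measurement z.
    S = H @ P_pred @ H.T + R                    # innovation covariance
    K = P_pred @ H.T @ np.linalg.inv(S)         # Kalman gain
    x_new = x_pred + K @ (z - H @ x_pred)
    P_new = (np.eye(P.shape[0]) - K @ H) @ P_pred
    return x_new, P_new

# Usage with the same structure as init_kalman() above (dt = 1/fps, assumed values):
dt = 1.0 / 30.0
F = np.array([[1, 0, dt, 0], [0, 1, 0, dt], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=float)
B = np.array([[dt**2 / 2, 0], [0, dt**2 / 2], [dt, 0], [0, dt]], dtype=float)
H = np.eye(4)
Q = 1.0**2 * np.eye(4) * dt
R = 0.1**2 * np.eye(4) / dt
x, P = np.zeros((4, 1)), np.eye(4)
u = np.zeros((2, 1))                            # assumed acceleration input
z = np.array([[1.0], [2.0], [0.0], [0.0]])      # assumed measurement [x, y, vx, vy]
x, P = kalman_step(x, P, u, z, F, B, H, Q, R)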
Code example #8
    modVar = 0.5

    pltStart = 1000
    pltNum = 45
    figSize = (11, 9)

    # Create some imaginary data
    obs = np.random.normal(obsMean, obsVar, size=totNum)
    model = obs + np.random.normal(modBias, modVar, size=len(obs))

    # Define the plot range and size of the figure
    pltRange = range(pltStart, pltStart + pltNum)

    # Start Testing
    # Create an instance of the filter
    kf = Kalman(history, order)

    # Use the first #history of them for training
    obs_train = obs[:history]
    model_train = model[:history]

    # The rest to be used dynamically
    obs_dyn = obs[history:]
    model_dyn = model[history:]
    fcst = np.zeros_like(obs_dyn)

    # Perform an initial training of the model
    kf.trainMe(obs_train, model_train)

    for ij in range(len(obs_dyn)):
        # Provide a correction to the forecast
Code example #9
    [280],  # x'
    [3000],  # y
    [120]  # y'
])
initial_covar = numpy.array([
    [400, 0, 0, 0],  # x
    [0, 25, 0, 0],  # x'
    [0, 0, 0, 0],  # y
    [0, 0, 0, 0]  # y'
])

acceleration = numpy.array([[2], [0]])

dt = 1

kfilter = Kalman(initial_state, initial_covar)

observation = numpy.array([
    [4260],  # x
    [282],  # x'
    [0],  # y
    [0]  # y'
])
kfilter.estimate(observation, acceleration, dt)

observation = numpy.array([
    [4550],  # x
    [285],  # x'
    [0],  # y
    [0]  # y'
])
Code example #10
def main():
    s = Strapdown()
    K = Kalman()
    rot_mean = toVector(0., 0., 0.)
    acc_mean = toVector(0., 0., 0.)
    mag_mean = toVector(0., 0., 0.)

    # read sensors
    filePath = "data\\adafruit10DOF\\10min_calib_360.csv"
    d = CSVImporter(filePath,
                    columns=range(0, 10),
                    skip_header=7,
                    hasTime=True)
    accelArray, rotationArray, magneticArray = convArray2IMU(d.values)

    # variable sample rate test
    time = d.values[:, 0]
    diff = ([x - time[i - 1] for i, x in enumerate(time)][1:])
    diff.insert(0, 0.0134981505504)

    # realtime 3D plot
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    plt.ion()

    start = datetime.now()
    phi_list = []
    theta_list = []
    psi_list = []
    for i in range(1, int(47999)):  #62247
        dt = diff[i]
        # for 10 - 15 minutes
        if not s.isInitialized:
            rot_mean = runningAverage(rot_mean, rotationArray[:, i], 1 / i)
            acc_mean = runningAverage(acc_mean, accelArray[:, i], 1 / i)
            mag_mean = runningAverage(mag_mean, magneticArray[:, i], 1 / i)
            if i >= 45500:  #100*60*7.5 :
                s.Initialze(acc_mean, mag_mean)
                gyroBias = rot_mean  #toVector(0.019686476,-0.014544547,0.002910090)

                print('STRAPDOWN INITIALIZED with %i values\n' % (i, ))
                print('Initial bearing\n', s.getOrientation() * 180 / pi)
                print('Initial velocity\n', s.getVelocity())
                print('Initial position\n', s.getPosition())
                print('Initial gyro Bias\n', gyroBias * 180 / pi)
        else:
            if i % 10 == 0:  # plot area
                plt.cla()
                fig.texts = []
                plot3DFrame(s, ax)
                plt.draw()
                plt.pause(0.01)

            phi, theta, psi = rad2deg(s.getOrientation())
            phi_list.append(phi)
            theta_list.append(theta)
            psi_list.append(psi)

            acceleration = accelArray[:, i]
            rotationRate = rotationArray[:, i] - gyroBias
            magneticField = magneticArray[:, i]

            s.quaternion.update(rotationRate, dt)
            s.velocity.update(acceleration, s.quaternion, dt)
            s.position.update(s.velocity, dt)

            K.timeUpdate(s.quaternion, dt)
#             if i%10 == 0:
#                 K.measurementUpdate(acceleration, magneticField, s.quaternion)
#
#                 bearingOld = s.getOrientation()
#                 errorQuat = Quaternion(K.bearingError)
#                 s.quaternion *= errorQuat
#                 #s.quaternion.update(K.bearingError/DT) #angle = rate*DT
# #                 print(s.quaternion.values)
#                 bearingNew = s.getOrientation()
# #                 print("Differenz zwischen neuer und alter Lage \n",rad2deg(bearingNew-bearingOld))
#                 gyroBias = gyroBias+K.gyroBias
#                 K.resetState()

    print("Differenz zwischen x Bias End-Start: %E" %
          rad2deg(gyroBias[0] - rot_mean[0]))
    print("Differenz zwischen y Bias End-Start: %E" %
          rad2deg(gyroBias[1] - rot_mean[1]))
    print("Differenz zwischen z Bias End-Start: %E\n" %
          rad2deg(gyroBias[2] - rot_mean[2]))
    print("RMS_dphi = %f deg" % rad2deg(sqrt(K.P[0, 0])))
    print("RMS_dthe = %f deg" % rad2deg(sqrt(K.P[1, 1])))
    print("RMS_dpsi = %f deg" % rad2deg(sqrt(K.P[2, 2])))
    print("RMS_bx = %f deg/s" % rad2deg(sqrt(K.P[3, 3])))
    print("RMS_by = %f deg/s" % rad2deg(sqrt(K.P[4, 4])))
    print("RMS_bz = %f deg/s\n" % rad2deg(sqrt(K.P[5, 5])))
    print('final orientation\n', s.getOrientation() * 180 / pi)
    end = datetime.now()
    diff = end - start
    print('Time needed = ', diff.total_seconds())
    print("RMS_phi = %f deg, max = %f, min = %f\n" %
          (std(phi_list), max(phi_list), min(phi_list)))
    print("RMS_theta = %f deg, max = %f, min = %f\n" %
          (std(theta_list), max(theta_list), min(theta_list)))
    print("RMS_psi = %f deg, max = %f, min = %f\n" %
          (std(psi_list), max(psi_list), min(psi_list)))
    plt.show()
Code example #11
    ax2.legend()

    # Save the first figure, named after the chosen error values, in the 'Assets' directory
    fig.savefig(directorioAssets + "/" + tipo_sensor + " dt_" + str(dt) +
                " Q_" + str(_Q) + ".png")
    # Save the second figure (Kalman gain), named after the chosen error values, in the 'Assets' directory
    fig2.savefig(directorioAssets + "/Ganancia_Kalman " + tipo_sensor +
                 " dt_" + str(dt) + " Q_" + str(_Q) + ".png")

    plt.show()


if __name__ == "__main__":
    # Create the Kalman object, passing 4 values as parameters.
    ka = Kalman(F, Q, H, R)
    i = 1  # Counter written to the txt file to identify the iterations

    llenarDatos()
    verificarCarpetas()
    archivo = open(directorio + "/Datos_" + tipo_sensor + " dt_" + str(dt) +
                   " Q_" + str(_Q) + ".txt",
                   "w",
                   encoding='utf-8')
    archivo.write("I, Z,   Z_f,   dt = " + str(dt) + "\t Q = " + str(_Q) +
                  "\n")

    for dato_z in datos_x:
        x_f = np.dot(H, ka.predecir())[0]
        x_filtrada.append(x_f)
        ganancia.append(ka.actualizar(dato_z)[0])
Code example #12
    def __init__(self, Kp, Ki, n, A, B, C, Q, R):
        self.Kp = Kp
        self.Ki = Ki
        self.u = 0.0
        self.err = 0.0
        self.kalman = Kalman(n, A, B, C, Q, R)
Code example #13
# Estimating DSGE by Maximum Likelihood in Python
# Author: Michal Miktus
# Date: 20.03.2019
# Import libraries
from LinApp_Solve import LinApp_Solve
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import numpy as np
import pickle
from numpy import vstack, hstack, zeros, eye
from Linear_Time_Iteration import Linear_Time_Iteration
# %matplotlib inline
# Set printing options
np.set_printoptions(precision=3, suppress=True, linewidth=120)
pd.set_option(
    'float_format',
    lambda x: '%.3g' % x,
)
# --------------------------------------------------------------------------
# -- Define the model
# --------------------------------------------------------------------------
#  VARIABLES   [M,3] cell array: Variable name (case sensitive) ~ Variable type ~ Description
#              Variable type: 'X' ... Endogenous state variable
#                             'Y' ... Endogenous other (jump) variable
#                             'Z' ... Exogenous state variable
#                             'U' ... Innovation to exogenous state variable
#                             ''  ... Skip variable
variable_symbols = [
    r'$e_a$', r'$e_I$', r'$e_b$', r'$e_L$', r'$e_G$', r'$e_pi$', r'$pi$',
Code example #14
from Kalman import Kalman
import smbus
import time
import math

kalmanX = Kalman()
kalmanY = Kalman()

RestrictPitch = True
radToDeg = 57.2957786
kalmanAngleX = 0
kalmanAngleY = 0

# Some MPU6050 registers and their addresses
PWR_MGMT_1   = 0x6B
SMPLRT_DIV   = 0x19
CONFIG       = 0x1A
GYRO_CONFIG  = 0x1B
INT_ENABLE   = 0x38

ACCEL_XOUT_H = 0x3B #ACCEL_XOUT_L = 0x3C
ACCEL_YOUT_H = 0x3D #ACCEL_YOUT_L = 0x3E
ACCEL_ZOUT_H = 0x3F #ACCEL_ZOUT_L = 0x40

GYRO_XOUT_H  = 0x43 #GYRO_XOUT_L = 0x44
GYRO_YOUT_H  = 0x45 #GYRO_YOUT_L = 0x46
GYRO_ZOUT_H  = 0x47 #GYRO_ZOUT_L = 0x48

# Read the gyro and accelerometer values from MPU6050
def MPU_Init():
	#write to sample rate register
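
The snippet above is cut off inside MPU_Init(). For orientation, each sensor value in the register map (ACCEL_XOUT_H, GYRO_XOUT_H, ...) is stored as a high/low byte pair, so one raw reading is assembled from two I2C reads. The sketch below is a generic illustration of that pattern with smbus; the bus number, the device address 0x68 and the helper name read_raw_data are assumptions, not part of the original file.

# Generic sketch of reading one signed 16-bit MPU6050 value over I2C (smbus).
# Assumes I2C bus 1 and the MPU6050 default address 0x68; illustrative only.
import smbus

bus = smbus.SMBus(1)
DEVICE_ADDRESS = 0x68

def read_raw_data(addr):
    # Each measurement spans two registers: high byte at addr, low byte at addr + 1.
    high = bus.read_byte_data(DEVICE_ADDRESS, addr)
    low = bus.read_byte_data(DEVICE_ADDRESS, addr + 1)
    value = (high << 8) | low
    # Convert the unsigned 16-bit result to a signed value (two's complement).
    if value > 32767:
        value -= 65536
    return value

# e.g. acc_x = read_raw_data(ACCEL_XOUT_H)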
Code example #15
## Simple talker demo that listens to std_msgs/Strings published
## to the 'chatter' topic
from Kalman import Kalman
from std_msgs.msg import Int64
import numpy as np
import rospy
from my_pack.msg import NumArray

K1 = Kalman(10, 0.05, 0.00001)


def callback(data):
    #rospy.loginfo(rospy.get_caller_id() + 'I heard %s', data.data)

    result = K1.Kalman_continue(np.array(data.numArray).tolist())
    pub = rospy.Publisher('Distance', Int64, queue_size=5)
    pub.publish(result[0])

    pub1 = rospy.Publisher('rawdata', Int64, queue_size=5)
    pub1.publish(result[1])
    #print "Final Value ",result
Code example #16
    def run_optical_flow_with_kalman(self,
                                     window_name,
                                     use_transformation=False):
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setMouseCallback(window_name, self.on_mouse)

        # Init kalman filter params
        new_kalman, F, B, Q, H, R, init_mu, init_P, deltaT = self.init_kalman()
        kalman_array = [new_kalman]

        # Create a mask image for drawing purposes
        ret, old_frame = self.video.read()
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

        if use_transformation:
            old_gray = cv2.warpPerspective(old_gray, self.h, self.size)
            old_gray = self.rotateImage(old_gray, -90)

        mask = np.zeros_like(old_frame)

        has_completed_one_cycle = False

        while True:
            # Start timer
            timer = cv2.getTickCount()

            p0 = np.array([[[0, 0]]], dtype=np.float32)

            ret, frame = self.video.read()

            cv2.imwrite('Orig.png', frame)
            cv2.waitKey(10)

            throw_away_frame, background = self.subtract_background(frame)

            if use_transformation:
                frame = cv2.warpPerspective(frame, self.h, self.size)
                background = cv2.warpPerspective(background, self.h, self.size)
                frame = self.rotateImage(frame, -90)
                background = self.rotateImage(background, -90)

            background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if has_completed_one_cycle:
                for point in self.trackedPoints:
                    temp_array = np.append(p0, [[[point[0], point[1]]]],
                                           axis=0)
                    p0 = np.array(temp_array, dtype=np.float32)

                new_points_to_track = self.find_new_trackpoints(background)
                for bbox in new_points_to_track:
                    point_x = bbox[0] + bbox[2] / 2
                    point_y = bbox[1] + bbox[3] / 2
                    temp_array = np.append(p0, [[[point_x, point_y]]], axis=0)
                    p0 = np.array(temp_array, dtype=np.float32)
                    init_x = np.matrix([[point_x], [point_y], [0], [0]])
                    new_kalman = Kalman(F, B, Q, H, R, init_x, init_mu, init_P,
                                        deltaT)
                    kalman_array.append(new_kalman)

                p0 = np.delete(p0, 0, axis=0)

                self.trackedPoints.clear()

            has_completed_one_cycle = True

            # Perform optical flow
            old_gray, kalman_array = self.increment_optical_flow_kalman(
                old_gray, frame_gray, mask, frame, p0, kalman_array)

            # Draw kalman
            self.draw_kalman_speed(frame, kalman_array, 40)

            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            # Display FPS on frame
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Display result
            cv2.imshow(window_name, frame)

            # cv2.imwrite('Kalman.png', frame)
            # cv2.waitKey(500)
            # print("NOW!")
            # cv2.waitKey(500)

            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break
Code example #17
            _, binary = cv2.threshold(img_blur, 25, 255, cv2.THRESH_BINARY)

            # Morphology
            kernel = np.ones((4, 4), np.uint8)
            binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)

            # find circles
            color, sorted_pt2 = find_circles(binary, color)

            # Kalman filter
            filtered_pts = np.zeros((4, 2))
            pts_velocity = []
            for i in range(0, len(sorted_pt2)):
                x_m = sorted_pt2[i][0]
                y_m = sorted_pt2[i][1]
                x_h, y_h, vx, vy, param = Kalman(x_m, y_m, Kalman_params[i])
                Kalman_params[i] = param
                print(param.get_x())
                filtered_pts[i, :] = np.array([x_h, y_h])
                pts_velocity.append(vx)
                pts_velocity.append(vy)
            list_pts_vel.append(pts_velocity)
            """
            # print the location
            retP, rvec, tvec = cv2.solvePnP(object_points, filtered_pts, cameraMtx, distCoff)
            text = 'Location: [' + \
                   str(int(tvec[0][0])) + ', ' + \
                   str(int(tvec[1][0])) + ', ' + \
                   str(int(tvec[2][0])) + ']'
            color = cv2.putText(color, text, (20, 20), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
            """
Code example #18
File: MainThread.py (project: fjafjan/Tracker)
    def run(self):
        ## Short explanatory text re parameters, options and state.
        ## PARAMETERS
        ## Parameters are numbers where there is some optimal or obvious value, they might be tweaked between runs
        ## but ultimately we want to stick with one.
        ##
        ## OPTIONS
        ## Options are typically yes or no questions, but more generally they are simple things that you might want to change
        ## between runs simply depending on what you want to do. Are you debugging? If so, printing output is nice.
        ##
        ## STATE
        ## State values are things that change during a run, it's all the stuff like the position of the particle
        ## the speed of the step engine, etc etc.

        # parameters holding object
        self.kalman = Kalman(fps_max=10,
                             going_right=True,
                             middle=self.parameters.middle)

        # option holding object
        options = self.options
        options.testing_disconnected = False
        options.printing_output = True
        #		options.starting_left   	= True
        #		options.needs_average 		= False
        options.saving_directory = "E:/staffana/Feb 26/"
        print "starting left is ", options.starting_left
        # state holding object. Could be replaced if we implement a Kalman filter
        #self.state			= DetectorState(self.parameters, options)

        # Object stores the times taken for various computations in this class.
        timedata = TimeData()  # The "class" that handles all the times we measure

        stepfile = open("MetaData/step_data.txt", 'w')
        stepfile.write(
            "i \t re_rod_pos  \t rod vel  \t step vel \t\t correction_vel \t\t new velocity \n"
        )

        #### CONNECT TO THE STEP MOTOR AND LET THE USER FIND A PARTICLE
        if not options.testing_disconnected:
            aver_im, self.state = InitializeStepEngine(self.order_list,
                                                       self.im_list, options,
                                                       self.parameters,
                                                       timedata)
            #~ state.step_vel, aver_im, state.speed_error, state.vel_fac, state.going_right = outputs

            state = self.state  # I think this is safe.
            ### TRY TO OPEN A SEPARATE WINDOW WHERE THE USER CAN PRESS STOP MAYBE BUT FIRST JUST SHOW IT
        else:
            #			dirt_im = Image.open("Images/Averaging/average_image.jpg")
            dirt_im = Img.open("C:/test.jpg")
            dirt_arr = np.array(dirt_im)
            aver_im = dirt_arr.astype(np.float32)

        ## NOW WE START THE (POSSIBLY) NEVER ENDING THE LOOP OF TRACKING THE PARTICLE
        for j in range(1, 30000):
            timedata.iteration_start = time.clock()
            timedata.pos_save_total = 0
            #print "before running SaveStep pos our timedata pos save total was ", timedata.pos_save_total
            ignorePosition = SaveStepPos(options, timedata)
            #print "After running SaveStep pos our timedata pos save total is ", timedata.pos_save_total
            if j % 2 == 0:
                i = j / 2
            else:
                continue

            print "\n our current frame is ", i, "\n"
            #### ACQUIRE AND CROP THE IMAGE
            timedata.image_old = timedata.image_start
            timedata.image_start = time.clock()
            im = ImageGrab.grab(self.parameters.box)
            if options.printing_output:
                im.save("Images/cropped image" + str(i) + ".jpg")
            if self.image_to_show == "original":
                self.im_list.put(im)
            timedata.image_end = time.clock()

            #### PERFORM EDGE DETECTION AND CONTOUR DETECTION
            timedata.contour_start = time.clock()

            contours, pix = GetContours(im, aver_im, 190,
                                        options.printing_output, i)
            timedata.contour_end = time.clock()

            best_contour_nr, positions = DetectParticle(
                state, contours, i, timedata, pix, self.parameters,
                options)  # i is the current nr of runs
            # To save space we could possibly merge contours with state? It seems a bit weird either way...
            ## Updates the state based on the particle we have, or have not, found.
            state.update(best_contour_nr, contours, positions, self.parameters,
                         i, timedata, options)
            self.kalman.predict(state, self.parameters.step_2_pixel)
            self.kalman.update(state)
            print " the found position we think was ", state.pos_approx
            print "Kalman predicted the position to be ", [
                self.kalman.x_pred.item(0),
                self.kalman.x_pred.item(2)
            ]
            print "Kalman thinks our actual position is ", [
                self.kalman.x_est.item(0),
                self.kalman.x_est.item(2)
            ],
            if best_contour_nr < 0:
                continue
            ## If we are printing output we save a picture of the chosen contour
            if options.printing_output:
                try:
                    cv2.drawContours(pix, [contours[best_contour_nr]], -1,
                                     (0, 255, 0), 3)
                except IndexError as ie:
                    print "contours is ", contours, " and the best contour number is ", best_contour_nr
                    raise
                im = Image.fromarray(pix)
                if self.image_to_show == "main_contour":
                    self.im_list.put(im)
                    #print "WE ADDED AN IMAGEHOORAY HOORAY \n \n"

                im.save("Images/main_contour" + str(i) + ".jpg")

            ## Saves the time now that we are done with detecting the particle
            timedata.detection_end = time.clock()

            #### 	MEASURE THE POSITION AND VELOCITY OF THE STEP ENGINE
            if not options.testing_disconnected:
                state.currentPosition = SaveStepPos(options, timedata)

                ## I HAVE CHANGED SAVE STEP POS, NEED TO CHANGE THE DEFINITION
                state.update_correction_vector(state,
                                               self.parameters,
                                               options,
                                               current_frame=j)
                state.corr_vec *= 0
                # this should probably also be a state operation.. it would really finish cutting down on the main loop!

                ####	AND THEN DO THE REQUIRED CORRECTIONS
                timedata.step_control_start = time.clock()
                timedata.step_control_end = time.clock()

                ### IF WE ARE TOO CLOSE TO EITHER INLET WE WANT TO REVERSE THE PUMP
                state.check_pump()

        #### Check the image queue and the order queue if we should stop or something
            self.check_queues()

            #### SLEEP ANY TIME THAT IS LEFT
            timedata.iteration_end = time.clock()
            dt = timedata.iteration_end - timedata.iteration_start
            time_to_sleep = max(0, (1 / self.parameters.fps_max) - dt)
            timedata.write_times()  ## Think of a better name for this function...
            #	print "dt = ", dt, "time to sleep is ", time_to_sleep
            #	print "we are taking approximately ", (dt*fps_max)*100 ,"% of the CPU"
            time.sleep(time_to_sleep)
            print " \n"  # just to separate the iterations more.