Example #1
    def __init__(self, racecar_name, model, height, width):
        self.cv_bridge = CvBridge()
        self.image_topic = str(
            racecar_name) + '/camera/zed/rgb/image_rect_color'
        self.model = load_model(model)
        #this handles the reshaping
        self.util = ImageUtils()
        self.width = width
        self.height = height
        self.classes = ['left', 'right', 'straight', 'weak_left', 'weak_right']
        self.pub = rospy.Publisher(racecar_name + '/drive_parameters',
                                   drive_param,
                                   queue_size=5)
        self.image_sub = rospy.Subscriber(self.image_topic, Image,
                                          self.image_callback)
        #fields for plotting
        self.commands = []
        self.times = []
        self.start_time = time.time()
        #figure for live animation
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.window = 4000

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.commands, self.times),
                                      interval=1000)
        plt.show()
Example #2
    def __init__(self, racecar_name, model, vesc_name):
        self.cv_bridge = CvBridge()

        #synchronize the input teleop messages with the camera messages
        self.image_rect_color = Subscriber(
            str(racecar_name) + '/camera/zed/rgb/image_rect_color', Image)
        self.ackermann_stamped = Subscriber(
            str(vesc_name) + '/ackermann_cmd_mux/input/teleop',
            AckermannDriveStamped)

        #to save the image data get the path
        r = rospkg.RosPack()
        self.save_path_root = r.get_path(
            'race') + '/scripts/computer_vision/data/'

        #load the keras model
        self.model = load_model(
            model, custom_objects={'customAccuracy': self.customAccuracy})
        #this handles the reshaping
        self.util = ImageUtils()
        #get the height and width from the model so we don't get annoying errors
        self.height = self.model.layers[0].input_shape[1]
        self.width = self.model.layers[0].input_shape[2]

        #window that we track
        self.window = 200

        #tolerance
        self.tolerance = 0.02

        #create the time synchronizer
        self.sub = ApproximateTimeSynchronizer(
            [self.image_rect_color, self.ackermann_stamped],
            queue_size=20,
            slop=0.049)
        #register the callback to the synchronizer
        self.sub.registerCallback(self.master_callback)

        #this is for plotting
        self.xs = []
        self.ys = []
        self.count = 0
        # Create figure for plotting
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.xs, self.ys),
                                      interval=1000)
        plt.show()
Example #3
    def __init__(self, car_name, model, height, width):
        self.cv_bridge = CvBridge()
        self.image_topic = str(car_name) + '/camera/zed/rgb/image_rect_color'
        self.model = load_model(
            model, custom_objects={'customAccuracy': self.customAccuracy})
        #this handles the reshaping
        self.util = ImageUtils()
        self.width = width
        self.height = height

        self.pub = rospy.Publisher(car_name + '/LEC_steering_angle',
                                   Steering_Angle,
                                   queue_size=5)
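
This constructor passes custom_objects={'customAccuracy': self.customAccuracy} to load_model, so the class must also define that metric. The snippet stops at the constructor; a definition matching the one shown in Examples #5 and #10 (and requiring from keras import backend as K) would look like:

    #custom regression metric: fraction of predictions within 0.01 rad of the label
    def customAccuracy(self, y_true, y_pred):
        diff = K.abs(y_true - y_pred)   #absolute difference between true and predicted values
        correct = K.less(diff, 0.01)    #boolean tensor, True where the error is below 0.01
        return K.mean(correct)          #ratio of close-enough predictions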
Example #4
    def __init__(self, racecar_name, model, vesc_name):
        self.cv_bridge = CvBridge()

        #synchronize the input teleop messages with the camera messages
        self.image_rect_color = Subscriber(
            str(racecar_name) + '/camera/zed/rgb/image_rect_color', Image)
        self.ackermann_stamped = Subscriber(
            str(vesc_name) + '/ackermann_cmd_mux/input/teleop',
            AckermannDriveStamped)

        #load the keras model
        self.model = load_model(model)
        #this handles the reshaping
        self.util = ImageUtils()
        #get the height and width from the model so we don't get annoying errors
        self.height = self.model.layers[0].input_shape[1]
        self.width = self.model.layers[0].input_shape[2]

        #to save the image data get the path
        r = rospkg.RosPack()
        self.save_path_root = r.get_path(
            'race') + '/scripts/computer_vision/data/'
        self.count = 0

        #image classes
        self.classes = ['left', 'right', 'straight', 'weak_left', 'weak_right']

        #this will be used to display misclassifications
        self.buckets = np.arange(len(self.classes))
        #misclassification counts
        self.misclassifications = np.zeros(len(self.classes))

        #create the time synchronizer
        self.sub = ApproximateTimeSynchronizer(
            [self.image_rect_color, self.ackermann_stamped],
            queue_size=20,
            slop=0.049)
        #register the callback to the synchronizer
        self.sub.registerCallback(self.master_callback)

        #figure for live animation
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.ls = ('left', 'right', 'straight', 'weak_left', 'weak_right')

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.buckets, self.misclassifications),
                                      interval=1000)
        plt.show()
Example #5
class ROS_Daev:

    #define the constructor
    def __init__(self, racecar_name, model, height, width):
        self.cv_bridge = CvBridge()
        self.image_topic = str(
            racecar_name) + '/camera/zed/rgb/image_rect_color'
        self.model = load_model(
            model, custom_objects={'customAccuracy': self.customAccuracy})
        #this handles the reshaping
        self.util = ImageUtils()
        self.width = width
        self.height = height

        self.pub = rospy.Publisher(racecar_name + '/drive_parameters',
                                   drive_param,
                                   queue_size=5)

    #image callback
    def image_callback(self, ros_image):
        #convert the ros_image to an openCV image
        try:
            orig_image = self.cv_bridge.imgmsg_to_cv2(ros_image,
                                                      "bgr8") / 255.0
            cv_image = self.util.reshape_image(orig_image, self.height,
                                               self.width)
            #print(cv_image.shape)
        except CvBridgeError as e:
            print(e)
            return

        predict_image = np.expand_dims(cv_image, axis=0)
        pred = self.model.predict(predict_image)[0] * 0.6108652353

        #Want to keep things consistent
        msg = drive_param()
        msg.angle = pred
        msg.velocity = 1.0
        self.pub.publish(msg)

        cv2.imshow("Image fed to network", predict_image[0])
        print(str(pred))
        cv2.imshow("Original Image", orig_image)
        cv2.waitKey(3)

    #define a custom metric for DAEV, accuracy doesn't cut it
    def customAccuracy(self, y_true, y_pred):
        diff = K.abs(
            y_true -
            y_pred)  #absolute difference between correct and predicted values
        correct = K.less(
            diff, 0.01)  #tensor with 0 for false values and 1 for true values
        return K.mean(correct)  #sum all 1's and divide by the total.
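
For context (an assumption, not shown in these snippets): customAccuracy only needs to be passed through custom_objects because it was registered as a metric when the model was trained, roughly as in the self-contained sketch below, where the metric is a plain function rather than a bound method and the toy model and file name are placeholders:

from keras.models import Sequential, load_model
from keras.layers import Dense
from keras import backend as K

def customAccuracy(y_true, y_pred):
    #same definition as in the class above, written as a free function
    return K.mean(K.less(K.abs(y_true - y_pred), 0.01))

#hypothetical training-side registration of the custom metric
model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss='mse', metrics=[customAccuracy])
model.save('daev_model.h5')

#loading later needs the same symbol available, exactly as in the snippets above
model = load_model('daev_model.h5', custom_objects={'customAccuracy': customAccuracy})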
Example #6
    def __init__(self, racecar_name, model, vesc_name):
        self.cv_bridge = CvBridge()

        #synchronize the input teleop messages with the camera messages
        self.image_rect_color = Subscriber(
            str(racecar_name) + '/camera/zed/rgb/image_rect_color', Image)
        self.ackermann_stamped = Subscriber(
            str(vesc_name) + '/ackermann_cmd_mux/input/teleop',
            AckermannDriveStamped)

        #load the keras model
        self.model = load_model(model)
        #this handles the reshaping
        self.util = ImageUtils()
        #get the height and width from the model so we don't get annoying errors
        self.height = self.model.layers[0].input_shape[1]
        self.width = self.model.layers[0].input_shape[2]

        #image classes
        self.classes = ['left', 'right', 'straight', 'weak_left', 'weak_right']

        #create the time synchronizer
        self.sub = ApproximateTimeSynchronizer(
            [self.image_rect_color, self.ackermann_stamped],
            queue_size=20,
            slop=0.049)
        #register the callback to the synchronizer
        self.sub.registerCallback(self.master_callback)

        #fields for plotting
        self.commands = []
        self.times = []
        #ground truth commands
        self.gt_commands = []
        self.start_time = time.time()
        #figure for live animation
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.window = 4000

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.commands, self.times),
                                      interval=1000)
        plt.show()
Example #7
ap.add_argument("-o",
                "--output",
                required=True,
                help="directory where we will output the model")
args = vars(ap.parse_args())

# initialize the list of data and labels
data = []
commands = []

#desired height and width; these come from the DAVE model
height = 66
width = 200

#load the image utils
iu = ImageUtils()

data, labels = iu.load_from_directory(args['dataset'],
                                      height,
                                      width,
                                      verbose=1,
                                      regression=True)

#normalize the images and commands
commands = labels / 0.6108652353
data = data / 255.0

#End to End data
(EndtrainX, EndtestX, EndtrainY, EndtestY) = train_test_split(data,
                                                              commands,
                                                              test_size=0.05,
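
The snippet is cut off inside the train_test_split call, but it does show the normalization used across these examples: steering labels are divided by 0.6108652353 (roughly 35 degrees expressed in radians, presumably the maximum steering angle) for training, and predictions are multiplied by the same constant at inference time (see Examples #5 and #10). A self-contained sketch of that round trip, with illustrative values and a constant name that is not in the original code:

import numpy as np

MAX_STEER = 0.6108652353          #roughly 35 degrees expressed in radians

labels = np.array([0.31, -0.05])  #illustrative ground-truth steering angles (radians)
commands = labels / MAX_STEER     #training side: scaled to roughly [-1, 1], as in this snippet
restored = commands * MAX_STEER   #inference side: scale the network output back to radians
print(commands, restored)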
Example #8
class ROS_Classify:

    #define the constructor
    def __init__(self, racecar_name, model, height, width):
        self.cv_bridge = CvBridge()
        self.image_topic = str(
            racecar_name) + '/camera/zed/rgb/image_rect_color'
        self.model = load_model(model)
        #this handles the reshaping
        self.util = ImageUtils()
        self.width = width
        self.height = height
        self.classes = ['left', 'right', 'straight', 'weak_left', 'weak_right']
        self.pub = rospy.Publisher(racecar_name + '/drive_parameters',
                                   drive_param,
                                   queue_size=5)
        self.image_sub = rospy.Subscriber(self.image_topic, Image,
                                          self.image_callback)
        #fields for plotting
        self.commands = []
        self.times = []
        self.start_time = time.time()
        #figure for live animation
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.window = 4000

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.commands, self.times),
                                      interval=1000)
        plt.show()

    #image callback
    def image_callback(self, ros_image):
        #convert the ros_image to an openCV image
        try:
            orig_image = self.cv_bridge.imgmsg_to_cv2(ros_image,
                                                      "bgr8") / 255.0
            cv_image = self.util.reshape_image(orig_image, self.height,
                                               self.width)
            #print(cv_image.shape)
        except CvBridgeError as e:
            print(e)
            return

        #reshape the image so we can feed it to the neural network
        predict_image = np.expand_dims(cv_image, axis=0)
        #make the prediction
        pred = self.model.predict(predict_image)
        #publish the actuation command
        self.send_actuation_command(pred)

        #uncomment the following to display the image classification
        #cv2.imshow(self.classes[pred[0].argmax()],predict_image[0])

        #log the result to the console
        print("INFO prediction: {}".format(self.classes[pred[0].argmax()]))

        #uncomment to show the original image
        #cv2.imshow("Original Image",orig_image)
        #cv2.waitKey(3)

    #computes the actuation command to send to the car
    def send_actuation_command(self, pred):
        #create the drive param message
        msg = drive_param()
        msg.angle = 0.0
        msg.velocity = 1.0
        #get the label
        label = self.classes[pred[0].argmax()]

        if (label == "left"):
            msg.angle = 0.4108652353
        elif (label == "right"):
            msg.angle = -0.4108652353
        elif (label == "straight"):
            msg.angle = 0.0
        elif (label == "weak_left"):
            msg.angle = 0.10179
        elif (label == "weak_right"):
            msg.angle = -0.10179
        else:
            print("error:", label)
            msg.velocity = 0
            msg.angle = 0
        self.commands.append(msg.angle)
        self.times.append(time.time() - self.start_time)
        self.pub.publish(msg)
        #plt.show()

    #function that animates the plotting
    def animate(self, i, commands, times):
        # Limit x and y lists to window items
        self.commands = self.commands[-self.window:]
        self.times = self.times[-self.window:]
        # Draw x and y lists
        self.ax.clear()
        self.ax.plot(self.times, self.commands)

        # Format plot
        plt.xticks(rotation=45, ha='right')
        plt.subplots_adjust(bottom=0.30)
        plt.title('Time (s) vs Steering Angle (radians)')
        plt.ylabel('Steering Angle (radians)')
        plt.xlabel('Time (s)')
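
None of these snippets include their entry point. A minimal sketch of how a node like ROS_Classify is presumably launched; the argument handling and node name are assumptions, not from the original source:

#hypothetical launcher for the ROS_Classify node above
import sys
import rospy

if __name__ == '__main__':
    #positional args: racecar_name, path to the Keras model, input height, input width
    args = rospy.myargv(argv=sys.argv)[1:]
    rospy.init_node('ros_classify', anonymous=True)
    #the constructor blocks inside plt.show(), so no rospy.spin() is needed here
    ROS_Classify(args[0], args[1], int(args[2]), int(args[3]))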
Example #9
class Analyze_Discrete:
    def __init__(self, racecar_name, model, vesc_name):
        self.cv_bridge = CvBridge()

        #synchronize the input teleop messages with the camera messages
        self.image_rect_color = Subscriber(
            str(racecar_name) + '/camera/zed/rgb/image_rect_color', Image)
        self.ackermann_stamped = Subscriber(
            str(vesc_name) + '/ackermann_cmd_mux/input/teleop',
            AckermannDriveStamped)

        #load the keras model
        self.model = load_model(model)
        #this handles the reshaping
        self.util = ImageUtils()
        #get the height and width from the model so we don't get annoying errors
        self.height = self.model.layers[0].input_shape[1]
        self.width = self.model.layers[0].input_shape[2]

        #image classes
        self.classes = ['left', 'right', 'straight', 'weak_left', 'weak_right']

        #create the time synchronizer
        self.sub = ApproximateTimeSynchronizer(
            [self.image_rect_color, self.ackermann_stamped],
            queue_size=20,
            slop=0.049)
        #register the callback to the synchronizer
        self.sub.registerCallback(self.master_callback)

        #fields for plotting
        self.commands = []
        self.times = []
        #ground truth commands
        self.gt_commands = []
        self.start_time = time.time()
        #figure for live animation
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.window = 4000

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.commands, self.times),
                                      interval=1000)
        plt.show()

    #master callback for this class
    def master_callback(self, ros_image, ack):
        #preprocess the image
        try:
            orig_image = self.cv_bridge.imgmsg_to_cv2(ros_image, "bgr8") / 255.0
            save_img = self.cv_bridge.imgmsg_to_cv2(ros_image, "bgr8")
            cv_image = self.util.reshape_image(orig_image, self.height,
                                               self.width)
        except CvBridgeError as e:
            print(e)
            return

        #expand the dimensions so that the model can make a prediction
        predict_image = np.expand_dims(cv_image, axis=0)
        #make the prediction
        pred = self.model.predict(predict_image)

        #get the label and convert it to the respective actuation command
        lb = self.classes[pred[0].argmax()]
        act_command = self.translate_to_actuation_angle(lb)
        self.commands.append(act_command)
        steering_command = ack.drive.steering_angle
        self.gt_commands.append(steering_command)
        self.times.append(time.time() - self.start_time)

        #display the images
        #cv2.imshow("Original Image", orig_image)
        #cv2.imshow("Image passed to network", cv_image)
        #cv2.waitKey(3)

    #function that animates the plotting
    def animate(self, i, commands, times):
        # Limit x and y lists to window items
        self.commands = self.commands[-self.window:]
        self.times = self.times[-self.window:]
        self.gt_commands = self.gt_commands[-self.window:]
        # Draw x and y lists
        self.ax.clear()
        self.ax.plot(self.times, self.commands, label='Discrete MiniVGG')
        self.ax.plot(self.times, self.gt_commands, label='Continuous Wall Following')
        self.ax.legend(loc='upper left')

        # Format plot
        plt.xticks(rotation=45, ha='right')
        plt.subplots_adjust(bottom=0.30)
        plt.title('Time (s) vs Steering Angle (radians)')
        plt.ylabel('Steering Angle (radians)')
        plt.xlabel('Time (s)')


    def translate_to_actuation_angle(self, label):
        if (label == "left"):
            angle = 0.4108652353
        elif (label == "right"):
            angle = -0.4108652353
        elif (label == "straight"):
            angle = 0.0
        elif (label == "weak_left"):
            angle = 0.10179
        elif (label == "weak_right"):
            angle = -0.10179
        else:
            print("error:", label)
            angle = 0
        return angle
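
For readers less used to radians, the constants in translate_to_actuation_angle and in label_image (Examples #10 and #11) convert to degrees as follows; this is plain unit conversion added here for reference, not part of the original code:

import math

#discrete actuation angles and classification thresholds, in degrees
for name, rad in [('left / right command', 0.4108652353),
                  ('weak_left / weak_right command', 0.10179),
                  ('straight band limit', 0.0523599),
                  ('weak turn band limit', 0.20179)]:
    print('{:32s} {:.7f} rad = {:5.2f} deg'.format(name, rad, math.degrees(rad)))
#prints roughly 23.54, 5.83, 3.00 and 11.56 degrees respectively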
Example #10
class AnalyzeE2E:
    def __init__(self, racecar_name, model, vesc_name):
        self.cv_bridge = CvBridge()

        #synchronize the input teleop messages with the camera messages
        self.image_rect_color = Subscriber(
            str(racecar_name) + '/camera/zed/rgb/image_rect_color', Image)
        self.ackermann_stamped = Subscriber(
            str(vesc_name) + '/ackermann_cmd_mux/input/teleop',
            AckermannDriveStamped)

        #to save the image data get the path
        r = rospkg.RosPack()
        self.save_path_root = r.get_path(
            'race') + '/scripts/computer_vision/data/'

        #load the keras model
        self.model = load_model(
            model, custom_objects={'customAccuracy': self.customAccuracy})
        #this handles the reshaping
        self.util = ImageUtils()
        #get the height and width from the model so we don't get annoying errors
        self.height = self.model.layers[0].input_shape[1]
        self.width = self.model.layers[0].input_shape[2]

        #window that we track
        self.window = 200

        #tolerance
        self.tolerance = 0.02

        #create the time synchronizer
        self.sub = ApproximateTimeSynchronizer(
            [self.image_rect_color, self.ackermann_stamped],
            queue_size=20,
            slop=0.049)
        #register the callback to the synchronizer
        self.sub.registerCallback(self.master_callback)

        #this is for plotting
        self.xs = []
        self.ys = []
        self.count = 0
        # Create figure for plotting
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.xs, self.ys),
                                      interval=1000)
        plt.show()

    #master callback for this class
    def master_callback(self, ros_image, ack):
        try:
            orig_image = self.cv_bridge.imgmsg_to_cv2(ros_image,
                                                      "bgr8") / 255.0
            cv_image = self.util.reshape_image(orig_image, self.height,
                                               self.width)
            save_img = self.cv_bridge.imgmsg_to_cv2(ros_image, "bgr8")
            #print(cv_image.shape)
        except CvBridgeError as e:
            print(e)
            return

        predict_image = np.expand_dims(cv_image, axis=0)
        pred = self.model.predict(predict_image)[0] * 0.6108652353

        #get the ground-truth angle from the ackermann message and compare it with the
        #prediction; we want the difference below self.tolerance (0.02 rad, about 1.15
        #degrees), which is fine for now
        diff = abs(pred[0] - ack.drive.steering_angle)
        #self.xs.append("%.2f" % float(dt.datetime.now().strftime('%S.%f')))
        self.xs.append(self.count)
        self.ys.append(diff)
        self.count += 1

        if diff > self.tolerance:
            command = '%.10f' % ack.drive.steering_angle
            #replace the period with ~ so it's a valid filename
            command = command.replace('.', '~')
            save_path = self.save_path_root + self.label_image(
                ack.drive.steering_angle) + '/' + str(
                    rospy.Time.now()) + '~' + command + '.jpg'
            print(self.label_image(ack.drive.steering_angle))
            cv2.imwrite(save_path, save_img)

    def animate(self, i, xs, ys):
        # Limit x and y lists to window items
        self.ys = self.ys[-self.window:]
        self.xs = self.xs[-self.window:]

        # Draw x and y lists
        self.ax.clear()
        self.ax.plot(self.xs, self.ys)

        # Format plot
        plt.xticks(rotation=45, ha='right')
        plt.subplots_adjust(bottom=0.30)
        plt.title('Difference between prediction and Ground Truth')
        plt.ylabel('Error')
        plt.xlabel('index of prediction')

    #this is used for saving it to correct directory
    #function that categorizes images into left, weak_left, straight, weak_right, right
    def label_image(self, steering_angle):
        if (steering_angle < -0.20179):
            return "right"
        elif (steering_angle > 0.20179):
            return "left"
        elif (steering_angle < -0.0523599 and steering_angle > -0.20179):
            return "weak_right"
        elif (steering_angle > 0.0523599 and steering_angle < 0.20179):
            return "weak_left"
        else:
            return "straight"

    #define a custom metric for DAEV, accuracy doesn't cut it
    def customAccuracy(self, y_true, y_pred):
        diff = K.abs(
            y_true -
            y_pred)  #absolute difference between correct and predicted values
        correct = K.less(
            diff, 0.01)  #tensor with 0 for false values and 1 for true values
        return K.mean(correct)  #sum all 1's and divide by the total.
Example #11
class AnalyzeClassification:
    def __init__(self, racecar_name, model, vesc_name):
        self.cv_bridge = CvBridge()

        #synchronize the input teleop messages with the camera messages
        self.image_rect_color = Subscriber(
            str(racecar_name) + '/camera/zed/rgb/image_rect_color', Image)
        self.ackermann_stamped = Subscriber(
            str(vesc_name) + '/ackermann_cmd_mux/input/teleop',
            AckermannDriveStamped)

        #load the keras model
        self.model = load_model(model)
        #this handles the reshaping
        self.util = ImageUtils()
        #get the height and width from the model so we don't get annoying errors
        self.height = self.model.layers[0].input_shape[1]
        self.width = self.model.layers[0].input_shape[2]

        #to save the image data get the path
        r = rospkg.RosPack()
        self.save_path_root = r.get_path(
            'race') + '/scripts/computer_vision/data/'
        self.count = 0

        #image classes
        self.classes = ['left', 'right', 'straight', 'weak_left', 'weak_right']

        #this will be used to display misclassifications
        self.buckets = np.arange(len(self.classes))
        #misclassification counts
        self.misclassifications = np.zeros(len(self.classes))

        #create the time synchronizer
        self.sub = ApproximateTimeSynchronizer(
            [self.image_rect_color, self.ackermann_stamped],
            queue_size=20,
            slop=0.049)
        #register the callback to the synchronizer
        self.sub.registerCallback(self.master_callback)

        #figure for live animation
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.ls = ('left', 'right', 'straight', 'weak_left', 'weak_right')

        #Animation
        # Set up plot to call animate() function periodically
        ani = animation.FuncAnimation(self.fig,
                                      self.animate,
                                      fargs=(self.buckets, self.misclassifications),
                                      interval=1000)
        plt.show()

    #master callback for this class
    def master_callback(self, ros_image, ack):
        #preprocess the image
        try:
            orig_image = self.cv_bridge.imgmsg_to_cv2(ros_image, "bgr8") / 255.0
            save_img = self.cv_bridge.imgmsg_to_cv2(ros_image, "bgr8")
            cv_image = self.util.reshape_image(orig_image, self.height,
                                               self.width)
        except CvBridgeError as e:
            print(e)
            return

        #expand the dimensions so that the model can make a prediction
        predict_image = np.expand_dims(cv_image, axis=0)
        #make the prediction
        pred = self.model.predict(predict_image)

        #get the label and ground truth
        lb = self.classes[pred[0].argmax()]
        gt = self.label_image(ack.drive.steering_angle)

        #create the text to show on the openCV image
        label = 'prediction: ' + self.classes[pred[0].argmax()]
        #get the ground truth label
        ground_truth_label = 'ground_truth: ' + self.label_image(
            ack.drive.steering_angle)

        if (lb != gt):
            #get the index of the misclassification
            ind = self.classes.index(lb)
            #increment our counter
            self.misclassifications[ind] += 1
            print(self.misclassifications, lb)

            command = '%.10f' % ack.drive.steering_angle
            #replace the period with ~ so it's a valid filename
            command = command.replace('.', '~')
            save_path = self.save_path_root + self.label_image(
                ack.drive.steering_angle) + '/' + str(
                    rospy.Time.now()) + '~' + command + '.jpg'
            if self.label_image(ack.drive.steering_angle) != 'straight':
                cv2.imwrite(save_path, save_img)
                cv2.imshow("Original Image", save_img)
                cv2.waitKey(25)
                print(lb)
            else:
                print("nah its straight")

        #cv2.putText(orig_image, label, (32, 32), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        #cv2.putText(orig_image, ground_truth_label, (32, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 255), 2)

        #if (self.count % 10 == 0):
        #    cv2.imshow("Original Image", orig_image)
        #    cv2.waitKey(25)
        self.count += 1
    #this was how the training data was collected
    #function that categorizes images into left, weak_left, straight, weak_right, right
    def label_image(self, steering_angle):
        if (steering_angle < -0.20179):
            return "right"
        elif (steering_angle > 0.20179):
            return "left"
        elif (steering_angle < -0.0523599 and steering_angle > -0.20179):
            return "weak_right"
        elif (steering_angle > 0.0523599 and steering_angle < 0.20179):
            return "weak_left"
        else:
            return "straight"

    #function that animates the matplotlib bar chart
    def animate(self, i, buckets, misclassifications):
        # Draw x and y lists
        self.ax.clear()
        self.ax.bar(self.buckets, self.misclassifications, alpha=0.5)

        # Format plot
        #plt.subplots_adjust(bottom=0.30)
        plt.xticks(self.buckets, self.ls)
        plt.ylabel('Number of Misclassifications')
        plt.title('Classification Model Performance')
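
As with the other nodes, the launch code for AnalyzeClassification is not part of the snippet; a minimal sketch (node name assumed, not from the original source), with one practical caveat about the save path:

#hypothetical launcher for the AnalyzeClassification node above
import sys
import rospy

if __name__ == '__main__':
    #positional args: racecar_name, path to the Keras model, vesc_name
    args = rospy.myargv(argv=sys.argv)[1:]
    rospy.init_node('analyze_classification', anonymous=True)
    #note: cv2.imwrite silently returns False if the per-class folders under
    #.../computer_vision/data/ do not exist, so create them before launching
    AnalyzeClassification(args[0], args[1], args[2])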