示例#1
0
 def __init__(self):
     """Set up color thresholds, geometry constants, and the framerate timer."""
     # Framerate timer (reports every 100 frames at LOG_INFO):
     self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)

     # HSV threshold pairs per color; red carries two (min, max) ranges
     # because its hue wraps around the 180/0 boundary:
     self.colors_thre = {
         "green": [(42, 60, 80), (90, 255, 255)],
         "blue": [(95, 100, 100), (120, 255, 255)],
         "red": [(140, 100, 80), (180, 255, 255), (0, 100, 100), (10, 255, 255)],
     }

     # BGR channel index used when isolating each color:
     self.color_channel = {"blue": 0, "green": 1, "red": 2}
     # Per-color mean threshold (red is more permissive):
     self.mean_theshold = {"blue": 0.5, "green": 0.5, "red": 0.3}
     # BGR drawing colors:
     self.color = {"blue": (255, 0, 0), "green": (0, 255, 0), "red": (0, 0, 255)}

     # Offsets of the candidate placement cells:
     self.places_offset = [(0, 1, 0, 0), (1, 0, 0, 0), (1, 1, 0, 0)]

     # (z, tilt): z is the height above the atoms; tilt 0 looks down,
     # ~315 (dynamixel units) looks to the front:
     self.pos = (145, 0)

     # Precomputed helpers and Hough-circle bounds:
     self.H_rc_to_vc = self.compute_rotation_matrix()
     self.minRadius = BASE_MIN_RADIUS
     self.maxRadius = BASE_MAX_RADIUS
     self.minDist = 2 * self.minRadius
     self.border_mask = self.get_border_mask()
示例#2
0
    def __init__(self):
        """Initialize the timer, capture counters, and camera calibration.

        NOTE(review): targetData.txt is opened here and kept open for the
        lifetime of the module; presumably closed elsewhere or on exit --
        confirm.
        """
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)

        # Image-capture bookkeeping:
        self.imagecount = 1
        self.minimagecount = 142
        self.maximagecount = 360
        self.currentloopcount = 0
        self.maxloopcount = 15

        # 3x3 intrinsic camera matrix (fx, fy on the diagonal; cx, cy in the
        # last column), built as one literal instead of element-wise stores:
        self.cameraMatrix = np.array([[329.0, 0.0, 120.0],
                                      [0.0, 329.0, 160.0],
                                      [0.0, 0.0, 1.0]])

        # 1x5 distortion coefficients (k1, k2, p1, p2, k3):
        self.distcoeff = np.array([[0.236, -0.290, -0.00115, -0.00087, -2.4]])

        # Start at a random image index in 1..50001 (inclusive). The original
        # also pre-assigned minimagecount here, but that store was dead --
        # immediately overwritten by this one -- so it was removed:
        self.currentimagecount = random.randint(1, 50001)

        self.datafile = open("targetData.txt", "w+")
        self.written = False
 def __init__(self):
     """Set up the timer, status-parsing regex, and 3D object model points."""
     # Instantiate a JeVois Timer to measure our processing framerate:
     self.timer = jevois.Timer("sandbox", 25, jevois.LOG_DEBUG)

     # A bunch of standard init stuff:
     self.prevlcent = 0
     self.prevrcent = 0
     self.resetcounter = 0
     self.frame = 0
     self.framerate_fps = 0
     self.CPULoad_pct = 0
     self.CPUTemp_C = 0
     self.pipelineDelay_us = 0

     # Parses "<fps> fps, <load>% CPU, <temp>C," out of the Timer status
     # string. Raw string literal: the original non-raw literal's "\." is an
     # invalid escape sequence (DeprecationWarning, SyntaxWarning on newer
     # Pythons); the compiled pattern itself is unchanged.
     self.pattern = re.compile(r'([0-9]*\.[0-9]+|[0-9]+) fps, ([0-9]*\.[0-9]+|[0-9]+)% CPU, ([0-9]*\.[0-9]+|[0-9]+)C,')

     # Real-world object points, with (0,0,0) the centerpoint of the line
     # connecting the two closest points:
     self.ObjPoints = np.array([(0, -5.3771, -5.3248),
                                (0, -7.3134, -4.8241),
                                (0, -5.9363, 0.5008),
                                (0, -4.0000, 0),
                                (0, 5.3771, -5.3248),
                                (0, 4.0000, 0),
                                (0, 5.9363, 0.5008),
                                (0, 7.3134, -4.8241)], dtype=np.float64)
    def __init__(self):
        """Select, configure, and load a DNN classification model."""
        # Input pre-processing parameters expected by the network:
        self.confThreshold = 0.2    # Confidence threshold (0..1); higher is stricter
        self.inpWidth = 227         # Resized image width passed to network
        self.inpHeight = 227        # Resized image height passed to network
        self.scale = 1.0            # Value scaling factor applied to input pixels
        self.mean = [104, 117, 123] # Mean BGR value subtracted from input image
        self.rgb = True             # True if model expects RGB inputs, otherwise BGR

        # Select one of the models:
        model = 'SqueezeNet'            # SqueezeNet v1.1, Caffe model

        # You should not have to edit anything beyond this point.
        backend = cv.dnn.DNN_BACKEND_DEFAULT
        target = cv.dnn.DNN_TARGET_CPU
        self.classes = None

        # Resolve the file names for the chosen model:
        classnames = None
        if model == 'SqueezeNet':
            classnames = '/jevois/share/opencv-dnn/classification/synset_words.txt'
            modelname = '/jevois/share/opencv-dnn/classification/squeezenet_v1.1.caffemodel'
            configname = '/jevois/share/opencv-dnn/classification/squeezenet_v1.1.prototxt'

        # Load the class names, one label per line:
        if classnames:
            with open(classnames, 'rt') as f:
                self.classes = f.read().rstrip('\n').split('\n')

        # Load the network and pin it to the selected backend/target:
        net = cv.dnn.readNet(modelname, configname)
        net.setPreferableBackend(backend)
        net.setPreferableTarget(target)
        self.net = net

        self.timer = jevois.Timer('Neural classification', 10, jevois.LOG_DEBUG)
        self.model = model
示例#5
0
    def __init__(self):
        """Initialize the timer, capture counters, and camera calibration."""
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)

        # Image-capture bookkeeping:
        self.imagecount = 1
        self.minimagecount = 142
        self.maximagecount = 360
        self.currentimagecount = self.minimagecount
        self.currentloopcount = 0
        self.maxloopcount = 10

        # 3x3 intrinsic camera matrix (fx, fy on the diagonal; cx, cy in the
        # last column), written as one literal rather than element-wise stores:
        self.cameraMatrix = np.array([[659.0, 0.0, 342.0],
                                      [0.0, 658.0, 250.0],
                                      [0.0, 0.0, 1.0]])

        # 1x5 distortion coefficients (k1, k2, p1, p2, k3):
        self.distcoeff = np.array([[0.236, -0.290, -0.00115, -0.00087, -2.4]])
示例#6
0
    def __init__(self):
        """Set HSV detection thresholds, object size, and the framerate timer."""
        # HSV color range to use.
        # H: 0=red (avoid: hue wraps around), 30=yellow, 45=light green,
        #    60=green, 75=green cyan, 90=cyan, 105=light blue, 120=blue,
        #    135=purple, 150=pink
        # S: 0 = unsaturated (whitish discolored object) ... 255 = fully saturated
        # V: 0 = dark ... 255 = maximally bright
        self.HSVmin = np.array([70, 10, 100], dtype=np.uint8)
        self.HSVmax = np.array([90, 255, 255], dtype=np.uint8)

        # Physical size of the U-shaped object, in meters:
        self.owm = 0.280  # width
        self.ohm = 0.175  # height

        # Other processing parameters:
        self.epsilon = 0.015                  # Shape smoothing factor (higher = smoother)
        self.hullarea = (20 * 20, 300 * 300)  # Object area range to track (pixels)
        self.hullfill = 50                    # Max fill ratio of the convex hull (percent)
        self.ethresh = 900                    # Shape error threshold (lower = stricter)
        self.margin = 5                       # Margin from frame borders (pixels)

        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("FirstPython", 100, jevois.LOG_INFO)
 def __init__(self):
     """Load known face names from the training-data directory."""
     # Instantiate a JeVois Timer to measure our processing framerate:
     self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)
     self.frame = 0
     # One name per subdirectory of training-data/. (The original also
     # pre-assigned [] here; that store was dead -- immediately overwritten
     # by this listdir() result -- so it was removed.)
     self.facenames = os.listdir(
         'modules/JeVois/PythonSandbox/training-data/')
     self.facename = ""
示例#8
0
 def __init__(self):
     """Initialize the frame counter, demo accumulator, and record file."""
     # Instantiate a JeVois Timer to measure our processing framerate:
     self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)

     # Simple frame counter used to demonstrate sendSerial():
     self.frame = 0
     # Demo value, incremented over time:
     self.countup = 0.0
     # Record file; kept open as an attribute for the module's lifetime:
     self.datafile = open("serialrecord.txt", "w+")
示例#9
0
 def __init__(self):
     """Define per-color HSV thresholds and the framerate timer."""
     # Instantiate a JeVois Timer to measure our processing framerate:
     self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)
     # (min, max) HSV ranges per color; red gets two ranges because its hue
     # wraps around the 180/0 boundary:
     self.colors_thre = {
         "green": [(42, 70, 100), (70, 255, 255)],
         "blue": [(95, 100, 100), (120, 255, 255)],
         "red": [(175, 100, 100), (180, 255, 255), (0, 100, 100), (10, 255, 255)],
     }
示例#10
0
    def __init__(self):
        """Set network input parameters; the network itself is loaded lazily."""
        # Pre-processing parameters expected by the emotion network:
        self.inpWidth = 64          # Resized image width passed to network
        self.inpHeight = 64         # Resized image height passed to network
        self.scale = 1.0            # Value scaling factor applied to input pixels
        self.mean = [127,127,127]   # Mean BGR value subtracted from input image
        self.rgb = False            # True if model expects RGB inputs, otherwise BGR

        # This network takes a while to load from microSD. To avoid timeouts
        # at construction, it is loaded in process() instead.
        self.timer = jevois.Timer('Neural emotion', 10, jevois.LOG_DEBUG)
        self.frame = 0  # simple frame counter used to demonstrate sendSerial()
示例#11
0
    def __init__(self):
        """Create the KCF tracker and reset tracking state."""
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)

        # Whether to draw overlays on the output image:
        self.draw = True

        # OpenCV KCF tracker instance:
        self.tracker = cv2.TrackerKCF_create()

        # Tracking state:
        self.hasTarget = False
        self.frame = 1
        self.runcount = 1
示例#12
0
    def __init__(self):
        """Initialize pipeline parameters to presets, then optionally override
        the HSL thresholds and min contour area from
        modules/Lightning/Template/vals.txt.
        """
        jevois.LINFO("Preseason2019 Constructor")
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)

        # HSL threshold defaults ([min, max] per channel):
        self.__hsl_threshold_hue = [0, 255.0]
        self.__hsl_threshold_saturation = [0, 255.0]
        self.__hsl_threshold_luminance = [0, 255.0]

        self.hsl_threshold_output = None

        self.__find_contours_input = self.hsl_threshold_output
        self.__find_contours_external_only = False

        self.find_contours_output = None

        # Contour filter defaults:
        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 0.0
        self.__filter_contours_min_perimeter = 0
        self.__filter_contours_min_width = 0
        self.__filter_contours_max_width = 1000
        self.__filter_contours_min_height = 0
        self.__filter_contours_max_height = 1000
        self.__filter_contours_solidity = [0, 100]
        self.__filter_contours_max_vertices = 1000000
        self.__filter_contours_min_vertices = 0
        self.__filter_contours_min_ratio = 0
        self.__filter_contours_max_ratio = 1000

        self.filter_contours_output = None

        # Try to load saved tuning values. Expected format: a '[' followed by
        # at least seven comma-separated numbers (hue min/max, saturation
        # min/max, luminance min/max, min area). Changes from the original:
        # 'with' guarantees the file is closed even if parsing throws (the
        # old code leaked the descriptor on a parse error); the pointless
        # fInit.mode check was dropped; and the bare 'except:' was narrowed
        # to the failures this block can actually produce.
        try:
            with open("modules/Lightning/Template/vals.txt", "r") as f_init:
                vals_init = f_init.read().split('[')[1].split(',')
            self.__hsl_threshold_hue[0] = float(vals_init[0])
            self.__hsl_threshold_hue[1] = float(vals_init[1])
            self.__hsl_threshold_saturation[0] = float(vals_init[2])
            self.__hsl_threshold_saturation[1] = float(vals_init[3])
            self.__hsl_threshold_luminance[0] = float(vals_init[4])
            self.__hsl_threshold_luminance[1] = float(vals_init[5])
            self.__filter_contours_min_area = float(vals_init[6])
        except (OSError, IndexError, ValueError):
            # Missing/unreadable file, malformed content, or unparsable
            # numbers: keep the presets above (best-effort, as before).
            jevois.LINFO("Error loading parameters from file")

        self.sendFrames = True

        jevois.LINFO("END CONSTRUCTOR")
示例#13
0
    def process(self, inframe, outframe):
        """Grab a frame, run four parallel Canny passes, and stream the result."""
        # Lazily create the worker pool and timer on the first frame only:
        if not hasattr(self, 'pool'):
            # Default process count = number of CPU cores:
            self.pool = mp.Pool()
            # Timer to measure our processing framerate:
            self.timer = jevois.Timer("PythonParallel", 100, jevois.LOG_INFO)

        # Next camera image as OpenCV grayscale (may block until captured):
        gray = inframe.getCvGRAY()

        # Start timing (input conversion time is not counted):
        self.timer.start()

        # Launch 4 Canny edge detections in parallel, each with different
        # thresholds (10*x, 20*x for x in 1..4):
        jobs = [self.pool.apply_async(computefunc, args=(gray, 10 * x, 20 * x))
                for x in range(1, 5)]

        # Collect every result before failing: get() them all, remember the
        # last worker exception, and rethrow it only once all results have
        # been gathered:
        edge_maps = []
        err = 0
        for job in jobs:
            try:
                edge_maps.append(job.get(timeout=10))
            except Exception as e:
                err = e
        if err:
            raise err

        # Stitch the four edge maps side by side into one output image:
        out = np.hstack(edge_maps)

        # Overlay frames/s info (output conversion time is not counted):
        fps = self.timer.stop()
        h, w = out.shape
        cv2.putText(out, fps, (3, h - 6), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, 255, 1, cv2.LINE_AA)

        # Convert to video output format and send to the host over USB:
        outframe.sendCvGRAY(out)
示例#14
0
    def processNoUSB(self, inframe):
        """Headless variant of process(): run four parallel Canny passes and
        report the result sizes over serial instead of streaming video."""
        # Create a parallel processing pool and a timer, if needed (on first frame only):
        if not hasattr(self, 'pool'):
            # create a multiprocessing pool, not specifying the number of processes, to use the number of cores:
            self.pool = mp.Pool()
            # Instantiate a JeVois Timer to measure our processing framerate:
            self.timer = jevois.Timer("PythonParallel", 100, jevois.LOG_INFO)

        # Get the next camera image (may block until it is captured) and convert it to OpenCV GRAY:
        inimggray = inframe.getCvGRAY()

        # Start measuring image processing time (NOTE: does not account for input conversion time):
        self.timer.start()

        # Detect edges using the Canny algorithm from OpenCV, launching 4 instances in parallel:
        futures = [self.pool.apply_async(computefunc, args=(inimggray, 10 * x, 20 * x))
                   for x in range(1, 5)]

        # Collect the results, handling any exception thrown by the workers. We get() all the
        # results first, then rethrow the last exception received, if any, so that all results
        # are collected before we bail out on an exception:
        results = []
        error = 0
        for ii in range(4):
            try:
                results.append(futures[ii].get(timeout=10))
            except Exception as e:
                error = e
        if error: raise error

        # In real modules, we would do something with the results... Here, just report their
        # sizes. Renamed the accumulator to 'msg': the original used 'str',
        # shadowing the builtin for the rest of the function.
        msg = ""
        for ii in range(4):
            h, w = results[ii].shape
            msg += "Canny {}: {}x{}    ".format(ii, w, h)

        # Send a message to serout:
        jevois.sendSerial(msg)

        # Report frames/s info to serlog:
        self.timer.stop()
示例#15
0
    def __init__(self):
        """Set up timers, camera geometry, and detection parameters."""
        # Framerate timer and per-stage profiler:
        self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)
        self.profiler = jevois.Profiler("Profiler", 100, jevois.LOG_INFO)

        # Frame size; None until known:
        self.w = None
        self.h = None

        # Camera / target geometry:
        self.fov = 60.0                        # field of view, degrees
        self.tape_diag = math.hypot(2.0, 5.5)  # tape diagonal, inches
        self.tape_fw = 700                     # 349.96 for a 90 degree lens
        self.target_w = 11.3115                # target width, inches

        # HSV thresholds and morphology:
        self.hsv_min = (60, 40, 40)
        self.hsv_max = (90, 255, 255)
        self.open_kernel = np.ones((7, 7), np.uint8)
        self.stretch_factor = 16.0

        # Time-synchronization state:
        self.last_ping = None
        self.time_offset = 0.0
        self.requested_time = None
示例#16
0
    def __init__(self):
        """Configure HSV thresholds, camera geometry, and targeting parameters."""
        # HSV color range to use.
        # H: 0=red (avoid: hue wraps around), 30=yellow, 45=light green,
        #    60=green, 75=green cyan, 90=cyan, 105=light blue, 120=blue,
        #    135=purple, 150=pink, 180=red
        # S: 0 = unsaturated (whitish discolored object) ... 255 = fully saturated
        # V: 0 = dark ... 255 = maximally bright
        self.HSVmin = np.array([45, 60, 60], dtype=np.uint8)
        self.HSVmax = np.array([90, 255, 255], dtype=np.uint8)

        # Physical object size, in meters:
        self.owm = 0.275  # width
        self.ohm = 0.150  # height

        # Camera values:
        self.fov = 1.13446  # field of view in radians
        self.width = 640    # width of resolution
        self.height = 480   # height of resolution

        # Other processing parameters:
        self.epsilon = 0.019                  # Shape smoothing factor (higher = smoother)
        self.hullarea = (10 * 20, 300 * 300)  # Object area range to track (pixels)
        self.hullfill = 50                    # Max fill ratio of the convex hull (percent)
        self.ethresh = 900                    # Shape error threshold (lower = stricter)
        self.margin = 5                       # Margin from frame borders (pixels)
        self.mToFt = 3.28084                  # Conversion of meters to feet
        self.cameraAngle = 0.401426           # Angle up from the floor surface (radians)

        # Averaging accumulators (translation / rotation sums and count):
        self.tsum = [[0], [0], [0]]
        self.rsum = [[0], [0], [0]]
        self.sumCount = 0

        # Targeting variables:
        # Ratio between the distance of top points and of bottom points
        # between two targets:
        self.targetRatio = 300.0 / 275.0
        self.pxThreshold = 20   # How close the target may be to the image edge
        self.percentFill = 0.1  # Relative amount the U-shape map will be filled

        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("FirstPython", 100, jevois.LOG_INFO)
示例#17
0
    def __init__(self):
        """Initialize HSV threshold bounds and drawing options."""
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)

        # Whether to draw overlays on the output:
        self.draw = True

        # HSV threshold bounds, one (lower, upper) pair per channel:
        self.lowerH, self.upperH = 57, 84
        self.lowerS, self.upperS = 222, 255
        self.lowerV, self.upperV = 62, 255

        # Camera settings used during tuning: gain = 20, exposure = 27.

        # Placeholder for the serialized HSV string:
        self.stringForHSV = "hi"
示例#18
0
    def __init__(self):
        """Set the reference image, object dimensions, and matching threshold."""
        # Full file name of the training image:
        self.fname = "/jevois/modules/JeVois/PythonObject6D/images/reference.png"

        # Physical size of the reference object, converted from inches:
        self.owm = 48 * 0.0254     # width in meters (specs call for 48 inches)
        self.ohm = 77.75 * 0.0254  # height in meters (specs call for 77.75 inches)

        # Window within the object for which we compute 3D pose: top-left
        # corner in meters relative to the top-left corner of the full
        # reference object, plus window width and height in meters:
        self.wintop = (77.75 - 18) * 0.0254  # top of exchange window is 18in from ground
        self.winleft = 6.88 * 0.0254         # left of exchange window is 6.88in from left edge
        self.winw = (12 + 9) * 0.0254        # exchange window is 1ft 9in wide
        self.winh = (12 + 4.25) * 0.0254     # exchange window is 1ft 4-1/4in tall

        # Descriptor distance threshold (lower is stricter for exact matches):
        self.distth = 50.0

        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("PythonObject6D", 100, jevois.LOG_INFO)
示例#19
0
    def __init__(self):
        """Set color thresholds, camera geometry, and contour filters."""
        # HSV range for yellow objects:
        self.lower_yellow = numpy.array([20, 200, 100])
        self.upper_yellow = numpy.array([40, 255, 255])

        # All distance dimensions are in inches:
        self.cameraDisplacement = 11.25
        self.armDisplacement = 15
        self.actualDimension = 17.0
        self.actualDistance = 82
        self.pixelDimension = 78
        # Pinhole model: focal length from a known-size object at a known distance:
        self.focalLength = self.pixelDimension * self.actualDistance / self.actualDimension

        # Contour acceptance criteria (areas in pixels, ratios unitless):
        self.minArea = 2000
        self.maxArea = 120000
        self.minAspectRatio = 0.7
        self.maxAspectRatio = 1.4
        self.minExtent = 0.6

        # Buffers holding the four most recent angle/distance readings:
        self.recentAngles = [0, 0, 0, 0]
        self.recentDistances = [0, 0, 0, 0]
        self.timer = jevois.Timer("FrcRobot", 100, jevois.LOG_INFO)
示例#20
0
    def __init__(self):
        """Initialize counters, HSV thresholds, target state, and stat parsing."""
        jevois.LINFO("CasseroleVision Constructor...")

        # Frame index:
        self.frame = 0

        # USB send frame decimation:
        # Reduces send rate by this factor to limit USB bandwidth at high process rates.
        self.frame_dec_factor = 6  # At 60FPS, this still delivers 10FPS to the driver

        # Processing tune constants
        # TODO - Pick better constants
        self.hsv_thres_lower = np.array([0, 0, 220])
        self.hsv_thres_upper = np.array([255, 255, 255])

        # Target information (sent as strings):
        self.tgtAngle = "0.0"
        self.tgtRange = "0.0"
        self.tgtAvailable = "f"

        # Timer and variables to track statistics we care about:
        self.timer = jevois.Timer("CasseroleVisionStats", 25, jevois.LOG_DEBUG)

        # Regex for parsing info out of the status returned from the jevois
        # Timer class. Raw string literal: the original non-raw literal's
        # "\." is an invalid escape sequence (DeprecationWarning,
        # SyntaxWarning on newer Pythons); the compiled pattern is unchanged.
        self.pattern = re.compile(
            r'([0-9]*\.[0-9]+|[0-9]+) fps, ([0-9]*\.[0-9]+|[0-9]+)% CPU, ([0-9]*\.[0-9]+|[0-9]+)C,'
        )

        # Tracked stats:
        self.framerate_fps = "0"
        self.CPULoad_pct = "0"
        self.CPUTemp_C = "0"
        self.pipelineDelay_us = "0"

        # Data structure holding info about the targets processed from the image frame:
        self.curTargets = []

        jevois.LINFO("CasseroleVision construction Finished")
示例#21
0
 def __init__(self):
     """Create the diff player and the framerate timer."""
     # Timer reporting framerate every 1000 frames:
     self.timer = jevois.Timer("sandbox", 1000, jevois.LOG_INFO)
     # Previous frame; None until the first frame has been seen:
     self.old_frame = None
     # Project-defined DiffPlayer instance:
     self.player = DiffPlayer()
示例#22
0
    def __init__(self):
        """GRIP-generated pipeline setup: define every stage's parameters.

        Each stage's input attribute is seeded from the previous stage's
        *_output attribute; all outputs start as None and are presumably
        filled in by the processing code -- confirm against process().
        """
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)

        # SPECIAL REPLACED BLUR CONSTANT
        self.__blur_type = 0

        # ###################################################################################################
        # ALL CONSTANTS GO UNDER HERE (make sure to remove the self.__blur_type line)

        # Blur stage:
        self.__blur_radius = 2.8301886792452833

        self.blur_output = None

        # Extract one channel from the blurred image (index 1; presumably
        # green for a BGR frame -- confirm frame ordering):
        self.__cv_extractchannel_src = self.blur_output
        self.__cv_extractchannel_channel = 1.0

        self.cv_extractchannel_output = None

        # Binary threshold of the extracted channel:
        self.__cv_threshold_src = self.cv_extractchannel_output
        self.__cv_threshold_thresh = 100.0
        self.__cv_threshold_maxval = 255.0
        self.__cv_threshold_type = cv2.THRESH_BINARY

        self.cv_threshold_output = None

        # Mask the blurred image with the threshold result:
        self.__mask_input = self.blur_output
        self.__mask_mask = self.cv_threshold_output

        self.mask_output = None

        # Min-max normalization of the masked image into 0..255:
        self.__normalize_input = self.mask_output
        self.__normalize_type = cv2.NORM_MINMAX
        self.__normalize_alpha = 0.0
        self.__normalize_beta = 255.0

        self.normalize_output = None

        # HSV threshold ([min, max] per channel):
        self.__hsv_threshold_input = self.normalize_output
        self.__hsv_threshold_hue = [62.71186440677964, 104.4033670815371]
        self.__hsv_threshold_saturation = [93.64406779661016, 255.0]
        self.__hsv_threshold_value = [0.0, 255.0]

        self.hsv_threshold_output = None

        # Erode stage. NOTE(review): iterations is 0.0, which presumably
        # makes this stage a no-op -- confirm intended.
        self.__cv_erode_src = self.hsv_threshold_output
        self.__cv_erode_kernel = None
        self.__cv_erode_anchor = (-1, -1)
        self.__cv_erode_iterations = 0.0
        self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
        self.__cv_erode_bordervalue = (-1)

        self.cv_erode_output = None

        # Dilate stage. NOTE(review): iterations is also 0.0 here -- confirm.
        self.__cv_dilate_src = self.cv_erode_output
        self.__cv_dilate_kernel = None
        self.__cv_dilate_anchor = (-1, -1)
        self.__cv_dilate_iterations = 0.0
        self.__cv_dilate_bordertype = cv2.BORDER_REFLECT101
        self.__cv_dilate_bordervalue = (-1)

        self.cv_dilate_output = None

        # Contour detection (external contours only):
        self.__find_contours_input = self.cv_dilate_output
        self.__find_contours_external_only = True

        self.find_contours_output = None

        # Contour filter parameters (sizes in pixels, solidity in percent):
        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 25.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 8.0
        self.__filter_contours_max_width = 140.0
        self.__filter_contours_min_height = 16.0
        self.__filter_contours_max_height = 240.0
        self.__filter_contours_solidity = [70.62146892655366, 100]
        self.__filter_contours_max_vertices = 1000000.0
        self.__filter_contours_min_vertices = 0.0
        self.__filter_contours_min_ratio = 0.25
        self.__filter_contours_max_ratio = 0.9

        self.filter_contours_output = None
 def __init__(self):
     """Initialize the framerate timer and detection state."""
     self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)
     # Accumulated items (project-specific; consumed elsewhere):
     self.storedlist = []
     # Activation counter/flag; starts inactive:
     self.GraterActivation = 0
示例#24
0
    def __init__(self):
        """GRIP-generated pipeline setup: define every stage's parameters.

        Each stage's input attribute is seeded from the previous stage's
        *_output attribute; all outputs start as None and are presumably
        filled in by the processing code -- confirm against process().
        """
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)

        # SPECIAL REPLACED BLUR CONSTANT
        self.__blur_type = 0

        # ###################################################################################################
        # ALL CONSTANTS GO UNDER HERE (make sure to remove the self.__blur_type line)

        # Blur stage:
        self.__blur_radius = 5.0229474757776655

        self.blur_output = None

        # Extract one channel from the blurred image (index 1; presumably
        # green for a BGR frame -- confirm frame ordering):
        self.__cv_extractchannel_src = self.blur_output
        self.__cv_extractchannel_channel = 1.0

        self.cv_extractchannel_output = None

        # Binary threshold. NOTE(review): maxval 264.0 exceeds the 255
        # maximum of an 8-bit image -- confirm intended:
        self.__cv_threshold_src = self.cv_extractchannel_output
        self.__cv_threshold_thresh = 48.0
        self.__cv_threshold_maxval = 264.0
        self.__cv_threshold_type = cv2.THRESH_BINARY

        self.cv_threshold_output = None

        # Mask the blurred image with the threshold result:
        self.__mask_input = self.blur_output
        self.__mask_mask = self.cv_threshold_output

        self.mask_output = None

        # Min-max normalization. NOTE(review): beta 500.0 exceeds the usual
        # 255 upper bound for 8-bit output -- confirm intended:
        self.__normalize_input = self.mask_output
        self.__normalize_type = cv2.NORM_MINMAX
        self.__normalize_alpha = 0.0
        self.__normalize_beta = 500.0

        self.normalize_output = None

        # HSV threshold ([min, max] per channel):
        self.__hsv_threshold_input = self.normalize_output
        self.__hsv_threshold_hue = [0.0, 64.49197860962566]
        self.__hsv_threshold_saturation = [0.0, 255.0]
        self.__hsv_threshold_value = [45.621468926553675, 255.0]

        self.hsv_threshold_output = None

        # Erode stage (3 iterations):
        self.__cv_erode_src = self.hsv_threshold_output
        self.__cv_erode_kernel = None
        self.__cv_erode_anchor = (-1, -1)
        self.__cv_erode_iterations = 3.0
        self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
        self.__cv_erode_bordervalue = (-1)

        self.cv_erode_output = None

        # Dilate stage (5 iterations):
        self.__cv_dilate_src = self.cv_erode_output
        self.__cv_dilate_kernel = None
        self.__cv_dilate_anchor = (-1, -1)
        self.__cv_dilate_iterations = 5.0
        self.__cv_dilate_bordertype = cv2.BORDER_CONSTANT
        self.__cv_dilate_bordervalue = (-1)

        self.cv_dilate_output = None

        # Contour detection (external contours only):
        self.__find_contours_input = self.cv_dilate_output
        self.__find_contours_external_only = True

        self.find_contours_output = None

        # Contour filter parameters (sizes in pixels, solidity in percent):
        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 450.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 0.0
        self.__filter_contours_max_width = 50.0
        self.__filter_contours_min_height = 0.0
        self.__filter_contours_max_height = 100.0
        self.__filter_contours_solidity = [0.0, 100.0]
        self.__filter_contours_max_vertices = 1000000.0
        self.__filter_contours_min_vertices = 0.0
        self.__filter_contours_min_ratio = 0.0
        self.__filter_contours_max_ratio = 1000000.0

        self.filter_contours_output = None
示例#25
0
 def __init__(self):
     """Set up the demo frame counter and the framerate timer."""
     # A simple frame counter used to demonstrate sendSerial():
     self.frame = 0

     # Instantiate a JeVois Timer to measure our processing framerate:
     self.timer = jevois.Timer("sample", 100, jevois.LOG_INFO)
    def __init__(self):
        """GRIP-generated "spectrum" pipeline setup: define every stage's
        parameters.

        Each stage's input attribute is seeded from the previous stage's
        *_output attribute; all outputs start as None and are presumably
        filled in by the processing code -- confirm against process().
        """
        # Instantiate a JeVois Timer to measure our processing framerate:
        self.timer = jevois.Timer("spectrum", 100, jevois.LOG_INFO)

        # SPECIAL REPLACED BLUR CONSTANT
        self.__blur_type = 0

        # ###################################################################################################
        # ALL CONSTANTS GO UNDER HERE (make sure to remove the self.__blur_type line)

        # Blur stage:
        self.__blur_radius = 5.0229474757776655

        self.blur_output = None

        # Extract one channel from the blurred image (index 1; presumably
        # green for a BGR frame -- confirm frame ordering):
        self.__cv_extractchannel_src = self.blur_output
        self.__cv_extractchannel_channel = 1.0

        self.cv_extractchannel_output = None

        # Binary threshold of the extracted channel:
        self.__cv_threshold_src = self.cv_extractchannel_output
        self.__cv_threshold_thresh = 30.0
        self.__cv_threshold_maxval = 255.0
        self.__cv_threshold_type = cv2.THRESH_BINARY

        self.cv_threshold_output = None

        # Mask the blurred image with the threshold result:
        self.__mask_input = self.blur_output
        self.__mask_mask = self.cv_threshold_output

        self.mask_output = None

        # Min-max normalization of the masked image into 0..255:
        self.__normalize_input = self.mask_output
        self.__normalize_type = cv2.NORM_MINMAX
        self.__normalize_alpha = 0.0
        self.__normalize_beta = 255.0

        self.normalize_output = None

        # HSV threshold ([min, max] per channel):
        self.__hsv_threshold_input = self.normalize_output
        self.__hsv_threshold_hue = [66.36690647482014, 94.9130331623807]
        self.__hsv_threshold_saturation = [0.0, 255.0]
        self.__hsv_threshold_value = [41.384942893143105, 255.0]

        self.hsv_threshold_output = None

        # Erode stage (2 iterations):
        self.__cv_erode_src = self.hsv_threshold_output
        self.__cv_erode_kernel = None
        self.__cv_erode_anchor = (-1, -1)
        self.__cv_erode_iterations = 2.0
        self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
        self.__cv_erode_bordervalue = (-1)

        self.cv_erode_output = None

        # Dilate stage (1 iteration):
        self.__cv_dilate_src = self.cv_erode_output
        self.__cv_dilate_kernel = None
        self.__cv_dilate_anchor = (-1, -1)
        self.__cv_dilate_iterations = 1.0
        self.__cv_dilate_bordertype = cv2.BORDER_CONSTANT
        self.__cv_dilate_bordervalue = (-1)

        self.cv_dilate_output = None

        # Contour detection (external contours only):
        self.__find_contours_input = self.cv_dilate_output
        self.__find_contours_external_only = True

        self.find_contours_output = None

        # Contour filter parameters (sizes in pixels, solidity in percent):
        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 25.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 0.0
        self.__filter_contours_max_width = 1000.0
        self.__filter_contours_min_height = 0.0
        self.__filter_contours_max_height = 1000.0
        self.__filter_contours_solidity = [0.0, 100]
        self.__filter_contours_max_vertices = 1000000.0
        self.__filter_contours_min_vertices = 0.0
        self.__filter_contours_min_ratio = 0.3
        self.__filter_contours_max_ratio = 0.9

        self.filter_contours_output = None
# ===== 示例#27 (Example #27) =====
 def __init__(self):
     # Constructor: set up per-instance resources for this JeVois module.
     # Instantiate a JeVois Timer to measure our processing framerate:
     # (reports average FPS every 100 frames at LOG_INFO level)
     self.timer = jevois.Timer("processing timer", 100, jevois.LOG_INFO)
# ===== 示例#28 (Example #28) =====
    def __init__(self):
        """Constructor: create the framerate timer and the GRIP pipeline settings.

        All attributes below are configuration for a GRIP-generated vision
        pipeline; the per-stage ``*_output`` attributes start as ``None`` and
        are presumably filled in when the pipeline runs on a frame (confirm
        against the processing method elsewhere in this module).
        """
        # JeVois timer: reports average FPS every 100 frames at LOG_INFO level.
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)

        #################################################################################################
        # BEGIN GRIP CONSTANTS
        #################################################################################################

        # Stage outputs: all empty until the pipeline is executed.
        self.blur_output = None
        self.hsl_threshold_output = None
        self.cv_erode_output = None
        self.cv_dilate_output = None
        self.find_contours_output = None
        self.convex_hulls_output = None
        self.filter_contours_output = None
        self.mask_output = None

        # Blur stage parameters.
        self.__blur_type = 1
        self.__blur_radius = 5.405405405405405

        # HSL threshold: keep pixels inside these hue/saturation/luminance bands.
        self.__hsl_threshold_input = self.blur_output
        self.__hsl_threshold_hue = [32.37410071942446, 66.36363636363635]
        self.__hsl_threshold_saturation = [75.67446043165468, 255.0]
        self.__hsl_threshold_luminance = [29.81115107913669, 184.16666666666666]

        # Erode stage: kernel=None means the implementation's default kernel.
        self.__cv_erode_src = self.hsl_threshold_output
        self.__cv_erode_kernel = None
        self.__cv_erode_anchor = (-1, -1)
        self.__cv_erode_iterations = 5.0
        self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
        self.__cv_erode_bordervalue = -1

        # Dilate stage: mirrors the erode settings.
        self.__cv_dilate_src = self.cv_erode_output
        self.__cv_dilate_kernel = None
        self.__cv_dilate_anchor = (-1, -1)
        self.__cv_dilate_iterations = 5.0
        self.__cv_dilate_bordertype = cv2.BORDER_CONSTANT
        self.__cv_dilate_bordervalue = -1

        # Contour detection: external_only=False keeps nested contours too.
        self.__find_contours_input = self.cv_dilate_output
        self.__find_contours_external_only = False

        # Convex hulls computed over the detected contours.
        self.__convex_hulls_contours = self.find_contours_output

        # Contour filter: geometric limits a hull must satisfy to survive.
        self.__filter_contours_contours = self.convex_hulls_output
        self.__filter_contours_min_area = 3000.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 0.0
        self.__filter_contours_max_width = 1000.0
        self.__filter_contours_min_height = 0.0
        self.__filter_contours_max_height = 1000.0
        self.__filter_contours_solidity = [0, 100]
        self.__filter_contours_max_vertices = 1000000.0
        self.__filter_contours_min_vertices = 0.0
        self.__filter_contours_min_ratio = 0.0
        self.__filter_contours_max_ratio = 1000.0

        # Mask stage uses the dilated image as its mask.
        self.__mask_mask = self.cv_dilate_output
# ===== 示例#29 (Example #29) =====
    def __init__(self):
        """Constructor: create the framerate timer and the GRIP pipeline settings.

        Holds configuration for a GRIP-generated vision pipeline; each stage's
        ``*_output`` attribute starts as ``None`` and is presumably populated
        when the pipeline processes a frame (confirm against the processing
        method elsewhere in this module).
        """
        # JeVois timer: reports average FPS every 100 frames at LOG_INFO level.
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)

        # SPECIAL REPLACED BLUR CONSTANT (see the generated-pipeline notes).
        self.__blur_type = 0

        # Stage outputs: all empty until the pipeline is executed.
        self.normalize_output = None
        self.hsv_threshold_output = None
        self.cv_erode_output = None
        self.mask_output = None
        self.blur_output = None
        self.find_blobs_output = None
        self.cv_cvtcolor_output = None
        self.find_contours_output = None
        self.filter_contours_output = None

        # Normalize stage: rescale pixel intensities into [alpha, beta].
        self.__normalize_type = cv2.NORM_MINMAX
        self.__normalize_alpha = 0.0
        self.__normalize_beta = 255.0

        # HSV threshold: keep pixels inside these hue/saturation/value bands.
        self.__hsv_threshold_input = self.normalize_output
        self.__hsv_threshold_hue = [0.0, 106.20320855614972]
        self.__hsv_threshold_saturation = [0.0, 255.0]
        self.__hsv_threshold_value = [245.0, 255.0]

        # Erode stage: kernel=None means the implementation's default kernel.
        self.__cv_erode_src = self.hsv_threshold_output
        self.__cv_erode_kernel = None
        self.__cv_erode_anchor = (-1, -1)
        self.__cv_erode_iterations = 1.0
        self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
        self.__cv_erode_bordervalue = -1

        # Mask stage: eroded image masked by the raw HSV threshold result.
        self.__mask_input = self.cv_erode_output
        self.__mask_mask = self.hsv_threshold_output

        # Blur stage (radius only; the blur type is the constant above).
        self.__blur_input = self.mask_output
        self.__blur_radius = 4.319224885262623

        # Blob detector parameters.
        self.__find_blobs_input = self.blur_output
        self.__find_blobs_min_area = 500.0
        self.__find_blobs_circularity = [0.0, 1.0]
        self.__find_blobs_dark_blobs = False

        # Color conversion applied to the blurred image.
        self.__cv_cvtcolor_src = self.blur_output
        self.__cv_cvtcolor_code = cv2.COLOR_YUV2GRAY_420

        # Contour detection: external_only=False keeps nested contours too.
        self.__find_contours_input = self.cv_cvtcolor_output
        self.__find_contours_external_only = False

        # Contour filter: geometric limits a contour must satisfy to survive.
        self.__filter_contours_contours = self.find_contours_output
        self.__filter_contours_min_area = 500.0
        self.__filter_contours_min_perimeter = 0.0
        self.__filter_contours_min_width = 0.0
        self.__filter_contours_max_width = 1000.0
        self.__filter_contours_min_height = 0.0
        self.__filter_contours_max_height = 1000.0
        self.__filter_contours_solidity = [0, 100]
        self.__filter_contours_max_vertices = 1000000.0
        self.__filter_contours_min_vertices = 0.0
        self.__filter_contours_min_ratio = 0.0
        self.__filter_contours_max_ratio = 1000.0
    def __init__(self):
        """Constructor: configure and load an OpenCV-DNN object detector.

        Selects one of several pretrained detection networks, sets the
        model-specific preprocessing parameters (input size, scale, mean,
        channel order), loads the class-name list from disk, and loads the
        network with cv.dnn.readNet.

        NOTE(review): `cv` is presumably `import cv2 as cv` and `jevois` the
        JeVois module, both imported elsewhere in the file — confirm.
        """
        self.confThreshold = 0.5  # Confidence threshold (0..1), higher for stricter detection confidence.
        self.nmsThreshold = 0.4  # Non-maximum suppression threshold (0..1), higher to remove more duplicate boxes.
        self.inpWidth = 160  # Resized image width passed to network
        self.inpHeight = 120  # Resized image height passed to network
        self.scale = 2 / 255  # Value scaling factor applied to input pixels
        self.mean = [127.5, 127.5,
                     127.5]  # Mean BGR value subtracted from input image
        self.rgb = True  # True if model expects RGB inputs, otherwise it expects BGR
        self.bbox = None  # Last tracked bounding box; none until tracking starts.
        self.tracker = cv.TrackerKCF_create()  # KCF tracker instance (not yet initialized on a frame).

        # Select one of the models:
        #model = 'Face'              # OpenCV Face Detector, Caffe model
        #model = 'MobileNetV2SSD'   # MobileNet v2 + SSD trained on Coco (80 object classes), TensorFlow model
        #model = 'MobileNetSSD'     # MobileNet + SSD trained on Pascal VOC (20 object classes), Caffe model
        model = 'MobileNetSSDcoco'  # MobileNet + SSD trained on Coco (80 object classes), TensorFlow model
        #model = 'YOLOv3'           # Darknet Tiny YOLO v3 trained on Coco (80 object classes), Darknet model
        #model = 'YOLOv2'           # Darknet Tiny YOLO v2 trained on Pascal VOC (20 object classes), Darknet model

        # You should not have to edit anything beyond this point.
        backend = cv.dnn.DNN_BACKEND_DEFAULT
        target = cv.dnn.DNN_TARGET_CPU
        self.classes = None
        classnames = None
        # Per-model file paths and preprocessing overrides; the else branch
        # (face detector) is the fallback for any unrecognized model string.
        if (model == 'MobileNetSSD'):
            classnames = '/jevois/share/darknet/yolo/data/voc.names'
            modelname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.caffemodel'
            configname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.prototxt'
            self.rgb = False
        elif (model == 'MobileNetV2SSD'):
            classnames = '/jevois/share/darknet/yolo/data/coco.names'
            modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pb'
            configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pbtxt'
        elif (model == 'MobileNetSSDcoco'):
            classnames = '/jevois/share/darknet/yolo/data/coconew.names'
            modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pb'
            configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pbtxt'
            self.rgb = False
            self.nmsThreshold = 0.1
        elif (model == 'YOLOv3'):
            classnames = '/jevois/share/darknet/yolo/data/coco.names'
            modelname = '/jevois/share/darknet/yolo/weights/yolov3-tiny.weights'
            configname = '/jevois/share/darknet/yolo/cfg/yolov3-tiny.cfg'
        elif (model == 'YOLOv2'):
            classnames = '/jevois/share/darknet/yolo/data/voc.names'
            modelname = '/jevois/share/darknet/yolo/weights/yolov2-tiny-voc.weights'
            configname = '/jevois/share/darknet/yolo/cfg/yolov2-tiny-voc.cfg'
            self.inpWidth = 320
            self.inpHeight = 240
        else:
            classnames = '/jevois/share/opencv-dnn/detection/opencv_face_detector.classes'
            modelname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.caffemodel'
            configname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.prototxt'
            self.scale = 1.0
            self.mean = [104.0, 177.0, 123.0]
            self.rgb = False

        # Load names of classes
        # (one label per line; NOTE(review): uses the platform default text
        # encoding — presumably the .names files are plain ASCII, confirm)
        if classnames:
            with open(classnames, 'rt') as f:
                self.classes = f.read().rstrip('\n').split('\n')

        # Load a network
        self.net = cv.dnn.readNet(modelname, configname)
        self.net.setPreferableBackend(backend)
        self.net.setPreferableTarget(target)
        self.timer = jevois.Timer('Neural detection', 10, jevois.LOG_DEBUG)
        self.model = model
        # NOTE(review): presumably the subset of class labels this module
        # treats as garbage/litter — confirm against the detection loop that
        # consumes self.garbageclasses.
        garbageclasses = [
            "shoe", "hat", "eye glasses", "frisbee", "bottle", "plate",
            "wine glass", "cup", "fork", "spoon", "bowl", "banana", "apple",
            "sandwich", "orange", "broccoli", "carrot", "fruit", "hotdog",
            "pizza", "donut", "cake", "vase", "scissors", "toothbrush",
            "cardboard", "napkin", "net", "paper", "plastic", "straw"
        ]
        self.garbageclasses = garbageclasses