Example #1
File: clf_svm.py Project: q3aob4on/robocar
 def getFeatures(self, img):
     ############################################################################
     # TO-DO: use the same feature vector that was used for training
     ############################################################################
     return [
         imagefunctions.num_red_pixels(img),
         imagefunctions.num_white_pixels(img)
     ]
Example #2
def getFeatures(img):

    ######################################################################################
    # TO-DO: Feature engineering - assemble a "feature vector" for SVM to maximize accuracy
    # Note: features must be numerical values, i.e. the feature vector is a vector of numbers.
    # Below is a 2-dimensional example:
    ######################################################################################

    return [
            imagefunctions.num_red_pixels(img),
            imagefunctions.num_white_pixels(img)
    ]
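
The TO-DO above asks for a numeric feature vector to feed an SVM. A minimal training sketch (not from the project; `images` and `labels` are assumed training crops and class ids) showing how such a vector might be assembled and fit with scikit-learn:

import numpy as np
from sklearn import svm
import imagefunctions  # project module used by the examples above

def build_feature_matrix(images):
    # one row per image, using the same two features returned by getFeatures()
    return np.array([[imagefunctions.num_red_pixels(img),
                      imagefunctions.num_white_pixels(img)] for img in images])

X = build_feature_matrix(images)
fmean, fstd = X.mean(axis=0), X.std(axis=0)
X_norm = (X - fmean) / fstd  # same normalization applied again at prediction time
clf = svm.SVC(kernel='linear')
clf.fit(X_norm, labels)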
Example #3
 def actualizePreview(self):
     seuillage_haut = self.radio_high.isChecked()
     seuillage_bas = self.radio_high.isChecked()  # NOTE: reads radio_high again; the low-threshold radio button is probably intended here
     seuil = self.slider.value()
     # Prevent image and new_image from becoming the same object (same address)
     temp = QImage(self.image)
     if seuillage_haut:
         self.new_image = ImageFunctions.seuillage_haut(temp, seuil)
     elif seuillage_bas:
         self.new_image = ImageFunctions.seuillage_bas(temp, seuil)
     else:
         self.new_image = ImageFunctions.binarisation(temp, seuil)
     self.preview.setPixmap(QPixmap(self.new_image).scaled(self.preview.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
Example #4
 def getFeatures(self, img):
     ############################################################################
     # TO-DO: use the same feature vector that was used for training
     ############################################################################
     return get_features(img)
     # Unreachable alternatives left in the original, kept here as comments:
     # out = img[:, :2, :].ravel()
     # return out
     # return [
     #     imagefunctions.num_red_pixels(img),
     #     imagefunctions.num_white_pixels(img),
     #     imagefunctions.num_edges(img),
     #     imagefunctions.num_corners(img)
     # ]
Example #5
    def search_windows(self, img, windows, framenum=0):
        # preprocess frame
        img_prep = imagefunctions.preprocess_one_rgb(img[0:127, :])
        fvec = []
        for window in windows:
            # extract test window from image
            test_img = img_prep[window[0][1]:window[1][1],
                                window[0][0]:window[1][0]]
            # extract features
            feat = self.getFeatures(test_img)
            # normalize features
            normfeat = self.normalize_features(feat, self.fmean, self.fstd)
            # assemble batch
            testvec = np.asarray(normfeat).reshape(1, -1)
            fvec.append(testvec)

        # batch prediction
        if (np.array(fvec).ndim == 3):
            rvec = self.clf.predict(np.array(fvec).squeeze(axis=1))
        else:
            rvec = []

        # list of positive stop sign detection windows
        stop_indices = [i for i, x in enumerate(rvec) if x == 1]
        stop_windows = [windows[i] for i in stop_indices]

        # list of positive warn sign detection windows
        warn_indices = [i for i, x in enumerate(rvec) if x == 2]
        warn_windows = [windows[i] for i in warn_indices]

        # return positive detection windows
        return stop_windows, warn_windows
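
Both search_windows variants rely on self.normalize_features(feat, self.fmean, self.fstd), which is not shown in these examples. A minimal z-score normalizer of that shape could look like this sketch (an assumption, not the project's code):

import numpy as np

def normalize_features(self, feat, fmean, fstd):
    # z-score each feature; fmean/fstd are assumed per-feature training statistics
    feat = np.asarray(feat, dtype=float)
    fstd = np.where(np.asarray(fstd, dtype=float) == 0, 1.0, fstd)  # guard against zero variance
    return (feat - np.asarray(fmean, dtype=float)) / fstd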
Example #6
    def search_windows(self, img, windows, framenum=0):
        stop_windows = []  # list of positive stop sign detection windows
        warn_windows = []  # list of positive warn sign detection windows

        cropnum = 0
        for window in windows:
            # extract test window from original image
            #test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (50,50))
            img_crop = img[window[0][1]:window[1][1],
                           window[0][0]:window[1][0]]
            #img_crop_pp = imagefunctions.preprocess_one_rgb(img_crop)
            test_img = imagefunctions.preprocess_one_rgb(img_crop)
            #test_img = np.array(255*img_crop_pp, dtype=np.uint8)

            fname = 'crop-' + str(framenum) + '-' + str(cropnum) + '.png'
            #imsave('img50x50/'+fname, test_img)
            cropnum = cropnum + 1
            # extract features
            feat = self.getFeatures(test_img)
            # normalize features
            normfeat = self.normalize_features(feat, self.fmean, self.fstd)
            # predict using classifier
            testvec = np.asarray(normfeat).reshape(1, -1)
            prediction = self.clf.predict(testvec)
            #print prediction
            # save positive detection windows
            if prediction == 2:
                #print 'warning sign'
                warn_windows.append(window)
            elif prediction == 1:
                stop_windows.append(window)

        # return positive detection windows
        return stop_windows, warn_windows
Example #7
def getFeatures(img):
    return get_features(img)
    ######################################################################################

    # TO-DO: Feature engineering - assemble a "feature vector" for SVM to maximize accuracy
    # Note: features must be numerical values, i.e. the feature vector is a vector of numbers.
    # Below is a 2-dimensional example:
    #####################################################################################
    # Unreachable alternatives left in the original, kept here as comments:
    # out = img[::2, ::2, :].ravel()
    # return out
    # return [
    #     imagefunctions.num_red_pixels(img),
    #     imagefunctions.num_white_pixels(img),
    #     imagefunctions.num_edges(img),
    #     imagefunctions.num_corners(img),
    # ]
Example #8
File: ELIME.py Project: stahlfabrik/ELIME
def preProcessImageFiles(sourcePath, destinationPath=None, prefix=None, delete=False, customDateFormat=''):
  """Move image files from sourcePath to DestinationPath (permanent photo storage), renaming them with creation date"""
  logger = logging.getLogger('ELIME.pre')

  if not sourcePath:
    logger.error('sourcePath not valid')
    return
    
  if destinationPath is None:
    #copy and rename in place
    logger.warning('No destination path given. Using source path %s', sourcePath)
    destinationPath = sourcePath
    
  sourceFiles = []
  
  # get all files in sourcepath
  if os.path.isdir(sourcePath):
    sourceFiles = [ f for f in os.listdir(sourcePath) if os.path.isfile(os.path.join(sourcePath,f)) ]
  
  # filter only wanted files (jpgs)
  sourcePhotos = list(filter(HelperFunctions.filefilter, sourceFiles))
  
  count = len(sourcePhotos)
  if count:
    logger.info('Preprocessing %d photos from source %s to destination %s', count, sourcePath, destinationPath)
  else:
    logger.info('No photos to preprocess at %s', sourcePath)
    
  # rename files - as copy
  for photo in sourcePhotos:
    completeSourcePath = os.path.join(sourcePath, photo)
    
    photoDateTime = ImageFunctions.getCreationDateTimeOfPicture(completeSourcePath, customDateFormat)
    timestr = photoDateTime.strftime("%Y-%m-%d_%H-%M-%S")
    
    # create destination filename
    if prefix is None:
      destPath = os.path.join(destinationPath, timestr + os.path.splitext(photo)[1])
    else:
      destPath = os.path.join(destinationPath, prefix + '_' + timestr + os.path.splitext(photo)[1])
    
    logger.info("Copying: %s -> %s", completeSourcePath, destPath)
    
    # copy the file to destination
    if not os.path.isfile(destPath):
      shutil.copy2(completeSourcePath, destPath)
    else:
      logger.warning("File %s is already existing. Did not copy!", destPath)
      continue
    
    # delete source file if wanted (e.g. move and not copy)  
    if delete:
      logger.info("Deleting source file %s", completeSourcePath)
      os.remove(completeSourcePath)
  
  if count:      
    logger.info('Done preprocessing %d photos from source %s to destination %s!', count, sourcePath, destinationPath)
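
For reference, a call to this function might look like the following; the paths and prefix are placeholders, not values from the project.

preProcessImageFiles('/media/camera/DCIM', '/photos/archive',
                     prefix='family', delete=False, customDateFormat='')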
Example #9
 def squelettisation(self):
     sub = self.getFocusedSubWindow()
     if sub is not None:
         dialog = SquelettisationDialog()
         dialog.radio_thinning.setChecked(True)
         result = dialog.exec_()
         if result == QDialog.Accepted:
             isThinning = dialog.getValues()
             image = sub.widget().pixmap().toImage()
             image = image.convertToFormat(QImage.Format_Grayscale8)  # convertToFormat returns a copy
             if isThinning:
                 new_image = ImageFunctions.squelettisation_amincissement_homothopique(
                     image)
             else:
                 new_image = ImageFunctions.squelettisation_Lantuejoul(
                     image)
             self.createMDISubWindow(
                 "Sans Titre " + str(self._subWindowCounter),
                 QPixmap(new_image))
Example #10
 def openingClosing(self):
     sub = self.getFocusedSubWindow()
     if sub is not None:
         dialog = OpeningClosingDialog()
         dialog.radio_carre.setChecked(True)
         dialog.radio_opening.setChecked(True)
         result = dialog.exec_()
         if result == QDialog.Accepted:
             isOpening, isBoule, dim = dialog.getValues()
             strel = ImageFunctions.createStrel(dim, isBoule)
             image = sub.widget().pixmap().toImage()
             image = image.convertToFormat(QImage.Format_Grayscale8)  # convertToFormat returns a copy
             if isOpening:
                 new_image = ImageFunctions.ouverture(image, strel)
             else:
                 new_image = ImageFunctions.fermeture(image, strel)
             self.createMDISubWindow(
                 "Sans Titre " + str(self._subWindowCounter),
                 QPixmap(new_image))
Example #11
    def initialize_image_operators(self, image):
        if self.first_image is True:
            self.first_image = False

            self.undistorter = imgf.Undistorter(self.camera_calibration_file,
                                                image.shape[1], image.shape[0])

            src, dst = get_transform_params(image.shape[0], image.shape[1])
            self.M = cv2.getPerspectiveTransform(src, dst)
            self.M_inv = cv2.getPerspectiveTransform(dst, src)
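
Example #21 below calls self.warp() on the thresholded image; that helper is not shown in these examples. Given the matrices computed above, it plausibly reduces to a single cv2.warpPerspective call, as in this assumed sketch:

    def warp(self, image):
        # bird's-eye view using the forward matrix; self.M_inv would map the result back
        height, width = image.shape[:2]
        return cv2.warpPerspective(image, self.M, (width, height), flags=cv2.INTER_LINEAR)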
Example #12
 def thinningThicking(self):
     sub = self.getFocusedSubWindow()
     if sub is not None:
         dialog = ThinningThickingDialog()
         dialog.radio_thinning.setChecked(True)
         result = dialog.exec_()
         if result == QDialog.Accepted:
             isThinning, max_iter = dialog.getValues()
             image = sub.widget().pixmap().toImage()
             image = image.convertToFormat(QImage.Format_Grayscale8)  # convertToFormat returns a copy
             if isThinning:
                 strel = ImageFunctions.createThinningStrel()
                 new_image = ImageFunctions.amincissement(
                     image, strel, max_iter)
             else:
                 strel = ImageFunctions.createThickingStrel()
                 new_image = ImageFunctions.epaississement(
                     image, strel, max_iter)
             self.createMDISubWindow(
                 "Sans Titre " + str(self._subWindowCounter),
                 QPixmap(new_image))
Example #13
def refine_segments_tiled(tile_id, segments):
    """Run tiled object refinement"""
    image_sar, image_sar_meta = ipf.tif2ar(sar_filename)
    return orf.apply_tiled_refiment(tile_id,
                                    segments.copy(),
                                    image_sar,
                                    image_sar_meta["transform"],
                                    t_stdev=t_stdev,
                                    t_conv=t_conv,
                                    t_shape=t_shape,
                                    bandnames_mean=bandnames_mean,
                                    bandnames_stdev=bandnames_stdev)
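
refine_segments_tiled reloads the SAR image per tile, which suggests it is fanned out over tiles in parallel. A hedged usage sketch (multiprocessing and the segments_per_tile mapping are assumptions, not shown in the project code):

from multiprocessing import Pool

# segments_per_tile: assumed dict mapping tile_id -> segments for that tile
with Pool(processes=4) as pool:
    refined = pool.starmap(refine_segments_tiled,
                           [(tile_id, segs) for tile_id, segs in segments_per_tile.items()])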
Example #14
    def checkSnapInbox(self):
        try:
            print("Looking for snaps...")
            a = Snap()
            names_ids = a.getImageName(localFiles.getLocalUserInfo()[0])
            namesFound = names_ids[0]
            idsFound = names_ids[1]

            if len(namesFound) == 0:
                popup = Popup(
                    title='Oops!',
                    content=Label(
                        text='There are no new snaps for you, ' +
                        localFiles.getLocalUserInfo()[2].split(" ")[0]),
                    size_hint=(None, None),
                    size=(350, 200))
                popup.open()
            else:
                for x in range(len(namesFound)):
                    img = str(namesFound[x]).strip()
                    ImageFunctions.showImg(img)
                a.updateSnapStatus(localFiles.getLocalUserInfo()[0])
        except Exception as e:
            print("ERROR:", e)
Example #15
def getFeatures(img):
    return [
        imagefunctions.num_corners(img),
        imagefunctions.num_edges(img),
        imagefunctions.num_red_pixels(img),
        imagefunctions.num_white_pixels(img),
        imagefunctions.abs_sobel_thresh(img,
                                        orient='y',
                                        sobel_kernel=3,
                                        thresh=(100, 200)),
        imagefunctions.mag_thresh(img, sobel_kernel=5, mag_thresh=(100, 180)),
        imagefunctions.dir_threshold(img,
                                     sobel_kernel=3,
                                     thresh=(np.pi / 8, np.pi / 4))
    ]
Example #16
    def execute(self, image):
        undistorted_image, binary_warped = self.preprocess_image(image)

        self.lane_det.run_on_image(binary_warped)
        lane_area = self.lane_det.render_lane_area()
        colored_lanes = self.lane_det.render_lanes()

        postprocessed_lane_area = self.postprocess_result(lane_area)
        postprocessed_lanes = self.postprocess_result(colored_lanes)

        image_with_area = cv2.addWeighted(undistorted_image, 1.,
                                          postprocessed_lane_area, .3, 1)
        output_image = imgf.image_overlay(postprocessed_lanes,
                                          image_with_area,
                                          overlay_transparency=.1)

        return self.lane_det.display_metrics(output_image)
Example #17
    def capture(self, option, snapReceiver):
        camera = self.ids['camera']

        if snapReceiver != "" and snapReceiver is not None:

            # Naming snap
            timestr = time.strftime("%Y%m%d_%H%M%S")
            snapName = "Snap_{}.png".format(timestr)

            # Saving file.png
            camera.export_to_png(snapName)

            # Database registry !!STILL NEED TO KNOW THE USERID OF THE SENDER AND THE RECEIVER!!
            snap = Snap()
            snap.saveSnap(snapName=str(snapName),
                          snapSender=localFiles.getLocalUserInfo()[0],
                          snapFile="\"" +
                          str(ImageFunctions.imageToText(snapName)).strip() +
                          "\"",
                          snapReceiver=snapReceiver)

            path = os.getcwd()
            path = path + "/" + snapName

            if option == 1:
                print("NORMAL")
            elif option == 2:
                print("GRAYSCALE")
                ImageFunctions.editar3(path)
            elif option == 3:
                print("SEPIA")
                ImageFunctions.editar2(path)
            elif option == 4:
                print("BLUR")
                ImageFunctions.editar1(path)

        else:
            popup = Popup(
                title='Oh',
                content=Label(
                    text=
                    'It seems like you forgot to tell who this snap will be sent to (userID).'
                ),
                size_hint=(None, None),
                size=(550, 200))
            popup.open()
Example #18
def split():
    try:
        desired_cols = int(des_col_entry.get())
        if desired_cols < 1 or desired_cols > 50:
            raise IncorrectColumnsError
        
        # IFun.reverseGif(selected_image, prefix_entry.get(), desired_cols)
        emote_string = IFun.splitGif(selected_image, prefix_entry.get(), desired_cols)
        # emote_string = IFun.splitImage(selected_image, prefix_entry.get(), desired_cols)
        # window.clipboard_clear()
        # window.clipboard_append(emote_string)
        # window.update()

    except IncorrectColumnsError:
        messagebox.showinfo("Error", "The desired amount of columns should be in between 1 and 50.", icon = "warning")

    except IFun.TooManyPartsError:
        messagebox.showinfo("Error", "The amount of parts exceed 50.", icon = "warning")

    except IFun.TooManyRowsError:
        messagebox.showinfo("Error", "There are too many rows given the desired columns.", icon = "warning")
Example #19
 def substractImage(self):
     liste = self.mdi.subWindowList()
     if len(liste) > 0:
         dialog = AdditionSubstractionDialog(liste)
         dialog.radio_addition.setChecked(True)
         dialog.radio_substract.setChecked(False)
         result = dialog.exec_()
         if result == QDialog.Accepted:
             title1, title2 = dialog.getTitles()
             for sub in liste:
                 if title1 == sub.windowTitle():
                     sub1 = sub
                 if title2 == sub.windowTitle():
                     sub2 = sub
             image1 = sub1.widget().pixmap().toImage()
             image1 = image1.convertToFormat(QImage.Format_Grayscale8)
             image2 = sub2.widget().pixmap().toImage()
             image2 = image2.convertToFormat(QImage.Format_Grayscale8)
             image = ImageFunctions.soustraction(image1, image2)
             self.createMDISubWindow(
                 "Sans Titre " + str(self._subWindowCounter),
                 QPixmap(image))
         dialog.close()
Example #20
def extract_features(image_filename,
                     image_label,
                     stats=["mean"],
                     band_num=1,
                     image_transform=None,
                     nodata=-9999):
    """Run zonal statistics using global var segments"""
    t_start = datetime.datetime.now()
    print("{} - {} - Extracting features for {} based on {}".format(
        datetime.datetime.now(), os.getpid(), image_label, stats))
    image, image_meta = ipf.tif2ar(image_filename, band=band_num)
    image_transform = image_meta["transform"]  # note: this overrides the image_transform argument
    band_num = 1  # note: this overrides the band_num argument
    statistics = rs.zonal_stats(segments,
                                image,
                                stats=stats,
                                affine=image_transform,
                                band_num=band_num,
                                nodata=nodata)
    t_end = datetime.datetime.now()
    comp_time = (t_end - t_start).total_seconds()
    print("{} - {} - Feature extraction for {} done after {} sec.!".format(
        datetime.datetime.now(), os.getpid(), image_label, comp_time))
    return image_label, statistics
Example #21
    def preprocess_image(self, image):
        self.initialize_image_operators(image)

        undistorted_image = self.undistorter.apply(image)

        hls_image = imgf.to_hls(undistorted_image)
        gray_image = imgf.to_grayscale(undistorted_image)

        s_threshold = imgf.color_threshold(hls_image, 2, (170, 255))
        gray_threshold = imgf.color_threshold((np.dstack(
            (gray_image, gray_image, gray_image))), 0, (200, 255))

        combined_color_thresholds = imgf.combine_binary(
            s_threshold, gray_threshold)

        sobel = imgf.SobelGradientThresholder(undistorted_image,
                                              sobel_kernel=3)
        gradient_thresholds = sobel.abs_thresh(orient='x', thresh=(20, 100))

        thresholded_image = imgf.combine_binary(gradient_thresholds,
                                                combined_color_thresholds)

        binary_warped = self.warp(thresholded_image)
        return undistorted_image, binary_warped
Example #22
cam = config.cam

gain = ueye.INT(10)
#ueye.is_SetHardwareGain(cam.handle(), gain, ueye.IS_IGNORE_PARAMETER, ueye.IS_IGNORE_PARAMETER,
#ueye.IS_IGNORE_PARAMETER)

# Increment
increment = 2
# Loop from lowest possible exposure to highest possible exposure, incremented by 2 (ms)
for exposure in range(exposure_low, exposure_high, increment):
    # Set new exposure
    newExposure = ueye.DOUBLE(exposure)
    ret = ueye.is_Exposure(cam.handle(), ueye.IS_EXPOSURE_CMD_SET_EXPOSURE,
                           newExposure, ueye.sizeof(newExposure))
    time.sleep(0.05)
    img = ImageFunctions.capture_image(cam=cam, gripper_height=500)
    puck_list = QR_Scanner(img)
    print(puck_list)
    # Checking exposure
    d = ueye.DOUBLE()
    retVal = ueye.is_Exposure(cam.handle(), ueye.IS_EXPOSURE_CMD_GET_EXPOSURE,
                              d, 8)
    if retVal == ueye.IS_SUCCESS:
        print('Currently set exposure time %8.3f ms' % d)
    # Position returns as None if no QR-code is found
    if puck_list:
        exposure_values.append(exposure)

exposure = str(median(exposure_values))

config = configparser.ConfigParser()
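
The snippet stops right after constructing a fresh ConfigParser, presumably to write the median exposure back to a settings file. A sketch of that step (section and file names are assumptions):

config.read('camera_settings.ini')  # file name is an assumption
if not config.has_section('CAMERA'):
    config.add_section('CAMERA')
config.set('CAMERA', 'exposure', exposure)  # `exposure` is the median string computed above
with open('camera_settings.ini', 'w') as configfile:
    config.write(configfile)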
Example #23
        1. Image from above
        2. Move puck to middle
        3. Stack pucks
        4. Rotate puck
        5. Exit
        6. Repeatability test""")

    userinput = int(input('\nWhat should RAPID do?: '))

    if userinput == 3:
        print("Stack pucks")
        norbert.set_rapid_variable("WPW", 3)
        norbert.wait_for_rapid()

        while not robtarget_pucks:
            ImageFunctions.findPucks(config.cam, norbert, robtarget_pucks, 195)
        print(robtarget_pucks)

        for _ in range(len(robtarget_pucks)):

            pucknr = min(int(x.nr) for x in robtarget_pucks)

            for x in robtarget_pucks:
                if x.nr == pucknr:
                    puck_to_RAPID = x
                    break

            norbert.set_robtarget_variables("puck_target",
                                            puck_to_RAPID.get_xyz())
            norbert.set_rapid_variable("puck_angle",
                                       puck_to_RAPID.get_puckang())
Example #24
File: ELIME.py Project: stahlfabrik/ELIME
def renderPhotos(srcPath, dstPath, dbPath, mode='fill', offset_pct=(0.43,0.425),
                 dest_sz=(1920,1080), ttfontpath="./HelveticaNeueLight.ttf", 
                 fontSize=64, format='%x', localestr="de_DE", show=False, 
                 posDebug=False):
  """Render all photos from database to disk with correct eye positions"""
  #use "fondu" to get ttf on mac os x
  logger = logging.getLogger('ELIME.renderPhotos')
  
  
  if dbPath is None:
    logger.error("dbPath is not valid")
    return
    
  if srcPath is None:
    logger.error("srcPath is not valid")
    return
  
  if dstPath is None:
    logger.error("dstPath is not valid")
    return
    
  if srcPath == dstPath:
    logger.error("srcPath and dstPath MUST be different for security reasons;-)")
    return
  
  # set up locale
  locale.setlocale(locale.LC_TIME, localestr)
  
  # load truetype font for date imprint
  ttfont = None 
  
  if ttfontpath is not None:
    ttfontpath = os.path.abspath(ttfontpath)
    if os.path.isfile(ttfontpath):
      logger.info("Fontrendering active using font path %s", ttfontpath)
      ttfont = ImageFont.truetype(ttfontpath, fontSize)
    else:
      logger.error("Fontpath %s is not a file", ttfontpath)
      return None
      
  # connect to database
  conn = sqlite3.connect(dbPath, detect_types=sqlite3.PARSE_DECLTYPES)
  c = conn.cursor() 
  
  # get photos ordered by date
  c.execute('''SELECT * FROM eyesInPhotos ORDER BY date''')
  dbPhotos = c.fetchall()
  
  for photo in dbPhotos:
    if not os.path.exists(os.path.join(srcPath, photo[1])):
      logger.error("Photo %s does not exist in srcPath %s! Check path, do tidydb, then try again!", photo[1], srcPath)
      sys.exit(1)
  
  # get time span of pictures in database      
  firstDatetime = dbPhotos[0][2].date()
  lastDatetime = dbPhotos[-1][2].date()
  
  logger.info("First photo %s in database taken on %s", dbPhotos[0][1], firstDatetime)
  logger.info("Last photo %s in database taken on %s", dbPhotos[-1][1], lastDatetime)

  # In fill mode a frame is created for every day in the time span, whether or not
  # there is a picture in the database for that day.
  # It is assumed that there is at most one picture per date.
  if mode == 'fill':
    
    numdays = (lastDatetime - firstDatetime).days
    logger.info("Will generate %d frames", numdays)
    
    dates = [firstDatetime + timedelta(days=i) for i in range(0, numdays + 1)]
  
    brightness = 1.0
    
    lastPhoto = None
    for aDate in dates:
      for photo in dbPhotos:
        if photo[2].date() == aDate:
          lastPhoto = photo
          brightness = 1.0
          break
      else:
        logger.debug("No photo for date %s in database", aDate)
        brightness *= 0.90
        lastPhoto = (lastPhoto[0], lastPhoto[1], datetime(aDate.year, aDate.month, aDate.day), lastPhoto[3], lastPhoto[4], lastPhoto[5], lastPhoto[6])

      logger.info("Rendering Image %s, date %s", lastPhoto[1], lastPhoto[2].strftime(format))
      
      pilImage = ImageFunctions.renderPhoto(srcPath, lastPhoto, ttfont, format, offset_pct, dest_sz, brightness, posDebug)
      
      if show:
        cvImage = ImageFunctions.convertPIL2CV(pilImage)
        cv.NamedWindow(lastPhoto[1]+ " " + aDate.strftime(format), cv.CV_WINDOW_AUTOSIZE)
        cv.ShowImage(lastPhoto[1]+ " " + aDate.strftime(format), cvImage) 
        key = cv.WaitKey()
        
        if key == 113: # 'q' quit
          sys.exit(0)  
        
        cv.DestroyWindow(lastPhoto[1]+ " " + aDate.strftime(format))
      
      pilImage.save(os.path.join(dstPath, 'rendered_' + lastPhoto[2].strftime("%Y_%m_%d") + '.jpg'), quality=95)
  
  # in all mode render every picture in database, skip dates with no pics
  if mode == 'all':
    for photo in dbPhotos:
      logger.info("Rendering Image %s, date %s", photo[1], photo[2].strftime(format))
    
      pilImage = ImageFunctions.renderPhoto(srcPath, photo, ttfont, format, offset_pct, dest_sz, 1.0, posDebug)      
      
      if show:
        cvImage = ImageFunctions.convertPIL2CV(pilImage)
        cv.NamedWindow(photo[1]+ " " + photo[2].strftime(format), cv.CV_WINDOW_AUTOSIZE)
        cv.ShowImage(photo[1]+ " " + photo[2].strftime(format), cvImage) 
        key = cv.WaitKey()
        
        if key == 113: # 'q' quit
          sys.exit(0)  
        
        cv.DestroyWindow(photo[1]+ " " + photo[2].strftime(format))
          
      pilImage.save(os.path.join(dstPath, 'rendered_' + photo[2].strftime("%Y_%m_%d") + '.jpg'), quality=95)
    
  conn.close()       
Example #25
File: ELIME.py Project: stahlfabrik/ELIME
def checkEyeData(srcPath, dbPath, beginWith=[], maxDimension = 1024, zoomSize=640, detailOnly=True):
  """Check and correct eye positions in database on all or selected image files"""
  logger = logging.getLogger('ELIME.checkEyeDataOfPhotos')
  
  logger.info("Checking eyepositions stored in db")
  
  if dbPath is None:
    logger.error("dbPath is invalid")
    return  
  
  if srcPath is None:
    logger.error("srcPath is invalid")
    return
  
  # connect to database
  conn = sqlite3.connect(dbPath, detect_types=sqlite3.PARSE_DECLTYPES)
  c = conn.cursor() 
  
  # get list of files to check eye positions
  # assume we get it alphabetically ordered
  # begin with photo named in variable beginWith or take all
  filenames = []
  
  processing = True
  
  filenames = [ f for f in os.listdir(srcPath) if os.path.isfile(os.path.join(srcPath,f)) ]
  
  if len(beginWith) == 0:
    logger.debug("No filename to begin with specified. Will check all.")
  else:
    logger.debug("Starting with photo named %s", beginWith[0])
    processing = False
  
  # filter for jpgs   
  filenames = filter(HelperFunctions.filefilter, filenames)
  
  for filename in filenames:
    # start processing with given filename, if any
    if not processing:
      if filename == beginWith[0]:
        processing = True
      else:
        continue
          
    logger.debug("Image name: %s", filename)
    
    inputImageFilePath = os.path.join(srcPath, filename)
    
    # get pictures stored info from database
    c.execute('''SELECT * FROM eyesInPhotos WHERE photoFileName=?''',(filename,))
    dbPhotos = c.fetchall()

    numDBPhotos = len(dbPhotos)

    if numDBPhotos == 0:
      logger.error("Photo named %s not in database! Do nothing. You should add it!", filename)
      continue
      
    if numDBPhotos == 1:
      lEyeX = int(dbPhotos[0][3])
      lEyeY = int(dbPhotos[0][4])
      rEyeX = int(dbPhotos[0][5])
      rEyeY = int(dbPhotos[0][6])
      
      logger.debug("Eye position in db: lEyeX=%d, lEyeY=%d, rEyeX=%d, rEyeY=%d", lEyeX, lEyeY, rEyeX, rEyeY) 
      
      # load image to opencv image
      pilImage = ImageFunctions.loadAndTransposePILImage(inputImageFilePath)
      cvImage = ImageFunctions.convertPIL2CV(pilImage)

      # scale it down
      size = cv.GetSize(cvImage)
      
      maxDimension = float(maxDimension)
      
      scale = 1.0
      if size[0] > maxDimension or size[1] > maxDimension:
        scale = max(size[0]/maxDimension, size[1]/maxDimension)

      newSize = ( int(size[0] / scale), int (size[1] / scale) )

      scaledImage = cv.CreateImage(newSize, cvImage.depth, cvImage.nChannels)
      cv.Resize(cvImage, scaledImage)
      
      # calculate scaled eye coordinates      
      scaledEyeCoordinates = [(int(lEyeX / scale), int(lEyeY / scale)),
                              (int(rEyeX / scale), int(rEyeY / scale))]
      
      eyeCoordinates = [(lEyeX, lEyeY), (rEyeX, rEyeY)]
      
      # if we do not only show the zoomed single-eye detail view but also the whole picture
      if not detailOnly:
        # coarse eye positions in total face/image view
        newScaledEyeCoordinates = UiFunctions.manuallyAdjustEyePositions(scaledImage, filename, scaledEyeCoordinates)  
      
        if scaledEyeCoordinates == newScaledEyeCoordinates:
          logger.debug("No new coarse eye positions, taking positions from database for fine control")      
        else:
          logger.debug("New eye positions in coarse image set, taking these for fine control")
          eyeCoordinates = []  
          for scaledEyePos in newScaledEyeCoordinates:  
            (sx, sy) = scaledEyePos
            eyeCoordinates.append((int(sx * scale), int(sy * scale)))
      
      newEyeCoordinates = []
      
      # detail set eye position, one per eye
      for eyeIndex, eyeCoordinate in enumerate(eyeCoordinates):
        logger.debug("Eye position of eye %d before manual correction %s", eyeIndex, (eyeCoordinate[0], eyeCoordinate[1]))
        
        (x, y) = UiFunctions.manuallyDetailAdjustEyePosition(filename, eyeIndex, cvImage, eyeCoordinate[0], eyeCoordinate[1], zoomSize)
        
        logger.debug("True eye position of eye %d after manual correction %s", eyeIndex, (x, y))
        newEyeCoordinates.append((x, y))

      middleLeftEye = newEyeCoordinates[0]
      middleRightEye = newEyeCoordinates[1]
        
      # and update the database
      logger.info("Executing: 'UPDATE eyesInPhotos SET lEyeX=%d, lEyeY=%d, rEyeX=%d, rEyeY=%d WHERE photoFileName=%s'",
        middleLeftEye[0], 
        middleLeftEye[1], 
        middleRightEye[0], 
        middleRightEye[1],
        filename)
      
      c.execute('UPDATE eyesInPhotos SET lEyeX=?, lEyeY=?, rEyeX=?, rEyeY=? WHERE photoFileName=?', 
        (middleLeftEye[0], 
        middleLeftEye[1], 
        middleRightEye[0], 
        middleRightEye[1],
        filename))  
    
      conn.commit()

    if numDBPhotos > 1:
      logger.critical("Database in bad shape. Found %d occurences of photo named %s", numDBPhotos, filename)
      conn.close() 
      sys.exit(1)
  
  logger.info("Checking Eyepositions finished.")   
  conn.close() 
Example #26
File: ELIME.py Project: stahlfabrik/ELIME
def addMissingEyeData(srcPath, dbPath, maxDimension=1024, detectionDebug=False, zoomSize=640, customDateFormat=''):
  """Add eye postions of photos not yet in database to database"""
  logger = logging.getLogger('ELIME.addToDB')
   
  if dbPath is None:
    logger.error("dbPath is invalid")
    return     
  
  if not os.path.exists(dbPath):
    logger.info("No Database file at %s ,yet.", dbPath)
  
  if srcPath is None:
    logger.error("srcPath is not valid")
    return
  
  logger.debug("Preparing database tables...")
  
  # create database if it does not exist yet
  DatabaseFunctions.prepareDataBaseTable(dbPath)
  
  # connect to database file
  conn = sqlite3.connect(dbPath, detect_types=sqlite3.PARSE_DECLTYPES)
  c = conn.cursor()
  
  # get all jpgs in source directory
  srcFiles = [ f for f in os.listdir(srcPath) if os.path.isfile(os.path.join(srcPath,f)) ]
  srcPhotos = list(filter(HelperFunctions.filefilter, srcFiles))
  
  numPhotos = len(srcPhotos)
  if numPhotos == 0:
    logger.warning("No photos found in source path %s", srcpath)
    return

  # get the number of pictures already in the database
  numAllDBPhotos = DatabaseFunctions.numberOfPhotosInDB(c)
  
  # simple consistency check on database: are there at least as many pictures in db as in
  # source path?
  if numPhotos < numAllDBPhotos:
    logger.warning("There are just %d photos in source path %s, but %d photos in database %s", numPhotos, srcPAth, numAllDBPhotos, dbPath)
    logger.warning("Please run a database tidy before, if you know what you are doing!")
    return
  
  # step through all pictures in sourcepath  
  for inputImageFileName in srcPhotos:
      
    logger.debug("Image name: %s", inputImageFileName)
    
    inputImageFilePath = os.path.join(srcPath, inputImageFileName)
    
    # get picture's creation date and time
    photoDateTime = ImageFunctions.getCreationDateTimeOfPicture(inputImageFilePath, customDateFormat)
    
    # check if photo is already in the database
    c.execute('''SELECT * FROM eyesInPhotos WHERE photoFileName=?''',(inputImageFileName,))
    dbPhotos = c.fetchall()

    numDBPhotos = len(dbPhotos)

    if numDBPhotos == 0 or (numDBPhotos == 1 and ((dbPhotos[0][3] is None) or (dbPhotos[0][4] is None) or (dbPhotos[0][5] is None) or (dbPhotos[0][6] is None))):
      if numDBPhotos == 0:
        # the picture with this filename is not in database yet
        logger.info("Photo %s not in database yet", inputImageFileName)
      if numDBPhotos == 1:
        # there is one picture with the filename but data is incomplete
        logger.info("Eye info for photo %s in db incomplete (%d,%d), (%d,%d)", inputImageFileName, dbPhotos[0][3], dbPhotos[0][4], dbPhotos[0][5], dbPhotos[0][6])
      
      # find eye positions and add everything to database
      
      # create an OpenCV image from the PIL image
      pilImage = ImageFunctions.loadAndTransposePILImage(inputImageFilePath)
      cvImage = ImageFunctions.convertPIL2CV(pilImage)

      # get the image size
      size = cv.GetSize(cvImage)
  
      # create scaling factor for too large images
      maxDimension = float(maxDimension)
      
      scale = 1.0
      if size[0] > maxDimension or size[1] > maxDimension:
        scale = max(size[0]/maxDimension, size[1]/maxDimension)

      logger.debug("Image scale factor is %f", scale)
      
      newSize = ( int(size[0] / scale), int (size[1] / scale) )

      # create a scaled down version of the original picture 
      scaledImage = cv.CreateImage(newSize, cvImage.depth, cvImage.nChannels)
      cv.Resize(cvImage, scaledImage)
      
      # find eye coordinates in scaled picture automatically
      scaledEyeRects = OpenCvFunctions.eyeRectsInImage(scaledImage, inputImageFileName, detectionDebug)
      logger.debug("Scaled eye rectangles detected %s", scaledEyeRects)
      
      scaledEyeCoordinates = []
      for scaledEyeRect in scaledEyeRects:
        scaledEyeCoordinates.append(HelperFunctions.middleOfRect(scaledEyeRect))
      
      logger.debug("Scaled eye positions detected %s", scaledEyeCoordinates)

      # manually adjust eye positions in scaled image
      scaledEyeCoordinates = UiFunctions.manuallyAdjustEyePositions(scaledImage, inputImageFileName, scaledEyeCoordinates)
      logger.debug("Scaled eye positions manually corrected %s", scaledEyeCoordinates)

      eyeCoordinates = []
      
      # scale back eye position to original sized image
      for eyeIndex, scaledEyePos in enumerate(scaledEyeCoordinates):
        (sx, sy) = scaledEyePos
        (eyecenterX, eyecenterY) = (int(sx * scale), int(sy * scale))
        logger.debug("True eye position of eye %d before manual correction %s", eyeIndex, (eyecenterX, eyecenterY))
        (x, y) = UiFunctions.manuallyDetailAdjustEyePosition(inputImageFileName, eyeIndex, cvImage, eyecenterX, eyecenterY, zoomSize)
        logger.debug("True eye position of eye %d after manual correction %s", eyeIndex, (x, y))
        eyeCoordinates.append((x, y))
        
      # save everything to database
      middleLeftEye = eyeCoordinates[0]
      middleRightEye = eyeCoordinates[1]
    
      if len(dbPhotos) == 0:
        # create new entry in db
        logger.debug("Executing: 'INSERT INTO eyesInPhotos (photoFileName, date, lEyeX, lEyeY, rEyeX, rEyeY) VALUES (%s, %s, %d, %d, %d, %d)'", 
          inputImageFileName, 
          photoDateTime, 
          middleLeftEye[0], 
          middleLeftEye[1], 
          middleRightEye[0], 
          middleRightEye[1])
          
        c.execute('INSERT INTO eyesInPhotos (photoFileName, date, lEyeX, lEyeY, rEyeX, rEyeY) VALUES (?, ?, ?, ?, ?, ?)', 
          (inputImageFileName, 
          photoDateTime, 
          middleLeftEye[0], 
          middleLeftEye[1], 
          middleRightEye[0], 
          middleRightEye[1]))
          
      else:
        # update entry in the database
        logger.debug("Executing: 'UPDATE eyesInPhotos SET lEyeX=%d, lEyeY=%d, rEyeX=%d, rEyeY=%d WHERE photoFileName=%s'",
          middleLeftEye[0], 
          middleLeftEye[1], 
          middleRightEye[0], 
          middleRightEye[1],
          inputImageFileName)
        
        c.execute('UPDATE eyesInPhotos SET lEyeX=?, lEyeY=?, rEyeX=?, rEyeY=? WHERE photoFileName=?', 
          (middleLeftEye[0], 
          middleLeftEye[1], 
          middleRightEye[0], 
          middleRightEye[1],
          inputImageFileName))  
      
      conn.commit()
    
    # we found the image in the database with complete data or there are more than 1 image
    else:
      if numDBPhotos > 1:
        logger.critical("Database in bad shape. Found %d occurences of photo named %s", numDBPhotos, inputImageFileName)
        conn.close() 
        sys.exit(1)
      else:
        logger.info("Photo %s already in db", inputImageFileName)
        
  newNumAllDBPhotos = DatabaseFunctions.numberOfPhotosInDB(c)
        
  logger.info("Added %d photos with eyeinfo to database %s",  newNumAllDBPhotos - numAllDBPhotos, dbPath)
  conn.close()
Example #27
def apply_quickseg(image,
                   image_bandnames,
                   image_metadata,
                   ratio=1.0,
                   maxdist=4,
                   kernel_window_size=7,
                   directory_output=None):
    """
    Apply quickshift segmentation for the specified set of parameters.
    
    Inputs:
    image: nd array
        Input array for segmentation. Dimensions should be rows x columns x bands.
    image_bandnames: list
        List specifying the band order in image. Possible elements are:
        rVH, rVV, fVH, fVV, rR, fR, B2, B3, B4, B5, B6, B7, B8, B8A, B11, B12
    image_metadata: dict
        Dictionary specifying image metadata, output of rasterio meta property.
    ratio: float (default=1.0)
        Ratio balancing color-space proximity and image-space proximity, should be between 0 and 1. Higher values give more weight to color-space.
    maxdist: float (default=4)
        Cut-off point for data distances. Higher means fewer clusters.
    kernel_window_size: int (default=7)
        Size of Gaussian kernel used in smoothing the sample density. Higher means fewer clusters. Minimum equals 7x7.
    directory_output: str or None (default=None)
        If not None, output will be saved to specified path.
    Outputs:
    segments_quick:
        Vectorized segments (output of ipf.polygonize).
    image_segmented: nd array
        Array of segment IDs.
    """
    # Check image dimensions
    no_rows, no_cols, no_bands = image.shape
    if no_bands > no_rows:
        print(
            "Warning! Image dimensions should be row x column x bands. Current dimensions are {}x{}x{}, which seems wrong. Swapping axes..."
            .format(no_rows, no_cols, no_bands))
        image = np.transpose(image, (1, 2, 0))
        no_rows, no_cols, no_bands = image.shape
    else:
        print("Image dimensions ({}x{}x{}) are valid.".format(
            no_rows, no_cols, no_bands))
    # Normalize data
    for band_index in np.arange(no_bands):
        if np.nanstd(image[:, :, band_index]) != 1 or np.nanmean(
                image[:, :, band_index]) != 0:
            band = image[:, :, band_index]
            band[np.isfinite(
                band)] = preprocessing.StandardScaler().fit_transform(
                    band[np.isfinite(band)].reshape(-1, 1))[:, 0]
            image[:, :, band_index] = band
    # Segmentation
    kernel_size = (kernel_window_size - 1) / 6
    image_segmented = seg.quickshift(image.astype('double'),
                                     ratio=ratio,
                                     max_dist=maxdist,
                                     kernel_size=kernel_size,
                                     convert2lab=False)
    image_segmented += 1  # add 1 to avoid background value 0
    image_segmented = measure.label(image_segmented, connectivity=1)
    num_segments = np.unique(image_segmented).size
    mask = ~np.isnan(image[:, :, 0])
    segments_quick = ipf.polygonize(image_segmented,
                                    mask=mask,
                                    transform=image_metadata["transform"])
    print("{} - {} segments detected.".format(datetime.datetime.now(),
                                              num_segments))
    # Save output
    if directory_output:
        output_filename_tiff = os.path.join(
            directory_output,
            "Segments_r{}_m{}_k{}.tif".format(ratio, maxdist,
                                              kernel_window_size))
        print("Saving raster output to {}...".format(output_filename_tiff))
        ipf.ar2tif(image_segmented,
                   output_filename_tiff,
                   image_metadata["crs"],
                   image_metadata["transform"],
                   dtype=rasterio.int32)

        output_filename_shp = os.path.join(
            directory_output,
            "Segments_r{}_m{}_k{}.shp".format(ratio, maxdist,
                                              kernel_window_size))
        print("Saving features output to {}...".format(output_filename_shp))
        segments_quick.to_file(output_filename_shp)
    # Return
    return segments_quick, image_segmented
Example #28
n_test = X_test.shape[0]
# Shape of traffic sign image
image_shape = X_train[0].shape
# How many unique classes/labels there are in the dataset.
classes = np.unique(y_train)

print('Images loaded.')
print "Training samples: " + str(n_train)
print "Testing samples: " + str(n_test)
print "Image data shape: " + str(image_shape)
print "Classes: " + str(classes) + "\n"
# ------------------------------------------------------------------ #

# Pre-Process
## Pre-Process: RGB
X_train_prep = imagefunctions.preprocess_rgb(X_train)
X_test_prep = imagefunctions.preprocess_rgb(X_test)
## Pre-Process: Grayscale
#X_train_prep = imagefunctions.preprocess_grayscale(X_train)
#X_test_prep = imagefunctions.preprocess_grayscale(X_test)

# check quality after pre-processing
check_quality = False
if (check_quality):
    index = random.randint(0, len(X_train))
    print("Random Test for {0}".format(y_train[index]))
    plt.figure(figsize=(5, 5))

    plt.subplot(1, 2, 1)
    plt.imshow(X_train[index].squeeze())
    plt.title("Before")
Example #29
norbert.wait_for_rapid()

cam_comp = True
cam = config.cam

adjustment_file = open('camera_adjustment_XS.txt', 'w')

while norbert.is_running():
    norbert.set_rapid_variable("WPW", 5)  # Start camera adjustment procedure in RAPID

    norbert.wait_for_rapid()

    robtarget_pucks = []

    while not robtarget_pucks:
        ImageFunctions.findPucks(cam, norbert, robtarget_pucks, cam_comp=cam_comp)

    norbert.set_robtarget_variables("puck_target", robtarget_pucks[0].get_xyz())
    norbert.set_rapid_variable("image_processed", "TRUE")

    robtarget_pucks.clear()

    norbert.wait_for_rapid()

    while not robtarget_pucks:
        ImageFunctions.findPucks(cam, norbert, robtarget_pucks, cam_comp=cam_comp)

    norbert.set_rapid_variable("image_processed", "TRUE")

    pos_low = robtarget_pucks[0].get_xyz()
    print(f'Low robtarget: ({pos_low[0]:.1f},{pos_low[1]:.1f})')
Example #30
min_red_pixels = 20
imgcount = 0
framenum = 0
for filename in glob.glob(inputpath):
    im = Image.open(filename)
    img = np.array(im)  #imnp = list(im.getdata())

    cropnum = 0
    for x in range(startx, stopx, slidestep[0]):
        for y in range(starty, stopy, slidestep[1]):
            # edge - crop needs to be full size
            if (y + windowsize[1] - 1 >= imgsize[1]):
                y = imgsize[1] - windowsize[1]
            if (x + windowsize[0] - 1 >= imgsize[0]):
                x = imgsize[0] - windowsize[0]

            window = ((x, y), (x + windowsize[0], y + windowsize[1]))
            test_img = img[window[0][1]:window[1][1],
                           window[0][0]:window[1][0]]

            if (imagefunctions.num_red_pixels(test_img) > min_red_pixels):
                fname = 'crop-' + str(framenum) + '-' + str(cropnum) + '.png'
                #print test_img.shape
                imsave(outputdir + fname, test_img)
                cropnum = cropnum + 1
                imgcount = imgcount + 1

    framenum = framenum + 1

print('extracted ' + str(imgcount) + ' training images')
Example #31
    def find_signs(self, img):
        startx = 0  #60
        stopx = imgsize[0] - windowsize[0]  #80
        starty = 0  #20 #19
        stopy = imgsize[1] - windowsize[1]  #30

        window_list = []
        for x in range(startx, stopx, slidestep[0]):
            for y in range(starty, stopy, slidestep[1]):
                img_crop = img[y:y + windowsize[1], x:x + windowsize[0]]
                img_crop_pp = imagefunctions.preprocess_one_rgb(img_crop)
                img_in = np.array(255 * img_crop_pp, dtype=np.uint8)
                if (imagefunctions.num_red_pixels(img_in) > min_red_pixels):
                    window_list.append(
                        ((x, y), (x + windowsize[0], y + windowsize[1])))

        stop_windows, warn_windows = self.search_windows(
            img, window_list, framenum=random.randint(0, 9999))

        # heatmap
        heat_stop = np.zeros_like(img[:, :, 0]).astype(float)
        heat_warn = np.zeros_like(img[:, :, 0]).astype(float)
        for bbox in window_list:
            startx = bbox[0][0]
            starty = bbox[0][1]
            endx = bbox[1][0]
            endy = bbox[1][1]
            #cv2.rectangle(img,(startx, starty),(endx, endy),(0,0,200),1)
        for bbox in warn_windows:
            startx = bbox[0][0]
            starty = bbox[0][1]
            endx = bbox[1][0]
            endy = bbox[1][1]
            heat_warn[starty:endy, startx:endx] += 1.
            #cv2.rectangle(img,(startx, starty),(endx, endy),(0,255,0),1)
        for bbox in stop_windows:
            startx = bbox[0][0]
            starty = bbox[0][1]
            endx = bbox[1][0]
            endy = bbox[1][1]
            heat_stop[starty:endy, startx:endx] += 1.
            #cv2.rectangle(img,(startx, starty),(endx, endy),(255,0,0),1)

        score_stop = np.max(heat_stop)
        score_warn = np.max(heat_warn)
        #print '[scores] stop:' + str(score_stop) + ' warn:' + str(score_warn)

        detthresh = 20
        mapthresh = 10
        labels = [None]
        if score_stop < detthresh and score_warn < detthresh:
            #print 'NO SIGN'
            decision = 0
            draw_img = img
        elif score_stop > score_warn:
            #print 'STOP'
            decision = 1
            heatmap_stop = heat_stop
            heatmap_stop[heatmap_stop <= mapthresh] = 0
            labels = label(heatmap_stop)
            #draw_img = draw_labeled_bboxes(np.copy(img), labels_stop, boxcolor=(255,0,0))
        else:
            #print 'WARNING'
            decision = 2
            # draw box
            heatmap_warn = heat_warn
            heatmap_warn[heatmap_warn <= mapthresh] = 0
            labels = label(heatmap_warn)
            #draw_img = draw_labeled_bboxes(np.copy(img), labels_warn, boxcolor=(0,255,0))

        #Image.fromarray(draw_img).show()
        return decision, labels  #draw_img
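
The commented-out draw_labeled_bboxes calls above refer to a helper that is not included in these examples. A typical implementation over the (label_array, count) pair returned by label() might look like this sketch (an assumption, not the project's code):

import numpy as np
import cv2

def draw_labeled_bboxes(img, labels, boxcolor=(255, 0, 0)):
    # labels is the (label_array, num_features) pair returned by label()
    for obj_number in range(1, labels[1] + 1):
        nonzeroy, nonzerox = (labels[0] == obj_number).nonzero()
        bbox = ((int(np.min(nonzerox)), int(np.min(nonzeroy))),
                (int(np.max(nonzerox)), int(np.max(nonzeroy))))
        cv2.rectangle(img, bbox[0], bbox[1], boxcolor, 2)
    return img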
Example #32
    def find_signs(self, img):
        startx = 0  #60
        stopx = imgsize[0] - windowsize[0]  #80
        starty = 0  #20 #19
        stopy = imgsize[1] - windowsize[1]  #30

        window_list = []
        for x in range(startx, stopx, slidestep[0]):
            for y in range(starty, stopy, slidestep[1]):
                img_in = img[y:y + windowsize[1], x:x + windowsize[0]]
                #img_crop_pp = imagefunctions.preprocess_one_rgb(img_crop)
                #img_in = np.array(255*img_crop_pp, dtype=np.uint8)
                if (imagefunctions.num_red_pixels(img_in) > min_red_pixels):
                    window_list.append(
                        ((x, y), (x + windowsize[0], y + windowsize[1])))

        #stop_windows, warn_windows = self.search_windows(img, window_list, framenum=random.randint(0,9999))
        stop_windows, warn_windows = self.search_windows(img, window_list)

        # if no window to search
        numwin = len(window_list)
        if (numwin == 0):
            decision = 0
            labels = [None]
            return decision, labels, img

        # Method 1 - Count windows
        # if (len(stop_windows) < 2) and (len(warn_windows) < 2):
        #     return 0, [None]
        # elif len(stop_windows) >= len(warn_windows):
        #     return 1, [None]
        # else:
        #     return 2, [None]

        # Method 2 - Localized heatmap based decision
        heat_stop = np.zeros_like(img[:, :, 0]).astype(float)
        heat_warn = np.zeros_like(img[:, :, 0]).astype(float)
        for bbox in window_list:
            startx = bbox[0][0]
            starty = bbox[0][1]
            endx = bbox[1][0]
            endy = bbox[1][1]
            #cv2.rectangle(img,(startx, starty),(endx, endy),(200,0,0),1)
        for bbox in warn_windows:
            startx = bbox[0][0]
            starty = bbox[0][1]
            endx = bbox[1][0]
            endy = bbox[1][1]
            heat_warn[starty:endy, startx:endx] += 1.
            cv2.rectangle(img, (startx, starty), (endx, endy), (0, 255, 0), 1)
        for bbox in stop_windows:
            startx = bbox[0][0]
            starty = bbox[0][1]
            endx = bbox[1][0]
            endy = bbox[1][1]
            heat_stop[starty:endy, startx:endx] += 1.
            cv2.rectangle(img, (startx, starty), (endx, endy), (255, 0, 0), 1)

        score_stop = np.max(heat_stop)
        score_warn = np.max(heat_warn)

        # ---- GET DECISION ---- #
        decision = self.get_decision(score_stop, score_warn, numwin)

        # plot final decision region
        mapthresh = self.K_mapthresh * numwin
        labels = [None]
        if (decision == 1):
            heatmap_stop = heat_stop
            heatmap_stop[heatmap_stop <= mapthresh] = 0
            labels = label(heatmap_stop)
        elif (decision == 2):
            heatmap_warn = heat_warn
            heatmap_warn[heatmap_warn <= mapthresh] = 0
            labels = label(heatmap_warn)

        return decision, labels, img
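
self.get_decision() is not shown in these examples. A sketch consistent with the fixed thresholds used in example #31, rescaled by the number of searched windows (K_detthresh is an assumed attribute mirroring K_mapthresh):

    def get_decision(self, score_stop, score_warn, numwin):
        detthresh = self.K_detthresh * numwin  # assumed counterpart of self.K_mapthresh
        if score_stop < detthresh and score_warn < detthresh:
            return 0  # no sign
        return 1 if score_stop > score_warn else 2  # 1 = stop sign, 2 = warning sign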
Example #33
                                    t_stdev=t_stdev,
                                    t_conv=t_conv,
                                    t_shape=t_shape,
                                    bandnames_mean=bandnames_mean,
                                    bandnames_stdev=bandnames_stdev)


def save_topickle(segments, out_filename):
    """Save intermediate result to pickle file"""
    with open(out_filename, "wb") as handle:
        pickle.dump(segments, handle)


#%% Import input data
print("{} - Loading input data...".format(datetime.datetime.now()))
image_sar, image_sar_meta, image_sar_bandnames = ipf.tif2ar(
    sar_filename, return_bandnames=True)
pixel_resolution_x = image_sar_meta["transform"][0]
pixel_resolution_y = -image_sar_meta["transform"][4]
image_sar_bandnames = ["rVH", "rVV", "fVH", "fVV"]

#%% Image segmentation
print("{} - Image segmentation...".format(datetime.datetime.now()))
t_start = datetime.datetime.now()
segments, segments_raster = qsf.apply_quickseg(
    np.transpose(image_sar, (1, 2, 0)).copy(),
    image_sar_bandnames,
    image_sar_meta,
    ratio=ratio,
    maxdist=maxdist,
    kernel_window_size=kernel_window_size)
t_end = datetime.datetime.now()
Example #34
# Shape of traffic sign image
image_shape = X_train[0].shape
# How many unique classes/labels there are in the dataset.
classes = np.unique(y_train)
n_classes = len(classes)

print('Images loaded.')
print "Training samples: " + str(n_train)
print "Validation samples: " + str(n_valid)
print "Image data shape: " + str(image_shape)
print "Classes: " + str(classes) + "\n"
# ------------------------------------------------------------------ #

# Pre-Process
## Pre-Process: RGB
X_train_prep = imagefunctions.preprocess_rgb(X_train)
X_valid_prep = imagefunctions.preprocess_rgb(X_valid)
## Pre-Process: Grayscale
#X_train_prep = imagefunctions.preprocess_grayscale(X_train)
#X_valid_prep = imagefunctions.preprocess_grayscale(X_valid)

# check quality after pre-processing
check_quality = False
if (check_quality):
    index = random.randint(0, len(X_train))
    print("Random Test for {0}".format(y_train[index]))
    plt.figure(figsize=(5, 5))

    plt.subplot(1, 2, 1)
    plt.imshow(X_train[index].squeeze())
    plt.title("Before")