Example #1
0
def trainingHookFDR(net, path, report):
  """
  This training hook is used extensively by the FDR experiments. It prints
  out the learned information about each level in the network
  """
  # Print header
  print "\n## Executing trainingHook...."

  # Print info for each level
  tierCount = VU.getTierCount(net)

  # This loop is temporary until NuPIC 2 allows retrieval of Region names and
  # node types
  regions = net.regions
  for name, r in regions.items():
    if 'FDR' in r.type or 'CLARegion' in r.type:
      if not r.getParameter('disableTemporal'):
        msg = "\n# Temporal node info for %s (nodetype %s):" % (name, r.type)
        print msg
        report.write(msg + os.linesep)

        for paramName in ['tpNumCells', 'tpNumSegments', 'tpNumSynapses',
                          'tpNumSynapsesPerSegmentMax',
                          'tpNumSynapsesPerSegmentAvg']:
          msg = "#   %s: %s" % (paramName, str(r.getParameter(paramName)))
          print msg
          report.write(msg + os.linesep)

      if not r.getParameter('disableSpatial'):
        msg = "\n# Spatial node info for %s (nodetype %s):" % (name, r.type)
        print msg
        report.write(msg + os.linesep)

        if 'spLearningStatsStr' in r.spec.parameters:
          stats = eval(r.getParameter('spLearningStatsStr'))
          keys = stats.keys()
          keys.sort()
          for key in keys:
            msg = "#   %-25s: %s" % (key, str(stats[key]))
            print msg
            report.write(msg + os.linesep)
  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """

    BaseFilter.process(self, image)

    # No background configured: pass the input through untouched
    if self.bgPath is None:
      return image

    # Lazily load the background image(s) the first time we are called
    if self.bgImgs is None:
      fullPath = self.bgPath
      if not os.path.isabs(self.bgPath):
        # Walk up from the current directory looking for a 'vision' component
        pathPair = os.path.split(os.path.abspath(os.curdir))
        while pathPair[0]:
          if pathPair[1] == 'vision':
            break
          pathPair = os.path.split(pathPair[0])

        # Found the vision directory: resolve the path relative to its data
        if pathPair[1] == 'vision':
          fullPath = VisionUtils.findData(os.path.join(pathPair[0], 'vision'),
                        self.bgPath, 'backgound', 'background images', True)

      if os.path.isfile(fullPath):
        # A single file gives us exactly one background image
        self.bgImgs = [Image.open(fullPath).convert('LA')]
      else:
        # A directory: collect every image beneath it, skipping dot entries
        self.bgImgs = []
        for dirpath, dirnames, filenames in os.walk(fullPath):
          # Prune hidden directories in place so the walk skips them
          for hidden in [d for d in dirnames if d.startswith('.')]:
            dirnames.remove(hidden)
          dirnames.sort()

          # Hidden files are ignored; the rest are opened in sorted order
          visible = sorted(f for f in filenames if not f.startswith('.'))
          for fname in visible:
            self.bgImgs.append(
                Image.open(os.path.join(dirpath, fname)).convert('L'))

      # Cache of the backgrounds, rescaled on demand to the input size
      self.scaledBGImgs = [img.copy() for img in self.bgImgs]

    # Choose one background at random
    idx = self._rng.randint(0, len(self.bgImgs)-1)
    bgImg = self.scaledBGImgs[idx]

    # Rescale the cached copy if it doesn't match the source image size
    if bgImg.size != image.size:
      bgImg = self.scaledBGImgs[idx] = \
          self.bgImgs[idx].resize(image.size, Image.ANTIALIAS)

    # Build the paste mask from the image's alpha channel; synthesize one
    # when there is no alpha or the alpha is just a trivial bounding box
    mask = image.split()[-1]
    if image.mode[-1] != 'A' or isSimpleBBox(mask):
      mask = createMask(image, threshold=self._threshold, fillHoles=True,
                        backgroundColor=self.background, blurRadius=self._blurRadius,
                        maskScale=self._maskScale)

    # Composite the source image over the chosen background
    newImage = bgImg.copy()
    newImage.paste(image, (0,0), mask)

    # Opaque alpha so the network considers the entire composited image
    newImage.putalpha(ImageChops.constant(newImage, 255))

    return newImage
  def __init__(self, parent, network):
    """
    Set up the inspector: locate the classifier and sensor, wire up the
    optional gabor region, and create the traits used to display the
    prototype images, scores, and image names.

    @param parent  -- Parent element, passed through to NetworkInspector.
    @param network -- The network to inspect.

    Raises RuntimeError if the network contains no classifier region.
    """

    NetworkInspector.__init__(self, parent, network)

    # Find the classifier
    self.classifier = _findClassifier(network)
    if not self.classifier:
      raise RuntimeError("No classifier found (no region with 'categoriesOut')")

    # Get the categoryInfo from the sensor; each entry's first element is the
    # category name
    sensor = _getElement(network, 'sensor')
    self.categoryInfo = _getCategoryInfo(sensor)
    self.catNames = [cat[0] for cat in self.categoryInfo]

    # Acquire reference to the gabor region
    if 'GaborRegion' in _getRegionType(network, 'level1SP'):
      self._hasGabor = True
      self._gaborRegion = None
      self._gaborProxy = None
      gaborRegion = _getElement(network, VisionUtils.getTierName(1, network=network))
      if gaborRegion:
        self._gaborProxy = _getGaborProxy(gaborRegion)

      # Obtain gabor scales: each input width relative to the first (base)
      # input width.
      # NOTE(review): if gaborRegion was not found above, _gaborProxy is still
      # None and this attribute access raises -- confirm the region is always
      # retrievable when its type matches.
      gaborInputDims = self._gaborProxy._inputDims
      baseWidth = float(gaborInputDims[0][1])
      self.scales = [float(dims[1])/baseWidth for dims in gaborInputDims]

      # Used to store cached gabor responses
      self._cachedResponses = {}
    else:
      self._gaborRegion = None
      self._hasGabor = False

    # Display defaults
    self.showTitle = False
    self.mode = 'unfiltered'
    self.filterOnCategory = False
    self.logDir = ""
    self._bboxes = None
    self._imageFilenames = None

    # Check if our classifier complies with the required API:
    self._compliantClassifier = _classifierComplies(self.classifier)
    if self._compliantClassifier:
      # Convert category labels to numpy
      c = _getSelf(self.classifier)
      categoryList = c.getCategoryList()
      if categoryList is None:
        categoryList = []
      self._catLabels = numpy.array(categoryList)
      self.numPrototypes = "Total Prototypes: %d" % len(self._catLabels)

    # Parameters that need to be handled with custom code in updateParameters
    customParameters = {
      'numPresentationsInLog': Str,
      'logDir': Str,
      # Used for displaying "Distance:"
      'dummy': Str
    }

    for name, trait in customParameters.iteritems():
      self.add_trait(name, trait(label=name))

    self._createDefaultImages()

    # Add traits: one image per available space plus a score and an image
    # name for each of the _numNeighbors displayed prototypes
    if not self._hasGabor:
      availableSpaces = ['pixel']
    else:
      availableSpaces = ['pixel', 'gabor']

    for k in xrange(self._numNeighbors):
      for spaceType in availableSpaces:
        traitName = '%s%03d' % (spaceType, k)
        self.add_trait(traitName, Instance(PIL.Image.Image))
        # setattr replaces the old exec-based assignment: same effect,
        # without compiling dynamically-built code
        setattr(self, traitName, self._missingImage)

      traitName = 'score%03d' % k
      self.add_trait(traitName, Str)
      setattr(self, traitName, self._makeScoreString())

      traitName = 'imagename%03d' % k
      self.add_trait(traitName, Str)
      setattr(self, traitName, "")

    self.createView()