Example 1
 def __init__(self):
   # * Create application context, passing in custom arguments, and get a logger
   argParser = argparse.ArgumentParser(add_help=False)
   argParser.add_argument('--features', type=str, default=None, help="features to look for, comma separated")
   self.context = Context.createInstance(description=self.__class__.__name__, parent_argparsers=[argParser])
   self.logger = logging.getLogger(self.__class__.__name__)
   
   # * Parse arguments
   self.features = self.context.options.features.split(',') if (hasattr(self.context.options, 'features') and self.context.options.features is not None) else []
   self.featureWeights = dict()
   for feature in self.features:
     if ':' in feature:  # check for explicit weights, e.g. RG:0.8,BY:0.75
       try:
         featureSpec = feature.split(':')
         self.featureWeights[featureSpec[0].strip()] = float(featureSpec[1].strip())
       except Exception as e:
         self.logger.warn("Invalid feature specification '%s': %s", feature, e)
     else:  # use default weight
       self.featureWeights[feature.strip()] = default_feature_weight
   if 'rest' not in self.featureWeights:
     self.featureWeights['rest'] = default_feature_weight_rest  # explicitly specify rest, otherwise previous weights will remain
   self.logger.info("Searching with feature weights: %s", self.featureWeights)
   
   # * Create systems and associated managers
   self.context.update()  # get fresh time
   self.visSys = VisualSystem(imageSize=self.image_size, timeNow=self.context.timeNow, showMonitor=False)
   self.visMan = VisionManager(self.visSys, screen_background=self.screen_background)
   # TODO: Design a better way to share systems/managers (every system has a parent/containing agent?)
   
   # * Export RPC calls, if enabled
   if self.context.isRPCEnabled:
     self.logger.info("Exporting RPC calls")
     rpc.export(self.visSys)
     rpc.export(self.visMan)
     rpc.refresh()  # Context is expected to have started RPC server
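
A minimal standalone sketch of the same feature-weight parsing logic as above, with hypothetical default weights (in the agent these come from its own constants):

def parse_feature_weights(features_str, default_weight=0.75, default_rest_weight=0.5):
  """Parse a comma-separated spec such as 'RG:0.8,BY:0.75' into a dict of feature weights."""
  weights = dict()
  for feature in (features_str.split(',') if features_str else []):
    if ':' in feature:  # explicit weight, e.g. RG:0.8
      name, _, value = feature.partition(':')
      try:
        weights[name.strip()] = float(value.strip())
      except ValueError:
        pass  # the agent logs a warning for invalid specs instead
    elif feature.strip():  # bare feature name: use the default weight
      weights[feature.strip()] = default_weight
  weights.setdefault('rest', default_rest_weight)  # always include 'rest' explicitly
  return weights

#parse_feature_weights('RG:0.8,BY:0.75')  # -> {'RG': 0.8, 'BY': 0.75, 'rest': 0.5} (key order may vary)
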
Example 2
File: coil.py  Project: napratin/nap
 def __init__(self):
   # * Create application context, passing in custom arguments, and get a logger
   argParser = argparse.ArgumentParser(add_help=False)
   #argParser.add_argument('--in', type=str, default="coil-100", help="path to directory containing input images")  # use input_source as directory; default to current directory
   argParser.add_argument('--out', type=str, default=None, help="path to output directory")  # should this be a common parameter in Context?
   argParser.add_argument('--obj', type=str, default="1,101,1", required=False, help="object ID range, right-open interval <start>,<stop>,<step> (no spaces); default: full range")
   argParser.add_argument('--view', type=str, default="0,360,5", required=False, help="view angle range in degrees, right-open interval <start>,<stop>,<step> (no spaces); default: full range")
   self.context = Context.createInstance(description="COIL-100 image dataset processor", parent_argparsers=[argParser])  # TODO how to gather arg parsers from other interested parties?
   self.logger = logging.getLogger(self.__class__.__name__)
   
   # * Parse arguments
   self.inDir = self.context.options.input_source  # should be an absolute path to a dir with COIL images; if it is a file/camera instead, it will be used as sole input
   # TODO also accept wildcards using glob.glob()?
   self.outDir = self.context.options.out  # just for convenience
   self.outFile = None
   if self.outDir is not None:  # TODO otherwise default to some directory?
     if os.path.isdir(self.outDir):
       now = datetime.now()
       outFilepath = os.path.join(self.outDir, "{}{}{}{}{}.{}".format(self.output_file_prefix, self.output_file_sep, now.strftime('%Y-%m-%d'), self.output_file_sep, now.strftime('%H-%M-%S'), self.output_file_ext))
       self.logger.info("Output file: {}".format(outFilepath))
       self.outFile = open(outFilepath, 'w')  # open output file for storing features (TODO use with.. block instead in start()?)
     else:
       self.logger.warn("Invalid output directory \"{}\"; no output will be saved".format(self.outDir))
       self.outDir = None  # TODO create output directory if it doesn't exist
   self.objRange = xrange(*(int(x) for x in self.context.options.obj.split(',')))
   self.viewRange = xrange(*(int(x) for x in self.context.options.view.split(',')))
   
   # * Create visual system and manager
   self.context.update()  # get fresh time
   self.visSys = VisualSystem(imageSize=self.image_size, timeNow=self.context.timeNow)
   self.visMan = COILManager(self.visSys)
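
The `--obj` and `--view` strings are right-open `<start>,<stop>,<step>` intervals; a quick sketch of how the defaults above expand (using range(), which is what xrange() provides here):

def parse_range(spec):
  """Expand a '<start>,<stop>,<step>' string into a right-open integer range."""
  start, stop, step = (int(x) for x in spec.split(','))
  return range(start, stop, step)

objRange = parse_range("1,101,1")   # object IDs 1..100 (COIL-100 has 100 objects)
viewRange = parse_range("0,360,5")  # view angles 0, 5, ..., 355 degrees (72 views per object)
# Full default sweep: 100 objects x 72 views = 7200 (object, view) pairs
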
Example 3
    
    # * TODO Compute feature vector of attended region
    
    # * Show output images if in GUI mode
    if self.context.options.gui:
      #cv2.imshow("Hue", self.images['H'])
      #cv2.imshow("Saturation", self.images['S'])
      #cv2.imshow("Value", self.images['V'])
      cv2.imshow("Rod response", self.imageRod)
      cv2.imshow("S-cone response", self.imagesCone['S'])
      cv2.imshow("M-cone response", self.imagesCone['M'])
      cv2.imshow("L-cone response", self.imagesCone['L'])
      cv2.imshow("ON Bipolar cells", self.imagesBipolar['ON'])
      cv2.imshow("OFF Bipolar cells", self.imagesBipolar['OFF'])
      #cv2.imshow("ON Ganglion cells", self.imagesGanglion['ON'])
      #cv2.imshow("OFF Ganglion cells", self.imagesGanglion['OFF'])
      for ganglionType, ganglionImage in self.imagesGanglion.iteritems():
        cv2.imshow("{} Ganglion cells".format(ganglionType), ganglionImage)
      cv2.imshow("Salience", self.imageSalience)
      
      # Designate a representative output image
      self.imageOut = self.imageSalience
      #_, self.imageOut = cv2.threshold(self.imageOut, 0.15, 1.0, cv2.THRESH_TOZERO)  # apply threshold to remove low-response regions
    
    return True, self.imageOut


if __name__ == "__main__":
  Context.createInstance(description="Test application that uses a SimplifiedProjector to run image input through a (simplified) Retina.")
  run(Projector(Retina()))
Example 4
    
    #result_uint8 = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)  # normalize (issue is variable scale)
    result_uint8 = np.uint8(result * 255.0)  # scale, for display (better avoid and return None if no GUI)
    
    #return result_uint8, minMatch, maxMatch, minMatchLoc, maxMatchLoc  # too many returns, generalize to *best* match value and loc
    if self.method == cv2.TM_SQDIFF or self.method == cv2.TM_SQDIFF_NORMED:
      return PatternMatch(value=minMatch, location=minMatchLoc, result=result_uint8, matcher=self)
    else:  # TM_CCORR or TM_CCOEFF
      return PatternMatch(value=maxMatch, location=maxMatchLoc, result=result_uint8, matcher=self)


if __name__ == "__main__":
  argParser = argparse.ArgumentParser(add_help=False)
  argParser.add_argument('--zelinsky', action='store_true', help="run a Zelinsky search agent")
  argParser.add_argument('--target', default='Q', choices=('Q', 'O'), help='target symbol (Q or O)')
  argParser.add_argument('--size', dest='num_stimuli', type=int, default=5, help='display size (no. of stimuli) to expect')
  argParser.add_argument('--features', type=str, default=None, help="features to look for, comma separated")  # duplicated for VisualSearchAgent (TODO: Find a better way to unify args, parsers)
  context = Context.createInstance(description="Zelinsky search agent", parent_argparsers=[argParser])
  if context.options.zelinsky:
    if context.options.features is None:
      context.options.features = 'OFF:1.0'  # Zelinsky-specific
    ZelinksyFinder(target=context.options.target, distractors=('O' if context.options.target == 'Q' else 'Q'), numStimuli=context.options.num_stimuli).run()
  else:
    VisualSearchAgent().run()
  
  # Some example invocations
  #ZelinksyFinder(target='Q', distractors=['O'], numStimuli= 5).run()  # target: 'Q', distractor: 'O'; size:  5 [default]
  #ZelinksyFinder(target='Q', distractors=['O'], numStimuli=17).run()  # target: 'Q', distractor: 'O'; size: 17
  #ZelinksyFinder(target='O', distractors=['Q'], numStimuli= 5).run()  # target: 'O', distractor: 'Q'; size:  5
  #ZelinksyFinder(target='O', distractors=['Q'], numStimuli=17).run()  # target: 'O', distractor: 'Q'; size: 17
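  
  # Roughly equivalent command-line invocations (the script name below is only a placeholder for the actual module):
  #python visual_search.py --zelinsky                       # target: 'Q', distractor: 'O'; size:  5 [default]; features: OFF:1.0
  #python visual_search.py --zelinsky --target O --size 17  # target: 'O', distractor: 'Q'; size: 17
  #python visual_search.py --features RG:0.8,BY:0.75        # without --zelinsky: plain VisualSearchAgent (which reads the same --features option)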
Example 5
    self.move(np.int_([-offset[0], -offset[1]]))  # move back to center
  
  def enableEventLogging(self, filename_prefix="ocular-events", rpc_export=False, start_server=False):
    eventFilename = "logs/{}_{}.log".format(filename_prefix, time.strftime(EventLogger.timestamp_format, time.localtime(time.time())))
    self.eventLogger = EventLogger(eventFilename, rpc_export=rpc_export, start_server=start_server)
    self.logEvents = True

  def getFocusPoint(self):
    return self.projector.focusPoint
  
  def getFocusOffset(self):
    return (self.projector.focusPoint[0] - self.projector.screenSize[0] / 2, self.projector.focusPoint[1] - self.projector.screenSize[1] / 2) 
  
  def getVelocity(self):
    return self.v


# Testing
if __name__ == "__main__":
  context = Context.createInstance(description="Ocular motion testing")
  projector = Projector()
  ocular = EmulatedOcularMotionSystem(projector)
  runner = InputRunner(projector)
  
  while runner.update():
    ocular.update(context.timeNow)
    if not ocular.isMoving:
      ocular.move(np.int_([np.random.uniform(-100, 100), np.random.uniform(-100, 100)]))
  
  runner.cleanUp()
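
As a small worked example of getFocusOffset() above: with a hypothetical screen size of 640x480 and the focus point at (400, 300), the offset reported relative to the screen centre would be:

# focusPoint = (400, 300), screenSize = (640, 480)   [hypothetical values]
# offset = (400 - 640 / 2, 300 - 480 / 2) = (80, 60)
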
Example 6
 def setUp(self):
   Context.createInstance()
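
Example 6 is the minimal form: a test fixture only has to make sure the application context exists. A sketch of how that might sit inside a full unittest.TestCase (class and test names here are hypothetical, and the time check is only illustrative):

import unittest
# assumes Context is importable from the project, as in the examples above

class VisionTestCase(unittest.TestCase):
  def setUp(self):
    # Create (or reuse) the shared application context before each test
    self.context = Context.createInstance()

  def test_time_advances(self):
    self.context.update()  # get fresh time, as in the other examples
    before = self.context.timeNow
    self.context.update()
    self.assertGreaterEqual(self.context.timeNow, before)
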
Example 7
        if keyChar == '.':
          self.testRodIdx = (self.testRodIdx + 1) % len(self.target.rods.neurons)
          self.testRod = self.target.rods.neurons[self.testRodIdx]
          self.logger.info("[>] Test rod [{}]: {}".format(self.testRodIdx, self.testRod))
        elif keyChar == ',':
          self.testRodIdx = (self.testRodIdx - 1) % len(self.target.rods.neurons)
          self.testRod = self.target.rods.neurons[self.testRodIdx]
          self.logger.info("[<] Test rod [{}]: {}".format(self.testRodIdx, self.testRod))
        else:
          return Projector.onKeyPress(self, key, keyChar)
        return True
    
    print "Running MonitoringProjector instance..."
    run(MonitoringProjector(Projector), description="Retina processing with monitor on a single neuron.")


if __name__ == "__main__":
  argParser = argparse.ArgumentParser(add_help=False)
  argParser.add_argument('--test', default="test_projector", help="test case to run (a test_ method in TestRetina)")
  context = Context.createInstance(parent_argparsers=[argParser])
  try:
    runner = TestRetina(context.options.test).run
    if context.options.debug:
      import pdb
      pdb.runcall(runner)
    else:
      runner()
  except ValueError as e:
    print "Invalid test: {}".format(e)
    print "Pick from: {}".format(", ".join(name for name, method in inspect.getmembers(TestRetina, predicate=inspect.ismethod) if name.startswith("test_")))