Code example #1
Score: 0
File: coil.py  Project: napratin/nap
 def run(self):
   """Run the visual system over the configured input and record feature outputs.

   If the input source is a directory, iterates over every (object, view)
   combination in self.objRange x self.viewRange, builds the corresponding
   image file path, and runs each image through self.visMan; otherwise runs
   the single input source (image or video) once.  When self.outFile is set,
   a tab-separated header plus one row of flattened feature-matrix means per
   image is written, and the file is closed at the end.
   """
   if self.outFile is not None:
     # Header row: object ID, view angle, then one column per (feature label, neuron index) pair
     #self.outFile.write("{}\t{}\t{}\t{}\n".format('obj', 'view', '\t'.join(["{}_mean".format(label) for label in self.visSys.featureLabels]), '\t'.join(["{}_sd".format(label) for label in self.visSys.featureLabels])))  # vector mean and SD
     self.outFile.write("{}\t{}\t{}\n".format('obj', 'view', '\t'.join(["{}_{}".format(label, i) for label in self.visSys.featureLabels for i in xrange(self.visSys.num_feature_neurons)])))  # matrix mean
   
   if self.context.isDir:  # input source is a directory
     # * Run visual input using manager, looping over all specified object images
     for obj in self.objRange:
       for view in self.viewRange:
         # ** Build image file path from object ID and view angle
         # NOTE(review): filename pattern is <prefix><obj><sep><view>.<ext> (COIL-style naming)
         input_file = os.path.join(self.inDir, "{}{}{}{}.{}".format(self.input_file_prefix, obj, self.input_file_sep, view, self.input_file_ext))
         #assert os.path.exists(input_file), "Input file \"{}\" doesn't exist".format(input_file)
         # Missing files are skipped with a warning rather than aborting the whole sweep
         if not os.path.exists(input_file):
           self.logger.warn("Input file \"{}\" doesn't exist".format(input_file))
           continue
         self.logger.info("Input file: {}".format(input_file))
         
         # ** Modify context to set image file as input source, and run it through the visual system
         self.context.options.input_source = input_file
         self.context.isImage = True
         print "Running..."
         run(self.visMan, resetContextTime=False)  # use the same manager so that visual system is only created once
         if self.outFile is not None:
           # One output row per image: flattened mean of the feature matrix computed by the manager
           #self.outFile.write("{}\t{}\t{}\t{}\n".format(obj, view, '\t'.join(str(feat_mean) for feat_mean in self.visMan.featureVectorMean), '\t'.join(str(feat_sd) for feat_sd in self.visMan.featureVectorSD)))  # vector mean and SD
           self.outFile.write("{}\t{}\t{}\n".format(obj, view, '\t'.join(str(feat) for feat in self.visMan.featureMatrixMean.flat)))  # matrix mean
   else:
     run(self.visMan, resetContextTime=False)  # run on the sole input source (image or video)
   
   # Flush and release the output file once all inputs have been processed
   if self.outFile is not None:
     self.outFile.close()
     self.logger.info("Output file closed.")
Code example #2
Score: 0
File: retina.py  Project: napratin/nap
 def test_rod_potential(self):
   """Interactively monitor a single rod neuron's membrane potential while the retina runs.

   Defines a Projector subclass that forces one rod to update every frame,
   prints its state each iteration, optionally live-plots its potential, and
   lets '.'/',' key-presses cycle the monitored rod forward/backward.
   """
   from ..neuron import action_potential_trough, action_potential_peak
   
   class MonitoringProjector(Projector):
     do_plot = False  # class-level switch: enable live matplotlib plotting of the rod potential
     def __init__(self, retina=None):
       Projector.__init__(self, retina)
       
       # Neuron to monitor (start with the first rod; changeable at runtime via key-presses)
       self.testRodIdx = 0
       self.testRod = self.target.rods.neurons[self.testRodIdx]
       self.logger.info("Test rod [{}]: {}".format(self.testRodIdx, self.testRod))
       
       # Plotting: set up a single axes spanning the physiological potential range
       if self.do_plot:
         self.logger.info("Plotting is enabled")
         self.fig = figure()
         hold(True)
         self.ax = self.fig.gca()
         self.ax.set_ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.02)
         self.ax.set_title("Neuron")
         self.ax.set_xlabel("Time (s)")
         self.ax.set_ylabel("Membrane potential (V)")
     
     def process(self, imageIn, timeNow):
       """Run the base projector step, then sample/plot the monitored rod and mark its pixel."""
       keepRunning, imageOut = Projector.process(self, imageIn, timeNow)
       self.testRod.p = 1.0  # make sure it updated every iteration
       if self.do_plot:
         self.testRod.plot()
         pause(0.01)  # yield to the matplotlib event loop so the plot refreshes
       print "{}\t{}\t{}\t{}\t{}\t{}".format(timeNow, self.testRod.response, self.testRod.potential, self.testRod.I_e, self.testRod.expDecayFactor, self.testRod.pixelValue)  # [debug, non-GUI]
       #print "{}\t{}\t{}\t{}".format(timeNow, self.testRod.potential, self.testRod.expDecayFactor, self.testRod.pixelValue)  # [debug, non-GUI, for BipolarCells]
       #cv2.circle(imageOut, (self.testRod.pixel[0], self.testRod.pixel[1]), 3, np.uint8([255, 0, 255]))
       # Highlight the monitored rod's pixel in magenta (note: pixel is (x, y), image indexing is [row, col])
       imageOut[self.testRod.pixel[1], self.testRod.pixel[0]] = np.uint8([255, 0, 255])
       return keepRunning, imageOut
     
     def onKeyPress(self, key, keyChar=None):
       """Cycle the monitored rod with '.'/',' keys; delegate anything else to the base class."""
       if keyChar == '.':
         # Next rod (wraps around via modulo)
         self.testRodIdx = (self.testRodIdx + 1) % len(self.target.rods.neurons)
         self.testRod = self.target.rods.neurons[self.testRodIdx]
         self.logger.info("[>] Test rod [{}]: {}".format(self.testRodIdx, self.testRod))
       elif keyChar == ',':
         # Previous rod (Python's % keeps the index non-negative)
         self.testRodIdx = (self.testRodIdx - 1) % len(self.target.rods.neurons)
         self.testRod = self.target.rods.neurons[self.testRodIdx]
         self.logger.info("[<] Test rod [{}]: {}".format(self.testRodIdx, self.testRod))
       else:
         return Projector.onKeyPress(self, key, keyChar)
       return True
   
   print "Running MonitoringProjector instance..."
   # NOTE(review): the Projector *class* is passed as the retina argument here; elsewhere an
   # instance (e.g. Retina()) is passed — confirm this is intended and not a bug
   run(MonitoringProjector(Projector), description="Retina processing with monitor on a single neuron.")
Code example #3
Score: 0
File: retina.py  Project: napratin/nap
    
    # * TODO Compute feature vector of attended region
    
    # * Show output images if in GUI mode
    if self.context.options.gui:
      #cv2.imshow("Hue", self.images['H'])
      #cv2.imshow("Saturation", self.images['S'])
      #cv2.imshow("Value", self.images['V'])
      cv2.imshow("Rod response", self.imageRod)
      cv2.imshow("S-cone response", self.imagesCone['S'])
      cv2.imshow("M-cone response", self.imagesCone['M'])
      cv2.imshow("L-cone response", self.imagesCone['L'])
      cv2.imshow("ON Bipolar cells", self.imagesBipolar['ON'])
      cv2.imshow("OFF Bipolar cells", self.imagesBipolar['OFF'])
      #cv2.imshow("ON Ganglion cells", self.imagesGanglion['ON'])
      #cv2.imshow("OFF Ganglion cells", self.imagesGanglion['OFF'])
      for ganglionType, ganglionImage in self.imagesGanglion.iteritems():
        cv2.imshow("{} Ganglion cells".format(ganglionType), ganglionImage)
      cv2.imshow("Salience", self.imageSalience)
      
      # Designate a representative output image
      self.imageOut = self.imageSalience
      #_, self.imageOut = cv2.threshold(self.imageOut, 0.15, 1.0, cv2.THRESH_TOZERO)  # apply threshold to remove low-response regions
    
    return True, self.imageOut


if __name__ == "__main__":
  # Set up the shared application context, then drive a Retina through a Projector
  Context.createInstance(description="Test application that uses a SimplifiedProjector to run image input through a (simplified) Retina.")
  run(Projector(Retina()))
Code example #4
Score: 0
File: sample.py  Project: ziaridoy20/lumos
import cv2  # OpenCV functions
import cv2.cv as cv  # OpenCV constants

from lumos.base import FrameProcessor  # base processor class
from lumos.input import run  # driver function


class MyAwesomeProcessor(FrameProcessor):
    """Custom processor that selects hues based on current time."""

    def process(self, imageIn, timeNow):
        """Keep only the pixels whose hue falls inside a band that sweeps with time."""
        # Work in Hue-Saturation-Value space; only the hue channel matters here
        hue_channel = cv2.split(cv2.cvtColor(imageIn, cv.CV_BGR2HSV))[0]
        hue_channel = hue_channel.reshape(hue_channel.shape + (1, ))

        # Sweep the target hue across the 0-180 range once every 10 seconds
        center = int(((timeNow % 10) / 10) * 180)
        lo = max(0, center - 10)
        hi = min(180, center + 10)

        # Black out everything outside the +/-10 hue band around the target
        band_mask = cv2.inRange(hue_channel, lo, hi)
        return True, cv2.bitwise_and(imageIn, imageIn, mask=band_mask)


if __name__ == "__main__":
    # Run a custom processor instance (NOTE pass in class name)
    # run() instantiates the class itself and drives the capture/process loop
    run(MyAwesomeProcessor, description="A sample lumos application")
Code example #5
Score: 0
File: retina.py  Project: napratin/nap
 def test_projector(self):
   """Smoke-test: drive image input through a Retina wrapped in a Projector."""
   projector = Projector(Retina())
   run(projector, description="Test application that uses a Projector to run image input through a Retina.")
Code example #6
Score: 0
File: sample.py  Project: napratin/lumos
import cv2  # OpenCV functions
import cv2.cv as cv  # OpenCV constants

from lumos.base import FrameProcessor  # base processor class
from lumos.input import run  # driver function

class MyAwesomeProcessor(FrameProcessor):
  """Custom processor that selects hues based on current time."""
  
  def process(self, imageIn, timeNow):
    """Return the input image with only a time-selected hue band retained."""
    # Hue lives in the first channel of the HSV conversion
    hsv_image = cv2.cvtColor(imageIn, cv.CV_BGR2HSV)
    hue_plane, _, _ = cv2.split(hsv_image)
    hue_plane = hue_plane.reshape(hue_plane.shape + (1,))
    
    # The target hue cycles through [0, 180) once every 10 seconds
    target_hue = int(((timeNow % 10) / 10) * 180)
    lower_bound = max(0, target_hue - 10)
    upper_bound = min(180, target_hue + 10)
    
    # Zero out pixels whose hue falls outside [lower_bound, upper_bound]
    selection = cv2.inRange(hue_plane, lower_bound, upper_bound)
    return True, cv2.bitwise_and(imageIn, imageIn, mask=selection)


if __name__ == "__main__":
  # Run a custom processor instance (NOTE pass in class name)
  # run() instantiates the class itself and drives the capture/process loop
  run(MyAwesomeProcessor, description="A sample lumos application")