Example #1
    def test_ParallelBasicPython_Works(self):
        # Generate random data and store it in HDF5 files
        X = np.random.rand(1000, 10)
        T = np.random.rand(1000, 3)
        hX = modules.make_hdf5(X, self.fnameX)
        hT = modules.make_hdf5(T, self.fnameT)

        # Build a model with three types of neurons and save its structure to disk
        model0 = HPELM(10, 3)
        model0.add_neurons(10, 'lin')
        model0.add_neurons(5, 'tanh')
        model0.add_neurons(15, 'sigm')
        model0.save(self.fmodel)

        # First "worker": load the saved model and accumulate partial HH/HT
        # correlation matrices over rows 0..99 (stale accumulator files are removed first)
        model1 = HPELM(10, 3)
        model1.load(self.fmodel)
        os.remove(self.fnameHT)
        os.remove(self.fnameHH)
        model1.add_data(self.fnameX,
                        self.fnameT,
                        istart=0,
                        icount=100,
                        fHH=self.fnameHH,
                        fHT=self.fnameHT)

        # Second "worker": accumulate correlations for the remaining rows 100..999
        model2 = HPELM(10, 3)
        model2.load(self.fmodel)
        model2.add_data(self.fnameX,
                        self.fnameT,
                        istart=100,
                        icount=900,
                        fHH=self.fnameHH,
                        fHT=self.fnameHT)

        # Solve the output weights from the accumulated correlation matrices and save the result
        model3 = HPELM(10, 3)
        model3.load(self.fmodel)
        model3.solve_corr(self.fnameHH, self.fnameHT)
        model3.save(self.fmodel)

        # Load the solved model and predict outputs for the whole dataset
        model4 = HPELM(10, 3)
        model4.load(self.fmodel)
        model4.predict(self.fnameX, self.fnameY)

        # Check the prediction error on two arbitrary row ranges
        err = model4.error(self.fnameT, self.fnameY, istart=0, icount=198)
        self.assertLess(err, 1)

        err = model4.error(self.fnameT, self.fnameY, istart=379, icount=872)
        self.assertLess(err, 1)
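For reference, the same HDF5-backed model can be trained in one pass, without the chunked HH/HT accumulation shown above. The sketch below is a minimal single-process equivalent under stated assumptions: the file names X.h5, T.h5 and Y.h5 are hypothetical, the import path for modules.make_hdf5 is assumed from the test code above, and HPELM.train replaces the add_data()/solve_corr() pair.

import numpy as np
from hpelm import HPELM, modules  # import path for modules.make_hdf5 assumed from the test above

# Random data stored as HDF5 files (hypothetical file names)
X = np.random.rand(1000, 10)
T = np.random.rand(1000, 3)
modules.make_hdf5(X, "X.h5")
modules.make_hdf5(T, "T.h5")

# Same network structure as in the test, solved in a single call
model = HPELM(10, 3)
model.add_neurons(10, 'lin')
model.add_neurons(5, 'tanh')
model.add_neurons(15, 'sigm')
model.train("X.h5", "T.h5")

# Predict into another HDF5 file and report the error over the whole dataset
model.predict("X.h5", "Y.h5")
print(model.error("T.h5", "Y.h5"))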
Example #2
    def start():
        # Loop over hearing-to-vision (H2V) pairs produced by CROSSMODAL, replay the audio,
        # display the paired video frames, and incrementally train an ELM that maps one
        # second of audio to a flattened frame.
        pairs = MapperUtil.get_allpairs()  # Get pairs starting from 0th line
        if not pairs:
            print("No pairs found.")
            sys.exit()

        p = pyaudio.PyAudio()  # Create a PyAudio session

        # Create a stream
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        output=True)

        #H2V_cursor = NeuralNetUtil.get_neurons("H2V")
        elmH2V = None

        # Loop over the pairs coming from CROSSMODAL
        for pair in pairs:
            #time.sleep(0.5) # Wait 0.5 seconds to prevent aggressive loop
            print(pair)

            if pair['direction'] == "H2V":
                print "____________________________________________________________\n"
                print pair['timestamp1']

                hearing_memory = HearingMemoryUtil.get_memory(
                    pair['timestamp1'])
                hearing_memory = next(hearing_memory)['data']
                #print hearing_memory.next()['data']
                #chunky_array = numpy.fromstring(hearing_memory.next()['data'], 'int16')
                #print chunky_array
                stream.write(hearing_memory)

                # numpy.frombuffer replaces the deprecated numpy.fromstring for raw audio bytes
                numpy_audio = numpy.frombuffer(hearing_memory, numpy.uint8)
                #print numpy_audio
                print("Audio: ", numpy_audio.shape)
                #print numpy.transpose(numpy_audio.reshape((numpy_audio.shape[0],1))).shape

                vision_memory = VisionMemoryUtil.get_memory(pair['timestamp2'])
                vision_memory = next(vision_memory)

                frame_amodal = numpy.frombuffer(vision_memory['amodal'],
                                                numpy.uint8)
                print("Frame Threshold: ", frame_amodal.shape)
                cv2.imshow("Frame Threshold", frame_amodal.reshape(360, 640))
                cv2.moveWindow("Frame Threshold", 50, 100)

                frame_color = numpy.frombuffer(vision_memory['color'],
                                               numpy.uint8)
                print("Frame Delta Colored: ", frame_color.shape)
                cv2.imshow("Frame Delta Colored",
                           frame_color.reshape(360, 640, 3))
                cv2.moveWindow("Frame Delta Colored", 1200, 100)
                key = cv2.waitKey(500) & 0xFF
                #time.sleep(2.0)

                # Trim the audio to a whole number of seconds; skip the trim when it already
                # divides evenly, since slicing with [:-0] would empty the array
                modulo = numpy_audio.shape[0] % RATE
                if modulo:
                    numpy_audio = numpy_audio[:-modulo]
                for one_second in numpy.array_split(
                        numpy_audio, int(numpy_audio.shape[0] / RATE)):
                    X = numpy.transpose(
                        one_second.reshape((one_second.shape[0], 1)))
                    T = numpy.transpose(
                        frame_amodal.reshape((frame_amodal.shape[0], 1)))
                    X = X.astype(numpy.float32, copy=False)
                    T = T.astype(numpy.float32, copy=False)
                    X[0] = X[0] / X[0].max()
                    T[0] = T[0] / T[0].max()
                    print(X.shape)
                    print(T.shape)
                    if elmH2V is None:
                        elmH2V = HPELM(X.shape[1], T.shape[1])
                        if os.path.exists(
                                os.path.expanduser("~/CerebralCortexH2V.pkl")):
                            #elmH2V.nnet.neurons = H2V_cursor.next()['neurons']
                            elmH2V.load(
                                os.path.expanduser("~/CerebralCortexH2V.pkl"))
                        else:
                            elmH2V.add_neurons(100, "sigm")
                    elmH2V.train(X, T, "LOO")
                    print(elmH2V.predict(X))
                    cv2.imshow(
                        ">>>PREDICTION<<<",
                        numpy.transpose(elmH2V.predict(X)).reshape(360, 640))
                    cv2.moveWindow(">>>PREDICTION<<<", 50, 550)

        print(elmH2V.nnet.neurons)
        elmH2V.save(os.path.expanduser("~/CerebralCortexH2V.pkl"))
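Stripped of the audio and vision plumbing, the core of the example above is an incremental training loop: the HPELM model is created lazily on the first chunk of data, reloaded from disk if a saved copy exists, and then retrained on every subsequent (X, T) chunk. A condensed sketch of that pattern on synthetic data follows; the array shapes and the model path are arbitrary assumptions, and train(X, T, "LOO") simply mirrors the call used above rather than a documented file-free HPELM API.

import os
import numpy as np
from hpelm import HPELM

MODEL_PATH = os.path.expanduser("~/model.pkl")  # hypothetical save path
model = None

for chunk in range(5):  # stand-in for the pair loop above
    # One chunk of training data: rows of input signal and flattened target frames (arbitrary sizes)
    X = np.random.rand(20, 16000).astype(np.float32)
    T = np.random.rand(20, 1024).astype(np.float32)

    if model is None:
        # Create the model on the first chunk, resuming from disk if a saved copy exists
        model = HPELM(X.shape[1], T.shape[1])
        if os.path.exists(MODEL_PATH):
            model.load(MODEL_PATH)
        else:
            model.add_neurons(100, "sigm")

    model.train(X, T, "LOO")  # same call pattern as the example above
    Y = model.predict(X)

model.save(MODEL_PATH)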
Example #3
    def test_SaveEmptyModel_CanLoad(self):
        # Even a model with no neurons added can be saved and loaded back
        model = HPELM(10, 3)
        model.save(self.fname)
        model2 = HPELM(10, 3)
        model2.load(self.fname)