Example No. 1
def main(argv=None):

    options = parseCommandLine()
    options.c, options.n = int(options.c), int(options.n)

    datFile, outFile = openFiles(options)

    # Read the data file, collecting the column to be averaged.
    valsList = []
    newFileList = []
    for line in datFile.readlines():
        lineContents = line.split()
        val = float(lineContents[options.c])
        valsList.append(val)
        newFileList.append(lineContents)

    # Time offset of half the averaging window, taken from the second row's timestamp.
    timeDist = float(newFileList[1][0]) * options.n / 2
    newFileIter = iter(newFileList)
    for i in movingAverage(valsList, options.n):
        line = next(newFileIter)
        line[0] = str(float(line[0]) + timeDist)
        line[options.c] = "%f" % i
        outFile.write("\t".join(line))
        outFile.write('\n')

    datFile.close()
    outFile.close()
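Example No. 1 consumes movingAverage(valsList, options.n) as an iterator that yields one smoothed value per window, but the helper itself is not part of the listing. A minimal sketch of a generator with that shape, purely as an assumption about the missing utility:

from collections import deque

def movingAverage(values, n):
    # Hypothetical sketch: yield the mean of each full sliding window of n values.
    window = deque(maxlen=n)
    for v in values:
        window.append(v)
        if len(window) == n:
            yield sum(window) / float(n)

The test in Example No. 9 further down expects one output per input value, so that project's helper evidently also emits partial-window averages at the edges; the sketch above only covers full windows.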
Example No. 2
    def process_reward(self, reward):
        # - Update the PF related to self.active_perm_number using the Natural Weight Equation:
        if self.firedBool:
            old_value = self.PF_list[self.active_perm_number]
            new_value = reward
            resistance = self.resistance
            self.PF_list[self.active_perm_number] = utils.movingAverage(old_value, new_value, resistance)
            # - Make sure the value stays within the limits:
            if self.PF_list[self.active_perm_number] < self.min_PF:
                self.PF_list[self.active_perm_number] = self.min_PF
            elif self.PF_list[self.active_perm_number] > self.max_PF:
                self.PF_list[self.active_perm_number] = self.max_PF
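Example No. 2 above and Examples No. 3, 7 and 11 below all call utils.movingAverage(old_value, new_value, resistance) to nudge a stored estimate toward a new observation, and the resistance counters (RV, RD, EF_resist) are incremented each step up to a cap. That pattern suggests a running mean whose step size shrinks over time; a plausible sketch under that assumption, since the real utils module is not shown:

def movingAverage(old_value, new_value, resistance):
    # Hypothetical sketch: move old_value toward new_value by 1/resistance of the
    # gap. With resistance == n this is an incremental running mean; once the
    # resistance is capped it behaves like an exponential moving average.
    return old_value + (new_value - old_value) / float(resistance)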
Example No. 3
    def predict(self):
        if self.life > self.start_predicting:
            # - Copy the matrices for the simulation:
            self.cell_matrix_D_copy = np.copy(self.cell_matrix_D)
            self.cell_matrix_V_copy = np.copy(self.cell_matrix_V)
            self.cell_matrix_EV_copy = np.copy(self.cell_matrix_EV)
            self.cell_matrix_DR_copy = np.copy(self.cell_matrix_DR)

            # SIMULATIONS:
            for n in range(self.output_size):
                # - Reconstruct next input:
                # Pn = (EI0*An - EI0*EAn + Dn)/(An - EAn) = (EI0(An - EAn) + Dn)/(An - EAn) = EI0 + Dn/(An - EAn)
                # Prediction = SUM(Pn*Rn)/SUM(Rn)
                if n == 0:
                    self.cell_matrix_P, self.output_layer[n][n] = decodeDendrites(
                        self.cell_matrix_D_copy, self.cell_matrix_V_copy,
                        self.cell_matrix_EV_copy, self.cell_matrix_DR_copy)
                else:
                    # Only the first P matrix is needed for the dendrite
                    # optimization later, so the copy returned here is discarded.
                    temp_matrix, self.output_layer[n][n] = decodeDendrites(
                        self.cell_matrix_D_copy, self.cell_matrix_V_copy,
                        self.cell_matrix_EV_copy, self.cell_matrix_DR_copy)

                # - Move the input array back:
                for el in range(self.row_size - 1):
                    self.cell_matrix_V_copy[0][-1 - el] = self.cell_matrix_V_copy[0][-2 - el]

                # - Set the A[0][0] input:
                self.cell_matrix_V_copy[0][0] = self.output_layer[n][n]

                # - Run connections:
                if self.pull == 1:
                    self.cell_matrix_V_copy = utils.runConnectionsPull(
                        self.cell_matrix_V_copy, self.connections_list)
                else:
                    self.cell_matrix_V_copy = utils.runConnectionsPush(
                        self.cell_matrix_V_copy, self.connections_list)

                # - Update the EVs:
                for row in range(self.row_count):
                    for el in range(self.row_size):
                        # -- EVs:
                        old_value = self.cell_matrix_EV_copy[row][el]
                        new_value = self.cell_matrix_V_copy[row][el]
                        self.cell_matrix_EV_copy[row][el] = utils.movingAverage(
                            old_value, new_value, self.RV)
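The comment block inside Example No. 3's simulation loop states what decodeDendrites is expected to return: a per-cell reconstruction Pn = EI0 + Dn/(An - EAn) and a single prediction SUM(Pn*Rn)/SUM(Rn). The function itself lives elsewhere in that project; a minimal sketch that simply follows those two formulas (the argument names, dtype handling and zero-division guard are assumptions):

import numpy as np

def decodeDendrites(D, V, EV, DR, eps=1e-9):
    # Hypothetical sketch of the reconstruction described in Example No. 3:
    #   Pn = EI0 + Dn / (An - EAn)
    #   prediction = sum(Pn * Rn) / sum(Rn)
    D, V, EV, R = (np.asarray(m, dtype=float) for m in (D, V, EV, DR))
    EI0 = EV[0][0]                    # expected value of the input cell
    denom = V - EV
    denom[np.abs(denom) < eps] = eps  # guard against division by zero
    P = EI0 + D / denom               # per-cell predictions
    prediction = np.sum(P * R) / max(np.sum(R), eps)
    return P, prediction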
Example No. 4
    def findRowBounds(self):
        """
        Finds the rows where the labels could be located
        """
        # TODO Ignore internal row bounds
        # Number of white pixels in each row, smoothed with a moving average
        count_row = np.sum(self.img_eroded, axis=1) / 255
        moving_average_row = movingAverage(count_row, 100)

        # Minimum number of white pixels for a row to be considered for a local max
        min_height = int(self.N / 10)
        # Find the local maxima and their widths at half height
        peaks, _ = find_peaks(moving_average_row, height=min_height)
        _, _, top, bottom = peak_widths(moving_average_row, peaks, rel_height=0.5)
        self.row_bounds = list(zip(top.astype('int'), bottom.astype('int')))
        self.row_bounds = list(
            filter(lambda r: r[1] - r[0] > self.M / 20, self.row_bounds))
        print(self.row_bounds)
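Example No. 4 uses movingAverage(count_row, 100) as an array-in, array-out smoother whose result goes straight into SciPy's find_peaks, so it presumably preserves the array length; the speed-estimation examples further down reshape the result for scikit-learn, which points the same way. A sketch under that assumption:

import numpy as np

def movingAverage(array, window):
    # Hypothetical sketch: same-length moving average via convolution, so the
    # smoothed samples still line up with the original indices (and image rows).
    kernel = np.ones(window) / float(window)
    return np.convolve(np.asarray(array, dtype=float), kernel, mode='same')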
Example No. 5
    def linePlots(self, dest):
        '''Graphs for relative age cohorts include

        * Bytes added per edit (new vs. old editors)
        * Contribution percentage of bytes added for each one year cohort
        * Editor percentage for each one year cohort        

        
        '''
        logger.info('Creating line plots for %s'%self)

        editspan = "%s<edits%s"%(self.minedits,'<%s'%self.maxedits if self.maxedits is not None else '')

        # Bytes added per edit (new vs. old editors)

        added = self.data['added']
        edits = self.data['edits']
        editors = self.data['editors']

        six = added[0:6,:].sum(axis=0)/(edits[0:6,:].sum(axis=0)+1)
        moresix = added[7:,:].sum(axis=0)/(edits[7:,:].sum(axis=0)+1)

        six = utils.movingAverage(array=six, WINDOW=3)
        moresix = utils.movingAverage(array=moresix, WINDOW=3)

        fig = self.addLine(data=six,label='new editors (0-6 months active)')
        fig = self.addLine(data=moresix,fig=fig,label='older editors (>6 months active)')        

        # l = 12  
        # fig = None      
        # for i in range(0,(added.shape[1]/l)*l,l):
        #     e = edits[(i):(i+l-1),:].sum(axis=0)
        #     e[e==0] = 1
        #     data = added[(i):(i+l-1),:].sum(axis=0)/e
        #     fig = self.addLine(data=data,fig=fig,label='%s-%s months active'%(i,(i+l-1)))

       
        self.saveFigure(name='bytes_per_edit_new_vs_old', fig=fig, dest=dest, title='Bytes added per edit (new vs. old editors, %s)'%editspan,ylabel='Bytes', legendpos=1)


        # Contribution percentage of bytes added for each one year cohort
        total = added.sum(axis=0)
        total[total==0] = 1

        l = 12  
        fig = None      
        for i in range(0,(added.shape[1]//l)*l,l):
            data = added[(i):(i+l-1),:].sum(axis=0)/total
            fig = self.addLine(data=data,fig=fig,label='%s-%s months active'%(i,(i+l-1)))


        self.saveFigure(name='percentage_added_line', fig=fig, dest=dest, title='Contribution percentage of bytes added for editors with %s'%editspan,ylabel='Percentage', legendpos=1)

        # Editor percentage for each one year cohort
        total = editors.sum(axis=0)
        total[total==0] = 1

        l = 12  
        fig = None      
        for i in range(0,(added.shape[1]//l)*l,l):
            data = editors[(i):(i+l-1),:].sum(axis=0)/total
            fig = self.addLine(data=data,fig=fig,label='%s-%s months active'%(i,(i+l-1)))


        self.saveFigure(name='percentage_editor_line', fig=fig, dest=dest, title='Editor age percentage for editors with %s'%editspan, ylabel='Percentage', legendpos=1)

Example No. 6
	def run(self):

		# Construct mask first
		mask = self.constructMask()
		prev_key_pts = None

		# fourcc = cv2.VideoWriter_fourcc(*'XVID')
		# self.video = cv2.VideoWriter('video.avi', fourcc,29, (int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))))
		
		while self.vid.isOpened() and self.frame_idx<len(self.gt):
			ret, frame = self.vid.read()
			if not ret:
				break

			# Convert to B/W
			frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			frame_gray = frame_gray[130:350, 35:605]
			mask_vis = frame.copy() # <- For visualization
			
			# Process each frame
			if self.prev_pts is None:
				self.temp_preds[self.frame_idx] = 0
			else:
				# Get median of predicted V/hf values
				preds = self.processFrame(frame_gray)
				self.temp_preds[self.frame_idx] = np.median(preds) if len(preds) else 0

			# Extract features
			self.prev_pts = self.getFeatures(frame_gray, mask[130:350, 35:605])
			self.prev_gray = frame_gray
			self.frame_idx += 1
			
			# For visualization purposes only
			if self.vis:
				prev_key_pts = self.visualize(frame, mask_vis, prev_key_pts)
				if cv2.waitKey(1) & 0xFF == ord('q'):
					break

		# self.video.release()
		self.vid.release()

		# Split predictions into train and validation - 
		split = self.frame_idx//10
		train_preds = self.temp_preds[:self.frame_idx-split]
		val_preds = self.temp_preds[self.frame_idx - split:self.frame_idx]
		gt_train = self.gt[:len(train_preds)]
		gt_val = self.gt[len(train_preds):self.frame_idx]

		# Fit to ground truth
		preds = movingAverage(train_preds, self.window)

		lin_reg = linear_model.LinearRegression(fit_intercept=False)
		lin_reg.fit(preds.reshape(-1, 1), gt_train) 
		hf_factor = lin_reg.coef_[0]
		print("Estimated hf factor = {}".format(hf_factor))


		# estimate training error
		pred_speed_train = train_preds * hf_factor
		pred_speed_train = movingAverage(pred_speed_train, self.window)
		mse = np.mean((pred_speed_train - gt_train)**2)
		print("MSE for train", mse)

		# Estimate validation error
		pred_speed_val = val_preds * hf_factor
		pred_speed_val = movingAverage(pred_speed_val, self.window)
		mse = np.mean((pred_speed_val - gt_val)**2)
		print("MSE for val", mse)
		
		# plot(pred_speed_train, gt_train)
		# plot(pred_speed_val, gt_val)

		return hf_factor
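For reference, the no-intercept, single-feature regression used above (and again in Example No. 10) has a simple closed form: the fitted coefficient is sum(pred * gt) / sum(pred ** 2). A small standalone equivalent, with a hypothetical helper name:

import numpy as np

def fit_scale_factor(preds, gt):
    # Closed form of LinearRegression(fit_intercept=False) with one feature:
    #   hf = sum(preds * gt) / sum(preds ** 2)
    preds = np.asarray(preds, dtype=float)
    gt = np.asarray(gt, dtype=float)
    return float(np.dot(preds, gt) / np.dot(preds, preds))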
Example No. 7
    def inputAndUpdate(self, new_input):

        # - Get the input to the input cell:
        self.input_cell = new_input

        # - Evaluate the prediction:
        if self.life > self.start_predicting + 1:
            for n in range(self.output_size):
                old_value = self.error_layer[n]
                new_value = abs((self.output_layer[n][n] - self.input_cell) /
                                self.input_cell)
                resistance = self.mainR_resist
                self.error_layer[n] = utils.movingAverage(
                    old_value, new_value, resistance)

        # - Move output layer forward:
        for row in range(self.output_size):
            for el in range(self.output_size - 1):
                self.output_layer[row][el] = self.output_layer[row][el + 1]

        # - Rate the dendrites:
        # Rn = E( 1 - abs( (Pn - I0) / I0 ) )
        if self.life > self.start_predicting + 1:
            for row in range(self.row_count):
                for el in range(self.row_size):
                    old_value = self.cell_matrix_DR[row][el]
                    new_value = 1 - abs(
                        (self.cell_matrix_P[row][el] - self.input_cell) /
                        self.input_cell)
                    resistance = self.dRating_resist
                    Rn = utils.movingAverage(old_value, new_value, resistance)
                    # Reset Rn on bad predictions:
                    if new_value < 0.1 or Rn < 0.001:
                        Rn = 0.001
                    self.cell_matrix_DR[row][el] = Rn

        # - Update the dendrites:
        if self.life > self.row_size:
            for row in range(self.row_count):
                for el in range(self.row_size):
                    if self.dendr_type == 0:
                        # Dn = E(COV) = E((I[0]-EI[0])*(A[n]-EA[n]))
                        # -- Recover the needed variables:
                        Dn = self.cell_matrix_D[row][el]
                        I0 = self.input_cell
                        EI0 = self.cell_matrix_EV[0][0]
                        An = self.cell_matrix_V[row][el]
                        EAn = self.cell_matrix_EV[row][el]
                        # -- Calculate the covariance:
                        COV_ = utils.COV(I0, EI0, An, EAn)
                        # -- Calculate and set Dn = E(COV):
                        old_value = Dn
                        new_value = COV_
                        Dn = utils.movingAverage(old_value, new_value, self.RD)
                        self.cell_matrix_D[row][el] = Dn
            # -- Update RD:
            self.RD += 1
            if self.RD > self.max_D_resist:
                self.RD = self.max_D_resist

        # - Move Input array back:
        for el in range(self.row_size - 1):
            self.cell_matrix_V[0][-1 - el] = self.cell_matrix_V[0][-2 - el]

        # - Set A[0][0] = input:
        self.cell_matrix_V[0][0] = self.input_cell

        # - Run connections:
        if self.pull == 1:
            self.cell_matrix_V = utils.runConnectionsPull(
                self.cell_matrix_V, self.connections_list)
        else:
            self.cell_matrix_V = utils.runConnectionsPush(
                self.cell_matrix_V, self.connections_list)

        # - Update the EVs:
        for row in range(self.row_count):
            for el in range(self.row_size):
                # -- EVs:
                old_value = self.cell_matrix_EV[row][el]
                new_value = self.cell_matrix_V[row][el]
                self.cell_matrix_EV[row][el] = utils.movingAverage(
                    old_value, new_value, self.RV)

        # - Update the RV:
        self.RV += 1
        if self.RV > self.max_V_resist:
            self.RV = self.max_V_resist

        # - Update life:
        self.life += 1
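Example No. 7's dendrite update cites Dn = E(COV) = E((I[0]-EI[0])*(A[n]-EA[n])), so utils.COV is presumably just the single-sample product of the two deviations, which movingAverage then averages over time into Dn. A sketch matching that comment (the helper itself is not shown):

def COV(I0, EI0, An, EAn):
    # Hypothetical sketch: one-sample covariance term from Example No. 7,
    # (I[0] - EI[0]) * (A[n] - EA[n]); its running mean gives Dn = E(COV).
    return (I0 - EI0) * (An - EAn)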
Example No. 8
    def run(self, path=[cfg.color_PINK, cfg.color_YELLOW]):
        self.path = path
        self.currentColors = path[:2]
        # Initialize PiCamera
        camera = cameraInit()

        # Initialize centroid trackers for the two path colors
        firstColorTracker = CentroidTracker(maxDisappeared=8)
        secondColorTracker = CentroidTracker(maxDisappeared=14)
        centroidX = 0
        width = 1
        # objects = OrderedDict()

        # Create and start PID controller thread
        horizontalPositionControlThread = Thread(
            target=horizontalPositionControl_PID)
        horizontalPositionControlThread.start()
        print("horizontal control thread started")

        rawCapture = PiRGBArray(camera,
                                size=(cfg.FRAME_WIDTH, cfg.FRAME_HEIGHT))

        self.sensorReader.start()

        def nothing(x):
            pass

        #loop through frames continuously
        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            image = frame.array
            startTime = time.time()

            print(cfg.mode)

            blurred = cv2.GaussianBlur(image, (5, 5), 0)
            hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)  # convert picture

            maskFirst = getFilteredColorMask(
                hsv,
                cfg.colorLimitsDict.lower(self.currentColors[0]),
                cfg.colorLimitsDict.upper(self.currentColors[0]),
                useMorphology=False)
            firstContoursSorted = getAreaSortedContours(maskFirst)
            firstBoundingBoxes = getBoundingBoxes(firstContoursSorted)
            drawBoxes(image, firstBoundingBoxes)
            firstColorObjects = firstColorTracker.update(firstBoundingBoxes)
            drawObjectCoordinates(image, firstColorObjects)

            maskSecond = getFilteredColorMask(
                hsv,
                cfg.colorLimitsDict.lower(self.currentColors[1]),
                cfg.colorLimitsDict.upper(self.currentColors[1]),
                useMorphology=False)
            secondContoursSorted = getAreaSortedContours(maskSecond)
            secondBoundingBoxes = getBoundingBoxes(secondContoursSorted)
            drawBoxes(image, secondBoundingBoxes)
            secondColorObjects = secondColorTracker.update(secondBoundingBoxes)
            drawObjectCoordinates(image, secondColorObjects)

            if self.step + 2 != len(self.path):

                if bool(firstColorObjects) and bool(secondColorObjects):
                    self.isNextStep = False
                    print("both colors seen")
                    _, secondColorCenterX = findCenterOfBiggestBox(
                        secondColorObjects)
                    cfg.horizontal_measurement = movingAverage(
                        secondColorCenterX,
                        cfg.horizontalPositions,
                        windowSize=2)

                    cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_LEFT,
                                          dps=cfg.MAX_SPEED -
                                          int(cfg.horizontal_correction))
                    cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_RIGHT,
                                          dps=cfg.MAX_SPEED +
                                          int(cfg.horizontal_correction))

                elif bool(firstColorObjects):
                    print("first color seen")
                    self.isNextStep = False
                    _, firstColorCenterX = findCenterOfBiggestBox(
                        firstColorObjects)
                    cfg.horizontal_measurement = movingAverage(
                        firstColorCenterX,
                        cfg.horizontalPositions,
                        windowSize=2)

                    cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_LEFT,
                                          dps=cfg.MAX_SPEED -
                                          int(cfg.horizontal_correction))
                    cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_RIGHT,
                                          dps=cfg.MAX_SPEED +
                                          int(cfg.horizontal_correction))

                elif bool(secondColorObjects):
                    print("second color seen")
                    if not self.isNextStep:
                        self.isNextStep = self.updateStep()

                else:
                    cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_LEFT, dps=1)
                    cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_RIGHT, dps=1)
                    print("STOP!")
            else:
                print("last step")
                _, secondColorCenterX = findCenterOfBiggestBox(
                    secondColorObjects)
                cfg.horizontal_measurement = movingAverage(
                    secondColorCenterX, cfg.horizontalPositions, windowSize=2)

            # Show the frame and reset the capture buffer on every iteration
            cv2.imshow("outputImage", image)
            endTime = time.time()
            # print("loopTime: ", endTime - startTime)
            # Exit if 'esc' is clicked and clean up the hardware
            key = cv2.waitKey(1)
            rawCapture.truncate(0)
            if key == 27:
                cfg.threadStopper.set()
                horizontalPositionControlThread.join()
                # distanceControlThread.join()
                # deviceScannerThread.join()
                cfg.GPG.reset_all()
                camera.close()
                cfg.GPG.stop()
                break

        cv2.destroyAllWindows()
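Examples No. 8 and 12 call movingAverage(value, history, windowSize=2) with shared lists from cfg (cfg.horizontalPositions, cfg.distanceMeasurements), so in these robot scripts the helper apparently appends the new sample to that history and returns the mean of the most recent windowSize entries. A sketch under that assumption:

def movingAverage(new_value, history, windowSize=2):
    # Hypothetical sketch: keep a running history of samples and return the mean
    # of the last windowSize entries for a lightly smoothed measurement.
    history.append(new_value)
    recent = history[-windowSize:]
    return sum(recent) / float(len(recent))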
Example No. 9
    def testMovingAverage(self):
        # movingAverage([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
        resList = [40.0, 40.0, 41.0, 41.8, 43, 44]
        mvAvg = movingAverage([40, 30, 50, 46, 39, 44])
        for res in resList:
            self.assertAlmostEqual(next(mvAvg), res)
Example No. 10
    def run(self):
        # Construct mask first
        mask = self.constructMask()
        prev_key_pts = None

        # Create a video writer for the visualization output
        fourcc = cv2.VideoWriter_fourcc(*'MP4V')

        self.video = cv2.VideoWriter(
            'video.mp4', fourcc, 29,
            (int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
             int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))))
        mseL = []
        lCount = []
        nb = 0
        while self.vid.isOpened() and self.frame_idx < len(self.gt):
            ret, frame = self.vid.read()
            if not ret:
                break

            # Convert to B/W
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            frame_gray = frame_gray[130:350, 35:605]
            mask_vis = frame.copy()  # <- For visualization

            # Process each frame
            if self.prev_pts is None:
                self.temp_preds[self.frame_idx] = 0
            else:
                # Get median of predicted V/hf values
                preds = self.processFrame(frame_gray)
                # Store the median prediction for this frame
                self.temp_preds[self.frame_idx] = np.median(preds) if len(preds) else 0

            # Extract features
            self.prev_pts = self.getFeatures(frame_gray, mask[130:350, 35:605])
            self.prev_gray = frame_gray
            # Record the per-frame squared error over time
            newTrain_preds = self.temp_preds[self.frame_idx]
            newGt_train = self.gt[self.frame_idx]
            newMse = np.mean((newTrain_preds - newGt_train)**2)

            mseL.append(newMse)
            nb = nb + 1
            lCount.append(nb)

            self.frame_idx += 1

            # For visualization purposes only
            if self.vis:
                prev_key_pts = self.visualize(frame, mask_vis, prev_key_pts)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        self.video.release()  # release the writer created above
        self.vid.release()

        # Split predictions into train and validation -
        split = self.frame_idx // 10
        train_preds = self.temp_preds[:self.frame_idx - split]

        val_preds = self.temp_preds[self.frame_idx - split:self.frame_idx]
        gt_train = self.gt[:len(train_preds)]

        gt_val = self.gt[len(train_preds):self.frame_idx]

        # Fit to ground truth
        preds = movingAverage(train_preds, self.window)
        # Use linear regression to fit the predicted values to the ground truth.
        # (normalize was ignored with fit_intercept=False and is gone in newer scikit-learn.)
        lin_reg = linear_model.LinearRegression(fit_intercept=False)
        lin_reg.fit(preds.reshape(-1, 1), gt_train)
        hf_factor = lin_reg.coef_[0]
        print("Estimated hf factor = {}".format(hf_factor))

        # estimate training error
        pred_speed_train = train_preds * hf_factor
        pred_speed_train = movingAverage(pred_speed_train, self.window)
        mse = np.mean((pred_speed_train - gt_train)**2)
        print("MSE for train", mse)
        #graph MSE over time

        fig = plt.figure()
        ax = plt.subplot(111)
        ax.plot(lCount, mseL, label='MSE')
        plt.title('Per-frame training MSE')
        ax.legend()

        fig.savefig('trainPlot.png')

        # Estimate validation error

        pred_speed_val = val_preds * hf_factor
        pred_speed_val = movingAverage(pred_speed_val, self.window)
        mse = np.mean((pred_speed_val - gt_val)**2)
        print("MSE for val", mse)

        # plot(pred_speed_train, gt_train)
        # plot(pred_speed_val, gt_val)

        return hf_factor
Example No. 11
    def getInputAndPropagate(self, input_array):
        # - Decay the chem markings:
        if self.is_chem_marking == 1:
            for layer in range(self.hidden_count + 1):
                for cell in range(len(self.Chem_list[layer])):
                    self.Chem_list[layer][cell] -= self.chem_decay
                    if self.Chem_list[layer][cell] < 0.0:
                        self.Chem_list[layer][cell] = 0.0

        # - Reset Fs:
        for layer in range(self.hidden_count + 1):
            self.F_list[layer] = np.zeros(len(self.F_list[layer]))

        # - Reset PFs and output layer if is_RNN=0:
        if self.is_RNN == 0:
            for layer in range(self.hidden_count + 1):
                self.PF_list[layer] = np.zeros(len(self.PF_list[layer]))
            self.output_PF = np.zeros(self.output_size)

        # - Reduce the PFs and output layer if is_RNN=1:
        if self.is_RNN == 1:
            for layer in range(self.hidden_count + 1):
                self.PF_list[layer] = np.multiply(self.PF_list[layer],
                                                  self.RNN_reduce)
            self.output_PF = self.output_PF * self.RNN_reduce

        # - Iterate all layers:
        for layer in range(self.hidden_count + 1):
            # -- For layer == 0:
            if layer == 0:
                # --- get input into PF:
                self.PF_list[layer] = input_array
                # --- F = bound(PF)
                #self.F_list[layer]  = utils.boundInputMinMaxArray(self.PF_list[layer], 0, 1)

                # --- F = PF:
                self.F_list[layer] = self.PF_list[layer]

            # -- For layer  > 0:
            if layer > 0:
                # --- decide whether to F | PF:
                self.F_list[layer] = utils.fireNotFire(self.PF_list[layer],
                                                       self.min_PF,
                                                       self.max_PF)
            # -- Run LL connections PF[n+1]   += F[n] * LL[n]
            for n in range(self.hidden_count):
                self.PF_list[n + 1] += np.dot(self.F_list[n], self.LLC_list[n])

            # -- Run LB connections PF[n-1-k] += F[n] * LB[n][k]
            if self.is_simple_connected == 0:
                for n in range(self.hidden_count):
                    # --- Run LB[k]
                    for k in range(n):
                        self.PF_list[n - 1 - k] += np.dot(
                            self.F_list[n], self.LBC_list[n][k])
                        # ---- Force re-fire on F[n-1-k]
                        # ----- For input layer Set F = bound(PF):
                        if n - 1 - k == 0:
                            self.F_list[0] = utils.boundInputMinMaxArray(
                                self.PF_list[0], 0, 1)
                        # ----- For hidden layers decide whether to F | PF:
                        else:
                            self.F_list[n - 1 - k] = utils.fireNotFire(
                                self.PF_list[n - 1 - k], self.min_PF,
                                self.max_PF)

        # - Run LO connections PF[out] += F[n] * LO[n]
        for n in range(self.hidden_count + 1):
            self.output_PF += np.dot(self.F_list[n], self.LOC_list[n])

        # - Check the fired cells and update the chem markings:
        if self.is_chem_marking == 1:
            for layer in range(self.hidden_count + 1):
                for cell in range(len(self.Chem_list[layer])):
                    if self.F_list[layer][cell] == 1:
                        self.Chem_list[layer][cell] = 1

        # - Update all EFs:
        for layer in range(self.hidden_count + 1):
            for el in range(len(self.F_list[layer])):
                old_value = self.EF_list[layer][el]
                new_value = self.F_list[layer][el]
                resistance = self.EF_resist
                self.EF_list[layer][el] = utils.movingAverage(
                    old_value, new_value, resistance)

        # - Increase the EF_resist:
        self.EF_resist += 1
        if self.EF_resist > self.max_EF_resist:
            self.EF_resist = self.max_EF_resist

        # - Provide the output:
        self.output_F = utils.classOutput(self.output_PF, self.min_PF,
                                          self.max_PF)

        # - Update life:
        self.life = self.life + 1
Example No. 12
def personFollower():
    # Initialize PiCamera
    camera = cameraInit()

    #initialize peopleTracker
    peopleTracker = CentroidTracker(maxDisappeared=10)

    # def nothing(x):
    #     pass

    # cv2.namedWindow("Trackbars")

    # cv2.createTrackbar("B", "Trackbars", 0, 255, nothing)
    # cv2.createTrackbar("G", "Trackbars", 0, 255, nothing)
    # cv2.createTrackbar("R", "Trackbars", 0, 255, nothing)

    centroidX = 0
    width = 1
    objects = OrderedDict()

    # Create and start PID controller thread
    horizontalPositionControlThread = Thread(
        target=horizontalPositionControl_PID)
    horizontalPositionControlThread.start()
    print("horizontal control thread started")

    distanceControlThread = Thread(target=distanceControl_PID)
    distanceControlThread.start()
    print("distance control thread started")

    # # Handle Bluetooth Low Energy device scan
    # scanDelegate = ble.ScanDelegate()
    # deviceScanner = ble.DeviceScanner(scanDelegate)
    # # deviceScanner.startScan(float("inf"))
    # deviceScanner.showAvilableDevices(scanDelegate)

    # wifiScanner = WifiScanner()

    # # deviceScannerThread = Thread(target = wifiScanner.probeRssi)
    # deviceScannerThread = Thread(target = deviceScanner.startScan)
    # deviceScannerThread.start()
    # print("device scanner thread started")

    rawCapture = PiRGBArray(camera, size=(cfg.FRAME_WIDTH, cfg.FRAME_HEIGHT))

    #loop through frames continuously
    for frame in camera.capture_continuous(rawCapture,
                                           format="bgr",
                                           use_video_port=True):
        image = frame.array
        startTime = time.time()
        # print(" my device signal strength in dB: ", scanDelegate.rssi)
        # image = imutils.resize(image, width=min(400, image.shape[1]))

        # print(deviceScannerThread.is_alive())
        # Find object in the image

        blurred = cv2.GaussianBlur(image, (3, 3), 0)
        hsv = cv2.cvtColor(
            blurred,
            cv2.COLOR_BGR2HSV)  # convert picture from BGR to HSV color format

        # # Optional trackbars left for determining threshold 'live' if current is not working
        # B = cv2.getTrackbarPos("B", "Trackbars")
        # G = cv2.getTrackbarPos("G", "Trackbars")
        # R = cv2.getTrackbarPos("R", "Trackbars")
        # B = 50
        # G = 10
        # R = 180

        lowerLimit, upperLimit = getHSVColorLimitsFromBGR(50, 10, 180)

        mask = getFilteredColorMask(hsv, lowerLimit, upperLimit)

        contoursSorted = getAreaSortedContours(mask)
        boundingBoxes = getBoundingBoxes(contoursSorted)

        drawBoxes(image, boundingBoxes)

        objects = peopleTracker.update(boundingBoxes)

        if bool(objects):
            maxAreaBboxID = 0
            prevArea = 0
            eraseIDs = []

            drawObjectCoordinates(image, objects)

            # # print("maxAreaBboxID: ", maxAreaBboxID)
            # if maxAreaBboxID in objects :
            #     trackedCentroid = objects[maxAreaBboxID]
            #     centroidX = trackedCentroid[0]
            #     width = movingAverage(trackedCentroid[3], cfg.bBoxWidths, windowSize = 4)
            #     print("width", width)
            biggestBoxID, centroidX = findCenterOfBiggestBox(objects)

            cfg.horizontal_measurement = centroidX  # smoothing via movingAverage disabled here

            _, _, _, width = objects[biggestBoxID]
            cfg.distance_measurement = movingAverage(findCameraDistance(width),
                                                     cfg.distanceMeasurements,
                                                     windowSize=2)

            # cfg.distance_measurement = findCameraDistance(width)

            print(cfg.distance_measurement)

            cv2.putText(image, "%.2fcm" % (cfg.distance_measurement),
                        (image.shape[1] - 200, image.shape[0] - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3)

            cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_LEFT,
                                  dps=int(-cfg.distance_correction) -
                                  int(cfg.horizontal_correction))
            cfg.GPG.set_motor_dps(cfg.GPG.MOTOR_RIGHT,
                                  dps=int(-cfg.distance_correction) +
                                  int(cfg.horizontal_correction))

        cv2.imshow("outputImage", image)
        endTime = time.time()
        print("loopTime: ", endTime - startTime)
        # Exit if 'esc' is clicked
        # cleanup hardware
        key = cv2.waitKey(1)
        rawCapture.truncate(0)
        if key == 27:
            cfg.threadStopper.set()
            cfg.GPG.reset_all()
            camera.close()
            cfg.GPG.stop()
            horizontalPositionControlThread.join()
            distanceControlThread.join()
            # deviceScannerThread.join()
            break
Example No. 14
    def linePlots(self, dest):
        '''Graphs for relative age cohorts include

        * Bytes added per edit (new vs. old editors)
        * Contribution percentage of bytes added for each one year cohort
        * Editor percentage for each one year cohort        

        
        '''
        logger.info('Creating line plots for %s' % self)

        editspan = "%s<edits%s" % (self.minedits, '<%s' % self.maxedits
                                   if self.maxedits is not None else '')

        # Bytes added per edit (new vs. old editors)

        added = self.data['added']
        edits = self.data['edits']
        editors = self.data['editors']

        six = added[0:6, :].sum(axis=0) / (edits[0:6, :].sum(axis=0) + 1)
        moresix = added[7:, :].sum(axis=0) / (edits[7:, :].sum(axis=0) + 1)

        six = utils.movingAverage(array=six, WINDOW=3)
        moresix = utils.movingAverage(array=moresix, WINDOW=3)

        fig = self.addLine(data=six, label='new editors (0-6 months active)')
        fig = self.addLine(data=moresix,
                           fig=fig,
                           label='older editors (>6 months active)')

        # l = 12
        # fig = None
        # for i in range(0,(added.shape[1]/l)*l,l):
        #     e = edits[(i):(i+l-1),:].sum(axis=0)
        #     e[e==0] = 1
        #     data = added[(i):(i+l-1),:].sum(axis=0)/e
        #     fig = self.addLine(data=data,fig=fig,label='%s-%s months active'%(i,(i+l-1)))

        self.saveFigure(
            name='bytes_per_edit_new_vs_old',
            fig=fig,
            dest=dest,
            title='Bytes added per edit (new vs. old editors, %s)' % editspan,
            ylabel='Bytes',
            legendpos=1)

        # Contribution percentage of bytes added for each one year cohort
        total = added.sum(axis=0)
        total[total == 0] = 1

        l = 12
        fig = None
        for i in range(0, (added.shape[1] // l) * l, l):
            data = added[(i):(i + l - 1), :].sum(axis=0) / total
            fig = self.addLine(data=data,
                               fig=fig,
                               label='%s-%s months active' % (i, (i + l - 1)))

        self.saveFigure(
            name='percentage_added_line',
            fig=fig,
            dest=dest,
            title='Contribution percentage of bytes added for editors with %s'
            % editspan,
            ylabel='Percentage',
            legendpos=1)

        # Editor percentage for each one year cohort
        total = editors.sum(axis=0)
        total[total == 0] = 1

        l = 12
        fig = None
        for i in range(0, (added.shape[1] // l) * l, l):
            data = editors[(i):(i + l - 1), :].sum(axis=0) / total
            fig = self.addLine(data=data,
                               fig=fig,
                               label='%s-%s months active' % (i, (i + l - 1)))

        self.saveFigure(name='percentage_editor_line',
                        fig=fig,
                        dest=dest,
                        title='Editor age percentage for editors with %s' %
                        editspan,
                        ylabel='Percentage',
                        legendpos=1)