Example #1
def calculate(processor,
              interp_multiplier,
              ignore_calib,
              plot_fkt,
              correction_factor=1):
    x, y, laser_x, laser_y = processor.get_data(interp_multiplier)

    calibrator = Calibrator(laser_x, laser_y, interp_multiplier, plot_fkt)
    delta, dt_max = calibrator.get_calibration(x)

    print(len(x))
    if not ignore_calib:
        _x = x + delta

        x = np.linspace(_x[0], _x[-1], len(_x))
        y = Utils.interpolate(x, _x, y)

        x = x * correction_factor

    # t in seconds
    t = x * dt_max
    dt = t[1] - t[0]

    t_max = len(t)

    print(len(t))
    print(1 / t_max)

    l, y_trans = FourierTransformer().transform(y, dt, t_max)

    # y_trans = list(reversed(y_trans))

    return l, y_trans
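FourierTransformer is not shown in this snippet; below is a minimal stand-in built on numpy.fft, assuming transform(y, dt, n) should return a frequency axis in Hz and a one-sided magnitude spectrum (both the signature and the magnitude convention are assumptions):

import numpy as np

def fft_transform(y, dt, n):
    # Hypothetical stand-in for FourierTransformer().transform(y, dt, t_max)
    freqs = np.fft.rfftfreq(n, d=dt)      # frequency bins in Hz for n samples
    spectrum = np.abs(np.fft.rfft(y, n))  # one-sided spectrum magnitude
    return freqs, spectrum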
Example #2
    def fromConfig(cls, config):
        if 'camera' not in config:
            return None

        camera = PinholeCamera.fromConfig(config['camera'])

        if camera:
            monocular = MonocularWrapper()

            if 'calibrator' in config:
                calibrator = Calibrator.fromConfig(camera, monocular,
                                                   config['calibrator'])
            else:
                calibrator = Calibrator(camera, monocular)

            if calibrator:
                for key in cls.CONFIG_KEYS:
                    if key not in config:
                        return None

                roi = config['roi']
                velocitaCrociera = config['velocitaCrociera']
                # Optional parameters
                distanzaMinima = config.get('distanzaMinima')
                distanzaLimite = config.get('distanzaLimite')
                windowSize = config.get('windowSize')
                skipThreshold = config.get('skipThreshold')

                return cls(camera, monocular, calibrator, roi,
                           velocitaCrociera, distanzaMinima, distanzaLimite,
                           windowSize, skipThreshold)
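For reference, a config dict that would satisfy this factory might look like the sketch below; cls.CONFIG_KEYS is not shown, so which keys are mandatory beyond 'roi' and 'velocitaCrociera' is a guess, and all values here are placeholders:

config = {
    'camera': {},               # consumed by PinholeCamera.fromConfig
    'calibrator': {},           # optional, consumed by Calibrator.fromConfig
    'roi': (0, 0, 640, 480),    # region of interest
    'velocitaCrociera': 30.0,   # cruise speed
    'distanzaMinima': 5.0,      # optional minimum distance
    'distanzaLimite': 50.0,     # optional limit distance
    'windowSize': 5,            # optional
    'skipThreshold': 0.5,       # optional
}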
Example #3
def main():
    print(Banner("GUESS THE NUMBER APP").content)
    calibrator = Calibrator(random.randint(0, 100))
    prompt = Prompt()
    success = False
    while not success:
        calibration = calibrator.calibrate(prompt.input())
        print(calibration.message)
        success = calibration.is_equal
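main() implies a small API: calibrate(guess) returns an object exposing message and is_equal. A minimal sketch of classes that would satisfy the loop (an assumption, not the original implementation):

from dataclasses import dataclass

@dataclass
class Calibration:
    message: str
    is_equal: bool

class Calibrator:
    def __init__(self, secret):
        self.secret = secret

    def calibrate(self, guess):
        # compare the guess against the secret and report the direction
        if guess < self.secret:
            return Calibration("Too low", False)
        if guess > self.secret:
            return Calibration("Too high", False)
        return Calibration("Correct!", True)

class Prompt:
    def input(self):
        return int(input("Your guess: "))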
Example #4
    def CheckPinCode(self, pin):
        if pin == '2502':
            self.tempthreadcontrol(0)
            spi.close()
            self.CalibrWindow = Calibrator(self.finish_signal, self)
            self.CalibrWindow.show()
        self.Calibr.setStyleSheet(metrocss.prog_passive)
Example #5
def generate_samples(filename, output='./output.mp4'):
    if not clb.initialized():
        clb.find_pictures(directory='./camera_cal/')
        clb.calibrate_camera(9, 6)
    m = model.CarModel()
    m.load()
    t = track.FrameVehiclePipeline(m, shape=(720, 1280))
    video = VideoFileClip(filename)
    out = video.fl_image(t.samples)
    out.write_videofile(output, audio=False)
Example #6
def single_image(filename, model_path='./data/model.p'):
    if not clb.initialized():
        clb.find_pictures(directory='./camera_cal/')
        clb.calibrate_camera(9, 6)
    im = common.load_image(filename, color='RGB')
    common.show_image(im)
    m = model.CarModel()
    m.load(filename=model_path)
    t = track.FrameVehiclePipeline(m, shape=im.shape[:2])
    t.process(im, show=True)
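Examples #5 and #6 both lean on a clb module exposing initialized(), find_pictures(directory), calibrate_camera(nx, ny) and, in later examples, undistort(im). A plausible module sketch over OpenCV's chessboard routines; the module-level state and the image glob pattern are assumptions:

import glob
import cv2
import numpy as np

_mtx, _dist = None, None
_images = []

def initialized():
    return _mtx is not None

def find_pictures(directory='./camera_cal/'):
    global _images
    _images = glob.glob(directory + '*.jpg')

def calibrate_camera(nx, ny):
    # standard OpenCV chessboard calibration over the collected images
    global _mtx, _dist
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    objpoints, imgpoints = [], []
    shape = None
    for name in _images:
        gray = cv2.imread(name, cv2.IMREAD_GRAYSCALE)
        found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if found:
            objpoints.append(objp)
            imgpoints.append(corners)
            shape = gray.shape[::-1]
    _, _mtx, _dist, _, _ = cv2.calibrateCamera(objpoints, imgpoints,
                                               shape, None, None)

def undistort(im):
    return cv2.undistort(im, _mtx, _dist, None, _mtx)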
Example #7
    def xtest_compare_calibrator_with_multiple_interpolations(self):
        # 2: Light
        # 1: Laser
        processor = preProcessData.DataProcessor(
            "./data/Notch Filter/Data Channel 1.dat",
            "./data/Notch Filter/Data Channel 2.dat")

        x, y, laser_x, laser_y = processor.get_data(1)
        calibrator = Calibrator(laser_x, laser_y)
        x, y = calibrator.calculate_calibration()
        plt.plot(x, y)

        x, y, laser_x, laser_y = processor.get_data(2)
        calibrator = Calibrator(laser_x, laser_y)
        x, y = calibrator.calculate_calibration(2)
        plt.plot(x, y)

        x, y, laser_x, laser_y = processor.get_data(8)
        calibrator = Calibrator(laser_x, laser_y)
        x, y = calibrator.calculate_calibration(8)
        plt.plot(x, y)

        plt.xlabel('peak index')
        plt.ylabel('delta')
        plt.show()
Example #8
 def AnalyzeImg(self):
     imglist = []
     for i in range(self.screenshot_list.count()):
         item = self.screenshot_list.item(i)
         imglist.append(unicode(item.text()))
     language = unicode(self.ocr_language.currentText())
     self.calibratorthread = Calibrator(self, imglist, language)
     QObject.connect(self.calibratorthread,
                     SIGNAL('finished(float, int, PyQt_PyObject)'),
                     self.threadFinished)
     QObject.connect(self.calibratorthread, SIGNAL('steps(int)'),
                     self.updateSteps)
     QObject.connect(self.calibratorthread, SIGNAL('progress(int)'),
                     self.updateProgress)
     self.calibratorthread.execute()
Example #9
def test(filename, n=0, show=False):
    import importlib
    importlib.reload(mask)
    print(persp._perspective)
    im_ = plt.imread(filename)
    im = clb.undistort(im_)
    if n == -1:
        return persp.warp(im, show=show)
    elif n == 0:
        im2 = persp.warp(im, show=show)
        color_mask.hls_channel(im2, channel='h', thresh=(100, 255), show=show)
        color_mask.hls_channel(im2, channel='l', thresh=(200, 255), show=show)
        color_mask.hls_channel(im2, channel='s', thresh=(100, 255), show=show)
    elif n == 1:
        im_eq = equalize_hist(im, show=show)
        im2 = persp.warp(im_eq, show=show)
        color_mask.hls_channel(im2, channel='h', thresh=(0, 100), show=show)
        color_mask.hls_channel(im2, channel='l', thresh=(200, 255), show=show)
        color_mask.hls_channel(im2, channel='s', thresh=(100, 255), show=show)
    elif n == 2:
        im2 = persp.warp(im, show=show)
        color_mask.rgb_channel(im2, channel='r', thresh=(220, 255), show=show)
        color_mask.rgb_channel(im2, channel='g', thresh=(210, 255), show=show)
        color_mask.rgb_channel(im2, channel='b', thresh=(0, 100), show=show)
    elif n == 3:
        im2 = persp.warp(im, show=show)
        color_mask.hls_channel(im2, channel='h', thresh=(0, 90), show=show)
        color_mask.hls_channel(im2, channel='l', thresh=(200, 255), show=show)
        color_mask.hls_channel(im2, channel='s', thresh=(110, 255), show=show)
    elif n == 4:
        im2 = persp.warp(im, show=show)
        return custom_mask.special_mask(im2, show=show)
Example #10
def calibrate(path: str, filter: str, nrows: int, ncols: int):
    calibrator = Calibrator(1)
    objp = np.zeros((nrows * ncols, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nrows, 0:ncols].T.reshape(-1, 2)
    counter = 0
    image_names = glob.glob(os.path.join(path, filter))
    print(image_names)

    for i in range(100):
        # repeatedly feed the image set to the calibrator
        for imname in image_names:
            counter += 1
            image = cv2.imread(imname, cv2.IMREAD_GRAYSCALE)
            found, corners = cv2.findChessboardCorners(image, (nrows, ncols), None)
            if not found:
                continue  # skip images where the chessboard is not detected
            cv2.cornerSubPix(image, corners, (11, 11), (-1, -1),
                             (cv2.TERM_CRITERIA_EPS +
                              cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
            print(counter)
            calibrator.train(objp, np.squeeze(corners.astype(np.float64)))
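The snippet delegates the fitting to Calibrator.train and never recovers the intrinsics itself; with plain OpenCV, the accumulated 3D/2D point pairs would be solved in one call. A sketch under that assumption (the helper name is hypothetical):

import cv2

def solve_intrinsics(objpoints, imgpoints, image_shape):
    # objpoints/imgpoints: per-image 3D board points and 2D corner detections
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, image_shape[::-1], None, None)
    return mtx, dist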
Example #11
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network(1) as network,\
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            config = builder.create_builder_config()
            config.max_workspace_size = 1 << 30
            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit('ONNX file {} not found'.format(onnx_file_path))
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
                assert network.num_layers > 0, 'Failed to parse ONNX model. \
                            Please check if the ONNX model is compatible '

            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.
                  format(onnx_file_path))

            # build trt engine
            builder.max_batch_size = max_batch_size
            builder.max_workspace_size = 1 << 30  # 1GB
            builder.fp16_mode = fp16_mode
            if int8_mode:
                assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                config.max_workspace_size = common.GiB(1)
                config.set_flag(trt.BuilderFlag.INT8)
                # INT8 settings belong on the builder config: the deprecated
                # builder.int8_mode / builder.int8_calibrator attributes are
                # intentionally not used here.
                config.int8_calibrator = Calibrator(
                    calibration_stream, calibration_table_path)
                print('Int8 mode enabled')
            #engine = builder.build_cuda_engine(network)
            #engine = builder.build_engine(network,config)
            profile = builder.create_optimization_profile()
            profile.set_shape(
                network.get_input(0).name, (1, 3, 368, 432), (8, 3, 368, 432),
                (64, 3, 368, 432))  #lopps
            #profile.set_shape(network.get_input(0).name, (1, 3,368, 656), (8, 3,368, 656), (64, 3,368, 656))   #openpose_coco
            #profile.set_shape(network.get_input(0).name, (1, 3,432, 368), (1, 3,432, 368), (32, 3, 432, 368))  #openpose_thin
            #profile.set_shape(network.get_input(0).name, (1, 3, 384, 384), (8, 3, 384, 384), (64, 3, 384, 384))#ppn-resnet50-V2-HW=384x384.onnx)
            config.add_optimization_profile(profile)
            engine = builder.build_engine(network, config)
            if engine is None:
                print('Failed to create the engine')
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
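The Calibrator assigned to config.int8_calibrator has to implement TensorRT's INT8 calibrator interface. A skeleton of the usual shape, built on trt.IInt8EntropyCalibrator2; the calibration_stream attributes used here (batch_size, nbytes, next_batch) are assumptions:

import os

import pycuda.autoinit  # noqa: F401  creates a CUDA context (single-GPU assumption)
import pycuda.driver as cuda
import tensorrt as trt

class Calibrator(trt.IInt8EntropyCalibrator2):
    def __init__(self, stream, cache_file):
        trt.IInt8EntropyCalibrator2.__init__(self)
        self.stream = stream
        self.cache_file = cache_file
        self.d_input = cuda.mem_alloc(stream.nbytes)  # device-side batch buffer

    def get_batch_size(self):
        return self.stream.batch_size

    def get_batch(self, names):
        batch = self.stream.next_batch()
        if batch is None:
            return None  # no more data: calibration stops here
        cuda.memcpy_htod(self.d_input, batch)
        return [int(self.d_input)]

    def read_calibration_cache(self):
        # reuse a previous calibration run if the cache exists
        if os.path.exists(self.cache_file):
            with open(self.cache_file, 'rb') as f:
                return f.read()

    def write_calibration_cache(self, cache):
        with open(self.cache_file, 'wb') as f:
            f.write(cache)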
Example #12
 def differential_scan(self):
     cmat, dist_coefs, \
     rmat, tmat, _ = Calibrator().calibration_data.load_calibrations()
     while self.is_scanning:
         if np.abs(self.theta) >= PI2:
             break
         else:
             begin = time.time()
             try:
                 image = self.camera.capture_image()
Example #14
 def samples(self, im, target='data/video_dataset/'):
     im = clb.undistort(im)
     shape = self._model.input_shape
     for nw, se in self._slicer.wins:
         ys, ye = nw[1], se[1]
         xs, xe = nw[0], se[0]
         cpy = cv.resize(im[ys:ye, xs:xe, :], shape[:2])
         cpy = common.cvt_color(cpy, color='BGR', src='RGB')
         cv.imwrite('{0}/{1}.png'.format(target, self._i), cpy)
         self._i += 1
     print(self._i)
     return im
Example #15
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network(1) as network, \
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit('ONNX file {} not found'.format(onnx_file_path))
            log.logger.info(
                'Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                log.logger.info('Beginning ONNX file parsing')
                parser.parse(model.read())
                assert network.num_layers > 0, 'Failed to parse ONNX model. \
                            Please check if the ONNX model is compatible '

            log.logger.info('Completed parsing of ONNX file')
            log.logger.info(
                'Building an engine from file {}; this may take a while...'.
                format(onnx_file_path))

            # build trt engine; the precision modes are mutually exclusive
            engine = None
            builder.max_batch_size = max_batch_size
            builder.max_workspace_size = 1 << 30  # 1GB
            if int8_mode:
                assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                builder.int8_mode = int8_mode
                builder.int8_calibrator = Calibrator(calibration_stream,
                                                     calibration_table_path)
                engine = builder.build_cuda_engine(network)
                log.logger.info('Int8 mode enabled')
            elif fp16_mode:
                builder.fp16_mode = fp16_mode
                engine = builder.build_cuda_engine(network)
                log.logger.info('fp16 mode enabled')
            elif fp32_mode:
                engine = builder.build_cuda_engine(network)
                log.logger.info('fp32 mode enabled')
            if engine is None:
                log.logger.error('Failed to create the engine')
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
Example #16
 def calibrate(self,
               original,
               realSizeMM=[254.0, 170.0],
               chessboardSize=[25, 17],
               returnAnnotatedImage=False):
     rc = Calibrator.calibrate(self,
                               original=self.QPixmapToImage(original),
                               realSizeMM=realSizeMM,
                               chessboardSize=chessboardSize,
                               returnAnnotatedImage=returnAnnotatedImage)
      if not isinstance(rc, bool):
          return self.imageToQPixmap(rc)
      return rc
Example #17
def main():
    test_set = '../testpics'
    model_file = '../Engine/11.onnx'
    # Now we create a calibrator and give it the location of our calibration data.
    # We also allow it to cache calibration data for faster engine building.
    engine_model_path = "models_save/panoptic_fcn_int8_7.2.2.trt"
    calibration_cache = 'models_save/panoptic_fcn_int8.cache'
    calib = Calibrator(test_set, cache_file=calibration_cache)

    # Inference batch size can be different from calibration batch size.
    batch_size = 1
    engine = build_int8_engine(model_file, calib, batch_size)
    with open(engine_model_path, "wb") as f:
        f.write(engine.serialize())
Example #18
def calibrate_devices():
    # with Serial('/dev/ttyWHATEVER', baudrate=115200, timeout=30, xonxoff=True) as frequency_counter_port:
    #     frequency_counter = Tf930FrequencyCounter(frequency_counter_port)

    global calibration_dir
    resources = pyvisa.ResourceManager('@py')
    with resources.open_resource(
            'USB0::62700::60984::SDM36GBQ5R0755::0::INSTR') as resource:
        frequency_counter = Sdm3065xFrequencyCounter(resource)
        calibrator = CalibrationProcess(
            lambda: Calibrator(CalibrationLog(calibration_dir),
                               frequency_counter,
                               lambda: Cluck2SesameDevice(lambda: Serial(
                                   '/dev/ttyS0', baudrate=9600, timeout=30)),
                               vdd_volts_range=[2.7, 4.5]))

        calibrator.calibrate()
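If the exact VISA resource string is unknown, pyvisa can enumerate attached instruments first; this is standard pyvisa usage, not taken from the original script:

import pyvisa

rm = pyvisa.ResourceManager('@py')  # pyvisa-py backend, as in the script
print(rm.list_resources())          # prints tuples like ('USB0::...::INSTR',)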
Example #19
 def process(self, frame, show=False):
     im = clb.undistort(frame)
     warped = prsp.warp(im)
     masked = custom_mask.apply(warped, show=show)
     lp, rp = self._find_lane_points(masked,
                                     num_strips=10,
                                     radius=70,
                                     show=show)
     height = masked.shape[0]
     width = masked.shape[1]
     llane = Lane(lp, end=height - 1, num=height)
     rlane = Lane(rp, end=height - 1, num=height)
     offset = Lane.offset(llane,
                          rlane,
                          y=masked.shape[0] - 1,
                          frame_width=width)
     lcurv = llane.curvature(height // 2)
     rcurv = rlane.curvature(height // 2)
     fill = Lane.fill_lanes(warped, llane, rlane, show=show)
     return self._final_frame(im, fill, lcurv, rcurv, offset, show=show)
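Lane.curvature is not shown; for a lane fitted as x = A*y^2 + B*y + C, the textbook radius of curvature at row y is (1 + (2Ay + B)^2)^(3/2) / |2A|. A one-function sketch under that fit convention (an assumption):

def curvature(coeffs, y):
    # radius of curvature of x = A*y**2 + B*y + C at row y
    a, b = coeffs[0], coeffs[1]
    return (1 + (2 * a * y + b) ** 2) ** 1.5 / abs(2 * a)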
Example #20
if __name__ == "__main__":
    image_size = (image_width, image_height)

    # imgLeft = downsample_image(cv2.imread("./input_images/aloeL.jpg"), 1)
    # imgRight = downsample_image(cv2.imread("./input_images/aloeR.jpg"), 1)
    imgLeft = downsample_image(cv2.imread("./input_images/left19.jpg"), 3)
    imgRight = downsample_image(cv2.imread("./input_images/right19.jpg"), 3)

    height_left, width_left = imgLeft.shape[:2]
    height_right, width_right = imgRight.shape[:2]

    if 0 in [width_left, height_left, width_right, height_right]:
        print("Error: Can't remap image.")

    calibrator = Calibrator(6, 9)
    calibrator.load_parameters()
    imgLeft = calibrator.undistort_image(imgLeft)
    imgRight = calibrator.undistort_image(imgRight)

    # cv2.imshow('Left CALIBRATED', imgLeft)
    # cv2.imshow('Right CALIBRATED', imgRight)
    #
    # cv2.waitKey(0)

    # Rectify both images
    threshold = 0.65
    imgLeft_r, imgRight_r = rectify_stereo_pair_uncalibrated(
        imgLeft, imgRight, threshold)

    stereo = SGBMTuner(imgLeft_r, imgRight_r)
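rectify_stereo_pair_uncalibrated is not shown; the usual OpenCV recipe matches features, estimates the fundamental matrix, and warps both views with homographies from cv2.stereoRectifyUncalibrated. A condensed sketch; the choice of ORB and the reading of threshold as Lowe's ratio are assumptions:

import cv2
import numpy as np

def rectify_stereo_pair_uncalibrated(img_l, img_r, ratio=0.65):
    # work on grayscale copies for feature detection
    g_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY) if img_l.ndim == 3 else img_l
    g_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY) if img_r.ndim == 3 else img_r
    orb = cv2.ORB_create(2000)
    kp1, des1 = orb.detectAndCompute(g_l, None)
    kp2, des2 = orb.detectAndCompute(g_r, None)
    matches = [m for m, n in cv2.BFMatcher(cv2.NORM_HAMMING).knnMatch(des1, des2, k=2)
               if m.distance < ratio * n.distance]  # Lowe's ratio test
    pts1 = np.float32([kp1[m.queryIdx].pt for m in matches])
    pts2 = np.float32([kp2[m.trainIdx].pt for m in matches])
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
    h, w = img_l.shape[:2]
    _, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F, (w, h))
    return (cv2.warpPerspective(img_l, H1, (w, h)),
            cv2.warpPerspective(img_r, H2, (w, h)))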
Example #21
class ColorCalibrationWizard(QWizard, Ui_ColorCalibrationWizard):
    def __init__(self, settings):
        QWizard.__init__(self)
        self.setupUi(self)
        self.settings = settings
        
        self.add_screenshots.clicked.connect(self.AddFiles)
        self.wizardPage2.pageCreated.connect(self.AnalyzeImg)
        self.contrast = 0.0
    
    def accept(self):
        self.settings.setValue('contrast', self.contrast)
        self.settings.sync()
        self.close()
    
    def AddFiles(self):
        files = QFileDialog.getOpenFileNames(self, "Open", self.settings['screenshot_dir'])
        if not files:
            return
        for file in files:
            if self.screenshot_list.count() >= 5:
                break
            file1 = unicode(file).encode(sys.getfilesystemencoding())
            item = QListWidgetItem(file1)
            self.screenshot_list.addItem(item)
            
        if self.screenshot_list.count() >= 1:
            self.wizardPage1.fullfilled = True
            self.wizardPage1.completeChanged.emit()
            
    def threadFinished(self, result, errors, result_list):
        self.contrast = result
        rel_error = errors/self.screenshot_list.count()
        
        information = "Calibration finished.\nResulting optimal contrast value for your HUD color settings: "+unicode(result) +"\n"
        information += "OCR Errors: "+unicode(errors)+"\n\n"
        if rel_error == 0:
            information += "Your screenshots seem to have perfect size and contrast. OCR accuracy should be almost perfect. Please observe your result and perform OCR training in case there are some mistakes.\n\n"
        elif rel_error < 4:
            information += "Your screenshots seem to have good size and contrast. OCR accuracy should be high enough for numbers, possible inaccuracies in the text will be corrected by dictionary comparison. Please observe your result and perform OCR training in case there are some mistakes.\n\n"
        elif rel_error < 10:
            information += "Your screenshots seem to have sufficient size and contrast. You should check your results for possible errors. Especially 6,8 and 9 could get mixed up. Please consider using higher resolution, lower FOV or higher contrast HUD color. Observe your result and perform OCR training in case there are mistakes.\n\n"
        else:
            information += "Your screenshots are not usable for OCR. Use higher resolution, lower FOV or higher contrast HUD colors and perform the calibration again. OCR training might help but it is not guaranteed. \n\n"

        for i in range(len(result_list)):
            information += "Image "+unicode(i+1)+"\n"
            information += "Valid market screenshot: "+unicode(result_list[i][0])+"\n"
            information += "Resolution: "+unicode(result_list[i][1])+"x"+unicode(result_list[i][2])+"\n"
            information += "Market width: "+unicode(result_list[i][3])+"\n\n"
        
        information += "You can train the OCR with your screenshots. To perform training go to Settings > Learning wizard.\n\n"
        self.scr_result.setPlainText(information)
        self.wizardPage2.fullfilled = True
        self.wizardPage2.completeChanged.emit()
    
    def updateSteps(self, steps):
        self.analyzing_progress.setMaximum(steps)
        self.repaint()
    
    def updateProgress(self, time_left):
        self.time_left.setText(unicode(time_left)+"s")
        self.analyzing_progress.setValue(self.analyzing_progress.value()+1)
        self.repaint()
    
    def AnalyzeImg(self):
        imglist = []
        for i in range(self.screenshot_list.count()):
            item = self.screenshot_list.item(i)
            imglist.append(unicode(item.text()))
        language = unicode(self.ocr_language.currentText())
        self.calibratorthread = Calibrator(self, imglist, language)
        QObject.connect(self.calibratorthread, SIGNAL('finished(float, int, PyQt_PyObject)'), self.threadFinished)
        QObject.connect(self.calibratorthread, SIGNAL('steps(int)'), self.updateSteps)
        QObject.connect(self.calibratorthread, SIGNAL('progress(int)'), self.updateProgress)
        self.calibratorthread.execute()
Example #22
	def correct(self,qpixmap):
		image = self.QPixmapToImage(qpixmap)
		rc = Calibrator.correct(self,image) 
		return self.imageToQPixmap(rc)
Example #24
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        # 1 << NetworkDefinitionCreationFlag.EXPLICIT_BATCH
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) as network, \
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit('ONNX file {} not found'.format(onnx_file_path))
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
                # modify the original onnx network
                count = 0
                for i in range(network.num_layers):
                    net = network.get_layer(i)
                    if (net.type == trt.LayerType.CONVOLUTION
                            and 'Conv' in net.name):
                        count += 1
                        # network.mark_output(net.get_output(0))
                        f_layer.write('----1----network.num_layers:' +
                                      str(network.num_layers) +
                                      '  .the number of the conv is:' +
                                      str(count) + '\n')
                        f_layer.write(net.name + '\n')
                        f_layer.write(str(net.type) + '\n')
                        # network.unmark_output(net.get_output(0))
                        activate = network.add_activation(
                            input=net.get_output(0),
                            type=trt.ActivationType.CLIP)  # return a layer
                        if strategy is None:
                            print('strategy error!')
                        activate.beta = pow(2, strategy[count - 1] - 1) - 1
                        activate.alpha = -pow(2, strategy[count - 1] - 1)
                        activate.name = 'CLIP_%d' % i
                        f_layer.write(activate.name + ' beta is ' +
                                      str(activate.beta) + ' and alpha is ' +
                                      str(activate.alpha) + '\n')
                        # get the layer next to the conv and feed it the output of the activation layer
                        net_next = network.get_layer(i + 1)
                        net_next.set_input(0, activate.get_output(0))
                        f_layer.write(net_next.name + '\n')
                        f_layer.write(str(net_next.type) + '\n')

                        net_next2 = network.get_layer(i + 2)
                        net_next2.set_input(0, net_next.get_output(0))
                        net_next2.set_input(1, activate.get_output(0))
                        f_layer.write(net_next2.name + '\n')
                        f_layer.write(str(net_next2.type) + '\n')
                f_layer.write('----2----network.num_layers:' +
                              str(network.num_layers) + '\n')
                assert network.num_layers > 0, 'Failed to parse ONNX model. \
                            Please check if the ONNX model is compatible '

            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.
                  format(onnx_file_path))

            # build trt engine
            engine = None
            if int4_mode:
                builder.max_batch_size = max_batch_size
                builder.int8_mode = int4_mode
                builder.max_workspace_size = 1 << 30  # 1GB
                assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                builder.int8_calibrator = Calibrator(calibration_stream,
                                                     calibration_table_path)
                engine = builder.build_cuda_engine(network)
                # config=builder.create_builder_config()
                # config.max_workspace_size = 1 << 30  #
                # config.flags = 1 << int(trt.BuilderFlag.INT8)  #
                # assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                # config.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
                # engine = builder.build_engine(network, config)
                print('Int4 mode enabled')
            if fp16_mode:
                builder.strict_type_constraints = True
                builder.max_batch_size = max_batch_size
                builder.max_workspace_size = 1 << 30  # 1GB
                builder.fp16_mode = fp16_mode
                engine = builder.build_cuda_engine(network)
                print('fp16 modify mode enabled')
            if fp32_mode:
                builder.max_batch_size = max_batch_size
                builder.max_workspace_size = 1 << 30  # 1GB
                engine = builder.build_cuda_engine(network)
                print('fp32 modify mode enabled')
            if engine is None:
                print('Failed to create the engine')
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
Example #25
#peaks, _ = getPeaks(spectrum, min_dist=25, thres=0.3)
from scipy.signal import find_peaks

spectrum /= spectrum.max()
peaks, _ = find_peaks(spectrum, height=0.1, distance=10, width=(0,40))

plt.plot(spectrum)
plt.vlines(peaks, 0, max(spectrum))
plt.show()

if len(peaks) == 0:
    print("No peaks found, try again!")
    exit(1)

c = Calibrator(peaks, atlas)
c.set_fit_constraints(min_slope=0.043,
                      max_slope=0.053,
                      min_intercept=550,
                      max_intercept=650,
                      line_fit_thresh=2)
 
best_p = c.match_peaks_to_atlas(c.fit(2))

if best_p is not None:
    c.plot_fit(spectrum, best_p)
    print("Start wavelength: ", best_p[-1])
    print("Centre wavelength: ", polyfit_value(len(spectrum) / 2, best_p[::-1]))
    print("Dispersion: ", best_p[-2])
Example #26
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/6/15 23:06
# @Author  : Wang Xin
# @Email   : [email protected]
# @File    : demo_BM.PY

import cv2
from utils import *
from calibrator import Calibrator
from stereo_matcher import StereoMatcher
import matplotlib.pyplot as plt

# for not rectified images
# stereo calibration
calibrator = Calibrator(images_dir="./data/calibration_images",
                        border_size=(9, 6))
calibrator.calibrate()
calibrator.save("stereo_camera.json")

# stereo matching
matcher1 = StereoMatcher(methods="BM",
                         is_rectified=False,
                         camera_params_file="stereo_camera.json")
imgL1 = cv2.imread("./data/calibration_images/left06.jpg", flags=0)
imgR1 = cv2.imread("./data/calibration_images/right06.jpg", flags=0)
print(len(imgR1.shape))
rectifiedImgL1, rectifiedImgR1 = rectifyImagePair(
    imgL1, imgR1, cameraParams=json_load("stereo_camera.json"))
rectifiedImgPair = np.concatenate((rectifiedImgL1, rectifiedImgR1), axis=1)
rectifiedImgPair[::20, :] = 0
disp1 = matcher1.predict(imgL1, imgR1)
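StereoMatcher with methods="BM" presumably wraps OpenCV's block matcher; the core of predict on a rectified grayscale pair is likely close to this sketch (the parameter values are assumptions):

import cv2

def bm_disparity(rect_l, rect_r, num_disparities=64, block_size=15):
    # plain OpenCV block matching on a rectified grayscale pair
    bm = cv2.StereoBM_create(numDisparities=num_disparities,
                             blockSize=block_size)
    # StereoBM returns fixed-point disparities scaled by 16
    return bm.compute(rect_l, rect_r).astype('float32') / 16.0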
Example #27
peaks, _ = find_peaks(spectrum,
                      distance=3.,
                      prominence=np.percentile(spectrum, 20))

plt.figure(1, figsize=(8, 8))
plt.clf()
plt.plot(spectrum)
plt.vlines(peaks,
           spectrum[peaks.astype('int')],
           spectrum.max() * 1.1,
           colors='C1')
plt.ylim(0, spectrum.max() * 1.1)
plt.xlim(0, len(spectrum))

c = Calibrator(peaks,
               elements=["Xe"],
               min_wavelength=3000.,
               max_wavelength=9000.)

# thresh (A) :: the individual line fitting tolerance to accept as a valid fitting point
# fit_tolerance (A) :: the RMS tolerance of the overall fit
c.set_fit_constraints(n_pix=len(spectrum),
                      min_intercept=3000.,
                      max_intercept=5000.,
                      fit_tolerance=pix_scale * 0.5,
                      thresh=pix_scale * 1.,
                      polydeg=4)

pix = np.array((241., 269., 280., 294., 317., 359., 462.5, 468.4, 477., 530.3,
                752., 836., 900., 912., 980.))
wave = np.array(
    (4500.98, 4624.28, 4671.23, 4734.15, 4844.33, 5023.88, 5488.56, 5531.07,
Example #28
import sys

# import PyQt4 QtCore and QtGui modules
from PyQt4.QtCore import *
from PyQt4.QtGui import *

from calibrator import Calibrator

if __name__ == '__main__':

    # create application
    app = QApplication(sys.argv)
    app.setApplicationName('calibrator')

    # create widget
    w = Calibrator()
    w.setWindowTitle('calibrator')
    w.show()

    # connection
    QObject.connect(app, SIGNAL('lastWindowClosed()'), app, SLOT('quit()'))

    # execute application
    sys.exit(app.exec_())
Example #29
 def process(self, orig, show=False):
     im = clb.undistort(orig, show=show)
     self._find_cars_heatmap(im, show=show)
     self._find_cars_boxes(show=show)
     self._reset_heatmap()
     return self._draw_car_boxes(orig, show=show)
Example #30
 def __init__(self):
     assert clb.initialized()
     assert prsp.initialized()
     self.left_line = None
     self.right_line = None
Example #31
                    'param':resultMoment['Parameter'],
                    'paramStd':resultMoment['Parameter_Std'],
                    'scatterUncert':[resultMoment['Max95%Scatter_Uncertainty'][0]],
                    'meanUncert':[resultMoment['Max95%Mean_Uncertainty'][0]]})

else:
    # NOTE: SG Calibration
    workingDirectory=r'C:\Users\Richard\Google Drive\Berkeley\Berkeley Classes\Fall 2018\ME 103\Lab 4-5\ME103\10.26.2018'
    settings={'dir': workingDirectory,
                'x': 'Ch1_Fz', 'y': 'Weight',
                'xUnit': '[volts]', 'yUnit': '[Newton]',
                'xUncert': 0.0, 'yUncert': weightUncert,
                'target_file': 'strain_gauge_cal.csv', 'auto': True,
                'eqn': 'p1'}
    customEquation=None
    botSG=Calibrator(**settings)
    botSG.read_data()
    discrete_fit_data_SG=decorator(botSG, botSG.discrete_fit_data, h=lambda y: -y/1000*9.81)
    resultSG=discrete_fit_data_SG(customEquation)
    SGData=[
            ['Parameter', 'Parameter_Std', 'R2', 'Max95%Scatter_Uncertainty', 'Max95%Mean_Uncertainty'],
            [resultSG['param'][0], resultSG['paramStd'][0], resultSG['R2'], max(resultSG['scatterUncert']), max(resultSG['meanUncert'])],
            [resultSG['param'][1], resultSG['paramStd'][1], '', '', '']
            ]

    # NOTE: AOA Calibration
    workingDirectory=r'C:\Users\Richard\Google Drive\Berkeley\Berkeley Classes\Fall 2018\ME 103\Lab 4-5\ME103\10.26.2018\Angle Attack Cali'
    settings={'dir': workingDirectory,
                'x': 'Ch4_a_control', 'y': 'Angle_of_Attack',
                'xUnit': '[volts]', 'yUnit': '[degree]',
                'xUncert': 0.0, 'yUncert': levelUncert,
Example #32
#!/usr/bin/env python

from calibrator import Calibrator
import rospy
calibrator = Calibrator()

if "__main__" == __name__:
    try:
        calibrator.set_pattern_type("CHESS_BOARD")
        calibrator.set_calibration_file("calibration_sd_kinect/")
        #~ calibrator.set_calibration_file("calibration_s4/")
        calibrator.perform_calibration()
        calibrator.print_parameters()
    except rospy.ROSInterruptException:
        pass
Example #34
def projectSpecCalc(proj, **kwargs):
    generalPrint(
        "ProjectSpecCalc",
        "Calculating spectra for project with options: {}".format(kwargs))
    # default options
    options = parseKeywords(getDefaultOptions(proj), kwargs)

    # calculate the spectra
    # get the reference time
    datetimeRef = proj.getRefTime()
    # prepare the calibrator
    cal = Calibrator(proj.getCalDataPath())
    if options["calibrate"]:
        cal.printInfo()

    # loop over sites
    for s in options["sites"]:
        # print site info
        proj.printSiteInfo(s)

        # get measurement directories for site s
        timeMeas = proj.getSiteTimeFiles(s)

        # loop over measurement folders and calculate spectra for each one
        for meas in timeMeas:
            # get measurement sample frequency
            fs = proj.getMeasSampleFreq(s, meas)
            # check to see if in given frequency list
            if int(fs) not in options["freqs"]:
                continue

            # print measurement info
            proj.printMeasInfo(s, meas)

            # get measurement start and end times
            datetimeStart = proj.getMeasStartTime(s, meas)
            datetimeEnd = proj.getMeasEndTime(s, meas)

            # get data, sensor info, serial info, chopper info for calibration
            reader = proj.getMeasDataReader(s, meas)
            # get data start and end times - these may not be equal to startDate and endDate
            # there is the issue of ats data recording end time as one sample late
            # hence get the actual end time
            dataStartTime, dataEndTime = reader.getDataTimes(
                datetimeStart, datetimeEnd)
            dataChans = reader.getChannels()
            if len(options["chans"]) > 0:
                dataChans = options["chans"]
            # alternatively, could simply do getPhysicalSamples() and get all data that way
            data = reader.getPhysicalData(dataStartTime,
                                          dataEndTime,
                                          chans=dataChans)

            if options["calibrate"]:
                # do the calibration here
                sensors = reader.getSensors(dataChans)
                serials = reader.getSerials(dataChans)
                choppers = reader.getChoppers(dataChans)
                data = cal.calibrate(data, fs, sensors, serials, choppers)

            # notch filter if required
            for n in options["notch"]:
                for c in data:
                    data[c] = notchFilter(data[c], fs, n, n / 5.0)

            # define decimation parameters
            decParams = DecimationParams(fs)
            if len(options["evalfreq"]) == 0:
                decParams.setDecimationParams(options["declevels"],
                                              options["freqlevel"])
            else:
                decParams.setFrequencyParams(options["evalfreq"],
                                             options["declevels"],
                                             options["freqlevel"])
            decParams.printInfo()
            numLevels = decParams.getNumLevels()

            # now do window parameters
            winParams = WindowParams(decParams)
            # winParams.printInfo()

            # create the decimator
            dec = Decimator(data, fs, decParams)
            # dec.printInfo()

            # loop through decimation levels
            for iDec in xrange(0, numLevels):
                # get the data for the current level
                check = dec.incrementLevel()
                if not check:
                    break  # not enough data
                #dec.printInfo()
                data = dec.getData()

                # create the windower and give it window parameters for current level
                fsDec = dec.getSampleFreq()
                win = Windower(datetimeRef, dataStartTime, data, fsDec,
                               winParams.getWindowSize(iDec),
                               winParams.getOverlap(iDec))
                numWindows = win.getNumWindows()
                # win.printInfo()
                if numWindows < 2:
                    break  # do no more decimation

                # create the spectrum calculator and statistics calculators
                specCalc = SpectrumCalculator(fsDec,
                                              winParams.getWindowSize(iDec))

                # get ready a file to save the spectra
                specWrite = SpectrumWriter(proj.getSpecDataPathMeas(s, meas),
                                           datetimeRef)
                specWrite.openBinaryForWriting("spectra", iDec, fsDec,
                                               winParams.getWindowSize(iDec),
                                               winParams.getOverlap(iDec),
                                               win.getGlobalWindowOffset(),
                                               numWindows, dataChans)

                # loop through windows, calculate spectra and save
                for iW in xrange(0, numWindows):
                    # get the window data
                    winData = win.getData(iW)

                    # calculate spectra
                    f, specData = specCalc.calcFourierCoeff(winData)

                    # write out spectra
                    specWrite.writeBinary(specData, iW)

                # close spectra and stat files
                specWrite.closeFile()
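notchFilter(data[c], fs, n, n / 5.0) above suppresses a single frequency with a bandwidth of a fifth of that frequency; a standard SciPy equivalent under that assumed signature (signal, sample rate, notch frequency, bandwidth):

from scipy import signal

def notch_filter(data, fs, freq, bandwidth):
    # IIR notch at `freq` Hz; quality factor Q = freq / bandwidth
    b, a = signal.iirnotch(freq, freq / bandwidth, fs=fs)
    return signal.filtfilt(b, a, data)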