Example #1
    def xtest_compare_calibrator_with_multiple_interpolations(self):
        # 2: Light
        # 1: Laser
        processor = preProcessData.DataProcessor(
            "./data/Notch Filter/Data Channel 1.dat",
            "./data/Notch Filter/Data Channel 2.dat")

        x, y, laser_x, laser_y = processor.get_data(1)
        calibrator = Calibrator(laser_x, laser_y)
        x, y = calibrator.calculate_calibration()
        plt.plot(x, y)

        x, y, laser_x, laser_y = processor.get_data(2)
        calibrator = Calibrator(laser_x, laser_y)
        x, y = calibrator.calculate_calibration(2)
        plt.plot(x, y)

        x, y, laser_x, laser_y = processor.get_data(8)
        calibrator = Calibrator(laser_x, laser_y)
        x, y = calibrator.calculate_calibration(8)
        plt.plot(x, y)

        plt.xlabel('peak index')
        plt.ylabel('delta')
        plt.show()
Example #2
    def fromConfig(cls, config):
        if 'camera' not in config:
            return None

        camera = PinholeCamera.fromConfig(config['camera'])

        if camera:
            monocular = MonocularWrapper()

            if 'calibrator' in config:
                calibrator = Calibrator.fromConfig(camera, monocular,
                                                   config['calibrator'])
            else:
                calibrator = Calibrator(camera, monocular)

            if calibrator:
                for key in cls.CONFIG_KEYS:
                    if key not in config:
                        return None

                roi = config['roi']
                velocitaCrociera = config['velocitaCrociera']
                # Optional parameters
                distanzaMinima = config.get('distanzaMinima')
                distanzaLimite = config.get('distanzaLimite')
                windowSize = config.get('windowSize')
                skipThreshold = config.get('skipThreshold')

                return cls(camera, monocular, calibrator, roi,
                           velocitaCrociera, distanzaMinima, distanzaLimite,
                           windowSize, skipThreshold)
Example #3
def calculate(processor,
              interp_multiplier,
              ignore_calib,
              plot_fkt,
              correction_factor=1):
    x, y, laser_x, laser_y = processor.get_data(interp_multiplier)

    calibrator = Calibrator(laser_x, laser_y, interp_multiplier, plot_fkt)
    delta, dt_max = calibrator.get_calibration(x)

    print(len(x))
    if not ignore_calib:
        _x = x + delta

        x = np.linspace(_x[0], _x[-1], len(_x))
        y = Utils.interpolate(x, _x, y)

        x = x * correction_factor

    # t in seconds
    t = x * dt_max
    dt = t[1] - t[0]

    t_max = len(t)

    print(len(t))
    print(1 / t_max)

    l, y_trans = FourierTransformer().transform(y, dt, t_max)

    # /   y_trans = list(reversed(y_trans))

    return l, y_trans
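The uniform-grid resampling above (np.linspace followed by Utils.interpolate) is what makes the subsequent Fourier transform valid, since the discrete transform assumes evenly spaced samples (note dt = t[1] - t[0] below). Utils.interpolate is project-specific; assuming it does 1-D linear interpolation with the argument order (new grid, original x, original y), the same step can be sketched with plain NumPy:

import numpy as np


def resample_uniform(x, y):
    """Linearly interpolate y onto an evenly spaced grid spanning x."""
    x_uniform = np.linspace(x[0], x[-1], len(x))
    # assumed to mirror Utils.interpolate's argument order: new grid, old x, old y
    y_uniform = np.interp(x_uniform, x, y)
    return x_uniform, y_uniform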
Example #4
def main():
    print(Banner("GUESS THE NUMBER APP").content)
    calibrator = Calibrator(random.randint(0, 100))
    prompt = Prompt()
    success = False
    while not success:
        calibration = calibrator.calibrate(prompt.input())
        print(calibration.message)
        success = calibration.is_equal
Example #5
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network(1) as network,\
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            config = builder.create_builder_config()
            config.max_workspace_size = 1 << 30
            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit('ONNX file {} not found'.format(onnx_file_path))
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
                assert network.num_layers > 0, 'Failed to parse ONNX model. \
                            Please check if the ONNX model is compatible '

            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.
                  format(onnx_file_path))

            # build trt engine
            builder.max_batch_size = max_batch_size
            builder.max_workspace_size = 1 << 30  # 1GB
            builder.fp16_mode = fp16_mode
            if int8_mode:
                config.max_workspace_size = common.GiB(1)
                config.set_flag(trt.BuilderFlag.INT8)
                config.int8_calibrator = Calibrator(
                    calibration_stream, calibration_table_path
                )  # the settings have to go on the config object - the runtime file was not changed, this is just a reminder to myself

                #builder.int8_mode = int8_mode
                assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                #builder.int8_calibrator  = Calibrator(calibration_stream, calibration_table_path)
                print('Int8 mode enabled')
            #engine = builder.build_cuda_engine(network)
            #engine = builder.build_engine(network,config)
            profile = builder.create_optimization_profile()
            profile.set_shape(
                network.get_input(0).name, (1, 3, 368, 432), (8, 3, 368, 432),
                (64, 3, 368, 432))  #lopps
            #profile.set_shape(network.get_input(0).name, (1, 3,368, 656), (8, 3,368, 656), (64, 3,368, 656))   #openpose_coco
            #profile.set_shape(network.get_input(0).name, (1, 3,432, 368), (1, 3,432, 368), (32, 3, 432, 368))  #openpose_thin
            #profile.set_shape(network.get_input(0).name, (1, 3, 384, 384), (8, 3, 384, 384), (64, 3, 384, 384))#ppn-resnet50-V2-HW=384x384.onnx)
            config.add_optimization_profile(profile)
            engine = builder.build_engine(network, config)
            if engine is None:
                print('Failed to create the engine')
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
Example #6
   def CheckPinCode(self, pin):
       if pin=='2502':
           self.tempthreadcontrol(0)
           spi.close()
           self.CalibrWindow=Calibrator(self.finish_signal, self)
           self.CalibrWindow.show()
 
       else:
           pass
       self.Calibr.setStyleSheet(metrocss.prog_passive)
Example #7
 def differential_scan(self):
     cmat, dist_ceofs, \
     rmat, tmat, _ = Calibrator().calibration_data.load_calibrations()
     while self.is_scanning:
         if np.abs(self.theta) >= PI2:
             break
         else:
             begin = time.time()
             try:
                 image = self.camera.capture_image()
Example #8
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network(1) as network, \
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit('ONNX file {} not found'.format(onnx_file_path))
            log.logger.info(
                'Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                log.logger.info('Beginning ONNX file parsing')
                parser.parse(model.read())
                assert network.num_layers > 0, 'Failed to parse ONNX model. \
                            Please check if the ONNX model is compatible '

            log.logger.info('Completed parsing of ONNX file')
            log.logger.info(
                'Building an engine from file {}; this may take a while...'.
                format(onnx_file_path))

            # build trt engine
            engine = None  # stays None if no precision mode flag is set
            if int8_mode:
                builder.max_batch_size = max_batch_size
                builder.int8_mode = int8_mode
                builder.max_workspace_size = 1 << 30  # 1GB
                assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                builder.int8_calibrator = Calibrator(calibration_stream,
                                                     calibration_table_path)
                engine = builder.build_cuda_engine(network)
                log.logger.info('Int8 mode enabled')
            if fp16_mode:
                builder.max_batch_size = max_batch_size
                builder.max_workspace_size = 1 << 30  # 1GB
                builder.fp16_mode = fp16_mode
                engine = builder.build_cuda_engine(network)
                log.logger.info('fp16 mode enabled')
            if fp32_mode:
                builder.max_batch_size = max_batch_size
                builder.max_workspace_size = 1 << 30  # 1GB
                engine = builder.build_cuda_engine(network)
                log.logger.info('fp32 mode enabled')
            if engine is None:
                log.logger.error('Failed to create the engine')
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
Example #9
def main():
    test_set = '../testpics'
    model_file = '../Engine/11.onnx'
    # Now we create a calibrator and give it the location of our calibration data.
    # We also allow it to cache calibration data for faster engine building.
    engine_model_path = "models_save/panoptic_fcn_int8_7.2.2.trt"
    calibration_cache = 'models_save/panoptic_fcn_int8.cache'
    calib = Calibrator(test_set, cache_file=calibration_cache)

    # Inference batch size can be different from calibration batch size.
    batch_size = 1
    engine = build_int8_engine(model_file, calib, batch_size)
    with open(engine_model_path, "wb") as f:
        f.write(engine.serialize())
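The Calibrator handed to the TensorRT builder in the INT8 examples (config.int8_calibrator and builder.int8_calibrator above and below) is never shown. Its constructor varies between snippets (a calibration stream plus a table path, a data directory plus a cache file, or input names plus both), so the following is only a minimal sketch of what such a class typically looks like, assuming the stream-plus-cache-path form and a stream exposing batch_size and next_batch(); the four overridden methods are the standard trt.IInt8EntropyCalibrator2 interface.

import os

import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a CUDA context)
import pycuda.driver as cuda
import tensorrt as trt


class Calibrator(trt.IInt8EntropyCalibrator2):
    """INT8 entropy calibrator fed from a batch stream, with an on-disk cache."""

    def __init__(self, calibration_stream, cache_file):
        super().__init__()
        # calibration_stream is assumed to expose `batch_size` and a
        # next_batch() method returning a contiguous float32 numpy array,
        # or None once the calibration data is exhausted.
        self.stream = calibration_stream
        self.cache_file = cache_file
        self.d_input = None  # device buffer, allocated on the first batch

    def get_batch_size(self):
        return self.stream.batch_size

    def get_batch(self, names):
        batch = self.stream.next_batch()
        if batch is None:
            return None  # tells TensorRT that calibration is finished
        batch = np.ascontiguousarray(batch, dtype=np.float32)
        if self.d_input is None:
            self.d_input = cuda.mem_alloc(batch.nbytes)
        cuda.memcpy_htod(self.d_input, batch)
        return [int(self.d_input)]

    def read_calibration_cache(self):
        # Reusing a cached calibration table makes rebuilding the engine much faster.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                return f.read()
        return None

    def write_calibration_cache(self, cache):
        with open(self.cache_file, "wb") as f:
            f.write(cache)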
Example #10
 def AnalyzeImg(self):
     imglist = []
     for i in range(self.screenshot_list.count()):
         item = self.screenshot_list.item(i)
         imglist.append(unicode(item.text()))
     language = unicode(self.ocr_language.currentText())
     self.calibratorthread = Calibrator(self, imglist, language)
     QObject.connect(self.calibratorthread,
                     SIGNAL('finished(float, int, PyQt_PyObject)'),
                     self.threadFinished)
     QObject.connect(self.calibratorthread, SIGNAL('steps(int)'),
                     self.updateSteps)
     QObject.connect(self.calibratorthread, SIGNAL('progress(int)'),
                     self.updateProgress)
     self.calibratorthread.execute()
Example #11
def calibrate_devices():
    #	with Serial('/dev/ttyWHATEVER', baudrate=115200, timeout=30, xonxoff=True) as frequency_counter_port:
    #		frequency_counter = Tf930FrequencyCounter(frequency_counter_port)

    global calibration_dir
    resources = pyvisa.ResourceManager('@py')
    with resources.open_resource(
            'USB0::62700::60984::SDM36GBQ5R0755::0::INSTR') as resource:
        frequency_counter = Sdm3065xFrequencyCounter(resource)
        calibrator = CalibrationProcess(
            lambda: Calibrator(CalibrationLog(calibration_dir),
                               frequency_counter,
                               lambda: Cluck2SesameDevice(lambda: Serial(
                                   '/dev/ttyS0', baudrate=9600, timeout=30)),
                               vdd_volts_range=[2.7, 4.5]))

        calibrator.calibrate()
Example #12
def calibrate(path: str, filter: str, nrows: int, ncols: int):
    calibrator = Calibrator(1)
    objp = np.zeros((nrows * ncols, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nrows, 0:ncols].T.reshape(-1, 2)
    counter = 0
    image_names = glob.glob(os.path.join(path, filter))
    print(image_names)

    for i in range(100):
        for imname in glob.glob(os.path.join(path, filter)):
            counter += 1
            image = cv2.imread(imname, cv2.IMREAD_GRAYSCALE)
            f, corners = cv2.findChessboardCorners(image, (nrows, ncols), None)
            if not f:
                continue  # no chessboard detected in this image
            cv2.cornerSubPix(image, corners, (11, 11), (-1, -1),
                             (cv2.TERM_CRITERIA_EPS +
                              cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
            print(counter)
            calibrator.train(objp, np.squeeze(corners.astype(np.float64)))
Example #13
import sys

# import PyQt4 QtCore and QtGui modules
from PyQt4.QtCore import *
from PyQt4.QtGui import *

from calibrator import Calibrator

if __name__ == '__main__':

    # create application
    app = QApplication( sys.argv )
    app.setApplicationName( 'calibrator' )

    # create widget
    w = Calibrator()
    w.setWindowTitle( 'calibrator' )
    w.show()

    # connection
    QObject.connect( app, SIGNAL( 'lastWindowClosed()' ), app, SLOT( 'quit()' ) )

    # execute application
    sys.exit( app.exec_() )
Example #14
#!/usr/bin/env python

from calibrator import Calibrator
import rospy
calibrator = Calibrator()

if "__main__" == __name__:
    try:
        calibrator.set_pattern_type("CHESS_BOARD")
        calibrator.set_calibration_file("calibration_sd_kinect/")
        #~ calibrator.set_calibration_file("calibration_s4/")
        calibrator.perform_calibration()
        calibrator.print_parameters()
    except rospy.ROSInterruptException:
        pass
Example #15
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/6/15 23:06
# @Author  : Wang Xin
# @Email   : [email protected]
# @File    : demo_BM.PY

import cv2
from utils import *
from calibrator import Calibrator
from stereo_matcher import StereoMatcher
import matplotlib.pyplot as plt

# for not rectified images
# stereo calibration
calibrator = Calibrator(images_dir="./data/calibration_images",
                        border_size=(9, 6))
calibrator.calibrate()
calibrator.save("stereo_camera.json")

# stereo matching
matcher1 = StereoMatcher(methods="BM",
                         is_rectified=False,
                         camera_params_file="stereo_camera.json")
imgL1 = cv2.imread("./data/calibration_images/left06.jpg", flags=0)
imgR1 = cv2.imread("./data/calibration_images/right06.jpg", flags=0)
print(len(imgR1.shape))
rectifiedImgL1, rectifiedImgR1 = rectifyImagePair(
    imgL1, imgR1, cameraParams=json_load("stereo_camera.json"))
rectifiedImgPair = np.concatenate((rectifiedImgL1, rectifiedImgR1), axis=1)
rectifiedImgPair[::20, :] = 0
disp1 = matcher1.predict(imgL1, imgR1)
Example #16
peaks, _ = find_peaks(spectrum,
                      distance=3.,
                      prominence=np.percentile(spectrum, 20))

plt.figure(1, figsize=(8, 8))
plt.clf()
plt.plot(spectrum)
plt.vlines(peaks,
           spectrum[peaks.astype('int')],
           spectrum.max() * 1.1,
           colors='C1')
plt.ylim(0, spectrum.max() * 1.1)
plt.xlim(0, len(spectrum))

c = Calibrator(peaks,
               elements=["Xe"],
               min_wavelength=3000.,
               max_wavelength=9000.)

# thresh (A) :: the individual line fitting tolerance to accept as a valid fitting point
# fit_tolerance (A) :: the RMS
c.set_fit_constraints(n_pix=len(spectrum),
                      min_intercept=3000.,
                      max_intercept=5000.,
                      fit_tolerance=pix_scale * 0.5,
                      thresh=pix_scale * 1.,
                      polydeg=4)

pix = np.array((241., 269., 280., 294., 317., 359., 462.5, 468.4, 477., 530.3,
                752., 836., 900., 912., 980.))
wave = np.array(
    (4500.98, 4624.28, 4671.23, 4734.15, 4844.33, 5023.88, 5488.56, 5531.07,
Example #17
def projectInterpResamp(proj, **kwargs):
    generalPrint(
        "Project Interp Resamp",
        "Resampling / Interpolating project time files with options: {}".
        format(kwargs))
    # resample info is a dictionary
    # {sampleRateToResample: sampleRateToResampleTo}
    # this function then does this for all measurement directories of that sample rate in the project
    options = parseKeywords(getDefaultOptions(proj), kwargs)

    # create a data calibrator and writer instance
    cal = Calibrator(proj.getCalDataPath())
    if options["calibrate"]:
        cal.printInfo()
    writer = DataWriterInternal()

    # loop over sites
    for s in options["sites"]:
        # print site info
        proj.printSiteInfo(s)

        for fs in options["freqs"]:
            timeFiles = proj.getSiteTimeFilesFs(s, fs)

            if len(timeFiles) == 0:
                continue  # nothing to process

            # otherwise, resample
            for tF in timeFiles:
                # get the reader
                reader = proj.getMeasDataReader(s, tF)
                reader.printInfo()
                # get the dates for the data
                timeStart = reader.getStartDatetime()
                timeStop = reader.getStopDatetime()

                # now check the user provided dates
                # don't change timeStart, timeEnd yet because that breaks the checking
                if options["start"]:
                    startUser = datetime.strptime(options["start"],
                                                  "%Y-%m-%d %H:%M:%S")
                    if startUser > timeStop:  # this data has nothing to contribute in the optional date range
                        continue
                if options["stop"]:
                    stopUser = datetime.strptime(options["stop"],
                                                 "%Y-%m-%d %H:%M:%S")
                    if stopUser < timeStart:  # this data has nothing to contribute in the optional date range
                        continue

                # if the data contributes, copy in the data if relevant
                if options["start"]:
                    timeStart = datetime.strptime(options["start"],
                                                  "%Y-%m-%d %H:%M:%S")
                if options["stop"]:
                    timeStop = datetime.strptime(options["stop"],
                                                 "%Y-%m-%d %H:%M:%S")

                # calculate the samples
                sampleStart, sampleEnd = reader.time2sample(
                    timeStart, timeStop)
                dataStartTime, dataStopTime = reader.sample2time(
                    sampleStart, sampleEnd
                )  # need to do it this way round to protect against fractional sampling
                # now get the data
                data = reader.getPhysicalSamples(startSample=sampleStart,
                                                 endSample=sampleEnd)
                numSamples = reader.getNumSamples()
                chans = reader.getChannels()
                headers = reader.getHeaders()
                chanHeaders, chanMap = reader.getChanHeaders()
                dataFs = fs

                # if calibration is on, calibrate the data
                if options["calibrate"]:
                    generalPrint(
                        "Project Interp Resamp",
                        "Calibrating time data: site {}, time data {}".format(
                            s, tF))
                    sensors = reader.getSensors(reader.getChannels())
                    serials = reader.getSerials(reader.getChannels())
                    choppers = reader.getChoppers(reader.getChannels())
                    data = cal.calibrate(data, dataFs, sensors, serials,
                                         choppers)

                # Interpolation to the second - do this first
                # make sure all the data starts on a full second
                if options["interp"]:
                    if dataStartTime.microsecond != 0:
                        generalPrint(
                            "Project Interp Resamp",
                            "Interpolating to second: site {}, time data {} at {} Hz"
                            .format(s, tF, dataFs))
                        # the recording is not on the second - NOTE, this will fail with longer sample periods (i.e. greater than a second)
                        startTimeInterp, numSamplesInterp, dataInterp = interpolateToSecond(
                            dataFs, dataStartTime, data)
                        numSamples = numSamplesInterp
                        dataStartTime = startTimeInterp
                        data = dataInterp

                # Check if fs belongs to options["resamp"] keys and resample if does
                # resampling does not change the start time - hence dataStartTime is unchanged
                if dataFs in options[
                        "resamp"]:  # then need to resample this data
                    generalPrint(
                        "Project Interp Resamp",
                        "Resampling site = {}, time data {} at {} Hz to {} Hz".
                        format(s, tF, fs, options["resamp"][fs]))
                    data = resample(data, dataFs, options["resamp"][dataFs])
                    # update info for saving file
                    dataFs = options["resamp"][dataFs]
                    numSamples = data[chans[0]].size

                # recall, start times stay the same with resampling - only the sample rate changes and the number of samples
                # write out data - the data writer automatically deals with the end date
                outPath = os.path.join(
                    proj.getTimeDataPathSite(s),
                    options["prepend"] + tF + options["postpend"])
                writer.setOutPath(outPath)
                writer.writeData(
                    headers,
                    chanHeaders,
                    data,
                    start_time=dataStartTime.strftime("%H:%M:%S.%f"),
                    start_date=dataStartTime.strftime("%Y-%m-%d"),
                    numSamples=numSamples,
                    sample_freq=dataFs,
                    lsb_applied=True)
                writer.printInfo()
Example #18
def projectSpecCalc(proj, **kwargs):
    generalPrint(
        "ProjectSpecCalc",
        "Calculating spectra for project with options: {}".format(kwargs))
    # default options
    options = parseKeywords(getDefaultOptions(proj), kwargs)

    # calculate the spectra
    # get the reference time
    datetimeRef = proj.getRefTime()
    # prepare the calibrator
    cal = Calibrator(proj.getCalDataPath())
    if options["calibrate"]:
        cal.printInfo()

    # loop over sites
    for s in options["sites"]:
        # print site info
        proj.printSiteInfo(s)

        # get measurement directories for site s
        timeMeas = proj.getSiteTimeFiles(s)

        # loop over measurement folders and calculate spectra for each one
        for meas in timeMeas:
            # get measurement sample frequency
            fs = proj.getMeasSampleFreq(s, meas)
            # check to see if in given frequency list
            if int(fs) not in options["freqs"]:
                continue

            # print measurement info
            proj.printMeasInfo(s, meas)

            # get measurement start and end times
            datetimeStart = proj.getMeasStartTime(s, meas)
            datetimeEnd = proj.getMeasEndTime(s, meas)

            # get data, sensor info, serial info, chopper info for calibration
            reader = proj.getMeasDataReader(s, meas)
            # get data start and end times - these may not be equal to startDate and endDate
            # there is the issue of ats data recording end time as one sample late
            # hence get the actual end time
            dataStartTime, dataEndTime = reader.getDataTimes(
                datetimeStart, datetimeEnd)
            dataChans = reader.getChannels()
            if len(options["chans"]) > 0:
                dataChans = options["chans"]
            # alternatively, could simply do getPhysicalSamples() and get all data that way
            data = reader.getPhysicalData(dataStartTime,
                                          dataEndTime,
                                          chans=dataChans)

            if options["calibrate"]:
                # do the calibration here
                sensors = reader.getSensors(dataChans)
                serials = reader.getSerials(dataChans)
                choppers = reader.getChoppers(dataChans)
                data = cal.calibrate(data, fs, sensors, serials, choppers)

            # notch filter if required
            for n in options["notch"]:
                for c in data:
                    data[c] = notchFilter(data[c], fs, n, n / 5.0)

            # define decimation parameters
            decParams = DecimationParams(fs)
            if len(options["evalfreq"]) == 0:
                decParams.setDecimationParams(options["declevels"],
                                              options["freqlevel"])
            else:
                decParams.setFrequencyParams(options["evalfreq"],
                                             options["declevels"],
                                             options["freqlevel"])
            decParams.printInfo()
            numLevels = decParams.getNumLevels()

            # now do window parameters
            winParams = WindowParams(decParams)
            # winParams.printInfo()

            # create the decimator
            dec = Decimator(data, fs, decParams)
            # dec.printInfo()

            # loop through decimation levels
            for iDec in xrange(0, numLevels):
                # get the data for the current level
                check = dec.incrementLevel()
                if not check:
                    break  # not enough data
                #dec.printInfo()
                data = dec.getData()

                # create the windower and give it window parameters for current level
                fsDec = dec.getSampleFreq()
                win = Windower(datetimeRef, dataStartTime, data, fsDec,
                               winParams.getWindowSize(iDec),
                               winParams.getOverlap(iDec))
                numWindows = win.getNumWindows()
                # win.printInfo()
                if numWindows < 2:
                    break  # do no more decimation

                # create the spectrum calculator and statistics calculators
                specCalc = SpectrumCalculator(fsDec,
                                              winParams.getWindowSize(iDec))

                # get ready a file to save the spectra
                specWrite = SpectrumWriter(proj.getSpecDataPathMeas(s, meas),
                                           datetimeRef)
                specWrite.openBinaryForWriting("spectra", iDec, fsDec,
                                               winParams.getWindowSize(iDec),
                                               winParams.getOverlap(iDec),
                                               win.getGlobalWindowOffset(),
                                               numWindows, dataChans)

                # loop though windows, calculate spectra and save
                for iW in xrange(0, numWindows):
                    # get the window data
                    winData = win.getData(iW)

                    # calculate spectra
                    f, specData = specCalc.calcFourierCoeff(winData)

                    # write out spectra
                    specWrite.writeBinary(specData, iW)

                # close spectra and stat files
                specWrite.closeFile()
Example #19
#peaks, _ = getPeaks(spectrum, min_dist=25, thres=0.3)
from scipy.signal import find_peaks

spectrum /= spectrum.max()
peaks, _ = find_peaks(spectrum, height=0.1, distance=10, width=(0,40))

plt.plot(spectrum)
plt.vlines(peaks, 0, max(spectrum))
plt.show()

if len(peaks) == 0:
    print("No peaks found, try again!")
    exit(1)

c = Calibrator(peaks, atlas)
c.set_fit_constraints(min_slope=0.043,
                      max_slope=0.053,
                      min_intercept=550,
                      max_intercept=650,
                      line_fit_thresh=2)
 
best_p = c.match_peaks_to_atlas(c.fit(2))

if best_p is not None:
    c.plot_fit(spectrum, best_p)

print("Start wavelength: ", best_p[-1])
print("Centre wavelenght: ", polyfit_value(len(spectrum)/2, best_p[::-1]))
print("Dispersion: ", best_p[-2])
Example #20
def projectViewTime(proj, startDate, endDate, **kwargs):
    generalPrint("ProjectViewTime",
                 "Showing time data with options: {}".format(kwargs))
    # get options
    options = parseKeywords(getDefaultOptions(proj), kwargs)
    # format startDate and endDate
    start = datetime.strptime("{}.000".format(startDate),
                              "%Y-%m-%d %H:%M:%S.%f")
    end = datetime.strptime("{}.000".format(endDate), "%Y-%m-%d %H:%M:%S.%f")

    # create a calibrator in case
    cal = Calibrator(proj.getCalDataPath())
    if options["calibrate"]:
        cal.printInfo()

    dataAll = {}
    xAll = {}
    fsAll = {}
    # first need to collect the relevant data
    # loop over sites
    for s in options["sites"]:
        # print site info
        # proj.printSiteInfo(s)

        # get measurement directories for site s
        timeMeas = proj.getSiteTimeFiles(s)

        # get the dictionary ready
        dataAll[s] = {}
        xAll[s] = {}
        fsAll[s] = {}
        # loop over measurement folders and calculate spectra for each one
        for meas in timeMeas:
            # get measurement sample frequency
            fs = proj.getMeasSampleFreq(s, meas)
            # check to see if in given frequency list
            if int(fs) not in options["freqs"]:
                continue

            # now find recordings that are within the recording time
            siteStart = proj.getMeasStartTime(s, meas)
            siteEnd = proj.getMeasEndTime(s, meas)
            if siteEnd < start or siteStart > end:
                continue

            # now get the data
            reader = proj.getMeasDataReader(s, meas)
            reader.printInfo()
            # get the samples of the datetimes
            sampleStart, sampleEnd = reader.time2sample(start, end)
            # and then go back and get the times - this protects against non second sampling
            # as the samples returned from time2sample are rounded
            # using sample2time, we get the appropriate start and end times for those samples
            readStart, readEnd = reader.sample2time(sampleStart, sampleEnd)
            # get the data
            # data = reader.getPhysicalData(readStart, readEnd, chans=options["chans"])
            data = reader.getPhysicalData(readStart,
                                          readEnd,
                                          chans=options["chans"])
            # if calibration is on, calibrate the data
            if options["calibrate"]:
                generalPrint(
                    "ProjectViewTime",
                    "Calibrating time data: site {}, time data {}".format(
                        s, meas))
                sensors = reader.getSensors(reader.getChannels())
                serials = reader.getSerials(reader.getChannels())
                choppers = reader.getChoppers(reader.getChannels())
                data = cal.calibrate(data, fs, sensors, serials, choppers)

            # have the data, now want to calculate the x array
            samples = data[options["chans"][0]].size
            fsDelta = timedelta(seconds=1.0 / fs)
            x = np.empty(shape=(samples), dtype=datetime)
            for i in xrange(0, samples):
                x[i] = readStart + timedelta(seconds=1.0 * i / fs)
            # save the data
            dataAll[s][meas] = data
            xAll[s][meas] = x
            fsAll[s][meas] = fs

    # once all the data has been collected, plot it all
    fig = plt.figure(figsize=options["figsize"])
    plotFonts = options["plotfonts"]
    # suptitle
    st = fig.suptitle("Time data from {} to {}".format(
        start.strftime("%Y-%m-%d %H-%M-%S"),
        end.strftime("%Y-%m-%d %H-%M-%S")),
                      fontsize=plotFonts["suptitle"])
    st.set_y(0.98)
    for s in dataAll:
        for meas in dataAll[s]:
            # apply the filter options
            if options["lpfilt"]:
                dataAll[s][meas] = lpFilter(dataAll[s][meas], fsAll[s][meas],
                                            options["lpfilt"])
            if options["hpfilt"]:
                dataAll[s][meas] = hpFilter(dataAll[s][meas], fsAll[s][meas],
                                            options["hpfilt"])
            if options["bpfilt"]:
                dataAll[s][meas] = hpFilter(dataAll[s][meas], fsAll[s][meas],
                                            options["bpfilt"][0],
                                            options["bpfilt"][1])

            generalPrint(
                "ProjectViewTime", "Plotting {} - {} from {}  to {}".format(
                    s, meas, xAll[s][meas][0], xAll[s][meas][-1]))
            # now plot the data
            for idx, chan in enumerate(options["chans"]):
                plt.subplot(4, 1, idx + 1)
                plotData = dataAll[s][meas][chan]
                if options["normalise"]:  # then normalise the data
                    plotData = plotData / np.linalg.norm(plotData)
                plt.plot(xAll[s][meas],
                         plotData,
                         label="{} - {}".format(s, meas))

    for idx, chan in enumerate(options["chans"]):
        ax = plt.subplot(4, 1, idx + 1)
        plt.title("Channel {}".format(chan), fontsize=plotFonts["title"])
        plt.grid()
        if idx == len(options["chans"]) - 1:
            plt.xlabel("Time", fontsize=plotFonts["axisLabel"])
        # limit the x-axis
        plt.xlim([start, end])
        # do the yaxis
        if isElectric(chan):
            plt.ylabel("mV/km", fontsize=plotFonts["axisLabel"])
            if len(options["Eylim"]) > 0:
                plt.ylim(options["Eylim"])
        else:
            if options["calibrate"]:
                plt.ylabel("nT", fontsize=plotFonts["axisLabel"])
            else:
                plt.ylabel("mV", fontsize=plotFonts["axisLabel"])
            if len(options["Hylim"]) > 0:
                plt.ylim(options["Hylim"])
        plt.legend(fontsize=plotFonts["legend"])
        # set tick sizes
        for label in (ax.get_xticklabels() + ax.get_yticklabels()):
            label.set_fontsize(plotFonts["axisTicks"])

    plt.tight_layout()
    # shift subplots down, make room for suptitle
    fig.subplots_adjust(top=0.92)
    if options["save"]:
        fig.savefig(
            os.path.join(
                proj.getImageDataPath(),
                "timeData_{}_{}".format(start.strftime("%Y-%m-%d_%H-%M-%S_"),
                                        end.strftime("%Y-%m-%d_%H-%M-%S"))))
    if options["show"]:
        plt.show()
    plt.close("all")
    return fig
Example #21
#calibrate

from calibrator import Calibrator

calib = Calibrator()
calib.CalibrateData()
Example #22
                    'param':resultMoment['Parameter'],
                    'paramStd':resultMoment['Parameter_Std'],
                    'scatterUncert':[resultMoment['Max95%Scatter_Uncertainty'][0]],
                    'meanUncert':[resultMoment['Max95%Mean_Uncertainty'][0]]})

else:
    # NOTE: SG Calibration
    workingDirectory=r'C:\Users\Richard\Google Drive\Berkeley\Berkeley Classes\Fall 2018\ME 103\Lab 4-5\ME103\10.26.2018'
    settings={'dir': workingDirectory,
                'x': 'Ch1_Fz', 'y': 'Weight',
                'xUnit': '[volts]', 'yUnit': '[Newton]',
                'xUncert': 0.0, 'yUncert': weightUncert,
                'target_file': 'strain_gauge_cal.csv', 'auto': True,
                'eqn': 'p1'}
    customEquation=None
    botSG=Calibrator(**settings)
    botSG.read_data()
    discrete_fit_data_SG=decorator(botSG, botSG.discrete_fit_data, h=lambda y: -y/1000*9.81)
    resultSG=discrete_fit_data_SG(customEquation)
    SGData=[
            ['Parameter', 'Parameter_Std', 'R2', 'Max95%Scatter_Uncertainty', 'Max95%Mean_Uncertainty'],
            [resultSG['param'][0], resultSG['paramStd'][0], resultSG['R2'], max(resultSG['scatterUncert']), max(resultSG['meanUncert'])],
            [resultSG['param'][1], resultSG['paramStd'][1], '', '', '']
            ]

    # NOTE: AOA Calibration
    workingDirectory=r'C:\Users\Richard\Google Drive\Berkeley\Berkeley Classes\Fall 2018\ME 103\Lab 4-5\ME103\10.26.2018\Angle Attack Cali'
    settings={'dir': workingDirectory,
                'x': 'Ch4_a_control', 'y': 'Angle_of_Attack',
                'xUnit': '[volts]', 'yUnit': '[degree]',
                'xUncert': 0.0, 'yUncert': levelUncert,
Example #23
 def scan(self):
     cmat, dist_ceofs, \
     rmat, tmat, _ = Calibrator().calibration_data.load_calibrations()
Example #24
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network(1) as network,\
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit('ONNX file {} not found'.format(onnx_file_path))
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
                assert network.num_layers > 0, 'Failed to parse ONNX model. \
                            Please check if the ONNX model is compatible '

            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.
                  format(onnx_file_path))

            # build trt engine
            builder.max_batch_size = max_batch_size
            if not dynamic:
                builder.max_workspace_size = 1 << 30  # 1GB
                builder.fp16_mode = fp16_mode
                if int8_mode:
                    builder.int8_mode = int8_mode
                    assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                    builder.int8_calibrator = Calibrator(
                        ["input"], calibration_stream, calibration_table_path)
                    print('Int8 mode enabled')
                engine = builder.build_cuda_engine(network)
            else:
                config = builder.create_builder_config()
                config.max_workspace_size = 1 << 30  # 1GB
                profile = builder.create_optimization_profile()
                profile.set_shape(
                    network.get_input(0).name, (1, 3, 200, 200),
                    (1, 3, 608, 448), (1, 3, 1200, 1200))
                # dynamic_engine fp16 set
                if fp16_mode:
                    config.set_flag(trt.BuilderFlag.FP16)
                # dynamic_engine int8 set
                if int8_mode:
                    config.set_flag(trt.BuilderFlag.INT8)
                    assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                    # choose a calibration profile
                    # config.set_calibration_profile(profile)
                    config.int8_calibrator = Calibrator(["input"],
                                                        calibration_stream,
                                                        calibration_table_path)
                    print('Int8 mode enabled')
                # choose an optimization profile
                config.add_optimization_profile(profile)
                engine = builder.build_engine(network, config)
            # If errors happened when executing builder.build_cuda_engine(network),
            # a None-Type object would be returned
            if engine is None:
                print('Failed to create the engine')
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
Example #25
if __name__ == "__main__":
    image_size = (image_width, image_height)

    # imgLeft = downsample_image(cv2.imread("./input_images/aloeL.jpg"), 1)
    # imgRight = downsample_image(cv2.imread("./input_images/aloeR.jpg"), 1)
    imgLeft = downsample_image(cv2.imread("./input_images/left19.jpg"), 3)
    imgRight = downsample_image(cv2.imread("./input_images/right19.jpg"), 3)

    height_left, width_left = imgLeft.shape[:2]
    height_right, width_right = imgRight.shape[:2]

    if 0 in [width_left, height_left, width_right, height_right]:
        print("Error: Can't remap image.")

    calibrator = Calibrator(6, 9)
    calibrator.load_parameters()
    imgLeft = calibrator.undistort_image(imgLeft)
    imgRight = calibrator.undistort_image(imgRight)

    # cv2.imshow('Left CALIBRATED', imgLeft)
    # cv2.imshow('Right CALIBRATED', imgRight)
    #
    # cv2.waitKey(0)

    # Rectify both image
    threshold = 0.65
    imgLeft_r, imgRight_r = rectify_stereo_pair_uncalibrated(
        imgLeft, imgRight, threshold)

    stereo = SGBMTuner(imgLeft_r, imgRight_r)
Example #26
def projectGetTime(proj, site, meas, **kwargs):
	# print project information
	proj.printInfo()

	# print site info
	proj.printSiteInfo(site)

	# print measurement info
	proj.printMeasInfo(site, meas)

	# get measurement sample frequency
	fs = proj.getMeasSampleFreq(site, meas)
	# get measurement start and end times
	datetimeStart = proj.getMeasStartTime(site, meas)
	datetimeEnd = proj.getMeasEndTime(site, meas)
	# get the measurement reader and chans
	reader = proj.getMeasDataReader(site, meas)
	chans = reader.getChannels()

	# set the default parameters
	decimation = [1]
	calibrate = False
	notch = []
	detrend = True

	# apply options
	if "chans" in kwargs:
		chans = kwargs["chans"]
	if "start" in kwargs:
		datetimeStart = datetime.strptime(kwargs["start"], '%Y-%m-%d %H:%M:%S')
	if "stop" in kwargs:
		datetimeEnd = datetime.strptime(kwargs["stop"], '%Y-%m-%d %H:%M:%S')
	if "decimation" in kwargs:
		if not isinstance(kwargs["decimation"], list):
			decimation = [kwargs["decimation"]]
		else:
			decimation = kwargs["decimation"]
	if "calibrate" in kwargs:
		calibrate = kwargs["calibrate"]	
	if "detrend" in kwargs:
		calibrate = kwargs["detrend"]			
	if "notch" in kwargs:
		notch = kwargs["notch"]			

	# get correct data start and end times
	dataStartTime, dataEndTime = reader.getDataTimes(datetimeStart, datetimeEnd)

	# get data, sensor info, serial info, chopper info for calibration
	data = reader.getPhysicalData(chans, dataStartTime, dataEndTime)
	sensors = reader.getSensors(chans)
	serials = reader.getSerials(chans)
	choppers = reader.getChoppers(chans)

	if calibrate:
		cal = Calibrator(proj.getCalDataPath())
		cal.printInfo()	
		# calibrate		
		data = cal.calibrate(data, fs, sensors, serials, choppers)

	# notch filter if required
	for n in notch:
		for c in data:
			data[c] = notchFilter(data[c], fs, n, n/5.0)		

	# lists for saving the data
	timeData = []
	timeX = []
	specData = []
	specX = []
	sampleFreqs = []

	# set fsDec
	fsDec = fs
	# now for each decimation, get data
	for d in decimation:
		# decimate
		data = downsampleTime(data, d, 51)
		fsDec = fsDec/d
		size = data[chans[0]].size
		# create spectrum calculator
		specCalc = SpectrumCalculator(fsDec, size)			

		# now calculate the spectrum
		f, fData = specCalc.calcFourierCoeff(data)

		# save data
		if detrend:
			for c in list(data.keys()):
				data[c] = signal.detrend(data[c], type="linear")	
		timeData.append(copy.deepcopy(data))
		timeX.append([dataStartTime, dataEndTime])
		specData.append(fData)
		specX.append(f)
		sampleFreqs.append(fsDec)	

	return timeX, timeData, specX, specData
Example #27
    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        # 1 << NetworkDefinitionCreationFlag.EXPLICIT_BATCH
        with trt.Builder(TRT_LOGGER) as builder, \
                builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) as network, \
                trt.OnnxParser(network, TRT_LOGGER) as parser:

            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit('ONNX file {} not found'.format(onnx_file_path))
            print('Loading ONNX file from path {}...'.format(onnx_file_path))
            with open(onnx_file_path, 'rb') as model:
                print('Beginning ONNX file parsing')
                parser.parse(model.read())
                # modify the original onnx network
                count = 0
                for i in range(network.num_layers):
                    net = network.get_layer(i)
                    if (net.type == trt.LayerType.CONVOLUTION
                            and 'Conv' in net.name):
                        count = count + 1
                        # network.mark_output(net.get_output(0))
                        f_layer.write('----1----network.num_layers:' +
                                      str(network.num_layers) +
                                      '  .the number of the conv is:' +
                                      str(count) + '\n')
                        f_layer.write(net.name + '\n')
                        f_layer.write(str(net.type) + '\n')
                        # network.unmark_output(net.get_output(0))
                        activate = network.add_activation(
                            input=net.get_output(0),
                            type=trt.ActivationType.CLIP)  # return a layer
                        if strategy is None:
                            print('strategy error!')
                        activate.beta = pow(2, strategy[count - 1] - 1) - 1
                        activate.alpha = -pow(2, strategy[count - 1] - 1)
                        activate.name = 'CLIP_%d' % i
                        f_layer.write(activate.name + ' beta is ' +
                                      str(activate.beta) + ' and alpha is ' +
                                      str(activate.alpha) + '\n')
                        # get the layer next to conv,and input the output of the activation layer.
                        net_next = network.get_layer(i + 1)
                        net_next.set_input(0, activate.get_output(0))
                        f_layer.write(net_next.name + '\n')
                        f_layer.write(str(net_next.type) + '\n')

                        net_next2 = network.get_layer(i + 2)
                        net_next2.set_input(0, net_next.get_output(0))
                        net_next2.set_input(1, activate.get_output(0))
                        f_layer.write(net_next2.name + '\n')
                        f_layer.write(str(net_next2.type) + '\n')
                f_layer.write('----2----network.num_layers:' +
                              str(network.num_layers) + '\n')
                assert network.num_layers > 0, 'Failed to parse ONNX model. \
                            Please check if the ONNX model is compatible '

            print('Completed parsing of ONNX file')
            print('Building an engine from file {}; this may take a while...'.
                  format(onnx_file_path))

            # build trt engine
            engine = None  # stays None if no precision mode flag is set
            if int4_mode:
                builder.max_batch_size = max_batch_size
                builder.int8_mode = int4_mode
                builder.max_workspace_size = 1 << 30  # 1GB
                assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                builder.int8_calibrator = Calibrator(calibration_stream,
                                                     calibration_table_path)
                engine = builder.build_cuda_engine(network)
                # config=builder.create_builder_config()
                # config.max_workspace_size = 1 << 30  #
                # config.flags = 1 << int(trt.BuilderFlag.INT8)  #
                # assert calibration_stream, 'Error: a calibration_stream should be provided for int8 mode'
                # config.int8_calibrator = Calibrator(calibration_stream, calibration_table_path)
                # engine = builder.build_engine(network, config)
                print('Int4 mode enabled')
            if fp16_mode:
                builder.strict_type_constraints = True
                builder.max_batch_size = max_batch_size
                builder.max_workspace_size = 1 << 30  # 1GB
                builder.fp16_mode = fp16_mode
                engine = builder.build_cuda_engine(network)
                print('fp16 modify mode enabled')
            if fp32_mode:
                builder.max_batch_size = max_batch_size
                builder.max_workspace_size = 1 << 30  # 1GB
                engine = builder.build_cuda_engine(network)
                print('fp32 modify mode enabled')
            if engine is None:
                print('Failed to create the engine')
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine
Example #28
import cv2
import numpy as np
import time
import csv
from calibrator import Calibrator
from tracker import Tracker
from graph import Graph

cal = Calibrator()
eyes, lower, upper = cal.calibrate()

cap = cv2.VideoCapture(0)
prev_x = 0
prev_y = 0

data = []

while (True):
    ret, frame = cap.read()

    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)

    track = Tracker()
    match = track.find_eyes(eyes, frame, lower, upper)

    total_x = 0
    total_y = 0

    avg_x = 0
    avg_y = 0