Example #1
File: bin.py Project: clones/kaa
 def set_property(self, prop, value):
     if prop == 'chanlist':
         if not value in CHANLIST:
             raise AttributeError('unknown chanlist %s' % value)
         self._chanlist = value
         return True
     if prop == 'frequency':
         return self._tuner.setfreq(value)
     if prop == 'channel':
         if not self._chanlist:
             raise AttributeError('chanlist not set')
         if not value in CHANLIST[self._chanlist]:
             raise AttributeError('unknown channel %s' % value)
         return self._tuner.setfreq(CHANLIST[self._chanlist][value])
     if prop == 'device':
         self._device = value
         self._tuner = Tuner(self._device, self._norm)
         return self._src.set_property('device', self._device)
     if prop == 'norm':
         if not value.upper() in ('NTSC', 'PAL'):
             raise AttributeError('unknown norm %s' % value)
         self._norm = value.upper()
         self._tuner = Tuner(self._device, self._norm)
         return self._src.set_property('device', self._device)
     raise AttributeError
Example #2
File: bin.py Project: clones/kaa
class V4Lsrc(gst.Bin):
    def __init__(self):
        gst.Bin.__init__(self, 'v4lsrcbin_%d')
        self._device = '/dev/video0'
        self._norm = 'NTSC'
        self._tuner = Tuner(self._device, self._norm)
        self._src = gst.element_factory_make('v4lsrc')
        self._src.set_property('device', self._device)
        self._queue = gst.element_factory_make('queue')
        self.add(self._src, self._queue)

        # FIXME: make this a property
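        # Restrict the v4lsrc -> queue link to 720x576 raw YUV frames via a caps filter.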
        size = 'width=%s,height=%s' % (720, 576)
        caps = gst.structure_from_string('video/x-raw-yuv,%s' % size)
        self._src.link_pads_filtered('src', self._queue, 'sink', gst.Caps(caps))

        pad = self._queue.get_pad('src')
        self._ghost = gst.GhostPad(pad.get_name(), pad)
        self.add_pad(self._ghost)
        self._chanlist = None


    def set_property(self, prop, value):
        if prop == 'chanlist':
            if not value in CHANLIST:
                raise AttributeError('unknown chanlist %s' % value)
            self._chanlist = value
            return True
        if prop == 'frequency':
            return self._tuner.setfreq(value)
        if prop == 'channel':
            if not self._chanlist:
                raise AttributeError('chanlist not set')
            if not value in CHANLIST[self._chanlist]:
                raise AttributeError('unknown channel %s' % value)
            return self._tuner.setfreq(CHANLIST[self._chanlist][value])
        if prop == 'device':
            self._device = value
            self._tuner = Tuner(self._device, self._norm)
            return self._src.set_property('device', self._device)
        if prop == 'norm':
            if not value.upper() in ('NTSC', 'PAL'):
                raise AttributeError('unknown norm %s' % value)
            self._norm = value.upper()
            self._tuner = Tuner(self._device, self._norm)
            return self._src.set_property('device', self._device)
        raise AttributeError


    def get_request_pad(self, type):
        if type == 'video':
            return self._ghost
        raise AttributeError
Example #3
 def __init__(self, topleft, music_handler, playlists, frequencies):
     self.rect = self.bg_image.get_rect(topleft=topleft)
     self.handler = music_handler
     self.stations = OrderedDict()
     for p, f in zip(playlists, frequencies):
         s = RadioStation(p, f)
         self.stations[f] = s
     self.tuner_frequency = 1
     self.current_station = self.stations[frequencies[0]]
     self.static_volume = 0.
     self.tuner = Tuner((self.rect.left + 128, self.rect.top + 20),
                        frequencies)
     r = self.tuner.rect
     self.volume_knob = Potentiometer((r.left - 50, r.centery + 50))
Example #4
    def connect(self):
        # train SOM
        ordered = self.conn_fun(self.data, self.size*1000)

        # Needed for the tuner, which expects an array of complex numbers as input
        z = np.array([complex(p[0], p[1]) for p in np.transpose(np.asarray(ordered)[:2, :])])
        order = range(self.size)

        # Some shapes of points (e.g., angles) are difficult for the SOM.
        # Use a little postprocessing for tuning.
        tuner = Tuner(z)
        order = tuner.reorder(order)

        result = np.take(z, order)
        result = np.array([[z.real, z.imag] for z in result])

        return [result]
Example #5
def main(argv=None):
    # Bind signal handler
    signal.signal(signal.SIGINT, signal_handler)

    # Collect all input device ids
    is_input = lambda id: PyAudio().get_device_info_by_host_api_device_index(
        0, id).get('maxInputChannels') > 0
    all_devs = range(
        0,
        PyAudio().get_host_api_info_by_index(0).get('deviceCount'))
    in_devs = list(filter(is_input, all_devs))

    # Parse args
    parser = argparse.ArgumentParser(prog='tuner')
    parser.add_argument('-v', '--verbose', help='Verbose', action='store_true')
    parser.add_argument('-l',
                        '--listdevices',
                        help='List input devices',
                        action='store_true')
    parser.add_argument('-d',
                        '--device',
                        help='Input device id',
                        type=int,
                        choices=in_devs,
                        default=0)
    args = parser.parse_args(argv)

    # List input devices
    if args.listdevices:
        print('id    name')
        for id in in_devs:
            dev_info = PyAudio().get_device_info_by_host_api_device_index(
                0, id)
            print(f' { id }    { dev_info.get("name") }')

        return

    # Run tuner
    t = Tuner(args.device)
    t.go(args.verbose)
Example #6
    def connect(self):
        # train SOM
        self._normalize()
        self._train(self.size*100, lrate=0.99, sigma_init=self.size)
        self._train(self.size*250, lrate=0.99, sigma_init=2)
        self._denormalyze()

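        # Group point indices by their best-matching unit (BMU) on the trained SOM.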
        ordered = {}
        for point_id in range(len(self.z)):
            bmu = self._BMU_idx(self.z[point_id])
            try:
                ordered[bmu].append(point_id)
            except KeyError:
                ordered[bmu] = [point_id]

        order = []
        for i in range(self.size):
            try:
                pnts = ordered[i]
                if len(pnts) != 1:
                    for point_id in pnts:
                        order.append(point_id)
                else:
                    order.append(pnts[0])
            except KeyError:
                # It's OK if self.size > len(self.z)
                pass

        # Some shapes of points (e.g., angles) are difficult for the SOM.
        # Use a little postprocessing for tuning.
        tuner = Tuner(self.z)
        order = tuner.reorder(order)

        result = np.take(self.z, order)
        result = np.array([[z.real, z.imag] for z in result])

        return result
Example #7
    def connect(self):
        # train SOM
        self._normalize()
        self._train(self.size * 100, lrate=0.99, sigma_init=self.size)
        self._train(self.size * 250, lrate=0.99, sigma_init=2)
        self._denormalyze()

        ordered = {}
        for point_id in range(len(self.z)):
            bmu = self._BMU_idx(self.z[point_id])
            try:
                ordered[bmu].append(point_id)
            except KeyError:
                ordered[bmu] = [point_id]

        order = []
        for i in range(self.size):
            try:
                pnts = ordered[i]
                if len(pnts) != 1:
                    for point_id in pnts:
                        order.append(point_id)
                else:
                    order.append(pnts[0])
            except KeyError:
                # It's OK if self.size > len(self.z)
                pass

        # Some shapes of points (e.g., angles) are difficult for the SOM.
        # Use a little postprocessing for tuning.
        tuner = Tuner(self.z)
        order = tuner.reorder(order)

        result = np.take(self.z, order)
        result = np.array([[z.real, z.imag] for z in result])

        return [result]
Example #8
def main():
    amp = Amplifier("Top-O-Line-Amplifier")
    tuner = Tuner("Top-O-Line AM/FM Tuner", amp)
    dvd_player = DvdPlayer("Top-O-Line DVD Player", amp)
    cd_player = CDPlayer("Top-O-Line CD Player", amp)
    projector = Projector("Top-O-Line Projector", dvd_player)
    lights = TheaterLights("Theater Ceiling Lights")
    screen = Screen("Theater Screen")
    popper = PopcornPopper("Popcorn Popper")

    home_theater = HomeTheaterFacade(amp, tuner, dvd_player, cd_player,
                                     projector, screen, lights, popper)
    home_theater.watch_movie("Lord of the Rings")
    print()
    home_theater.end_movie()
    print()
Example #9
File: bin.py Project: clones/kaa
    def __init__(self):
        gst.Bin.__init__(self, 'v4lsrcbin_%d')
        self._device = '/dev/video0'
        self._norm = 'NTSC'
        self._tuner = Tuner(self._device, self._norm)
        self._src = gst.element_factory_make('v4lsrc')
        self._src.set_property('device', self._device)
        self._queue = gst.element_factory_make('queue')
        self.add(self._src, self._queue)

        # FIXME: make this a property
        size = 'width=%s,height=%s' % (720, 576)
        caps = gst.structure_from_string('video/x-raw-yuv,%s' % size)
        self._src.link_pads_filtered('src', self._queue, 'sink', gst.Caps(caps))

        pad = self._queue.get_pad('src')
        self._ghost = gst.GhostPad(pad.get_name(), pad)
        self.add_pad(self._ghost)
        self._chanlist = None
Example #10
 def set_practise(self, on=True):
     """ Turn practise mode on/off """
     self.practise = on
     self.tuner = Tuner(self.path)  # initiates at practise on, and stays.
Example #11
 def on_tuner_btn_clicked(self, button):
     tuner = Tuner([16, 21, 26, 31, 35, 40])
     tuner.run()
     tuner.destroy()
Example #12
class Radio(object):
    static = prepare.SFX["static"]
    sweet_spot = .5
    station_range = 3.0
    bg_image = prepare.GFX["radio-bg"]

    def __init__(self, topleft, music_handler, playlists, frequencies):
        self.rect = self.bg_image.get_rect(topleft=topleft)
        self.handler = music_handler
        self.stations = OrderedDict()
        for p, f in zip(playlists, frequencies):
            s = RadioStation(p, f)
            self.stations[f] = s
        self.tuner_frequency = 1
        self.current_station = self.stations[frequencies[0]]
        self.static_volume = 0.
        self.tuner = Tuner((self.rect.left + 128, self.rect.top + 20),
                           frequencies)
        r = self.tuner.rect
        self.volume_knob = Potentiometer((r.left - 50, r.centery + 50))

    def get_event(self, event):
        self.volume_knob.get_event(event)
        self.tuner.get_event(event)

    def update(self, dt, mouse_pos):
        self.volume_knob.update(mouse_pos)
        self.tuner.update(mouse_pos)
        self.get_signal_strength()
        self.handler.volume = self.volume_knob.output
        self.handler.set_volume()
        for freq in self.stations:
            self.stations[freq].update(dt)

    def get_signal_strength(self):
        f = self.tuner.frequency
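        # Track the closest station at or below (low) and above (high) the
        # current tuner frequency as (frequency, distance) pairs.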
        low = None
        high = None
        for freq in self.stations.keys():
            if f >= freq:
                if low is None:
                    low = (freq, f - freq)
                elif f - freq < low[1]:
                    low = (freq, f - freq)
            elif f <= freq:
                if high is None:
                    high = (freq, freq - f)
                elif freq - f < high[1]:
                    high = (freq, freq - f)
            if low is not None and high is not None:
                if low[1] <= high[1]:
                    self.set_static_volume(*low)
                else:
                    self.set_static_volume(*high)

    def set_static_volume(self, current_station, freq_distance):
        f = self.tuner_frequency
        if self.stations[current_station] != self.current_station:
            self.current_station = self.stations[current_station]
            song, pos = self.current_station.set_pos()
            self.handler.load(song)
            self.handler.play(pos)
        if freq_distance <= self.sweet_spot:
            self.static_volume = 0.
        elif freq_distance <= self.station_range:
            self.static_volume = min(1.0, (freq_distance - self.sweet_spot) /
                                     float(self.station_range))
        elif freq_distance > self.station_range:
            self.static_volume = 1.0
        self.static.set_volume(self.static_volume * self.handler.volume)
        if self.static_volume:
            self.static.play(-1)
        else:
            self.static.stop()

    def draw(self, surface):
        surface.blit(self.bg_image, self.rect)
        self.volume_knob.draw(surface)
        self.tuner.draw(surface)
Example #13
 def runTuning(self,
               config_num,
               tuning_mode='random_sk',
               read=True,
               from_hdf=True):
     if not self.gen:
         df = self.initialize(self.addons_config_reco,
                              read=read,
                              from_hdf=from_hdf)
     else:
         df = self.initialize(self.addons_config_gen,
                              read=read,
                              from_hdf=from_hdf)
     X_train, X_test, y_train, y_test = self.configure(df, config_num)
     print(
         f'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Tuning on config {config_num}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
     )
     tuner = Tuner(mode=tuning_mode)
     if tuning_mode in {'random_sk', 'grid_search_cv'}:
         model_grid, grid_result, param_grid = tuner.tune(
             X_train, y_train, X_test, y_test)
         self.layers = grid_result.best_params_['layers']
         self.batch_norm = grid_result.best_params_['batch_norm']
         self.dropout = grid_result.best_params_['dropout']
         self.epochs = grid_result.best_params_['epochs']
         self.batchsize = grid_result.best_params_['batch_size']
         self.learning_rate = grid_result.best_params_['learning_rate']
         self.activation = grid_result.best_params_['activation']
         self.initializer_std = grid_result.best_params_['initializer_std']
         self.nodes = grid_result.best_params_[
             'nodes']  # !!! Kristof's edit on 25 Feb trying to fix the random_sk with the extra hyperparameters
         self.model_str = 'grid_model'
         grid_best_score = grid_result.best_score_
         self.model = tuner.gridModel(layers=self.layers,
                                      batch_norm=self.batch_norm,
                                      dropout=self.dropout)
         print("Best: %f using %s" %
               (grid_result.best_score_, grid_result.best_params_))
         means = grid_result.cv_results_['mean_test_score']
         stds = grid_result.cv_results_['std_test_score']
         params = grid_result.cv_results_['params']
         for mean, stdev, param in zip(means, stds, params):
             print("%f (%f) with: %r" % (mean, stdev, param))
     elif tuning_mode in {'hyperband', 'bayesian', 'random_kt'}:
         self.model, best_hps, param_grid = tuner.tune(
             X_train, y_train, X_test, y_test)
         # hard coded epochs, batchsize - KerasTuner doesn't search over this parameter space
         self.epochs = 50
         self.batchsize = 10000
         self.layers = best_hps.get('num_layers')
         self.batch_norm = best_hps.get('batch_norm')
         self.dropout = best_hps.get('dropout')
         self.learning_rate = best_hps.get('learning_rate')
         self.activation = best_hps.get('activation')
         self.initializer_std = best_hps.get('initializer_std')
         self.model_str = 'hyper_model'
         grid_best_score = None
     elif tuning_mode in {'hyperopt'}:
         self.model, best_params, param_grid = tuner.tune(
             X_train, y_train, X_test, y_test)
         self.nodes = int(best_params['nodes'])
         self.layers = int(best_params['num_layers'])
         self.batch_norm = best_params['batch_norm']
         self.dropout = best_params['dropout']
         self.epochs = int(best_params['epochs'])
         self.batchsize = int(best_params['batch_size'])
         self.learning_rate = best_params['learning_rate']
         self.activation = best_params['activation']
         self.initializer_std = best_params['initializer_std']
         self.model_str = 'hyperopt_model'
         grid_best_score = None
     else:
         raise ValueError('Tuning mode not understood')
     model = self.train(X_train,
                        X_test,
                        y_train,
                        y_test,
                        epochs=self.epochs,
                        batch_size=self.batchsize,
                        verbose=0)
     if self.binary:
         auc = self.evaluateBinary(model, X_test, y_test, self.history)
     else:
         w_a = df.w_a
         w_b = df.w_b
         auc = self.evaluate(model, X_test, y_test, self.history, w_a, w_b)
     if not self.gen:
         file = f'{self.write_dir}/tuning_reco_{self.channel}.txt'
     else:
         file = f'{self.write_dir}/tuning_gen_{self.channel}.txt'
     self.model.save(f'{self.write_dir}/tuning_model_{self.channel}.h5')
     with open(file, 'a+') as f:
         print(f'Writing HPs to {file}')
         time_str = datetime.datetime.now().strftime('%Y/%m/%d|%H:%M:%S')
         # message = f'{time_str},{auc},{self.config_num},{self.layers},{self.batch_norm},{self.dropout},{self.epochs},{self.batchsize},{tuning_mode},{grid_best_score},{param_grid}\n'
         message = f'{time_str},{auc},{self.config_num},{self.nodes},{self.layers},{self.batch_norm},{self.dropout},{self.epochs},{self.batchsize},{tuning_mode},{grid_best_score},{self.learning_rate},{self.activation},{self.initializer_std},{param_grid}\n'
         print(f"Message: {message}")
         f.write(message)
     model_save_str = f'./saved_models/{self.channel}/model_{config_num}'
     model.save(model_save_str)
Example #14
from amplifier import Amplifier
from cd_player import CdPlayer
from dvd_player import DvdPlayer
from home_theater_facade import HomeTheaterFacade
from tuner import Tuner

amplifier = Amplifier()
cd = CdPlayer()
dvd = DvdPlayer()
tuner = Tuner()

facade = HomeTheaterFacade(tuner, amplifier, cd, dvd)

facade.watch_movie()
facade.listen_to_cd()
facade.listen_to_radio()
facade.end_radio()
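
For context, below is a minimal sketch of what the HomeTheaterFacade used above might look like; the component classes are not shown in this snippet, so the delegated methods (on(), off(), play()) are hypothetical.

class HomeTheaterFacade(object):
    """Hypothetical facade: one simple interface over several components."""

    def __init__(self, tuner, amplifier, cd, dvd):
        self.tuner = tuner
        self.amplifier = amplifier
        self.cd = cd
        self.dvd = dvd

    def watch_movie(self):
        # The facade runs the individual steps so the caller does not have to.
        self.amplifier.on()
        self.dvd.on()
        self.dvd.play()

    def listen_to_cd(self):
        self.amplifier.on()
        self.cd.on()
        self.cd.play()

    def listen_to_radio(self):
        self.amplifier.on()
        self.tuner.on()

    def end_radio(self):
        self.tuner.off()
        self.amplifier.off()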
Example #15
 def runWithSmearing(self,
                     config_num,
                     features: list,
                     from_hdf=True,
                     sample=False):
     """ 
     Need to update smearing_hp.txt to get hyperparameter for tuning 
     Trying for config 1.6 smearing first
     """
     if not self.gen:
         print('CHANGING GEN TO TRUE - REQUIRED FOR SMEARING')
         self.gen = True
     print('SETTING SAVE_ALPHA TO FALSE')
     self.addons_config_gen['neutrino']['save_alpha'] = False
     self.addons_config_gen['neutrino']['load_alpha'] = False
     df = self.initializeWithSmear(features,
                                   self.addons_config_gen,
                                   from_hdf=from_hdf,
                                   sample=sample)
     # get config 3.9 model if not rho_rho, if rho_rho, get 3.2
     with open(f'./NN_output/smearing_hp_{config_num}.txt', 'r') as fh:
         num_list = [line for line in fh]
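      # Parse one CSV line of hyperparameters; the field order matches the
      # record written by runTuning in Example #13.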
     if self.channel == 'rho_rho':
         nn_arch = num_list[0].split(',')
     elif self.channel == 'rho_a1':
         nn_arch = num_list[1].split(',')
     else:
         nn_arch = num_list[2].split(',')
     optimal_auc = float(nn_arch[1])
     nodes = int(nn_arch[3])
     num_layers = int(nn_arch[4])
     batch_norm = bool(nn_arch[5])
     dropout = float(nn_arch[6])
     epochs = int(nn_arch[7])
     batch_size = int(nn_arch[8])
     learning_rate = float(nn_arch[11])
     activation = str(nn_arch[12])
     initializer_std = float(nn_arch[13])
     params = {
         'nodes': nodes,
         'num_layers': num_layers,
         'batch_norm': batch_norm,
         'dropout': dropout,
         'epochs': epochs,
         'batch_size': batch_size,
         'learning_rate': learning_rate,
         'activation': activation,
         'initializer_std': initializer_std,
     }
     print(f'Training with {params}')
     tuner = Tuner()
     self.model, _ = tuner.hyperOptModelNN(params)
     X_train, X_test, y_train, y_test = self.configure(df, config_num)
     model = self.train(X_train,
                        X_test,
                        y_train,
                        y_test,
                        epochs=epochs,
                        batch_size=batch_size)
     # model = self.train(X_train, X_test, y_train, y_test, epochs=50, batch_size=10000)
     if self.binary:
         auc = self.evaluateBinary(model,
                                   X_test,
                                   y_test,
                                   self.history,
                                   plot=False)
     else:
         w_a = df.w_a
         w_b = df.w_b
         auc = self.evaluate(model,
                             X_test,
                             y_test,
                             self.history,
                             w_a,
                             w_b,
                             plot=False)
     return auc, optimal_auc
Example #16
    def hp_search(self):
        if not self.remote:
            if self.opt_model.max_instances_at_once > torch.cuda.device_count():
                raise Exception(''' 'max_instances_at_once' must be smaller or equal to the number of available gpus''')
        if not hasattr(self.opt_model, 'name'):
            logger.info("no 'update_optimal_model' method, checking for model.txt file . . . ")
            self.update_optimal_model()
        # initialize the tuner and the launcher ('gun')
        ongoing_trials = OngoingTrials()
        tuner = Tuner(self.opt_model, ongoing_trials)
        gun = Launcher(self.opt_model, ongoing_trials, remote=self.remote)
        logger.info('commencing hyper-parameter search . . . ')
        tuner.search_hp()
        gun.launch_trials()
        tuner.end_trial()
        # starting second set of trials
        tuner.search_hp()
        while ongoing_trials.status != 'STOPPED':
            gun.launch_trials()
            tuner.end_trial()
            # starting next set of trials
            tuner.search_hp()

        best_trial = tuner.get_best_trial()
        logger.info('best trial: ' + json.dumps(best_trial))
        if os.path.exists(self.path_to_best_trial):
            logger.info('overwriting best_trial.json . . .')
            os.remove(self.path_to_best_trial)
        with open(self.path_to_best_trial, 'w') as fp:
            json.dump(best_trial, fp)
            logger.info('results saved to best_trial.json')
Example #17
    def hp_search(self):
        if not self.remote:
            if self.opt_model.max_instances_at_once > torch.cuda.device_count(
            ):
                raise Exception(
                    ''' 'max_instances_at_once' must be smaller or equal to the number of available gpus'''
                )
        if not hasattr(self.opt_model, 'name'):
            logger.info(
                "no 'update_optimal_model' method, checking for model.txt file . . . "
            )
            self.update_optimal_model()
        # initialize the tuner and the launcher ('gun')
        ongoing_trials = OngoingTrials()
        tuner = Tuner(self.opt_model, ongoing_trials)
        gun = Launcher(self.opt_model, ongoing_trials, remote=self.remote)
        logger.info('commencing hyper-parameter search . . . ')
        tuner.search_hp()
        gun.launch_trials()
        tuner.end_trial()
        # starting second set of trials
        tuner.search_hp()
        while ongoing_trials.status != 'STOPPED':
            gun.launch_trials()
            tuner.end_trial()
            # starting next set of trials
            tuner.search_hp()

        trials = tuner.get_trials()
        sorted_trial_ids = tuner.get_sorted_trial_ids()

        string1 = self.path_to_best_checkpoint.split('.')[0]
        for i in range(len(sorted_trial_ids)):
            save_checkpoint_location = string1 + str(i) + '.pt'
            if os.path.exists(save_checkpoint_location):
                logger.info('overwriting checkpoint . . .')
                os.remove(save_checkpoint_location)
            logger.info('trial ' + sorted_trial_ids[i] + '\tval: ' +
                        str(trials[sorted_trial_ids[i]]['metrics']))
            torch.save(trials[sorted_trial_ids[i]]['checkpoint'],
                       save_checkpoint_location)

        logger.info('best trial: ' +
                    str(trials[sorted_trial_ids[0]]['hp_values']) +
                    '\nbest value: ' +
                    str(trials[sorted_trial_ids[0]]['metrics']))

        best_trial = trials[sorted_trial_ids[0]]['hp_values']
        if os.path.exists(self.path_to_best_trial):
            logger.info('overwriting best_trial.json . . .')
            os.remove(self.path_to_best_trial)
        with open(self.path_to_best_trial, 'w') as fp:
            json.dump(best_trial, fp)
            logger.info('results saved to best_trial.json')
Example #18
import os
import numpy as np

from tuner import Tuner
from utils import load_image_into_numpy_array, plot_detections

MODEL = 'ssd_resnet50_v1_fpn_640x640_coco17_tpu-8'

t = Tuner(config_path=os.path.join('pre_trained_models', MODEL,
                                   'pipeline.config'),
          checkpoint_path=os.path.join('pre_trained_models', MODEL,
                                       'checkpoint', 'ckpt-0'),
          num_classes=1)

t.load_training_images(
    path='models/research/object_detection/test_images/ducky/train/')

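# One ground-truth bounding box per training image; presumably normalized
# [ymin, xmin, ymax, xmax] coordinates, following the TF Object Detection API convention.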
t.set_annotation_data(data=[
    np.array([[0.436, 0.591, 0.629, 0.712]], dtype=np.float32),
    np.array([[0.539, 0.583, 0.73, 0.71]], dtype=np.float32),
    np.array([[0.464, 0.414, 0.626, 0.548]], dtype=np.float32),
    np.array([[0.313, 0.308, 0.648, 0.526]], dtype=np.float32),
    np.array([[0.256, 0.444, 0.484, 0.629]], dtype=np.float32)
])

t.prepare_data()
t.restore_weights()
t.fine_tune(batch_size=4, learning_rate=0.01, num_batches=100)

test_img_path = 'models/research/object_detection/test_images/ducky/test/out1.jpg'
detections = t.detect(test_img_path)
Example #19
from intelhex import IntelHex
from database import Database
from file import File
from tuner import Tuner

ih = IntelHex()
client = pymongo.MongoClient(
    "mongodb+srv://db_user1:[email protected]/sample_training?retryWrites=true&w=majority")


if __name__ == "__main__":

    file = File(ih)
    data = Database(client)
    ih = file.import_file()
    tune = Tuner(ih)
    print("Collecting data from database... ")
    model = data.get_model()
    injection = data.get_injection(model)
    turbo = data.get_turbo(model)
    rail = data.get_rail(model)

    if injection:
        x_inj, y_inj = data.get_injection_pos(model)
        print("Inj tuning...")
        ih = tune.tuning(injection, x_inj, y_inj)
    else:
        pass
    if turbo:
        x_trb, y_trb = data.get_turbo_pos(model)
        print("Turbo tuning...")