Example #1
    def __event_detected(self, pin_returned):
        sensor = self.__configs.sensor_list[pin_returned]
        topic = self.__configs.root_topic + sensor.topic
        state = sensor.determine_state(self.__gpio.input)
        event = Event(topic, state, utils.timestamp())

        utils.log(event.log())
        self.__mqtt.publish(topic, event.as_json())
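`Event` here is project-specific; its interface can be inferred from the call sites (a constructor taking a topic, a state and a timestamp, plus `log()` and `as_json()`). A minimal sketch of such a class, as an assumption rather than the project's actual code:

import json

class Event:
    def __init__(self, topic, state, timestamp):
        self.topic = topic
        self.state = state
        self.timestamp = timestamp

    def log(self):
        # Human-readable line for utils.log()
        return '{}: {} at {}'.format(self.topic, self.state, self.timestamp)

    def as_json(self):
        # Serialized payload for MQTT publishing
        return json.dumps({'state': self.state, 'timestamp': self.timestamp})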
Example #2
    def start(self, model, run_id=timestamp()):
        self.compile(model)
        self.train(model=model,
                   train_data=self.loader.train_data,
                   validation_data=self.loader.test_data,
                   run_id=run_id)
        self.evaluate(run_id=run_id,
                      test_data=self.loader.test_data,
                      steps=self.loader.test_data_info.count)
Example #3
def start(model, hparams, run_id=timestamp()):
    train_data = loader.load_train_dataset()
    validation_data = loader.load_test_dataset()
    compile(model=model, hparams=hparams)
    train(model=model,
          train_data=train_data,
          validation_data=validation_data,
          run_id=run_id,
          hparams=hparams)
    evaluate(run_id=run_id,
             test_data=validation_data,
             steps=loader.test_data_count)
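Note that in both `start()` wrappers above, the default `run_id=timestamp()` is evaluated once, when the function is defined, so every call that omits `run_id` reuses the same value. A minimal sketch of the usual workaround (the `None` sentinel is standard Python practice, not something taken from these projects):

def start(model, hparams, run_id=None):
    # Evaluate the timestamp per call rather than once at import time
    if run_id is None:
        run_id = timestamp()
    ...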
Example #4
def evolve_state(hamiltonian, initial_state, tlist, return_all_states=False):
    """Evolve state with (generally time-dependent) hamiltonian.

    This is a wrapper around `qutip.mesolve`, to which `hamiltonian` and
    `initial_state` are directly fed.

    Parameters
    ----------
    hamiltonian : list of qutip objects
        The Hamiltonian specification uses the "function based" format
        from the `qutip.mesolve` documentation: `hamiltonian` is given
        as a list of constant Hamiltonians, each paired with a
        time-dependent (numeric) coefficient function.
        The simplest example would be `hamiltonian = [H0, [H1, coeffFun]]`.
    initial_state : qutip.Qobj
    tlist : float or list of floats
        If a single number, it is interpreted as the total evolution time
        and divided into a number of subintervals, and the result is used
        as the times list for `qutip.mesolve`. If a list of numbers, it is
        fed to `qutip.mesolve` directly.
    return_all_states : bool, optional (default: False)
        If True, return the list of states at all times in `tlist`;
        otherwise return only the final state.
    """
    if isinstance(tlist, numbers.Number):
        tlist = np.linspace(0, tlist, 40)

    try:
        evolving_states = qutip.mesolve(hamiltonian, initial_state, tlist)
    except Exception:
        error_data_filename = 'evolution_error_details_{}.pickle'.format(
            timestamp())
        error_data_filename = os.path.join(os.getcwd(), error_data_filename)
        error_data_filename = autonumber_filename(error_data_filename)
        logging.info('Something went wrong while trying to evolve from\n'
                     'initial_state={}\nwith hamiltonian={}.\nSaving data to '
                     'reproduce in "{}".'.format(initial_state, hamiltonian,
                                                 error_data_filename))
        with open(error_data_filename, 'wb') as fh:
            pulse_samples = None
            if len(hamiltonian) == 2 and len(hamiltonian[1]) == 2:
                # Sample the time-dependent coefficient, since the function
                # itself cannot be pickled reliably.
                pulse_samples = [
                    hamiltonian[1][1](t)
                    for t in np.linspace(0, tlist[-1], 100)
                ]
                hamiltonian = (hamiltonian[0], hamiltonian[1][0])
            data = dict(hamiltonian=hamiltonian,
                        initial_state=initial_state,
                        times_list=tlist,
                        pulse_samples=pulse_samples)
            pickle.dump(data, fh)
        raise

    if return_all_states:
        return evolving_states.states
    else:
        return evolving_states.states[-1]
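A minimal usage sketch for `evolve_state`; the two-level system and the Gaussian pulse envelope are illustrative assumptions, not taken from the source:

import numpy as np
import qutip

H0 = qutip.sigmaz()  # constant drift term
H1 = qutip.sigmax()  # driven term

def coeff(t, args=None):
    # Gaussian pulse envelope centered at t = 5 (illustrative)
    return np.exp(-(t - 5.0) ** 2)

psi0 = qutip.basis(2, 0)
final_state = evolve_state([H0, [H1, coeff]], psi0, 10.0)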
Example #5
def main():

    p = argparse.ArgumentParser(
        description='{} ({})'.format(__description__, __version__))
    p.add_argument('-d',
                   help="set type of search database",
                   required=True,
                   choices=['basic', 'enum', 'bit'])
    p.add_argument('-e',
                   help="set type of search problem (see config.json)",
                   required=True)
    p.add_argument('-p',
                   help="print the CVC code and return",
                   action='store_true')
    args = p.parse_args()

    path = './tmp/'
    f = path + '{}-{}-{}.csv'.format(args.d, args.e, utils.timestamp())

    if args.d == 'basic':
        import src.search.basic as search
        cmds = cmd.basic
    elif args.d == 'enum':
        import src.search.enum as search
        cmds = cmd.enum
    elif args.d == 'bit':
        import src.search.bit as search
        cmds = cmd.bit
    """ check if stp and solver are in PATH or node/bin """
    stp = utils.which('stp')
    solver = utils.which(cmds[args.e][0])
    if stp == None:
        print "stp not found in PATH or in node/bin"
        return 1
    if solver == None:
        print "solver {} not found in PATH or in node/bin".format(
            cmds[args.e][0])
        return 1

    if not args.p:
        """ start search """
        solutions = search.do(*cmds[args.e])
        if solutions != None:
            utils.to_csv(solutions, f)
            print "Solution(s) written to {}".format(f)
    else:
        """ print CVC code """
        print search.cvc(*cmds[args.e])
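`utils.which` is a project helper; judging from the error messages above, it searches both `PATH` and a local `node/bin` directory. A plausible sketch under that assumption (not the project's actual code):

import os
import shutil

def which(name):
    # First look on PATH, then fall back to the local node/bin directory
    found = shutil.which(name)
    if found:
        return found
    local = os.path.join('node', 'bin', name)
    if os.path.isfile(local) and os.access(local, os.X_OK):
        return local
    return None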
Example #7
    def __init__(
            self,
            err_handler: Callable[[Exception, Callable[[], None]], None]):
        """Set up an Application instance.

        Initialize other modules' classes & set up instance state.

        Arguments:
        err_handler -- a function to handle errors that may arise
                       inside the application; must accept an
                       Exception & a callback
        """
        # initialize running variable for tracking quit state
        self.__exit = False

        self.__error_handler = err_handler

        # load configuration
        self.__configs = load_configs('/app/configuration.yaml')

        # setup GPIO pins
        self.__gpio = GpioHelper(self.__configs.sensor_list)

        # setup mqtt client, then
        # initialize mqtt connection & begin loop
        self.__mqtt = MqttHelper(
            self.__configs.mqtt_host,
            self.__configs.mqtt_port,
            self.__configs.mqtt_user,
            self.__configs.mqtt_pass
        ).will_set(  # set last will in case of ungraceful exit
            self.__configs.root_topic + 'fault',
            Fault(
                'FAILED',
                utils.timestamp()
            ).as_json()
        ).connect()  # connect & return mqtt helper object

        self.__fault_signal("FAILED")  # fault fails until running
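The `err_handler` contract above (an `Exception` plus a zero-argument callback) suggests a handler along these lines; the logging and re-raise behavior are illustrative assumptions:

def err_handler(exception, cleanup):
    # Log the failure, run the supplied cleanup callback, then re-raise
    utils.log('application error: {}'.format(exception))
    cleanup()
    raise exception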
Example #8
    def __fault_signal(self, fault_state):
        topic = self.__configs.root_topic + "fault"
        event = Fault(fault_state, utils.timestamp())

        utils.log(event.log())
        self.__mqtt.publish(topic, event.as_json())
Example #9
def train(train_xy,
          training_params,
          model_params,
          val_xy=None,
          model_name='base_model',
          img_size=None,
          grid_size=(16, 16),
          log_dir=None,
          model_dir=None,
          use_lr_scheduler=False):
    """
    Train an object detection model using YOLO method.

    :param train_xy: tuple, train data in the format (imgs, anns).
    :param training_params: dict, hyperparameters used for training.
        batch_size: int, number of samples that will be propagated
            through the network.
        epochs: int, number of complete passes over all the
            training examples.
    :param model_params: dict, hyperparameters of the model.
        learning_rate: float, step size used at each optimization
            step.
        l_coord: float, lambda coordinates parameter for the YOLO loss
            function. Weight of the XY and WH loss.
        l_noobj: float, lambda no object parameter for the YOLO loss
            function. Weight of the no-object part of the confidence loss.
    :param val_xy: tuple (default: None), validation data in the format
        (imgs, anns).
    :param model_name: str (default: base_model), name of the model to
        be used for training. Possible values are `base_model`,
        `darknet19_model` and `darknet19_model_resnet`.
    :param img_size: tuple (default: None), new resolution
        (new_img_height, new_img_width) of each image. If `None` then
        images will not be resized.
    :param grid_size: tuple (default: (16, 16)), number of
        (grid_rows, grid_cols) grid cells.
    :param log_dir: str (default: None), TensorBoard log directory.
    :param model_dir: str (default: None), directory where model
        checkpoints will be stored.
    :param use_lr_scheduler: bool (default: False), whether to use a
        learning rate scheduler.
    :return:
        model: tf.keras.Model, trained model.
        history: History, its History.history attribute is a record of
            training loss values and metrics values at successive
            epochs, as well as validation loss values and validation
            metrics values (if applicable).
    """
    # Create train dataset
    train_dataset = create_dataset(train_xy[0],
                                   train_xy[1],
                                   img_size,
                                   grid_size,
                                   is_training=True,
                                   batch_size=training_params['batch_size'])

    # Create validation dataset
    val_dataset = None
    if val_xy is not None:
        val_dataset = create_dataset(val_xy[0],
                                     val_xy[1],
                                     img_size,
                                     grid_size,
                                     is_training=False,
                                     batch_size=training_params['batch_size'])

    # Choose model
    input_shape = train_xy[0].shape[1:]
    if model_name == 'base_model':
        model = base_model(grid_size, input_shape=input_shape, **model_params)
    elif model_name == 'darknet19_model':
        model = darknet19_model(grid_size,
                                input_shape=input_shape,
                                **model_params)
    elif model_name == 'darknet19_model_resnet':
        model = darknet19_model_resnet(grid_size,
                                       input_shape=input_shape,
                                       **model_params)

    else:
        raise ValueError(f'Error: undefined model `{model_name}`.')

    # Create keras callbacks
    callbacks = []
    if log_dir is not None:
        log_dir = os.path.join(log_dir, timestamp())
        callbacks.append(
            tf.keras.callbacks.TensorBoard(
                log_dir=log_dir,
                histogram_freq=1,
                profile_batch=0,
            ))
    if model_dir is not None:
        model_path = os.path.join(model_dir, timestamp(), 'model.ckpt')
        callbacks.append(
            tf.keras.callbacks.ModelCheckpoint(filepath=model_path,
                                               monitor='val_loss',
                                               save_weights_only=True,
                                               save_best_only=True,
                                               mode='min',
                                               verbose=1))
    if use_lr_scheduler:
        fn_scheduler = partial(lr_scheduler,
                               initial_lr=model_params['learning_rate'])
        callbacks.append(
            tf.keras.callbacks.LearningRateScheduler(fn_scheduler))

    # Train model
    history = None
    try:
        model.summary()
        history = model.fit(
            train_dataset,
            epochs=training_params['epochs'],
            validation_data=val_dataset,
            steps_per_epoch=len(train_xy[1]) // training_params['batch_size'],
            callbacks=callbacks,
        )
    finally:
        # TensorBoard HParams saving
        if log_dir is not None:
            log_dir_hparams = os.path.join(log_dir, 'hparams')
            with tf.summary.create_file_writer(log_dir_hparams).as_default():
                hp.hparams({**training_params, **model_params},
                           trial_id=log_dir)

                if history is not None:
                    train_best_loss = min(history.history['loss'])
                    # train_best_f1_score = max(history.history['F1Score'])
                    tf.summary.scalar('train_best_loss',
                                      train_best_loss,
                                      step=0)
                    # tf.summary.scalar(
                    #     'train_best_f1_score',
                    #     train_best_f1_score,
                    #     step=0
                    # )

                    if val_dataset is not None:
                        val_best_loss = min(history.history['val_loss'])
                        # val_best_f1_score = max(history.history['val_F1Score'])
                        tf.summary.scalar('val_best_loss',
                                          val_best_loss,
                                          step=0)
                        # tf.summary.scalar(
                        #     'val_best_f1_score',
                        #     val_best_f1_score,
                        #     step=0
                        # )

    return model, history
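A hedged invocation sketch for `train`; the array shapes, dummy annotations and hyperparameter values are illustrative assumptions, not defaults from the source:

import numpy as np

imgs = np.zeros((32, 256, 256, 3), dtype=np.float32)  # dummy images
anns = np.zeros((32, 10, 5), dtype=np.float32)        # dummy box annotations

model, history = train(
    (imgs, anns),
    training_params={'batch_size': 8, 'epochs': 2},
    model_params={'learning_rate': 1e-3, 'l_coord': 5.0, 'l_noobj': 0.5},
    img_size=(256, 256),
    grid_size=(16, 16),
    log_dir='./logs',
    model_dir='./checkpoints',
)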