# NOTE: these snippets are excerpts from a larger Theano-based HAR project.
# The imports below cover the third-party names used throughout; `ld` (the
# data loader), `utils`, `paths`, `rmdir`, `ROOT_FOLDER`, and the model
# classes (UFCNN, RCL_RNN, conv_BRNN, CNN, TrainModel) are project-local and
# assumed importable from the surrounding package.
import time
import datetime
import numpy as np
import theano
from lasagne.nonlinearities import rectify, softmax  # assumed source of rectify/softmax
from sklearn.cross_validation import LeaveOneLabelOut  # sklearn < 0.18 API


def main():
    add_pitch, add_roll, add_filter = False, False, True
    batch_size = 128
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter)
    n_train = train_set[0].shape[0]
    n_test = test_set[0].shape[0]

    n_train_batches = n_train // batch_size
    n_test_batches = n_test // batch_size
    # count batches from the validation set itself, not the test set
    n_valid_batches = valid_set[0].shape[0] // batch_size

    print("n_train_batches: %d, n_test_batches: %d" %
          (n_train_batches, n_test_batches))

    model = UFCNN(n_in=(sequence_length, n_features),
                  n_filters=64,
                  filter_size=7,
                  pool_sizes=0,
                  n_hidden=[512],
                  dropout_probability=0.5,
                  n_out=n_classes,
                  downsample=1,
                  trans_func=rectify,
                  out_func=softmax,
                  batch_size=batch_size)

    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set, valid_set)
    train_args['inputs']['batchsize'] = batch_size
    train_args['inputs']['learningrate'] = 0.002
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999

    test_args['inputs']['batchsize'] = batch_size
    validate_args['inputs']['batchsize'] = batch_size

    model.log += "\nDataset: %s" % name
    model.log += "\nAdd pitch: %s\nAdd roll: %s" % (add_pitch, add_roll)
    model.log += "\nAdd filter separated signals: %s" % add_filter
    model.log += "\nTransfer function: %s" % model.transf.__name__
    train = TrainModel(model=model,
                       anneal_lr=0.9,
                       anneal_lr_freq=50,
                       output_freq=1,
                       pickle_f_custom_freq=100,
                       f_custom_eval=None)
    train.pickle = True
    train.add_initial_training_notes("")
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_train_batches,
                      n_test_batches=n_test_batches,
                      n_valid_batches=n_valid_batches,
                      n_epochs=10000)
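
# One plausible reading of the anneal_lr/anneal_lr_freq settings used above
# (an assumption about TrainModel's behavior, not taken from its source):
# the learning rate is multiplied by anneal_lr once every anneal_lr_freq
# epochs.
def annealed_lr(base_lr, epoch, anneal_lr=0.9, anneal_lr_freq=50):
    # hypothetical helper: epoch 100 with the defaults gives base_lr * 0.9**2
    return base_lr * anneal_lr ** (epoch // anneal_lr_freq)

assert abs(annealed_lr(0.002, 100) - 0.002 * 0.81) < 1e-12
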
def load_data():
    data = ld.LoadHAR(ROOT_FOLDER).uci_har_v1(True, True)

    return dict(
        output_dim=int(data['y_test'].shape[-1]),
        X_train=theano.shared(data['x_train'].astype(theano.config.floatX)),
        y_train=theano.shared(data['y_train'].astype(theano.config.floatX)),
        X_valid=theano.shared(data['x_test'].astype(theano.config.floatX)),
        y_valid=theano.shared(data['y_test'].astype(theano.config.floatX)),
        X_test=theano.shared(data['x_test'].astype(theano.config.floatX)),
        y_test=theano.shared(data['y_test'].astype(theano.config.floatX)),
        num_examples_train=data['x_train'].shape[0],
        num_examples_valid=data['x_test'].shape[0],
        num_examples_test=data['x_test'].shape[0],
        seq_len=int(data['x_train'].shape[1]),
        n_fea=int(data['x_train'].shape[2]))
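
# A hedged sketch of how the shared-variable dataset returned above is
# typically consumed in Theano: a compiled function indexes the shared array
# through `givens`, so minibatch slicing happens on-device. `make_batch_fn`
# is illustrative only, not part of the project API.
import theano.tensor as T

def make_batch_fn(X_shared, batch_size):
    index = T.iscalar('index')
    x = T.tensor3('x')
    # any graph built on `x` is evaluated on the selected minibatch slice
    return theano.function(
        [index], x.shape,
        givens={x: X_shared[index * batch_size:(index + 1) * batch_size]})
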
def load_data():
    (x_train, y_train), (x_test, y_test), (x_valid, y_valid), (sequence_length, n_features, n_classes) \
        = ld.LoadHAR(ROOT_FOLDER).uci_har_v1(add_pitch=True, add_roll=True, expand=True)

    return dict(output_dim=n_classes,
                X_train=x_train,
                y_train=y_train,
                X_test=x_test,
                y_test=y_test,
                X_valid=x_valid,
                y_valid=y_valid,
                num_examples_train=x_train.shape[0].eval(),
                num_examples_valid=x_valid.shape[0].eval(),
                num_examples_test=x_test.shape[0].eval(),
                seq_len=sequence_length,
                n_fea=n_features)
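
# Why the .eval() calls above are needed: when the loader hands back Theano
# shared variables, x.shape[0] is a symbolic scalar rather than a plain int,
# so it has to be evaluated to obtain the concrete sample count.
_x = theano.shared(np.zeros((100, 50, 3), dtype=theano.config.floatX))
assert int(_x.shape[0].eval()) == 100
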
def load_data():
    data = ld.LoadHAR(ROOT_FOLDER).uci_har_v1()
    x_test = utils.spectrogram_2d(utils.magnitude(data['x_test'])).astype(
        theano.config.floatX)

    return dict(
        output_dim=int(data['y_test'].shape[-1]),
        X_train=theano.shared(
            utils.spectrogram_2d(utils.magnitude(data['x_train'])).astype(
                theano.config.floatX)),
        y_train=theano.shared(data['y_train'].astype(theano.config.floatX)),
        X_valid=theano.shared(x_test),
        y_valid=theano.shared(data['y_test'].astype(theano.config.floatX)),
        X_test=theano.shared(x_test),
        y_test=theano.shared(data['y_test'].astype(theano.config.floatX)),
        num_examples_train=data['x_train'].shape[0],
        num_examples_valid=data['x_test'].shape[0],
        num_examples_test=data['x_test'].shape[0],
        seq_len=int(x_test.shape[1]),
        input_width=x_test.shape[2],
        input_height=x_test.shape[3])
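
# utils.magnitude and utils.spectrogram_2d are project-local; below is a
# minimal sketch of what a magnitude step for tri-axial accelerometer data
# commonly looks like (an assumption -- the project's implementation may
# differ).
def magnitude_sketch(x):
    # x: (samples, sequence, 3 axes) -> per-step Euclidean norm
    return np.sqrt((x ** 2).sum(axis=-1))
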
def main():
    add_pitch, add_roll, add_filter = True, True, True
    n_samples, step = 200, 50
    shuffle = True
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    # The data is structured as (samples, sequence, features), but the
    # convolutional RNN needs longer time series, so `factor` consecutive
    # windows are stacked into each sample below.
    factor = 5
    sequence_length *= factor

    n_train = train_set[0].shape[0] // factor
    print("Resizing train set from %d to %d" %
          (train_set[0].shape[0], n_train * factor))
    train_set = (np.reshape(train_set[0][:factor * n_train],
                            (n_train, sequence_length, n_features)),
                 np.reshape(train_set[1][:factor * n_train],
                            (n_train, factor, n_classes)))

    n_test = test_set[0].shape[0] // factor
    print("Resizing test set from %d to %d" %
          (test_set[0].shape[0], n_test * factor))
    test_set = (np.reshape(test_set[0][:factor * n_test],
                           (n_test, sequence_length, n_features)),
                np.reshape(test_set[1][:factor * n_test],
                           (n_test, factor, n_classes)))
    valid_set = test_set

    n_train = train_set[0].shape[0]
    n_test = test_set[0].shape[0]
    n_valid = valid_set[0].shape[0]

    n_train_batches = n_train // batch_size
    n_test_batches = n_test // batch_size
    n_valid_batches = n_valid // batch_size

    print("n_train_batches: %d, n_test_batches: %d, n_valid_batches: %d" %
          (n_train_batches, n_test_batches, n_valid_batches))

    n_conv = 1
    model = RCL_RNN(n_in=(sequence_length, n_features),
                    n_filters=[64] * n_conv,
                    filter_sizes=[3] * n_conv,
                    pool_sizes=[2] * n_conv,
                    n_hidden=[100],
                    conv_dropout=0.4,
                    rcl=[3, 3, 3, 3, 3],
                    rcl_dropout=0.4,
                    dropout_probability=0.5,
                    n_out=n_classes,
                    downsample=1,
                    trans_func=rectify,
                    out_func=softmax,
                    batch_size=batch_size,
                    factor=factor)

    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set, valid_set)
    train_args['inputs']['batchsize'] = batch_size
    train_args['inputs']['learningrate'] = 0.004
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 1e-6

    test_args['inputs']['batchsize'] = batch_size
    validate_args['inputs']['batchsize'] = batch_size

    model.log += "\nDataset: %s" % name
    model.log += "\nTraining samples: %d" % n_train
    model.log += "\nTest samples: %d" % n_test
    model.log += "\nSequence length: %d" % (sequence_length / factor)
    model.log += "\nTime steps: %d" % factor
    model.log += "\nStep: %d" % step
    model.log += "\nShuffle: %s" % shuffle
    model.log += "\nAdd pitch: %s\nAdd roll: %s" % (add_pitch, add_roll)
    model.log += "\nAdd filter separated signals: %s" % add_filter
    model.log += "\nTransfer function: %s" % model.transf
    train = TrainModel(model=model,
                       anneal_lr=0.75,
                       anneal_lr_freq=50,
                       output_freq=1,
                       pickle_f_custom_freq=100,
                       f_custom_eval=None)
    train.pickle = True
    train.add_initial_training_notes("")
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_train_batches,
                      n_test_batches=n_test_batches,
                      n_valid_batches=n_valid_batches,
                      n_epochs=2000)
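
# A standalone NumPy sketch (toy shapes, hypothetical helper name) of the
# factor-reshape used in main() above: `factor` consecutive windows are
# stacked into one longer sequence, keeping one label row per window.
def stack_windows(X, y, factor):
    # X: (samples, sequence, features); y: (samples, classes)
    n = X.shape[0] // factor                      # keep whole groups only
    X = X[:n * factor].reshape(n, factor * X.shape[1], X.shape[2])
    y = y[:n * factor].reshape(n, factor, y.shape[-1])
    return X, y

_X, _y = stack_windows(np.zeros((103, 50, 6)), np.zeros((103, 12)), factor=5)
assert _X.shape == (20, 250, 6) and _y.shape == (20, 5, 12)
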
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 100, 50
    shuffle = False
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name, users = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    # The data is structured as (samples, sequence, features), but the
    # convolutional RNN needs longer time series, so `factor` consecutive
    # windows are stacked into each sample below.
    factor = 5
    sequence_length *= factor

    # Concat train and test data
    X = np.concatenate((train_set[0], test_set[0]), axis=0)
    y = np.concatenate((train_set[1], test_set[1]), axis=0)

    d = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)

        n_train = train_set[0].shape[0] // factor
        print("Resizing train set from %d to %d" %
              (train_set[0].shape[0], n_train * factor))
        train_set = (np.reshape(train_set[0][:factor * n_train],
                                (n_train, sequence_length, n_features)),
                     np.reshape(train_set[1][:factor * n_train],
                                (n_train, factor, n_classes)))

        n_test = test_set[0].shape[0] // factor
        print("Resizing test set from %d to %d" %
              (test_set[0].shape[0], n_test * factor))
        test_set = (np.reshape(test_set[0][:factor * n_test],
                               (n_test, sequence_length, n_features)),
                    np.reshape(test_set[1][:factor * n_test],
                               (n_test, factor, n_classes)))
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]
        n_valid = valid_set[0].shape[0]

        n_test_batches = 1
        n_valid_batches = 1
        batch_size = n_test
        n_train_batches = n_train // batch_size

        print("n_train_batches: %d, n_test_batches: %d, n_valid_batches: %d" %
              (n_train_batches, n_test_batches, n_valid_batches))

        n_conv = 6
        model = conv_BRNN(n_in=(sequence_length, n_features),
                          n_filters=[64] * n_conv,
                          filter_sizes=[3] * n_conv,
                          pool_sizes=[1, 2, 1, 2, 2, 2],
                          n_hidden=[200],
                          conv_dropout=0.2,
                          dropout_probability=0.5,
                          n_out=n_classes,
                          downsample=1,
                          trans_func=rectify,
                          out_func=softmax,
                          batch_size=batch_size,
                          factor=factor)

        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, valid_set)
        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.003
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 0.999

        train = TrainModel(model=model,
                           anneal_lr=0.9,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False
        train.add_initial_training_notes(
            "Standardizing data after adding features")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" %
                              (sequence_length / factor))
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Time steps: %d" % factor)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)

        train.train_model(f_train=f_train,
                          train_args=train_args,
                          f_test=f_test,
                          test_args=test_args,
                          f_validate=f_validate,
                          validation_args=validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
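
# A toy illustration of the leave-one-user-out split driving the loop above:
# LeaveOneLabelOut (sklearn < 0.18; the newer equivalent is LeaveOneGroupOut)
# yields one fold per distinct user label, holding that user's windows out
# as the test set.
_users = np.array([1, 1, 2, 2, 3, 3])            # one user id per window
for _train_idx, _test_idx in LeaveOneLabelOut(_users):
    assert len(np.unique(_users[_test_idx])) == 1  # exactly one held-out user
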
def run_cnn():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 200, 100
    shuffle = False
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name, users = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    X = np.concatenate((train_set[0], test_set[0]), axis=0)
    y = np.concatenate((train_set[1], test_set[1]), axis=0)

    d = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]

        n_train_batches = n_train // batch_size
        n_test_batches = n_test // batch_size
        n_valid_batches = n_test // batch_size

        print("n_train_batches: %d, n_test_batches: %d" %
              (n_train_batches, n_test_batches))
        model = CNN(n_in=(sequence_length, n_features),
                    n_filters=[64, 64, 64, 64],
                    filter_sizes=[5, 5, 3, 3],
                    pool_sizes=[2, 2, 2, 2],
                    conv_dropout=0.2,
                    n_hidden=[512],
                    dropout_probability=0.5,
                    n_out=n_classes,
                    downsample=0,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax,
                    batch_size=batch_size,
                    batch_norm=False)
        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, valid_set)
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.003
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 1e-6

        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=100,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = True
        train.add_initial_training_notes("")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" % sequence_length)
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)

        train.train_model(f_train,
                          train_args,
                          f_test,
                          test_args,
                          f_validate,
                          validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
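
# Presumably why the handler teardown above exists: each cross-validation
# fold builds a fresh TrainModel, and if handlers were never removed, the
# shared logger would accumulate one per fold and emit duplicate lines.
# A minimal illustration:
import logging

_logger = logging.getLogger('cv_demo')
_logger.addHandler(logging.StreamHandler())
_logger.addHandler(logging.StreamHandler())   # a second fold adds another
for _h in _logger.handlers[:]:
    _h.close()
    _logger.removeHandler(_h)
assert not _logger.handlers
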