Example #1
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 50, 50
    load_data = LoadHAR(add_pitch=add_pitch,
                        add_roll=add_roll,
                        add_filter=add_filter,
                        n_samples=n_samples,
                        step=step)

    conf = ModelConfiguration()
    conf.load_datasets(
        [load_data.uci_mhealth, load_data.idash, load_data.wisdm1])

    user_idx = -1
    user = None
    # Optionally hold out a single user as the test fold
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )

    for train_index, test_index in conf.cv:
        conf.user = user

        model = RNN(n_in=(n_samples, conf.n_features),
                    n_hidden=[50, 50],
                    dropout_probability=0.5,
                    n_out=conf.n_classes,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax)

        if len(conf.cv) > 1:
            user_idx += 1
            conf.user = conf.user_names[user_idx]

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
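            # Remove the now-unused default output directory; results go to
            # the cv-specific path set above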
            rmdir(root_path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.002,
                 n_epochs=300,
                 model=model,
                 train=train,
                 load_data=load_data)
    def save_encoder(self):
        """
        Dump the encoder parameters to a pickle file.
        """
        p = paths.path_exists(self.root_path + '/pickled model/')
        p += 'encoder.pkl'
        with open(p, "wb") as f:
            pickle.dump(self.encoder_params, f,
                        protocol=pickle.HIGHEST_PROTOCOL)
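
The pickled parameters can later be restored with pickle.load; a minimal counterpart is sketched below (it is not part of the original excerpt, and it assumes the same root_path/'pickled model' layout used by save_encoder).

    def load_encoder(self):
        """
        Restore encoder parameters written by save_encoder (hypothetical
        helper, shown for illustration only).
        """
        p = self.root_path + '/pickled model/encoder.pkl'
        with open(p, "rb") as f:
            self.encoder_params = pickle.load(f)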
Example #3
def main():
    n_samples, step = 200, 200
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=True,
                        n_samples=n_samples,
                        step=step,
                        normalize=False,
                        comp_magnitude=False)

    conf = ModelConfiguration()
    conf.load_datasets([load_data.uci_hapt], label_limit=100)

    user_idx = -1
    user = None  # 'UCI HAPT10'
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
        print('Testing user: %s' % user)
    else:
        # Cross validate on users
        # conf.cv = LeavePLabelOut(conf.users, p=1)

        # Divide into K folds balanced on labels
        # conf.cv = StratifiedKFold(np.argmax(conf.y, axis=1), n_folds=10)

        # And shuffle
        conf.cv = StratifiedShuffleSplit(np.argmax(conf.y, axis=1),
                                         n_iter=1,
                                         test_size=0.3)

    for train_index, test_index in conf.cv:
        conf.user = user

        model = ResNet(n_in=(n_samples, conf.n_features),
                       n_filters=[32, 64, 128, 256],
                       pool_sizes=[2, 2, 2, 2],
                       n_hidden=[512],
                       conv_dropout=0.3,
                       dropout=0.5,
                       n_out=conf.n_classes,
                       trans_func=leaky_rectify,
                       out_func=softmax,
                       batch_norm=True,
                       stats=conf.stats)

        if len(conf.cv) > 1:
            user_idx += 1
            if len(conf.cv) == len(conf.user_names):
                conf.user = conf.user_names[user_idx]
            else:
                conf.user = conf.name + ' K_%d' % user_idx

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=500,
                 model=model,
                 train=train,
                 load_data=load_data)
Example #4
def main():
    n_samples, step = 200, 50
    load_data = LoadHAR(add_pitch=True,
                        add_roll=True,
                        add_filter=True,
                        n_samples=n_samples,
                        step=step,
                        normalize='segments',
                        comp_magnitude=True,
                        simple_labels=True,
                        common_labels=True)

    conf = ModelConfiguration()
    conf.load_datasets([load_data.uci_hapt], label_limit=18)

    user_idx = -1
    user = None
    # Optionally hold out a single user as the test fold
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
    else:
        # conf.cv = LeaveOneLabelOut(conf.users)
        conf.cv = StratifiedShuffleSplit(np.argmax(conf.y, axis=1),
                                         n_iter=10,
                                         test_size=0.1,
                                         random_state=None)

    for train_index, test_index in conf.cv:
        conf.user = user

        n_conv = 1
        model = RCNN(n_in=(n_samples, conf.n_features),
                     n_filters=[32],
                     filter_sizes=[3] * n_conv,
                     pool_sizes=[2] * n_conv,
                     rcl=[2, 2, 2, 2],
                     rcl_dropout=0.5,
                     n_hidden=[512],
                     dropout_probability=0.5,
                     n_out=conf.n_classes,
                     ccf=False,
                     trans_func=rectify,
                     out_func=softmax,
                     batch_norm=True,
                     stats=conf.stats)

        if len(conf.cv) > 1:
            user_idx += 1
            if len(conf.cv) == len(conf.user_names):
                conf.user = conf.user_names[user_idx]
            else:
                conf.user = conf.name + ' K_%d' % user_idx

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        # Copy script to output folder
        scriptpath = path.realpath(__file__)
        filename = path.basename(scriptpath)
        shutil.copy(scriptpath, model.root_path + '/' + filename)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=300,
                 model=model,
                 train=train,
                 load_data=load_data,
                 batch_size=64)
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 200, 200
    load_data = LoadHAR(add_pitch=add_pitch,
                        add_roll=add_roll,
                        add_filter=add_filter,
                        n_samples=n_samples,
                        step=step)
    batch_size = 64

    # Define datasets and load iteratively
    datasets = [
        load_data.idash, load_data.wisdm1, load_data.uci_mhealth,
        load_data.uci_hapt
    ]
    X, y, name, users = datasets[0]()
    users = ['%s_%02d' % (name, user) for user in users]
    for dataset in datasets[1:]:
        X_tmp, y_tmp, name_tmp, users_tmp = dataset()
        X = np.concatenate((X, X_tmp))
        y = np.concatenate((y, y_tmp))
        for user in users_tmp:
            users.append('%s_%02d' % (name_tmp, user))
        name += '_' + name_tmp
    users = np.array(users)

    print('Users: %d' % len(np.unique(users)))
    print(X.shape)

    n_windows, sequence_length, n_features = X.shape
    y = one_hot(y, n_classes=len(ACTIVITY_MAP))
    n_classes = y.shape[-1]

    # Create a time-string for our cv run
    d = str(
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S'))
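    # Leave-one-label-out over the user ids, i.e. leave-one-user-out cross-validation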
    cv = LeaveOneLabelOut(users)

    user_idx = 0
    user_names = np.unique(users)
    user = None
    if user is not None:
        train_idx = users != user
        test_idx = users == user
        cv = ((train_idx, test_idx), )

    for train_index, test_index in cv:
        user = user_names[user_idx]
        user_idx += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # Scale data using training data
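        # Windows are flattened to (n, n_features) so the scaler statistics are per feature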
        scaler = StandardScaler().fit(X_train.reshape((-1, n_features)))
        n_windows = X_train.shape[0]
        X_train = scaler.transform(X_train.reshape((-1, n_features))).reshape(
            (n_windows, sequence_length, n_features))
        n_windows = X_test.shape[0]
        X_test = scaler.transform(X_test.reshape((-1, n_features))).reshape(
            (n_windows, sequence_length, n_features))

        print('Xtrain mean: %f\tstd: %f' % (X_train.mean(), X_train.std()))
        print('Xtest mean: %f\tstd: %f' % (X_test.mean(), X_test.std()))
        train_set = (X_train, y_train)
        test_set = (X_test, y_test)
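        # No separate validation split; the test set doubles as validation data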
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]

        # Evaluate the whole test set as a single batch and size the training
        # batches to match it; no separate validation batches are used here
        n_test_batches = 1
        n_valid_batches = None
        batch_size = n_test
        n_train_batches = n_train // batch_size
        print("n_train_batches: %d, n_test_batches: %d" %
              (n_train_batches, n_test_batches))

        model = ResNet(n_in=(sequence_length, n_features),
                       n_filters=[32, 32, 64, 64],
                       pool_sizes=[2, 1, 2, 1],
                       n_hidden=[512],
                       conv_dropout=0.5,
                       dropout=0.5,
                       n_out=n_classes,
                       trans_func=rectify,
                       out_func=softmax,
                       batch_size=batch_size,
                       batch_norm=True)

        if len(cv) > 1:
            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, d, user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        # Build model
        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, None)
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.001
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 1e-6

        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size

        # Define confusion matrix
        cfm = ConfusionMatrix(n_classes=n_classes,
                              class_names=list(ACTIVITY_MAP.values()))
        print(n_classes, len(list(ACTIVITY_MAP.values())))

        def f_custom(model, path):
            mean_evals = model.get_output(X_test).eval()
            t_class = np.argmax(y_test, axis=1)
            y_class = np.argmax(mean_evals, axis=1)
            # cfm.batchAdd(t_class, y_class)
            # print(cfm)

            cm = confusion_matrix(t_class, y_class)
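            # Normalise each row so cells show fractions of the true class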
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            plt.clf()
            plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
            plt.colorbar()
            plt.ylabel('True')
            plt.xlabel('Predicted')
            plt.savefig(path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=100,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=f_custom)
        train.pickle = False
        train.add_initial_training_notes(
            "Standardizing data after adding features"
            "\nUsing striding instead of pooling")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %s" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" % sequence_length)
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Shuffle: %s" % False)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)
        train.write_to_logger("Network Architecture ---------------")
        for layer in get_all_layers(model.model):
            # print(layer.name, ": ", get_output_shape(layer))
            train.write_to_logger(layer.name + ": " +
                                  str(get_output_shape(layer)))

        train.train_model(f_train,
                          train_args,
                          f_test,
                          test_args,
                          f_validate,
                          validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 100, 50
    shuffle = False
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name, users = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    # The data is structured as (samples, sequence, features), but the
    # convolutional RNN needs a longer time dimension, so consecutive windows
    # are stacked together by the factor below
    factor = 5
    sequence_length *= factor

    # Concat train and test data
    X = np.concatenate((train_set[0], test_set[0]), axis=0)
    y = np.concatenate((train_set[1], test_set[1]), axis=0)

    d = str(
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S'))
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)

        n_train = train_set[0].shape[0] // factor
        print("Resizing train set from %d to %d" %
              (train_set[0].shape[0], n_train * factor))
        train_set = (np.reshape(train_set[0][:factor * n_train],
                                (n_train, sequence_length, n_features)),
                     np.reshape(train_set[1][:factor * n_train],
                                (n_train, factor, n_classes)))

        n_test = test_set[0].shape[0] // factor
        print("Resizing test set from %d to %d" %
              (test_set[0].shape[0], n_test * factor))
        test_set = (np.reshape(test_set[0][:factor * n_test],
                               (n_test, sequence_length, n_features)),
                    np.reshape(test_set[1][:factor * n_test],
                               (n_test, factor, n_classes)))
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]
        n_valid = valid_set[0].shape[0]

        n_test_batches = 1
        n_valid_batches = 1
        batch_size = n_test
        n_train_batches = n_train // batch_size

        print("n_train_batches: %d, n_test_batches: %d, n_valid_batches: %d" %
              (n_train_batches, n_test_batches, n_valid_batches))

        n_conv = 6
        model = conv_BRNN(n_in=(sequence_length, n_features),
                          n_filters=[64] * n_conv,
                          filter_sizes=[3] * n_conv,
                          pool_sizes=[1, 2, 1, 2, 2, 2],
                          n_hidden=[200],
                          conv_dropout=0.2,
                          dropout_probability=0.5,
                          n_out=n_classes,
                          downsample=1,
                          trans_func=rectify,
                          out_func=softmax,
                          batch_size=batch_size,
                          factor=factor)

        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, valid_set)
        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.003
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 0.999

        train = TrainModel(model=model,
                           anneal_lr=0.9,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False
        train.add_initial_training_notes(
            "Standardizing data after adding features")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" %
                              (sequence_length / factor))
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Time steps: %d" % factor)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)

        train.train_model(f_train=f_train,
                          train_args=train_args,
                          f_test=f_test,
                          test_args=test_args,
                          f_validate=f_validate,
                          validation_args=validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
def main():
    n_samples, step = 100, 50
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=True,
                        n_samples=n_samples,
                        step=step,
                        normalize=True)

    conf = ModelConfiguration()
    conf.load_datasets(
        [load_data.uci_hapt],
        label_limit=18)  # , load_data.uci_mhealth, load_data.idash

    user_idx = -1
    user = None
    # Optionally hold out a single user as the test fold
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
    else:
        conf.cv = LeaveOneLabelOut(conf.users)

    for train_index, test_index in conf.cv:
        conf.user = user

        model = CNN(n_in=(n_samples + 2, conf.n_features),
                    n_filters=[64, 64, 64, 64],
                    filter_sizes=[5, 5, 3, 3],
                    pool_sizes=[2, 2, 2, 2],
                    conv_dropout=0.5,
                    n_hidden=[128],
                    dense_dropout=0.5,
                    n_out=conf.n_classes,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax,
                    batch_norm=True,
                    input_noise=0.2,
                    stats=2)

        if len(conf.cv) > 1:
            user_idx += 1
            conf.user = conf.user_names[user_idx]

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=100,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=300,
                 model=model,
                 train=train,
                 load_data=load_data)
Example #8
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 200, 200
    load_data = LoadHAR(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                        n_samples=n_samples, step=step)
    batch_size = 64
    X, y, name, users, stats = load_data.uci_hapt()
    # Not defined in the original excerpt: derive the window dimensions from
    # the loaded data (assuming y is already one-hot encoded) and disable
    # shuffling as in the other examples
    n_windows, sequence_length, n_features = X.shape
    n_classes = y.shape[-1]
    shuffle = False

    d = str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S'))
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]

        n_test_batches = 1
        n_valid_batches = 1
        batch_size = n_test
        n_train_batches = n_train//batch_size
        print("n_train_batches: %d, n_test_batches: %d" % (n_train_batches, n_test_batches))

        # num_1x1, num_2x1_proj, reduce_3x1, num_3x1, reduce_5x1, num_5x1
        model = Incep(n_in=(sequence_length, n_features),
                      inception_layers=[(8, 8, 0, 8, 0, 8),
                                        (16, 8, 0, 16, 0, 8),
                                        (32, 16, 0, 32, 0, 16),
                                        (32, 16, 0, 32, 0, 16),
                                        (64, 32, 0, 64, 0, 32),
                                        (64, 32, 0, 64, 0, 32)],
                      pool_sizes=[2, 2, 0, 2, 0, 2],
                      n_hidden=512,
                      output_dropout=0.5,
                      inception_dropout=0.2,
                      n_out=n_classes,
                      trans_func=rectify,
                      out_func=softmax,
                      batch_size=batch_size,
                      batch_norm=False)

        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        # Build model
        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(train_set,
                                                                                              test_set,
                                                                                              valid_set)
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.002
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 1e-6

        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False
        train.add_initial_training_notes("Standardizing data after adding features")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" % sequence_length)
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" % (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)
        train.write_to_logger("Network Architecture ---------------")
        for layer in get_all_layers(model.model):
            # print(layer.name, ": ", get_output_shape(layer))
            train.write_to_logger(layer.name + ": " + str(get_output_shape(layer)))
        train.train_model(f_train, train_args,
                          f_test, test_args,
                          f_validate, validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
Example #9
def main():
    n_samples, step = 40, 20
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        step=step,
                        normalize='channels',
                        comp_magnitude=False,
                        simple_labels=True,
                        common_labels=False)

    conf = ModelConfiguration()
    conf.load_datasets([load_data.uci_hapt], label_limit=6)

    user_idx = -1
    user = None  # 'UCI HAPT10'
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
        print('Testing user: %s' % user)
    else:
        # Cross validate on users
        conf.cv = LeavePLabelOut(conf.users, p=1)

        # Divide into K folds balanced on labels
        # conf.cv = StratifiedKFold(conf.users, n_folds=10)

        # And shuffle
        # conf.cv = StratifiedShuffleSplit(np.argmax(conf.y, axis=1), n_iter=1, test_size=0.1, random_state=None)

        # Pure shuffle
        # conf.cv = ShuffleSplit(conf.y.shape[0], n_iter=2, test_size=0.1)

    for train_index, test_index in conf.cv:
        conf.user = user

        model = tconvRNN(n_in=(n_samples, conf.n_features),
                         n_filters=[64, 64, 64, 64],
                         filter_sizes=[5] * 4,
                         pool_sizes=[0] * 4,
                         n_hidden=[128, 128],
                         conv_dropout=0.3,
                         rnn_in_dropout=0.0,
                         rnn_hid_dropout=0.0,
                         output_dropout=0.5,
                         n_out=conf.n_classes,
                         trans_func=leaky_rectify,
                         out_func=softmax,
                         stats=conf.stats)

        if len(conf.cv) > 1:
            user_idx += 1
            if len(conf.cv) == len(conf.user_names):
                conf.user = conf.user_names[user_idx]
            else:
                conf.user = conf.name + ' K_%d' % user_idx

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        scriptpath = path.realpath(__file__)
        filename = path.basename(scriptpath)
        print(scriptpath, model.root_path, filename)
        shutil.copy(scriptpath, model.root_path + '/' + filename)

        train = TrainModel(model=model,
                           anneal_lr=0.9,
                           anneal_lr_freq=1,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False
        train.write_to_logger(
            "Using LeavePLabelOut over users with p=1")

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=500,
                 model=model,
                 train=train,
                 load_data=load_data,
                 batch_size=100)
def run_cnn():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 200, 100
    shuffle = False
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name, users = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    X = np.concatenate((train_set[0], test_set[0]), axis=0)
    y = np.concatenate((train_set[1], test_set[1]), axis=0)

    d = str(
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S'))
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]

        n_train_batches = n_train // batch_size
        n_test_batches = n_test // batch_size
        n_valid_batches = n_test // batch_size

        print("n_train_batches: %d, n_test_batches: %d" %
              (n_train_batches, n_test_batches))
        model = CNN(n_in=(sequence_length, n_features),
                    n_filters=[64, 64, 64, 64],
                    filter_sizes=[5, 5, 3, 3],
                    pool_sizes=[2, 2, 2, 2],
                    conv_dropout=0.2,
                    n_hidden=[512],
                    dropout_probability=0.5,
                    n_out=n_classes,
                    downsample=0,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax,
                    batch_size=batch_size,
                    batch_norm=False)
        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, valid_set)
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.003
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 1e-6

        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=100,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = True
        train.add_initial_training_notes("")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" % sequence_length)
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)

        train.train_model(f_train,
                          train_args,
                          f_test,
                          test_args,
                          f_validate,
                          validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger