Example 1
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 50, 50
    load_data = LoadHAR(add_pitch=add_pitch,
                        add_roll=add_roll,
                        add_filter=add_filter,
                        n_samples=n_samples,
                        step=step)

    conf = ModelConfiguration()
    conf.load_datasets(
        [load_data.uci_mhealth, load_data.idash, load_data.wisdm1])

    user_idx = -1
    user = None
    # Optionally restrict the cross-validation to a single held-out user
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )

    for train_index, test_index in conf.cv:
        conf.user = user

        model = RNN(n_in=(n_samples, conf.n_features),
                    n_hidden=[50, 50],
                    dropout_probability=0.5,
                    n_out=conf.n_classes,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax)

        if len(conf.cv) > 1:
            user_idx += 1
            conf.user = conf.user_names[user_idx]

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.002,
                 n_epochs=300,
                 model=model,
                 train=train,
                 load_data=load_data)
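The `user is not None` branch above builds its own leave-one-user-out split from boolean masks over the users array and wraps it in a one-element tuple, so the same `for train_index, test_index in conf.cv` loop works in both modes. A minimal NumPy sketch of that masking (array contents are made up for illustration):

import numpy as np

users = np.array(['A', 'A', 'B', 'B', 'C'])  # hypothetical per-window user ids
held_out = 'B'

train_idx = users != held_out   # every window from the other users
test_idx = users == held_out    # every window from the held-out user
cv = ((train_idx, test_idx),)   # one-element CV iterable, like conf.cv above

for train_mask, test_mask in cv:
    print(train_mask.sum(), 'training windows,', test_mask.sum(), 'test windows')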
Example 2
def run_sdgmssl_mnist():
    """
    Train a skip deep generative model on the mnist dataset with 100 evenly distributed labels.
    """
    seed = np.random.randint(1, 2147462579)
    n_labeled = 100  # The total number of labeled data points.
    mnist_data = mnist.load_semi_supervised(n_labeled=n_labeled, filter_std=0.0, seed=seed, train_valid_combine=True)

    n, n_x = mnist_data[0][0].shape  # Datapoints in the dataset, input features.
    n, n_x = int(n), int(n_x)
    n_samples = 100  # The number of sampled labeled data points for each batch.
    n_batches = n // 100  # The number of batches.
    bs = n // n_batches  # The batch size.

    # Initialize the auxiliary deep generative model.
    model = SDGMSSL(n_x=n_x, n_a=100, n_z=100, n_y=10, qa_hid=[500, 500],
                    qz_hid=[500, 500], qy_hid=[500, 500], px_hid=[500, 500], pa_hid=[500, 500],
                    nonlinearity=rectify, batchnorm=True, x_dist='bernoulli')

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(*mnist_data)
    # Update the default function arguments.
    train_args['inputs']['batchsize_unlabeled'] = bs
    train_args['inputs']['batchsize_labeled'] = n_samples
    train_args['inputs']['beta'] = .1
    train_args['inputs']['learningrate'] = 3e-4
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999
    train_args['inputs']['samples'] = 5
    test_args['inputs']['samples'] = 5
    validate_args['inputs']['samples'] = 5

    # Evaluate the approximated classification error with 100 MC samples for a good estimate
    def custom_evaluation(model, path):
        mean_evals = model.get_output(mnist_data[2][0], 100)
        t_class = np.argmax(mnist_data[2][1], axis=1)
        y_class = np.argmax(mean_evals, axis=1)
        missclass = (np.sum(y_class != t_class, dtype='float32') / len(y_class)) * 100.
        train.write_to_logger("test 100-samples: %0.2f%%." % missclass)

    # Define training loop. Output training evaluations every 1 epoch
    # and the custom evaluation method every 10 epochs.
    train = TrainModel(model=model, output_freq=1, pickle_f_custom_freq=10, f_custom_eval=custom_evaluation)
    train.add_initial_training_notes("Training the skip deep generative model with %i labels. bn %s. seed %i." % (
    n_labeled, str(model.batchnorm), seed))
    train.train_model(f_train, train_args,
                      f_test, test_args,
                      f_validate, validate_args,
                      n_train_batches=n_batches,
                      n_epochs=1000,
                      # Any symbolic model variable can be annealed during
                      # training with a tuple of (var_name, every, scale constant, minimum value).
                      anneal=[("learningrate", 200, 0.75, 3e-5)])
def run_adgmssl_mnist():
    """
    Train an auxiliary deep generative model on the mnist dataset with 100 evenly distributed labels.
    """
    n_labeled = 100  # The total number of labeled data points.
    n_samples = 100  # The number of sampled labeled data points for each batch.
    n_batches = 600  # The number of batches.
    mnist_data = mnist.load_semi_supervised(n_batches=n_batches, n_labeled=n_labeled, n_samples=n_samples,
                                            filter_std=0.0, seed=123456, train_valid_combine=True)

    n, n_x = mnist_data[0][0].shape  # Datapoints in the dataset, input features.
    bs = n // n_batches  # The batch size.

    # Initialize the auxiliary deep generative model.
    model = ADGMSSL(n_x=n_x, n_a=100, n_z=100, n_y=10, a_hidden=[500, 500],
                    z_hidden=[500, 500], xhat_hidden=[500, 500], y_hidden=[500, 500],
                    trans_func=rectify, x_dist='bernoulli')

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(*mnist_data)
    # Update the default function arguments.
    train_args['inputs']['batchsize'] = bs
    train_args['inputs']['batchsize_labeled'] = n_samples
    train_args['inputs']['beta'] = 0.01 * n
    train_args['inputs']['learningrate'] = 3e-4
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999
    train_args['inputs']['samples'] = 10  # If running on a CPU, set the number of samples to 1.
    test_args['inputs']['samples'] = 1
    validate_args['inputs']['samples'] = 1

    # Evaluate the approximated classification error with 100 MC samples for a good estimate.
    def error_evaluation(*args):
        mean_evals = model.get_output(mnist_data[1][0], 100)
        t_class = np.argmax(mnist_data[1][1], axis=1)
        y_class = np.argmax(mean_evals, axis=1)
        missclass = (np.sum(y_class != t_class, dtype='float32') / len(y_class)) * 100.
        train.write_to_logger("test 100-samples: %0.2f%%." % missclass)

    # Define training loop. Output training evaluations every 1 epoch and the approximated good estimate
    # of the classification error every 10 epochs.
    train = TrainModel(model=model, anneal_lr=.75, anneal_lr_freq=200, output_freq=1,
                       pickle_f_custom_freq=10, f_custom_eval=error_evaluation)
    train.add_initial_training_notes("Training the auxiliary deep generative model with %i labels." % n_labeled)
    train.train_model(f_train, train_args,
                      f_test, test_args,
                      f_validate, validate_args,
                      n_train_batches=n_batches,
                      n_epochs=3000)
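error_evaluation above reports the misclassification rate of class probabilities averaged over 100 Monte Carlo samples. Factored out of the framework, the same computation is a few lines of NumPy (a sketch; the helper name is made up):

import numpy as np

def misclassification_rate(mean_probs, targets_one_hot):
    # mean_probs: (n, n_classes) probabilities averaged over MC samples.
    # targets_one_hot: (n, n_classes) one-hot ground-truth labels.
    y_pred = np.argmax(mean_probs, axis=1)
    y_true = np.argmax(targets_one_hot, axis=1)
    return 100.0 * np.mean(y_pred != y_true)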
def main():
    add_pitch, add_roll, add_filter = False, False, True
    batch_size = 128
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter)
    n_train = train_set[0].shape[0]
    n_test = test_set[0].shape[0]

    n_train_batches = n_train // batch_size
    n_test_batches = n_test // batch_size
    n_valid_batches = n_test // batch_size

    print("n_train_batches: %d, n_test_batches: %d" %
          (n_train_batches, n_test_batches))

    model = UFCNN(n_in=(sequence_length, n_features),
                  n_filters=64,
                  filter_size=7,
                  pool_sizes=0,
                  n_hidden=[512],
                  dropout_probability=0.5,
                  n_out=n_classes,
                  downsample=1,
                  trans_func=rectify,
                  out_func=softmax,
                  batch_size=batch_size)

    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set, valid_set)
    train_args['inputs']['batchsize'] = batch_size
    train_args['inputs']['learningrate'] = 0.002
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999

    test_args['inputs']['batchsize'] = batch_size
    validate_args['inputs']['batchsize'] = batch_size

    model.log += "\nDataset: %s" % name
    model.log += "\nAdd pitch: %s\nAdd roll: %s" % (add_pitch, add_roll)
    model.log += "\nAdd filter separated signals: %s" % add_filter
    model.log += "\nTransfer function: %s" % model.transf.__name__
    train = TrainModel(model=model,
                       anneal_lr=0.9,
                       anneal_lr_freq=50,
                       output_freq=1,
                       pickle_f_custom_freq=100,
                       f_custom_eval=None)
    train.pickle = True
    train.add_initial_training_notes("")
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_train_batches,
                      n_test_batches=n_test_batches,
                      n_valid_batches=n_valid_batches,
                      n_epochs=10000)
def run_adgmssl():
    n_samples, step = 50, 25
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        lowpass=10,
                        diff=False,
                        step=step,
                        normalize='segments',
                        comp_magnitude=True,
                        simple_labels=False,
                        common_labels=False)

    # datasets = [load_data.uci_hapt, load_data.idash, load_data.uci_mhealth]
    X, y, name, users, stats = load_data.uci_hapt()
    if stats is not None:
        X = np.concatenate((X, stats), axis=1)
    limited_labels = y < 18
    y = y[limited_labels]
    X = X[limited_labels]
    users = users[limited_labels]
    y_unique = np.unique(y)
    y = one_hot(y)

    # Reshape the input so each sample is a flat vector instead of (sequence x features)
    X = np.reshape(X, (X.shape[0], -1))

    # Split into test and training
    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    train_idx = (users != 10)  # & (users != 2) & (users != 3)
    test_idx = (users == 10)  # | (users == 2) | (users == 3)
    X_train, X_test = X[train_idx], X[test_idx]
    y_train, y_test = y[train_idx], y[test_idx]

    # Split training into labelled and unlabelled. Optionally stratified by the label
    X_train_labelled, X_train_unlabelled, y_train_labelled, y_train_unlabelled = \
        train_test_split(X_train, y_train, train_size=0.5, stratify=np.argmax(y_train, axis=1))

    # Add some data from test set to unlabelled training set
    X_train_unlabelled = np.concatenate((X_train_unlabelled, X_test))
    y_train_unlabelled = np.concatenate((y_train_unlabelled, y_test))

    # Combine in sets
    train_set_labelled = (X_train_labelled, y_train_labelled)
    train_set_unlabelled = (X_train_unlabelled, y_train_unlabelled)
    test_set = (X_test, y_test)
    print('Train unlabelled size: ', train_set_unlabelled[0].shape)
    print('Train labelled size: ', train_set_labelled[0].shape)
    print('Test size: ', test_set[0].shape)

    n_test = test_set[0].shape[0]
    n, n_x = train_set_unlabelled[0].shape
    n_labelled_samples, n_x = train_set_labelled[0].shape
    n_classes = y.shape[-1]
    bs = 64
    n_batches = n // bs

    # Initialize the auxiliary deep generative model.
    model = ADGM(n_x=n_x,
                 n_a=100,
                 n_z=100,
                 n_y=n_classes,
                 a_hidden=[500, 500],
                 z_hidden=[500, 500],
                 xhat_hidden=[500, 500],
                 y_hidden=[500, 500],
                 trans_func=rectify,
                 batchnorm=False,
                 x_dist='gaussian')

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set_unlabelled, train_set_labelled, test_set)
    # Update the default function arguments.
    train_args['inputs']['batchsize_unlabeled'] = bs
    train_args['inputs']['batchsize_labeled'] = n_labelled_samples
    train_args['inputs']['beta'] = 1  # 0.01 * n/n_labelled_samples
    train_args['inputs']['learningrate'] = 3e-4
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999
    train_args['inputs']['samples'] = 1  # If running on a CPU, set the number of samples to 1.
    test_args['inputs']['samples'] = 1

    # validate_args['inputs']['samples'] = 1

    # Evaluate the approximated classification error with 100 MC samples for a good estimate.
    def error_evaluation(model, path):
        mean_evals = model.get_output(test_set[0].astype(np.float32), 100)
        t_class = np.argmax(test_set[1].astype(np.float32), axis=1)
        y_class = np.argmax(mean_evals, axis=1)
        missclass = (np.sum(y_class != t_class, dtype='float32') /
                     len(y_class)) * 100.
        train.write_to_logger("test 100-samples misclassification: %0.2f%%." %
                              missclass)

        plt.clf()
        fig, axarr = plt.subplots(nrows=1, ncols=2)
        axarr[0].plot(t_class, color='red')
        axarr[0].plot(y_class, linestyle='dotted')

        cm = confusion_matrix(t_class, y_class)
        cm = cm.astype('float') / (cm.sum(axis=1)[:, np.newaxis] + 1e-6)
        axarr[1].imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        axarr[1].set_ylabel('True')
        axarr[1].set_xlabel('Predicted')

        fig.set_size_inches(18, 10)
        fig.savefig(path, dpi=100)
        plt.close(fig)

        # Plot reconstruction
        plt.clf()
        f, axarr = plt.subplots(nrows=len(y_unique), ncols=1)
        for idx, y_l in enumerate(y_unique):
            act_idx = np.argmax(y_test, axis=1) == y_l
            test_act = test_set[0][act_idx]
            test_y = test_set[1][act_idx]

            z = model.fz(test_act, test_y, 1).eval()
            x_hat = model.f_xhat(z, test_y, 1).eval()

            axarr[idx].plot(test_act[0], color='red')
            axarr[idx].plot(x_hat[0], color='blue', linestyle='dotted')

        f.set_size_inches(12, 20)
        f.savefig(path.replace('.png', '_fit.png'), dpi=100)
        plt.close(f)

    # Define training loop. Output training evaluations every 1 epoch and the approximated good estimate
    # of the classification error every 10 epochs.
    train = TrainModel(model=model,
                       anneal_lr=.75,
                       anneal_lr_freq=100,
                       output_freq=1,
                       pickle_f_custom_freq=50,
                       f_custom_eval=error_evaluation)

    train.add_initial_training_notes(
        "Training the auxiliary deep generative model with %i labels." %
        n_labelled_samples)
    train.write_to_logger(
        "Using reduced HAPT dataset with Walking, Stairs, Inactive classes")
    train.write_to_logger("Normalizing: %s" % load_data.normalize)
    train.write_to_logger("Simple labels: %s" % load_data.simple_labels)
    train.write_to_logger("Common labels: %s" % load_data.common_labels)
    train.write_to_logger("Sequence length: %d" % load_data.n_samples)
    train.write_to_logger("Step: %d" % load_data.step)
    train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                          (load_data.add_pitch, load_data.add_roll))
    train.write_to_logger("Only magnitude: %s" % load_data.comp_magnitude)
    train.write_to_logger("Lowpass: %s" % str(load_data.lowpass))
    train.write_to_logger("Add filter separated signals: %s" %
                          load_data.add_filter)
    train.write_to_logger("Differentiate: %s" % load_data.differentiate)
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_batches,
                      n_epochs=1000)
def main():
    n_samples, step = 200, 200
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        lowpass=None,
                        step=step,
                        normalize='segments',
                        comp_magnitude=True,
                        simple_labels=False,
                        common_labels=False)

    X, y, name, users, stats = load_data.uci_hapt()
    users = ['%s%02d' % (name, user) for user in users]
    limited_labels = y < 20
    y = y[limited_labels]
    X = X[limited_labels].astype('float32')
    users = np.char.asarray(users)[limited_labels]
    y_unique = np.unique(y)

    cv = StratifiedShuffleSplit(y, n_iter=1, test_size=0.1, random_state=0)
    for (train_index, test_index) in cv:
        x_train, x_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
    n_win, n_samples, n_features = x_train.shape

    train_set = (x_train, y_train)
    test_set = (x_test, y_test)
    print('Train size: ', train_set[0].shape)
    print('Test size: ', test_set[0].shape)

    n_train = train_set[0].shape[0]
    n_test = test_set[0].shape[0]
    batch_size = 64

    n_test_batches = n_test // batch_size
    n_train_batches = n_train // batch_size

    model = FCAE(n_in=(int(n_samples), int(n_features)),
                 filters=[256, 128, 32],
                 pool_sizes=[0],
                 n_hidden=[0],
                 n_out=0,
                 trans_func=rectify,
                 stats=0)

    # Build model
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set, None)

    def f_custom(model, path):
        plt.clf()
        f, axarr = plt.subplots(nrows=len(y_unique), ncols=1)

        for idx, y_l in enumerate(y_unique):
            act_idx = y_test == y_l
            test_act = test_set[0][act_idx]
            out = model.get_output(test_act).eval()

            axarr[idx].plot(test_act[0], color='red')
            axarr[idx].plot(out[0], color='blue', linestyle='dotted')

        f.set_size_inches(12, 20)
        f.savefig(path, dpi=100)
        plt.close(f)

    train = TrainModel(model=model,
                       anneal_lr=0.75,
                       anneal_lr_freq=100,
                       output_freq=1,
                       pickle_f_custom_freq=100,
                       f_custom_eval=f_custom)
    train.pickle = False

    train.write_to_logger("Normalizing: %s" % load_data.normalize)
    train.write_to_logger("Simple labels: %s" % load_data.simple_labels)
    train.write_to_logger("Common labels: %s" % load_data.common_labels)
    train.write_to_logger("Step: %d" % load_data.step)
    train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                          (load_data.add_pitch, load_data.add_roll))
    train.write_to_logger("Only magnitude: %s" % load_data.comp_magnitude)
    train.write_to_logger("Lowpass: %s" % str(load_data.lowpass))
    train.write_to_logger("Add filter separated signals: %s" %
                          load_data.add_filter)

    test_args['inputs']['batchsize'] = batch_size
    train_args['inputs']['batchsize'] = batch_size
    train_args['inputs']['learningrate'] = 0.003
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 1e-6
    validate_args['inputs']['batchsize'] = batch_size

    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_train_batches,
                      n_test_batches=n_test_batches,
                      n_epochs=2000)
Example 8
def main():
    add_pitch, add_roll, add_filter = True, True, True
    n_samples, step = 200, 50
    shuffle = True
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    # The data is structured as (samples, sequence, features), but to use the convolutional RNN
    # properly we need longer sequences, so consecutive windows are concatenated along the time axis.
    factor = 5
    sequence_length *= factor

    n_train = train_set[0].shape[0] // factor
    print("Resizing train set from %d to %d" %
          (train_set[0].shape[0], n_train * factor))
    train_set = (np.reshape(train_set[0][:factor * n_train],
                            (n_train, sequence_length, n_features)),
                 np.reshape(train_set[1][:factor * n_train],
                            (n_train, factor, n_classes)))

    n_test = test_set[0].shape[0] // factor
    print("Resizing test set from %d to %d" %
          (test_set[0].shape[0], n_test * factor))
    test_set = (np.reshape(test_set[0][:factor * n_test],
                           (n_test, sequence_length, n_features)),
                np.reshape(test_set[1][:factor * n_test],
                           (n_test, factor, n_classes)))
    valid_set = test_set

    n_train = train_set[0].shape[0]
    n_test = test_set[0].shape[0]
    n_valid = valid_set[0].shape[0]

    n_train_batches = n_train // batch_size
    n_test_batches = n_test // batch_size
    n_valid_batches = n_valid // batch_size

    print("n_train_batches: %d, n_test_batches: %d, n_valid_batches: %d" %
          (n_train_batches, n_test_batches, n_valid_batches))

    n_conv = 1
    model = RCL_RNN(n_in=(sequence_length, n_features),
                    n_filters=[64] * n_conv,
                    filter_sizes=[3] * n_conv,
                    pool_sizes=[2] * n_conv,
                    n_hidden=[100],
                    conv_dropout=0.4,
                    rcl=[3, 3, 3, 3, 3],
                    rcl_dropout=0.4,
                    dropout_probability=0.5,
                    n_out=n_classes,
                    downsample=1,
                    trans_func=rectify,
                    out_func=softmax,
                    batch_size=batch_size,
                    factor=factor)

    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set, valid_set)
    train_args['inputs']['batchsize'] = batch_size
    train_args['inputs']['learningrate'] = 0.004
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 1e-6

    test_args['inputs']['batchsize'] = batch_size
    validate_args['inputs']['batchsize'] = batch_size

    model.log += "\nDataset: %s" % name
    model.log += "\nTraining samples: %d" % n_train
    model.log += "\nTest samples: %d" % n_test
    model.log += "\nSequence length: %d" % (sequence_length / factor)
    model.log += "\nTime steps: %d" % factor
    model.log += "\nStep: %d" % step
    model.log += "\nShuffle: %s" % shuffle
    model.log += "\nAdd pitch: %s\nAdd roll: %s" % (add_pitch, add_roll)
    model.log += "\nAdd filter separated signals: %s" % add_filter
    model.log += "\nTransfer function: %s" % model.transf
    train = TrainModel(model=model,
                       anneal_lr=0.75,
                       anneal_lr_freq=50,
                       output_freq=1,
                       pickle_f_custom_freq=100,
                       f_custom_eval=None)
    train.pickle = True
    train.add_initial_training_notes("")
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_train_batches,
                      n_test_batches=n_test_batches,
                      n_valid_batches=n_valid_batches,
                      n_epochs=2000)
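The factor-of-5 resizing in the example above drops the last few windows so the count divides evenly, then reshapes the remainder so each new sample concatenates `factor` consecutive windows along the time axis. A minimal NumPy sketch of that reshape with made-up shapes:

import numpy as np

factor = 5
windows = np.zeros((23, 200, 6))       # (samples, sequence, features)
n = windows.shape[0] // factor         # how many longer samples fit
longer = np.reshape(windows[:factor * n],
                    (n, factor * windows.shape[1], windows.shape[2]))
print(longer.shape)                    # (4, 1000, 6)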
def run_adgmssl_mnist():
    """
    Train an auxiliary deep generative model on the mnist dataset with 100 evenly distributed labels.
    """
    n_labeled = 100  # The total number of labeled data points.
    n_samples = 100  # The number of sampled labeled data points for each batch.
    n_batches = 600  # The number of batches.
    mnist_data = mnist.load_semi_supervised(n_batches=n_batches,
                                            n_labeled=n_labeled,
                                            n_samples=n_samples,
                                            filter_std=0.0,
                                            seed=123456,
                                            train_valid_combine=True)

    n, n_x = mnist_data[0][0].shape  # Datapoints in the dataset, input features.
    bs = n // n_batches  # The batch size.

    # Initialize the auxiliary deep generative model.
    model = ADGMSSL(n_x=n_x,
                    n_a=100,
                    n_z=100,
                    n_y=10,
                    a_hidden=[500, 500],
                    z_hidden=[500, 500],
                    xhat_hidden=[500, 500],
                    y_hidden=[500, 500],
                    trans_func=rectify,
                    x_dist='bernoulli')

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        *mnist_data)
    # Update the default function arguments.
    train_args['inputs']['batchsize'] = bs
    train_args['inputs']['batchsize_labeled'] = n_samples
    train_args['inputs']['beta'] = 0.01 * n
    train_args['inputs']['learningrate'] = 3e-4
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999
    train_args['inputs']['samples'] = 10  # If running on a CPU, set the number of samples to 1.
    test_args['inputs']['samples'] = 1
    validate_args['inputs']['samples'] = 1

    # Evaluate the approximated classification error with 100 MC samples for a good estimate.
    def error_evaluation(*args):
        mean_evals = model.get_output(mnist_data[1][0], 100)
        t_class = np.argmax(mnist_data[1][1], axis=1)
        y_class = np.argmax(mean_evals, axis=1)
        missclass = (np.sum(y_class != t_class, dtype='float32') /
                     len(y_class)) * 100.
        train.write_to_logger("test 100-samples: %0.2f%%." % missclass)

    # Define training loop. Output training evaluations every 1 epoch and the approximated good estimate
    # of the classification error every 10 epochs.
    train = TrainModel(model=model,
                       anneal_lr=.75,
                       anneal_lr_freq=200,
                       output_freq=1,
                       pickle_f_custom_freq=10,
                       f_custom_eval=error_evaluation)
    train.add_initial_training_notes(
        "Training the auxiliary deep generative model with %i labels." %
        n_labeled)
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_batches,
                      n_epochs=3000)
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 200, 200
    load_data = LoadHAR(add_pitch=add_pitch,
                        add_roll=add_roll,
                        add_filter=add_filter,
                        n_samples=n_samples,
                        step=step)
    batch_size = 64

    # Define datasets and load iteratively
    datasets = [
        load_data.idash, load_data.wisdm1, load_data.uci_mhealth,
        load_data.uci_hapt
    ]
    X, y, name, users = datasets[0]()
    users = ['%s_%02d' % (name, user) for user in users]
    for dataset in datasets[1:]:
        X_tmp, y_tmp, name_tmp, users_tmp = dataset()
        X = np.concatenate((X, X_tmp))
        y = np.concatenate((y, y_tmp))
        for user in users_tmp:
            users.append('%s_%02d' % (name_tmp, user))
        name += '_' + name_tmp
    users = np.array(users)

    print('Users: %d' % len(np.unique(users)))
    print(X.shape)

    n_windows, sequence_length, n_features = X.shape
    y = one_hot(y, n_classes=len(ACTIVITY_MAP))
    n_classes = y.shape[-1]

    # Create a time-string for our cv run
    d = str(
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S'))
    cv = LeaveOneLabelOut(users)

    user_idx = 0
    user_names = np.unique(users)
    user = None
    if user is not None:
        train_idx = users != user
        test_idx = users == user
        cv = ((train_idx, test_idx), )

    for train_index, test_index in cv:
        user = user_names[user_idx]
        user_idx += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # Scale data using training data
        scaler = StandardScaler().fit(X_train.reshape((-1, n_features)))
        n_windows = X_train.shape[0]
        X_train = scaler.transform(X_train.reshape((-1, n_features))).reshape(
            (n_windows, sequence_length, n_features))
        n_windows = X_test.shape[0]
        X_test = scaler.transform(X_test.reshape((-1, n_features))).reshape(
            (n_windows, sequence_length, n_features))

        print('Xtrain mean: %f\tstd: %f' % (X_train.mean(), X_train.std()))
        print('Xtest mean: %f\tstd: %f' % (X_test.mean(), X_test.std()))
        train_set = (X_train, y_train)
        test_set = (X_test, y_test)
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]

        n_test_batches = 1
        n_valid_batches = None
        batch_size = n_test
        n_train_batches = n_train // batch_size
        print("n_train_batches: %d, n_test_batches: %d" %
              (n_train_batches, n_test_batches))

        model = ResNet(n_in=(sequence_length, n_features),
                       n_filters=[32, 32, 64, 64],
                       pool_sizes=[2, 1, 2, 1],
                       n_hidden=[512],
                       conv_dropout=0.5,
                       dropout=0.5,
                       n_out=n_classes,
                       trans_func=rectify,
                       out_func=softmax,
                       batch_size=batch_size,
                       batch_norm=True)

        if len(cv) > 1:
            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, d, user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        # Build model
        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, None)
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.001
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 1e-6

        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size

        # Define confusion matrix
        cfm = ConfusionMatrix(n_classes=n_classes,
                              class_names=list(ACTIVITY_MAP.values()))
        print(n_classes, len(list(ACTIVITY_MAP.values())))

        def f_custom(model, path):
            mean_evals = model.get_output(X_test).eval()
            t_class = np.argmax(y_test, axis=1)
            y_class = np.argmax(mean_evals, axis=1)
            # cfm.batchAdd(t_class, y_class)
            # print(cfm)

            cm = confusion_matrix(t_class, y_class)
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            plt.clf()
            plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
            plt.colorbar()
            plt.ylabel('True')
            plt.xlabel('Predicted')
            plt.savefig(path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=100,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=f_custom)
        train.pickle = False
        train.add_initial_training_notes(
            "Standardizing data after adding features"
            "\nUsing striding instead of pooling")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %s" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" % sequence_length)
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Shuffle: %s" % False)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)
        train.write_to_logger("Network Architecture ---------------")
        for layer in get_all_layers(model.model):
            # print(layer.name, ": ", get_output_shape(layer))
            train.write_to_logger(layer.name + ": " +
                                  str(get_output_shape(layer)))

        train.train_model(f_train,
                          train_args,
                          f_test,
                          test_args,
                          f_validate,
                          validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
def run_rae_har():
    seed = np.random.randint(1, 2147462579)

    # def sinus_seq(period, samples, length):
    #     X = np.linspace(-np.pi*(samples/period), np.pi*(samples/period), samples)
    #     X = np.reshape(np.sin(X), (-1, length, 1))
    #     X += np.random.randn(*X.shape)*0.1
    #     # X = (X - np.min(X))/(np.max(X) - np.min(X))
    #     return X, np.ones((samples/length, 1))
    #
    # X1, y1 = sinus_seq(40, 100000, 50)
    # X2, y2 = sinus_seq(20, 40000, 50)
    #
    # X = np.concatenate((X1, X2)).astype('float32')
    # y = np.concatenate((y1*0, y2*1), axis=0).astype('int')
    #
    # dim_samples, dim_sequence, dim_features = X.shape
    # X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)

    n_samples, step = 50, 25
    load_data = LoadHAR(add_pitch=False, add_roll=False, add_filter=False, n_samples=n_samples, diff=False,
                        step=step, normalize='segments', comp_magnitude=False, simple_labels=False, common_labels=False)
    X, y, name, users, stats = load_data.uci_hapt()

    limited_labels = y < 18
    y = y[limited_labels]
    X = X[limited_labels].astype(np.float32)
    users = users[limited_labels]

    # Compress labels
    for idx, label in enumerate(np.unique(y)):
        if not np.equal(idx, label):
            y[y == label] = idx

    y_unique = np.unique(y)
    y = one_hot(y, len(y_unique))
    num_classes = len(y_unique)

    # Split into train and test stratified by users
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=np.argmax(y, axis=1), random_state=1)

    # Combine in sets
    train_set = (X_train, y_train)
    test_set = (X_test, y_test)
    print('Train size: ', train_set[0].shape)
    print('Test size: ', test_set[0].shape)

    n, n_l, n_c = train_set[0].shape  # Data points, sequence length, channels.
    n_batches = n // 100  # The number of batches.
    bs = n // n_batches  # The batch size.

    # Initialize the auxiliary deep generative model.
    model = RAE(n_c=int(n_c), n_l=int(n_l), px_hid=[256], enc_rnn=256, dec_rnn=256,
                nonlinearity=rectify, batchnorm=False)

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(train_set, test_set)
    # Update the default function arguments.
    train_args['inputs']['batchsize'] = bs
    train_args['inputs']['learningrate'] = 1e-3
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999

    def custom_evaluation(model, path):
        # Get model output
        x_ = test_set[0]
        y_ = test_set[1]
        xhat = model.f_px(x_)

        # reduce y to integers
        y_ = np.argmax(y_, axis=1)

        plt.clf()
        f, axarr = plt.subplots(nrows=num_classes, ncols=n_c)
        for idx, y_l in enumerate(y_unique):
            l_idx = y_ == y_l

            for c in range(n_c):
                axarr[idx, c].plot(x_[l_idx, :, c][:2].reshape(-1), color='red')
                axarr[idx, c].plot(xhat[l_idx, :, c][:2].reshape(-1), color='blue', linestyle='dotted')

        f.set_size_inches(12, 3*num_classes)
        f.savefig(path, dpi=100, format='png')
        plt.close(f)

    # Define training loop. Output training evaluations every 1 epoch
    # and the custom evaluation method every 10 epochs.
    train = TrainModel(model=model, output_freq=1, pickle_f_custom_freq=10, f_custom_eval=custom_evaluation)
    train.add_initial_training_notes("Training the rae with bn %s. seed %i." % (str(model.batchnorm), seed))
    train.train_model(f_train, train_args,
                      f_test, test_args,
                      f_validate, validate_args,
                      n_train_batches=n_batches,
                      n_epochs=10000,
                      anneal=[("learningrate", 100, 0.75, 3e-5)])
def main():
    n_samples, step = 50, 25
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        diff=False,
                        step=step,
                        normalize='segments',
                        comp_magnitude=False,
                        simple_labels=False,
                        common_labels=False)
    X, y, name, users, stats = load_data.uci_hapt()

    limited_labels = y < 18
    y = y[limited_labels]
    X = X[limited_labels].astype(np.float32)
    users = users[limited_labels]

    # Compress labels
    for idx, label in enumerate(np.unique(y)):
        if not np.equal(idx, label):
            y[y == label] = idx

    y_unique = np.unique(y)
    y = one_hot(y, len(y_unique))
    num_classes = len(y_unique)

    # Split into train and test stratified by users
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        stratify=np.argmax(
                                                            y, axis=1),
                                                        random_state=1)

    # Combine in sets
    train_set = (X_train, y_train)
    test_set = (X_test, y_test)
    print('Train size: ', train_set[0].shape)
    print('Test size: ', test_set[0].shape)

    n, n_l, n_c = train_set[0].shape  # Data points, sequence length, channels.
    n_batches = n // 100  # The number of batches.
    bs = n // n_batches  # The batch size.

    model = CAE(n_in=(int(n_l), int(n_c)),
                filters=[8, 16, 32, 64, 128],
                n_hidden=128,
                n_out=n_samples,
                trans_func=leaky_rectify,
                stats=0)

    # Copy script to output folder
    copy_script(__file__, model)

    # Build model
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set)

    def custom_evaluation(model, path):
        # Get model output
        x_ = test_set[0]
        y_ = test_set[1]
        xhat = model.f_px(x_)

        # reduce y to integers
        y_ = np.argmax(y_, axis=1)

        plt.clf()
        f, axarr = plt.subplots(nrows=num_classes, ncols=n_c)
        for idx, y_l in enumerate(y_unique):
            l_idx = y_ == y_l

            for c in range(n_c):
                axarr[idx, c].plot(x_[l_idx, :, c][:2].reshape(-1),
                                   color='red')
                axarr[idx, c].plot(xhat[l_idx, :, c][:2].reshape(-1),
                                   color='blue',
                                   linestyle='dotted')

        f.set_size_inches(12, 3 * num_classes)
        f.savefig(path, dpi=100, format='png')
        plt.close(f)

    train = TrainModel(model=model,
                       output_freq=1,
                       pickle_f_custom_freq=10,
                       f_custom_eval=custom_evaluation)
    train.pickle = False

    train.write_to_logger("Normalizing: %s" % load_data.normalize)
    train.write_to_logger("Simple labels: %s" % load_data.simple_labels)
    train.write_to_logger("Common labels: %s" % load_data.common_labels)
    train.write_to_logger("Sequence length: %d" % load_data.n_samples)
    train.write_to_logger("Step: %d" % load_data.step)
    train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                          (load_data.add_pitch, load_data.add_roll))
    train.write_to_logger("Only magnitude: %s" % load_data.comp_magnitude)
    train.write_to_logger("Lowpass: %s" % str(load_data.lowpass))
    train.write_to_logger("Add filter separated signals: %s" %
                          load_data.add_filter)
    train.write_to_logger("Differentiate: %s" % load_data.differentiate)

    train_args['inputs']['batchsize'] = bs
    train_args['inputs']['learningrate'] = 1e-3
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999

    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=int(n_batches),
                      n_epochs=2000,
                      anneal=[("learningrate", 100, 0.75, 3e-5)])
Example 13
def main():
    n_samples, step = 40, 20
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        step=step,
                        normalize='channels',
                        comp_magnitude=False,
                        simple_labels=True,
                        common_labels=False)

    conf = ModelConfiguration()
    conf.load_datasets([load_data.uci_hapt], label_limit=6)

    user_idx = -1
    user = None  # 'UCI HAPT10'
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
        print('Testing user: %s' % user)
    else:
        # Cross validate on users
        conf.cv = LeavePLabelOut(conf.users, p=1)

        # Divide into K folds balanced on labels
        # conf.cv = StratifiedKFold(conf.users, n_folds=10)

        # And shuffle
        # conf.cv = StratifiedShuffleSplit(np.argmax(conf.y, axis=1), n_iter=1, test_size=0.1, random_state=None)

        # Pure shuffle
        # conf.cv = ShuffleSplit(conf.y.shape[0], n_iter=2, test_size=0.1)

    for train_index, test_index in conf.cv:
        conf.user = user

        model = tconvRNN(n_in=(n_samples, conf.n_features),
                         n_filters=[64, 64, 64, 64],
                         filter_sizes=[5] * 4,
                         pool_sizes=[0] * 4,
                         n_hidden=[128, 128],
                         conv_dropout=0.3,
                         rnn_in_dropout=0.0,
                         rnn_hid_dropout=0.0,
                         output_dropout=0.5,
                         n_out=conf.n_classes,
                         trans_func=leaky_rectify,
                         out_func=softmax,
                         stats=conf.stats)

        if len(conf.cv) > 1:
            user_idx += 1
            if len(conf.cv) == len(conf.user_names):
                conf.user = conf.user_names[user_idx]
            else:
                conf.user = conf.name + ' K_%d' % user_idx

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        scriptpath = path.realpath(__file__)
        filename = path.basename(scriptpath)
        print(scriptpath, model.root_path, filename)
        shutil.copy(scriptpath, model.root_path + '/' + filename)

        train = TrainModel(model=model,
                           anneal_lr=0.9,
                           anneal_lr_freq=1,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False
        train.write_to_logger(
            "Cross-validating with LeavePLabelOut (p=1) over users"
        )

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=500,
                 model=model,
                 train=train,
                 load_data=load_data,
                 batch_size=100)
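LeavePLabelOut(conf.users, p=1) comes from the old sklearn.cross_validation API, where the CV object is iterated directly as above. In current scikit-learn the equivalent leave-one-user-out split is LeaveOneGroupOut; a hedged sketch of the modern replacement with made-up data:

import numpy as np
from sklearn.model_selection import LeaveOneGroupOut

X = np.arange(12).reshape(6, 2)
y = np.array([0, 1, 0, 1, 0, 1])
users = np.array(['u1', 'u1', 'u2', 'u2', 'u3', 'u3'])

for train_index, test_index in LeaveOneGroupOut().split(X, y, groups=users):
    print('held-out user:', users[test_index][0], 'test windows:', len(test_index))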
def run_cnn():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 200, 100
    shuffle = False
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name, users = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    X = np.concatenate((train_set[0], test_set[0]), axis=0)
    y = np.concatenate((train_set[1], test_set[1]), axis=0)

    d = str(
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S'))
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]

        n_train_batches = n_train // batch_size
        n_test_batches = n_test // batch_size
        n_valid_batches = n_test // batch_size

        print("n_train_batches: %d, n_test_batches: %d" %
              (n_train_batches, n_test_batches))
        model = CNN(n_in=(sequence_length, n_features),
                    n_filters=[64, 64, 64, 64],
                    filter_sizes=[5, 5, 3, 3],
                    pool_sizes=[2, 2, 2, 2],
                    conv_dropout=0.2,
                    n_hidden=[512],
                    dropout_probability=0.5,
                    n_out=n_classes,
                    downsample=0,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax,
                    batch_size=batch_size,
                    batch_norm=False)
        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, valid_set)
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.003
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 1e-6

        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=100,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = True
        train.add_initial_training_notes("")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" % sequence_length)
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)

        train.train_model(f_train,
                          train_args,
                          f_test,
                          test_args,
                          f_validate,
                          validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
             bl_dropout=0.5,
             slicers=slicers,
             bn=True)

# Copy model to output folder
copy_script(__file__, model)

f_train, f_test, f_validate, train_args, test_args, validate_args, predict = model.build_model(
    train_set, test_set, weights=class_weights)
train_args['inputs']['batchsize'] = batch_size
train_args['inputs']['learningrate'] = 0.004
# test_args['inputs']['batchsize'] = batch_size

try:
    train = TrainModel(model=model,
                       output_freq=1,
                       pickle_f_custom_freq=10,
                       f_custom_eval=None)
    train.train_model(
        f_train,
        train_args,
        f_test,
        test_args,
        f_validate,
        validate_args,
        n_train_batches=n_train_batches,
        n_test_batches=1,
        n_epochs=200,
        # Any symbolic model variable can be annealed during
        # training with a tuple of (var_name, every, scale constant, minimum value).
        anneal=[("learningrate", 10, 0.75, 3e-4)])
def run_vrae_har():
    seed = np.random.randint(1, 2147462579)

    # def sinus_seq(period, samples, length):
    #     X = np.linspace(-np.pi*(samples/period), np.pi*(samples/period), samples)
    #     X = np.reshape(np.sin(X), (-1, length, 1))
    #     X += np.random.randn(*X.shape)*0.1
    #     # X = (X - np.min(X))/(np.max(X) - np.min(X))
    #     return X, np.ones((samples/length, 1))
    #
    # X1, y1 = sinus_seq(20, 100000, 40)
    # X2, y2 = sinus_seq(12, 100000, 40)
    # X3, y3 = sinus_seq(8, 100000, 40)
    #
    # X = np.concatenate((X1, X2, X3)).astype('float32')
    # y = np.concatenate((y1*0, y2*1, y3*2), axis=0).astype('int')[:, 0]
    # y_unique = np.unique(list(y))
    #
    # y = one_hot(y, len(y_unique))
    #
    # dim_samples, dim_sequence, dim_features = X.shape
    # X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)

    ##
    # HAR data
    # X, y, users, stats = har.load()

    n_samples, step = 50, 25
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        diff=False,
                        step=step,
                        normalize='segments',
                        comp_magnitude=False,
                        simple_labels=False,
                        common_labels=False)
    X, y, name, users, stats = load_data.uci_hapt()

    limited_labels = y < 18
    y = y[limited_labels]
    X = X[limited_labels].astype(np.float32)
    users = users[limited_labels]

    # X -= X.mean(axis=0)

    # Compress labels
    for idx, label in enumerate(np.unique(y)):
        if not np.equal(idx, label):
            y[y == label] = idx

    y_unique = np.unique(y)
    y = one_hot(y, len(y_unique))

    dim_samples, dim_sequence, n_c = X.shape
    num_classes = len(y_unique)

    # Split into train and test stratified by users
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        stratify=np.argmax(
                                                            y, axis=1),
                                                        random_state=1)
    ##

    # Combine in sets
    train_set = (X_train, y_train)
    test_set = (X_test, y_test)
    print('Train size: ', train_set[0].shape)
    print('Test size: ', test_set[0].shape)

    n, n_l, n_c = train_set[0].shape  # Data points, sequence length, channels.
    n_batches = n // 100  # The number of batches.
    bs = n // n_batches  # The batch size.

    # Initialize the auxiliary deep generative model.
    model = RVAE(n_c=n_c,
                 n_z=256,
                 qz_hid=[256, 256],
                 px_hid=[256, 256],
                 enc_rnn=256,
                 dec_rnn=256,
                 n_l=n_l,
                 nonlinearity=rectify,
                 batchnorm=False,
                 x_dist='gaussian',
                 px_nonlinearity=None)

    # Copy script to output folder
    copy_script(__file__, model)

    # Create output path for PCA plot
    makedirs(model.get_root_path() + '/training custom evals/pca')

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set)
    # Update the default function arguments.
    train_args['inputs']['batchsize'] = bs
    train_args['inputs']['learningrate'] = 1e-3
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999
    train_args['inputs']['samples'] = 1
    train_args['inputs']['warmup'] = 1.1

    def custom_evaluation(model, path):
        # Get model output
        x_ = test_set[0]
        y_ = test_set[1]

        qz = model.f_qz(x_, 1)
        px = model.f_px(x_, qz, 1)
        px_mu = model.f_mu(x_, qz, 1)
        px_var = np.exp(model.f_var(x_, qz, 1))

        # reduce y to integers
        y_ = np.argmax(y_, axis=1)

        plt.clf()
        f, axarr = plt.subplots(nrows=num_classes, ncols=n_c * 2)
        for idx, y_l in enumerate(y_unique):
            l_idx = y_ == y_l

            for c in range(n_c):
                axarr[idx, c * 2].plot(x_[l_idx, :, c][:2].reshape(-1))
                axarr[idx, c * 2].plot(px[l_idx, :, c][:2].reshape(-1),
                                       linestyle='dotted')
                axarr[idx, c * 2 + 1].plot(px_mu[l_idx, :, c][:2].reshape(-1),
                                           label="mu")
                axarr[idx, c * 2 + 1].plot(px_var[l_idx, :, c][:2].reshape(-1),
                                           label="var")
            plt.legend()

        f.set_size_inches(20, num_classes * 3)
        f.savefig(path, dpi=100, format='png')
        plt.close(f)

        # Plot PCA decomp
        z_pca = PCA(n_components=2).fit_transform(qz)

        palette = itertools.cycle(sns.color_palette())
        plt.clf()
        plt.figure()
        for i in set(y_unique):
            plt.scatter(z_pca[y_ == i, 0],
                        z_pca[y_ == i, 1],
                        c=next(palette),
                        alpha=0.8,
                        label=str(i))
        plt.legend()
        plt.title('PCA of Z')
        plt.savefig(path.replace('custom_eval_plot', 'pca/z'))
        plt.close()

    def anneal_func(x):
        # Linearly decrease the annealed variable (here: warmup) by 0.01 per step.
        return x - 0.01

    # Define training loop. Output training evaluations every 1 epoch
    # and the custom evaluation method every 10 epochs.
    train = TrainModel(model=model,
                       output_freq=1,
                       pickle_f_custom_freq=10,
                       f_custom_eval=custom_evaluation)
    train.add_initial_training_notes("Training the vrae with bn %s. seed %i." %
                                     (str(model.batchnorm), seed))
    train.train_model(
        f_train,
        train_args,
        f_test,
        test_args,
        f_validate,
        validate_args,
        n_train_batches=n_batches,
        n_epochs=1000,
        # Any symbolic model variable can be annealed during
        # training with a tuple of (var_name, every, scale constant, minimum value).
        anneal=[("learningrate", 100, 0.75, 3e-5),
                ("warmup", 1, anneal_func, 0.1)])

    image_to_movie.create(model.get_root_path() + '/training custom evals',
                          rate=3)
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 100, 50
    shuffle = False
    batch_size = 64
    (train_set, test_set, valid_set, (sequence_length, n_features, n_classes)), name, users = \
        ld.LoadHAR().uci_hapt(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                              n_samples=n_samples, step=step, shuffle=shuffle)

    # The data is structured as (samples, sequence, features), but to properly use the
    # convolutional RNN we need a longer time dimension, so `factor` consecutive windows
    # are stacked into one sequence below.
    factor = 5
    sequence_length *= factor

    # Concat train and test data
    X = np.concatenate((train_set[0], test_set[0]), axis=0)
    y = np.concatenate((train_set[1], test_set[1]), axis=0)

    d = str(
        datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S'))
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)

        n_train = train_set[0].shape[0] // factor
        print("Resizing train set from %d to %d" %
              (train_set[0].shape[0], n_train * factor))
        train_set = (np.reshape(train_set[0][:factor * n_train],
                                (n_train, sequence_length, n_features)),
                     np.reshape(train_set[1][:factor * n_train],
                                (n_train, factor, n_classes)))

        n_test = test_set[0].shape[0] // factor
        print("Resizing test set from %d to %d" %
              (test_set[0].shape[0], n_test * factor))
        test_set = (np.reshape(test_set[0][:factor * n_test],
                               (n_test, sequence_length, n_features)),
                    np.reshape(test_set[1][:factor * n_test],
                               (n_test, factor, n_classes)))
        valid_set = test_set
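
        # With factor = 5 and n_samples = 100 this turns, e.g., 500 windows of shape
        # (500, 100, n_features) into 100 sequences of shape (100, 500, n_features),
        # while the 5 per-window label vectors are kept as (100, 5, n_classes).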

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]
        n_valid = valid_set[0].shape[0]

        n_test_batches = 1
        n_valid_batches = 1
        batch_size = n_test
        n_train_batches = n_train // batch_size

        print("n_train_batches: %d, n_test_batches: %d, n_valid_batches: %d" %
              (n_train_batches, n_test_batches, n_valid_batches))

        n_conv = 6
        model = conv_BRNN(n_in=(sequence_length, n_features),
                          n_filters=[64] * n_conv,
                          filter_sizes=[3] * n_conv,
                          pool_sizes=[1, 2, 1, 2, 2, 2],
                          n_hidden=[200],
                          conv_dropout=0.2,
                          dropout_probability=0.5,
                          n_out=n_classes,
                          downsample=1,
                          trans_func=rectify,
                          out_func=softmax,
                          batch_size=batch_size,
                          factor=factor)

        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
            train_set, test_set, valid_set)
        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.003
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 0.999

        train = TrainModel(model=model,
                           anneal_lr=0.9,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False
        train.add_initial_training_notes(
            "Standardizing data after adding features")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" %
                              (sequence_length / factor))
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Time steps: %d" % factor)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" %
                              (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)

        train.train_model(f_train=f_train,
                          train_args=train_args,
                          f_test=f_test,
                          test_args=test_args,
                          f_validate=f_validate,
                          validation_args=validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
Example no. 18
0
def main():
    add_pitch, add_roll, add_filter = False, False, True
    n_samples, step = 200, 200
    load_data = LoadHAR(add_pitch=add_pitch, add_roll=add_roll, add_filter=add_filter,
                        n_samples=n_samples, step=step)
    batch_size = 64
    X, y, name, users, stats = load_data.uci_hapt()

    # Derive the dimensions used below from the loaded data (assuming X has layout
    # (samples, sequence, features) and y is either one-hot or integer labels);
    # these names are not assigned elsewhere in this snippet.
    _, sequence_length, n_features = X.shape
    n_classes = y.shape[1] if y.ndim > 1 else len(np.unique(y))
    shuffle = False

    d = str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S'))
    lol = LeaveOneLabelOut(users)
    user = 0
    for train_index, test_index in lol:
        user += 1
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        train_set = (X_train, y_train)
        test_set = (X_test, y_test)
        valid_set = test_set

        n_train = train_set[0].shape[0]
        n_test = test_set[0].shape[0]

        n_test_batches = 1
        n_valid_batches = 1
        batch_size = n_test
        n_train_batches = n_train//batch_size
        print("n_train_batches: %d, n_test_batches: %d" % (n_train_batches, n_test_batches))

        # num_1x1, num_2x1_proj, reduce_3x1, num_3x1, reduce_5x1, num_5x1
        model = Incep(n_in=(sequence_length, n_features),
                      inception_layers=[(8, 8, 0, 8, 0, 8),
                                        (16, 8, 0, 16, 0, 8),
                                        (32, 16, 0, 32, 0, 16),
                                        (32, 16, 0, 32, 0, 16),
                                        (64, 32, 0, 64, 0, 32),
                                        (64, 32, 0, 64, 0, 32)],
                      pool_sizes=[2, 2, 0, 2, 0, 2],
                      n_hidden=512,
                      output_dropout=0.5,
                      inception_dropout=0.2,
                      n_out=n_classes,
                      trans_func=rectify,
                      out_func=softmax,
                      batch_size=batch_size,
                      batch_norm=False)
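
        # Reading one tuple against the field names in the comment above, e.g.
        # (16, 8, 0, 16, 0, 8): 16 1x1 filters, an 8-filter 2x1 projection,
        # no 3x1 reduction, 16 3x1 filters, no 5x1 reduction and 8 5x1 filters.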

        # Generate root path and edit
        root_path = model.get_root_path()
        model.root_path = "%s_cv_%s_%d" % (root_path, d, user)
        paths.path_exists(model.root_path)
        rmdir(root_path)

        # Build model
        f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(train_set,
                                                                                              test_set,
                                                                                              valid_set)
        train_args['inputs']['batchsize'] = batch_size
        train_args['inputs']['learningrate'] = 0.002
        train_args['inputs']['beta1'] = 0.9
        train_args['inputs']['beta2'] = 1e-6

        test_args['inputs']['batchsize'] = batch_size
        validate_args['inputs']['batchsize'] = batch_size

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False
        train.add_initial_training_notes("Standardizing data after adding features")
        train.write_to_logger("Dataset: %s" % name)
        train.write_to_logger("LOO user: %d" % user)
        train.write_to_logger("Training samples: %d" % n_train)
        train.write_to_logger("Test samples: %d" % n_test)
        train.write_to_logger("Sequence length: %d" % sequence_length)
        train.write_to_logger("Step: %d" % step)
        train.write_to_logger("Shuffle: %s" % shuffle)
        train.write_to_logger("Add pitch: %s\nAdd roll: %s" % (add_pitch, add_roll))
        train.write_to_logger("Add filter separated signals: %s" % add_filter)
        train.write_to_logger("Transfer function: %s" % model.transf)
        train.write_to_logger("Network Architecture ---------------")
        for layer in get_all_layers(model.model):
            # print(layer.name, ": ", get_output_shape(layer))
            train.write_to_logger(layer.name + ": " + str(get_output_shape(layer)))
        train.train_model(f_train, train_args,
                          f_test, test_args,
                          f_validate, validate_args,
                          n_train_batches=n_train_batches,
                          n_test_batches=n_test_batches,
                          n_valid_batches=n_valid_batches,
                          n_epochs=500)

        # Reset logging
        handlers = train.logger.handlers[:]
        for handler in handlers:
            handler.close()
            train.logger.removeHandler(handler)
        del train.logger
def run_vae():
    seed = np.random.randint(1, 2147462579)

    def sinus_seq(period, samples, length):
        X = np.linspace(-np.pi * (samples / period),
                        np.pi * (samples / period), samples)
        X = np.reshape(np.sin(X), (-1, length))
        X += np.random.randn(*X.shape) * 0.1
        # X = (X - np.min(X))/(np.max(X) - np.min(X))
        return X, np.ones((samples // length, 1))

    X1, y1 = sinus_seq(40, 100000, 50)
    X2, y2 = sinus_seq(20, 40000, 50)

    X = np.concatenate((X1, X2)).astype('float32')
    y = np.concatenate((y1 * 0, y2 * 1), axis=0).astype('int')
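
    # With these settings X1 is (100000 // 50, 50) = (2000, 50) and X2 is (800, 50),
    # so X ends up as (2800, 50) noisy sine windows with binary labels y of shape (2800, 1).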

    dim_samples, dim_features = X.shape
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)

    # X, y, users, stats = har.load()
    #
    # limited_labels = y < 5
    # y = y[limited_labels]
    # X = X[limited_labels]
    # users = users[limited_labels]
    #
    # # Compress labels
    # for idx, label in enumerate(np.unique(y)):
    #     if not np.equal(idx, label):
    #         y[y == label] = idx
    #
    # y_unique = np.unique(y)
    # y = one_hot(y, len(y_unique))
    #
    # dim_samples, dim_sequence, dim_features = X.shape
    # num_classes = len(y_unique)
    #
    # # Split into train and test stratified by users
    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=users)

    # Combine in sets
    train_set = (X_train, y_train)
    test_set = (X_test, y_test)
    print('Train size: ', train_set[0].shape)
    print('Test size: ', test_set[0].shape)

    n, n_x = train_set[0].shape  # Datapoints in the dataset, input features.
    n_batches = n // 100  # The number of batches.
    bs = n // n_batches  # The batchsize.

    # Initialize the auxiliary deep generative model.
    model = VAE(n_x=int(n_x),
                n_z=16,
                z_hidden=[16],
                xhat_hidden=[32],
                x_dist='gaussian')

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set)
    # Update the default function arguments.
    train_args['inputs']['batchsize'] = 100
    train_args['inputs']['learningrate'] = 1e-3
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999

    def custom_evaluation(model, path):
        plt.clf()
        f, axarr = plt.subplots(nrows=len(np.unique(y)), ncols=1)
        for idx, y_l in enumerate(np.unique(y)):
            act_idx = test_set[1] == y_l
            test_act = test_set[0][act_idx[:, 0]]

            z = model.f_qz(test_act, 1)
            xhat = model.f_px(z, 1)

            axarr[idx].plot(test_act[:3].reshape(-1, 1), color='red')
            axarr[idx].plot(xhat[:3].reshape(-1, 1),
                            color='blue',
                            linestyle='dotted')

        f.set_size_inches(8, 5)
        f.savefig(path, dpi=100, format='png')
        plt.close(f)

    # Define training loop. Output training evaluations every 1 epoch
    # and the custom evaluation method every 10 epochs.
    train = TrainModel(model=model,
                       output_freq=1,
                       pickle_f_custom_freq=100,
                       f_custom_eval=custom_evaluation)
    train.add_initial_training_notes("Training the rae with bn %s. seed %i." %
                                     (str(model.batchnorm), seed))
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_batches,
                      n_epochs=10000,
                      anneal=[("learningrate", 100, 0.75, 3e-5)])

# Evaluate the approximated classification error with 100 MC samples for a good estimate
def custom_evaluation(model, path):
    mean_evals = model.get_output(mnist_data[2][0], 100)
    t_class = np.argmax(mnist_data[2][1], axis=1)
    y_class = np.argmax(mean_evals, axis=1)
    missclass = (np.sum(y_class != t_class, dtype='float32') /
                 len(y_class)) * 100.
    train.write_to_logger("test 100-samples: %0.2f%%." % missclass)


# Define training loop. Output training evaluations every 1 epoch
# and the custom evaluation method every 10 epochs.
train = TrainModel(model=model,
                   output_freq=1,
                   pickle_f_custom_freq=10,
                   f_custom_eval=custom_evaluation)
train.add_initial_training_notes(
    "Training the skip deep generative model with %i labels. bn %s. seed %i." %
    (n_labeled, str(model.batchnorm), seed))
train.train_model(
    f_train,
    train_args,
    f_test,
    test_args,
    f_validate,
    validate_args,
    n_train_batches=n_batches,
    n_epochs=1000,
    # Any symbolic model variable can be annealed during
    # training with a tuple of (var_name, every, scale constant, minimum value).
    )
Example no. 21
0
def main():
    n_samples, step = 200, 50
    load_data = LoadHAR(add_pitch=True,
                        add_roll=True,
                        add_filter=True,
                        n_samples=n_samples,
                        step=step,
                        normalize='segments',
                        comp_magnitude=True,
                        simple_labels=True,
                        common_labels=True)

    conf = ModelConfiguration()
    conf.load_datasets([load_data.uci_hapt], label_limit=18)

    user_idx = -1
    user = None
    # Create a time-string for our cv run
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
    else:
        # conf.cv = LeaveOneLabelOut(conf.users)
        conf.cv = StratifiedShuffleSplit(np.argmax(conf.y, axis=1),
                                         n_iter=10,
                                         test_size=0.1,
                                         random_state=None)

    for train_index, test_index in conf.cv:
        conf.user = user

        n_conv = 1
        model = RCNN(n_in=(n_samples, conf.n_features),
                     n_filters=[32],
                     filter_sizes=[3] * n_conv,
                     pool_sizes=[2] * n_conv,
                     rcl=[2, 2, 2, 2],
                     rcl_dropout=0.5,
                     n_hidden=[512],
                     dropout_probability=0.5,
                     n_out=conf.n_classes,
                     ccf=False,
                     trans_func=rectify,
                     out_func=softmax,
                     batch_norm=True,
                     stats=conf.stats)

        if len(conf.cv) > 1:
            user_idx += 1
            if len(conf.cv) == len(conf.user_names):
                conf.user = conf.user_names[user_idx]
            else:
                conf.user = conf.name + ' K_%d' % user_idx

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        # Copy script to output folder
        scriptpath = path.realpath(__file__)
        filename = path.basename(scriptpath)
        shutil.copy(scriptpath, model.root_path + '/' + filename)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=300,
                 model=model,
                 train=train,
                 load_data=load_data,
                 batch_size=64)

def custom_evaluation(model, path):
    # Dump encoder
    model.save_encoder()

    # Get model output
    x_ = test_set[0]
    xhat = model.f_px(x_)

    idx = np.random.randint(0, x_.shape[0]-5)

    plt.clf()
    f, axarr = plt.subplots(nrows=1, ncols=1)
    axarr.plot(x_[idx:idx+5, :, :].reshape(-1, x_.shape[-1]), color='red')
    axarr.plot(xhat[idx:idx+5, :, :].reshape(-1, x_.shape[-1]), color='blue', linestyle='dotted')

    f.set_size_inches(12, 10)
    f.savefig(path, dpi=100, format='png')
    plt.close(f)

# Define training loop. Output training evaluations every 1 epoch
# and the custom evaluation method every 10 epochs.
train = TrainModel(model=model, output_freq=1, pickle_f_custom_freq=10, f_custom_eval=custom_evaluation)
train.add_initial_training_notes("Training the rae with bn %s. seed %i." % (str(model.batchnorm), seed))
train.train_model(f_train, train_args,
                  f_test, test_args,
                  f_validate, validate_args,
                  n_train_batches=n_batches,
                  n_epochs=5000,
                  anneal=[("learningrate", 100, 0.75, 3e-5)])
def run_cvae():
    seed = np.random.randint(1, 2147462579)

    # def sinus_seq(period, samples, length):
    #     X = np.linspace(-np.pi*(samples/period), np.pi*(samples/period), samples)
    #     X = np.reshape(np.sin(X), (-1, length, 1))
    #     X += np.random.randn(*X.shape)*0.1
    #     X = (X - np.min(X))/(np.max(X) - np.min(X))
    #     return X, np.ones((samples/length, 1))
    #
    # X1, y1 = sinus_seq(40, 100000, 50)
    # X2, y2 = sinus_seq(20, 40000, 50)
    #
    # X = np.concatenate((X1, X2)).astype('float32')
    # y = np.concatenate((y1*0, y2*1), axis=0).astype('int')
    #
    # dim_samples, dim_sequence, dim_features = X.shape
    # X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)

    # X, y, users, stats = har.load()

    n_samples, step = 25, 25
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        diff=False,
                        step=step,
                        normalize='segments',
                        comp_magnitude=True,
                        simple_labels=True,
                        common_labels=True)
    X, y, name, users, stats = load_data.uci_hapt()

    limited_labels = y < 5
    y = y[limited_labels]
    X = X[limited_labels].astype(np.float32)
    users = users[limited_labels]

    X -= X.mean(axis=0)

    # Compress labels
    for idx, label in enumerate(np.unique(y)):
        if not np.equal(idx, label):
            y[y == label] = idx

    y_unique = np.unique(y)
    y = one_hot(y, len(y_unique))

    dim_samples, dim_sequence, dim_features = X.shape
    num_classes = len(y_unique)

    # Split into train and test stratified by users
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        stratify=users)

    # Combine in sets
    train_set = (X_train, y_train)
    test_set = (X_test, y_test)
    print('Train size: ', train_set[0].shape)
    print('Test size: ', test_set[0].shape)

    n, seq, n_x = train_set[
        0].shape  # Datapoints, sequence length, input features.
    n_batches = n // 100  # The number of batches.
    bs = n // n_batches  # The batchsize.

    # Initialize the auxiliary deep generative model.
    # [num_filters, stride, pool]
    filters = [[128, 1, 2], [128, 1, 2], [128, 1, 2], [128, 1, 2]]
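    # i.e. four identical conv blocks: 128 filters, stride 1 and a pooling factor of 2 each,
    # following the [num_filters, stride, pool] layout noted above.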
    model = CVAE(n_x=int(n_x),
                 n_z=128,
                 px_hid=[128],
                 qz_hid=[128],
                 filters=filters,
                 seq_length=int(seq),
                 nonlinearity=rectify,
                 batchnorm=False,
                 x_dist='gaussian')

    # Copy script to output folder
    copy_script(__file__, model)

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set, test_set)
    # Update the default function arguments.
    train_args['inputs']['batchsize'] = 100
    train_args['inputs']['learningrate'] = 1e-4
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999
    train_args['inputs']['warmup'] = .5

    def custom_evaluation(model, path):
        plt.clf()
        f, axarr = plt.subplots(nrows=len(np.unique(y)), ncols=2)
        z_ = np.empty((0, model.n_z))
        y_ = np.empty((0, ))
        for idx, y_l in enumerate(np.unique(y)):
            act_idx = test_set[1] == y_l
            test_act = test_set[0][act_idx[:, 0]]

            z = model.f_qz(test_act, 1)
            z_ = np.concatenate((z_, z))
            y_ = np.concatenate((y_, np.ones((len(test_act), )) * y_l))

            xhat = model.f_px(z, 1)
            mu = model.f_mu(z, 1)
            var = np.exp(model.f_var(z, 1))

            axarr[idx, 0].plot(test_act[:2].reshape(-1, dim_features),
                               color='red')
            axarr[idx, 0].plot(xhat[:2].reshape(-1, dim_features),
                               color='blue',
                               linestyle='dotted')

            axarr[idx, 1].plot(mu[:2].reshape(-1, dim_features), label="mu")
            axarr[idx, 1].plot(var[:2].reshape(-1, dim_features), label="var")
            plt.legend()

        f.set_size_inches(12, 10)
        f.savefig(path, dpi=100, format='png')
        plt.close(f)

        # Plot PCA decomp of Z
        z_pca = PCA(n_components=2).fit_transform(z_)
        plt.clf()
        plt.figure()
        for c, i in zip(['r', 'b'], set(y_unique)):
            plt.scatter(z_pca[y_ == i, 0], z_pca[y_ == i, 1], c=c, alpha=0.8, label=str(i))
        plt.legend()
        plt.title('PCA of Z')
        plt.savefig(path.replace('custom_eval_plot', 'pca/z'))
        plt.close()

    # Define training loop. Output training evaluations every 1 epoch
    # and the custom evaluation method every 10 epochs.
    train = TrainModel(model=model,
                       output_freq=1,
                       pickle_f_custom_freq=10,
                       f_custom_eval=custom_evaluation)
    train.add_initial_training_notes("Training the rae with bn %s. seed %i." %
                                     (str(model.batchnorm), seed))
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_batches,
                      n_epochs=1000,
                      anneal=[("learningrate", 100, 0.75, 3e-5),
                              ("warmup", 5, 0.99, 0.1)])

    image_to_movie.create(model.get_root_path() + '/training_custom_evals/',
                          rate=3)
Example no. 24
0
def main():
    seed = np.random.randint(1, 2147462579)

    # def sinus_seq(period, samples, length):
    #     X = np.linspace(-np.pi*(samples/period), np.pi*(samples/period), samples)
    #     X = np.reshape(np.sin(X), (-1, length, 1))
    #     X += np.random.randn(*X.shape)*0.1
    #     # X = (X - np.min(X))/(np.max(X) - np.min(X))
    #     return X, np.ones((samples/length, 1))
    #
    # X1, y1 = sinus_seq(20, 100000, 40)
    # X2, y2 = sinus_seq(12, 100000, 40)
    # X3, y3 = sinus_seq(8, 100000, 40)
    #
    # X = np.concatenate((X1, X2, X3)).astype('float32')
    # y = np.concatenate((y1*0, y2*1, y3*2), axis=0).astype('int')[:, 0]
    #
    # y_unique = np.unique(y)
    # y = one_hot(y, len(y_unique))
    # num_classes = len(y_unique)
    #
    # X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9)

    # X, y, users, stats = har.load()
    #
    n_samples, step = 50, 50
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=False,
                        n_samples=n_samples,
                        diff=False,
                        step=step,
                        normalize='segments',
                        comp_magnitude=False,
                        simple_labels=False,
                        common_labels=False)
    X, y, name, users, stats = load_data.uci_hapt()

    limited_labels = y < 18
    y = y[limited_labels]
    X = X[limited_labels].astype(np.float32)
    users = users[limited_labels]

    # X -= X.mean(axis=0)

    # Compress labels
    for idx, label in enumerate(np.unique(y)):
        if not np.equal(idx, label):
            y[y == label] = idx

    y_unique = np.unique(y)
    num_classes = len(y_unique)
    y = one_hot(y, num_classes)

    # Split into train and test stratified by users
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=1000,
                                                        stratify=users)

    n_samples = 1001
    # Split training into labelled and unlabelled. Optionally stratified by the label
    X_train_labeled, X_train_unlabeled, y_train_labeled, y_train_unlabeled = \
        train_test_split(X_train, y_train, train_size=n_samples, stratify=np.argmax(y_train, axis=1))
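
    # This keeps n_samples (1001) class-stratified windows as the labelled set and the
    # remainder of X_train as the unlabelled set for the semi-supervised model below.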

    # Combine in sets
    train_set_labeled = (X_train_labeled, y_train_labeled)
    train_set_unlabeled = (X_train_unlabeled, y_train_unlabeled)
    test_set = (X_test, y_test)
    print('Train unlabelled size: ', train_set_unlabeled[0].shape)
    print('Train labelled size: ', train_set_labeled[0].shape)
    print('Test size: ', test_set[0].shape)

    n, n_l, n_c = train_set_unlabeled[
        0].shape  # Datapoints, sequence length, input channels.
    n_batches = n // 100  # The number of batches.
    bs = n // n_batches  # The batchsize.

    # Initialize the auxiliary deep generative model.
    model = RSDGM(n_c=int(n_c),
                  n_l=int(n_l),
                  n_a=100,
                  n_z=128,
                  n_y=num_classes,
                  qa_hid=[100],
                  qz_hid=[100],
                  qy_hid=[100],
                  px_hid=[128],
                  pa_hid=[100],
                  nonlinearity=rectify,
                  batchnorm=False,
                  x_dist='gaussian')

    # Copy script to output folder
    copy_script(__file__, model)

    # Create output path for PCA plot
    makedirs(model.get_root_path() + '/training custom evals/pca')

    # Get the training functions.
    f_train, f_test, f_validate, train_args, test_args, validate_args = model.build_model(
        train_set_unlabeled, train_set_labeled, test_set)
    # Update the default function arguments.
    train_args['inputs']['batchsize_unlabeled'] = bs
    train_args['inputs']['batchsize_labeled'] = n_samples
    train_args['inputs']['beta'] = .1
    train_args['inputs']['learningrate'] = 3e-4
    train_args['inputs']['beta1'] = 0.9
    train_args['inputs']['beta2'] = 0.999
    train_args['inputs']['samples'] = 1
    train_args['inputs']['warmup'] = 1.1

    def custom_evaluation(model, path):
        # Get model output
        x_ = test_set[0]
        y_ = test_set[1]

        # qy = model.f_qy(x_, 1)
        qa = model.f_qa(x_, 1)
        qz = model.f_qz(x_, y_, 1)
        # pa = model.f_pa(qz, y_, 1)
        px = model.f_px(qa, qz, y_, 1)
        px_mu = model.f_mu(qa, qz, y_, 1)
        px_var = np.exp(model.f_var(qa, qz, y_, 1))

        # reduce y to integers
        y_ = np.argmax(y_, axis=1)

        plt.clf()
        f, axarr = plt.subplots(nrows=len(y_unique), ncols=2)
        for idx, y_l in enumerate(y_unique):
            l_idx = y_ == y_l

            axarr[idx, 0].plot(x_[l_idx][:2].reshape(-1, n_c))
            axarr[idx, 0].plot(px[l_idx][:2].reshape(-1, n_c),
                               linestyle='dotted')
            axarr[idx, 1].plot(px_mu[l_idx][:2].reshape(-1, n_c), label="mu")
            axarr[idx, 1].plot(px_var[l_idx][:2].reshape(-1, n_c), label="var")
            plt.legend()

        f.set_size_inches(12, 8)
        f.savefig(path, dpi=100, format='png')
        plt.close(f)

        # Plot PCA decomp
        z_pca = PCA(n_components=2).fit_transform(qz)
        a_pca = PCA(n_components=2).fit_transform(qa)

        palette = itertools.cycle(sns.color_palette())
        plt.clf()
        plt.figure()
        f, axarr = plt.subplots(ncols=2)
        for i in set(y_unique):
            c = next(palette)
            axarr[0].scatter(z_pca[y_ == i, 0],
                             z_pca[y_ == i, 1],
                             c=c,
                             alpha=0.8)
            axarr[1].scatter(a_pca[y_ == i, 0],
                             a_pca[y_ == i, 1],
                             c=c,
                             alpha=0.8,
                             label=str(i))
        plt.legend()
        plt.title('PCA of Z and A')
        f.set_size_inches(10, 6)
        plt.savefig(path.replace('custom_eval_plot', 'pca/z'),
                    dpi=100,
                    format='png')
        plt.close()

    # Define training loop. Output training evaluations every 1 epoch
    # and the custom evaluation method every 10 epochs.
    train = TrainModel(model=model,
                       output_freq=1,
                       pickle_f_custom_freq=10,
                       f_custom_eval=custom_evaluation)
    train.add_initial_training_notes("Training the rae with bn %s. seed %i." %
                                     (str(model.batchnorm), seed))
    train.train_model(f_train,
                      train_args,
                      f_test,
                      test_args,
                      f_validate,
                      validate_args,
                      n_train_batches=n_batches,
                      n_epochs=1000,
                      anneal=[("learningrate", 100, 0.75, 3e-5),
                              ("warmup", 1, 0.99, 0.1)])
Example no. 25
0
def main():
    n_samples, step = 200, 200
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=True,
                        n_samples=n_samples,
                        step=step,
                        normalize=False,
                        comp_magnitude=False)

    conf = ModelConfiguration()
    conf.load_datasets([load_data.uci_hapt], label_limit=100)

    user_idx = -1
    user = None  # 'UCI HAPT10'
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
        print('Testing user: %s' % user)
    else:
        # Cross validate on users
        # conf.cv = LeavePLabelOut(conf.users, p=1)

        # Divide into K folds balanced on labels
        # conf.cv = StratifiedKFold(np.argmax(conf.y, axis=1), n_folds=10)

        # And shuffle
        conf.cv = StratifiedShuffleSplit(np.argmax(conf.y, axis=1),
                                         n_iter=1,
                                         test_size=0.3)

    for train_index, test_index in conf.cv:
        conf.user = user

        model = ResNet(n_in=(n_samples, conf.n_features),
                       n_filters=[32, 64, 128, 256],
                       pool_sizes=[2, 2, 2, 2],
                       n_hidden=[512],
                       conv_dropout=0.3,
                       dropout=0.5,
                       n_out=conf.n_classes,
                       trans_func=leaky_rectify,
                       out_func=softmax,
                       batch_norm=True,
                       stats=conf.stats)

        if len(conf.cv) > 1:
            user_idx += 1
            if len(conf.cv) == len(conf.user_names):
                conf.user = conf.user_names[user_idx]
            else:
                conf.user = conf.name + ' K_%d' % user_idx

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=50,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=500,
                 model=model,
                 train=train,
                 load_data=load_data)
def main():
    n_samples, step = 100, 50
    load_data = LoadHAR(add_pitch=False,
                        add_roll=False,
                        add_filter=True,
                        n_samples=n_samples,
                        step=step,
                        normalize=True)

    conf = ModelConfiguration()
    conf.load_datasets(
        [load_data.uci_hapt],
        label_limit=18)  # , load_data.uci_mhealth, load_data.idash

    user_idx = -1
    user = None
    # Create a time-string for our cv run
    if user is not None:
        train_idx = conf.users != user
        test_idx = conf.users == user
        conf.cv = ((train_idx, test_idx), )
    else:
        conf.cv = LeaveOneLabelOut(conf.users)

    for train_index, test_index in conf.cv:
        conf.user = user

        model = CNN(n_in=(n_samples + 2, conf.n_features),
                    n_filters=[64, 64, 64, 64],
                    filter_sizes=[5, 5, 3, 3],
                    pool_sizes=[2, 2, 2, 2],
                    conv_dropout=0.5,
                    n_hidden=[128],
                    dense_dropout=0.5,
                    n_out=conf.n_classes,
                    ccf=False,
                    trans_func=rectify,
                    out_func=softmax,
                    batch_norm=True,
                    input_noise=0.2,
                    stats=2)

        if len(conf.cv) > 1:
            user_idx += 1
            conf.user = conf.user_names[user_idx]

            # Generate root path and edit
            root_path = model.get_root_path()
            model.root_path = "%s_cv_%s_%s" % (root_path, conf.d, conf.user)
            paths.path_exists(model.root_path)
            rmdir(root_path)

        train = TrainModel(model=model,
                           anneal_lr=0.75,
                           anneal_lr_freq=100,
                           output_freq=1,
                           pickle_f_custom_freq=100,
                           f_custom_eval=None)
        train.pickle = False

        conf.run(train_index,
                 test_index,
                 lr=0.003,
                 n_epochs=300,
                 model=model,
                 train=train,
                 load_data=load_data)