Example #1
def run():
    training_set, evaluation_set = dat.get_data_sets()

    sample = next(training_set)
    n_pixels = np.prod(sample.shape)
    printer = Printer(input_shape=sample.shape)

    N_NODES = [24]
    n_nodes = N_NODES + [n_pixels]
    layers = []

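    # RangeNormalization presumably uses the training set to pick a fixed
    # range to scale the raw pixel values into before the Dense layers.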
    layers.append(RangeNormalization(training_set))

    for i_layer in range(len(n_nodes)):
        new_layer = Dense(
            n_nodes[i_layer],
            activation_function=Tanh(),
            # initializer=Glorot(),
            # initializer=He(),
            initializer=Uniform(scale=3),
            previous_layer=layers[-1],
            optimizer=Momentum(),
        )
        new_layer.add_regularizer(L1())
        new_layer.add_regularizer(Limit(4.0))
        layers.append(new_layer)

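    # The Difference layer compares the final layer's output (the reconstruction)
    # against the normalized input, so the Sqr error pushes the network
    # toward reproducing its own input, which is the autoencoder objective.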
    layers.append(Difference(layers[-1], layers[0]))

    autoencoder = ANN(
        layers=layers,
        error_function=Sqr,
        printer=printer,
    )

    msg = """

Running autoencoder demo
    on Nordic Runes data set.
    Find performance history plots,
    model parameter report,
    and neural network visualizations
    in the {} directory.

""".format(autoencoder.reports_path)

    print(msg)

    autoencoder.train(training_set)
    autoencoder.evaluate(evaluation_set)
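
These snippets are shown without their imports. Per the docstring in Example #6, the activation functions and initializers live in cottonwood/core/activation_functions.py and cottonwood/core/initializers.py; every other module path below, and the data-module alias dat, is a guess at what the top of the file might look like rather than the framework's confirmed layout:

import numpy as np

# These two paths are named in Example #6's docstring:
from cottonwood.core.activation_functions import Tanh
from cottonwood.core.initializers import Glorot, He, Uniform

# The homes of the remaining classes are assumptions; check the cottonwood
# source for the real locations of ANN, Dense, RangeNormalization,
# Difference, Sqr, SGD, Momentum, L1, L2, Limit, and Printer.

import nordic_runes as dat  # hypothetical name for the Nordic Runes data module
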
Example #2
def initialize(
    activation_function=Tanh,
    initializer=Glorot,
    learning_rate=1e-4,
    n_nodes_00=79,
    n_nodes_0=23,
    n_nodes_1=9,
    n_nodes_2=23,
    n_nodes_3=79,
    patch_size=11,
    **kwargs,
):
    training_set, tuning_set, evaluation_set = ldr.get_data_sets(
        patch_size=patch_size)

    sample = next(training_set)
    n_pixels = np.prod(sample.shape)
    # n_nodes_dense = [n_nodes_00, n_nodes_0, n_nodes_1, n_nodes_2, n_nodes_3]
    n_nodes_dense = [n_nodes_00, n_nodes_1, n_nodes_3]
    # n_nodes_dense = [n_nodes_1]
    n_nodes_dense = [n for n in n_nodes_dense if n > 0]
    n_nodes = n_nodes_dense + [n_pixels]

    layers = []

    layers.append(RangeNormalization(training_set))

    for i_layer in range(len(n_nodes)):
        new_layer = Dense(n_nodes[i_layer],
                          activation_function=activation_function,
                          initializer=initializer,
                          previous_layer=layers[-1],
                          optimizer=Momentum(
                              learning_rate=learning_rate,
                              momentum_amount=.9,
                          ))
        layers.append(new_layer)

    layers.append(Difference(layers[-1], layers[0]))

    autoencoder = ANN(
        layers=layers,
        error_function=Sqr,
        n_iter_train=5e4,
        n_iter_evaluate=1e4,
        n_iter_evaluate_hyperparameters=9,
        verbose=False,
    )

    return autoencoder, training_set, tuning_set
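
A sketch of how this initialize() might be driven, modeled on the train and evaluate calls in Examples #1 and #4; the argument values are just the function's own defaults, restated for illustration:

autoencoder, training_set, tuning_set = initialize(
    learning_rate=1e-4,
    n_nodes_1=9,
    patch_size=11,
)
autoencoder.train(training_set)
autoencoder.evaluate(tuning_set)
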
Example #3
def initialize(
    limit=None,
    L1_param=None,
    L2_param=None,
    learning_rate=None,
    momentum=.9,
    **kwargs,
):
    training_set, tuning_set, evaluation_set = ldr.get_data_sets()

    sample = next(training_set)
    n_pixels = np.prod(sample.shape)
    N_NODES = [33]
    n_nodes = N_NODES + [n_pixels]
    layers = []

    layers.append(RangeNormalization(training_set))

    for i_layer in range(len(n_nodes)):
        new_layer = Dense(
            n_nodes[i_layer],
            activation_function=Tanh,
            initializer=Glorot(),
            previous_layer=layers[-1],
            optimizer=Momentum(
                learning_rate=learning_rate,
                momentum_amount=momentum,
            ),
        )
        if limit is not None:
            new_layer.add_regularizer(Limit(limit))
        if L1_param is not None:
            new_layer.add_regularizer(L1(L1_param))
        if L2_param is not None:
            new_layer.add_regularizer(L2(L2_param))

        layers.append(new_layer)

    layers.append(Difference(layers[-1], layers[0]))

    autoencoder = ANN(
        layers=layers,
        error_function=Sqr,
        n_iter_train=5e4,
        n_iter_evaluate=1e4,
        verbose=False,
    )

    return autoencoder, training_set, tuning_set
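
This variant exposes the regularizers as tunable hyperparameters, and its learning_rate defaults to None, so a value has to be supplied. A sketch of a call that switches the regularizers on, reusing the 4.0 weight cap hard-coded in Examples #1, #4, and #5; the L1 coefficient is hypothetical:

autoencoder, training_set, tuning_set = initialize(
    limit=4.0,       # same cap as Limit(4.0) in Examples #1, #4, and #5
    L1_param=1e-3,   # hypothetical L1 penalty coefficient
    learning_rate=1e-4,
)
autoencoder.train(training_set)
autoencoder.evaluate(tuning_set)
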
Example #4
def run():
    training_set, tuning_set, evaluation_set = ldr.get_data_sets()

    sample = next(training_set)
    n_pixels = np.prod(sample.shape)
    printer = Printer(input_shape=sample.shape)

    N_NODES = [64, 36, 24, 36, 64]
    # N_NODES = [64]
    n_nodes = N_NODES + [n_pixels]
    layers = []

    layers.append(RangeNormalization(training_set))

    for i_layer in range(len(n_nodes)):
        new_layer = Dense(
            n_nodes[i_layer],
            activation_function=Tanh,
            initializer=Glorot(),
            previous_layer=layers[-1],
            optimizer=Momentum(),
        )
        # new_layer.add_regularizer(L1())
        new_layer.add_regularizer(Limit(4.0))
        layers.append(new_layer)

    layers.append(Difference(layers[-1], layers[0]))

    autoencoder = ANN(
        layers=layers,
        error_function=Sqr,
        printer=printer,
    )

    msg = """

    Running autoencoder on images of the surface of Mars.
        Find performance history plots, model parameter report,
        and neural network visualizations in the directory
        {}

""".format(autoencoder.reports_path)

    print(msg)

    autoencoder.train(training_set)
    autoencoder.evaluate(tuning_set)
    autoencoder.evaluate(evaluation_set)
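
For a rough sense of the bottleneck in this stack: the narrowest Dense layer has 24 nodes, so each patch is squeezed down to 24 activities on its way through. A back-of-envelope calculation, assuming (hypothetically) the 11x11 patches used elsewhere in these examples:

n_pixels = 11 * 11                       # hypothetical; the Mars loader's patch size isn't shown
bottleneck = min([64, 36, 24, 36, 64])   # narrowest layer in N_NODES
print(n_pixels / bottleneck)             # roughly 5x compression at the bottleneck
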
Example #5
def initialize():
    training_set, tuning_set, evaluation_set = ldr.get_data_sets()

    sample = next(training_set)
    n_pixels = np.prod(sample.shape)
    printer = Printer(input_shape=sample.shape)

    N_NODES = [33]
    n_nodes = N_NODES + [n_pixels]
    layers = []

    layers.append(RangeNormalization(training_set))

    for i_layer in range(len(n_nodes)):
        new_layer = Dense(
            n_nodes[i_layer],
            activation_function=Tanh,
            initializer=Glorot(),
            previous_layer=layers[-1],
            optimizer=SGD(),
        )
        new_layer.add_regularizer(Limit(4.0))
        layers.append(new_layer)

    layers.append(Difference(layers[-1], layers[0]))

    autoencoder = ANN(
        layers=layers,
        error_function=Sqr,
        n_iter_train=N_ITER_TRAIN,
        n_iter_evaluate=N_ITER_EVALUATE,
        printer=printer,
        verbose=False,
    )

    return autoencoder, training_set, tuning_set
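
N_ITER_TRAIN and N_ITER_EVALUATE are module-level constants that are not shown here. Plausible definitions, mirroring the literals that Examples #2 and #3 pass explicitly, would be:

N_ITER_TRAIN = 5e4     # matches n_iter_train in Examples #2 and #3
N_ITER_EVALUATE = 1e4  # matches n_iter_evaluate in Examples #2 and #3
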
Example #6
def train(
    image_path,
    activation_function=Tanh,
    initializer=Glorot,
    learning_rate=1e-4,
    n_nodes_0=79,
    n_nodes_1=9,
    n_nodes_2=79,
    patch_size=11,
):
    """
    Train an autoencoder to represent image patches more economically.

    image_path: str, a path to the directory containing the images
        that are to be compressed. If this is a relative path, it needs to be
        relative to the directory from which this module is run.
    activation_function: one of the classes available in
        cottonwood/core/activation_functions.py
        As of this writing, {Tanh, Sigmoid, ReLU}
    initializer: one of the classes available in
        cottonwood/core/initializers.py
        As of this writing, {Glorot, He}
    learning_rate: float, the learning rate for the Momentum optimizer
        that gets called during backpropagation. Feasible values will probably
        be between 1e-5 and 1e-3.
    n_nodes_x: int, the number of nodes in layer x. Layer 1 is
        the narrowest layer, and its node activities are used as the
        representation of the compressed patch.
    patch_size: int, the number of pixels on a side of the square
        patches that the images are broken into for training.

    returns a trained autoencoder
    """
    training_patches = ldr.get_training_data(patch_size, image_path)

    sample = next(training_patches)
    printer = Printer(input_shape=sample.shape)
    n_pixels = np.prod(sample.shape)
    n_nodes_dense = [n_nodes_0, n_nodes_1, n_nodes_2]
    n_nodes = n_nodes_dense + [n_pixels]

    layers = []

    layers.append(RangeNormalization(training_patches))

    for i_layer in range(len(n_nodes)):
        new_layer = Dense(n_nodes[i_layer],
                          activation_function=activation_function(),
                          initializer=initializer(),
                          previous_layer=layers[-1],
                          optimizer=Momentum(
                              learning_rate=learning_rate,
                              momentum_amount=.9,
                          ))
        layers.append(new_layer)

    layers.append(Difference(layers[-1], layers[0]))

    autoencoder = ANN(
        layers=layers,
        error_function=Sqr,
        n_iter_train=5e4,
        n_iter_evaluate=1e4,
        n_iter_evaluate_hyperparameters=9,
        printer=printer,
        verbose=True,
        viz_interval=1e4,
    )
    autoencoder.train(training_patches)
    return autoencoder
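
Following the docstring, a minimal call might look like the sketch below; the image directory is a hypothetical path, relative to the directory this module is run from:

autoencoder = train(
    "images/",                     # hypothetical directory of images to compress
    activation_function=Tanh,
    initializer=Glorot,
    learning_rate=1e-4,
    n_nodes_1=9,                   # width of the bottleneck layer
)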