Example #1
sq2 = u.create_fire_module(mp1, 16, 64, 64, 128)
sq3 = u.create_fire_module(sq2, 16, 64, 64, 128)
sq4 = u.create_fire_module(sq3, 32, 128, 128, 128)

mp4 = u.max_pool_2x2(sq4)  # down to 32x32

sq5 = u.create_fire_module(mp4, 32, 128, 128, 256)
sq6 = u.create_fire_module(sq5, 48, 192, 192, 256)
sq7 = u.create_fire_module(sq6, 48, 192, 192, 384)
sq8 = u.create_fire_module(sq7, 64, 256, 256, 384)

mp8 = u.max_pool_2x2(sq8)  # down to 16x16

sq9 = u.create_fire_module(mp8, 64, 256, 256, 512)

activations = u.get_activations(sq9, 16, 512)

out = tf.nn.softmax(activations)

#Regressor

keep_prob = tf.placeholder(tf.float32)
reg_sq = u.create_fire_module(sq8, 8, 2, 2, 512)
final_count = tf.cast((params.IMAGE_SIZE / 4) ** 2 * 4, tf.int32)
h_sq8_flat = tf.reshape(reg_sq, [-1, final_count])

W_reg1 = u.weight_variable([final_count, params.FC_NODES])
b_reg1 = u.bias_variable([params.FC_NODES])

h_reg1 = tf.nn.relu(tf.matmul(h_sq8_flat, W_reg1) + b_reg1)
h_reg1_drop = tf.nn.dropout(h_reg1, keep_prob)
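The u.* helpers are not shown in this excerpt. Purely as an illustration, here is a minimal TF1-style sketch of what create_fire_module and max_pool_2x2 could look like, assuming the last integer argument is the input channel count (which matches the call sites above) and the other three are the SqueezeNet squeeze/expand widths:

import tensorflow as tf

# Hypothetical sketch of the helpers assumed by this example (not the original
# utils module). The last integer is assumed to be the channel count of `x`.

def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def create_fire_module(x, s1x1, e1x1, e3x3, in_channels):
    # Squeeze: 1x1 convolution that reduces the channel count to s1x1.
    squeeze = tf.nn.relu(conv2d(x, weight_variable([1, 1, in_channels, s1x1]))
                         + bias_variable([s1x1]))
    # Expand: parallel 1x1 and 3x3 convolutions, concatenated along channels.
    expand1 = tf.nn.relu(conv2d(squeeze, weight_variable([1, 1, s1x1, e1x1]))
                         + bias_variable([e1x1]))
    expand3 = tf.nn.relu(conv2d(squeeze, weight_variable([3, 3, s1x1, e3x3]))
                         + bias_variable([e3x3]))
    return tf.concat([expand1, expand3], axis=3)

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')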
Example #2
    print(X.v)
elif axis_vector_type == 'pca':
    data = np.load(args.dataset)

    # if the dataset already has activations, just load them
    if args.spatial_encoding in args.dataset:
        print("Loading activations directly")
        activations = data['activations']
        flat_pos = data['positions']
    else:
        print("Computing activations")
        encoding_func, dim = get_encoding_function(args,
                                                   limit_low=0,
                                                   limit_high=2.2)
        activations, flat_pos = get_activations(data=data,
                                                encoding_func=encoding_func,
                                                encoding_dim=dim)

    pca = PCA(n_components=args.n_components)
    # pca = NMF(n_components=args.n_components)

    print("Fitting PCA")
    pca.fit(activations)

    print("Getting covariance")
    covariance = pca.get_covariance()

    plt.figure()
    plt.imshow(covariance)

    X_vec = circulant_matrix_to_vec(covariance)
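circulant_matrix_to_vec is not defined in the excerpt. One plausible reading, sketched here purely as an assumption, is that it recovers the generating vector of an (approximately) circulant covariance matrix by averaging along its wrapped diagonals:

import numpy as np

def circulant_matrix_to_vec(mat):
    # Assumed behaviour: average each wrapped diagonal of an (approximately)
    # circulant matrix to recover its generating vector. The original helper
    # may be implemented differently.
    n = mat.shape[0]
    vec = np.zeros(n)
    for offset in range(n):
        vec[offset] = np.mean([mat[i, (i + offset) % n] for i in range(n)])
    return vec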
Example #3

if __name__ == '__main__':
    N = 10000
    inputs_1, outputs = get_data(N, input_dim)

    m = build_model()
    m.compile(
        optimizer=optimizers.Adam(),
        loss=losses.binary_crossentropy,
        metrics=[metrics.binary_accuracy]
    )

    m.fit([inputs_1], outputs, epochs=20, batch_size=64, validation_split=0.5)
    test_x, test_y = get_data(1, input_dim)

    # The attention vector corresponds to the second matrix;
    # the first one is the output of the Input layer.
    attention_vector = get_activations(m, test_x, print_shape_only=True,
                                       layer_name='attention_vec')[0].flatten()
    print('attention = ', attention_vector)

    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector, columns=['attention (%)']).plot(kind='bar',
                                                                   title='Attention Mechanism as '
                                                                         'a function of input'
                                                                         ' dimensions.')
    plt.show()
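get_activations is imported from elsewhere in the project. A minimal sketch of a Keras helper with this signature, assuming it evaluates intermediate layer outputs with K.function (the classic pattern for inspecting activations), might look like:

from keras import backend as K

def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    # Evaluate the output of every layer (or only `layer_name`) for the given
    # inputs. Hypothetical sketch based on the call sites above.
    layers = [l for l in model.layers
              if layer_name is None or l.name == layer_name]
    funcs = [K.function([model.input, K.learning_phase()], [l.output])
             for l in layers]
    activations = [func([model_inputs, 0.])[0] for func in funcs]
    for act in activations:
        print(act.shape if print_shape_only else act)
    return activations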
Example #4
sq2 = u.create_fire_module(mp1, 16, 64, 64, 128)
sq3 = u.create_fire_module(sq2, 16, 64, 64, 128)
sq4 = u.create_fire_module(sq3, 32, 128, 128, 128)

mp4 = u.max_pool_2x2(sq4)  # down to 8x8

# sq5 = u.create_fire_module(mp4, 32, 128, 128, 256)
# sq6 = u.create_fire_module(sq5, 48, 192, 192, 256)
# sq7 = u.create_fire_module(sq6, 48, 192, 192, 384)
# sq8 = u.create_fire_module(sq7, 64, 256, 256, 384)

# mp8 = u.max_pool_2x2(sq8)  # down to 4x4

sq9 = u.create_fire_module(mp4, 32, 128, 128, 256)  # (mp8, 64, 256, 256, 512)

activations = u.get_activations(sq9, 8, 256)

out = tf.nn.softmax(activations)

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=activations))
loss = cross_entropy

correct_prediction = tf.equal(tf.argmax(out, 1), tf.argmax(y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

print("Model constructed!")

sess = tf.Session()
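The example stops right after creating the session. A hypothetical continuation, assuming placeholders x/y_ and a next_batch helper exist elsewhere in the original script, would be the usual TF1 feed-dict training loop:

# Hypothetical continuation, not part of the original example.
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

sess.run(tf.global_variables_initializer())
for step in range(1000):
    batch_x, batch_y = next_batch(64)  # assumed data-loading helper
    sess.run(train_step, feed_dict={x: batch_x, y_: batch_y})
    if step % 100 == 0:
        train_acc = sess.run(accuracy, feed_dict={x: batch_x, y_: batch_y})
        print("step {}: batch accuracy {:.3f}".format(step, train_acc))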
Example #5
            plt.imshow(deprocess_image(img2))

            plt.subplot(2,2,4)
            plt.imshow(deprocess_image(img3))


            plt.show()
            array = np.array(deconv[layer_name])
            pdb.set_trace()

    elif all_images == True:
        data = np.expand_dims(data, axis=1)

        top_filter = []
        for idx in range(data.shape[0]):
            top_filter.append((utils.get_activations(model, data[idx]), idx))
        pdb.set_trace()

"""
for i, img in enumerate(deconv[layer_name]):
    plt.subplot(2,2,i+1)
    plt.imshow(deprocess_image(img))
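deprocess_image is not shown. A common version of this utility (for example in the Keras filter-visualization examples), sketched here as an assumption about what the helper does, maps a float activation map back to a displayable uint8 image:

import numpy as np

def deprocess_image(x):
    # Normalize to roughly zero mean / small std, shift into [0, 1],
    # then scale to a displayable 8-bit image. The original helper may differ.
    x = x - x.mean()
    x = x / (x.std() + 1e-5)
    x = x * 0.1 + 0.5
    x = np.clip(x, 0, 1)
    return (x * 255).astype('uint8')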
Example #6
        m = model_attention_applied_after_lstm()

    m.compile(
        optimizer=optimizers.Adam(),
        loss=losses.binary_crossentropy,
        metrics=[metrics.binary_accuracy]
    )

    print(m.summary())

    m.fit([train_x], train_y, epochs=5, batch_size=64, validation_split=0.1)

    attention_vectors = []
    for i in range(300):
        test_x, test_y = get_data_recurrtent(1, TIME_STEPS, INPUT_DIM)
        attention_vector = np.mean(get_activations(m, test_x, print_shape_only=True, layer_name='attention_vec')[0],
                                   axis=2).squeeze()
        print('attention =', attention_vector)
        assert abs(np.sum(attention_vector) - 1.0) < 1e-5  # weights sum to 1
        attention_vectors.append(attention_vector)

    attention_vector_final = np.mean(np.array(attention_vectors), axis=0)
    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector_final, columns=['attention (%)']).plot(kind='bar',
                                                                         title='Attention Mechanism as '
                                                                               'a function of input'
                                                                               ' dimensions.')
    plt.show()
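The model builders and the 'attention_vec' layer are defined elsewhere. A minimal sketch of the usual pattern behind model_attention_applied_after_lstm (a softmax over time steps multiplied back into the LSTM outputs), with layer sizes and constants chosen only for illustration, is:

from keras.layers import Dense, Flatten, Input, LSTM, Multiply, Permute
from keras.models import Model

TIME_STEPS = 20   # assumed values matching the data helper's arguments
INPUT_DIM = 2

def attention_3d_block(inputs):
    # inputs: (batch, time_steps, features). Learn a softmax weight per time
    # step and rescale the sequence by it; the softmax layer output is named
    # 'attention_vec' so it can be read back with get_activations.
    a = Permute((2, 1))(inputs)
    a = Dense(TIME_STEPS, activation='softmax')(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    return Multiply()([inputs, a_probs])

def model_attention_applied_after_lstm():
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM))
    lstm_out = LSTM(32, return_sequences=True)(inputs)
    attention_mul = attention_3d_block(lstm_out)
    output = Dense(1, activation='sigmoid')(Flatten()(attention_mul))
    return Model(inputs=[inputs], outputs=output)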
Example #7
            i for i in range(len(sentences)) if substr in sentences[i]
        ]
        if len(sentence_idxs) < iters:
            print('Not enough sentences with word {}'.format(substr))
            sys.exit(0)

        _mask = np.random.randint(0, len(sentence_idxs) - 1, size=(iters, ))
        mask = np.array([sentence_idxs[idx] for idx in _mask])
    else:
        mask = np.random.randint(0, 2 * sequence_length - 1, size=(iters, ))

    chosen_x = X[mask]
    chosen_y = Y[mask]
    chosen_s = [sentences[i] for i in mask]

    activations = get_activations(model, 0, chosen_x)

    real_data = []
    for i in range(iters):
        s = chosen_s[i]
        datum = {'pca': [], 'seq': s}
        for j in range(len(s)):
            datum['pca'].append(
                list(map(lambda x: float(x), activations[i, j, :])))
        real_data.append(datum)

    datasets = {'data': real_data}
    with open('cell.json', 'w') as outfile:
        json.dump(datasets, outfile)

    print('Wrote to json')
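Here get_activations takes a layer index rather than a name and returns a (samples, time, hidden) array directly. A hypothetical index-based variant of the name-based sketch shown under Example #3:

from keras import backend as K

def get_activations(model, layer_idx, X):
    # Return the output of the layer at position `layer_idx` for a batch X.
    # Hypothetical sketch; the project's own helper may differ.
    func = K.function([model.input, K.learning_phase()],
                      [model.layers[layer_idx].output])
    return func([X, 0.])[0]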
Example #8
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Run the data through a few MLP models and save the activations from
# each layer into a Pandas DataFrame.
rows = []
sigmas = [0.10, 0.14, 0.28]
for stddev in sigmas:
    init = initializers.RandomNormal(mean=0.0, stddev=stddev, seed=seed)
    activation = 'relu'

    model = create_mlp_model(n_hidden_layers, dim_layer, (data_dim, ),
                             n_classes, init, 'zeros', activation)
    compile_model(model)
    output_elts = get_activations(model, x_test)
    n_layers = len(model.layers)
    i_output_layer = n_layers - 1

    for i, out in enumerate(output_elts[:-1]):
        if i > 0 and i != i_output_layer:
            for out_i in out.ravel()[::20]:
                rows.append([i, stddev, out_i])

df = pd.DataFrame(rows,
                  columns=['Hidden Layer', 'Standard Deviation', 'Output'])

# Plot previously saved activations from the 5 hidden layers
# using different initialization schemes.
fig = plt.figure(figsize=(12, 6))
axes = grid_axes_it(len(sigmas), 1, fig=fig)
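create_mlp_model, compile_model and grid_axes_it are project helpers that the excerpt does not show. A minimal Keras sketch consistent with the call above (argument meanings inferred from the call site) could be:

from keras.layers import Dense
from keras.models import Sequential

def create_mlp_model(n_hidden_layers, dim_layer, input_shape, n_classes,
                     kernel_init, bias_init, activation):
    # A stack of identically sized Dense layers followed by a softmax output.
    # Sketched from the call site; the original helper may differ.
    model = Sequential()
    model.add(Dense(dim_layer, input_shape=input_shape, activation=activation,
                    kernel_initializer=kernel_init, bias_initializer=bias_init))
    for _ in range(n_hidden_layers - 1):
        model.add(Dense(dim_layer, activation=activation,
                        kernel_initializer=kernel_init,
                        bias_initializer=bias_init))
    model.add(Dense(n_classes, activation='softmax',
                    kernel_initializer=kernel_init,
                    bias_initializer=bias_init))
    return model

def compile_model(model):
    model.compile(optimizer='sgd', loss='categorical_crossentropy',
                  metrics=['accuracy'])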
Example #9
def network(seed, run, hp):
    source_train_labels, source_train_images, source_val_labels, source_val_images = input_data.get_training_and_val_data(
        hp.source_animal)
    source_test_labels, source_test_images = input_data.get_test_data(
        hp.source_animal)
    target_train_labels, target_train_images, target_test_labels, target_test_images = input_data.get_training_and_val_data(
        hp.target_animal, labels_per_category=hp.labels_per_category)
    target_val_labels, target_val_images = input_data.get_test_data(
        hp.target_animal)

    if hp.pruning_dataset == 'p_source':
        pruning_dataset = input_data.get_category_images(
            source_train_labels, source_train_images, 1)
    elif hp.pruning_dataset == 'n_source':
        pruning_dataset = input_data.get_category_images(
            source_train_labels, source_train_images, 0)
    elif hp.pruning_dataset == 'p_target':
        pruning_dataset = input_data.get_category_images(
            target_train_labels, target_train_images, 1)
    else:
        pruning_dataset = input_data.get_category_images(
            target_train_labels, target_train_images, 0)

    # Model
    source_model = Sequential()

    weight_init = glorot_uniform(seed)

    source_model.add(
        Conv2D(32, (3, 3),
               padding='same',
               input_shape=source_train_images.shape[1:],
               kernel_initializer=weight_init))
    source_model.add(Activation(hp.conv_activation))
    source_model.add(Conv2D(32, (3, 3), kernel_initializer=weight_init))
    source_model.add(Activation(hp.conv_activation))
    source_model.add(MaxPooling2D(pool_size=(2, 2)))
    source_model.add(Dropout(0.25))

    source_model.add(
        Conv2D(64, (3, 3), padding='same', kernel_initializer=weight_init))
    source_model.add(Activation(hp.conv_activation))
    source_model.add(Conv2D(64, (3, 3), kernel_initializer=weight_init))
    source_model.add(Activation(hp.conv_activation))
    source_model.add(MaxPooling2D(pool_size=(2, 2)))
    source_model.add(Dropout(0.25))

    source_model.add(Flatten())
    source_model.add(
        Dense(hp.num_starting_units,
              kernel_initializer=weight_init,
              name="fc_layer"))
    source_model.add(Activation('relu'))
    source_model.add(Dense(1, kernel_initializer=weight_init))
    source_model.add(Activation('sigmoid'))

    # Adam learning optimizer
    opt = keras.optimizers.adam(lr=hp.source_lr)

    # train the source_model using Adam
    source_model.compile(loss=hp.loss_function,
                         optimizer=opt,
                         metrics=[binary_accuracy])

    # Callbacks:
    # Stopping point value
    early_stopping = library_extensions.EarlyStoppingWithMax(
        target=1.1,
        monitor='val_binary_accuracy',
        min_delta=0,
        patience=0,
        verbose=1,
        mode='auto',
        baseline=0.68)

    all_source_predictions = library_extensions.PredictionHistory(
        source_model,
        source_train_images,
        source_train_labels,
        source_val_images,
        source_val_labels,
        source_test_images,
        source_test_labels,
        save_opp=hp.save_opp,
        opp_train_images=target_train_images,
        opp_train_labels=target_train_labels,
        opp_val_images=target_val_images,
        opp_val_labels=target_val_labels,
        opp_test_images=target_test_images,
        opp_test_labels=target_test_labels)

    # Training source network
    source_model.fit(source_train_images,
                     source_train_labels,
                     batch_size=hp.batch_size,
                     epochs=hp.source_max_epochs,
                     validation_data=(target_train_images,
                                      target_train_labels),
                     shuffle=True,
                     callbacks=[early_stopping, all_source_predictions])

    # Save trained source model
    source_model.save(
        utils.create_path(run.path, "source", "saved_models",
                          "source_model_{}.h5".format(seed)))
    print("source model saved")

    # Save stopped epoch variable
    if early_stopping.stopped_epoch == 0:
        cat_epoch_end = hp.source_max_epochs
    else:
        cat_epoch_end = early_stopping.stopped_epoch

    layer = source_model.get_layer("fc_layer")

    # Finding neuron indices to prune
    apoz = library_extensions.get_apoz(source_model, layer, pruning_dataset)
    activations = utils.get_activations(source_model, layer, pruning_dataset)

    # Mean activation method
    if hp.pruning_method == 'activation':
        activations_mean = np.mean(activations, axis=0)
        discard_indices = \
            np.where((activations_mean <= hp.lower_threshold) | (activations_mean >= hp.upper_threshold))[0]

    # Normalised threshold method
    elif hp.pruning_method == 'thresh_maru':
        mean_data = np.mean(activations, axis=0)
        std_data = np.std(activations, axis=0)
        maru_data = np.divide(mean_data,
                              std_data,
                              out=np.zeros_like(mean_data),
                              where=std_data != 0)
        normalised_maru = minmax_scale(maru_data, feature_range=(0, 1))
        discard_indices = \
            np.where((normalised_maru <= hp.lower_threshold) | (normalised_maru >= hp.upper_threshold))[0]

    # APoZ method
    else:
        discard_indices = np.where((apoz <= hp.lower_threshold)
                                   | (apoz >= hp.upper_threshold))[0]

    # Creating the new target source_model.
    if hp.reinit_weights:
        target_model = utils.reinitialise_weights(seed, discard_indices,
                                                  source_model)
    else:

        def my_delete_channels(model, layer, channels, *, node_indices=None):
            my_surgeon = library_extensions.MySurgeon(model)
            my_surgeon.add_job('delete_channels',
                               layer,
                               node_indices=node_indices,
                               channels=channels)
            return my_surgeon.operate()

        # New target model, ready for training on the target animal
        target_model = my_delete_channels(source_model,
                                          layer,
                                          discard_indices,
                                          node_indices=None)

    print(target_model.summary())

    # Adam learning optimizer
    target_opt = keras.optimizers.adam(lr=hp.target_lr)

    # Compile the target model with the Adam optimizer
    target_model.compile(loss=hp.loss_function,
                         optimizer=target_opt,
                         metrics=[binary_accuracy])

    # Callbacks:
    all_target_predictions = library_extensions.PredictionHistory(
        target_model,
        target_train_images,
        target_train_labels,
        target_val_images,
        target_val_labels,
        target_test_images,
        target_test_labels,
        save_opp=hp.save_opp,
        opp_train_images=source_train_images,
        opp_train_labels=source_train_labels,
        opp_val_images=source_val_images,
        opp_val_labels=source_val_labels,
        opp_test_images=source_test_images,
        opp_test_labels=source_test_labels)

    # Training target network
    target_model.fit(target_train_images,
                     target_train_labels,
                     epochs=hp.target_max_epochs,
                     batch_size=hp.batch_size,
                     validation_data=(target_val_images, target_val_labels),
                     shuffle=True,
                     callbacks=[all_target_predictions])

    # Save number of neurons for use in naive network
    num_seeded_units = target_model.get_layer('fc_layer').get_config()['units']

    # Save trained target model
    target_model.save(
        utils.create_path(run.path, "target", "saved_models",
                          "target_model_{}.h5".format(seed)))
    print("target model saved")

    # Generate results
    run.save_opp = hp.save_opp
    run.source.update(seed, all_source_predictions)
    run.target.update(seed, all_target_predictions)
    run.update_single_data(seed, num_seeded_units, cat_epoch_end)
    run.update_apoz_data(seed, apoz)
    run.update_activation_data(seed, activations)

    return num_seeded_units
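library_extensions.get_apoz is not shown in the excerpt. APoZ (Average Percentage of Zeros, Hu et al. 2016) scores each unit by how often its post-ReLU activation is zero over a dataset; units with a very high (or, here, also very low) score are pruned. A minimal sketch operating directly on an activations array of shape (samples, units), unlike the project's (model, layer, dataset) signature, is:

import numpy as np

def apoz_from_activations(activations, eps=1e-8):
    # Fraction of (near-)zero activations per unit across the dataset.
    # Hypothetical helper; the original get_apoz computes the activations
    # from (model, layer, dataset) internally.
    return np.mean(np.abs(activations) <= eps, axis=0)

# Example: prune units that are almost always zero (or almost never zero).
# apoz = apoz_from_activations(activations)
# discard = np.where((apoz <= lower_threshold) | (apoz >= upper_threshold))[0]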