Example #1
# (snippet truncated above; the imports, training_sequence_groups, and the
# opening of this statement come from the elided portion -- the assignment
# below is an assumption)
import numpy as np
import tensorflow as tf
from sklearn.utils.class_weight import compute_class_weight

validation_sequence_groups = data.combine([
    # map(lambda x: x[:30], data.digits_session_dependence_1_dataset(channels=range(1, 8))),
    # map(lambda x: x[:30], data.digits_session_dependence_2_dataset(channels=range(1, 8))),
    # map(lambda x: x[:40], data.digits_session_dependence_3_dataset(channels=range(1, 8))),
    validation_sequence_groups,
])

# Pads or truncates each sequence to length
length = 2000  # 300,600
training_sequence_groups = data.transform.pad_truncate(
    training_sequence_groups, length)
validation_sequence_groups = data.transform.pad_truncate(
    validation_sequence_groups, length)
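
# A minimal sketch of the assumed pad_truncate behavior (zero-pad short
# sequences, clip long ones); the real data.transform.pad_truncate may differ.
def pad_truncate_sketch(sequence_groups, length):
    def fix(seq):
        seq = np.asarray(seq)[:length]  # truncate to at most `length` samples
        pad = [(0, length - len(seq))] + [(0, 0)] * (seq.ndim - 1)
        return np.pad(seq, pad, mode='constant')  # zero-pad up to `length`
    return [[fix(seq) for seq in group] for group in sequence_groups]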

# Format into sequences and labels
train_sequences, train_labels = data.get_inputs(training_sequence_groups)
val_sequences, val_labels = data.get_inputs(validation_sequence_groups)

# Calculate sample weights
class_weights = compute_class_weight('balanced', classes=np.unique(train_labels),
                                     y=train_labels)
train_weights = class_weights[list(train_labels)]
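
# Toy illustration of the weighting above: 'balanced' class weights are
# n_samples / (n_classes * bincount), then indexed per sample. For example,
# labels [0, 0, 0, 1] give class weights [0.667, 2.0], so the per-sample
# weights become [0.667, 0.667, 0.667, 2.0].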

train_labels = tf.keras.utils.to_categorical(train_labels)
val_labels = tf.keras.utils.to_categorical(val_labels)

print(np.shape(train_sequences))
print(np.shape(train_labels))
print(np.shape(val_sequences))
print(np.shape(val_labels))
Example #2
labels = [
    1, 1, 4, 2, 4, 1, 4, 1, 4, 2, 2, 2, 1, 1, 3, 2, 4, 1, 3, 0, 3, 4, 3, 4, 3,
    4, 3, 1, 2, 3, 3, 0, 4, 3, 0, 2, 2, 0, 4, 4, 3, 4, 1, 3, 4, 4, 0, 3, 0, 4,
    3, 1, 2, 1, 3, 0, 1, 1, 3, 1, 2, 3, 0, 3, 0, 2, 3, 1, 3, 3, 3, 2, 2, 0, 2,
    0, 4, 2, 3, 3, 2, 2, 4, 1, 0, 0, 4, 2, 1, 1, 0, 1, 0, 0, 0, 4, 0, 2, 3, 2,
    4, 0, 4, 2, 3, 2, 4, 0, 4, 1, 2, 0, 1, 0, 1, 1, 0, 4, 2, 1, 1, 4, 4, 1, 1,
    2, 3, 3, 2, 4, 2, 2, 0, 3, 0, 2, 1, 4, 2, 2, 3, 1, 2, 4, 2, 1, 2, 4, 0
]

channels = range(1, 8)  # assumed; channels is defined above the truncated snippet
sequence_groups = transform_data(
    data.process_scrambled(labels, ['arnav1.txt'],
                           channels=channels,
                           sample_rate=250))
print(len(sequence_groups))
print(list(map(len, sequence_groups)))

lengths = list(map(len, data.get_inputs(sequence_groups)[0]))
print(min(lengths), np.mean(lengths), max(lengths))

# Split sequence_groups into training and validation data
training_sequence_groups, validation_sequence_groups = data.split(
    sequence_groups, 1. / 3)
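
# A minimal sketch of the assumed data.split behavior: hold out the given
# fraction of each class group for validation (the real implementation may
# shuffle or stratify differently).
def split_sketch(sequence_groups, validation_fraction):
    training, validation = [], []
    for group in sequence_groups:
        n_val = int(round(len(group) * validation_fraction))
        validation.append(group[:n_val])
        training.append(group[n_val:])
    return training, validation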

# Manually selecting different training and validation datasets
#training_sequence_groups = transform_data(data.digits_session_1_dataset())
#validation_sequence_groups = transform_data(data.digits_session_4_dataset())

# Pads or truncates each sequence to length
length = 450  # DO NOT CHANGE
training_sequence_groups = data.transform.pad_truncate(
    training_sequence_groups, length)
validation_sequence_groups = data.transform.pad_truncate(
    validation_sequence_groups, length)
Example #3
# (snippet truncated above; the function header is assumed from the call below,
# and the filter count n is given a default here so the code parses)
def encoder(inp, z_dim, n=32):
    with arg_scope([conv2d], batch_norm_params=batch_norm_params,
                   stddev=0.02, activation=lrelu, weight_decay=1e-5):
        inp = inp - 0.5  # shift inputs from [0, 1] to [-0.5, 0.5]
        o = conv2d(inp, num_filters_out=n, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=2)
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
        flat = flatten(o)
        #flat = flatten(avg_pool(o, kernel_size=3))
        z = fc(flat, num_units_out=z_dim, activation=tf.nn.tanh) / 2 + .5  # map tanh output to [0, 1]
        return z
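
# lrelu is referenced in the arg_scope above but not defined in this snippet;
# a common leaky-ReLU definition (the 0.2 slope is an assumption):
def lrelu(x, leak=0.2):
    return tf.maximum(x, leak * x)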

batch_size = 32

with tf.variable_scope("data"):
    images, _ = data.get_inputs(batch_size)

with tf.variable_scope("generator") as gen_scope:
    z = tf.random_uniform([batch_size, z_dim], 0, 1)
    generated = generator(z)
gen_scope.reuse_variables()
gen_vars = [x for x in tf.trainable_variables() if x.name.startswith(gen_scope.name)]

with tf.variable_scope("discriminator") as scope:
    real_probs = discriminator(images)

with tf.variable_scope("discriminator", reuse=True) as scope:
    fake_probs = discriminator(generated)

with tf.variable_scope("encoder") as encoder_scope:
    enc_z = encoder(images, z_dim)
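
# Mirroring the gen_vars pattern above, the encoder's variables can be
# collected the same way (an assumption; not shown in the original snippet):
enc_vars = [x for x in tf.trainable_variables() if x.name.startswith(encoder_scope.name)]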
Example #4
# Pipeline:
#   input_batch -> samples -> train_model

# Discriminator --> works on sets of samples?
#                   An RNN?
# Sample a grid of points and run on that.
# Sample a subset of the image and run on that.

# Check real image data against sampled image data:
# sample a section of the image, and sample the corresponding section from the CPPN.
# Let's try a VAE first.
# The goal is unsupervised: gradient-descend from the input image back to the
# latent space to recover the code variables.
# With the adversarial approach there is no linking between the two.

imgs, labels = data.get_inputs(batch_size=10)

def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    shape = input_.get_shape().as_list()

    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
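
# Example use of linear(): project the flattened batch of images to a 64-d
# code (the shapes and scope name here are assumptions for illustration):
#   flat = tf.reshape(imgs, [10, -1])        # [batch, H*W*C]
#   code = linear(flat, 64, scope="enc_fc")  # [batch, 64]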

def sample_img(img, n_samples):
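    # (snippet truncated here; a minimal sketch of one plausible body, following
    # the "sample a subset of the image" note above: gather n_samples random pixels)
    h, w = tf.shape(img)[0], tf.shape(img)[1]
    ys = tf.random_uniform([n_samples], 0, h, dtype=tf.int32)
    xs = tf.random_uniform([n_samples], 0, w, dtype=tf.int32)
    coords = tf.stack([ys, xs], axis=1)  # [n_samples, 2] pixel coordinates
    values = tf.gather_nd(img, coords)   # sampled pixel values
    return coords, values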
Example #5
# (snippet truncated above; the function header is assumed from the call below,
# and the filter count n is given a default here so the code parses)
def generator(z, n=32):
    with arg_scope([conv2d, conv2d_transpose],
                   batch_norm_params=batch_norm_params, stddev=0.02):
        z = z * 2 - 1  # map z from [0, 1] to [-1, 1]
        d = 8
        z = fc(z, num_units_out=d*d*32, batch_norm_params=batch_norm_params)
        c = z.get_shape()[1].value // (d * d)  # channels after reshape (integer division)
        z = tf.reshape(z, (-1, d, d, c))
        o = conv2d_transpose(z, n, (3, 3), stride=(2, 2))
        o = conv2d_transpose(o, n, (3, 3), stride=(2, 2))
        o = conv2d(o, num_filters_out=n*2, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=1, kernel_size=(3, 3), stride=1, padding="VALID", batch_norm_params=None)
        out = o[:, 1:29, 1:29, :]  # crop the 30x30 VALID output to 28x28
        return out


with tf.variable_scope("data"):
    images, _ = data.get_inputs(128)

with tf.variable_scope("generator") as gen_scope:
    z = tf.random_uniform([128, 16*8], 0, 1)
    generated = generator(z)
gen_scope.reuse_variables()
gen_vars = [x for x in tf.trainable_variables() if x.name.startswith(gen_scope.name)]

with tf.variable_scope("discriminator") as scope:
    real_probs = discriminator(images)

with tf.variable_scope("discriminator", reuse=True) as scope:
    fake_probs = discriminator(generated)

dis_vars = [x for x in tf.trainable_variables() if x.name.startswith(scope.name)]
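
# The snippet ends here; a standard next step (an assumption, not part of the
# original) is the classic GAN log-loss on the probabilities above:
eps = 1e-8
dis_loss = -tf.reduce_mean(tf.log(real_probs + eps) + tf.log(1. - fake_probs + eps))
gen_loss = -tf.reduce_mean(tf.log(fake_probs + eps))
dis_train = tf.train.AdamOptimizer(2e-4).minimize(dis_loss, var_list=dis_vars)
gen_train = tf.train.AdamOptimizer(2e-4).minimize(gen_loss, var_list=gen_vars)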