Example #1
# the head of this snippet is truncated; a plausible reconstruction of the
# schedule, with the initial value and module path assumed:
learning_rate = sknet.schedules.PiecewiseConstant(0.001, {
    300 * B: 0.0001
})
optimizers = []  # assumed: the list must exist before the loop below

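# one Adam optimizer per loss, each updating only its own two-layer slice of
# the network's parameters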
for i, l in enumerate(losses):
    optimizers.append(
        Adam(l, learning_rate, params=dnn[i * 2 + 1:(i + 1) * 2 + 1].params))

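# sum(..., []) flattens the per-optimizer update lists into one list;
# tf.group bundles them, together with the network's own update ops, into a
# single training op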
minimizer = tf.group(
    sum([opt.updates for opt in optimizers], []) + dnn.updates)

# Workers
#---------

min1 = sknet.Worker(op_name='minimizer',
                    context='train_set',
                    op=minimizer,
                    instruction='execute every batch',
                    deterministic=False)

loss1 = sknet.Worker(op_name='loss',
                     context='train_set',
                     op=tf.stack(losses, -1),
                     instruction='save & print every 100 batch',
                     deterministic=False)

op_accu = tf.reshape(tf.stack(accus), (1, 6))

accus1 = sknet.Worker(
    op_name='prediction',
    context='test_set',
    op=op_accu,
    deterministic=True)  # remaining arguments assumed; snippet tail truncated
Example #2
lr = sknet.schedules.PiecewiseConstant(0.002, {
    100 * B: 0.002,
    200 * B: 0.001,
    250 * B: 0.0005
})
optimizer = sknet.optimizers.Adam(loss,
                                  lr,
                                  params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------
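# period and verbose take one entry per op: here the minimizer runs every
# batch silently (verbose 0) while the loss is printed at verbosity 3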

min1 = sknet.Worker(name='minimizer',
                    context='train_set',
                    op=[minimizer, loss],
                    deterministic=False,
                    period=[1, 1],
                    verbose=[0, 3])

accu1 = sknet.Worker(name='accu',
                     context='test_set',
                     op=accu,
                     deterministic=True,
                     transform_function=np.mean,
                     verbose=1)

queue = sknet.Queue((min1, accu1))

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)
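# the snippet ends here; by analogy with Example #11, running the pipeline
# would look something like (epoch count assumed):
# workplace.execute_queue(queue, repeat=350, deter_func=dnn.deter_dict)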
Example #3
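# objective and evaluation metrics; the Streaming* losses presumably
# accumulate their statistics over the whole pass rather than per batch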
loss = sknet.losses.crossentropy_logits(p=dataset.labels, q=dnn[-1])
accuracy = sknet.losses.StreamingAccuracy(dataset.labels, dnn[-1])
auc = sknet.losses.StreamingAUC(dataset.labels, tf.nn.softmax(dnn[-1])[:, 1])

# We aim to minimize the loss, so we create the optimizer (Adam in this case)
# with a fixed learning rate. The minimizer is the operation that applies the
# updates to the model parameters; we also specify that this operation should
# include any network dependencies present in UPDATE_OPS.

optimizer = sknet.optimizers.Adam(loss, dnn.variables(trainable=True), 0.001)
minimizer = tf.group(optimizer.updates + dnn.updates)
# Workers
#---------
work1 = sknet.Worker(name='minimizer',
                     context='train_set',
                     op=[minimizer, loss, accuracy, auc],
                     deterministic=False,
                     period=[1, 100, 1, 1],
                     verbose=[0, 2, 1, 1])

work2 = sknet.Worker(name='AUC',
                     context='test_set',
                     op=[accuracy, auc],
                     deterministic=True,
                     verbose=[1, 1],
                     period=[1, 1])

queue = sknet.Queue((work1, work2))

# Pipeline
#---------
Example #4
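# B is the number of minibatches per epoch (batch size 64); the schedule
# below therefore lowers the learning rate at epochs 100, 200 and 250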
B = dataset.N('train_set') // 64
lr = sknet.schedules.PiecewiseConstant(0.001, {
    100 * B: 0.0005,
    200 * B: 0.0001,
    250 * B: 0.00005
})
optimizer = sknet.optimizers.Adam(loss, dnn.variables(trainable=True), lr)
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------

min_worker = sknet.Worker(name='minimizer',
                          context='train_set',
                          op=[minimizer, loss] + mus + [dataset.images],
                          deterministic=False,
                          period=[1, 100] + [5000] * (len(mus) + 1))

accu_worker = sknet.Worker(name='accu',
                           context='test_set',
                           op=[accu] + mus + [dataset.images],
                           deterministic=True,
                           period=[1] + [5] * (len(mus) + 1))

PATH = '/mnt/drive1/rbalSpace/centroids/'
queue = sknet.Queue((min_worker, accu_worker),
                    filename=PATH + 'saved_mus_' + MODEL + '.h5')

# Pipeline
#---------
Example #5
reconstruction = dnn[-1]
# Loss and Optimizer
#-------------------

# Compute some quantities that we want to keep track of and/or act upon
loss = MSE(dataset.signals, dnn[-1])

optimizer = sknet.optimize.Adam(loss, 0.00001, params=dnn.params)
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------

work1 = sknet.Worker(op_name='minimizer',
                     context='train_set',
                     op=minimizer,
                     instruction='execute every batch',
                     deterministic=False)

work2 = sknet.Worker(op_name='loss',
                     context='train_set',
                     op=loss,
                     instruction='save & print every 300 batch',
                     deterministic=False)

work3 = work2.alter(context='test_set',
                    deterministic=True,
                    instruction='save every batch & print average')

work4 = sknet.Worker(op_name='reconstruction',
                     context='test_set',
                     op=reconstruction,
                     deterministic=True)  # remaining arguments assumed; tail truncated
Example #6
B = dataset.N('train_set') // 32
lr = sknet.optimize.PiecewiseConstant(0.005, {
    100 * B: 0.003,
    200 * B: 0.001,
    250 * B: 0.0005
})
optimizer = Adam(loss, lr, params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------

minimize = sknet.Worker(name='loss',
                        context='train_set',
                        op=[minimizer, loss, hessian, dataset.labels],
                        deterministic=False,
                        period=[1, 100, 100, 100])

accu = sknet.Worker(name='accu',
                    context='test_set',
                    op=[accu, hessian, dataset.labels],
                    deterministic=True,
                    transform_function=[np.mean, None, None])

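# results are written to an .h5 file whose name encodes the data-augmentation
# flag and, presumably, the network depth D and width W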
queue = sknet.Queue((minimize, accu),
                    filename='cifar10_' + str(data_augmentation) + '_' +
                             str(D) + '_' + str(W) + '.h5')

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)
Example #7
test_accu = sknet.losses.StreamingAccuracy(dataset.labels, prediction[:16])

B = dataset.N('train_set') // 16
lr = sknet.schedules.PiecewiseConstant(0.01, {75 * B: 0.005, 125 * B: 0.001})
optimizer = sknet.optimizers.Adam(train_classif + train_recons * PARAMETER,
                                  dnn.variables(trainable=True), lr)
minimizer = tf.group(optimizer.updates + dnn.updates)
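# reset_op re-initializes both the network and the optimizer variables,
# presumably so that training can be restarted from scratch in the same graph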
reset_op = tf.group(dnn.reset_variables_op, optimizer.reset_variables_op)

# Workers
# ---------

train_w = sknet.Worker(
    name='minimizer',
    context='train_set',
    op=[minimizer, train_recons, train_classif, train_uaccu],
    deterministic=False,
    period=[1, 100, 100, 1],
    verbose=[0, 2, 2, 1])

valid_w = sknet.Worker(name='accu',
                       context='valid_set',
                       op=[test_accu, test_recons],
                       deterministic=True,
                       verbose=1)

test_w = sknet.Worker(name='accu',
                      context='test_set',
                      op=[test_accu, test_recons],
                      deterministic=True,
                      verbose=1)
Example #8
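    # make the second column of W1 orthogonal to the first (a Gram-Schmidt
    # step), rescale it, then tile the two columns three times each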
    W1[:, 1] -= W1[:, 0] * (W1[:, 1] * W1[:, 0]).sum() / (W1[:, 0]**2).sum()
    W1[:, 1] *= 2
    W1 = np.repeat(W1, 3, 1)
    dnn.append(ops.Dense(dataset.input, 6, W=W1, b=b1[[0, 5, 1, 2, 4, 3]]))
    output = dnn[0]
else:
    # RANK 2 ARBITRARY
    dnn = sknet.network.Network(name='simple_model')
    np.random.seed(111)
    W1 = (np.random.randn(2, 6) / 4).astype('float32')
    dnn.append(ops.Dense(dataset.input, 6, W=W1, b=b1))
    output = dnn[0]

output = sknet.Worker(op_name='poly',
                      context='train_set',
                      op=output,
                      instruction='save every batch',
                      deterministic=False)

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)
workplace.execute_worker(output)

fig = plt.figure(figsize=(6, 9))
#
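# recover the input-space partition induced by the layer's sign pattern over
# the N x N grid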
mask = output.data[0] > 0
boundary = sknet.utils.geometry.get_input_space_partition(mask, N, N,
                                                          2).astype('bool')
boundary = boundary.astype('float32')
Example #9
# We aim to minimize the loss, so we create the optimizer (Adam in this case)
# with a fixed learning rate. The minimizer is the operation that applies the
# updates to the model parameters; we also specify that this operation should
# include any network dependencies present in UPDATE_OPS.

optimizer = sknet.optimize.Adam(loss, 0.01, params=dnn.params)
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------

work1 = sknet.Worker(op_name='minimizer',
                     context='train_set',
                     op=minimizer,
                     instruction='execute every batch',
                     deterministic=False,
                     sampling='random',
                     description='minimize')

work2 = sknet.Worker(op_name='loss',
                     context='train_set',
                     op=loss,
                     instruction='save & print every 30 batch',
                     deterministic=False,
                     description='saving the loss every 30 batches',
                     sampling='random')

work3 = sknet.Worker(
    op_name='accuracy',
    context='train_set',
    op=accuracy,  # identifier assumed; the snippet's tail is truncated here
    deterministic=False)
Example #10
# last layer going to 1d for binary classification
dnn.append(
    ops.Dense(dnn[-1],
              1,
              W=1 * np.random.randn(4, 1).astype('float32'),
              b=np.array([0.5]).astype('float32')))

# extract the outputs prior to the nonlinearity
outputs = dnn[0::2].as_list()

# Workers
#---------

outputs = tf.concat(outputs, -1)
output = sknet.Worker(outputs=outputs,
                      context='train_set',
                      feed_dict=dnn.deter_dict(False))

# Pipeline
#---------

# now get the pre-activation feature maps for all the points of the 2D grid
workplace = sknet.Workplace(dataset=dataset)
workplace.execute_worker(output)

# format those data for plotting the partitioning
features = output.epoch_data['outputs'][0]
features = features.reshape((-1, features.shape[-1]))

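# per-layer sign patterns of the pre-activations: 4, 4 and 1 units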
masks = [features[:, :4] > 0, features[:, 4:8] > 0, features[:, [8]] > 0]
boundarys_l = [
Example #11
# head truncated in the original; a plausible reconstruction, with the
# initial value assumed:
lr = sknet.schedules.PiecewiseConstant(0.01, {
    100 * B: 0.005,
    200 * B: 0.001,
    250 * B: 0.0005
})

optimizer = sknet.optimizers.NesterovMomentum(
    loss, lr, params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------


def period_function(batch, epoch):
    # evaluate/save the attached op every 100th batch
    return batch % 100 == 0


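# ops are passed to the Worker as keyword arguments here; pairing an op with
# a period function, as for the loss below, presumably restricts when it is
# evaluated and saved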
train = sknet.Worker(context='train_set',
                     deterministic=False,
                     minimizer=minimizer,
                     loss=(loss, period_function))

test = sknet.Worker(context='test_set', deterministic=True, accuracy=accu)

queue = sknet.Queue((train, test))

workplace = sknet.utils.Workplace(dataset)
workplace.execute_queue(queue, repeat=350, deter_func=dnn.deter_dict)
Example #12
# We aim to minimize the loss, so we create the optimizer (Adam in this case)
# with a stepwise learning rate. The minimizer is the operation that applies
# the updates to the model parameters; we also specify that this operation
# should include any network dependencies present in UPDATE_OPS.
learning_rate_schedule = schedule.stepwise({
    0: 0.01,
    5000: 0.001,
    100000: 0.0001
})
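# the stepwise schedule is keyed by global step: 0.01 until step 5000, then
# 0.001, then 0.0001 from step 100000 on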
optimizer = Adam(schedule=learning_rate_schedule,
                 dependencies=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
minimizer = optimizer.minimize(loss)

# Workers
#---------

work1 = sknet.Worker(op_name='minimizer',
                     context='train_set',
                     op=minimizer,
                     instruction='execute every batch',
                     deterministic=False,
                     description=optimizer.description)

work2 = sknet.Worker(op_name='loss',
                     context='train_set',
                     op=loss,
                     instruction='save & print every 30 batch',
                     deterministic=False,
                     description='saving the loss every 30 batches')

work3 = sknet.Worker(
    op_name='accuracy',
    context='test_set',
    op=accuracy,
    instruction='execute every batch and save & print & average',
    deterministic=True)  # closing argument assumed; snippet tail truncated
Example #13
lr = sknet.schedules.PiecewiseConstant(0.001, {
    90 * B: 0.0005,
    160 * B: 0.0001
})
optimizer = sknet.optimizers.Adam(loss_classif + PARAMETER * loss_recons,
                                  dnn.variables(trainable=True), lr)
minimizer = tf.group(*optimizer.updates, *dnn.updates)
reset_op = tf.group(optimizer.reset_variables_op, dnn.reset_variables_op)

# Workers
# ---------
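# BatchPeriod(1000): the heavier ops below (images and reconstructions) are
# presumably only evaluated and saved every 1000 batches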
period = sknet.dataset.BatchPeriod(1000)
train = sknet.Worker(minimizer,
                     loss_recons=loss_recons,
                     images=(dataset.images, period),
                     reconstructions=(reconstruction, period),
                     loss_classif=loss_classif,
                     context='train_set',
                     deterministic=False)

valid = sknet.Worker(loss_recons=loss_recons,
                     loss_classif=loss_classif,
                     context='valid_set',
                     deterministic=True)

test = sknet.Worker(loss_recons=loss_recons,
                    loss_classif=loss_classif,
                    images=(dataset.images, period),
                    reconstructions=(reconstruction, period),
                    context='test_set',
                    deterministic=True)