Example #1
min1 = sknet.Worker(op_name='minimizer',
                    context='train_set',
                    op=minimizer,
                    instruction='execute every batch',
                    deterministic=False)

loss1 = sknet.Worker(op_name='loss',
                     context='train_set',
                     op=tf.stack(losses, -1),
                     instruction='save & print every 100 batch',
                     deterministic=False)

op_accu = tf.reshape(tf.stack(accus), (1, 6))

accus1 = sknet.Worker(
    op_name='prediction',
    context='test_set',
    op=op_accu,
    instruction='execute every batch and save & print & average',
    deterministic=True,
    description='standard classification accuracy')

queue1 = sknet.Queue((min1 + loss1, accus1.alter(context='valid_set'), accus1))

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)

workplace.execute_queue(queue1,
                        repeat=400,
                        filename='test123.h5',
                        save_period=50)

# sknet.to_file(output, 'test.h5', 'w')
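Every example on this page follows the same skeleton: wrap TensorFlow ops in sknet.Worker objects, group the workers into a sknet.Queue, and let a Workplace execute the queue against the dataset. A minimal sketch of that skeleton, assuming dnn, dataset, loss and a minimizer op are defined as in the snippets, with an illustrative repeat value:

worker = sknet.Worker(op_name='loss',
                      context='train_set',
                      op=loss,
                      instruction='save & print every 100 batch',
                      deterministic=False)
# alter() clones a worker with some fields overridden (here the split);
# a queue slot holds one worker or a '+'-grouped set sharing a context
queue = sknet.Queue((worker, worker.alter(context='test_set',
                                          deterministic=True)))
workplace = sknet.utils.Workplace(dnn, dataset=dataset)
workplace.execute_queue(queue, repeat=10)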
Example #2
    250 * B: 0.0005
})
optimizer = sknet.optimizers.Adam(loss,
                                  lr,
                                  params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------

min1 = sknet.Worker(name='minimizer',
                    context='train_set',
                    op=[minimizer, loss],
                    deterministic=False,
                    period=[1, 1],
                    verbose=[0, 3])

accu1 = sknet.Worker(name='accu',
                     context='test_set',
                     op=accu,
                     deterministic=True,
                     transform_function=np.mean,
                     verbose=1)

queue = sknet.Queue((min1, accu1))

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)
workplace.execute_queue(queue, repeat=350)
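When op is a list, period and verbose are paired with it entry by entry, so each op runs and prints at its own rhythm. A hedged re-reading of min1 with annotated values (the loss period of 100 is illustrative; the snippet above uses 1):

min1 = sknet.Worker(name='minimizer',
                    context='train_set',
                    op=[minimizer, loss],  # two ops, hence two-entry options
                    deterministic=False,
                    period=[1, 100],       # run every batch / every 100 batches
                    verbose=[0, 2])        # 0 = silent, higher = more printing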
Example #3
})
optimizer = Adam(loss, lr, params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

reset = tf.group(dnn.reset_variables_op, optimizer.reset_variables_op)
# Workers
#---------

min1 = sknet.Worker(name='minimizer',
                    context='train_set',
                    op=[minimizer, loss],
                    deterministic=False,
                    period=[1, 100])

accu1 = sknet.Worker(name='accu',
                     context='test_set',
                     op=accu,
                     deterministic=True,
                     transform_function=np.mean,
                     verbose=1)

queue = sknet.Queue((min1, accu1), filename='save_reset.h5')

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)

for c in range(10):
    workplace.session.run(reset)
    workplace.execute_queue(queue, repeat=30, close_file=(c == 9))
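Grouping dnn.reset_variables_op with optimizer.reset_variables_op into a single reset op lets every iteration retrain from fresh parameters while all runs append to the same save_reset.h5 file; close_file should then be true only on the last run. The same loop with the run count factored out (N_RUNS and EPOCHS are illustrative names):

N_RUNS, EPOCHS = 10, 30
for c in range(N_RUNS):
    workplace.session.run(reset)  # re-initialize model and optimizer state
    workplace.execute_queue(queue, repeat=EPOCHS,
                            close_file=(c == N_RUNS - 1))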
Example #4
    200 * B: 0.0001,
    250 * B: 0.00005
})
optimizer = sknet.optimizers.Adam(loss, lr,
                                  params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------

min_worker = sknet.Worker(name='minimizer',
                          context='train_set',
                          op=[minimizer, loss] + mus + [dataset.images],
                          deterministic=False,
                          period=[1, 100] + [5000] * (len(mus) + 1))

accu_worker = sknet.Worker(name='accu',
                           context='test_set',
                           op=[accu] + mus + [dataset.images],
                           deterministic=True,
                           period=[1] + [5] * (len(mus) + 1))

PATH = '/mnt/drive1/rbalSpace/centroids/'
queue = sknet.Queue((min_worker, accu_worker),
                    filename=PATH + 'saved_mus_' + MODEL + '.h5')

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)

workplace.execute_queue(queue, repeat=150)
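Appending mus + [dataset.images] to the op list makes both workers periodically dump the centroid tensors together with a batch of raw images, at a much sparser period than the loss itself. A sketch of that bookkeeping, with extra standing in for any list of tensors to snapshot:

extra = mus + [dataset.images]  # tensors snapshotted only occasionally
min_worker = sknet.Worker(name='minimizer',
                          context='train_set',
                          op=[minimizer, loss] + extra,
                          deterministic=False,
                          period=[1, 100] + [5000] * len(extra))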
Example #5
lr = sknet.optimize.PiecewiseConstant(0.005, {
    100 * B: 0.003,
    200 * B: 0.001,
    250 * B: 0.0005
})
optimizer = Adam(loss, lr, params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------

minimize = sknet.Worker(name='loss',
                        context='train_set',
                        op=[minimizer, loss, hessian, dataset.labels],
                        deterministic=False,
                        period=[1, 100, 100, 100])

accu = sknet.Worker(name='accu',
                    context='test_set',
                    op=[accu, hessian, dataset.labels],
                    deterministic=True,
                    transform_function=[np.mean, None, None])

queue = sknet.Queue((minimize, accu),
                    filename='cifar10_' + str(data_augmentation) + '_' +
                             str(D) + '_' + str(W) + '.h5')

# Pipeline
#---------
workplace = sknet.utils.Workplace(dnn, dataset=dataset)
workplace.execute_queue(queue, repeat=350)
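transform_function post-processes what a worker gathered before saving: a single callable applies to a single op, and a list applies entry-wise, with None meaning the raw per-batch values are kept. A sketch of both forms, reusing names from the snippets above:

# one op, one reduction: store only the mean test accuracy
accu1 = sknet.Worker(name='accu', context='test_set', op=accu,
                     deterministic=True, transform_function=np.mean)
# several ops: average the first, keep the hessian and labels raw
accu2 = sknet.Worker(name='accu', context='test_set',
                     op=[accu, hessian, dataset.labels],
                     deterministic=True,
                     transform_function=[np.mean, None, None])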
Example #6
# Workers
#---------
work1 = sknet.Worker(name='minimizer',
                     context='train_set',
                     op=[minimizer, loss, accuracy, auc],
                     deterministic=False,
                     period=[1, 100, 1, 1],
                     verbose=[0, 2, 1, 1])

work2 = sknet.Worker(name='AUC',
                     context='test_set',
                     op=[accuracy, auc],
                     deterministic=True,
                     verbose=[1, 1],
                     period=[1, 1])

queue = sknet.Queue((work1, work2))

# Pipeline
#---------

# the pipeline assembles all the components needed to execute the
# program: the dataset, the workers, and the linkage describing which
# values missing from the network have to be looked up in the dataset
# (for example, the labels)

workplace = sknet.utils.Workplace(dnn, dataset=dataset)

# will fit the model for 30 epochs and return the gathered op
# outputs given the above definitions
workplace.execute_queue(queue, repeat=30)
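The verbose levels here range over 0, 1 and 2 (Example #2 uses 3 for the loss); a hedged reading of the snippets is that 0 silences an op while higher values print progressively more detail. An annotated version of work1 under that reading:

work1 = sknet.Worker(name='minimizer',
                     context='train_set',
                     op=[minimizer, loss, accuracy, auc],
                     deterministic=False,
                     period=[1, 100, 1, 1],  # loss recorded every 100 batches
                     verbose=[0, 2, 1, 1])   # minimizer silent, loss most chatty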
Example #7
work1 = sknet.Worker(op_name='minimizer',
                     context='train_set',
                     op=minimizer,
                     instruction='execute every batch',
                     deterministic=False)

work2 = sknet.Worker(op_name='loss',
                     context='train_set',
                     op=loss,
                     instruction='save & print every 300 batch',
                     deterministic=False)

work3 = work2.alter(context='test_set',
                    deterministic=True,
                    instruction='save every batch & print average')

work4 = sknet.Worker(op_name='reconstruction',
                     context='test_set',
                     op=reconstruction,
                     instruction='save every 300 batch',
                     deterministic=True)

queue = sknet.Queue((work1 + work2, work3 + work4))

# Pipeline
#---------

workplace = sknet.utils.Workplace(dnn, dataset=dataset)

workplace.execute_queue(queue, repeat=2, filename='test.h5', save_period=20)

print(np.shape(work4.data[0]))
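alter clones a worker and keeps every field that is not overridden, so work3 tracks the same loss op as work2 but on the test set, deterministically, under a different saving instruction. A hypothetical third clone for a validation split would slot into the queue the same way:

# hypothetical validation copy of work2, reusing the same loss op
work5 = work2.alter(context='valid_set',
                    deterministic=True,
                    instruction='save every batch & print average')
queue = sknet.Queue((work1 + work2, work5, work3 + work4))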
Example #8
train_w = sknet.Worker(
    name='minimizer',
    context='train_set',
    op=[minimizer, train_recons, train_classif, train_uaccu],
    deterministic=False,
    period=[1, 100, 100, 1],
    verbose=[0, 2, 2, 1])

valid_w = sknet.Worker(name='accu',
                       context='valid_set',
                       op=[test_accu, test_recons],
                       deterministic=True,
                       verbose=1)

test_w = sknet.Worker(name='accu',
                      context='test_set',
                      op=[test_accu, test_recons],
                      deterministic=True,
                      verbose=1)

workplace = sknet.utils.Workplace(dnn, dataset=dataset)
path = '/mnt/drive1/rbalSpace/GradientInversion/semisupervised/semisupervised'
for i in range(10):
    filename = '_classif_{}_{}_{}_{}_{}_run{}'.format(DATA_AUGMENTATION,
                                                      DATASET, MODEL,
                                                      PARAMETER, SAMPLES, i)

    queue = sknet.Queue((train_w, valid_w, test_w), filename=path + filename)
    workplace.execute_queue(queue, repeat=150)
    workplace.session.run(reset_op)
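Unlike Example #3, which appends all runs to one file, this loop rebuilds the queue each iteration so every run writes its own file, and reset_op restores fresh parameters in between. The naming scheme, with hypothetical configuration values for illustration:

DATA_AUGMENTATION, DATASET, MODEL = False, 'cifar10', 'cnn'  # hypothetical
PARAMETER, SAMPLES = 1.0, 100                                # hypothetical
print('_classif_{}_{}_{}_{}_{}_run{}'.format(DATA_AUGMENTATION, DATASET,
                                             MODEL, PARAMETER, SAMPLES, 0))
# -> _classif_False_cifar10_cnn_1.0_100_run0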
Example #9
work2 = sknet.Worker(op_name='loss',
                     context='train_set',
                     op=loss,
                     instruction='save & print every 30 batch',
                     deterministic=False,
                     description='saving the loss every 30 batches',
                     sampling='random')

work3 = sknet.Worker(
    op_name='accuracy',
    context='train_set',
    op=accuracy,
    instruction='execute every batch and save & print & average',
    deterministic=False,
    description='standard classification accuracy')

queue = sknet.Queue(
    (work1 + work2 + work3, work3.alter(context='valid_set',
                                        deterministic=True),
     work3.alter(context='test_set', deterministic=True)))
# Pipeline
#---------

# the pipeline assembles all the components needed to execute the
# program: the dataset, the workers, and the linkage describing which
# values missing from the network have to be looked up in the dataset
# (for example, the labels)

workplace = sknet.utils.Workplace(dnn, dataset=dataset)

# will fit the model for 50 epochs and return the gathered op
# outputs given the above definitions

output = workplace.execute_queue(queue, repeat=50)
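Here the return value of execute_queue is kept: output aggregates the op outputs gathered while the queue ran, and the commented-out sknet.to_file call in Example #1 suggests it can then be dumped to disk. A hedged sketch of that follow-up, assuming output as returned above:

# optional: dump the gathered outputs to disk, as hinted in Example #1
sknet.to_file(output, 'test.h5', 'w')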
Example #10
    100 * B: 0.005,
    200 * B: 0.001,
    250 * B: 0.0005
})

optimizer = sknet.optimizers.NesterovMomentum(
    loss, lr, params=dnn.variables(trainable=True))
minimizer = tf.group(optimizer.updates + dnn.updates)

# Workers
#---------


def period_function(batch, epoch):
    # record the loss every 100 train batches, regardless of the epoch
    return batch % 100 == 0


train = sknet.Worker(context='train_set',
                     deterministic=False,
                     minimizer=minimizer,
                     loss=(loss, period_function))

test = sknet.Worker(context='test_set', deterministic=True, accuracy=accu)

queue = sknet.Queue((train, test))

workplace = sknet.utils.Workplace(dataset)
workplace.execute_queue(queue, repeat=350, deter_func=dnn.deter_dict)
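Rather than instruction strings or period lists, this snippet passes ops as keyword arguments and attaches a plain Python callable deciding, per (batch, epoch), when an op is recorded. A sketch of an alternative callable with the same signature (the even-epoch condition is illustrative):

def every_second_epoch(batch, epoch):
    # record on every batch of even-numbered epochs only
    return epoch % 2 == 0

train = sknet.Worker(context='train_set',
                     deterministic=False,
                     minimizer=minimizer,
                     loss=(loss, every_second_epoch))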
Example #11
# Workers
# ---------
period = sknet.dataset.BatchPeriod(1000)
train = sknet.Worker(minimizer,
                     loss_recons=loss_recons,
                     images=(dataset.images, period),
                     reconstructions=(reconstruction, period),
                     loss_classif=loss_classif,
                     context='train_set',
                     deterministic=False)

valid = sknet.Worker(loss_recons=loss_recons,
                     loss_classif=loss_classif,
                     context='valid_set',
                     deterministic=True)

test = sknet.Worker(loss_recons=loss_recons,
                    loss_classif=loss_classif,
                    images=(dataset.images, period),
                    reconstructions=(reconstruction, period),
                    context='test_set',
                    deterministic=True)

path = '/mnt/drive1/rbalSpace/GradientInversion/supervised/'
workplace = sknet.utils.Workplace(dataset)

for i in range(10):
    filename = '{}_{}_{}_run{}'.format(DATASET, MODEL, PARAMETER, i)
    queue = sknet.Queue((train, valid, test), filename=path + filename)
    workplace.execute_queue(queue, deter_func=dnn.deter_dict, repeat=200)
    workplace.session.run(reset_op)
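sknet.dataset.BatchPeriod(1000) expresses the same idea declaratively: pairing an op with a period object limits how often it is evaluated and stored, and a single period instance can be shared across workers, as images and reconstructions are here. A sketch with an illustrative sparser period:

sparse = sknet.dataset.BatchPeriod(5000)  # illustrative: every 5000 batches
test = sknet.Worker(loss_recons=loss_recons,
                    images=(dataset.images, sparse),
                    reconstructions=(reconstruction, sparse),
                    context='test_set',
                    deterministic=True)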