Example #1
def run(mod, pred, loss, epochs, base_lre, max_lr, name):
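    """Train `mod` with a cyclical learning rate and evaluate after each epoch.

    Assumes module-level globals defined elsewhere in the script:
    `sess` (a tf.Session), `config` (dict), `cifar100` (batch provider),
    `n_samples`/`n_test_samples` (dataset sizes), and a `cycle_lr`
    schedule helper (a sketch of one is given after this example).
    """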
    train_loss_set = []
    train_acc_set = []
    test_loss_set = []
    test_acc_set = []
    iter_per_epoch = int(n_samples / config['batch_size'])
    Lr = cycle_lr(base_lre, max_lr, iter_per_epoch, config['cycle_epoch'],
                  config['cycle_ratio'], epochs)

    cy_lr = tf.placeholder(tf.float32, shape=(), name="cy_lr_" + name)
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=cy_lr).minimize(loss)

    prediction = tf.argmax(pred, 1)
    correct_prediction = tf.equal(prediction, tf.argmax(mod.y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name="accuracy_" + name)

    iteration = 0
    iter_per_test_epoch = int(n_test_samples / config['batch_size'])
    for epoch in range(epochs):
        epoch_loss = 0.
        epoch_acc = 0.
        for iter_in_epoch in range(iter_per_epoch):
            epoch_x, epoch_fine, epoch_coarse = cifar100.next_train_batch(
                config['batch_size'])
            _, c, acc = sess.run(
                [optimizer, loss, accuracy],
                feed_dict={
                    mod.x: epoch_x,
                    mod.y: epoch_fine,
                    mod.training: True,
                    cy_lr: Lr[iteration],
                    mod.keep_prob: 0.7
                })
            epoch_loss += c
            epoch_acc += acc
            iteration += 1
            if iter_in_epoch % 100 == 0:
                print('Epoch {}/{}: {:.2f}% of epoch done, loss: {:.4f}, '
                      'acc: {:.2f}%'.format(
                          epoch, epochs,
                          100 * (iter_in_epoch + 1) / iter_per_epoch,
                          epoch_loss / (iter_in_epoch + 1),
                          epoch_acc * 100 / (iter_in_epoch + 1)))
        print('######################')
        print('TRAIN')
        print('Epoch {}/{}, loss: {:.4f}, acc: {:.2f}%'.format(
            epoch, epochs, epoch_loss / iter_per_epoch,
            epoch_acc * 100 / iter_per_epoch))
        train_loss_set.append(epoch_loss / iter_per_epoch)
        train_acc_set.append(epoch_acc * 100 / iter_per_epoch)
        test_loss = 0.
        test_acc = 0.
        for iter_in_epoch in range(int(iter_per_test_epoch)):
            epoch_x, epoch_fine, epoch_coarse = cifar100.next_test_batch(
                config['batch_size'])
            c, acc = sess.run(
                [loss, accuracy],
                feed_dict={
                    mod.x: epoch_x,
                    mod.y: epoch_fine,
                    mod.training: False,
                    mod.keep_prob: 1.
                })
            test_loss += c
            test_acc += acc
        print('TEST')
        print('Epoch {}, loss: {:.4f}, acc: {:.2f}%'.format(
            epoch, test_loss / iter_per_test_epoch,
            test_acc * 100 / iter_per_test_epoch))
        print('######################\n')
        test_loss_set.append(test_loss / iter_per_test_epoch)
        test_acc_set.append(test_acc * 100 / iter_per_test_epoch)

    return train_loss_set, train_acc_set, test_loss_set, test_acc_set
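
All three examples rely on a `cycle_lr` helper whose definition is not shown. The sketch below is one plausible implementation, assuming a triangular cyclical schedule in the spirit of Smith's cyclical learning rates, where `cycle_epoch` sets the cycle length in epochs and `cycle_ratio` is assumed to decay the amplitude after each cycle; the real helper may differ.

import numpy as np

def cycle_lr(base_lr, max_lr, iter_per_epoch, cycle_epoch, cycle_ratio, epochs):
    """Per-iteration triangular schedule: ramp base_lr -> max_lr -> base_lr
    once every `cycle_epoch` epochs, shrinking the amplitude by
    `cycle_ratio` after each completed cycle. Returns an array indexed
    by global iteration, matching the `Lr[iteration]` lookups above."""
    total_iters = iter_per_epoch * epochs
    half_cycle = (iter_per_epoch * cycle_epoch) / 2.0
    lrs = np.empty(total_iters)
    for it in range(total_iters):
        cycle = int(it // (2 * half_cycle))       # which triangle we are in
        x = abs(it / half_cycle - 2 * cycle - 1)  # 1 -> 0 -> 1 within a cycle
        amp = (max_lr - base_lr) * (cycle_ratio ** cycle)
        lrs[it] = base_lr + amp * (1.0 - x)
    return lrs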
Example #2
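# Top-level script variant of Example #1. Assumes `os` and `tensorflow as tf`
# are imported and that `config`, `model`, `n_samples`, and the `cycle_lr`
# helper are defined earlier in the script.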
### make folder ###
mother_folder = config['model_name']
os.makedirs(mother_folder, exist_ok=True)


### outputs ###
pred, loss = model.Forward()

iter_per_epoch = int(n_samples / config['batch_size'])


### cyclic learning rate ###
Lr = cycle_lr(config['base_lr'], config['max_lr'], iter_per_epoch, 
              config['cycle_epoch'], config['cycle_ratio'], config['epochs'])

cy_lr = tf.placeholder(tf.float32, shape=(), name="cy_lr")


### run ###
folder_name = os.path.join(mother_folder, config['model_name']+'_'+config['datasets'])


with tf.name_scope('train'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=cy_lr).minimize(loss)

prediction = tf.argmax(pred, 1)
correct_prediction = tf.equal(prediction, tf.argmax(model.y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
tf.summary.scalar('accuracy', accuracy)
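
Example #2 stops after building the graph; the loop that feeds `cy_lr` would look like Example #1. Below is a minimal sketch of how the remaining pieces (the `folder_name` log directory and the accuracy summary) could be tied together, assuming the same `cifar100` batch helper and the `model` placeholder attributes (`x`, `y`, `training`, `keep_prob`) used in Example #1.

merged = tf.summary.merge_all()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(folder_name, sess.graph)
    for iteration in range(config['epochs'] * iter_per_epoch):
        batch_x, batch_fine, batch_coarse = cifar100.next_train_batch(
            config['batch_size'])
        # one optimizer step at the scheduled learning rate
        _, summ = sess.run(
            [optimizer, merged],
            feed_dict={model.x: batch_x,
                       model.y: batch_fine,
                       model.training: True,
                       model.keep_prob: 0.7,
                       cy_lr: Lr[iteration]})
        writer.add_summary(summ, iteration)
    saver.save(sess, os.path.join(folder_name, 'model.ckpt'))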
Example #3
def run_fine(ithx, ithy, test_ith, test_ithy, mod, pred, loss, epochs,
             base_lre, max_lr, name, var):
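    """Fine-tune only the variables in `var` on one subset of the data.

    `ithx`/`ithy` appear to hold the images/labels for a single coarse
    class (CIFAR-100 has 2,500 training images per coarse class, matching
    the sampling range below), with `test_ith`/`test_ithy` the matching
    test split. Assumes the same globals (`sess`, `config`, `cycle_lr`,
    `n_samples`) as `run` in Example #1.
    """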
    train_loss_set = []
    train_acc_set = []
    test_loss_set = []
    test_acc_set = []
    iter_per_epoch = int(n_samples / (config['batch_size'] * 20))
    Lr = cycle_lr(base_lre, max_lr, iter_per_epoch, config['cycle_epoch'],
                  config['cycle_ratio'], epochs)

    cy_lr = tf.placeholder(tf.float32, shape=(), name="cy_lr_" + name)
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=cy_lr).minimize(loss, var_list=var)

    prediction = tf.argmax(pred, 1)
    correct_prediction = tf.equal(prediction, tf.argmax(mod.y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                              name="accuracy_" + name)

    iteration = 0
    for epoch in range(epochs):
        epoch_loss = 0.
        epoch_acc = 0.
        for iter_in_epoch in range(iter_per_epoch):
            # sample a batch without replacement from the 2,500-image subset
            iter_idx = np.random.choice(2500, config['batch_size'],
                                        replace=False)
            epoch_x, epoch_fine = ithx[iter_idx], ithy[iter_idx]
            _, c, acc = sess.run(
                [optimizer, loss, accuracy],
                feed_dict={
                    mod.x: epoch_x,
                    mod.y: epoch_fine,
                    mod.training: True,
                    cy_lr: Lr[iteration],
                    mod.keep_prob: 0.7
                })
            epoch_loss += c
            epoch_acc += acc
            iteration += 1
        print('######################')
        print('TRAIN')
        print('Epoch {}/{}, loss: {:.4f}, acc: {:.2f}%'.format(
            epoch, epochs, epoch_loss / iter_per_epoch,
            epoch_acc * 100 / iter_per_epoch))
        train_loss_set.append(epoch_loss / iter_per_epoch)
        train_acc_set.append(epoch_acc * 100 / iter_per_epoch)

        # single full-batch evaluation on the held-out split
        test_loss, test_acc = sess.run(
            [loss, accuracy],
            feed_dict={
                mod.x: test_ith,
                mod.y: test_ithy,
                mod.training: False,
                mod.keep_prob: 1.
            })
        print('TEST')
        print('Epoch {}, loss: {:.4f}, acc: {:.2f}%'.format(
            epoch, test_loss, test_acc * 100))
        print('######################\n')
        test_loss_set.append(test_loss)
        test_acc_set.append(test_acc * 100)

    return train_loss_set, train_acc_set, test_loss_set, test_acc_set
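
Unlike `run`, `run_fine` passes `var_list=var` to the optimizer, so only the given variables are updated. One way such a list could be collected and the function invoked, assuming the trainable head lives under a variable scope named 'fine_head' (that scope name is illustrative, and `ithx`/`ithy`/`test_ith`/`test_ithy` stand for the per-class arrays described above):

# 'fine_head' is a hypothetical scope name; adjust it to the real graph
head_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                              scope='fine_head')

results = run_fine(ithx, ithy, test_ith, test_ithy, model, pred, loss,
                   config['epochs'], config['base_lr'], config['max_lr'],
                   'fine0', head_vars)
train_loss, train_acc, test_loss, test_acc = results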