Example #1
def log_score(data, iter_limit=None):
    logger.color_print(
        logger.Info,
        '\n-------\nEvaluating {} score...'.format(data.dataset_label))
    op_start_time = time.time()
    # Evaluate in batches: loading the whole set at once is too memory-intensive
    evaluated = batched_evaluate(data, iter_limit)
    if iter_limit is None:
        loss = evaluated / data.get_number_of_batches(batch_size)
    else:
        loss = evaluated / iter_limit

    op_time, h, m, s = get_times(op_start_time, start_time)
    logger.color_print(
        logger.Info,
        'epoch {0} | {1}_loss: {2:.2f} | {3:.2f}s | {4:02d}:{5:02d}:{6:02d}\n-------\n'
        .format(epoch + 1, data.dataset_label, loss, op_time, h, m, s))
    if data.dataset_label != 'test':
        plot_data['{}_loss'.format(data.dataset_label)] += [loss]
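Example #1 calls a `batched_evaluate` helper that is not shown. Judging from the call site it is expected to return a loss summed over the evaluated mini-batches, which `log_score` then averages. A minimal sketch under that assumption, reusing the `model`, `batch_size`, `img_height`, `img_width` and `channels` globals from the later examples (Example #2 evidently uses a variant that also returns accuracy):

def batched_evaluate(data, iter_limit=None):
    # Hypothetical sketch: sum the loss over mini-batches so the caller can average it.
    # Assumes the model is compiled with a loss only, so evaluate() returns a scalar.
    total_loss = 0.0
    batches = (data.get_number_of_batches(batch_size)
               if iter_limit is None else iter_limit)
    for _ in range(batches):
        batch_x, batch_y = data.next_batch(batch_size)
        total_loss += model.evaluate(
            batch_x.reshape(-1, img_height, img_width, channels),
            batch_y,
            batch_size=batch_size,
            verbose=0)
    return total_loss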
Example #2
def evaluate(data, batches):
    logger.color_print(
        logger.Bold, '\n-------\nEvaluating {} score ({} batches)...'.format(
            data.dataset_label, batches))
    op_start_time = time.time()
    # Evaluate in batches: loading the whole set at once is too memory-intensive
    loss, acc = batched_evaluate(data, batches)
    loss /= batches
    acc /= batches

    op_time, h, m, s = get_times(op_start_time, start_time)
    logger_level = logger.Bold
    if data.dataset_label != 'test':
        score_data['{}_loss'.format(data.dataset_label)] += [loss]
    else:
        logger_level = logger.Success
    logger.color_print(
        logger_level,
        '\n-------\nepoch {} | {}_loss: {:.2f} | acc: {:.2f} | {:.2f}s | {:02d}:{:02d}:{:02d}\n-------\n'
        .format(epoch + 1, data.dataset_label, loss, acc, op_time, h, m, s))
    logger.dump(session_path)
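Both evaluation examples depend on a small `logger` module with level constants (`Info`, `Bold`, `Success`), a `color_print` function, and a `dump` that persists what was logged into the session directory. The module itself is not included; a minimal sketch of an interface that would satisfy these calls (the ANSI codes and the log file name are assumptions):

import os

Info = '\033[94m'
Bold = '\033[1m'
Success = '\033[92m'
_End = '\033[0m'

_history = []

def color_print(level, message):
    # Print the message wrapped in the level's ANSI code and keep it for dump()
    _history.append(message)
    print('{}{}{}'.format(level, message, _End))

def dump(session_path):
    # Append everything logged so far to a file inside the session directory
    with open(os.path.join(session_path, 'log.txt'), 'a') as f:
        f.write('\n'.join(_history) + '\n')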
Example #3
# SGD is defined as an alternative optimizer but unused; the model is compiled with Adam below
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
adam = Adam(lr=lr_starting, decay=lr_decay)
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

batches = limit // batch_size
for epoch in range(num_epochs):
    for batch in range(batches):
        op_start_time = time.time()
        batch_x, batch_y = data.train.next_batch(batch_size)
        model.train_on_batch(
            batch_x.reshape(-1, img_height, img_width, channels), batch_y)

        op_time, h, m, s = get_times(op_start_time, start_time)
        print('epoch {} | batch {}/{} | {:.2f}s | {:02d}:{:02d}:{:02d}'.format(
            epoch + 1, batch + 1, batches, op_time, h, m, s))

loss = 0
accuracy = 0
for i in range(batches):
    test_x, test_y = data.test.next_batch(batch_size)
    score = model.evaluate(test_x.reshape(-1, img_height, img_width, channels),
                           test_y,
                           batch_size=batch_size)
    print(score)
    loss += score[0]
    accuracy += score[1]
# Report the mean loss/accuracy over the evaluated batches, not the running sums
loss /= batches
accuracy /= batches
print('Test loss: {:.2f} | Test accuracy: {:.2f}'.format(loss, accuracy))
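Examples #3 and #4 assume a `data` object whose `train` and `test` splits expose `next_batch`, `shuffle` and `get_number_of_batches`; Examples #1 and #2 pass such a split around directly and read its `dataset_label`. A minimal sketch of one possible wrapper over NumPy arrays (class and attribute names are assumptions, not the original implementation):

import numpy as np

class DatasetSplit(object):
    def __init__(self, images, labels, dataset_label):
        self.images = images
        self.labels = labels
        self.dataset_label = dataset_label
        self._cursor = 0

    def get_number_of_batches(self, batch_size):
        return len(self.images) // batch_size

    def shuffle(self):
        # Reorder images and labels together and restart from the beginning
        perm = np.random.permutation(len(self.images))
        self.images, self.labels = self.images[perm], self.labels[perm]
        self._cursor = 0

    def next_batch(self, batch_size):
        # Return the next contiguous slice, wrapping around at the end of an epoch
        if self._cursor + batch_size > len(self.images):
            self._cursor = 0
        start = self._cursor
        self._cursor += batch_size
        return self.images[start:self._cursor], self.labels[start:self._cursor]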
Example #4
adam = Adam(lr=lr_starting, decay=lr_decay)

print('Compiling model...')
model.compile(loss='categorical_crossentropy', optimizer=adam)

for epoch in range(num_epochs):
    data.train.shuffle()
    number_of_batches = data.train.get_number_of_batches(batch_size)
    for i in range(number_of_batches):
        op_start_time = time.time()
        batch_x, batch_y = data.train.next_batch(batch_size)
        model.train_on_batch(
            batch_x.reshape(-1, img_height, img_width, channels), batch_y)

        # Log the loss, the current position within the epoch, and the elapsed times
        op_time, overall_h, overall_m, overall_s = get_times(
            op_start_time, start_time)
        if (i + 1) % 50 == 0:
            loss = model.evaluate(
                batch_x.reshape(-1, img_height, img_width, channels), batch_y,
                batch_size)
            logger.color_print(
                logger.Info,
                'epoch {0} | batch {1} / {2} | loss: {3:.2f} | {4:.2f}s | {5:02d}:{6:02d}:{7:02d}'
                .format(epoch + 1, i + 1, number_of_batches, loss, op_time,
                        overall_h, overall_m, overall_s))
        else:
            print(
                'epoch {0} | batch {1} / {2} | {3:.2f}s | {4:02d}:{5:02d}:{6:02d}'
                .format(epoch + 1, i + 1, number_of_batches, op_time,
                        overall_h, overall_m, overall_s))
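Every example reports timing through a `get_times` helper that is not shown. From the call sites it takes the operation's start time and the overall start time and returns the seconds spent on the operation plus the total elapsed time split into hours, minutes and seconds; a minimal sketch under that assumption:

import time

def get_times(op_start_time, start_time):
    now = time.time()
    # Seconds spent on the operation that just finished
    op_time = now - op_start_time
    # Overall wall-clock time since the run started, split into h:m:s
    elapsed = int(now - start_time)
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    return op_time, hours, minutes, seconds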