Example #1
exp_name = 'exp' + str(exp_idx) + '_' + exp_name + '_LeNet5'
model = LeNet5()
optimizer = SGD(learning_rate=learning_rate)
''' =============== Learning Setting =============== '''

loss_object = SparseCategoricalCrossentropy()
path_dict = dir_setting(exp_name, CONTINUE_LEARNING)
model, losses_accs, start_epoch = continue_setting(CONTINUE_LEARNING, path_dict, model=model)
train_ds, validation_ds, test_ds = load_processing_mnist(train_ratio, train_batch_size, test_batch_size)
metric_objects = get_classification_metrics()


import time
start_time = time.time()  # measure total wall-clock training time

for epoch in range(start_epoch, epochs):
    train(train_ds, model, loss_object, optimizer, metric_objects)
    validation(validation_ds, model, loss_object, metric_objects)
    
    training_reporter(epoch, losses_accs, metric_objects, exp_name=exp_name)
    save_metrics_model(epoch, model, losses_accs, path_dict, save_period)
    
    metric_visualizer(losses_accs, path_dict['cp_path'])
    resetter(metric_objects)

end_time = time.time()
elapsed_time = end_time - start_time
with open(path_dict['cp_path'] + '/elapsed_time.txt', 'w') as f:
    f.write(str(elapsed_time))

test(test_ds, model, loss_object, metric_objects, path_dict)
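
The loop above leans on helper utilities (get_classification_metrics, resetter, dir_setting, and so on) that are not shown in the example. A minimal sketch of how the two metric helpers could look with tf.keras.metrics follows; the dictionary keys and overall structure are assumptions, not the original implementation (older TF releases use reset_states() instead of reset_state()).

import tensorflow as tf

def get_classification_metrics():
    # One mean-loss tracker and one accuracy tracker per split (assumed layout).
    metric_objects = {}
    for split in ['train', 'validation', 'test']:
        metric_objects[split + '_loss'] = tf.keras.metrics.Mean(name=split + '_loss')
        metric_objects[split + '_acc'] = tf.keras.metrics.SparseCategoricalAccuracy(name=split + '_acc')
    return metric_objects

def resetter(metric_objects):
    # Clear the accumulated values so the next epoch starts from zero.
    for metric in metric_objects.values():
        metric.reset_state()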
Example #2
# TF imports for the pieces shown here; LeNet5, dir_setting, continue_setting,
# get_ds, and the other helpers come from the project's own modules (not shown).
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import SparseCategoricalCrossentropy

CONTINUE_LEARNING = False
exp_name = 'exp1_LeNet5'  # placeholder value; the original script defines exp_name elsewhere

train_ratio = 0.8
train_batch_size, test_batch_size = 128, 128

epochs = 10
save_interval = 2
learning_rate = 0.01

model = LeNet5()
optimizer = SGD(learning_rate=learning_rate)

loss_object = SparseCategoricalCrossentropy()
path_dict = dir_setting(exp_name, CONTINUE_LEARNING)
model, losses_accs, start_epoch = continue_setting(CONTINUE_LEARNING,
                                                   path_dict, model)
train_ds, validation_ds, test_ds = get_ds(train_ratio, train_batch_size,
                                          test_batch_size, "mnist")
metric_objects = get_classification_metrics()

for epoch in range(start_epoch, epochs):
    train(train_ds, model, loss_object, optimizer, metric_objects)
    validation(validation_ds, model, loss_object, metric_objects)

    training_reporter(epoch, losses_accs, metric_objects)
    save_metrics_model(epoch, model, losses_accs, path_dict, save_interval)

    metric_visualizer(losses_accs, path_dict['cp_path'])
    resetter(metric_objects)

test(test_ds, model, loss_object, metric_objects, path_dict)
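
Both examples call train and validation without showing them. A minimal sketch of what those per-epoch steps could look like with tf.GradientTape, assuming the metric-dictionary layout sketched after Example #1 (the function bodies are assumptions, not the original code):

import tensorflow as tf

def train(train_ds, model, loss_object, optimizer, metric_objects):
    # One pass over the training set: update the weights and accumulate metrics.
    for images, labels in train_ds:
        with tf.GradientTape() as tape:
            predictions = model(images, training=True)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        metric_objects['train_loss'](loss)
        metric_objects['train_acc'](labels, predictions)

def validation(validation_ds, model, loss_object, metric_objects):
    # Forward passes only: no weight updates.
    for images, labels in validation_ds:
        predictions = model(images, training=False)
        loss = loss_object(labels, predictions)
        metric_objects['validation_loss'](loss)
        metric_objects['validation_acc'](labels, predictions)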
Example #3
        if DONE:
            # Record the training metrics for this episode; log the max Q-value
            # averaged over the agent's two online networks.
            max_q_value = np.max(
                (agent.model1(state).numpy() + agent.model2(state).numpy()) / 2)

            scores.append(score)
            max_q_values.append(max_q_value)
            score_avg(score)
            max_q_avg(max_q_value)
            loss_avg(loss.numpy())

    if episode % report_period == 0:
        if score_avg.result() >= 399:
            print('Saturated!')
            save_metrics_model(episode, model, max_q_values_scores, path_dict)
            sys.exit()
        save_learning_curve(max_q_values_scores, report_period,
                            path_dict['cp_path'])
        training_reporter(episode,
                          max_q_values_scores,
                          metric_objects,
                          exp_name=exp_name)
        agent_reporter(agent)
        resetter(metric_objects)

    # Save the model every save_period episodes
    if episode % save_period == 0:
        save_metrics_model(episode, model, max_q_values_scores, path_dict)
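
Example #3 updates score_avg, max_q_avg, and loss_avg without defining them. A minimal sketch assuming they are tf.keras.metrics.Mean running averages; the names match the snippet, but treating them as Mean trackers is an assumption:

import tensorflow as tf

# Assumed moving-average trackers used by the reporting block above.
score_avg = tf.keras.metrics.Mean(name='score_avg')
max_q_avg = tf.keras.metrics.Mean(name='max_q_avg')
loss_avg = tf.keras.metrics.Mean(name='loss_avg')

# Calling a tracker adds one sample; .result() reads the running mean,
# which is what the score_avg.result() >= 399 saturation check consumes.
score_avg(210.0)
print(float(score_avg.result()))  # 210.0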