def route(actual_route, new_route):

    # Going back is handled entirely by the model, which restores the
    # previously saved state.
    if new_route == View._back:
        return model.go_back()
    # Parameterised routes (course or mural) arrive as a (route, argument)
    # tuple and forward the argument to the controller factory.
    elif type(new_route) is tuple and (new_route[0] == View._course
                                       or new_route[0] == View._mural):
        new_controller = Controller.factory(new_route[0], new_route[1])
    # Every other route maps directly to a controller.
    else:
        new_controller = Controller.factory(new_route)

    # Remember the route we are leaving so a later View._back can return here.
    model.save_state(actual_route)

    return new_controller
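
The route() function above relies on View, Controller, and model objects that this example does not include. The stand-ins below are a minimal sketch, assuming hypothetical route names, a two-argument factory, and a history-based go_back; none of them come from the original project.

class View:
    # Hypothetical route identifiers (assumed, not from the original code).
    _back = "back"
    _course = "course"
    _mural = "mural"
    _home = "home"

class Controller:
    def __init__(self, name, arg=None):
        self.name, self.arg = name, arg

    @classmethod
    def factory(cls, name, arg=None):
        # Build the controller for the requested screen.
        return cls(name, arg)

class Model:
    def __init__(self):
        self._history = []

    def save_state(self, route):
        # Remember the route we are leaving so View._back can return to it.
        self._history.append(route)

    def go_back(self):
        previous = self._history.pop()
        if isinstance(previous, tuple):
            return Controller.factory(previous[0], previous[1])
        return Controller.factory(previous)

model = Model()

# Parameterised route: (View._course, 42) takes the tuple branch and forwards
# 42 to the factory; the current route is saved for later back-navigation.
course_ctrl = route(View._home, (View._course, 42))
assert course_ctrl.arg == 42

# Back-navigation pops the saved "home" route instead of building a new one.
home_ctrl = route((View._course, 42), View._back)
assert home_ctrl.name == View._home
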
Example #2
                f"TRAIN LOSS: {train_loss:0.3f} " + "".join([
                    f" acc@{k}: {acc:0.3f}"
                    for k, acc in zip(top_k, train_kacc)
                ]), logfile)

        # TEST
        valid_loss, valid_kacc = process(model, valid_data, top_k, None)
        log(
            f"VALID LOSS: {valid_loss:0.3f} " + "".join(
                [f" acc@{k}: {acc:0.3f}"
                 for k, acc in zip(top_k, valid_kacc)]), logfile)

        if valid_loss < best_loss:
            plateau_count = 0
            best_loss = valid_loss
            model.save_state(os.path.join(running_dir, 'best_params.pkl'))
            log(f"  best model so far", logfile)
        else:
            plateau_count += 1
            if plateau_count % early_stopping == 0:
                log(
                    f"  {plateau_count} epochs without improvement, early stopping",
                    logfile)
                break
            if plateau_count % patience == 0:
                lr *= 0.2
                log(
                    f"  {plateau_count} epochs without improvement, decreasing learning rate to {lr}",
                    logfile)

    model.restore_state(os.path.join(running_dir, 'best_params.pkl'))
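
The else-branch above drives both schedules from a single plateau_count: the learning rate decays every patience epochs without improvement, and training stops after early_stopping of them. The stand-alone sketch below replays that bookkeeping with fabricated validation losses; the 0.2 decay factor mirrors the example, while the threshold values and losses are made up for illustration.

import math

patience = 3          # epochs without improvement before decaying the LR
early_stopping = 9    # epochs without improvement before stopping
lr = 1e-3
best_loss = math.inf
plateau_count = 0

# Fabricated validation losses: two improvements, then a long plateau.
fake_valid_losses = [0.90, 0.80] + [0.85] * 10

for epoch, valid_loss in enumerate(fake_valid_losses):
    if valid_loss < best_loss:
        plateau_count = 0
        best_loss = valid_loss
        print(f"epoch {epoch}: new best loss {valid_loss:.3f}")
    else:
        plateau_count += 1
        if plateau_count % early_stopping == 0:
            print(f"epoch {epoch}: {plateau_count} epochs without improvement, early stopping")
            break
        if plateau_count % patience == 0:
            lr *= 0.2
            print(f"epoch {epoch}: decreasing learning rate to {lr}")
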
Example #3
# === training process ===
step = 1
total_loss = 0
print("=> start training")
for epoch in range(args.max_epoches):
    for i, (orig_images, blur_images) in enumerate(train_dataloader):
        orig_images, blur_images = orig_images.cuda(), blur_images.cuda()

        optimizer.zero_grad()
        output_images = network(blur_images)
        loss = loss_fn(output_images, orig_images)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

        if step % args.print_every == 0:
            avg_loss = total_loss / args.print_every
            total_loss = 0
            lr = optimizer.param_groups[0]["lr"]
            print("epoch: %03d, step: %05d, lr: %.6f, loss: %.5f" %
                  (epoch, step, lr, avg_loss))
        step += 1

    evaluate_network(epoch, network, eval_dataloader)

    save_state(ckpt_dir, epoch, network, optimizer)

    # Step-decay the learning rate at the scheduled epochs.
    if epoch in [40, 80]:
        adjust_learning_rate(optimizer)
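
adjust_learning_rate(optimizer) is called at epochs 40 and 80 but is not defined in this example. A typical step-decay helper scales every parameter group's learning rate in place; the sketch below assumes a factor of 0.1, which is not taken from the original code.

def adjust_learning_rate(optimizer, factor=0.1):
    # Scale every parameter group's learning rate by `factor`
    # (e.g. 1e-3 becomes 1e-4 with the assumed default of 0.1).
    for param_group in optimizer.param_groups:
        param_group["lr"] *= factor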