Example #1
from pathlib import Path

from ruamel.yaml import YAML


def load_yaml_file(yaml_file_path: str) -> dict:
    """
    Loads a YAML file and returns its content as a nested dictionary.

    :param yaml_file_path: path to the YAML file to load
    :return: nested dictionary with the content of the YAML file
    """
    path = Path(yaml_file_path)
    yaml = YAML()  # round-trip mode by default
    # Emit booleans as 'False'/'True' instead of the default 'false'/'true'
    # (only relevant when dumping)
    yaml.boolean_representation = ['False', 'True']
    return yaml.load(path)
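A minimal usage sketch, assuming a file named settings.yaml with a repo_loc key (both the file name and the key are hypothetical):

config = load_yaml_file('settings.yaml')  # hypothetical file name
print(config['repo_loc'])                 # hypothetical key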
Example #2
from ruamel.yaml import YAML


def write_remote_global_settings_file(project_settings):
    # Round-trip mode (the default) is required for preserve_quotes and
    # boolean_representation to take effect; typ='safe' would ignore them.
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.boolean_representation = ['False', 'True']
    with open('./global_settings.yaml') as f:
        doc = yaml.load(f)
    doc['repo_loc'] = project_settings['remote_settings']['remote_repo_loc']
    doc['remote_settings']['remote_deploy'] = False
    with open('./remote/global_settings.yaml', 'w') as f:
        yaml.dump(doc, f)
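Round-trip mode is what makes this read-modify-write pattern attractive: comments and quoting in the original file survive the rewrite. A minimal sketch of the same pattern, assuming a hypothetical example.yaml with an enabled key:

import sys
from ruamel.yaml import YAML

yaml = YAML()  # round-trip mode preserves comments and formatting
yaml.preserve_quotes = True
with open('example.yaml') as f:   # hypothetical input file
    doc = yaml.load(f)
doc['enabled'] = False            # hypothetical key to flip
yaml.dump(doc, sys.stdout)        # comments and quotes are kept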
Example #3
import os
import sys
import csv
import time
import datetime

# YAML setup
from ruamel.yaml import YAML
yaml = YAML()
yaml.preserve_quotes = True
yaml.boolean_representation = ['False', 'True']


class Logger:
    # Mutable defaults ({} / []) are evaluated once at definition time and
    # shared across all calls; use None and create fresh objects instead.
    def __init__(self, argv, args, short_args=None, files=None, stats=None):
        short_args = short_args if short_args is not None else {}
        files = files if files is not None else []
        stats = stats if stats is not None else {}

        self.save = args.save
        if not self.save:
            return

        exp_name = self.create_exp_name(args)
        self.log_dir = os.path.join('logs', args.save_dir, exp_name)

        # If the result file already exists, skip the run rather than
        # recomputing it (unless args.overwrite is set).
        if not args.overwrite:
            result_path = os.path.join(self.log_dir, 'result')
            if os.path.exists(result_path) or os.path.exists(result_path + '.csv'):
                print("The result file {} exists! Not rerunning.".format(result_path))
                sys.exit(0)
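The None-default pattern above matters because Python evaluates default argument values once, at function definition time, so a mutable default is shared across every call. A minimal demonstration of the pitfall (names are illustrative):

def append_bad(item, bucket=[]):  # one list object shared by every call
    bucket.append(item)
    return bucket

print(append_bad(1))  # [1]
print(append_bad(2))  # [1, 2] -- the default list accumulated state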
Example #4
def experiment(args):
    print(f"Running experiment with args: {args}")
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.train_batch_num -= 1
    args.val_batch_num -= 1
    args.eval_batch_num -= 1

    # TODO (JON): What is yaml for right now?
    # (Configured here but not used anywhere in this function.)
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.boolean_representation = ['False', 'True']

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if args.cuda: torch.cuda.manual_seed(args.seed)

    ########## Setup dataset
    # TODO (JON): Verify that the loaders are shuffling the validation / test sets.
    if args.dataset == 'MNIST':
        num_train = args.datasize
        if num_train == -1: num_train = 50000
        train_loader, val_loader, test_loader = load_mnist(
            args.batch_size,
            subset=[args.datasize, args.valsize, args.testsize],
            num_train=num_train)
        in_channel = 1
        imsize = 28
        fc_shape = 800
        num_classes = 10
    else:
        raise Exception("Must choose MNIST dataset")
    # TODO (JON): Right now we are not using the test loader for anything.  Should evaluate it occasionally.

    ##################### Setup model
    if args.model == "mlp":
        model = Net(args.num_layers,
                    args.dropout,
                    imsize,
                    in_channel,
                    args.l2,
                    num_classes=num_classes)
    else:
        raise Exception("bad model")

    hyper = init_hyper_train(args, model)  # We need this when doing all_weight
    if args.cuda:
        model = model.cuda()
        model.weight_decay = model.weight_decay.cuda()
        # model.Gaussian.dropout = model.Gaussian.dropout.cuda()

    ############ Setup Optimizer
    # TODO (JON):  Add argument for other optimizers?
    init_optimizer = torch.optim.Adam(model.parameters(),
                                      lr=args.lr)  # , momentum=0.9)
    hyper_optimizer = torch.optim.RMSprop([get_hyper_train(
        args, model)])  # , lr=args.lrh)  # try 0.1 as lr

    ############## Setup Inversion Algorithms
    KFAC_damping = 1e-2
    kfac_opt = KFACOptimizer(model, damping=KFAC_damping)  # sec_optimizer

    ########### Perform the training
    global_step = 0
    hp_k, update = 0, 0
    for epoch_h in range(0, args.hepochs + 1):
        print(f"Hyper epoch: {epoch_h}")
        if epoch_h % args.hyper_log_interval == 0:
            if args.hyper_train == 'opt_data':
                if args.dataset == 'MNIST':
                    save_learned(
                        get_hyper_train(args,
                                        model).reshape(args.batch_size, imsize,
                                                       imsize), True,
                        args.batch_size, args)
            elif args.hyper_train == 'various':
                print(
                    f"saturation: {torch.sigmoid(model.various[0])}, brightness: {torch.sigmoid(model.various[1])}, decay: {torch.exp(model.various[2])}"
                )
            eval_train_corr, eval_train_loss = evaluate(
                args, model, global_step, train_loader, 'train')
            # TODO (JON):  I don't know if we want normal train loss, or eval?
            eval_val_corr, eval_val_loss = evaluate(args, model, epoch_h,
                                                    val_loader, 'valid')
            eval_test_corr, eval_test_loss = evaluate(args, model, epoch_h,
                                                      test_loader, 'test')
            if args.break_perfect_val and eval_val_corr >= 0.999 and eval_train_corr >= 0.999:
                break

        min_loss = float('inf')  # best (lowest) train loss seen this hyper-epoch
        elementary_epochs = args.epochs
        if epoch_h == 0:
            elementary_epochs = args.init_epochs
        # A second optimizer (sec_optimizer) was tried here and disabled;
        # the initial Adam optimizer is always used.
        optimizer = init_optimizer
        for epoch in range(1, elementary_epochs + 1):
            global_step, epoch_train_loss = train(args, model, train_loader,
                                                  optimizer, train_loss_func,
                                                  kfac_opt, epoch, global_step)
            if np.isnan(epoch_train_loss):
                print("Loss is NaN; stopping the inner training loop")
                break
            # An early break on epoch_train_loss >= min_loss was tried here
            # and disabled; min_loss is still tracked for reference.
            min_loss = epoch_train_loss

        hp_k, update = KFAC_optimize(args, model, train_loader, val_loader,
                                     hyper_optimizer, kfac_opt, KFAC_damping,
                                     epoch_h)
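The loop above follows a common bilevel pattern: several elementary epochs update the model weights, then KFAC_optimize takes one hyperparameter step against the validation set. A minimal sketch of that alternation with stand-in callables (train_step and hyper_step are hypothetical, not from this codebase):

import math

def alternate(train_step, hyper_step, hyper_epochs, inner_epochs):
    # train_step() returns a scalar loss; hyper_step() updates hyperparameters.
    for epoch_h in range(hyper_epochs):
        for epoch in range(inner_epochs):
            loss = train_step()
            if math.isnan(loss):  # mirror the NaN guard in the loop above
                break
        hyper_step()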