Example #1
0
 def do_train_epoch(i: int) -> Tuple[float, str]:
     """Train ``ref_model`` for one epoch, then evaluate on the validation set.

     Returns the average validation loss together with a timestamp taken
     right after training finishes (before evaluation).
     """
     # One optimization pass over the training data.
     train_epoch(ref_model, optimizer, train_loader, i, use_cuda,
                 loss_func=lossfunc, log_interval=100)
     stamp = get_timestamp()
     # Validation pass; only the average loss is of interest here.
     val_loss, _ = eval_epoch(ref_model, val_loader, i, use_cuda,
                              loss_func=lossfunc)
     return val_loss, stamp
Example #2
0
 def do_train_epoch(i: int) -> Tuple[float, str]:
     """Train ``msg_model`` for one epoch, then evaluate on the validation set.

     Returns the average validation loss together with a timestamp taken
     right after training finishes (before evaluation).
     """
     # Both phases post-process model output the same way: keep the first
     # element of the output tuple and drop its singleton channel dim.
     squeeze_output = lambda x: x[0].squeeze(1)
     # One optimization pass over the training data.
     train_epoch(msg_model, optimizer, train_loader, i, use_cuda,
                 loss_func=lossfunc,
                 output_ops=squeeze_output, log_interval=100)
     stamp = get_timestamp()
     # Evaluation additionally appends a trailing feature dim to the inputs.
     val_loss, _ = eval_epoch(msg_model, val_loader, i, use_cuda,
                              loss_func=lossfunc,
                              input_ops=lambda x: x.unsqueeze(-1),
                              output_ops=squeeze_output)
     return val_loss, stamp
Example #3
0
    # Create the data / checkpoint / log directories if they do not exist yet.
    mkdirp(data_dir)
    mkdirp(save_dir)
    mkdirp(log_dir)

    # prepare proper loss function

    # Negative log-likelihood loss; presumably the model emits
    # log-probabilities (log_softmax) -- TODO confirm against the model.
    lossfunc = nnf.nll_loss

    # start logger

    # Log file name encodes the run configuration plus a timestamp:
    # <proto>_<frequency-batchsizes>_<infer-flavor>_<timestamp>.log
    log_file = '{0}_{1}_{2}_{3}.log'.format(
            proto_name,
            '-'.join([str(i) for i in (frequency, train_batch, val_batch,
                                       test_batch)]),
            '-'.join([infer_method, dataset_flavor]),
            get_timestamp())
    # Title is the file name without the '.log' extension.
    log_title = log_file[:-4]

    # NOTE(review): plain string concat -- assumes log_dir already ends with a
    # path separator; verify against how log_dir is defined.
    logger = Log(log_dir + log_file)
    logger.start(log_title)
    logger.start_intercept()

    # check cuda availability when needed

    if use_cuda:
        check_cuda()
    else:
        print('Currently using cpu device')

    # set up dataset
Example #4
0
    # Create the data / checkpoint / log directories if they do not exist yet.
    mkdirp(data_dir)
    mkdirp(save_dir)
    mkdirp(log_dir)

    # prepare proper loss function

    # Negative log-likelihood loss; presumably the model emits
    # log-probabilities (log_softmax) -- TODO confirm against the model.
    lossfunc = nnf.nll_loss

    # start logger

    # Log file name encodes the architecture (hidden layer sizes, or the
    # default "LocalMininet"), batch sizes, activation, dataset and timestamp.
    log_file = '{0}_{1}_{2}_{3}.log'.format(
        "LocalMininet" if hidden_layers is None else '-'.join(
            [str(i) for i in hidden_layers]),
        '-'.join([str(i) for i in (train_batch, val_batch, test_batch,
                                   activ_func.__name__)]),
        dataset_flavor, get_timestamp())
    # Title is the file name without the '.log' extension.
    log_title = log_file[:-4]
    # NOTE(review): plain string concat -- assumes log_dir already ends with a
    # path separator; verify against how log_dir is defined.
    logger = Log(log_dir + log_file)
    logger.start(log_title)
    logger.start_intercept()

    # check cuda availability when needed

    if use_cuda:
        check_cuda()

    # set up dataset

    # NOTE(review): loader construction is cut off at the end of this excerpt;
    # the call to get_MNIST_dataloaders continues beyond the visible lines.
    if dataset_flavor == 'MNIST':
        ((train_loader, val_loader, test_loader),
         (nb_train, nb_val, nb_test)) = get_MNIST_dataloaders(