Example #1
import math
from pprint import pprint

import numpy as np

import NDN3.NDNutils as NDNutils  # NDN library utilities

# project-local helper modules, assumed importable as in the original repo;
# shared by the run_1 examples below
import udata
import unet
import uasp


def run_1(exp_folder, exp, run, c_size, c_filters, cd2x, hidden_t, hidden_s,
          hidden_lt):
    name = f'baseline4_C{c_filters}s{c_size}_Cd2x{cd2x}x{hidden_t}{hidden_s}_H{hidden_lt}_x5000'
    exp = f"{exp}x{run}"

    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[c_filters,
                         int(0.2 * output_shape),
                         output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None],
            normalization=[0, 0, 0],
            layer_types=['conv', hidden_lt, 'normal'],
            act_funcs=['softplus', 'softplus', 'softplus'],
            shift_spacing=[(c_size + 1) // 2, 0],
            conv_filter_widths=[c_size, 0, 0],
            reg_list={
                'd2x': [cd2x, None, None],
                hidden_t: [None, hidden_s, None],
                'l2': [None, None, 0.1],
            })
        hsm_params['weights_initializers'] = ['normal', 'normal', 'normal']
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params

    def get_training_params():
        epochs = 5000
        return {
            'batch_size': 16,
            'use_gpu': False,
            'epochs_summary': epochs // 50,
            'epochs_training': epochs,
            'learning_rate': 0.001
        }

    input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(
        [1], 'training', udata.normalize_mean_std)
    input_val_processed, output_val, output_val_mask = udata.load_data_multiple(
        [1], 'validation', udata.normalize_mean_std)

    for i in range(10):
        seed = i

        hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)
        pprint(hsm_params)
        hsm, input_tuple = unet.get_network(
            input_tr_processed,
            output_tr,
            'adam',
            get_training_params(),
            hsm_params,
            'poisson',
            input_val_processed,
            output_val,
            output_tr_mask,
            output_val_mask,
            f"{name}__{i}",
            seed,
        )
        hsm.log_correlation = 'zero-NaNs'

        (input, output, train_indxs, test_indxs, data_filters, larg,
         opt_params, name_str) = input_tuple
        hsm.train(
            input_data=input,
            output_data=output,
            train_indxs=train_indxs,
            test_indxs=test_indxs,
            data_filters=data_filters,
            learning_alg=larg,
            opt_params=opt_params,
            output_dir=f"training_data/logs/{exp_folder}/{exp}/{name_str}")
        res, naeval, corr = uasp.evaluate_all(hsm, input_val_processed,
                                              output_val, output_val_mask)
        hsm.save_model(
            f"./training_data/models/{exp_folder}/{exp}/{name}__{i}.ndnmod")
    with open("./training_data/experiments.txt", "a+") as f:
        f.write(f"{exp_folder}/{exp}/{name}\n")
Example #2
import copy
import os
import random
import sys
import time

import torch
import torch.backends.cudnn as cudnn

# parse_arguments, get_model_path, print_log, get_data_specs, get_data,
# get_network, the parameter-counting helpers, RecorderMeter, AverageMeter,
# convert_secs2time, time_string, train_target_model, validate,
# save_checkpoint, and adjust_learning_rate are assumed to come from the
# repo's own utility modules.


def main():
    args = parse_arguments()

    random.seed(args.pretrained_seed)
    torch.manual_seed(args.pretrained_seed)
    if args.use_cuda:
        torch.cuda.manual_seed_all(args.pretrained_seed)
    cudnn.benchmark = True

    # get a path for saving the model to be trained
    model_path = get_model_path(dataset_name=args.pretrained_dataset,
                                network_arch=args.pretrained_arch,
                                random_seed=args.pretrained_seed)

    # Init logger
    log_file_name = os.path.join(model_path, 'log_seed_{}.txt'.format(args.pretrained_seed))
    print("Log file: {}".format(log_file_name))
    log = open(log_file_name, 'w')
    print_log('save path : {}'.format(model_path), log)
    state = {k: v for k, v in args._get_kwargs()}
    for key, value in state.items():
        print_log("{} : {}".format(key, value), log)
    print_log("Random Seed: {}".format(args.pretrained_seed), log)
    print_log("Python version : {}".format(sys.version.replace('\n', ' ')), log)
    print_log("Torch  version : {}".format(torch.__version__), log)
    print_log("Cudnn  version : {}".format(torch.backends.cudnn.version()), log)
    # Get data specs
    num_classes, (mean, std), input_size, num_channels = get_data_specs(args.pretrained_dataset, args.pretrained_arch)
    pretrained_data_train, pretrained_data_test = get_data(args.pretrained_dataset,
                                                            mean=mean,
                                                            std=std,
                                                            input_size=input_size,
                                                            train_target_model=True)

    pretrained_data_train_loader = torch.utils.data.DataLoader(pretrained_data_train,
                                                    batch_size=args.batch_size,
                                                    shuffle=True,
                                                    num_workers=args.workers,
                                                    pin_memory=True)

    pretrained_data_test_loader = torch.utils.data.DataLoader(pretrained_data_test,
                                                    batch_size=args.batch_size,
                                                    shuffle=False,
                                                    num_workers=args.workers,
                                                    pin_memory=True)


    print_log("=> Creating model '{}'".format(args.pretrained_arch), log)
    # Init model, criterion, and optimizer
    net = get_network(args.pretrained_arch, input_size=input_size, num_classes=num_classes, finetune=args.finetune)
    print_log("=> Network :\n {}".format(net), log)
    net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    non_trainable_params = get_num_non_trainable_parameters(net)
    trainable_params = get_num_trainable_parameters(net)
    total_params = get_num_parameters(net)
    print_log("Trainable parameters: {}".format(trainable_params), log)
    print_log("Non-trainable parameters: {}".format(non_trainable_params), log)
    print_log("Total # parameters: {}".format(total_params), log)

    # define loss function (criterion) and optimizer
    criterion_xent = torch.nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
                weight_decay=state['decay'], nesterov=True)

    if args.use_cuda:
        net.cuda()
        criterion_xent.cuda()

    recorder = RecorderMeter(args.epochs)

    # Main loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(args.epochs):
        current_learning_rate = adjust_learning_rate(args.learning_rate, args.momentum, optimizer, epoch, args.gammas, args.schedule)

        need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (args.epochs-epoch))
        need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)

        print_log('\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} [learning_rate={:6.4f}]'.format(time_string(), epoch, args.epochs, need_time, current_learning_rate) \
                    + ' [Best : Accuracy={:.2f}, Error={:.2f}]'.format(recorder.max_accuracy(False), 100-recorder.max_accuracy(False)), log)

        # train for one epoch
        train_acc, train_los = train_target_model(pretrained_data_train_loader, net, criterion_xent, optimizer, epoch, log,
                                    print_freq=args.print_freq,
                                    use_cuda=args.use_cuda)

        # evaluate on validation set
        print_log("Validation on pretrained test dataset:", log)
        val_acc = validate(pretrained_data_test_loader, net, criterion_xent, log, use_cuda=args.use_cuda)
        is_best = recorder.update(epoch, train_los, train_acc, 0., val_acc)

        save_checkpoint({
          'epoch'       : epoch + 1,
          'arch'        : args.pretrained_arch,
          'state_dict'  : net.state_dict(),
          'recorder'    : recorder,
          'optimizer'   : optimizer.state_dict(),
          'args'        : copy.deepcopy(args),
        }, model_path, 'checkpoint.pth.tar')

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()
        recorder.plot_curve(os.path.join(model_path, 'curve.png'))

    log.close()
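
main() reads every setting from parse_arguments(). The attributes accessed above imply roughly the argument set in the sketch below; this is a minimal reconstruction, and all defaults here are placeholders rather than the original values.

import argparse

def parse_arguments():
    # minimal sketch inferred from the attributes main() reads
    parser = argparse.ArgumentParser(description='Train a target model')
    parser.add_argument('--pretrained_dataset', default='cifar10')
    parser.add_argument('--pretrained_arch', default='resnet20')
    parser.add_argument('--pretrained_seed', type=int, default=123)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--decay', type=float, default=5e-4)
    parser.add_argument('--schedule', type=int, nargs='+', default=[100, 150])
    parser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1])
    parser.add_argument('--workers', type=int, default=4)
    parser.add_argument('--ngpu', type=int, default=1)
    parser.add_argument('--print_freq', type=int, default=100)
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--finetune', action='store_true')
    return parser.parse_args()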
Example #3
def run_1(exp_folder, exp, run, bs):
    epochs = max(
        int((bs / 16) * 5000), 5000
    )  # at least 5k but potentially more to maintain the number of updates

    name = f'baseline3_bs{bs}x{epochs}'
    exp = f"{exp}x{run}"

    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[9, int(0.2 * output_shape),
                         output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None],
            normalization=[0, 0, 0],
            layer_types=['diff_of_gaussians', 'normal', 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            reg_list={
                'd2x': [None, None, None],
                'l1': [None, None, None],
                'max': [None, None, None]
            })
        hsm_params['weights_initializers'] = ['random', 'normal', 'normal']
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params

    def get_training_params():
        return {
            'batch_size': bs,
            'use_gpu': False,
            'epochs_summary': 100,
            'epochs_training': epochs,
            'learning_rate': 0.001
        }

    input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(
        [1], 'training', udata.normalize_mean_std)
    input_val_processed, output_val, output_val_mask = udata.load_data_multiple(
        [1], 'validation', udata.normalize_mean_std)

    for i in range(10):
        seed = i

        hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)
        pprint(hsm_params)
        hsm, input_tuple = unet.get_network(
            input_tr_processed,
            output_tr,
            'adam',
            get_training_params(),
            hsm_params,
            'poisson',
            input_val_processed,
            output_val,
            output_tr_mask,
            output_val_mask,
            f"{name}__{i}",
            seed,
        )
        hsm.log_correlation = 'zero-NaNs'

        (input, output, train_indxs, test_indxs, data_filters, larg,
         opt_params, name_str) = input_tuple
        hsm.train(
            input_data=input,
            output_data=output,
            train_indxs=train_indxs,
            test_indxs=test_indxs,
            data_filters=data_filters,
            learning_alg=larg,
            opt_params=opt_params,
            output_dir=f"training_data/logs/{exp_folder}/{exp}/{name_str}")
        res, naeval, corr = uasp.evaluate_all(hsm, input_val_processed,
                                              output_val, output_val_mask)
        hsm.save_model(
            f"./training_data/models/{exp_folder}/{exp}/{name}__{i}.ndnmod")
    with open("./training_data/experiments.txt", "a+") as f:
        f.write(f"{exp_folder}/{exp}/{name}\n")
Example #4
input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(
    [1], 'training', lambda x: x * 1_000_000)
input_val_processed, output_val, output_val_mask = udata.load_data_multiple(
    [1], 'validation', lambda x: x * 1_000_000)

for i in [3, 3, 23, 23]:
    seed = i

    hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)
    pprint(hsm_params)
    hsm, input_tuple = unet.get_network(
        input_tr_processed,
        output_tr,
        'adam',
        get_training_params(),
        hsm_params,
        'poisson',
        input_val_processed,
        output_val,
        output_tr_mask,
        output_val_mask,
        f"{name}__{i}",
        seed,
    )
    hsm.log_correlation = 'zero-NaNs'

    (input, output, train_indxs, test_indxs, data_filters, larg, opt_params,
     name_str) = input_tuple
    hsm.train(input_data=input,
              output_data=output,
              train_indxs=train_indxs,
              test_indxs=test_indxs,
              data_filters=data_filters,
              learning_alg=larg,
              opt_params=opt_params,
              output_dir=f"training_data/logs/{exp_folder}/{exp}/{name_str}")
Example #5
def run_1(exp_folder, exp, hidden, reg_h, reg_l, region):
    name = f'multieval_exp1_{region}'
    exp = f"{exp}"

    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[int(hidden * output_shape),
                         output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None],
            normalization=[0, 0],
            layer_types=['normal', 'normal'],
            act_funcs=['softplus', 'softplus'],
            reg_list={
                'l2': [None, reg_l],
                'd2x': [reg_h, None],
            })
        hsm_params['weights_initializers'] = ['normal', 'normal']
        hsm_params['biases_initializers'] = ['trunc_normal', 'trunc_normal']

        return hsm_params

    def get_training_params():
        epochs = 35000
        return {
            'batch_size': 16,
            'use_gpu': False,
            'epochs_summary': epochs // 50,
            'epochs_training': epochs,
            'learning_rate': 0.001
        }

    input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(
        [region], 'training', udata.normalize_mean_std)
    input_val_processed, output_val, output_val_mask = udata.load_data_multiple(
        [region], 'validation', udata.normalize_mean_std)

    for i in range(10):
        seed = i

        hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)
        pprint(hsm_params)
        hsm, input_tuple = unet.get_network(
            input_tr_processed,
            output_tr,
            'adam',
            get_training_params(),
            hsm_params,
            'poisson',
            input_val_processed,
            output_val,
            output_tr_mask,
            output_val_mask,
            f"{name}__{i}",
            seed,
        )
        hsm.log_correlation = 'zero-NaNs'

        (input, output, train_indxs, test_indxs, data_filters, larg,
         opt_params, name_str) = input_tuple
        hsm.train(
            input_data=input,
            output_data=output,
            train_indxs=train_indxs,
            test_indxs=test_indxs,
            data_filters=data_filters,
            learning_alg=larg,
            opt_params=opt_params,
            output_dir=f"training_data/logs/{exp_folder}/{exp}/{name_str}")
        res, naeval, corr = uasp.evaluate_all(hsm, input_val_processed,
                                              output_val, output_val_mask)
        print(f"MultiEval_val: {str(np.mean(corr))}")
        res, naeval, corr = uasp.evaluate_all(hsm, input_tr_processed,
                                              output_tr, output_tr_mask)
        print(f"MultiEval_tr: {str(np.mean(corr))}")
        hsm.save_model(
            f"./training_data/models/{exp_folder}/{exp}/{name}__{i}.ndnmod")
    with open("./training_data/experiments.txt", "a+") as f:
        f.write(f"{exp_folder}/{exp}/{name}\n")
Example #6
def run_1(exp_folder, exp):
    name = 'baseline4_1normIms_Dog9xN0x2N_LxSpxSp_Wn_Btn_Nonorm_regL2Last0.1_0.001x16x5000'

    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[9, int(0.2 * output_shape),
                         output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None],
            normalization=[0, 0, 0],
            layer_types=['diff_of_gaussians', 'normal', 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            reg_list={
                'l2': [None, 0.0, 0.1],
            })
        hsm_params['weights_initializers'] = ['random', 'normal', 'normal']
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params

    def get_training_params():
        epochs = 5000
        return {
            'batch_size': 16,
            'use_gpu': False,
            'epochs_summary': epochs // 50,
            'epochs_training': epochs,
            'learning_rate': 0.001
        }

    input_tr_processed, output_tr, output_tr_mask = udata.load_data_multiple(
        [1], 'training', udata.normalize_mean_std)
    input_val_processed, output_val, output_val_mask = udata.load_data_multiple(
        [1], 'validation', udata.normalize_mean_std)

    for i in range(25):
        seed = i

        hsm_params = get_hsm_params_custom(input_tr_processed, output_tr, i)
        pprint(hsm_params)
        hsm, input_tuple = unet.get_network(
            input_tr_processed,
            output_tr,
            'adam',
            get_training_params(),
            hsm_params,
            'poisson',
            input_val_processed,
            output_val,
            output_tr_mask,
            output_val_mask,
            f"{name}__{i}",
            seed,
        )
        hsm.log_correlation = 'zero-NaNs'

        (input, output, train_indxs, test_indxs, data_filters, larg,
         opt_params, name_str) = input_tuple
        hsm.train(
            input_data=input,
            output_data=output,
            train_indxs=train_indxs,
            test_indxs=test_indxs,
            data_filters=data_filters,
            learning_alg=larg,
            opt_params=opt_params,
            output_dir=f"training_data/logs/{exp_folder}/{exp}/{name_str}")
        res, naeval, corr = uasp.evaluate_all(hsm, input_val_processed,
                                              output_val, output_val_mask)
        hsm.save_model(
            f"./training_data/models/{exp_folder}/{exp}/{name}__{i}.ndnmod")
    with open("./training_data/experiments.txt", "a+") as f:
        f.write(f"{exp_folder}/{exp}/{name}\n")