Example #1
def process_jobs(config):
    common_config = config.common
    for job in config.jobs:
        log.info('Processing job {} with base {}'.format(job.name, job.base))
        # merge order: common defaults < base template < per-job overrides
        job_config = merge_params(common_config, config[job.base])
        log.debug('job overrides: {}'.format(job.overrides))
        job_config = merge_params(job_config, job.overrides)

        job_config.experiment_root = os.path.join(config.output_root,
                                                  job_config.type, job.name)
        log.debug('experiment root: {}'.format(job_config.experiment_root))

        print job_config

        if job_config.type == 'cnn':
            train_convnet(job_config)
        elif job_config.type == 'fftcnn':
            train_convnet(job_config)
        elif job_config.type == 'sda':
            train_mlp(job_config)
        else:
            log.error('unsupported job type {}'.format(job_config.type))
Example #2
def flatten_yaml(yaml_file_path, base_config=None, hyper_params=None):
    yaml_template = load_yaml_template(yaml_file_path)

    params = merge_params(default_params=base_config,
                          override_params=hyper_params)

    yaml = yaml_template % params

    return yaml
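
Note: merge_params itself is not shown on this page. As a rough sketch of the semantics the examples rely on (override values win over defaults, and the result supports attribute-style access), it might look like the following; the Params helper name is hypothetical and this is not the actual deepthought implementation:

# Hypothetical sketch, not the library's actual code.
class Params(dict):
    """dict with attribute-style access, as the examples assume."""
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__

def merge_params(default_params, override_params):
    merged = Params()
    if default_params is not None:
        merged.update(default_params)
    if override_params is not None:
        merged.update(override_params)  # overrides win on key conflicts
    return merged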
Example #3
def flatten_yaml(yaml_file_path, base_config=None, hyper_params=None):
    yaml_template = load_yaml_template(yaml_file_path)

    params = merge_params(default_params=base_config,
                          override_params=hyper_params)

    yaml = yaml_template % params

    return yaml
Example #4
def process_jobs(config):
    common_config = config.common
    for job in config.jobs:
        log.info('Processing job {} with base {}'.format(job.name, job.base))
        job_config = merge_params(common_config, config[job.base])
        log.debug('job overrides: {}'.format(job.overrides))
        job_config = merge_params(job_config, job.overrides)

        job_config.experiment_root = os.path.join(config.output_root,
                                                  job_config.type, job.name)
        log.debug('experiment root: {}'.format(job_config.experiment_root))

        print job_config

        if job_config.type == 'cnn':
            train_convnet(job_config)
        elif job_config.type == 'fftcnn':
            train_convnet(job_config)
        elif job_config.type == 'sda':
            train_mlp(job_config)
        else:
            log.error('unsupported job type {}'.format(job_config.type))
Example #5
def run_experiment(config, hyper_params, random_seeds):

    experiment_root = hyper_params['experiment_root']

    best_acc = -1
    best_results = [np.NAN, np.NAN, np.NAN]
    for seed in random_seeds:
        hyper_params['random_seed'] = seed
        hyper_params['experiment_root'] = experiment_root + '.' + str(seed)

        params = merge_params(config, hyper_params)

        if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')):
            print 'found existing mlp.pkl: {}'.format(params.experiment_root)
        else:
            print 'no mlp.pkl found at: {}'.format(params.experiment_root)
            if not config.get('only_extract_results', False):
                train_convnet(params)

        values = None
        try:
            values = extract_results(params.experiment_root, mode='misclass')

            results = np.multiply(100, [
                1 - values['frame_misclass'],
                1 - values['sequence_misclass'],
                1 - values['trial_misclass'],
            ])

            # save the best results
            if np.max(results[2]) > best_acc:
                best_results = results
                best_acc = np.max(results[2])
        except Exception:
            print traceback.format_exc()
            results = [np.NAN, np.NAN, np.NAN]

        print 'results for seed {}: {}'.format(seed, results)

        if params.save_output and values is not None:
            output = extract_output(params, values['best_epoch'])
            save(os.path.join(params.experiment_root, 'best_output.pklz'),
                 output)

    print 'best results: {}'.format(best_results)
    return best_results
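
A hypothetical call for the example above (paths and seed values are placeholders): run_experiment derives one experiment directory per seed by appending '.<seed>' to experiment_root, so re-runs can reuse any cached mlp.pkl found there.

# Hypothetical usage sketch; file names are placeholders.
config = load_config_file('train_convnet.cfg')
hyper_params = {'experiment_root': '/tmp/experiments/convnet'}
best = run_experiment(config, hyper_params, random_seeds=[42, 1337, 2014])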
Example #6
def load_datasets_for_subjects(dataset_params, subjects, suffix=''):
    # note: config and dataset_names are expected to come from the enclosing scope
    datasets = {}
    for key, params in dataset_params.items():
        if key not in dataset_names:
            continue
        params['subjects'] = subjects
        params['name'] = params['name'] + suffix
        dataset_config = merge_params(config, params)
        dataset, dataset_yaml = load_yaml_file(
            os.path.join(os.path.dirname(__file__), 'run',
                         'dataset_template.yaml'),
            params=dataset_config,
        )
        datasets[key + suffix] = dataset
        del dataset, dataset_yaml
    return datasets
Example #7
def run_experiment(config, hyper_params, random_seeds):

    experiment_root = hyper_params['experiment_root']

    best_acc = -1
    best_results = [np.NAN, np.NAN, np.NAN]
    for seed in random_seeds:
        hyper_params['random_seed'] = seed
        hyper_params['experiment_root'] = experiment_root + '.' + str(seed)

        params = merge_params(config, hyper_params)

        if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')):
            print 'found existing mlp.pkl: {}'.format(params.experiment_root)
        else:
            print 'no mlp.pkl found at: {}'.format(params.experiment_root)
            if not config.get('only_extract_results', False):
                train_convnet(params)

        values = None
        try:
            values = extract_results(params.experiment_root, mode='misclass')

            results = np.multiply(100, [
                1 - values['frame_misclass'],
                1 - values['sequence_misclass'],
                1 - values['trial_misclass'],
            ])

            # save the best results
            if np.max(results[2]) > best_acc:
                best_results = results
                best_acc = np.max(results[2])
        except Exception:
            print traceback.format_exc()
            results = [np.NAN, np.NAN, np.NAN]

        print 'results for seed {}: {}'.format(seed, results)

        if params.save_output and values is not None:
            output = extract_output(params, values['best_epoch'])
            save(os.path.join(params.experiment_root, 'best_output.pklz'),
                 output)

    print 'best results: {}'.format(best_results)
    return best_results
Example #8
def load_datasets_for_subjects(dataset_params, subjects, suffix=''):
    # note: config and dataset_names are expected to come from the enclosing scope
    datasets = {}
    for key, params in dataset_params.items():
        if key not in dataset_names:
            continue
        params['subjects'] = subjects
        params['name'] = params['name'] + suffix
        dataset_config = merge_params(config, params)
        dataset, dataset_yaml = load_yaml_file(
            os.path.join(os.path.dirname(__file__), 'run',
                         'dataset_template.yaml'),
            params=dataset_config,
        )
        datasets[key + suffix] = dataset
        del dataset, dataset_yaml
    return datasets
Example #9
        job_config.experiment_root = os.path.join(config.output_root,
                                                  job_config.type, job.name)
        log.debug('experiment root: {}'.format(job_config.experiment_root))

        print job_config

        if job_config.type == 'cnn':
            train_convnet(job_config)
        elif job_config.type == 'fftcnn':
            train_convnet(job_config)
        elif job_config.type == 'sda':
            train_mlp(job_config)
        else:
            log.error('unsupported job type {}'.format(job_config.type))


if __name__ == '__main__':
    default_config = os.path.join(os.path.dirname(__file__), 'batch.cfg')
    config = load_config(default_config=default_config, reset_logging=False)

    config = merge_params(load_config_file(default_config), config)

    process_jobs(config)
Example #10
def pair_cross_trial_test(config, pairs=None):

    if pairs is None:
        pairs = [
            [18, 19],
            [20, 21],
            [22, 23],
            [0, 1],
            [15, 9],
            [16, 17],
            [11, 5],
            [12, 6],
            [2, 3],
            [10, 4],
            [13, 7],
            [14, 8],
        ]

    accuracy = np.zeros(len(pairs))
    results = np.zeros([len(pairs), 3])
    for i in xrange(len(pairs)):

        test_stimulus_ids = pairs[i]

        # train on all 48 stimuli except the held-out test pair
        train_stimulus_ids = set()
        for j in xrange(48):
            if j not in test_stimulus_ids:
                train_stimulus_ids.add(j)
        train_stimulus_ids = list(train_stimulus_ids)

        log.info('training stimuli: {} \t test stimuli: {}'.format(
            train_stimulus_ids, test_stimulus_ids))

        hyper_params = {
            'experiment_root': os.path.join(config.experiment_root,
                                            'pair' + str(pairs[i])),
            'remove_train_stimulus_ids': test_stimulus_ids,
            'remove_test_stimulus_ids': train_stimulus_ids,
        }

        params = merge_params(config, hyper_params)

        if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')):
            print 'found existing mlp.pkl: {}'.format(params.experiment_root)
        else:
            print 'no mlp.pkl found at: {}'.format(params.experiment_root)
            if not config.get('only_extract_results', False):
                train_mlp(params)

        values = extract_results(params.experiment_root, mode='misclass')

        results[i] = np.multiply(100, [
            1 - values['frame_misclass'],
            1 - values['sequence_misclass'],
            1 - values['trial_misclass'],
        ])

        accuracy[i] = 100 * (1 - extract_best_result(
            params.experiment_root, mode='misclass', check_dataset='test')[0])

    print results
    print results.mean(axis=0)
    print results.max(axis=1)

    print accuracy
    print accuracy.mean()

    return results, accuracy
Example #11
        log.debug('job overrides: {}'.format(job.overrides))
        job_config = merge_params(job_config, job.overrides)

        job_config.experiment_root = os.path.join(config.output_root,
                                                  job_config.type, job.name)
        log.debug('experiment root: {}'.format(job_config.experiment_root))

        print job_config

        if job_config.type == 'cnn':
            train_convnet(job_config)
        elif job_config.type == 'fftcnn':
            train_convnet(job_config)
        elif job_config.type == 'sda':
            train_mlp(job_config)
        else:
            log.error('unsupported job type {}'.format(job_config.type))


if __name__ == '__main__':
    default_config = os.path.join(os.path.dirname(__file__), 'batch.cfg')
    config = load_config(default_config=default_config, reset_logging=False)

    config = merge_params(load_config_file(default_config), config)

    process_jobs(config)
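
Pieced together from the attribute and key accesses in process_jobs, the batch config would need roughly the following shape. All field values here are hypothetical, and the loaded config object must support attribute access (config.common, job.name, ...) as well as indexing by job.base:

# Hypothetical config shape inferred from process_jobs; values are made up.
config = {
    'output_root': '/tmp/experiments',
    'common': {'batch_size': 128},            # shared defaults for every job
    'cnn_base': {'type': 'cnn', 'lr': 0.01},  # looked up via config[job.base]
    'jobs': [
        {'name': 'job1', 'base': 'cnn_base',
         'overrides': {'lr': 0.005}},
    ],
}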
Example #12
    config = load_config(default_config=os.path.join(os.path.dirname(__file__),
                                                     'train_convnet.cfg'),
                         reset_logging=False)

    batch_subjects = config.get('batch_subjects', xrange(13))

    # per person
    for i in batch_subjects:
        hyper_params = {
            'experiment_root':
            os.path.join(config.experiment_root, 'individual',
                         'subj' + str(i + 1)),
            'subjects': [i]
        }

        params = merge_params(config, hyper_params)

        run(params)

Example #13
def pair_cross_trial_test(config, pairs=None):

    if pairs is None:
        pairs = [
            [18, 19],
            [20, 21],
            [22, 23],
            [0, 1],
            [15, 9],
            [16, 17],
            [11, 5],
            [12, 6],
            [2, 3],
            [10, 4],
            [13, 7],
            [14, 8],
        ]

    accuracy = np.zeros(len(pairs))
    results = np.zeros([len(pairs), 3])
    for i in xrange(len(pairs)):

        test_stimulus_ids = pairs[i]

        train_stimulus_ids = set()
        for j in xrange(48):
            if j not in test_stimulus_ids:
                train_stimulus_ids.add(j)
        train_stimulus_ids = list(train_stimulus_ids)

        log.info('training stimuli: {} \t test stimuli: {}'.format(
            train_stimulus_ids, test_stimulus_ids))

        hyper_params = {
            'experiment_root': os.path.join(config.experiment_root,
                                            'pair' + str(pairs[i])),
            'remove_train_stimulus_ids': test_stimulus_ids,
            'remove_test_stimulus_ids': train_stimulus_ids,
        }

        params = merge_params(config, hyper_params)

        if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')):
            print 'found existing mlp.pkl: {}'.format(params.experiment_root)
        else:
            print 'no mlp.pkl found at: {}'.format(params.experiment_root)
            if not config.get('only_extract_results', False):
                train_mlp(params)

        values = extract_results(params.experiment_root, mode='misclass')

        results[i] = np.multiply(100, [
            1 - values['frame_misclass'],
            1 - values['sequence_misclass'],
            1 - values['trial_misclass'],
        ])

        accuracy[i] = 100 * (1 - extract_best_result(
            params.experiment_root, mode='misclass', check_dataset='test')[0])

    print results
    print results.mean(axis=0)
    print results.max(axis=1)

    print accuracy
    print accuracy.mean()

    return results, accuracy
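
Aside: the inner loop that builds train_stimulus_ids is just a set difference over the 48 stimulus ids; an equivalent one-liner would be:

# Equivalent to the loop above (sorted() also makes the order deterministic,
# which list(set(...)) does not guarantee).
train_stimulus_ids = sorted(set(range(48)) - set(test_stimulus_ids))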
Example #14
    with log_timing(log, 'training MLP'):
        train.main_loop()

    log.info('done')

def get_default_config_path():
    return os.path.join(os.path.dirname(__file__), 'train_sda_mlp.cfg')

if __name__ == '__main__':
    config = load_config(default_config=get_default_config_path(),
                         reset_logging=False)

    hyper_params = {}

    params = merge_params(config, hyper_params)

    if not config.get('only_extract_results', False):
        train_mlp(params)

    scan_for_best_performance(params.experiment_root, 'valid_y_misclass')
    scan_for_best_performance(params.experiment_root,
                              'valid_ptrial_misclass_rate')

    values = extract_results(config.experiment_root, mode='misclass')

    print np.multiply(100, [
        1 - values['frame_misclass'],
        1 - values['sequence_misclass'],
        1 - values['trial_misclass'],
    ])
Example #15
def run_job(job_id,
            meta_job_path,
            yaml_template_file,
            base_config_path,
            hyper_params,
            cache_path=None):

    # ConstrainedGPEIOptChooser requires NaN or inf to recognize constraints
    #     BAD_SOLUTION_RETURN_VALUE = np.inf;
    BAD_SOLUTION_RETURN_VALUE = 1

    # TODO: nice-to-have: make logging a little nicer
    init_logging(pylearn2_loglevel=logging.INFO)

    for key, value in hyper_params.items():
        hyper_params[key] = value[0]
        log.debug('{} = {}'.format(key, hyper_params[key]))

    base_config = load_config_file(base_config_path)

    # fix dataset path
    localizer_class = base_config.get(
        'localizer_class',
        'deepthought.datasets.rwanda2013rhythms.PathLocalizer')
    # for compatibility with old code
    localizer = load_class(localizer_class)
    base_config = localizer.localize_config(base_config)

    if not hasattr(base_config, 'random_seed') \
            and 'random_seed' not in hyper_params:
        random_seed = random.randint(0, 100)
        hyper_params['random_seed'] = random_seed
        log.debug('using random seed {}'.format(random_seed))

    param_str = ''
    for key in sorted(hyper_params.keys()):  # deterministic order
        param_str += '_{}_{}'.format(key, hyper_params[key])

    verbose_job_id = str(job_id) + param_str
    base_config.verbose_job_id = verbose_job_id

    if cache_path is None:
        cache_path = os.path.join(meta_job_path, 'cache')

    job_output_path = os.path.join(meta_job_path, 'output', str(job_id))
    output_path = os.path.join(cache_path,
                               convert_to_valid_filename(param_str))

    # check whether cached result already exists
    model = None
    failed_file = os.path.join(output_path, 'failed')
    if os.path.exists(output_path):
        # create a link to job-id
        symlink(output_path,
                job_output_path,
                override=True,
                ignore_errors=True)

        # using cached result
        model_file = os.path.join(output_path, 'mlp.pkl')
        if os.path.exists(model_file):
            try:
                with log_timing(
                        log,
                        'loading cached model from {}'.format(model_file)):
                    model = serial.load(model_file)

                    channels = model.monitor.channels
            except Exception as e:
                log.error('unexpected exception loading model from {}: {} \n{}'\
                      .format(model_file, e, traceback.format_exc()))
        else:
            # if mlp.pkl is missing but the 'failed' marker is there,
            # then it was a bad configuration
            if os.path.exists(failed_file):
                log.info('cache contains \'failed\' flag')
                return BAD_SOLUTION_RETURN_VALUE

    if model is None:
        # needs to go here to get the internal reference resolved
        base_config.output_path = output_path

        # sanity check of structural parameters:
        if not structural_param_check(
                merge_params(base_config, hyper_params),
                raise_error=False,
        ):
            touch(failed_file, mkdirs=True)
            # set marker
            return BAD_SOLUTION_RETURN_VALUE

        ensure_dir_exists(output_path)
        symlink(output_path,
                job_output_path,
                override=True,
                ignore_errors=True)

        yaml = flatten_yaml(
            yaml_file_path=yaml_template_file,
            base_config=base_config,
            hyper_params=hyper_params,
        )

        save_yaml_file(yaml_str=yaml,
                       yaml_file_path=os.path.join(output_path, 'train.yaml'))

        with log_timing(log, 'loading yaml for job {}'.format(job_id)):
            train = load_yaml(yaml)[0]

        with log_timing(log, 'running job {} '.format(job_id)):
            try:
                train.main_loop()
            except Exception as e:
                log.error('unexpected exception during training: {} \n{}'\
                          .format(e, traceback.format_exc()))
                touch(failed_file, mkdirs=True)
                # set marker
                return BAD_SOLUTION_RETURN_VALUE

        channels = train.model.monitor.channels

    # directly analyze the model from the train object
    best_results = _extract_best_results(
        channels=channels,
        mode='misclass',
        check_dataset='valid',
        check_channels=['_y_misclass'],
    )
    best_epochs = _get_best_epochs(best_results)
    best_epoch = best_epochs[-1]
    # take last entry -> more stable???

    datasets = ['train', 'valid', 'test', 'post']
    measures = ['_y_misclass', '_objective', '_nll']

    print 'results for job {}'.format(job_id)
    for measure, dataset in itertools.product(measures, datasets):
        channel = dataset + measure
        if channel in channels:
            value = float(channels[channel].val_record[best_epoch])
            print '{:>30} : {:.4f}'.format(channel, value)

#     return float(channels['test_y_misclass'].val_record[best_epoch]);
    return float(channels['valid_y_misclass'].val_record[best_epoch])
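
The signature above is shaped like a Spearmint objective: the ConstrainedGPEIOptChooser comment and the value[0] unwrapping both follow Spearmint's convention of passing each hyper-parameter value wrapped in a list. A hypothetical entry point wiring it up could look like this; all paths and file names are placeholders, not taken from this page:

# Hypothetical Spearmint-style entry point; arguments are placeholders.
def main(job_id, params):
    return run_job(job_id,
                   meta_job_path='.',
                   yaml_template_file='train.yaml.template',
                   base_config_path='base.cfg',
                   hyper_params=params)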
Example #16
def run_job(job_id, meta_job_path, yaml_template_file, base_config_path,
            hyper_params, cache_path=None):

    # ConstrainedGPEIOptChooser requires NaN or inf to recognize constraints
    # BAD_SOLUTION_RETURN_VALUE = np.inf
    BAD_SOLUTION_RETURN_VALUE = 1

    # TODO: nice-to-have: make logging a little nicer
    init_logging(pylearn2_loglevel=logging.INFO)

    # hyper-parameter values arrive wrapped in lists; unwrap them
    for key, value in hyper_params.items():
        hyper_params[key] = value[0]
        log.debug('{} = {}'.format(key, hyper_params[key]))

    base_config = load_config_file(base_config_path)

    # fix dataset path
    localizer_class = base_config.get(
        'localizer_class',
        'deepthought.datasets.rwanda2013rhythms.PathLocalizer'
    )  # default for compatibility with old code
    localizer = load_class(localizer_class)
    base_config = localizer.localize_config(base_config)

    if not hasattr(base_config, 'random_seed') \
            and 'random_seed' not in hyper_params:
        random_seed = random.randint(0, 100)
        hyper_params['random_seed'] = random_seed
        log.debug('using random seed {}'.format(random_seed))

    param_str = ''
    for key in sorted(hyper_params.keys()):  # deterministic order
        param_str += '_{}_{}'.format(key, hyper_params[key])

    verbose_job_id = str(job_id) + param_str
    base_config.verbose_job_id = verbose_job_id

    if cache_path is None:
        cache_path = os.path.join(meta_job_path, 'cache')

    job_output_path = os.path.join(meta_job_path, 'output', str(job_id))
    output_path = os.path.join(cache_path,
                               convert_to_valid_filename(param_str))

    # check whether a cached result already exists
    model = None
    failed_file = os.path.join(output_path, 'failed')
    if os.path.exists(output_path):
        # create a link to the job-id
        symlink(output_path, job_output_path, override=True,
                ignore_errors=True)

        # use the cached result
        model_file = os.path.join(output_path, 'mlp.pkl')
        if os.path.exists(model_file):
            try:
                with log_timing(log, 'loading cached model from {}'
                                .format(model_file)):
                    model = serial.load(model_file)
                    channels = model.monitor.channels
            except Exception as e:
                log.error('unexpected exception loading model from {}: {} \n{}'
                          .format(model_file, e, traceback.format_exc()))
        else:
            # if mlp.pkl is missing but the 'failed' marker is there,
            # then it was a bad configuration
            if os.path.exists(failed_file):
                log.info('cache contains \'failed\' flag')
                return BAD_SOLUTION_RETURN_VALUE

    if model is None:
        # needs to go here to get the internal reference resolved
        base_config.output_path = output_path

        # sanity check of structural parameters
        if not structural_param_check(merge_params(base_config, hyper_params),
                                      raise_error=False):
            touch(failed_file, mkdirs=True)  # set marker
            return BAD_SOLUTION_RETURN_VALUE

        ensure_dir_exists(output_path)
        symlink(output_path, job_output_path, override=True,
                ignore_errors=True)

        yaml = flatten_yaml(yaml_file_path=yaml_template_file,
                            base_config=base_config,
                            hyper_params=hyper_params)

        save_yaml_file(yaml_str=yaml,
                       yaml_file_path=os.path.join(output_path, 'train.yaml'))

        with log_timing(log, 'loading yaml for job {}'.format(job_id)):
            train = load_yaml(yaml)[0]

        with log_timing(log, 'running job {} '.format(job_id)):
            try:
                train.main_loop()
            except Exception as e:
                log.error('unexpected exception during training: {} \n{}'
                          .format(e, traceback.format_exc()))
                touch(failed_file, mkdirs=True)  # set marker
                return BAD_SOLUTION_RETURN_VALUE

        channels = train.model.monitor.channels

    # directly analyze the model from the train object
    best_results = _extract_best_results(channels=channels,
                                         mode='misclass',
                                         check_dataset='valid',
                                         check_channels=['_y_misclass'])
    best_epochs = _get_best_epochs(best_results)
    best_epoch = best_epochs[-1]  # take the last entry -> more stable???

    datasets = ['train', 'valid', 'test', 'post']
    measures = ['_y_misclass', '_objective', '_nll']

    print 'results for job {}'.format(job_id)
    for measure, dataset in itertools.product(measures, datasets):
        channel = dataset + measure
        if channel in channels:
            value = float(channels[channel].val_record[best_epoch])
            print '{:>30} : {:.4f}'.format(channel, value)

    # return float(channels['test_y_misclass'].val_record[best_epoch])
    return float(channels['valid_y_misclass'].val_record[best_epoch])