Code Example #1
def _init_():
    '''
    Inference script for image-classification task on mxnet
    Update: 2019-01-03 
    Author: @Northrend
    Contributor: 

    Change log:
    2019/01/03  v3.5                fix category list sort bug 
    2018/09/30  v3.4                support parallelized image pre-processing
    2018/07/23  v3.3                fix testing bug caused by mxnet v1.0.0
    2018/06/22  v3.2                support mutable images testing
    2018/06/19  v3.1                support multi(3 for now) crop
    2018/06/11  v3.0                code-refactoring 
    2018/05/31  v2.6                support log file name with parent path 
    2018/04/18  v2.5                support print forward fps
    2017/12/29  v2.4                fix numpy truth value err bug
    2017/12/11  v2.3                fix center crop bug
    2017/12/07  v2.2                convert img-data to float before resizing
    2017/11/29  v2.1                support center crop
    2017/11/17  v2.0                support mean and std
    2017/09/25  v1.3                support alternative gpu
    2017/09/21  v1.2                support batch-inference & test mode
    2017/07/31  v1.1                support different label file
    2017/06/20  v1.0                basic functions

    Usage:
        mxnet_image_classifier.py   <input-cfg> [--single-img=str]
        mxnet_image_classifier.py   -v | --version
        mxnet_image_classifier.py   -h | --help

    Arguments:
        <input-cfg>                 path to customized config file

    Options:
        -h --help                   show this help screen
        -v --version                show current version
        -------------------------------------------------------
        --single-img=str            give path to one image file
                                    to use single image testing
    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
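
The _init_ snippets in this example and the ones below rely on module-level names that are not part of the excerpt: args, cfg, merge_cfg_from_file, logger and log_format. A minimal sketch of that scaffolding, assuming docopt for argument parsing and a config module that exports cfg and merge_cfg_from_file (both assumptions, not the project's actual layout), could look like this:

# Hypothetical module-level scaffolding assumed by _init_(); the import path,
# format string and version string are illustrative, not the project's code.
import logging

from docopt import docopt                      # the Usage/Options docstring is docopt-style
from config import cfg, merge_cfg_from_file    # assumed location of the config helpers

log_format = '%(asctime)s %(levelname)s: %(message)s'
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    # docopt parses sys.argv against the Usage section of the docstring
    args = docopt(_init_.__doc__, version='mxnet_image_classifier 3.5')
    _init_()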
Code Example #2
def _init_():
    '''
    Training script for image-classification task on mxnet
    Update: 2019-05-14
    Author: @Northrend
    Contributor:

    Changelog:
    2019/05/14      v2.3              fix top_k bug 
    2019/04/08      v2.2              fix crashing caused by loading image error
    2018/11/08      v2.1              fix pre-eval batch size bug
    2018/11/05      v2.0              support lr fixed decay step
    2018/10/29      v1.9              fix PIL loading bug 
    2018/10/24      v1.8              support mobilenet-v2 and se-mobilenet-v2
    2018/10/23      v1.7              fix cuda oom caused by val-phase grad
    2018/10/18      v1.6              fix mix-up training bug
    2018/10/15      v1.5              support mix-up training trick  
    2018/10/10      v1.4              support pre-evaluation  
    2018/10/09      v1.3              support check nets mode 
                                      support resnet-v2
    2018/09/26      v1.2              optimize logging info 
    2018/09/25      v1.1              support finetune & scratch training
                                      support xavier initialization
    2018/09/13      v1.0              basic functions 

    Usage:
        pytorch_train.py              <input-cfg> [-c|--check-nets]
        pytorch_train.py              -v | --version
        pytorch_train.py              -h | --help

    Arguments:
        <input-cfg>                 path to customized config file

    Options:
        -h --help                   show this help screen
        -v --version                show current version
        -------------------------------------------------------------
        -c --check-nets             check available network arch only

    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
Code Example #3
def _init_():
    '''
    Inference script for image-classification task on mxnet
    Update: 2018-06-22 12:09:37 
    Author: @Northrend
    Contributor: 

    Change log:
    2018/06/22  v3.2                support mutable images testing
    2018/06/19  v3.1                support multi(3 for now) crop
    2018/06/11  v3.0                code-refactoring 
    2018/05/31  v2.6                support log file name with parent path 
    2018/04/18  v2.5                support print forward fps
    2017/12/29  v2.4                fix numpy truth value err bug
    2017/12/11  v2.3                fix center crop bug
    2017/12/07  v2.2                convert img-data to float before resizing
    2017/11/29  v2.1                support center crop
    2017/11/17  v2.0                support mean and std
    2017/09/25  v1.3                support alternative gpu
    2017/09/21  v1.2                support batch-inference & test mode
    2017/07/31  v1.1                support different label file
    2017/06/20  v1.0                basic functions

    Usage:
        mxnet_image_classifier.py   <input-cfg> [--single-img=str]
        mxnet_image_classifier.py   -v | --version
        mxnet_image_classifier.py   -h | --help

    Arguments:
        <input-cfg>                 path to customized config file

    Options:
        -h --help                   show this help screen
        -v --version                show current version
        -------------------------------------------------------
        --single-img=str            give path to one image file
                                    to use single image testing
    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
Code Example #4
File: alter_train.py  Project: lidexu/Kaels-toolbox-1
def _init_():
    '''
    Alternately training multi-image-classification networks on mxnet
    Update: 2018-06-07 15:12:24 
    Author: @Northrend
    Contributor:

    Changelog:
    2018/06/07      v1.2            modify config param name
    2018/05/28      v1.1            fix evaluation bug 
    2018/05/24      v1.0            basic functions 

    Usage:
        alter_train.py              <input-cfg>
        alter_train.py              -v | --version
        alter_train.py              -h | --help

    Arguments:
        <input-cfg>                 path to customized config file

    Options:
        -h --help                   show this help screen
        -v --version                show current version
    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
Code Example #5
File: pg_train.py  Project: lidexu/Kaels-toolbox-1
def _init_():
    '''
    Precision-guided training for image classification task on mxnet
    Update: 2018/09/06
    Author: @Northrend
    Contributor:

    Changelog:
    2018/09/06      v1.1            support single image heatmap 
    2018/05/30      v1.0            basic functions

    Usage:
        pg_train.py                 <input-cfg> [-s|--single-img]
        pg_train.py                 -v | --version
        pg_train.py                 -h | --help

    Arguments:
        <input-cfg>                 path to customized config file

    Options:
        -h --help                   show this help screen
        -v --version                show current version
        -s --single-img             single image mode, will draw heat-map simultaneously
    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
Code Example #6
def _init_():
    '''
    Training script for image-classification task on mxnet
    Update: 2018-09-29
    Author: @Northrend
    Contributor:

    Changelog:
    2018/09/29      v1.0              basic functions

    Usage:
        pytorch_train.py              <input-cfg>
        pytorch_train.py              -v | --version
        pytorch_train.py              -h | --help

    Arguments:
        <input-cfg>                 path to customized config file

    Options:
        -h --help                   show this help screen
        -v --version                show current version

    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
Code Example #7
import os
import unittest

import numpy as np
from scipy import signal
from mne import create_info, EpochsArray

import Offline.model as Model
import Offline.utils as util
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt

from config import merge_cfg_from_file

merge_cfg_from_file('./test_stimulator.yml')


class TestOfflineUtils(unittest.TestCase):

    def test_cut_epochs(self):
        timestamp = [1, 1.2, 2, 3]
        start, end, fs = -0.1, 0.5, 10
        timestamp = list(map(lambda x: int(x * fs), timestamp))
        data = np.zeros((1, fs * 4))
        start_p = tuple(map(lambda x: x + int(start * fs), timestamp))
        data[:, start_p] = 1
        t = (start, end, fs)
        epochs = util.cut_epochs(t, data, timestamp)
        self.assertTrue(
            epochs.shape == (4, 1, int(fs * (end - start))))
        self.assertTrue(np.allclose(epochs[:, :, 0], 1))
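
The test above pins down the contract of Offline.utils.cut_epochs: given a (start, end, fs) window and event timestamps in samples, it slices (n_channels, n_samples) continuous data into an array of shape (n_events, n_channels, n_epoch_samples), with each epoch beginning at timestamp + int(start * fs). A hypothetical reconstruction consistent with those assertions (the real helper may pad or validate boundaries differently):

# Hypothetical reconstruction of Offline.utils.cut_epochs, inferred only from
# the shape and first-sample assertions in the test above.
import numpy as np

def cut_epochs(t, data, timestamps):
    # t          : (start, end, fs) window in seconds relative to each event
    # data       : continuous recording, shape (n_channels, n_samples)
    # timestamps : event onsets in samples
    # returns an array of shape (n_events, n_channels, n_epoch_samples)
    start, end, fs = t
    offset = int(start * fs)
    length = int(fs * (end - start))
    return np.stack(
        [data[:, ts + offset: ts + offset + length] for ts in timestamps],
        axis=0)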
Code Example #8
File: mxnet_train.py  Project: lidexu/Kaels-toolbox-1
def _init_():
    '''
    Training script for image-classification task on mxnet
    Update: 2019-04-08 
    Author: @Northrend
    Contributor:

    Changelog:
    2019/04/08  v5.8           fix DATA_TYPE config bug
    2019/01/15  v5.7           support shufflenet v1 & v2 
    2019/01/09  v5.6           compatible with mxnet v1.3.0 for dcnv2
    2018/12/24  v5.5           support softmax label smoothing 
    2018/12/16  v5.4           support COSINE_DECAY learning rate 
    2018/12/04  v5.3           support loading gluoncv pretrained-model 
    2018/11/27  v5.2           fix random_resized_crop arg bug 
    2018/11/13  v5.1           support xception 
    2018/11/13  v5.0           add new dataloader preprocess supported by MXNet 1.3 
    2018/10/19  v4.3           support NV-DALI 
    2018/09/13  v4.2           fix svm bug
    2018/09/12  v4.1           support directly image loading
    2018/06/07  v4.0           code-refactoring 
    2018/03/12  v3.2           support freeze feature layer weights 
    2018/02/28  v3.1           support svm classifier 
    2018/02/11  v3.0           support customized finetune layer name
    2018/02/10  v2.9           support resize dev data separately
    2018/02/03  v2.8           support random resize scale
    2018/01/29  v2.7           fix resume training job bugs
    2017/12/27  v2.6           support se-inception-v4
    2017/12/04  v2.5           support change shorter edge size
    2017/11/21  v2.4           support input normalization
    2017/09/22  v2.3           support resume training job 
    2017/09/04  v2.2           support modify cardinality for resnext
    2017/08/15  v2.1           support modify dropout argument
    2017/08/09  v2.0           apply logging module
    2017/08/08  v1.5           support scratch-training and more networks
    2017/07/24  v1.4           support customized input shape
    2017/07/13  v1.3           fix lr descend bug
    2017/05/19  v1.2           support multi-metrics during training
                               support threshold and save-json options
                               modify script name from mxnet_train_finetune to mxnet_train
    2017/05/10  v1.1           separate _init_ as a function
    2017/05/08  v1.0           finetune task tested
                               resume training mode unsupported

    Usage:
        mxnet_train.py         <input-cfg>
        mxnet_train.py         -v | --version
        mxnet_train.py         -h | --help

    Arguments:
        <input-cfg>            path to customized config file

    Options:
        -h --help              show this help screen
        -v --version           show current version
    
    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
Code Example #9
def online_main(screen_index, test, cfg_file=None, model_date=None):
    if cfg_file is not None:
        merge_cfg_from_file(cfg_file)
    # q_result: controller -> screen (sending judgement result and start/quit signal)
    # q_stim: screen -> controller (sending event order)
    q_result = Queue()
    q_stim = Queue()
    # create stimulator object
    stim_string = cfg.exp_config.train_string if not test else cfg.exp_config.test_string
    kwargs = {
        'q_stim': q_stim,
        'q_result': q_result,
        'screen_index': screen_index,
        'stim_string': stim_string,
        'amp': cfg.amp_info.amp,
        'trigger_type': cfg.amp_info.trigger_type,
        'stim_dir': cfg.exp_config.stim_dir
        if not cfg.exp_config.bidir else None  # None for bidir
    }

    print('Configuration finished. Start process.')
    # start process
    process = Process(target=Stimulator.run_exp, kwargs=kwargs)
    process.start()

    # main process
    if test:
        # testing mode
        # create data_client object
        n_channel = len(cfg.subj_info.montage)
        if cfg.amp_info.amp == 'neuracle':
            from Online import Neuracle

            data_client = Neuracle.Neuracle(
                n_channel=n_channel + 1,  # +1 for trigger channel
                samplerate=cfg.amp_info.samplerate)

        else:
            raise ValueError("Unexpected amplifier type")

        controller = Controller.TestingController(q_stim=q_stim,
                                                  q_result=q_result,
                                                  dataclient=data_client,
                                                  model_date=model_date,
                                                  stim_string=stim_string)
    else:
        # training mode
        controller = Controller.TrainingController(q_stim=q_stim,
                                                   q_result=q_result,
                                                   stim_string=stim_string)
    # write exp config info to log file
    controller.write_exp_log()
    # waiting start signal
    keyboard.wait('s')
    # put starting signal into q_result
    q_result.put(-2)
    # set quit hotkey
    keyboard.add_hotkey('q', quit_process)
    while controller.char_cnt < len(
            controller.stim_string) and not Controller.quit_flag:
        controller.run()
        time.sleep(0.05)
    # close events file io
    controller.close()
    if test:
        # writing down itr
        print('accu: %.2f, average time: %.2f, itr: %.2f' % controller.itr())
        # turn off data client thread
        data_client.close()
    # terminate process
    if Controller.quit_flag:
        process.terminate()
    process.join()
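
A minimal, hypothetical command-line entry point for online_main (the flag names and defaults are illustrative assumptions, not the project's actual CLI):

# Hypothetical wrapper around online_main(); flag names and defaults are
# assumptions for illustration only.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='run the online experiment')
    parser.add_argument('--screen-index', type=int, default=0,
                        help='index of the display used for stimulation')
    parser.add_argument('--test', action='store_true',
                        help='testing mode (online decoding) instead of training mode')
    parser.add_argument('--cfg-file', type=str, default=None,
                        help='optional config file merged via merge_cfg_from_file')
    parser.add_argument('--model-date', type=str, default=None,
                        help='date tag of the trained model to load in testing mode')
    cli = parser.parse_args()
    online_main(cli.screen_index, cli.test,
                cfg_file=cli.cfg_file, model_date=cli.model_date)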
Code Example #10
File: train.py  Project: CLIsVeryOK/tatanic
    # start train
    for epoch_idx in range(0, cfg.SOLVER.EPOCHS):
        logger.info('train epoch: {0}'.format(epoch_idx))
        TrainEpoch(epoch_idx, train_loader, model, loss_funcs, optimizer,
                   lr_scheduler, device, logger)
        # if not epoch_idx % 10:
        #     val_loader = MakeTrainLoader(os.path.join(cfg.DATA.ROOT_DIR, cfg.DATA.VAL_PATH), cfg.DATA.VAL_BATCH)
        #     Valid(val_loader, model, loss_funcs, device, logger)


def parse_args(args):
    import argparse
    parser = argparse.ArgumentParser(description='train')
    parser.add_argument('--yaml_filepath',
                        type=str,
                        help='config filepath',
                        default='ConfigYaml/config.yaml')

    args = parser.parse_args(args)
    return args


if __name__ == '__main__':
    args = parse_args(sys.argv[1:])
    if os.path.exists(args.yaml_filepath):
        merge_cfg_from_file(args.yaml_filepath, cfg)
    else:
        raise Exception('invalid config filepath: {}'.format(args.yaml_filepath))

    Train(cfg)
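
All of these examples revolve around merge_cfg_from_file, whose implementation is not shown on this page. A minimal sketch of what such a helper typically does, assuming a YAML config file and a dict-like config object (the real helpers may also validate keys, coerce types, or fall back to a module-level default cfg):

# Minimal sketch of a merge_cfg_from_file(path, cfg) helper; not the actual
# implementation used by the projects above.
import yaml

def merge_cfg_from_file(cfg_filename, cfg):
    # load the YAML overrides and merge them into cfg in place
    with open(cfg_filename) as f:
        overrides = yaml.safe_load(f) or {}
    _recursive_merge(overrides, cfg)

def _recursive_merge(src, dst):
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dst.get(key), dict):
            _recursive_merge(value, dst[key])   # descend into nested sections
        else:
            dst[key] = value                    # overwrite or add leaf values

Attribute access such as cfg.SOLVER.EPOCHS or cfg.exp_config.train_string additionally assumes an attribute-style dict (for example easydict.EasyDict or a yacs CfgNode) rather than a plain dict.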
Code Example #11
def _init_():
    '''
    Training script for image-classification task on mxnet
    Update: 2018-06-07 16:43:27
    Author: @Northrend
    Contributor:

    Changelog:
    2018/06/07  v4.0                code-refactoring 
    2018/03/12  v3.2                support freeze feature layer weights 
    2018/02/28  v3.1                support svm classifier 
    2018/02/11  v3.0                support customized finetune layer name
    2018/02/10  v2.9                support resize dev data separately
    2018/02/03  v2.8                support random resize scale
    2018/01/29  v2.7                fix resume training job bugs
    2017/12/27  v2.6                support se-inception-v4
    2017/12/04  v2.5                support change shorter edge size
    2017/11/21  v2.4                support input normalization
    2017/09/22  v2.3                support resume training job 
    2017/09/04  v2.2                support modify cardinality for resnext
    2017/08/15  v2.1                support modify dropout argument
    2017/08/09  v2.0                apply logging module
    2017/08/08  v1.5                support scratch-training and more networks
    2017/07/24  v1.4                support customized input shape
    2017/07/13  v1.3                fix lr descend bug
    2017/05/19  v1.2                support multi-metrics during training
                                    support threshold and save-json options
                                    modify script name from mxnet_train_finetune to mxnet_train
    2017/05/10  v1.1                separate _init_ as a function
    2017/05/08  v1.0                finetune task tested
                                    resume training mode unsupported

    Usage:
        mxnet_train.py              <input-cfg>
        mxnet_train.py              -v | --version
        mxnet_train.py              -h | --help

    Arguments:
        <input-cfg>                 path to customized config file

    Options:
        -h --help                   show this help screen
        -v --version                show current version
    
    '''
    # merge configuration
    merge_cfg_from_file(args["<input-cfg>"])

    # config logger
    logger.setLevel(eval('logging.' + cfg.LOG_LEVEL))
    assert cfg.LOG_PATH, logger.error('Missing LOG_PATH!')
    fhandler = logging.FileHandler(cfg.LOG_PATH, mode=cfg.LOG_MODE)
    logger.addHandler(fhandler)

    # print arguments
    logger.info('=' * 80 + '\nCalled with arguments:')
    for key in sorted(args.keys()):
        logger.info('{:<20}= {}'.format(key.replace('--', ''), args[key]))
    logger.info('=' * 80)

    # reset logger format
    fhandler.setFormatter(logging.Formatter(log_format))
Code Example #12
def main(args):
    if args.cfg_file is not None:
        merge_cfg_from_file(args.cfg_file)
    # loading dataset
    dataset = Dataset(subject=cfg.subj_info.subjname, date=args.date)
    # print dataset information
    print(dataset.data.shape)
    print(len(dataset.timestamp))
    print(dataset.events.shape)
    print(dataset.ch_names)
    # extract features
    extractor = FeatExtractor(sfreq=cfg.amp_info.samplerate,
                              band_erp=cfg.subj_info.erp_band)
    # select channels in montage
    data = dataset.data[dataset.montage_indices]
    ch_names = dataset.montage.copy()

    print(data.shape)

    model = Model(subject=cfg.subj_info.subjname)
    feature = model.extract_feature(extractor, data, channel_selection=False)
    X = model.raw2epoch(feature, dataset.timestamp, dataset.events)

    # get target
    if cfg.exp_config.bidir:
        target = utils.get_label_bidir(cfg.exp_config.train_string,
                                       cfg.exp_config.n_rep, dataset.events)
    else:
        target = utils.get_label(cfg.exp_config.train_string,
                                 cfg.exp_config.n_rep)
    y = target.flatten()

    try:
        plots_path = os.path.join(dataset.root_dir, 'plots')
        os.mkdir(plots_path)
    except FileExistsError:
        pass

    # split train/validate set and cross validating on train set
    # randomly split 20%
    train_ind, val_ind = utils.uniform_split(y, shuffle=True, random_state=42)
    X_train = X[train_ind]
    y_train = y[train_ind]
    X_val = X[val_ind]
    y_val = y[val_ind]
    ind = utils.chan_select(X_train, y_train, cfg.off_config.k_best_channel)
    print('Selected k best channel')
    print(ind)

    if args.p:
        if cfg.subj_info.type == 'eeg':
            # parameter search
            C = np.logspace(-4, 2, 10)
            n_components = range(1, (X_train.shape[1] // 2) + 1)
            best_params, best_result = _grid_search(X_train,
                                                    y_train,
                                                    args,
                                                    ind,
                                                    C=C,
                                                    n_components=n_components)
        else:
            raise KeyError('Unsupported experiment type.')
        for key in best_params:
            print('Best %s: %.4f' % (key, best_params[key]))
        for i in best_result:
            print('%s: ' % i, best_result[i])
        print('')
    else:
        if cfg.subj_info.type == 'eeg':
            best_params = {'C': 1., 'n_components': 3}
        else:
            raise KeyError('Unsupported experiment type.')
        best_result = _k_fold(X_train, y_train, args, ch_select=ind)

    # train a model with selected parameters
    model = _train(X_train, y_train, select=ind, **best_params)
    y_pred = _prediction(X_val, y_val, model, args.n_avg)

    if cfg.exp_config.bidir:
        fig_roc, fig_cm, fig_pr, param_dict = utils.evaluate_multiclass(
            y_pred, y_val, if_plot=True)
    else:
        fig_roc, fig_cm, fig_pr, param_dict = utils.evaluate_binary(
            y_pred, y_val, if_plot=True)

    print('Cross validation results:')
    for i in best_result:
        print('%s: ' % i, best_result[i])
    print('')
    print('Split out validation result:')
    for i in param_dict:
        print('%s: %.4f' % (i, param_dict[i]))
    print('')
    # save evaluation results
    fig_roc.savefig(os.path.join(plots_path, 'roc.png'))
    fig_cm.savefig(os.path.join(plots_path, 'cm.png'))
    fig_pr.savefig(os.path.join(plots_path, 'pr.png'))
    # dump training info
    info = {
        'average': args.n_avg,
        'k-fold': args.k,
        'selected channels': [
            ch_names[i] + ' ERPs' if i < len(ch_names)
            else ch_names[i - len(ch_names)] + ' HG'
            for i in ind
        ],
    }
    info.update(best_params)
    try:
        os.mkdir(os.path.join(dataset.root_dir, 'logs'))
    except FileExistsError:
        pass
    with open(
            os.path.join(
                dataset.root_dir, 'logs',
                'log%s.txt' % datetime.now().strftime("%Y-%m-%d-%H-%M-%S")),
            'w') as f:
        # training parameters
        f.write(json.dumps(info))
        f.write('\n')
        f.write('Cross validation result:\n')
        f.write(json.dumps(best_result))
        f.write('\n')
        f.write('Split out validation result:\n')
        f.write(json.dumps(param_dict))
        f.write('\n')
        f.write('\n')
        f.write(json.dumps(cfg.subj_info))
        f.write('\n')
        f.write('\n')
        f.write(json.dumps(cfg.off_config))

    # training with whole dataset and save model
    model = _train(X, y, select=ind, date=args.date, **best_params)
    model.dump()
    # show plots
    plt.show()
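
As a final illustration, the "randomly split 20%" step above calls utils.uniform_split(y, shuffle=True, random_state=42) to obtain class-balanced train and validation indices. A hypothetical sketch of such a helper built on scikit-learn (the project's real implementation may differ; the 20% test fraction and the handling of the shuffle flag are assumptions):

# Hypothetical sketch of utils.uniform_split: a class-stratified 80/20 index
# split. Not the project's actual helper.
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

def uniform_split(y, test_size=0.2, shuffle=True, random_state=None):
    # returns (train_ind, val_ind) with the class ratio preserved in both parts;
    # the shuffle flag is kept for signature compatibility, this sketch always
    # draws a shuffled stratified split
    y = np.asarray(y)
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=test_size,
                                      random_state=random_state)
    train_ind, val_ind = next(splitter.split(np.zeros(len(y)), y))
    return train_ind, val_ind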