Example #1
def run_train(c, args, model_params):
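        # work on a copy of the config so later changes do not leak back to the caller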
        c = copy_config_dict(c)

        ap = AudioProcessor(**c.audio)
        
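        # when a seed is given, nest the logs in a per-seed subdirectory and record the seed in the config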
        if args.seed is None:
            log_path = os.path.join(c.train_config['logs_path'], c.model_name)
        else:
            log_path = os.path.join(c.train_config['logs_path'], str(args.seed), c.model_name)
            c.train_config['seed'] = args.seed

        os.makedirs(log_path, exist_ok=True)

        tensorboard = TensorboardWriter(os.path.join(log_path,'tensorboard'))

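        # the training loader determines the maximum sequence length, which is written back into the config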
        trainloader = train_dataloader(copy_config_dict(c), ap, class_balancer_batch=c.dataset['class_balancer_batch'])
        max_seq_len = trainloader.dataset.get_max_seq_lenght()
        c.dataset['max_seq_len'] = max_seq_len
        model_params['config'] = copy_config_dict(c)
        # save the config in the train dir; it is needed for testing before training and for reproducibility
        save_config_file(c, os.path.join(log_path,'config.json'))

        # for evaluation, 'one_window' is replaced by 'overlapping'
        if c.dataset['temporal_control'] == 'one_window':
            c.dataset['temporal_control'] = 'overlapping'

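        # the eval loader reuses the (possibly adjusted) config and the same maximum sequence length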
        evaloader = eval_dataloader(c, ap, max_seq_len=max_seq_len)
        # enablePrint()
        # print(max_seq_len, trainloader.dataset.get_max_seq_lenght(), c.dataset['temporal_control'], c.dataset['max_seq_len'])

        return train(args, log_path, args.checkpoint_path, trainloader, evaloader, tensorboard, c, c.model_name, ap, cuda=True, model_params=model_params)
Example #2
def run_train(c, args, model_params=None):

    ap = AudioProcessor(**c.audio)

    log_path = os.path.join(c.train_config['logs_path'], c.model_name)

    os.makedirs(log_path, exist_ok=True)

    tensorboard = TensorboardWriter(os.path.join(log_path, 'tensorboard'))
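    # debug output: show which CSV files are used for training and evaluation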
    print(c.dataset['train_csv'], c.dataset['eval_csv'])

    trainloader = train_dataloader(
        c, ap, class_balancer_batch=c.dataset['class_balancer_batch'])
    max_seq_len = trainloader.dataset.get_max_seq_lenght()
    c.dataset['max_seq_len'] = max_seq_len

    print(c.dataset['train_csv'], c.dataset['eval_csv'])

    # save the config in the train dir; it is needed for testing before training and for reproducibility
    save_config_file(c, os.path.join(log_path, 'config.json'))

    evaloader = eval_dataloader(c, ap, max_seq_len=max_seq_len)

    return train(args,
                 log_path,
                 args.checkpoint_path,
                 trainloader,
                 evaloader,
                 tensorboard,
                 c,
                 c.model_name,
                 ap,
                 cuda=True,
                 model_params=model_params)
Example #3
    parser.add_argument(
        '--checkpoint_path',
        type=str,
        default=None,
        help="path of checkpoint pt file, for continue training")
    args = parser.parse_args()

    c = load_config(args.config_path)
    ap = AudioProcessor(c.audio)

    log_path = os.path.join(c.train_config['logs_path'], c.model_name)
    os.makedirs(log_path, exist_ok=True)
    audio_config = c.audio[c.audio['backend']]
    tensorboard = TensorboardWriter(log_path, audio_config)
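    # fail early when the train/test directories from the config do not exist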
    if (not os.path.isdir(c.dataset['train_dir'])
            or not os.path.isdir(c.dataset['test_dir'])):
        raise Exception("Please verify the dataset directories in " +
                        args.config_path)

    trainloader = train_dataloader(c, ap)
    testloader = eval_dataloader(c, ap)
    train(args,
          log_path,
          args.checkpoint_path,
          trainloader,
          testloader,
          tensorboard,
          c,
          c.model_name,
          ap,
          cuda=True)
Example #4
        default=None,
        help="path of checkpoint pt file, for continue training")
    args = parser.parse_args()

    c = load_config(args.config_path)
    ap = AudioProcessor(**c.audio)

    log_path = os.path.join(c.train_config['logs_path'], c.model_name)
    os.makedirs(log_path, exist_ok=True)

    tensorboard = TensorboardWriter(os.path.join(log_path, 'tensorboard'))

    trainloader = train_dataloader(c, ap)
    max_seq_len = trainloader.dataset.get_max_seq_lenght()
    c.dataset['max_seq_len'] = max_seq_len

    # save the config in the train dir; it is needed for testing before training and for reproducibility
    save_config_file(c, os.path.join(log_path, 'config.json'))

    evaloader = eval_dataloader(c, ap, max_seq_len=max_seq_len)

    train(args,
          log_path,
          args.checkpoint_path,
          trainloader,
          evaloader,
          tensorboard,
          c,
          c.model_name,
          ap,
          cuda=True)
Example #5
        
        print('=================================================')
        print("Epoch %d End !"%epoch)
        print('=================================================')
        # run validation and save best checkpoint at end epoch
        val_loss = validation(eval_criterion, model, c, testloader, tensorboard, step, cuda=cuda)
        best_loss = save_best_checkpoint(log_dir, model, optimizer, c, step, val_loss, best_loss)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_path', type=str, required=True,
                        help="JSON file with the training configuration")
    parser.add_argument('--checkpoint_path', type=str, default=None,
                        help="path of the checkpoint .pt file, used to continue training")
    args = parser.parse_args()
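    # example invocations (script and checkpoint names are hypothetical):
    #   python train.py -c config.json
    #   python train.py -c config.json --checkpoint_path checkpoints/last.pt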

    c = load_config(args.config_path)

    log_path = os.path.join(c.train_config['logs_path'], c.model_name)
    os.makedirs(log_path, exist_ok=True)

    tensorboard = TensorboardWriter(os.path.join(log_path,'tensorboard'))

    trainloader = train_dataloader(c)

    # save the config in the train dir; it is needed for testing before training and for reproducibility
    save_config_file(c, os.path.join(log_path,'config.json'))

    evaloader = eval_dataloader(c)

    train(args, log_path, args.checkpoint_path, trainloader, evaloader, tensorboard, c, c.model_name, cuda=True)