Example #1
0
def run_train(c, args, model_params):
    """Prepare dataloaders and logging from *c*, then launch training.

    Works on a deep copy of the config so the caller's object is untouched,
    derives the log directory (optionally nested under the seed), persists
    the resolved config for later test/reproducibility, and delegates the
    actual loop to ``train``.
    """
    # Private copy: this function mutates the config below.
    c = copy_config_dict(c)

    ap = AudioProcessor(**c.audio)

    # With an explicit seed, nest the log dir under it and record the seed.
    if args.seed is not None:
        log_path = os.path.join(c.train_config['logs_path'], str(args.seed), c.model_name)
        c.train_config['seed'] = args.seed
    else:
        log_path = os.path.join(c.train_config['logs_path'], c.model_name)

    os.makedirs(log_path, exist_ok=True)

    tensorboard = TensorboardWriter(os.path.join(log_path, 'tensorboard'))

    trainloader = train_dataloader(copy_config_dict(c), ap,
                                   class_balancer_batch=c.dataset['class_balancer_batch'])
    max_seq_len = trainloader.dataset.get_max_seq_lenght()
    c.dataset['max_seq_len'] = max_seq_len
    # Caller-visible side effect: record the resolved config in model_params.
    model_params['config'] = copy_config_dict(c)

    # Save config in the train dir — needed for test-before-train and
    # for reproducibility.
    save_config_file(c, os.path.join(log_path, 'config.json'))

    # 'one_window' uses overlapping windows at eval time.
    if c.dataset['temporal_control'] == 'one_window':
        c.dataset['temporal_control'] = 'overlapping'

    evaloader = eval_dataloader(c, ap, max_seq_len=max_seq_len)

    return train(args, log_path, args.checkpoint_path, trainloader, evaloader,
                 tensorboard, c, c.model_name, ap, cuda=True,
                 model_params=model_params)
Example #2
0
def run_train(c, args, model_params=None):
    """Build train/eval dataloaders from *c*, persist the config, and train.

    Parameters:
        c: loaded config object (attribute access for sections, dict access
           for their entries), mutated in place with the derived max_seq_len.
        args: parsed CLI args; ``args.checkpoint_path`` resumes training
              when set.
        model_params: optional extra model parameters forwarded to ``train``.

    Returns:
        Whatever ``train`` returns.
    """
    ap = AudioProcessor(**c.audio)

    log_path = os.path.join(c.train_config['logs_path'], c.model_name)
    os.makedirs(log_path, exist_ok=True)

    tensorboard = TensorboardWriter(os.path.join(log_path, 'tensorboard'))
    # Debug aid: show which CSVs training/eval will read.
    print(c.dataset['train_csv'], c.dataset['eval_csv'])

    trainloader = train_dataloader(
        c, ap, class_balancer_batch=c.dataset['class_balancer_batch'])
    max_seq_len = trainloader.dataset.get_max_seq_lenght()
    c.dataset['max_seq_len'] = max_seq_len
    # (Removed a second, identical print of train_csv/eval_csv here — the
    # values are unchanged between the two calls, so it was pure noise.)

    # Save config in the train dir — needed for test-before-train and
    # for reproducibility.
    save_config_file(c, os.path.join(log_path, 'config.json'))

    evaloader = eval_dataloader(c, ap, max_seq_len=max_seq_len)

    return train(args,
                 log_path,
                 args.checkpoint_path,
                 trainloader,
                 evaloader,
                 tensorboard,
                 c,
                 c.model_name,
                 ap,
                 cuda=True,
                 model_params=model_params)
                        help="path of checkpoint pt file, for continue training")
    args = parser.parse_args()

    all_checkpoints = sorted(glob(os.path.join(args.checkpoints_path, '*.pt')))
    #print(all_checkpoints, os.listdir(args.checkpoints_path))
    if args.config_path:
        c = load_config(args.config_path)
    else: #load config in checkpoint
        checkpoint = torch.load(all_checkpoints[0], map_location='cpu')
        c = load_config_from_str(checkpoint['config_str'])

    ap = AudioProcessor(c.audio)

    log_path = os.path.join(c.train_config['logs_path'], c.model_name)
    audio_config = c.audio[c.audio['backend']]
    tensorboard = TensorboardWriter(log_path, audio_config)
    # set test dataset dir
    c.dataset['test_dir'] = args.dataset_dir
    # set batchsize = 32
    c.test_config['batch_size'] = 5
    test_dataloader = test_dataloader(c, ap)
    best_loss = 999999999
    best_loss_checkpoint = ''
    sdrs_checkpoint = []
    for i in tqdm.tqdm(range(len(all_checkpoints))):
        checkpoint = all_checkpoints[i]
        mean_loss= test(args, log_path, checkpoint, test_dataloader, tensorboard, c, c.model_name, ap, cuda=True)
        sdrs_checkpoint.append([mean_loss, checkpoint])
        if mean_loss < best_loss:
            best_loss = mean_loss
            best_loss_checkpoint = checkpoint
Example #4
0
                        required=True,
                        help="json file with configurations")
    parser.add_argument(
        '--checkpoint_path',
        type=str,
        default=None,
        help="path of checkpoint pt file, for continue training")
    args = parser.parse_args()

    c = load_config(args.config_path)
    ap = AudioProcessor(**c.audio)

    log_path = os.path.join(c.train_config['logs_path'], c.model_name)
    os.makedirs(log_path, exist_ok=True)

    tensorboard = TensorboardWriter(os.path.join(log_path, 'tensorboard'))

    train_dataloader = train_dataloader(c, ap)
    max_seq_len = train_dataloader.dataset.get_max_seq_lenght()
    c.dataset['max_seq_len'] = max_seq_len

    # save config in train dir, its necessary for test before train and reproducity
    save_config_file(c, os.path.join(log_path, 'config.json'))

    eval_dataloader = eval_dataloader(c, ap, max_seq_len=max_seq_len)

    train(args,
          log_path,
          args.checkpoint_path,
          train_dataloader,
          eval_dataloader,