Example #1
0
def main():
    """Train a model, validate each epoch, and checkpoint the best result.

    All configuration comes from ``config_fun.config()``. Supports resuming
    from a saved model/optimizer checkpoint when ``cfg.resume_training`` is
    set and the checkpoint files exist. Three training modes are selected by
    config flags: slide-wise, file-wise, or plain patch-wise dataloaders.
    """
    cfg = config_fun.config()

    best_prec1 = 0
    # Initialize here so the summary prints at the bottom of the epoch loop
    # never hit a NameError when training starts fresh and the first epochs
    # do not improve on best_prec1 (previously only assigned on resume or
    # on improvement).
    best_confusion_mat = None
    # Only used when we resume training from some checkpoint model.
    resume_epoch = 0

    # Load parameters from our own checkpoint when resuming; otherwise start
    # from an (optionally pretrained) fresh model.
    if cfg.resume_training and os.path.exists(cfg.init_model_file):
        model = train_helper.get_model(cfg, load_param_from_ours=True)
    else:
        model = train_helper.get_model(cfg, pretrained=cfg.model_pretrain)

    print('model: ')
    print(model)

    # multiple gpu
    # model.cuda()

    # Optimizer: plain SGD with momentum and weight decay from the config.
    optimizer = optim.SGD(model.parameters(),
                          cfg.lr,
                          momentum=cfg.momentum,
                          weight_decay=cfg.weight_decay)

    # When resuming, restore the optimizer state plus the bookkeeping values
    # (epoch counter, best accuracy, best confusion matrix).
    if cfg.resume_training and os.path.exists(cfg.optim_state_file):
        print('loading optim epoch prec from {0}'.format(cfg.optim_state_file))
        optim_state = torch.load(cfg.optim_state_file)

        resume_epoch = optim_state['epoch'] + 1
        best_prec1 = optim_state['best_prec1']
        best_confusion_mat = optim_state['best_confusion_matrix']
        optimizer.load_state_dict(optim_state['optim_state_best'])
        # Free the (potentially large) checkpoint dict before training starts.
        del optim_state

    criterion = nn.CrossEntropyLoss()

    print('shift model and criterion to GPU .. ')
    # model = model.cuda()
    # define loss function (criterion) and optimizer
    criterion = criterion.cuda()

    train_loader = None
    val_loader = None
    # Patch-wise mode uses ordinary dataloaders built up front; the file-wise
    # and slide-wise modes build their own loaders inside train_helper.
    if not cfg.train_file_wise and not cfg.train_slide_wise:
        train_loader = train_helper.get_dataloader(True, cfg.train_patch_frac,
                                                   cfg)
        val_loader = train_helper.get_dataloader(False, cfg.val_patch_frac,
                                                 cfg)

    for epoch in range(resume_epoch, cfg.max_epoch):

        if cfg.train_slide_wise:
            # NOTE(review): slide-wise training is commented out here, so this
            # mode only validates — confirm that is intentional.
            # train_helper.train_slide_wise(train, model, criterion, optimizer, epoch, cfg)
            prec1, confusion_mat = train_helper.validate_slide_wise(
                validate, model, criterion, epoch, cfg)
        elif cfg.train_file_wise:
            train_helper.train_file_wise(train, model, criterion, optimizer,
                                         epoch, cfg)
            prec1, confusion_mat = train_helper.validate_file_wise(
                validate, model, criterion, epoch, cfg)
        else:
            train(train_loader, model, criterion, optimizer, epoch, cfg)
            prec1, confusion_mat = validate(val_loader, model, criterion,
                                            epoch, cfg)

        # Checkpoint only when validation accuracy improves.
        if best_prec1 < prec1:
            best_prec1 = prec1
            best_confusion_mat = confusion_mat
            train_helper.save_model_and_optim(cfg, model, optimizer, epoch,
                                              best_prec1, best_confusion_mat)

        print('best accuracy: ', best_prec1)
        print('best confusion matrix:')
        print(best_confusion_mat)
Example #2
0
    # ----- Experiment bookkeeping -----
    # One folder per experiment under ./experiments; the best checkpoint is
    # saved at experiments/<exp_name>/best.pth.
    create_folder('experiments')
    base_path = os.path.join('experiments', args.exp_name)
    model_path = os.path.join(base_path, 'best.pth')
    create_folder(base_path)
    create_logging(base_path, filemode = 'w')
    logging.info(f'logging started for experiment = {args.exp_name} and validation fold = {args.val_fold}')

    # ----- Data and yaml paths -----
    # Log-mel feature file is selected by the requested SNR level.
    data_path = os.path.join(args.data_dir, f'logmel/logmel_snr_{args.snr}.h5')
    yaml_path = os.path.join(args.data_dir, 'mixture.yaml')
    with open(yaml_path, 'r') as f:
        meta = yaml.load(f, Loader=yaml.FullLoader)
    f.close()  # NOTE(review): redundant — the `with` block already closed f.

    # Dataset and dataloader container (project helper).
    data_container = get_dataloader(data_path, yaml_path, args, cuda)

    # ----- Model and (optional) DataParallel -----
    # get_model returns a model *class*, instantiated on the next line.
    Model = get_model(args.model_type)
    model = Model(config.classes_num, config.seq_len, config.mel_bins, cuda)
    # Wrap in DataParallel only when CUDA is on, more than one GPU is visible,
    # and data_parallel was requested.
    if cuda and torch.cuda.device_count() > 1 and data_parallel:
        logging.info(f'Using {torch.cuda.device_count()} GPUs!')
        model = nn.DataParallel(model)
    if cuda:
        model = model.cuda()

    # ----- Loss functions, optimizer, scheduler -----
    criterian_SED = nn.BCELoss(reduction = 'mean') # SED -> Sound event detection 
    criterian_AT = nn.MSELoss(reduction = 'mean') # AT -> Auxiliary task
    optimizer = torch.optim.Adam(model.parameters(), lr = args.lr, weight_decay=args.w_decay)
    # 'max' mode: the scheduler watches a metric that should increase and
    # reduces the LR after `patience` epochs without improvement.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience = 3, verbose = True)