Code Example #1
def train_and_validate(model,
                       criterion,
                       optimizer,
                       train_data,
                       val_data,
                       metric,
                       mtype,
                       ctype=0):
    """
    Defines the training and validation cycle for the input batched data (conv or lstm model, selected by mtype)
    Args:
        model:          object, model to be trained, an instance of the PitchContourAssessor class
        criterion:      object, loss function which defines the training criterion (e.g. a torch.nn loss module)
        optimizer:      object, of torch.optim class which defines the optimization algorithm
        train_data:     list, batched training data
        val_data:       list, batched validation data
        metric:         int, from 0 to 3, which metric to evaluate against
        mtype:          string, 'conv' for fully convolutional model, 'lstm' for lstm based model
        ctype:          int, 0 for regression, 1 for classification
    """
    # train the network
    train(model, criterion, optimizer, train_data, metric, mtype, ctype)
    # evaluate the network on train data
    train_loss_avg, train_r_sq, train_accu, train_accu2 = eval_utils.eval_model(
        model, criterion, train_data, metric, mtype, ctype)
    # evaluate the network on validation data
    val_loss_avg, val_r_sq, val_accu, val_accu2 = eval_utils.eval_model(
        model, criterion, val_data, metric, mtype, ctype)
    # return values
    return train_loss_avg, train_r_sq, train_accu, train_accu2, val_loss_avg, val_r_sq, val_accu, val_accu2
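
A minimal usage sketch for the function above, assuming a PitchContourAssessor model and batched train/validation lists prepared elsewhere (e.g. by PitchContourDataloader); the wiring below is illustrative, not taken from the source:

# hypothetical setup: the model class, its constructor arguments, and the
# pre-batched train_data / val_data lists are assumptions, not shown in the excerpt
import torch.nn as nn
import torch.optim as optim

model = PitchContourAssessor()                      # assumed model class from the docstring
criterion = nn.MSELoss()                            # regression loss (ctype=0)
optimizer = optim.Adam(model.parameters(), lr=1e-4)

results = train_and_validate(model, criterion, optimizer,
                             train_data, val_data,
                             metric=0, mtype='conv', ctype=0)
(train_loss, train_r_sq, train_accu, train_accu2,
 val_loss, val_r_sq, val_accu, val_accu2) = results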
Code Example #2
def train_and_validate(model, criterion, optimizer, train_data, val_data,
                       metric):
    """
    Defines the training and validation cycle for the input batched data for the conv model
    Args:
        model:          object, model to be trained, an instance of the PitchContourAssessor class
        criterion:      object, loss function which defines the training criterion (e.g. a torch.nn loss module)
        optimizer:      object, of torch.optim class which defines the optimization algorithm
        train_data:     list, batched training data
        val_data:       list, batched validation data
        metric:         int, from 0 to 3, which metric to evaluate against
    """
    # train the network
    train(model, criterion, optimizer, train_data, metric)
    # evaluate the network on train data
    train_loss_avg, train_r_sq, train_accu = eval_utils.eval_model(
        model, criterion, train_data, metric)
    # evaluate the network on validation data
    val_loss_avg, val_r_sq, val_accu = eval_utils.eval_model(
        model, criterion, val_data, metric)
    # return values
    return train_loss_avg, train_r_sq, train_accu, val_loss_avg, val_r_sq, val_accu
Code Example #3
# load the saved model weights (move the model to the GPU first when CUDA is available)
if torch.cuda.is_available():
    perf_model.cuda()
    perf_model.load_state_dict(torch.load('saved/' + filename + '.pt'))
else:
    perf_model.load_state_dict(
        torch.load('saved/' + filename + '.pt',
                   map_location=lambda storage, loc: storage))

# initialize dataset and dataloader, and create batched data
file_name = BAND + '_' + str(SEGMENT) + '_data'
if sys.version_info[0] < 3:
    data_path = 'dat/' + file_name + '.dill'
else:
    data_path = 'dat/' + file_name + '_3.dill'
dataset = PitchContourDataset(data_path)
dataloader = PitchContourDataloader(dataset, NUM_DATA_POINTS, NUM_BATCHES)
_, _, vef, _, tef = dataloader.create_split_data(1000, 500)
# test on full length data
test_loss, test_r_sq, test_accu, test_accu2, pred, target = eval_utils.eval_model(
    perf_model, criterion, tef, METRIC, MTYPE, CTYPE, 1)
print('[%s %0.5f, %s %0.5f, %s %0.5f %0.5f]' %
      ('Testing Loss: ', test_loss, ' R-sq: ', test_r_sq, ' Accu:', test_accu,
       test_accu2))

# convert to numpy
if torch.cuda.is_available():
    pred = pred.clone().cpu().numpy()
    target = target.clone().cpu().numpy()
else:
    pred = pred.clone().numpy()
    target = target.clone().numpy()
a = np.absolute(pred - target)  # element-wise absolute error between predictions and targets

# compute the Pearson correlation coefficient between predictions and targets (ss is assumed to be scipy.stats)
R, p = ss.pearsonr(pred, target)
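
A short follow-up sketch using the pred, target, a, R, and p values computed above to summarize test-set agreement (the report formatting is illustrative, not from the source):

# report the mean absolute error and the Pearson correlation on the test split
print('Mean absolute error: %0.5f' % a.mean())
print('Pearson R: %0.5f (p-value: %0.3e)' % (R, p))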
Code Example #4
            print('[%s %0.5f, %s %0.5f, %s %0.5f]' % ('Valid Loss: ', val_loss, ' R-sq: ', val_r_sq, ' Accu:', val_accu))

        # save model if best validation loss
        if val_loss < best_val_loss:
            n = file_info + '_best'
            train_utils.save(n, perf_model)
            best_val_loss = val_loss

    print("Saving...")
    train_utils.save(file_info, perf_model, log_parameters)

except KeyboardInterrupt:
    print("Saving before quit...")
    train_utils.save(file_info, perf_model, log_parameters)

# RUN VALIDATION SET ON THE BEST MODEL
# read the best model
filename = file_info + '_best' + '_Reg'
if torch.cuda.is_available():
    perf_model.cuda()
    perf_model.load_state_dict(torch.load('saved/' + filename + '.pt'))
else:
    perf_model.load_state_dict(torch.load('saved/' + filename + '.pt', map_location=lambda storage, loc: storage))

# run on validation set
val_loss, val_r_sq, val_accu = eval_utils.eval_model(perf_model, criterion, validation_data, METRIC)
print('[%s %0.5f, %s %0.5f, %s %0.5f]' % ('Best Valid Loss: ', val_loss, ' R-sq: ', val_r_sq, ' Accu:', val_accu))

print('Saving best model with timestamp')
train_utils.save(file_info + time.strftime("_%m-%d_%H-%M-%S"), perf_model, log_parameters)
Code Example #5
        # store the best r-squared value from training
        if val_r_sq > best_valrsq:
            best_valrsq = val_r_sq
        # early stopping: quit if the best validation result is more than 200 epochs old
        if best_epoch < epoch - 200:
            break
    print("Saving...")
    train_utils.save(NAME, perf_model)
except KeyboardInterrupt:
    print("Saving before quit...")
    train_utils.save(NAME, perf_model)

print('BEST R^2 VALUE: ' + str(best_valrsq))

# test on full length data
test_loss, test_r_sq, test_accu, test_accu2 = eval_utils.eval_model(perf_model, criterion, testing_data, METRIC, MTYPE, CTYPE)
print('[%s %0.5f, %s %0.5f, %s %0.5f %0.5f]' % ('Testing Loss: ', test_loss, ' R-sq: ', test_r_sq, ' Accu:', test_accu, test_accu2))

# validate and test on best validation model
# read the model
#filename = file_info + '_Reg'
filename = NAME + '_best'
if torch.cuda.is_available():
    perf_model.cuda()
    perf_model.load_state_dict(torch.load('pc_runs/' + filename + '.pt'))
else:
    perf_model.load_state_dict(torch.load('pc_runs/' + filename + '.pt', map_location=lambda storage, loc: storage))

val_loss, val_r_sq, val_accu, val_accu2 = eval_utils.eval_model(perf_model, criterion, vef, METRIC, MTYPE, CTYPE)
Code Example #6
# import data from the MAST dataset
if sys.version_info[0] < 3:
    mast_path = '/Users/Som/GitHub/Mastmelody_dataset/f0data'
else:
    mast_path = '/home/apati/MASTmelody_dataset/f0data'
mast_dataset = MASTDataset(mast_path)
mast_len = len(mast_dataset)
mast_data = []
fail_count = 0
for i in range(mast_len):
    f0, target = mast_dataset[i]
    # skip failing (target == 0) examples beyond the first 266
    if target == 0:
        fail_count += 1
        if fail_count > 266:
            continue
    d = {}
    pitch_tensor = torch.from_numpy(f0).float()
    pitch_tensor = pitch_tensor.view(1, -1)
    if pitch_tensor.size(1) < 1000:
        pitch_tensor = torch.cat(
            (pitch_tensor, torch.zeros(1, 1000 - pitch_tensor.size(1))), 1)
    d['pitch_tensor'] = pitch_tensor
    d['score_tensor'] = torch.from_numpy(np.ones((1, 4)) * target).float()
    mast_data.append(d)

# evaluate model on MAST dataset
test_loss, test_r_sq, test_accu, bin_accu, pred, target = eval_model(
    perf_model, criterion, mast_data, METRIC, MTYPE, extra_outs=1)
print('[%s %0.5f]' % ('MAST Accuracy: ', bin_accu))
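
The zero-padding step above produces fixed-length (1, 1000) pitch tensors; a small stand-alone sketch of the same pattern, using the hypothetical helper name pad_pitch_contour:

import numpy as np
import torch

def pad_pitch_contour(f0, length=1000):
    # convert a 1-D f0 array to a (1, length) float tensor, zero-padding on the right;
    # contours longer than `length` are kept at their full length, as in the loop above
    pitch_tensor = torch.from_numpy(np.asarray(f0, dtype=np.float32)).view(1, -1)
    if pitch_tensor.size(1) < length:
        pad = torch.zeros(1, length - pitch_tensor.size(1))
        pitch_tensor = torch.cat((pitch_tensor, pad), 1)
    return pitch_tensor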