Example #1
from torch.nn import L1Loss
# Assumed import path for R2Score: pytorch-ignite's regression metrics
# (newer releases expose it as ignite.metrics.regression.R2Score)
from ignite.contrib.metrics.regression import R2Score


def model_loss(model, dataset, train=False, optimizer=None):
  performance = L1Loss()
  score_metric = R2Score()

  avg_loss = 0
  avg_score = 0
  count = 0

  for input, output in iter(dataset):
    # Run the model on the current batch
    prediction = model.feed(input)
    # L1 loss between the predictions and the targets
    loss = performance(prediction, output)
    # Update the running R^2 score with this batch
    score_metric.update([prediction, output])
    score = score_metric.compute()

    if train:
      # Reset gradients, backpropagate, and update the parameters
      optimizer.zero_grad()
      loss.backward()
      optimizer.step()

    avg_loss += loss.item()
    avg_score += score
    count += 1

  return avg_loss / count, avg_score / count
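A minimal usage sketch, assuming `model` is a PyTorch module exposing the `feed` method used above and `train_loader`/`val_loader` are iterables of (input, output) batches; the loader names and the choice of Adam are hypothetical:

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # any torch.optim optimizer works

for epoch in range(10):
  # Training pass: gradients flow and the parameters get updated
  train_loss, train_r2 = model_loss(model, train_loader, train=True, optimizer=optimizer)
  # Evaluation pass: no optimizer, the weights stay fixed
  val_loss, val_r2 = model_loss(model, val_loader)
  print(f"epoch {epoch}: train L1={train_loss:.4f} R2={train_r2:.4f}, "
        f"val L1={val_loss:.4f} R2={val_r2:.4f}")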
Example #2
def model_loss(model, dataset, train=False, optimizer=None):
  # Cycle through the batches and get the average L1 loss
  performance = L1Loss()
  score_metric = R2Score()
  avg_loss = 0
  avg_score = 0
  count = 0
  for input, output in iter(dataset):
    # Get the model's predictions for the current batch
    predictions = model.feed(input)
    # Get the model's loss
    loss = performance(predictions, output)
    # Get the model's R^2 score
    score_metric.update([predictions, output])
    score = score_metric.compute()
    if train:
      # Clear any old gradients so they don't accumulate
      optimizer.zero_grad()
      # Compute the gradients for our optimizer
      loss.backward()
      # Use the optimizer to update the model's parameters based on the gradients
      optimizer.step()
    # Store the loss and update the counter
    avg_loss += loss.item()
    avg_score += score
    count += 1
  return avg_loss / count, avg_score / count
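One detail worth knowing about the R^2 bookkeeping above: assuming the pytorch-ignite R2Score, the metric accumulates across update() calls, so compute() returns the score over every batch seen since the last reset(). A small sketch:

import torch
from ignite.contrib.metrics.regression import R2Score  # assumed import path

metric = R2Score()
metric.update((torch.tensor([2.0, 3.0]), torch.tensor([2.5, 3.5])))
metric.update((torch.tensor([4.0]), torch.tensor([4.0])))
print(metric.compute())  # R^2 over all three samples seen so far
metric.reset()           # start fresh, e.g. at the beginning of a new epoch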
Example #3
# Assumed source of mean_squared_error: scikit-learn
from sklearn.metrics import mean_squared_error


def model_loss(model, dataset, train=False, optimizer=None):
    performance = L1Loss()
    score_metric = R2Score()

    avg_loss = 0
    avg_score = 0
    avg_mse = 0
    count = 0

    for input, output in iter(dataset):
        predictions = model.feed(input)

        loss = performance(predictions, output)

        score_metric.update([predictions, output])
        score = score_metric.compute()

        # MSE is computed on CPU copies; detach() drops the autograd graph
        mse = mean_squared_error(output.cpu(),
                                 predictions.cpu().detach().numpy())

        if train:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        avg_loss += loss.item()
        avg_score += score
        count += 1
        avg_mse += mse

    return avg_loss / count, avg_score / count, avg_mse / count
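Unlike the other variants, this one returns three averages, so callers unpack accordingly; a quick sketch (the loader name is hypothetical):

val_l1, val_r2, val_mse = model_loss(model, val_loader)
print(f"L1={val_l1:.4f}  R2={val_r2:.4f}  MSE={val_mse:.4f}")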
Example #4
def model_loss(model, dataset, train=False, optimizer=None):
  performance = L1Loss()
  score_metric = R2Score()
  avg_loss = 0
  avg_score = 0
  count = 0

  for input, output in iter(dataset):
    predictions = model.feed(input)

    loss = performance(predictions, output)

    score_metric.update([predictions, output])
    score = score_metric.compute()

    if train:
      # Clear any old gradients so they don't accumulate
      optimizer.zero_grad()

      loss.backward()

      # Use the optimizer to update the model's parameters based on the gradients
      optimizer.step()

    avg_loss += loss.item()
    avg_score += score
    count += 1

  return avg_loss / count, avg_score / count
Example #5
def model_loss(model, dataset, train=False, optimizer=None):
  # Cycle through the batches and get the average L1 loss
  performance = L1Loss()
  score_metric = R2Score()

  avg_loss = 0
  avg_score = 0
  count = 0
  for input, output in iter(dataset):
    # Get the model's predictions for the current batch
    predictions = model.feed(input)
    # Get the model's loss
    loss = performance(predictions, output)
    # Get the model's R^2 score
    score_metric.update([predictions, output])
    score = score_metric.compute()

    if train:
      # Clear any old gradients so they don't accumulate
      optimizer.zero_grad()
      # Compute the gradients for our optimizer
      loss.backward()
      # Use the optimizer to update the model's parameters based on the gradients
      optimizer.step()

    avg_loss += loss.item()
    avg_score += score
    count += 1
  return avg_loss / count, avg_score / count
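All of these snippets assume a model exposing a feed method rather than the usual forward; a hypothetical stand-in for experimenting with model_loss might look like:

import torch
from torch import nn

class TinyRegressor(nn.Module):
  # Hypothetical model: 'feed' simply forwards the batch, matching the
  # model.feed(input) calls in the examples above
  def __init__(self, n_features):
    super().__init__()
    self.net = nn.Linear(n_features, 1)

  def feed(self, x):
    return self.net(x)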