Example No. 1
import sys

import numpy as np
from torch.autograd import Variable


def validate_epoch(model, loader, loss_fn, dtype):
    """
    run `model` over the validation data for one epoch and return the
    square root of the (single-batch) loss
    `model` is a trained subclass of torch.nn.Module
    `loader` is a torch.utils.data.DataLoader for the validation data
    `loss_fn` loss function, see torch.nn for examples
    `dtype` data type for variables,
        e.g. torch.FloatTensor (CPU) or torch.cuda.FloatTensor (GPU)
    """
    loss_history = []
    model.eval()
    for t, (x, y) in enumerate(loader):
        # wrap the batch in Variables (pre-0.4 PyTorch API)
        x_var = Variable(x.type(dtype))
        y_var = Variable(y.type(dtype))

        y_pred = model(x_var)

        loss = loss_fn(y_pred, y_var)
        loss_history.append(loss.data[0])  # scalar loss, pre-0.4 indexing

    if len(loader) == 1:
        returnvals = (y_pred, y_var)
    else:
        # aggregating predictions across batches is not implemented
        sys.exit('many batches not implemented')

    return np.sqrt(loss.data.numpy()[0]), returnvals
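A minimal usage sketch (not from the original source): it assumes the pre-0.4 PyTorch API the example itself uses (Variable, loss.data[0]); the data, model, and nn.MSELoss below are placeholder choices, with a single batch so the one-batch return path is taken.

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

x = torch.randn(8, 4)          # placeholder inputs
y = torch.randn(8, 1)          # placeholder targets
loader = DataLoader(TensorDataset(x, y), batch_size=8)  # exactly one batch

model = nn.Linear(4, 1)        # placeholder model
rmse, (y_pred, y_var) = validate_epoch(model, loader, nn.MSELoss(), torch.FloatTensor)
print('sqrt of validation loss: %.4f' % rmse)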
Example No. 2
import numpy as np


def un_normalized_RMSE(y_pred, y_var, mean_target, std_target):
    # undo the target normalization, then compute the RMSE in original units
    y_pred = np.multiply(y_pred.data.numpy(), std_target) + mean_target
    y = np.multiply(y_var.data.numpy(), std_target) + mean_target
    y_pred = y_pred[:, 0]  # predictions have shape (n, 1); flatten to (n,)
    ntest = len(y)
    errors = np.abs(y_pred - y)
    RMSE = np.sqrt(np.dot(errors, errors) / ntest)
    return RMSE
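A quick worked check with placeholder numbers, assuming the same old-style Variable inputs as above: with mean 50 and std 10, uniform normalized errors of 0.1 become an RMSE of 1.0 in original units.

import numpy as np
import torch
from torch.autograd import Variable

mean_target, std_target = 50.0, 10.0
y_pred = Variable(torch.FloatTensor([[0.1], [0.5], [-0.2]]))  # shape (n, 1)
y_var = Variable(torch.FloatTensor([0.0, 0.4, -0.1]))         # shape (n,)
print(un_normalized_RMSE(y_pred, y_var, mean_target, std_target))  # 1.0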
Example No. 3
    def __init__(self, *args):
        # Initializes all the parameters needed for the neural network.
        # Each positional argument gives a layer size (input, hidden..., output),
        # so the number of weight matrices is len(args) - 1; each weight matrix
        # includes an extra row for the bias term.
        # (assumes `import numpy as np` and `import torch` at module level)
        self._op_sz = args[-1]
        self._theta = {}
        self._theta_sz = len(args) - 1  # number of weight matrices
        for i in range(self._theta_sz):
            # weights (bias row included) drawn from a normal distribution
            # with std 1 / sqrt(fan_in + 1)
            self._theta[i] = torch.FloatTensor(
                np.random.normal(loc=0,
                                 scale=1 / np.sqrt(args[i] + 1),
                                 size=(args[i] + 1, args[i + 1])))
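To make the sizing concrete, here is a standalone sketch of the same loop with hypothetical layer sizes (4, 8, 3); it only demonstrates the shapes the constructor produces.

import numpy as np
import torch

args = (4, 8, 3)  # hypothetical sizes: input 4, hidden 8, output 3
theta = {}
for i in range(len(args) - 1):
    theta[i] = torch.FloatTensor(
        np.random.normal(loc=0, scale=1 / np.sqrt(args[i] + 1),
                         size=(args[i] + 1, args[i + 1])))
print(theta[0].size())  # torch.Size([5, 8]) -- 4 inputs + 1 bias row
print(theta[1].size())  # torch.Size([9, 3]) -- 8 hidden units + 1 bias row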
Example No. 4
    def build(self, *args):
        # Initializes all the parameters needed for the neural network:
        # theta (the weights, bias rows included), dE_dtheta (zero-filled
        # gradient buffers of matching shapes), and a private dict `act`
        # that stores g(z) for each layer, used during back propagation.
        # (assumes `import numpy as np` and `import torch` at module level)
        self._theta = {}
        self.__dE_dtheta = {}
        self.__act = {}
        self._op_sz = args[-1]
        self._theta_sz = len(args) - 1  # number of weight matrices
        for i in range(self._theta_sz):
            # weights (bias row included) drawn from a normal distribution
            # with std 1 / sqrt(fan_in + 1)
            self._theta[i] = torch.FloatTensor(
                np.random.normal(loc=0,
                                 scale=1 / np.sqrt(args[i] + 1),
                                 size=(args[i] + 1, args[i + 1])))
            self.__dE_dtheta[i] = torch.FloatTensor(
                np.zeros((args[i] + 1, args[i + 1])))
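The only addition over Example No. 3 is the zero-filled gradient buffer per layer; a standalone sketch (same hypothetical sizes as above) shows that the buffers mirror the weight shapes.

import numpy as np
import torch

args = (4, 8, 3)  # hypothetical layer sizes
dE_dtheta = {i: torch.FloatTensor(np.zeros((args[i] + 1, args[i + 1])))
             for i in range(len(args) - 1)}
print(dE_dtheta[0].size(), dE_dtheta[1].size())
# torch.Size([5, 8]) torch.Size([9, 3]) -- same shapes as the weights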
Example No. 5
import sys

import numpy as np
from torch.autograd import Variable


def train_epoch(loader_train, model, loss_fn, optimizer, dtype, print_every=20):
    """
    train `model` on data from `loader_train` for one epoch
    inputs:
    `loader_train` a torch.utils.data.DataLoader over the training data
    `model` neural net, subclassed from torch.nn.Module
    `loss_fn` loss function, see torch.nn for examples
    `optimizer` subclassed from torch.optim.Optimizer
    `dtype` data type for variables,
        e.g. torch.FloatTensor (CPU) or torch.cuda.FloatTensor (GPU)
    `print_every` report the loss once every `print_every` minibatches
    """
    loss_history = []
    model.train()
    inds = loader_train.sampler.indices  # indices of the sampled subset (unused below)
    for t, (x, y) in enumerate(loader_train):
        x_var = Variable(x.type(dtype))
        y_var = Variable(y.type(dtype))

        y_pred = model(x_var)

        loss = loss_fn(y_pred, y_var)
        loss_history.append(loss.data[0])

        if (t + 1) % print_every == 0:
            print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))

        # clear stale gradients, backpropagate, and take an optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if len(loader_train) == 1:
        returnvals = y_pred, y_var
    else:
        # aggregating predictions across batches is not implemented
        sys.exit('many batches not implemented')

    return np.sqrt(loss.data.numpy()[0]), returnvals
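A minimal end-to-end sketch (not from the original source), again assuming the pre-0.4 PyTorch API used throughout these examples; the data, model, loss, and learning rate are placeholders, and a SubsetRandomSampler is supplied because train_epoch reads loader_train.sampler.indices.

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler

x = torch.randn(8, 4)          # placeholder inputs
y = torch.randn(8, 1)          # placeholder targets
loader_train = DataLoader(TensorDataset(x, y), batch_size=8,
                          sampler=SubsetRandomSampler(list(range(8))))

model = nn.Linear(4, 1)        # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
rmse, (y_pred, y_var) = train_epoch(loader_train, model, nn.MSELoss(),
                                    optimizer, torch.FloatTensor, print_every=1)
print('sqrt of final training loss: %.4f' % rmse)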