Beispiel #1
0
    def train(self, input, real_val, ycl, idx=None, batches_seen=None):
        """Run one curriculum-learning training step.

        Returns a ``(loss, mape, rmse)`` tuple of Python floats. In
        curriculum mode (``self.cl``) the supervised horizon grows from
        ``task_level`` up to ``seq_out_len`` every ``self.step`` iterations.
        """
        self.iter += 1

        # Widen the prediction horizon on schedule until it covers the
        # full output sequence.
        if self.task_level < self.seq_out_len and self.iter % self.step == 0:
            self.task_level += 1
            if self.new_training_method:
                self.iter = 0  # restart the schedule at the new level

        self.model.train()
        self.optimizer.zero_grad()

        # Curriculum mode trains only up to the current task level;
        # otherwise the model always predicts the full horizon.
        level = self.task_level if self.cl else self.seq_out_len
        output = self.model(input,
                            idx=idx,
                            ycl=ycl,
                            batches_seen=self.iter,
                            task_level=level)

        output = output.transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)
        predict = self.scaler.inverse_transform(output)

        # Loss and metrics are restricted to the active horizon in
        # curriculum mode, and span everything otherwise.
        if self.cl:
            pred_used = predict[:, :, :, :self.task_level]
            real_used = real[:, :, :, :self.task_level]
        else:
            pred_used, real_used = predict, real

        loss = self.loss(pred_used, real_used, 0.0)
        mape = util.masked_mape(pred_used, real_used, 0.0).item()
        rmse = util.masked_rmse(pred_used, real_used, 0.0).item()

        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()

        return loss.item(), mape, rmse
Beispiel #2
0
    def train(self, input, real_val, idx=None):
        """One training step with a horizon that widens every 2700 iterations.

        Returns a ``(loss, mape, rmse)`` tuple of Python floats.
        """
        self.model.train()
        self.optimizer.zero_grad()

        output = self.model(input, idx=idx).transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)
        predict = self.scaler.inverse_transform(output)

        # Curriculum schedule: grow the supervised horizon `m` on a fixed
        # iteration cadence (capped once m passes 12).
        if self.iter % 2700 == 0 and self.m <= 12:
            self.m += 1

        # The loss only covers the first `m` prediction steps ...
        loss = self.loss(predict[:, :, :, :self.m], real[:, :, :, :self.m],
                         0.0)
        loss.backward()

        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()

        # ... while the reported metrics always span the full horizon.
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        self.iter += 1
        return loss.item(), mape, rmse
Beispiel #3
0
    def train(self, input, real_val):
        """Single optimization step; returns ``(loss, mape, rmse)`` floats."""
        self.model.train()
        self.optimizer.zero_grad()

        # Left-pad the last (temporal) axis by one step before the
        # forward pass.
        input = nn.functional.pad(input, (1, 0, 0, 0))
        real = torch.unsqueeze(real_val, dim=-3)

        if self.eRec:
            # Recurrent-encoder variant consumes the target sequence too;
            # supervision then uses only the final slice of `real`.
            output = self.model(input, real, self.scaler)
            real = real[-1, :, :, :, :]
        else:
            output = self.model(input)

        predict = self.scaler.inverse_transform(output.transpose(1, 3))

        loss = self.loss(predict, real, 0.0)
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()

        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mape, rmse
Beispiel #4
0
    def train(self, input, real_val):
        """Plain training step; returns ``(loss, mape, rmse)`` floats."""
        self.model.train()
        self.optimizer.zero_grad()

        # output transposes to [batch_size, 1, num_nodes, horizon]
        prediction = self.scaler.inverse_transform(
            self.model(input).transpose(1, 3))
        target = torch.unsqueeze(real_val, dim=1)

        loss = self.loss(prediction, target, 0.0)
        loss.backward()

        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()

        mape = util.masked_mape(prediction, target, 0.0).item()
        rmse = util.masked_rmse(prediction, target, 0.0).item()
        return loss.item(), mape, rmse
 def eval(self, input, real_val):
     """Score one batch; returns ``(loss, mape, rmse)`` floats.

     NOTE(review): runs without ``torch.no_grad()``, so the forward pass
     still builds a graph — confirm callers rely on that before changing.
     """
     self.model.eval()
     prediction = self.scaler.inverse_transform(
         self.model(input).transpose(1, 3))
     target = torch.unsqueeze(real_val, dim=1)
     loss = self.loss(prediction, target, 0.0)
     mape = util.masked_mape(prediction, target, 0.0).item()
     rmse = util.masked_rmse(prediction, target, 0.0).item()
     return loss.item(), mape, rmse
Beispiel #6
0
 def eval(self, input, real_val):
     """Score one batch (input left-padded by one temporal step).

     Returns a ``(loss, mape, rmse)`` tuple of Python floats.
     """
     self.model.eval()
     # Same left-padding the training path applies before the forward pass.
     padded = nn.functional.pad(input, (1, 0, 0, 0))
     # output transposes to [batch_size, 1, num_nodes, horizon]
     prediction = self.scaler.inverse_transform(
         self.model(padded).transpose(1, 3))
     target = torch.unsqueeze(real_val, dim=1)
     loss = self.loss(prediction, target, 0.0)
     mape = util.masked_mape(prediction, target, 0.0).item()
     rmse = util.masked_rmse(prediction, target, 0.0).item()
     return loss.item(), mape, rmse
Beispiel #7
0
    def eval(self, input, input_cluster, real_val, real_val_cluster):
        """Score one batch of the two-resolution model.

        Returns ``(loss, mae, mape, rmse)`` as Python floats. The cluster
        output and auxiliary returns of the model are discarded.
        """
        self.model.eval()
        output, _, _ = self.model(input, input_cluster)
        # output transposes to [batch_size, 1, num_nodes, horizon]
        predict = output.transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)

        # NOTE(review): no scaler.inverse_transform here, unlike sibling
        # eval paths — presumably the model emits unscaled values; verify.
        loss = self.loss(predict, real, 0.0)
        mae = util.masked_mae(predict, real, 0.0).item()
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mae, mape, rmse
Beispiel #8
0
    def eval(self, input, real_val):
        """Score one batch, ignoring positions where the target is -inf.

        Returns a ``(loss, mape, rmse)`` tuple of Python floats.
        """
        self.model.eval()
        self.imputer.eval()

        padded = nn.functional.pad(input, (1, 0, 0, 0))
        # output transposes to [batch_size, 1, num_nodes, horizon]
        predict = self.scaler.inverse_transform(
            self.model(padded).transpose(1, 3))
        real = torch.unsqueeze(real_val, dim=1)

        # -inf marks missing ground truth; keep only the valid entries.
        valid = (real != float("-inf")).nonzero(as_tuple=True)
        real_valid = real[valid]
        predict_valid = predict[valid]

        loss = self.loss(predict_valid, real_valid, 0.0)
        mape = util.masked_mape(predict_valid, real_valid, 0.0).item()
        rmse = util.masked_rmse(predict_valid, real_valid, 0.0).item()

        return loss.item(), mape, rmse
Beispiel #9
0
    def train(self, input, real_val):
        """One training step; both sides are mapped back to data scale.

        Returns a ``(loss, mape, rmse)`` tuple of Python floats.
        """
        self.model.train()
        self.optimizer.zero_grad()

        input = nn.functional.pad(input, (1, 0, 0, 0))
        # output transposes to [batch_size, 1, num_nodes, horizon]
        output = self.model(input).transpose(1, 3)
        real_val = torch.unsqueeze(real_val, dim=1)

        # Unlike sibling trainers, the target is inverse-transformed too —
        # the loss compares both tensors in the original data scale.
        predict = self.scaler.inverse_transform(output)
        real = self.scaler.inverse_transform(real_val)

        loss = self.loss(predict, real, 0.0)
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()

        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mape, rmse
Beispiel #10
0
    def train(self, input, input_cluster, real_val, real_val_cluster):
        """One training step of the two-resolution model.

        Only the fine-grained output is supervised; the cluster output and
        the auxiliary return are discarded. Returns ``(loss, mae, mape,
        rmse)`` as Python floats.
        """
        self.model.train()
        self.optimizer.zero_grad()

        output, output_cluster, tran2 = self.model(input, input_cluster)
        # output transposes to [batch_size, 1, num_nodes, horizon]
        predict = output.transpose(1, 3)
        real = torch.unsqueeze(real_val, dim=1)

        # NOTE(review): predictions are used as-is (no inverse_transform),
        # unlike sibling trainers — presumably already in data scale; verify.
        loss = self.loss(predict, real, 0.0)
        loss.backward()
        if self.clip is not None:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()

        mae = util.masked_mae(predict, real, 0.0).item()
        mape = util.masked_mape(predict, real, 0.0).item()
        rmse = util.masked_rmse(predict, real, 0.0).item()
        return loss.item(), mae, mape, rmse
Beispiel #11
0
    def train(self, input, real_val):
        """One training step for the model + imputer pair.

        Positions whose target is -inf (missing ground truth) are excluded
        from the loss and metrics. Returns ``(loss, mape, rmse)`` floats.
        """
        self.model.train()
        self.imputer.train()
        self.optimizer.zero_grad()

        padded = nn.functional.pad(input, (1, 0, 0, 0))
        # output transposes to [batch_size, 1, num_nodes, horizon]
        predict = self.scaler.inverse_transform(
            self.model(padded).transpose(1, 3))
        real = torch.unsqueeze(real_val, dim=1)

        # -inf marks missing ground truth; keep only the valid entries.
        valid = (real != float("-inf")).nonzero(as_tuple=True)
        real_valid = real[valid]
        predict_valid = predict[valid]

        loss = self.loss(predict_valid, real_valid, 0.0)
        loss.backward()
        if self.clip is not None:
            # Clip over the joint parameter set of both trained modules.
            params = list(self.model.parameters()) + list(
                self.imputer.parameters())
            torch.nn.utils.clip_grad_norm_(params, self.clip)
        self.optimizer.step()

        mape = util.masked_mape(predict_valid, real_valid, 0.0).item()
        rmse = util.masked_rmse(predict_valid, real_valid, 0.0).item()
        return loss.item(), mape, rmse
Beispiel #12
0
def evaluate_all(pred, target):
    """Return masked (mape, rmse, mae) for `pred` vs `target` as floats.

    A mask value of 0.0 is passed to every metric, matching the
    convention used by the trainers in this file.
    """
    metrics = (util.masked_mape, util.masked_rmse, util.masked_mae)
    return tuple(metric(pred, target, 0.0).item() for metric in metrics)