Exemplo n.º 1
0
    def fit_view(self, input, target, view_interval, save_road='./Results/'):
        """Train the RNN and collect periodic snapshots of its predictions.

        Every ``view_interval`` iterations the prediction for the last
        time step is stored (on CPU) so training progress can be inspected
        afterwards.  Returns ``(self, Predict_ViewList)``.
        """
        self.View_interval = view_interval
        input = input.cuda()
        target = target.cuda()
        RNN_h_state = self.initHidden(input)
        criterion = nn.MSELoss()
        if self.Optim_method == 'SGD':
            optimizer = optim.SGD(self.parameters(), lr=self.Learn_rate)
        if self.Optim_method == 'Adam':
            optimizer = optim.Adam(self.parameters(), lr=self.Learn_rate)

        # Timer used for the periodic progress report.
        time_tr_start = time.time()

        plot_losses = []
        train_print_loss_total = 0  # reset every Print_interval
        train_plot_loss_total = 0   # reset every Plot_interval

        Predict_ViewList = []
        # Training loop.
        for step in range(1, self.Num_iters + 1):
            # input:      [batch, time_step, input_dim]
            # h_state:    [layer_num*direction, batch, hidden_size]
            # rnn_output: [batch, time_sequence_length, hidden_size]
            prediction, RNN_h_state = self.forward(input, RNN_h_state)
            # Detach the hidden state so gradients do not propagate
            # across iterations.
            RNN_h_state = RNN_h_state.data.cuda()
            loss = criterion(prediction, target)
            loss_value = loss.item()
            train_plot_loss_total += loss_value
            train_print_loss_total += loss_value
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % self.View_interval == 0:
                # Snapshot of the last-timestep prediction, moved to CPU.
                Predict_ViewList.append(prediction[:, -1, :].cpu().data)
            if step % self.Print_interval == 0:
                print_loss_avg = train_print_loss_total / self.Print_interval
                train_print_loss_total = 0
                print('%s (%d %d%%) %.8f' % (timeSince(time_tr_start, step / self.Num_iters),
                                             step, step / self.Num_iters * 100, print_loss_avg))
            if step % self.Plot_interval == 0:
                plot_losses.append(train_plot_loss_total / self.Plot_interval)
                train_plot_loss_total = 0

        # Persist the averaged-loss curve under a descriptive file name.
        fig_tag = ('Loss_' + self.cell_name + '_L' + str(self.Num_layers) +
                   '_H' + str(self.Hidden_Size) + '_I' + str(self.Num_iters) +
                   '_' + self.Optim_method)
        plot_loss(plot_losses, Fig_name=save_road + fig_tag)
        print('\n------------------------------------------------')
        print('RNN Model finished fitting')
        print('------------------------------------------------')

        return self, Predict_ViewList
Exemplo n.º 2
0
    def fit(self, input, target, save_road='./Results/'):
        """fit the data to scn

        Incrementally grows the SCN's hidden layer (via ``self.construct``)
        until either ``hidden_size`` neurons are reached, construction
        fails, or the MSE drops below ``self.tolerance``.  The output
        weights are solved by ridge regression at every epoch.

        Parameters
        ----------
        input : torch array-like shape, (n_samples, Input_dim)
            The data to be transformed.
        target : torch array-like shape, (n_samples, Output_dim)
            The data to be transformed.

        Returns
        -------
        self : returns an instance of self.
        """
        fit_error = target.clone().detach()
        epoch = 1
        # Initialize timer
        time_tr_start = time.time()
        if self.plot_ == True:
            plot_losses = []
        train_print_loss_total = 0  # Reset every print_every
        train_plot_loss_total = 0  # Reset every plot_every

        while (epoch <= self.hidden_size) and (self.loss > self.tolerance):

            if epoch >= 2:
                if self.construct(epoch, input, target, fit_error) == False:
                    # once construct, hidden neurons add one; stop growing
                    # the network when a new neuron cannot be constructed
                    break

            H_state = self.forward(input)

            # solve the linear problem  H_state * Weight_HO = Output by ridge regression
            self.regressor.fit(H_state, target.data.numpy())
            pred = self.regressor.predict(H_state)
            # solve the linear problem: H_state * Weight_HO = Output by least square
            # self.weight_HO, LU = torch.gesv(target,H_state)
            # pred = torch.mm(H_state, self.weight_HO)
            # pred = pred.data.numpy()

            self.loss = mean_squared_error(target.data.numpy(), pred)

            training_rmse = np.sqrt(self.loss)
            if self.plot_ == True:
                train_plot_loss_total += training_rmse
            train_print_loss_total += training_rmse

            # Residual used by self.construct to pick the next neuron.
            fit_error = torch.from_numpy(pred).float() - target

            if epoch % self.Print_interval == 0:
                print_loss_avg = train_print_loss_total / self.Print_interval
                train_print_loss_total = 0
                print('%s (%d %d%%) ' % (timeSince(time_tr_start, epoch / self.hidden_size),
                                             epoch, epoch / self.hidden_size * 100))
                print('Training RMSE:  \t %.3e' % (print_loss_avg))
            if self.plot_ == True:
                if epoch % self.Plot_interval == 0:
                    plot_loss_avg = train_plot_loss_total / self.Plot_interval
                    plot_losses.append(plot_loss_avg)
                    train_plot_loss_total = 0

            epoch = epoch+1
        if self.plot_ == True:
            plot_loss(plot_losses, Fig_name=save_road+'Loss_'+"SCN" + '_H' + str(self.hidden_size) +'_C' + str(self.candidate_size))
        print('\n------------------------------------------------')
        print('SCN Model finished fitting')
        print('------------------------------------------------')
        # Bug fix: the docstring promises an instance of self (and sibling
        # fit methods return self), but no return statement was present.
        return self
Exemplo n.º 3
0
    def fit(self, input, target, save_road='./Results/'):
        """Train the MLP on ``(input, target)`` with MSE loss.

        Prints a banner describing the configuration, runs ``Num_iters``
        optimisation steps with a StepLR schedule, saves the RMSE-loss
        curve via ``plot_loss`` and returns ``self``.
        """
        model_tag = (self.cell_name + '_L' + str(self.Num_layers) +
                     '_H' + str(self.Hidden_Size) + '_I' + str(self.Num_iters) +
                     '_' + self.Optim_method)
        print('================================================')
        # print(self.Cell)
        print(model_tag)
        print('================================================\n')
        input = input.cuda()
        target = target.cuda()
        criterion = nn.MSELoss()
        if self.Optim_method == 'SGD':
            optimizer = optim.SGD(
                self.parameters(), lr=self.Learn_rate, momentum=0)
        if self.Optim_method == 'Adam':
            optimizer = optim.Adam(self.parameters(), lr=self.Learn_rate)
        # Decay the learning rate by 5% every 100 iterations.
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.95)

        # Timer used for the periodic progress report.
        time_tr_start = time.time()

        plot_losses = []
        train_print_loss_total = 0  # reset every Print_interval
        train_plot_loss_total = 0   # reset every Plot_interval

        # Training loop.
        for step in range(1, self.Num_iters + 1):
            scheduler.step()
            # input:      [batch, input_dim]
            # prediction: [batch, output_dim=1]
            prediction = self.forward(input)
            loss = criterion(prediction, target)
            training_rmse = np.sqrt(loss.item())
            train_plot_loss_total += training_rmse
            train_print_loss_total += training_rmse
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % self.Print_interval == 0:
                print_loss_avg = train_print_loss_total / self.Print_interval
                train_print_loss_total = 0
                print('%s (%d %d%%) ' % (timeSince(time_tr_start, step / self.Num_iters),
                                             step, step / self.Num_iters * 100))
                print('Training RMSE:  \t %.3e' % (print_loss_avg))
            if step % self.Plot_interval == 0:
                plot_losses.append(train_plot_loss_total / self.Plot_interval)
                train_plot_loss_total = 0

        # Persist the averaged RMSE curve.
        plot_loss(plot_losses, Fig_name=save_road + 'Loss_' + model_tag)
        print('\n------------------------------------------------')
        print('MLP Model finished fitting')
        print('------------------------------------------------')

        return self
Exemplo n.º 4
0
    def fit_validate(self, train_input, train_target, validate_input, validate_target, save_road='./Results/'):
        """Train the RNN while tracking RMSE on a held-out validation set.

        Runs ``Num_iters`` optimisation steps on the training pair, and at
        every step also evaluates (without training on) the validation
        pair.  Averaged training/validation RMSE curves are saved via
        ``plot_train``.  Returns self.
        """
        train_input = train_input.cuda()
        train_target = train_target.cuda()
        validate_input = validate_input.cuda()
        validate_target = validate_target.cuda()

        RNN_h_state = self.initHidden(train_input)
        validate_RNN_h_state = self.initHidden(validate_input)

        criterion = nn.MSELoss()
        if self.Optim_method == 'ASGD':
            optimizer = optim.ASGD(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adam':
            optimizer = optim.Adam(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'RMSprop':
            optimizer = optim.RMSprop(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adadelta':
            optimizer = optim.Adadelta(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adagrad':
            optimizer = optim.Adagrad(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'SparseAdam':
            # Bug fix: this branch previously instantiated optim.Adagrad,
            # silently ignoring the requested SparseAdam optimizer.
            optimizer = optim.SparseAdam(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adamax':
            optimizer = optim.Adamax(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'SGD':
            optimizer = optim.SGD(
                self.parameters(), lr=self.Learn_rate, momentum=0)

        # Initialize timer
        time_tr_start = time.time()

        training_losses = []
        validate_losses = []

        train_print_loss_total = 0  # Reset every print_every
        train_plot_loss_total = 0  # Reset every plot_every

        validate_print_loss_total = 0  # Reset every print_every
        validate_plot_loss_total = 0  # Reset every plot_every

        # begin to train
        for step in range(1, self.Num_iters + 1):
            # input: shape[batch,time_step,input_dim]
            # h_state: shape[layer_num*direction,batch,hidden_size]
            # rnn_output: shape[batch,time_sequence_length,hidden_size]
            prediction, RNN_h_state = self.forward(train_input, RNN_h_state)
            # Detach the hidden state so gradients do not flow across
            # iterations.
            RNN_h_state = RNN_h_state.data.cuda()
            loss = criterion(prediction, train_target)
            training_rmse = np.sqrt(loss.item())
            train_plot_loss_total += training_rmse
            train_print_loss_total += training_rmse
            optimizer.zero_grad()
            loss.backward()

            # Evaluation only: no_grad avoids building an autograd graph
            # for the validation pass (the previous code built one and
            # never used it).  The updated validation hidden state is
            # deliberately discarded, as in the original implementation.
            with torch.no_grad():
                validate_prediction, _ = self.forward(
                    validate_input, validate_RNN_h_state)
                validate_loss = criterion(validate_prediction, validate_target)
            validate_rmse = np.sqrt(validate_loss.item())
            validate_print_loss_total += validate_rmse
            validate_plot_loss_total += validate_rmse

            optimizer.step()

            if step % self.Print_interval == 0:
                print_loss_avg = train_print_loss_total / self.Print_interval
                train_print_loss_total = 0

                validate_print_loss_avg = validate_print_loss_total / self.Print_interval
                validate_print_loss_total = 0

                print('%s (%d %d%%) ' % (timeSince(time_tr_start, step / self.Num_iters),
                                         step, step / self.Num_iters * 100))
                print('Training RMSE:  \t %.3e\nValidating RMSE:\t %.3e' %
                      (print_loss_avg, validate_print_loss_avg))

            if step % self.Plot_interval == 0:
                plot_loss_avg = train_plot_loss_total / self.Plot_interval
                training_losses.append(plot_loss_avg)
                train_plot_loss_total = 0

                validate_plot_loss_avg = validate_plot_loss_total / self.Plot_interval
                validate_losses.append(validate_plot_loss_avg)
                validate_plot_loss_total = 0

        # Plot RMSE Loss Figure.
        # Bug fix: the accumulated values are already RMSE (np.sqrt is
        # applied per iteration above), so taking np.sqrt again here
        # double-rooted the plotted curves.
        training_losses = np.asarray(training_losses)
        validate_losses = np.asarray(validate_losses)
        plot_train(training_losses, validate_losses, Fig_title=self.cell_name+'_L'+str(self.Num_layers)+'_H'+str(self.Hidden_Size)+'_E'+str(self.Num_iters)+'_' +
                   self.Optim_method, Fig_name=save_road+'_Loss_'+self.cell_name+'_L'+str(self.Num_layers) + '_H'+str(self.Hidden_Size)+'_E'+str(self.Num_iters)+'_'+self.Optim_method)
        print('\n------------------------------------------------')
        print('RNN Model finished fitting')
        print('------------------------------------------------')

        return self
Exemplo n.º 5
0
    def fit(self, input, target, save_road='./Results/'):
        """Train the RNN on ``(input, target)`` with MSE loss.

        The loss is computed between the last-timestep prediction
        ``prediction[:, -1, :]`` and the first-timestep target
        ``target[:, 0, :]``.  Uses a StepLR schedule (5% decay every 100
        epochs), saves the averaged RMSE curve via ``plot_loss`` and
        returns self.
        """
        input = input.cuda()
        target = target.cuda()
        RNN_h_state = self.initHidden(input)
        criterion = nn.MSELoss()
        if self.Optim_method == 'ASGD':
            optimizer = optim.ASGD(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adam':
            optimizer = optim.Adam(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'RMSprop':
            optimizer = optim.RMSprop(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adadelta':
            optimizer = optim.Adadelta(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adagrad':
            optimizer = optim.Adagrad(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'SparseAdam':
            # Bug fix: this branch previously instantiated optim.Adagrad,
            # silently ignoring the requested SparseAdam optimizer.
            optimizer = optim.SparseAdam(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'Adamax':
            optimizer = optim.Adamax(self.parameters(), lr=self.Learn_rate)
        elif self.Optim_method == 'SGD':
            optimizer = optim.SGD(
                self.parameters(), lr=self.Learn_rate, momentum=0)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.95)

        # Initialize timer
        time_tr_start = time.time()

        plot_losses = []
        train_print_loss_total = 0  # Reset every print_every
        train_plot_loss_total = 0  # Reset every plot_every

        # begin to train
        for epoch in range(1, self.Num_iters + 1):
            # NOTE(review): modern PyTorch expects scheduler.step() after
            # optimizer.step(); kept here to preserve the original schedule.
            scheduler.step()
            # input: shape[batch,time_step,input_dim]
            # h_state: shape[layer_num*direction,batch,hidden_size]
            # rnn_output: shape[batch,time_sequence_length,hidden_size]
            prediction, RNN_h_state = self.forward(input, RNN_h_state)
            # Detach the hidden state so gradients do not flow across epochs.
            RNN_h_state = RNN_h_state.data.cuda()
            prediction_2d = prediction[:, -1, :]
            target_2d = target[:, 0, :]
            loss = criterion(prediction_2d, target_2d)
            training_rmse = np.sqrt(loss.item())
            train_plot_loss_total += training_rmse
            train_print_loss_total += training_rmse
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if epoch % self.Print_interval == 0:
                print_loss_avg = train_print_loss_total / self.Print_interval
                train_print_loss_total = 0
                print('%s (%d %d%%) ' % (timeSince(time_tr_start, epoch / self.Num_iters),
                                             epoch, epoch / self.Num_iters * 100))
                print('Training RMSE:  \t %.3e' % (print_loss_avg))
            if epoch % self.Plot_interval == 0:
                plot_loss_avg = train_plot_loss_total / self.Plot_interval
                plot_losses.append(plot_loss_avg)
                train_plot_loss_total = 0

        # Plot loss figure
        plot_loss(plot_losses, Fig_name=save_road+'Loss_'+self.cell_name+'_L'+str(self.Num_layers) +
                  '_H'+str(self.Hidden_Size)+'_I'+str(self.Num_iters)+'_'+self.Optim_method)
        print('\n------------------------------------------------')
        print('RNN Model finished fitting')
        print('------------------------------------------------')

        return self