Code example #1
0
    def check_param_minmax(self):
        '''Collect every weight and bias parameter across the networks,
        save the flattened values for distribution analysis, and print the
        min/max of each group.

        Intended as a sanity check on a trained model (loaded parameters /
        no-crash data) to spot exploding or vanishing parameters after a
        backward pass.
        '''
        weights = []
        biases = []
        for net in self.get_netlist():
            for name, param in net.named_parameters():
                # Flatten each tensor so heterogeneously shaped parameters
                # can be concatenated into one 1-D tensor per group.
                if 'weight' in name:
                    weights.append(param.view(-1))
                if 'bias' in name:
                    biases.append(param.view(-1))
        ws = torch.cat(weights)
        bs = torch.cat(biases)

        data_io.write_param_dist('../data/gen_by_ML/nocrash_retrain/w_b.pt',
                                 ws, bs)

        # Tensor.max()/.min() reduce in C; the builtin max()/min() would
        # iterate the tensor element-by-element in Python, which is very
        # slow for large parameter vectors.
        print('w max', ws.max(), 'min', ws.min())
        print('b max', bs.max(), 'min', bs.min())
Code example #2
0
    def check_grad_minmax(self):
        '''Collect the gradients of every weight and bias parameter across
        the networks, save the flattened values for distribution analysis,
        and print the min/max of each group.

        Must be called after a backward pass; raises RuntimeError if any
        weight/bias parameter has no gradient yet.
        '''
        gradw = []
        gradb = []
        for net in self.get_netlist():
            for name, param in net.named_parameters():
                if 'weight' not in name and 'bias' not in name:
                    continue
                # .grad is None until backward() has run; fail with a clear
                # message instead of an opaque AttributeError on .view().
                if param.grad is None:
                    raise RuntimeError(
                        'parameter %r has no gradient; run backward() '
                        'before calling check_grad_minmax' % name)
                flat = param.grad.view(-1)
                if 'weight' in name:
                    gradw.append(flat)
                if 'bias' in name:
                    gradb.append(flat)
        gradw = torch.cat(gradw)
        gradb = torch.cat(gradb)

        data_io.write_param_dist(
            '../data/gen_by_ML/nocrash_retrain/gradw_gradb.pt', gradw, gradb)

        # Tensor.max()/.min() reduce in C instead of iterating elementwise
        # in Python as the builtin max()/min() would.
        print('grad w max', gradw.max(), 'min', gradw.min())
        print('grad b max', gradb.max(), 'min', gradb.min())