def weights(self):
   '''Get model parameters

   Returns:
      a numpy array of model parameters
   '''
   return getParameters(self.net)
def params(self):
   '''Get model parameter vector

   Returns:
      params: A vector of parameters
   '''
   return param.getParameters(self)
def permuteNet(self, goodIdx, badIdx):
   #Overwrite the weaker network's weights with a Gaussian-perturbed
   #copy of the stronger network's weights, scaled by config.PERMVAL
   goodNet = self.model.net.net[goodIdx]
   badNet  = self.model.net.net[badIdx]

   goodParams = getParameters(goodNet)
   noise      = self.config.PERMVAL * np.random.randn(len(goodParams))
   goodParams = np.array(goodParams) + noise

   setParameters(badNet, goodParams)
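The snippets here rely on flat getParameters / setParameters helpers defined elsewhere in the codebase. The sketch below is a minimal illustration of what such helpers could look like for a standard torch.nn.Module; the names getParametersSketch / setParametersSketch and their bodies are assumptions for illustration, not the project's actual implementation.

import numpy as np
import torch

def getParametersSketch(net):
   #Illustrative sketch only: flatten every parameter tensor of a
   #torch.nn.Module into a single numpy vector
   return np.concatenate(
         [p.data.cpu().numpy().ravel() for p in net.parameters()])

def setParametersSketch(net, flat):
   #Illustrative sketch only: copy slices of a flat vector back into
   #each parameter tensor, in the same order they were read out
   idx = 0
   for p in net.parameters():
      n = p.numel()
      p.data = torch.Tensor(flat[idx:idx+n]).view(p.shape)
      idx += n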
def __init__(self, ann, config):
   self.saver = save.Saver(config.MODELDIR,
         'models', 'bests', resetTol=256)
   self.config = config

   print('Initializing new model...')
   self.net = ann(config)
   self.parameters = Parameter(
         torch.Tensor(np.array(getParameters(self.net))))

   #Have been experimenting with population based
   #training. Nothing stable yet -- advise avoiding
   if config.POPOPT:
      self.opt = PopulationOptimizer(self, config)
   else:
      self.opt = GradientOptimizer(self, config)

   if config.LOAD or config.BEST:
      self.load(self.opt, config.BEST)
def initModel(self):
   return getParameters(trinity.ANN(self.config))
def params(self):
   return param.getParameters(self)
def model(self):
   return ([getParameters(ann) for ann in self.anns],
           [getParameters(self.lawmaker)])
def initModel(self):
   return getParameters(ANN(self.config, self.args))