Example #1
def train_network(samples_filename, weights_filename):
    """ Perform training on network from file and saved weights. """
    sample_set = SampleSet()
    sample_set.init_from_file(samples_filename)
    net = HopfieldNet(sample_set.sample_size)
    net.initialize()
    for sample in sample_set:
        net.train(sample)
    save_weights_to_file(net.weights, weights_filename)
Example #2
def test_network(weights_filename, samples_filename):
    """ Perform testing on network from weights file."""
    weights = read_weights_from_file(weights_filename)
    net = HopfieldNet(len(weights), weights)
    sample_set = SampleSet()
    sample_set.init_from_file(samples_filename)
    results = []
    for sample in sample_set:
        results.append(net.test(sample))
    return results
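
Examples #1 and #2 form a natural train-then-test round trip. Below is a minimal sketch of how the two might be chained; the function and file names here are hypothetical, and the helpers are the ones defined above:

def train_and_test(train_file, test_file, weights_file):
    """Train on one sample file, then test another against the saved weights."""
    train_network(train_file, weights_file)          # fit and persist weights
    results = test_network(weights_file, test_file)  # reload and evaluate
    for i, result in enumerate(results):
        print(f"sample {i}: {result}")
    return results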
Example #3
def test_from_file(samples_filename, test_samples_filename, results_filename):
    """ Perform test on network from specified sample filenames """
    sample_set = SampleSet()
    sample_set.init_from_file(samples_filename)
    testing_set = SampleSet()
    testing_set.init_from_file(test_samples_filename)
    perform_test(sample_set, testing_set, results_filename)
Example #4
def random_pattern_test(sample_set_size, vector_size):
    """ Perform test on network using random patterns """
    pattern_gen = PatternGen(vector_size)
    samples = []
    for _ in range(sample_set_size):
        samples.append(pattern_gen.generate_random_pattern())
    sample_set = SampleSet(samples)
    # The training set doubles as the test set (pattern-recall check).
    perform_test(sample_set, sample_set)
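
Example #4 depends on a PatternGen class that is not shown. A minimal sketch of what it might look like, assuming patterns are the bipolar (-1/+1) vectors conventional for Hopfield networks (an assumption, not the original implementation):

import numpy as np

class PatternGen:
    """Hypothetical stand-in for the PatternGen used above."""

    def __init__(self, vector_size):
        self.vector_size = vector_size

    def generate_random_pattern(self):
        # Each component is drawn uniformly from {-1, +1}, the usual
        # state encoding for Hopfield units.
        return np.random.choice([-1, 1], size=self.vector_size)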
Example #5
class sequential:
    """Density model fit by sequential, single-feature lambda updates."""
    def __init__(self, fname='', csvfile='', backgroundnum=10000):
        self.fs = FeatureSpace(fname)
        self.featurename = list(self.fs.layers.keys())
        self.sampleset = SampleSet(csvfile)
        self.sampleset.getbgvalues(self.fs)
        bglist = self.fs.layers['h_dem'].getRDsamples(backgroundnum)
        self.bgset = self.fs.getlayervalues(bglist)

        self.density = []
        self.linearPredictor = np.zeros(backgroundnum)
        self.numFeatures = len(self.fs.layers.keys())
        self.features = []
        self.featureGenerators = []
        self.linearPredictorNormalizer = 0.0
        self.densityNormalizer = self.bgset.shape[0]
        self.entropy = -1.0
        self.reg = 0
        self.iteration = -1
        self.activeLayer = {'ecoreg', 'h_dem', 'tmp6190_ann'}

    def isActive(self, nm):
        return nm in self.activeLayer

    def featuresToUpdate(self):
        toUpdate = []
        dlb = []
        for feature in self.features:
            dlb.append(deltaLossBound(feature))
        # Visit features in order of their delta-loss bound (best first).
        orderedDlb = np.argsort(dlb)
        for n in orderedDlb:
            if not self.features[n].isGenerated():
                toUpdate.append(self.features[n])
        return toUpdate

    def increaseLambda(self, f, alpha=0.0, toUpdate=None):
        if toUpdate is None:
            toUpdate = []
        # Update the L1-regularization term for the new lambda value.
        self.reg += np.abs(f.getLambda() + alpha) - np.abs(f.getLambda() * f.getSampleDeviation())
        if alpha == 0:
            return None
        f.increaseLambda(alpha)
        # Keep the owning generator's copy of the lambdas in sync.
        for fg in self.featureGenerators:
            if f.name == fg.name:
                fg.lambdas[f.thrnum] += alpha
                break
        self.linearPredictor += f.eval(self.bgset[f.name]) * alpha
        self.linearPredictorNormalizer = max(max(self.linearPredictor), self.linearPredictorNormalizer)
        for feature in toUpdate:
            # TODO: record the iteration of the last expectation update.
            feature.lastExpectationUpdate = 0
        self.setDensity(toUpdate)
        return self.getLoss()

    def doSequentialUpdate(self, feature, iteration=0):
        feature.lastChange = self.iteration
        toUpdate = self.featuresToUpdate()
        # Step size for this feature's lambda, damped as iterations progress.
        alpha = goodAlpha(feature)
        alpha = reduceAlpha(alpha, iteration)
        return self.increaseLambda(feature, alpha, toUpdate)

    def feature2Generator(self, featureName=''):
        return ThrFeatureGenerator(featureName, self.sampleset[featureName], self.bgset[featureName])

    def getLoss(self):
        total = 0.0
        for feature in self.features:
            total += feature.lam * feature.getSampleExpectation()
        try:
            with np.errstate(divide='raise'):
                # Negative log-likelihood of the samples under the current model.
                res = -total + self.linearPredictorNormalizer + np.log(self.densityNormalizer) + self.reg
        except FloatingPointError:
            print("error: ", total)
            return None
        return res

    def getN1(self, *args):
        # Stub: not yet implemented.
        pass

    def linearPredictor(self, sample):
        # NOTE: this stub is shadowed by the self.linearPredictor array
        # assigned in __init__, so it is unreachable on instances.
        pass

    def setDensity(self, toUpdate=None):
        if toUpdate is None:
            toUpdate = []
        # Unnormalized Gibbs density over the background points.
        self.density = BIASEDFEATURE * np.exp(self.linearPredictor - self.linearPredictorNormalizer)
        density_sum = np.zeros(len(toUpdate))
        for i, feature in enumerate(toUpdate):
            density_sum[i] = np.sum(feature.eval(self.bgset[feature.name]) * self.density)
        self.densityNormalizer = np.sum(self.density)
        for i, feature in enumerate(toUpdate):
            feature.expectation = density_sum[i] / self.densityNormalizer
        for featureGenerator in self.featureGenerators:
            featureGenerator.updateFeatureExpectations(self)

    def getDensity(self, sample):
        pass

    def getBestFeature(self):
        bestlb = np.inf
        bestFeature = None
        # Scan every thresholded feature and keep the one with the
        # smallest delta-loss bound.
        for generator in self.featureGenerators:
            for num in range(len(generator.thr)):
                ft = generator.exportFeature(num)
                bound = deltaLossBound(ft)
                if bound < bestlb:
                    bestlb = bound
                    bestFeature = ft
        if bestFeature is not None and bestFeature not in self.features:
            self.features.append(bestFeature)
        return bestFeature

    def newDensity(self):
        pass

    def scaledBiasDist(self, biasDistFeature):
        pass

    def setFeatures(self):
        for nm in self.featurename:
            if self.isActive(nm):
                feature = Linear(nm, 0, self.fs.layers[nm].min, self.fs.layers[nm].max)
                # Evaluate the feature once over the sample values.
                values = feature.eval(self.sampleset[nm])
                fInfo = SampleInfo(values.mean(), values.std(), values.min(), values.max())
                biasInfo = SampleInfo(1.0, 0.0, 1.0, 1.0, self.sampleset.shape[0])
                fInterval = Interval(sampleinfo1=fInfo, sampleinfo2=biasInfo, beta=0.05)
                feature.samplexpectation = fInterval.getMid()
                feature.sampledeviation = fInterval.getDev()
                self.features.append(feature)

    def setFeatureGenerators(self):
        for feature in self.features:
            nm = feature.name
            featureGenerator = ThrFeatureGenerator(nm, self.sampleset[nm], self.bgset[nm])
            featureGenerator.setThrExpectation()
            self.featureGenerators.append(featureGenerator)

    def setBiasDiv(self):
        pass

    def setLinearPredictor(self):
        for feature in self.features:
            self.linearPredictor += feature.eval(self.bgset[feature.name]) * feature.getLambda()
        # NOTE: initialized from np.min here, whereas increaseLambda tracks the
        # running max; one of the two is likely unintended.
        self.linearPredictorNormalizer = np.min(self.linearPredictor)

    def setBiasDist(self):
        pass

    def goodAlpha(self):
        pass

    def getEntropy(self):
        pass

    def run(self):
        # Greedy sequential training: repeatedly pick the feature with the
        # best delta-loss bound and step its lambda.
        newLoss = self.getLoss()
        for iteration in range(70):
            bestFeature = self.getBestFeature()
            if bestFeature is None:
                break
            newLoss = self.doSequentialUpdate(bestFeature, iteration)

    def predict(self):
        pointsres = np.zeros(self.bgset.shape[0])
        sampleres = np.zeros(self.sampleset.shape[0])
        for feature in self.features:
            sampleres += feature.eval(self.sampleset[feature.name]) * feature.lam
            pointsres += feature.eval(self.bgset[feature.name]) * feature.lam
        return sampleres, pointsres
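
A plausible end-to-end use of the class above, reusing the paths from the __main__ block in Example #6; the call order is an assumption inferred from the methods' dependencies (features before generators, generators before the predictor), not a documented workflow:

model = sequential(fname=r'D:\test\SJY\asc',
                   csvfile=r'D:\test\SJY\with9factors\settlements_samplePredictions.csv',
                   backgroundnum=10000)
model.setFeatures()            # linear features for the active layers
model.setFeatureGenerators()   # threshold-feature generators per layer
model.setLinearPredictor()     # initial predictor over background points
model.setDensity(model.features)
model.run()                    # greedy sequential lambda updates
sampleres, pointsres = model.predict()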
Example #6
if __name__ == '__main__':
    fname = r'D:\test\SJY\asc'
    csvfile = r'D:\test\SJY\with9factors\settlements_samplePredictions.csv'
    lambdafile = r'D:\test\SJY\with9factors\settlements.lambdas'
    fs = FeatureSpace(fname)
    ss = SampleSet(csvfile)
    l = Lambdas(lambdafile)
    l.parselambdafile()

    ss.getbgvalues(fs)
    dt = ss.getfeatures()
    for nm in dt.columns:
        for v in l.lambdas:
            if nm in v.name:
                print(nm, v)

    res = []
    for v in l.lambdas:
        if isinstance(v, Product):
            g1, g2 = v.name
            pp = v.eval(ss.csv[g1], ss.csv[g2]) * v.lam
            res.append(pp)
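    # Hypothetical continuation (the original listing ends above): combine
    # the per-Product contributions into a single predictor column.
    if res:
        total = np.sum(res, axis=0)
        print('combined product contribution:', total[:5])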