Example #1
def rtrl_batch_XOR(numSamples=500,seqLen=50,delay=2):
    # generate data - a set of xor streams
    I = np.empty([seqLen,numSamples,2])
    T = np.empty([seqLen,numSamples,1])
    for n in range(numSamples):
        (tmpI,tmpT) = genData.my_xor_stream(seqLen,delay)
        I[:,n,:] = tmpI
        T[:,n,:] = tmpT
    # init RNN
    W=rtrl.allocate(2,1,5)
    ANN.initWeights(W)
    uhs.learningRate.rate = 0.000015
    uhs.momentum.momentum = 0.1
    uhs.momentum.paramUpdates_old = None
    # learn
    errorHist = rtrl.rtrl_batch(I,T,W,maxEpochs=3000,updateHooks=(uhs.learningRate,uhs.momentum))
    # evaluate solution
    net = np.empty((seqLen*1,numSamples,6))
    A = np.zeros((seqLen*1+1,numSamples,6))
    rtrl.batch_forward(I,W,net,A,afs.sigmoid,numCycles=1)
    target = T[19:50,:,0] > 0.5
    learned = A[20:51,:,0] > 0.5
    result = (target == learned)  # count agreements with the target, not just true positives
    accuracy = float(np.sum(result)) / float(np.prod(result.shape))
    print "Accuracy: " + str(accuracy)
    # return activations
    return (I,T,A)
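
genData.my_xor_stream is not shown in any of these examples. A minimal sketch of what it plausibly returns, assuming the target is the XOR of the two input bits from `delay` steps earlier (the `delay=2` argument and the shifted comparison of T[19:50] against A[20:51] both point at a delayed-target task):

import numpy as np

def my_xor_stream(seqLen, delay):
    # Hypothetical sketch: two random bit streams as input; the target at
    # step t is the XOR of the inputs from `delay` steps earlier, with the
    # first `delay` targets left at zero.
    I = (np.random.rand(seqLen, 2) > 0.5).astype(float)
    T = np.zeros((seqLen, 1))
    T[delay:, 0] = np.logical_xor(I[:-delay, 0] > 0.5, I[:-delay, 1] > 0.5)
    return I, T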
Example #2
    def test_train(self):
        print "test_train"
        network = ANN.basic_ANN(2, 3, 1)
        network.input_weights = np.array([[-0.24434436, 0.85612428],
                                          [0.70503219, 0.35294645],
                                          [0.32192263, 0.82835977]])
        network.output_weights = np.array(
            [[-0.10350651, -0.21068824, 0.60715585]])
        print network.input_weights
        print network.output_weights
        print ""
        print ANN.sigmoid(network.input_weights)

        error = network.train(np.array([0, 0]), 0)
        print error
        self.assertTrue(abs(error - 0.536555) < 1e-5)
        error = network.train(np.array([0, 1]), 0)
        print error
        self.assertTrue(abs(error - 0.556309) < 1e-5)
        error = network.train(np.array([1, 0]), 0)
        print error
        self.assertTrue(abs(error - 0.541293) < 1e-5)
        error = network.train(np.array([1, 1]), 1)
        print error
        self.assertTrue(abs(error - 0.440861) < 1e-5)
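
unittest has a built-in form for tolerance checks like these; the first assertion could equally be written as:

        # Idiomatic unittest equivalent of the first tolerance check.
        self.assertAlmostEqual(error, 0.536555, places=5)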
Example #3
def test_digits(model, digits, labels, ensemble_size, reshape_fun):
    steps_results = {'c_error': {}, 'entropy': {}}

    dnum = 200

    pb = ProgressBar(total=100,
                     prefix='Sim trial progress',
                     length=25,
                     fill='=',
                     zfill='_')
    for i in range(1, 101):
        dnoice = salt_and_pepper(digits, i * dnum)

        d = utils.normalize_data(reshape_fun(dnoice))
        entropy = ann.test_model(model, [d] * ensemble_size,
                                 labels,
                                 metric='entropy')
        c_error = ann.test_model(model, [d] * ensemble_size,
                                 labels,
                                 metric='c_error')
        steps_results['entropy'][i] = entropy
        steps_results['c_error'][i] = c_error
        pb.print_progress_bar(i)

    return steps_results
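
salt_and_pepper is not defined in any of these snippets, and the callers disagree about its return value (a single noisy batch here, a pair in Example #5 below), so the following is only a plausible sketch: corrupt a given number of randomly chosen pixels to the extremes of an assumed [0, 255] range.

import numpy as np

def salt_and_pepper(images, count):
    # Hypothetical sketch: set `count` random pixels across the batch to
    # pure black or pure white.
    noisy = images.astype(float)
    flat = noisy.reshape(-1)
    idx = np.random.choice(flat.size, size=count, replace=False)
    flat[idx] = np.random.choice([0.0, 255.0], size=count)
    return noisy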
Example #4
    def __init__(self, n_input, n_hidden, n_nodes, lr):
        self.n_hidden = n_hidden
        self.n_nodes = n_nodes
        self.lr = lr
        self.cross_en = []

        weight_bias = 1

        self.layers = []
        for i in range(0, self.n_hidden):
            layer = []

            for j in range(0, self.n_nodes):
                n_perceptron = self.n_nodes
                bias = random.random()

                if i == 0:
                    n_perceptron = n_input
                node = ANN.neuron(n_perceptron, bias, self.lr, weight_bias)
                layer.append(node)
            self.layers.append(layer)

        output_layer = []

        # The output neurons take the last hidden layer's activations, so
        # their fan-in is self.n_nodes; reusing the loop variable
        # n_perceptron would be wrong when n_hidden == 1.
        output_layer.append(ANN.neuron(self.n_nodes, 0.0, self.lr,
                                       weight_bias))
        output_layer.append(ANN.neuron(self.n_nodes, 0.0, self.lr,
                                       weight_bias))
        self.layers.append(output_layer)
Example #5
def test_digits(model, digits, labels, ensemble_size, reshape_fun):
    steps_results = {'c_error': {}, 'entropy': {}}

    dnum = 80

    for i in range(1, 101):
        dless, dmore = salt_and_pepper(digits, i * dnum)

        d = utils.normalize_data(reshape_fun(dmore))
        entropy = ann.test_model(model, [d] * ensemble_size,
                                 labels,
                                 metric='entropy')
        c_error = ann.test_model(model, [d] * ensemble_size,
                                 labels,
                                 metric='c_error')
        steps_results['entropy'][i] = entropy
        steps_results['c_error'][i] = c_error

        d = utils.normalize_data(reshape_fun(dless))
        entropy = ann.test_model(model, [d] * ensemble_size,
                                 labels,
                                 metric='entropy')
        c_error = ann.test_model(model, [d] * ensemble_size,
                                 labels,
                                 metric='c_error')
        steps_results['entropy'][-1 * i] = entropy
        steps_results['c_error'][-1 * i] = c_error

    return steps_results
Example #6
def test_digits(model, model_list, ensemble_size, digits, labels):
    l_c_errors = []
    l_pred_entropy = []
    for d in digits:
        c_error = ann.test_model(model, [d]*ensemble_size, labels, metric='c_error')
        entropy = ann.test_model(model, [d]*ensemble_size, labels, metric='entropy')
        l_c_errors.append(c_error)
        l_pred_entropy.append(entropy)

    return l_c_errors, l_pred_entropy
Example #7
File: example.py Project: d-e-lu/NN
def main():

    lsizes = np.array(([784], [50], [30], [10]))
    learning_rate = 0.4
    bias_learning_rate = 0.4
    n = ANN.ArtificialNeuralNet(lsizes, ANN.squared_error, learning_rate,
                                bias_learning_rate)
    training_data, validation_data, testing_data = mnist_loader.load_data_wrapper(
    )

    nc = ANN.NetworkChecker()

    if nc.check(n, training_data[0][0], training_data[0][1].T.ravel()):
        n.train(training_data)
        percentage, correct, incorrect = n.test(
            testing_data=testing_data, output_size=training_data[0][1].size)
        print percentage, "%"
        print "Correct [1-9]", correct
        print "Incorrect [1-9]", incorrect
        # n.save_weights(percentage, lsizes, learning_rate)

    user_input = raw_input(
        "Please select image file to detect. Or type q to quit.\n")

    while True:
        if user_input == "q":
            break
        elif user_input == "draw":
            PygamePaint.draw()
            raw_image = Image.open("screenshot.png").convert('L')
        else:
            try:
                raw_image = Image.open(user_input).convert('L')
            except IOError:
                print "Not a valid file. Please try again\n"
                user_input = raw_input(
                    "Please select image file to detect. Or type q to quit.\n")
                continue

        scaled_image = raw_image.resize((28, 28))
        scaled_image.save("scaled.png")
        image = np.reshape(np.asarray(scaled_image), (1, 784))
        image = np.true_divide(image, 255)

        vals = n.forward(image)
        print "I see a {}".format(np.argmax(vals))
        print vals
        user_input = raw_input(
            "Please select image file to detect. Or type q to quit.\n")
Example #8
def thickness_sim(model_list, data, labels, thicknesses):

    m_preds = {}
    m_bits = {}
    m_cerr = {}

    for d, t in zip(data, thicknesses):

        m_preds[t] = list(map(lambda m: m.predict(d), model_list))
        m_cerr[t] = list(
            map(lambda m: ann.test_model(m, d, labels, "c_error"), model_list))
        m_bits[t] = list(
            map(lambda m: ann.test_model(m, d, labels, "entropy"), model_list))

    return m_preds, m_cerr, m_bits
Example #9
File: ANN_loader.py Project: mk2908/WSD
    def _addLayer(self, type, neurons):  # add empty neurons
        """Add a layer to the network of a specified type. Complain
      if there is no support for the specified type."""

        if ('input' == type):
            self._currentlayer = self._inputlayer = ANN.InputLayer(
                int(neurons))
        elif ('hidden' == type):
            self._currentlayer = ANN.HiddenLayer(int(neurons))
            self._hiddenlayers.append(self._currentlayer)
        elif ('output' == type):
            self._currentlayer = self._outputlayer = ANN.OutputLayer(
                int(neurons))
        else:
            raise Exception('Unsupported layer type : ' + type)
Example #10
def get_classifier(clf_name, params):
    clf = None
    data = load_data()
    if clf_name == "Случайный лес":  # Russian UI label for "Random Forest"
        clf = RandomForest(data, params['n_start'], params['n_stop'], params['n_num'])
    elif clf_name == "LightGBM":
        clf = ml.LGBM( params['num_leaves'], params['n_estimators'],  params['min_child_samples'])
    elif clf_name == "Stochastic Gradient Decent":
        clf = ml.SGD(params['al'], params['epsilon'], params['eta'], params['n_iter'])
    elif clf_name == "Decision Tree":
        clf = ml.DT(params['min_samples_splitint'], params['min_samples_leaf'], params['ccp_alphanon_negative'])    
    elif clf_name == "Naive Bayes":
        clf = ml.GNB()
    elif clf_name == "Support Vector Machines":
        clf = ml.SVM(params['С'], params['degree'], params['cache'])  # note: the 'С' key is the Cyrillic letter Es
    elif clf_name == "KNN":
        clf = ml.KNN(params['n_neighbors'], params['leaf_size'], params['p'])
    elif clf_name == "Logistic Regression":
        clf = ml.LOR(params['С'], params['max_iter'])
    elif clf_name == "Random Forest":
        clf = ml.RF(params['max_depth'], params['min_samples_split'], params['min_samples_leaf'])
    elif clf_name == "Linear Regression":
        clf = ml.LR()
    elif clf_name == "Logistic Regression":
        clf = ml.LOR(params['С'], params['max_iter'])
    elif clf_name == "XGBoost":
        clf = ml.XGB()
    elif clf_name == "ANN":
        clf = ann.ANN(params['epo'], params['batch_size'])
    return clf
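
A hedged usage sketch; the dictionary keys come straight from the branches above, but the concrete values are invented:

params = {'epo': 30, 'batch_size': 64}  # hypothetical values
clf = get_classifier("ANN", params)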
Example #11
def initPop(size, noInputs, mi, mx):

    pop = []
    for i in range(size):
        toAdd = ANN.ANN(noInputs, 6, mi, mx)
        pop.append(toAdd)
    return pop
Example #12
def vote_predict(model_list, ensemble_size, data):
    votes = 0
    for m in model_list:
        preds = m.predict(data)
        votes += ann.classify(preds)

    return votes / ensemble_size
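
Assuming ann.classify returns a one-hot class-indicator matrix (which the summing implies), the averaged votes reduce to final labels with an argmax; if the votes are integer-typed, Python 2 division would truncate, so this sketch assumes Python 3:

import numpy as np

# Hypothetical usage with model_list and data as above.
avg_votes = vote_predict(model_list, len(model_list), data)
predicted_labels = np.argmax(avg_votes, axis=1)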
Example #13
    def calc_cerror(preds, labels):

        classes = ann.classify(preds)
        num_data = preds.shape[0]
        diff = classes - labels
        c_err = (1 / (2 * num_data)) * np.sum(np.sum(np.abs(diff)))
        return c_err
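
The 1/(2 * num_data) factor works because, with one-hot rows, each misclassified sample contributes |1-0| + |0-1| = 2 to the double sum, turning it into a plain error rate. A quick check:

import numpy as np

# One sample, three classes: predicted class 0, true class 2.
classes = np.array([[1, 0, 0]])
labels = np.array([[0, 0, 1]])
print(np.sum(np.abs(classes - labels)) / (2.0 * classes.shape[0]))  # -> 1.0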
Example #14
def main():
    print "********** start time is = ", time.strftime("%H:%M:%S",
                                                       time.localtime())
    try:
        with Timer() as t:
            fileW = createAnOutputFile()
            model = ANN.ANN()
            numOfPop = 50  # should be 50 population
            numOfFea = 385  # should be 385 descriptors
            unfit = 1000
            # Final model requirements
            R2req_train = .6
            R2req_validate = .5
            R2req_test = .5
            # get training, validation, test data and rescale
            TrainX, TrainY, ValidateX, ValidateY, TestX, TestY = FromDataFileMLR_DE_BPSO.getAllOfTheData(
            )
            TrainX, ValidateX, TestX = FromDataFileMLR_DE_BPSO.rescaleTheData(
                TrainX, ValidateX, TestX)
    finally:
        print("Time to load and rescale data: {:.03f} sec".format(t.interval))

    # initial velocities, numbers between 0 and 1
    velocity = createInitVelMat(numOfPop, numOfFea)
    unfit = 1000
    fittingStatus = unfit
    try:
        with Timer() as t:
            while (fittingStatus == unfit):
                # create initial population and find fitness for each row in population
                population = createInitPopMat(numOfPop, numOfFea)
                fittingStatus, fitness = FromFinessFileMLR_DE_BPSO.validate_model(
                    model, fileW, population, TrainX, TrainY, ValidateX,
                    ValidateY, TestX, TestY)
    finally:
        print "Validated model: {} min".format((t.interval / 60))

    try:
        with Timer() as t:
            # initialize global best row and fitness to first population row
            globalBestRow = InitializeGlobalBestRow(population[0])
            globalBestFitness = fitness[0]
            # find actual global best row and fitness
            globalBestRow, globalBestFitness = findGlobalBest(
                population, fitness, globalBestRow, globalBestFitness)
            # initialize local best matrix (Pid) with current population matrix
            # initialize local best fitness with current fitness vector
            localBestMatrix = CreateInitialLocalBestMatrix(population)
            localBestFitness = CreateInitialLocalBestFitness(fitness)
            # parent population is current population
            parentPop = getParentPopulation(population)
    finally:
        print("Time to initialize data: {:.03f} sec".format(t.interval))

    print "Starting iteration loop at ", time.strftime("%H:%M:%S",
                                                       time.localtime())
    IterateNtimes(model, fileW, fitness, velocity, population, parentPop,
                  localBestFitness, localBestMatrix, globalBestRow,
                  globalBestFitness, TrainX, TrainY, ValidateX, ValidateY,
                  TestX, TestY)
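
createInitVelMat is not shown; given the comment above about initial velocities being numbers between 0 and 1, a one-line sketch:

import numpy as np

def createInitVelMat(numOfPop, numOfFea):
    # Hypothetical sketch: uniform random initial velocities in [0, 1).
    return np.random.random((numOfPop, numOfFea))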
Example #15
File: GUI.py Project: ranahiren27/BCI
 def training_ANN(self):
     # Normalize because the collective signals have very high energy (E)
     init.INPUT_DATASETs = np.divide(init.INPUT_DATASETs, 500)
     
     self.NN = ann.Neural_Network(Lambda = 0.0001)
     self.T = ann.trainer(self.NN)
     self.T.train(init.INPUT_DATASETs, init.OUTPUT_DATASETs)
     
     ''' Draw training data, relation between T and error E '''
     plt.figure(1)
     plt.plot(self.T.E, label = 'Train line', linewidth = 2.0)
     plt.legend()
     
     plt.grid(1)
     plt.xlabel('Epochs')
     plt.ylabel('Cost')
     plt.show()
Example #16
    def create(jsonFilePath, dataset):
        try:
            with open('schemas/estSchema.json') as schema_file:
                estimatorSchema = json.load(schema_file)
        except FileNotFoundError as err:
            template = "An exception of type {0} occurred. Arguments: {1!r}"
            message = template.format(type(err).__name__, err.args)
            print(message)
            raise ValueError(error.errors['estimator_config'])

        try:
            with open(jsonFilePath) as json_file:
                try:
                    jsonData = json.load(json_file)
                    validate(instance=jsonData, schema=estimatorSchema)
                except jsonschema.exceptions.ValidationError as err:
                    template = "An exception of type {0} occurred. Arguments: {1!r}"
                    message = template.format(type(err).__name__, err.args)
                    print(message)
                    raise ValueError(error.errors['estimator_config'])
                except ValueError as err:
                    template = "An exception of type {0} occurred. Arguments: {1!r}"
                    message = template.format(type(err).__name__, err.args)
                    print(message)
                    raise ValueError(error.errors['estimator_config'])

                if jsonData['estimator'].startswith('KNeighbors'):
                    import Knn  #as Knn
                    esti = Knn.Knn(jsonData)
                elif jsonData['estimator'].startswith('DecisionTree'):
                    import DecisionTree
                    esti = DecisionTree.DecisionTree(jsonData)
                elif jsonData['estimator'].startswith('RandomForest'):
                    import RandomForest
                    esti = RandomForest.RandomForest(jsonData)
                elif jsonData['estimator'] == 'LinearSVC' or jsonData[
                        'estimator'] == 'LinearSVR':
                    import SVM
                    esti = SVM.SVM(jsonData)
                elif jsonData['estimator'].startswith('ANN'):
                    import ANN
                    esti = ANN.ANN(jsonData)
                elif jsonData['estimator'] == 'TripleES':
                    import TripleES
                    esti = TripleES.TripleES(jsonData)
                else:
                    est_str = jsonData['estimator']
                    print(f'Invalid value for estimator name: {est_str}')
                    raise ValueError(error.errors['estimator_config'])

#esti.parse(jsonData) # right???
                esti.assign_dataset(dataset)
                return esti
        except FileNotFoundError as err:
            template = "An exception of type {0} occurred. Arguments: {1!r}"
            message = template.format(type(err).__name__, err.args)
            print(message)
            raise ValueError(error.errors['estimator_config'])
Example #17
def ANNforecasting(dataset,
                   inputDim,
                   hiddenNum=50,
                   outputDim=1,
                   epoch=20,
                   batchSize=30):

    # Normalize the data
    #dataset = dataset.reshape(-1, 1)
    scaler = MinMaxScaler(feature_range=(0.0, 1.0))
    dataset = scaler.fit_transform(dataset)

    # Split the sequence into samples; use the standard layout here rather than the RNN one
    train, test = util.divideTrainTest(dataset)

    trainX, trainY = util.createSamples(train, inputDim, RNN=False)
    testX, testY = util.createSamples(test, inputDim, RNN=False)
    print("trainX shape is", trainX.shape)
    print("trainY shape is", trainY.shape)

    # Build the model and train it
    ANNModel = ANN.ANNModel(inputDim, hiddenNum, outputDim)
    t1 = time.time()
    ANNModel.train(trainX, trainY, epoch, batchSize)
    t2 = time.time() - t1
    print("train time is", t2)

    # Predict
    trainPred = ANNModel.predict(trainX)
    testPred = ANNModel.predict(testX)

    # Invert the scaling
    trainPred = scaler.inverse_transform(trainPred)
    trainY = scaler.inverse_transform(trainY)
    testPred = scaler.inverse_transform(testPred)
    testY = scaler.inverse_transform(testY)
    dataset = scaler.inverse_transform(dataset)

    # Evaluation metrics
    # MAE = eval.calcMAE(trainY, trainPred)
    # print ("train MAE",MAE)
    # MRSE = eval.calcRMSE(trainY, trainPred)
    # print ("train MRSE",MRSE)
    # MAPE = eval.calcMAPE(trainY, trainPred)
    # print ("train MAPE",MAPE)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    #MAPE = eval.calcMAPE(testY,testPred)
    #print ("test MAPE",MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    util.plot(trainPred, trainY, testPred, testY)

    return trainPred, testPred, MAE, MRSE, SMAPE
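
A hedged usage sketch; the synthetic sine series is invented, but the (-1, 1) shape matches the reshape hint commented out at the top of the function:

import numpy as np

# Hypothetical call on a synthetic univariate series.
dataset = np.sin(np.linspace(0, 50, 500)).reshape(-1, 1)
trainPred, testPred, MAE, MRSE, SMAPE = ANNforecasting(dataset, inputDim=24)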
Example #18
def calc_vote_entropy(model_list, ensemble_size, digits):
    vote_mat = np.zeros([digits.shape[0],10])
    for m in model_list:
        pred = m.predict(digits)
        votes = ann.classify(pred)
        vote_mat += votes

    vote_rate = vote_mat / ensemble_size
    return np.mean(entropy(vote_rate.transpose()))
Example #19
def genetic(a, b, c):
    # initialize current population to random values within range
    curPop = np.random.choice(np.arange(-0.2367, 0.2367, step=0.0001),
                              size=(params[0], params[3]), replace=False)
    nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
    fitVec = np.zeros((params[0], 2))  # 1st col is indices, 2nd col is cost
    for i in range(params[2]):  # iterate through num generations
        # create a vector of all errors from the cost function
        fitVec = np.array([np.array([x, np.sum(NN.costFunction(a, b, curPop[x].reshape(18, 1)))])
                           for x in range(params[0])])

        # tournament selection of winners
        winners = np.zeros((params[4], params[3]))
        for n in range(len(winners)):
            selected = np.random.choice(range(len(fitVec)), params[4], replace=False)
            wnr = np.argmin(fitVec[selected, 1])
            winners[n] = curPop[int(fitVec[selected[wnr]][0])]
        nextPop[:len(winners)] = winners  # populate new gen with winners
        # populate the rest of the generation with offspring of mating pairs
        nextPop[len(winners):] = np.array([
            np.random.permutation(np.repeat(winners[:, x],
                                            (params[0] - len(winners)) // len(winners),
                                            axis=0))
            for x in range(winners.shape[1])]).T
        # mutate genes with probability params[1]
        nextPop = np.multiply(nextPop, np.matrix([
            float(np.random.normal(0, 2, 1)) if random.random() < params[1] else 1
            for x in range(nextPop.size)]).reshape(nextPop.shape))
        curPop = nextPop
    best_soln = curPop[np.argmin(fitVec[:, 1])]
    return np.round(NN.runForward(c, best_soln.reshape(18, 1)))
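
The function reads a global `params` it never defines. Its layout can be inferred from the indexing above; the concrete values below are invented placeholders (chosen so the offspring count divides evenly):

# Inferred layout: params[0] population size, params[1] mutation rate,
# params[2] number of generations, params[3] genome length (must be 18 to
# match reshape(18, 1)), params[4] number of tournament winners.
params = (90, 0.05, 200, 18, 30)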
Example #20
File: ANN_loader.py Project: mk2908/WSD
    def _createNet(self, type):
        """Instantiate the correct network type attribute from the
      XML file. Complain if there is no support for the specified
      type."""

        if ('bpn' == type):
            self._network = ANN.BackPropNet()
            self._nettype = 'bpn'
        else:
            raise Exception('Unsupported network type : ' + type)
Example #21
def oldnetlist(netsize, inpnodes, lb, ub):
    netlist = []

    for i in range(0, netsize):
        base = []
        #Creating a default NeuralNet with weights randomly distributed around the base value (triangular distribution)
        netelement = ANN.NeuralNet(inpnodes, lb, ub, base)
        #Adding this NeuralNet to the netlist
        netlist.append(netelement)
    return netlist
Example #22
def learn(width, depth, train_file, test_file, iters=5000):
    train_data, test_data = file_data(train_file), file_data(test_file)
    ann = ANN.ArtificialNeuralNetwork(train_data, test_data, width, depth,
                                      iters)
    print("Correct/Incorrect for Training Set: ",
          ann.correct_div_incorrect_examples("train"))
    print("Correct/Incorrect for Test Set: ",
          ann.correct_div_incorrect_examples("test"))
    #        ann.plot_error_v_iter(description, "test")
    return ann
Example #23
 def test_init(self):
     network = ANN.basic_ANN(2, 3, 2)
     self.assertTrue(network.input_weights.shape == (3, 2))
     self.assertTrue(network.output_weights.shape == (2, 3))
     for x in network.input_weights:
         for y in x:
             self.assertTrue(y != 0)
     for x in network.output_weights:
         for y in x:
             self.assertTrue(y != 0)
Example #24
    def __init__(self, genome = None):
        pg.sprite.Sprite.__init__(self)
        self.image = pg.image.load("images/bird1.png")
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH/2, random.randint(15,HEIGHT-SAND_HEIGHT)-5)

        #self.pos = vec2(WIDTH/2, HEIGHT/2)
        self.pos = vec2(WIDTH/2, random.randint(15,HEIGHT-SAND_HEIGHT)-5)

        self.vel = vec2(0,0)
        self.acc = vec2(0, GRAVITY)

        self.live = 1

        self.sensor = Sensor()

        if genome is None:
            self.ANN = ANN()
        else:
            self.ANN = ANN(genome)
Example #25
def validate():
    conv_layer_1 = CNN.CNNLayer(3, 3, 1, 32)

    conv_layer_2 = CNN.CNNLayer(32, 5, 2, 64)

    pooling_layer_1 = CNN.PoolingLayer(2,2)

    conv_layers = [conv_layer_1, conv_layer_2, pooling_layer_1]
    ann = ANN.ANN(2304, [128, 3])

    # 0 = Airplane
    # 2 = Bird
    # 8 = Ship

    conv_layer_1.load("data/3-network/CNNL1.npz")
    conv_layer_2.load("data/3-network/CNNL2.npz")
    ann.load("data/3-network/FCL.npz")
    
    batch = data_batch.DataBatch("cifar-10-python/cifar-10-batches-py/test_batch")
    filteredImages = []
    filteredLabels = []
    for x in range(len(batch.labels)):
        if(batch.labels[x] == 0):
            filteredImages.append(batch.images[x])
            filteredLabels.append(0)
        if(batch.labels[x] == 2):
            filteredImages.append(batch.images[x])
            filteredLabels.append(1)
        if(batch.labels[x] == 8):
            filteredImages.append(batch.images[x])
            filteredLabels.append(2)
    batch.images = np.array(filteredImages)
    batch.labels = np.array(filteredLabels)
    data = batch.images
    for i in range(len(conv_layers)):
        data = conv_layers[i].forward(data)

    flattened = data.reshape(data.shape[0], data.shape[1] * data.shape[2] * data.shape[3])

    output = ann.prop_forward(flattened)

    right = 0
    seen = 0
    for i in range(batch.images.shape[0]):
        outputNum = -1
        biggest = -1
        for j in range(0, 3):
            if output[i][j] > biggest:
                outputNum = j
                biggest = output[i][j]
        seen+=1
        if outputNum == batch.labels[i]:
            right += 1
    print("Right: " + str(right) + " / " + str(seen) + " - " + "{0:.0%}".format(right/seen))
Example #26
    def __init__(self, layer_sizes=[16, 2000, 4], lr=.1, activation=ANN.relu, max_iterations=10, rand_limit_min=-.02, rand_limit_max=.02,
                 learningSet = [], learningSet_answ = [], testSet = [], testSet_answ = []):

        # Converting data...
        print("Converting data...")
        learningSet = self.convert_input_divide_relative_to_max(learningSet)
        learningSet_answ = self.convert_answers(learningSet_answ)
        testSet = self.convert_input_divide_relative_to_max(testSet)

        # Builds the network
        self.neuralNet = ANN.neuralnetwork(layer_sizes=layer_sizes, lr=lr, activation=activation, max_iterations=max_iterations, rand_limit_min=rand_limit_min,
                                           rand_limit_max=rand_limit_max, learningSet=learningSet, learningSet_answ=learningSet_answ, testSet=testSet, testSet_answ=testSet_answ)
Example #27
def train():
    batch_files = ["cifar-10-python/cifar-10-batches-py/data_batch_1",
    "cifar-10-python/cifar-10-batches-py/data_batch_2",
    "cifar-10-python/cifar-10-batches-py/data_batch_3",
    "cifar-10-python/cifar-10-batches-py/data_batch_4",
    "cifar-10-python/cifar-10-batches-py/data_batch_5"]
    conv_layer_1 = CNN.CNNLayer(3, 3, 1, 32)

    conv_layer_2 = CNN.CNNLayer(32, 5, 2, 64)

    pooling_layer_1 = CNN.PoolingLayer(2,2)

    conv_layers = [conv_layer_1, conv_layer_2, pooling_layer_1]
    ann = ANN.ANN(2304, [128, 3])

    # 0 = Airplane
    # 2 = Bird
    # 8 = Ship

    conv_layer_1.load("data/3-network/CNNL1.npz")
    conv_layer_2.load("data/3-network/CNNL2.npz")
    ann.load("data/3-network/FCL.npz")
    
    for epoch in range(0, 40):
        print ("epoch " + str(epoch))
        batch_sizes = 64
        for filename in batch_files:
            batch = data_batch.DataBatch(filename)
            filteredImages = []
            filteredLabels = []
            for x in range(len(batch.labels)):
                if(batch.labels[x] == 0):
                    filteredImages.append(batch.images[x])
                    filteredLabels.append(0)
                if(batch.labels[x] == 2):
                    filteredImages.append(batch.images[x])
                    filteredLabels.append(1)
                if(batch.labels[x] == 8):
                    filteredImages.append(batch.images[x])
                    filteredLabels.append(2)
            batch.images = np.array(filteredImages)
            batch.labels = np.array(filteredLabels)

            print("Running on file " + filename[-12:])
            order = list(range(0, len(batch.images), batch_sizes))
            random.shuffle(order)
            for i in order:
                conv_layers, ann = trainOn(batch.images[i:i+batch_sizes], batch.labels[i:i+batch_sizes],
                conv_layers, ann, True, 0.000004 * math.pow(0.96, epoch), 0.7) #Decay and momentum
            print("saving")
            conv_layer_1.save("data/3-network/CNNL1.npz")
            conv_layer_2.save("data/3-network/CNNL2.npz")
            ann.save("data/3-network/FCL.npz")
Example #28
  def __init__(self, X_train, Y_train, Net='LeNet5', opti='SGDMomentum'):
    # Prepare Data: Load, Shuffle, Normalization, Batching, Preprocessing
    self.X_train = X_train
    self.Y_train = Y_train

    self.batch_size = 64
    # D_in: input depth of network, 784, 28*28 input grayscale image
    self.D_in = 784
    # D_out: output depth of network = 10, the 10 digits
    self.D_out = 10

    print ('  Net: ' + str(Net))
    print ('  batch_size: ' + str(self.batch_size))
    print ('  D_in: ' + str(self.D_in))
    print ('  D_out: ' + str(self.D_out))
    print ('  Optimizer: ' + opti)

    # =======================
    if Net == 'TwoLayerNet':
      # H is the size of the one hidden layer.
      H=400
      self.model = ANN.TwoLayerNet (self.D_in, H, self.D_out)
    elif Net == 'ThreeLayerNet':
      # H1, H2 are the size of the two hidden layers.
      H1=300
      H2=100
      self.model = ANN.ThreeLayerNet (self.D_in, H1, H2, self.D_out)
      
    elif Net == 'LeNet5':
      self.model = CNN.LeNet5()

    # store training loss over iterations, for later visualization
    self.losses = []

    if opti == 'SGD':
      self.opti = optimizer.SGD (self.model.get_params(), lr=0.0001, reg=0)
    else:
      self.opti = optimizer.SGDMomentum (self.model.get_params(), lr=0.0001, momentum=0.80, reg=0.00003)

    self.criterion = loss.CrossEntropyLoss()
Example #29
def test_out_digits(model, data, labels):
    #print("===== TESTING THE CURRENT DIGITS =====")
    rescaled = list(map(dutils.unpad_img, data))
    rescaled = list(
        map(
            lambda img: dutils.center_box_image(dutils.resize_image(img, 20),
                                                20, 4), rescaled))
    testing_data = np.array(rescaled)
    testing_data = utils.normalize_data(testing_data)
    testing_data_size = testing_data.shape[0]
    return ann.test_model(model,
                          testing_data.reshape(testing_data_size, 28, 28, 1),
                          labels)
Example #30
 def test_init(self):
     self.network = ANN.basic_ANN(2, 3, 2)
     self.assertTrue(self.network.input_nodes == 2)
     self.assertTrue(self.network.hidden_nodes == 3)
     self.assertTrue(self.network.output_nodes == 2)
     print "input weights"
     print self.network.input_weights
     print "output weights"
     print self.network.output_weights
     print ""
     for x in self.network.input_weights:
         for y in x:
             self.assertTrue(y != 0)
     for x in self.network.output_weights:
         for y in x:
             self.assertTrue(y != 0)
     test_output = self.network.feed_forward([np.array([[1], [0]])])
     print "test output"
     print test_output
     for x in range(0, 2):
         self.assertTrue(test_output[x] == ANN.sigmoid(
             self.network.input_weights[x][0]))
Example #31
 def __init__(self):
     self.numAttributes = ['studytime','failures','freetime','absences','G1','G2','G3']
     self.textAttributes = ['paid','higher','internet','schoolsup']
     self.popSize = 20
     self.population = []
     self.children = [0 for x in range(self.popSize)]
     self.fitness = []
     self.weights = 310
     self.ANN = ANN.ANN()
     self.data = CsvReader.readCsv('../student-mat.csv',self.numAttributes,self.textAttributes)
     self.results = []
     self.averageFitness = 0.0
     self.best = sys.maxsize
Example #32
def test_digits(data, labels, merge_model, model_list):
    # note: ensemble_size is assumed to be defined at module scope
    data_count = len(data)
    c_errors = np.zeros(data_count)
    pred_bits = np.zeros(data_count)
    class_bits = np.zeros(data_count)
    for i, d in zip(range(data_count), data):
        c_errors[i] = ann.test_model(merge_model, [d] * ensemble_size,
                                     labels,
                                     metric="c_error")
        pred_bits[i] = np.mean(
            calc_shannon_entropy(merge_model.predict([d] * ensemble_size)))
        class_bits[i] = np.mean(calc_class_entropy(model_list, d))
    return c_errors, pred_bits, class_bits
Example #33
def backprop_XOR():
    (I,T)=genData.my_xor()
    (M_I,N_I) = I.shape
    (M_T,N_T) = T.shape
    M_H = 4
    (W_IH,W_HO,net_H,net_O,A_H,A_O,Delta_H,Delta_O,DeltaW_IH,DeltaW_HO) = ANN.allocate_feedForward_ANN(N_I,M_I,M_H,M_T,valRange=(-0.4,0.4))
    uhs.learningRate.rate = 0.000001
    trainError = bp.backProp(I,T,W_IH,W_HO,
                             net_H,net_O,A_H,A_O,Delta_H,
                             Delta_O,DeltaW_IH,DeltaW_HO,
                             (afs.sigmoid,afs.sigmoid_prime),
                             (afs.sigmoid,afs.sigmoid_prime),
                             (efs.sumSquaredError,efs.sumSquaredError_prime),
                             maxEpochs=100000,epsilon=10**-8,
                             updateHooks=(uhs.learningRate,uhs.momentum))
Example #34
File: ann_test.py Project: DaniM/mscthesis
def testGradient():
    '''
    Check if the gradient is calculated correctly
    '''
    input_layer_size = 3
    hidden_layer_size = 5
    num_labels = 3
    m = 5

    # generate some 'random' test data
    Theta1 = debugInitializeWeights(input_layer_size,hidden_layer_size)
    Theta2 = debugInitializeWeights(hidden_layer_size,num_labels)
    # Reusing debugInitializeWeights to generate X
    X  = debugInitializeWeights(input_layer_size - 1,m)
    y = np.array([1 + ((i + 1)%num_labels) for i in xrange(m)])
    
    print 'Theta 1'
    print Theta1
    
    print 'Theta 2'
    print Theta2
    
    print 'X'
    print X
    
    print 'y'
    print y

    # Add ones to the X data matrix
    X = np.hstack( (np.ones((m, 1)), X) )
    #Convert y 
    Y = np.zeros((m, num_labels));
    for i in xrange(m):
        Y[i,((y[i]-1)%num_labels)] = 1;
    y = Y;

    # Unroll parameters
    nn_params = np.concatenate((Theta1.flatten(), Theta2.flatten()))  # both are 1-D
    
    results5 = [-9.2783e-003,8.8991e-003,-8.3601e-003,7.6281e-003,-6.7480e-003]
    
    J,grad = ANN.costFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y)
    
    print grad[0:5],results5
    print grad
Example #35
	else:
		testingIDs = dataHandler.getTestingSetIDs(folder + typeSuffix + fileName);
		testData = dataHandler.getSampledData(dataset, testingIDs, 
											isTraining = False, truncate = True);
		testSynapses = getSynapses(folder + fileName + "-" + annType + truncatedSuffix);
		dataHandler.dumpPickle(testSynapses, fileName + truncatedSuffix + synapsesFileSuffix);

		noIndices = [j for j in range(0, len(testData[REWARD_SIGNALS])) 
										if testData[REWARD_SIGNALS][j] == -1]; # rs | rs = -1
		yesIndices = [p for p in range(0, len(testData[REWARD_SIGNALS])) 
										if testData[REWARD_SIGNALS][p] > -1]; # rs | rs > -1


		# print(len(noIndices), len(yesIndices));
		error = ANN.evaluateSynapses(testSynapses, 
									testData[SENSOR_DATA], testData[REWARD_SIGNALS],
									noIndices, yesIndices, 
									controlledError = useControlledError);
		testDataErrorList.append(error);

		print(i, error);

		dataHandler.dumpPickle(error, wholeFileName + evalFileSuffix);

		shuffledRSTrialResults = [];
		startTime = datetime.datetime.now();
		for k in range(nTrials):
			shuffledRSs = [testData[REWARD_SIGNALS][index] 
						  for index in np.random.permutation(len(testData[REWARD_SIGNALS]))];

			noIndicesShuffled = [a for a in range(0, len(testData[REWARD_SIGNALS]))
											if shuffledRSs[a] == -1]; # rs | rs = -1
Example #36
def trainSimplestANNCMA(synapses, inputs, RSs, noIndices, yesIndices):
	synapsesReshaped = ANN.getSimplestANNWeights(synapses, robotType = "complex");

	return ANN.evaluateSynapses(synapsesReshaped, inputs, RSs, noIndices, yesIndices);
Example #37
def setOutput(out):
    global output
    output = out
    ANN.setOutput(out)
Example #38
def main(argv):
	
	#parse arguments
	#python UsingANN.py neuralnetworkfile [commandRate(default =40)]
	#check for the right number of arguments
	if (len(argv)<1):
		print"\nMust provide neural network file name"
		sys.exit(2)

	nnfile = argv[0] + '.txt'

	commandRate = -1
	if(len(argv)>1):
		commandRate = float(argv[1])
	
	#open log file for writing
	logfile = 'log'
	if(len(argv)>2):
		logfile = argv[2]+ '.txt'
	f = open(logfile, 'w')
	f.write("Start logging...")

	#Creating the Neural Network using a text file
	testann = ANN(1)
	testann.create_network(nnfile)
	#Loading the ANN with 0s initially
	print "\nLoading the ANN [0, 0, 0, 0]"
	inputvals = [0.0, 0.0, 0.0, 0.0]
	testann.load_NN(inputvals)
	#initialize the neural network using CTRNN_Controller()
	print "\nUsing CTRNN_Controller: dt = .02"
	nnoutput = testann.CTRNN_Controller(.02)

	for node in nnoutput:
		print node.get_output()

	robot = RobotPi()#Query Aracna on current sensors
	if(commandRate>0):
		robot = RobotPi(commandRate)
	
	val  = "\nneural network file: {0}\ncommandRate: {1}".format(nnfile, commandRate)
	line  = str(val)
	f.write(line)
	commanded_pos = []

	while(True):
		#Query Aracna on current sensors
		#old_pos = current_pos
		current_pos = robot.readCurrentPosition()#returns a list of 8 servo positions
		val = "\ncurrent_pos: {0}".format(current_pos)
		line = str(val)
		f.write(line)
		#if(commanded_pos is empty) load and activate NN
		#pos = [right hip, right knee, back hip, back knee, front hip, front knee, left hip, left knee]
		#sensors = [right knee, back knee, front knee, left knee]
		print "\nafter reading current pos\t", current_pos
		for ii in range(0, 1):
			sensors = []
			for i in [0, 2, 4, 6]:
				current_pos[i] = max(min(MAX_HIP, current_pos[i]), MIN_HIP) #restrict servo pos [0, 1024]
				current_pos[i+1] = max(min(MIN_KNEE, current_pos[i+1]), MAX_KNEE) #restrict servo pos [1024, 0]
				value = knee_to_NN(current_pos[i+1]) #convert to neural network sensor bounds [-20, 20] degrees
			#sensors.append(value * (M_PI/180.0)) #convert to radians
			sensors.append(knee_to_NN(current_pos[3])*(M_PI/180.0)) #back knee
			sensors.append(knee_to_NN(current_pos[5])*(M_PI/180.0)) #front knee
			sensors.append(knee_to_NN(current_pos[1])*(M_PI/180.0)) #right knee
			sensors.append(knee_to_NN(current_pos[7])*(M_PI/180.0)) #left knee
			#Load sensors into ANN
			testann.load_NN(sensors)
			#Get ANN nnoutput [0, 1]
			''' [0] back knee
			[1] back outhip
			[2] back hip = 0.0
			[3] front hip = 0.0
			[4] front outhip
			[5] front knee
			[6] right knee
			[7] right outhip
			[8] right hip = 0.0
			[9] left hip = 0.0
			[10] left outhip
			[11] left knee '''
			#print "\nPropagating the ANN"
			nnoutput = testann.output_NN(.01)
			'''current_pos[3] = knee_to_POS(nnoutput[0].get_output()) #back knee
			current_pos[5] = knee_to_POS(nnoutput[5].get_output()) #front knee
			current_pos[1] = knee_to_POS(nnoutput[6].get_output()) #right knee
			current_pos[7] = knee_to_POS(nnoutput[11].get_output()) #left knee
			'''
		#Map nnoutput from [0, 1] to actual servo pos
		#print "nnoutput" 
		#for node in nnoutput:
                	#print node.get_output()
		desired_pos = []
		#desired_pos[0] = MIN_NNKNEE + (MAX_NNKNEE-MIN_NNKNEE)*nnoutput[0] #right knee [-20, 20]
		#output [back knee, back hip, 0.0, fro
		desired_pos.append(hip_to_POS(nnoutput[7].get_output())*2) #right hip convert from [0, 1] to [0, 1024]
		desired_pos.append(knee_to_POS(nnoutput[6].get_output())*2) #right knee convert from [0, 1] to [1024, 0]
		
		desired_pos.append(hip_to_POS(nnoutput[1].get_output())*2)#back hip 
		desired_pos.append(knee_to_POS(nnoutput[0].get_output())*2) #back knee
		
		desired_pos.append(hip_to_POS(nnoutput[4].get_output())*2) #front hip
		desired_pos.append(knee_to_POS(nnoutput[5].get_output())*2) #front knee
		
		desired_pos.append(hip_to_POS(nnoutput[10].get_output())*2) #left hip
		desired_pos.append(knee_to_POS(nnoutput[11].get_output())*2) #left knee
		#current_pos = desired_pos
		#for node in nnoutput:
                 #       print node.get_output() 
		#Move Aracna using nn output
		#if (is_reached(current_pos, commanded_pos)
		print"desired_pos", desired_pos
		val = "\tdesired_pos: {0}".format( desired_pos)
		line = str(val)
		f.write(line)
		robot.commandPosition(desired_pos, False)
Example #39
File: ann_test.py Project: DaniM/mscthesis
        Y[i,((y[i]-1)%num_labels)] = 1;
    y = Y;

    # Unroll parameters
    nn_params = np.concatenate((Theta1.flatten(), Theta2.flatten()))  # both are 1-D
    
    results5 = [-9.2783e-003,8.8991e-003,-8.3601e-003,7.6281e-003,-6.7480e-003]
    
    J,grad = ANN.costFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y)
    
    print grad[0:5],results5
    print grad


#test if the sigmoid grad is working properly
print ANN.sigmoidGrad(0)
print ANN.sigmoidGrad(np.zeros((3,3)))

input_layer_size  = 400;  # 20x20 Input Images of Digits
hidden_layer_size = 25;   # 25 hidden units
num_labels = 10;          # 10 labels, from 1 to 10   
                          # (note that we have mapped "0" to label 10)

#load the precalculated weights to check cost function is working
theta1 = np.loadtxt(open("testfiles/Theta1.txt","rb"),delimiter=",")
theta2 = np.loadtxt(open("testfiles/Theta2.txt","rb"),delimiter=",")
X = np.loadtxt(open("testfiles/ann_test_data.txt","rb"),delimiter=",")
y = np.loadtxt(open("testfiles/ann_test_output.txt","rb"),delimiter=",")

m = X.shape[0]
# Add ones to the X data matrix
	print("|train set|", len(trainData[IDS]))

	del dataset; #free up dataset ~300mb!

	#used in calculating controlled error
	noIndices = [i for i in range(0, len(trainData[REWARD_SIGNALS])) if trainData[REWARD_SIGNALS][i] == -1]; # rs | rs = -1
	yesIndices = [i for i in range(0, len(trainData[REWARD_SIGNALS])) if trainData[REWARD_SIGNALS][i] != -1]; # rs | rs != -1


	#Set CMAES options;
	opts = cma.CMAOptions()
	opts.set("verb_disp", 1);
	opts.set("verb_filenameprefix", folder + fileName + "-" + annType + truncateSuffix + "-CMA-");

	if(annType == "simplest"):
		initialGuess = ANN.getSimplestANNWeights(robotType = robotType);
		res = cma.fmin(trainSimplestANNCMA, initialGuess, .1, args = (trainData[SENSOR_DATA], 
								trainData[REWARD_SIGNALS], noIndices, yesIndices), options = opts);
	
	#unused branch for future development using multiple different neural networks.
	elif(annType == "CTRNN"):
		print("not there yet - exiting");
		exit();

	print(res);