Example #1
def main():
    print "********** start time is = ", time.strftime("%H:%M:%S",
                                                       time.localtime())
    try:
        with Timer() as t:
            fileW = createAnOutputFile()
            model = ANN.ANN()
            numOfPop = 50  # should be 50 population
            numOfFea = 385  # should be 385 descriptors
            unfit = 1000
            # Final model requirements
            R2req_train = .6
            R2req_validate = .5
            R2req_test = .5
            # get training, validation, test data and rescale
            TrainX, TrainY, ValidateX, ValidateY, TestX, TestY = \
                FromDataFileMLR_DE_BPSO.getAllOfTheData()
            TrainX, ValidateX, TestX = FromDataFileMLR_DE_BPSO.rescaleTheData(
                TrainX, ValidateX, TestX)
    finally:
        print("Time to load and rescale data: {:.03f} sec".format(t.interval))

    # initial velocities, numbers between 0 and 1
    velocity = createInitVelMat(numOfPop, numOfFea)
    unfit = 1000
    fittingStatus = unfit
    try:
        with Timer() as t:
            while (fittingStatus == unfit):
                # create initial population and find fitness for each row in population
                population = createInitPopMat(numOfPop, numOfFea)
                fittingStatus, fitness = FromFinessFileMLR_DE_BPSO.validate_model(
                    model, fileW, population, TrainX, TrainY, ValidateX,
                    ValidateY, TestX, TestY)
    finally:
        print "Validated model: {} min".format((t.interval / 60))

    try:
        with Timer() as t:
            # initialize global best row and fitness to first population row
            globalBestRow = InitializeGlobalBestRow(population[0])
            globalBestFitness = fitness[0]
            # find actual global best row and fitness
            globalBestRow, globalBestFitness = findGlobalBest(
                population, fitness, globalBestRow, globalBestFitness)
            # initialize local best matrix (Pid) with the current population matrix
            # initialize local best fitness with current fitness vector
            localBestMatrix = CreateInitialLocalBestMatrix(population)
            localBestFitness = CreateInitialLocalBestFitness(fitness)
            # parent population is current population
            parentPop = getParentPopulation(population)
    finally:
        print("Time to initialize data: {:.03f} sec".format(t.interval))

    print "Starting iteration loop at ", time.strftime("%H:%M:%S",
                                                       time.localtime())
    IterateNtimes(model, fileW, fitness, velocity, population, parentPop,
                  localBestFitness, localBestMatrix, globalBestRow,
                  globalBestFitness, TrainX, TrainY, ValidateX, ValidateY,
                  TestX, TestY)
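
Note: Example #1 never shows the Timer class it times blocks with. A minimal context-manager sketch that provides the t.interval attribute read above (an assumption, not the project's actual code):

import time

class Timer:
    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *exc):
        # elapsed wall-clock seconds, read back as t.interval
        self.interval = time.perf_counter() - self.start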
Example #2
def initPop(size, noInputs, mi, mx):

    pop = []
    for i in range(size):
        toAdd = ANN.ANN(noInputs, 6, mi, mx)
        pop.append(toAdd)
    return pop
def get_classifier(clf_name, params):
    clf = None
    data = load_data() 
    if clf_name == "Случайный лес":
        clf = RandomForest(data, params['n_start'], params['n_stop'], params['n_num'])
    elif clf_name == "LightGBM":
        clf = ml.LGBM(params['num_leaves'], params['n_estimators'], params['min_child_samples'])
    elif clf_name == "Stochastic Gradient Decent":
        clf = ml.SGD(params['al'], params['epsilon'], params['eta'], params['n_iter'])
    elif clf_name == "Decision Tree":
        clf = ml.DT(params['min_samples_split'], params['min_samples_leaf'], params['ccp_alpha'])
    elif clf_name == "Naive Bayes":
        clf = ml.GNB()
    elif clf_name == "Support Vector Machines":
        clf = ml.SVM(params['С'], params['degree'], params['cache'])  # note: 'С' here is a Cyrillic letter key
    elif clf_name == "KNN":
        clf = ml.KNN(params['n_neighbors'], params['leaf_size'], params['p'])
    elif clf_name == "Logistic Regression":
        clf = ml.LOR(params['С'], params['max_iter'])
    elif clf_name == "Random Forest":
        clf = ml.RF(params['max_depth'], params['min_samples_split'], params['min_samples_leaf'])
    elif clf_name == "Linear Regression":
        clf = ml.LR()
    elif clf_name == "Logistic Regression":
        clf = ml.LOR(params['С'], params['max_iter'])
    elif clf_name == "XGBoost":
        clf = ml.XGB()
    elif clf_name == "ANN":
        clf = ann.ANN(params['epo'], params['batch_size'])
    return clf
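
The long elif chain in get_classifier can also be table-driven. A hedged sketch with stand-in lambdas (the real ml.* constructors from the example are assumed, so stubs keep this runnable):

CLASSIFIERS = {
    "KNN": lambda p: ("KNN", p["n_neighbors"], p["leaf_size"], p["p"]),
    "XGBoost": lambda p: ("XGB",),
}

def get_classifier_from_table(name, params):
    # dict lookup replaces the elif chain; unknown names yield None, as above
    builder = CLASSIFIERS.get(name)
    return builder(params) if builder else None

print(get_classifier_from_table("KNN", {"n_neighbors": 5, "leaf_size": 30, "p": 2}))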
Example #4
    def create(jsonFilePath, dataset):
        try:
            with open('schemas/estSchema.json') as schema_file:
                estimatorSchema = json.load(schema_file)
        except FileNotFoundError as err:
            template = "An exception of type {0} occurred. Arguments: {1!r}"
            message = template.format(type(err).__name__, err.args)
            print(message)
            raise ValueError(error.errors['estimator_config'])

        try:
            with open(jsonFilePath) as json_file:
                try:
                    jsonData = json.load(json_file)
                    validate(instance=jsonData, schema=estimatorSchema)
                except jsonschema.exceptions.ValidationError as err:
                    template = "An exception of type {0} occurred. Arguments: {1!r}"
                    message = template.format(type(err).__name__, err.args)
                    print(message)
                    raise ValueError(error.errors['estimator_config'])
                except ValueError as err:
                    template = "An exception of type {0} occurred. Arguments: {1!r}"
                    message = template.format(type(err).__name__, err.args)
                    print(message)
                    raise ValueError(error.errors['estimator_config'])

                if jsonData['estimator'].startswith('KNeighbors'):
                    import Knn
                    esti = Knn.Knn(jsonData)
                elif jsonData['estimator'].startswith('DecisionTree'):
                    import DecisionTree
                    esti = DecisionTree.DecisionTree(jsonData)
                elif jsonData['estimator'].startswith('RandomForest'):
                    import RandomForest
                    esti = RandomForest.RandomForest(jsonData)
                elif jsonData['estimator'] == 'LinearSVC' or jsonData[
                        'estimator'] == 'LinearSVR':
                    import SVM
                    esti = SVM.SVM(jsonData)
                elif jsonData['estimator'].startswith('ANN'):
                    import ANN
                    esti = ANN.ANN(jsonData)
                elif jsonData['estimator'] == 'TripleES':
                    import TripleES
                    esti = TripleES.TripleES(jsonData)
                else:
                    est_str = jsonData['estimator']
                    print(f'Invalid value for estimator name: {est_str}')
                    raise ValueError(error.errors['estimator_config'])

                # esti.parse(jsonData)  # right???
                esti.assign_dataset(dataset)
                return esti
        except FileNotFoundError as err:
            template = "An exception of type {0} occurred. Arguments: {1!r}"
            message = template.format(type(err).__name__, err.args)
            print(message)
            raise ValueError(error.errors['estimator_config'])
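
The same exception-reporting template is repeated in every except block of create(); one helper would keep it in a single place. A minimal sketch:

import json

def report(err):
    # the message template repeated throughout create(), factored out
    template = "An exception of type {0} occurred. Arguments: {1!r}"
    print(template.format(type(err).__name__, err.args))

try:
    json.loads("{not valid json")
except ValueError as err:
    report(err)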
Example #5
def validate():
    conv_layer_1 = CNN.CNNLayer(3, 3, 1, 32)

    conv_layer_2 = CNN.CNNLayer(32, 5, 2, 64)

    pooling_layer_1 = CNN.PoolingLayer(2,2)

    conv_layers = [conv_layer_1, conv_layer_2, pooling_layer_1]
    ann = ANN.ANN(2304, [128, 3])

    # 0 = Airplane
    # 2 = Bird
    # 8 = Ship

    conv_layer_1.load("data/3-network/CNNL1.npz")
    conv_layer_2.load("data/3-network/CNNL2.npz")
    ann.load("data/3-network/FCL.npz")
    
    batch = data_batch.DataBatch("cifar-10-python/cifar-10-batches-py/test_batch")
    filteredImages = []
    filteredLabels = []
    for x in range(len(batch.labels)):
        if(batch.labels[x] == 0):
            filteredImages.append(batch.images[x])
            filteredLabels.append(0)
        if(batch.labels[x] == 2):
            filteredImages.append(batch.images[x])
            filteredLabels.append(1)
        if(batch.labels[x] == 8):
            filteredImages.append(batch.images[x])
            filteredLabels.append(2)
    batch.images = np.array(filteredImages)
    batch.labels = np.array(filteredLabels)
    data = batch.images
    for i in range(len(conv_layers)):
        data = conv_layers[i].forward(data)

    flattened = data.reshape(data.shape[0], data.shape[1] * data.shape[2] * data.shape[3])

    output = ann.prop_forward(flattened)

    right = 0
    seen = 0
    for i in range(batch.images.shape[0]):
        outputNum = -1
        biggest = -1
        for j in range(0, 3):
            if output[i][j] > biggest:
                outputNum = j
                biggest = output[i][j]
        seen += 1
        if outputNum == batch.labels[i]:
            right += 1
    print("Right: " + str(right) + " / " + str(seen) + " - " + "{0:.0%}".format(right/seen))
Example #6
    def __init__(self, genome = None):
        pg.sprite.Sprite.__init__(self)
        self.image = pg.image.load("images/bird1.png")
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH/2, random.randint(15,HEIGHT-SAND_HEIGHT)-5)

        #self.pos = vec2(WIDTH/2, HEIGHT/2)
        self.pos = vec2(WIDTH/2, random.randint(15,HEIGHT-SAND_HEIGHT)-5)

        self.vel = vec2(0,0)
        self.acc = vec2(0, GRAVITY)

        self.live = 1

        self.sensor = Sensor()

        if genome is None:
            self.ANN = ANN()
        else:
            self.ANN = ANN(genome)
Example #7
def train():
    batch_files = ["cifar-10-python/cifar-10-batches-py/data_batch_1",
    "cifar-10-python/cifar-10-batches-py/data_batch_2",
    "cifar-10-python/cifar-10-batches-py/data_batch_3",
    "cifar-10-python/cifar-10-batches-py/data_batch_4",
    "cifar-10-python/cifar-10-batches-py/data_batch_5"]
    conv_layer_1 = CNN.CNNLayer(3, 3, 1, 32)

    conv_layer_2 = CNN.CNNLayer(32, 5, 2, 64)

    pooling_layer_1 = CNN.PoolingLayer(2,2)

    conv_layers = [conv_layer_1, conv_layer_2, pooling_layer_1]
    ann = ANN.ANN(2304, [128, 3])

    # 0 = Airplane
    # 2 = Bird
    # 8 = Ship

    conv_layer_1.load("data/3-network/CNNL1.npz")
    conv_layer_2.load("data/3-network/CNNL2.npz")
    ann.load("data/3-network/FCL.npz")
    
    for epoch in range(0, 40):
        print ("epoch " + str(epoch))
        batch_sizes = 64
        for filename in batch_files:
            batch = data_batch.DataBatch(filename)
            filteredImages = []
            filteredLabels = []
            for x in range(len(batch.labels)):
                if(batch.labels[x] == 0):
                    filteredImages.append(batch.images[x])
                    filteredLabels.append(0)
                if(batch.labels[x] == 2):
                    filteredImages.append(batch.images[x])
                    filteredLabels.append(1)
                if(batch.labels[x] == 8):
                    filteredImages.append(batch.images[x])
                    filteredLabels.append(2)
            batch.images = np.array(filteredImages)
            batch.labels = np.array(filteredLabels)

            print("Running on file " + filename[-12:])
            order = list(range(0, len(batch.images), batch_sizes))
            random.shuffle(order)
            for i in order:
                conv_layers, ann = trainOn(batch.images[i:i + batch_sizes],
                                           batch.labels[i:i + batch_sizes],
                                           conv_layers, ann, True,
                                           0.000004 * math.pow(0.96, epoch),
                                           0.7)  # decay and momentum
            print("saving")
            conv_layer_1.save("data/3-network/CNNL1.npz")
            conv_layer_2.save("data/3-network/CNNL2.npz")
            ann.save("data/3-network/FCL.npz")
Example #8
    def __init__(self):
        self.numAttributes = ['studytime', 'failures', 'freetime', 'absences', 'G1', 'G2', 'G3']
        self.textAttributes = ['paid', 'higher', 'internet', 'schoolsup']
        self.popSize = 20
        self.population = []
        self.children = [0 for x in range(self.popSize)]
        self.fitness = []
        self.weights = 310
        self.ANN = ANN.ANN()
        self.data = CsvReader.readCsv('../student-mat.csv', self.numAttributes, self.textAttributes)
        self.results = []
        self.averageFitness = 0.0
        self.best = sys.maxsize
Example #9
    def __init__(self):
        conv_layer_1 = CNN.CNNLayer(3, 3, 1, 32)

        conv_layer_2 = CNN.CNNLayer(32, 5, 2, 64)

        pooling_layer_1 = CNN.PoolingLayer(2, 2)

        self.conv_layers = [conv_layer_1, conv_layer_2, pooling_layer_1]
        self.ann = ANN.ANN(2304, [128, 3])

        # 0 = Airplane
        # 2 = Bird
        # 8 = Ship

        conv_layer_1.load("data/3-network/CNNL1.npz")
        conv_layer_2.load("data/3-network/CNNL2.npz")
        self.ann.load("data/3-network/FCL.npz")
def learning_rate_search():
    # based on a 28-28-28 model with 10 iterations

    learning_rate = list(np.arange(0.05, 0.16, 0.05))
    count = 0
    ANN_dict = {}
    for i in learning_rate:
        #init model
        temp_ANN = ANN(learning_rate=i)
        print(i)
        # train the model
        temp_ANN.train(training_data)
        #test_model
        temp_ANN.test(test_pixels, test_labels)

        ANN_dict[count] = temp_ANN.accuracy, i, temp_ANN
        print(ANN_dict[count])
        count += 1
    return ANN_dict
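
learning_rate_search keys ANN_dict by a running counter and stores accuracy first in each value tuple, so picking the best run is a one-liner. A sketch with illustrative numbers (models stubbed with None):

ANN_dict = {0: (0.91, 0.05, None), 1: (0.94, 0.10, None), 2: (0.89, 0.15, None)}
best = max(ANN_dict, key=lambda k: ANN_dict[k][0])   # compare runs by accuracy
print(best, ANN_dict[best][:2])                      # -> 1 (0.94, 0.1)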
Example #11
def crossover(parent1, parent2, mut_rate, noInputs, mi, mx):

    child = ANN.ANN(noInputs, 6, mi, mx)

    if weights:  # 'weights' and 'func' below are presumably module-level flags
        noRows = len(parent1.weights)

        for row in range(noRows):
            noCols = len(parent1.weights[row])
            for col in range(noCols):
                cur1 = parent1.weights[row][col]  # current element in parent 1
                cur2 = parent2.weights[row][col]  # current element in parent 2
                child.weights[row][col] = random.choice([cur1, cur2])

    if func:  # the user can choose to cross over functions
        #print "crossing functions"
        child = crossFunctions(parent1, parent2, child)

    return child
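
The element-wise random.choice above is uniform crossover; over flat NumPy weight vectors the same idea is a single masked select. A standalone sketch (illustrative vectors, not the project's ANN):

import numpy as np

rng = np.random.default_rng(0)
parent1 = rng.normal(size=8)
parent2 = rng.normal(size=8)

mask = rng.random(8) < 0.5                # True -> take the gene from parent1
child = np.where(mask, parent1, parent2)  # uniform crossover in one call
print(child)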
def randomised_CV(iterations=12):

    number_of_layer_option = list(range(1, 5))

    learning_rate = list(np.arange(0.0001, 0.01, 0.0003))
    random.seed(time.time())
    CV_seed = random.randint(5, 100)
    #CV_seed = 24

    ANN_dict = {}
    for i in range(iterations):
        if i > 2 and ANN_dict[i - 1][0] < 0.6 and ANN_dict[i - 2][0] < 0.6:
            CV_seed += 1

        elif (i > 3 and ANN_dict[i - 1][0] < 0.8
              and ANN_dict[i - 2][0] < 0.8 and ANN_dict[i - 3][0] < 0.8):
            CV_seed += 1

        limit = 112
        number_of_layers = np.random.choice(number_of_layer_option)
        hidden_layers = []
        for j in range(number_of_layers):
            layer_size = np.random.choice(list(range(28, limit + 1, 14)))
            hidden_layers.append(layer_size)
            limit = layer_size
        learning = np.random.choice(learning_rate)
        #init model
        temp_ANN = ANN(hidden_layer_config=hidden_layers,
                       learning_rate=learning,
                       seed_number=CV_seed)
        print(hidden_layers, learning)
        #train_model
        temp_ANN.train(training_data)
        #test_model
        temp_ANN.test(test_pixels, test_labels, statements='off')

        ANN_dict[i] = (temp_ANN.accuracy, hidden_layers, learning,
                       temp_ANN.iterations_trained, temp_ANN.seed_number,
                       temp_ANN)
        print(ANN_dict[i])
        print(i)
    return ANN_dict
Example #13
import numpy as np
import ANN

ann = ANN.ANN(shape=[1, 2, 45, 2])

print(ann([1]))
for i in range(20):
    grad = ann.gradient([1])
    grad = np.matmul(grad, np.array([[1], [-1]]))
    to_set = grad + ann.get_weights().reshape(grad.shape)
    ann.set_weights(to_set)
    print(ann([1]))
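
The loop above appears to perform gradient ascent on the scalar y0 - y1: the matmul with [[1], [-1]] collapses the per-output gradient into the gradient of that difference, which is then added to the weights. The same pattern on a toy quadratic objective, for reference:

import numpy as np

# maximize f(w) = w0 - w1 - (w0**2 + w1**2); the optimum is w = (0.5, -0.5)
w = np.zeros(2)
for _ in range(20):
    grad = np.array([1.0, -1.0]) - 2 * w   # gradient of f at w
    w = w + 0.1 * grad                     # ascent step: weights += step * gradient
print(w)   # close to [0.5, -0.5]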
Example #14
import random
import sys

sys.path.append("../x64/PythonLibDebug/")
from ANN import *

import loader

print("OpenCL initialized!!!")

batch_size = 10
epoch_count = 5  #30
learning_rate = 0.5

n = ANN("network.save")  #ANN([784, 100, 10])
print("Network initialized")

training_samples = []
validation_samples = []
test_samples = []

loader.load(training_samples, validation_samples, test_samples)

test_samples = test_samples[:100]

random.shuffle(training_samples)

print(loader.accuracy(n, test_samples), "of", len(test_samples))

for k in range(epoch_count):
    for i in range(0, len(training_samples) - batch_size, batch_size):
Example #15
from ANN import *
import numpy as np

Model = ANN(6, 4, 3, learning_rate=1, regularization=0.01)
x = np.array([[1, 1, 0, 0, 1, 1]])
y = np.array([[0, 1, 0]])
Model.alpha = np.array([[1, 1, -1, -1, 0, -1], [3, 1, 0, 1, 0, 2],
                        [1, 2, -1, 0, 2, -1], [2, 0, 2, 1, -2, 1]],
                       dtype=float)
Model.beta = np.array([[3, -1, 2, 1], [1, -1, 2, 2], [1, -1, 1, 1]],
                      dtype=float)
Model.fit(x, y, epoch=1)
print("Finished")
Example #16
File: main.py Project: Gogka/ANN
from ANNLayer import *
from ANN import *

if __name__ == "__main__":
    inputs = [1, 2, 3]
    layer = ANNLayer(number_of_inputs=len(inputs), number_of_neurons=3)
    layer2 = ANNLayer(number_of_neurons=3, parent_layer=layer)
    layer3 = ANNLayer(parent_layer=layer2)
    network = ANN([layer, layer2, layer3])
    # print(network.think(inputs))
    network.train([inputs], [[1]])
Example #17
def main(trainDataPath, train_pIC50Path, validationDataPath,
         validation_pIC50Path, testDataPath, test_pIC50Path):
    try:
        TrainX, TrainY, ValidateX, ValidateY, TestX, TestY = getAllOfTheData(
            trainDataPath, train_pIC50Path, validationDataPath,
            validation_pIC50Path, testDataPath, test_pIC50Path)

        # combine test and training sets
        # Backprop trainer will split it up
        TrainValX = append(TrainX, ValidateX, axis=0)
        TrainValY = append(TrainY, ValidateY, axis=0)

        # rescale data
        TrainValX, TrainValY, TestX, TestY = rescaleTheData(
            TrainValX, TrainValY, TestX, TestY)

        modeler = ANN(TrainValX.shape[1])

        modeler.train(TrainX, TrainY)

        # TODO: reorganize; roll up any relevant code from main into separate module(s)
        # Backprop trainer object
        #trainer = BackpropTrainer(ffn, ds)#, learningrate=.4, momentum=.2)#, verbose=True)

        # learning rates to test
        # note: best rates have been between .07 and .1, but results seem to vary;
        # most consistent between .07 and .085
        #alpha = array([0, .05, .07, .0775, .085, .1, .2])#, .15, .2, .25, .3, 3.5])
        #momentum = array([.05, .1, .15])
        # test learning rates
        #for i in range(alpha.shape[0]):
        # randomize weights
        #ffn.randomize();
        #for k in range(momentum.shape[0]):
        # Backprop trainer object
        #trainer = BackpropTrainer(ffn, ds, learningrate=alpha[i], momentum=momentum[k])
        #, verbose=True)
        # for j in range(1000):
        # 	error = trainer.train()
        # 	if j%10 == 0:
        # 		print "{} error: {}".format(j, error)
        # 	# print "All weights: {}".format(ffn.params)
        # 	if error < .001:
        # 		break
        # splits data into 75% training, 25% validation
        # train until convergence
        #error = trainer.trainUntilConvergence(maxEpochs=20, continueEpochs=10)# validationPortion=.X
        # print results
        #print "alpha: {}, momentum: {}".format(alpha[i], momentum[k]);
        #train_outputs = zeros(TrainValX.shape[0]);
        #for j in range(TrainValX.shape[0]):
        #train_outputs[j] = ffn.activate(TrainValX[j]);
        # print train_outputs[j], TrainValY[j]
        #print "Train MSE: {}".format(Validator.MSE(train_outputs, TrainValY));
        #test_outputs = zeros(TestX.shape[0]);
        #for j in range(TestX.shape[0]):
        #test_outputs[j] = ffn.activate(TestX[j]);
        # print test_outputs[j], TestY[j]
        #print "Test MSE: {}".format(Validator.MSE(test_outputs, TestY));
        return 0
    except Exception:
        print("error in main")
Example #18
import ANN

model = ANN.ANN()
model.input(dim=10)
model.layer(dim=10, activation="tanh")
model.layer(dim=23, activation="tanh")
model.layer(dim=20, activation="leaky_relu")
model.output(dim=4)
model.fit(input=None,
          output=None,
          labs=0.2,
          learning_rate=0.08,
          iteration=500,
          show_loss=True)
Example #19
    parents = GA.select_parent(pop_vector, fitness.copy(), num_parent_mating)

    # print("Parents", parents)

    # Generate offspring
    offspring_crossover = GA.crossover(
        parents,
        offspring_size=(pop_vector.shape[0] - parents.shape[0],
                        pop_vector.shape[1]))
    # print("Crossover", offspring_crossover)

    # Mutation
    offspring_mutation = GA.mutation(offspring_crossover, mutation_rate)
    # print("Mutation", offspring_mutation)

    pop_vector[0:parents.shape[0], :] = parents
    pop_vector[parents.shape[0]:, :] = offspring_mutation

pop_matrix = GA.vector_to_matrix(pop_vector, pop_matrix)
end = time.time()
best_weight = pop_matrix[0, :]
ann = ANN.ANN(best_weight)
acc = ann.predict(x_test, t_test)
print(end - start)
plt.plot(accuracies)
print(acc)
plt.xlabel("Iterations", fontsize=20)
plt.ylabel("Fitness", fontsize=20)

plt.show()
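
GA.vector_to_matrix in the closing lines converts flat GA genomes back into per-layer weight matrices for the network. A sketch of that round trip with hypothetical helpers (not GA.py's actual code):

import numpy as np

def matrix_to_vector(mats):
    # flatten per-layer matrices into one genome vector
    return np.concatenate([m.ravel() for m in mats])

def vector_to_matrix(vec, shapes):
    # slice the genome back into matrices of the given shapes
    mats, i = [], 0
    for shape in shapes:
        n = int(np.prod(shape))
        mats.append(vec[i:i + n].reshape(shape))
        i += n
    return mats

shapes = [(4, 3), (3, 2)]
flat = matrix_to_vector([np.ones(s) for s in shapes])
print([m.shape for m in vector_to_matrix(flat, shapes)])   # [(4, 3), (3, 2)]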
Example #20
def main(argv):

    #parse arguments
    #python UsingANN.py neuralnetworkfile [commandRate(default =40)]
    #check for the right number of arguments
    if (len(argv) < 1):
        print "\nMust provide neural network file name"
        sys.exit(2)

    nnfile = argv[0] + '.txt'

    commandRate = -1
    if (len(argv) > 1):
        commandRate = float(argv[1])

    #open log file for writing
    logfile = 'log'
    if (len(argv) > 2):
        logfile = argv[2] + '.txt'
    f = open(logfile, 'w')
    f.write("Start logging...")

    #Creating the Neural Network using a text file
    testann = ANN(1)
    testann.create_network(nnfile)
    #Loading the ANN with 0s initially
    print "\nLoading the ANN [0, 0, 0, 0]"
    inputvals = [0.0, 0.0, 0.0, 0.0]
    testann.load_NN(inputvals)
    # initialize the neural network using CTRNN_Controller()
    print("\nUsing CTRNN_Controller: dt = .02")
    nnoutput = testann.CTRNN_Controller(.02)

    for node in nnoutput:
        print(node.get_output())

    robot = RobotPi()  #Query Aracna on current sensors
    if (commandRate > 0):
        robot = RobotPi(commandRate)

    val = "\nneural network file: {0}\ncommandRate: {1}".format(
        nnfile, commandRate)
    line = str(val)
    f.write(line)
    commanded_pos = []

    while (True):
        #Query Aracna on current sensors
        #old_pos = current_pos
        current_pos = robot.readCurrentPosition()  # returns a list of 8 servo positions
        val = "\ncurrent_pos: {0}".format(current_pos)
        line = str(val)
        f.write(line)
        #if(commanded_pos is empty) load and activate NN
        #pos = [right hip, right knee, back hip, back knee, front hip, front knee, left hip, left knee]
        #sensors = [right knee, back knee, front knee, left knee]
        print "\nafter reading current pos\t", current_pos
        for ii in range(0, 1):
            sensors = []
            for i in [0, 2, 4, 6]:
                current_pos[i] = max(min(MAX_HIP, current_pos[i]),
                                     MIN_HIP)  #restrict servo pos [0, 1024]
                current_pos[i +
                            1] = max(min(MIN_KNEE, current_pos[i + 1]),
                                     MAX_KNEE)  #restrict servo pos [1024, 0]
                value = knee_to_NN(current_pos[i + 1])  # convert to NN sensor bounds [-20, 20] degrees
            # sensors.append(value * (M_PI / 180.0))  # convert to radians
            sensors.append(knee_to_NN(current_pos[3]) *
                           (M_PI / 180.0))  #back knee
            sensors.append(knee_to_NN(current_pos[5]) *
                           (M_PI / 180.0))  #front knee
            sensors.append(knee_to_NN(current_pos[1]) *
                           (M_PI / 180.0))  #right knee
            sensors.append(knee_to_NN(current_pos[7]) *
                           (M_PI / 180.0))  #left knee
            #Load sensors into ANN
            testann.load_NN(sensors)
            #Get ANN nnoutput [0, 1]
            # ANN output indices:
            #   [0] back knee      [1] back outhip    [2] back hip = 0.0
            #   [3] front hip = 0.0    [4] front outhip   [5] front knee
            #   [6] right knee     [7] right outhip   [8] right hip = 0.0
            #   [9] left hip = 0.0     [10] left outhip   [11] left knee
            #print "\nPropagating the ANN"
            nnoutput = testann.output_NN(.01)
            # current_pos[3] = knee_to_POS(nnoutput[0].get_output())   # back knee
            # current_pos[5] = knee_to_POS(nnoutput[5].get_output())   # front knee
            # current_pos[1] = knee_to_POS(nnoutput[6].get_output())   # right knee
            # current_pos[7] = knee_to_POS(nnoutput[11].get_output())  # left knee
        #Map nnoutput from [0, 1] to actual servo pos
        #print "nnoutput"
        #for node in nnoutput:
        #print node.get_output()
        desired_pos = []
        #desired_pos[0] = MIN_NNKNEE + (MAX_NNKNEE-MIN_NNKNEE)*nnoutput[0] #right knee [-20, 20]
        #output [back knee, back hip, 0.0, fro
        desired_pos.append(hip_to_POS(nnoutput[7].get_output()) *
                           2)  #right hip convert from [0, 1] to [0, 1024]
        desired_pos.append(knee_to_POS(nnoutput[6].get_output()) *
                           2)  #right knee convert from [0, 1] to [1024, 0]

        desired_pos.append(hip_to_POS(nnoutput[1].get_output()) * 2)  #back hip
        desired_pos.append(knee_to_POS(nnoutput[0].get_output()) *
                           2)  #back knee

        desired_pos.append(hip_to_POS(nnoutput[4].get_output()) *
                           2)  #front hip
        desired_pos.append(knee_to_POS(nnoutput[5].get_output()) *
                           2)  #front knee

        desired_pos.append(hip_to_POS(nnoutput[10].get_output()) *
                           2)  #left hip
        desired_pos.append(knee_to_POS(nnoutput[11].get_output()) *
                           2)  #left knee
        #current_pos = desired_pos
        #for node in nnoutput:
        #       print node.get_output()
        #Move Aracna using nn output
        #if (is_reached(current_pos, commanded_pos)
        print "desired_pos", desired_pos
        val = "\tdesired_pos: {0}".format(desired_pos)
        line = str(val)
        f.write(line)
        robot.commandPosition(desired_pos, False)
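
The max(min(...)) pattern above restricts servo positions to a band before conversion; a named helper makes that intent explicit. A sketch treating the example's MIN_HIP/MAX_HIP as illustrative constants:

def clamp(value, lo, hi):
    # restrict value to the closed interval [lo, hi]
    return max(lo, min(hi, value))

MIN_HIP, MAX_HIP = 0, 1024   # illustrative bounds
print(clamp(1500, MIN_HIP, MAX_HIP))   # -> 1024
print(clamp(-3, MIN_HIP, MAX_HIP))     # -> 0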
Example #21
import cv2
import operator
import numpy as np

from ANN import *
from solver import *


# -- initialising ML  --------------------------
shape = [784, 128, 10]
net = ANN(shape)
#net.load('Params/params_900.pickle')
#train_net()

# -- Camera object  --------------------------
cam = cv2.VideoCapture(0)

# --  Misc variables  --------------------------
margin = 10
case = 28 + 2*margin
perspective_size = 9*case

flag = 0
ans = 0

if __name__ == '__main__':
  while True:

    # -- collect images  --------------------------
    check, frame = cam.read()
    #frame = imutils.resize(frame, width = 1000)
Example #22
    elif n == 'LR':
        lr = LR(X_train,X_train_res,X_test,y_train,y_train_res,y_test)
        lr.logreg_imbal()
        lr.logreg_bal()
        lr.roc_auc_logreg_imbal()
        lr.roc_auc_logreg_bal()    
        
    elif n == 'SVM':
        svm = SVM(X_train,X_train_res,X_test,y_train,y_train_res,y_test)
        svm.svm_imbal()
        svm.roc_auc_svm_imbal()
        svm.svm_bal()
        svm.roc_auc_svm_bal()
                                                                        
    elif n == 'ANN':
        ann = ANN(X_train,X_train_res,X_test,y_train,y_train_res,y_test)
        ann.ann_imbal()
        ann.roc_auc_ann_imbal()
        ann.ann_bal()
        ann.roc_auc_ann_bal()
                        
Example #23
File: test_ANN.py Project: Shaitender/ANN
inp = inpp
tar = tarr
inpp = np.array([inp0_test, inp1_test, inp2_test]).T
tarr = np.array([out_test]).T
inp_test = inpp
out_te = tarr
np.savetxt('input_training.txt', inp)
np.savetxt('output_training.txt', tar)
np.savetxt('output_testing.txt', out_te)
af = sigmoid
#plt.plot(inp_test,out_te,'ro')
#plt.show()
#num_inputs,num_outputs,hidden_layers,non_linearity,learning_rate
#plt.plot(inp,tar,'ro',)
#plt.show()
ann = ANN(3, 1, hid, af, 0.001, 5000)
t = ann.train(inp, tar, inp_test, out_te)
#print out
#print "ms error = ",ann.output_layer.mse,"avg error = ",ann.output_layer.avg_er
"""for x in range(len(out)):
    if out[x][0] > 0.5:
        out[x][0] = 1
    else:
        out[x][0] = 0"""
'''for k in range(len(tar)):
    tar[k] = mii + out[k]*rat'''
out = ann.forward_pass(inp_test)
np.savetxt('obtained_output_training.txt', t)
np.savetxt('obtained_output_testing.txt', out)

add = 0
# IMPORTANT: set this to the directory where ANN.py lives
import sys
sys.path.insert(
    0,
    r'C:\Users\Alexander\Documents\GitHub\Machine-Learning-Course')  # MODIFY ME

#use a constant to keep track of how many nodes we are using in the hidden layer
hidden_layer_nodes = 20
import ANN
# initialize
aa = ANN.ANN(inputs,
             targets,
             nhidden1=hidden_layer_nodes,
             nlayers=1,
             momentum=0)
#train for n iterations
#first parameter is number of iterations
#second parameter is the learning rate
#third parameter is a boolean of whether or not you want to track and plot the error during training
aa.train_n_iterations(10000, 0.001, plot_errors=False)
# get the outputs using the inputs
# we are using all the inputs here because no parameter is provided;
# forward_pass defaults to all the data
results = aa.forward_pass()
#get rid of bias from weights
testWeight = aa.weights1[1:, :]
#transpose so it is positioned same as input
testWeight = transpose(testWeight)
            tat_domian = male_train.sample(target_size)
            mse = self.LININT_train(src_domain, tat_domian, male_dev,
                                    male_test)

        else:
            train_mixed = female_train.copy()
            train_mixed = train_mixed.append(male_train, ignore_index=True)
            src_domain = train_mixed.sample(frac=1).reset_index(drop=True)
            tat_domian = mixed_train.sample(target_size)
            mse = self.LININT_train(src_domain, tat_domian, mixed_dev,
                                    mixed_test)
        return mse


if __name__ == '__main__':
    ann = ANN.ANN()
    lr = LR()
    female_path = "Data/FEMALE.csv"
    female_train_path = "Data/FEMALE_train.csv"
    female_dev_path = "Data/FEMALE_dev.csv"
    female_test_path = "Data/FEMALE_test.csv"
    male_path = "Data/MALE.csv"
    male_train_path = "Data/MALE_train.csv"
    male_dev_path = "Data/MALE_dev.csv"
    male_test_path = "Data/MALE_test.csv"
    mixed_path = "Data/MIXED.csv"
    mixed_train_path = "Data/MIXED_train.csv"
    mixed_dev_path = "Data/MIXED_dev.csv"
    mixed_test_path = "Data/MIXED_test.csv"

    file_paths = {}
Example #26
File: test.py Project: mriedman/Research
from ANN import *
from adam import *

if __name__ == "__main__":
    test = ANN()
    test.createFromArrays([2, 2, 1], ["linear", "sigmoid"])

    test.set_max_epoch(50)
    test.set_desired_error(0.00000001)
    test.set_momentum(0.05)
    test.set_learning_rate(0.05)
    test.set_erf("MSE")

    # _input = [[1, 0], [0, 1], [1, 1], [0, 0]]
    # output = [[1], [1], [0], [0]]

    #test.batch_train(_input, output, 1)

    #test adam

    _input = [1, 2]
    target = [0]
    test.run(_input)
    print(test.output)

    test = ADAM(test)
    test.adam(_input, target)
    test = test.get_ann()

    test.run(_input)
    print(test.output)
Example #27
import numpy as np
import ANN

layer_sizes = (3,4,1,5)
x = np.ones((layer_sizes[0],1))

net = ANN.ANN(layer_sizes)
prediction = net.predict(x)

print(prediction)
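
A plausible reading of ANN.ANN(layer_sizes).predict(x): one weight matrix per consecutive pair of layers, each applied with an activation. A self-contained sketch under that assumption (random weights, sigmoid; not the project's code):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
layer_sizes = (3, 4, 1, 5)
weights = [rng.standard_normal((n_out, n_in))
           for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:])]
biases = [rng.standard_normal((n, 1)) for n in layer_sizes[1:]]

a = np.ones((layer_sizes[0], 1))
for W, b in zip(weights, biases):
    a = sigmoid(W @ a + b)   # one dense layer per step
print(a)                     # shape (5, 1), matching the last layer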
Example #28
from Example import *
from ANN import *
from read_data import *

import matplotlib.pyplot as plt

if __name__ == "__main__":
    train_data, test_data = read_data(800)
    ann = ANN([28 * 28, 50, 10], "sigmoid", "L2", 0)
    print "done loading"
    ann.train_GD(train_data, test_data)

# special things:
# converting images into black and white (maybe not so special ? :) )
# ability to store and load the network. (with nice user interface :D)
Example #29
target = target[order, :]

# IMPORTANT: set this to the directory where ANN.py lives
import sys

sys.path.insert(
    0,
    r'C:\Users\Alexander\Documents\GitHub\Machine-Learning-Course')  # MODIFY ME

# import ANN
import ANN
#initialize perceptron
net = ANN.ANN(iris[:, :4], target, nhidden1=5, nlayers=1, momentum=0.9)
#split the data we randomized and encoded the output for earlier
net.split_50_25_25()
#train for n iterations
#first parameter is number of iterations
#second parameter is the learning rate
#third parameter is a boolean of whether or not you want to track and plot the error during training
net.train_n_iterations(1000, 0.3, plot_errors=True)
#print confusion matrix
net.confmat()

#repeat for seq training
net = ANN.ANN(iris[:, :4], target, nhidden1=5, nlayers=1, momentum=0.9)
net.split_50_25_25()
net.train_n_iterations_seq(1000, 0.1, plot_errors=True)
net.confmat()
    # RAW
    train_path = './preprocessed_training_set/train_df.csv'
    test_path = './preprocessed_training_set/test_df.csv'
    SVM_obj = svm.SVM(train_path, test_path)
    run_SVM_trainer()
elif usr_input == 4:
    # RAW
    train_path = './preprocessed_training_set/train_df.csv'
    test_path = './preprocessed_training_set/test_df.csv'
    XGB_obj = xgb.XGB(train_path, test_path)
    run_XGB_trainer()
elif usr_input == 5:
    # RAW
    train_path = './preprocessed_training_set/train_df.csv'
    test_path = './preprocessed_training_set/test_df.csv'
    ann_obj = ann.ANN(train_path, test_path)
    run_ann_trainer()
elif usr_input == 6:
    # #WITH Preprocessing
    train_path = './preprocessed_training_set/train_df_O_T_Smote.csv'
    test_path = './preprocessed_training_set/test_df_log.csv'
    KNN_obj = knn.KNN(train_path, test_path)
    run_KNN_trainer()
elif usr_input == 7:
    # #WITH Preprocessing
    train_path = './preprocessed_training_set/train_df_O_T_Smote.csv'
    test_path = './preprocessed_training_set/test_df_log.csv'
    RF_obj = rf.RF(train_path, test_path)
    run_RF_trainer()
elif usr_input == 8:
    # #WITH Preprocessing