Example #1
def initialize_network():
    # Train
    data, target = get_train_data(dataset_location)

    if data.dtype != float:
        data = StandardScaler().fit_transform(data)
        print('Dataset scaled')

    train = []
    for i in range(len(data)):
        temp = np.array(data[i], dtype='float64')
        train.append(Instance(temp, [float(target[i])]))

    # Evaluation
    data, target = get_validation_data(dataset_location)

    if data.dtype != float:
        data = StandardScaler().fit_transform(data)
        print('Dataset scaled')

    evaluation = []
    for i in range(len(data)):
        temp = np.array(data[i], dtype='float64')
        evaluation.append(Instance(temp, [float(target[i])]))

    settings = {
        "n_inputs":
        len(data[0]),
        "layers":
        [(80, tanh_function), (70, ReLU_function), (60, tanh_function),
         (50, ReLU_function), (40, tanh_function), (30, ReLU_function),
         (20, tanh_function), (10, tanh_function), (1, ReLU_function)]
    }

    temp_network = NeuralNet(settings)
    training_set = train
    test_set = evaluation
    cost_function = cross_entropy_cost

    scaled_conjugate_gradient(
        temp_network,  # the network to train
        training_set,  # specify the training set
        test_set,  # specify the test set
        cost_function,  # specify the cost function to calculate error
        print_rate=1,
        save_trained_network=True)

    return temp_network
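A minimal driver for this helper, assuming the module-level dataset_location it reads and the evaluate_neural_network defined in Example #7 below:

# Hypothetical usage sketch: train on the train/validation splits,
# then score the trained network on the test split (see Example #7).
network = initialize_network()
evaluate_neural_network(network)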
Example #2
def getSlices(data, label=None, dataset=None):
    # Avoid a mutable default argument; create a fresh list per call.
    if dataset is None:
        dataset = []
    for i in range(0, len(data[0]) - N - 2, 2):
        build = []
        for j in range(N // 2):  # integer division for Python 3
            # build = current x, y
            build.append(float(data[0][i + j]))
            build.append(float(data[1][i + j]))
        # Insert at a random position to shuffle the dataset as it is built.
        if label:
            dataset.insert(random.randrange(len(dataset) + 1),
                           Instance(build, label))
        else:
            dataset.insert(random.randrange(len(dataset) + 1), Instance(build))

    return dataset
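A hedged usage sketch; the original module's imports of random and Instance and its window size N are assumed in scope, and the coordinate lists here are made up:

# Hypothetical usage sketch: two parallel coordinate tracks, windowed
# into N // 2 (x, y) pairs per Instance with a stride of 2.
N = 10
xs = [float(i) for i in range(100)]
ys = [float(2 * i) for i in range(100)]
labeled = getSlices([xs, ys], label=[1.0], dataset=[])
print(len(labeled), len(labeled[0].features))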
Example #3
def getOutputs(network, testset):
    testset = Instance(testset)
    test_data = testset.features

    input_signals, derivatives = network.update(test_data, trace=True)
    out = input_signals[-1]
    return out[0]
Example #4
    def train(self):
        # Tell the neural network that all integers in (1..input) should map
        # to output.
        dataset = []
        n_inputs = 0
        for i in range(1, self.input + 1):
            example = self.make_input(i)
            n_inputs = len(example)
            dataset.append(Instance(example, self.output))

        settings = {
            "initial_bias_value": self.bias,
            "n_inputs": n_inputs,

            # The neural network in the challenge has two layers.
            "layers": [(3, sigmoid_function),
                       (len(self.output), sigmoid_function)]
        }

        network = NeuralNet(settings)
        training_set = dataset
        test_set = dataset
        cost_function = binary_cross_entropy_cost
        backpropagation(
            network,  # the network to train
            training_set,  # specify the training set
            test_set,  # specify the test set
            cost_function,  # specify the cost function to calculate error
            max_iterations=20000)

        self.network = network
        self.weights = self.network.weights
Example #5
def create_prediction_data(song_list):
    prediction_set = []
    for song in song_list:
        prediction_set.append(
            Instance([
                float(song.attributes['accousticness']),
                float(song.attributes['danceability']),
                float(song.attributes['energy']),
                float(song.attributes['instrumentalness']),
                float(song.attributes['loudness']),
                float(song.attributes['speechiness']),
                float(song.attributes['tempo']),
                float(song.attributes['valence'])
            ]))

    return prediction_set
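A hedged sketch of the consuming side; song_list and the trained network are assumptions carried over from the surrounding project:

# Hypothetical usage sketch: unlabeled Instances go straight to predict().
prediction_set = create_prediction_data(song_list)
for song, output in zip(song_list, network.predict(prediction_set)):
    print(song.attributes['tempo'], output)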
Example #6
def produce_dataset(training_file):
    training_data = []
    with open(training_file, 'rb') as data:  # pickle files must be opened in binary mode
        dataset = pickle.load(data)
        for song in dataset:
            attributes = [
                song.attributes['accousticness'],
                song.attributes['danceability'], song.attributes['energy'],
                song.attributes['instrumentalness'],
                song.attributes['loudness'], song.attributes['speechiness'],
                song.attributes['tempo'], song.attributes['valence']
            ]
            recommended_value = [song.attributes['rec_value']]

            training_data.append(Instance(attributes, recommended_value))

    return training_data
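A minimal training sketch on top of this loader; the file name songs.pickle is hypothetical, and sigmoid_function, NeuralNet, backpropagation and cross_entropy_cost are assumed to be imported from nimblenet as in the other examples:

# Hypothetical training sketch: eight attributes per song fix n_inputs at 8.
training_data = produce_dataset('songs.pickle')
settings = {
    "n_inputs": 8,
    "layers": [(8, sigmoid_function), (1, sigmoid_function)]
}
network = NeuralNet(settings)
backpropagation(network, training_data, training_data,
                cross_entropy_cost, max_iterations=5000)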
Example #7
def evaluate_neural_network(network, dataset_location=get_transformed_dir()):

    data, target = get_test_data(dataset_location)

    if data.dtype != float:
        data = StandardScaler().fit_transform(data)
        print('Dataset scaled')

    test = []
    for i in range(len(data)):
        temp = np.array(data[i], dtype='float64')
        test.append(Instance(temp))

    prediction = list(
        np.rint(network.predict(test)).astype(int).astype(str).flatten('F'))
    target = list(map(str, target))  # list(...) and plain str for Python 3; np.str is removed
    print('Scaled conjugate network: ')
    print_f_measure(target, prediction)
Example #8
def readDataSource(pickle_unpickle=False):
    if pickle_unpickle is not False:
        if os.path.isfile(pickle_unpickle):
            print("reading " + pickle_unpickle + "...")
            with open(pickle_unpickle, "rb") as cached:
                return pickle.load(cached)
    data = []
    in_out_layer = [] 
    inputLayerSize = setting["size"][0]*setting["size"][1]*(1 if setting["black_and_white"] else 3)
    outputLayerSize = len(class_to_predict)
    directories = glob.glob(setting["data_source"])
    for directory in directories:
        if os.path.isdir(directory):
            outputLayer = createOutputLayerFromMetadata(directory+"/metadata.json")
            for imageFile in glob.glob(directory+"/*"):
                imgData = readAndNormalizeImg(imageFile)
                if imgData is not None:
                    in_out_layer.append(Instance(imgData,outputLayer))
    if pickle_unpickle is not False:
        print("save data to " + pickle_unpickle + "...")
        pickle.dump([inputLayerSize, outputLayerSize, in_out_layer], open(pickle_unpickle, "wb"))
    return inputLayerSize, outputLayerSize, in_out_layer
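A hedged sketch of how the returned triple could feed network construction; cache.pkl and the 64-unit hidden layer are illustrative choices, with NeuralNet and sigmoid_function imported from nimblenet as elsewhere:

# Hypothetical usage sketch: layer sizes come straight from the loader.
inputLayerSize, outputLayerSize, instances = readDataSource("cache.pkl")
settings = {
    "n_inputs": inputLayerSize,
    "layers": [(64, sigmoid_function), (outputLayerSize, sigmoid_function)]
}
network = NeuralNet(settings)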
Example #9
from nimblenet.activation_functions import sigmoid_function
from nimblenet.cost_functions import cross_entropy_cost
from nimblenet.learning_algorithms import *
from nimblenet.neuralnet import NeuralNet
from nimblenet.preprocessing import construct_preprocessor, standarize
from nimblenet.data_structures import Instance
from nimblenet.tools import print_test


# Training set
dataset             = [ Instance( [0,0,0], [0] ), Instance( [1,0,1], [1] ), Instance( [0,1,0], [1] ), Instance( [1,1,0], [1] ) ]

preprocess          = construct_preprocessor( dataset, [standarize] ) 
training_data       = preprocess( dataset )
test_data           = preprocess( dataset )


cost_function       = cross_entropy_cost
settings            = {
    # Required settings
    "n_inputs"              : 3,       # Number of network input signals
    "layers"                : [  (3, sigmoid_function), (1, sigmoid_function) ],
                                        # [ (number_of_neurons, activation_function) ]
                                        # The last pair in the list dictates the number of output signals
    
    # Optional settings
    "initial_bias_value"    : 0.0,
    "weights_low"           : -0.1,     # Lower bound on the initial weight value
    "weights_high"          : 0.1,      # Upper bound on the initial weight value
}
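The example stops after the settings dict; under the imports already listed above, a minimal continuation (hyperparameter values are illustrative) would be:

# Sketch of the remaining steps: build the network, train it with
# backpropagation, and print a test summary.
network = NeuralNet(settings)
backpropagation(network, training_data, test_data, cost_function,
                ERROR_LIMIT=1e-2, max_iterations=20000)
print_test(network, training_data, cost_function)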
Example #10
settings = {
    # Required settings
    "n_inputs": data_reduction,                 # Number of network input signals
    "layers": [(hidden_nodes, sigmoid_function),
               (4, sigmoid_function)],          # [ (number_of_neurons, activation_function) ]
    # Optional settings
    "initial_bias_value": 0.0,
    "weights_low": -0.1,                        # Lower bound on the initial weight value
    "weights_high": 0.1,                        # Upper bound on the initial weight value
}
network = NeuralNet(settings)
expected_output = [
    [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]
]
# Training the net
test_set = [Instance(testset[i, :], expected_output[test_classes[i]]) for i in range(0, len(testset))]
training_set = [Instance(trainingset[i, :], expected_output[training_classes[i]]) for i in range(0, len(trainingset))]
cost_function = sum_squared_error
print('Starting to train...')
backpropagation(
    # Required parameters
    network,                     # the neural network instance to train
    training_set,                # the training dataset
    test_set,                    # the test dataset
    cost_function,               # the cost function to optimize
    # Optional parameters
    ERROR_LIMIT=1e-3,                           # Error tolerance when terminating the learning
    max_iterations=20000,                       # Regardless of the achieved error, terminate after max_iterations epochs. Default: infinite
    batch_size=0,                               # Set the batch size. 0 implies using the entire training_set as a batch, 1 equals no batch learning, and any other number dictates the batch size
    input_layer_dropout=input_layer_dropout,    # Dropout fraction of the input layer
    hidden_layer_dropout=hidden_layer_dropout,  # Dropout fraction in the hidden layer(s)
)
Example #11
def carvedToWritten(n):  # header reconstructed from the call site below
    n = ui(n)
    r = ui(0)
    for i in arange(64, dtype=ui):
        a = ui(0)
        for j in reversed(arange(n + 1, dtype=ui)):
            b = ui(0)
            for k in arange(i + 1, dtype=ui):
                c = a ^ (((i & n & ~j) | (i & ~n & j) & ONE) << k)
                a ^= (j & (ONE << k)) ^ b
                b = (((c & j) | (c & b) | (j & b)) & (ONE << k)) << ONE
        r |= (a & (ONE << i))
    return r


sample_x = []
sample_y = [ui(randint(1, (2**5) - 1)) for _ in range(SAMPLE_SIZE)]
# generate sample x
for instance in sample_y:
    sample_x.append(
        list(map(int, "{0:064b}".format(carvedToWritten(instance)))))
# adapt sample y
sample_y = [list(map(int, "{0:064b}".format(y))) for y in sample_y]
# generate dataset
dataset = [Instance(x, y) for x, y in zip(sample_x, sample_y)]
# config
settings = {
    "n_inputs": 64,
    "layers": [(2, sigmoid_function), (1, sigmoid_function)]
}

RMSprop(NeuralNet(settings), dataset, dataset, cross_entropy_cost)
Example #12
training_set = []
test_set = []

row_len = 0
col_len = 0
iteration = 0

with open('entrenamiento.csv', 'rt') as f:
    reader = csv.reader(f)
    for row in reader:
        X = list(map(int, row[:625]))   # list(...) needed under Python 3
        Y = list(map(int, row[625:]))
        col_len = len(X)
        row_len = len(Y)
        training_set.append(Instance(X, Y))
        iteration = iteration + 1

with open('test.csv', 'rt') as g:
    reader = csv.reader(g)
    for row in reader:
        X = list(map(int, row[:625]))
        Y = list(map(int, row[625:]))
        test_set.append(Instance(X, Y))
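A hedged continuation for this snippet: with col_len inputs and row_len outputs, the loaded sets drop into the usual settings/backpropagation pattern (the nimblenet imports and the 30-unit hidden layer are assumptions):

# Hypothetical continuation over the CSV-derived sets.
settings = {
    "n_inputs": col_len,
    "layers": [(30, sigmoid_function), (row_len, sigmoid_function)]
}
network = NeuralNet(settings)
backpropagation(network, training_set, test_set,
                cross_entropy_cost, max_iterations=10000)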
Example #13
    def predict(self, n):
        # Test a prediction.
        prediction_set = [Instance(self.make_input(n))]
        prediction = self.network.predict(prediction_set)[0]
        return int(prediction[0] * 256)
Example #14
## Train the network using resilient backpropagation
#resilient_backpropagation(
#        network,
#        training_data,                  # specify the training set
#        test_data,                      # specify the test set
#        cost_function,                  # specify the cost function to calculate error
#        ERROR_LIMIT          = 1e-3,    # define an acceptable error limit
#        #max_iterations      = (),      # continues until the error limit is reached if this argument is skipped
#
#        # optional parameters
#        print_rate           = 1000,    # print error status every `print_rate` epoch.
#        weight_step_max      = 50.,
#        weight_step_min      = 0.,
#        start_step           = 0.5,
#        learn_max            = 1.2,
#        learn_min            = 0.5,
#        save_trained_network = False    # Whether to write the trained weights to disk
#    )

# Print a network test
print_test(network, training_data, cost_function)
"""
Prediction Example
"""
prediction_set = [
    Instance([7.4, 0.7, 0, 1.9, 0.076, 11, 34, 0.9978, 3.51, 0.56, 9.4]),
    Instance([7.4, 0.66, 0, 1.8, 0.075, 13, 40, 0.9978, 3.51, 0.56, 9.4])
]
prediction_set = preprocess(prediction_set)
print(network.predict(prediction_set))  # produce the output signal
Example #15
            grado[1] = calcDeg(a1, a2, a3)
            grado[2] = calcDeg(b1, b2, b3)
            grado[3] = calcDeg(c1, c2, c3)

            for k in range(4):
                datos[k + 10] = round(distancias[k], 2)

            for i in range(4):
                datos[i + 6] = round(grado[i], 2)

            z = points['Cuerpo'][2][2]

            for j in range(6):
                datos[j] = round(points['Brazos'][j][2] - z, 2)

            prediction_set = [Instance(datos)]
            prediction_set = preprocess(prediction_set)
            # print("\nPrediction:")
            prediction = network.predict(
                prediction_set)  # produce the output signal
            # print(prediction[0][0])
            predicFormat = b.convertBin(prediction[0])

            cv2.putText(blank_image, str(predicFormat), (30, 50), font, 1.0,
                        (0, 0, 255), 2)

    cv2.imshow("Depth", blank_image)
    cv2.imshow("RGB", bgrframe)

    k = cv2.waitKey(5) & 0xFF
Example #16
def generatePseudoPairs(network, numItems):
    mytask = task.Task(inputNodes=settings.inputNodes, hiddenNodes=settings.hiddenNodes,
                outputNodes=settings.outputNodes, populationSize=numItems, auto=False).task
    pseudoInputs = mytask['inputPatterns']
    pseudoItems = [Instance(a, getOutputs(network, a)) for a in pseudoInputs]
    return pseudoItems
Example #17
from nimblenet.activation_functions import sigmoid_function
from nimblenet.cost_functions import *
from nimblenet.learning_algorithms import *
from nimblenet.data_structures import Instance
from nimblenet.neuralnet import NeuralNet

ins = [1, 1, 1]

f = ord('f') / 256.0
l = ord('l') / 256.0
a = ord('a') / 256.0
g = ord('g') / 256.0

dataset = [
    Instance(ins, [f, l, a, g])
]

settings = {
    "n_inputs" : 3,
    "layers" : [(4, sigmoid_function)] * 1
}

network        = NeuralNet(settings)
training_set   = dataset
test_set       = dataset
cost_function  = sum_squared_error

scipyoptimize(
    network,           # the network to train
    training_set,      # specify the training set
    test_set,          # specify the test set
    cost_function)     # specify the cost function to calculate error
Example #18
settings = {
    # Required settings
    "n_inputs": data_reduction,  # Number of network input signals
    "layers": [(hidden_nodes, sigmoid_function), (4, sigmoid_function)
               ],  # [ (number_of_neurons, activation_function) ]
    # Optional settings
    "initial_bias_value": 0.0,
    "weights_low": -0.1,  # Lower bound on the initial weight value
    "weights_high": 0.1,  # Upper bound on the initial weight value
}
network = NeuralNet(settings)
expected_output = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                   [0, 0, 0, 1]]
# Training the net
test_set = [
    Instance(testset[i, :], expected_output[test_classes[i]])
    for i in range(0, len(testset))
]
training_set = [
    Instance(trainingset[i, :], expected_output[training_classes[i]])
    for i in range(0, len(trainingset))
]
cost_function = sum_squared_error
print('Starting to train...')
backpropagation(
    # Required parameters
    network,  # the neural network instance to train
    training_set,  # the training dataset
    test_set,  # the test dataset
    cost_function,  # the cost function to optimize
    # Optional parameters
    ERROR_LIMIT=1e-3,      # Error tolerance when terminating the learning
    max_iterations=20000)  # Terminate after max_iterations epochs regardless of error
Example #19
from nimblenet.activation_functions import sigmoid_function
from nimblenet.cost_functions import cross_entropy_cost
from nimblenet.learning_algorithms import *
from nimblenet.neuralnet import NeuralNet
from nimblenet.preprocessing import construct_preprocessor, standarize
from nimblenet.data_structures import Instance
from nimblenet.tools import print_test

# Training set
dataset = [
    Instance([0, 0], [0]),
    Instance([1, 0], [1]),
    Instance([0, 1], [1]),
    Instance([1, 1], [1])
]

preprocess = construct_preprocessor(dataset, [standarize])
training_data = preprocess(dataset)
test_data = preprocess(dataset)

cost_function = cross_entropy_cost
settings = {
    # Required settings
    "n_inputs": 2,  # Number of network input signals
    "layers": [(3, sigmoid_function), (1, sigmoid_function)],
    # [ (number_of_neurons, activation_function) ]
    # The last pair in the list dictate the number of output signals

    # Optional settings
    "initial_bias_value": 0.0,
    "weights_low": -0.1,  # Lower bound on the initial weight value
Example #20
    mytask = task.Task(  # call opening reconstructed from the pattern in Example #16
        inputNodes=inputNodes,
        hiddenNodes=hiddenNodes,
        outputNodes=outputNodes,
        populationSize=numPatterns,
        auto=auto,
        learningConstant=learningConstant,
        momentumConstant=momentumConstant
    )

    # Intervening task
    interventions = [mytask.popTask() for a in range(0, numInterventions)]
    inputs = mytask.task['inputPatterns']
    teacher = mytask.task['teacher']

    dataset = []
    for i in range(len(inputs)):
        dataset.append(Instance(inputs[i], teacher[i]))

    training_data       = dataset
    test_data           = dataset

    layers = [ (hiddenNodes, sigmoid_function) for i in range(settings.numLayers) ]
    layers.append((outputNodes, sigmoid_function))
    print("Layers: {}".format(layers))
    mysettings            = {
        "n_inputs"              : inputNodes,       # Number of network input signals
        "layers"                : layers,
        "initial_bias_value"    : 0.01,
        "weights_low"           : -0.3,     # Lower bound on the initial weight value
        "weights_high"          : 0.3,
    }
Example #21
        rotation_y = get_y_rotation(accel_scaled_x, accel_scaled_y,
                                    accel_scaled_z)

        last_x = K * (last_x + gyro_x_delta) + (K1 * rotation_x)
        last_y = K * (last_y + gyro_y_delta) + (K1 * rotation_y)

        #[X1,Y1,X2,Y2]
        nSlice.append(last_x)
        nSlice.append(last_y)

        #When N values have been recorded
        if len(nSlice) == N:

            ##----------Predictions------------
            #Throws nSlice to Belle
            inData = [Instance(nSlice)]
            preprocess = construct_preprocessor(inData, [standarize])
            prediction_set = preprocess(inData)
            #Prints a prediction!
            #HIGH LOW = Bicep Curl!
            #LOW HIGH = Trash!
            #print(str(nSlice[0]) + ", " + str(nSlice[1]))
            neuralOut = network.predict(inData)
            print(neuralOut)
            exponents = np.floor(np.log10(np.abs(neuralOut)))
            ##----------Total Movement Calculation--------
            temp_x = nSlice[0]
            temp_y = nSlice[1]
            total = 0

            for i in range(len(nSlice)):
Example #22
from nimblenet.activation_functions import sigmoid_function
from nimblenet.cost_functions import cross_entropy_cost
from nimblenet.learning_algorithms import RMSprop
from nimblenet.data_structures import Instance
from nimblenet.neuralnet import NeuralNet

dataset = [
    Instance([0, 0], [0]),
    Instance([1, 0], [1]),
    Instance([0, 1], [1]),
    Instance([1, 1], [0])
]

settings = {
    "n_inputs": 2,
    "layers": [(5, sigmoid_function), (1, sigmoid_function)]
}

network = NeuralNet(settings)
training_set = dataset
test_set = dataset
cost_function = cross_entropy_cost

RMSprop(
    network,  # the network to train
    training_set,  # specify the training set
    test_set,  # specify the test set
    cost_function,  # specify the cost function to calculate error
    ERROR_LIMIT=1e-2,  # define an acceptable error limit
    #max_iterations         = 100,      # continues until the error limit is reached if this argument is skipped
)
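Once RMSprop converges, prediction follows the same pattern as Examples #7 and #14; a short sketch:

# Hypothetical prediction sketch for the trained XOR network:
# outputs should land near 1 for [0, 1] and near 0 for [1, 1].
prediction_set = [Instance([0, 1]), Instance([1, 1])]
print(network.predict(prediction_set))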