Example 1
def cvWithThreshold(conf, X, y_current_tr, y_current_te, threshold):
    scores = []
    fold=1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(y_current_tr, n_folds=10, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold+=1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]

        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]

        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        _, score = nn.test(X_te, y_te, threshold)  # evaluate at the supplied threshold

        scores.append(score)
    
    print("\n--")
    f1  = np.mean([s[0] for s in scores])
    r   = np.mean([s[1] for s in scores])
    acc = np.mean([s[2] for s in scores])
    p   = np.mean([s[3] for s in scores])

    return f1, r, acc, p
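Note: cross_validation.StratifiedKFold is the pre-0.18 scikit-learn API, which took the label array and n_folds directly. On current scikit-learn the equivalent lives in sklearn.model_selection; a minimal sketch of the same fold loop, assuming X, y_current_tr and y_current_te are NumPy arrays:

from sklearn.model_selection import StratifiedKFold

# n_splits replaces n_folds; split() takes X and y and yields index arrays
skf = StratifiedKFold(n_splits=10, shuffle=False)
for TrainIndices, TestIndices in skf.split(X, y_current_tr):
    X_tr, y_tr = X[TrainIndices], y_current_tr[TrainIndices]
    X_te, y_te = X[TestIndices], y_current_te[TestIndices]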
Example 2
def getBestThresholds(X, y_current_tr, y_current_te, conf):
    assert len(X) == len(y_current_tr) == len(y_current_te), 'Number of features ({}), annotator1 labels ({}) and annotator2 labels ({}) is not equal!'.format(len(X), len(y_current_tr), len(y_current_te))
    #scores = {"F1":[], "Recall":[], "Accuracy":[], "Precision":[]}
    scores = []
    thresholds=[]


    print('Finding best thresholds...')
    fold=1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(y_current_tr, n_folds=10, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold+=1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]

        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]

        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        #get prediction
        best_t, score = nn.test(X_te, y_te)
        thresholds.append(best_t)

        scores.append(score)
    
    #scores = cross_validation.cross_val_score(maxent, features, labels, cv=10)
    print("\n--")
    
    return np.array(thresholds), np.array(scores)
Example 3
def crossval(X,y,splits, conf, t=None):
    results = []
    ts = []
    m = len(X)
    cs = [(i * m // splits, (i + 1) * m // splits) for i in range(splits)]
    for s, e in cs:
        X_tr = [X[i] for i in range(m) if i < s or i >= e]
        X_te = [X[i] for i in range(m) if s <= i < e]
        y_tr = [y[i] for i in range(m) if i < s or i >= e]
        y_te = [y[i] for i in range(m) if s <= i < e]

        # train and evaluate inside the loop, once per split
        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        best_t, res = nn.test(X_te, y_te, t)
        ts.append(best_t)
        results.append(res)

    f1s = [res[0] for res in results]
    rec = [res[1] for res in results]
    acc = [res[2] for res in results]
    pre = [res[3] for res in results]

    print '\nF1  | {:.3f}   (std {:.3f})'.format(np.average(f1s), np.std(f1s))
    print 'Rec | {:.3f}   (std {:.3f})'.format(np.average(rec), np.std(rec))
    print 'Acc | {:.3f}   (std {:.3f})'.format(np.average(acc), np.std(acc))
    print 'Pre | {:.3f}   (std {:.3f})'.format(np.average(pre), np.std(pre))

    return ts 
Example 4
def train(
    net: NeuralNet,
    train_inputs: np.ndarray,
    train_labels: np.ndarray, 
    input_converter: Callable,
    label_converter: Callable,
    epoch_count: int = 5000,
    batch_size: int = 32,
    learning_rate: float = 0.1):

    batch_iterator = BatchIterator(train_inputs, train_labels, batch_size)
    pbar = tqdm(total=epoch_count)
    for epoch in range(epoch_count):
        epoch_loss = 0
        batch = next(batch_iterator)
        for input, label in batch:
            vector_input = input_converter(input)
            vector_label = label_converter(label)
            output = net.predict(vector_input)
            epoch_loss += net.loss.loss_func(output, vector_label)
            grad = net.loss.grad_func(output, vector_label)
            net.backward(grad)
            net.gradient_step(learning_rate / batch_size)
        pbar.update()
        pbar.set_description(desc=f"Training model. Current epoch loss: {round(epoch_loss, 2)}")
Example 5
class StageRecognizer():
    def __init__(self, trained_net_path):
        self.net = NeuralNet()
        self.net.load_from_file(trained_net_path)

    def recognize_image(self, img):
        net_return = self.net.apply_over_data(extract_counter_feat(img))
        stage_number = int(round(net_return))
        stage = ''
        precision = 'strong'

        if stage_number == 1:
            stage = 'red'
            if abs(net_return - 1) > .15:  # distance of the raw output from the stage index
                precision = 'weak'

        elif stage_number == 2:
            stage = 'yellow'
            if abs(net_return - 2) > .15:
                precision = 'weak'

        elif stage_number == 3:
            stage = 'green'
            if abs(net_return - 3) > .15:
                precision = 'weak'

        return stage, precision
Example 6
def main():
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    data = scriptdir+'/../data/cwi_training/cwi_training.txt.lbl.conll'
    testdata = scriptdir+'/../data/cwi_testing/cwi_testing.gold.txt.lbl.conll'
    pickled_data = scriptdir+'/../data.pickle'
    parser = argparse.ArgumentParser()
    parser.add_argument('--threshold', '-t', type=float, help='Threshold for predicting 0/1. If not specified, the optimal threshold will first be computed as the median of all CV splits. May take a while.')
    parser.add_argument('--iterations', '-i', type=int, default=50, help='Training iterations.')
    parser.add_argument('--hidden-layers', '-l', dest='layers', required=True, type=int, nargs='+', help='List of layer sizes')
    parser.add_argument('--cv-splits', '-c', dest='splits', type=int, help='No. of crossvalidation splits. If not specified, no CV will be performed.')
    parser.add_argument('--data', '-d', default=data, help='Features and labels')
    parser.add_argument('--testdata', '-y', default=testdata,  help='Test data (not needed for crossval).')
    parser.add_argument('--verbose', '-v', dest='verbose', action='store_true', help='Print average loss at every training iteration.')
    parser.add_argument('--output', '-o', help="Output file")
    parser.add_argument('--features', '-f', dest='features', default=[], type=str, nargs='+', help='List of feature types')

    args = parser.parse_args()
    # X, y = load_pickled(args.data)
    combined_data = 'X_y_all.txt'
    cutoff = combine_data(args.data, args.testdata, combined_data)
    X, y, _ = feats_and_classify.collect_features(combined_data, True, args.features)
    X_tr = X[:cutoff]
    y_tr = y[:cutoff]
    X_te = X[cutoff:]
    y_te = y[cutoff:]
    conf = NeuralNetConfig(X=X, y=y, layers=args.layers, iterations=args.iterations, verbose=args.verbose)

    if args.splits:
        if args.threshold:
            crossval(X_tr,y_tr,args.splits, conf, t=args.threshold)
        else:
            # compute optimal threshold for each CV split
            print '### Computing optimal threshold... '
            ts = crossval(X_tr,y_tr,args.splits, conf)
            avg = np.average(ts)
            med = np.median(ts)
            print '\nThresholds for crossval splits:', ts
            print 'Mean threshold', avg
            print 'Median threshold', med
            print 'Threshold st.dev.', np.std(ts)
            # Run CV with fixed avg/median threshold
            print '\n\n### Running with avg. threshold... '
            crossval(X_tr,y_tr,args.splits, conf, t=avg)
            print '\n\n### Running with med. threshold... '
            crossval(X_tr,y_tr,args.splits, conf, t=med)
    else:
        
        nn = NN(conf)
        nn.train(X_tr,y_tr,args.iterations)
        if args.testdata:
            # X_test, y_test = load_pickled(args.testdata)
            pred = nn.get_output(X_te)
            if args.output:
                with open(args.output, 'w') as of:
                    for p in pred:
                        of.write('%f\n'%p)
            t, res = nn.test(X_te,y_te,args.threshold)
            resout = "G: %f, R: %f, A: %f, P: %f\n"%res
            sys.stderr.write('%s %f\n'%(' '.join(args.features), t))
            sys.stderr.write(resout)
Example 8
def main():
    """Testing file to show neural network can learn linearly separable
    data."""
    data = np.genfromtxt("training.csv", delimiter=',').tolist()

    shuffle(data)

    # NOTE: We have to wrap every target value into a tuple, for the
    # purpose of being able to classify n-tuples later
    targets = list((sample[-1] if sample[-1] == 1 else 0,) for sample in data)
    features = list(sample[:-1] for sample in data)
    print "Starting to train..."
    start = time()

    num_features = len(features[0])  # the target column was already stripped above
    nn = NeuralNet(num_features, max_epochs=2, default_bias="random",
                   learn_rate=.85, scale=0.1, verbose=True)
    nn.train(features, targets)
    print "Done with training. Took {0} seconds to train." \
            .format(round(time() - start, 2))

    print "Beginning with scoring..."
    start = time()
    scored_data = np.genfromtxt("data_features.csv", delimiter=",")
    correct = np.genfromtxt("data_targets.csv", delimiter=",")
    prediction = nn.score_data(scored_data)
    print "Done with scoring. Took {0} seconds to score the dataset" \
            .format(round(time() - start, 2))
    num_incorrect = sum(1 for i in xrange(len(correct)) \
                        if correct[i] != prediction[i])
    print "Total number incorrect: {0}".format(num_incorrect)
Example 9
def cross_validation_2(folds, epochs, learn_rate, n):
    averages = []
    timings = []

    
    for i in xrange(10):
        averages.append([])
        timings.append([])
        start_t = time.time()
        for j in xrange(10):
            test_vals = []
            for x in xrange(len(folds.keys())):
                test_index = x%n
                test_set = folds[test_index]

                train_set = []
                for k,v in folds.items():
                    if k != test_index: train_set += v
        
                nn = NeuralNet(9, [j+1,i+1], 1, learn_rate)
                nn.train(train_set, None, epochs)
                test_vals.append(nn.test(test_set, None, False))

            print "average: ", sum(test_vals) / len(test_vals)
            print ""


            timings[i].append(time.time()-start_t)
            averages[i].append(sum(test_vals)/len(test_vals))        

            print timings[i]
            print averages[i]
    
    return averages, timings
Example 10
def cvWithThreshold(conf, X, y_current_tr, y_current_te, threshold, folds=10):
    scores = []
    fold = 1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(
            y_current_tr, n_folds=folds, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold += 1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]

        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]

        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        _, score = nn.test(X_te, y_te, threshold)  # evaluate at the supplied threshold

        scores.append(score)

    print("\n--")
    f1 = np.mean([s[0] for s in scores])
    r = np.mean([s[1] for s in scores])
    acc = np.mean([s[2] for s in scores])
    p = np.mean([s[3] for s in scores])

    return f1, r, acc, p
Example 11
def getBestThresholds(X, y_current_tr, y_current_te, conf, folds=10):
    assert len(X) == len(y_current_tr) == len(
        y_current_te
    ), 'Number of features ({}), annotator1 labels ({}) and annotator2 labels ({}) is not equal!'.format(
        len(X), len(y_current_tr), len(y_current_te))
    #scores = {"F1":[], "Recall":[], "Accuracy":[], "Precision":[]}
    scores = []
    thresholds = []
    print('Finding best thresholds...')
    fold = 1
    for TrainIndices, TestIndices in cross_validation.StratifiedKFold(
            y_current_tr, n_folds=folds, shuffle=False, random_state=None):
        #print('\r'+str(fold), end="")
        fold += 1
        X_tr = X[TrainIndices]
        y_tr = y_current_tr[TrainIndices]

        X_te = X[TestIndices]
        y_te = y_current_te[TestIndices]

        nn = NN(conf)
        nn.train(X_tr, y_tr, conf.iterations)
        #get prediction
        best_t, score = nn.test(X_te, y_te)
        thresholds.append(best_t)

        scores.append(score)
    print("\n--")
    return np.array(thresholds), np.array(scores)
Example 12
def test1():
	X=np.array([[1,0,1,0],[1,0,1,1],[0,1,0,1]])
	y=np.array([[1],[1],[0]])
	nn = NeuralNet()
	nn.train(X, y, epochs=5000)
	pred = nn.predict(X)
	print(pred)
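test1 assumes a NeuralNet with a no-argument constructor, train(X, y, epochs=...) and predict(X); that implementation is not shown here. A minimal two-layer sigmoid network satisfying the same interface (sizes hard-coded for this 4-input, 1-output test; biases omitted for brevity):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

class NeuralNet:
    def __init__(self, n_in=4, n_hidden=8, n_out=1):
        rng = np.random.default_rng(0)
        self.W1 = rng.normal(scale=0.5, size=(n_in, n_hidden))
        self.W2 = rng.normal(scale=0.5, size=(n_hidden, n_out))

    def predict(self, X):
        return sigmoid(sigmoid(X @ self.W1) @ self.W2)

    def train(self, X, y, epochs=5000, lr=1.0):
        for _ in range(epochs):
            h = sigmoid(X @ self.W1)
            out = sigmoid(h @ self.W2)
            # backpropagate the squared error through both sigmoid layers
            d_out = (out - y) * out * (1 - out)
            d_h = (d_out @ self.W2.T) * h * (1 - h)
            self.W2 -= lr * h.T @ d_out
            self.W1 -= lr * X.T @ d_h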
Example 13
 def __init__(self):
     '''
     A container for NeuralEditElement representing the GUI components of a neural net
     '''
     self.Net = NeuralNet()
     self.NetPath = None  # set during pickle op
     self.Elements = []   # elements are UI representation of individual neurons
     self.LookupTable = {}
Example 14
def neuralnet(arglist):
    nn = NeuralNet(arglist[1])

    folds = arglist[2]
    learning_rate = arglist[3]
    epochs = arglist[4]

    nn.evaluate(folds, epochs, learning_rate)
    nn.print_results()
Example 15
    def test_predict(self):
        neural_net = NeuralNet(
            input_size=3,
            hidden_size=3,
            output_size=1,
        )

        result = neural_net.predict([1, 1, 1])
        self.assertEqual(result, 6)
Example 16
def test2():
    X1 = np.array([[0, 0],
                   [0, 1],
                   [1, 0],
                   [1, 1]])
    y1 = np.array([0, 1, 1, 0])
    nn = NeuralNet(input_layer=2, hidden_layer=4, output_layer=1)
    nn.train(X1, y1)
    pred = nn.predict(X1)
    print(pred)
Example 17
 def net_create_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     net_layers = net.return_net()
     l = len(net_layers)
     design = [1, 3, 1]
     for i in range(l):
         with self.subTest(i=i):
             layer = net_layers[i]
             self.assertEqual(layer.return_amount_of_neurons(), design[i],
                              i)
Example 18
def plot3(trainf):
    print("Running Test 3")
    nn = NeuralNet(trainf)

    #nn.evaluate(folds, epochs, learning_rate)
    nn.evaluate(10, 50, 0.1)
    x, y = nn.evaluate_roc()
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111)
    ax2.set_title('ROC for Neural Net')
    ax2.set_xlabel('False Positive Rate')
    ax2.set_ylabel('True Positive Rate')
    ax2.plot(x, y, c='b', marker='o')
Example 19
def main():
    X, Y = create2DData()
    plot2DData(X, Y)
    noOfLayers = 2  # Hidden and Output layer (Excluding the input layer)
    layerDimensions = [2, 3, 1]  # No of units in Input, Hidden, Output layer
    noOfIterations = 6000
    learningRate = 0.6
    N = NeuralNet(noOfLayers, layerDimensions)  # Create a NeuralNet object
    AL, WL, bL = N.gradientDescent(X,
                                   noOfIterations,
                                   learningRate,
                                   Y,
                                   printCost=True)
Example 20
def main():
    X, Y = generateOneDData()
    # plotOneDData(X, Y)
    noOfLayers = 2  # Hidden and Output layer (Excluding the input layer)
    layerDimensions = [1, 2, 1]  # No of units in Input, Hidden, Output layer
    noOfIterations = 5000
    learningRate = 0.6
    N = NeuralNet(noOfLayers, layerDimensions)  # Create a NeuralNet object
    AL, WL, bL = N.gradientDescent(X,
                                   noOfIterations,
                                   learningRate,
                                   Y,
                                   printCost=True)
    plotTransformedData(AL, WL, bL, Y)
Example 21
    def build(self, config):
        """Build the recurrent convolutional net."""
        nets = OrderedDict()

        nets['t_input'] = self.tensor_in  #(12,-1,512)

        nets['reshape_t_input'] = tf.reshape(
            nets['t_input'], (-1, 1, 1, 1, nets['t_input'].shape[-1]))

        nets['bar_main'] = NeuralNet(nets['reshape_t_input'],
                                     config['net_g']['bar_main'],
                                     name='bar_main')

        nets['bar_pitch_time'] = NeuralNet(nets['bar_main'].tensor_out,
                                           config['net_g']['bar_pitch_time'],
                                           name='bar_pitch_time')

        nets['bar_time_pitch'] = NeuralNet(nets['bar_main'].tensor_out,
                                           config['net_g']['bar_time_pitch'],
                                           name='bar_time_pitch')

        config_bar_merged = config['net_g']['bar_merged'].copy()

        if config_bar_merged[-1][1][0] is None:
            l = list(config_bar_merged[-1])
            l[1] = list(l[1])
            l[1][0] = config['deconv_ds']['num_track']
            l[1] = tuple(l[1])
            config_bar_merged[-1] = tuple(l)

        nets['bar_merged'] = NeuralNet(tf.concat([
            nets['bar_pitch_time'].tensor_out,
            nets['bar_time_pitch'].tensor_out
        ], -1),
                                       config_bar_merged,
                                       name='bar_merged')

        nets['t_output'] = nets['bar_merged'].tensor_out[
            ..., :config['deconv_ds']['num_pitch'], :]

        nets['reshape_t_output'] = tf.reshape(
            nets['t_output'],
            (config['deconv_ds']["batch_size"], -1,
             nets['t_output'].shape[-3] * nets['t_output'].shape[-4],
             nets['t_output'].shape[-2], nets['t_output'].shape[-1]))

        tensor_out = nets['reshape_t_output']

        return tensor_out, nets
Example 22
def cross_validation_iterative(folds, epochs, learn_rate, n, num_points):
    
    averages = []
    test_vals = []
    fold_results = {}
    timings = [0]*epochs

    for x in xrange(len(folds.keys())):
        fold_results[x] = {"train": [], "test": []}
        
        test_index = x%n
        test_set = folds[test_index]

        train_set = []
        for k,v in folds.items():
            if k != test_index: train_set += v
        
        nn = NeuralNet(9, [13,14], 1, learn_rate)
        
        start_t = time.time()
        for j in xrange(epochs):
            nn.train(train_set, None, 1)
        
            # get train and test accuracy
            train_val = nn.test(train_set, None, False)
            test_val = nn.test(test_set, None, False)
            
            # store the accuracy results
            fold_results[x]["train"].append(train_val)
            fold_results[x]["test"].append(test_val)
            timings[j] += time.time()-start_t
        print "fold complete"

    
    # compute the average for each epoch
    train_a, test_a = [], []
    for e in xrange(epochs):
        num_train, num_test = 0, 0
        for i in xrange(len(folds.keys())):
            num_train += fold_results[i]["train"][e]
            num_test += fold_results[i]["test"][e]
        train_a.append((float(num_train)/(num_points*(n-1)))*100)
        test_a.append((float(num_test)/num_points)*100)
    
    for e in xrange(epochs):
        timings[e] = float(timings[e])/len(folds.keys())
    
    print train_a, test_a, timings
    return train_a, test_a, timings
Example 23
def create_roc_data(data):
    
    epochs = 60
    nn = NeuralNet(9, [13,14], 1, .1)
    nn.train(data, None, epochs)
    ret = nn.test(data, None, False)

    results = []
    for row in ret:
        results.append((row[0][0][0],row[1][0][0],row[2][0][0]))

    print results[0]

    num_pos = len(filter(lambda x: x[1] == 1, results))
    num_neg = len(results)-num_pos

    results.sort(key=lambda x: x[-1])
    results.reverse()

    tp = 0
    fp = 0
    last_tp = 0

    roc_set = [[x[-2],x[-1]] for x in results]
    fpr_set = []
    tpr_set = []

    for i in range(1,len(roc_set)):
        if roc_set[i][1] != roc_set[i-1][1] and roc_set[i][0] != 1 and tp > last_tp:
            fpr = fp / float(num_neg)
            tpr = tp / float(num_pos)
            
            fpr_set.append(fpr)
            tpr_set.append(tpr)

            last_tp = tp
        if roc_set[i][0] == 1:
            tp += 1
        else:
            fp += 1

    fpr = fp / float(num_neg)
    tpr = tp / float(num_pos)

    fpr_set.append(fpr)
    tpr_set.append(tpr)

    return fpr_set, tpr_set
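The bookkeeping above (sorting by score, then walking thresholds while counting true and false positives) is exactly what sklearn.metrics.roc_curve computes; if scikit-learn is available, the same curve can be derived directly from the (label, score) pairs collected in results:

from sklearn.metrics import roc_curve

y_true = [r[-2] for r in results]   # labels, as used for the == 1 checks above
y_score = [r[-1] for r in results]  # network scores
fpr_set, tpr_set, thresholds = roc_curve(y_true, y_score)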
Example 24
    def __init__(self):
        self.brain = NeuralNet(settings.NUM_INPUTS, settings.NUM_OUTPUTS,
                               settings.NUM_HIDDEN,
                               settings.NEURONS_PER_HIDDEN)

        self.position = Vector2D(random() * settings.WINDOW_WIDTH,
                                 random() * settings.WINDOW_HEIGHT)

        self.look_at = Vector2D()
        self.rotation = random() * 2 * math.pi
        self.ltrack = 0.16
        self.rtrack = 0.16
        self.fitness = 0.0
        self.scale = settings.SWEEPER_SCALE
        self.closest_mine = 0
        self.speed = 0.0
Example 25
    def __init__(self, maxn=2):
        # Only supports 2 players
        self.maxn = maxn
        # nets is a series of networks mapping nplayers to corresponding nnet
        self.nets = {}
        for i in xrange(2, self.maxn + 1):
            self.nets[i] = NeuralNet(layers=[9, 5, 8, 3, 1],
                                     input_layers=[0, 1],
                                     output_layers=[3, 4],
                                     wiring=[(None, None), (None, None),
                                             ([0, 1], RELU_FUN),
                                             ([2], SOFTMAX_FUN),
                                             ([2, 3], LINEAR_FUN)],
                                     learning_rate=0.00001,
                                     L2REG=0.001,
                                     build=False)

        # To prevent overfitting, share weights between the networks
        # as much as possible
        '''
        for i in xrange(3, self.maxn+1):
            assert self.nets[i]._vweights[3].get_value().shape == self.nets[2]._vweights[3].get_value().shape
            assert self.nets[i]._vbiases[3].get_value().shape == self.nets[2]._vbiases[3].get_value().shape
            assert self.nets[i]._vweights[5].get_value().shape == self.nets[2]._vweights[5].get_value().shape
            assert self.nets[i]._vbiases[5].get_value().shape == self.nets[2]._vbiases[5].get_value().shape
            self.nets[i]._vweights[3] = self.nets[2]._vweights[3]
            self.nets[i]._vbiases[3] = self.nets[2]._vbiases[3]
            self.nets[i]._vweights[5] = self.nets[2]._vweights[5]
            self.nets[i]._vbiases[5] = self.nets[2]._vbiases[5]
            self.nets[i].rebuild()
        '''

        self.nets[2].rebuild()
        self.nets[2]._vbiases[4].set_value(np.array([1.5]))
Example 26
def load_network():
    if os.path.isfile("networks/XOR_Operator/XOR_Operator.obj"):
        global network
        network = NeuralNet.load_from_file(
            "networks/XOR_Operator/XOR_Operator.obj")
    else:
        raise ValueError("Missing network file: networks/XOR_Operator/XOR_Operator.obj")
Example 27
    def build(self, config):
        """Build the discriminator. 
        Dataset in dis_ds do config"""
        nets = OrderedDict()
        config = deepcopy(config)

        nets['t_input'] = self.tensor_in
        nets['t_seqlen'] = self.tensor_len

        config['conv_ds'] = config['dis_ds']

        nets['conv'] = ConvNet(self.tensor_in, config)

        lstm_cell = tf.nn.rnn_cell.LSTMCell(config['net_d']['rnn_features'],
                                            state_is_tuple=True,
                                            name="lstm")
        cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell], state_is_tuple=True)
        init_state = cells.zero_state(config['dis_ds']['batch_size'],
                                      tf.float32)
        nets['rnn_outputs'], nets['final_state'] = tf.nn.dynamic_rnn(
            cells,
            nets['conv'].tensor_out,
            initial_state=init_state,
            sequence_length=nets['t_seqlen'])

        nets['full_connected'] = NeuralNet(nets['rnn_outputs'][:, -1, :],
                                           config['net_d']['full_connected'],
                                           name='full_connected')

        nets['t_output'] = nets['full_connected'].tensor_out

        return nets['t_output'], nets
Example 29
 def learn_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     x = [[[-3]], [[2]], [[0]], [[-2]]]
     y = [[[1]], [[1]], [[0]], [[0]]]
     training_set = [x, y]
     J = net.learn(training_set, 5000, 0.5)
     plt.plot(J)
     plt.show()
     for x_val in ([[-3]], [[2]], [[0]], [[-2]]):
         res = net.forward_prop(x_val)
         res_a = res[0]
         a = res_a[len(res_a) - 1]  # activation of the final layer
         print(a)
         print('-----------')
Example 30
def findBin(frame):
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    pil_im = Image.fromarray(image)
    #~ img1 = pil_im.resize((basewidth, height), Image.ANTIALIAS)
    pil_im.thumbnail((256, 256), Image.ANTIALIAS)
    img2 = pil_im.convert('1')
    #~ pixels = img2.load()
    pixels1 = np.asarray(img2.getdata(), dtype=bool)  # np.bool is a deprecated alias of bool
    outstr = "outimg" + ".bmp"
    img2.save(outstr)
    array01 = []
    count = 0
    Tot = 0
    for item in pixels1:
        Tot += 1
        if not item:
            array01.append(1)
            count += 1
        else:
            array01.append(0)
    testitem = []
    testitem.append(Instance(array01, [0]))
    # load a stored network configuration
    network = NeuralNet.load_from_file("plastic122.pkl")
    arr = network.print_test(testitem)
    print('Value returned by neural network plastic: ' + str(arr[0]))

    network2 = NeuralNet.load_from_file("metal122.pkl")
    arr2 = network2.print_test(testitem)
    print('Value returned by neural network metal: ' + str(arr2[0]))

    network3 = NeuralNet.load_from_file("paper122.pkl")
    arr3 = network3.print_test(testitem)
    print('Value returned by neural network paper: ' + str(arr3[0]))

    pl = arr[0]
    me = arr2[0]
    pa = arr3[0]

    if ((pl > pa and pl > me) or pl > 0.5 or (pa < 0.42 and me < 0.09)):
        return 1  #plastic
    elif ((me > pa and me > pl) or me > 0.13):
        return 3  #metal
    else:
        return 2  #paper
Example 31
def train_xor_network():
    # two training sets
    training_one = [
        Instance([0, 0], [0]),
        Instance([0, 1], [1]),
        Instance([1, 0], [1]),
        Instance([1, 1], [0])
    ]
    training_two = [
        Instance([0, 0], [0, 0]),
        Instance([0, 1], [1, 1]),
        Instance([1, 0], [1, 1]),
        Instance([1, 1], [0, 0])
    ]

    settings = {
        # Required settings
        "n_inputs": 2,  # Number of network input signals
        "n_outputs": 1,  # Number of desired outputs from the network
        "n_hidden_layers": 1,  # Number of nodes in each hidden layer
        "n_hiddens": 2,  # Number of hidden layers in the network
        "activation_functions": [
            tanh_function, sigmoid_function
        ],  # specify activation functions per layer eg: [ hidden_layer, output_layer ]

        # Optional settings
        "weights_low": -0.1,  # Lower bound on initial weight range
        "weights_high": 0.1,  # Upper bound on initial weight range
        "save_trained_network":
        False,  # Whether to write the trained weights to disk
        "input_layer_dropout": 0.0,  # dropout fraction of the input layer
        "hidden_layer_dropout": 0.1,  # dropout fraction in all hidden layers
        "batch_size":
        0,  # 1 := online learning, 0 := entire trainingset as batch, else := batch learning size
    }

    # initialize the neural network
    global network
    network = NeuralNet(settings)

    # load a stored network configuration
    # network = NeuralNet.load_from_file( "xor_trained_configuration.pkl" )

    # start training on test set one
    network.backpropagation(
        training_one,  # specify the training set
        ERROR_LIMIT=1e-6,  # define an acceptable error limit 
        learning_rate=0.03,  # learning rate
        momentum_factor=0.95  # momentum
    )

    # Test the network by looping through the specified dataset and print the results.
    for instance in training_one:
        print "Input: {features} -> Output: {output} \t| target: {target}".format(
            features=str(instance.features),
            output=str(network.update(np.array([instance.features]))),
            target=str(instance.targets))

    # save the trained network
    network.save_to_file("networks/XOR_Operator/XOR_Operator.obj")
Example 32
 def __init__(self, models=None, blending='average', nbFeatures=4):
     self.models = models if models is not None else []  # avoid a shared mutable default argument
     self.blending = blending
     self.logR = LogisticRegression(C=10)#,multi_class='multinomial',solver='lbfgs', max_iter=10000)
     self.logRT= LogisticRegression(C=10)#,multi_class='multinomial',solver='lbfgs', max_iter=10000)
     self.nn=NeuralNet(nbFeatures) 
     self.XGB=ModifiedXGBClassifier()
     if self.blending not in ['average', 'most_confident']:
         raise Exception('Wrong blending method')
Example 33
 def __init__(self, name: str, position: list, surround=None):
     self._position = position
     self.name = name
     self.color = [rand_unif(0, 1),
                   rand_unif(0, 1),
                   rand_unif(0, 1)]
     self.field_of_vision = surround  # sense surroundings
     self._reserves = 0
     self.nn = NeuralNet(8, 4, 2)
     self.genome = []
Example 34
def label_data():
    p = excel_reader.get_data(DATA_FROM, DATA_TO, r'D:\python\projdata\data\1m.xlsx')
    log_price = np.log(p)
    #plt.plot(p)
    topology = [14, 100, 100, 50, 20, 2]
    nn = NeuralNet(topology)
    #nn = nn_factory.read('net_11_7d')
    #index = comp_index_matrix(p)
    #b, s = comp_b_s(nn, p, index)
    #plt.plot(index[0,:])
    #comp_loss(b, s, index)
    #plt.plot(p / 4000 - 1)
    #db, ds = grad_b_s(b, s, index)
    #plt.plot(ds * 10)
    #plt.show()
    lb, ls = gradient_descent(nn, p, log_price, STEPS, LEARNING_RATE)
    plt.plot(lb)
    plt.show()
    nn.save('net_final')
Example 35
def main():

    args = parser.parse_args()
    print('Options:')
    for (key, value) in iteritems(vars(args)):
        print("{:12}: {}".format(key, value))

    assert os.path.exists(args.xp_dir)

    # default value for basefile: string basis for all exported file names
    if args.out_name:
        base_file = "{}/{}".format(args.xp_dir, args.out_name)
    else:
        base_file = "{}/{}_{}_{}".format(args.xp_dir, args.dataset,
                                         args.solver, args.loss)

    # if pickle file already there, consider run already done
    if (os.path.exists("{}_weights.p".format(base_file))
            and os.path.exists("{}_results.p".format(base_file))):
        sys.exit()

    # computation device
    if 'gpu' in args.device:
        try:  # Theano-1.0.2
            theano.gpuarray.use(args.device)
        except:  # Theano-0.8.2
            theano.sandbox.cuda.use(args.device)

    np.random.seed(args.seed)

    # set save_at to n_epochs if not provided
    save_at = args.n_epochs if not args.save_at else args.save_at

    log_file = "{}/log_{}.txt".format(args.xp_dir, args.dataset)
    save_to = "{}_weights.p".format(base_file)
    weights = "{}/{}_weights.p".format(args.xp_dir, args.in_name) \
        if args.in_name else None

    # update config data
    Cfg.C.set_value(args.C)
    Cfg.batch_size = args.batch_size
    Cfg.compile_lwsvm = False
    Cfg.learning_rate.set_value(args.lr)
    Cfg.softmax_loss = (args.loss == 'ce')

    # train
    nnet = NeuralNet(dataset=args.dataset, use_weights=weights)
    nnet.train(solver=args.solver,
               n_epochs=args.n_epochs,
               save_at=save_at,
               save_to=save_to)

    # log
    nnet.log.save_to_file("{}_results.p".format(base_file))
    nnet.dump_weights("{}_final_weights.p".format(base_file))

    logged = open(log_file, "a")
    logged.write("{}\t{}\t{}: OK\n".format(args.dataset, args.solver,
                                           args.loss))
    logged.close()
Example 36
 def back_prop_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     net_layers = net.return_net()
     net_layers[0].set_matrix(numpy.array([[1, 1], [2, 2], [3, 3]]))
     net_layers[1].set_matrix(numpy.array([1, 2, 3, 4]))
     net_layers[1].set_matrix(
         numpy.reshape(net_layers[1].return_matrix(), (1, 4)))
     net.set_net(net_layers)
     x = numpy.array([[-3]])
     forward_res = net.forward_prop(x)
     res = net.back_prop(1, forward_res)
Example 37
def main():

    print "Loading in the data..."
    text = open("data/lemon_training.csv").read().split("\n")
    data = list(map(int, sample.strip().split(",")) for sample in text
                if sample.strip() != "")
    print "Shuffling..."
    shuffle(data)

    # NOTE: We have to wrap every target value into a tuple, for the
    # purpose of being able to classify n-tuples later
    targets = tuple((1 if sample[-1] == 1 else 0,) for sample in data)
    features = tuple(sample[:-1] for sample in data)
    print "Starting to train..."
    start = time()

    num_features = len(features[0])  # the target column was already stripped above
    nn = NeuralNet(num_features, max_epochs=20, learn_rate=.7, scale=3, 
                   hidden_layer=[4], verbose=True, activation=("expit", 2))
    nn.train(features, targets)

    print "Done with training. Took {0} seconds to train." \
            .format(round(time() - start, 2))

    print "Beginning with scoring..."
    start = time()
    
    scored_text = open("data/lemon_testing.csv").read().split("\n")
    testing = list(map(int, sample.strip().split(',')) for sample in scored_text
                   if sample.strip() != "")
    predictions = nn.score_data(testing)
    print "Done with scoring. Took {0} seconds to score the dataset" \
            .format(round(time() - start, 2))

    with open("results.txt", "w") as f:
        f.write("IsBadBuy\n")
        for pred in predictions:
            f.write(str(pred[0, 0]) + "\n")
Example 38
def main():
    args = get_args()
    # f1_matrix holds for every training annotator: the list of tuples of 
    # avg/med f1_row based on avg/med threshold
    f1_matrix = []
    # holds for every training annotator: the list of tuples of avg/med threshold
    t_matrix = []
    current_label_list = []
    
    f1_final = [] # holds 4-tuples of avgs over (f1_avg_avg, f1_avg_med, f1_med_avg, f1_med_med) for each training annotator
    t_final  = [] # holds 4-tuples of (t_avg_avg, t_avg_med, t_med_avg, t_med_med) for each training annotator

    #X_tr, _, v = feats_and_classify_py2.collect_features(args.parsed_file)
    with open('X_train.pickle', 'rb') as pf:
        X_tr = pickle.load(pf)
    with open('X_test.pickle', 'rb') as pf:
        X_te = pickle.load(pf)
    y_tr = feats_and_classify_py2.collect_labels_positive_threshold(args.all_annotations_file, 1)

    #X_out, _, _ = feats_and_classify_py2.collect_features(args.predictfile)
    # filter for targets
    #X_out = [x for x in X_out if not x.label == '?']

    conf = NeuralNetConfig(X=X_tr, y=y_tr, layers=args.layers, iterations=args.iterations, verbose=args.verbose)
    
    nn = NN(conf)
    nn.train(X_tr, y_tr)
    if args.threshold:
        preds = nn.predict_for_threshold(X_te, args.threshold)
    else:
        preds = nn.get_output(X_te) 
    with open(args.output, 'w') as outfile:
        for p in preds:
            #print(p)
            outfile.write(str(p))
            outfile.write('\n')
    sys.exit(0)
Example 39
 def calculate_gradient_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     net_layers = net.return_net()
     net_layers[0].set_matrix(numpy.array([[1, 1], [2, 2], [3, 3]]))
     net_layers[1].set_matrix(numpy.array([1, 2, 3, 4]))
     net_layers[1].set_matrix(
         numpy.reshape(net_layers[1].return_matrix(), (1, 4)))
     net.set_net(net_layers)
     x = numpy.array([[-3]])
     res = net.calculate_gradients([[x], [1]])
Example 41
def test(net: NeuralNet,
         inputs: np.ndarray,
         labels: np.ndarray,
         confusion_matrix: DataFrame,
         input_converter: Callable,
         output_converter: Callable,
         label_converter: Callable,
         title='') -> ModelEvaluator:
    evaluator = ModelEvaluator(confusion_matrix, title=title)
    pbar = tqdm(total=len(labels))
    for input, label in zip(inputs, labels):
        output = net.predict(input_converter(input))
        evaluator.receive(output_converter(output), label,
                          net.loss.loss_func(output, label_converter(label)))
        pbar.update()
        pbar.set_description(desc=f"Testing model")
    return evaluator
Example 43
def main():

    args = parser.parse_args()
    print('Options:')
    for (key, value) in iteritems(vars(args)):
        print("{:12}: {}".format(key, value))

    assert os.path.exists(args.xp_dir)

    Cfg.C.set_value(args.C)
    Cfg.D.set_value(args.D)
    Cfg.batch_size = args.batch_size
    Cfg.compile_lwsvm = True
    Cfg.softmax_loss = False

    # default value for basefile: string basis for all exported file names
    if args.out_name:
        base_file = "{}/{}".format(args.xp_dir, args.out_name)
    else:
        base_file = "{}/{}_lwsvm".format(args.xp_dir, args.in_name)

    # if pickle file already there, consider run already done
    if (os.path.exists("{}_final_weights.p".format(base_file))
            and os.path.exists("{}_results.p".format(base_file))):
        sys.exit()

    # computation device
    if 'gpu' in args.device:
        try:  # Theano-1.0.2
            theano.gpuarray.use(args.device)
        except:  # Theano-0.8.2
            theano.sandbox.cuda.use(args.device)

    np.random.seed(args.seed)

    log_file = "{}/log_{}.txt".format(args.xp_dir, args.dataset)
    if args.dataset != 'imagenet':
        weights = "{}/{}_weights.p".format(args.xp_dir, args.in_name)
    else:
        weights = "{}/vgg16.pkl".format(args.xp_dir)

    nnet = NeuralNet(dataset=args.dataset, use_weights=weights)
    nnet.train(solver="svm")

    # log
    nnet.log.save_to_file("{}_results.p".format(base_file))
    nnet.dump_weights("{}_final_weights.p".format(base_file))

    logged = open(log_file, "a")
    logged.write("{}\t{}\tlwsvm: OK\n".format(args.dataset, args.in_name))
    logged.close()
Example 45
 def forward_prop_test(self):
     net = NeuralNet([3, 3, 3], -1, 1)
     net_layers = net.return_net()
     net_layers[0].set_matrix(
         numpy.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]))
     net_layers[1].set_matrix(
         numpy.array([[-1, 1, -1, 1], [-2, 2, -2, 2], [3, -3, 3, -3]]))
     net.set_net(net_layers)
     x = numpy.array([[0.5], [-0.5], [-0.7]])
     res = net.forward_prop(x)
     res_a = res[0]
     a = res_a[len(res_a) - 1]
     # a = a[1:]
     expected_res = numpy.array([[0.41089559], [0.3272766], [0.746644]])
     self.assertEqual(a.all(), expected_res.all())
Example 46
def evaluate(weights_file):

    Cfg.compile_lwsvm = False
    Cfg.batch_size = 1
    Cfg.C.set_value(1e3)

    nnet = NeuralNet(dataset="imagenet", use_weights=weights_file)

    n_batches = int(50000. / Cfg.batch_size)
    make_fully_convolutional = compile_make_fully_convolutional(nnet)
    print("Weight transformation compiled.")
    make_fully_convolutional()
    print("Network has been made fully convolutional.")

    eval_fun = compile_eval_function(nnet)
    print("Evaluation function compiled")

    # full pass over the validation data:
    top1_acc = 0
    top5_acc = 0
    val_batches = 0
    count_images = 0
    for batch in tqdm(nnet.data.get_epoch_val(), total=n_batches):

        inputs, targets, _ = batch
        inputs = np.concatenate((inputs, inputs[:, :, :, ::-1]))
        top1, top5 = eval_fun(inputs, targets)
        top1_acc += top1
        top5_acc += top5
        val_batches += 1
        count_images += len(targets)

    print("(Used %i samples in validation)" % count_images)
    top1_acc *= 100. / val_batches
    top5_acc *= 100. / val_batches

    print("Top-1 validation accuracy: %g%%" % top1_acc)
    print("Top-5 validation accuracy: %g%%" % top5_acc)
Example 47
def main():
    mnist_path = os.path.join(os.getcwd(), "MNIST")
    (train_images, train_labels), (test_images,
                                   test_labels) = load_data(mnist_path)

    layers = [
        LinearLayer(32, 28**2, xavier),
        SigmoidLayer(),
        LinearLayer(32, 32, xavier),
        SigmoidLayer(),
        LinearLayer(10, 32, xavier),
        SigmoidLayer()
    ]
    net = NeuralNet(layers)

    np.seterr(over='ignore')
    train(net,
          train_images,
          train_labels,
          flatten_mnist_input,
          mnist_label_as_one_hot,
          epoch_count=1000,
          batch_size=1)

    confusion_matrix = DataFrame(np.zeros((10, 10)),
                                 index=range(10),
                                 columns=range(10))
    evaluator = test(net,
                     test_images,
                     test_labels,
                     confusion_matrix,
                     flatten_mnist_input,
                     highest_output_neuron,
                     mnist_label_as_one_hot,
                     title="POST-TRAIN")
    evaluator.plot()
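The converter helpers passed to train and test above are not shown; given the layer sizes (28**2 inputs, 10 outputs) they presumably flatten each image and one-hot encode each digit. A sketch under that assumption (the names match the calls above; the bodies are guesses):

import numpy as np

def flatten_mnist_input(image: np.ndarray) -> np.ndarray:
    # 28x28 uint8 image -> normalized 784-vector
    return image.reshape(-1) / 255.0

def mnist_label_as_one_hot(label: int) -> np.ndarray:
    one_hot = np.zeros(10)
    one_hot[int(label)] = 1.0
    return one_hot

def highest_output_neuron(output: np.ndarray) -> int:
    # index of the most active output unit = the predicted digit
    return int(np.argmax(output))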
Example 48
def main():

    # Imports and converts training and test data to useable form
    training_data = rd.read_data('data/training.txt')
    rd.hot_encode(training_data)
    training_data = rd.to_object(training_data)

    test_data = rd.read_data('data/testing.txt')
    rd.hot_encode(test_data)
    test_data = rd.to_object(test_data)

    # Initialize neural network
    net = NeuralNet([64, 90, 10], 0.25, -0.3, 0.3)

    # Train neural network with 5 epochs
    net.train_network(training_data, 5)

    # Display accuracies for training and testing dataset
    print('\nFinal Testing Accuracy:')
    print(net.accuracy(test_data))

    print('\nFinal Training Accuracy:')
    print(net.accuracy(training_data))
Example 49
    y1 = [nClasses]
    for char in y:
        y1 += [char, nClasses]

    data_y.append(np.asarray(y1, dtype=np.int32))
    data_x.append(np.asarray(x, dtype=th.config.floatX))

    if labels_len(y1) > (1 + len(x[0])) // conv_sz:
        bad_data = True
        show_all(y1, x, None, x[:, ::conv_sz], "Squished")


################################
print("Building the Network")

ntwk = NeuralNet(nDims, nClasses, midlayer, midlayerargs, log_space)

print("Training the Network")
for epoch in range(nEpochs):
    print('Epoch : ', epoch)
    for samp in range(nSamples):
        x = data_x[samp]
        y = data_y[samp]
        # if not samp % 500:            print(samp)

        if samp < nTrainSamples:
            if log_space and len(y) < 2:
                continue

            cst, pred, aux = ntwk.trainer(x, y)
            if (epoch % 10 == 0 and samp < 3) or np.isinf(cst):
Example 50
class NNPlayer(Player):
    """ A manacala player uses a neural network to store its approximation function """
    
    LEGAL_STRATEGY = [
        'greedy',
        'random', 
        'weighted', 
        'exponential'
    ]
    
    def __init__(self, id, row=6, numStones=4):
        self.setID(id)
        
        self.learn = True
        self.alpha = 0.5
        self.discount = 0.9
        
        self.rowSize = row
        self.stones = numStones
        self.movelist = [[], []]  # two independent lists to allow for playing against self ([[]] * 2 would alias a single list)
        
        self.inputSize = 2+2*self.rowSize+1
        self.Q = NeuralNet(self.inputSize, 2 * self.inputSize) # set the hidden layer 2 times the input layer
        # if exploit, choose expected optimal move
        # otherwise, explore (randomize choice)
        self.strategy = "greedy"
        
        self.recentGames = []   # holds the transcripts of the most recent games
        self.numIterations = 1
        self.numRecent = 1      # number of games to track as recent
        
    def setID(self, id):
        """ set player identity """
        if id > 1 or id < 0:
            return False
        self.id = id
        return True
    
    def setLearning(self, toLearn):
        self.learn = toLearn
        
    def setDiscountFactor(self, discount):
        """ set discount factor """
        if discount > 1 or discount < 0:
            return False
        self.discount = discount
        return True
    
    def setStrategy(self, strategy):
        """ if given strategy is supported return true """
        if strategy in NNPlayer.LEGAL_STRATEGY:
            self.strategy = strategy
            return True
        return False
    
    def getMove(self, board):
        """ chooses next move """
        state = self._getState(board)
        qVals = self._getQvals(board)
        myside = board.mySide(self.id)
        validMoves = [index for index, val in enumerate(myside) if val > 0]
        
        # if there is no action available, signal it with -1
        if len(validMoves) == 0: return -1
        # condense to only non-empty pits
        validQVals = [qVals[val] for val in validMoves]  # val is already a 0-based pit index
            
        # choose action based on strategy
        if self.strategy == NNPlayer.LEGAL_STRATEGY[0]: # greedy
            validMove = self._getBestIndex(validQVals)
        elif self.strategy == NNPlayer.LEGAL_STRATEGY[1]: # random
            validMove = self._getRandIndex(validQVals)
        elif self.strategy == NNPlayer.LEGAL_STRATEGY[2]:   # weighted
            validMove = self._getWeightedIndex(validQVals)
        elif self.strategy == NNPlayer.LEGAL_STRATEGY[3]:   #exponential
            validMove = self._getExponentialIndex(validQVals)
        else:   # greedy
            validMove = self._getBestIndex(validQVals)
        
        move = validMoves[validMove]
        self.movelist[self.id].append(Pair(state, move))
        return move
        
    def _getRandIndex(self, validQvals):
        """ chooses a move randomly with uniform distribution """
        return random.randrange(len(validQvals))  # randrange takes one bound; randint would need two
    
    def _getWeightedIndex(self, validQvals):
        """ chooses a move randomly based on predicted Q values """
        validQvals = self._makePositive(validQvals)
        sumValue = sum(validQvals)
        arrow = random.random() * sumValue
        runningSum = 0
        for index, val in enumerate(validQvals):
            runningSum += val
            if runningSum >= arrow:
                return index
        return 0
    
    def _getExponentialIndex(self, validQvals):
        """ chooses a moove randomly based on the exponential of the Q values """
        validQvals = self._makePositive(validQvals)
        validQvals = self._getExponentialValues(validQvals)
        return self._getWeightedIndex(validQvals)
    
    def _getExponentialValues(self, arr):
        """ returns an array of the exponential of the values of the array """
        return [math.exp(val) for val in arr]
    
    def _makePositive(self, arr):
        """ if array has a negtive value, its abs value is added to
        all elements of the array; half the least postive value is then
        assigned for all zero values """
        minVal = min(arr)
        if minVal < 0:
            arr = self._addToArray(-minVal, arr)  # add |min| (minVal is negative here), per the docstring
            minVal = self._getMinPos(arr)
            arr = self._addToZeros(minVal/2, arr)
        return arr
        
    def _getMinPos(self, arr):
        """ finds the minimum positive value in the array """
        minPos = sys.maxint  # renamed so the builtin min is not shadowed
        found = False
        for i in arr:
            if 0 < i < minPos:
                minPos = i
                found = True
        # the minimum positive was found
        if found: return minPos
        # array has no positive values
        else: return 0
        
    def _addToZeros(self, num, arr):
        """ adds num to all zero values in the array """
        for index, val in enumerate(arr):
            if val == 0:
                arr[index] += num
        return arr
                
    def _addToArray(self, num, arr):
        """ adds the num to all values in the array """
        return [i + num for i in arr]
    
    def _getBestIndex(self, validQvals):
        """ chooses current expected best move """
        maxVal = max(validQvals) # FIXME
        bestMoves = [index for index, move in enumerate(validQvals) if move == maxVal]

        # heuristic: choose last bucket
        return int(bestMoves[-1])
    
    def _getQvals(self, board):
        """ retrieves the q values for all actions from the current state """
        state = self._getState(board)
        # create the input to neural network
        toNN = [state[i-1] for i in range(1, self.inputSize)]
        toNN.insert(0, 0.0)
        # find expected rewards
        qVals = []
        for i in range(self.rowSize):
            toNN[0] = float(i)
            qVals.append(self.Q.calculate(toNN))
        return qVals
        
    def _getState(self, board):
        """ constructs the state as a list """
        mySide = board.mySide(self.id)
        oppSide = board.oppSide(self.id)
        myMancala = board.stonesInMyMancala(self.id)
        oppMancala = board.stonesInOppMancala(self.id)
        
        state = [] # size should be inputSize - 1
        state.append(float(myMancala))
#        for i in range(self.rowSize):
#            state.append(mySide[i])
        for my in mySide:
            state.append(float(my))
        state.append(float(oppMancala))
#        for i in range(self.rowSize):
#            state.append(oppSide[i])
        for op in oppSide:
            state.append(float(op))
        return state
    
    def gameOver(self, myScore, oppScore):
        """ notifies learner that the game is over,
        update the Q function based on win or loss and the move list """
        if not self.learn:
            return
        
        reward = float(myScore) - float(oppScore)
        self.movelist[self.id].append(reward)
        self._updateGameRecord(self.movelist[self.id])
        self.movelist[self.id] = []
        self._learnFromGameRecord()
        
    def _learnFromGameRecord(self):
        for game in self.recentGames:  # iterate directly; indexing the list by its own elements was a bug
            self._learnFromGame(game)
            
    def _learnFromGame(self, movelist):
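        """ replays one recorded game backwards, updating Q for each move """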
        reward = float(movelist[-1])
        # update Q function
        sap = movelist[-2]
        state = sap.state
        action = sap.action
        
        example = []
        example.append(float(action))
        example.extend(state[:self.inputSize-1])
        
        oldQ = self.Q.calculate(example)
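        # terminal update: Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * final_reward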
        newQ = float((1.0 - self.alpha) * oldQ + self.alpha * reward)
        self.Q.learnFromExample(example, newQ)
        
        # the pair just updated is the successor for the next (earlier) step
        nextExample = example[:]
        
        for i in range(3, len(movelist) + 1):
            sap = movelist[len(movelist) - i]
            reward = sap.reward
            state = sap.state
            action = sap.action
            
            example = []
            example.append(float(action))
            example.extend(state[:self.inputSize - 1])
        
            # find the expected rewards of the successor state
            qVals = []
            for j in range(self.rowSize):
                nextExample[0] = float(j)
                qVals.append(self.Q.calculate(nextExample))
            
            # Q-learning update: Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma * max_a' Q(s',a'))
            maxVal = max(qVals)
            oldQ = self.Q.calculate(example)
            newQ = float((1.0 - self.alpha) * oldQ + self.alpha * (reward + self.discount * maxVal))
            self.Q.learnFromExample(example, newQ)
            nextExample = example[:]
            
    def _updateGameRecord(self, moves):
        """ stores the finished game's move list among the recent games """
        self.recentGames.append(moves)
        while len(self.recentGames) > self.numRecent:
            del self.recentGames[0]
        
    def setNumRecent(self, recent):
        """ changes number of results to store as recent """
        self.numRecent = recent
        
    def setNumIterations(self, iters):
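        """ changes the number of training iterations """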
        self.numIterations = iters
        
    def saveToFile(self, filename, mode):
        self.Q.saveToFile(filename, mode)
        f = open(filename, mode)
        f.write(str(self.id)+"\n")
        f.write(str(self.rowSize)+"\n")
        f.write(str(self.stones)+"\n")
        f.write(str(self.inputSize)+"\n")
        f.write(self.strategy+"\n")
        f.write(str(self.learn)+"\n")
        f.write(str(self.alpha)+"\n")
        f.write(str(self.discount)+"\n")
        f.write(str(self.numIterations)+"\n")
        f.write(str(self.numRecent)+"\n")
        f.flush()
        f.close()
        
    def loadFromFile(self, filename):
        self.Q.loadFromFile(filename)
        f = open(filename, 'r')
        self.id = int(f.readline())
        self.rowSize = int(f.readline())
        self.stones = int(f.readline())
        self.inputSize = int(f.readline())
        self.strategy = f.readline().strip()
        self.learn = f.readline().strip() == "True"
        self.alpha = float(f.readline())
        self.discount = float(f.readline())
        self.numIterations = int(f.readline())
        self.numRecent = int(f.readline())
        f.close()
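
# A minimal save/load round trip for the learner above; `QLearner` is a
# hypothetical stand-in, since the class header is not shown in this snippet:
#
#     learner = QLearner(...)
#     learner.saveToFile("qlearner.txt", "a")
#     restored = QLearner(...)
#     restored.loadFromFile("qlearner.txt")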
Example no. 51
0
from neuralnet import NeuralNet
from extractfeatures import *
import cv2

if __name__ == "__main__":
    red = cv2.imread('/home/hoshiro/Pictures/test-img/red-light.jpg', cv2.CV_LOAD_IMAGE_COLOR)
    yellow = cv2.imread('/home/hoshiro/Pictures/test-img/yellow-light.jpg', cv2.CV_LOAD_IMAGE_COLOR)
    green = cv2.imread('/home/hoshiro/Pictures/test-img/green-light.jpg', cv2.CV_LOAD_IMAGE_COLOR)

    features_red = extract_hist_features(red)
    features_yellow = extract_hist_features(yellow)
    features_green = extract_hist_features(green)

    neural_net = NeuralNet()
    neural_net.build(len(features_red), len(features_red) / 2, 1)
    neural_net.create_data_set()
    neural_net.add_list_of_data([features_red], 1)
    neural_net.add_list_of_data([features_yellow], 2)
    neural_net.add_list_of_data([features_green], 3)
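    # class labels used for training: 1 = red, 2 = yellow, 3 = green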
    neural_net.train()

    print neural_net.apply_over_data(features_yellow)
Example no. 52
0
def plot1(trainf):
    print("Running Test 1")
    nn = NeuralNet(trainf)

    #nn.evaluate(folds, epochs, learning_rate)
    nn.evaluate(10, 25, 0.1)
    acc1 = nn.evaluate_accuracy()

    nn.clean_training_data()
    nn.evaluate(10, 50, 0.1)
    acc2 = nn.evaluate_accuracy()

    nn.clean_training_data()
    nn.evaluate(10, 75, 0.1)
    acc3 = nn.evaluate_accuracy()

    nn.clean_training_data()
    nn.evaluate(10, 100, 0.1)
    acc4 = nn.evaluate_accuracy()

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title('Accuracy vs. Epochs for Neural Net')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Accuracy')
    y = [acc1, acc2, acc3, acc4]
    x = [25, 50, 75, 100]
    ax.plot(x, y, c='b', marker='o')
Example no. 53
0
    def __init__(self, trained_net_path):
        self.net = NeuralNet()
        self.net.load_from_file(trained_net_path)
Example no. 54
0
def plot2(trainf):
    print("Running Test 2")
    nn = NeuralNet(trainf)

    #nn.evaluate(folds, epochs, learning_rate)
    nn.evaluate(5, 50, 0.1)
    acc1 = nn.evaluate_accuracy()

    nn.clean_training_data()
    nn.evaluate(10, 50, 0.1)
    acc2 = nn.evaluate_accuracy()

    nn.clean_training_data()
    nn.evaluate(15, 50, 0.1)
    acc3 = nn.evaluate_accuracy()

    nn.clean_training_data()
    nn.evaluate(20, 50, 0.1)
    acc4 = nn.evaluate_accuracy()

    nn.clean_training_data()
    nn.evaluate(25, 50, 0.1)
    acc5 = nn.evaluate_accuracy()

    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)
    ax1.set_title('Accuracy vs. Folds for Neural Net')
    ax1.set_xlabel('Folds')
    ax1.set_ylabel('Accuracy')
    y = [acc1, acc2, acc3, acc4, acc5]
    x = [5, 10, 15, 20, 25]
    ax1.plot(x, y, c='b', marker='o')
Example no. 55
0
for x, y in zip(data['x'], data['y']):
    # Insert blanks between every pair of labels and at the beginning and end of the sequence
    y1 = [blankIndex]
    for phonemeIndex in y:
        y1 += [phonemeIndex, blankIndex]
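    # e.g. y = [p1, p2]  ->  y1 = [blank, p1, blank, p2, blank]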

    data_y.append(np.asarray(y1, dtype=np.int32))
    data_x.append(np.asarray(x, dtype=theano.config.floatX))

    if labels_len(y1) > (1 + len(x[0])) // conv_sz:
        bad_data = True
        diagnostix(y1, x, None, x[:, ::conv_sz], "auxiliary name")


network = NeuralNet(num_dims, num_classes, hiddenLyr, hiddenLyrArgs, in_log_scale)

print("Training ▪▪▪")
for epoch in range(num_epochs):
    print('Epoch : ', epoch)
    for example in range(num_examples):
        x = data_x[example]
        y = data_y[example]

        if example < num_training_examples:
            if in_log_scale and len(y) < 2:
                continue

            cst, pred, aux = network.train(x, y)
            if (epoch % 12 == 0 and example < 3) or np.isinf(cst):
            print('\n▪▪▪▪▪▪▪▪▪▪▪▪▪▪ COST = {}  ▪▪▪▪▪▪▪▪▪▪▪▪▪▪ '.format(np.round(cst, 3)))
Example no. 56
0
                                        #~ # The last pair in your list describes the number of output signals
    #~ 
    #~ # Optional settings
    #~ "weights_low"           : -0.1,     # Lower bound on initial weight range
    #~ "weights_high"          : 0.1,      # Upper bound on initial weight range
    #~ "save_trained_network"  : True,    # Whether to write the trained weights to disk
    #~ 
    #~ "input_layer_dropout"   : 0.0,      # dropout fraction of the input layer
    #~ "hidden_layer_dropout"  : 0.0,      # dropout fraction in all hidden layers
#~ }

# initialize the neural network
#~ network = NeuralNet( settings )

# load a stored network configuration
network = NeuralNet.load_from_file( "network5.pkl" )

# Train the network using backpropagation
#~ backpropagation(
        #~ network,
        #~ training_one,          # specify the training set
        #~ ERROR_LIMIT     = 0.001, # define an acceptable error limit 
        #~ #max_iterations  = 100, # continues until the error limit is reached if this argument is skipped
                    #~ 
        #~ # optional parameters
        #~ learning_rate   = 0.03, # learning rate
        #~ momentum_factor = 0.4, # momentum
         #~ )
#~ 
#~ # Train the network using SciPy
#~ scipyoptimize(
Example no. 57
0
#!/usr/bin/python
from __future__ import division
from time import time
from neuralnet import NeuralNet
import numpy as np
from scipy.special import expit

"""Script to test out loading a neural network from a json file"""
nn = NeuralNet.load("weights2.json")
nn.default_act = np.vectorize(lambda x: (1 - np.exp(-2*x)) / (1 + np.exp(-2*x)))
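# note: (1 - exp(-2x)) / (1 + exp(-2x)) is algebraically tanh(x), so np.tanh would do the same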

print "Beginning with scoring..."
start = time()

scored_text = open("testing.csv").read().split("\n")
testing = list(map(int, sample.strip().split(',')) for sample in scored_text
               if sample.strip() != "")
predictions = nn.score_data(testing)
print "Done with scoring. Took {0} seconds to score the dataset" \
        .format(round(time() - start, 2))

with open("results.txt", "w") as f:
    f.write("IsBadBuy\n")
    for pred in predictions:
        if pred[0, 0] < 0:
            f.write(str(0) + "\n")
        else: f.write(str(pred[0, 0]) + "\n")

Example no. 58
0
    "n_inputs"              : 2,       # Number of network input signals
    "layers"                : [ (2, tanh_function), (1, sigmoid_function) ],
                                        # [ (number_of_neurons, activation_function) ]
                                        # The last pair in your list describes the number of output signals
    
    # Optional settings
    "weights_low"           : -0.1,     # Lower bound on initial weight range
    "weights_high"          : 0.1,      # Upper bound on initial weight range
    "save_trained_network"  : False,    # Whether to write the trained weights to disk
    
    "input_layer_dropout"   : 0.0,      # dropout fraction of the input layer
    "hidden_layer_dropout"  : 0.0,      # dropout fraction in all hidden layers
}

# initialize the neural network
network = NeuralNet( settings )

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

# Train the network using backpropagation
backpropagation(
        network,
        training_one,          # specify the training set
        ERROR_LIMIT     = 1e-3, # define an acceptable error limit 
        #max_iterations  = 100, # continues until the error limit is reached if this argument is skipped
                    
        # optional parameters
        learning_rate   = 0.03, # learning rate
        momentum_factor = 0.9, # momentum
         )
Example no. 59
0
cost_function       = cross_entropy_cost
settings            = {
    # Required settings
    "n_inputs"              : 2,       # Number of network input signals
    "layers"                : [  (5, tanh_function), (1, sigmoid_function) ],
                                        # [ (number_of_neurons, activation_function) ]
                                        # The last pair in the list dictates the number of output signals
    
    # Optional settings
    "weights_low"           : -0.1,     # Lower bound on the initial weight value
    "weights_high"          : 0.1,      # Upper bound on the initial weight value
}


# initialize the neural network
network             = NeuralNet( settings )
network.check_gradient( training_data, cost_function )
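# check_gradient presumably validates the analytic gradients against a numerical
# estimate such as (E(w + eps) - E(w - eps)) / (2 * eps) per weight; the exact
# scheme depends on this library's implementation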



## load a stored network configuration
# network           = NeuralNet.load_network_from_file( "network0.pkl" )


## Train the network using backpropagation
#backpropagation(
#        network,                        # the network to train
#        training_data,                  # specify the training set
#        test_data,                      # specify the test set
#        cost_function,                  # specify the cost function to calculate error
#        ERROR_LIMIT          = 1e-3,    # define an acceptable error limit