Beispiel #1
0
def f(state, params):
    """Build, train, and evaluate a dense softmax classifier from a
    hyper-parameter dict, returning its test-set accuracy.

    params keys used: "arch" (hidden layer widths), "activation",
    "optimizer" (name resolved via get_optimizer), "lr", "epochs",
    "batch_size".

    NOTE(review): relies on globals defined elsewhere in the file —
    width, btrain, btrain_y, bvalid, bvalid_y, test_bdata, test_blabels,
    btest_y — plus Keras (Sequential, Dense), get_optimizer and theautil.
    `state` is unused; presumably required by the tuner's callback
    signature — TODO confirm.
    """
    bnet = Sequential()
    arch = params["arch"]
    # first hidden layer also fixes the input dimensionality
    bnet.add(Dense(arch[0], input_shape=(width,), activation=params["activation"]))
    for layer in arch[1:]:
        bnet.add(Dense(int(layer), activation=params["activation"]))
    bnet.add(Dense(4, activation="softmax"))  # 4-class output head
    optimizer = get_optimizer(params["optimizer"])
    bnet.compile(loss="categorical_crossentropy",
                 optimizer=optimizer(lr=params["lr"]), metrics=["accuracy"])
    # BUG FIX: the epochs/batch_size continuation line was indented with a
    # tab while the rest of the function uses spaces — a TabError under
    # Python 3. Re-indented with spaces.
    history = bnet.fit(btrain[0], btrain_y,
                       validation_data=(bvalid[0], bvalid_y),
                       epochs=params["epochs"], batch_size=params["batch_size"])
    classify = bnet.predict_classes(test_bdata)
    print(theautil.classifications(classify, test_blabels))
    # evaluate returns [loss, accuracy]
    score = bnet.evaluate(test_bdata, btest_y)
    print("Scores: %s" % score)
    return score[1]
Beispiel #2
0
# my solution
def in_circle(x, y, cx, cy, radius):
    """Return True when point (x, y) lies strictly inside the circle
    centred at (cx, cy) with the given radius (boundary excluded)."""
    dx = x - float(cx)
    dy = y - float(cy)
    return dx * dx + dy * dy < radius ** 2


def mysolution(pt, outer=0.3):
    """Classify pt as inside the annulus centred on (0.5, 0.5): within
    the outer circle but outside the inner radius-0.1 circle."""
    px, py = pt[0], pt[1]
    inside_outer = in_circle(px, py, 0.5, 0.5, outer)
    inside_inner = in_circle(px, py, 0.5, 0.5, 0.1)
    return inside_outer and not inside_inner


# apply my classifier
# NOTE(review): Python 2 section (print statements). apply_along_axis runs
# mysolution on every row (point) of test[0]; test[1] holds the labels.
myclasses = np.apply_along_axis(mysolution, 1, test[0])
print "My classifier!"
# correct classifications / total
print "%s / %s " % (sum(myclasses == test[1]), len(test[1]))
print theautil.classifications(myclasses, test[1])


def euclid(pt1, pt2):
    """Return the SQUARED Euclidean distance between two equal-length
    points.

    The final sqrt is deliberately omitted: sqrt is monotonic, so the
    nearest-neighbour argmin is unchanged and the scan is cheaper.
    """
    # zip replaces the range(len(...)) index loop — same pairs, clearer.
    return sum((a - b) ** 2 for a, b in zip(pt1, pt2))


def oneNN(data, labels):
    """Build a 1-nearest-neighbour classifier over (data, labels).

    Returns a closure mapping a query point to the label of the closest
    training point, using the squared Euclidean distance from euclid.

    NOTE(review): this copy of the helper was truncated — the inner
    function never returned the winning label and oneNN never returned
    the closure, so it always yielded None. Restored to match the
    complete copy of the same helper that appears later in the file.
    """
    def func(query):
        # linear scan; adequate for the small demo data sets used here
        distance = None
        label = None
        for point, point_label in zip(data, labels):
            d = euclid(query, point)
            # first candidate, or a strictly closer one, wins
            if distance is None or d < distance:
                distance = d
                label = point_label
        return label
    return func
Beispiel #3
0
########################################################################
# Part 3. Let's start using neural networks!
########################################################################

# Python 2 script: lets the user pick one of three fully-connected
# architectures, trains a theanets classifier layerwise, and reports
# accuracy on the held-out test split.
# NOTE(review): relies on globals defined elsewhere in the file —
# train/valid/test splits, mupdates, theanets, theautil, collections.

# try different combos here
archi=1;  # default choice; immediately overwritten by the prompt below
print "Architecture 1: [11,15,15,15,2]"
print "Architecture 2: [11,30,30,30,2]"
print "Architecture 3: [11,30,30,30,30,30,30,2]"
# NOTE(review): Python 2 input() eval()s the typed text — yields an int
# here, but is unsafe on untrusted input (int(raw_input(...)) is safer).
archi = input('choose one of these three architecture(input 1, 2 or 3):')
if archi==2:
	architecture = '[11,30,30,30,2]'
	net = theanets.Classifier([11,30,30,30,2])
elif archi==3:
	architecture = '[11,30,30,30,30,30,30,2]'
	net = theanets.Classifier([11,30,30,30,30,30,30,2])
else:
	# any other answer (including the default 1) gets the smallest net
	architecture = '[11,15,15,15,2]'
	net = theanets.Classifier([11,15,15,15,2])

# greedy layerwise pre-training; patience=1 stops soon after validation
# loss stops improving
net.train(train, valid, algo='layerwise', max_updates=mupdates, patience=1)
#net.train(train, valid, algo='rprop',     max_updates=mupdates, patience=1)
print architecture
print "Learner on the test set"
classify = net.classify(test[0])
# correctly classified / total on the test split
print "%s / %s " % (sum(classify == test[1]),len(test[1]))
print collections.Counter(classify)
print theautil.classifications(classify,test[1])

Beispiel #4
0
# NOTE(review): excerpt — `cnet` and its earlier layers, plus the data
# splits (train1, valid1, test_data, test_labels, test1_y), are defined
# outside this fragment.
cnet.add(Dense(4, activation="softmax"))  # 4-way softmax output head
copt = SGD(lr=0.1)
# opt = Adam(lr=0.1)
cnet.compile(loss="categorical_crossentropy",
             optimizer=copt,
             metrics=["accuracy"])
# train for 100 epochs, reporting validation metrics each epoch
history = cnet.fit(train1[0],
                   train1_y,
                   validation_data=(valid1[0], valid1_y),
                   epochs=100,
                   batch_size=16)

#score = cnet.evaluate(test_data, test_labels)
#print("Scores: %s" % score)
classify = cnet.predict_classes(test_data)
print(theautil.classifications(classify, test_labels))
# NOTE(review): evaluates against test1_y while the classification report
# above uses test_labels — presumably one-hot vs integer encodings of the
# same split; confirm they match.
score = cnet.evaluate(test_data, test1_y)
print("Scores: %s" % score)

# now that's kind of interesting, an accuracy of .3 to .5 max
# still pretty inaccurate, but 1 sample might never be enough.

print(
    "We could train longer and we might get better results, but there's ambiguity in each. As a human we might have a hard time determining them."
)
print('''
########################################################################
# Experiment 2: can we classify a sample of data?
#
#
Beispiel #5
0
# my solution
def in_circle(x, y, cx, cy, radius):
    """Strict-interior membership test for the circle centred at
    (cx, cy) with the given radius (points on the boundary fail)."""
    offset_x = x - float(cx)
    offset_y = y - float(cy)
    return offset_x ** 2 + offset_y ** 2 < radius ** 2


def mysolution(pt, outer=0.3):
    """Return True when pt falls inside the annulus around (0.5, 0.5):
    within the outer circle, outside the inner radius-0.1 circle."""
    px, py = pt[0], pt[1]
    hits_outer = in_circle(px, py, 0.5, 0.5, outer)
    hits_inner = in_circle(px, py, 0.5, 0.5, 0.1)
    return hits_outer and not hits_inner


# apply my classifier
# NOTE(review): mltest is the (points, labels) test split defined earlier
# in the file; apply_along_axis runs mysolution on each row of mltest[0].
myclasses = np.apply_along_axis(mysolution, 1, mltest[0])
print("My classifier!")
# correct classifications / total
print("%s / %s " % (sum(myclasses == mltest[1]), len(mltest[1])))
print(theautil.classifications(myclasses, mltest[1]))


def euclid(pt1, pt2):
    """Return the SQUARED Euclidean distance between two equal-length
    points.

    sqrt is intentionally skipped — it is monotonic, so nearest-neighbour
    comparisons are unaffected.
    """
    # zip pairs coordinates directly, replacing the range(len(...)) loop
    return sum((a - b) ** 2 for a, b in zip(pt1, pt2))


def oneNN(data, labels):
    """Build a 1-nearest-neighbour classifier over (data, labels).

    Returns a closure that maps a query point to the label of the
    nearest training point (squared Euclidean distance via euclid).

    NOTE(review): this copy was truncated — it never returned the label
    or the closure, so oneNN returned None. Restored to match the
    complete copy of the helper that appears later in the file.
    """
    def func(query):
        # brute-force scan; fine for the small demo data sets used here
        best_distance = None
        best_label = None
        for point, point_label in zip(data, labels):
            d = euclid(query, point)
            # first candidate, or any strictly closer one, wins
            if best_distance is None or d < best_distance:
                best_distance = d
                best_label = point_label
        return best_label
    return func
# Re-wrap each split so its labels pass through linit (defined elsewhere
# in the file); the points (index 0) are left untouched.
train = (train[0],linit(train[1]))
valid = (valid[0],linit(valid[1]))
test  = (test[0] ,linit(test[1]))

# my solution
def in_circle(x, y, cx, cy, radius):
    """Return True iff point (x, y) is strictly inside the circle
    centred at (cx, cy) with the given radius."""
    squared_distance = (x - float(cx)) ** 2 + (y - float(cy)) ** 2
    return squared_distance < radius ** 2

def mysolution(pt, outer=0.3):
    """Annulus classifier: True when pt sits between the radius-0.1 and
    radius-`outer` circles centred on (0.5, 0.5)."""
    px = pt[0]
    py = pt[1]
    in_outer_circle = in_circle(px, py, 0.5, 0.5, outer)
    in_inner_circle = in_circle(px, py, 0.5, 0.5, 0.1)
    return in_outer_circle and not in_inner_circle

# apply my classifier
# NOTE(review): Python 2 prints; test is the (points, labels) split and
# apply_along_axis runs mysolution on every row of test[0].
myclasses = np.apply_along_axis(mysolution,1,test[0])
print "My classifier!"
# correct classifications / total
print "%s / %s " % (sum(myclasses == test[1]),len(test[1]))
print theautil.classifications(myclasses,test[1])

def euclid(pt1, pt2):
    """Return the SQUARED Euclidean distance between two equal-length
    points (sqrt omitted — monotonic, so nearest-neighbour ordering is
    preserved).
    """
    # pair coordinates with zip instead of indexing via range(len(...))
    return sum((a - b) ** 2 for a, b in zip(pt1, pt2))

def oneNN(data, labels):
    """Build a 1-nearest-neighbour classifier.

    Returns a closure mapping a query point to the label of the closest
    training point, using the squared Euclidean distance from euclid.

    Fixes: inner parameter renamed from `input` (shadowed the builtin),
    `== None` replaced with `is None`, index loop replaced with zip.
    """
    def classify(query):
        # linear scan; adequate for the small demo data sets used here
        best_distance = None
        best_label = None
        for point, point_label in zip(data, labels):
            d = euclid(query, point)
            # first candidate, or a strictly closer one, wins
            if best_distance is None or d < best_distance:
                best_distance = d
                best_label = point_label
        return best_label
    return classify
Beispiel #7
0
# Python 2 experiment script: train three theanets classifiers of
# increasing depth on 8-dimensional inputs and compare accuracy.
# NOTE(review): relies on globals from outside this excerpt — linit,
# theanets, theautil, collections, mupdates, inputs, outputs and the
# train/valid splits.
train = (train[0],linit(train[1]))
valid = (valid[0],linit(valid[1]))
#test  = (test[0] ,linit(test[1]))


#first
# smallest net: 8 inputs -> 2 hidden -> 2 classes; layerwise pre-training
# followed by rprop fine-tuning, patience=1 stops early
cnet = theanets.Classifier([8,2,2])
cnet.train(train,valid, algo='layerwise', patience=1, max_updates=mupdates)
cnet.train(train,valid, algo='rprop', patience=1, max_updates=mupdates)

# accuracy on the full input set, then on the validation split
print "%s / %s " % (sum(cnet.classify(inputs) == outputs),len(outputs))
classify = cnet.classify(valid[0])
print "%s / %s " % (sum(classify == valid[1]),len(valid[1]))
print collections.Counter(classify)
print theautil.classifications(classify,valid[1])

#Second
# bottleneck shape: 8 -> 4 -> 8 -> 2
cnet = theanets.Classifier([8,4,8,2])
cnet.train(train,valid, algo='layerwise', patience=1, max_updates=mupdates)
cnet.train(train,valid, algo='rprop', patience=1, max_updates=mupdates)

print "%s / %s " % (sum(cnet.classify(inputs) == outputs),len(outputs))
classify = cnet.classify(valid[0])
print "%s / %s " % (sum(classify == valid[1]),len(valid[1]))
print collections.Counter(classify)
print theautil.classifications(classify,valid[1])

#Third
# deeper still: 8 -> 6 -> 5 -> 8 -> 2 (this experiment continues past
# the end of this excerpt)
cnet = theanets.Classifier([8,6,5,8,2])
cnet.train(train,valid, algo='layerwise', patience=1, max_updates=mupdates)