Example #1
def buildDNN(args):
    # initialize a DNN object instance
    if args.loadModel is None:
        args.layerSizes = [args.InpsSize] + args.hid + [args.OutsSize]
        net = dnn.DNN(args.layerSizes, dnn.Linear(), args.relu, None, None, args.targMean, args.targStd)
        net.dropouts = args.dropouts
    else:
        net, VariableParaDict = dnn.loadSavedNeuralNet(args.loadModel, True)
        print("Loaded previously trained model from %s." % args.loadModel, file=sys.stderr)
        if len(args.dropouts) == 1:
            if args.dropouts[0] == -1:
                # use same dropouts as in loaded model
                net.dropouts = VariableParaDict['dropouts']
                net.dropouts = net.dropouts.tolist()
            else:
                net.dropouts = [args.dropouts[0] for i in range(len(net.weights))]
        else:
            assert len(args.dropouts) == len(net.weights)
            net.dropouts = args.dropouts
        args.layerSizes = net.layerSizes
        datNames = VariableParaDict['datNames']
        for i in range(len(args.datNames)):
            assert args.datNames[i] == datNames[i]

    # set training parameters
    net.learnRates = [args.learnRate for i in range(len(net.layerSizes))]
    net.momentum = args.momentum
    net.L2Costs = [args.weightCost for i in range(len(net.layerSizes))]
    net.nesterov = False
    net.nestCompare = False
    net.rmsLims = [None for i in range(len(net.layerSizes))]
    net.realValuedVis = (args.transform != 'binarize')
    if net.realValuedVis and args.reducelearnRateVis:
        net.learnRates[0] = 0.005
    return net
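As a minimal usage sketch, the builder above might be driven from an argparse-style namespace; every attribute value below is hypothetical, chosen only to satisfy the fields the function reads:

import argparse

# Hypothetical args; the field names mirror those read by buildDNN above.
args = argparse.Namespace(
    loadModel=None,            # build from scratch instead of loading
    InpsSize=784, OutsSize=10, hid=[512, 256],
    relu=None,                 # hidden activation object expected by dnn.DNN
    targMean=0.0, targStd=1.0,
    dropouts=[0.0, 0.5, 0.5],  # one entry per weight layer
    learnRate=0.01, momentum=0.9, weightCost=1e-4,
    transform='binarize', reducelearnRateVis=False,
)
net = buildDNN(args)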
Example #2
    def __build_dnn(self,
                    nin,
                    nout,
                    nneurons,
                    nhidden,
                    activation=tf.nn.relu6,
                    outactivation=tf.nn.sigmoid,
                    seed=None):
        """
        Builds a neural network based on the parameters and stores it in
        self.dnn.
        """
        self.dnn = dnn.DNN(seed=seed)
        # input layer; the loop below adds the (nhidden - 2) hidden-to-hidden
        # layers, and the output layer comes last
        self.dnn.add_layer([nin, nneurons], dropout=0.2)
        for _ in range(2, nhidden):
            self.dnn.add_layer([nneurons, nneurons], activation, dropout=0.4)
        self.dnn.add_layer([nneurons, nout], outactivation)
        self.dnn.build()
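For example, with nhidden=4 the loop adds two hidden-to-hidden layers, so the call below (hypothetical values, made from inside the owning class) yields four weight layers in total:

# Hypothetical call from within the owning class.
self.__build_dnn(nin=784, nout=10, nneurons=128, nhidden=4, seed=42)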
Example #3
    def load(self, path):
        """
        Loads a localizer from disk.
        Args:
            path: The path from which a localizer should be loaded.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            with zipfile.ZipFile(path, 'r') as zip_:
                zip_.extractall(path=tmpdir)

            data_path = os.path.join(tmpdir, 'data')
            with open(data_path) as f:
                data = json.load(f)
            self._mean = np.asarray(data['mean'])
            self._sigma = np.asarray(data['sigma'])
            self._zscore = data['zscore']
            if 'classes' in data:
                self._classes = data['classes']
            else:
                self._classes = None

            model = os.path.join(tmpdir, 'model', 'model')
            self.dnn = dnn.DNN(model_file=model)
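A hedged usage sketch; the Localizer class name and the archive filename are assumptions, and the zip is expected to contain the 'data' JSON and 'model/model' files read above:

loc = Localizer()          # hypothetical owning class
loc.load('localizer.zip')  # restores mean/sigma/zscore, classes, and the dnn.DNN model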
Example #4
    n = len(e[e.nonzero()])
    print(n)
    return float(n) / y.shape[1]


print('loading dataset')
trainx, trainy, valix, valiy, testx = loaddata()
print('loading complete')
print('start training')
dnlist = []
result = 0
valiresult = 0
num = 30
for i in range(num):
    print(i)
    dn = dnn.DNN([[784, 'relu'], [30, 'relu'], [20, 'softmax'], [10]])
    dnlist.append(dn)
    errorrate = 1.0
    while errorrate > 0.1:
        dn.train(trainx, trainy, 5, 20, 0.2, 0.0)
        errorrate = err(dn.forward(valix), valiy)
        print('errorrate=', errorrate)
for i in range(num):
    valiresult = valiresult + dnlist[i].forward(valix)
    result = result + dnlist[i].forward(testx)
errorrate = err(valiresult, valiy)
print('vali errorrate=', errorrate)
result = result.argmax(axis=0).reshape((-1, 1))
ids = np.arange(1, 28001).reshape((-1, 1))  # avoid shadowing the built-in id()
np.savetxt('result.csv', np.hstack((ids, result)), fmt='%d', delimiter=',')
Example #5
        acc_list = []
        mse_list = []
        mse_list_lrr = []
        mse_list_krr = []
        acc_pca_list = []
        mse_pca_list = []
        mse_pca_list_lrr = []
        mse_pca_list_krr = []

        for i in para:
            print("***************************************")
            args.noise_term = i
            print(args)
            tf.reset_default_graph()
            model = dnn.DNN(args)
            acc, mse, mse_lrr, mse_krr, acc_pca, mse_pca, mse_pca_lrr, mse_pca_krr = model.train()
            acc_list.append(acc)
            mse_list.append(mse)
            mse_list_lrr.append(mse_lrr)
            mse_list_krr.append(mse_krr)

            acc_pca_list.append(acc_pca)
            mse_pca_list.append(mse_pca)
            mse_pca_list_lrr.append(mse_pca_lrr)
            mse_pca_list_krr.append(mse_pca_krr)
            #noise_term_list.append(i)
            print("***************************************")

        Matrix = {}
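The snippet breaks off right after Matrix = {}; purely as a hedged sketch, a plausible continuation would gather the per-noise-level lists into one structure for saving (the key names are assumptions, not from the source):

        # Hypothetical continuation; key names are illustrative only.
        Matrix['acc'] = acc_list
        Matrix['mse'] = mse_list
        Matrix['mse_lrr'] = mse_list_lrr
        Matrix['mse_krr'] = mse_list_krr
        Matrix['acc_pca'] = acc_pca_list
        Matrix['mse_pca'] = mse_pca_list
        Matrix['mse_pca_lrr'] = mse_pca_list_lrr
        Matrix['mse_pca_krr'] = mse_pca_list_krr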
Example #6
from flask import Flask, request, json
import dnn
import time

app = Flask(__name__, static_url_path='')

det = dnn.DNN("mmod_human_face_detector.dat")


@app.route('/')
def root():
    return app.send_static_file('index.html')

@app.route('/post', methods=['POST'])
def upload_file():
    if request.method != 'POST':
        print("ignoring non-POST request")
        return "[]"

    files = request.files.getlist("files")

    print(files)

    _files = []
    for (i, file) in enumerate(files):
        name = ("/tmp/img%d.img" % i)
        file.save(name)
        _files.append(name)
    
    print(_files)
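The handler is cut off after print(_files); a hedged sketch of a completion, assuming det exposes a per-file detection call (the detect method name and the JSON shape are assumptions, not part of the source):

    # Hypothetical completion: run the detector on each saved upload and
    # return the detections as JSON. det.detect() is an assumed API.
    results = []
    for name in _files:
        start = time.time()
        faces = det.detect(name)
        results.append({"file": name, "faces": faces, "secs": time.time() - start})
    return json.dumps(results)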
Example #7
import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

training_data = list(training_data)
# print(len(training_data))
# print(training_data[0][0].shape)

# x, y = training_data[0]
# print("Training data shape")
# print(x.shape)
# print(y.shape)
# Display the image
# from matplotlib import pyplot as plt
# plt.imshow(training_data[1000][0].reshape((28,28)), interpolation='nearest',cmap='gray')
# plt.show()

import dnn

net = dnn.DNN([784, 30, 10])
# print(net.feedForward(training_data[1000][0]))
net.sgd(training_data=training_data,
        epochs=30,
        mini_batch_size=10,
        eta=10.0,
        test_data=validation_data)
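A quick post-training check on the held-out digits; this assumes feedForward (referenced in the commented line above) returns the output activations and that test_data yields (x, y) pairs with integer labels, as mnist_loader's wrapper conventionally does:

# Hypothetical evaluation: count argmax matches on the test set.
import numpy as np

test_data = list(test_data)
correct = sum(int(np.argmax(net.feedForward(x)) == y) for x, y in test_data)
print("test accuracy: %d / %d" % (correct, len(test_data)))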