# Example #1 (score: 0)
def demo_autoencoder():
    """Pretrain a deep net on scaled_images.npy, unroll it into an
    autoencoder, fine-tune it with backprop, and pickle both networks."""
    # load and normalize the image data to [0, 1]
    data = np.load('scaled_images.npy')
    data = np.asarray(data, dtype='float32')
    data /= 255.0
    # set up and train the initial deepnet
    dnn = deepnet.DeepNet(
        [data.shape[1], data.shape[1], data.shape[1], data.shape[1] * 2],
        ['gaussian', 'sigmoid', 'sigmoid', 'sigmoid'])
    dnn.train(data, [5, 5, 5], 0.0025)
    # save the trained deepnet; `with open` closes the handle
    # (the old file(...) call leaked it and file() is gone in Python 3)
    with open('pretrained.pkl', 'wb') as f:
        pickle.dump(dnn, f)
    # unroll the deepnet into an autoencoder
    autoenc = unroll_network(dnn.network)
    # print out the sizes of the autoenc layers
    for layer in autoenc:
        print(layer.W.shape)
        print(layer.hbias.shape)
    # fine-tune with backprop
    mlp = backprop.NeuralNet(network=autoenc)
    trained = mlp.train(mlp.network,
                        data,
                        data,
                        max_iter=10,
                        validErrFunc='reconstruction',
                        targetCost='linSquaredErr')
    # save the fine-tuned autoencoder
    with open('network.pkl', 'wb') as f:
        pickle.dump(trained, f)
# Example #2 (score: 0)
def demo_autoencoder():
    """Pretrain a deep net on the GwData binary matrix, unroll it into an
    autoencoder, fine-tune it with backprop, and pickle both networks."""
    # load the xs (binary-coded GwData features)
    import GwData
    xs = GwData.GwData.as_binary()

    # OLD - MNIST training
    #xs = np.load('scaled_images.npy')
    #xs = np.asarray(xs, dtype='float32')
    #xs /= 255.0

    # set up and train the initial deepnet
    dnn = deepnet.DeepNet([xs.shape[1], 300, 150],
                          ['sigmoid', 'sigmoid', 'sigmoid'])
    dnn.train(xs, [500, 500], [0.25, 0.1])
    # save the trained deepnet; `with open` closes the handle
    # (the old file(...) call leaked it and file() is gone in Python 3)
    with open('pretrained.pkl', 'wb') as f:
        pickle.dump(dnn, f)
    # unroll the deepnet into an autoencoder
    autoenc = unroll_network(dnn.network)
    # fine-tune with backprop
    mlp = backprop.NeuralNet(network=autoenc)
    trained = mlp.train(mlp.network,
                        xs,
                        xs,
                        max_iter=30,
                        validErrFunc='reconstruction',
                        targetCost='linSquaredErr')
    # save the fine-tuned autoencoder
    with open('network.pkl', 'wb') as f:
        pickle.dump(trained, f)
# Example #3 (score: 0)
def run_supervised():
    """Pretrain a DBN on GwData features, train a supervised top layer on
    the DBN's output, then fine-tune the stacked network end-to-end.

    Labels come from ``gwData.labels_for("50")`` and are expanded to
    two-column one-hot rows. Saves the DBN output to output.npy.
    """
    import GwData
    gwData = GwData.GwData()

    xs = get_data(gwData)

    def flip(i):
        # complement of a 0/1 label: 0 -> 1, anything else -> 0
        if i == 0:
            return 1
        return 0

    # two-column one-hot targets: [label, complement]
    ys = [[lbl, flip(lbl)] for lbl in gwData.labels_for("50")]

    xs = np.array(xs)
    ys = np.array(ys)

    # first td_size rows train, the rest validate
    td_size = 2500

    td_x = xs[0:td_size]
    vd_x = xs[td_size:]

    dbnetwork = DeepNet([td_x.shape[1], 600, 400],
                        ['sigmoid', 'sigmoid', 'sigmoid'])
    dbnetwork.train(td_x, [1000, 1000], [0.1, 0.1])
    out = dbnetwork.run_through_network(xs)

    # supervised top layer mapping DBN features to the 2-way output
    top_layer = backprop.NeuralNet(
        layer_sizes=[out.shape[1], out.shape[1], 2],
        layer_types=['sigmoid', 'sigmoid', 'sigmoid'])

    o_td_x = out[0:td_size]
    o_vd_x = out[td_size:]

    td_y = ys[0:td_size]
    vd_y = ys[td_size:]

    top_layers = top_layer.train(top_layer.network, o_td_x, td_y, o_vd_x, vd_y,
                                 10, 'classification', 'crossEntropy', 0, 25)

    #TODO We need to train a top layer neural network from the top DBNN layer to the output
    #TODO Then we create a final network composed of the two concatenated together
    mlp = to_feed_forward_network(dbnetwork, top_layers)
    trained = mlp.train(mlp.network,
                        td_x,
                        td_y,
                        vd_x,
                        vd_y,
                        max_iter=30,
                        validErrFunc='classification',
                        targetCost='crossEntropy')

    print(out.shape)
    np.save('output.npy', out)
# Example #4 (score: 0)
    def train(self):
        """Load data via loadData.Loader, pretrain a deep net, fine-tune it
        as an autoencoder with backprop, and save both stages as .mat files.

        Reads config from self: dataDir, stream, layer_types, layer_sizes
        (-1 means "match input width"), limit/limit_num, pretrain_iter,
        pretrain_lr, backprop_iter.
        """
        # this will be replaced by calls to loadData.py
        #data = np.load('scaled_images.npy')
        #data = np.asarray(data, dtype='float32')
        #data /= 255.0
        
        # loader needs to know whether the first layer is sigmoid so it can
        # scale/binarize the inputs appropriately — TODO confirm against loadData
        l = loadData.Loader(str(self.dataDir),stream=self.stream)
        if self.layer_types[0] != 'sigmoid':
            layer1_sigmoid = False
        else:
            layer1_sigmoid = True
        l.loadData(layer1_sigmoid)
        data = l.XC
        
        # optionally subsample a random subset of limit_num rows
        if self.limit:
            inds = np.arange(data.shape[0])
            np.random.shuffle(inds)
            data = data[inds[:self.limit_num],:]
        self.stream.write(data.shape)

        # parse the layer sizes: -1 is a placeholder for the data width
        sizes = []
        for i in self.layer_sizes:
            if i == -1:
                sizes.append(data.shape[1])
            else:
                sizes.append(i)
        
        #set up and train the initial deepnet
        dnn = deepnet.DeepNet(sizes, self.layer_types, stream=self.stream)
        dnn.train(data, self.pretrain_iter, self.pretrain_lr)

        #save the trained deepnet
        #pickle.dump(dnn, file('pretrained.pkl','wb')) # Looks like pickle won't work with Qt :(
        self.save(dnn.network, 'pretrained.mat')
        
        #unroll the deepnet into an autoencoder
        autoenc = autoencoder.unroll_network(dnn.network)
        
        #fine-tune with backprop
        mlp = backprop.NeuralNet(network=autoenc, stream=self.stream)
        trained = mlp.train(mlp.network, data, data, max_iter=self.backprop_iter, 
                validErrFunc='reconstruction', targetCost='linSquaredErr')
        
        #save
        #pickle.dump(trained, file('network.pkl','wb'))
        self.save(trained, 'network.mat')
# Example #5 (score: 0)
def to_feed_forward_network(dbn, top_layers):
    """Convert a pre-trained DBN plus a supervised top network into one
    feed-forward backprop network.

    Each RBM in ``dbn.network`` is wrapped in a ``backprop.Layer`` and the
    supplied ``top_layers`` are appended to form the full network.
    (The original in-body string here was a stray copy-pasted autoencoder
    description, not a docstring — replaced with an accurate one.)
    """
    import backprop
    # NOTE(review): pairs W.T with hbias — verify this matches
    # backprop.Layer's expected weight orientation
    layers = [backprop.Layer(rbm.W.T, rbm.hbias, rbm.n_hidden, rbm.hidtype)
              for rbm in dbn.network]
    return backprop.NeuralNet(network=layers + top_layers)
# Example #6 (score: 0)
def demo_simple_autoencoder():
    """Train a single-hidden-layer autoencoder on the GwData binary matrix
    with plain backprop (no RBM pretraining) and pickle the result."""
    # load the xs (binary-coded GwData features)
    import GwData
    xs = GwData.GwData.as_binary()

    # symmetric net: input -> 500 hidden -> input reconstruction
    mlp = backprop.NeuralNet(None, [xs.shape[1], 500, xs.shape[1]],
                             ['sigmoid', 'sigmoid', 'sigmoid'])

    trained = mlp.train(mlp.network,
                        xs,
                        xs,
                        max_iter=30,
                        validErrFunc='reconstruction',
                        targetCost='linSquaredErr',
                        initialfit=0,
                        cg_iter=5)
    # save; `with open` closes the handle
    # (the old file(...) call leaked it and file() is gone in Python 3)
    with open('network.pkl', 'wb') as f:
        pickle.dump(trained, f)
# Example #7 (score: 0)
def demo_autoencoder():
    """Pretrain a deep net on scaled_images.npy with a 42-unit code layer,
    unroll it into an autoencoder, fine-tune with backprop, and pickle both
    networks."""
    # load and normalize the image data to [0, 1]
    data = np.load('scaled_images.npy')
    data = np.asarray(data, dtype='float32')
    data /= 255.0
    # set up and train the initial deepnet
    dnn = deepnet.DeepNet([data.shape[1], data.shape[1], data.shape[1], 42],
                          ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid'])
    dnn.train(data, [225, 75, 75], 0.0025)
    # save the trained deepnet; `with open` closes the handle
    # (the old file(...) call leaked it and file() is gone in Python 3)
    with open('pretrained.pkl', 'wb') as f:
        pickle.dump(dnn, f)
    # unroll the deepnet into an autoencoder
    autoenc = unroll_network(dnn.network)
    # fine-tune with backprop
    mlp = backprop.NeuralNet(network=autoenc)
    trained = mlp.train(mlp.network,
                        data,
                        data,
                        max_iter=30,
                        validErrFunc='reconstruction',
                        targetCost='linSquaredErr')
    # save the fine-tuned autoencoder
    with open('network.pkl', 'wb') as f:
        pickle.dump(trained, f)
# Example #8 (score: 0)
def visualize_results(netfile, datafile):
    """Load a pickled network and a .npy data file, run the data through
    the network, and save 10 random original/reconstruction image pairs
    as img_<index>.jpg."""
    # `with open` closes the handle (file() leaked it and is gone in Py3);
    # NOTE(review): pickle.load on an untrusted file can execute code —
    # only use with files you produced yourself
    with open(netfile, 'rb') as f:
        network = pickle.load(f)
    #network = unroll_network(dnn.network)
    data = np.load(datafile)
    data = np.asarray(data, dtype='float32')
    data /= 255.0
    mlp = backprop.NeuralNet(network=network)
    recon = mlp.run_through_network(data, network)
    inds = np.arange(recon.shape[0])
    np.random.shuffle(inds)
    # assumes images are square — TODO confirm; hoisted out of the loop
    dim = int(np.sqrt(data.shape[1]))
    for i in range(10):
        orig = data[inds[i]].reshape((dim, dim))
        rec = recon[inds[i]].reshape((dim, dim))
        plt.figure(i)
        # original on top, reconstruction below
        ax = plt.subplot(211)
        plt.imshow(orig, cmap=cm.gray)
        ax.set_yticks([])
        ax.set_xticks([])
        ax = plt.subplot(212)
        plt.imshow(rec, cmap=cm.gray)
        ax.set_yticks([])
        ax.set_xticks([])
        plt.savefig('img_%d.jpg' % (inds[i]))