Code example #1
    def init(self, mins, maxs, num_actions, p):
        layers = []
        self.state_size = len(list(mins))
        self.num_actions = num_actions
        self.mins = np.array(mins)
        self.maxs = np.array(maxs)
        self.incorrect_target = p['incorrect_target']
        #print(str(self.state_size) + " " + str(self.num_actions))
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(self.state_size + self.num_actions))
        layers.append(
            nnet.layer(p['num_hidden'],
                       p['activation_function'],
                       initialization_scheme=p['initialization_scheme'],
                       initialization_constant=p['initialization_constant'],
                       dropout=p['dropout'],
                       use_float32=p['use_float32'],
                       momentum=p['momentum'],
                       maxnorm=p['maxnorm'],
                       step_size=p['learning_rate']))
        layers.append(
            nnet.layer(
                1,
                initialization_scheme=p['initialization_scheme_final'],
                initialization_constant=p['initialization_constant_final'],
                use_float32=p['use_float32'],
                momentum=p['momentum'],
                step_size=p['learning_rate']))
        self.net = nnet.net(layers)
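This variant computes one Q-value per forward pass, so the action must be packed into the input vector alongside the state. A minimal query sketch, assuming the net.input / feed_forward() / output API shown in the XOR examples further down this page; the [-1, 1] state scaling and the one-hot action encoding are illustrative assumptions, not part of the original:

    def q_value(self, state, action):
        # scale the state into [-1, 1] using the stored bounds (assumed convention)
        s = 2.0 * (np.array(state) - self.mins) / (self.maxs - self.mins) - 1.0
        a = np.zeros(self.num_actions)
        a[action] = 1.0  # one-hot action encoding (assumed)
        # nnet takes one column per sample, as in the XOR examples below
        self.net.input = np.concatenate((s, a)).reshape(-1, 1)
        self.net.feed_forward()
        return self.net.output[0, 0]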
Code example #2
import scipy.io

from nnet_toolkit import nnet


def load_net(filename):
    matlabdict = {}
    scipy.io.loadmat(filename, matlabdict)
    num_layers = matlabdict["num_layers"][0]
    layers = [nnet.layer(matlabdict["layer_node_count_input_1"][0])]
    for i in range(num_layers):
        l = matlabdict["layer_node_count_output_" + str(i + 1)][0]
        a = matlabdict["layer_activation_" + str(i + 1)]
        layers.append(nnet.layer(l, a))
    net = nnet.net(layers)

    for i in range(num_layers):
        net.layer[i].weights = matlabdict["layer_weights_" + str(i + 1)]
        dropout = matlabdict["layer_dropout_" + str(i + 1)][0]
        if dropout == "None":
            # print('Layer ' + str(i) + ': Dropout is none')
            net.layer[i].dropout = None
        else:
            # print('Layer ' + str(i) + ': Dropout: ' + str(dropout))
            net.layer[i].dropout = float(dropout)
    data = {}
    data["net"] = net
    data["sample_mean"] = matlabdict["sample_mean"]
    data["sample_std"] = matlabdict["sample_std"]
    data["patchsize"] = matlabdict["patchsize"]
    data["test_rate"] = matlabdict["test_rate"]
    return data
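A usage sketch for load_net, assuming a .mat file that was saved with the keys read above; the filename, the dummy batch size, and the mean/std normalization step are illustrative assumptions:

import numpy as np

data = load_net('trained_net.mat')  # hypothetical filename
net = data['net']

# dummy batch: one column per sample (the layout used in the XOR examples below);
# the row count must match layer_node_count_input_1 from the saved file
batch = np.random.random((28 * 28, 16))
net.input = (batch - data['sample_mean']) / data['sample_std']
net.feed_forward()
predictions = net.output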
Code example #3
    def init(self, state_size, num_actions, p):
        layers = []
        self.state_size = state_size
        self.num_actions = num_actions

        self.incorrect_target = p['incorrect_target']
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(self.state_size))
        if p.get('num_hidden') is not None:
            layers.append(nnet.layer(p['num_hidden'],p['activation_function'],
                                     initialization_scheme=p['initialization_scheme'],
                                     initialization_constant=p['initialization_constant'],
                                     dropout=p['dropout'],use_float32=p['use_float32'],
                                     momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate']))
        layers.append(nnet.layer(num_actions,p['activation_function_final'],
                                 initialization_scheme=p['initialization_scheme_final'],
                                 initialization_constant=p['initialization_constant_final'],
                                 use_float32=p['use_float32'],
                                 momentum=p['momentum'],step_size=p['learning_rate']))
        self.net = nnet.net(layers)

        self.do_neuron_clustering = False  #by default
        if p.get('cluster_func') is not None:
            self.net.layer[0].centroids = np.asarray(((np.random.random((self.net.layer[0].weights.shape)) - 0.5) * 2.5),np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[0].centroids[:,-1] = 1.0
            self.net.layer[0].select_func = csf.select_names[p['cluster_func']]
            print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[0].centroid_speed = p['cluster_speed']
            self.net.layer[0].num_selected = p['clusters_selected']
            self.do_neuron_clustering = True  #set a flag to indicate neuron clustering
            if p.get('do_cosinedistance'):
                self.net.layer[0].do_cosinedistance = True
                print('cosine set to true')
Code example #4
import scipy.io

from nnet_toolkit import nnet


def load_net(filename):
    matlabdict = {}
    scipy.io.loadmat(filename, matlabdict)
    num_layers = matlabdict['num_layers'][0]
    layers = [nnet.layer(matlabdict['layer_node_count_input_1'][0])]
    for i in range(num_layers):
        l = matlabdict['layer_node_count_output_' + str(i + 1)][0]
        a = matlabdict['layer_activation_' + str(i + 1)]
        layers.append(nnet.layer(l, a))
    net = nnet.net(layers)

    for i in range(num_layers):
        net.layer[i].weights = matlabdict['layer_weights_' + str(i + 1)]
        dropout = matlabdict['layer_dropout_' + str(i + 1)][0]
        if (dropout == 'None'):
            #print('Layer ' + str(i) + ': Dropout is none')
            net.layer[i].dropout = None
        else:
            #print('Layer ' + str(i) + ': Dropout: ' + str(dropout))
            net.layer[i].dropout = float(dropout)
    data = {}
    data['net'] = net
    data['sample_mean'] = matlabdict['sample_mean']
    data['sample_std'] = matlabdict['sample_std']
    data['patchsize'] = matlabdict['patchsize']
    data['test_rate'] = matlabdict['test_rate']
    return data
Code example #5
    def init(self, state_size, num_actions, p):
        layers = []
        self.state_size = state_size
        self.num_actions = num_actions

        self.incorrect_target = p['incorrect_target']
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(self.state_size))
        if p.get('num_hidden') is not None:
            layers.append(
                nnet.layer(
                    p['num_hidden'],
                    p['activation_function'],
                    initialization_scheme=p['initialization_scheme'],
                    initialization_constant=p['initialization_constant'],
                    dropout=p['dropout'],
                    use_float32=p['use_float32'],
                    momentum=p['momentum'],
                    maxnorm=p['maxnorm'],
                    step_size=p['learning_rate']))
        layers.append(
            nnet.layer(
                num_actions,
                p['activation_function_final'],
                initialization_scheme=p['initialization_scheme_final'],
                initialization_constant=p['initialization_constant_final'],
                use_float32=p['use_float32'],
                momentum=p['momentum'],
                step_size=p['learning_rate']))
        self.net = nnet.net(layers)

        self.do_neuron_clustering = False  #by default
        if p.get('cluster_func') is not None:
            self.net.layer[0].centroids = np.asarray(((np.random.random(
                (self.net.layer[0].weights.shape)) - 0.5) * 2.5), np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[0].centroids[:, -1] = 1.0
            self.net.layer[0].select_func = csf.select_names[p['cluster_func']]
            print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[0].centroid_speed = p['cluster_speed']
            self.net.layer[0].num_selected = p['clusters_selected']
            self.do_neuron_clustering = True  #set a flag to indicate neuron clustering
            if p.get('do_cosinedistance'):
                self.net.layer[0].do_cosinedistance = True
                print('cosine set to true')
Code example #6
    def init(self, mins, maxs, num_actions, p):
        layers = []
        self.state_size = len(list(mins))
        self.num_actions = num_actions
        self.mins = np.array(mins)
        self.maxs = np.array(maxs)
        self.incorrect_target = p['incorrect_target']
        #print(str(self.state_size) + " " + str(self.num_actions))
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(self.state_size + self.num_actions))
        layers.append(nnet.layer(p['num_hidden'], p['activation_function'],
                                 initialization_scheme=p['initialization_scheme'],
                                 initialization_constant=p['initialization_constant'],
                                 dropout=p['dropout'], use_float32=p['use_float32'],
                                 momentum=p['momentum'], maxnorm=p['maxnorm'],
                                 step_size=p['learning_rate']))
        layers.append(nnet.layer(1,
                                 initialization_scheme=p['initialization_scheme_final'],
                                 initialization_constant=p['initialization_constant_final'],
                                 use_float32=p['use_float32'],
                                 momentum=p['momentum'], step_size=p['learning_rate']))
        self.net = nnet.net(layers)
Code example #7
    def init(self, mins, maxs, num_actions, p):
        layers = []
        self.state_size = len(list(mins))
        self.num_actions = num_actions
        self.mins = np.array(mins)
        self.maxs = np.array(maxs)
        self.incorrect_target = p['incorrect_target']
        print(str(self.state_size) + " " + str(self.num_actions))
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(self.state_size + self.num_actions))
        layers.append(nnet.layer(p['num_hidden'],p['activation_function'],
                                 initialization_scheme=p['initialization_scheme'],
                                 initialization_constant=p['initialization_constant'],
                                 dropout=p['dropout'],use_float32=p['use_float32'],
                                 momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate']))
        layers.append(nnet.layer(1,
                                 initialization_scheme=p['initialization_scheme_final'],
                                 initialization_constant=p['initialization_constant_final'],
                                 use_float32=p['use_float32'],
                                 momentum=p['momentum'],step_size=p['learning_rate']))

        self.net = nnet.net(layers)

        self.do_neuron_clustering = False  #by default
        if p.get('cluster_func') is not None:
            #TODO: Make sure the centroids cover the input space appropriately
            self.net.layer[0].centroids = np.asarray(((np.random.random((self.net.layer[0].weights.shape)) - 0.5) * 2.25),np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[0].centroids[:,-1] = 1.0
            #print(str(self.net.layer[0].centroids.shape))
            #print(str(self.net.layer[0].centroids))
            self.net.layer[0].select_func = csf.select_names[p['cluster_func']]
            #print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[0].centroid_speed = p['cluster_speed']
            self.net.layer[0].num_selected = p['clusters_selected']
            self.do_neuron_clustering = True  #set a flag to log neurons that were used for clustering
            if p.get('do_cosinedistance'):
                self.net.layer[0].do_cosinedistance = True
                print('cosine set to true')
Code example #8
(sample_data,class_data) = load_data(range(10),"training",p)
train_size = sample_data.shape[0]

#(test_data,test_class) = load_data(range(10),"testing",p)
#test_size = test_data.shape[0]

num_hidden = p['num_hidden']

training_epochs = p['training_epochs']

minibatch_size = p['minibatch_size']


layers = []
layers.append(nnet.layer(28*28))
layers.append(nnet.layer(p['num_hidden'],p['activation_function'],select_func=p['select_func'],
                         select_func_params=p['num_selected_neurons'],
                         initialization_scheme=p['initialization_scheme'],
                         initialization_constant=p['initialization_constant'],
                         dropout=p['dropout'],sparse_penalty=p['sparse_penalty'],
                         sparse_target=p['sparse_target'],use_float32=p['use_float32'],
                         momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate']))

#Add 2nd and 3rd hidden layers if there are parameters indicating that we should
if 'num_hidden2' in p:
    layers.append(nnet.layer(p['num_hidden2'],p['activation_function2'],select_func=p['select_func2'],
                             select_func_params=p['num_selected_neurons2'],
                             initialization_scheme=p['initialization_scheme2'],
                             initialization_constant=p['initialization_constant2'],
                             dropout=p['dropout2'],sparse_penalty=p['sparse_penalty2'],
Code example #9
File: nnet_qsa.py (project: chenbokaix250/pyrlcade)
    def init(self, state_size, num_actions, p):
        layers = []
        self.state_size = state_size
        self.num_actions = num_actions
        #self.mins = np.array(mins)
        #self.maxs = np.array(maxs)
        #self.divs = np.array(divs)

        #self.size = self.maxs - self.mins
        #self.size = self.size + self.divs
        #self.size = self.size/self.divs

        #self.arr_mins = (np.zeros(self.size.shape)).astype(np.int64)
        #self.arr_maxs = (self.size - np.ones(self.size.shape)).astype(np.int64)


        self.incorrect_target = p['incorrect_target']
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(self.state_size + self.num_actions))
        if p.get('num_hidden') is not None:
            layers.append(nnet.layer(p['num_hidden'],p['activation_function'],
                                     initialization_scheme=p['initialization_scheme'],
                                     initialization_constant=p['initialization_constant'],
                                     dropout=p.get('dropout',None),use_float32=p['use_float32'],
                                     momentum=p['momentum'],maxnorm=p.get('maxnorm',None),step_size=p['learning_rate'],rms_prop_rate=p.get('rms_prop_rate',None)))

        layers.append(nnet.layer(1,p['activation_function_final'],
                                 initialization_scheme=p['initialization_scheme_final'],
                                 initialization_constant=p['initialization_constant_final'],
                                 use_float32=p['use_float32'],
                                 momentum=p['momentum'],step_size=p['learning_rate'],rms_prop_rate=p.get('rms_prop_rate',None)))
        self.net = nnet.net(layers)

        self.do_neuron_clustering = False  #by default
        if p.get('cluster_func') is not None:
            self.cluster_func = p['cluster_func']
            self.net.layer[0].centroids = np.asarray(((np.random.random((self.net.layer[0].weights.shape)) - 0.5) * 2.5),np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[0].centroids[:,-1] = 1.0
            #print(str(self.net.layer[0].centroids.shape))
            #print(str(self.net.layer[0].centroids))
            self.net.layer[0].select_func = csf.select_names[p['cluster_func']]
            print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[0].centroid_speed = p.get('cluster_speed',1.0)
            self.net.layer[0].num_selected = p['clusters_selected']
            self.do_neuron_clustering = True  #set a flag to indicate neuron clustering
            if p.get('do_cosinedistance'):
                self.net.layer[0].do_cosinedistance = True
                print('cosine set to true')
            #decay for balancing learning and moving centroids
            if p.get('zeta_decay') is not None:
                self.net.layer[0].zeta_matrix = np.ones(self.net.layer[0].weights.shape,dtype=np.float32)
                self.net.layer[0].zeta = 1.0
                self.zeta_decay = p['zeta_decay']

        self.max_update = 0.0
        self.grad_clip = p.get('grad_clip',None)

        if p.get('_lambda') is not None:
            self._lambda = p['_lambda']
            self.gamma = p['gamma']
            for l in self.net.layer:
                l.eligibility = l.gradient
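One detail worth flagging in the _lambda branch: l.eligibility = l.gradient binds both attributes to the same array object, so any in-place update of the gradient also mutates the eligibility trace. Whether the toolkit relies on that aliasing is not visible from this excerpt; if independent storage were intended, a copy would decouple them:

            for l in self.net.layer:
                l.eligibility = l.gradient.copy()  # independent buffer instead of an alias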
Code example #10
vx_axis = [p['axis_x_min'], p['axis_x_max']]
vy_axis = [p['axis_y_min'], p['axis_y_max']]

num_classes = p['num_classes']
examples_per_class = p['examples_per_class']
spread = p['spread']

img_width = p['img_width']
img_height = p['img_height']

frameskip = p['frameskip']

num_hidden = p['num_hidden']

layers = []
layers.append(nnet.layer(2))
layers.append(nnet.layer(p['num_hidden'],p['activation_function'],select_func=p['select_func'],select_func_params=p['num_selected_neurons'],dropout=p['dropout']))

#Add 2nd and 3rd hidden layers if there are parameters indicating that we should
if 'num_hidden2' in p:
    layers.append(nnet.layer(p['num_hidden2'],p['activation_function2'],select_func=p['select_func2'],select_func_params=p['num_selected_neurons2'],dropout=p['dropout2']))
if 'num_hidden3' in p:
    layers.append(nnet.layer(p['num_hidden3'],p['activation_function3'],select_func=p['select_func3'],select_func_params=p['num_selected_neurons3'],dropout=p['dropout3']))
layers.append(nnet.layer(num_classes,p['activation_function_final']))

learning_rate = p['learning_rate']


#generate random classes
sample_data = np.zeros([2, num_classes * examples_per_class])
class_data = np.ones([num_classes, num_classes * examples_per_class]) * -1.0
Code example #11
    f = h5.File(p['data_dir'] + 'mnist_initial_centroids_' + str(num_centroids) + '.h5py', 'w')
    f['centroids'] = centroids
    f.close()

#now we have a k-means clustered set of centroids.

#create an autoencoder network
if 'nodes_per_group' in p:
    nodes_per_group = p['nodes_per_group']
else:
    nodes_per_group = None

rms_prop_rate = p.get('rms_prop_rate',None)

layers = []
layers.append(nnet.layer(28*28))
layers.append(nnet.layer(num_centroids,p['activation_function'],
                         nodes_per_group=nodes_per_group,
                         initialization_scheme=p['initialization_scheme'],
                         initialization_constant=p['initialization_constant'],
                         dropout=p['dropout'],sparse_penalty=p['sparse_penalty'],
                         sparse_target=p['sparse_target'],use_float32=p['use_float32'],
                         momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate'],
                         rms_prop_rate=rms_prop_rate))
layers.append(nnet.layer(28*28,
                         initialization_scheme=p['initialization_scheme_final'],
                         initialization_constant=p['initialization_constant_final'],
                         use_float32=p['use_float32'],
                         momentum=p['momentum_final'],step_size=p['learning_rate'],
                         rms_prop_rate=rms_prop_rate))
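Since the final layer mirrors the 28*28 input, this network is an autoencoder and its training target is the input itself. A minimal reconstruction-training sketch using the feed-forward API from the XOR examples on this page; sample_data and the epoch count are illustrative assumptions:

net = nnet.net(layers)
net.input = sample_data  # assumed: one 28*28 column per sample
for epoch in range(100):  # illustrative epoch count
    net.feed_forward()
    net.error = net.output - sample_data  # reconstruction error: the target is the input
    net.back_propagate()
    net.update_weights()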
Code example #12
    def init(self, state_size, num_actions, p):
        layers = []

        self.do_trig_transform = False
        if (p.get('do_trig_transform', True)):
            self.do_trig_transform = True

        self.state_size = state_size
        self.num_actions = num_actions

        self.action_dupe_count = p.get('action_dupe_count', 1)

        self.do_recurrence = False
        if (p['do_recurrence']):
            input_size = self.state_size + self.num_actions * self.action_dupe_count + p[
                'num_hidden']
            self.do_recurrence = True
        else:
            input_size = self.state_size + self.num_actions * self.action_dupe_count

        self.learning_rate = p['learning_rate']

        print("state size        : " + str(self.state_size))
        print("num actions       : " + str(self.num_actions))
        print("action dupe count : " + str(self.action_dupe_count))
        print("num hidden        : " + str(p['num_hidden']))
        print("input size        : " + str(input_size))

        self.incorrect_target = p['incorrect_target']
        print(str(self.state_size) + " " + str(self.num_actions))
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(input_size))
        layers.append(
            nnet.layer(p['num_hidden'],
                       p['activation_function'],
                       initialization_scheme=p['initialization_scheme'],
                       initialization_constant=p['initialization_constant'],
                       dropout=p['dropout'],
                       use_float32=p['use_float32'],
                       momentum=p['momentum'],
                       maxnorm=p['maxnorm'],
                       step_size=p['learning_rate']))
        if p.get('num_hidden2') is not None:
            layers.append(
                nnet.layer(
                    p['num_hidden2'],
                    p['activation_function2'],
                    initialization_scheme=p['initialization_scheme2'],
                    initialization_constant=p['initialization_constant2'],
                    dropout=p['dropout2'],
                    use_float32=p['use_float32'],
                    momentum=p['momentum2'],
                    maxnorm=p['maxnorm2'],
                    step_size=p['learning_rate2']))

        layers.append(
            nnet.layer(
                1,
                initialization_scheme=p['initialization_scheme_final'],
                initialization_constant=p['initialization_constant_final'],
                use_float32=p['use_float32'],
                momentum=p['momentum'],
                step_size=p['learning_rate']))

        self.net = nnet.net(layers)

        if p.get('cluster_func') is not None:
            print("layer 0 has cluster func")
            self.cluster_func = p['cluster_func']
            #TODO: Make sure the centroids cover the input space appropriately
            self.net.layer[0].centroids = np.asarray(((np.random.random(
                (self.net.layer[0].weights.shape)) - 0.5) * 2.25), np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[0].centroids[:, -1] = 1.0
            #print(str(self.net.layer[0].centroids.shape))
            #print(str(self.net.layer[0].centroids))
            self.net.layer[0].select_func = csf.select_names[p['cluster_func']]
            #print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[0].centroid_speed = p['cluster_speed']
            self.net.layer[0].num_selected = p['clusters_selected']
            if p.get('do_cosinedistance'):
                self.net.layer[0].do_cosinedistance = True
                print('cosine set to true')
            if p.get('zeta_decay') is not None:
                self.net.layer[0].zeta_matrix = np.ones(
                    self.net.layer[0].weights.shape, dtype=np.float32)
                self.net.layer[0].zeta = 1.0
                self.zeta_decay = p['zeta_decay']

        if p.get('cluster_func2') is not None:
            print("layer 1 has cluster func")
            self.cluster_func2 = p['cluster_func2']
            #TODO: Make sure the centroids cover the input space appropriately
            self.net.layer[1].centroids = np.asarray(((np.random.random(
                (self.net.layer[1].weights.shape)) - 0.5) * 2.25), np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[1].centroids[:, -1] = 1.0
            #print(str(self.net.layer[0].centroids.shape))
            #print(str(self.net.layer[0].centroids))
            self.net.layer[1].select_func = csf.select_names[
                p['cluster_func2']]
            #print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[1].centroid_speed = p['cluster_speed2']
            self.net.layer[1].num_selected = p['clusters_selected2']
            if p.get('do_cosinedistance'):
                self.net.layer[1].do_cosinedistance = True
                print('cosine set to true')
            if p.get('zeta_decay2') is not None:
                self.net.layer[1].zeta_matrix = np.ones(
                    self.net.layer[1].weights.shape, dtype=np.float32)
                self.net.layer[1].zeta = 1.0
                self.zeta_decay = p['zeta_decay2']

        self.do_full_zeta = p.get('do_full_zeta', False)

        if p.get('_lambda') is not None:
            self._lambda = p['_lambda']
            self.gamma = p['gamma']
            for l in self.net.layer:
                l.eligibility = l.gradient

        print("Network Size Check:")
        print("  weight 0 size: " + str(self.net.layer[0].weights.shape))
        if (len(self.net.layer) > 1):
            print("  weight 1 size: " + str(self.net.layer[1].weights.shape))
        if (len(self.net.layer) > 2):
            print("  weight 2 size: " + str(self.net.layer[2].weights.shape))
        if (len(self.net.layer) > 3):
            print("  weight 3 size: " + str(self.net.layer[3].weights.shape))
Code example #13

import time
import numpy as np

#from nnet_toolkit import nnet_cuda as nnet
from nnet_toolkit import nnet

layers = [nnet.layer(2),
          nnet.layer(128, 'sigmoid'),
          nnet.layer(1, 'sigmoid')]
#layers = [nnet_toolkit.layer(2),nnet_toolkit.layer(256,'linear_rectifier'),nnet_toolkit.layer(128,'linear_rectifier'),nnet_toolkit.layer(64,'linear_rectifier'),nnet_toolkit.layer(32,'linear_rectifier'),nnet_toolkit.layer(1,'squash')];

training_data = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
training_out = np.array([0, 1, 1, 0])

#net = nnet.net_cuda(layers,step_size=.1);
net = nnet.net(layers, step_size=.1)

net.input = training_data
t = time.time()
for i in range(100000):
    net.feed_forward()
    net.error = net.output - training_out
    net.back_propagate()
    net.update_weights()
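After the loop, timing and fit can be checked; a small hedged addition (t was set before the loop, and the expected outputs follow from the XOR targets above):

print('elapsed: ' + str(time.time() - t))
print('outputs: ' + str(net.output))  # should approach [0, 1, 1, 0]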
Code example #14
    centroids = np.array(f['centroids'])
    print('centroid data loaded. Shape: ' + str(centroids.shape))
    f.close()
#if clusters selected is the same as the number of neurons then we can skip this step (since everything will always be selected)
elif(p['clusters_selected'] != p['num_centroids']):
    centroids = do_kmeans(sample_data)
    f = h5.File(p['data_dir'] + 'mnist_initial_centroids_' + str(num_centroids) + '.h5py','w')
    f['centroids'] = centroids
    f.close()

#now we have a k-means clustered set of centroids.

#create a classifier network

layers = []
layers.append(nnet.layer(input_size))

if 'do_logistic' not in p or p['do_logistic'] == False:
    layers.append(nnet.layer(num_centroids,p['activation_function'],
                             initialization_scheme=p['initialization_scheme'],
                             initialization_constant=p['initialization_constant'],
                             dropout=p['dropout'],sparse_penalty=p['sparse_penalty'],
                             sparse_target=p['sparse_target'],use_float32=p['use_float32'],
                             momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate']))

layers.append(nnet.layer(10,p['activation_function_final'],
                         initialization_scheme=p['initialization_scheme_final'],
                         initialization_constant=p['initialization_constant_final'],
                         use_float32=p['use_float32'],
                         momentum=p['momentum_final'],step_size=p['learning_rate']))
Code example #15
print('total number of samples: ' + str(sample_list.shape[0]))
print('creating testing set')

train_size = sample_list.shape[0]

print('training size: ' + str(sample_list.shape[0]))
print('test size: ' + str(sample_list_test.shape[0]))

class_type = np.sum(class_list, axis=0, dtype=np.float64)
print('class type count: ' + str(class_type))

class_type_test = np.sum(class_list_test, axis=0, dtype=np.float64)
print('class test type count: ' + str(class_type_test))

print('initializing network...')
layers = [nnet.layer(inputsize)]

for i in range(len(hidden_sizes)):
    l = hidden_sizes[i]
    a = hidden_activations[i]
    layers.append(nnet.layer(l, a))

layers.append(nnet.layer(3, 'squash'))

net = nnet.net(layers, step_size=step_size, dropout=dropout_percentage)

print('beginning training...')
save_time = time.time()
epoch_time = time.time()
for i in range(training_epochs):
    minibatch_count = int(train_size / minibatch_size)
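The excerpt stops as the epoch loop begins. A plausible continuation of the minibatch loop, restating the two loop lines above and consistent with the feed-forward / back-propagate API in the XOR examples on this page; the rows-are-samples slicing and the transposes are assumptions about the data layout:

for i in range(training_epochs):
    minibatch_count = int(train_size / minibatch_size)
    for j in range(minibatch_count):
        lo, hi = j * minibatch_size, (j + 1) * minibatch_size
        net.input = sample_list[lo:hi].T  # assumed: rows are samples
        net.feed_forward()
        net.error = net.output - class_list[lo:hi].T
        net.back_propagate()
        net.update_weights()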
Code example #16
print('total number of samples: ' + str(sample_list.shape[0]))
print('creating testing set')

train_size = sample_list.shape[0]

print('training size: ' + str(sample_list.shape[0]))
print('test size: ' + str(sample_list_test.shape[0]))

class_type = np.sum(class_list, axis=0, dtype=np.float64)
print('class type count: ' + str(class_type))

class_type_test = np.sum(class_list_test, axis=0, dtype=np.float64)
print('class test type count: ' + str(class_type_test))

print('initializing network...')
layers = [nnet.layer(inputsize)]

for i in range(len(hidden_sizes)):
    l = hidden_sizes[i]
    a = hidden_activations[i]
    layers.append(nnet.layer(l, a))

layers.append(nnet.layer(3, 'squash'))

net = nnet.net(layers, step_size=step_size, dropout=dropout_percentage)

print('beginning training...')
save_time = time.time()
epoch_time = time.time()
for i in range(training_epochs):
    minibatch_count = int(train_size / minibatch_size)
Code example #17
img_height = p['img_height']

#the axis for the view
vx_axis = [p['axis_x_min'], p['axis_x_max']]
vy_axis = [p['axis_y_min'], p['axis_y_max']]

num_classes = p['num_classes']
num_hidden = p['num_hidden']

training_epochs = p['training_epochs']
total_epochs = p['total_epochs']

#minibatch_size = p['minibatch_size']

layers = []
layers.append(nnet.layer(2))
layers.append(nnet.layer(p['num_hidden'],p['activation_function'],
              dropout=p['dropout'],sparse_penalty=p['sparse_penalty'],
              sparse_target=p['sparse_target'],use_float32=p['use_float32'],
              momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate']))

layers.append(nnet.layer(num_classes,p['activation_function_final'],use_float32=p['use_float32'],
              momentum=p['momentum_final'],step_size=p['learning_rate_final']))

#init net
net = nnet.net(layers)

if p.get('cluster_func') is not None:
    #net.layer[0].centroids = np.asarray((((np.random.random((net.layer[0].weights.shape)) - 0.5)*2.0)),np.float32)
    net.layer[0].centroids = np.asarray(np.zeros(net.layer[0].weights.shape),np.float32)
#set bias to 1
Code example #18

import time
import numpy as np

#from nnet_toolkit import nnet_cuda as nnet
from nnet_toolkit import nnet

#layers = [nnet_toolkit.layer(2),nnet_toolkit.layer(128,'squash'),nnet_toolkit.layer(1,'squash')];
layers = [nnet.layer(400), nnet.layer(128, 'sigmoid'), nnet.layer(3, 'sigmoid')]

#training_data = np.array([[0,0,1,1],[0,1,0,1]]);
#training_out = np.array([0,1,1,0]);

# random data sized for a throughput benchmark: 400 inputs, 3 outputs, 500 samples (one column each)
training_data = np.random.random((400, 500))
training_out = np.random.random((3, 500))

#net = nnet.net_cuda(layers,step_size=.1);
net = nnet.net(layers, step_size=.1)

net.input = training_data
t = time.time()
for i in range(100000):
    net.feed_forward()
    net.error = net.output - training_out
    net.back_propagate()
    net.update_weights()
Code example #19
    def init(self, state_size, num_actions, p):
        layers = []
        self.state_size = state_size
        self.num_actions = num_actions
        #self.mins = np.array(mins)
        #self.maxs = np.array(maxs)
        #self.divs = np.array(divs)

        #self.size = self.maxs - self.mins
        #self.size = self.size + self.divs
        #self.size = self.size/self.divs

        #self.arr_mins = (np.zeros(self.size.shape)).astype(np.int64)
        #self.arr_maxs = (self.size - np.ones(self.size.shape)).astype(np.int64)

        self.incorrect_target = p['incorrect_target']
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(self.state_size + self.num_actions))
        if p.get('num_hidden') is not None:
            layers.append(
                nnet.layer(
                    p['num_hidden'],
                    p['activation_function'],
                    initialization_scheme=p['initialization_scheme'],
                    initialization_constant=p['initialization_constant'],
                    dropout=p.get('dropout', None),
                    use_float32=p['use_float32'],
                    momentum=p['momentum'],
                    maxnorm=p.get('maxnorm', None),
                    step_size=p['learning_rate'],
                    rms_prop_rate=p.get('rms_prop_rate', None)))

        layers.append(
            nnet.layer(
                1,
                p['activation_function_final'],
                initialization_scheme=p['initialization_scheme_final'],
                initialization_constant=p['initialization_constant_final'],
                use_float32=p['use_float32'],
                momentum=p['momentum'],
                step_size=p['learning_rate'],
                rms_prop_rate=p.get('rms_prop_rate', None)))
        self.net = nnet.net(layers)

        self.do_neuron_clustering = False  #by default
        if p.get('cluster_func') is not None:
            self.cluster_func = p['cluster_func']
            self.net.layer[0].centroids = np.asarray(((np.random.random(
                (self.net.layer[0].weights.shape)) - 0.5) * 2.5), np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[0].centroids[:, -1] = 1.0
            #print(str(self.net.layer[0].centroids.shape))
            #print(str(self.net.layer[0].centroids))
            self.net.layer[0].select_func = csf.select_names[p['cluster_func']]
            print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[0].centroid_speed = p.get('cluster_speed', 1.0)
            self.net.layer[0].num_selected = p['clusters_selected']
            self.do_neuron_clustering = True  #set a flag to indicate neuron clustering
            if p.get('do_cosinedistance'):
                self.net.layer[0].do_cosinedistance = True
                print('cosine set to true')
            #decay for balancing learning and moving centroids
            if p.get('zeta_decay') is not None:
                self.net.layer[0].zeta_matrix = np.ones(
                    self.net.layer[0].weights.shape, dtype=np.float32)
                self.net.layer[0].zeta = 1.0
                self.zeta_decay = p['zeta_decay']

        self.max_update = 0.0
        self.grad_clip = p.get('grad_clip', None)

        if p.get('_lambda') is not None:
            self._lambda = p['_lambda']
            self.gamma = p['gamma']
            for l in self.net.layer:
                l.eligibility = l.gradient
Code example #20
    def init(self, state_size, num_actions, p):
        layers = []

        self.do_trig_transform = False
        if p.get('do_trig_transform', True):
            self.do_trig_transform = True


        self.state_size = state_size
        self.num_actions = num_actions

        self.action_dupe_count = p.get('action_dupe_count',1)

        self.do_recurrence = False
        if(p['do_recurrence']):
            input_size = self.state_size + self.num_actions*self.action_dupe_count + p['num_hidden']
            self.do_recurrence = True
        else:
            input_size = self.state_size + self.num_actions*self.action_dupe_count

        self.learning_rate = p['learning_rate']

        print("state size        : " + str(self.state_size))
        print("num actions       : " + str(self.num_actions))
        print("action dupe count : " + str(self.action_dupe_count))
        print("num hidden        : " + str(p['num_hidden']))
        print("input size        : " + str(input_size))

        self.incorrect_target = p['incorrect_target']
        print(str(self.state_size) + " " + str(self.num_actions))
        self.correct_target = p['correct_target']
        layers.append(nnet.layer(input_size))
        layers.append(nnet.layer(p['num_hidden'],p['activation_function'],
                                 initialization_scheme=p['initialization_scheme'],
                                 initialization_constant=p['initialization_constant'],
                                 dropout=p['dropout'],use_float32=p['use_float32'],
                                 momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate']))
        if p.get('num_hidden2') is not None:
            layers.append(nnet.layer(p['num_hidden2'],p['activation_function2'],
                                 initialization_scheme=p['initialization_scheme2'],
                                 initialization_constant=p['initialization_constant2'],
                                 dropout=p['dropout2'],use_float32=p['use_float32'],
                                 momentum=p['momentum2'],maxnorm=p['maxnorm2'],step_size=p['learning_rate2']))
 

        layers.append(nnet.layer(1,
                                 initialization_scheme=p['initialization_scheme_final'],
                                 initialization_constant=p['initialization_constant_final'],
                                 use_float32=p['use_float32'],
                                 momentum=p['momentum'],step_size=p['learning_rate']))

        self.net = nnet.net(layers)

        if p.get('cluster_func') is not None:
            print("layer 0 has cluster func")
            self.cluster_func = p['cluster_func']
            #TODO: Make sure the centroids cover the input space appropriately
            self.net.layer[0].centroids = np.asarray(((np.random.random((self.net.layer[0].weights.shape)) - 0.5) * 2.25),np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[0].centroids[:,-1] = 1.0
            #print(str(self.net.layer[0].centroids.shape))
            #print(str(self.net.layer[0].centroids))
            self.net.layer[0].select_func = csf.select_names[p['cluster_func']]
            #print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[0].centroid_speed = p['cluster_speed']
            self.net.layer[0].num_selected = p['clusters_selected']
            if p.get('do_cosinedistance'):
                self.net.layer[0].do_cosinedistance = True
                print('cosine set to true')
            if p.get('zeta_decay') is not None:
                self.net.layer[0].zeta_matrix = np.ones(self.net.layer[0].weights.shape,dtype=np.float32)
                self.net.layer[0].zeta = 1.0
                self.zeta_decay = p['zeta_decay']


        if p.get('cluster_func2') is not None:
            print("layer 1 has cluster func")
            self.cluster_func2 = p['cluster_func2']
            #TODO: Make sure the centroids cover the input space appropriately
            self.net.layer[1].centroids = np.asarray(((np.random.random((self.net.layer[1].weights.shape)) - 0.5) * 2.25),np.float32)
            #make the centroid bias input match the bias data of 1.0
            self.net.layer[1].centroids[:,-1] = 1.0
            #print(str(self.net.layer[0].centroids.shape))
            #print(str(self.net.layer[0].centroids))
            self.net.layer[1].select_func = csf.select_names[p['cluster_func2']]
            #print('cluster_func: ' + str(csf.select_names[p['cluster_func']]))
            self.net.layer[1].centroid_speed = p['cluster_speed2']
            self.net.layer[1].num_selected = p['clusters_selected2']
            if p.get('do_cosinedistance'):
                self.net.layer[1].do_cosinedistance = True
                print('cosine set to true')
            if p.get('zeta_decay2') is not None:
                self.net.layer[1].zeta_matrix = np.ones(self.net.layer[1].weights.shape,dtype=np.float32)
                self.net.layer[1].zeta = 1.0
                self.zeta_decay = p['zeta_decay2']

        self.do_full_zeta = p.get('do_full_zeta',False)

        if p.get('_lambda') is not None:
            self._lambda = p['_lambda']
            self.gamma = p['gamma']
            for l in self.net.layer:
                l.eligibility = l.gradient


        print("Network Size Check:")
        print("  weight 0 size: " + str(self.net.layer[0].weights.shape))
        if(len(self.net.layer) > 1):
            print("  weight 1 size: " + str(self.net.layer[1].weights.shape))
        if(len(self.net.layer) > 2):
            print("  weight 2 size: " + str(self.net.layer[2].weights.shape))
        if(len(self.net.layer) > 3):
            print("  weight 3 size: " + str(self.net.layer[3].weights.shape))
Code example #21
#(test_data,test_class) = load_data(range(10),"testing",p)
#test_size = test_data.shape[0]

num_hidden = p['num_hidden']

training_epochs = p['training_epochs']

minibatch_size = 128

#layers = [nnet.layer(28*28),
#          nnet.layer(num_hidden,'tanh',select_func=p['select_func'],select_func_params=p['num_selected_neurons']),
#          nnet.layer(10,'tanh')]

layers = []
layers.append(nnet.layer(28*28))
layers.append(nnet.layer(p['num_hidden'],p['activation_function'],select_func=p['select_func'],select_func_params=p['num_selected_neurons']))

#Add 2nd and 3rd hidden layers if there are parameters indicating that we should
if 'num_hidden2' in p:
    layers.append(nnet.layer(p['num_hidden2'],p['activation_function2'],select_func=p['select_func2'],select_func_params=p['num_selected_neurons2']))
if 'num_hidden3' in p:
    layers.append(nnet.layer(p['num_hidden3'],p['activation_function3'],select_func=p['select_func3'],select_func_params=p['num_selected_neurons3']))
layers.append(nnet.layer(10,p['activation_function_final']))

learning_rate = p['learning_rate']

np.random.seed(p['random_seed'])

#init net
net = nnet.net(layers,learning_rate)
Code example #22
old_sample_targets = np.random.randint(0,2,(num_old_samples,sample_size))

if(p['zerosandones'] == False):
    old_sample_data = (old_sample_data-0.5)*2.0
    new_sample_data = (new_sample_data-0.5)*2.0
    old_sample_targets = (old_sample_targets-0.5)*2.0
    new_sample_targets = (new_sample_targets-0.5)*2.0

num_hidden = p['num_hidden']

training_epochs = p['training_epochs']

minibatch_size = p['minibatch_size']

layers = []
layers.append(nnet.layer(sample_size))
layers.append(nnet.layer(p['num_hidden'],p['activation_function'],select_func=p['select_func'],
                         select_func_params=p['num_selected_neurons'],
                         initialization_scheme=p['initialization_scheme'],
                         initialization_constant=p['initialization_constant'],
                         dropout=p['dropout'],sparse_penalty=p['sparse_penalty'],
                         sparse_target=p['sparse_target']))
                         
#Add 2nd and 3rd hidden layers if there are parameters indicating that we should
if 'num_hidden2' in p:
    layers.append(nnet.layer(p['num_hidden2'],p['activation_function2'],select_func=p['select_func2'],
                             select_func_params=p['num_selected_neurons2'],
                             initialization_scheme=p['initialization_scheme2'],
                             initialization_constant=p['initialization_constant2'],
                             dropout=p['dropout2'],sparse_penalty=p['sparse_penalty2'],
                             sparse_target=p['sparse_target2']))
Code example #23

import time
import numpy as np

#from nnet_toolkit import nnet_cuda as nnet
from nnet_toolkit import nnet

#layers = [nnet_toolkit.layer(2),nnet_toolkit.layer(128,'squash'),nnet_toolkit.layer(1,'squash')];
layers = [
    nnet.layer(400),
    nnet.layer(128, 'sigmoid'),
    nnet.layer(3, 'sigmoid')
]

#training_data = np.array([[0,0,1,1],[0,1,0,1]]);
#training_out = np.array([0,1,1,0]);

training_data = np.random.random((400, 500))
training_out = np.random.random((3, 500))

#net = nnet.net_cuda(layers,step_size=.1);
net = nnet.net(layers, step_size=.1)

net.input = training_data
t = time.time()
Code example #24
if 'nodes_per_group' in p:
    nodes_per_group = p['nodes_per_group']
else:
    nodes_per_group = None

if 'nodes_per_group2' in p:
    nodes_per_group2 = p['nodes_per_group2']
else:
    nodes_per_group2 = None

if 'nodes_per_group3' in p:
    nodes_per_group3 = p['nodes_per_group3']
else:
    nodes_per_group3 = None

layers = []
layers.append(nnet.layer(reduce_to))
layers.append(nnet.layer(p['num_hidden'],p['activation_function'],nodes_per_group=nodes_per_group,
                         initialization_scheme=p['initialization_scheme'],
                         initialization_constant=p['initialization_constant'],
                         dropout=p['dropout'],sparse_penalty=p['sparse_penalty'],
                         sparse_target=p['sparse_target'],use_float32=p['use_float32'],
                         momentum=p['momentum'],maxnorm=p['maxnorm'],step_size=p['learning_rate']))

#Add 2nd and 3rd hidden layers if there are parameters indicating that we should
if 'num_hidden2' in p:
    layers.append(nnet.layer(p['num_hidden2'],p['activation_function2'],nodes_per_group=nodes_per_group2,
                             initialization_scheme=p['initialization_scheme2'],
                             initialization_constant=p['initialization_constant2'],
                             dropout=p['dropout2'],sparse_penalty=p['sparse_penalty2'],
                             sparse_target=p['sparse_target2'],use_float32=p['use_float32'],
                             momentum=p['momentum2'],maxnorm=p['maxnorm2'],step_size=p['learning_rate2']))
Code example #25

import time
import numpy as np

#from nnet_toolkit import nnet_cuda as nnet
from nnet_toolkit import nnet

layers = [nnet.layer(2), nnet.layer(128, 'sigmoid'), nnet.layer(1, 'sigmoid')]
#layers = [nnet_toolkit.layer(2),nnet_toolkit.layer(256,'linear_rectifier'),nnet_toolkit.layer(128,'linear_rectifier'),nnet_toolkit.layer(64,'linear_rectifier'),nnet_toolkit.layer(32,'linear_rectifier'),nnet_toolkit.layer(1,'squash')];

training_data = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
training_out = np.array([0, 1, 1, 0])

#net = nnet.net_cuda(layers,step_size=.1);
net = nnet.net(layers, step_size=.1)

net.input = training_data
t = time.time()
for i in range(100000):
    net.feed_forward()
    net.error = net.output - training_out
    net.back_propagate()
    net.update_weights()
Code example #26

import time
import numpy as np
import data.mnist as mnist
from nnet_toolkit import nnet


# Load Data
features, labels = mnist.read(range(9), dataset="training")
tfeatures, tlabels = mnist.read(range(9), dataset="testing")

# Initialize Network
layers = [nnet.layer(features.shape[1]), nnet.layer(128, "sigmoid"), nnet.layer(labels.shape[1], "sigmoid")]
net = nnet.net(layers, step_size=0.1)


# Train Network for N epochs
N = 50
mini_batch_size = 1000
t = time.time()
print("Starting Training...")
for epoch in range(N):
    # Randomize Features
    rix = np.random.permutation(features.shape[0])
    features = features[rix]
    labels = labels[rix]
    net.input = features
Code example #27
def init_network(pnet, input_size):
    num_hidden = pnet['num_hidden']

    nodes_per_group = pnet['nodes_per_group']
    nodes_per_group2 = pnet['nodes_per_group2']
    nodes_per_group3 = pnet['nodes_per_group3']


    layers = []
    layers.append(nnet.layer(input_size))
    layers.append(nnet.layer(pnet['num_hidden'],pnet['activation_function'],nodes_per_group=nodes_per_group,
                                 initialization_scheme=pnet['initialization_scheme'],
                                 initialization_constant=pnet['initialization_constant'],
                                 dropout=pnet['dropout'],sparse_penalty=pnet['sparse_penalty'],
                                 sparse_target=pnet['sparse_target'],use_float32=pnet['use_float32'],
                                 momentum=pnet['momentum'],maxnorm=pnet['maxnorm'],step_size=pnet['learning_rate']))

    #Add 2nd and 3rd hidden layers if there are parameters indicating that we should
    if pnet.get('num_hidden2') is not None:
        layers.append(nnet.layer(pnet['num_hidden2'],pnet['activation_function2'],nodes_per_group=nodes_per_group2,
                                 initialization_scheme=pnet['initialization_scheme2'],
                                 initialization_constant=pnet['initialization_constant2'],
                                 dropout=pnet['dropout2'],sparse_penalty=pnet['sparse_penalty2'],
                                 sparse_target=pnet['sparse_target2'],use_float32=pnet['use_float32'],
                                 momentum=pnet['momentum2'],maxnorm=pnet['maxnorm2'],step_size=pnet['learning_rate2']))

    if pnet.get('num_hidden3') is not None:
        layers.append(nnet.layer(pnet['num_hidden3'],pnet['activation_function3'],nodes_per_group=nodes_per_group3,
                                 initialization_scheme=pnet['initialization_scheme3'],
                                 initialization_constant=pnet['initialization_constant3'],
                                 dropout=pnet['dropout3'],sparse_penalty=pnet['sparse_penalty3'],
                                 sparse_target=pnet['sparse_target3'],use_float32=pnet['use_float32'],
                                 momentum=pnet['momentum3'],maxnorm=pnet['maxnorm3'],step_size=pnet['learning_rate3']))

    layers.append(nnet.layer(num_labels,pnet['activation_function_final'],use_float32=pnet['use_float32'],
                                 step_size=pnet['learning_rate_final'],momentum=pnet['momentum_final']))
    #init net
    net = nnet.net(layers)

    if pnet.get('cluster_func') is not None:
        #net.layer[0].centroids = np.asarray((((np.random.random((net.layer[0].weights.shape)) - 0.5)*2.0)),np.float32)
        net.layer[0].centroids = np.asarray(((np.ones((net.layer[0].weights.shape))*10.0)),np.float32)
        net.layer[0].select_func = csf.select_names[pnet['cluster_func']]
        print('cluster_func: ' + str(csf.select_names[pnet['cluster_func']]))
        net.layer[0].centroid_speed = pnet['cluster_speed']
        net.layer[0].num_selected = pnet['clusters_selected']
        net.layer[0].number_to_replace = pnet['number_to_replace']
        if pnet.get('do_cosinedistance'):
            net.layer[0].do_cosinedistance = True
            print('cosine set to true')

    if 'num_hidden2' in pnet and pnet.get('cluster_func2') is not None:
        #net.layer[0].centroids = np.asarray((((np.random.random((net.layer[0].weights.shape)) - 0.5)*2.0)),np.float32)
        net.layer[1].centroids = np.asarray(((np.ones((net.layer[1].weights.shape))*10.0)),np.float32)
        net.layer[1].select_func = csf.select_names[pnet['cluster_func2']]
        print('cluster_func: ' + str(csf.select_names[pnet['cluster_func2']]))
        net.layer[1].centroid_speed = pnet['cluster_speed2']
        net.layer[1].num_selected = pnet['clusters_selected2']
        net.layer[1].number_to_replace = pnet['number_to_replace']
        if pnet.get('do_cosinedistance'):
            net.layer[1].do_cosinedistance = True
            print('cosine set to true')

    if 'num_hidden3' in pnet and pnet.get('cluster_func3') is not None:
        #net.layer[0].centroids = np.asarray((((np.random.random((net.layer[0].weights.shape)) - 0.5)*2.0)),np.float32)
        net.layer[2].centroids = np.asarray(((np.ones((net.layer[2].weights.shape))*10.0)),np.float32)
        net.layer[2].select_func = csf.select_names[pnet['cluster_func3']]
        print('cluster_func: ' + str(csf.select_names[pnet['cluster_func3']]))
        net.layer[2].centroid_speed = pnet['cluster_speed3']
        net.layer[2].num_selected = pnet['clusters_selected3']
        net.layer[2].number_to_replace = pnet['number_to_replace']
        if pnet.get('do_cosinedistance'):
            net.layer[2].do_cosinedistance = True
            print('cosine set to true')
    
    net.do_clustering = False
    if pnet.get('cluster_func') is not None:
        net.do_clustering = True
    if pnet.get('cluster_func2') is not None:
        net.do_clustering = True
    if pnet.get('cluster_func3') is not None:
        net.do_clustering = True

    return net
Code example #28

import time
import numpy as np
import data.mnist as mnist
from nnet_toolkit import nnet

# Load Data
features, labels = mnist.read(range(9), dataset='training')
tfeatures, tlabels = mnist.read(range(9), dataset='testing')

# Initialize Network
layers = [
    nnet.layer(features.shape[1]),
    nnet.layer(128, 'sigmoid'),
    nnet.layer(labels.shape[1], 'sigmoid')
]
net = nnet.net(layers, step_size=.1)

# Train Network for N epochs
N = 50
mini_batch_size = 1000
t = time.time()
print("Starting Training...")
for epoch in range(N):
    # Randomize Features
    rix = np.random.permutation(features.shape[0])
    features = features[rix]
    labels = labels[rix]