Code Example #1
File: nn.py Project: jgera/Segmentation-Code
    def init_net_without_loading_data(self, config):
        """This should be called after loading all required data."""
        self.config = config

        if config.is_output and (not os.path.exists(config.output_dir)):
            os.makedirs(config.output_dir)

        [num_total_cases, input_dim] = self.train_data.X.shape
        self.num_total_cases = num_total_cases
        self.input_dim = input_dim

        self.num_minibatches = num_total_cases / config.minibatch_size
        if self.num_minibatches < 1:
            self.num_minibatches = 1

        # initialize the network
        self.num_layers = config.num_layers
        self.layer = []
        in_dim = input_dim
        for i in range(self.num_layers):
            layer_spec = config.layer[i]
            self.layer.append(Layer(
                in_dim, layer_spec.out_dim, layer_spec.act_type, 
                layer_spec.weight_decay, layer_spec.weight_constraint, 
                layer_spec.dropout))
            in_dim = layer_spec.out_dim

        self.output = OutputLayer(in_dim, config.output.out_dim,
                config.output.output_type, config.output.weight_decay,
                config.output.weight_constraint, config.output.dropout)

        # If the output is not linear (i.e. not a regression output), load the task loss function
        if not isinstance(self.output.act_type, act.LinearOutput):
            if config.task_loss_file != None:
                self.task_loss = self.read_loss(config.task_loss_file)
                print 'Loading task loss from %s' % config.task_loss_file
            else:
                self.task_loss = 1 - np.eye(self.train_data.K)
                print 'No task loss specified, using 0-1 loss.'

        # To use multi-class hinge output, a training loss function is required
        if isinstance(self.output.act_type, act.MulticlassHingeOutput):
            if config.train_loss_file != None:
                self.train_loss = self.read_loss(config.train_loss_file)
                print 'Loading surrogate loss from %s' % config.train_loss_file
            else:
                self.train_loss = 1 - np.eye(self.train_data.K)
                print 'No surrogate loss specified, using 0-1 loss.'
            self.output.act_type.set_loss(self.train_loss)

        # initialize the weights in every layer
        self._init_weights(config.init_scale, config.random_seed)
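A hedged usage sketch of the call order this method expects: data must be loaded first, and the Config attributes listed are exactly the ones the method above reads (the driver code itself is hypothetical):

    # Hypothetical driver; Config is assumed to be a plain attribute container.
    net = NN()
    net.load_train_data(train_data)   # must come first: train_data.X is read above
    # config must provide: is_output, output_dir, minibatch_size, num_layers,
    # layer[i].{out_dim, act_type, weight_decay, weight_constraint, dropout},
    # output.{out_dim, output_type, weight_decay, weight_constraint, dropout},
    # task_loss_file, train_loss_file, init_scale, random_seed
    net.init_net_without_loading_data(config)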
Code Example #2
File: lstm.py Project: Yevgnen/LSTM
    def forward_time(self, ix, prev_cell):
        """Forward in time t given current input and previous hidden cell state."""
        # FIXME: There may be multiple layers here.
        # Compute the hidden state
        (hidden, output) = self.create_cell()

        hidden.forward(self.Wz, self.Wi, self.Wf, self.Wo, self.Rz, self.Ri, self.Rf, self.Ro, self.pi, self.pf,
                       self.po, self.bz, self.bi, self.bf, self.bo, ix, prev_cell[0].c, prev_cell[0].h)
        # Compute the output
        output = OutputLayer(self.hidden_size)
        output.forward(self.V, hidden.h, self.c)

        return (hidden, output)
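To unroll over a whole sequence, the (hidden, output) pair returned here can be fed back in as `prev_cell`, since `prev_cell[0]` is indexed for its `.c` and `.h` state. A sketch (seeding with `create_cell()` as a zero-state start is an assumption):

    cell = lstm.create_cell()      # assumed: a fresh cell carries zero c/h state
    cells = []
    for ix in sequence:            # sequence of per-timestep inputs
        cell = lstm.forward_time(ix, cell)
        cells.append(cell)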
Code Example #3
    class TrainingResultsTest( unittest.TestCase ):
        def setUp( self ):
            self.tr = TrainingResults()

            from opencl import OpenCL
            from layer import InputLayer, Layer, OutputLayer, ExecutionContext

            self.ocl = OpenCL( pyopencl.create_some_context() )

            self.i = InputLayer( 2, self.ocl )
            self.h = Layer( 3, self.ocl )
            self.o = OutputLayer( 1, self.ocl )

            self.i.link_next( self.h )
            self.h.link_next( self.o, 0, 3 )

            self.nnc = ExecutionContext( self.i, self.o, allow_training = True )

            self.i.set_weights( numpy.array( [ 0.1 ] * self.i.weights_count, numpy.float32 ) )
            self.h.set_weights( numpy.array( [ 0.2 ] * self.h.weights_count, numpy.float32 ) )
            self.o.set_weights( numpy.array( [ 0.3 ] * self.o.weights_count, numpy.float32 ) )

        def assertArrayEqual( self, ar1, ar2 ):
            self.assertEqual( len( ar1 ), len( ar2 ) )
            for x, y in zip( numpy.array( ar1, numpy.float32 ), numpy.array( ar2, numpy.float32 ) ):
                self.assertAlmostEqual( x, y, places = 5 )

        def test_store( self ):
            self.tr.reset()
            self.assertEqual( self.tr.iterations, numpy.int32( 0 ) )
            self.assertGreater( self.tr.minimal_error, numpy.float32( 1e6 ) )
            self.assertIsNone( self.tr.optimal_weights )
            self.assertAlmostEqual( self.tr.total_time, 0.0 )
            self.assertAlmostEqual( self.tr.opencl_time, 0.0 )

            self.i.set_inputs( numpy.array( [1.0, 1.0], numpy.float32 ), is_blocking = True )
            self.i.process()
            initial_result = self.o.get_outputs()

            self.tr.store_weights( self.nnc )
            self.i.set_weights( numpy.array( [ 0.4 ] * self.i.weights_count, numpy.float32 ) )
            self.i.process()

            self.assertNotEqual( initial_result, self.o.get_outputs() )

            self.tr.apply_weights( self.nnc )
            self.i.process()
            self.assertArrayEqual( initial_result, self.o.get_outputs() )
Code Example #4
File: network.py Project: MrMois/brickscale
    def __init__(self, cfg):

        self.model = cfg["model"]
        self.cost = cfg["cost"]
        self.struct = cfg["struct"]
        self.activation = cfg["activation"]

        # size is the number of weight matrices
        self.size = len(cfg["struct"]) - 1

        # init of layers

        self.layers = []

        layer_cfg = {
            "model": cfg["model"],
            "load_weights": cfg["load_weights"],
            "activation": cfg["activation"],
            "cost": cfg["cost"]
        }

        for l in range(self.size):

            layer_cfg["shape"] = [
                cfg["struct"][l],  # input size
                cfg["struct"][l + 1]  # output size
            ]

            layer_cfg["ID"] = l

            if l == self.size - 1:
                self.layers.append(OutputLayer(layer_cfg))

            else:
                self.layers.append(Layer(layer_cfg))
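The constructor reads only a handful of keys from `cfg`; an illustrative configuration that satisfies it (all values made up):

    cfg = {
        "model": "demo",
        "cost": "quadratic",
        "struct": [2, 3, 1],     # 2 inputs, one hidden layer of 3, 1 output
        "activation": "sigmoid",
        "load_weights": False,   # forwarded to every layer via layer_cfg
    }
    net = Network(cfg)           # builds len(struct) - 1 = 2 layers; the last is an OutputLayer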
Code Example #5
def Mininet_test():

    import numpy as np  # np is used below but not imported in the original snippet
    from functions import Sigmoid, Quadratic

    w_h = np.ones((2, 2)) * 0.5
    w_o = np.ones((1, 3)) * 0.5

    f_activation = Sigmoid
    f_cost = Quadratic

    layers = [
        Layer(f_activation, weights=w_h),
        OutputLayer(f_activation, f_cost, weights=w_o)
    ]

    net = Network(f_activation, f_cost, layers=layers)

    input = [1]

    act, cost = net.train(input=input, target=input, learning_rate=1)

    print(act, cost)

    for l in net.layers:
        print(l.delta_w)
Code Example #6
    def load(name):

        with open(name + "/p.pkl", "rb") as file:
            print(file)
            params = pickle.load(file)

        layers = []

        for index in range(params["no_of_layers"] - 1):
            path = name + "/l" + str(index) + ".npy"
            weights = np.load(path)
            layers.append(
                Layer(f_activation=params["f_activation"], weights=weights))

        path = name + "/l" + str(params["no_of_layers"] - 1) + ".npy"
        weights = np.load(path)

        layers.append(
            OutputLayer(f_activation=params["f_activation"],
                        f_cost=params["f_cost"],
                        weights=weights))

        return Network(f_activation=params["f_activation"],
                       f_cost=params["f_cost"],
                       layers=layers)
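load() implies an on-disk layout of one parameter pickle (p.pkl) plus one .npy file per weight matrix (l0.npy, l1.npy, ...). A matching save might look like the sketch below; the net.f_activation, net.f_cost, and layer.weights attribute names are assumptions:

    import os
    import pickle
    import numpy as np

    def save(net, name):
        # Hypothetical counterpart to load(): writes the layout load() reads.
        os.makedirs(name, exist_ok=True)
        params = {"no_of_layers": len(net.layers),
                  "f_activation": net.f_activation,
                  "f_cost": net.f_cost}
        with open(name + "/p.pkl", "wb") as file:
            pickle.dump(params, file)
        for index, layer in enumerate(net.layers):
            np.save(name + "/l" + str(index) + ".npy", layer.weights)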
Code Example #7
File: nn.py Project: jgera/Segmentation-Code
    def init_net(self, config):
        """config is an instance of class Config"""

        import os

        self.config = config

        if config.is_output and (not os.path.exists(config.output_dir)):
            os.makedirs(config.output_dir)

        self.train_data = self.read_data(config.train_data_file)

        if config.is_val:
            self.val_data = self.read_data(config.val_data_file)
        if config.is_test:
            self.test_data = self.read_data(config.test_data_file)

        [num_total_cases, input_dim] = self.train_data.X.shape
        self.num_total_cases = num_total_cases
        self.input_dim = input_dim

        self.num_minibatches = num_total_cases / config.minibatch_size
        if self.num_minibatches < 1:
            self.num_minibatches = 1

        # initialize the network
        self.num_layers = config.num_layers
        self.layer = []
        in_dim = input_dim
        for i in range(0, self.num_layers):
            self.layer.append(
                Layer(in_dim, config.layer[i].out_dim,
                      config.layer[i].act_type))
            in_dim = config.layer[i].out_dim

        self.output = OutputLayer(in_dim, config.output.out_dim,
                                  config.output.output_type)

        # To use multi-class hinge output, we need to specify the loss function
        if isinstance(self.output.act_type, act.MulticlassHingeOutput):
            if config.loss_file != None:
                self.output.act_type.set_loss(self.read_loss(config.loss_file))
            else:
                self.output.act_type.set_loss(1 - np.eye(self.train_data.K))

        # initialize the weights in every layer
        self._init_weights(config.init_scale, config.random_seed)
Code Example #8
File: rnn.py Project: Yevgnen/RNN
    def forward_propagation(self, x):
        """Forward Progation of a single sample."""
        tau = len(x)
        prev_h = sp.zeros(self.n_hiddens)

        cells = [None for i in range(tau)]
        for i in range(tau):
            # Compute the hidden state
            time_input = x[i]
            hidden = HiddenLayer()
            hidden.forward(self.U, time_input, self.W, prev_h, self.b)

            # Compute the output
            prev_h = hidden.h
            output = OutputLayer()
            output.forward(self.V, hidden.h, self.c)

            cells[i] = (hidden, output)
        return cells
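Each entry of the returned `cells` pairs the hidden state with the output at that time step, so per-step results can be read back directly (the `y` attribute on `OutputLayer` is an assumption):

    cells = rnn.forward_propagation(x)
    ys = [output.y for (hidden, output) in cells]   # attribute name assumed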
Code Example #9
        # setUpClass is invoked on the class by unittest, so it must be a classmethod.
        @classmethod
        def setUpClass( cls ):
            from opencl import OpenCL
            from layer import InputLayer, OutputLayer, ExecutionContext

            cls.ocl = OpenCL( pyopencl.create_some_context() )

            cls.i = InputLayer( 2, cls.ocl )
            cls.o = OutputLayer( 1, cls.ocl )

            cls.i.link_next( cls.o )

            cls.nnc = ExecutionContext( cls.i, cls.o, allow_training = True )

            cls.i.set_weights( numpy.array( [ 0.1 ] * cls.i.weights_count, numpy.float32 ) )
            cls.o.set_weights( numpy.array( [ 0.3 ] * cls.o.weights_count, numpy.float32 ) )

            cls.tr = TrainingResults()

            cls._create_method()
Code Example #10
File: network.py Project: warmstar1986/neural-net
    def __init_layers(self, layer_spec):
        self.layers = []
        last_index = len(layer_spec) - 1
        for i, size in enumerate(layer_spec):
            if i == 0:
                self.layers.append(InputLayer(size, self.activation_fn))
            elif i == last_index:
                self.layers.append(OutputLayer(size, self.activation_fn))
            else:
                self.layers.append(HiddenLayer(size, self.activation_fn))

        for i in range(len(self.layers) - 1):
            self.__join_layers(self.layers[i], self.layers[i+1])
Code Example #11
File: nn.py Project: Barnonewdm/deep_learning
    def __init__(self,
                 architecture=[784, 100, 10],
                 activation='sigmoid',
                 learning_rate=0.1,
                 momentum=0.5,
                 weight_decay=1e-4,
                 dropout=0.5,
                 early_stopping=True,
                 seed=99):
        """
        Neural network model initializer.
        """

        # Attributes
        self.architecture = architecture
        self.activation = activation
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.dropout = dropout
        self.early_stopping = early_stopping
        self.seed = seed

        # Turn `activation` and `learning_rate` to class instances
        if not isinstance(self.activation, Activation):
            self.activation = Activation(self.activation)
        if not isinstance(self.learning_rate, LearningRate):
            self.learning_rate = LearningRate(self.learning_rate)

        # Initialize a list of layers
        self.layers = []
        for i, (n_in,
                n_out) in enumerate(zip(architecture[:-2],
                                        architecture[1:-1])):
            l = HiddenLayer('layer{}'.format(i), n_in, n_out, self.activation,
                            self.learning_rate, self.momentum,
                            self.weight_decay, self.dropout, self.seed + i)
            self.layers.append(l)
        # Output layer
        n_in, n_out = architecture[-2], architecture[-1]
        l = OutputLayer('output_layer', n_in, n_out, self.learning_rate,
                        self.momentum, self.weight_decay, self.dropout,
                        self.seed + i + 1)
        self.layers.append(l)

        # Training updates
        self.epoch = 0
        self.training_error = []
        self.validation_error = []
        self.training_loss = []
        self.validation_loss = []
Code Example #12
    def create_layers(f_activation, f_cost, struct):

        layers = []

        for input, output in zip(struct, struct[1:-1]):
            layer = Layer(f_activation, size=[output, input])
            layers.append(layer)

        layer = OutputLayer(f_activation,
                            f_cost,
                            size=[struct[-1], struct[-2]])
        layers.append(layer)

        return layers
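A worked example of the shape bookkeeping above:

    # With struct = [2, 3, 1], zip(struct, struct[1:-1]) yields only (2, 3), so
    # one hidden Layer of size [3, 2] is built, followed by an OutputLayer of
    # size [struct[-1], struct[-2]] = [1, 3].
    layers = create_layers(Sigmoid, Quadratic, [2, 3, 1])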
Code Example #13
    def __init__(self,
                 model,
                 struct,
                 activation="sigmoid",
                 cost="quadratic",
                 load_weights=False,
                 weight_range=[-1, 1],
                 bias_range=[0.5, 0.95]):

        self.model = model
        self.struct = struct
        self.activation = activation
        self.cost = cost

        # size is the number of weight matrices
        self.size = len(self.struct) - 1

        # init of layers

        self.layers = []

        for l in range(self.size):

            if l == self.size - 1:
                self.layers.append(
                    OutputLayer(model=self.model,
                                ID=l,
                                activation=self.activation,
                                load_weights=load_weights,
                                in_size=self.struct[l],
                                out_size=self.struct[l + 1],
                                weight_range=weight_range,
                                bias_range=bias_range,
                                cost=self.cost))

            else:
                self.layers.append(
                    Layer(model=self.model,
                          ID=l,
                          activation=self.activation,
                          load_weights=load_weights,
                          in_size=self.struct[l],
                          out_size=self.struct[l + 1],
                          weight_range=weight_range,
                          bias_range=bias_range))
Code Example #14
def main():

    sess = tf.Session()

    image = read_image('../data/heart.jpg')
    image = np.reshape(image, [1, 224, 224, 3])  # type numpy.ndarray
    image = image.astype(np.float32)  # astype returns a copy; it must be reassigned

    parser = Parser('../data/alexnet.cfg')
    network_builder = NetworkBuilder("test")  # type: NetworkBuilder
    network_builder.set_parser(parser)
    network = network_builder.build()  # type: Network
    network.add_input_layer(InputLayer(tf.float32, [None, 224, 224, 3]))
    network.add_output_layer(OutputLayer())
    network.connect_each_layer()

    sess.run(tf.global_variables_initializer())
    fc_layer = sess.run(network.output, feed_dict={network.input: image})
Code Example #15
def test():
    data = pd.read_csv("train.csv").values
    x = data[:, 1:]
    t = np.identity(10, dtype=np.uint8)[data[:, 0]]

    x = (x - x.min()) / x.max()
    x = (x - x.mean()) / x.std()

    validate = int(x.shape[0] * 0.75)

    x_train, t_train = x[:validate], t[:validate]
    x_test, t_test = x[validate:], t[validate:]

    network = NeuralNetwork(784, error="R2error", optimizer="Gradient")
    network.add(Layer(100, activation="Sigmoid"))
    network.add(Layer(50, activation="Sigmoid"))
    network.add(OutputLayer(10, activation="Softmax"))

    network.fit(x_train, t_train, epoch_time=EPOCH_TIME, batch_size=BATCH_SIZE)

    network.print_accurate(x_test, t_test)
Code Example #16
File: nn.py Project: jgera/Segmentation-Code
    def build_net_from_copy(self, copy):
        """
        Rebuild the net from a copy made by make_copy.
        """
        nnstore = copy
        self.num_layers = len(nnstore.layer)
        self.layer = []
        for i in range(self.num_layers):
            in_dim, out_dim = nnstore.layer[i].W.shape
            new_layer = Layer(in_dim, out_dim, nnstore.layer[i].act_type)
            new_layer.load_weight(nnstore.layer[i].W, nnstore.layer[i].b)
            self.layer.append(new_layer)

        in_dim, out_dim = nnstore.output.W.shape
        new_layer = OutputLayer(in_dim, out_dim, nnstore.output.act_type)
        new_layer.load_weight(nnstore.output.W, nnstore.output.b)
        self.output = new_layer
        if self.num_layers > 0:
            self.input_dim = self.layer[0].W.shape[0]
        else:
            self.input_dim = self.output.W.shape[0]
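Together with make_copy (shown in the full NN listing at the end of this page), this supports a simple snapshot/restore round trip:

    snapshot = net.make_copy()           # CPU copy of the current weights
    net.train()                          # weights change during training
    net.build_net_from_copy(snapshot)    # roll back to the snapshot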
Code Example #17
File: nn.py Project: yujiali/nn
    def init_net(self, config):
        """config is an instance of class Config"""
        
        import os

        self.config = config

        if config.is_output and (not os.path.exists(config.output_dir)):
            os.makedirs(config.output_dir)

        self.train_data = self.read_data(config.train_data_file)

        if config.is_val:
            self.val = self.read_data(config.val_data_file)
        if config.is_test:
            self.test = self.read_data(config.test_data_file)

        [num_total_cases, input_dim] = self.train_data.X.shape
        self.num_total_cases = num_total_cases
        self.input_dim = input_dim

        self.num_minibatches = num_total_cases / config.minibatch_size
        if self.num_minibatches < 1:
            self.num_minibatches += 1

        # initialize the network
        self.num_layers = config.num_layers
        self.layer = []
        in_dim = input_dim
        for i in range(0, self.num_layers):
            self.layer.append(Layer(
                in_dim, config.layer[i].out_dim, config.layer[i].act_type))
            in_dim = config.layer[i].out_dim

        self.output = OutputLayer(in_dim, config.output.out_dim,
                config.output.output_type)

        # initialize the weights in every layer
        self._init_weights(config.init_scale, config.random_seed)
Code Example #18
File: nn.py Project: jgera/Segmentation-Code
class NN:
    """A class for general purpose neural networks, trained with
    backpropagation. The type of activation functions, number of hidden layers
    and number of units in each layer, the output function, and other options 
    during training can be configured."""
    def __init__(self):
        pass

    def init_net(self, config):
        """config is an instance of class Config"""
        
        import os

        self.config = config

        if config.is_output and (not os.path.exists(config.output_dir)):
            os.makedirs(config.output_dir)

        self.train_data = self.read_data(config.train_data_file)

        if config.is_val:
            self.val_data = self.read_data(config.val_data_file)
        if config.is_test:
            self.test_data = self.read_data(config.test_data_file)

        [num_total_cases, input_dim] = self.train_data.X.shape
        self.num_total_cases = num_total_cases
        self.input_dim = input_dim

        self.num_minibatches = num_total_cases / config.minibatch_size
        if self.num_minibatches < 1:
            self.num_minibatches = 1

        # initialize the network
        self.num_layers = config.num_layers
        self.layer = []
        in_dim = input_dim
        for i in range(0, self.num_layers):
            self.layer.append(Layer(
                in_dim, config.layer[i].out_dim, config.layer[i].act_type))
            in_dim = config.layer[i].out_dim

        self.output = OutputLayer(in_dim, config.output.out_dim,
                config.output.output_type)

        # To use multi-class hinge output, we need to specify the loss function
        if isinstance(self.output.act_type, act.MulticlassHingeOutput):
            if config.loss_file != None:
                self.output.act_type.set_loss(self.read_loss(config.loss_file))
            else:
                self.output.act_type.set_loss(1 - np.eye(self.train_data.K))

        # initialize the weights in every layer
        self._init_weights(config.init_scale, config.random_seed)

    def _init_weights(self, init_scale, random_seed=None):
        if random_seed:
            np.random.seed(random_seed)

        for i in range(0, self.num_layers):
            self.layer[i].init_weight(init_scale)

        self.output.init_weight(init_scale)

    def train(self):
        config = self.config

        # convert t into a matrix in 1-of-K representation if it is a vector
        t = self.train_data.T
        if not self.config.is_regression:
            T_matrix = self.output.act_type.label_vec_to_mat(t, self.train_data.K)
        else:
            T_matrix = t

        layer_config = LayerConfig()
        layer_config.learn_rate = config.learn_rate
        layer_config.momentum = config.momentum
        layer_config.weight_decay = config.weight_decay

        nnstore = NNStore()
        nnstore.init_from_net(self)

        self.display_training_info(-1, 0, 0)
        t_start = time.time()

        for epoch in range(0, config.num_epochs):
            # shuffle the dataset 
            idx = np.random.permutation(self.num_total_cases)
            train_X = self.train_data.X[idx]
            train_T = T_matrix[idx]

            loss = 0

            for batch in range(0, self.num_minibatches):
                i_start = batch * config.minibatch_size
                if not batch == self.num_minibatches - 1:
                    i_end = i_start + config.minibatch_size
                else:
                    i_end = self.num_total_cases

                X = train_X[i_start:i_end]
                T = train_T[i_start:i_end]
                Xbelow = X

                # forward pass
                for i in range(0, self.num_layers):
                    Xbelow = self.layer[i].forward(Xbelow)
                self.output.forward(Xbelow)

                # compute loss
                loss += self.output.loss(T)

                # backprop
                dLdXabove = self.output.backprop(layer_config)
                for i in range(self.num_layers-1, -1, -1):
                    dLdXabove = self.layer[i].backprop(dLdXabove, layer_config)

            # statistics
            avg_loss = 1.0 * loss / self.num_total_cases

            if (epoch + 1) % config.epoch_to_display == 0:
                self.display_training_info(epoch, avg_loss, time.time() - t_start)
                t_start = time.time()

            if (epoch + 1) % config.epoch_to_save == 0:
                nnstore.update_from_net(self)
                nnstore.write(config.output_dir + '/m' + str(epoch + 1) + '.pdata')

    def display_training_info(self, epoch, loss, time):
        """Print training information. Use the config information to determine
        what information to display."""
        if self.config.is_val:
            if self.config.is_test:
                self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T,
                        val_data=self.val_data.X, val_labels=self.val_data.T,
                        test_data=self.test_data.X, test_labels=self.test_data.T)
            else:
                self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T,
                        val_data=self.val_data.X, val_labels=self.val_data.T)
        else:
            if self.config.is_test:
                self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T,
                        test_data=self.test_data.X, test_labels=self.test_data.T)
            else:
                self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T)

    def _display_training_info(self, epoch, loss, time, 
            train_data, train_labels, val_data=None, val_labels=None, 
            test_data=None, test_labels=None):
        """Print training information during training."""
        print 'epoch %d, loss %.4f,' % (epoch + 1, loss),
        
        # print loss if it is a regression problem
        if self.config.is_regression:
            if val_data != None and val_labels != None:
                self.predict(val_data)
                avg_loss = self.output.loss(val_labels) / val_labels.shape[0]
                print 'val_loss %.4f,' % (avg_loss),
            if test_data != None and test_labels != None:
                self.predict(test_data)
                avg_loss = self.output.loss(test_labels) / test_labels.shape[0]
                print 'test_loss %.4f,' % (avg_loss),
        else:
            # print accuracy if it is a classification problem
            ypred = self.predict(train_data)
            acc = (ypred == train_labels.squeeze()).mean()
            print 'acc %.4f,' % acc,

            if val_data != None and val_labels != None:
                ypred = self.predict(val_data)
                acc = (ypred == val_labels.squeeze()).mean()
                print 'val_acc %.4f,' % acc,
            if test_data != None and test_labels != None:
                ypred = self.predict(test_data)
                acc = (ypred == test_labels.squeeze()).mean()
                print 'test_acc %.4f,' % acc,

        if self.config.display_winc:
            for i in range(0, self.num_layers):
                print 'winc%d %.5f,' % (i+1, np.abs(self.layer[i].Winc).max()),
            print 'winc_out %.5f,' % np.abs(self.output.Winc).max(),

        print 'time %.2f' % time

    def _forward(self, X):
        """Do a forward pass without computing the output and predictions.
        Used as a subroutine for function predict and check_grad."""
        Xbelow = X
        for i in range(0, self.num_layers):
            Xbelow = self.layer[i].forward(Xbelow)
        self.output.forward(Xbelow)
       
    def predict(self, X):
        """Make prediction using the current network.
        
        X: N*D data matrix

        Return an N-element vector of predicted labels.
        """
        self._forward(X)
        return self.output.predict()

    def read_data(self, data_file_name):
        """(data_file_name) --> data
        Read from the specified data file, return a data object, which is an
        object with three attributes, X, T and K. X and T are the data and
        target matrices respectively, and K is the dimensionality of the output.
        Each of X and T is a matrix with N rows, N is the number of data
        cases."""

        f = open(data_file_name)

        data_dict = pickle.load(f)

        f.close()

        data = Data()
        data.X = data_dict['data']
        data.T = data_dict['labels']
        data.K = data_dict['K']

        return data

    def read_loss(self, loss_file_name):
        """(data_file_name) --> loss
        Read from the specified data file, return a loss matrix.
        """
        f = open(loss_file_name)
        d = pickle.load(f)
        f.close()

        return d['loss']

    def display(self):
        print '%d training cases' % self.train_data.X.shape[0]
        if self.config.is_val:
            print '%d validation cases' % self.val_data.X.shape[0]
        if self.config.is_test:
            print '%d test cases' % self.test_data.X.shape[0]
        print '[' + str(self.output) + ']'
        for i in range(self.num_layers-1, -1, -1):
            print '[' + str(self.layer[i]) + ']'
        print '[input ' + str(self.input_dim) + ']'

        print 'learn_rate : ' + str(self.config.learn_rate)
        print 'init_scale : ' + str(self.config.init_scale)
        print 'momentum : ' + str(self.config.momentum)
        print 'weight_decay : ' + str(self.config.weight_decay)
        print 'minibatch_size : ' + str(self.config.minibatch_size)
        print 'num_epochs : ' + str(self.config.num_epochs)
        print 'epoch_to_save : ' + str(self.config.epoch_to_save)

    def check_grad(self):
        # check the gradient of the 1st layer weights
        import scipy.optimize as opt

        ncases = 100

        def f(w):
            if self.num_layers == 0:
                Wtemp = self.output.W
                self.output.W = w.reshape(Wtemp.shape)
            else:
                Wtemp = self.layer[0].W
                self.layer[0].W = w.reshape(Wtemp.shape)

            self._forward(self.train_data.X[:ncases,:])

            Z = self.train_data.T[:ncases]
            if not self.config.is_regression:
                Z = self.output.act_type.label_vec_to_mat(Z, self.train_data.K)

            L = self.output.loss(Z) / Z.shape[0]
            if self.num_layers == 0:
                self.output.W = Wtemp
            else:
                self.layer[0].W = Wtemp

            return L

        def fgrad(w):
            if self.num_layers == 0:
                Wtemp = self.output.W
                self.output.W = w.reshape(Wtemp.shape)
            else:
                Wtemp = self.layer[0].W
                self.layer[0].W = w.reshape(Wtemp.shape)

            self._forward(self.train_data.X[:ncases,:])

            Z = self.train_data.T[:ncases]
            if not self.config.is_regression:
                Z = self.output.act_type.label_vec_to_mat(Z, self.train_data.K)
            self.output.loss(Z)

            self.output.gradient()
            dLdXabove = self.output.dLdXtop[:,:-1]
            for i in range(self.num_layers-1, -1, -1):
                self.layer[i].gradient(dLdXabove)
                dLdXabove = self.layer[i].dLdXbelow[:,:-1]

            if self.num_layers == 0:
                grad_w = self.output.dLdW
            else:
                grad_w = self.layer[0].dLdW

            if self.num_layers == 0:
                self.output.W = Wtemp
            else:
                self.layer[0].W = Wtemp

            return grad_w.reshape(np.prod(grad_w.shape)) / Z.shape[0]

        if self.num_layers == 0:
            #W = np.random.randn(
            #        self.output.W.shape[0], self.output.W.shape[1]) * 1e-3
            W = self.output.W
        else:
            #W = np.random.randn(
            #        self.layer[0].W.shape[0], self.layer[0].W.shape[1]) * 1e-3
            W = self.layer[0].W

        print "wmax: %f" % np.abs(fgrad(W.reshape(np.prod(W.shape)))).max()
        print "check_grad err: %f" % opt.check_grad(
                f, fgrad, W.reshape(np.prod(W.shape)))
Code Example #19
epoch = 1
batch_size = 8
interval = 1  # display interval for progress reports
n_sample = 7  # number of samples used for error measurement
n_flt = 6  # n_flt: number of filters
flt_h = 3  # flt_h: filter height
flt_w = 3  # flt_w: filter width
# stride: stride width, pad: padding width

# -- Initialize each layer --
# (img_ch, img_h, img_w are assumed to be defined earlier in the original file)
cl_1 = ConvLayer(img_ch, img_h, img_w, n_flt, flt_h, flt_w, 1, 1)
pl_1 = PoolingLayer(cl_1.y_ch, cl_1.y_h, cl_1.y_w, 2, 0)

n_fc_in = pl_1.y_ch * pl_1.y_h * pl_1.y_w
ml_1 = MiddleLayer(n_fc_in, 10)
ol_1 = OutputLayer(10, 10)


# -- Forward propagation --
def forward_propagation(x):
    n_bt = x.shape[0]

    print("forward_propagation()")
    print("  x.shape", x.shape)
    images = x.reshape(n_bt, img_ch, img_h, img_w)
    print("  x.reshape", images.shape)

    cl_1.forward(images)
    pl_1.forward(cl_1.y)

    print("  pl_1.y.shape", pl_1.y.shape, "バッチ数,フィルタ数,img縦,img横")
Code Example #20
File: nn.py Project: yujiali/nn
class NN:
    """A class for general purpose neural networks, trained with
    backpropagation. The type of activation functions, number of hidden layers
    and number of units in each layer, the output function, and other options 
    during training can be configured."""
    def __init__(self):
        pass

    def init_net(self, config):
        """config is an instance of class Config"""
        
        import os

        self.config = config

        if config.is_output and (not os.path.exists(config.output_dir)):
            os.makedirs(config.output_dir)

        self.train_data = self.read_data(config.train_data_file)

        if config.is_val:
            self.val = self.read_data(config.val_data_file)
        if config.is_test:
            self.test = self.read_data(config.test_data_file)

        [num_total_cases, input_dim] = self.train_data.X.shape
        self.num_total_cases = num_total_cases
        self.input_dim = input_dim

        self.num_minibatches = num_total_cases / config.minibatch_size
        if self.num_minibatches < 1:
            self.num_minibatches += 1

        # initialize the network
        self.num_layers = config.num_layers
        self.layer = []
        in_dim = input_dim
        for i in range(0, self.num_layers):
            self.layer.append(Layer(
                in_dim, config.layer[i].out_dim, config.layer[i].act_type))
            in_dim = config.layer[i].out_dim

        self.output = OutputLayer(in_dim, config.output.out_dim,
                config.output.output_type)

        # initialize the weights in every layer
        self._init_weights(config.init_scale, config.random_seed)

    def _init_weights(self, init_scale, random_seed=None):
        if random_seed:
            np.random.seed(random_seed)

        for i in range(0, self.num_layers):
            self.layer[i].init_weight(init_scale)

        self.output.init_weight(init_scale)

    def train(self):
        config = self.config

        layer_config = LayerConfig()
        layer_config.learn_rate = config.learn_rate
        layer_config.momentum = config.momentum
        layer_config.weight_decay = config.weight_decay

        nnstore = NNStore()
        nnstore.init_from_net(self)

        for epoch in range(0, config.num_epochs):
            # shuffle the data cases
            idx = np.random.permutation(self.num_total_cases)
            train_X = self.train_data.X[idx]
            train_T = self.train_data.T[idx]

            loss = 0

            for batch in range(0, self.num_minibatches):
                i_start = batch * config.minibatch_size
                if not batch == self.num_minibatches - 1:
                    i_end = i_start + config.minibatch_size
                else:
                    i_end = self.num_total_cases

                X = train_X[i_start:i_end]
                T = train_T[i_start:i_end]
                Xbelow = X

                # forward pass
                for i in range(0, self.num_layers):
                    Xbelow = self.layer[i].forward(Xbelow)
                self.output.forward(Xbelow)

                # compute loss
                loss += self.output.loss(T)

                # backprop
                dLdXabove = self.output.backprop(layer_config)
                for i in range(self.num_layers-1, -1, -1):
                    dLdXabove = self.layer[i].backprop(dLdXabove, layer_config)

            # statistics
            avg_loss = 1.0 * loss / self.num_total_cases

            if (epoch + 1) % config.epoch_to_display == 0:
                print 'epoch ' + str(epoch + 1) + ', loss = ' + str(avg_loss)

            if (epoch + 1) % config.epoch_to_save == 0:
                nnstore.update_from_net(self)
                nnstore.write(config.output_dir + '/m' + str(epoch + 1) + '.pdata')

    def read_data(self, data_file_name):
        """(data_file_name) --> data
        Read from the specified data file, return a data object, which is an
        object with three attributes, X, T and K. X and T are the data and
        target matrices respectively, and K is the dimensionality of the output.
        Each of X and T is a matrix with N rows, N is the number of data
        cases."""

        f = open(data_file_name)

        data_dict = pickle.load(f)

        f.close()

        X = data_dict['data']
        t = data_dict['labels']
        K = data_dict['K']

        if len(t.shape) == 1 or t.shape[0] == 1 or t.shape[1] == 1:
            T = util.vec_to_mat(t, K)
        else:
            T = t

        data = Data()
        data.X = X
        data.T = T
        data.K = K

        return data

    def save_net(self, model_file_name):
        """Save the current neural net to a file."""
        pass
        
    def display(self):
        print '[' + str(self.output) + ']'
        for i in range(self.num_layers-1, -1, -1):
            print '[' + str(self.layer[i]) + ']'
        print '[input ' + str(self.input_dim) + ']'

        print 'learn_rate : ' + str(self.config.learn_rate)
        print 'init_scale : ' + str(self.config.init_scale)
        print 'momentum : ' + str(self.config.momentum)
        print 'weight_decay : ' + str(self.config.weight_decay)
Code Example #21
File: lstm.py Project: Yevgnen/LSTM
    def create_cell(self):
        # FIXME: Should create cells based on the network structure
        hidden = HiddenLayer(self.hidden_size)
        output = OutputLayer(self.hidden_size)

        return (hidden, output)
Code Example #22
File: nn.py Project: jgera/Segmentation-Code
class NN:
    """A class for general purpose neural networks, trained with
    backpropagation. The type of activation functions, number of hidden layers
    and number of units in each layer, the output function, and other options 
    during training can be configured."""
    def __init__(self):
        self.task_loss_fn = None

    def load_train_data(self, data):
        self.train_data = data
        self.train_data.X = gnp.garray(data.X)

    def load_val_data(self, data):
        self.val_data = data
        self.val_data.X = gnp.garray(data.X)

    def load_test_data(self, data):
        self.test_data = data
        self.test_data.X = gnp.garray(data.X)

    def init_net_without_loading_data(self, config):
        """This should be called after loading all required data."""
        self.config = config

        if config.is_output and (not os.path.exists(config.output_dir)):
            os.makedirs(config.output_dir)

        [num_total_cases, input_dim] = self.train_data.X.shape
        self.num_total_cases = num_total_cases
        self.input_dim = input_dim

        self.num_minibatches = num_total_cases / config.minibatch_size
        if self.num_minibatches < 1:
            self.num_minibatches = 1

        # initialize the network
        self.num_layers = config.num_layers
        self.layer = []
        in_dim = input_dim
        for i in range(self.num_layers):
            layer_spec = config.layer[i]
            self.layer.append(Layer(
                in_dim, layer_spec.out_dim, layer_spec.act_type, 
                layer_spec.weight_decay, layer_spec.weight_constraint, 
                layer_spec.dropout))
            in_dim = layer_spec.out_dim

        self.output = OutputLayer(in_dim, config.output.out_dim,
                config.output.output_type, config.output.weight_decay,
                config.output.weight_constraint, config.output.dropout)

        # If the output is not linear (i.e. not a regression output), load the task loss function
        if not isinstance(self.output.act_type, act.LinearOutput):
            if config.task_loss_file != None:
                self.task_loss = self.read_loss(config.task_loss_file)
                print 'Loading task loss from %s' % config.task_loss_file
            else:
                self.task_loss = 1 - np.eye(self.train_data.K)
                print 'No task loss specified, using 0-1 loss.'

        # To use multi-class hinge output, a training loss function is required
        if isinstance(self.output.act_type, act.MulticlassHingeOutput):
            if config.train_loss_file != None:
                self.train_loss = self.read_loss(config.train_loss_file)
                print 'Loading surrogate loss from %s' % config.train_loss_file
            else:
                self.train_loss = 1 - np.eye(self.train_data.K)
                print 'No surrogate loss specified, using 0-1 loss.'
            self.output.act_type.set_loss(self.train_loss)

        # initialize the weights in every layer
        self._init_weights(config.init_scale, config.random_seed)

    def init_net(self, config):
        """config is an instance of class Config"""
        self.train_data = self.read_data(config.train_data_file)
        print 'Loading training data from %s' % config.train_data_file

        if config.is_val:
            self.val_data = self.read_data(config.val_data_file)
            print 'Loading validation data from %s' % config.val_data_file
        if config.is_test:
            self.test_data = self.read_data(config.test_data_file)
            print 'Loading test data from %s' % config.test_data_file

        self.init_net_without_loading_data(config)

    def load_net(self, model_file):
        """Load a saved model from a specified file."""
        nnstore = NNStore()
        nnstore.load(model_file)
        self.build_net_from_copy(nnstore)

    def make_copy(self):
        """
        Make a CPU copy of the net. This copy can be used to recover the net.
        """
        nnstore = NNStore()
        nnstore.init_from_net(self)
        return nnstore

    def build_net_from_copy(self, copy):
        """
        Rebuild the net from a copy made by make_copy.
        """
        nnstore = copy
        self.num_layers = len(nnstore.layer)
        self.layer = []
        for i in range(self.num_layers):
            in_dim, out_dim = nnstore.layer[i].W.shape
            new_layer = Layer(in_dim, out_dim, nnstore.layer[i].act_type)
            new_layer.load_weight(nnstore.layer[i].W, nnstore.layer[i].b)
            self.layer.append(new_layer)

        in_dim, out_dim = nnstore.output.W.shape
        new_layer = OutputLayer(in_dim, out_dim, nnstore.output.act_type)
        new_layer.load_weight(nnstore.output.W, nnstore.output.b)
        self.output = new_layer
        if self.num_layers > 0:
            self.input_dim = self.layer[0].W.shape[0]
        else:
            self.input_dim = self.output.W.shape[0]

    def _init_weights(self, init_scale, random_seed=None):
        if random_seed:
            np.random.seed(random_seed)

        for i in range(0, self.num_layers):
            self.layer[i].init_weight(init_scale)

        self.output.init_weight(init_scale)

    def set_task_loss(self, task_loss_fn):
        """Set the task loss function to be user defined task loss.
        
        task_loss_fn should have a signature like this:
        task_loss_fn(OutputType, Y, Z, A)
        """
        self.task_loss_fn = task_loss_fn

    def _compute_loss(self, X, T, batch_size=1000):
        n_total = X.shape[0]
        n_batches = n_total / batch_size
        loss = 0
        for i in range(n_batches):
            gnp.free_reuse_cache()
            i_start = i * batch_size
            if i < n_batches - 1:
                i_end = i_start + batch_size
            else:
                i_end = n_total

            Xbatch = X[i_start:i_end]
            Tbatch = T[i_start:i_end]

            self._forward(Xbatch)
            loss += self.output.loss(Tbatch)
        
        return loss / n_total

    def train(self):
        config = self.config

        # convert t into a matrix in 1-of-K representation if it is a vector
        t = self.train_data.T
        T_matrix = self.output.act_type.label_vec_to_mat(t, self.train_data.K)

        layer_config = LayerConfig()
        layer_config.learn_rate = config.learn_rate
        layer_config.momentum = config.init_momentum
        layer_config.weight_decay = config.weight_decay

        nnstore = NNStore()
        nnstore.init_from_net(self)

        best_net = NNStore()
        best_net.init_from_net(self)

        train_acc, val_acc, test_acc = self.display_training_info(
                -1, 
                self._compute_loss(
                    self.train_data.X, T_matrix, config.minibatch_size),
                0)
        acc_rec = np.zeros((config.num_epochs / config.epoch_to_display + 1, 4))
        acc_rec[0, 0] = 0
        acc_rec[0, 1] = train_acc
        if config.is_val:
            acc_rec[0, 2] = val_acc
        if config.is_test:
            acc_rec[0, 3] = test_acc

        t_start = time.time()

        best_acc = val_acc
        if self.config.is_test:
            best_test_acc = test_acc
        best_epoch = -1

        for epoch in range(0, config.num_epochs):
            gnp.free_reuse_cache()

            # decrease learning rate over time
            layer_config.learn_rate = config.learn_rate / \
                    (epoch / config.lr_drop_rate + 1)

            # TODO [dirty] special for Lnsvm
            if isinstance(self.output.act_type, act.LnsvmVariantOutput):
                #self.output.act_type.n = 3.0 - (3.0 - 0.5) / 50 * epoch
                self.output.act_type.n = 0.5
                if self.output.act_type.n < 0.5:
                    self.output.act_type.n = 0.5 

                if (epoch + 1) % config.epoch_to_display == 0:
                    print 'n %.4f' % self.output.act_type.n,
            
            if epoch >= config.switch_epoch:
                layer_config.momentum = config.final_momentum

            # shuffle the dataset 
            idx = np.random.permutation(self.num_total_cases)
            #idx = np.arange(self.num_total_cases)
            train_X = self.train_data.X[idx]
            train_T = T_matrix[idx]

            if config.input_noise > 0:
                train_X = train_X * (gnp.rand(train_X.shape) > config.input_noise)
                # train_X = train_X + gnp.randn(train_X.shape) * config.input_noise

            loss = 0

            for batch in range(0, self.num_minibatches):
                i_start = batch * config.minibatch_size
                if not batch == self.num_minibatches - 1:
                    i_end = i_start + config.minibatch_size
                else:
                    i_end = self.num_total_cases

                X = train_X[i_start:i_end]
                T = train_T[i_start:i_end]

                # forward pass
                self._forward(X)

                # compute loss
                loss += self.output.loss(T)

                if self.output.Y.isnan().any():
                    import ipdb
                    ipdb.set_trace()
                    print 'batch #%d <-- nan' % batch

                # backprop
                dLdXabove = self.output.backprop(layer_config)
                for i in range(self.num_layers-1, -1, -1):
                    dLdXabove = self.layer[i].backprop(dLdXabove, layer_config)

            # statistics
            avg_loss = 1.0 * loss / self.num_total_cases

            if (epoch + 1) % config.epoch_to_display == 0:
                train_acc, val_acc, test_acc = self.display_training_info(
                        epoch, avg_loss, time.time() - t_start)

                if val_acc == None:
                    val_acc = train_acc

                if (config.show_task_loss and val_acc < best_acc) or \
                        (not config.show_task_loss and val_acc > best_acc):
                    best_acc = val_acc
                    best_net.update_from_net(self)
                    if config.is_test:
                        best_test_acc = test_acc
                    best_epoch = epoch
                t_start = time.time()
                acc_rec[(epoch + 1) / config.epoch_to_display, 0] = epoch + 1
                acc_rec[(epoch + 1) / config.epoch_to_display, 1] = train_acc
                if config.is_val:
                    acc_rec[(epoch + 1) / config.epoch_to_display, 2] = val_acc
                if config.is_test:
                    acc_rec[(epoch + 1) / config.epoch_to_display, 3] = test_acc

            if (epoch + 1) % config.epoch_to_save == 0:
                nnstore.update_from_net(self)
                nnstore.write(config.output_dir + '/m' + str(epoch + 1) + '.pdata')


        print '----------------------------------------------------------------'

        if config.show_task_loss:
            s = 'loss'
        else:
            s = 'acc'
        
        if config.is_val:
            print 'Best val_%s %.4f' % (s, best_acc),
        else:
            print 'Best train_%s %.4f' % (s, best_acc),

        if config.is_test:
            print '--> test_%s %.4f' % (s, best_test_acc),
        print 'at epoch %d' % (best_epoch + 1)

        if config.is_output:
            f = open('%s/acc_rec.pdata' % config.output_dir, 'w')
            pickle.dump(acc_rec, f, -1)
            f.close()

            self.write_config('%s/cfg.txt' % config.output_dir)

            # save the best net
            fname = config.output_dir + '/best_net.pdata'
            print 'Saving the best model to ' + fname
            best_net.write(fname)

        if config.is_test:
            return (best_acc, best_test_acc)
        else:
            return (best_acc)

    def display_training_info(self, epoch, loss, time):
        """Print training information. Use the config information to determine
        what information to display.

        Return a 3-tuple (train acc, val acc, test acc)
        val acc and test acc will be None if no validation/test data are given
        """
        if self.config.is_val:
            if self.config.is_test:
                return self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T,
                        val_data=self.val_data.X, val_labels=self.val_data.T,
                        test_data=self.test_data.X, test_labels=self.test_data.T)
            else:
                return self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T,
                        val_data=self.val_data.X, val_labels=self.val_data.T)
        else:
            if self.config.is_test:
                return self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T,
                        test_data=self.test_data.X, test_labels=self.test_data.T)
            else:
                return self._display_training_info(epoch, loss, time,
                        self.train_data.X, self.train_data.T)

    def _display_training_info(self, epoch, loss, time, 
            train_data, train_labels, val_data=None, val_labels=None, 
            test_data=None, test_labels=None):
        """Print training information during training."""
        print 'epoch %d, surrogate loss %.4f,' % (epoch + 1, loss),

        train_acc = 0
        val_acc = None
        test_acc = None
        acc = 0

        # print loss if it is a regression problem
        if self.config.is_regression:
            # TODO [Dirty code]
            #self.predict(train_data)
            #avg_loss = self.output.task_loss(train_labels, self.task_loss_fn)
            avg_loss = np.sqrt(self._compute_loss(train_data, train_labels) * 2)
            print 'train_loss %.4f,' % avg_loss,

            if val_data != None and val_labels != None:
                #self.predict(val_data)
                #avg_loss = self.output.task_loss(val_labels, self.task_loss_fn)
                avg_loss = np.sqrt(self._compute_loss(val_data, val_labels) * 2)
                print 'val_loss %.4f,' % (avg_loss),
                val_acc = avg_loss
            if test_data != None and test_labels != None:
                #self.predict(test_data)
                #avg_loss = self.output.task_loss(test_labels, self.task_loss_fn)
                avg_loss = np.sqrt(self._compute_loss(test_data, test_labels) * 2)
                print 'test_loss %.4f,' % (avg_loss),
                test_acc = avg_loss
        else:
            # print accuracy if it is a classification problem
            ypred = self.predict(train_data)
            if self.config.show_accuracy:
                acc = (ypred == train_labels.squeeze()).mean()
                print 'acc %.4f,' % acc,
            if self.config.show_task_loss:
                acc = self.task_loss[ypred, train_labels].mean()
                print 'loss %.4f,' % acc,

            train_acc = acc

            if val_data != None and val_labels != None:
                ypred = self.predict(val_data)
                if self.config.show_accuracy:
                    acc = (ypred == val_labels.squeeze()).mean()
                    print 'val_acc %.4f,' % acc,
                if self.config.show_task_loss:
                    acc = self.task_loss[ypred, val_labels].mean()
                    print 'val_loss %.4f,' % acc,
                val_acc = acc
            if test_data != None and test_labels != None:
                ypred = self.predict(test_data)
                if self.config.show_accuracy:
                    acc = (ypred == test_labels.squeeze()).mean()
                    print 'test_acc %.4f,' % acc,
                if self.config.show_task_loss:
                    acc = self.task_loss[ypred, test_labels].mean()
                    print 'test_loss %.4f,' % acc,
                test_acc = acc

        if self.config.display_winc:
            self.display_winc()

        print 'time %.2f' % time

        return (train_acc, val_acc, test_acc)

    def display_winc(self):
        """Display scale of weight updates. This can be used by external
        applications."""
        for i in range(0, self.num_layers):
            print 'winc%d %.5f,' % (i+1, gnp.abs(self.layer[i].Winc).max()),
        print 'winc_out %.5f,' % gnp.abs(self.output.Winc).max(),

    def _forward(self, X):
        """Do a forward pass without computing the output and predictions.
        Used as a subroutine for function predict and check_grad."""
        Xbelow = X
        for i in range(self.num_layers):
            Xbelow = self.layer[i].forward(Xbelow)
        self.output.forward(Xbelow)
       
    def predict(self, X):
        """Make prediction using the current network.
        
        X: N*D data matrix

        Return an N-element vector of predicted labels.
        """
        self._forward(X)
        return self.output.predict()

    def forward(self, X):
        """Compute the activation for each class.
        
        X: N*D data matrix

        Return an N*K activation matrix A, where K is the number of classes.
        """
        self._forward(X)
        return self.output.A

    def _backprop(self, config):
        """Backpropagate through the net from the output layer. This will be
        used as an external interface for semi-supervised application, and the
        backprop starts from the `update_weights` method of the output layer,
        rather than the `backprop` method."""
        dLdXabove = self.output.update_weights(config)
        for i in range(self.num_layers-1, -1, -1):
            dLdXabove = self.layer[i].backprop(dLdXabove, config)

    def eval_task_loss(self, X, z, loss):
        """Evaluate the performance of the net using task specific loss.
        Classification problems only.

        X: N*D data matrix
        z: N-element vector of ground truth labels.
        loss: K*K matrix, K is the number of classes.

        Return the average loss over all data cases.
        """
        y = self.predict(X)
        return loss[z, y].mean()
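
    # Usage sketch (hypothetical names): for a K-class problem the loss
    # argument is a K*K cost matrix, e.g. the 0-1 loss used as the default
    # elsewhere in this class:
    #   loss = 1 - np.eye(net.train_data.K)
    #   avg_cost = net.eval_task_loss(X_val, z_val, loss)
    # where `net` is a trained NN instance and X_val, z_val are held-out data.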

    def read_data(self, data_file_name):
        """(data_file_name) --> data
        Read from the specified data file, return a data object, which is an
        object with three attributes, X, T and K. X and T are the data and
        target matrices respectively, and K is the dimensionality of the output.
        Each of X and T is a matrix with N rows, N is the number of data
        cases."""

        f = open(data_file_name, 'rb')

        data_dict = pickle.load(f)

        f.close()

        data = Data()
        data.X = gnp.garray(data_dict['data'])
        #data.T = data_dict['labels'].astype(np.float)
        data.T = data_dict['labels']
        data.K = data_dict['K']

        return data

    def read_loss(self, loss_file_name):
        """(loss_file_name) --> loss
        Read from the specified data file, return a loss matrix.
        """
        f = open(loss_file_name, 'rb')
        d = pickle.load(f)
        f.close()

        return d
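
    # A compatible loss file is just a pickled K*K matrix; a minimal way to
    # produce one (sketch, hypothetical file name):
    #   with open('loss.pdata', 'wb') as f:
    #       pickle.dump(1 - np.eye(K), f)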

    def write_config(self, filename):
        f = open(filename, 'w')
        f.write('%d training cases\n' % self.train_data.X.shape[0])
        if self.config.is_val:
            f.write('%d validation cases\n' % self.val_data.X.shape[0])
        if self.config.is_test:
            f.write('%d test cases\n' % self.test_data.X.shape[0])
        f.write('[' + str(self.output) + ']\n')
        for i in range(self.num_layers-1, -1, -1):
            f.write('[' + str(self.layer[i]) + ']\n')
        f.write('[input ' + str(self.input_dim) + ']\n')

        f.write('learn_rate : ' + str(self.config.learn_rate) + '\n')
        f.write('init_scale : ' + str(self.config.init_scale) + '\n')
        f.write('init_momentum : ' + str(self.config.init_momentum) + '\n')
        f.write('switch_epoch : ' + str(self.config.switch_epoch) + '\n')
        f.write('final_momentum : ' + str(self.config.final_momentum) + '\n')
        f.write('weight_decay : ' + str(self.config.weight_decay) + '\n')
        f.write('minibatch_size : ' + str(self.config.minibatch_size) + '\n')
        f.write('num_epochs : ' + str(self.config.num_epochs) + '\n')
        f.write('epoch_to_save : ' + str(self.config.epoch_to_save) + '\n')

        f.close()

    def display_structure(self):
        print '[' + str(self.output) + ']'
        for i in range(self.num_layers-1, -1, -1):
            print '[' + str(self.layer[i]) + ']'
        print '[input ' + str(self.input_dim) + ']'

    def display(self):
        print '%d training cases' % self.train_data.X.shape[0]
        if self.config.is_val:
            print '%d validation cases' % self.val_data.X.shape[0]
        if self.config.is_test:
            print '%d test cases' % self.test_data.X.shape[0]

        self.display_structure()

        print 'learn_rate : ' + str(self.config.learn_rate)
        print 'init_scale : ' + str(self.config.init_scale)
        print 'init_momentum : ' + str(self.config.init_momentum)
        print 'switch_epoch : ' + str(self.config.switch_epoch)
        print 'final_momentum : ' + str(self.config.final_momentum)
        print 'weight_decay : ' + str(self.config.weight_decay)
        print 'minibatch_size : ' + str(self.config.minibatch_size)
        print 'num_epochs : ' + str(self.config.num_epochs)
        print 'epoch_to_save : ' + str(self.config.epoch_to_save)

        if self.config.is_output:
            print 'output_dir : ' + self.config.output_dir

    def check_grad(self):
        # check the gradient of the 1st layer weights

        ncases = 100

        def f(w):
            if self.num_layers == 0:
                Wtemp = self.output.W
                self.output.W = gnp.garray(w.reshape(Wtemp.shape))
            else:
                Wtemp = self.layer[0].W
                self.layer[0].W = gnp.garray(w.reshape(Wtemp.shape))

            self._forward(self.train_data.X[:ncases,:])

            Z = self.train_data.T[:ncases]
            Z = self.output.act_type.label_vec_to_mat(Z, self.train_data.K)

            L = self.output.loss(Z) / Z.shape[0]
            if self.num_layers == 0:
                self.output.W = Wtemp
            else:
                self.layer[0].W = Wtemp

            return L

        def fgrad(w):
            if self.num_layers == 0:
                Wtemp = self.output.W
                self.output.W = gnp.garray(w.reshape(Wtemp.shape))
            else:
                Wtemp = self.layer[0].W
                self.layer[0].W = gnp.garray(w.reshape(Wtemp.shape))

            self._forward(self.train_data.X[:ncases,:])

            Z = self.train_data.T[:ncases]
            Z = self.output.act_type.label_vec_to_mat(Z, self.train_data.K)
            self.output.loss(Z)

            self.output.gradient()
            dLdXabove = self.output.dLdXtop
            for i in range(self.num_layers-1, -1, -1):
                self.layer[i].gradient(dLdXabove)
                dLdXabove = self.layer[i].dLdXbelow

            if self.num_layers == 0:
                grad_w = self.output.dLdW
            else:
                grad_w = self.layer[0].dLdW

            if self.num_layers == 0:
                self.output.W = Wtemp
            else:
                self.layer[0].W = Wtemp

            return grad_w.reshape(np.prod(grad_w.shape)).asarray() / Z.shape[0]

        if self.num_layers == 0:
            W = self.output.W
        else:
            W = self.layer[0].W
        W = W.asarray()

        def finite_diff_grad(f, x0):
            eps = 1e-8
            approx = np.zeros(len(x0))
            for i in xrange(len(x0)):
                x0plus = x0.copy()
                x0minus = x0.copy()
                x0plus[i] += eps
                x0minus[i] -= eps
                approx[i] = (f(x0plus) - f(x0minus)) / (2 * eps)
            return approx
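
        # finite_diff_grad above uses the central difference
        #   f'(x_i) ~= (f(x + eps * e_i) - f(x - eps * e_i)) / (2 * eps)
        # which is accurate to O(eps^2), one coordinate at a time.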

        net_grad = fgrad(W.reshape(W.size))
        fd_grad = finite_diff_grad(f, W.reshape(W.size))
        print "wmax: %f" % np.abs(net_grad).max()
        print "finite difference grad scale: %f" % np.abs(fd_grad).max()
        print "check_grad err: %f" % np.sqrt(((fd_grad - net_grad)**2).sum())
Code Example #25
0
    class TrainingMethodTest( unittest.TestCase ):
        @classmethod
        def setUpClass( self ):
            from opencl import OpenCL
            from layer import InputLayer, OutputLayer, ExecutionContext

            self.ocl = OpenCL( pyopencl.create_some_context() )

            self.i = InputLayer( 2, self.ocl )
            self.o = OutputLayer( 1, self.ocl )

            self.i.link_next( self.o )

            self.nnc = ExecutionContext( self.i, self.o, allow_training = True )

            self.i.set_weights( numpy.array( [ 0.1 ] * self.i.weights_count, numpy.float32 ) )
            self.o.set_weights( numpy.array( [ 0.3 ] * self.o.weights_count, numpy.float32 ) )

            self.tr = TrainingResults()

            self._create_method()

        @classmethod
        def _create_method( self ):
            pass

        def assertArrayEqual( self, ar1, ar2 ):
            self.assertEqual( len( ar1 ), len( ar2 ) )
            for x, y in zip( numpy.array( ar1, numpy.float32 ), numpy.array( ar2, numpy.float32 ) ):
                self.assertAlmostEqual( x, y, places = 5 )

        def test_create( self ):
            self.setUpClass()

            if not getattr( self, 'method', None ):
                return

            self.assertAlmostEqual( self.method.n, 0.5 )
            self.assertAlmostEqual( self.method.alpha, 0.2 )
            self.assertAlmostEqual( self.method.kw, 1.03 )
            self.assertAlmostEqual( self.method.pd, 0.7 )
            self.assertAlmostEqual( self.method.pi, 1.02 )
            self.assertAlmostEqual( self.method.last_error, 0.0 )
            self.assertEqual( self.method.offline, False )

        def test_randomize_weights( self ):
            if not getattr( self, 'method', None ):
                return

            self.i.set_weights( numpy.array( [ 0.1 ] * self.i.weights_count, numpy.float32 ) )
            self.assertTrue( all( map( lambda x: abs( x - 0.1 ) < 0.0001, self.i.get_weights() ) ) )

            self.method.randomize_weights( self.nnc )
            w1 = self.i.get_weights()
            self.assertFalse( all( map( lambda x: abs( x - 0.1 ) < 0.0001, self.i.get_weights() ) ) )
            self.method.randomize_weights( self.nnc )
            self.assertFalse( all( map( lambda x: abs( x - 0.1 ) < 0.0001, self.i.get_weights() ) ) )
            self.assertNotAlmostEqual( ( w1 - self.i.get_weights() ).sum(), 0.0 )

        def test_adjust_weights( self ):
            if not getattr( self, 'method', None ):
                return

            self.method.last_error = numpy.float32( 1.0 )
            self.method.n = numpy.float32( 0.5 )
            self.method.kw = numpy.float32( 1.03 )
            self.method.pd = numpy.float32( 0.5 )
            self.method.pi = numpy.float32( 1.5 )

            self.method.adjust_training_parameters( 1.2 )
            self.assertAlmostEqual( self.method.n, 0.25 )
            self.assertAlmostEqual( self.method.last_error, 1.2 )

            self.method.adjust_training_parameters( 1.0 )
            self.assertAlmostEqual( self.method.n, 0.375 )
            self.assertAlmostEqual( self.method.last_error, 1.0 )

            self.method.adjust_training_parameters( 1.0 )
            self.assertAlmostEqual( self.method.n, 0.5625 )
            self.assertAlmostEqual( self.method.last_error, 1.0 )

        def test_prepare_training( self ):
            if not getattr( self, 'method', None ):
                return

            self.method.prepare_training( self.nnc )
            self.assertIsInstance( self.method._weights_delta_buf, pyopencl.Buffer )
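
        # _create_method is a hook: concrete subclasses set self.method there,
        # and the tests above skip themselves when it is absent. A hypothetical
        # subclass (the GradientDescent name is illustrative only):
        #   class GradientDescentTest( TrainingMethodTest ):
        #       @classmethod
        #       def _create_method( self ):
        #           self.method = GradientDescent( n = 0.5, alpha = 0.2 )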
Code Example #26
0
File: nn.py Project: jgera/Segmentation-Code
class NN:
    """A class for general purpose neural networks, trained with
    backpropagation. The type of activation functions, number of hidden layers
    and number of units in each layer, the output function, and other options 
    during training can be configured."""
    def __init__(self):
        pass

    def init_net(self, config):
        """config is an instance of class Config"""

        import os

        self.config = config

        if config.is_output and (not os.path.exists(config.output_dir)):
            os.makedirs(config.output_dir)

        self.train_data = self.read_data(config.train_data_file)

        if config.is_val:
            self.val_data = self.read_data(config.val_data_file)
        if config.is_test:
            self.test_data = self.read_data(config.test_data_file)

        [num_total_cases, input_dim] = self.train_data.X.shape
        self.num_total_cases = num_total_cases
        self.input_dim = input_dim

        self.num_minibatches = num_total_cases / config.minibatch_size
        if self.num_minibatches < 1:
            self.num_minibatches = 1

        # initialize the network
        self.num_layers = config.num_layers
        self.layer = []
        in_dim = input_dim
        for i in range(0, self.num_layers):
            self.layer.append(
                Layer(in_dim, config.layer[i].out_dim,
                      config.layer[i].act_type))
            in_dim = config.layer[i].out_dim

        self.output = OutputLayer(in_dim, config.output.out_dim,
                                  config.output.output_type)

        # To use multi-class hinge output, we need to specify the loss function
        if isinstance(self.output.act_type, act.MulticlassHingeOutput):
            if config.loss_file is not None:
                self.output.act_type.set_loss(self.read_loss(config.loss_file))
            else:
                self.output.act_type.set_loss(1 - np.eye(self.train_data.K))

        # initialize the weights in every layer
        self._init_weights(config.init_scale, config.random_seed)
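
    # Hypothetical usage sketch: `config` must provide every attribute read
    # above (is_output, output_dir, train_data_file, is_val/is_test with the
    # matching file names, minibatch_size, num_layers, layer[i].out_dim and
    # layer[i].act_type, output.out_dim and output.output_type, loss_file,
    # init_scale, random_seed):
    #   net = NN()
    #   net.init_net(config)
    #   net.train()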

    def _init_weights(self, init_scale, random_seed=None):
        if random_seed:
            np.random.seed(random_seed)

        for i in range(0, self.num_layers):
            self.layer[i].init_weight(init_scale)

        self.output.init_weight(init_scale)

    def train(self):
        config = self.config

        # convert t into a matrix in 1-of-K representation if it is a vector
        t = self.train_data.T
        if not self.config.is_regression:
            T_matrix = self.output.act_type.label_vec_to_mat(
                t, self.train_data.K)
        else:
            T_matrix = t
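
        # e.g. with K = 3, the label vector [0, 2] becomes the 1-of-K matrix
        #   [[1, 0, 0],
        #    [0, 0, 1]]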

        layer_config = LayerConfig()
        layer_config.learn_rate = config.learn_rate
        layer_config.momentum = config.momentum
        layer_config.weight_decay = config.weight_decay

        nnstore = NNStore()
        nnstore.init_from_net(self)

        self.display_training_info(-1, 0, 0)
        t_start = time.time()

        for epoch in range(0, config.num_epochs):
            # shuffle the dataset
            idx = np.random.permutation(self.num_total_cases)
            train_X = self.train_data.X[idx]
            train_T = T_matrix[idx]

            loss = 0

            for batch in range(0, self.num_minibatches):
                i_start = batch * config.minibatch_size
                if not batch == self.num_minibatches - 1:
                    i_end = i_start + config.minibatch_size
                else:
                    i_end = self.num_total_cases
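                # note: the final minibatch above absorbs any remainder cases
                # left over by the integer division in num_minibatches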

                X = train_X[i_start:i_end]
                T = train_T[i_start:i_end]
                Xbelow = X

                # forward pass
                for i in range(0, self.num_layers):
                    Xbelow = self.layer[i].forward(Xbelow)
                self.output.forward(Xbelow)

                # compute loss
                loss += self.output.loss(T)

                # backprop
                dLdXabove = self.output.backprop(layer_config)
                for i in range(self.num_layers - 1, -1, -1):
                    dLdXabove = self.layer[i].backprop(dLdXabove, layer_config)

            # statistics
            avg_loss = 1.0 * loss / self.num_total_cases

            if (epoch + 1) % config.epoch_to_display == 0:
                self.display_training_info(epoch, avg_loss,
                                           time.time() - t_start)
                t_start = time.time()

            if (epoch + 1) % config.epoch_to_save == 0:
                nnstore.update_from_net(self)
                nnstore.write(config.output_dir + '/m' + str(epoch + 1) +
                              '.pdata')

    def display_training_info(self, epoch, loss, time):
        """Print training information. Use the config information to determine
        what information to display."""
        if self.config.is_val:
            if self.config.is_test:
                self._display_training_info(epoch,
                                            loss,
                                            time,
                                            self.train_data.X,
                                            self.train_data.T,
                                            val_data=self.val_data.X,
                                            val_labels=self.val_data.T,
                                            test_data=self.test_data.X,
                                            test_labels=self.test_data.T)
            else:
                self._display_training_info(epoch,
                                            loss,
                                            time,
                                            self.train_data.X,
                                            self.train_data.T,
                                            val_data=self.val_data.X,
                                            val_labels=self.val_data.T)
        else:
            if self.config.is_test:
                self._display_training_info(epoch,
                                            loss,
                                            time,
                                            self.train_data.X,
                                            self.train_data.T,
                                            test_data=self.test_data.X,
                                            test_labels=self.test_data.T)
            else:
                self._display_training_info(epoch, loss, time,
                                            self.train_data.X,
                                            self.train_data.T)

    def _display_training_info(self,
                               epoch,
                               loss,
                               time,
                               train_data,
                               train_labels,
                               val_data=None,
                               val_labels=None,
                               test_data=None,
                               test_labels=None):
        """Print training information during training."""
        print 'epoch %d, loss %.4f,' % (epoch + 1, loss),

        # print loss if it is a regression problem
        if self.config.is_regression:
            if val_data is not None and val_labels is not None:
                self.predict(val_data)
                avg_loss = self.output.loss(val_labels) / val_labels.shape[0]
                print 'val_loss %.4f,' % avg_loss,
            if test_data is not None and test_labels is not None:
                self.predict(test_data)
                avg_loss = self.output.loss(test_labels) / test_labels.shape[0]
                print 'test_loss %.4f,' % avg_loss,
        else:
            # print accuracy if it is a classification problem
            ypred = self.predict(train_data)
            acc = (ypred == train_labels.squeeze()).mean()
            print 'acc %.4f,' % acc,

            if val_data is not None and val_labels is not None:
                ypred = self.predict(val_data)
                acc = (ypred == val_labels.squeeze()).mean()
                print 'val_acc %.4f,' % acc,
            if test_data is not None and test_labels is not None:
                ypred = self.predict(test_data)
                acc = (ypred == test_labels.squeeze()).mean()
                print 'test_acc %.4f,' % acc,

        if self.config.display_winc:
            for i in range(0, self.num_layers):
                print 'winc%d %.5f,' % (i + 1, np.abs(
                    self.layer[i].Winc).max()),
            print 'winc_out %.5f,' % np.abs(self.output.Winc).max(),

        print 'time %.2f' % time

    def _forward(self, X):
        """Do a forward pass without computing the output and predictions.
        Used as a subroutine for function predict and check_grad."""
        Xbelow = X
        for i in range(0, self.num_layers):
            Xbelow = self.layer[i].forward(Xbelow)
        self.output.forward(Xbelow)

    def predict(self, X):
        """Make prediction using the current network.
        
        X: N*D data matrix

        Return an N-element vector of predicted labels.
        """
        self._forward(X)
        return self.output.predict()

    def read_data(self, data_file_name):
        """(data_file_name) --> data
        Read from the specified data file, return a data object, which is an
        object with three attributes, X, T and K. X and T are the data and
        target matrices respectively, and K is the dimensionality of the output.
        Each of X and T is a matrix with N rows, N is the number of data
        cases."""

        f = open(data_file_name, 'rb')

        data_dict = pickle.load(f)

        f.close()

        data = Data()
        data.X = data_dict['data']
        data.T = data_dict['labels']
        data.K = data_dict['K']

        return data

    def read_loss(self, loss_file_name):
        """(loss_file_name) --> loss
        Read from the specified data file, return a loss matrix.
        """
        f = open(loss_file_name, 'rb')
        d = pickle.load(f)
        f.close()

        return d['loss']

    def display(self):
        print '%d training cases' % self.train_data.X.shape[0]
        if self.config.is_val:
            print '%d validation cases' % self.val_data.X.shape[0]
        if self.config.is_test:
            print '%d test cases' % self.test_data.X.shape[0]
        print '[' + str(self.output) + ']'
        for i in range(self.num_layers - 1, -1, -1):
            print '[' + str(self.layer[i]) + ']'
        print '[input ' + str(self.input_dim) + ']'

        print 'learn_rate : ' + str(self.config.learn_rate)
        print 'init_scale : ' + str(self.config.init_scale)
        print 'momentum : ' + str(self.config.momentum)
        print 'weight_decay : ' + str(self.config.weight_decay)
        print 'minibatch_size : ' + str(self.config.minibatch_size)
        print 'num_epochs : ' + str(self.config.num_epochs)
        print 'epoch_to_save : ' + str(self.config.epoch_to_save)

    def check_grad(self):
        # check the gradient of the 1st layer weights
        import scipy.optimize as opt

        ncases = 100

        def f(w):
            if self.num_layers == 0:
                Wtemp = self.output.W
                self.output.W = w.reshape(Wtemp.shape)
            else:
                Wtemp = self.layer[0].W
                self.layer[0].W = w.reshape(Wtemp.shape)

            self._forward(self.train_data.X[:ncases, :])

            Z = self.train_data.T[:ncases]
            if not self.config.is_regression:
                Z = self.output.act_type.label_vec_to_mat(Z, self.train_data.K)

            L = self.output.loss(Z) / Z.shape[0]
            if self.num_layers == 0:
                self.output.W = Wtemp
            else:
                self.layer[0].W = Wtemp

            return L

        def fgrad(w):
            if self.num_layers == 0:
                Wtemp = self.output.W
                self.output.W = w.reshape(Wtemp.shape)
            else:
                Wtemp = self.layer[0].W
                self.layer[0].W = w.reshape(Wtemp.shape)

            self._forward(self.train_data.X[:ncases, :])

            Z = self.train_data.T[:ncases]
            if not self.config.is_regression:
                Z = self.output.act_type.label_vec_to_mat(Z, self.train_data.K)
            self.output.loss(Z)

            self.output.gradient()
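            # the last column corresponds to the constant-1 bias padding of
            # each layer's input, so it is stripped before backpropagating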
            dLdXabove = self.output.dLdXtop[:, :-1]
            for i in range(self.num_layers - 1, -1, -1):
                self.layer[i].gradient(dLdXabove)
                dLdXabove = self.layer[i].dLdXbelow[:, :-1]

            if self.num_layers == 0:
                grad_w = self.output.dLdW
            else:
                grad_w = self.layer[0].dLdW

            if self.num_layers == 0:
                self.output.W = Wtemp
            else:
                self.layer[0].W = Wtemp

            return grad_w.reshape(np.prod(grad_w.shape)) / Z.shape[0]

        if self.num_layers == 0:
            #W = np.random.randn(
            #        self.output.W.shape[0], self.output.W.shape[1]) * 1e-3
            W = self.output.W
        else:
            #W = np.random.randn(
            #        self.layer[0].W.shape[0], self.layer[0].W.shape[1]) * 1e-3
            W = self.layer[0].W

        print "wmax: %f" % np.abs(fgrad(W.reshape(np.prod(W.shape)))).max()
        print "check_grad err: %f" % opt.check_grad(
            f, fgrad, W.reshape(np.prod(W.shape)))