Example #1
 def __init__(self, input_size, output_size, param):
     self.input_size = input_size
     self.output_size = output_size
     self.weights = WeightsBias(self.input_size, self.output_size, param.init_method, param.optimizer, param.eta)
     self.weights.InitializeWeights()
     self.regular = param.regular
     self.lambd = param.lambd
Example #2
 def __init__(self, input_size, output_size, activator, param):
     self.input_size = input_size
     self.output_size = output_size
     self.activator = activator
     self.weights = WeightsBias(self.input_size, self.output_size,
                                param.init_method, param.optimizer_name,
                                param.eta)
     self.weights.Initialize()
Example #3
def train(params):

    wb = WeightsBias(1, 1, InitialMethod.Zero, params.optimizer_name,
                     params.eta)
    wb.InitializeWeights()

    # calculate loss to decide the stop condition
    loss_history = CLossHistory()
    # read data
    X, Y = ReadData()
    # count of samples
    num_example = X.shape[1]
    num_feature = X.shape[0]

    # if num_example=200, batch_size=10, then iteration=200/10=20
    max_iteration = num_example // params.batch_size
    for epoch in range(params.max_epoch):
        print("epoch=%d" % epoch)
        X, Y = Shuffle(X, Y)
        for iteration in range(max_iteration):
            # get x and y values for one batch
            batch_x, batch_y = GetBatchSamples(X, Y, params.batch_size,
                                               iteration)
            # compute z for the batch from x
            batch_z = ForwardCalculationBatch(wb.W, wb.B, batch_x)
            # calculate gradient of w and b
            wb.dW, wb.dB = BackPropagationBatch(batch_x, batch_y, batch_z)
            # update w,b
            wb.Update()
            loss = CheckLoss(wb.W, wb.B, X, Y)
            print(epoch, iteration, loss, wb.W, wb.B)
            loss_history.AddLossHistory(epoch * max_iteration + iteration,
                                        loss, wb.W[0, 0], wb.B[0, 0])
            if loss < params.eps:
                break
            #end if
        # end for
        if loss < params.eps:
            break
    # end for
    return loss_history
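
The batch helpers called in Example #3 (ReadData, GetBatchSamples, ForwardCalculationBatch, BackPropagationBatch, CheckLoss) are not shown on this page. Below is a minimal sketch, assuming samples are stored one per column (X.shape == (features, samples), as the num_example = X.shape[1] line implies) and a mean-squared-error loss, of what the forward, gradient and loss helpers would compute for this single-weight linear model; the function names here are placeholders, not the original API.

import numpy as np

def forward_batch(W, B, batch_x):
    # batch_x: (1, batch_size); W, B: (1, 1) -> z = W.x + B, broadcast over the batch
    return np.dot(W, batch_x) + B

def backward_batch(batch_x, batch_y, batch_z):
    # average the gradients of 1/2 * (z - y)^2 over the batch
    m = batch_x.shape[1]
    dZ = batch_z - batch_y
    dW = np.dot(dZ, batch_x.T) / m
    dB = np.sum(dZ, axis=1, keepdims=True) / m
    return dW, dB

def check_loss(W, B, X, Y):
    # mean squared error over all samples
    m = X.shape[1]
    Z = np.dot(W, X) + B
    return np.sum((Z - Y) ** 2) / (2 * m)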
Example #4
class FcLayer(CLayer):
    def __init__(self, input_size, output_size, param):
        self.input_size = input_size
        self.output_size = output_size
        self.weights = WeightsBias(self.input_size, self.output_size,
                                   param.init_method, param.optimizer,
                                   param.eta)
        self.regular = param.regular
        self.lambd = param.lambd

    def initialize(self, folder):
        self.weights.InitializeWeights(folder)

    def forward(self, input, train=True):
        self.input_shape = input.shape
        if input.ndim == 3:  # come from pooling layer
            self.x = input.reshape(input.size, 1)
        else:
            self.x = input
        self.z = np.dot(self.x, self.weights.W) + self.weights.B
        return self.z

    # The activation function is counted as part of the current layer: after the error
    # from the layer above is passed in, it first goes through the derivative of the
    # activation function, which yields this layer's error with respect to z.
    def backward(self, delta_in, idx):
        dZ = delta_in
        m = self.x.shape[0]
        if self.regular == RegularMethod.L2:
            # x is (m, input_size), dZ is (m, output_size): dW = x.T . dZ / m
            self.weights.dW = (np.dot(self.x.T, dZ) +
                               self.lambd * self.weights.W) / m
        elif self.regular == RegularMethod.L1:
            self.weights.dW = (np.dot(self.x.T, dZ) +
                               self.lambd * np.sign(self.weights.W)) / m
        else:
            self.weights.dW = np.dot(self.x.T, dZ) / m
        # end if
        self.weights.dB = np.sum(dZ, axis=0, keepdims=True) / m
        # calculate delta_out for lower level
        if idx == 0:
            return None

        delta_out = np.dot(dZ, self.weights.W.T)

        if len(self.input_shape) > 2:
            return delta_out.reshape(self.input_shape)
        else:
            return delta_out

    def pre_update(self):
        self.weights.pre_Update()

    def update(self):
        self.weights.Update()

    def save_parameters(self, folder, name):
        self.weights.SaveResultValue(folder, name)

    def load_parameters(self, folder, name):
        self.weights.LoadResultValue(folder, name)
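
Note that Example #4 lays samples out one per row (forward computes z = x.W + B and m = x.shape[0]), whereas Examples #5 and #6 lay them out one per column (z = W.x + B and m = x.shape[1]); every product in backward is transposed between the two layouts. A small self-contained check, with shapes chosen here only for illustration, that the two conventions produce the same weight gradient:

import numpy as np

np.random.seed(0)
m, n_in, n_out = 8, 4, 3
x_rows = np.random.randn(m, n_in)        # row-per-sample layout (Example #4)
dZ_rows = np.random.randn(m, n_out)      # incoming error, one row per sample

# row layout: dW = x.T . dZ / m, delta_out = dZ . W.T
dW_rows = np.dot(x_rows.T, dZ_rows) / m

# column layout (Examples #5 and #6): x is (n_in, m), dZ is (n_out, m)
x_cols, dZ_cols = x_rows.T, dZ_rows.T
dW_cols = np.dot(dZ_cols, x_cols.T) / m  # dW = dZ . x.T / m

# the weight matrix and its gradient are simply transposed between the two layouts
assert np.allclose(dW_rows, dW_cols.T)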
Example #5
class FcLayer(CLayer):
    def __init__(self, input_size, output_size, activator):
        self.input_size = input_size
        self.output_size = output_size
        self.activator = activator

    def Initialize(self, param):
        self.weights = WeightsBias(self.input_size, self.output_size,
                                   param.init_method, param.optimizer_name,
                                   param.eta)
        self.weights.Initialize()

    def forward(self, input):
        self.input_shape = input.shape
        if input.ndim == 3:  # come from pooling layer
            self.x = input.reshape(input.size, 1)
        else:
            self.x = input
        self.z = np.dot(self.weights.W, self.x) + self.weights.B
        self.a = self.activator.forward(self.z)
        return self.a

    # The activation function is counted as part of the current layer: after the error
    # from the layer above is passed in, it first goes through the derivative of the
    # activation function, which yields this layer's error with respect to z.
    def backward(self, delta_in, flag):
        if flag == LayerIndexFlags.LastLayer or flag == LayerIndexFlags.SingleLayer:
            dZ = delta_in
        else:
            dZ, _ = self.activator.backward(self.z, self.a, delta_in)

        m = self.x.shape[1]
        self.weights.dW = np.dot(dZ, self.x.T) / m
        self.weights.dB = np.sum(dZ, axis=1, keepdims=True) / m
        # calculate delta_out for lower level
        delta_out = np.dot(self.weights.W.T, dZ)

        if len(self.input_shape) > 2:
            return delta_out.reshape(self.input_shape)
        else:
            return delta_out

    def pre_update(self):
        self.weights.pre_Update()

    def update(self):
        self.weights.Update()

    def save_parameters(self, name):
        self.weights.SaveResultValue(name)

    def load_parameters(self, name):
        self.weights.LoadResultValue(name)
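
The FcLayer in Example #5 only assumes that the activator exposes forward(z), returning the activation, and backward(z, a, delta), returning a pair whose first element is the error with respect to z. A minimal ReLU written against that interface might look as follows; the class name and the second return value (the local derivative) are assumptions for illustration, not the original code.

import numpy as np

class Relu(object):
    def forward(self, z):
        # a = max(0, z)
        return np.maximum(0, z)

    def backward(self, z, a, delta):
        # local derivative of ReLU, then chain rule to get the error w.r.t. z
        da = (z > 0).astype(np.float64)
        dz = delta * da
        return dz, da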
Example #6
class FcLayer(CLayer):
    def __init__(self, input_size, output_size, param):
        self.input_size = input_size
        self.output_size = output_size
        self.weights = WeightsBias(self.input_size, self.output_size, param.init_method, param.optimizer_name, param.eta)
        self.weights.InitializeWeights()

    def forward(self, input, train=True):
        self.input_shape = input.shape
        if input.ndim == 3: # come from pooling layer
            self.x = input.reshape(input.size, 1)
        else:
            self.x = input
        self.z = np.dot(self.weights.W, self.x) + self.weights.B
        return self.z

    # The activation function is counted as part of the current layer: after the error
    # from the layer above is passed in, it first goes through the derivative of the
    # activation function, which yields this layer's error with respect to z.
    def backward(self, delta_in, idx):
        dZ = delta_in
        m = self.x.shape[1]
        self.weights.dW = np.dot(dZ, self.x.T) / m
        self.weights.dB = np.sum(dZ, axis=1, keepdims=True) / m
        # calculate delta_out for lower level
        if idx == 0:
            return None
        
        delta_out = np.dot(self.weights.W.T, dZ)

        if len(self.input_shape) > 2:
            return delta_out.reshape(self.input_shape)
        else:
            return delta_out

    def pre_update(self):
        self.weights.pre_Update()

    def update(self):
        self.weights.Update()
        
    def save_parameters(self, name):
        self.weights.SaveResultValue(name)

    def load_parameters(self, name):
        self.weights.LoadResultValue(name)
Example #7
 def __init__(self, input_size, output_size, param):
     self.input_size = input_size
     self.output_size = output_size
     self.weights = WeightsBias(self.input_size, self.output_size,
                                param.init_method, param.eta)
Example #8
 def Initialize(self, param):
     self.weights = WeightsBias(self.input_size, self.output_size,
                                param.init_method, param.optimizer_name,
                                param.eta)
     self.weights.Initialize()