Example #1
    def __init__(self, dr):
        self.dr = dr
        self.loss_fun = LossFunction_1_1(NetType.BinaryClassifier)
        # four hand-unrolled timesteps; timestep_1 and timestep_4
        # presumably specialize the first and last step
        self.t1 = timestep_1()
        self.t2 = timestep()
        self.t3 = timestep()
        self.t4 = timestep_4()
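
For orientation, a minimal runnable sketch of the hand-unrolled pattern this constructor implies: four timestep objects applied in sequence, each passing its hidden state to the next. The TimestepSketch class and its forward signature are illustrative assumptions, not the repository's actual timestep API.

import numpy as np

# Hypothetical stand-in for the timestep classes above; the forward
# signature (x, prev_h, U, W, V) is assumed for illustration only.
class TimestepSketch:
    def forward(self, x, prev_h, U, W, V):
        h = np.tanh(x @ U + prev_h @ W)  # new hidden state
        z = h @ V                        # this step's raw output
        return h, z

steps = [TimestepSketch() for _ in range(4)]  # plays the role of t1..t4
U = np.random.randn(2, 3)
W = np.random.randn(3, 3)
V = np.random.randn(3, 1)
h = np.zeros((1, 3))
for ts in steps:
    x_t = np.random.randn(1, 2)               # one input per timestep
    h, z = ts.forward(x_t, h, U, W, V)
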
Example #2
    def __init__(self, hp, model_name):
        self.hp = hp
        self.model_name = model_name
        self.subfolder = os.getcwd() + "/" + self.__create_subfolder()
        print(self.subfolder)
        # this implementation assumes equal hidden widths
        assert self.hp.num_hidden1 == self.hp.num_hidden2
        if not self.load_parameters(ParameterType.Init):
            self.U1, self.bU1 = WeightsBias_2_1.InitialParameters(
                self.hp.num_input, self.hp.num_hidden1, InitialMethod.Normal)
            self.U2, self.bU2 = WeightsBias_2_1.InitialParameters(
                self.hp.num_input, self.hp.num_hidden2, InitialMethod.Normal)
            self.V, self.bV = WeightsBias_2_1.InitialParameters(
                self.hp.num_hidden1, self.hp.num_output, InitialMethod.Normal)
            self.W1, _ = WeightsBias_2_1.InitialParameters(
                self.hp.num_hidden1, self.hp.num_hidden1, InitialMethod.Normal)
            self.W2, _ = WeightsBias_2_1.InitialParameters(
                self.hp.num_hidden2, self.hp.num_hidden2, InitialMethod.Normal)
            self.save_parameters(ParameterType.Init)
        # end if

        self.loss_fun = LossFunction_1_1(self.hp.net_type)
        self.loss_trace = TrainingHistory_3_0()
        self.ts_list = []
        for i in range(self.hp.num_step):
            ts = timestep()
            self.ts_list.append(ts)
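
WeightsBias_2_1.InitialParameters is not shown on this page; under InitialMethod.Normal it presumably draws Gaussian weights and zero biases. A hedged numpy sketch, where the standard deviation is an assumption rather than the library's actual value:

import numpy as np

def initial_parameters(num_input, num_output, std=0.1):
    # Sketch of Normal initialization: Gaussian weights, zero bias row.
    W = np.random.normal(0.0, std, size=(num_input, num_output))
    B = np.zeros((1, num_output))
    return W, B

U1, bU1 = initial_parameters(4, 8)  # e.g. num_input=4, num_hidden1=8
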
Example #3
    def __init__(self, hp, model_name):
        self.hp = hp
        self.model_name = model_name
        self.subfolder = os.getcwd() + "/" + self.__create_subfolder()
        print(self.subfolder)

        if not self.load_parameters(ParameterType.Init):
            self.U, _ = WeightsBias_2_1.InitialParameters(
                self.hp.num_input, self.hp.num_hidden, InitialMethod.Normal)
            self.V, _ = WeightsBias_2_1.InitialParameters(
                self.hp.num_hidden, self.hp.num_output, InitialMethod.Normal)
            self.W, _ = WeightsBias_2_1.InitialParameters(
                self.hp.num_hidden, self.hp.num_hidden, InitialMethod.Normal)
            self.save_parameters(ParameterType.Init)
        #end if
        self.bu = np.zeros((1, self.hp.num_hidden))
        self.bv = np.zeros((1, self.hp.num_output))

        self.zero_state = np.zeros((self.hp.batch_size, self.hp.num_hidden))
        self.loss_fun = LossFunction_1_1(self.hp.net_type)
        self.loss_trace = TrainingHistory_3_0()
        self.ts_list = []
        for i in range(self.hp.num_step + 1):  # create one more ts to hold zero values
            ts = timestep_fit()
            self.ts_list.append(ts)
Example #4
    def train(self, dataReader, checkpoint=0.1, need_test=True):
        t0 = time.time()
        self.lossFunc = LossFunction_1_1(self.hp.net_type)
        if self.hp.regular_name == RegularMethod.EarlyStop:
            self.loss_trace = TrainingHistory_3_0(True, self.hp.regular_value)
        else:
            self.loss_trace = TrainingHistory_3_0()

        if self.hp.batch_size == -1 or self.hp.batch_size > dataReader.num_train:
            self.hp.batch_size = dataReader.num_train
        # end if
        max_iteration = math.ceil(dataReader.num_train / self.hp.batch_size)
        checkpoint_iteration = int(math.ceil(max_iteration * checkpoint))
        need_stop = False
        for epoch in range(self.hp.max_epoch):
            dataReader.Shuffle()
            for iteration in range(max_iteration):
                # get x and y values for one batch
                batch_x, batch_y = dataReader.GetBatchTrainSamples(
                    self.hp.batch_size, iteration)
                # for optimizers which need pre-update weights
                if self.hp.optimizer_name == OptimizerName.Nag:
                    self.__pre_update()
                # forward pass: compute output z from batch_x
                self.__forward(batch_x, train=True)
                # calculate gradient of w and b
                self.__backward(batch_x, batch_y)
                # final update w,b
                self.__update()

                total_iteration = epoch * max_iteration + iteration
                if (total_iteration + 1) % checkpoint_iteration == 0:
                    #self.save_parameters()
                    need_stop = self.CheckErrorAndLoss(dataReader, batch_x,
                                                       batch_y, epoch,
                                                       total_iteration)
                    if need_stop:
                        break
                #end if
            # end for
            #self.save_parameters()  # saving here would noticeably hurt performance due to frequent disk I/O, and file reads/writes can fail
            if need_stop:
                break
            # end if
        # end for
        self.CheckErrorAndLoss(dataReader, batch_x, batch_y, epoch,
                               total_iteration)

        t1 = time.time()
        print("time used:", t1 - t0)

        self.save_parameters()

        self.__check_weights_from_fc_layer()

        if need_test:
            print("testing...")
            self.accuracy = self.Test(dataReader)
            print(self.accuracy)
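
A worked example of the checkpoint arithmetic in this train loop, with illustrative values: checkpoint=0.1 means the loss is evaluated roughly every 10% of an epoch. The ceil (and keeping the result at least 1) matters, because a checkpoint_iteration of 0 would crash the (total_iteration + 1) % checkpoint_iteration test.

import math

num_train, batch_size, checkpoint = 200, 10, 0.1   # illustrative values
max_iteration = math.ceil(num_train / batch_size)  # 20 batches per epoch
checkpoint_iteration = int(math.ceil(max_iteration * checkpoint))
assert (max_iteration, checkpoint_iteration) == (20, 2)  # check loss every 2 iterations
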
Example #5
    def train(self, dataReader, checkpoint=0.1, need_test=True):
        t0 = time.time()
        self.loss_trace = TrainingHistory_2_4()
        self.lossFunc = LossFunction_1_1(self.hp.net_type)
        # if num_example=200, batch_size=10, then iteration=200/10=20
        if self.hp.batch_size == -1 or self.hp.batch_size > dataReader.num_train:
            self.hp.batch_size = dataReader.num_train
        # end if
        max_iteration = math.ceil(dataReader.num_train / self.hp.batch_size)
        checkpoint_iteration = max(1, int(max_iteration * checkpoint))
        need_stop = False
        for epoch in range(self.hp.max_epoch):
            for iteration in range(max_iteration):
                # get x and y values for one batch
                batch_x, batch_y = dataReader.GetBatchTrainSamples(
                    self.hp.batch_size, iteration)
                # forward pass: compute output z from batch_x
                self.__forward(batch_x, train=True)
                # calculate gradient of w and b
                self.__backward(batch_x, batch_y)
                # final update w,b
                self.__update()

                total_iteration = epoch * max_iteration + iteration
                if (total_iteration + 1) % checkpoint_iteration == 0:
                    #self.save_parameters()
                    need_stop = self.CheckErrorAndLoss(dataReader, batch_x,
                                                       batch_y, epoch,
                                                       total_iteration)
                    if need_stop:
                        break
                #end if
            # end for
            #self.save_parameters()  # saving here would noticeably hurt performance due to frequent disk I/O, and file reads/writes can fail
            dataReader.Shuffle()
            if need_stop:
                break
            # end if
        # end for
        self.CheckErrorAndLoss(dataReader, batch_x, batch_y, epoch,
                               total_iteration)

        t1 = time.time()
        print("time used:", t1 - t0)

        self.save_parameters()

        weights, zeros, littles, total = self.__get_weights_from_fc_layer()
        print("total weights abs sum=", weights)
        print("total weights =", total)
        print("little weights =", littles)
        print("zero weights =", zeros)

        if need_test:
            print("testing...")
            accuracy = self.Test(dataReader)
            print(accuracy)
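
__get_weights_from_fc_layer is not shown here; a plausible numpy sketch of the four statistics printed above, where the 1e-2 threshold for "little" weights is an assumption:

import numpy as np

def weight_stats(w, little_threshold=1e-2):
    # Mirrors the (weights, zeros, littles, total) tuple printed above:
    # absolute sum, exact-zero count, near-zero count, element count.
    abs_sum = float(np.sum(np.abs(w)))
    zeros = int(np.sum(w == 0))
    littles = int(np.sum(np.abs(w) < little_threshold))
    return abs_sum, zeros, littles, w.size

weights, zeros, littles, total = weight_stats(np.random.randn(64, 32))
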
Example #6
    def __init__(self, dr, input_size, hidden_size, output_size):
        self.dr = dr
        self.loss_fun = LossFunction_1_1(NetType.BinaryClassifier)
        self.loss_trace = TrainingHistory_3_0()
        self.times = 4
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.grucell = []
        self.linearcell = []
        # one GRU cell plus one linear readout per unrolled timestep
        for i in range(self.times):
            self.grucell.append(GRUCell(input_size, hidden_size))
            self.linearcell.append(
                LinearCell_1_2(hidden_size, output_size, Logistic(), bias=False))
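
The GRUCell constructed above is not defined on this page; a minimal numpy sketch of one GRU forward step, where the parameter names, shapes, and gating convention are assumptions:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_forward(x, h_prev, Wz, Uz, Wr, Ur, Wh, Uh):
    # One common GRU convention (Cho et al., 2014):
    z = sigmoid(x @ Wz + h_prev @ Uz)        # update gate
    r = sigmoid(x @ Wr + h_prev @ Ur)        # reset gate
    n = np.tanh(x @ Wh + (r * h_prev) @ Uh)  # candidate state
    return (1 - z) * h_prev + z * n

# smoke test with input_size=2, hidden_size=3
Wz, Wr, Wh = (np.random.randn(2, 3) for _ in range(3))
Uz, Ur, Uh = (np.random.randn(3, 3) for _ in range(3))
h = gru_forward(np.zeros((1, 2)), np.zeros((1, 3)), Wz, Uz, Wr, Ur, Wh, Uh)
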
Example #7
    def __init__(self, hp):
        self.hp = hp
        self.U = np.random.random((self.hp.num_input, self.hp.num_hidden))
        self.W = np.random.random((self.hp.num_hidden, self.hp.num_hidden))
        self.V = np.random.random((self.hp.num_hidden, self.hp.num_output))
        self.zero_state = np.zeros((self.hp.batch_size, self.hp.num_hidden))
        self.loss_fun = LossFunction_1_1(self.hp.net_type)
        self.loss_trace = TrainingHistory_3_0()
        self.ts_list = []
        for i in range(self.hp.num_step + 1):
            ts = timestep()
            self.ts_list.append(ts)
        # end for
        # the extra timestep at index num_step serves as a zero-valued sentinel
        self.ts_list[self.hp.num_step].s = np.zeros(
            (self.hp.batch_size, self.hp.num_hidden))
        self.ts_list[self.hp.num_step].dh = np.zeros(
            (self.hp.batch_size, self.hp.num_hidden))
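
Note the trick at the end of this constructor: the extra timestep at index num_step carries zero state and a zero hidden-state gradient, presumably so the unrolled forward pass can read ts_list[t - 1] at t = 0 (Python's negative indexing lands on the sentinel) and the backward pass can read ts_list[t + 1] at the last real step. A minimal illustration of that indexing:

num_step = 4
ts_list = [f"ts{i}" for i in range(num_step)] + ["zero sentinel"]
assert ts_list[0 - 1] == "zero sentinel"               # forward, t = 0: previous state
assert ts_list[(num_step - 1) + 1] == "zero sentinel"  # backward, last t: next gradient
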
Example #8
    def __init__(self, hp, model_name):
        self.hp = hp
        self.model_name = model_name
        self.subfolder = os.getcwd() + "/" + self.__create_subfolder()
        print(self.subfolder)

        if not self.load_parameters(ParameterType.Init):
            self.U, self.bu = WeightsBias_2_1.InitialParameters(
                self.hp.num_input, self.hp.num_hidden, InitialMethod.Normal)
            self.V, self.bv = WeightsBias_2_1.InitialParameters(
                self.hp.num_hidden, self.hp.num_output, InitialMethod.Normal)
            self.W, _ = WeightsBias_2_1.InitialParameters(
                self.hp.num_hidden, self.hp.num_hidden, InitialMethod.Normal)
            self.save_parameters(ParameterType.Init)
        #end if

        self.loss_fun = LossFunction_1_1(self.hp.net_type)
        self.loss_trace = TrainingHistory_3_0()
        self.ts_list = []
        for i in range(self.hp.num_step):
            if (i == 0):
                ts = timestep(self.hp.net_type,
                              self.hp.output_type,
                              isFirst=True,
                              isLast=False)
            elif (i == self.hp.num_step - 1):
                ts = timestep(self.hp.net_type,
                              self.hp.output_type,
                              isFirst=False,
                              isLast=True)
            else:
                ts = timestep(self.hp.net_type,
                              self.hp.output_type,
                              isFirst=False,
                              isLast=False)
            #endif
            self.ts_list.append(ts)
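
The three-way branch above only varies the isFirst/isLast flags; an equivalent, more compact construction, sketched with a stand-in class that mimics the timestep signature shown in this example:

# Stand-in mimicking timestep(net_type, output_type, isFirst, isLast).
class Timestep:
    def __init__(self, net_type, output_type, isFirst, isLast):
        self.isFirst, self.isLast = isFirst, isLast

num_step, net_type, output_type = 4, "fitting", "last_step"  # illustrative values
ts_list = [Timestep(net_type, output_type,
                    isFirst=(i == 0), isLast=(i == num_step - 1))
           for i in range(num_step)]
assert ts_list[0].isFirst and ts_list[-1].isLast
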
Example #9
    def __init__(self, dr):
        self.dr = dr
        self.loss_fun = LossFunction_1_1(NetType.Fitting)
        self.t1 = timestep_1()
        self.t2 = timestep_2()
Example #10
    def __init__(self, dr):
        self.dr = dr
        self.loss_fun = LossFunction_1_1(NetType.Fitting)
        self.loss_trace = TrainingHistory_3_0()
        self.t1 = timestep_1()
        self.t2 = timestep_2()