Code Example #1
File: models.py Project: oya163/nlp-assignments
    def __init__(self, train_file, dev_file, n_gram, unk_threshold, unk_val):
        self.train_file = train_file
        self.dev_file = dev_file
        self.n_gram = n_gram
        self.unk_threshold = unk_threshold
        self.unk_val = unk_val

        self.train_dl = Dataloader(self.train_file, self.n_gram,
                                   self.unk_threshold)
        self.dev_dl = Dataloader(self.dev_file, self.n_gram,
                                 self.unk_threshold)
        self.t_sents, self.t_items, self.t_types, self.t_tokens = self.train_dl.read_file()
        self.d_sents, self.d_items, self.d_types, self.d_tokens = self.dev_dl.read_file()

        self.t_prob = {}
        self.d_prob = {}

        # Subtract two because the type and token lists include <BOS> and <EOS>
        self.t_total_types = len(self.t_types) - 2
        self.d_total_types = len(self.d_types) - 2

        self.t_total_tokens = len(self.t_tokens) - 2
        self.d_total_tokens = len(self.d_tokens) - 2
Code Example #2
File: models.py Project: oya163/nlp-assignments
class DataModule():
    def __init__(self, train_file, dev_file, n_gram, unk_threshold, unk_val):
        self.train_file = train_file
        self.dev_file = dev_file
        self.n_gram = n_gram
        self.unk_threshold = unk_threshold
        self.unk_val = unk_val

        self.train_dl = Dataloader(self.train_file, self.n_gram,
                                   self.unk_threshold)
        self.dev_dl = Dataloader(self.dev_file, self.n_gram,
                                 self.unk_threshold)
        self.t_sents, self.t_items, self.t_types, self.t_tokens = self.train_dl.read_file()
        self.d_sents, self.d_items, self.d_types, self.d_tokens = self.dev_dl.read_file()

        self.t_prob = {}
        self.d_prob = {}

        # Subtract two because the type and token lists include <BOS> and <EOS>
        self.t_total_types = len(self.t_types) - 2
        self.d_total_types = len(self.d_types) - 2

        self.t_total_tokens = len(self.t_tokens) - 2
        self.d_total_tokens = len(self.d_tokens) - 2

    # Print general stats of the dataset
    def print_stat(self):
        print("**********DATA STATISTICS********")
        print("Length of type list in train_file", self.t_total_types)
        print("Length of type list in dev_file", self.d_total_types)
        print("Length of token list in train_file", self.t_total_tokens)
        print("Length of token list in dev_file", self.d_total_tokens)
        print("OOV = ", self.t_total_tokens - self.d_total_tokens)
Code Example #3
File: models.py Project: oya163/nlp-assignments
class Backoff(DataModule):
    def __init__(self, train_file, dev_file, n_gram, unk_threshold, unk_val,
                 lambda_val, discount, norm_const):
        super().__init__(train_file, dev_file, n_gram, unk_threshold, unk_val)
        self.lambda_val = lambda_val
        self.discount = discount
        self.norm_const = norm_const
        self.t_total_length = self.t_total_types + (self.t_total_tokens *
                                                    self.lambda_val)

        if self.n_gram == 2:
            self.uni_train_dl = Dataloader(self.train_file, 1,
                                           self.unk_threshold)
            self.uni_dev_dl = Dataloader(self.dev_file, 1, self.unk_threshold)

            self.ut_sents, self.ut_items, self.ut_types, self.ut_tokens = self.uni_train_dl.read_file()
            self.ud_sents, self.ud_items, self.ud_types, self.ud_tokens = self.uni_dev_dl.read_file()

            self.ut_total_types = len(self.ut_types) - 2
            self.ut_total_tokens = len(self.ut_tokens) - 2

            self.ut_total_length = self.ut_total_types + (
                self.ut_total_tokens * self.lambda_val)

    # Add-lambda smoothing: prob(X) = (count(X) + lambda) / (total_types + total_tokens * lambda)
    def train(self):
        self.t_prob = {
            k: (v + self.lambda_val) / self.t_total_length
            for k, v in self.t_items.items()
        }
        if self.n_gram == 2:
            self.ut_prob = {
                k: (v + self.lambda_val) / self.ut_total_length
                for k, v in self.ut_items.items()
            }
        return self.t_prob

    # Calculate Perplexity
    def eval(self):
        prob_list = []
        for k, v in self.d_items.items():
            if k in self.t_prob:
                self.d_prob[k] = self.t_prob[k] - self.discount
            elif self.n_gram == 2:
                # back off to the unigram probability of the second word
                # in the unseen bigram
                word = k[1]
                self.d_prob[k] = self.norm_const * self.ut_prob.get(
                    word, self.unk_val + self.lambda_val)
            else:
                self.d_prob[k] = self.unk_val + self.lambda_val
            prob_list.append(self.d_prob[k])

        return np.exp(-np.mean(np.log(prob_list)))
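
The eval() method above returns the standard perplexity, the exponential of the negative mean log probability. A self-contained sketch with toy numbers:

import numpy as np

# Toy probabilities for four n-grams; any list of probabilities works.
prob_list = [0.25, 0.1, 0.5, 0.05]

# perplexity = exp(-(1/N) * sum(log p_i)), exactly what eval() returns
perplexity = np.exp(-np.mean(np.log(prob_list)))
print(perplexity)  # ~6.32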
Code Example #4
 def test_sche_opt(self):
     self.sche = Sched()
     buffer = "./ut_lmdb"
     print(lmdb_op.len(buffer))
     dataloader = Dataloader(buffer,
                             lmdb_op,
                             worker_num=3,
                             batch_size=64,
                             batch_num=40)
     opt = ray.remote(Optimizer).options(num_gpus=0.3).remote(
         dataloader, iter_steps=10, update_period=10000)
     t0 = time.time()
     self.sche.add(opt, "__call__")
     self.sche.add(opt, "__next__")
     count_call = 0
     count_next = 0
     while True:
         tsks, infos = self.sche.wait()
         if infos[0].method == "__call__":
             self.sche.add(opt, "__call__")
             count_call += 1
         elif infos[0].method == "__next__":
             self.sche.add(opt, "__next__")
             count_next += 1
         if count_call == 20 or count_next == 20:
             print(count_call, count_next)
             break
     t1 = time.time()
     print(t1 - t0)
Code Example #5
 def test_1_worker(self):
     buffer = "./ut_lmdb"
     buffer = db_op.init(buffer)
     # exc_worker = BasicWorker(db=buffer, db_write=db_op.write)
     exc_worker = DQN_Worker(db=buffer, db_write=db_op.write)
     # exc_worker.update(eps=0.9)
     dataloader = Dataloader(buffer,
                             db_op,
                             batch_size=256,
                             worker_num=8,
                             batch_num=20)
     # for _ in range(20):
     t0 = time.time()
     while db_op.len(buffer) < 10000:
         _ = next(exc_worker)
         # print(db_op.len(buffer))
     print(time.time() - t0)
     count = 0
     t0 = time.time()
     for data, _, _, _ in dataloader:
         fd = {k: torch.from_numpy(v) for k, v in data.items()}
         fd = {k: v.cuda().float() for k, v in fd.items()}
         time.sleep(0.02)
         count += 1
         if count == 1000:
             break
     print(time.time() - t0)
     db_op.clean(buffer)
Code Example #6
 def test_train(self):
     buffer = db_op.init("./ut_lmdb_l")
     exc_worker = DQN_Worker(db=buffer, db_write=db_op.write)
     dataloader = Dataloader(buffer,
                             db_op,
                             batch_size=64,
                             worker_num=3,
                             batch_num=40)
     opt = Optimizer(dataloader, iter_steps=400, update_period=10000)
     exc_worker.update(opt(), 1)
     count = 0
     while True:
         wk_info = next(exc_worker)
         if wk_info is not None:
             # exc_worker.save("./train_video") if count % 100 == 0 else None
             print("worker reward: {} @ episode {}".format(
                 wk_info["episod_rw"], count))
             count += 1
         if db_op.len(buffer) >= 10000:
             opt_info = next(opt)
             print("loss {} @ step {} with buff {}".format(
                 opt_info["loss"], opt_info["opt_steps"],
                 db_op.len(buffer)))
             exc_worker.update(opt(), 0.05)
             if opt_info["opt_steps"] == 10000:
                 break
     db_op.clean(buffer)
Code Example #7
    def start_Gpu_training(self,
                           datafolder,
                           batchsize,
                           learning_rate,
                           number_of_epoch,
                           display=1000):
        data = Dataloader.loaddata(datafolder["data"])
        testData = Dataloader.loaddata(datafolder["test"])
        labels = Dataloader.loaddata(datafolder["label"])
        data = np.array(data)
        train, val, temptrainlabel, tempvallabel = train_test_split(
            data, labels, test_size=0.3)

        # train = data[0:25000, :, :]
        # val = data[25001:29160, :, :]
        # temptrainlabel = labels[0:25000]
        # tempvallabel = labels[25001:29160]
        trainlabel = Dataloader.toHotEncoding(temptrainlabel)
        vallabel = Dataloader.toHotEncoding(tempvallabel)
        run_trainingepoch(number_of_epoch, train, trainlabel, val, vallabel,
                          testData, batchsize, train.shape[0])
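
Dataloader.toHotEncoding is not shown in these examples. A minimal numpy sketch of the one-hot conversion it presumably performs (the standalone function name and shape convention here are assumptions):

import numpy as np

def to_hot_encoding(labels, num_classes=None):
    # One-hot encode an integer label vector into shape (N, num_classes).
    labels = np.asarray(labels, dtype=int)
    if num_classes is None:
        num_classes = labels.max() + 1
    encoded = np.zeros((labels.size, num_classes), dtype=np.float32)
    encoded[np.arange(labels.size), labels] = 1.0
    return encoded

print(to_hot_encoding([0, 2, 1]))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]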
Code Example #8
File: models.py Project: oya163/nlp-assignments
    def __init__(self, train_file, dev_file, n_gram, unk_threshold, unk_val,
                 lambda_val, discount, norm_const):
        super().__init__(train_file, dev_file, n_gram, unk_threshold, unk_val)
        self.lambda_val = lambda_val
        self.discount = discount
        self.norm_const = norm_const
        self.t_total_length = self.t_total_types + (self.t_total_tokens *
                                                    self.lambda_val)

        if self.n_gram == 2:
            self.uni_train_dl = Dataloader(self.train_file, 1,
                                           self.unk_threshold)
            self.uni_dev_dl = Dataloader(self.dev_file, 1, self.unk_threshold)

            self.ut_sents, self.ut_items, self.ut_types, self.ut_tokens = self.uni_train_dl.read_file()
            self.ud_sents, self.ud_items, self.ud_types, self.ud_tokens = self.uni_dev_dl.read_file()

            self.ut_total_types = len(self.ut_types) - 2
            self.ut_total_tokens = len(self.ut_tokens) - 2

            self.ut_total_length = self.ut_total_types + (
                self.ut_total_tokens * self.lambda_val)
Code Example #9
    def test_convengence(self):
        buffer = db_op.init("./ut_lmdb_l", alpha=0.5, maxp=0.1)
        if not os.path.exists("data.pkl"):
            data = []
            exc_worker = DQN_Worker(env_name="WizardOfWorNoFrameskip-v4",
                                    db=buffer,
                                    db_write=db_op.write)
            exc_worker.update(None, 1.0)
            while db_op.len(buffer) < 1000000:
                next(exc_worker)
                print(db_op.len(buffer))
            for i in range(1000000):
                data += db_op.read(buffer, i, decompress=False)
            with open("data.pkl", "wb") as fo:
                pickle.dump(data, fo)
        else:
            with open("data.pkl", "rb") as fo:
                data = pickle.load(fo, encoding='bytes')
            db_op.write(buffer, data[0::3], compress=False)

        dataloader = Dataloader(buffer,
                                db_op,
                                batch_size=256,
                                worker_num=4,
                                batch_num=5)
        opt = Optimizer(dataloader,
                        env_name="WizardOfWorNoFrameskip-v4",
                        iter_steps=5,
                        update_period=10000,
                        lr=0.625e-4)
        while True:
            opt_info = next(opt)
            config = db_op.config(buffer)
            if "total" not in config:
                config["total"] = 0
            print("loss {} @ step {} with buff {} total {}".format(
                opt_info["loss"], opt_info["opt_steps"], db_op.len(buffer),
                config["total"]))
Code Example #10
File: make_prediction.py Project: kefirski/teor_inf
import argparse

import torch as t

from model.utils.positional_embedding import PositionalEmbedding
from utils.dataloader import Dataloader

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='inf')
    parser.add_argument('--num-threads', type=int, default=4, metavar='BS',
                        help='num threads (default: 4)')
    parser.add_argument('--use-cuda', type=bool, default=False, metavar='CUDA',
                        help='use cuda (default: False)')
    parser.add_argument('--save', type=str, default='trained_model', metavar='TS',
                        help='path where save trained model to (default: "trained_model")')
    args = parser.parse_args()

    t.set_num_threads(args.num_threads)
    loader = Dataloader('~/projects/teor_inf/utils/data/', '~/projects/wiki.ru.bin')

    model = Model(loader.vocab_size, 4, 10, 300, 30, 30, 9, n_classes=len(loader.idx_to_label))
    embeddings = PositionalEmbedding(loader.preprocessed_embeddings, loader.vocab_size, 1100, 300)

    model.load_state_dict(t.load(args.save))

    if args.use_cuda:
        model = model.cuda()

    model.eval()

    result = []

    for input in loader.test_data():
Code Example #11
File: main.py Project: jnepal/nepali-ner
def main():
    """
        Main File
    """
    # Parse argument
    config, logger = parse_args()

    if config.kfold > 0 and not config.eval:
        logger.info("Splitting dataset into {0}-fold".format(config.kfold))
        splitter.main(input_file=config.data_file,
                      output_dir=config.root_path,
                      verbose=config.verbose,
                      kfold=config.kfold,
                      pos=config.use_pos,
                      log_file=config.data_log)

    tot_acc = 0
    tot_prec = 0
    tot_rec = 0
    tot_f1 = 0

    for i in range(0, config.kfold):
        # To match the output filenames
        k = str(i + 1)

        if not config.eval:
            logger.info("Starting training on {0}th-fold".format(k))

        # Load data iterator
        dataloader = Dataloader(config, k)

        # Debugging purpose. Don't delete
        #         sample = next(iter(train_iter))
        #         print(sample.TEXT)

        # Load model
        if config.use_char or config.use_graph:
            assert config.use_char ^ config.use_graph, "Either use Character-Level or Grapheme-Level. Not both!!!"
            lstm = CharLSTMTagger(config, dataloader).to(config.device)
        else:
            lstm = LSTMTagger(config, dataloader).to(config.device)

        # Print network configuration
        logger.info(lstm)

        model = Trainer(config, logger, dataloader, lstm, k)

        if not config.eval:
            # Train
            logger.info("Training started !!!")
            model.fit()

        # Test
        model.load_checkpoint()
        logger.info("Testing Started !!!")
        acc, prec, rec, f1 = model.predict()
        logger.info(
            "Accuracy: %6.2f%%; Precision: %6.2f%%; Recall: %6.2f%%; FB1: %6.2f "
            % (acc, prec, rec, f1))

        tot_acc += acc
        tot_prec += prec
        tot_rec += rec
        tot_f1 += f1

    logger.info(
        "Final Accuracy: %6.2f%%; Final Precision: %6.2f%%; Final Recall: %6.2f%%; Final FB1: %6.2f "
        % (tot_acc / config.kfold, tot_prec / config.kfold,
           tot_rec / config.kfold, tot_f1 / config.kfold))
Code Example #12
    def start_training(self,
                       datafolder,
                       batchsize,
                       learning_rate,
                       number_of_epoch,
                       display=1000):
        # self.initWeigth(saveParameter="Assignment_weight_3.pkl")
        self.initWeigth()
        self.optimizer.initADAM(5, 5)
        data = Dataloader.loaddata(datafolder["data"])
        testData = Dataloader.loaddata(datafolder["test"])
        labels = Dataloader.loaddata(datafolder["label"])
        data = np.array(data)
        train, val, temptrainlabel, tempvallabel = train_test_split(
            data, labels, test_size=0.2)
        # train = data[0:25000, :, :]
        # val = data[25001:29160, :, :]
        # temptrainlabel = labels[0:25000]
        # tempvallabel = labels[25001:29160]
        trainlabel = Dataloader.toHotEncoding(temptrainlabel)
        vallabel = Dataloader.toHotEncoding(tempvallabel)

        t = 0
        # numberOfImages = 10
        pEpochTrainLoss = []
        pEpochTrainAccuracy = []
        pEpochTestLoss = []
        pEpochTestAccuracy = []
        for epoch in range(number_of_epoch):
            train, temptrainlabel = sklearn.utils.shuffle(train,
                                                          temptrainlabel,
                                                          random_state=1)
            trainlabel = Dataloader.toHotEncoding(temptrainlabel)

            # if epoch > 20:
            #     learning_rate = 0.0001
            if epoch > 70:
                learning_rate = 0.00001
            if epoch > 130:
                learning_rate = 0.000001
            if epoch > 175:
                learning_rate = 0.0000001

            avgLoss = 0
            trainAcc = 0.0
            count = 0.0
            countacc = 0.0
            pIterLoss = []
            total_train_image = train.shape[0]
            iter = 0
            countiter = 0.0
            countitertemp = 0.0
            loss_iter = 0.0
            # for iter in range(total_train_image - batchsize):
            t += 1
            while iter < (total_train_image - batchsize):

                randomSelect = iter
                # randomSelect = np.random.randint(0 ,(total_train_image - batchsize))
                image = train[randomSelect:randomSelect + batchsize, :, :]
                labels = trainlabel[randomSelect:randomSelect + batchsize, :]
                image = np.array(image, dtype=np.float32)
                image = np.subtract(image, self.mean)
                image = np.divide(image, self.std_v)
                input_data = np.reshape(image, (batchsize, 108 * 108))
                input_data, labels = sklearn.utils.shuffle(input_data,
                                                           labels,
                                                           random_state=1)
                # label = np.reshape(label, (1, label.size))
                loss, outputs = self.Train(input_data, labels)
                # self.parameter = self.optimizer.SGD(self.parameter, self.grad, learning_rate)
                self.grad, reg_loss = self.optimizer.l2_regularization(
                    self.parameter, self.grad, 0)
                loss += reg_loss
                for outiter in range(batchsize):
                    # output = output[0]
                    pred = np.argmax(outputs[outiter, :])
                    gt = np.argmax(labels[outiter, :])
                    if pred == gt:
                        count += 1.0
                        countacc += 1.0
                    countiter += 1.0
                    countitertemp += 1.0
                    # print("True")
                    # self.parameter = self.optimizer.SGD(self.parameter, self.grad, learning_rate)

                pIterLoss.append(loss)
                avgLoss += loss
                if iter % display == 0:
                    print("Preiction: ", outputs[0, :])
                    print("Train Accuracy {} with prob : {}".format(
                        (countacc / float(countitertemp)), outputs[0, pred]))
                    print("Train Loss: ", loss)
                    countacc = 0.0
                    countitertemp = 0.0
                    loss, acc = self.Test(val, vallabel)
                    # if acc > 0.55:
                    #     assignmentOut = self.doTest(testData)
                    #     fileName = "result_" + str(acc) + "_.csv"
                    #     with open(fileName, 'w') as f:
                    #         for key in assignmentOut.keys():
                    #
                    # f.write("%s,%s\n" % (key, assignmentOut[key]))
                self.parameter = self.optimizer.ADAM(self.parameter, self.grad,
                                                     learning_rate, t)
                iter += batchsize
                loss_iter += 1.0

            trainAcc = (float(count) / float(countiter))
            print("##################Overall Accuracy & Loss Calculation")
            print(iter, ":TrainAccuracy: ", trainAcc)
            print(iter, ":TrainLoss: ", (float(avgLoss) / float(loss_iter)))
            avgtestloss, avgtestacc = self.Test(val, vallabel)
            totaloss = float(avgLoss) / float(total_train_image)
            pEpochTrainLoss.append(totaloss)
            pEpochTrainAccuracy.append(trainAcc)
            pEpochTestLoss.append(avgtestloss)
            pEpochTestAccuracy.append(avgtestacc)
            # fileName = "Assignment_weight_" + str(trainAcc) + "_" + str(avgtestacc) + ".pkl"
            file = open("Assignment_weight_4.pkl", "wb")
            file.write(pickle.dumps(self.parameter))
            file.close()
            fill2 = open("Assignment_parameter.pkl", "wb")
            fill2.write(
                pickle.dumps((pEpochTrainAccuracy, pEpochTrainLoss,
                              pEpochTestAccuracy, pEpochTestLoss)))
            fill2.close()
            print("############################################")
            if avgtestacc > 0.55:
                assignmentOut = self.doTest(testData)
                fileName = "result_ov_" + str(avgtestacc) + "_.csv"
                with open(fileName, 'w') as f:
                    for key in assignmentOut.keys():
                        f.write("%s,%s\n" % (key, assignmentOut[key]))
Code Example #13
    def start_training_mnist(self,
                             data_folder,
                             batch_size,
                             learning_rate,
                             NumberOfEpoch,
                             display=1000):
        self.initWeigth()
        self.optimizer.initADAM(5, 5)
        trainingImages, trainingLabels = Dataloader.loadMNIST(
            'train', data_folder)
        testImages, testLabels = Dataloader.loadMNIST('t10k', data_folder)
        trainLabelsHotEncoding = Dataloader.toHotEncoding(trainingLabels)
        testLabelsHotEncoding = Dataloader.toHotEncoding(testLabels)
        numberOfImages = trainingImages.shape[0]
        # numberOfImages = 10
        pEpochTrainLoss = []
        pEpochTrainAccuracy = []
        pEpochTestLoss = []
        pEpochTestAccuracy = []
        print("Training started")
        t = 0
        for epoch in range(NumberOfEpoch):
            avgLoss = 0
            trainAcc = 0.0
            count = 0.0
            countacc = 0.0
            pIterLoss = []
            print("##############EPOCH : {}##################".format(epoch))
            for iter in range(numberOfImages):
                t += 1
                image = trainingImages[iter, :, :]
                labels = trainLabelsHotEncoding[iter, :]
                loss, output = self.Train(image, labels)
                output = output[0]
                pred = np.argmax(output)
                gt = np.argmax(labels)
                if pred == gt:
                    count += 1.0
                    countacc += 1.0
                    # print("True")
                # self.parameter = self.optimizer.SGD(self.parameter, self.grad, learning_rate)

                pIterLoss.append(loss)
                avgLoss += loss
                if iter % display == 0:
                    print("Train Accuracy {} with prob : {}".format(
                        (countacc / float(display)), output[pred]))
                    print("Train Loss: ", loss)
                    countacc = 0.0
                    loss, acc = self.Test(testImages, testLabelsHotEncoding)

                self.parameter = self.optimizer.ADAM(self.parameter, self.grad,
                                                     learning_rate, t)
                self.parameter = self.optimizer.l2_regularization(
                    self.parameter, 0.001)
            trainAcc = (float(count) / float(numberOfImages))
            print("##################Overall Accuracy & Loss Calculation")
            print("TrainAccuracy: ", trainAcc)
            print("TrainLoss: ", (float(avgLoss) / float(numberOfImages)))
            avgtestloss, avgtestacc = self.Test(testImages,
                                                testLabelsHotEncoding)
            totaloss = float(avgLoss) / float(numberOfImages)
            pEpochTrainLoss.append(totaloss)
            pEpochTrainAccuracy.append(trainAcc)
            pEpochTestLoss.append(avgtestloss)
            pEpochTestAccuracy.append(avgtestacc)

            x_axis = np.linspace(0, epoch, len(pEpochTrainLoss), endpoint=True)
            plt.semilogy(x_axis, pEpochTrainLoss)
            plt.xlabel('epoch')
            plt.ylabel('loss')
            plt.draw()
            file = open("Assignment_test_2.pkl", "wb")
            file.write(pickle.dumps(self.parameter))
            file.close()
            fill2 = open("Assignment_parameter.pkl", "wb")
            fill2.write(
                pickle.dumps((pEpochTrainAccuracy, pEpochTrainLoss,
                              pEpochTestAccuracy, pEpochTestLoss)))
            fill2.close()
Code Example #14
def main():
    log_dir = YoloConfig.log_dir  # directory for the output logs

    model = yolov3.Yolo(trainable=True)  # build the YOLO model with trainable set to True

    optimizer = tf.keras.optimizers.Adam()  # Adam optimizer; the learning rate is updated over the epochs

    if os.path.exists(log_dir):  # clear old logs from the log directory
        shutil.rmtree(log_dir)

    writer = tf.summary.create_file_writer(log_dir)  # create a TensorBoard summary writer

    global_steps = 1  # global step counter, used to compute the learning rate

    for epoch in range(TrainConfig.total_epochs):  # training loop over epochs

        train_dataset = Dataloader('train')  # load the training set through the dataset loader
        steps_per_epoch = len(train_dataset)  # number of steps in this epoch

        # compute the number of warm-up steps and the total number of steps
        warmup_steps = TrainConfig.warmup_epochs * steps_per_epoch
        total_steps = TrainConfig.total_epochs * steps_per_epoch

        with trange(len(train_dataset)) as t:  # use tqdm for a progress bar with a custom description and postfix
            # running averages of the losses, shown in the postfix
            avg_giou_loss, avg_conf_loss, avg_prob_loss, avg_total_loss = 0.0, 0.0, 0.0, 0.0

            for step in t:
                # fetch the image data and the labels
                # target contains label and bbox
                # label is [batch_size, input_size, input_size, 3, 5 + classes_num]
                # bbox is [batch_size, max_anchor, 4]
                image_data, target = next(train_dataset)

                # compute the gradients
                with tf.GradientTape() as tape:
                    pred_result = model(image_data)  # forward pass to get the predictions

                    giou_loss = conf_loss = prob_loss = 0  # the three loss terms

                    # compute the losses for the predicted boxes at each of the three resolutions
                    for i in range(3):
                        # conv is the raw convolution output, pred is its decoded form
                        conv, pred = pred_result[i][0], pred_result[i][1]
                        loss_items = cal_statics.compute_loss(
                            pred, conv, target[i][0], target[i][1], i)
                        giou_loss += loss_items[0]
                        conf_loss += loss_items[1]
                        prob_loss += loss_items[2]

                    # total loss, used for the gradient-descent step
                    total_loss = giou_loss + conf_loss + prob_loss

                    # accumulate the average losses for display
                    avg_giou_loss = avg_giou_loss + giou_loss
                    avg_conf_loss = avg_conf_loss + conf_loss
                    avg_prob_loss = avg_prob_loss + prob_loss
                    avg_total_loss = avg_total_loss + total_loss

                    # build the description and postfix shown on the progress bar
                    des = time.strftime(
                        '%m-%d %H:%M:%S', time.localtime(
                            time.time())) + " Epoch {}".format(epoch)
                    post = "lr: {:.6f} giou_loss: {:.2f} conf_loss: {:.2f} prob_loss: {:.2f} total_loss: {:.2f}".format(
                        optimizer.lr.numpy(), avg_giou_loss / (step + 1),
                        avg_conf_loss / (step + 1), avg_prob_loss / (step + 1),
                        avg_total_loss / (step + 1))

                    # set the description and the postfix
                    t.set_description(des)
                    t.set_postfix_str(post)

                    # compute the gradients and apply the optimization step
                    gradients = tape.gradient(total_loss,
                                              model.trainable_variables)
                    optimizer.apply_gradients(
                        zip(gradients, model.trainable_variables))

                    global_steps += 1  # increment the global step counter

                    # update the learning rate and apply it to the optimizer
                    if global_steps < warmup_steps:
                        lr = global_steps / warmup_steps * TrainConfig.lr_init
                    else:
                        lr = TrainConfig.lr_end + 0.5 * (
                            TrainConfig.lr_init - TrainConfig.lr_end) * (
                                (1 + tf.cos(
                                    (global_steps - warmup_steps) /
                                    (total_steps - warmup_steps) * np.pi)))
                    optimizer.lr.assign(lr)

                    # write the scalar values to TensorBoard
                    with writer.as_default():
                        tf.summary.scalar("lr",
                                          optimizer.lr,
                                          step=global_steps)
                        tf.summary.scalar("loss/total_loss",
                                          total_loss,
                                          step=global_steps)
                        tf.summary.scalar("loss/giou_loss",
                                          giou_loss,
                                          step=global_steps)
                        tf.summary.scalar("loss/conf_loss",
                                          conf_loss,
                                          step=global_steps)
                        tf.summary.scalar("loss/prob_loss",
                                          prob_loss,
                                          step=global_steps)
                    writer.flush()

        # call the model's save routine to store the network weights
        model.save_model()
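
The learning-rate logic inside the training loop above is a linear warm-up followed by cosine decay. A standalone sketch of the same schedule (plain math in place of tf.cos; the function name is ours, not the project's):

import math

def warmup_cosine_lr(global_steps, warmup_steps, total_steps, lr_init, lr_end):
    # Linear warm-up to lr_init, then cosine decay down to lr_end.
    if global_steps < warmup_steps:
        return global_steps / warmup_steps * lr_init
    progress = (global_steps - warmup_steps) / (total_steps - warmup_steps)
    return lr_end + 0.5 * (lr_init - lr_end) * (1 + math.cos(progress * math.pi))

print(warmup_cosine_lr(1000, 1000, 10000, 1e-3, 1e-6))   # peak: 0.001
print(warmup_cosine_lr(10000, 1000, 10000, 1e-3, 1e-6))  # end: ~1e-06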
Code Example #15
    write_prior = (args.buffer == "pmdb")
    ray.init(num_cpus=1 + 2 * n_worker + n_loader,
             object_store_memory=4 * 1024**3,
             memory=12 * 1024**3)

    buffer = lmdb_op.init(buffer, alpha=0.5)
    workers = [
        ray.remote(PriorDQN_Worker).options(num_gpus=0.1).remote(
            env_name=env_name, db=buffer, db_write=lmdb_op.write)
        for _ in range(n_worker)
    ]
    test_worker = ray.remote(DQN_Worker).options(num_gpus=0.1).remote(
        env_name=env_name, phase="valid", suffix=suffix)
    dataloader = Dataloader(buffer,
                            lmdb_op,
                            worker_num=n_loader,
                            batch_size=batch_size,
                            batch_num=n_iter)
    opt = ray.remote(Optimizer).options(num_gpus=0.2).remote(
        dataloader,
        env_name,
        suffix=suffix,
        iter_steps=n_iter,
        update_period=10000,
        lr=lr)
    glog = SummaryWriter("./logdir/{}/{}/{}.lr{}.batch{}".format(
        env_name, suffix, Optimizer.__name__, lr, batch_size))

    engine = Engine(opt, workers, test_worker, buffer, glog, speed)

    engine.reset()
Code Example #16
File: train.py Project: kefirski/teor_inf
        '--save',
        type=str,
        default='trained_model',
        metavar='TS',
        help='path where save trained model to (default: "trained_model")')
    parser.add_argument('--tensorboard',
                        type=str,
                        default='default_tb',
                        metavar='TB',
                        help='Name for tensorboard model')
    args = parser.parse_args()

    writer = SummaryWriter(args.tensorboard)

    t.set_num_threads(args.num_threads)
    loader = Dataloader('~/projects/teor_inf/utils/data/',
                        '~/projects/wiki.ru.bin')

    model = Model(loader.vocab_size,
                  3,
                  5,
                  300,
                  75,
                  100,
                  40,
                  n_classes=len(loader.idx_to_label),
                  dropout=args.dropout)
    embeddings = PositionalEmbedding(loader.preprocessed_embeddings,
                                     loader.vocab_size, 1100, 300)
    if args.use_cuda:
        model = model.cuda()