def train(config):
    """Train a TextCNN classifier end-to-end.

    Pipeline: build vocab -> class weights -> pretrained embeddings ->
    train/valid/test iterators -> model init -> training loop.
    """
    set_manual_seed(10)

    # Step 1: clean and tokenize the corpus, build the vocabulary.
    print("Preparing the batch data ... \n")
    texts, labels, vocab = build_dataset(config)

    # Step 2: class weights to soften class imbalance.
    config.class_weights = calcu_class_weights(labels, config)

    # Step 3: load the pretrained word-embedding matrix for this vocab.
    config.embed_matrix = load_embed_matrix(vocab, config)

    # Step 4: split the corpus (15% held out) and build batch iterators.
    train_iter, valid_iter, test_iter = batch_generator(
        texts, labels, 0.15, config)

    # Step 5: instantiate the model and move it to the target device.
    print("Building the textcnn model ... \n")
    model = TextCNN(config)
    print(f'The model has {count_params(model):,} trainable parameters\n')
    model.to(config.device)

    # Step 6: initialize weights and run the training loop.
    print("Start the training ... \n")
    init_network(model)
    train_model(config, model, train_iter, valid_iter, test_iter)
# ---- Example 2 ----
# (scrape artifact: "0")
def train(config):
    """Train a TextCNN classifier; the dataset is split and saved on disk
    by `build_dataset`, so iterators are built from `config` alone."""
    set_manual_seed(10)

    # Step 1: split the dataset and persist it.
    print("Preparing the batch data ... \n")
    build_dataset(config)

    # Step 2: class weights to soften class imbalance.
    config.class_weights = calcu_class_weights(config)

    # Step 3: build the train/valid/test batch iterators.
    train_iter, valid_iter, test_iter = batch_generator(config)

    # Step 4: instantiate the model and move it to the target device.
    print("Building the textcnn model ... \n")
    model = TextCNN(config)
    print(f'The model has {count_params(model):,} trainable parameters\n')
    model.to(config.device)

    # Step 5: initialize weights and run the training loop.
    print("Start the training ... \n")
    init_network(model)
    train_model(config, model, train_iter, valid_iter, test_iter)
def train(config):
    """Train a FastText classifier using the dimensions held in `config`."""
    set_manual_seed(10)

    # Build the model straight from the config dimensions.
    print("Building the fasttext model ... \n")
    model = FastText(config.vocab_size, config.embed_dim,
                     config.output_dim, config.pad_idx)
    print(f'The model has {count_params(model):,} trainable parameters\n')
    model.to(config.device)

    # Class weights to soften class imbalance.
    print("Calculate class weigths ... \n")
    class_weights = calcu_class_weights(config)

    # Train/valid/test batch iterators.
    print("Preparing the batch data ... \n")
    train_iter, valid_iter, test_iter = batch_generator(config)

    # Initialize weights and run the training loop.
    init_network(model)
    train_model(config, model, train_iter, valid_iter, test_iter,
                class_weights)
# ---- Example 4 ----
# (scrape artifact: "0")
def run():
    """Train the RNN-attention model, checkpoint the best weights, then
    predict on the test set and write the top-5 labels per sample to CSV.
    """
    print('Loading data...')

    # training/validation/test are tuples of model inputs with the labels
    # last; label_map is a fitted label encoder (has `.classes_`); VOCAB is
    # the vocabulary size.
    training, validation, test, embedding_matrix, label_map, VOCAB = get_data(
        USE_FA=False, USE_GLOVE_EMBED=True)
    tr_gen = batch_generator(training[0],
                             training[-1],
                             batch_size=256,
                             shuffle=True)
    te_gen = batch_generator(validation[0],
                             validation[-1],
                             batch_size=1024,
                             shuffle=False)

    print('VOCAB size:{}'.format(VOCAB))

    # Hyper-parameters.
    # NOTE(review): BATCH_SIZE is only used for the steps_per_epoch /
    # validation_steps arithmetic below, while the generators above batch at
    # 256 / 1024 — the step counts are therefore approximate; confirm intent.
    LAYERS = 1
    USE_GLOVE = True
    TRAIN_EMBED = False
    EMBED_HIDDEN_SIZE = 256
    SENT_HIDDEN_SIZE = 256
    BATCH_SIZE = 512
    PATIENCE = 6  # NOTE(review): unused — EarlyStopping below hard-codes 4
    MAX_EPOCHS = 100
    MAX_LEN_TITLE = 30
    MAX_LEN_DES = 128
    DP = 0.2
    L2 = 4e-06
    ACTIVATION = 'relu'
    OPTIMIZER = 'rmsprop'
    MLP_LAYER = 1
    NGRAM_FILTERS = [1, 2, 3, 4]  # NOTE(review): unused by rnn_att_model
    NUM_FILTER = 128              # NOTE(review): unused by rnn_att_model
    RNN_Cell = 'BiLSTM'

    print('Embed / Sent = {}, {}'.format(EMBED_HIDDEN_SIZE, SENT_HIDDEN_SIZE))
    print('GloVe / Trainable Word Embeddings = {}, {}'.format(
        USE_GLOVE, TRAIN_EMBED))

    LABEL_NUM = len(label_map.classes_)

    bst_model_path = '../model/rnn_attn_v2.hdf5'
    pred_path = '../res/rnn_attn_v2.pkl'
    res_path = '../res/rnn_attn_v2.csv'

    # Separate embedding layers for titles and descriptions, which use
    # different maximum sequence lengths.
    embed_title = get_embedding(embedding_matrix, USE_GLOVE, VOCAB,
                                EMBED_HIDDEN_SIZE, TRAIN_EMBED, MAX_LEN_TITLE)
    embed_des = get_embedding(embedding_matrix, USE_GLOVE, VOCAB,
                              EMBED_HIDDEN_SIZE, TRAIN_EMBED, MAX_LEN_DES)

    model = rnn_att_model(embed_title,
                          embed_des,
                          MAX_LEN_TITLE,
                          MAX_LEN_DES,
                          SENT_HIDDEN_SIZE,
                          ACTIVATION,
                          DP,
                          L2,
                          LABEL_NUM,
                          OPTIMIZER,
                          MLP_LAYER,
                          LAYERS,
                          RNN_Cell=RNN_Cell)

    early_stopping = EarlyStopping(monitor='val_top_k_categorical_accuracy',
                                   patience=4)
    model_checkpoint = ModelCheckpoint(bst_model_path,
                                       save_best_only=True,
                                       save_weights_only=True)
    model.fit_generator(
        tr_gen,
        steps_per_epoch=int(training[0][0].shape[0] / BATCH_SIZE) + 1,
        epochs=MAX_EPOCHS,
        verbose=1,
        validation_data=te_gen,
        validation_steps=int(validation[0][0].shape[0] / BATCH_SIZE) + 1,
        max_q_size=20,  # NOTE(review): renamed max_queue_size in Keras >= 2.x
        callbacks=[early_stopping, model_checkpoint])

    # BUG FIX: was a Python-2 print statement ("print 'load weights'"),
    # which is a SyntaxError under Python 3.
    print('load weights')
    model.load_weights(bst_model_path)
    pred = model.predict(test)
    pd.to_pickle(pred, pred_path)

    def get_ans(pred, idd):
        """Return a DataFrame of the top-5 predicted class labels per row,
        indexed by `idd`."""
        top5 = pred.argsort(axis=1)[:, ::-1][:, :5]
        classes = label_map.classes_
        ans = [[classes[item] for item in items] for items in top5]
        res = pd.DataFrame(ans)
        res.index = idd
        return res

    test_idx = get_testid()
    ans = get_ans(pred, test_idx)
    ans.to_csv(res_path, index=True, header=False)
# ---- Example 5 ----
# (scrape artifact: "0")
    # NOTE(review): fragment of a larger training script — the enclosing
    # function is not visible in this chunk, so only comments are added here.

    # define hyper parameters
    batch_size = 32    # sequences per batch
    seq_len = 20       # time steps per sequence
    num_epoch = 10     # presumably the epoch budget of the (unseen) loop — TODO confirm
    grad_clip = 5.0    # presumably a gradient-clipping threshold used later — TODO confirm
    max_step = 20000   # presumably a hard cap on training steps — TODO confirm

    # Directory where the checkpoints will be saved
    checkpoint_dir = './training_checkpoints'

    # Name of the checkpoint files
    # ("{step}" is a literal placeholder here, presumably formatted later)
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{step}")

    # Encode the raw text into token ids and batch them (batch_size, seq_len).
    input_ids = vocabulary.encode(train_data)
    batch_data = batch_generator(input_ids, batch_size, seq_len)

    # Character/word-level LSTM language model; sampling=False selects the
    # training configuration (per the argument name — confirm in LSTMModel).
    model = LSTMModel(vocabulary.vocab_size,
                      batch_size=batch_size,
                      num_steps=seq_len,
                      lstm_size=128,
                      num_layers=2,
                      sampling=False,
                      drop_out=0.5,
                      use_embedding=False,
                      embedding_size=128)

    # Optimizer and loss; labels are integer ids, model outputs raw logits.
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

    # define metrics
# ---- Example 6 ----
# (scrape artifact: "0")
def train(net_factory, model_path, img_dir, label_files,
          end_epoch=20, display=100, base_lr=0.01):
    """
    train PNet/RNet/ONet
    :param net_factory: pre-defined model structure; called with the input
        tensors (training=True) and must return
        (bbox_loss, landmark_loss, L2_loss) ops
    :param net_factory: pre-defined model structure
    :param model_path: str, model path; its last path component is used as
        the net name ('PNet' / 'RNet' / anything else -> ONet settings)
    :param img_dir: directory containing the training images
    :param label_files: list of label-file paths; their total line count is
        taken as the dataset size
    :param end_epoch: int, total training epoch
    :param display: print the current losses every `display` steps
    :param base_lr: initial learning rate
    :return: None
    """
    net = model_path.split('/')[-1]
    print("Current training net: {}".format(net))
    print("Model will be saved in {}".format(model_path))

    # Dataset size = total number of lines across all label files.
    num = 0
    for label_file in label_files:
        with open(label_file, 'r') as f:
            print("Label file found: {}".format(label_file))
            num += len(f.readlines())
    print("Size of the dataset is: ", num)

    # Data loader for training: a re-initializable iterator so the dataset
    # can be restarted each epoch via train_init_op.
    train_batches = batch_generator(img_dir, label_files, config.BATCH_SIZE, net)
    data_iter = tf.data.Iterator.from_structure(train_batches.output_types,
                                                train_batches.output_shapes)
    image_batch, label_batch, bbox_batch, landmark_batch = data_iter.get_next()
    train_init_op = data_iter.make_initializer(train_batches)
    print("------------Data loader created------------")

    # Per-net input resolution and loss weights.
    if net == 'PNet':
        image_size = 12
        radio_bbox_loss, radio_landmark_loss = 0.5, 0.5
    elif net == 'RNet':
        image_size = 24
        radio_bbox_loss, radio_landmark_loss = 0.5, 0.5
    else:
        image_size = 48
        radio_bbox_loss, radio_landmark_loss = 0.5, 1

    # Define input placeholders.
    input_image = tf.placeholder(tf.float32, shape=[config.BATCH_SIZE, image_size, image_size, 3], name='input_image')
    label = tf.placeholder(tf.float32, shape=[config.BATCH_SIZE], name='label')
    bbox_target = tf.placeholder(tf.float32, shape=[config.BATCH_SIZE, 4], name='bbox_target')
    landmark_target = tf.placeholder(tf.float32, shape=[config.BATCH_SIZE, 10], name='landmark_target')
    print("------------Input tensor placeholder defined------------")

    # Build loss ops.
    # NOTE(review): rebinding `input_image` to the distort op and then using
    # the same variable as the feed_dict key feeds the *distorted* tensor
    # directly, so the colour distortion is effectively bypassed at run
    # time — confirm whether this is intended.
    input_image = image_color_distort(input_image)
    bbox_loss_op, landmark_loss_op, L2_loss_op = net_factory(inputs=input_image,
                                                             label=label,
                                                             bbox_target=bbox_target,
                                                             landmark_target=landmark_target,
                                                             training=True)
    total_loss_op = radio_bbox_loss * bbox_loss_op + \
                    radio_landmark_loss * landmark_loss_op + \
                    L2_loss_op
    train_op, lr_op = train_model(base_lr, total_loss_op, num)
    print("------------Loss operation and training operation defined------------")

    # Initialize session, variables and the data iterator.
    init = tf.global_variables_initializer()
    sess = tf.Session()
    saver = tf.train.Saver(max_to_keep=4)
    sess.run(train_init_op)
    sess.run(init)
    print("------------Graph initialized------------")

    # Visualize some variables in TensorBoard.
    tf.summary.scalar("bbox_loss", bbox_loss_op)
    tf.summary.scalar("landmark_loss", landmark_loss_op)
    tf.summary.scalar("total_loss", total_loss_op)
    summary_op = tf.summary.merge_all()
    logs_dir = "../logs/{}".format(net)
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)
    writer = tf.summary.FileWriter(logs_dir, sess.graph)
    print("------------Summary created------------")

    num_train_batches = math.ceil(num / config.BATCH_SIZE)
    MAX_STEP = num_train_batches * end_epoch
    try:
        print("------------Start training!------------")
        for step in tqdm(range(MAX_STEP)):
            epoch = math.ceil((step+1) / num_train_batches)

            image_batch_array, label_batch_array, bbox_batch_array, landmark_batch_array = sess.run(
                [image_batch, label_batch, bbox_batch, landmark_batch])

            # Random flip augmentation (labels decide which samples may flip).
            image_batch_array, landmark_batch_array = random_flip_images(image_batch_array,
                                                                         label_batch_array,
                                                                         landmark_batch_array)

            _, _, summary = sess.run([train_op, lr_op, summary_op],
                                     feed_dict={input_image: image_batch_array,
                                                label: label_batch_array,
                                                bbox_target: bbox_batch_array,
                                                landmark_target: landmark_batch_array})

            if (step+1) % display == 0:
                # Re-evaluate the individual losses on the same batch for logging.
                bbox_loss, landmark_loss, L2_loss = sess.run(
                    [bbox_loss_op, landmark_loss_op, L2_loss_op],
                    feed_dict={input_image: image_batch_array,
                               label: label_batch_array,
                               bbox_target: bbox_batch_array,
                               landmark_target: landmark_batch_array})

                total_loss = radio_bbox_loss * bbox_loss + \
                             radio_landmark_loss * landmark_loss + \
                             L2_loss
                print("%s : Step: %d/%d, bbox loss: %4f, Landmark loss :%4f, L2 loss: %4f, Total Loss: %4f"
                      % (datetime.now(), step+1, MAX_STEP, bbox_loss, landmark_loss, L2_loss, total_loss))

            if (step+1) % num_train_batches == 0:
                # End of an epoch: checkpoint and restart the data iterator.
                model_output = "mtcnn_epoch{}".format(epoch)
                ckpt_name = os.path.join(model_path, model_output)
                # BUG FIX: save to the per-epoch checkpoint name; previously
                # the bare model_path was saved, contradicting the log line
                # below and leaving ckpt_name unused.
                saver.save(sess, ckpt_name, global_step=step)
                print("after training of {} epochs, {} has been saved.".format(epoch, ckpt_name))
                sess.run(train_init_op)

            writer.add_summary(summary, global_step=step)

    except tf.errors.OutOfRangeError:
        # The dataset iterator is exhausted: training is finished.
        print("Complete!")

    sess.close()
# ---- Example 7 ----
# (scrape artifact: "0")
def train(net_factory,
          model_path,
          img_dir,
          label_files,
          val_label_files,
          end_epoch=20,
          display=100,
          base_lr=0.01):
    """
    train the Manga_Net detector (48x48 inputs, classification + bbox losses)
    :param net_factory: pre-defined model structure; called with the input
        tensors and must return (cls_loss, bbox_loss, L2_loss, _, _) ops
    :param model_path: str, model path (created if missing; also searched
        for an existing checkpoint to resume from)
    :param img_dir: directory for saving images and label files
    :param label_files: list of paths of label files
    :param val_label_files: list of paths of validation label files
    :param end_epoch: int, total training epoch
    :param display: every $display$ steps show the training loss
    :param base_lr: initial learning rate
    :return: None
    """
    net = "Manga_Net"
    image_size = 48
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    logger.info("Model will be saved in {}".format(model_path))

    # Dataset size = total number of lines across all label files.
    num = 0
    for label_file in label_files:
        with open(label_file, 'r') as f:
            logger.info("Label file found: {}".format(label_file))
            num += len(f.readlines())
    logger.info("Size of the dataset is: {}".format(num))

    # Data loader for training: a re-initializable iterator so the dataset
    # can be restarted each epoch via train_init_op.
    train_batches = batch_generator(img_dir, label_files, config.BATCH_SIZE,
                                    image_size)
    data_iter = tf.data.Iterator.from_structure(train_batches.output_types,
                                                train_batches.output_shapes)
    image_batch, label_batch, bbox_batch = data_iter.get_next()
    train_init_op = data_iter.make_initializer(train_batches)
    logger.info("Data loader created")

    # Load the whole validation set once; it is evaluated after every epoch.
    _, val_image_array, val_label_array, val_bbox_array = load_all(
        img_dir, val_label_files, image_size)

    # Loss weights.
    radio_cls_loss = 0.5
    radio_bbox_loss = 0.5

    # Define input placeholders; the batch dimension is left dynamic so the
    # validation arrays (a different batch size) can be fed too.
    input_image = tf.placeholder(tf.float32,
                                 shape=[None, image_size, image_size, 3],
                                 name='input_image')
    label = tf.placeholder(tf.int32, shape=[None], name='label')
    bbox_target = tf.placeholder(tf.float32,
                                 shape=[None, 4],
                                 name='bbox_target')
    logger.info("Input tensor placeholder defined")

    # Build loss ops.
    # NOTE(review): rebinding `input_image` to the distort op and then using
    # the same variable as the feed_dict key feeds the *distorted* tensor
    # directly, so the colour distortion is effectively bypassed at run
    # time — confirm whether this is intended.
    input_image = image_color_distort(input_image)
    cls_loss_op, bbox_loss_op, L2_loss_op, _, _ = net_factory(
        inputs=input_image, label=label, bbox_target=bbox_target)
    total_loss_op = radio_cls_loss * cls_loss_op + radio_bbox_loss * bbox_loss_op + L2_loss_op
    train_op, lr_op = train_model(base_lr, total_loss_op, num)
    logger.info("Loss operation and training operation defined")

    # Resume from the latest checkpoint if one exists, else start fresh.
    saver = tf.train.Saver(max_to_keep=0)
    sess = tf.Session()
    ckpt = tf.train.latest_checkpoint(model_path)
    if ckpt is None:
        logger.info("Initialize from scratch")
        sess.run(tf.global_variables_initializer())
    else:
        logger.info("Initialize from exiting model")
        saver.restore(sess, ckpt)
    sess.run(train_init_op)
    logger.info("Graph initialized")

    # Visualize some variables in TensorBoard.
    tf.summary.scalar("bbox_loss", bbox_loss_op)
    # BUG FIX: this scalar tracks the classification loss; it was previously
    # tagged "landmark_loss" (copy-paste from the MTCNN trainer — this net
    # has no landmark loss).
    tf.summary.scalar("cls_loss", cls_loss_op)
    tf.summary.scalar("total_loss", total_loss_op)
    summary_op = tf.summary.merge_all()
    logs_dir = "../logs/{}".format(net)
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)
    writer = tf.summary.FileWriter(logs_dir, sess.graph)
    logger.info("Summary created")

    # Resume the step counter from the graph's global_step so step/epoch
    # numbering lines up after a restart.
    num_train_batches = math.ceil(num / config.BATCH_SIZE)
    MAX_STEP = num_train_batches * end_epoch
    graph = tf.get_default_graph()
    global_step = graph.get_tensor_by_name('global_step:0')
    gs_ = sess.run(global_step)

    logger.info("Start training!")
    for step in tqdm(range(gs_, MAX_STEP)):
        epoch = math.ceil((step + 1) / num_train_batches)

        image_batch_array, label_batch_array, bbox_batch_array = sess.run(
            [image_batch, label_batch, bbox_batch])

        _, _, summary = sess.run(
            [train_op, lr_op, summary_op],
            feed_dict={
                input_image: image_batch_array,
                label: label_batch_array,
                bbox_target: bbox_batch_array
            })

        if (step + 1) % display == 0:
            # Re-evaluate the individual losses on the same batch for logging.
            cls_loss, bbox_loss, L2_loss = sess.run(
                [cls_loss_op, bbox_loss_op, L2_loss_op],
                feed_dict={
                    input_image: image_batch_array,
                    label: label_batch_array,
                    bbox_target: bbox_batch_array
                })
            total_loss = radio_cls_loss * cls_loss + radio_bbox_loss * bbox_loss + L2_loss
            logger.info(
                "Step: %d/%d, cls loss : %4f, bbox loss: %4f, L2 loss: %4f, total Loss: %4f"
                %
                (step + 1, MAX_STEP, cls_loss, bbox_loss, L2_loss, total_loss))

        if (step + 1) % num_train_batches == 0:
            # End of an epoch: checkpoint, report validation losses, and
            # restart the data iterator.
            model_output = "manga_net_epoch{}".format(epoch)
            ckpt_name = os.path.join(model_path, model_output)
            saver.save(sess, ckpt_name)
            logger.info(
                "after training of {} epochs, {} has been saved.".format(
                    epoch, ckpt_name))

            cls_loss, bbox_loss, L2_loss = sess.run(
                [cls_loss_op, bbox_loss_op, L2_loss_op],
                feed_dict={
                    input_image: val_image_array,
                    label: val_label_array,
                    bbox_target: val_bbox_array
                })
            total_loss = radio_cls_loss * cls_loss + radio_bbox_loss * bbox_loss + L2_loss
            logger.info(
                "Epoch: %d/%d, cls loss: %4f, bbox loss: %4f, L2 loss: %4f, Total Loss: %4f"
                % (epoch, end_epoch, cls_loss, bbox_loss, L2_loss, total_loss))

            sess.run(train_init_op)

        writer.add_summary(summary, global_step=step)
    logger.info("Complete!")
    sess.close()