Example #1
import tensorflow.compat.v1 as tf
from model import char_rnn, FLAGS
from utils import build_dataset
import numpy as np
tf.disable_eager_execution()

FLAG=FLAGS()
poems_vector, word_int_map, vocabularies = build_dataset(FLAG.poems_path,FLAG.name_path)
input_data = tf.placeholder(tf.int32, [1, None])
end_points = char_rnn(model='lstm',
                      input_data=input_data,
                      output_data=None,
                      vocab_size=len(vocabularies),
                      rnn_size=FLAG.rnn_size,
                      num_layers=FLAG.num_layers,
                      batch_size=FLAG.batch_size,
                      learning_rate=FLAG.learning_rate)


def to_word(predict, vocabs):
    predict = predict[0]
    predict /= np.sum(predict)
    sample = np.random.choice(np.arange(len(predict)), p=predict)
    if sample >= len(vocabs):
        return vocabs[-1]
    else:
        return vocabs[sample]


def gen_poem(begin_word):


    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
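        # A minimal sketch of how the generation loop might continue, assuming
        # end_points exposes 'prediction', 'last_state' and 'initial_state'
        # entries (the first two appear in the training script, Example #8)
        # and that 'B'/'E' are the start/end tokens as in Example #28.
        checkpoint = tf.train.latest_checkpoint(FLAG.result_dir)
        saver.restore(sess, checkpoint)

        x = np.array([[word_int_map['B']]])
        predict, last_state = sess.run(
            [end_points['prediction'], end_points['last_state']],
            feed_dict={input_data: x})
        word = begin_word or to_word(predict, vocabularies)

        poem = ''
        while word != 'E':
            poem += word
            x = np.array([[word_int_map[word]]])
            predict, last_state = sess.run(
                [end_points['prediction'], end_points['last_state']],
                feed_dict={input_data: x,
                           end_points['initial_state']: last_state})
            word = to_word(predict, vocabularies)
        return poem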
Example #2
"""
@author: lumi
"""

import tensorflow as tf
import pickle
from model import Model
from utils import build_dict, build_dataset, batch_iter


with open("args.pickle", "rb") as f:
    args = pickle.load(f)

print("Loading dictionary...")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("valid", args.toy)
print("Loading validation dataset...")
valid_x = build_dataset("valid", word_dict, article_max_len, summary_max_len, args.toy)
valid_x_len = [len([y for y in x if y != 0]) for x in valid_x]

with tf.Session() as sess:
    print("Loading saved model...")
    model = Model(reversed_dict, article_max_len, summary_max_len, args, forward_only=True)
    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state("./saved_model/")
    saver.restore(sess, ckpt.model_checkpoint_path)

    batches = batch_iter(valid_x, [0] * len(valid_x), args.batch_size, 1)

    print("Writing summaries to 'result.txt'...")
    for batch_x, _ in batches:
        batch_x_len = [len([y for y in x if y != 0]) for x in batch_x]
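        # A minimal sketch of the rest of the loop, assuming the Model class
        # exposes batch_size, X, X_len and prediction (beam-search output
        # indexed as prediction[:, 0, :]) and that summaries end with "</s>".
        valid_feed_dict = {
            model.batch_size: len(batch_x),
            model.X: batch_x,
            model.X_len: batch_x_len,
        }
        prediction = sess.run(model.prediction, feed_dict=valid_feed_dict)
        prediction_output = [[reversed_dict[y] for y in x]
                             for x in prediction[:, 0, :]]

        with open("result.txt", "a") as f:
            for line in prediction_output:
                summary = []
                for word in line:
                    if word == "</s>":
                        break
                    if word not in summary:
                        summary.append(word)
                f.write(" ".join(summary) + "\n")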
Example #3
quanSongCi = load_file('QuanSongCi.txt', '')

# pylint: disable=redefined-outer-name

print('Data size', len(quanSongCi))

# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 5000

# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
#   This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = utils.build_dataset(
    quanSongCi, vocabulary_size)
del quanSongCi  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

with open('dictionary' + '.json', 'w', encoding='utf8') as f:
    json.dump(dictionary, f, ensure_ascii=False)

with open('reverse_dictionary' + '.json', 'w', encoding='utf8') as f:
    json.dump(reverse_dictionary, f, ensure_ascii=False)

data_index = 0


# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
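    # A minimal sketch of the standard skip-gram batch generator from the
    # TensorFlow word2vec tutorial, which this script appears to follow; it
    # assumes numpy (np), collections and random are imported and uses the
    # global `data` list and `data_index` built above.
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size,), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    if data_index + span > len(data):
        data_index = 0
    buffer.extend(data[data_index:data_index + span])
    data_index += span
    for i in range(batch_size // num_skips):
        context_words = [w for w in range(span) if w != skip_window]
        words_to_use = random.sample(context_words, num_skips)
        for j, context_word in enumerate(words_to_use):
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[context_word]
        if data_index == len(data):
            buffer.extend(data[0:span])
            data_index = span
        else:
            buffer.append(data[data_index])
            data_index += 1
    # Backtrack a little to avoid skipping words at the end of a batch.
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels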
Example #4
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model',
                    type=str,
                    default="bert",
                    help='choose a model: Bert, ERNIE')
args = parser.parse_args()

if __name__ == '__main__':
    dataset = 'intent_classify'  # dataset

    model_name = args.model  # bert
    x = import_module('models.' + model_name)
    config = x.Config(dataset)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    start_time = time.time()
    print("Loading data...")
    train_data, dev_data, test_data = build_dataset(config)
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    model = x.Model(config).to(config.device)
    train(config, model, train_iter, dev_iter, test_iter)
Example #5
    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random init: random
    embedding = 'embedding_SougouNews.npz'  # pretrained word vectors to use
    if args.embedding == 'random':
        embedding = 'random'
    model_name = args.model
    print(model_name)
    x = import_module('models.' + model_name)  # import the model module, e.g. TextRNN
    config = x.Config(dataset, embedding)  # build the model's parameter configuration
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run, for reproducibility

    start_time = time.time()  # start timing
    print('Loading data...')
    vocab, train_data, dev_data, test_data = build_dataset(config)  # build the datasets
    # print(len(train_data), len(dev_data), len(test_data))
    # create the dataset iterators
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print('Time usage: ', time_dif)

    # update the vocabulary size in the model configuration
    config.n_vocab = len(vocab)
    # create the model
    model = x.Model(config).to(config.device)
    # initialize the model parameters
    init_network(model)
    # print the model parameters
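    # assumed continuation: Examples #7 and #26 print the parameters at this point
    print(model.parameters)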
Example #6
def main(args):

    print("[STARTING] Facenet ResNet v1 for Facial Recognition")
    print(".\n.\n.")
    print("[LOADING] Loading face detector...")
    detector = FaceDetector(args["cascade"])

    print("[LOADING] Loading the faces dataset...")
    dataset, name_to_idx, idx_to_name = utils.build_dataset(args["dataset"])

    print("[LOADING] Loading the Convolutional Neural Network model...")
    type_mode = args["type"]
    use_pi = args['run'] == 'raspberry'
    assert type_mode in ["MobileFaceNet", "FaceNet"
                         ], "Only MobileFaceNet or FaceNet are supported."

    if type_mode == 'FaceNet':
        start = time.time()
        sess = tf.Session()
        utils.load_model(args["model"])

        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")
        print("[LOADING] Loading the FaceNet weights took %.2f" %
              (time.time() - start))
    else:
        K.clear_session()
        define_keras_functions()
        with open(args["json"]) as f:
            start = time.time()
            model_json = json.load(f)
            model = keras.models.model_from_json(model_json)
            print("[LOADING] Loadng the Weights...")
            model.load_weights(args["weights"])
            print("[LOADING] Loading the MobileFaceNet weights took %.2fs" %
                  (time.time() - start))

    print("[LOADING] Starting the video stream...")
    if use_pi:
        vs = VideoStream(usePiCamera=True).start()
    else:
        vs = VideoStream(src=0).start()
    time.sleep(2.0)
    fps = FPS().start()
    times = []

    while True:
        frame = vs.read()
        frame = imutils.resize(frame,
                               width=500)  # Width of the frame is configurable

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Detect faces on the frame
        rects = detector.detect_faces(gray)
        nrof_faces = len(rects)
        if nrof_faces > 0:
            face_images = detector.extract_faces(rgb, rects)
            face_images = face_images / 255
            # Recognize the images
            if type_mode == 'FaceNet':
                start_time = time.time()
                feed_dict = {
                    images_placeholder: face_images,
                    phase_train_placeholder: False
                }
                embeddings_array = sess.run(embeddings, feed_dict=feed_dict)
                times.append(time.time() - start_time)
            else:
                start_time = time.time()
                embeddings_array = model.predict(face_images)
                times.append(time.time() - start_time)

            for idx, embedding in enumerate(embeddings_array):
                embedding = embedding.reshape((1, *embedding.shape))
                predicted = utils.predict_face(dataset,
                                               name_to_idx,
                                               idx_to_name,
                                               embedding,
                                               threshold=3,
                                               distance_metric='cosine')
                x, y, w, h = rects[idx]
                color = (0, 0, 255) if predicted == "Unknown" else (0, 255, 0)
                cv2.rectangle(frame, (x, y + h), (x + w, y), color, 2)
                top = y + h - 15 if y + h - 15 > 15 else y + h + 15
                cv2.putText(frame, predicted, (x, top),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)

        # Display the image
        cv2.imshow("Frame", frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

        fps.update()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approximated FPS: {:.2f}fps".format(fps.fps()))
    print("[INFO] approximated forward propagation time: {:.2f}s".format(
        sum(times) / len(times)))

    cv2.destroyAllWindows()
    vs.stop()
Example #7
    model_name = args.model  # 'TextRCNN'  # TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer
    # if model_name == 'FastText':
    #     from utils_fasttext import build_dataset, build_iterator, get_time_dif
    #     embedding = 'random'
    # else:
    from utils import build_dataset, build_iterator, get_time_dif

    x = import_module('models.' + model_name)
    config = x.Config(dataset, embedding)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    start_time = time.time()
    print("Loading data...")
    vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    config.n_vocab = len(vocab)
    model = x.Model(config).to(config.device)
    if model_name != 'Transformer':
        init_network(model)
    print(model.parameters)
    train(config, model, train_iter, dev_iter, test_iter)
Example #8
def train():
    FLAG = FLAGS()
    poems_vector, word_to_int, vocabularies = build_dataset(
        FLAG.poems_path, FLAG.name_path)

    batches_inputs, batches_outputs = generate_batch(FLAG.batch_size,
                                                     poems_vector, word_to_int)

    input_data = tf.placeholder(tf.int32, [FLAG.batch_size, None],
                                name="Input")
    output_targets = tf.placeholder(tf.int32, [FLAG.batch_size, None])
    #z = tf.log(output_targets, name="namemodel")
    end_points = char_rnn(model='lstm',
                          input_data=input_data,
                          output_data=output_targets,
                          vocab_size=len(vocabularies),
                          rnn_size=FLAG.rnn_size,
                          num_layers=FLAG.num_layers,
                          batch_size=FLAG.batch_size,
                          learning_rate=FLAG.learning_rate)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)

        start_epoch = 0
        checkpoint = tf.train.latest_checkpoint(FLAG.result_dir)
        if checkpoint:  # resume training from where the last run stopped
            saver.restore(sess, checkpoint)
            print("## restore from the checkpoint {0}".format(checkpoint))
            start_epoch += int(checkpoint.split('-')[-1])
        print('## start training...')

        try:
            for epoch in range(start_epoch, FLAG.epochs):
                n = 0
                n_chunk = len(poems_vector) // FLAG.batch_size
                for batch in range(n_chunk):
                    loss, _, _ = sess.run(
                        [
                            end_points['total_loss'], end_points['last_state'],
                            end_points['train_op']
                        ],
                        feed_dict={
                            input_data: batches_inputs[n],
                            output_targets: batches_outputs[n]
                        })
                    n += 1
                    print('Epoch: %d, batch: %d, training loss: %.6f' %
                          (epoch, batch, loss))
                if epoch % 10 == 0:
                    saver.save(sess,
                               os.path.join(FLAG.result_dir,
                                            FLAG.model_prefix),
                               global_step=epoch)
        except KeyboardInterrupt:
            print('## Interrupt manually, try saving checkpoint for now...')
            saver.save(sess,
                       os.path.join(FLAG.result_dir, FLAG.model_prefix),
                       global_step=epoch)
            print(
                '## The last epoch was saved; next time training will start from epoch {}.'
                .format(epoch))
        #saver.save(sess, FLAG.result_dir+'/model/'+"model.ckpt")
        #tf.train.write_graph(sess.graph_def, FLAG.result_dir+'/model/', 'graph.pb')

        builder = tf.saved_model.builder.SavedModelBuilder(FLAG.result_dir +
                                                           "/model_complex")
        SignatureDef = tf.saved_model.signature_def_utils.build_signature_def(
            inputs={
                'input_data':
                tf.saved_model.utils.build_tensor_info(input_data),
                'output_targets':
                tf.saved_model.utils.build_tensor_info(output_targets)
            },
            outputs={
                'prediction':
                tf.saved_model.utils.build_tensor_info(
                    end_points['prediction'])
            })
        builder.add_meta_graph_and_variables(
            sess, [tag_constants.TRAINING],
            signature_def_map={
                tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                SignatureDef
            })
        builder.save()
Example #9
        self.hidden_size = 768
        self.seed = 1314


if __name__ == '__main__':
    dataset = '../dataset'  # dataset
    config = Config(dataset)
    seed = config.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    start_time = time.time()
    print("Loading data...")
    test_data = build_dataset(config, config.test_path, test_flag=True)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    model = bert_model(config).to(config.device)
    predict_all = test(config, model, test_iter)

    # --------------------- generate the submission file --------------------------
    df_test = pd.read_csv(config.submit_example_path, encoding='utf-8')
    id2label, label2id = json.load(open(config.id2label_path))
    id2label = {int(i): j for i, j in id2label.items()}  # convert keys to int (they are strings in the JSON)
    class_labels = []
    rank_labels = []
    for i in predict_all:
        label = str(id2label[i])
Example #10
                    help='need test set or not: Yes, No')
args = parser.parse_args()

if __name__ == '__main__':
    dataset = 'HITSZQA'  # dataset

    model_name = args.model  # bert
    if args.need_test == 'Yes':
        need_test = True
    else:
        need_test = False
    x = import_module('models.' + model_name)
    config = x.Config(dataset)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    start_time = time.time()
    print("Loading data...")
    train_data, dev_data, test_data = build_dataset(config, need_test)
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    model = x.Model(config).to(config.device)
    train(config, model, train_iter, dev_iter, test_iter)
Example #11
    embedding = 'embedding_SougouNews.npz'  # an .npz file stores several compressed arrays; loading it yields a dict {array_name: array, ...}
    if args.embedding == 'random':
        embedding = 'random'
    model_name = args.model  # RNN, RBF, SVM
    from utils import build_dataset, build_iterator, get_time_dif

    x = import_module('models.' + model_name)  # import the model module
    config = x.Config(dataset, embedding)  # configure the model parameters
    np.random.seed(1)  # fix the random seed used when no GPU is available
    torch.manual_seed(1)  # seed the CPU RNG so results are deterministic
    torch.cuda.manual_seed_all(1)  # seed all GPUs
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    start_time = time.time()
    print("Loading data...")
    X, Y = build_dataset(config)
    # _ = zip(X,Y)
    # np.random.shuffle(list(_))
    # X,Y = zip(*_)

    # split the dataset
    x_train, x_text, y_train, y_text = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        shuffle=True)

    # create the iterators
    # train_iter = build_iterator(x_train,y_train, config)
    # dev_iter = build_iterator(dev_data, config)
    # test_iter = build_iterator(x_text,y_text, config)
    # time_dif = get_time_dif(start_time)
Example #12
logging.basicConfig(
    format=
    '%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s',
    level=logging.DEBUG)

# read the data; returns a list holding every character and punctuation mark
vocabulary = read_data(FLAGS.text)

print('Data size', len(vocabulary))

vocabulary_size = 5000

# build the id mappings for each character: dictionary {char: id}; reversed_dictionary {id: char}
# both are also saved as JSON files in the current directory: dictionary.json and reversed_dictionary.json
_, _, dictionary, reversed_dictionary = build_dataset(vocabulary,
                                                      vocabulary_size)

model = Model(learning_rate=FLAGS.learning_rate,
              batch_size=FLAGS.batch_size,
              num_words=vocabulary_size,
              num_steps=FLAGS.num_steps)

model.build()

with tf.Session() as sess:
    summary_string_writer = tf.summary.FileWriter(FLAGS.output_dir, sess.graph)

    saver = tf.train.Saver(max_to_keep=5)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    logging.debug('Initialized')
Example #13
# boolean, whether or not you have access to a GPU
has_cuda = torch.cuda.is_available()

########## Setup train/validation dataset ##########

X = np.array([])
files = ['1565290032', '1565290425', '1565289740']
for featureFile in files:
    features = np.loadtxt(featureFile + '_features.txt')

    indexes = get_conditional_indexes(features[1, :], 0,
                                      (lambda a, b: a > b))  # greater than
    print("Training file count: {}".format(len(indexes)))

    tempX = build_dataset('./waveforms/', indexes, featureFile,
                          args.segment_size)
    X = np.append(X, tempX)

    print("Length of X so far: {}".format(X.shape))

# Restructure the dataset
X = FloatTensor(X)
X = X.view(-1, segment_size)

# Normalize the data in amplitude
X = (X - X.mean(dim=-1).unsqueeze(1)) / X.std(dim=-1).unsqueeze(1)

# Split training and validation sets from the same dataset
splitSize = round_down(int(len(X) * 0.8), segment_size)
print("Shape of X: {}".format(X.shape))
X_train = X[:splitSize]
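# A minimal sketch of the matching validation split (the name X_valid is assumed).
X_valid = X[splitSize:]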
Example #14
x = import_module('models.' + model_name)
# configure the parameters
config = x.Config(dataset)

# the seeds below are fixed so that every run gives the same results
np.random.seed(1)
# seed the CPU RNG
torch.manual_seed(1)
# seed all GPUs
torch.cuda.manual_seed_all(1)
# when True, cuDNN always picks the same (default) convolution algorithm
torch.backends.cudnn.deterministic = True  # ensure identical results on every run

start_time = time.time()
print("Loading data...")
OCNLI_train, OCNLI_dev, OCEMOTION_train, OCEMOTION_dev, TNEWS_train, TNEWS_dev = build_dataset(
    config, mode='train')
OCNLI_train_iter = build_iterator(OCNLI_train, config)
OCEMOTION_train_iter = build_iterator(OCEMOTION_train, config)
TNEWS_train_iter = build_iterator(TNEWS_train, config)
OCNLI_dev_iter = build_iterator(OCNLI_dev, config)
OCEMOTION_dev_iter = build_iterator(OCEMOTION_dev, config)
TNEWS_dev_iter = build_iterator(TNEWS_dev, config)

time_dif = get_time_dif(start_time)

# train
model = x.Model(config).to(config.device)
train(config, model, OCNLI_train_iter, OCNLI_dev_iter, OCEMOTION_train_iter,
      OCEMOTION_dev_iter, TNEWS_train_iter, TNEWS_dev_iter)
Example #15
from train import train_first_stage
from val import val_first_stage
from test import test_first_stage

# whether to use CUDA
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if args.mode == "train":
    isTraining = True
else:
    isTraining = False

database = build_dataset(args.dataset,
                         args.data_dir,
                         channel=args.input_nc,
                         isTraining=isTraining,
                         crop_size=(args.crop_size, args.crop_size),
                         scale_size=(args.scale_size, args.scale_size))
sub_dir = args.dataset + "/first_stage"  # + args.model + "/" + args.loss

if isTraining:  # train
    NAME = args.dataset + "_first_stage-2nd"  # + args.model + "_" + args.loss
    viz = Visualizer(env=NAME)
    writer = SummaryWriter(args.logs_dir + "/" + sub_dir)
    mkdir(args.models_dir + "/" +
          sub_dir)  # in a two-stage setup this creates the first_stage and second_stage subfolders

    # load the dataset
    train_dataloader = DataLoader(database,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
Example #16
def main():
    folder = './data/'
    files = os.listdir(folder)
    for name in files:
        print("\n\n-Filename=", name)
        filename = folder + name

        rates = [0.8, 0.5, 0.2]
        for rate in rates:
            learnCut = round(rate * 100)
            testCut = round((1 - rate) * 100)
            print("\n\n\n-Actual rate:", rate)
            learn, test = utils.build_dataset(filename,
                                              random=False,
                                              learnCut=rate)
            X_test, y_test, labels = format_dataset(test)
            X_learn, y_learn, _ = format_dataset(learn)
            data_dim = len(X_test[0])

            #Gaussian Bayes
            start = time.perf_counter()
            b = GaussianBayes(diag=True)
            b.fit(X_learn, y_learn)
            pred = b.predict(X_test)

            end = time.perf_counter()
            print("\n-Gaussian Bayes:\nTime : ", (end - start))
            print("Confusion Matrix :\n", confusion_matrix(y_test, pred),
                  "\nScore : ", score(pred, y_test))
            plot_confusion_matrix(
                confusion_matrix(y_test, pred),
                labels,
                title=
                "Confusion matrix, Bayes, dim=%d, learn/test division : %d%%/%d%%"
                % (data_dim, learnCut, testCut),
                filename="cm_bayes_dim%d_div%d" % (data_dim, learnCut))

            #K Neighbors Regressor
            success = []
            bestPredN = []
            bestTime = 0
            bestScore = 0
            bestK = 0

            #Test with different values of K
            for i in range(1, 40):
                start = time.perf_counter()
                neigh = KNeighborsRegressor(n_neighbors=i, weights='uniform')
                neigh.fit(X_learn, y_learn)
                predN = neigh.predict(X_test).astype(int)
                end = time.perf_counter()
                success.append(score(predN, y_test))
                if (bestScore < score(predN, y_test)):
                    bestPredN = predN
                    bestTime = end - start
                    bestScore = score(predN, y_test)
                    bestK = i

            print("\n-The best: K=", bestK, " Neighbors Regressor:\nTime : ",
                  bestTime)
            print("Confusion Matrix :\n", confusion_matrix(y_test, bestPredN),
                  "\nScore : ", bestScore)
            plot_confusion_matrix(
                confusion_matrix(y_test, bestPredN),
                labels,
                title=
                'Confusion matrix, KNN, k=%d, dim=%d, learn/test division : %d%%/%d%%'
                % (bestK, data_dim, learnCut, testCut),
                filename="cm_knn_k%d_dim%d_div%d" %
                (bestK, data_dim, learnCut))

            #Plot comparison with the K Neighbors Regressor
            plt.close()
            #plt.figure(figsize=(12,6))
            plt.plot([score(pred, y_test) for x in range(40)],
                     color='blue',
                     label="Bayes")
            plt.plot(range(1, 40),
                     success,
                     color='green',
                     linestyle='dashed',
                     marker='o',
                     markerfacecolor='green',
                     markersize=5,
                     label="KNN")
            plt.title(
                'Success Rate (higher is better), dim=%d, learn/test division : %d%%/%d%%'
                % (data_dim, learnCut, testCut))
            plt.xlabel('K value')
            plt.ylabel('Success Rate')
            plt.legend()
            plt.savefig("bayesVknn_dim%d_div%d" % (data_dim, learnCut))

        #plot effect of learn/test division
        bayesScores = []
        knnScores = []
        cutRange = range(5, 100, 5)
        for i in cutRange:
            rate = round(i / 100.0, 2)
            #print(rate)
            learn, test = utils.build_dataset(filename,
                                              random=False,
                                              learnCut=rate)
            X_test, y_test, labels = format_dataset(test)
            X_learn, y_learn, _ = format_dataset(learn)
            data_dim = len(X_test[0])

            b = GaussianBayes(diag=True)
            b.fit(X_learn, y_learn)
            pred = b.predict(X_test)
            bayesScores.append(score(pred, y_test))

            neigh = KNeighborsRegressor(n_neighbors=1, weights='uniform')
            neigh.fit(X_learn, y_learn)
            pred = neigh.predict(X_test).astype(int)
            knnScores.append(score(pred, y_test))
        plt.close()
        #plt.ylim(bottom=0, top=1.1)
        plt.xticks(ticks=range(len(cutRange)),
                   labels=[str(i) for i in cutRange])
        plt.plot(bayesScores, color='blue', label="Bayes")
        plt.plot(knnScores,
                 color='green',
                 linestyle='dashed',
                 marker='o',
                 markerfacecolor='green',
                 markersize=5,
                 label="KNN")
        plt.title('Success Rate with different learn/test division, dim=%d' %
                  (data_dim))
        plt.xlabel('Learn cut of the dataset (%)')
        plt.ylabel('Success Rate')
        plt.legend()
        plt.savefig("learn-test-div_dim%d" % (data_dim), pad_inches=1)
Example #17
import torch
from torch.utils.data import TensorDataset
from utils import clean_text, config
from utils import build_vocab, build_dataset, get_pretrained_embedding
from seq2seq import EncoderRNN, DecoderRNN, training

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device:', device)
# clean the text
cleaned_news, cleaned_summaries = clean_text(config['text_path'], config['stopwords_path'])

# build the vocabulary
vocab = build_vocab(cleaned_news, cleaned_summaries, min_freq=3)

# build the datasets as tensors ready for TensorDataset
news_dataset = build_dataset(vocab, cleaned_news, config['max_len_news'], type='news')
summaries_dataset = build_dataset(vocab, cleaned_summaries, config['max_len_summaries'], type='summaries')
# combine them into a single dataset
dataset = TensorDataset(news_dataset, summaries_dataset)

# load the pretrained word2vec embeddings (trained on Sogou News); dimension is 300
pre_embeddings = get_pretrained_embedding(config['pretrained_vector_path'], vocab, vector_dim=300).to(device)

# build the model; the hidden state and word embedding dimensions are both 300
vocab_size = len(vocab)
# the encoder is a single-layer bidirectional GRU
encoder = EncoderRNN(vocab_size, 300, 300, n_layers=1, pre_embeddings=pre_embeddings)
# the decoder is a two-layer unidirectional GRU
decoder = DecoderRNN(vocab_size, 300, 300, n_layers=2, pre_embeddings=pre_embeddings)

# move everything to CUDA; needed for training
Example #18
        final_result.append(dic)
    # write the output JSON file
    import json
    with open(output_path, 'w') as f:
        for each in final_result:
            json_str = json.dumps(each)  # dumps
            f.write(json_str)
            f.write('\n')
        

if __name__ == '__main__':
    dataset = '.'  # dataset

    model_name = 'bert'  # bert
    # dynamically import the module
    x = import_module('models.' + model_name)
    # configure the parameters
    config = x.Config(dataset)

    model = x.Model(config).to(config.device)

    OCNLI_test, OCEMOTION_test, TNEWS_test = build_dataset(config, mode='test')
    OCNLI_test_iter = build_iterator(OCNLI_test, config)
    OCEMOTION_test_iter = build_iterator(OCEMOTION_test, config)
    TNEWS_test_iter = build_iterator(TNEWS_test, config)
    # submission for the first task
    submit_test(config, model, OCNLI_test_iter, config.OCLI_submit_output_path, 0)
    submit_test(config, model, OCEMOTION_test_iter, config.OCEMOTION_submit_output_path, 1)
    submit_test(config, model, TNEWS_test_iter, config.TNEWS_submit_output_path, 2)

Example #19
                    type=str,
                    default='BruceBert',
                    help='choose a model')
args = parser.parse_args()

if __name__ == '__main__':
    # dataset path
    dataset = 'THUCNews'
    model_name = args.model
    x = import_module('models.' + model_name)
    config = x.Config(dataset)
    np.random.seed(1)
    # ensure identical results on every run
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True

    start_time = time.time()
    print('Loading the datasets...')
    train_data, dev_data, test_data = utils.build_dataset(config)

    train_iter = utils.build_iterator(train_data, config)
    dev_iter = utils.build_iterator(dev_data, config)
    test_iter = utils.build_iterator(test_data, config)

    time_dif = utils.get_time_dif(start_time)
    print("准备数据的时间:", time_dif)

    model = x.Model(config).to(config.device)
    train.train(config, model, train_iter, dev_iter, test_iter)
Example #20
    parser.add_argument('-epochs', type=int, default=60)

    parser.add_argument('-seed', type=int, default=1)
    args = parser.parse_args()

    print(args)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    print("Loading Data...")

    datasets, vocab, type2label = utils.build_dataset(args.train_file,
                                                      args.dev_file,
                                                      args.test_file)

    train_loader, dev_loader, test_loader = (
        DataLoader(dataset=dataset,
                   batch_size=args.batch_size,
                   collate_fn=utils.collate_fn,
                   shuffle=dataset.train)
        for i, dataset in enumerate(datasets))

    print("Building Model...")
    if args.model == "bilstm_crf":
        model = BiLSTM_CRF_batch.Model(vocab_size=len(vocab),
                                       tag_to_ix=type2label,
                                       embedding_dim=args.embedding_size,
                                       hidden_dim=args.hidden_size,
Example #21
    parser.add_argument('-dropout', type=float, default=0.5)
    parser.add_argument('-epochs', type=int, default=20)

    parser.add_argument('-seed', type=int, default=1)
    args = parser.parse_args()

    print(args)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    print("Loading Data...")
    datasets, vocab = utils.build_dataset(args.train_file,
                                          args.dev_file,
                                          args.test_file)

    train_loader, dev_loader, test_loader = (
        DataLoader(dataset=dataset,
                   batch_size=args.batch_size,
                   collate_fn=utils.collate_fn,
                   shuffle=dataset.train)
        for i, dataset in enumerate(datasets))

    print("Building Model...")
    if args.model == "cnn":
        model = CNN.Model(vocab_size=len(vocab),
                          embedding_size=args.embedding_size,
                          hidden_size=args.hidden_size,
                          filter_sizes=[3, 4, 5],
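                          # assumed continuation of the truncated call,
                          # mirroring the full CNN.Model call in Example #24
                          dropout=args.dropout)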
Example #22
def main():
    print("bonjour")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    N_EPOCHS = 8
    WIN_SIZES = [3,4,5]
    BATCH_SIZE = 64
    EMB_DIM = 300
    OUT_DIM = 1
    L2_NORM_LIM = 3.0
    NUM_FIL = 100
    DROPOUT_PROB = 0.5
    V_STRATEGY = 'static'
    ALLOC_MEM = 4096

    if V_STRATEGY in ['rand', 'static', 'non-static']:
        NUM_CHA = 1
    else:
        NUM_CHA = 2

    # FILE paths
    W2V_PATH     = 'GoogleNews-vectors-negative300.bin'
    TRAIN_X_PATH = 'train_x.txt'
    TRAIN_Y_PATH = 'train_y.txt'
    VALID_X_PATH = 'valid_x.txt'
    VALID_Y_PATH = 'valid_y.txt'


    # Load pretrained embeddings
    pretrained_model = gensim.models.KeyedVectors.load_word2vec_format(W2V_PATH, binary=True)
    vocab = pretrained_model.wv.vocab.keys()
    w2v = pretrained_model.wv

    # Build dataset =======================================================================================================
    w2c = build_w2c(TRAIN_X_PATH, vocab=vocab)
    w2i, i2w = build_w2i(TRAIN_X_PATH, w2c, unk='unk')
    train_x, train_y = build_dataset(TRAIN_X_PATH, TRAIN_Y_PATH, w2i, unk='unk')
    valid_x, valid_y = build_dataset(VALID_X_PATH, VALID_Y_PATH, w2i, unk='unk')
    train_x, train_y = sort_data_by_length(train_x, train_y)
    valid_x, valid_y = sort_data_by_length(valid_x, valid_y)
    VOCAB_SIZE = len(w2i)
    print('VOCAB_SIZE:', VOCAB_SIZE)
    
    V_init = init_V(w2v, w2i)
    

    with open(os.path.join(RESULTS_DIR, './w2i.dump'), 'wb') as f_w2i, open(os.path.join(RESULTS_DIR, './i2w.dump'), 'wb') as f_i2w:
        pickle.dump(w2i, f_w2i)
        pickle.dump(i2w, f_i2w)

    # Build model =================================================================================
 
    model = CNN(VOCAB_SIZE, EMB_DIM, NUM_FIL, WIN_SIZES, OUT_DIM,
                DROPOUT_PROB, len(w2i))


    # Train model ================================================================================
   
    pretrained_embeddings = torch.tensor(V_init)
    model.embedding.weight.data.copy_(pretrained_embeddings)
    model.embedding.weight.data[len(w2i)-1] = torch.zeros(EMB_DIM)
    optimizer = optim.Adam(model.parameters())
    criterion = nn.BCEWithLogitsLoss()
    model = model.to(device)    
    criterion = criterion.to(device)
    n_batches_train = int(len(train_x)/BATCH_SIZE)
    n_batches_valid = int(len(valid_x)/BATCH_SIZE)
    #print(len(train_x))
    
    best_valid_loss = float('inf')

    for j in range(N_EPOCHS):
        start_time = time.time()
        epoch_loss = 0
        epoch_acc = 0

        for i in range(n_batches_train-1):
            start = i*BATCH_SIZE
            end = start+BATCH_SIZE      
            train_loss, train_acc = train(model,train_x[start:end],train_y[start:end], criterion,optimizer)
            end_time = time.time()
            epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        
            #if valid_loss < best_valid_loss:
             #   best_valid_loss = valid_loss
              #  torch.save(model.state_dict(), 'tut4-model.pt')
        
        for k in range(n_batches_valid-1):
            start = k*BATCH_SIZE
            end = start+BATCH_SIZE      
            valid_loss, valid_acc = evaluate(model,valid_x[start:end],valid_y[start:end], criterion,optimizer)
            end_time = time.time()
            epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        
        print(f'Epoch: {j } | Epoch Time: {epoch_mins}m {epoch_secs}s')
        print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
        print(f'\t Val. Loss: {valid_loss:.3f} |  Val. Acc: {valid_acc*100:.2f}%')

    torch.save(model.state_dict(), 'training.pt')
    return model
Example #23
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
with open("result/args.pickle", "wb") as out_data:
    pickle.dump(args, out_data)
# =======================end setting args====================

# =======================start preparing data================
if not os.path.exists("result/saved_model"):
    os.mkdir("result/saved_model")

print("building dictionary....")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict(
    "train", args.toy)
print("loading training dataset..")
train_x, train_y = build_dataset("train", word_dict, article_max_len,
                                 summary_max_len, args.toy)
# =======================end preparing data================

# ======================= start training===================
with tf.Session() as sess:
    model = Model(reversed_dict, article_max_len, summary_max_len, args)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(tf.global_variables())

    batches = batch_iter(train_x, train_y, args.batch_size, args.num_epochs)
    num_batches_per_epoch = (len(train_x) -
                             1) // args.batch_size + 1  # number of batches per epoch

    print("Iteration starts.")
    print("Number of batches per epoch:", num_batches_per_epoch)
Example #24
def run(*args, **kwargs):
    parser = argparse.ArgumentParser()
    parser.add_argument('-train_file', type=str, default='./data/train.csv')
    parser.add_argument('-dev_file', type=str, default='./data/dev.csv')
    parser.add_argument('-test_file', type=str, default='./data/test.csv')
    parser.add_argument('-save_path', type=str, default='./model.pkl')
    parser.add_argument('-model',
                        type=str,
                        default=kwargs['model'],
                        help="[cnn, bilstm]")

    parser.add_argument('-batch_size', type=int, default=kwargs['batch_size'])
    parser.add_argument('-embedding_size', type=int, default=128)
    parser.add_argument('-hidden_size', type=int, default=128)
    parser.add_argument('-learning_rate', type=float, default=1e-3)
    parser.add_argument('-dropout', type=float, default=0.5)
    parser.add_argument('-epochs', type=int, default=20)

    parser.add_argument('-seed', type=int, default=1)
    args = parser.parse_args()

    print(args)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    print("Loading Data...")
    datasets, vocab = utils.build_dataset(args.train_file, args.dev_file,
                                          args.test_file)

    train_loader, dev_loader, test_loader = (DataLoader(
        dataset=dataset,
        batch_size=args.batch_size,
        collate_fn=utils.collate_fn,
        shuffle=dataset.train) for i, dataset in enumerate(datasets))

    print("Building Model...")
    if args.model == "cnn":
        model = CNN.Model(vocab_size=len(vocab),
                          embedding_size=args.embedding_size,
                          hidden_size=args.hidden_size,
                          filter_sizes=[3, 4, 5],
                          dropout=args.dropout)
    elif args.model == "bilstm":
        model = BiLSTM.Model(vocab_size=len(vocab),
                             embedding_size=args.embedding_size,
                             hidden_size=args.hidden_size,
                             dropout=args.dropout)

    if torch.cuda.is_available():
        model = model.cuda()

    trainer = Trainer(model, args.learning_rate)

    train_loss_list = list()
    dev_loss_list = list()

    best_acc = 0
    for i in range(args.epochs):
        print("Epoch: {} ################################".format(i))
        train_loss, train_acc = trainer.train(train_loader)
        dev_loss, dev_acc = trainer.evaluate(dev_loader)

        train_loss_list.append(train_loss)
        dev_loss_list.append(dev_loss)

        print("Train Loss: {:.4f} Acc: {:.4f}".format(train_loss, train_acc))
        print("Dev   Loss: {:.4f} Acc: {:.4f}".format(dev_loss, dev_acc))
        if dev_acc > best_acc:
            best_acc = dev_acc
            trainer.save(args.save_path)
        print("###########################################")
    trainer.load(args.save_path)
    test_loss, test_acc = trainer.evaluate(test_loader)
    print("Test   Loss: {:.4f} Acc: {:.4f}".format(test_loss, test_acc))

    return train_loss_list, dev_loss_list
Example #25
def main():
    parser = argparse.ArgumentParser(description='A Neural Attention Model for Abstractive Sentence Summarization in DyNet')

    parser.add_argument('--gpu', type=str, default='0', help='GPU ID to use. For cpu, set -1 [default: 0]')
    parser.add_argument('--n_epochs', type=int, default=10, help='Number of epochs [default: 10]')
    parser.add_argument('--n_train', type=int, default=3803957, help='Number of training data (up to 3803957 in gigaword) [default: 3803957]')
    parser.add_argument('--n_valid', type=int, default=189651, help='Number of validation data (up to 189651 in gigaword) [default: 189651]')
    parser.add_argument('--batch_size', type=int, default=32, help='Mini batch size [default: 32]')
    parser.add_argument('--vocab_size', type=int, default=60000, help='Vocabulary size [default: 60000]')
    parser.add_argument('--emb_dim', type=int, default=256, help='Embedding size [default: 256]')
    parser.add_argument('--hid_dim', type=int, default=256, help='Hidden state size [default: 256]')
    parser.add_argument('--encoder_type', type=str, default='attention', help='Encoder type. bow: Bag-of-words encoder. attention: Attention-based encoder [default: attention]')
    parser.add_argument('--c', type=int, default=5, help='Window size in neural language model [default: 5]')
    parser.add_argument('--q', type=int, default=2, help='Window size in attention-based encoder [default: 2]')
    parser.add_argument('--alloc_mem', type=int, default=4096, help='Amount of memory to allocate [mb] [default: 4096]')
    args = parser.parse_args()
    print(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    N_EPOCHS     = args.n_epochs
    N_TRAIN      = args.n_train
    N_VALID      = args.n_valid
    BATCH_SIZE   = args.batch_size
    VOCAB_SIZE   = args.vocab_size
    EMB_DIM      = args.emb_dim
    HID_DIM      = args.hid_dim
    ENCODER_TYPE = args.encoder_type
    C            = args.c
    Q            = args.q
    ALLOC_MEM    = args.alloc_mem

    # File paths
    TRAIN_X_FILE = './data/train.article.txt'
    TRAIN_Y_FILE = './data/train.title.txt'
    VALID_X_FILE = './data/valid.article.filter.txt'
    VALID_Y_FILE = './data/valid.title.filter.txt'

    # DyNet setting
    dyparams = dy.DynetParams()
    dyparams.set_autobatch(True)
    dyparams.set_random_seed(RANDOM_STATE)
    dyparams.set_mem(ALLOC_MEM)
    dyparams.init()

    # Build dataset ====================================================================================
    w2c = build_word2count(TRAIN_X_FILE, n_data=N_TRAIN)
    w2c = build_word2count(TRAIN_Y_FILE, w2c=w2c, n_data=N_TRAIN)

    train_X, w2i, i2w = build_dataset(TRAIN_X_FILE, w2c=w2c, padid=False, eos=True, unksym='<unk>', target=False, n_data=N_TRAIN, vocab_size=VOCAB_SIZE)
    train_y, _, _     = build_dataset(TRAIN_Y_FILE, w2i=w2i, target=True, n_data=N_TRAIN)

    valid_X, _, _ = build_dataset(VALID_X_FILE, w2i=w2i, target=False, n_data=N_VALID)
    valid_y, _, _ = build_dataset(VALID_Y_FILE, w2i=w2i, target=True, n_data=N_VALID)

    VOCAB_SIZE = len(w2i)
    OUT_DIM = VOCAB_SIZE
    print('VOCAB_SIZE:', VOCAB_SIZE)

    # Build model ======================================================================================
    model = dy.Model()
    trainer = dy.AdamTrainer(model)

    rush_abs = ABS(model, EMB_DIM, HID_DIM, VOCAB_SIZE, Q, C, encoder_type=ENCODER_TYPE)

    # Padding
    train_y = [[w2i['<s>']]*(C-1)+instance_y for instance_y in train_y]
    valid_y = [[w2i['<s>']]*(C-1)+instance_y for instance_y in valid_y]

    n_batches_train = math.ceil(len(train_X)/BATCH_SIZE)
    n_batches_valid = math.ceil(len(valid_X)/BATCH_SIZE)

    start_time = time.time()
    for epoch in range(N_EPOCHS):
        # Train
        train_X, train_y = shuffle(train_X, train_y)
        loss_all_train = []
        for i in tqdm(range(n_batches_train)):
            # Create a new computation graph
            dy.renew_cg()
            rush_abs.associate_parameters()

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            train_X_mb = train_X[start:end]
            train_y_mb = train_y[start:end]

            losses = []
            for x, t in zip(train_X_mb, train_y_mb):
                t_in, t_out = t[:-1], t[C:]

                y = rush_abs(x, t_in)
                loss = dy.esum([dy.pickneglogsoftmax(y_t, t_t) for y_t, t_t in zip(y, t_out)])
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_train.append(mb_loss.value())

            # Backward prop
            mb_loss.backward()
            trainer.update()

        # Valid
        loss_all_valid = []
        for i in range(n_batches_valid):
            # Create a new computation graph
            dy.renew_cg()
            rush_abs.associate_parameters()

            # Create a mini batch
            start = i*BATCH_SIZE
            end = start + BATCH_SIZE
            valid_X_mb = valid_X[start:end]
            valid_y_mb = valid_y[start:end]

            losses = []
            for x, t in zip(valid_X_mb, valid_y_mb):
                t_in, t_out = t[:-1], t[C:]

                y = rush_abs(x, t_in)
                loss = dy.esum([dy.pickneglogsoftmax(y_t, t_t) for y_t, t_t in zip(y, t_out)])
                losses.append(loss)

            mb_loss = dy.average(losses)

            # Forward prop
            loss_all_valid.append(mb_loss.value())

        print('EPOCH: %d, Train Loss: %.3f, Valid Loss: %.3f' % (
            epoch+1,
            np.mean(loss_all_train),
            np.mean(loss_all_valid)
        ))

        # Save model ========================================================================
        dy.save('./model_e'+str(epoch+1), [rush_abs])
        with open('./w2i.dump', 'wb') as f_w2i, open('./i2w.dump', 'wb') as f_i2w:
            pickle.dump(w2i, f_w2i)
            pickle.dump(i2w, f_i2w)
Example #26

if __name__ == '__main__':
    dataset = 'data'  # dataset
    embedding = 'embedding_SougouNews.npz'
    model_name = "TextCNN"

    x = import_module('models.' + model_name)
    config = x.Config(dataset, embedding)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    start_time = time.time()
    print("Loading data...")
    vocab, train_data, dev_data, test_data = build_dataset(config, False)
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    config.n_vocab = len(vocab)
    model = x.Model(config).to(config.device)
    if model_name != 'Transformer':
        init_network(model)
    print(model.parameters)
    train(config, model, train_iter, dev_iter, test_iter)
Example #27
                                                 np.count_nonzero(gt)))
    print("Running an experiment with the {} model".format(MODEL),
          "run {}/{}".format(run + 1, N_RUNS))

    display_predictions(convert_to_color(train_gt),
                        viz,
                        caption="Train ground truth")
    display_predictions(convert_to_color(test_gt),
                        viz,
                        caption="Test ground truth")

    if MODEL == 'SVM_grid':
        print("Running a grid search SVM")
        # Grid search SVM (linear and RBF)
        X_train, y_train = build_dataset(img,
                                         train_gt,
                                         ignored_labels=IGNORED_LABELS)
        class_weight = 'balanced' if CLASS_BALANCING else None
        clf = sklearn.svm.SVC(class_weight=class_weight)
        clf = sklearn.model_selection.GridSearchCV(clf,
                                                   SVM_GRID_PARAMS,
                                                   verbose=5,
                                                   n_jobs=4)
        clf.fit(X_train, y_train)
        print("SVM best parameters : {}".format(clf.best_params_))
        prediction = clf.predict(img.reshape(-1, N_BANDS))
        save_model(clf, MODEL, DATASET)
        prediction = prediction.reshape(img.shape[:2])
    elif MODEL == 'SVM':
        X_train, y_train = build_dataset(img,
                                         train_gt,
Example #28
    sample = np.random.choice(np.arange(len(predict)), p=predict)
    if sample >= len(vocabs):
        return vocabs[-1]
    else:
        return vocabs[sample]


# basic settings
start_token = 'B'
end_token = 'E'
result_dir = 'result/name'
corpus_file = 'data/names.txt'
lr = 0.0002

print("正在从%s加载数据...." % corpus_file)
poems_vector, word_to_int, vocabularies = build_dataset(corpus_file)

# initialization
print("Loading training results from %s" % result_dir)
batch_size = 1
input_data = tf.placeholder(tf.int32, [batch_size, None])
end_points = char_rnn(model='lstm',
                      input_data=input_data,
                      output_data=None,
                      vocab_size=len(vocabularies),
                      rnn_size=128,
                      num_layers=2,
                      batch_size=64,
                      learning_rate=lr)
saver = tf.train.Saver(tf.global_variables())
init_op = tf.group(tf.global_variables_initializer(),
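                   # assumed continuation of the truncated call, mirroring Example #8
                   tf.local_variables_initializer())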
Example #29
def main():
    parser = argparse.ArgumentParser(description='Convolutional Neural Networks for Sentence Classification in DyNet')

    parser.add_argument('--gpu', type=int, default=-1, help='GPU ID to use. For cpu, set -1 [default: -1]')
    parser.add_argument('--model_file', type=str, default='./model', help='Model to use for prediction [default: ./model]')
    parser.add_argument('--input_file', type=str, default='./data/valid_x.txt', help='Input file path [default: ./data/valid_x.txt]')
    parser.add_argument('--output_file', type=str, default='./pred_y.txt', help='Output file path [default: ./pred_y.txt]')
    parser.add_argument('--w2i_file', type=str, default='./w2i.dump', help='Word2Index file path [default: ./w2i.dump]')
    parser.add_argument('--i2w_file', type=str, default='./i2w.dump', help='Index2Word file path [default: ./i2w.dump]')
    parser.add_argument('--alloc_mem', type=int, default=1024, help='Amount of memory to allocate [mb] [default: 1024]')
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    MODEL_FILE = args.model_file
    INPUT_FILE = args.input_file
    OUTPUT_FILE = args.output_file
    W2I_FILE = args.w2i_file
    I2W_FILE = args.i2w_file
    ALLOC_MEM = args.alloc_mem

    # DyNet setting
    dyparams = dy.DynetParams()
    dyparams.set_mem(ALLOC_MEM)
    dyparams.init()

    # Load model
    model = dy.Model()
    pretrained_model = dy.load(MODEL_FILE, model)
    if len(pretrained_model) == 3:
        V1, layers = pretrained_model[0], pretrained_model[1:]
        MULTICHANNEL = False
    else:
        V1, V2, layers = pretrained_model[0], pretrained_model[1], pretrained_model[2:]
        MULTICHANNEL = True

    EMB_DIM = V1.shape()[0]
    WIN_SIZES = layers[0].win_sizes

    # Load test data
    with open(W2I_FILE, 'rb') as f_w2i, open(I2W_FILE, 'rb') as f_i2w:
        w2i = pickle.load(f_w2i)
        i2w = pickle.load(f_i2w)

    max_win = max(WIN_SIZES)
    test_X, _, _ = build_dataset(INPUT_FILE, w2i=w2i, unksym='unk')
    test_X = [[0]*max_win + instance_x + [0]*max_win for instance_x in test_X]

    # Pred
    pred_y = []
    for instance_x in tqdm(test_X):
        # Create a new computation graph
        dy.renew_cg()
        associate_parameters(layers)

        sen_len = len(instance_x)

        if MULTICHANNEL:
            x_embs1 = dy.concatenate([dy.lookup(V1, x_t, update=False) for x_t in instance_x], d=1)
            x_embs2 = dy.concatenate([dy.lookup(V2, x_t, update=False) for x_t in instance_x], d=1)
            x_embs1 = dy.transpose(x_embs1)
            x_embs2 = dy.transpose(x_embs2)
            x_embs = dy.concatenate([x_embs1, x_embs2], d=2)
        else:
            x_embs = dy.concatenate([dy.lookup(V1, x_t, update=False) for x_t in instance_x], d=1)
            x_embs = dy.transpose(x_embs)
            x_embs = dy.reshape(x_embs, (sen_len, EMB_DIM, 1))

        y = f_props(layers, x_embs, train=False)
        pred_y.append(str(int(binary_pred(y.value()))))

    with open(OUTPUT_FILE, 'w') as f:
        f.write('\n'.join(pred_y))
Example #30
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    #    start_time = time.time()
    #    print("Loading data...")
    #    train_data, dev_data, test_data = build_dataset(config)
    #    train_iter = build_iterator(train_data, config)
    #    dev_iter = build_iterator(dev_data, config)
    #    test_iter = build_iterator(test_data, config)
    #    time_dif = get_time_dif(start_time)
    #    print("Time usage:", time_dif)

    # train
    for i in range(3, 5):
        config.train_path = dataset + '/data/fold5/cvfold' + str(
            i) + '_train.txt'
        config.dev_path = dataset + '/data/fold5/cvfold' + str(i) + '_dev.txt'
        config.test_path = dataset + '/data/fold5/cv_valid.txt'
        config.save_path = dataset + '/saved_dict/' + config.model_name + '512-5fold-' + str(
            i) + '.bin'
        submit_data = build_dataset(config)
        #train_iter = build_iterator(train_data, config)
        #dev_iter = build_iterator(dev_data, config)
        #test_iter = build_iterator(test_data, config)
        submit_iter = build_iterator(submit_data, config)
        model = x.Model(config).to(config.device)
        #test(config, model, dev_iter, 'bertRCNN_train_'+str(i)+'.npy')
        #test(config, model, test_iter,'bertRCNN_valid_'+str(i)+'.npy')
        test(config, model, submit_iter, 'bertRCNN_submitb_' + str(i) + '.npy')

        #train(config, model, train_iter, dev_iter, test_iter)