Example #1
    def test_doc2vec(self):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=50)
        corpus_generator = Doc2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)
        d2v = Doc2VecModel(size=16,
                           min_count=1,
                           dm=0,
                           alpha=0.025,
                           min_alpha=0.025)
        trainer = EmbeddingTrainer(d2v)
        trainer.train(corpus_generator)

        # Doc2vec inference is non-deterministic, so we reset the random seed before each call to make the test reproducible.
        d2v.random.seed(0)
        v1 = d2v.infer_vector(["media", "news"])
        d2v.random.seed(0)
        v2 = d2v.infer_vector(["media", "news"])
        assert_array_equal(v1, v2)

        # test unseen word
        self.assertRaises(KeyError, lambda: d2v["sports"])

        # test unseen sentence
        v1 = d2v.infer_vector(["sports"])
        v2 = d2v.infer_vector(["sports"])
        assert_array_equal(v1, v2)
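For context, a minimal plain-gensim sketch of why the reseeding above is needed, assuming the gensim 3.x API these wrappers target (size=, iter=, model.random); the corpus is illustrative:

import numpy as np
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

docs = [TaggedDocument(words=["media", "news", "report"], tags=[0]),
        TaggedDocument(words=["sports", "team", "game"], tags=[1])]
model = Doc2Vec(docs, size=16, min_count=1, dm=0, alpha=0.025, min_alpha=0.025)

# infer_vector starts from a random vector and trains it for a few
# epochs, so repeated calls on the same tokens generally differ.
v1 = model.infer_vector(["media", "news"])
v2 = model.infer_vector(["media", "news"])

# Reseeding the model's RNG right before each call restores
# reproducibility, which is what the test above relies on.
model.random.seed(0)
v1 = model.infer_vector(["media", "news"])
model.random.seed(0)
v2 = model.infer_vector(["media", "news"])
assert np.array_equal(v1, v2)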
Example #2
    def test_fasttext(self):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=50)
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)
        fasttext = FastTextModel(size=16, min_count=3, window=6, iter=4)
        trainer = EmbeddingTrainer(fasttext)
        trainer.train(corpus_generator)

        v1 = fasttext.infer_vector(["media"])
        v2 = fasttext.infer_vector(["media"])

        assert_array_equal(v1, v2)

        # FastText supports lookups for out-of-vocabulary words by summing the vectors of the word's character n-grams
        assert "sports" not in fasttext.wv.vocab
        assert fasttext["sports"].shape == (16, )

        # test an unseen word whose character n-grams are all absent from the training data
        self.assertRaises(KeyError, lambda: fasttext["axe"])

        # test a list that has some words not in vocab
        v1 = fasttext.infer_vector(["media", "sport", "axe"])
        v2 = fasttext.infer_vector(["media", "sport"])
        assert_array_equal(v1, v2)
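The out-of-vocabulary behavior tested above comes straight from gensim's FastText; a minimal sketch against the gensim 3.x API (illustrative corpus):

from gensim.models import FastText

sentences = [["media", "news", "report"], ["sports", "team", "game"]] * 10
fasttext = FastText(sentences, size=16, min_count=1, window=3, iter=5)

# "reporter" never occurs in the corpus, but it shares character
# n-grams with "report", so a vector can still be composed for it.
assert "reporter" not in fasttext.wv.vocab
assert fasttext.wv["reporter"].shape == (16,)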
Example #3
    def test_embedding_feature(self):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=30)
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields,
            raw=True)
        w2v = Word2VecModel(size=10, min_count=0, iter=4, window=6, workers=3)
        trainer = EmbeddingTrainer(w2v)
        trainer.train(corpus_generator)

        job_postings = RawCorpusCreator(
            JobPostingCollectionSample(num_records=50))
        raw1, raw2 = tee(job_postings)

        fc = SequenceFeatureCreator(raw1,
                                    sentence_tokenizer=sentence_tokenize,
                                    word_tokenizer=word_tokenize,
                                    embedding_model=w2v,
                                    features=["EmbeddingFeature"])
        fc = iter(fc)

        self.assertEqual(
            next(fc).shape[0],
            np.array(
                next(iter(word_tokenizer_gen(
                    sentence_tokenizer_gen(raw2))))).shape[0])
        self.assertEqual(next(fc)[0].shape[0], 10)
Example #4
    def test_word2vec(self):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=50)
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)
        w2v = Word2VecModel(size=16, min_count=3, iter=4, window=6, workers=3)
        trainer = EmbeddingTrainer(w2v)
        trainer.train(corpus_generator)

        v1 = w2v.infer_vector(["media"])
        v2 = w2v.infer_vector(["media"])

        assert_array_equal(v1, v2)

        # test unseen vocab
        assert w2v.infer_vector(["sports"]).shape[0] == 16

        # test a list that has some words not in vocab
        sentence_with_unseen_word = ["sports", "news", "and", "media"]
        sentence_without_unseen_word = ["news", "and", "media"]
        assert_array_equal(w2v.infer_vector(sentence_with_unseen_word),
                           w2v.infer_vector(sentence_without_unseen_word))
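The last assertion suggests that, for Word2Vec, infer_vector averages the vectors of in-vocabulary tokens and skips unseen ones. A sketch of that behavior, where average_tokens is a hypothetical stand-in for the wrapper's logic (gensim 3.x API):

import numpy as np
from gensim.models import Word2Vec

sentences = [["news", "and", "media", "report"]] * 20
w2v = Word2Vec(sentences, size=16, min_count=1, window=4, iter=5)

def average_tokens(model, tokens):
    # Hypothetical helper: average the vectors of in-vocabulary
    # tokens, silently skipping unseen ones.
    vectors = [model.wv[t] for t in tokens if t in model.wv.vocab]
    return np.mean(vectors, axis=0) if vectors else np.zeros(model.wv.vector_size)

v1 = average_tokens(w2v, ["sports", "news", "and", "media"])  # "sports" is unseen
v2 = average_tokens(w2v, ["news", "and", "media"])
np.testing.assert_array_equal(v1, v2)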
Example #5
    def test_knn_doc2vec_cls_s3(self):
        client = boto3.client('s3')
        client.create_bucket(Bucket='fake-open-skills',
                             ACL='public-read-write')
        s3_path = "s3://fake-open-skills/model_cache/soc_classifiers"
        s3_storage = S3Store(path=s3_path)
        model_storage = ModelStorage(s3_storage)
        corpus_generator = FakeCorpusGenerator()

        # Embedding has no lookup_dict
        d2v = Doc2VecModel(size=10,
                           min_count=1,
                           dm=0,
                           alpha=0.025,
                           min_alpha=0.025)
        trainer = EmbeddingTrainer(d2v, model_storage=model_storage)
        trainer.train(corpus_generator, lookup=False)

        self.assertRaises(ValueError,
                          lambda: KNNDoc2VecClassifier(embedding_model=d2v))

        d2v = Doc2VecModel(size=10,
                           min_count=1,
                           dm=0,
                           alpha=0.025,
                           min_alpha=0.025)
        trainer = EmbeddingTrainer(d2v, model_storage=model_storage)
        trainer.train(corpus_generator, lookup=True)

        # KNNDoc2VecClassifier only supports doc2vec now
        self.assertRaises(NotImplementedError,
                          lambda: KNNDoc2VecClassifier(Word2VecModel()))

        doc = docs.split(',')[0].split()

        knn = KNNDoc2VecClassifier(embedding_model=d2v, k=0)
        self.assertRaises(ValueError, lambda: knn.predict_soc([doc]))

        knn = KNNDoc2VecClassifier(embedding_model=d2v, k=10)
        soc_cls = SocClassifier(knn)

        assert knn.predict_soc([doc])[0][0] == soc_cls.predict_soc([doc])[0][0]

        # Build Annoy index
        knn.build_ann_indexer(num_trees=5)
        assert isinstance(knn.indexer, AnnoyIndexer)

        # Save
        s3 = s3fs.S3FileSystem()
        model_storage.save_model(knn, knn.model_name)
        files = [f.split('/')[-1] for f in s3.ls(s3_path)]
        assert set(files) == set([knn.model_name])

        # Load
        new_knn = model_storage.load_model(knn.model_name)
        assert new_knn.model_name == knn.model_name
        assert new_knn.predict_soc([doc])[0][0] == '29-2061.00'

        # The Annoy index is not saved with the model, so it has to be rebuilt whenever the KNN model is loaded back into memory
        assert new_knn.indexer is None
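The Annoy step above maps to gensim's AnnoyIndexer; a minimal sketch (gensim 3.x import path, the annoy package must be installed, corpus is illustrative):

from gensim.models import Word2Vec
from gensim.similarities.index import AnnoyIndexer  # gensim.similarities.annoy in gensim 4.x

sentences = [["nurse", "clinic", "patient", "care"], ["media", "news", "report"]] * 25
w2v = Word2Vec(sentences, size=16, min_count=1, iter=5)

# Build a 5-tree Annoy index over the word vectors and use it for
# approximate nearest-neighbor queries instead of exact most_similar.
indexer = AnnoyIndexer(w2v, num_trees=5)
neighbors = w2v.wv.most_similar("nurse", topn=3, indexer=indexer)

More trees give more accurate neighbors at the cost of a larger index, and, as the final assertion above notes, the index is not pickled with the model and must be rebuilt after loading.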
Example #6
    def test_embedding_trainer_multicore_local(self, mock_getcwd):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        with tempfile.TemporaryDirectory() as td:
            mock_getcwd.return_value = td
            model_storage = ModelStorage(FSStore(td))
            job_postings_generator = JobPostingCollectionSample()
            corpus_generator = Word2VecGensimCorpusCreator(
                job_postings_generator,
                document_schema_fields=document_schema_fields)
            trainer = EmbeddingTrainer(FastTextModel(size=10,
                                                     min_count=3,
                                                     iter=4,
                                                     window=6,
                                                     workers=3),
                                       FastTextModel(size=10,
                                                     min_count=3,
                                                     iter=4,
                                                     window=10,
                                                     workers=3),
                                       Word2VecModel(size=10,
                                                     workers=3,
                                                     window=6),
                                       Word2VecModel(size=10,
                                                     min_count=10,
                                                     window=10,
                                                     workers=3),
                                       model_storage=model_storage)
            trainer.train(corpus_generator, n_processes=4)
            trainer.save_model()

            assert set(os.listdir(os.getcwd())) == set(
                [model.model_name for model in trainer._models])
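Training several models over one corpus, as EmbeddingTrainer does here, requires a restartable corpus: gensim makes one pass to build the vocabulary and one per epoch. A minimal sketch with plain gensim (3.x API, illustrative data):

from gensim.models import FastText, Word2Vec

# A list (or any object with __iter__) can be iterated repeatedly;
# a one-shot generator would be exhausted by the first model.
corpus = [["media", "news", "report"], ["sports", "team", "game"]] * 10

models = [FastText(size=10, min_count=1, window=6),
          Word2Vec(size=10, min_count=1, window=6)]
for model in models:
    model.build_vocab(corpus)
    model.train(corpus, total_examples=len(corpus), epochs=4)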
Example #7
    def test_combined_cls_local(self, mock_getcwd):
        with tempfile.TemporaryDirectory() as td:
            mock_getcwd.return_value = td
            model_storage = ModelStorage(FSStore(td))
            jobpostings = JobPostingCollectionSample()
            corpus_generator = Word2VecGensimCorpusCreator(jobpostings,
                                                           raw=True)
            w2v = Word2VecModel(size=10,
                                min_count=0,
                                alpha=0.025,
                                min_alpha=0.025)
            trainer = EmbeddingTrainer(w2v, model_storage=model_storage)
            trainer.train(corpus_generator, lookup=True)

            matrix = DesignMatrix(jobpostings, self.major_group, self.pipe_x,
                                  self.pipe_y)
            matrix.build()

            X = matrix.X
            rf = ProxyObjectWithStorage(RandomForestClassifier(), None, None,
                                        matrix.target_variable)
            rf.fit(X, matrix.y)

            proxy_rf = ProxyObjectWithStorage(rf, None, None,
                                              matrix.target_variable)
            # Remove the last step in pipe_x; the input of
            # predict_soc should be tokenized words
            new_pipe_x = self.pipe_x
            new_pipe_x.generators.pop()

            new_matrix = DesignMatrix(JobPostingCollectionSample(),
                                      self.major_group, new_pipe_x)
            new_matrix.build()
            ccls = CombinedClassifier(w2v, rf)
            assert len(ccls.predict_soc([new_matrix.X[0]])[0]) == 2
Example #8
    def test_embedding_trainer_word2vec_local(self, mock_getcwd):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]

        with tempfile.TemporaryDirectory() as td:
            mock_getcwd.return_value = td
            model_storage = ModelStorage(FSStore(td))
            job_postings_generator = JobPostingCollectionSample(num_records=30)
            corpus_generator = Word2VecGensimCorpusCreator(
                job_postings_generator,
                document_schema_fields=document_schema_fields)
            w2v = Word2VecModel(size=10,
                                min_count=3,
                                iter=4,
                                window=6,
                                workers=3)

            trainer = EmbeddingTrainer(corpus_generator, w2v, model_storage)
            trainer.train()
            trainer.save_model()

            vocab_size = len(w2v.wv.vocab.keys())

            assert w2v.model_name == trainer.model_name
            assert set(os.listdir(os.getcwd())) == set([trainer.model_name])

            # Test Online Training
            job_postings_generator = JobPostingCollectionSample(num_records=50)
            corpus_generator = Word2VecGensimCorpusCreator(
                job_postings_generator,
                document_schema_fields=document_schema_fields)

            w2v_loaded = model_storage.load_model(w2v.model_name)

            new_trainer = EmbeddingTrainer(corpus_generator, w2v_loaded,
                                           model_storage)
            new_trainer.train()
            new_trainer.save_model()

            new_vocab_size = len(w2v_loaded.wv.vocab.keys())

            assert set(os.listdir(os.getcwd())) == set(
                [trainer.model_name, new_trainer.model_name])
            assert new_trainer.metadata['embedding_trainer'][
                'model_name'] != trainer.metadata['embedding_trainer'][
                    'model_name']
            assert vocab_size <= new_vocab_size

            # Save as different name
            model_storage.save_model(w2v, 'other_name.model')
            assert set(os.listdir(os.getcwd())) == set([
                trainer.model_name, new_trainer.model_name, 'other_name.model'
            ])

            # Change the store directory
            new_path = os.path.join(td, 'other_directory')
            new_trainer.save_model(FSStore(new_path))
            assert set(os.listdir(new_path)) == set([new_trainer.model_name])
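The vocab_size <= new_vocab_size assertion reflects gensim's online-training mechanics, which the loaded model presumably goes through under the hood; a minimal sketch (gensim 3.x API, illustrative batches):

from gensim.models import Word2Vec

first_batch = [["media", "news", "report"]] * 10
second_batch = [["sports", "team", "game"]] * 10

w2v = Word2Vec(first_batch, size=10, min_count=1, iter=4)
old_vocab_size = len(w2v.wv.vocab)

# Extend the vocabulary with the new batch, then keep training from
# the existing weights.
w2v.build_vocab(second_batch, update=True)
w2v.train(second_batch, total_examples=len(second_batch), epochs=4)
assert len(w2v.wv.vocab) >= old_vocab_size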
Example #9
    def train_embedding(self):
        jobpostings = list(JobPostingCollectionSample())
        corpus_generator = Word2VecGensimCorpusCreator(jobpostings, raw=True)
        w2v = Word2VecModel(size=10, min_count=0, alpha=0.025, min_alpha=0.025)
        trainer = EmbeddingTrainer(corpus_generator, w2v)
        trainer.train(True)

        self.embedding_model = w2v
        self.jobpostings = jobpostings
Example #10
    def test_embedding_trainer_doc2vec_s3(self):
        client = boto3.client('s3')
        client.create_bucket(Bucket='fake-open-skills',
                             ACL='public-read-write')
        s3_path = "s3://fake-open-skills/model_cache/embedding"
        s3_storage = S3Store(path=s3_path)

        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=30)
        corpus_generator = Doc2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)
        d2v = Doc2VecModel(storage=s3_storage,
                           size=10,
                           min_count=3,
                           iter=4,
                           window=6,
                           workers=3)

        trainer = EmbeddingTrainer(corpus_generator, d2v)
        trainer.train(lookup=True)
        trainer.save_model()

        vocab_size = len(d2v.wv.vocab.keys())
        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(s3_path)]
        assert d2v.model_name == trainer.model_name
        assert set(files) == set([trainer.model_name])
        self.assertDictEqual(trainer.lookup_dict, d2v.lookup_dict)

        # Save as different name
        d2v.save('other_name.model')

        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(s3_path)]
        assert set(files) == set([trainer.model_name, 'other_name.model'])

        # Load
        d2v_loaded = Doc2VecModel.load(s3_storage, trainer.model_name)
        assert d2v_loaded.metadata['embedding_model']['hyperparameters'][
            'vector_size'] == trainer.metadata['embedding_model'][
                'hyperparameters']['vector_size']
        # Change the store directory
        new_s3_path = "s3://fake-open-skills/model_cache/embedding/other_directory"
        trainer.save_model(S3Store(new_s3_path))
        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(new_s3_path)]
        assert set(files) == set([trainer.model_name])
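The save/load round trip above presumably delegates to gensim's own persistence; the plain-gensim analogue, sketched against a local directory instead of S3:

import os
import tempfile
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

docs = [TaggedDocument(words=["media", "news", "report"], tags=[0]),
        TaggedDocument(words=["sports", "team", "game"], tags=[1])]
d2v = Doc2Vec(docs, size=10, min_count=1, iter=4)

with tempfile.TemporaryDirectory() as td:
    path = os.path.join(td, "doc2vec.model")
    d2v.save(path)
    loaded = Doc2Vec.load(path)
    assert loaded.wv.vector_size == d2v.wv.vector_size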
Example #11
    def test_embedding_trainer_fasttext_local(self, mock_getcwd):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        with tempfile.TemporaryDirectory() as td:
            mock_getcwd.return_value = td
            model_storage = ModelStorage(FSStore(td))
            job_postings_generator = JobPostingCollectionSample(num_records=30)
            corpus_generator = Word2VecGensimCorpusCreator(
                job_postings_generator,
                document_schema_fields=document_schema_fields)
            fasttext = FastTextModel(size=10,
                                     min_count=3,
                                     iter=4,
                                     window=6,
                                     workers=3)

            trainer = EmbeddingTrainer(fasttext, model_storage=model_storage)
            trainer.train(corpus_generator)
            trainer.save_model()

            vocab_size = len(fasttext.wv.vocab.keys())

            assert fasttext.model_name == trainer._models[0].model_name
            assert set(os.listdir(os.getcwd())) == set(
                [trainer._models[0].model_name])

            # Test Online Training
            job_postings_generator = JobPostingCollectionSample(num_records=50)
            corpus_generator = Word2VecGensimCorpusCreator(
                job_postings_generator,
                document_schema_fields=document_schema_fields)

            fasttext_loaded = model_storage.load_model(fasttext.model_name)
            new_trainer = EmbeddingTrainer(fasttext_loaded,
                                           model_storage=model_storage)
            new_trainer.train(corpus_generator)
            new_trainer.save_model()

            new_vocab_size = len(fasttext_loaded.wv.vocab.keys())

            assert set(os.listdir(os.getcwd())) == set([
                trainer._models[0].model_name,
                new_trainer._models[0].model_name
            ])
            assert new_trainer.metadata['embedding_trainer'][
                'models'] != trainer.metadata['embedding_trainer']['models']
            assert vocab_size <= new_vocab_size
Example #12
    def test_combined_cls_local(self, mock_getcwd):
        with tempfile.TemporaryDirectory() as td:
            mock_getcwd.return_value = td
            jobpostings = list(JobPostingCollectionSample())
            corpus_generator = Word2VecGensimCorpusCreator(jobpostings, raw=True)
            w2v = Word2VecModel(storage=FSStore(td), size=10, min_count=0, alpha=0.025, min_alpha=0.025)
            trainer = EmbeddingTrainer(corpus_generator, w2v)
            trainer.train(True)

            matrix = create_training_set(jobpostings, SOCMajorGroup())
            X = EmbeddingTransformer(w2v).transform(matrix.X)

            rf = RandomForestClassifier()
            rf.fit(X, matrix.y)
            ccls = CombinedClassifier(w2v, rf, matrix.target_variable)
            assert len(ccls.predict_soc([matrix.X[0]])[0]) == 2
Example #13
    def test_embedding_trainer_doc2vec_local(self, mock_getcwd):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]

        with tempfile.TemporaryDirectory() as td:
            mock_getcwd.return_value = td
            model_storage = ModelStorage(FSStore(td))

            job_postings_generator = JobPostingCollectionSample(num_records=30)
            corpus_generator = Doc2VecGensimCorpusCreator(
                job_postings_generator,
                document_schema_fields=document_schema_fields)
            d2v = Doc2VecModel(size=10,
                               min_count=3,
                               iter=4,
                               window=6,
                               workers=3)

            trainer = EmbeddingTrainer(d2v, model_storage=model_storage)
            trainer.train(corpus_generator, lookup=True)
            trainer.save_model()

            vocab_size = len(d2v.wv.vocab.keys())
            assert d2v.model_name == trainer._models[0].model_name
            assert set(os.listdir(os.getcwd())) == set(
                [trainer._models[0].model_name])
            self.assertDictEqual(trainer.lookup_dict, d2v.lookup_dict)

            # Save as different name
            model_storage.save_model(d2v, 'other_name.model')
            assert set(os.listdir(os.getcwd())) == set(
                [trainer._models[0].model_name, 'other_name.model'])

            # Load
            d2v_loaded = model_storage.load_model(
                trainer._models[0].model_name)
            assert d2v_loaded.metadata["embedding_model"][
                "model_type"] == list(
                    trainer.metadata["embedding_trainer"]
                    ['models'].values())[0]['embedding_model']['model_type']

            # Change the store directory
            new_path = os.path.join(td, 'other_directory')
            trainer.save_model(FSStore(new_path))
            assert set(os.listdir(new_path)) == set(
                [trainer._models[0].model_name])
Example #14
    def test_knn_doc2vec_cls_local(self, mock_getcwd):
        with tempfile.TemporaryDirectory() as td:
            mock_getcwd.return_value = td
            model_storage = ModelStorage(FSStore(td))
            corpus_generator = FakeCorpusGenerator()
            d2v = Doc2VecModel(size=10,
                               min_count=1,
                               dm=0,
                               alpha=0.025,
                               min_alpha=0.025)
            trainer = EmbeddingTrainer(d2v, model_storage=model_storage)
            trainer.train(corpus_generator, lookup=True)

            # KNNDoc2VecClassifier only supports doc2vec now
            self.assertRaises(NotImplementedError,
                              lambda: KNNDoc2VecClassifier(Word2VecModel()))

            doc = docs.split(',')[0].split()

            knn = KNNDoc2VecClassifier(embedding_model=d2v, k=0)
            self.assertRaises(ValueError, lambda: knn.predict_soc([doc]))

            knn = KNNDoc2VecClassifier(embedding_model=d2v, k=1)
            soc_cls = SocClassifier(knn)

            assert knn.predict_soc([doc])[0][0] == soc_cls.predict_soc([doc])[0][0]

            # Build Annoy index
            knn.build_ann_indexer(num_trees=5)
            assert isinstance(knn.indexer, AnnoyIndexer)

            # Save
            model_storage.save_model(knn, knn.model_name)
            assert set(os.listdir(os.getcwd())) == set([knn.model_name])
            assert isinstance(knn.indexer, AnnoyIndexer)

            # Load
            new_knn = model_storage.load_model(knn.model_name)
            assert new_knn.model_name == knn.model_name
            assert new_knn.predict_soc([doc])[0][0] == '29-2061.00'

            # The Annoy index is not saved with the model, so it has to be rebuilt whenever the KNN model is loaded back into memory
            assert new_knn.indexer is None
Example #15
def test_embedding_trainer():
    s3_conn = boto.connect_s3()
    bucket_name = 'fake-jb-bucket'
    bucket = s3_conn.create_bucket(bucket_name)

    job_posting_name = 'FAKE_jobposting'
    s3_prefix_jb = 'fake-jb-bucket/job_postings'
    s3_prefix_model = 'fake-jb-bucket/model_cache/embedding/'
    quarters = '2011Q1'

    with tempfile.TemporaryDirectory() as td:
        with open(os.path.join(td, job_posting_name), 'w') as handle:
            json.dump(sample_document, handle)
        upload(s3_conn, os.path.join(td, job_posting_name), os.path.join(s3_prefix_jb, quarters))


    # Doc2Vec
    trainer = EmbeddingTrainer(s3_conn=s3_conn, quarters=['2011Q1'], jp_s3_path=s3_prefix_jb, model_s3_path=s3_prefix_model, model_type='doc2vec')
    trainer.train()
    files = list_files(s3_conn, os.path.join(s3_prefix_model, 'doc2vec_gensim_' + trainer.training_time))
    assert len(files) == 3

    assert files == ['doc2vec_gensim_' + trainer.training_time + '.model',
                     'lookup_doc2vec_gensim_' + trainer.training_time + '.json',
                     'metadata_doc2vec_gensim_' + trainer.training_time + '.json']

    with tempfile.TemporaryDirectory() as td:
        trainer.save_model(td)
        assert set(os.listdir(td)) == set(['doc2vec_gensim_' + trainer.training_time + '.model',
                                           'lookup_doc2vec_gensim_' + trainer.training_time + '.json',
                                           'metadata_doc2vec_gensim_' + trainer.training_time + '.json'])

    # Word2Vec
    trainer = EmbeddingTrainer(s3_conn=s3_conn, quarters=['2011Q1'], jp_s3_path=s3_prefix_jb, model_s3_path=s3_prefix_model, model_type='word2vec')
    trainer.train()
    files = list_files(s3_conn, os.path.join(s3_prefix_model, 'word2vec_gensim_' + trainer.training_time))
    assert len(files) == 2
    assert files == ['metadata_word2vec_gensim_' + trainer.training_time + '.json',
                     'word2vec_gensim_' + trainer.training_time + '.model']

    new_trainer = EmbeddingTrainer(s3_conn=s3_conn, quarters=['2011Q1'], jp_s3_path=s3_prefix_jb, model_s3_path=s3_prefix_model, model_type='word2vec')
    new_trainer.load(trainer.modelname, s3_prefix_model)
    assert new_trainer.metadata['metadata']['hyperparameters'] == trainer.metadata['metadata']['hyperparameters']
Example #16
    def test_embedding_trainer_doc2vec_with_other(self):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=30)
        corpus_generator = Doc2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)

        trainer = EmbeddingTrainer(Doc2VecModel(), Word2VecModel(),
                                   FastTextModel())
        self.assertRaises(TypeError, lambda: trainer.train(corpus_generator))
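The TypeError presumably stems from the corpus format mismatch: a Doc2Vec corpus yields TaggedDocument items, while Word2Vec and FastText expect plain token lists, so one corpus generator cannot feed both model families:

from gensim.models.doc2vec import TaggedDocument

# What a Doc2Vec corpus yields per document:
doc2vec_item = TaggedDocument(words=["media", "news"], tags=["doc_0"])
# What Word2Vec and FastText expect per document:
word2vec_item = ["media", "news"]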
Example #17
    def test_visualize_in_tensorboard(self):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=50)
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)
        w2v = Word2VecModel(size=16, min_count=3, iter=4, window=6, workers=3)
        trainer = EmbeddingTrainer(w2v)
        trainer.train(corpus_generator)

        with tempfile.TemporaryDirectory() as td:
            with mock.patch('os.getcwd') as mock_getcwd:
                mock_getcwd.return_value = td
                visualize_in_tensorboard(w2v)

                assert len(
                    set(
                        os.listdir(
                            os.path.join(os.getcwd(),
                                         w2v.model_name.split('.')[0])))) == 7
Example #18
    def test_embedding_trainer_multicore_s3(self):
        client = boto3.client('s3')
        client.create_bucket(Bucket='fake-open-skills',
                             ACL='public-read-write')
        s3_path = "s3://fake-open-skills/model_cache/embedding"
        s3_storage = S3Store(path=s3_path)
        model_storage = ModelStorage(s3_storage)

        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample()
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)
        trainer = EmbeddingTrainer(FastTextModel(size=10,
                                                 min_count=3,
                                                 iter=4,
                                                 window=6,
                                                 workers=3),
                                   FastTextModel(size=10,
                                                 min_count=3,
                                                 iter=4,
                                                 window=10,
                                                 workers=3),
                                   Word2VecModel(size=10, workers=3, window=6),
                                   Word2VecModel(size=10,
                                                 min_count=10,
                                                 window=10,
                                                 workers=3),
                                   model_storage=model_storage)
        trainer.train(corpus_generator)
        trainer.save_model()

        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(s3_path)]
        assert set(files) == set(
            [model.model_name for model in trainer._models])
Example #19
    def test_skill_feature(self):
        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=30)
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields,
            raw=True)
        w2v = Word2VecModel(size=10, min_count=0, iter=4, window=6, workers=3)
        trainer = EmbeddingTrainer(w2v)
        trainer.train(corpus_generator)

        raw = RawCorpusCreator(JobPostingCollectionSample())
        raw1, raw2 = tee(raw)

        # default
        fc = SequenceFeatureCreator(raw1, embedding_model=w2v)
        self.assertEqual(
            fc.selected_features,
            ["StructuralFeature", "ContextualFeature", "EmbeddingFeature"])
        self.assertEqual(
            fc.all_features,
            ["StructuralFeature", "ContextualFeature", "EmbeddingFeature"])

        fc = iter(fc)
        self.assertEqual(
            next(fc).shape[0],
            np.array(
                next(iter(word_tokenizer_gen(
                    sentence_tokenizer_gen(raw2))))).shape[0])
        self.assertEqual(next(fc)[0].shape[0], 29)

        # Not Supported
        fc = SequenceFeatureCreator(raw1, features=["FeatureNotSupported"])
        fc = iter(fc)
        self.assertRaises(TypeError, lambda: next(fc))
Example #20
    def test_tester(self):
        document_schema_fields = ['description', 'experienceRequirements', 'qualifications', 'skills']
        corpus_generator = Word2VecGensimCorpusCreator(JobPostingCollectionSample(num_records=30), document_schema_fields=document_schema_fields)
        w2v = Word2VecModel(size=10, min_count=3, iter=4, window=6, workers=3)
        trainer = EmbeddingTrainer(w2v)
        trainer.train(corpus_generator)

        jp = JobPostingCollectionSample()
        train_gen = islice(jp, 30)
        test_gen = islice(jp, 30, None)
        train_matrix = DesignMatrix(train_gen, self.fullsoc, self.pipe_x, self.pipe_y)
        train_matrix.build()
        occ_trainer = OccupationClassifierTrainer(train_matrix, 2, grid_config=self.grid_config)
        occ_trainer.train(save=False)
        cc = CombinedClassifier(w2v, occ_trainer.best_estimators[0])

        steps = self.pipe_x.generators[:-1]

        test_gen = (t for t in test_gen if t['onet_soc_code'] != '')

        tester = OccupationClassifierTester(test_data_generator=test_gen, preprocessing=steps, classifier=cc)
        result = list(tester)

        assert len(tester) == len(result) == 18
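The islice split above relies on both slices drawing from the same underlying iterator, with the train slice consumed first. A minimal stdlib sketch of the idea, with stand-in numbers mirroring the 30/18 split asserted above:

from itertools import islice

records = iter(range(48))           # stand-in for the job posting iterable
train = list(islice(records, 30))   # consumes the first 30 records
test = list(islice(records, None))  # the 18 that remain
assert len(train) == 30 and len(test) == 18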
Example #21
    def test_embedding_trainer_word2vec_s3(self):
        client = boto3.client('s3')
        client.create_bucket(Bucket='fake-open-skills',
                             ACL='public-read-write')
        s3_path = "s3://fake-open-skills/model_cache/embedding"
        s3_storage = S3Store(path=s3_path)

        document_schema_fields = [
            'description', 'experienceRequirements', 'qualifications', 'skills'
        ]
        job_postings_generator = JobPostingCollectionSample(num_records=30)
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)
        w2v = Word2VecModel(storage=s3_storage,
                            size=10,
                            min_count=3,
                            iter=4,
                            window=6,
                            workers=3)

        trainer = EmbeddingTrainer(corpus_generator, w2v)
        trainer.train()
        trainer.save_model()

        vocab_size = len(w2v.wv.vocab.keys())

        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(s3_path)]
        assert w2v.model_name == trainer.model_name
        assert set(files) == set([trainer.model_name])

        # Test online training
        job_postings_generator = JobPostingCollectionSample(num_records=50)
        corpus_generator = Word2VecGensimCorpusCreator(
            job_postings_generator,
            document_schema_fields=document_schema_fields)

        w2v_loaded = Word2VecModel.load(s3_storage, w2v.model_name)

        new_trainer = EmbeddingTrainer(corpus_generator, w2v_loaded)
        new_trainer.train()
        new_trainer.save_model()

        new_vocab_size = len(w2v_loaded.wv.vocab.keys())

        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(s3_path)]
        assert set(files) == set([new_trainer.model_name, trainer.model_name])
        assert new_trainer.metadata['embedding_trainer'][
            'model_name'] != trainer.metadata['embedding_trainer']['model_name']
        assert vocab_size <= new_vocab_size

        # Save as different name
        w2v.save('other_name.model')

        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(s3_path)]
        assert set(files) == set(
            [trainer.model_name, new_trainer.model_name, 'other_name.model'])

        # Change the store directory
        new_s3_path = "s3://fake-open-skills/model_cache/embedding/other_directory"
        new_trainer.save_model(S3Store(new_s3_path))
        s3 = s3fs.S3FileSystem()
        files = [f.split('/')[-1] for f in s3.ls(new_s3_path)]
        assert set(files) == set([new_trainer.model_name])
Example #22
import sys
sys.path.append('../')

import boto
s3_conn = boto.connect_s3()
import multiprocessing
cores = multiprocessing.cpu_count()

import pandas as pd
from skills_utils.time import datetime_to_quarter

from skills_ml.algorithms.embedding.train import EmbeddingTrainer

def get_time_range(start='2011-01-01', freq='Q', periods=24):
    return [datetime_to_quarter(dt) for dt in pd.date_range(start=start, freq=freq, periods=periods)]

if __name__ == '__main__':
    time_range = get_time_range(start='2011-01-01', freq='Q', periods=24)

    trainer = EmbeddingTrainer(s3_conn=s3_conn,
                               quarters=time_range,
                               source='nlx',
                               jp_s3_path='open-skills-private/job_postings_common',
                               model_s3_path='open-skills-private/model_cache/embedding/',
                               batch_size=4000,
                               model_type='word2vec')

    # The train method accepts whatever arguments gensim.models.word2vec.Word2Vec or gensim.models.doc2vec.Doc2Vec accept
    trainer.train(size=100, iter=4, window=8, workers=cores)