min_alpha=0.00025,
                min_count=1,
                dm=1,
                epochs=max_epochs,  # gensim's parameter is `epochs`, not `epoch`
                workers=multiprocessing.cpu_count())

# Build the vocabulary once over the tagged corpus
model.build_vocab([x for x in tqdm_notebook(tagged_data)])

# Manual epoch loop with alpha decay (tutorial pattern; gensim's docs
# recommend a single train() call with the full epoch count instead)
for epoch in tqdm_notebook(range(max_epochs)):
    model.train(tagged_data,
                total_examples=model.corpus_count,
                epochs=model.epochs)  # `model.iter` was removed in gensim 4.x
    # decrease the learning rate
    model.alpha -= 0.0002
    # fix the learning rate, no decay
    model.min_alpha = model.alpha

model.save("d2v.model")
print("Model Saved")

# In[376]:

# Re-tag the classic corpus. Keep a plain list of TaggedDocuments: wrapping
# them in pd.DataFrame would unpack each namedtuple into columns and break
# iteration over documents (with .words and .tags) downstream.
tagged_data = [
    TaggedDocument(words=word_tokenize(i.lower()), tags=[str(_d)])
    for i, _d in zip(classic_raw.values.ravel(), encoded_label_classic)
]

# In[377]:
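
# `vec_for_learning` is defined earlier in the notebook; the sketch below is
# only an assumption reconstructed from the call that follows (labels first,
# then feature vectors), inferring one vector per TaggedDocument:
def vec_for_learning(model, tagged_docs):
    targets, regressors = zip(*[(doc.tags[0], model.infer_vector(doc.words))
                                for doc in tagged_docs])
    return targets, regressors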

# Infer one feature vector per document and pair it with its label
y_train, X_train = vec_for_learning(model, tagged_data)

# In[378]: