Example #1
# Sentiment-based features: the _df* frames are written to sentiment_test_csv,
# the df* frames to sentiment_csv.
model = Sentiment()
model.process(_df1, _df2, _df, custom_path.format('sentiment_test_csv'))
model.process(df1, df2, df, custom_path.format('sentiment_csv'))
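
# The Sentiment class is project-specific and not shown in this snippet. As a
# rough sketch, comparable headline/body sentiment scores could be produced
# with NLTK's VADER analyzer; this is an assumed stand-in, not the repo's
# implementation.
import nltk
import pandas as pd
from nltk.sentiment.vader import SentimentIntensityAnalyzer

nltk.download('vader_lexicon')

def sentiment_scores(headlines, bodies, out_path):
    # Compound polarity for each headline/body pair, written out as a CSV.
    sia = SentimentIntensityAnalyzer()
    rows = [{'headline_polarity': sia.polarity_scores(h)['compound'],
             'body_polarity': sia.polarity_scores(b)['compound']}
            for h, b in zip(headlines, bodies)]
    pd.DataFrame(rows).to_csv(out_path, index=False)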

# Word Mover's Distance features, written to wmd_test_csv and wmd_csv for the
# same two sets of frames.
model = WMD()
model.process(_df1, _df2, _df, custom_path.format('wmd_test_csv'))
model.process(df1, df2, df, custom_path.format('wmd_csv'))
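
# WMD is likewise a project class; the Word Mover's Distance it presumably
# wraps is available directly in gensim. A sketch assuming a pre-trained
# word2vec model (the embedding file name below is illustrative only):
from gensim.models import KeyedVectors

def wmd_distance(headline_tokens, body_tokens, embeddings):
    # Word Mover's Distance between a tokenised headline and body.
    return embeddings.wmdistance(headline_tokens, body_tokens)

# embeddings = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
# wmd_distance('police find mass graves'.split(), 'officials deny the report'.split(), embeddings)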

# Load the precomputed count features from Drive and join them to the main
# frame on Headline ID.
import pandas as pd

custom_path = '/content/drive/My Drive/Stance Detection/custom_data/{}.csv'
count_features = pd.read_csv(custom_path.format('count_csv'))
count_features = pd.merge(df, count_features, on='Headline ID', how='inner')
count_features.head()

# Count features for df, written to count_test_csv; CountVectorizer here is
# presumably a project class (scikit-learn's has no count_features method).
cv = CountVectorizer()
cv.count_features(df, custom_path.format('count_test_csv'))
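
# A sketch of comparable bag-of-words counts using scikit-learn's own
# CountVectorizer; the 'Headline' and 'Body' column names are assumptions.
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer as SkCountVectorizer

def count_feature_table(frame, out_path):
    # Combined headline+body token counts for each pair, written to a CSV.
    vec = SkCountVectorizer(stop_words='english', max_features=5000)
    vec.fit(pd.concat([frame['Headline'], frame['Body']]))
    counts = vec.transform(frame['Headline']) + vec.transform(frame['Body'])
    pd.DataFrame(counts.toarray(), columns=vec.get_feature_names_out()).to_csv(out_path, index=False)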

# Libraries for the classification stage: XGBoost and AdaBoost models,
# evaluation metrics, plotting, and the NLTK resources used for POS tagging
# and WordNet lookups.
import string

import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
import tqdm
import xgboost as xgb
from nltk.corpus import wordnet
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import plot_tree

nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
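
# These imports set up the classification stage. A sketch of feeding the
# merged feature table into an XGBoost classifier; the 'Stance' label column
# and the numeric-feature selection are assumptions, not the repo's code.
from sklearn.preprocessing import LabelEncoder

X = count_features.select_dtypes(include='number').drop(columns=['Headline ID'], errors='ignore')
y = LabelEncoder().fit_transform(count_features['Stance'])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

clf = xgb.XGBClassifier(n_estimators=200, max_depth=6, learning_rate=0.1)
clf.fit(X_train, y_train)
print('accuracy:', accuracy_score(y_test, clf.predict(X_test)))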