# Instantiate the visualizer with the Pearson ranking algorithm
visualizer = Rank2D(features=num_features, algorithm='pearson')
visualizer.fit(X)  # Fit the data to the visualizer
visualizer.transform(X)  # Transform the data
visualizer.poof(outpath="d://pcoords1.png")  # Render the figure (poof() is the legacy name for show())
plt.show()

# Step 9:  Compare variables against Survived and Not Survived
# set up the figure size
# %matplotlib inline
plt.rcParams['figure.figsize'] = (15, 7)
plt.rcParams['font.size'] = 50

# set up the colors for the yellowbrick visualizer
from yellowbrick.style import set_palette
set_palette('sns_bright')

# import packages
from yellowbrick.features import ParallelCoordinates
# Specify the features of interest and the classes of the target
classes = ['Not-survived', 'Survived']
num_features = ['Age', 'SibSp', 'Parch', 'Fare']

# copy data to a new dataframe
data_norm = data.copy()
# rescale the numeric features: center on the mean and divide by the range
# (mean-normalization, so values land roughly in [-1, 1] rather than a strict 0-1 range)
for feature in num_features:
    data_norm[feature] = (data[feature] - data[feature].mean(skipna=True)) / (
        data[feature].max(skipna=True) - data[feature].min(skipna=True))

# Extract the numpy arrays from the data frame
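# The original example is cut off here. A plausible continuation, following the
# standard Yellowbrick ParallelCoordinates pattern (the 'Survived' column and the
# variable names below are assumptions, not the author's code):
X = data_norm[num_features].values
y = data['Survived'].values

# Instantiate, fit and render the parallel coordinates visualizer
visualizer = ParallelCoordinates(classes=classes, features=num_features)
visualizer.fit_transform(X, y)
visualizer.show()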
Example #2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
import statsmodels.api as sm

from imblearn.over_sampling import SMOTE
from collections import Counter

from yellowbrick.classifier import ROCAUC
from yellowbrick.classifier import ConfusionMatrix
from yellowbrick.classifier import ClassificationReport
from yellowbrick.style.palettes import PALETTES, SEQUENCES, color_palette
color_palette(palette='flatui', n_colors=8)
from yellowbrick.style import set_palette
set_palette('pastel')

pd.set_option('display.max_colwidth', None)  # -1 is deprecated; None removes the width limit
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
sns.set_context('talk')
sns.set_style('ticks')
sns.set_palette('RdBu')
np.random.seed(42)

import warnings 
warnings.filterwarnings('ignore')
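
# The imports above pull in SMOTE and the Yellowbrick classifier visualizers, but this
# fragment never shows them in use. A minimal, hedged sketch of the usual wiring on a
# synthetic imbalanced dataset (everything below is illustrative, not the author's pipeline):
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X_demo, y_demo = make_classification(n_samples=500, weights=[0.9, 0.1], random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, stratify=y_demo, random_state=42)

print('before SMOTE:', Counter(y_tr))
X_res, y_res = SMOTE(random_state=42).fit_resample(X_tr, y_tr)  # oversample the minority class
print('after SMOTE:', Counter(y_res))

model = SVC(probability=True)  # probability=True lets ROCAUC use predicted probabilities
for Viz in (ConfusionMatrix, ClassificationReport, ROCAUC):
    viz = Viz(model)           # fit on the resampled data, score on the untouched test split
    viz.fit(X_res, y_res)
    viz.score(X_te, y_te)
    viz.show()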


def SMOTE_graph(target, title, x=6, y=2):
    """Create a graph showing the class distribution of a variable."""
Example #3
import numpy as np
from yellowbrick.target import FeatureCorrelation, ClassBalance
from yellowbrick.features import RadViz

from yellowbrick.classifier import ClassificationReport
from yellowbrick.classifier import ClassPredictionError
from yellowbrick.classifier import ConfusionMatrix

from sklearn.model_selection import GridSearchCV

from yellowbrick.model_selection import CVScores
from yellowbrick.model_selection import ValidationCurve
from yellowbrick.model_selection import LearningCurve

from yellowbrick.style import set_palette

set_palette('paired')


class Analytics(Model):
    """
    Class that inherits from the Model class and draws analytics for the model.
    Drawing methods are not documented with docstrings; only the methods that
    do not draw are.
    """
    def __init__(self, model, data=None, labels=None):
        super().__init__(model, np.array(data), np.array(labels))

    def draw_rad_viz(self):
        visualizer = RadViz(classes=self.le.classes_,
                            features=self.get_feature_labels(),
                            alpha=0.4)
        visualizer.fit(self.training_data, self.training_labels)
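        # The method is truncated in the original fragment; a likely continuation,
        # following the usual Yellowbrick pattern (not the author's code):
        visualizer.transform(self.training_data)
        visualizer.show()

    def draw_learning_curve(self):
        # Hypothetical companion method (not in the original) showing how the
        # LearningCurve visualizer imported above is typically wired up; it assumes
        # the base Model class exposes the wrapped estimator as `self.model`.
        visualizer = LearningCurve(self.model, scoring='f1_weighted')
        visualizer.fit(self.training_data, self.training_labels)
        visualizer.show()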
Example #4
# (fragment starts mid-file: visualizerJPV was instantiated earlier in the original source)
visualizerJPV.fit_transform(X, y)  # Fit and transform the data
# Finalize and render the figure
plt.ylabel('lambda_sigma', fontsize=14)
plt.xlabel('lambda_weight', fontsize=14)
locationFileNameJPV = os.path.join(
    '/home/ak/Documents/Research/Papers/figures',
    str(symbols[symbolIdx]) + '_idx_' + str(idx) + 'date' +
    str(dateIdx) + '_label' + str(labelName) +
    '_jointplotViz.png')
visualizerJPV.show(outpath=locationFileNameJPV)
plt.show()

# Instantiate the visualizer with the Pearson ranking algorithm
set_palette('sns_dark')
plt.figure()
visualizerR2D = Rank2D(features=features,
                       algorithm='pearson',
                       title='  ')

visualizerR2D.fit(X, y)  # Fit the data to the visualizer
visualizerR2D.transform(X)  # Transform the data
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
locationFileNameR2D = os.path.join(
    '/home/ak/Documents/Research/Papers/figures',
    str(symbols[symbolIdx]) + '_idx_' + str(idx) + '_label' +
    str(labelName) + '_date_' + str(dateIdx) +
    '_pearsonCorrel.png')
visualizerR2D.show(outpath=locationFileNameR2D)
Example #5
import streamlit as st
import matplotlib.pyplot as plt

from nltk.stem.wordnet import WordNetLemmatizer
import gensim
from gensim import corpora
import string
from sklearn.feature_extraction.text import CountVectorizer
from yellowbrick.style import set_palette
from gensim.parsing.preprocessing import remove_stopwords
import snscrape.modules.twitter as sntwitter
import nltk
nltk.download("stopwords")
nltk.download('punkt')
nltk.download('wordnet')

plt.rcParams['figure.figsize'] = (20.0, 20.0)
plt.rc('font', size=16)
set_palette('flatui')
st.markdown("<meta name='image' property='og:image' content='cool.jpg'>", unsafe_allow_html=True)

# Location = 'London, United Kingdom'
# Distance = '200mi'
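
# The gensim / NLTK imports above imply a tweet-cleaning step that is not shown in
# this fragment. A minimal sketch of such a helper, under that assumption
# (`clean_tweet` is a hypothetical name, not the author's function):
def clean_tweet(text):
    """Lowercase a tweet, strip punctuation, drop stopwords and lemmatize it."""
    lemmatizer = WordNetLemmatizer()
    text = text.lower().translate(str.maketrans('', '', string.punctuation))
    text = remove_stopwords(text)
    return ' '.join(lemmatizer.lemmatize(token) for token in text.split())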

# App start
def app():
    st.title("Stock Tweet Analyzer 📈")

    st.subheader("Analyze the tweets of your favorite stocks")
    
    st.subheader("Watch me first!")
    with open("apiUpdateVideo.webm", "rb") as video_file:
        video_bytes = video_file.read()
    st.video(video_bytes)