Example #1
import random
import string

from flask import jsonify, request

from Get_Data import get_data


def add_numbers():
    # Read the request parameter as a string; the original passed a 0
    # default with type=str, so '0' keeps the types consistent.
    a = request.args.get('a', '0', type=str)
    # Random 6-character tag identifying this request's result set.
    hashvalue = ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(6))
    data = get_data(a, hashvalue)
    # Flatten the DataFrame into row tuples for the bulk insert.
    data = [tuple(x) for x in data.to_records(index=True)]
    # build_connection and run_sql are helpers defined elsewhere in the
    # source project.
    cur, conn = build_connection()
    run_sql(cur, conn, data)
    return jsonify(result=hashvalue)
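
The handler above is shown without its Flask app context; a minimal sketch of how such a view is typically registered (the app object and route path below are assumptions, not part of the original snippet):

from flask import Flask

app = Flask(__name__)

# Hypothetical registration; the original route path is not shown.
app.add_url_rule('/add_numbers', view_func=add_numbers)

if __name__ == '__main__':
    app.run(debug=True)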
Example #2

# -*- coding: utf-8 -*-
"""
@author: Asma Baccouche
"""

from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from Get_Data import get_data
from sklearn.cluster import KMeans, SpectralClustering
from nltk.cluster import KMeansClusterer, util
from gensim.models import Word2Vec
from sklearn import metrics
import numpy as np
from Data_helper import sent_vectorizer

# Load the titles, then keep both the raw and the tokenized form.
data, deps, Deps_Count = get_data()
sentences = [sentence for sentence in data['TITLE']]
s = [sentence.split() for sentence in data['TITLE']]

# Bag-of-words and TF-IDF representations of the titles.
vectorizer1 = CountVectorizer()
vectorizer2 = TfidfVectorizer()

X_TF = vectorizer1.fit_transform(sentences)
X_TFIDF = vectorizer2.fit_transform(sentences)

# KMeans on each representation.
kmeans1 = KMeans(n_clusters=8).fit(X_TF)
labels1 = kmeans1.labels_

kmeans2 = KMeans(n_clusters=8).fit(X_TFIDF)
labels2 = kmeans2.labels_

# The snippet is cut off mid-call here; the remaining arguments and the
# fit below are an assumed completion, not the author's original code.
SpectralClustering1 = SpectralClustering(n_clusters=8,
                                         assign_labels='discretize',
                                         random_state=0).fit(X_TF)
labels3 = SpectralClustering1.labels_
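
The otherwise unused imports (Word2Vec, KMeansClusterer, util, sent_vectorizer, and metrics for evaluation) suggest the truncated remainder clusters Word2Vec sentence vectors; a minimal sketch of that step, assuming sent_vectorizer(sentence, model) averages the word vectors of a tokenized sentence (an assumption about Data_helper, not the author's code):

# Train word vectors on the tokenized titles.
model = Word2Vec(s, min_count=1)
# sent_vectorizer is assumed to average a sentence's word vectors.
X_W2V = np.array([sent_vectorizer(sentence, model) for sentence in s])

# NLTK's k-means with cosine distance on the averaged vectors.
clusterer = KMeansClusterer(8, distance=util.cosine_distance, repeats=5)
labels4 = clusterer.cluster(X_W2V, assign_clusters=True)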
Example #3
import numpy as np

from Get_Data import get_data

# Absolute intraday return of each bar.
tick = get_data(1)
tick['return'] = abs(tick.close - tick.open) / tick.open

# Draw 5 daily moves from a normal distribution fitted to the last
# 90 bars' returns.
vol = np.random.normal(tick[-90:]['return'].mean(),
                       tick[-90:]['return'].std(), 5)

# Direction of each simulated day: +1 up, -1 down.
ud = [1, -1, 1, 1, -1]

price = 25.2
# Per-day gross returns, e.g. 1.02 for a +2% day.
vollist = abs(vol) * ud + 1
print(vollist)
# Compound the moves into a simulated price path.
print(price * np.cumprod(vollist))
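
The hard-coded ud list covers only one five-day up/down scenario; a minimal reusable sketch that also draws the directions at random (the function name and parameters are hypothetical, not part of the original):

def simulate_path(tick, price, days=5, window=90):
    # Fit the move size to the last `window` bars, draw random signs.
    r = tick[-window:]['return']
    vol = np.random.normal(r.mean(), r.std(), days)
    ud = np.random.choice([1, -1], size=days)
    return price * np.cumprod(abs(vol) * ud + 1)

print(simulate_path(tick, 25.2))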
Example #4
import numpy as np
import pandas as pd
import random
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

from Get_Data import get_data

## load data and compute the per-bar return used for labelling
all_data = get_data(1)
all_data['close_1'] = all_data.close.shift(-1)
all_data['open_1'] = all_data.open.shift(-1)
# Despite its name, 'stdev' holds each bar's intraday return,
# (close - open) / open; its quantiles set the label thresholds below.
all_data['stdev'] = (all_data.close - all_data.open) / all_data.open
all_data = all_data[:-1]
labeltest = []


def cal_label(shift_no=1):
    # One-hot labels for three classes: up, down, flat.  Appends to the
    # module-level labeltest list, so call it only once.
    for i in range(len(all_data)):
        if all_data.close.shift(-shift_no)[i] / all_data.open.shift(
                -shift_no)[i] > (all_data.stdev.quantile(.55) + 1.000):
            labeltest.append([1, 0, 0])  # up
        elif all_data.close.shift(-shift_no)[i] / all_data.open.shift(
                -shift_no)[i] < (all_data.stdev.quantile(.45) + 1.000):
            labeltest.append([0, 1, 0])  # down
        else:
            labeltest.append([0, 0, 1])  # flat
    all_data['label'] = pd.Series(labeltest, index=all_data.index)
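
A quick usage check; the call and the class count below are mine, not part of the snippet:

cal_label()
# Column-wise sums of the one-hot rows give the class counts in the
# order [up, down, flat].
print(np.array(labeltest).sum(axis=0))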
Example #5
from Get_Data import get_data
from sklearn import preprocessing
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, LSTM
import keras


raw_data = get_data(1)
raw_data = raw_data[['open', 'high', 'low', 'volume', 'close']]
raw_data.dropna(how='any', inplace=True)


## data scaler
def normalize(df):
    # Min-max scale every column to [0, 1].  The original scaled
    # raw_data inside the function instead of the df argument; using
    # df fixes that bug.
    new_data = df.copy()
    min_max_scaler = preprocessing.MinMaxScaler()
    for col in new_data.columns:
        new_data[col] = min_max_scaler.fit_transform(
            df[col].values.reshape(-1, 1))
    return new_data


## build the scaled feature matrix
new_data = normalize(raw_data)
No_feature = len(new_data.columns)
# DataFrame.as_matrix() has been removed from pandas; to_numpy() is
# the modern equivalent.
new_matrix = new_data.to_numpy()
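
The Sequential/LSTM imports suggest the truncated remainder windows new_matrix into sequences and trains an LSTM; a minimal sketch under assumed hyper-parameters (the window length, layer sizes, and the use of the scaled close as the target are all assumptions):

# Each sample is `window` consecutive bars; the target is the next
# bar's scaled close (the last column of new_matrix).
window = 20
X = np.array([new_matrix[i:i + window]
              for i in range(len(new_matrix) - window)])
y = new_matrix[window:, -1]

model = Sequential()
model.add(LSTM(64, input_shape=(window, No_feature)))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='adam')
model.fit(X, y, epochs=10, batch_size=32)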