def JohansenTest(self, instruments, quotes):
    # Build one price matrix with a column of adjusted closes per instrument.
    data = pd.DataFrame()
    for instrument in instruments:
        data[instrument] = quotes[instrument]["AdjClose"]
    # Johansen cointegration test: no deterministic trend, one lagged difference.
    jres = johansen.coint_johansen(data, 0, 1)
    # johansen.print_johan_stats(jres)
    return jres
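A minimal usage sketch (not from the original source), assuming the `johansen` module above is statsmodels' Johansen implementation and that `quotes` maps instrument names to DataFrames with an "AdjClose" column; `lr1`, `cvt` and `evec` are the trace statistics, their critical values and the cointegrating eigenvectors on the statsmodels result object.

import numpy as np
import pandas as pd
from statsmodels.tsa.vector_ar.vecm import coint_johansen

# Two synthetic, cointegrated price series standing in for real quotes.
rng = np.random.default_rng(0)
common = np.cumsum(rng.normal(size=250))
quotes = {
    "AAA": pd.DataFrame({"AdjClose": common + rng.normal(scale=0.5, size=250)}),
    "BBB": pd.DataFrame({"AdjClose": 2 * common + rng.normal(scale=0.5, size=250)}),
}

data = pd.DataFrame({name: q["AdjClose"] for name, q in quotes.items()})
jres = coint_johansen(data, det_order=0, k_ar_diff=1)

print(jres.lr1)   # trace statistics, one per cointegration rank hypothesis
print(jres.cvt)   # 90/95/99% critical values for the trace statistics
print(jres.evec)  # estimated cointegrating vectors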
Example #2
import json

import pandas as pd
import requests


def get_new_token_dp(cmp_id, g_db):
    # Refresh the stored AMO (amoCRM) OAuth token for the given company.

    old_token = g_db.get(f'select * from tokens where wf_cmp_id = {cmp_id}')
    # Flatten the single-row result into a plain {column: value} dict.
    d_token = {i: e[0] for i, e in dict(old_token).items()}

    url = d_token['amo_domain'] + 'oauth2/access_token'
    data = json.dumps({
        "client_id": d_token['client_id'],
        "client_secret": d_token['client_secret'],
        "grant_type": "refresh_token",
        "redirect_uri": d_token['redirect_uri'],
        "refresh_token": d_token['refresh_token']
    })

    token_new = json.loads(
        requests.post(url,
                      headers={
                          "Content-Type": "application/json"
                      },
                      data=data).text)

    # Persist the new access token only when it differs from the stored one,
    # then keep it in the dict that is returned to the caller.
    if d_token['token'] != token_new['access_token']:
        changes_pd = pd.DataFrame(
            [['token', "'" + token_new['access_token'] + "'", 'wf_cmp_id', cmp_id]],
            columns=[
                'changed_cols', 'changed_values', 'case_cols', 'case_vals'
            ])
        update_query = g_db.update_df_q(changes_pd, 'tokens')[0]
        g_db.put(update_query)
    d_token['token'] = token_new['access_token']

    # Same check for the refresh token.
    if d_token['refresh_token'] != token_new['refresh_token']:
        changes_pd = pd.DataFrame(
            [['refresh_token', "'" + token_new['refresh_token'] + "'",
              'wf_cmp_id', cmp_id]],
            columns=[
                'changed_cols', 'changed_values', 'case_cols', 'case_vals'
            ])
        update_query = g_db.update_df_q(changes_pd, 'tokens')[0]
        g_db.put(update_query)

    return d_token
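For reference, a standalone sketch of the refresh-token exchange the function performs, separated from the database bookkeeping; the domain and credentials below are placeholders, not values from the original source.

import json
import requests

d_token = {
    'amo_domain': 'https://example.amocrm.ru/',   # placeholder domain
    'client_id': '<client-id>',
    'client_secret': '<client-secret>',
    'redirect_uri': 'https://example.com/oauth',
    'refresh_token': '<current-refresh-token>',
}

resp = requests.post(
    d_token['amo_domain'] + 'oauth2/access_token',
    headers={"Content-Type": "application/json"},
    data=json.dumps({
        "client_id": d_token['client_id'],
        "client_secret": d_token['client_secret'],
        "grant_type": "refresh_token",
        "redirect_uri": d_token['redirect_uri'],
        "refresh_token": d_token['refresh_token'],
    }))
token_new = resp.json()  # expected to contain 'access_token' and 'refresh_token'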
Example #3
import pandas as pd
import pyodbc


def getdata(query):
    # `driver` is a module-level ODBC connection string defined elsewhere.
    conn = pyodbc.connect(driver)
    try:
        data = pd.read_sql(query, conn)
    except Exception:
        data = None
    conn.commit()
    conn.close()
    return data
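A hypothetical usage sketch: the ODBC connection string stands in for the module-level `driver` value the function expects, and the query and table names are made up.

import pandas as pd
import pyodbc

driver = (
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "SERVER=myserver;DATABASE=mydb;Trusted_Connection=yes;"
)

conn = pyodbc.connect(driver)
try:
    prices = pd.read_sql("SELECT TOP 10 * FROM quotes", conn)
finally:
    conn.close()
print(prices.head())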
Example #4
from tkinter.filedialog import askopenfilename

import pandas as pd


def import_csv_data(token):
    # `v` is a global tkinter StringVar that shows the chosen path in the UI.
    global v
    csv_file_path = askopenfilename()
    if csv_file_path == '':
        return
    v.set(csv_file_path)
    # Real headers sit on the second line; keep only rows with Record Type 1.
    df = pd.read_csv(csv_file_path, header=1)
    df = df[df['Record Type'] == 1].reset_index()

    upload_to_gamebus(token, df)
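The CSV handling in isolation, without the Tk dialog; the sample layout (a throwaway first line, real headers on the second line, and a 'Record Type' column) is assumed from the code above.

import io
import pandas as pd

sample = io.StringIO(
    "exported,by,device\n"           # junk first line the export tool writes
    "Record Type,Timestamp,Value\n"  # real header row
    "0,2021-01-01 10:00,\n"
    "1,2021-01-01 10:05,5.4\n"
    "1,2021-01-01 10:10,5.6\n"
)

df = pd.read_csv(sample, header=1)             # headers live on the second line
df = df[df['Record Type'] == 1].reset_index()  # keep only data records
print(df)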
Example #5
    def saveCSVFile(self, data):

        # Column labels written to the CSV header row.
        csv_column = ["---", "Position", "InTangent", "OutTangent"]

        my_df = pd.DataFrame(data)

        # Save the frame and report the outcome through the `grable` callback object.
        try:
            my_df.to_csv(self.fileCSV, index=False, header=csv_column)
            grable.broadcastError("Success")
        except IOError:
            grable.progressUpdate(-1)
            grable.broadcastError("Please close the csv file first")
Example #6
    def create_custom_fields(self, company_id):

        # Field names are the Russian labels shown in amoCRM.
        lead_data_to_post = [{
            'name': 'Спецификация',  # Specification
            'sort': 510,
            'type': 'textarea'
        }, {
            'name': 'Тип оплаты',  # Payment type
            'sort': 511,
            'type': 'text'
        }, {
            'name': 'Тип доставки',  # Delivery type
            'sort': 512,
            'type': 'text'
        }, {
            'name': 'Сделка на workface',  # Deal on workface
            'sort': 513,
            'type': 'text'
        }, {
            'name': 'Комментарий покупателя',  # Buyer's comment
            'sort': 514,
            'type': 'text'
        }]

        custom_flds = self.post_data(
            'leads/custom_fields',
            lead_data_to_post)['_embedded']['custom_fields']

        cnt_custom_to_post = [{
            'name': 'Телефон',  # Phone
            'sort': 515,
            'type': 'textarea'
        }, {
            'name': 'Email',
            'sort': 516,
            'type': 'textarea'
        }]
        custom_flds += self.post_data(
            'contacts/custom_fields',
            cnt_custom_to_post)['_embedded']['custom_fields']

        companies_custom_to_post = [{
            'name': 'Адрес',  # Address
            'sort': 515,
            'type': 'text'
        }, {
            'name': 'Реквизиты',  # Company/bank details
            'sort': 515,
            'type': 'text'
        }]
        custom_flds += self.post_data(
            'companies/custom_fields',
            companies_custom_to_post)['_embedded']['custom_fields']

        df_custom = pd.DataFrame(custom_flds)
        drop_columns = [
            'code', 'is_api_only', 'enums', 'request_id', 'required_statuses',
            'is_deletable', 'remind', '_links', 'group_id', 'is_predefined',
            'sort'
        ]
        df_custom = df_custom.drop(drop_columns, axis=1)

        df_custom['wf_company_id'] = company_id

        return df_custom
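The generic flatten-and-trim step in isolation, with a made-up response body standing in for the amoCRM '_embedded' → 'custom_fields' payload.

import pandas as pd

custom_flds = [
    {'id': 101, 'name': 'Спецификация', 'type': 'textarea', 'sort': 510, 'code': None},
    {'id': 102, 'name': 'Тип оплаты', 'type': 'text', 'sort': 511, 'code': None},
]

df_custom = pd.DataFrame(custom_flds)
df_custom = df_custom.drop(['code', 'sort'], axis=1)  # drop columns not stored locally
df_custom['wf_company_id'] = 42                       # hypothetical company id
print(df_custom)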
Example #7
import csv
from glob import glob

import numpy as np
import pandas as pd
from pandas import concat

# nancheck, nonZ and timeComp are project helpers/aliases defined elsewhere
# (NaN test, count of non-zero entries, and a datetime-like clock).


def select(clean, hierarchy):

    # Default factors; overridden by values from the Factor*.csv file when present.
    timeDecayF = 30  # volatile: quotes unchanged for more than ~30 days get killed
    stdDevTol = 1.28  # tolerance in number of standard deviations
    staleF = 10  # standard in Trovo

    factorImport = open(glob(r'\\xsqnfs2.nyc.ime.reuters.com\TRPS\Bank Loans\Auto 2.0\Factor*.csv')[0], 'r')
    readFactors = csv.reader(factorImport)
    for row in readFactors:
        if "Time" in row[0] and row[1] not in (None, "", " "):
            timeDecayF = float(row[1])
        if "Standard" in row[0] and row[1] not in (None, "", " "):
            stdDevTol = float(row[1])
        if "Stale" in row[0] and row[1] not in (None, "", " "):
            staleF = float(row[1])

    factorImport.close()
    
    hierarchyDict = {}
                
    for row in hierarchy.iterrows(): #inserting dealer rankings into dictionary
        hierarchyDict.update({row[1][0] : row[1][1]})
        
    finalFile = pd.DataFrame()
        
    clean.insert(len(clean.columns), 'DealerW', 0) #initializing dealer weight
    clean.insert(len(clean.columns), 'UnchangedW', 0) #initializing days unchanged weight
    clean.insert(len(clean.columns), 'StaleW', 0) #initializing stale weight
    
    clean.insert(len(clean.columns), 'Weight', 0) #initializing weights
    clean.insert(len(clean.columns), 'StdDev', 0) #initializing stdDev
    clean.insert(len(clean.columns), 'Calculated Bid', 0) #initializing avg bid
    clean.insert(len(clean.columns), 'Calculated Offer', 0) #initializing avg offer
    select = len(clean.columns)
    clean.insert(select, 'Select', 0) #initializing selection column
    staleIndex = clean.columns.tolist().index('Not Stale')
    clean.iloc[:, select] = clean.iloc[:, staleIndex] #copying over initial select values from stale values

    columns = clean.columns.tolist()
    stdDevIndex = columns.index('StdDev')
    weightIndex = columns.index('Weight')
    dealerIndex = columns.index('dealer')
    lastBidIndex = columns.index('last_bid')
    lastOfferIndex = columns.index('last_offer')
    colorIndex = columns.index('Color')
    unchangedIndex = columns.index('unchanged_for')
    calcBid = columns.index('Calculated Bid')
    calcOffer = columns.index('Calculated Offer')
    days_stale = columns.index('days_stale')
    dealerWIndex = columns.index('DealerW')
    unchangedWIndex = columns.index('UnchangedW')
    staleWIndex = columns.index('StaleW')
    bidDate = columns.index('last_bid_date')
        
    lins = clean.groupby('lin') #grouping quotes by their LINs
    
    hasColor = False #color is midpoint
    
    now = timeComp.now()     
    year = now.year
    month = now.month
    day = now.day
    
    for group, quotes in lins: #iterating through each LIN to do quote selection
        colorQuote = 0 #color quote price
        midpoint = 0 #midpoint price
        midpointW = 0 #midpoint weight
        stdDev = 0 #variable to get std dev
        hasColor = False
        numIncluded = 0 #number of quotes considered
        stdDevList = [] # keep track of bids to avoid double dataFrame loop
        tempFrame = pd.DataFrame()
        bestStaleBid = 0
        bestStaleAsk = 0
        bidPx = 0
        offerPx = 0
        stdDevDen = 0
        bestStaleW = 0
        
        if len(quotes) == 1: #if only quote, must use it
            for row in quotes.iterrows():
                data = row[1].to_frame().transpose() #skipping over index column
                data.iloc[0, select] = 1
                dealer = data.iloc[0, dealerIndex]
                lastBid = data.iloc[0, lastBidIndex]
                weight = 0
                dealerCheck = hierarchyDict.get(dealer) #dealer val
                staleVal = data.iloc[0, days_stale] #increment days stale (yesterday's data)
                daysStale = 0
                if nancheck(staleVal):
                    daysStale = 1
                else:
                    daysStale = staleVal
                if dealerCheck is None:
                    dealerW = 0.75
                else:
                    dealerW = 1 - (hierarchyDict.get(dealer) - 1)*.05 #dealer weighting
                unchVal = data.iloc[0, unchangedIndex] #increment days unchanged (yesterday's data)
                daysU = 0
                if nancheck(unchVal):
                    daysU = 1
                elif unchVal < daysStale: #error checking because Trovo isn't good
                    daysU = daysStale
                else:
                    daysU = unchVal #increment days unchanged (yesterday's data)
                updateW = 0
                if timeDecayF < 1:
                    date = data.iloc[0, bidDate]
                    if daysStale != 0:
                        updateW = 0.25
                    else:
                        # Minutes elapsed since the quote's timestamp.
                        diff = 0
                        diff += 60 * (now.hour - date.hour)
                        diff += now.minute - date.minute
                        updateW = (timeDecayF * 400) / (diff + timeDecayF * 400) #400 is max in a trading day, will act nicely
                elif daysU >= 8 * timeDecayF: #days unchanged cutoff
                    updateW = 0.2
                else: #days unchanged weighting
                    updateW = (3 * timeDecayF) / (daysU + (3 * timeDecayF))
                # TODO: need size weight
                staleW = (3 * staleF) /(daysStale + (3 * staleF)) #days stale weight
                weight =  dealerW * updateW * staleW
                data.iloc[0, dealerWIndex] = '{0:.5g}'.format(dealerW) #dealer weighting                    
                data.iloc[0, staleWIndex] = '{0:.5g}'.format(staleW) #stale weighting                    
                data.iloc[0, unchangedWIndex] = '{0:.5g}'.format(updateW) #unchanged weighting                    
                data.iloc[0, weightIndex] = '{0:.5g}'.format(weight) #final weighting
                tempFrame = concat([tempFrame, data.iloc[0, :].to_frame().transpose()])
                midpoint = data.iloc[0, lastBidIndex]
                bidPx = midpoint
                offerPx = data.iloc[0, lastOfferIndex]
        else:
            for row in quotes.iterrows(): #otherwise, iterate through all quotes and do selection process
                data = row[1].to_frame().transpose() #skipping over index column
                dealer = data.iloc[0, dealerIndex]
                lastBid = data.iloc[0, lastBidIndex]
                weight = 0
                if isinstance(dealer, float): #has color, mark it- dealer is NaN
                    data.iloc[0, lastOfferIndex] = data.iloc[0, lastBidIndex] #need to set offer price for calculations
                    if hasColor: #if two with color, make midpoint average of two
                        colorQuote = (colorQuote + lastBid) / 2
                    else:
                        hasColor= True
                        colorQuote = lastBid
                    weight = 1
                    data.iloc[0, colorIndex] = 1
                    data.iloc[0, dealerWIndex] = '{0:.5g}'.format(1) #dealer weighting                    
                    data.iloc[0, staleWIndex] = '{0:.5g}'.format(1) #stale weighting                    
                    data.iloc[0, unchangedWIndex] = '{0:.5g}'.format(1) #unchanged weighting                    
                    data.iloc[0, weightIndex] = '{0:.5g}'.format(weight) #final weighting 
                else:
                    dealerCheck = hierarchyDict.get(dealer) #dealer val
                    staleVal = data.iloc[0, days_stale] #increment days stale (yesterday's data)
                    daysStale = 0
                    if nancheck(staleVal):
                        daysStale = 1
                    else:
                        daysStale = staleVal
                    if dealerCheck is None:
                        dealerW = 0.75
                    else:
                        dealerW = 1 - (hierarchyDict.get(dealer) - 1)*.05 #dealer weighting
                    unchVal = data.iloc[0, unchangedIndex] #increment days unchanged (yesterday's data)
                    daysU = 0
                    if nancheck(unchVal):
                        daysU = 1
                        data.iloc[0, unchangedIndex] = 1
                    elif unchVal < daysStale: #error checking because Trovo isn't good
                        daysU = daysStale
                        data.iloc[0, unchangedIndex] = daysU
                    else:
                        daysU = unchVal #increment days unchanged (yesterday's data)
                    updateW = 0
                    if timeDecayF < 1:
                        try:
                            date, time, timeZone = data.iloc[0, bidDate].split(" ")
                            if int(date.split("-")[0]) != year or int(date.split("-")[1]) != month or int(date.split("-")[2]) != day:
                                updateW = 0.25
                            else:
                                diff = 0
                                time = time.split(":")
                                diff += 60 * (now.hour - int(time[0]))
                                diff += now.minute - int(time[1])
                                updateW = (timeDecayF * 400) / (diff + timeDecayF * 400) #400 is max in a trading day, will act nicely
                        except Exception:
                            updateW = 0.25  # no quote date, or it predates time tracking; treat as very old
                    elif daysU >= 8 * timeDecayF: #days unchanged cutoff
                        updateW = 0.2
                    else: #days unchanged weighting
                        updateW = (3 * timeDecayF) / (daysU + (3 * timeDecayF))
                    # TODO: need size weight
                    # TODO: if there is size, tighten std dev
                    staleW = (3 * staleF) /(daysStale + (3 * staleF)) #days stale weight
                    weight =  dealerW * updateW * staleW
                    data.iloc[0, dealerWIndex] = '{0:.5g}'.format(dealerW) #dealer weighting                    
                    data.iloc[0, staleWIndex] = '{0:.5g}'.format(staleW) #stale weighting                    
                    data.iloc[0, unchangedWIndex] = '{0:.5g}'.format(updateW) #unchanged weighting                    
                    data.iloc[0, weightIndex] = '{0:.5g}'.format(weight) #final weighting                    
                if data.iloc[0, select] == 1:
                    if weight > midpointW:
                        midpoint = lastBid
                        midpointW = weight
                    numIncluded += 1 #increment n
                    if weight > 0.6 or dealerW == 1:        
                        stdDevList.append(lastBid)
                        stdDevDen += 1
                if weight > bestStaleW:
                    bestStaleBid = lastBid
                    bestStaleAsk = data.iloc[0, lastOfferIndex]
                    bestStaleW = weight
                tempFrame = concat([tempFrame, data])
            if hasColor: #if color, auto midpoint
                midpoint = colorQuote
            if numIncluded == 0: #all stale
                if len(quotes) > 2:
                    numCalc = np.where(True, tempFrame['unchanged_for'], 0)
                    midpointUnch = min(numCalc)
                    # Midpoint: highest bid among the quotes with the fewest days unchanged.
                    midpointDraw = np.where(midpointUnch == tempFrame['unchanged_for'], tempFrame['last_bid'], 0)
                    midpoint = max(midpointDraw)
                    stdDev = 2
                    tempFrame.iloc[:, select] = np.where(abs(tempFrame['last_bid'] - midpoint) <= (stdDev*stdDevTol), 1, 0)
                    numCalc = np.where(tempFrame['Select'] == 1, tempFrame['last_bid'], 0)
                    numInc = nonZ(numCalc)
                    bidPx = sum(numCalc)/numInc
                    offerPx = sum(np.where(tempFrame['Select'] == 1, tempFrame['last_offer'], 0))/numInc
                else:
                    tempFrame.iloc[:, select] = np.where(abs(tempFrame['last_bid'] - midpoint) <= (stdDev*stdDevTol), 1, 0)
                    bidPx = bestStaleBid
                    offerPx = bestStaleAsk
                    midpoint = bestStaleBid
                    stdDev = 0
            else:
                # TODO: if there is size, tighten std dev; otherwise do everything below
                if stdDevDen == 0:
                    stdDev = .25 #make very tight parameter but allow for depth
                else:
                    stdDev = ((sum([(x - midpoint)**2 for x in stdDevList])/(stdDevDen))**(1/2))
                    if stdDev < 0.25:
                        stdDev = 0.25
                    if stdDev > 5.0:
                        if midpoint < 80:
                            stdDev = 5.0
                        else:
                            stdDev = 3.0
                    elif stdDev > 3.0:
                        if midpoint >= 80:
                            stdDev = 3.0
                # Select quotes whose bid sits within stdDevTol std devs of the midpoint and that are not stale.
                tempFrame.iloc[:, select] = np.where((abs(tempFrame['last_bid'] - midpoint) <= (stdDev*stdDevTol)) & (tempFrame['Not Stale'] == 1), 1, 0)
                numCalc = np.where(tempFrame['Select'] == 1, tempFrame['last_bid'], 0)
                numInc = nonZ(numCalc)
                if numInc < 3 and stdDev == 0.25:
                    # Too tight: re-select at the 0.25 std dev floor regardless of staleness to add depth.
                    tempFrame.iloc[:, select] = np.where(abs(tempFrame['last_bid'] - midpoint) <= (0.25*stdDevTol), 1, 0)
                    numCalc = np.where(tempFrame['Select'] == 1, tempFrame['last_bid'], 0)
                    numInc = nonZ(numCalc)
                bidPx = sum(numCalc)/numInc
                offerPx = sum(np.where(tempFrame['Select'] == 1, tempFrame['last_offer'], 0))/numInc
        tempFrame.iloc[:, stdDevIndex] = stdDev
        tempFrame.iloc[:, calcBid] = '%.3f' % bidPx
        tempFrame.iloc[:, calcOffer] = '%.3f' % offerPx
        finalFile = concat([finalFile, tempFrame])
    return finalFile
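A small numeric sketch of the weighting that select() applies to each quote: the dealer rank, days unchanged and days stale each map to a factor in (0, 1], and the final weight is their product (the values and ranks here are illustrative only).

timeDecayF = 30
staleF = 10

def quote_weight(dealer_rank, days_unchanged, days_stale):
    # dealer_rank=None models a dealer missing from the hierarchy file.
    dealer_w = 0.75 if dealer_rank is None else 1 - (dealer_rank - 1) * 0.05
    if days_unchanged >= 8 * timeDecayF:
        update_w = 0.2
    else:
        update_w = (3 * timeDecayF) / (days_unchanged + 3 * timeDecayF)
    stale_w = (3 * staleF) / (days_stale + 3 * staleF)
    return dealer_w * update_w * stale_w

print(quote_weight(1, 0, 0))    # fresh quote from the top-ranked dealer -> 1.0
print(quote_weight(5, 30, 5))   # mid-rank dealer, unchanged for a month -> ~0.51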
Example #8
    def get_for_tickers(self, tickers):
        _, from_date, till_date = self.parameterService.init_params(
            self.timeframe)

        table = []
        for ticker_record in tickers:
            ticker_name = ticker_record['name']
            ticker_id = ticker_record['id']
            if ticker_name in self.cache:
                ticker_data = self.cache[ticker_name]
            else:
                ticker_data = self.tickerRateService.get_rate(
                    ticker_name, from_date, till_date)
                # self.cache[ticker_name] = ticker_data  # enable the cache when debugging

            if ticker_data is not None:
                ticker_data = self.ticketAnalysisService.analyze_dataframe(
                    ticker_data)
                simmul_data = self.priceChangeSimulationService.get_simmulation_data(
                    ticker_data, weeks=self.weeks_to_simul)

                lr_data = self.linearRegressionSerice.calculate_slope_and_rsquare_kernel(
                    simmul_data["weeks"], simmul_data["prices"])

                slope_index = None
                if lr_data["slope"] > 0:
                    slope_index = 1
                if lr_data["slope"] < 0:
                    slope_index = -1

                last_index = 0
                if simmul_data['last'] > simmul_data['start']:
                    last_index = (simmul_data['last'] -
                                  simmul_data['start']) / simmul_data['start']
                elif simmul_data['last'] < simmul_data['start']:
                    last_index = (simmul_data['start'] -
                                  simmul_data['last']) / simmul_data['start']

                record = {
                    "id": ticker_id,
                    "ticker": ticker_name,
                    "start": simmul_data['start'],
                    "low": simmul_data['low'],
                    "median": simmul_data['median'],
                    "high": simmul_data['high'],
                    "last": simmul_data['last'],
                    "slope": lr_data["slope"],
                    "slope_index": slope_index,
                    "last_index": last_index,
                    "r_squared": lr_data["r_squared"],
                    "rs_measure": self.linearRegressionSerice.rsquare_group(
                        lr_data["r_squared"]),
                    "delete_link": "<a href='/stock_game/delete?id={}'>delete</a>".format(
                        ticker_id)
                }
                table.append(record)

        if len(tickers) > 0:
            df = pd.DataFrame.from_records(table)
            df = df.sort_values(by=['rs_measure', 'slope_index', 'last_index'],
                                ascending=[False, False, False])
            df = df.reset_index(drop=True)
            df = df.reset_index()
            # reindex_axis is gone from current pandas; reindex(columns=...) does the same here.
            df = df.reindex(columns=[
                "index", "ticker", "start", "high", "last", "slope",
                "r_squared", "delete_link"
            ])
            df["index"] += 1
            df.columns = [
                "rank", "ticker", "start", "high", "last", "slope",
                "r_squared", "delete_link"
            ]

        else:
            df = pd.DataFrame(data=[],
                              columns=[
                                  "rank", "ticker", "start", "high", "last",
                                  "slope", "r_squared", "delete_link"
                              ])

        return df
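The ranking step in isolation with made-up records; current pandas has no reindex_axis, so reindex(columns=...) is used, matching the adjustment above.

import pandas as pd

table = [
    {"ticker": "AAA", "start": 10.0, "high": 12.0, "last": 11.5, "slope": 0.4,
     "r_squared": 0.9, "rs_measure": 3, "slope_index": 1, "last_index": 0.15,
     "delete_link": ""},
    {"ticker": "BBB", "start": 20.0, "high": 21.0, "last": 19.0, "slope": -0.2,
     "r_squared": 0.5, "rs_measure": 2, "slope_index": -1, "last_index": 0.05,
     "delete_link": ""},
]

df = pd.DataFrame.from_records(table)
df = df.sort_values(by=["rs_measure", "slope_index", "last_index"],
                    ascending=[False, False, False])
df = df.reset_index(drop=True).reset_index()
df = df.reindex(columns=["index", "ticker", "start", "high", "last",
                         "slope", "r_squared", "delete_link"])
df["index"] += 1
df.columns = ["rank", "ticker", "start", "high", "last",
              "slope", "r_squared", "delete_link"]
print(df)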
Example #9
    def __init__(self):
        # dirname/join come from os.path, setlocale/LC_ALL from locale;
        # MorphAnalyzer is most likely pymorphy2's morphological analyzer.
        self.path = dirname(__file__)
        self.locale = setlocale(LC_ALL, 'ru_RU')
        self.morph = MorphAnalyzer()
        # 'Регистрация.xlsx' ("Registration") is read into {row index: row dict}.
        self.read_file = pd.read_excel(join(self.path, 'Регистрация.xlsx'), na_filter=False).to_dict('index')
        self.count = len(self.read_file) - 1
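The read-and-index step on its own; an in-memory frame stands in for the 'Регистрация.xlsx' workbook read above.

import pandas as pd

frame = pd.DataFrame({"name": ["Иванов", "Петров"], "status": ["ok", ""]})
records = frame.to_dict("index")   # {0: {'name': ..., 'status': ...}, 1: {...}}
count = len(records) - 1
print(records, count)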
Example #10
    integrated_client.append(dict(i[1]))

for client in integrated_client[:1]:
    g_db.reopen()
    client_json = client.copy()
    supplier_company_id = client_json['supplier_company_id']
    token = get_new_token_dp(supplier_company_id, g_db)
    amo_connect = get_AMO(token['token'], token['amo_domain'])
    dicts_amo = amo_connect.get_data(
        "account?with=pipelines,custom_fields,users")
    pips = amo_connect.pipiline_loc(dicts_amo['_embedded']['pipelines'])

    if 'Воронка WorkFace' not in [pips[i][0] for i in pips]:  # "WorkFace funnel"

        creat_states_map = amo_connect.creat_new_funnels2(supplier_company_id)
        df_funnles = pd.DataFrame(creat_states_map,
                                  columns=['wf_status', 'amo_fld_id', 'company_id'])
        qu_add_new_funnels = g_db.insert_pd(df_funnles, 'funnels')
        g_db.put(qu_add_new_funnels)
        fields_pd = amo_connect.create_custom_fields(supplier_company_id)
        qu_add_custom_fields = g_db.insert_pd(fields_pd, 'custom_fields')
        g_db.put(qu_add_custom_fields)
        names = ['Интернет-Магазин', 'Workface']  # "Online Store", "Workface" lead tags
        data = []
        for i in names:
            data.append({'name': i})
        tags_ids = amo_connect.post_data('leads/tags', data)
        tags_pd = pd.DataFrame(tags_ids['_embedded']['tags'])
        tags_pd = tags_pd.drop(columns=['request_id'])
        tags_pd['company_id'] = supplier_company_id
        q_tags = g_db.insert_pd(tags_pd, 'tags')
        g_db.put(q_tags)