Example #1
def bestPolyFit():
    import matplotlib.pyplot as plt
    data, headers = para.readFile('data_/3_D_SYSTEMS_CORP_DEL.csv')
    dates = data['Date']
    close = data['Close']
    #groups = grouping.groupUp(data, close)
    start = 230
    end = 379

    #dataList = para.averageLast(5)([close], len(close))
    dataList = close

    #group = groups[23]
    #coeffs = fitPoly(3)(dataList, group[0], group[1])
    
    plt.plot(dataList[start:end])
    bestRatio = 0
    bestDegree = -1
    lastValue = -1
    savedPlot = None
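    # Fit polynomials of increasing degree and keep the degree whose fit error
    # shows the largest relative improvement over the previous degree.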
    for i in range(2,20):
        coeffs = fitPoly(i)(dataList, start, end)

        p = polynomialFunction(coeffs)
        estimate = list(map(p, range(0,end-start)))

        score = similarity.lpNorms(2)(estimate, dataList[start:end])
        print('Degree ' + str(i) + ' score = ' + str(score))
        #plt.plot(estimate)
        if lastValue == -1:
            lastValue = score
        else:
            ratio = lastValue/score
            lastValue = score
            if ratio > bestRatio:
                savedPlot = estimate
                bestRatio = ratio
                bestDegree = i

    print('Best degree = ' + str(bestDegree) + ' with improvement ratio = ' + str(bestRatio))
    if savedPlot is not None:
        plt.plot(savedPlot)
    plt.show()
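
For reference, here is a minimal sketch of what the fitPoly and polynomialFunction helpers used above could look like, assuming numpy is available; the project's actual implementations may differ.

import numpy as np

def fitPoly(degree):
    # Returns a fitter that fits a polynomial of the given degree to
    # dataList[start:end], using 0..(end-start-1) as the x values,
    # matching how bestPolyFit evaluates the estimate.
    def fit(dataList, start, end):
        xs = np.arange(end - start)
        ys = np.asarray(dataList[start:end], dtype=float)
        return np.polyfit(xs, ys, degree)  # coefficients, highest degree first
    return fit

def polynomialFunction(coeffs):
    # Turns a coefficient vector into a callable p(x).
    poly = np.poly1d(coeffs)
    return lambda x: float(poly(x))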
Example #2
def testAlgo(algo, target):
#    global data
#    testAlgoOnData(algo, target, data['Close'])
#
#def testAlgoOnData(algo, target, dataList):
    dates = data['Date']
    groups = grouping.groupUp(data, data['Close'])

    targetNext = target+const.ma
    if targetNext >= len(groups):
        return None
    similarity._normalizeFuns = [similarity.byMean]
    similarity._measureFun = algo
    results = compareAllGroupsBefore(groups, target)
    results2 = compareAllGroupsBefore(groups, targetNext)
    
    results.reverse()
    results.sort(key=lambda x : x[2])
    results2.sort(key=lambda x : x[2])

    ### Uses Average Data: useOnlyAverageData = True
    #tradePolicy = tradingmeasure.dontSell
    #tradePolicy = tradingmeasure.sellOrKeep
    #tradePolicy = tradingmeasure.riskAverseSellOrKeep
    #tradePolicy = tradingmeasure.largestReturn

    ### Doesn't use Average Data: useOnlyAverageData = False
    tradePolicy = tradingmeasure.confidenceFilter(0.2, tradingmeasure.sellOrKeep)

    useOnlyAverageData = False

    totalRank = 0
    lpScore = 0
    nResults = 10
    for v in results[0:nResults]:
        rank = getRank(results2, v[0]+const.ma)
        totalRank += rank
        lpScore += similarity.computeWith(groups[v[0]+const.ma], groups[targetNext], [similarity.byFirst], similarity.lpNorms(2))
    
    dataLists = getDataLists(groups, results[0:nResults], const.ma)
    if useOnlyAverageData:
        dataLists = tradingmeasure.averageData(dataLists)

    money = tradingmeasure.computeWithFunOn(dataLists, groups[targetNext][2], tradePolicy)
    #print(money)
    totalRank *= 100        # normalize totalRank for equal weightage.
    totalRank /= len(results2) # normalize totalRank for equal weightage.

    return (lpScore/nResults, totalRank/nResults, money)
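
A minimal sketch of the getRank helper used above, assuming each entry in the score-sorted results list is a tuple whose first element is the group index; the project's real implementation may differ.

def getRank(sortedResults, groupIndex):
    # Position of the entry for groupIndex in the score-sorted results;
    # lower is better. Returns the worst possible rank if the index is absent.
    for rank, entry in enumerate(sortedResults):
        if entry[0] == groupIndex:
            return rank
    return len(sortedResults)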
Example #3
    #'frechet': similarity.tsdist('frechetDistance'), # (USE?) prints a lot of nonsense
    'inf.norm': similarity.tsdist('inf.normDistance'),
    'int.per': similarity.tsdist('int.perDistance'),
    'lbKeogh_3': similarity.tsdist('lb.keoghDistance', 3),
    'lcss_05': similarity.tsdist('lcssDistance', 0.05),
    'lcss_15': similarity.tsdist('lcssDistance', 0.15),
    'lcss_30': similarity.tsdist('lcssDistance', 0.3),
    'lcss_50': similarity.tsdist('lcssDistance', 0.5),
    'lp': similarity.tsdist('lpDistance'),
    'manhattan': similarity.tsdist('manhattanDistance'),
    'mindist.sax_1': similarity.tsdist('mindist.saxDistance',1),
    'mindist.sax_2': similarity.tsdist('mindist.saxDistance',2),
    'mindist.sax_4': similarity.tsdist('mindist.saxDistance',4),
    'mindist.sax_8': similarity.tsdist('mindist.saxDistance',8),
    'mindist.sax_16': similarity.tsdist('mindist.saxDistance',16),
    'minkowski_25': similarity.lpNorms(2.5), # also known as Lp norms; a sketch follows this dict
    'minkowski_30': similarity.lpNorms(3),
    'minkowski_05': similarity.lpNorms(0.5),
    #'ncd': similarity.tsdist('ncdDistance'),  # Unknown internal error
    'pacf': similarity.tsdist('pacfDistance'),
    'pdc': similarity.tsdist('pdcDistance'),
    'per': similarity.tsdist('perDistance'),
    #'pred': similarity.tsdist('predDistance'),
    #'spec.glk': similarity.tsdist('spec.glkDistance'), # {USE} SLOW. Also, I'm getting strange L-BFGS-B errors.
    #'spec.isd': similarity.tsdist('spec.isdDistance'), # {USE} SLOW. Also, I'm getting strange L-BFGS-B errors.
    'spec.llr': similarity.tsdist('spec.llrDistance'),
    'sts': similarity.tsdist('stsDistance'),
    'tquest': similarity.tsdist('tquestDistance', tau=0.5), #seems to do nothing...?
    'wav': similarity.tsdist('wavDistance'),
}
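
A minimal sketch of what similarity.lpNorms(p) could return, assuming it computes a plain Lp distance between two equal-length sequences; the project's own implementation may differ.

def lpNorms(p):
    # Returns a distance function comparing two equal-length sequences
    # via the Lp norm of their elementwise differences.
    def distance(xs, ys):
        return sum(abs(x - y) ** p for x, y in zip(xs, ys)) ** (1.0 / p)
    return distance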
def testAlgoWeighted(algo, target, weightDataFun, data):
    dates = data['Date']
    groupsWeighted = grouping.groupUp(data, weightDataFun(data))
    groupsClose = grouping.groupUp(data, data['Close'])

    targetNext = target+const.ma
    if targetNext >= len(groupsWeighted):
        return None
    similarity._normalizeFuns = [similarity.byMean]
    similarity._measureFun = algo
    results = testalgos.compareAllGroupsBefore(groupsWeighted, target)
    results2 = testalgos.compareAllGroupsBefore(groupsClose, targetNext)
    
    results.reverse()
    results.sort(key=lambda x : x[2])
    results2.sort(key=lambda x : x[2])

    tradePolicy = tradingmeasure.sellOrKeep
    usingOnlyAverageData = True

    totalRank = 0
    lpScore = 0
    nResults = 10

    for v in results[0:nResults]:
        rank = testalgos.getRank(results2, v[0]+const.ma)
        totalRank += rank
        lpScore += similarity.computeWith(groupsClose[v[0]+const.ma], groupsClose[targetNext], [similarity.byFirst], similarity.lpNorms(2))
    
    dataLists = testalgos.getDataLists(groupsClose, results[0:nResults], const.ma)
    if usingOnlyAverageData:
        dataLists = tradingmeasure.averageData(dataLists)

    money = tradingmeasure.computeWithFunOn(dataLists, groupsClose[targetNext][2], tradePolicy)
    #totalRank *= 100        # normalize totalRank for equal weightage.
    #totalRank /= len(results2) # normalize totalRank for equal weightage.

    return (lpScore/nResults, totalRank/nResults, money)
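
A hypothetical usage sketch for testAlgoWeighted; the weight function and target index below are illustrative assumptions, not values taken from the project.

def weightByFiveDayAverage(data):
    # Illustrative weighting: trailing five-day average of the close price.
    close = data['Close']
    return [sum(close[max(0, i - 4):i + 1]) / min(i + 1, 5) for i in range(len(close))]

result = testAlgoWeighted(similarity.lpNorms(2), 230, weightByFiveDayAverage, data)
if result is not None:
    lpScore, avgRank, money = result
    print('lp score:', lpScore, 'avg rank:', avgRank, 'money:', money)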