from math import floor


def draw_line(x0, y0, x1, y1, screen, color):
    # Midpoint-subdivision line drawing: plot the midpoint, then recurse
    # on the two halves until a segment degenerates.
    if x0 == x1 or y0 == y1:
        return
    midX = floor((x1 + x0) / 2)
    midY = floor((y1 + y0) / 2)
    if midX == x1 or midY == y1:
        return
    # Nudge a degenerate half forward so the recursion keeps making progress.
    if midX == x0 and midX != x1:
        x0 += 1
        midX += 1
    if midY == y0 and midY != y1:  # was `midY == x0 and midY != x1`, a copy-paste slip
        y0 += 1
        midY += 1
    plot(screen, color, midX, midY)
    draw_line(x0, y0, midX, midY, screen, color)
    draw_line(midX, midY, x1, y1, screen, color)
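# The drawing routines in this file call a `plot` helper that is not defined
# here; a minimal sketch, assuming `screen` is a row-major list of rows and
# `color` is any pixel value (both are assumptions, not the project's API):
def plot(screen, color, x, y):
    # Set pixel (x, y), silently ignoring out-of-bounds coordinates.
    if 0 <= y < len(screen) and 0 <= x < len(screen[0]):
        screen[y][x] = color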
def train(self, fImgs, fLbls, fIterations, fName):
    '''Training algorithm. Can evolve according to your needs.

    INPUT  : Images set, labels set (None for autoencoders),
             number of iterations before stopping, name for save
    OUTPUT : Nothing'''

    if PREPROCESSING:
        fImgs, _key = ld.normalization(fName, fImgs)

    print "Training...\n"

    _gcost = []
    _gtime = []
    _gperf = []

    _done = fIterations

    for i in xrange(fIterations):

        _gtime.append(tm.time())
        _gcost.append(0)
        _gperf.append(0)

        for j in xrange(self.mCycle):

            _trn, _tst = self.cross_validation(j, fImgs, fLbls)

            for k in xrange(len(_trn[0]) / self.mBatchSize):

                if DEBUG:
                    print "Learning rates :", self.mEpsilon
                    print "Momentums :", self.mMomentum

                # Input and labels batch
                _in, _lbls = self.build_batch(k, _trn[0], _trn[1])

                # Activation propagation
                _out = self.propagation(_in, DROPOUT)

                # Local error for each layer
                _err = self.layer_error(_out, _lbls, SPARSITY)

                # Gradient for stochastic gradient descent
                _wGrad, _bGrad = self.gradient(_err, _out)

                # Gradient checking
                if GRAD_CHECK:
                    print "Gradient checking ..."
                    self.gradient_checking(_in, _in, _wGrad, _bGrad)

                # Adapt learning rate
                if (i > 0 or j > 0 or k > 0) and ANGLE_DRIVEN:
                    self.angle_driven_approach(_wGrad)

                # Weight variations
                self.variations(_wGrad)

                # Update weights and biases
                self.update(_bGrad)

                # Adapt learning rate
                if AVG_GRADIENT:
                    self.average_gradient_approach(_wGrad)

            # Global cost and perf update in a cycle
            _cost, _perf = self.evaluate(_tst[0], _tst[1])
            _gcost[i] += _cost
            _gperf[i] += _perf

            if DEBUG:
                print "Cost :", _cost

        # Iteration information
        _gtime[i] = tm.time() - _gtime[i]
        print "Iteration {0} in {1}s".format(i, _gtime[i])

        # Global cost for one cycle
        _gcost[i] /= self.mCycle
        print "Cost of iteration : {0}".format(_gcost[i])

        # Global perf for one cycle
        _gperf[i] /= self.mCycle
        print "Current performances : {0}".format(_gperf[i])

        # Parameters
        print "Epsilon {0} Momentum {1}\n".format(self.mEpsilon, self.mMomentum)

        # Stop condition
        if i > 0 and abs(_gcost[i - 1] - _gcost[i]) < CONVERGENCE:
            _done = i + 1
            break

    dy.plot(xrange(_done), _gcost, fName, "_cost.png")
    dy.plot(xrange(_done), _gperf, fName, "_perf.png")
    dy.plot(xrange(_done), _gtime, fName, "_time.png")
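# train() consults several module-level switches defined elsewhere in the
# project; a minimal sketch of plausible defaults, inferred only from how the
# function uses them (every value below is an assumption):
PREPROCESSING = False   # normalize images via ld.normalization() first
DEBUG         = False   # verbose per-batch logging
GRAD_CHECK    = False   # numerical gradient checking (slow)
ANGLE_DRIVEN  = True    # angle-driven learning-rate adaptation
AVG_GRADIENT  = False   # average-gradient learning-rate adaptation
DROPOUT       = 0.5     # dropout rate passed to propagation()
SPARSITY      = 0.05    # sparsity target passed to layer_error()
CONVERGENCE   = 0.001   # stop when the cost change falls below this

# It also assumes project-level imports along the lines of:
#   import time as tm
#   import loader as ld    # hypothetical names for the ld/dy modules
#   import display as dy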
from input import read_data
from segment_detector import detect_segments
from arc_detector import detect_arcs
from display import plot

import tkinter as tk
from tkinter import filedialog

root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()

data = read_data(file_path)
segments, filtered_data = detect_segments(data)
# plot(filtered_data, segments)
arcs, filtered_data = detect_arcs(filtered_data)
plot(data, segments, arcs)
import client
import pandas as pd
import numpy as np
import display as d

df = pd.read_csv('./equities.csv', index_col='symbol')

for index, item in df.iterrows():
    symbol = item.name
    equity = client.get_last(symbol)

    # Normalize prices against the period's low/high range.
    norm_equity = equity.drop(['date', 'volume'], axis=1)
    min_equity = norm_equity['low'].min()
    max_equity = norm_equity['high'].max()
    norm_equity = (norm_equity - min_equity) / (max_equity - min_equity)

    # Normalize volume separately against its own min/max.
    norm_volume = equity.drop(['date', 'open', 'high', 'low', 'close'], axis=1)
    min_volume = norm_volume['volume'].min()
    max_volume = norm_volume['volume'].max()
    norm_volume = (norm_volume - min_volume) / (max_volume - min_volume)

    d.plot(symbol, norm_equity, norm_volume)

print("Done.")
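# `client` is the project's own data module. A minimal sketch of what
# get_last() is assumed to return, inferred from the columns the
# normalization code drops (frame contents and length are made up; the later
# prediction script reshapes to 10 rows, hence periods=10):
import pandas as pd


def get_last(symbol):
    # Hypothetical: most recent daily bars for `symbol` as a DataFrame
    # with date/open/high/low/close/volume columns.
    return pd.DataFrame({
        'date':   pd.date_range('2018-01-02', periods=10),
        'open':   [100.0] * 10,
        'high':   [101.0] * 10,
        'low':    [99.0] * 10,
        'close':  [100.5] * 10,
        'volume': [1000000] * 10,
    })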
#!/usr/bin/env python
import numpy
import random
import time

import display


def generate_image():
    X, Y = numpy.meshgrid(numpy.linspace(0, numpy.pi, 512),
                          numpy.linspace(0, 2, 512))
    z = (numpy.sin(X) + numpy.cos(Y)) ** 2 + 0.5
    return z


i1 = generate_image()
i2 = generate_image()

display.image(i1, title='gradient')
# display.images([i2, i2, i2, i2], width=200, title='super fabio', labels=['a', 'b', 'c', 'd'])

data = []
for i in range(15):
    data.append([i, random.random(), random.random() * 2])
win = display.plot(data, labels=['position', 'a', 'b'], title='progress')

for i in range(15, 25):
    time.sleep(0.2)
    data.append([i, random.random(), random.random() * 2])
    display.plot(data, win=win)
def train(self, fImgs, fLbls, fIterations, fName):
    '''Training algorithm. Can evolve according to your needs.

    INPUT  : Images set, labels set (None for autoencoders),
             number of iterations before stopping, name for save
    OUTPUT : Nothing'''

    if PREPROCESSING:
        fImgs, _key = ld.normalization(fName, fImgs)

    print "Training...\n"

    _gcost = []
    _gtime = []

    _done = fIterations

    for i in xrange(fIterations):

        _gtime.append(tm.time())
        _gcost.append(0)

        for j in xrange(self.mCycle):

            _trn, _tst = self.cross_validation(j, fImgs)

            for k in xrange(len(_trn) / self.mBatchSize):

                if DEBUG:
                    print "Learning rates :", self.mEpsilon
                    print "Momentums :", self.mMomentum

                # Inputs batch (autoencoder: the input is its own target)
                _in = self.build_batch(k, _trn)

                # Activation propagation
                _out = self.propagation(_in, DROPOUT)

                # Local error for each layer
                _err = self.layer_error(_out, _in, SPARSITY)

                # Gradient for stochastic gradient descent
                _wGrad, _bGrad = self.gradient(_err, _out)

                # Gradient checking
                if GRAD_CHECK:
                    print "Gradient checking ..."
                    self.gradient_checking(_in, _in, _wGrad, _bGrad)

                # Adapt learning rate
                if (i > 0 or j > 0 or k > 0) and ANGLE_DRIVEN:
                    self.angle_driven_approach(_wGrad)

                # Weight variations
                self.variations(_wGrad)

                # Update weights and biases
                self.update(_bGrad)

                # Adapt learning rate
                if AVG_GRADIENT:
                    self.average_gradient_approach(_wGrad)

            # Evaluate the network
            _cost = self.evaluate(_tst)
            _gcost[i] += _cost

            if DEBUG:
                print "Cost :", _cost

        # Iteration information
        _gtime[i] = tm.time() - _gtime[i]
        print "Iteration {0} in {1}s".format(i, _gtime[i])

        # Global cost for one cycle
        _gcost[i] /= self.mCycle
        print "Cost of iteration : {0}".format(_gcost[i])

        # Parameters
        print "Epsilon {0} Momentum {1}\n".format(self.mEpsilon, self.mMomentum)

        # Stop condition
        if i > 0 and abs(_gcost[i - 1] - _gcost[i]) < 0.001:
            _done = i + 1
            break
        elif self.mStop:
            _done = i + 1
            break

    dy.plot(xrange(_done), _gcost, fName, "_cost.png")
    dy.plot(xrange(_done), _gtime, fName, "_time.png")

    if fName is not None:
        self.save_output(fName, "train", fImgs)
def draw_line(screen, x0, y0, x1, y1, color):
    # Integer Bresenham-style line drawing covering all octants.
    dx = x1 - x0
    dy = y1 - y0
    # Reorder the endpoints so the main displacement is non-negative.
    if dx + dy < 0:
        dx = -dx
        dy = -dy
        x0, x1 = x1, x0
        y0, y1 = y1, y0
    if dx == 0:
        # Vertical line.
        y = y0
        while y <= y1:
            plot(screen, color, x0, y)
            y = y + 1
    elif dy == 0:
        # Horizontal line.
        x = x0
        while x <= x1:
            plot(screen, color, x, y0)
            x = x + 1
    elif dy < 0:
        # Shallow negative slope: step in x, occasionally step y down.
        d = 0
        x = x0
        y = y0
        while x <= x1:
            plot(screen, color, x, y)
            if d > 0:
                y = y - 1
                d = d - dx
            x = x + 1
            d = d - dy
    elif dx < 0:
        # Steep negative slope: step in y, occasionally step x down.
        d = 0
        x = x0
        y = y0
        while y <= y1:
            plot(screen, color, x, y)
            if d > 0:
                x = x - 1
                d = d - dy
            y = y + 1
            d = d - dx
    elif dx > dy:
        # Shallow positive slope: step in x, occasionally step y up.
        d = 0
        x = x0
        y = y0
        while x <= x1:
            plot(screen, color, x, y)
            if d > 0:
                y = y + 1
                d = d - dx
            x = x + 1
            d = d + dy
    else:
        # Steep positive slope: step in y, occasionally step x up.
        d = 0
        x = x0
        y = y0
        while y <= y1:
            plot(screen, color, x, y)
            if d > 0:
                x = x + 1
                d = d - dy
            y = y + 1
            d = d + dx
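# A hedged usage sketch for the Bresenham-style draw_line above, reusing the
# minimal plot/screen assumptions from earlier (the dimensions and the
# character "colors" are made up for illustration):
if __name__ == '__main__':
    WIDTH, HEIGHT = 80, 24
    screen = [[' '] * WIDTH for _ in range(HEIGHT)]

    draw_line(screen, 0, 0, WIDTH - 1, 0, '#')           # horizontal
    draw_line(screen, 0, 0, 0, HEIGHT - 1, '#')          # vertical
    draw_line(screen, 0, 0, WIDTH - 1, HEIGHT - 1, '*')  # shallow, dy > 0
    draw_line(screen, 0, HEIGHT - 1, WIDTH - 1, 0, '+')  # shallow, dy < 0

    for row in screen:
        print(''.join(row))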
state_win = 1
l_stats = []
w_stats = []
b_stats = []

for t in range(500):
    W.grad.data.zero_()
    b.grad.data.zero_()

    y_pred = torch.mm(W, x_data)
    y_pred += b.unsqueeze(0).expand_as(y_pred)
    loss = ((y_pred - y_data) ** 2).mean()
    loss.backward()

    W.data -= learning_rate * W.grad.data
    b.data -= learning_rate * b.grad.data

    l_stats.append([t, loss.data[0]])
    w_stats.append([t, W.data[0][0], W.data[0][1]])
    b_stats.append([t, b.data[0]])

    if t % 20 == 0:
        # display.plot(l_stats, title="loss", win=state_win)
        display.plot(w_stats, title="weight", width=200, win=state_win + 1)
        display.plot(b_stats, title="bias", win=state_win + 2)
        print("it: #{} loss: {} W: [{}, {}], b: {}".format(
            t, loss.data[0], W.data[0][0], W.data[0][1], b.data[0]))
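# The loop above assumes W, b, x_data, y_data, and learning_rate were set up
# earlier. A minimal sketch for a 2-feature linear regression under the
# pre-0.4 Variable API that the snippet's `loss.data[0]` indexing implies
# (all shapes and values here are assumptions):
import torch
from torch.autograd import Variable
import display

learning_rate = 1e-3

# Synthetic data: 2 features x 100 samples, targets from a known line.
x_data = Variable(torch.randn(2, 100))
y_data = 3.0 * x_data[0:1, :] - 1.5 * x_data[1:2, :] + 0.5

W = Variable(torch.randn(1, 2), requires_grad=True)
b = Variable(torch.zeros(1), requires_grad=True)
# Note: on PyTorch >= 0.4 the first grad.data.zero_() would need a None
# guard, and loss.data[0] would be written loss.item().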
def draw_line_backup(x0, y0, x1, y1, screen, color):
    # Earlier slope-case-by-case attempt, kept as a backup.
    if x0 == x1 and y0 == y1:
        return
    if x0 > x1:
        # Always draw left to right (was calling draw_line, likely a
        # leftover from before the rename).
        return draw_line_backup(x1, y1, x0, y0, screen, color)
    if x1 - x0 == 0:
        # Vertical line: step along y.
        if y1 > y0:
            plot(screen, color, x0, y0 + 1)  # was plot(screen, color, y0 + 1, y1)
            return draw_line_backup(x0, y0 + 1, x1, y1, screen, color)
        else:
            plot(screen, color, x0, y0 - 1)  # was plot(screen, color, y0 - 1, y1)
            return draw_line_backup(x0, y0 - 1, x1, y1, screen, color)
    slope = (y1 - y0) / (x1 - x0)
    if slope > 1:
        plot(screen, color, x0, y0 + 1)
        return draw_line_backup(x0, y0 + 1, x1, y1, screen, color)
    elif slope == 1:
        plot(screen, color, x0 + 1, y0 + 1)
        return draw_line_backup(x0 + 1, y0 + 1, x1, y1, screen, color)
    elif slope > 0:
        plot(screen, color, x0 + 1, y0)
        return draw_line_backup(x0 + 1, y0, x1, y1, screen, color)
    elif slope > -1:
        plot(screen, color, x0 + 1, y0)
        return draw_line_backup(x0 + 1, y0, x1, y1, screen, color)
    # Slopes <= -1 were never handled in this backup version.
symbol = item.name
equity = client.get_last(symbol)

# Normalize prices against the period's low/high range.
norm_equity = equity.drop(['date', 'volume'], axis=1)
min_equity = norm_equity['low'].min()
max_equity = norm_equity['high'].max()
norm_equity = (norm_equity - min_equity) / (max_equity - min_equity)
norm_equity = norm_equity.reset_index(drop=True)

# Normalize volume separately against its own min/max.
norm_volume = equity.drop(['date', 'open', 'high', 'low', 'close'], axis=1)
min_volume = norm_volume['volume'].min()
max_volume = norm_volume['volume'].max()
norm_volume = (norm_volume - min_volume) / (max_volume - min_volume)
norm_volume = norm_volume.reset_index(drop=True)

norm_sample = norm_equity.join(norm_volume)
sample = norm_sample.values

# Drop samples that don't have the proper shape: the model presumably
# expects (batch, timesteps, features) = (1, 10, 5).
if sample.shape[0] == 10:
    sample = sample.reshape(1, 10, 5)
    predicted = model.predict(sample)
    predicted = np.reshape(predicted, (predicted.size,))
    predicted = predicted[0]      # get the prediction
    predicted = predicted * 100   # turn it into a percentage
    print(symbol, '%.2f' % predicted, 'PSEi' if item.psei else 'not PSEi')
    display.plot(symbol, norm_equity, norm_volume)

# TODO TEST THE VERACITY OF THE MODEL!!!
print('Done.')
from game import Game
from display import plot

game = Game()
game.run()
plot(game.game_stats)
# 3. Standardize the data
ana4_std = StandardScaler().fit_transform(ana1_selection_drop)  # standardize
ana4_std


# In[182]:


# 5. k-means cluster analysis
kmeans_model = KMeans(n_clusters=5).fit(ana4_std)  # k-means clustering
print('done')


# In[186]:


# 6. Draw the radar chart
plot(kmeans_model, ana1_selection.columns)  # plot the customer segmentation result


# In[185]:


# Test
# Select suitable features, build the clustering model, and analyze the
# spending patterns of each student group.
# 1. Import libraries and select the data
data_ana1 = data5
# data_ana1.shape
# 2. Build features: Dep, Money, FundMoney, CardCount, Date
# 2.1
ana1_selection = data_ana1.loc[:, [
    'Money', 'CardCount', 'hour', 'Sex', 'Surplus', 'FundMoney'
]]
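# The plot() call above comes from the project's own display module. A
# minimal sketch of what a radar chart of the cluster centers could look
# like in matplotlib; the function name and all styling here are
# assumptions, not the project's actual implementation:
import numpy as np
import matplotlib.pyplot as plt


def radar_plot(kmeans_model, feature_names):
    # One closed polygon per cluster center; one axis per feature.
    centers = kmeans_model.cluster_centers_
    n = len(feature_names)
    angles = np.linspace(0, 2 * np.pi, n, endpoint=False).tolist()
    angles += angles[:1]  # repeat the first angle to close each polygon

    fig, ax = plt.subplots(subplot_kw=dict(polar=True))
    for idx, center in enumerate(centers):
        values = center.tolist() + [center[0]]
        ax.plot(angles, values, label='cluster {}'.format(idx))
        ax.fill(angles, values, alpha=0.1)
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(feature_names)
    ax.legend(loc='upper right')
    plt.show()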