def _draw_legend(self, data, lines, wordspacing, linespacing, *args, **kwargs):
    """
    Draw a legend by iterating through all the lines.
    The labels are drawn on the same line as the corresponding token.

    :param data: The text data as a dictionary.
                 This is used to look for `label` attributes.
    :type data: dict
    :param lines: The drawn lines.
    :type lines: list of list of :class:`matplotlib.text.Text`
    :param wordspacing: The space between tokens.
    :type wordspacing: float
    :param linespacing: The space between lines.
    :type linespacing: float

    :return: A list of lines, each containing a list of labels on that line.
    :rtype: list of list of :class:`matplotlib.text.Text`
    """

    labels = []

    figure = self.drawable.figure
    axis = self.drawable.axis

    """
    Iterate through each line, and then through each token in that line.
    """
    drawn_labels = []
    i = 0  # flat index into `data`, advanced once per token across all lines
    for line, line_tokens in enumerate(lines):
        line_labels = []
        for token in line_tokens:
            label, style = data[i].get('label', ''), data[i].get('style', { })
            i += 1

            """
            If the token has a label associated with it, draw it the first time it appears.
            """
            if label and label not in drawn_labels:
                drawn_labels.append(label)
                # NOTE(review): `token` is rebound here, shadowing the loop
                # variable; the label's Text object is what gets collected.
                token = text_util.draw_token(figure, axis, label, 0, line,
                                             style, wordspacing, va='top',
                                             *args, **kwargs)
                line_labels.append(token)

        """
        After drawing the labels on each line, re-align the legend.
        The labels are aligned to the right.
        They are reversed so that the first label appears on the left.
        """
        util.align(figure, axis, line_labels[::-1], 'right', wordspacing * 4,
                   (-1, - wordspacing * 4))
        labels.append(line_labels)

    return labels
def cmd_verify(self, ui, args):
    """verify memory against file"""
    parsed = util.file_mem_args(ui, args, self.cpu.device)
    if parsed is None:
        return
    (name, adr, size) = parsed
    # Make sure the file exists and find out how large it is.
    filesize = util.file_arg(ui, name)
    if filesize is None:
        return
    # Round the file size up to a 32-bit boundary - the io object will
    # return 0xff for any bytes read beyond EOF.
    filesize = util.roundup(filesize, 32)
    # With no length on the command line, or a region larger than the file,
    # just verify the file size.
    if size is None or size > filesize:
        size = filesize
    # 32-bit align the address and convert the byte count to words.
    adr = util.align(adr, 32)
    n = util.nbytes_to_nwords(size, 32)
    # Read memory and verify it against the file object.
    mf = iobuf.verify_file(ui, 'verify %s (%d bytes):' % (name, n * 4), name, n * 4)
    self.cpu.rdmem32(adr, n, mf)
    mf.close()
def seasonANNForecasting(ts, dataset, freq, lag):
    """
    Forecast a seasonal series by decomposing it and modelling the trend and
    residual components with separate ANNs; the seasonal component is reused
    as-is.

    :param ts: the time series to forecast.
    :param dataset: the raw data array used to build the ground-truth slices.
    :param freq: the seasonal period used for the decomposition.
    :param lag: the look-back window used as the ANN input dimension.
    :return: (trainPred, testPred, MAE, MRSE, SMAPE) on the test set.
    """
    # Decompose the series into trend / seasonal / residual components.
    #ts.index = pd.date_range(start='19960318',periods=len(ts), freq='Q')
    trend, seasonal, residual = season_decompose.seasonDecompose(ts, freq=freq)
    # print trend.shape
    # print seasonal.shape
    # print residual.shape

    # Forecast the trend and residual components separately.
    trendWin = lag
    resWin = trendWin
    t1 = time.time()
    trTrain, trTest, mae1, mrse1, smape1 = ANNFORECAST.ANNforecasting(
        trend, inputDim=trendWin, epoch=100, hiddenNum=100)
    resTrain, resTest, mae2, mrse2, smape2 = ANNFORECAST.ANNforecasting(
        residual, inputDim=resWin, epoch=100, hiddenNum=100)
    t2 = time.time()
    print(t2 - t1)

    #'''
    # Align the component predictions so they can be summed element-wise.
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain,
                                    resTest, resWin)

    # Recombine the components into the final predictions.
    finalPred = trendPred + seasonal + resPred
    trainPred = trTrain + seasonal[trendWin:trendWin + trTrain.shape[0]] + resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:] + resTest

    # Ground-truth data: the decomposition trims half a period from each end
    # of `dataset`, so the slices below mirror the prediction windows.
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics (test set only; train-set metrics left commented out).
    # MAE = eval.calcMAE(trainY, trainPred)
    # print ("train MAE",MAE)
    # MRSE = eval.calcRMSE(trainY, trainPred)
    # print ("train MRSE",MRSE)
    # MAPE = eval.calcMAPE(trainY, trainPred)
    # print ("train MAPE",MAPE)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(data)
    # plt.plot(finalPred)
    # plt.show()
    #'''
    return trainPred, testPred, MAE, MRSE, SMAPE
def decompose_RNN_forecasting(ts, dataset, freq, lag, epoch=20, hidden_num=64,
                              batch_size=32, lr=1e-3, unit="GRU", varFlag=False,
                              maxLen=48, minLen=24, step=8):
    """
    Forecast a seasonal series by decomposing it and modelling the trend and
    residual components with separate RNNs; the seasonal component is reused
    as-is. Plots and saves the test-set prediction before returning.

    :param ts: the time series to forecast.
    :param dataset: the raw data array used to build the ground-truth slices.
    :param freq: the seasonal period used for the decomposition.
    :param lag: the look-back window for both component models.
    :param epoch, hidden_num, batch_size, lr, unit: RNN training settings.
    :param varFlag, maxLen, minLen, step: variable-length training settings.
    :return: (trainPred, testPred, MAE, MRSE, SMAPE) on the test set.
    """
    # Decompose the series into trend / seasonal / residual components.
    trend, seasonal, residual = decompose.ts_decompose(ts, freq)
    print("trend shape:", trend.shape)
    print("peroid shape:", seasonal.shape)
    print("residual shape:", residual.shape)

    # Forecast the trend and residual components separately.
    resWin = trendWin = lag
    t1 = time.time()
    trTrain, trTest, MAE1, MRSE1, SMAPE1 = RNN_forecasting(trend, lookBack=lag, epoch=epoch,
                                                           batchSize=batch_size, hiddenNum=hidden_num,
                                                           varFlag=varFlag, minLen=minLen, maxLen=maxLen,
                                                           step=step, unit=unit, lr=lr)
    resTrain, resTest, MAE2, MRSE2, SMAPE2 = RNN_forecasting(residual, lookBack=lag, epoch=epoch,
                                                             batchSize=batch_size, hiddenNum=hidden_num,
                                                             varFlag=varFlag, minLen=minLen, maxLen=maxLen,
                                                             step=step, unit=unit, lr=lr)
    t2 = time.time()
    print(t2-t1)

    print("trTrain shape:", trTrain.shape)
    print("resTrain shape:", resTrain.shape)

    # Align the component predictions so they can be summed element-wise.
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain, resTest, resWin)
    print("trendPred shape is", trendPred.shape)
    print("resPred shape is", resPred.shape)

    # Recombine the components into the final predictions.
    # finalPred = trendPred+seasonal+resPred
    trainPred = trTrain+seasonal[trendWin:trendWin+trTrain.shape[0]]+resTrain
    testPred = trTest+seasonal[2*resWin+resTrain.shape[0]:]+resTest

    # Ground-truth data: the decomposition trims half a period from each end
    # of `dataset`, so the slices below mirror the prediction windows.
    data = dataset[freq//2:-(freq//2)]
    trainY = data[trendWin:trendWin+trTrain.shape[0]]
    testY = data[2*resWin+resTrain.shape[0]:]

    # Evaluation metrics on the test set.
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # Plot the prediction against the ground truth and save it as EPS.
    plt.plot(testY, label='ground-truth')
    plt.plot(testPred, label='prediction')
    plt.xlabel("Time", fontsize=10)
    plt.ylabel("CPU Utilization(%)", fontsize=10)
    plt.legend()
    foo_fig = plt.gcf()
    foo_fig.savefig('M_1955_CPU.eps', format='eps', dpi=1000, bbox_inches='tight')
    plt.show()

    return trainPred, testPred, MAE, MRSE, SMAPE
def seasonSVRForecasting(ts, dataset, freq, lag):
    """
    Forecast a seasonal series with SVRs: decompose it, model the trend and
    residual components separately, reuse the seasonal component, and report
    test-set error metrics.

    :param ts: the time series to forecast.
    :param dataset: the raw data array used to build the ground-truth slices.
    :param freq: the seasonal period used for the decomposition.
    :param lag: the look-back window for both component models.
    :return: (trainPred, testPred, MAE, MRSE, SMAPE) on the test set.
    """
    # Decompose the series into trend / seasonal / residual components.
    trend, seasonal, residual = season_decompose.seasonDecompose(ts, freq=freq)
    # print trend.shape
    # print seasonal.shape
    # print residual.shape

    # Forecast the trend and residual components separately.
    trendWin = lag
    resWin = trendWin
    t1 = time.time()
    trTrain, trTest, mae1, mrse1, mape1 = SVRFORECAST.SVRforecasting(
        trend, lookBack=trendWin)
    resTrain, resTest, mae2, mrse2, mape2 = SVRFORECAST.SVRforecasting(
        residual, lookBack=resWin)
    t2 = time.time()
    print(t2 - t1)

    #'''
    # Align the component predictions so they can be summed element-wise.
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain,
                                    resTest, resWin)

    # Recombine the components into the final predictions.
    # NOTE(review): unlike the ANN/RNN variants, the residual forecasts are
    # commented out of trainPred/testPred here - confirm this is intentional.
    finalPred = trendPred + seasonal + resPred
    trainPred = trTrain + seasonal[trendWin:trendWin + trTrain.shape[0]]  #+resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:]  #+resTest

    # Ground-truth data: the decomposition trims half a period from each end
    # of `dataset`, so the slices below mirror the prediction windows.
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics (test set only; train-set metrics left commented out).
    # MAE = eval.calcMAE(trainY, trainPred)
    # print ("train MAE",MAE)
    # MRSE = eval.calcRMSE(trainY, trainPred)
    # print ("train MRSE",MRSE)
    # MAPE = eval.calcMAPE(trainY, trainPred)
    # print ("train MAPE",MAPE)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(data)
    # plt.plot(finalPred)
    # plt.show()
    #'''
    return trainPred, testPred, MAE, MRSE, SMAPE
def cmd_rd(self, ui, args, n):
    """memory read command for n bits

    Expects a single address argument; reads and prints one n-bit value.
    """
    if util.wrong_argc(ui, args, (1, )):
        return
    adr = util.sex_arg(ui, args[0], self.cpu.width)
    # `is None` rather than `== None`: identity test for the singleton.
    if adr is None:
        return
    adr = util.align(adr, n)
    ui.put('[0x%08x] = ' % adr)
    # n // 4 hex digits; integer division so the nested %d width is an int
    # (n / 4 is a float under Python 3).
    ui.put('0x%%0%dx\n' % (n // 4) % self.cpu.rd(adr, n))
def wr(self, adr, val, n):
    """write to memory - n bits aligned

    :param adr: the write address (will be aligned to n bits).
    :param val: the value to write.
    :param n: the access width in bits - one of 32, 16 or 8.
    :raises ValueError: if n is not a supported width.
    """
    adr = util.align(adr, n)
    if n == 32:
        return self.dbgio.wr32(adr, val)
    elif n == 16:
        return self.dbgio.wr16(adr, val)
    elif n == 8:
        return self.dbgio.wr8(adr, val)
    # A bare `assert False` is stripped under -O and would silently return
    # None for an unsupported width; raise explicitly instead.
    raise ValueError('unsupported memory access width: %d bits' % n)
def rd(self, adr, n):
    """read from memory - n bits aligned

    :param adr: the read address (will be aligned to n bits).
    :param n: the access width in bits - one of 32, 16 or 8.
    :return: the value read from memory.
    :raises ValueError: if n is not a supported width.
    """
    adr = util.align(adr, n)
    if n == 32:
        return self.dbgio.rd32(adr)
    elif n == 16:
        return self.dbgio.rd16(adr)
    elif n == 8:
        return self.dbgio.rd8(adr)
    # A bare `assert False` is stripped under -O and would silently return
    # None for an unsupported width; raise explicitly instead.
    raise ValueError('unsupported memory access width: %d bits' % n)
def cmd_rd(self, ui, args, n):
    """memory read command for n bits

    Expects a single address argument; reads and prints one n-bit value.
    """
    if util.wrong_argc(ui, args, (1,)):
        return
    adr = util.sex_arg(ui, args[0], self.cpu.width)
    # `is None` rather than `== None`: identity test for the singleton.
    if adr is None:
        return
    adr = util.align(adr, n)
    ui.put('[0x%08x] = ' % adr)
    # n // 4 hex digits; integer division so the nested %d width is an int
    # (n / 4 is a float under Python 3).
    ui.put('0x%%0%dx\n' % (n // 4) % self.cpu.rd(adr, n))
def decompose_SVR_forecasting(ts, dataset, freq, lag, C=0.1, epsilon=0.01):
    """
    Forecast a seasonal series with SVRs: decompose it into trend, seasonal
    and residual components, model the trend and residual separately, reuse
    the seasonal component as-is, and report test-set error metrics.

    :param ts: the time series to forecast.
    :param dataset: the raw data array used to build the ground-truth slices.
    :param freq: the seasonal period used for the decomposition.
    :param lag: the look-back window for both component models.
    :param C, epsilon: SVR hyper-parameters.
    :return: (trainPred, testPred, MAE, MRSE, SMAPE) on the test set.
    """
    # Decompose into trend / seasonal / residual components.
    trend, seasonal, residual = decompose.ts_decompose(ts, freq=freq)
    print("trend shape:", trend.shape)
    print("peroid shape:", seasonal.shape)
    print("residual shape:", residual.shape)

    # Fit one SVR per component, timing the two fits together.
    resWin = trendWin = lag
    started = time.time()
    trTrain, trTest, mae1, mrse1, mape1 = SVR_forecasting(trend, lookBack=lag,
                                                          C=C, epsilon=epsilon)
    resTrain, resTest, mae2, mrse2, mape2 = SVR_forecasting(residual, lookBack=lag,
                                                            C=C, epsilon=epsilon)
    finished = time.time()
    print(finished - started)

    # Align the component predictions so they can be summed element-wise.
    trendPred, resPred = util.align(trTrain, trTest, trendWin,
                                    resTrain, resTest, resWin)

    # Recombine the components into the final predictions. The test split
    # starts after both look-back windows plus the training portion.
    finalPred = trendPred + seasonal + resPred
    test_start = 2 * resWin + resTrain.shape[0]
    trainPred = trTrain + seasonal[trendWin:trendWin + trTrain.shape[0]] + resTrain
    testPred = trTest + seasonal[test_start:] + resTest

    # Ground truth: the decomposition trims half a period from each end.
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[test_start:]

    # Test-set evaluation metrics.
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    return trainPred, testPred, MAE, MRSE, SMAPE
def cmd_mem2file(self, ui, args):
    """read from memory, write to file"""
    parsed = util.file_mem_args(ui, args, self.cpu.device)
    if parsed is None:
        return
    (name, adr, size) = parsed
    # A length is mandatory for a memory dump.
    if size is None:
        ui.put('invalid length')
        return
    # 32-bit align the address and convert the byte count to words.
    adr = util.align(adr, 32)
    n = util.nbytes_to_nwords(size, 32)
    # Stream the memory region out to the file object.
    mf = iobuf.write_file(ui, 'writing to %s' % name, name, n * 4)
    self.cpu.rdmem32(adr, n, mf)
    mf.close()
def cmd_wr(self, ui, args, n):
    """memory write command for n bits

    Expects an address and an optional value (defaults to 0); writes one
    n-bit value and echoes it back.
    """
    if util.wrong_argc(ui, args, (1, 2)):
        return
    adr = util.sex_arg(ui, args[0], self.cpu.width)
    # `is None` rather than `== None`: identity test for the singleton.
    if adr is None:
        return
    adr = util.align(adr, n)
    val = 0
    if len(args) == 2:
        val = util.int_arg(ui, args[1], util.limit_32, 16)
        if val is None:
            return
    val = util.mask_val(val, n)
    self.cpu.wr(adr, val, n)
    ui.put('[0x%08x] = ' % adr)
    # n // 4 hex digits; integer division so the nested %d width is an int
    # (n / 4 is a float under Python 3).
    ui.put('0x%%0%dx\n' % (n // 4) % val)
def cmd_wr(self, ui, args, n):
    """memory write command for n bits

    Expects an address and an optional value (defaults to 0); writes one
    n-bit value and echoes it back.
    """
    if util.wrong_argc(ui, args, (1,2)):
        return
    adr = util.sex_arg(ui, args[0], self.cpu.width)
    # `is None` rather than `== None`: identity test for the singleton.
    if adr is None:
        return
    adr = util.align(adr, n)
    val = 0
    if len(args) == 2:
        val = util.int_arg(ui, args[1], util.limit_32, 16)
        if val is None:
            return
    val = util.mask_val(val, n)
    self.cpu.wr(adr, val, n)
    ui.put('[0x%08x] = ' % adr)
    # n // 4 hex digits; integer division so the nested %d width is an int
    # (n / 4 is a float under Python 3).
    ui.put('0x%%0%dx\n' % (n // 4) % val)
def align_from_file(self, reference_image_obj, align_method='cv2.TM_CCOEFF_NORMED', roi_size=0.5):
    """Align this image taking another image object as the reference.

    `roi_size` is entered as input parameter as tant per one of the
    original image size. Returns the aligned image, the move vector and a
    human-readable description of the operation.
    """
    reference = reference_image_obj.image
    moving = self.image
    aligned_image, mv_vector = align(reference, moving,
                                     align_method=align_method,
                                     roi_size=roi_size)
    # Describe the alignment in terms of the reference dataset and file.
    reference_file = reference_image_obj.h5_image_filename
    reference_dataset = reference_image_obj.image_dataset
    description = ("Image " + self.image_dataset +
                   " has been aligned taking as reference image " +
                   reference_dataset + "@" + path.basename(reference_file))
    return aligned_image, mv_vector, description
def cmd_disassemble(self, ui, args):
    """disassemble memory"""
    if util.wrong_argc(ui, args, (0, 1, 2)):
        return
    count = 16
    if not args:
        # No address given: halt the cpu and disassemble from the pc.
        self.halt()
        adr = self.dbgio.rdreg('pc')
    else:
        adr = util.sex_arg(ui, args[0], 32)
        if adr is None:
            return
    if len(args) == 2:
        count = util.int_arg(ui, args[1], (1, 2048), 16)
        if count is None:
            return
    # align the address to 32 bits
    adr = util.align(adr, 32)
    # Feed the memory words through the disassembler.
    md = iobuf.arm_disassemble(ui, adr)
    self.rdmem32(adr, count, md)
def seasonRNNForecasting(ts, dataset, freq, lag, unit="GRU"):
    """
    Forecast a seasonal series with RNNs: decompose it, model the trend and
    residual components separately, reuse the seasonal component, and report
    train- and test-set error metrics.

    :param ts: the time series to forecast.
    :param dataset: the raw data array used to build the ground-truth slices.
    :param freq: the seasonal period used for the decomposition.
    :param lag: the look-back window for both component models.
    :param unit: the RNN cell type passed to the forecaster.
    :return: (trainPred, testPred, MAE, MRSE, SMAPE); the metric values
             returned are those computed on the test set.
    """
    # Decompose the series into trend / seasonal / residual components.
    #ts.index = pd.date_range(start='19960318',periods=len(ts), freq='Q')
    trend, seasonal, residual = season_decompose.seasonDecompose(ts, freq)
    print(trend.shape)
    print(seasonal.shape)
    print(residual.shape)

    # Forecast the trend and residual components separately.
    trendWin = lag
    resWin = trendWin
    t1 = time.time()
    trTrain, trTest, MAE1, MRSE1, SMAPE1 = RNNFORECAST.RNNforecasting(
        trend, lookBack=trendWin, epoch=50, unit=unit)
    resTrain, resTest, MAE2, MRSE2, SMAPE2 = RNNFORECAST.RNNforecasting(
        residual, lookBack=resWin, epoch=60, unit=unit, hiddenNum=100)
    # Alternative variable-length training configuration, kept for reference:
    # trTrain, trTest, MAE1, MRSE1, SMAPE1= RNNFORECAST.RNNforecasting(trend, lookBack=resWin, epoch=30, unit=unit,
    #                                                                  varFlag=True, minLen=20, maxLen=lag, step=4,
    #                                                                  hiddenNum=100)
    # resTrain, resTest, MAE2, MRSE2, SMAPE2 = RNNFORECAST.RNNforecasting(residual, lookBack=resWin, epoch=30, unit=unit,
    #                                                                  varFlag=True, minLen=20, maxLen=lag, step=4, hiddenNum=100)
    t2 = time.time()
    print(t2 - t1)
    print("trTrain shape is", trTrain.shape)
    print("resTrain shape is", resTrain.shape)

    #'''
    # Align the component predictions so they can be summed element-wise.
    trendPred, resPred = util.align(trTrain, trTest, trendWin, resTrain,
                                    resTest, resWin)
    print("trendPred shape is", trendPred.shape)
    print("resPred shape is", resPred.shape)

    # Recombine the components into the final predictions.
    finalPred = trendPred + seasonal + resPred
    trainPred = trTrain + seasonal[trendWin:trendWin + trTrain.shape[0]] + resTrain
    testPred = trTest + seasonal[2 * resWin + resTrain.shape[0]:] + resTest

    # Ground-truth data: the decomposition trims half a period from each end
    # of `dataset`, so the slices below mirror the prediction windows.
    data = dataset[freq // 2:-(freq // 2)]
    trainY = data[trendWin:trendWin + trTrain.shape[0]]
    testY = data[2 * resWin + resTrain.shape[0]:]

    # Evaluation metrics: train set first, then the test set (the returned
    # MAE/MRSE/SMAPE are the test-set values; MAPE is printed only).
    MAE = eval.calcMAE(trainY, trainPred)
    print("train MAE", MAE)
    MRSE = eval.calcRMSE(trainY, trainPred)
    print("train MRSE", MRSE)
    MAPE = eval.calcMAPE(trainY, trainPred)
    print("train MAPE", MAPE)
    MAE = eval.calcMAE(testY, testPred)
    print("test MAE", MAE)
    MRSE = eval.calcRMSE(testY, testPred)
    print("test RMSE", MRSE)
    MAPE = eval.calcMAPE(testY, testPred)
    print("test MAPE", MAPE)
    SMAPE = eval.calcSMAPE(testY, testPred)
    print("test SMAPE", SMAPE)

    # plt.plot(data)
    # plt.plot(finalPred)
    # plt.show()
    #'''
    return trainPred, testPred, MAE, MRSE, SMAPE
# NOTE(review): fragment of a larger forecasting routine - names such as
# residual, resWin, trTrain, trendWin, seasonal and dataset are defined
# earlier, outside this chunk.

# Ensemble the residual forecast: average the predictions of `regNum`
# independently trained RNN regressors.
resTrain = resTest = None
regNum = 4
for i in range(regNum):
    tmpresTrain, tmpresTest = RNNFORECAST.RNNforecasting(residual,lookBack=resWin,epoch=50,varFlag=True,maxLen=20,inputNum=1500)
    if i == 0:
        resTrain = tmpresTrain
        resTest = tmpresTest
    else:
        resTrain += tmpresTrain
        resTest += tmpresTest
resTrain /= regNum
resTest /= regNum
#'''
# Align the component predictions so they can be summed element-wise.
trendPred,resPred = util.align(trTrain,trTest,trendWin,resTrain,resTest,resWin)
# Recombine the components into the final prediction.
finalPred = trendPred+resPred+seasonal
# Training-set and test-set predictions respectively.
trainPred = trTrain+resTrain+seasonal[trendWin:trendWin+trTrain.shape[0]]
testPred = trTest+resTest+seasonal[2*resWin+resTrain.shape[0]:]
# Ground-truth data (two samples trimmed from each end of `dataset`,
# presumably matching the decomposition - TODO confirm).
data = dataset[2:-2]
trainY = data[trendWin:trendWin+trTrain.shape[0]]
testY = data[2*resWin+resTrain.shape[0]:]
# Evaluation metrics.
MAE = eval.calcMAE(trainY, trainPred)
# NOTE(review): fragment of a Python 2 scraping script (urllib.urlencode does
# not exist in Python 3) - para_dct, opener, baseurl, paramters, viewstate,
# eventvali and align are defined earlier, outside this chunk.

# Build the ASP.NET postback form fields for the course-list request.
para_dct['__EVENTTARGET'] = 'ddl_ywyl'
para_dct['__EVENTARGUMENT'] = ''
para_dct['__LASTFOCUS'] = ''
para_dct['__VIEWSTATE'] = viewstate
para_dct['__EVENTVALIDATION'] = eventvali
para_data = urllib.urlencode(para_dct)
response = opener.open(baseurl + paramters, para_data)
# NOTE(review): `read(response)` looks like it should be `response.read()` -
# confirm a module-level `read` helper exists. The page is GBK-encoded.
content = read(response).decode('gbk').encode('utf-8')
# One <tr>...</tr> block per table row.
classes = re.compile('(<tr[\s\S]*?<\/tr>)').findall(content)
regnum = re.compile('kcmcGrid_ctl(\d+)')  # row control id (course code)
regname = re.compile('target=\'_blank\'>(.*?)<\/a>')  # linked names in the row
reginfo = re.compile('<td>(.*?)<\/td>')  # remaining table cells
# Skip the header row and the trailing row.
for i in range(1, len(classes) - 1):
    sclass = classes[i]
    codeg = regnum.findall(sclass)
    if codeg:
        code = codeg[0]
    else:
        # No row id means the course rows have ended.
        break
    group1 = regname.findall(sclass)
    name = group1[0]
    teacher = group1[1]
    group2 = reginfo.findall(sclass)
    # Print one fixed-width formatted line per row.
    print(
        align(code, 4) + align(name, 35) + align(teacher, 18) +
        align(group2[4], 20) + align(group2[5], 5) + align(group2[6], 10) +
        align(group2[7], 7) + group2[9] + '/' + group2[8])
# Align three short translated CSV files into a single output file.
from util import align

version_names = ['Afr1953', 'ESV', 'DutSVV']
translated_files = ['Afr1953_short_translated.csv',
                    'ESV_short_translated.csv',
                    'DutSVV_short_translated.csv']
align(version_names, translated_files, 'test_aligned.csv')
def _draw_tokens(self, tokens, x, y, wordspacing, lineheight, align, va,
                 transform=None, *args, **kwargs):
    """
    Draw the tokens on the plot.

    :param tokens: The text tokens to draw.
                   The method expects a `list` of tokens, each one a `dict`.
    :type tokens: list of str
    :param x: The start and end x-position of the annotation.
    :type x: tuple
    :param y: The starting y-position of the annotation.
    :type y: float
    :param wordspacing: The space between words.
                        If `None` is given, the space is calculated based on the height of the line.
    :type wordspacing: float or None
    :param lineheight: The space between lines.
    :type lineheight: float
    :param align: The text's alignment.
                  Possible values:

                  - left
                  - center
                  - right
                  - justify
                  - justify-start (or justify-left)
                  - justify-center
                  - justify-end or (justify-right)
    :type align: str
    :param va: The vertical alignment, can be one of `top` or `bottom`.
               If the vertical alignment is `bottom`, the annotation grows up.
               If the vertical alignment is `top`, the annotation grows down.
    :type va: str
    :param transform: The bounding box transformation.
                      If `None` is given, the data transformation is used.
    :type transform: None or :class:`matplotlib.transforms.TransformNode`

    :return: The drawn lines.
             Each line is made up of the text tokens.
    :rtype: list of list of :class:`matplotlib.text.Text`
    """

    figure = self.drawable.figure
    axis = self.drawable.axis
    transform = transform if transform is not None else axis.transData

    # The line spacing is derived from the rendered text height, scaled by
    # the requested line height.
    linespacing = util.get_linespacing(figure, axis, wordspacing,
                                       transform=transform, *args, **kwargs) * lineheight
    if wordspacing is None:
        wordspacing = linespacing / 10.

    """
    Go through each token and draw it on the axis.
    """
    drawn_lines, line_tokens = [], []
    offset = x[0]  # current x-position on the line being filled
    for token in tokens:
        """
        Draw the text token.
        If the vertical alignment is top, the annotation grows downwards: one line after the other.
        If the vertical alignment is bottom, the annotation grows upwards.
        When the vertical alignment is bottom, new text is always added to the same place.
        New lines push previous lines up.

        Note that the center alignment is not considered here.
        There is no way of knowing how many lines there will be in advance.
        Therefore lines are centered at a later stage.
        """
        va = 'top' if va == 'center' else va
        text = text_util.draw_token(
            figure, axis, token.get('text'), offset,
            y - len(drawn_lines) * linespacing if va == 'top' else y,
            token.get('style', {}), wordspacing, va=va,
            transform=transform, *args, **kwargs)
        line_tokens.append(text)

        """
        If the token exceeds the x-limit, break it into a new line.
        The offset is reset to the left, and a new line is added.
        The token is moved to this new line.
        Lines do not break on punctuation marks.

        Note that lists are passed by reference.
        Therefore when the last token is removed from drawn lines when create a new line, the change is reflected here.
        """
        bb = util.get_bb(figure, axis, text, transform=transform)
        if bb.x1 > x[1] and token.get('text') not in string.punctuation:
            self._newline(line_tokens, drawn_lines, linespacing, x[0], y, va,
                          transform=transform)
            util.align(figure, axis, line_tokens, xpad=wordspacing,
                       align=util.get_alignment(align), xlim=x, va=va,
                       transform=transform)
            offset = x[0]
            line_tokens = [text]

        offset += bb.width + wordspacing

    """
    Align the last line.
    """
    drawn_lines.append(line_tokens)
    util.align(figure, axis, line_tokens, xpad=wordspacing,
               align=util.get_alignment(align, end=True), xlim=x, va=va,
               transform=transform)

    return drawn_lines