def TransferData(self, data):
    """ Transfers at most 4095 bytes of data """
    myutils.debug_print(myutils.program_trace, "UDS::TransferData")
    # ISO 14229: the block sequence counter is one byte and rolls over
    # from 0xFF to 0x00, hence modulo 256
    self.blockSequenceCounter = (self.blockSequenceCounter + 1) % 256
    self.cantp.Init()
    uds_data = [0x36, self.blockSequenceCounter]
    uds_data.extend(data)
    self.xmit(uds_data)
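# Hedged sketch (assumption, not part of the original module): a standalone
# mirror of the TransferData payload layout above, showing the wrapping
# block sequence counter. The helper name is hypothetical.
def _transfer_data_payload(block_sequence_counter, data):
    bsc = (block_sequence_counter + 1) % 256  # one byte, 0xFF wraps to 0x00
    return bsc, [0x36, bsc] + list(data)      # [SID][counter][data...]

bsc, payload = _transfer_data_payload(0, [0xDE, 0xAD])
assert payload == [0x36, 0x01, 0xDE, 0xAD]
assert _transfer_data_payload(0xFF, [])[0] == 0x00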
def on_receive(self):
    myutils.debug_print(myutils.program_trace, "CanTp::on_receive")
    if self.canif.received_data[0] != 0x30:
        if self.DecodeFrame(self.canif.received_data):
            if self.event_sink is not None:
                self.event_sink()
    else:
        myutils.debug_print(myutils.can_msg_trace, 'Flow control frame received.')
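# Background note (ISO 15765-2): the first payload byte of a CAN-TP frame
# carries the PCI type in its high nibble -- 0x0_ single frame, 0x1_ first
# frame, 0x2_ consecutive frame, 0x3_ flow control -- which is why a leading
# 0x30 above marks a flow control frame rather than payload to decode.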
def RoutineControl(self, routine_control_type, routine_id, op):
    myutils.debug_print(myutils.program_trace, "UDS::RoutineControl")
    self.cantp.Init()
    uds_data = [0x31]
    uds_data.append(routine_control_type)
    uds_data.append((routine_id >> 8) & 0xFF)
    uds_data.append(routine_id & 0xFF)
    uds_data.extend(op)
    self.xmit(uds_data)
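# Hedged usage sketch (assumption): per ISO 14229, routine_control_type 0x01
# starts a routine, 0x02 stops it, and 0x03 requests its results. The helper
# below is a hypothetical standalone mirror of the payload built above;
# 0xFF00 is the routine ID commonly assigned to eraseMemory, but IDs and
# option bytes are ECU-specific.
def _routine_control_payload(routine_control_type, routine_id, op):
    # [SID 0x31][sub-function][routine ID hi][routine ID lo][option record...]
    payload = [0x31, routine_control_type,
               (routine_id >> 8) & 0xFF, routine_id & 0xFF]
    payload.extend(op)
    return payload

assert _routine_control_payload(0x01, 0xFF00, []) == [0x31, 0x01, 0xFF, 0x00]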
def RequestDownload(self, address, data_size_bytes):
    myutils.debug_print(myutils.program_trace, "UDS::RequestDownload")
    self.cantp.Init()
    self.blockSequenceCounter = 0
    uds_data = [0x34]
    uds_data.extend([self.dataFormatIdentifier, self.addressAndLengthFormatIdentifier])
    uds_data.extend(myutils.long_to_list(address))
    uds_data.extend(myutils.long_to_list(data_size_bytes))
    self.xmit(uds_data)
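# Hedged sketch (assumption): myutils.long_to_list is taken here to split a
# 32-bit value into four big-endian bytes, which would correspond to an
# addressAndLengthFormatIdentifier of 0x44 (4 address bytes, 4 size bytes).
# The address and size values are illustrative only.
def _long_to_list(value):
    return [(value >> shift) & 0xFF for shift in (24, 16, 8, 0)]

# RequestDownload payload: [0x34][DFI][ALFI][address bytes][size bytes]
payload = [0x34, 0x00, 0x44] + _long_to_list(0x08004000) + _long_to_list(0x1000)
assert payload == [0x34, 0x00, 0x44,
                   0x08, 0x00, 0x40, 0x00,
                   0x00, 0x00, 0x10, 0x00]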
def Task(self):
    assert self.state in self.states.values()
    continue_execution = False
    if self.state == self.states['UDS_REQUEST_DOWNLOAD']:
        self.uds.event_sink = self.on_rcv_data
        data = self.sr.get_data()
        assert self.srec_idx < len(data)
        self.uds_data = []
        self.start_address = data[self.srec_idx][0]
        next_address = data[self.srec_idx][0]
        # concatenate all contiguous data
        while ((self.srec_idx < len(data)) and
               (data[self.srec_idx][0] == next_address) and
               (len(self.uds_data) < self.chunk_size * 2)):
            self.uds_data.extend(data[self.srec_idx][1])
            next_address = data[self.srec_idx][0] + len(data[self.srec_idx][1])
            self.srec_idx = self.srec_idx + 1
        # find out if the block contains non-zero elements
        is_zero = True
        for idx, byte in enumerate(self.uds_data):
            if byte != 0:
                is_zero = False
                myutils.debug_print(myutils.debug_info, "Non-zero offset %d" % idx)
                break
        # if the block is non-empty, download the data
        if len(self.uds_data) > 0:
            if not is_zero:
                self.uds.RequestDownload(self.start_address, len(self.uds_data))
                self.chunk_idx = 0
                self.state = self.states['UDS_TRANSFER_DATA']
            else:
                print 'Skipped zero block at address 0x%08x' % self.start_address
                if self.srec_idx < len(data):
                    continue_execution = True
                else:
                    self.state = self.states['IDLE']
        else:
            self.state = self.states['IDLE']
    elif self.state == self.states['UDS_TRANSFER_DATA']:
        print '0x%08x' % (self.start_address + self.chunk_idx)
        self.uds.TransferData(self.uds_data[self.chunk_idx:self.chunk_idx + self.chunk_size])
        self.chunk_idx = self.chunk_idx + self.chunk_size
        if self.chunk_idx >= len(self.uds_data):
            self.state = self.states['UDS_TRANSFER_EXIT']
    elif self.state == self.states['UDS_TRANSFER_EXIT']:
        self.uds.RequestTransferExit()
        data = self.sr.get_data()
        # switch to IDLE state if no more records to download
        if self.srec_idx < len(data):
            self.state = self.states['UDS_REQUEST_DOWNLOAD']
        else:
            self.state = self.states['IDLE']
    #if (myutils.debug_switch & 0x8000) == 0x8000:
    #    # stop on first transfer
    #    self.state = self.states['IDLE']
    return continue_execution
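# Flow summary (hedged; assumes Task() is re-entered from on_rcv_data once
# the ECU's response to the previous request arrives):
#
#   UDS_REQUEST_DOWNLOAD --0x34 sent-----------------> UDS_TRANSFER_DATA
#   UDS_TRANSFER_DATA    --one 0x36 chunk per call, until chunk_idx
#                          reaches len(uds_data)-----> UDS_TRANSFER_EXIT
#   UDS_TRANSFER_EXIT    --0x37 sent-----------------> UDS_REQUEST_DOWNLOAD
#                          (more records) or IDLE (done)
#
# A True return value means "call Task() again immediately": a zero-filled
# block was skipped without any bus traffic, so no response will arrive to
# drive the state machine forward.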
def TaskThread(self):
    myutils.debug_print(myutils.program_trace, "CanTp::TaskThread")
    self.active = True
    start = time.clock()
    while self.active:
        if (time.clock() - start) > 0.001:  # 1 ms between frames
            start = time.clock()
            can_data_bytes = self.EncodeFrame()
            if len(can_data_bytes) > 0:
                self.canif.xmit(can_data_bytes)
                self.timedout = False
            else:
                # nothing left to encode: stop the transmit loop
                self.active = False
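# Hedged usage sketch (assumption): TaskThread is a blocking polling loop, so
# it is presumably run on a worker thread; `cantp` is a hypothetical
# configured CanTp instance.
#
#   import threading
#   tx = threading.Thread(target=cantp.TaskThread)
#   tx.daemon = True
#   tx.start()   # emits one CAN frame per ~1 ms until EncodeFrame() returns
#                # an empty list, which clears self.active and ends the loop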
def __init__(self, input, n_in, n_out):
    """ Initialize the parameters of the logistic regression """
    # compute the vector of class-membership probabilities in symbolic form
    self.s_y_given_x = T.nnet.sigmoid(input)
    self.s_y_given_x = debug_print(self.s_y_given_x, 'scores', False)
    super(SigmoidLoss, self).__init__(input, n_in, n_out)
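# Minimal standalone sketch (assumption, not from the original module):
# T.nnet.sigmoid squashes each score independently, so rows need not sum to
# one -- suitable for multi-label targets, unlike softmax.
import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
sigmoid_fn = theano.function([x], T.nnet.sigmoid(x))
probs = sigmoid_fn(numpy.array([[0.0, 2.0]], dtype=theano.config.floatX))
# probs is approximately [[0.5, 0.881]]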
def build_feature_vector_noMention(self, features, DEBUG=False):
    embeddings, _ = self.get_embeddings()
    embedding_size = embeddings.shape[1]
    print embeddings.shape
    lookup = StaticLookupTable(embeddings.shape[0], embeddings.shape[1])
    lookup.allocate()
    lookup.W.set_value(embeddings)
    fea = 'contexts'
    mycnf = self._config[fea]
    mymodel = mycnf['model']
    max_len = mycnf['max_len']
    x_ctx1 = T.matrix(fea + '1', dtype='int32')
    x_ctx2 = T.matrix(fea + '2', dtype='int32')
    # embedded_x.shape = (batch_size, len(x), embedding_size)
    embedded_ctx1 = lookup.apply(x_ctx1)
    embedded_ctx2 = lookup.apply(x_ctx2)
    embedded_ctx1.name = fea + '_embed1'
    embedded_ctx2.name = fea + '_embed2'
    l_emb1, l_size1, r_emb1, r_size1 = self.split_inp(max_len, embedded_ctx1, mymodel, DEBUG)
    l_emb2, l_size2, r_emb2, r_size2 = self.split_inp(max_len, embedded_ctx2, mymodel, DEBUG)
    fv1, fv2, fvlen = self.apply_cnn(l_emb1, l_size1, l_emb2, l_size2,
                                     r_emb1, r_size1, r_emb2, r_size2,
                                     embedding_size, mycnf)
    logger.info('feature size for each input token: %d', embedding_size)
    logger.info('feature vector length: %d', fvlen)
    fv1 = debug_print(fv1, 'fv1', DEBUG)
    return fv1, fv2, fvlen
def softmax_layer(h, y, hidden_size, num_targets, cost_fn='cross'):
    hidden_to_output = Linear(name='hidden_to_output', input_dim=hidden_size,
                              output_dim=num_targets)
    initialize([hidden_to_output])
    linear_output = hidden_to_output.apply(h)
    linear_output.name = 'linear_output'
    y_pred = T.argmax(linear_output, axis=1)
    label_of_predicted = debug_print(y[T.arange(y.shape[0]), y_pred],
                                     'label_of_predicted', False)
    pat1 = T.mean(label_of_predicted)
    updates = None
    if 'ranking' in cost_fn:
        cost, updates = ranking_loss(linear_output, y)
        print 'using ranking loss function!'
    else:
        y_hat = Logistic().apply(linear_output)
        y_hat.name = 'y_hat'
        cost = cross_entropy_loss(y_hat, y)
    cost.name = 'cost'
    pat1.name = 'precision@1'
    misclassify_rate = MultiMisclassificationRate().apply(y, T.ge(linear_output, 0.5))
    misclassify_rate.name = 'error_rate'
    return cost, pat1, updates, misclassify_rate
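# Hedged sketch (assumption): cross_entropy_loss is not defined in this
# section; for sigmoid outputs over multi-label 0/1 targets it is presumably
# the mean element-wise binary cross entropy, along these lines:
import theano.tensor as T

def cross_entropy_loss(y_hat, y):
    # y_hat, y: (batch_size, num_targets); y holds 0/1 labels
    return T.mean(T.nnet.binary_crossentropy(y_hat, y))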
def build_network(self, num_labels, features, max_len=None, hidden_units=None,
                  l2=None, use_cnn=None, cnn_filter_size=None,
                  cnn_pool_size=None, cnn_num_filters=None,
                  cnn_filter_sizes=None, embedding_size=None, DEBUG=False):
    """ Build the neural network used for training.

    :param num_labels: Number of labels to classify
    :param features: the input features we use
    :param max_len: Configured window-size
    :param hidden_units: Number of units in the MLP's hidden layer
    :returns: The two sigmoid output variables and the two pre-output
              hidden representations
    """
    logger.info('building the network, with one CNN for left and one for right')
    hidden_units = hidden_units or self._config['hidden_units']
    logger.info('#hidden units: %d', hidden_units)
    # building the feature vector from input.
    mlp_in_e1, mlp_in_e2, mlp_in_dim = self.build_feature_vector_noMention(features)
    logger.info('feature vector size: %d', mlp_in_dim)
    mlp = MLP(activations=[Rectifier()], dims=[mlp_in_dim, hidden_units],
              seed=self.curSeed)
    initialize([mlp])
    before_out_e1 = mlp.apply(mlp_in_e1)
    before_out_e2 = mlp.apply(mlp_in_e2)
    hidden_to_output = Linear(name='hidden_to_output', input_dim=hidden_units,
                              output_dim=num_labels)
    initialize([hidden_to_output])
    linear_output_e1 = hidden_to_output.apply(before_out_e1)
    linear_output_e2 = hidden_to_output.apply(before_out_e2)
    linear_output_e1.name = 'linear_output_e1'
    linear_output_e2.name = 'linear_output_e2'
    y_hat_e1 = Logistic(name='logistic1').apply(linear_output_e1)
    y_hat_e2 = Logistic(name='logistic2').apply(linear_output_e2)
    y_hat_e1.name = 'y_hat_e1'
    y_hat_e2.name = 'y_hat_e2'
    y_hat_e1 = debug_print(y_hat_e1, 'y_1', DEBUG)
    return y_hat_e1, y_hat_e2, before_out_e1, before_out_e2
def create_rec(xemb, mycnf, embedding_size):
    hiddensize = mycnf['rnn_config']['hidden']
    mymodel = mycnf['model']
    assert mymodel in REC_MODELS
    inpsize = embedding_size
    if 'bilstm' in mymodel:
        for i in range(1):
            xemb = bilstm_layer(inpsize, xemb, hiddensize, i)
            xemb.name = 'bilstm' + str(i) + xemb.name
            inpsize = hiddensize * 2
        fv_len = hiddensize * 2
    elif 'lstm' in mymodel:
        for i in range(1):
            xemb = lstm_layer(embedding_size, xemb, hiddensize, 1)
            embedding_size = hiddensize
            xemb.name = 'lstm' + str(i) + xemb.name
        fv_len = hiddensize
    else:
        # single plain RNN layer
        xemb = rnn_layer(embedding_size, xemb, hiddensize, 1)
        xemb.name = 'rnn0' + xemb.name
        fv_len = hiddensize
    fv = xemb
    # keep only the last time step as the fixed-length feature vector
    fv = debug_print(fv[fv.shape[0] - 1], 'outRec', False)
    return fv, fv_len
def on_rcv_data(self):
    myutils.debug_print(myutils.program_trace, 'UDS::on_rcv_data')
    if self.rcv_timer is not None:
        self.rcv_timer.cancel()
        self.rcv_timer = None
    self.event_sink()
def RequestTransferExit(self):
    myutils.debug_print(myutils.program_trace, "UDS::RequestTransferExit")
    self.cantp.Init()
    self.xmit([0x37])
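# Hedged end-to-end sketch (assumption): the minimal ISO 14229 download
# sequence built from the primitives above; `uds`, `image`, the address and
# the chunk size are hypothetical, and each step should normally wait for
# the ECU's positive response before proceeding.
#
#   uds.RequestDownload(0x08004000, len(image))        # SID 0x34
#   for off in range(0, len(image), 0x800):
#       uds.TransferData(image[off:off + 0x800])       # SID 0x36
#   uds.RequestTransferExit()                          # SID 0x37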