class RNN(object):
    def __init__(self, input_size):
        super(RNN, self).__init__()
        self.encoder = Encoder(input_size)
        self.decoder = Decoder(input_size)
        self.loss = nn.CrossEntropyLoss()
        self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=0.1)
        self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=0.1)

    def train(self, input, target):
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()

        hidden_state = None

        # Encoder
        # hidden_state = self.encoder.first_hidden()
        _, hidden_state = self.encoder.forward(input.long(), hidden_state)

        # Decoder (softmax: batch * 2)
        total_loss, outputs = 0, []
        _, softmax, hidden_state = self.decoder.forward(input, hidden_state)
        total_loss = self.loss(softmax, target.squeeze(1).long())
        total_loss.backward()
        acc_sum = (softmax.argmax(dim=1) == target.squeeze(1)).sum().item()

        self.decoder_optimizer.step()
        self.encoder_optimizer.step()

        return total_loss.item(), outputs, acc_sum
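# A minimal usage sketch for the RNN wrapper above (an assumption, not part of
# the original snippet): `loader` is a hypothetical iterable of (input, target)
# tensor pairs shaped to match the Encoder/Decoder, and 128 is a placeholder
# input size. Only the train() API defined above is used.
model = RNN(input_size=128)
for epoch in range(10):
    epoch_loss, correct = 0.0, 0
    for input, target in loader:
        loss, outputs, acc_sum = model.train(input, target)
        epoch_loss += loss
        correct += acc_sum
    print("epoch {}: loss={:.4f}, correct={}".format(epoch, epoch_loss, correct))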
def __init__(self,
             ddconfig,
             lossconfig,
             n_embed,
             embed_dim,
             ckpt_path=None,
             ignore_keys=[],
             image_key="image",
             colorize_nlabels=None,
             monitor=None,
             ):
    super().__init__()
    self.image_key = image_key
    self.encoder = Encoder(**ddconfig)
    self.decoder = Decoder(**ddconfig)
    self.loss = instantiate_from_config(lossconfig)
    self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25)
    self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
    self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
    if ckpt_path is not None:
        self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
    if colorize_nlabels is not None:
        assert type(colorize_nlabels) == int
        self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
    if monitor is not None:
        self.monitor = monitor
def __init__(self, model_params):
    self.word_indexer = model_params["word_indexer"]
    self.word_embedder = model_params["word_embedder"]
    self.embedding_size = model_params["embedding_size"]
    self.state_size = model_params["state_size"]
    self.mode_size = model_params["mode_size"]
    self.position_size = model_params["position_size"]
    self.ques_attention_size = model_params["ques_attention_size"]
    self.kb_attention_size = model_params["kb_attention_size"]
    self.dis_embedding_dim = model_params["dis_embedding_dim"]
    self.dis_hidden_dim = model_params["dis_hidden_dim"]
    self.max_fact_num = model_params["max_fact_num"]
    self.max_ques_len = model_params["max_ques_len"]
    self.learning_rate = model_params["learning_rate"]
    self.mode_loss_rate = model_params["mode_loss_rate"]
    self.position_loss_rate = model_params["position_loss_rate"]
    self.L2_factor = model_params["L2_factor"]
    self.batch_size = model_params["batch_size"]
    self.adv_batch_size = model_params["adv_batch_size"]
    self.epoch_size = model_params["epoch_size"]
    self.adv_epoch_size = model_params["adv_epoch_size"]
    self.instance_weight = 1.0 / self.batch_size
    self.MAX_LENGTH = model_params["MAX_LENGTH"]
    self.has_trained = False
    self.oracle_samples = []

    ################ Initialize graph components ########################
    self.encoder = Encoder(self.word_indexer.wordCount, self.state_size,
                           self.embedding_size)
    self.decoder = Decoder(output_size=self.word_indexer.wordCount,
                           state_size=self.state_size,
                           embedding_size=self.embedding_size,
                           mode_size=self.mode_size,
                           kb_attention_size=self.kb_attention_size,
                           max_fact_num=self.max_fact_num,
                           ques_attention_size=self.ques_attention_size,
                           max_ques_len=self.max_ques_len,
                           position_size=self.MAX_LENGTH)
    self.positioner = Positioner(self.state_size, self.position_size,
                                 self.MAX_LENGTH)
    self.dis = Discriminator(self.dis_embedding_dim, self.dis_hidden_dim,
                             self.word_indexer.wordCount, self.MAX_LENGTH,
                             gpu=use_cuda)
    if use_cuda:
        self.encoder.cuda()
        self.decoder.cuda()
        self.positioner.cuda()
        self.dis.cuda()
    self.optimizer = optim.Adam(list(self.encoder.parameters()) +
                                list(self.decoder.parameters()) +
                                list(self.positioner.parameters()),
                                lr=self.learning_rate,
                                weight_decay=self.L2_factor)
    self.dis_optimizer = optim.Adagrad(self.dis.parameters())
def samples_generator_c(batch_data_list, X_list, X_list_c, types_list,
                        batch_size, z_dim, y_dim, y_dim_partition, s_dim, tau,
                        normalization_params):
    samples_test = dict.fromkeys(['s', 'z', 'y', 'x'], [])
    test_params = dict()
    X = tf.concat(X_list, 1)
    X_c = tf.concat(X_list_c, 1)

    # Create the proposal of q(s|x^o)
    _, params = Encoder.s_proposal_multinomial_c(X, X_c, batch_size, s_dim,
                                                 tau, reuse=True)
    samples_test['s'] = tf.one_hot(tf.argmax(params, 1), depth=s_dim)

    # Create the proposal of q(z|s,x^o)
    _, params = Encoder.z_proposal_GMM_factorized_c(X_list, X_c,
                                                    samples_test['s'],
                                                    batch_size, z_dim,
                                                    reuse=True)
    samples_test['z'] = params[0]

    # Create deterministic layer y
    samples_test['y'] = tf.layers.dense(
        inputs=samples_test['z'], units=y_dim, activation=None,
        kernel_initializer=tf.random_normal_initializer(stddev=0.05),
        trainable=True, name='layer_h1_', reuse=True)

    grouped_samples_y = Decoder.y_partition(samples_test['y'], types_list,
                                            y_dim_partition)

    # Compute the parameters h_y
    theta = Decoder.theta_estimation_from_y(grouped_samples_y, types_list,
                                            batch_size, reuse=True)

    # Compute loglik and output of the VAE
    log_p_x, samples_test['x'], test_params['x'] = Evaluation.loglik_evaluation(
        batch_data_list, types_list, theta, normalization_params, reuse=True)

    return samples_test, test_params, log_p_x, theta
def __init__(self, DEBUG=False):
    """ Initializer of the QRDetector class. Creates a QR code decoder from ZBar. """
    # The QR code decoder provided by ZBar
    self.dc = Decoder()
    # Set 'True' to show intermediate results
    self.debug = DEBUG
    # The dilation used when searching for candidates of finder patterns.
    # Used in 'self.__find_qr_finder()'
    self.dilation = 3
    # The offset of y when finding finder patterns horizontally.
    # e.g. given a center location y_c and offset_y = 10, the code searches
    # from (y_c - 10) to (y_c + 10).
    # Used in 'self.__locate_qr_horizontally_once()'
    self.offset_y = 10
    # The padding used when cropping the QR code from the source image.
    # Do not set it too large; 0 is best.
    # Used in 'self.__crop_qr_region()'
    self.pad = 0
    # Configuration used in enhancing the cropped QR region.
    # Should be an odd number, like 37; change this value according to the
    # type of QR code.
    # Used in 'self.__enhance_cropped_qr_region()'
    self.length = 37
    # Configuration used in enhancing the cropped QR region.
    # A list of thresholds to test.
    # Used in 'self.__enhance_cropped_qr_region()'
    self.thres = np.arange(0.1, 1, 0.1)
    # Configuration used in enhancing the cropped QR region.
    # Enlarges the binary image to better calculate the grid size
    # (avoids fractional values).
    # Used in 'self.__enhance_cropped_qr_region()'
    self.enlarge_ratio = 50
def _move_window(self):
    self.base_seq_num += 1
    if not self.unacked_queue.empty():
        pkt = Decoder.Packet(self.unacked_queue.get())
        Debug.debug_print("ack and dequeue seq_num:{}".format(pkt.seq_num))
    if not self.unsent_queue.empty():
        sending_pkt_byte = self.unsent_queue.get()
        self.unacked_queue.put(sending_pkt_byte)
        self.udpsocket.sendto(sending_pkt_byte, self.addr)
        packet = Decoder.Packet(sending_pkt_byte)
        Debug.debug_print("send_packet : (s:{}, a:{}) payload:{}".format(
            packet.seq_num, packet.ack_num, packet.payload))
def code(frequency):
    global p_id
    while 1:
        print('started')
        b = NECD.necd()
        i = 0
        pi = pigpio.pi()
        print("______________________________________________PRESS_______________________________________________________________")
        pin = 4
        b.init(pi, pin)
        s = str(pi) + " " + str(pin)
        # print(s)
        b.enable()
        a = b._analyse_ir_pulses(frequency)
        pi.stop()
        # Crude busy-wait delay before checking the result
        while i <= 30000:
            i = i + 1
        if a == 1:
            result = b.result_return()
            print(hex(result))
            p_id = b.return_protcol_id()
            print(p_id)
            return result
class Imprint:
    def __init__(self, bio_tree):
        self.decoder = Decoder()
        self.bio_tree = bio_tree
        self.bio_vector = bio_tree.vectorialize()
        self.encoded_bio_vector = self.decoder.encode_vector(self.bio_vector)

    def get_imprint_vector(self):
        return self.bio_vector

    def get_encoded_imprint_vector(self):
        return self.encoded_bio_vector

    def write_on_file(self, file_name):
        """
        Print imprint on file.

        Parameters
        ----------
        self: Imprint
        """
        path_name = ".." + os.sep + "imprints" + os.sep
        dir = os.path.dirname(path_name)
        try:
            os.stat(dir)
        except OSError:
            os.mkdir(dir)
        with open(path_name + file_name, "w") as f:
            f.write(self.encoded_bio_vector)
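# A hedged usage sketch for Imprint (not from the original source): `BioTree`
# is hypothetical here; any object exposing vectorialize() satisfies the
# constructor above.
tree = BioTree()
imprint = Imprint(tree)
print(imprint.get_imprint_vector())
imprint.write_on_file("sample_imprint.txt")  # written under ../imprints/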
def __init__(self, input_size, word_vec_dim, hidden_size, output_size,
             n_layers=4, dropout_p=.2):
    self.input_size = input_size
    self.word_vec_dim = word_vec_dim
    self.hidden_size = hidden_size
    self.output_size = output_size
    self.n_layers = n_layers
    self.dropout_p = dropout_p

    super(Seq2Seq, self).__init__()

    self.emb_src = nn.Embedding(input_size, word_vec_dim)
    self.emb_dec = nn.Embedding(output_size, word_vec_dim)
    self.encoder = Encoder(word_vec_dim, hidden_size,
                           n_layers=n_layers, dropout_p=dropout_p)
    self.decoder = Decoder(word_vec_dim, hidden_size,
                           n_layers=n_layers, dropout_p=dropout_p)
    self.attn = Attention(hidden_size)
    self.concat = nn.Linear(hidden_size * 2, hidden_size)
    self.tanh = nn.Tanh()
    self.generator = Generator(hidden_size, output_size)
def fbCheck(self):
    from Decoder import *
    dcd = Decoder("ASEntryExample.xml").dictionary
    from HandleDict import HandleDictionary
    astr = HandleDictionary(dcd)
    v = astr.findall("platforms")
def __init__(self, num_layers, num_heads, d_model, dense_dim, in_vocab_size,
             tar_vocab_size, input_max_position, target_max_position, rate=0.1):
    super().__init__()
    self.encoder = Encoder(num_layers, num_heads, d_model, dense_dim,
                           in_vocab_size,
                           max_encoding_position=input_max_position,
                           dropout=rate)
    self.decoder = Decoder(num_layers, num_heads, d_model, dense_dim,
                           tar_vocab_size,
                           max_encoding_position=target_max_position,
                           dropout=rate)
    self.dense = tf.keras.layers.Dense(tar_vocab_size)
def decode_image_command():
    img = dco.decode_image(var_1.get())
    picsize = 400, 400
    img.thumbnail(picsize, Image.ANTIALIAS)
    im = ImageTk.PhotoImage(img)
    panel.configure(image=im)
    panel.image = im  # keep a reference so the image is not garbage-collected
def verify(self):
    # Attempts to verify the piece against its hash value; returns the boolean
    # result and sets self.verified accordingly.
    # All blocks must have been downloaded before the piece can be verified.
    if not self.downloaded:
        self.verified = False
        return False
    # Check against the SHA1 hash.
    hash_of_data = Decoder.create_hash(self.extract_data())
    # Decoder.print_escaped_hex(hash_of_data, True)
    # Decoder.print_escaped_hex(self.hash, True)
    # print "checking piece, data: " + str(self.extract_data())
    # print "size: " + str(len(self.extract_data()))
    # print "hash : " + str(hash_of_data)
    # print "hash in list: " + str(self.hash)
    if hash_of_data == self.hash:
        self.verified = True
        return True
    else:
        self.verified = False
        return False
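# Decoder.create_hash is not shown in this snippet; per the comment above it
# computes a SHA-1 digest. A minimal stand-in, assuming pieces are hashed as
# raw 20-byte SHA-1 digests (as in the BitTorrent metainfo format):
import hashlib

def create_hash(data):
    # Raw SHA-1 digest of the given bytes.
    return hashlib.sha1(data).digest()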
def enviar_datos(self, intento=0):
    if intento < 10:
        try:
            # Check that the server exists
            remote_ip = socket.gethostbyname(self.__SERVER_NAME)
            # Connect to the server
            self.ssl_sock.connect((remote_ip, self.__PORT))
            # Serialize and send
            self.ssl_sock.send(json.dumps(self.datos_enviar))
            # Receive the server's response and return it
            result = Decoder.Decoder(self.ssl_sock.recv(8192))
            ##### BETA FUNCTION
            # result = Decoder.Decoder(self._recv_timeout(self.ssl_sock))
            # Success: close the socket and return the decoded response
            self.ssl_sock.close()
            return result.decode_json()
        except socket.error as msg:
            # A socket error occurred
            print msg, " -- attempt number", intento
            # Close the socket and create a new one
            self.ssl_sock.close()
            self.__init__(self.datos_enviar)
            intento += 1
            time.sleep(5)
            return self.enviar_datos(intento)
    else:
        # After 10 attempts the send was not possible;
        # return None to signal the error.
        print "Sending over the socket exceeded the number of attempts"
        self.ssl_sock.close()
        return None
def __init__(self, enc_unit, dec_unit, batch_size, horizon_size, dropout_rate):
    super(Seq2Seq, self).__init__()
    self.batch_size = batch_size
    self.encoder = Encoder(enc_unit, batch_size, horizon_size + 1, dropout_rate)
    self.decoder = Decoder(dec_unit, batch_size, horizon_size + 1, dropout_rate)
def __init__(self, image_embed_size, word_embed_size, rnn_hidden_size,
             num_rnn_steps, vocab_size, latent_size, cluster_embed_size):
    self.encoder = Encoder(image_embed_size, word_embed_size, rnn_hidden_size,
                           num_rnn_steps, latent_size, vocab_size,
                           cluster_embed_size)
    self.decoder = Decoder(image_embed_size, latent_size, word_embed_size,
                           rnn_hidden_size, num_rnn_steps, vocab_size,
                           cluster_embed_size)
    self.image_embed_size = image_embed_size
    self.latent_size = latent_size
    self.word_embed_size = word_embed_size
    self.rnn_hidden_size = rnn_hidden_size
    self.num_rnn_steps = num_rnn_steps
    self.vocab_size = vocab_size
    self.cluster_embed_size = cluster_embed_size
def sequential():
    de = Decoder(mlen, numDel, numChecker, lengthExtension)
    ttime = 0
    count = 0
    for num in orange:
        for dels in irange:
            # Convert the integer num to its binary string representation.
            orgdata = Encoder.genMsg(num, mlen)
            # Pop one or more bits out to create a deleted sequence.
            deldata = Encoder.pop(orgdata, dels)
            # Compute the parity integers based on the original sequence
            # (encoder's end).
            parity = en.paritize(orgdata)
            print(parity)
            t1 = time.time()
            r = de.decode(deldata, parity)
            ttime += (time.time() - t1)
            record(orgdata, r, True)
            count += 1
    print('Average time: ', ttime / count * 1000, 'ms')
    print('Failure rate:', (stat['f'] + stat['u']) / count * 100, '%')
def __init__(self, enc_num_layers, dec_num_layers, d_model, vocab_size,
             enc_num_heads, dec_num_heads, enc_dff, dec_dff,
             enc_used_rnn=False, sos_id=0, eos_id=1,
             max_enc_length=1200, max_dec_length=48,
             enc_rate=0.1, dec_rate=0.1):
    super(Seq2Seq, self).__init__()
    self.enc_num_layers = enc_num_layers
    self.dec_num_layers = dec_num_layers
    self.d_model = d_model
    self.vocab_size = vocab_size
    self.enc_num_heads = enc_num_heads
    self.dec_num_heads = dec_num_heads
    self.enc_dff = enc_dff
    self.dec_dff = dec_dff
    self.enc_used_rnn = enc_used_rnn
    self.sos_id = sos_id
    self.eos_id = eos_id
    self.max_enc_length = max_enc_length
    self.max_dec_length = max_dec_length
    self.enc_rate = enc_rate
    self.dec_rate = dec_rate

    self.encoder = Encoder(num_layers=self.enc_num_layers,
                           d_model=self.d_model,
                           num_heads=self.enc_num_heads,
                           dff=self.enc_dff,
                           rate=self.enc_rate,
                           used_rnn=self.enc_used_rnn,
                           max_width=self.max_enc_length)
    self.decoder = Decoder(num_layers=self.dec_num_layers,
                           vocab_size=self.vocab_size,
                           d_model=self.d_model,
                           num_heads=self.dec_num_heads,
                           dff=self.dec_dff,
                           sos_id=self.sos_id,
                           eos_id=self.eos_id,
                           max_length=self.max_dec_length,
                           rate=self.dec_rate)
    self.final_layer = tf.keras.layers.Dense(self.vocab_size)
def send_packet_fromQueue(self):
    if self.unacked_queue.empty() and (not self.retransmit_on):
        while (not self.unacked_queue.full()) and (not self.unsent_queue.empty()):
            sending_pkt_byte = self.unsent_queue.get()
            self.unacked_queue.put(sending_pkt_byte)
            self.udpsocket.sendto(sending_pkt_byte, self.addr)
            packet = Decoder.Packet(sending_pkt_byte)
            Debug.debug_print(
                "send_packet : (s:{}, a:{}) payload:{}".format(
                    packet.seq_num, packet.ack_num, packet.payload))
def __init__(self, model_params):
    self.word_indexer = model_params["word_indexer"]
    self.embedding_size = model_params["embedding_size"]
    self.state_size = model_params["state_size"]
    self.mode_size = model_params["mode_size"]
    self.ques_attention_size = model_params["ques_attention_size"]
    self.kb_attention_size = model_params["kb_attention_size"]
    self.max_fact_num = model_params["max_fact_num"]
    self.max_ques_len = model_params["max_ques_len"]
    self.learning_rate = model_params["learning_rate"]
    self.mode_loss_rate = model_params["mode_loss_rate"]
    self.L2_factor = model_params["L2_factor"]
    self.batch_size = model_params["batch_size"]
    self.epoch_size = model_params["epoch_size"]
    self.instance_weight = 1.0 / self.batch_size
    self.MAX_LENGTH = model_params["MAX_LENGTH"]
    self.has_trained = False

    ################ Initialize graph components ########################
    self.embedding = nn.Embedding(self.word_indexer.wordCount,
                                  self.embedding_size)
    self.encoder = Encoder(self.word_indexer.wordCount, self.state_size,
                           self.embedding)
    self.decoder = Decoder(output_size=self.word_indexer.wordCount,
                           state_size=self.state_size,
                           embedding=self.embedding,
                           mode_size=self.mode_size,
                           kb_attention_size=self.kb_attention_size,
                           max_fact_num=self.max_fact_num,
                           ques_attention_size=self.ques_attention_size,
                           max_ques_len=self.max_ques_len)
    if use_cuda:
        self.encoder.cuda()
        self.decoder.cuda()
    self.optimizer = optim.Adam(list(self.encoder.parameters()) +
                                list(self.decoder.parameters()),
                                lr=self.learning_rate,
                                weight_decay=self.L2_factor)
def __init__(self, vocab_size=1000, embedding_dim=96, SOS_ID=0, EOS_ID=1,
             dec_units=128, enc_units=128, attention_name='luong',
             attention_type=0, rnn_type='gru', max_length=10,
             teacher_forcing_ratio=0.5):
    super(Seq2Seq, self).__init__()
    self.encoder = ResNet18(enc_units)
    self.decoder = Decoder(vocab_size, embedding_dim, SOS_ID, EOS_ID,
                           dec_units, enc_units, attention_name,
                           attention_type, rnn_type, max_length)
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.SOS_ID = SOS_ID
    self.EOS_ID = EOS_ID
    self.loss_value = 0.0
    self.dec_units = dec_units
    self.enc_units = enc_units
    self.attention_name = attention_name
    self.attention_type = attention_type
    self.rnn_type = rnn_type
    self.max_length = max_length
    self.teacher_forcing_ratio = teacher_forcing_ratio
def __init__(self, opt, device):
    super(Basemodel, self).__init__()
    if opt.TPS:
        self.TPS = Trans.TPS_SpatialTransformerNetwork(
            F=opt.num_fiducial,
            i_size=(opt.img_h, opt.img_w),
            i_r_size=(opt.img_h, opt.img_w),
            i_channel_num=3,  # input channel
            device=device)
    self.encoder = Encoder.Resnet_encoder(opt)
    self.decoder = Decoder.Decoder(opt, device)
    self.generator = GlyphGen.Generator(opt, device)
def classify(lnas, model, exppath):
    """Label lna states as speech/non-speech, outputs in .exp files.

    Basically replaces the old `test_token_pass` binary.
    """
    hmms = model + '.ph'
    lexicon = './vad_models/sp_nsp.lex'
    ngram = './vad_models/malli.bin'
    t = Decoder.Toolbox(hmms, None)

    # Some parameters have been disassembled from the compiled binary
    # `test_token_pass` that was in the old Voice Activity Detection software.
    t.set_lm_lookahead(0)
    t.set_require_sentence_end(False)  # True in `test_token_pass`
    t.set_optional_short_silence(True)
    t.set_cross_word_triphones(False)

    global_beam = 120.0
    t.set_global_beam(int(global_beam))  # 320 in `test_token_pass`
    t.set_word_end_beam(int(2 * global_beam / 3))  # 200 in `test_token_pass`
    # t.set_word_end_beam(100)  # 200 in `test_token_pass`
    t.set_token_limit(30000)
    t.set_prune_similar(2)

    t.set_word_boundary('<w>')
    t.set_print_text_result(True)
    t.set_print_state_segmentation(False)
    t.set_print_probs(False)

    # These 3 from tokenpass.pm
    t.set_transition_scale(2)  # 1 in `test_token_pass`
    t.set_lm_scale(10)  # 10 in `test_token_pass`
    t.set_insertion_penalty(1)  # 1 in `test_token_pass`

    # Put > 1 to print run frame, > 0 to print text_result
    t.set_verbose(2)
    t.set_duration_scale(3)

    # Load our models
    t.lex_read(lexicon)
    # t.set_sentence_boundary("<s>", "</s>")  # Fails
    t.ngram_read(ngram, True)

    # t.run() prints to stdout, redirecting to a temporary file
    for lna in lnas:
        stdout_buffer = mkstemp()[1]
        with open(stdout_buffer, 'w') as f:
            with redirect_stdout(f.fileno()):
                t.lna_open(lna, 1024)
                t.reset(0)
                t.set_end(-1)
                while t.run():
                    pass
        with open(stdout_buffer, 'r') as f:
            stdo_buffer = f.read()
        with open(_lna2exp(exppath, lna), 'w') as f:
            f.write(_clean_decoder_output(stdo_buffer))
def write_result(dest_fileName, type):
    result = []
    for row in range(sample):
        temp = [0, 0, 0, 0]
        for j in range(test):
            packet = gene.packing(length)
            if type == 0:
                coded = gene.code_hamming(packet)
            elif type == 1:
                coded = gene.multiple_bit(multi, packet)
            elif type == 2:
                coded = gene.code_crc(packet)
            # Flip bits at random to simulate channel noise
            for i in range(len(coded)):
                if random.random() < probability:
                    coded[i] = int(not coded[i])
            if type == 0:
                decoded, fixed = deco.decode_hamming(coded)
            elif type == 1:
                decoded, fixed = deco.decodeMulti(coded, multi)
            elif type == 2:
                decoded, fixed = deco.decodeCrc(coded)
            # Now assess how many packets were transmitted without errors, how
            # many had errors that could be repaired, AND how many undetected
            # errors occurred.
            if fixed:
                # The decoder performed a repair. Check whether it repaired
                # correctly by comparing the packet before and after transmission.
                if packet == decoded:
                    temp[1] = temp[1] + 1
                else:
                    temp[2] = temp[2] + 1
            else:
                # No repair occurred, but an undetected error may have.
                if packet == decoded:
                    temp[0] = temp[0] + 1
                else:
                    temp[3] = temp[3] + 1
        result.append(temp)
    handler.handle(sheetNames, columnname, sample, result, dest_fileName)
def __init__(self, config):
    super(Data2Text, self).__init__()
    self.config = config
    self.encoder = Encoder.Encoder(config)
    self.decoder = Decoder.Decoder(config)

    def weights_init(m):
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.normal_(m.weight.data, mean=0., std=0.08)
            if m.bias is not None:
                torch.nn.init.normal_(m.bias.data, mean=0., std=0.08)
        elif isinstance(m, torch.nn.LSTM):
            torch.nn.init.normal_(m.all_weights[0][0], mean=0., std=0.08)
            torch.nn.init.normal_(m.all_weights[0][1], mean=0., std=0.08)
            torch.nn.init.normal_(m.all_weights[1][0], mean=0., std=0.08)
            torch.nn.init.normal_(m.all_weights[1][1], mean=0., std=0.08)

    self.apply(weights_init)
class Seq2Seq:
    def __init__(self, nb_features, nb_encoder_cell, nb_decoder_cell,
                 learning_rate=1e-2):
        # Batch, step, features
        self.features = tf.placeholder(tf.float32, [None, None, nb_features])
        batch_shape = tf.shape(self.features)
        self.batch_size = batch_shape[0]
        self.nb_step = batch_shape[1]
        self.nb_features = nb_features

        with tf.variable_scope("Encoder"):
            self.encoder = Encoder(self.features, nb_encoder_cell)
        with tf.variable_scope("decoder"):
            self.decoder = Decoder(batch_shape, nb_features,
                                   self.encoder.last_state, nb_decoder_cell)

        self._create_learning_tensors(learning_rate)

    def _create_learning_tensors(self, learning_rate):
        loss = tf.losses.mean_squared_error(self.features, self.decoder.outputs)
        self.loss = tf.reduce_mean(loss)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        self.training_op = optimizer.minimize(self.loss)

        features_argmax = tf.argmax(self.features, axis=2)
        predi_argmax = tf.argmax(self.decoder.outputs, axis=2)
        equality = tf.equal(predi_argmax, features_argmax)
        self.accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))

    # Activate the encoder / decoder with the given batch
    def forward(self, session, batch):
        feed_dict = {self.features: batch}
        return self.decoder.forward(session, feed_dict)

    def get_embedding(self, session, batch):
        return self.encoder.get_sequence_embedding(session, batch)

    # Execute a train step
    def train(self, session, batch, initial_states=None):
        feed_dict = {self.features: batch}
        loss, accuracy, _ = session.run(
            [self.loss, self.accuracy, self.training_op], feed_dict=feed_dict)
        return loss, accuracy
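# A usage sketch for the TF1-style Seq2Seq class above. `batch` (a float array
# shaped [batch, steps, nb_features]) and the dimension values are assumptions;
# only the class's own train() method is used.
model = Seq2Seq(nb_features=40, nb_encoder_cell=128, nb_decoder_cell=128)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        loss, accuracy = model.train(sess, batch)
        if step % 100 == 0:
            print("step %d: loss=%.4f acc=%.3f" % (step, loss, accuracy))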
def executeProgram(memory_dictionary):
    for i in range(len(memory_dictionary)):
        if Decoder.decoderInstruction(
                Memory.loadInstruction('M[' + str(programCounter) + ']'),
                programCounter):
            Memory.flush()
            Register.flush()
            Cache.flush()
            print('Cache Miss')
            print(Cache.cache_miss)
            print('Cache Hit')
            print(Cache.cache_hit)
            print('Program was finished!')
            return
        if debug:
            Memory.flush()
            Register.flush()
            Cache.flush()
        incrementProgramCounter()
def evaluate(dataset, embedding_matrix, pro_dic):
    # Restore the trained encoder/decoder from the latest checkpoint.
    optimizer = tf.train.AdamOptimizer(0.001)
    encoder = Encoder(embedding_matrix)
    decoder = Decoder()
    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder,
                                     decoder=decoder)
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

    preds, targets, binary_preds = list(), list(), list()
    X = encoder(pro_dic)
    cos_X = cosine(X, X)
    for (batch, data) in enumerate(dataset):
        hatt = tf.zeros([BATCH_SIZE, len(pro_dic), LSTM_UNITS])
        data_t = tf.expand_dims(data[:, 0, :], 1)
        for t in range(1, data.shape[1]):
            xt = data_forstu(data_t, X)
            xt = tf.expand_dims(xt, 1)
            prediction, hatt = decoder(xt, data_t, hatt, X, cos_X)
            data_t = tf.expand_dims(data[:, t, :], 1)
            pred, binary_pred, target_correctness = cal_pre(prediction, data_t)
            preds.append(pred)
            binary_preds.append(binary_pred)
            targets.append(target_correctness)
    preds = np.concatenate(preds)
    binary_preds = np.concatenate(binary_preds)
    targets = np.concatenate(targets)
    auc_value = roc_auc_score(targets, preds)
    accuracy = accuracy_score(targets, binary_preds)
    precision, recall, f_score, _ = precision_recall_fscore_support(
        targets, binary_preds)
    print("\n auc={0}, accuracy={1}, precision={2}, recall={3}".format(
        auc_value, accuracy, precision, recall))
def decipher():
    global potentialEmails
    x = 0
    for mail in potentialEmails:
        print(str(x) + ". " + mail['from'] + mail["Date"] + "\n")
        x += 1
    selector = input(
        "input the number associated with the message you wish to decode:")
    if int(selector) < 0 or int(selector) >= len(potentialEmails):
        print("\n ∆˚Whoa there Bucko thats not on the list!˚∆")
        return ''
    print("")
    print("Message:")
    selected = potentialEmails[int(selector)]
    body = str(selected).split("!start!")
    message = body[1].split("!end!")
    decodedMessage = Decoder.decode(message[0])
    potentialEmails = []
    return decodedMessage
def handle(self):
    try:
        reciv = Decoder.Decoder(self.request.recv(4096).strip())
        data = reciv.decode_json()
        # Dictionary dispatch: select the operation to perform.
        operaciones = {
            'iniciar_sesion': self.iniciar_sesion,
            'obtener_todos_los_propietarios': self.obtener_todos_los_propietarios,
            'obtener_grupos': self.obtener_grupos,
            'obtener_alumnos': self.obtener_alumnos,
            'obtener_scripts': self.obtener_scripts,
            'obtener_tags': self.obtener_tags,
            'obtener_scripts_disponibles': self.obtener_scripts_disponibles,
            'obtener_tags_disponibles': self.obtener_tags_disponibles,
            'obtener_tags_usuario': self.obtener_tags_usuario,
            'obtener_scripts_tag': self.obtener_scripts_tag,
            'obtener_scripts_no_en_tag': self.obtener_scripts_no_en_tag,
            'borrar_grupo': self.borrar_grupo,
            'cambiar_nombre': self.cambiar_nombre,
            'crear_tag_usuario': self.crear_tag_usuario,
            'crear_grupo': self.crear_grupo,
            'aplicar_cambios': self.aplicar_cambios,
            'eliminar_tag_usuario': self.eliminar_tag_usuario,
            'modificar_tag': self.modificar_tag,
            'obtener_historial': self.obtener_historial,
        }
        print "Dispatching to handler %s" % data[0]['metodo']
        # operaciones[selection](input data for the chosen method)
        resultado_operacion = operaciones[data[0]['metodo']](data)
        # Return the result to the client
        self.request.sendall(json.dumps(resultado_operacion))
        # send some 'ok' back
        # self.request.sendall(json.dumps({'return': 'ok'}))
    except Exception as e:
        print "Exception while receiving the client's message: ", e
        self.request.sendall(json.dumps({'return': 'fail'}))
import Decoder

# MAIN SECTION OF CODE
Decoder.setup()
cont = 1
Decoder.MenuFxCheck()
Decoder.MainMenu()
number = ""
while cont == 1:
    number = raw_input(
        "\nSelect a mode (00 - 0A), \n\tSend a code with <SEND XXXX>,"
        "\n\tRPM for engine RPM,\n\tSPEED for the vehicle speed,"
        "\n\tVIN to display Vehicle Identification Number,"
        "\n\tCMV for Control Module Voltage,"
        "\n\tTSES for Time Since Engine Start,"
        "\n\tDTWM for Distance Traveled with MIL,"
        "\n\tNWSCC for Number of Warmups Since Codes Cleared,"
        "\n\tDTSCC for Distance Traveled Since Codes Cleared,"
        "\n\tTRWM for Time run with MIL,"
        "\n\tTSTCC for Time Since Trouble Codes Cleared,"
        "\n\tMENU to display options or \n\tEXIT to exit program\n")
    if number == "01":
        print(Decoder.Menu01())
    elif number == "02":
        print(Decoder.Menu02())
    elif number == "03":
        print(Decoder.Menu03())
    elif number == "04":
        print(Decoder.Menu04())
    elif number == "05":
        print(Decoder.Menu05())
    elif number == "06":
        print(Decoder.Menu06())
    elif number == "07":
        print(Decoder.Menu07())
    elif number == "08":
        print(Decoder.Menu08())
def startRefresh():
    while 1:
        speed.set(Decoder.getSpeed())
        rpm.set(Decoder.getRPM())
        sleep(0.5)

root = Tk()

global Response
global speed
global rpm
Response = StringVar()
speed = StringVar()
rpm = StringVar()

Decoder.setup()

startbutton = Button(root, text='Start', command=clickMethod).grid(
    row=0, column=0, columnspan=6)
Label(root, text="Speed:").grid(row=1, column=0)
Label(root, textvariable=speed).grid(row=1, column=1)
Label(root, text="km/h").grid(row=1, column=2)
Label(root, text="Engine RPM:").grid(row=1, column=3)
Label(root, textvariable=rpm).grid(row=1, column=4)
Label(root, text="rpm").grid(row=1, column=5)
Label(root, text="PID Requested:").grid(row=2)
Label(root, text="Response:").grid(row=3)
L3 = Label(root, textvariable=Response).grid(row=3, column=1, columnspan=5)

global e1
print 'file length (bytes): ' + str(len(file_content))
file_length = len(file_content)
pSize = 100
BLOCK_SIZE = 4

# create file data
file_data = {}
file_data['multi_file'] = False
file_data['file_name'] = file_name
file_data['length'] = len(file_content)
file_data['piece_length'] = pSize

# create the hash_list
hash_list = []
x = 0
while x < file_length:
    hash_list.extend(Decoder.create_hash(file_content[x:x + pSize]))
    x = x + pSize
hash_list = ''.join(hash_list)
file_data['pieces_hash'] = hash_list

num_of_pieces = int(math.ceil(float(file_length) / pSize))
print 'Number of pieces: ' + str(num_of_pieces)
print 'length of hash_list (length*pSize): ' + str(len(hash_list))

# now start up the PieceManager
pm = PieceManager(file_data)
print "starting Piecemanager...."
def button_play_code_click(self):
    Decoder.play(self.vars)