def debug(self, data, turn=3, sample_num=10):
    """Run a short self-play loop between the two agents for inspection.

    For each training sample (batch size must be 1), alternates the two
    agents for `turn` turns, logging the decoded input and output text of
    every turn.  Stops after `sample_num` samples.

    NOTE(review): assumes self.agent holds exactly two agents selected by
    turn parity — confirm.
    """
    logger.info("Debugging ...")
    for i, batch in enumerate(data.train_iter):
        assert len(batch) == 1, 'batch size must be 1 in debugging'
        input_message = batch.hist1
        history1 = input_message[0]
        logger.info(" sample %d ", (i + 1))
        for t in range(turn):
            # alternate between the two agents by turn parity
            agent = self.agent[(t % 2)]
            logits_matrix = agent.generate(input_message)
            # greedy decode: argmax over the vocabulary dimension
            _, decoder_out = logits_matrix.max(dim=2)
            logger.info(
                "[turn %d] IN : " + " ".join(reverse(input_message[0], data.vocab)),
                (t + 1))
            logger.info(
                "[turn %d] OUT: " + " ".join(reverse(decoder_out, data.vocab)),
                (t + 1))
            history2 = decoder_out
            # next input is the concatenation of the last two utterances
            input_message = torch.cat([history1, history2], dim=1)  # TODO concat without padding
            input_message = [
                input_message,
                torch.LongTensor([input_message.size(1)])
            ]  # TODO need to pack with batch
            history1 = history2
        if (i + 1) == sample_num:
            break
def main():
    # Demo driver (Python 2): exercise each utils helper on the
    # module-level sample sentences, printing a blank line between
    # results for readability.
    print utils.get_max_vowels(first_sentence)
    print
    print utils.get_max_len(first_sentence)
    print
    print utils.reverse(second_sentence)
    print
    print utils.get_info_for_obj(os)
    print
    print utils.get_info_for_obj(sys)
    print
    print utils.get_pseudo_sum(124)
    print
    print utils.get_primes(10000)
def resolve_target(_target, _indent=0):
    """Translate one BPMN element into generated Python source lines.

    Dispatches on the element tag: exclusive gateways become if/else
    blocks, looping tasks become for/while blocks, and plain tasks are
    emitted as code lines.  Appends generated lines to the shared
    `_current` buffer and advances the module-level `target` cursor.
    """
    global target, _import_section, _current
    outgoings = get_outgoings(_target)
    if _target.tag == EXCLUSIVEGATEWAY:
        # gateway -> "if <condition>:"; the 'Y' branch is taken first
        _current['lines'].append("%sif %s:" % (INDENT * _indent, name(_target)))
        flows = [by_id(outgoings[0].text), by_id(outgoings[1].text)]
        if 'Y' not in name(flows[1]):
            # reverse() is assumed to swap the two flows in place — verify
            reverse(flows)
        next_node = el_from_flow(flows[1])
        # the join element where both branches converge
        endif = get_my_parent(
            _target,
            [by_id(targetRef(flows[0])), by_id(targetRef(flows[1]))])
        target = endif
        iterate_from_node(next_node, endif, _indent)
        if el_from_flow(flows[0]) != endif:
            _current['lines'].append('%selse:' % (INDENT * _indent))
            iterate_from_node(el_from_flow(flows[0]), endif, _indent)
    elif _target.tag == TASK and is_loop(_target):
        _name = name(_target)
        # "for x in y"-shaped names keep their own header; anything else
        # becomes a while loop
        tmplt = '%s%s:' % (
            INDENT * _indent, _name
        ) if "for " in _name and " in " in _name else "%swhile %s:" % (
            INDENT * _indent, _name)
        _current['lines'].append(tmplt)
        el0 = el_from_flow_by_id(outgoings[0].text)
        el1 = el_from_flow_by_id(
            outgoings[1].text) if len(outgoings) > 1 else None
        if len(outgoings) == 2:
            loop0 = is_loop(el0)
            loop1 = is_loop(el1)
            if loop0 or loop1:
                # recurse into whichever branch loops back; continue after
                # the loop on the other one
                iterate_from_node(el0 if loop0 else el1, _target, _indent + 1)
                target = el1 if loop0 else el0
            else:
                out1 = find_friend(_target, el0)
                iterate_from_node(el0 if out1 else el1, _target, _indent + 1)
                target = el1 if out1 else el0
        elif len(outgoings) == 1:
            iterate_from_node(el0, _target, _indent + 1)
    else:
        # plain task: emit each line of its name, resolving imports first
        _name_text = name(_target)
        for _text_line in _name_text.split('\n'):
            _text = search_for_imports(_text_line, _import_section)
            if _text:
                _current['lines'].append('%s%s' % (INDENT * _indent, _text))
    target = get_target(_target)
def edit_character(self, item):
    """Open the character-edit view for *item*'s instance, if it has one.

    Items without an `instance` attribute are ignored.
    """
    try:
        instance = item.instance
    except AttributeError:
        # Original code had a typo here ("intance = None"), which left
        # `instance` unbound and raised NameError on the next line
        # whenever the item had no `instance` attribute.
        instance = None
    if instance is not None:
        self.parent.central_widget = reverse('character.create',
                                             instance=instance,
                                             parent=self.parent)
def getOriginal(self, outputfile=""):
    """Write a copy of the source JPEG with the stored original comment restored.

    Copies the source file up to the old COM segment, writes the original
    comment (if any) as a fresh COM segment, then copies the remainder of
    the file after the old comment.  (Python 2 file handling.)
    """
    orig = self.getMetaData().GetOriginal()
    input = open(self.filename, "rb")
    # measure total file length by seeking to the end
    input.seek(0, 2)
    input_length = input.tell()
    input.seek(0)
    print input.tell()
    com_start = input_length
    output = open(outputfile, "wb")
    # locate the COM marker segment and the start of the following segment
    for item in self.items:
        if item[1] == "COM":
            com_start = item[0]
            index = self.items.index(item)
            com_end = self.items[index + 1][0]
            input.seek(0)
            break
    # write until old comment
    output.write(input.read(com_start))
    if len(orig) > 0:
        # 0xFFFE is the JPEG COM marker; NOTE(review): reverse(ol16(...))
        # looks like a byte-order swap of the 16-bit length — confirm.
        output.write("\xFF\xFE")
        output.write(reverse(ol16(len(orig))))
        output.write(orig)
        input.seek(com_end)
    # write file footer
    while input.tell() < input_length:
        output.write(input.read(1))
    output.close()
    input.close()
def evaluate(self, data_type, epoch=None):
    """Run inference over a data split, log metrics, and dump outputs to file.

    data_type: split name ('train'/'valid'/'test'); selects `<split>_iter`.
    Returns the metrics dict.

    NOTE(review): compute_metrics is called after the loop using only the
    LAST batch's `reference`/`summarized` — confirm this is intended.
    """
    data_iter = getattr(self.data, '{}_iter'.format(data_type))
    write_list = []
    for batch in data_iter:
        originals = []
        # decode every input field of the batch back to text
        for field in batch.input_fields:
            originals.append(reverse(getattr(batch, field)[0], self.data.vocab))
        summarized = self.model.inference(batch)
        summarized = reverse(summarized, self.data.vocab)
        reference = reverse(batch.summ[0], self.data.vocab)
        write_list.append(zip(zip(*originals), summarized, reference))
    metrics_dict = self.evaluator.compute_metrics([reference], summarized)
    msg = 'quantitative results from {} data'.format(data_type) + '\n' +\
        str(metrics_dict)
    logger.info(msg)
    write_to_file(write_list, msg, data_type, epoch)
    return metrics_dict
def evaluate(self):
    """Qualitatively inspect one random validation batch.

    Picks one batch at random, generates transformed text with the model,
    and prints five random (original, changed) sentence pairs.
    """
    self.data.valid_iter.shuffle = True
    import random
    # choose one random batch index to inspect
    a = random.randint(0, len(self.data.valid_iter))  # temp test
    for i, batch in enumerate(self.data.valid_iter):
        if i != a:
            continue
        (x, lengths), l, l_ = prepare_batch(batch)
        generated = self.model((x, lengths), l, l_, is_gen=True)
        print('=' * 50)
        print('original \t\t -> \t\t changed')
        # sample 5 random examples from the batch
        for idx in random.sample(range(lengths.size(0)), 5):
            ori = reverse(x, self.data.vocab)[idx]
            chg = reverse(generated[0], self.data.vocab)[idx]
            print(' '.join(ori))
            print('\t\t->', ' '.join(chg))
        print('=' * 50)
        return
def test_lychrel(num):
    """Return True if *num* looks like a Lychrel number.

    A number is treated as Lychrel when 50 rounds of reverse-and-add
    never yield a palindrome.
    """
    candidate = num
    for _ in range(50):
        candidate += utils.reverse(candidate)
        if utils.palindrome(candidate):
            return False
    return True
def feed_links(self, obj):
    """Return Atom link entries ('alternate' and 'self') for a tag feed.

    NOTE(review): `'%s_feed' % type` interpolates the BUILTIN `type`
    unless a module-level variable named `type` shadows it elsewhere in
    the file — verify the intended feed-name variable.
    """
    return ({
        'rel': u'alternate',
        'href': self.feed_id(obj)
    }, {
        'rel': u'self',
        'href': link(reverse('%s_feed' % type, '/'.join(['tag', obj.name])))
    })
def is_reversible(n):
    """Return True if n + reverse(n) consists entirely of odd digits.

    Numbers whose value (or reversal) ends in 0 are rejected outright.
    """
    if n % 10 == 0:
        return False
    r = reverse(n)
    if r % 10 == 0:
        return False
    # BUG FIX: the original read `r + n & 1 == 0`, which Python parses as
    # `(r + n) & (1 == 0)` — always 0, hence always falsy — so the even-sum
    # shortcut never fired.  The intended test is whether the sum is even
    # (an even sum ends in an even digit and cannot be all-odd).
    if (r + n) & 1 == 0:
        return False
    return is_all_odd(n + r)
def evaluate(self, data_type):
    """Evaluate the paraphrase model on the given data split.

    Prints a handful of sample paraphrases, then computes and returns the
    quantitative metrics dict over the whole split.
    """
    data_iter = getattr(self.data, '{}_iter'.format(data_type))
    paraphrased, original, reference = [], [], []
    # TODO: valid batch size
    for idx, batch in enumerate(data_iter):
        para = self.model.inference(batch.orig)
        paraphrased += reverse(para, self.data.vocab)
        original += reverse(batch.orig[0], self.data.vocab)
        reference += reverse(batch.para[0], self.data.vocab)
    # for qualitative evaluation
    print('sample paraphrases in {} data'.format(data_type))
    for _ in range(data_iter.batch_size):
        # NOTE(review): the upper bound is len(data_iter) — the number of
        # BATCHES, not the number of collected sentences; this may sample
        # only a small prefix or raise IndexError if batches > sentences.
        # Verify against the iterator's semantics.
        random_idx = random.randint(0, len(data_iter))
        print(original[random_idx], '\t => \t', paraphrased[random_idx])
        print('\t\t\t reference: ', reference[random_idx])
    # for quantitative evaluation
    metrics_dict = self.evaluator.compute_metrics([reference], paraphrased)
    print('quantitative results from {} data'.format(data_type))
    print(metrics_dict)
    return metrics_dict
def forward(self, x): x1, x2 = x # pad to outlength-1 # padding is added the highest digit # ones digit, ..., last digit, <padding> assert x1.size(1) == self.outlength assert x2.size(1) == self.outlength # # now reverse, such that the ones digit is seen last # now concatenate x = torch.cat((x1, x2), dim=2) # now input to network b, t, d = x.size() rnn_hid = self.init_hidden(b) # (nlayers, b, h) encoder_out, encoder_hid = self.encoder(x, rnn_hid) # encoder_out (b, t, h), encoder_hid (nlayers, b, h) padded_encoder_out = self.pad(encoder_out[:, -1]) # padded_encoder_out (b, outlength, h) decoder_out, _ = self.decoder(padded_encoder_out, encoder_hid) # decoder_out (bsize, outlength, outdim) decoder_out = reverse(decoder_out, dim=1) return decoder_out, F.softmax(decoder_out, dim=-1)
def writeMetaData(self, data, outputfile):
    """Embed metadata as a JPEG COM segment directly before SOF0.

    If a COM segment already precedes SOF0 its current contents are
    preserved inside the new metadata ("update"); otherwise a new COM
    segment is inserted before SOF0.  (Python 2 file handling.)
    """
    index = -1
    # locate the SOF0 (start-of-frame) segment
    for item in self.items:
        if item[1] == "SOF0":
            index = self.items.index(item)
    if index < 0:
        raise SyntaxError("wrong filetype")
    if self.items[index - 1][1] == "COM":
        # update value: fold the existing comment into the new metadata
        _end = self.items[index - 1][0]
        print "update comment"
        input = open(self.filename, "rb")
        # skip the 2-byte marker + 2-byte length of the old COM segment
        input.seek(self.items[index - 1][0] + 4)
        data = LZAMetadata(data)
        data.addOriginal(
            input.read(self.items[index][0] - self.items[index - 1][0] - 4))
        input.close()
    else:
        # insert value
        _end = self.items[index][0]
    input = open(self.filename, "rb")
    output = open(outputfile, "wb")
    # copy everything up to the insertion point
    output.write(input.read(_end))
    # 0xFFFE = JPEG COM marker; NOTE(review): reverse(ol16(...)) looks
    # like a byte-order swap of the 16-bit length — confirm.
    output.write("\xFF\xFE")
    output.write(reverse(ol16(len(str(data)))))
    output.write(str(data))
    input.seek(self.items[index][0])
    # copy the remainder of the source file
    output.write(input.read(self.src_length - input.tell()))
    input.close()
    output.close()
def writeMetaData(self, data, outputfile):
    """Embed metadata as a JPEG COM segment directly before SOF0.

    Variant identical to the other writeMetaData in this file.  If a COM
    segment already precedes SOF0 its contents are preserved inside the
    new metadata; otherwise a new COM segment is inserted before SOF0.
    (Python 2 file handling.)
    """
    index = -1
    # locate the SOF0 (start-of-frame) segment
    for item in self.items:
        if item[1] == "SOF0":
            index = self.items.index(item)
    if index < 0:
        raise SyntaxError("wrong filetype")
    if self.items[index - 1][1] == "COM":
        # update value: fold the existing comment into the new metadata
        _end = self.items[index - 1][0]
        print "update comment"
        input = open(self.filename, "rb")
        # skip the 2-byte marker + 2-byte length of the old COM segment
        input.seek(self.items[index - 1][0] + 4)
        data = LZAMetadata(data)
        data.addOriginal(input.read(self.items[index][0] - self.items[index - 1][0] - 4))
        input.close()
    else:
        # insert value
        _end = self.items[index][0]
    input = open(self.filename, "rb")
    output = open(outputfile, "wb")
    # copy everything up to the insertion point
    output.write(input.read(_end))
    # 0xFFFE = JPEG COM marker; length bytes swapped via reverse()
    output.write("\xFF\xFE")
    output.write(reverse(ol16(len(str(data)))))
    output.write(str(data))
    input.seek(self.items[index][0])
    # copy the remainder of the source file
    output.write(input.read(self.src_length - input.tell()))
    input.close()
    output.close()
def initUI(self):
    """Build the main window: title, icon, toolbar actions, default view."""
    self.setWindowTitle('Don Jon')
    self.setWindowIcon(QtGui.QIcon(get_asset("icon.png")))
    self.toolbar = self.addToolBar(_(u'Personnages'))
    # The actions register themselves with the toolbar on construction;
    # the original bound both return values to the same unused local `g`.
    ChangeViewAction(
        icon="save-new.png",
        label=_(u"Créer un personnage"),
        parent=self,
        toolbar=self.toolbar,
        shortcut='Ctrl+A',
        route='character.create'
    )
    ChangeViewAction(
        icon="character-list.png",
        label=_(u"Tous les personnages"),
        parent=self,
        toolbar=self.toolbar,
        shortcut='Ctrl+L',
        route='character.list'
    )
    # resolve the default route into a widget and show the window
    self.central_widget = reverse(self.default_view, parent=self)
    self.show()
def test_array_none(self):
    """Reversing a null/None input should yield None."""
    n = NULL
    result = utils.reverse(n)
    # BUG FIX: assertIsNone takes (obj, msg).  The original passed NULL
    # as the object under test and the actual result as the message, so
    # the function's return value was never actually checked.
    self.assertIsNone(result, "Array is NULL")
def feed_id(self, obj):
    """Return the feed link for the section identified by obj[0]."""
    section_url = reverse('by_section', slug=obj[0])
    return link(section_url)
def send_connection_created(sender, connection, signal=None, **kwargs):
    """Relay the connection_created event, using the reverse-resolved
    alias both as the sender and as the `alias` payload field."""
    resolved_alias = reverse(connection)
    connection_created_to.send(sender=resolved_alias,
                               wrapper=sender,
                               connection=connection,
                               alias=resolved_alias,
                               **kwargs)
def central_widget(self, new_value):
    """Setter: a string value is treated as a route name and resolved
    via reverse() before being installed as the central widget."""
    widget = reverse(new_value, parent=self) if isinstance(new_value, str) else new_value
    self.setCentralWidget(widget)
def feed_links(self, obj):
    """Return Atom link entries ('alternate' and 'self') for a tag feed.

    NOTE(review): `'%s_feed' % type` interpolates the BUILTIN `type`
    unless a module-level variable named `type` shadows it elsewhere in
    the file — verify the intended feed-name variable.
    """
    return ({'rel': u'alternate', 'href': self.feed_id(obj[1])},
            {'rel': u'self',
             'href': link(reverse('%s_feed' % type,
                                  '/'.join(['tag', obj[1]])))})
def feed_id(self, obj):
    """Return the feed link for the given tag; raise Http404 when the
    tag is empty/falsy."""
    if not obj:
        raise Http404
    tag_url = reverse('by_tag', tag=obj)
    return link(tag_url)
def test_array_empty(self):
    """Reversing an empty list should yield an empty list."""
    empty = []
    self.assertEqual([], utils.reverse(empty), "Empty Array")
#################### from utils import reverse path = 'models/forward_0.95.pt' model = build_model(name='forward', device=DEVICE, vocab_size=len(data.vocab), embedding_size=EMBEDDING_SIZE, hidden_size=HIDDEN_SIZE, num_layers=NUM_LAYERS, dropout=DROPOUT) model.load_state_dict(torch.load(path)) for batch in data.valid_iter: generated, lengths = model.generate(batch.merged_hist) print(reverse(generated, data.vocab)) input() ################################# if TRAIN_MODE == 'SUPERVISED_PRETRAIN': seq2seq = build_model(name='forward', device=DEVICE, vocab_size=len(data.vocab), embedding_size=EMBEDDING_SIZE, hidden_size=HIDDEN_SIZE, num_layers=NUM_LAYERS, dropout=DROPOUT) trainer = build_trainer(kind='supervised', model=seq2seq, data=data, lr=1e-3,
def test_reverse():
    """Make sure our complex string reversal logic works."""
    word, reversed_word = "hello", "olleh"
    assert_equal(reversed_word, utils.reverse(word))
# Notebook cells: morphological-filtering demo on a grayscale image.
import mfilt_funcs as mine
importlib.reload(mine)
from mfilt_funcs import *

# In[5]:
x = img.imread('imagenes/Im1T4.png')

# In[6]:
plt.imshow(x, cmap='gray')

# In[7]:
# reverse() presumably inverts the image intensities — verify in mfilt_funcs
x = reverse(x)

# In[8]:
plt.imshow(x, cmap='gray')

# In[9]:
# threshold the inverted image to a binary mask
binaria = binarise(x)
plt.imshow(binaria, cmap='gray')

# In[10]:
help(opening)

# # Opening
def test_array_equals(self):
    # NOTE(review): this asserts that reverse([1, 2, 3]) equals the
    # ORIGINAL order [1, 2, 3].  Unless utils.reverse is expected to
    # return its input unchanged, or this test is meant to check that
    # the argument is not mutated, the expected value should be
    # [3, 2, 1] — verify against utils.reverse's contract.
    n = [1, 2, 3]
    result = utils.reverse(n)
    self.assertEqual([1, 2, 3], result, "Arrays Not Equal")
    Can probably build up a hash map at each index if that number will eventually turn into a palindromic number """
# Project-Euler-style Lychrel count over 1..9999 (Python 2).
# CACHE maps a value -> True once it is known to reach a palindrome.
CACHE = {}
lychrel_count = 0
for n in range(1, 10000):
    if n in CACHE:
        # already known to reach a palindrome -> not Lychrel, skip
        continue
    # an array to hold all the tested values for this number
    # if the number winds up being a lychrel number, all the other numbers
    # should be cached as "True" to avoid recomputing in a subsequent iteration
    m = n
    rev_add_count = 0  # how many times has it been reversed and added to self
    attempted = [m]
    while rev_add_count < 50:
        m += utils.reverse(m)
        if utils.palindrome(m):
            # update cache for each to True
            for a in attempted:
                CACHE[a] = True
            break
        else:
            # try to reverse again, and add this number to list of those attempted
            rev_add_count += 1
            attempted.append(m)
    # n is cached only if its chain reached a palindrome above
    if n not in CACHE:
        lychrel_count += 1
print lychrel_count
        sort_key=lambda x: len(x.summ),
        sort_within_batch=True, repeat=False, device=self.device)
    return train_iter, valid_iter, test_iter


if __name__ == "__main__":
    # Smoke test: load the multi-document summarization data and decode a
    # few batches back to text for manual inspection (Enter to advance).
    import torch
    from utils import reverse
    PATH = '~/hwijeen/MulDocSumm/data'
    FILE = 'rottentomatoes_prepared'
    data = MulSumData(PATH, FILE, 99, torch.device('cuda'))
    print(len(data.train_iter))  # only 94
    print(len(data.valid_iter))  # only 12
    print(len(data.test_iter))  # only 12
    for batch in data.train_iter:
        print('batch: ', batch)
        print(batch.doc1[0])
        input()
        # decode all five documents and the reference summary to text
        print(reverse(batch.doc1[0], data.vocab))
        print(reverse(batch.doc2[0], data.vocab))
        print(reverse(batch.doc3[0], data.vocab))
        print(reverse(batch.doc4[0], data.vocab))
        print(reverse(batch.doc5[0], data.vocab))
        print(reverse(batch.summ[0], data.vocab))
        input()
def on_trigger(self):
    """Switch the parent window to this action's configured route."""
    new_view = reverse(self.route, parent=self.parent)
    self.parent.central_widget = new_view
def main():
    """Run each of the utils demos in turn."""
    for demo in (utils.area, utils.reverse, utils.time_date):
        demo()
def generate(self, path, *args, **kwargs):
    """Interactively decode validation batches with a model loaded from *path*.

    Extra positional/keyword arguments are forwarded to load_model.
    Waits for <Enter> between batches.
    """
    model = load_model(path, *args, **kwargs)
    for batch in self.data.valid_iter:
        generated = model.generate(batch.merged_hist)
        print(reverse(generated, self.data.vocab))
        input()
# Demo script: restore the latest BiLSTM-CRF checkpoint and tag the
# sentences in data/old_test.txt, writing predictions to a file.
ckpt_file = tf.train.latest_checkpoint(model_path)
print(ckpt_file)
paths['model_path'] = ckpt_file
model = BiLSTM_CRF(embeddings, tag2label, word2id, paths, config=config)
model.build_graph()
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
    print('============= demo =============')
    saver.restore(sess, ckpt_file)
    # input file: one sentence per line, tokens joined by '_'
    with open('data/old_test.txt', 'r', encoding='utf-8')as f1:
        lines = f1.readlines()
    new_lines = []
    for line in lines:
        new_line = list(line.strip().split('_'))
        new_lines.append(new_line)
        # print(new_line)
    # pair each token list with dummy 'O' labels for the demo API
    demo_data = []
    for demo_sent in lines:
        demo_sent = list(demo_sent.strip().split('_'))
        demo_data.append((demo_sent, ['O'] * len(demo_sent)))
    # demo_data = [(demo_sent, ['O'] * len(demo_sent))]
    # print(demo_data)
    tags = model.demo_one(sess, demo_data)
    with open('data/out_0820_1.txt', 'w', encoding='utf-8') as f2:
        # demo_data=[]
        # print('newlines:',new_lines)
        # reverse() presumably re-attaches predicted tags to the original
        # tokens — NOTE(review): confirm its semantics in this project.
        results = reverse(new_lines, tags)
        f2.write('\n'.join(results))
        f2.write('\n')
    print('Output the predicted txt file!')
def is_lychrel(n, i=50):
    """Return True if *n* fails to reach a palindrome within *i*
    reverse-and-add iterations."""
    remaining = i
    value = n
    while remaining > 0:
        value += reverse(value)
        if is_palindrome(value):
            return False
        remaining -= 1
    return True