def glossolalia(self):
    """Generate one acceptable line of RNN output, retrying up to 10 times.

    Each attempt samples self.cf['sample'] lines from the torch-rnn model
    at a sine-modulated temperature, cleans them with clean_glosses(), and
    (if a 'logs' directory is configured) writes the batch to a timestamped
    log file.  Once a batch yields more than 5 lines, a random line from
    the middle of the batch is returned.

    Returns:
        str | None: the chosen line, or None if all 10 attempts failed.
    """
    result = None
    loop = 10
    t = self.sine_temp()
    # Minimum line length is configurable, falling back to the module default.
    min_length = self.cf.get('minimum', DEF_MIN)
    while not result and loop > 0:
        lines = torchrnn.generate_lines(
            n=self.cf['sample'],
            temperature=t,
            model=self.cf['model'],
            max_length=self.api.char_limit,
            min_length=min_length)
        lines = self.clean_glosses(lines)
        if 'logs' in self.cf:
            self._log_glosses(lines)
        print("Got {} lines on loop {}".format(len(lines), loop))
        # Drop the first and last two lines before choosing (edge samples are
        # presumably lower quality) — requires more than 5 lines so the
        # [2:-2] slice is non-empty.
        if len(lines) > 5:
            result = random.choice(lines[2:-2])
        loop -= 1
    return result

def _log_glosses(self, lines):
    """Write the sampled lines and their lengths to a timestamped .log file."""
    log = os.path.join(self.cf['logs'], str(time.time())) + '.log'
    with open(log, 'wt') as f:
        for line in lines:
            f.write(line)
            f.write("\n")
            f.write("Length = {}\n\n".format(len(line)))
def bombinan(self):
    """Sample ten lines from the model and return the first one that
    greptw() accepts, or None if none of them pass."""
    candidates = torchrnn.generate_lines(model=self.cf['model'], n=10)
    for candidate in candidates:
        accepted = self.greptw(candidate)
        if accepted:
            return accepted
    return None
def sample(self):
    """Sample raw output from the torch-rnn model, cache it on self.raw_rnn
    and return it.

    When cf['sample_method'] is 'text', a single block of text of length
    cf['sample'] is generated; otherwise cf['sample'] individual lines are
    generated, bounded by self.min_length / self.max_length.
    """
    if self.cf.get('sample_method') == 'text':
        print("Sampling using text")
        self.raw_rnn = torchrnn.generate_text(
            temperature=self.temperature,
            model=self.cf['model'],
            length=self.cf['sample'],
            opts=self.options)
    else:
        self.raw_rnn = torchrnn.generate_lines(
            n=self.cf['sample'],
            temperature=self.temperature,
            model=self.cf['model'],
            max_length=self.max_length,
            min_length=self.min_length,
            opts=self.options)
    return self.raw_rnn
def glossolalia(self):
    """Generate one line matching the configured accept pattern, retrying
    up to 10 times.

    Each attempt samples self.cf['sample'] lines at a sine-modulated
    temperature, filters them against the cf['accept'] regex, and (if a
    'logs' directory is configured) writes the surviving batch to a
    timestamped log file.  Once a batch yields more than 5 lines, a random
    line from the middle of the batch is returned.

    Returns:
        str | None: the chosen line, or None if all 10 attempts failed.
    """
    result = None
    loop = 10
    accept_re = re.compile(self.cf['accept'])
    t = self.sine_temp()
    print("Temperature = {}".format(t))
    while not result and loop > 0:
        lines = torchrnn.generate_lines(
            n=self.cf['sample'], temperature=t, model=self.cf['model'])
        lines = [l for l in lines if accept_re.match(l)]
        # Logging is optional: only write a log file when a 'logs'
        # directory is configured (previously this raised KeyError when
        # the key was absent, unlike the other sampling paths).
        if 'logs' in self.cf:
            log = os.path.join(self.cf['logs'], str(time.time())) + '.log'
            with open(log, 'wt') as f:
                for line in lines:
                    f.write(line)
                    f.write("\n")
        # Drop the first and last two lines before choosing; > 5 keeps the
        # [2:-2] slice non-empty.
        if len(lines) > 5:
            result = random.choice(lines[2:-2])
        loop -= 1
    return result
#!/usr/bin/env python
"""Command-line sampler: print N generated lines from a torch-rnn checkpoint."""

import argparse

import torchrnn

# Default checkpoint path; override with -m/--model.
MODEL = '/Users/mike/torch/torch-rnn/cv_glossatory/checkpoint_215100.t7'


def main():
    """Parse command-line options, sample tweets from the model, print them."""
    ap = argparse.ArgumentParser()
    ap.add_argument('-m', '--model', type=str, default=MODEL,
                    help="torch-rnn checkpoint")
    ap.add_argument('-t', '--temperature', type=float, default=0.8,
                    help="sample temperature")
    ap.add_argument('-n', '--number', type=int, default=10,
                    help="number of tweets to sample")
    args = ap.parse_args()
    tweets = torchrnn.generate_lines(
        temperature=args.temperature, model=args.model, n=args.number)
    for tweet in tweets:
        print(tweet)


if __name__ == '__main__':
    main()