def on_status(self, status):
    text = get_status_text(status, self.screen_name)
    if text is None:
        # Nothing to respond to in this status
        logging.info("Status %d is not relevant", status.id)
        return
    # Render the reply image into an in-memory buffer and post it as a reply
    fp = io.BytesIO()
    generate_image(text, fp)
    self.api.update_with_media(
        "filename.png",
        file=fp,
        in_reply_to_status_id=status.id,
        auto_populate_reply_metadata=True,
    )

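# A minimal sketch of how the on_status() handler above might be wired into a
# Tweepy v3 stream. The listener class name, the auth/api objects, and the
# tracked keyword are assumptions for illustration, not taken from the original.
import tweepy


class ImageReplyListener(tweepy.StreamListener):  # hypothetical class name
    def __init__(self, api, screen_name):
        super().__init__(api)
        self.api = api
        self.screen_name = screen_name

    # on_status() as defined above would live here


# api = tweepy.API(auth)  # `auth` is assumed to be configured elsewhere
# stream = tweepy.Stream(auth=api.auth, listener=ImageReplyListener(api, "bot_account"))
# stream.filter(track=["@bot_account"])  # hypothetical keyword
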
def shengcheng_image():
    # Read the requested text from the query string / form data
    strr = request.values.get('message')
    print(strr)
    url = generate_image(strr)
    return json.dumps({
        "code": 0,
        "images": url,
    })

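# A minimal sketch of how shengcheng_image() might be exposed over HTTP,
# assuming the `request` object it uses comes from Flask; the app name and
# route path are illustrative, not taken from the original.
import json

from flask import Flask, request

app = Flask(__name__)
app.add_url_rule('/generate_image', view_func=shengcheng_image, methods=['GET', 'POST'])

if __name__ == '__main__':
    app.run()
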
def find_points(width, height, min_x, max_x, min_y, max_y):
    """
    Calls the calculate() function on each point.

    Parameters:
        width, height: dimensions of the image/complex plane
        min_x, min_y: the minimum x and imaginary (yi) values for the plane
        max_x, max_y: the maximum x and imaginary (yi) values for the plane
    """
    num = 0
    # TODO: Fix arduino job status indicator
    print("True total points: ", width * height)  # TODO: Remove unnecessary prints
    results_txt = open('results.txt', 'w+')

    # Tell the arduino status indicator that a job is starting and how many points it has
    write_serial(b'1')
    totalJobs = width * height
    write_serial(bytes(str(totalJobs), encoding="utf-8"))

    x_step = (max_x - min_x) / width
    y_step = (max_y - min_y) / height
    current_point = [min_x, min_y]
    x_point = 1
    y_point = 1

    # Walk the plane row by row
    while y_point <= height:
        # TODO: Fix precision issues (Perturbation or store in array)
        row = []
        while x_point <= width:
            point_result = calculate(*current_point)
            row.append(f"{point_result},{current_point[0]},{current_point[1]}")
            current_point[0] += x_step
            x_point += 1
            num += 1
            # TODO: Rewrite arduino progress indicator
        for point in row:
            results_txt.write(f"{point}\n")
        current_point[1] += y_step
        current_point[0] = min_x
        y_point += 1
        x_point = 1

    print("Total calculated points: ", num)
    results_txt.close()
    # Tell the arduino status indicator that the job is done
    write_serial(b'9')
    write_serial(b'x')
    generate.generate_image(int(width), int(height))

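# A hedged usage sketch for find_points(): rendering a classic Mandelbrot-style
# view. The 800x600 frame and the -2..1 / -1.5..1.5 bounds are illustrative
# values, not taken from the original; calculate(), write_serial(), and the
# generate module must already be available in this module.
if __name__ == "__main__":
    find_points(800, 600, -2.0, 1.0, -1.5, 1.5)
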
def generate_all_images():
    global IMAGES
    last_progress = time.time()
    print("Generating")
    words = [x.strip() for x in open("frees.txt", "r").readlines()]
    for i, word in enumerate(words):
        if time.time() - last_progress > 5:
            last_progress = time.time()
            print(i, "/", len(words))
        png_data = generate.image_to_png(generate.generate_image(word))
        IMAGES.append(png_data)
    print("Generated all images")

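# A hedged sketch of how the cached PNGs might be consumed; the module-level
# IMAGES list is implied by the `global` statement above, and the numbered
# output filenames are an assumption for illustration.
IMAGES = []

if __name__ == "__main__":
    generate_all_images()
    for i, png_data in enumerate(IMAGES):
        with open(f"word_{i}.png", "wb") as f:
            f.write(png_data)
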
# coding: utf-8
from generate import generate_image

a = generate_image(100)
a.check_font()
a.generate()

a.reset_config(text_mode='chinese', picnum=100, is_sample_from_corpus=False, dict_path='dic_num.txt')
a.generate()

# a = generate_image(0)
# a.check_font()

# divide by 2 or else loss is weighted 2x towards disc vs gen
disc_loss = (real_loss + fake_loss) / 2
total_disc_loss += disc_loss.data

optimizer_d.zero_grad()
disc_loss.backward()
optimizer_d.step()

progress_bar.set_description(
    f"epoch: {epoch} || disc loss: {total_disc_loss/batch_num} || gen loss: {total_gen_loss/batch_num}"
)

denom = opts.numImages // opts.batchSize
avg_gen_loss = total_gen_loss / denom
avg_disc_loss = total_disc_loss / denom
losses[0].append(avg_gen_loss)
losses[1].append(avg_disc_loss)

torch.save(gen.state_dict(), './generator.pt')
torch.save(disc.state_dict(), './discriminator.pt')
generate_image(fixed_noise, epoch=epoch)

# display loss graph
plt.plot(losses[0], label="gen")
plt.plot(losses[1], label="disc")
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(loc='upper left')
plt.show()
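
# A hedged sketch of the generator update that would typically sit alongside the
# discriminator step above, assuming a binary cross-entropy criterion; `criterion`,
# `noise`, and `optimizer_g` are assumptions, not taken from the original.
optimizer_g.zero_grad()
fake_preds = disc(gen(noise))
gen_loss = criterion(fake_preds, torch.ones_like(fake_preds))  # generator wants the disc to say "real"
gen_loss.backward()
optimizer_g.step()
total_gen_loss += gen_loss.data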