def main(model, epochs, batch_size, save_intervals, category):
    """Create the requested generative model and run its training loop.

    Args:
        model: model identifier accepted by ``core.create_model``.
        epochs: total number of training epochs.
        batch_size: minibatch size used during training.
        save_intervals: epoch interval reused for checkpoint saving and
            for both sampling cadences.
        category: image category forwarded to the model factory.
    """
    generator = core.create_model(model, category)
    print(
        "Python main program for generating images using {} with category {}".
        format(model, category))
    # Load the preprocessed training images (pickled on the initial run),
    # sized to the model's expected row count.
    training_images = core.load_data(generator.rows)
    generator.train(data=training_images,
                    epochs=epochs,
                    batch_size=batch_size,
                    save_intervals=save_intervals,
                    sample_intervals=save_intervals,
                    hi_sample_intervals=save_intervals)
action='store_const', help='do epoch cycle generation') parser.add_argument('--epoch', dest='epoch', default=None, type=int, help='specify exact epoch to use') parser.add_argument('--treshold', dest='treshold', default=0.95, type=float, help='treshold to select best images in DCGAN') args = parser.parse_args() m = core.create_model(args.model) def main(m, vec): imgs = m.generate_images(vec) filename = args.filename.replace('{EP}', str(m.epoch)) filename = filename.replace( '{D}', "{}{}".format(int(args.use_img), int(args.noise))) mi, ma = np.min(imgs), np.max(imgs) imgs = ((imgs - mi) / (ma - mi) * 255).astype(np.uint8) for i, im in enumerate(imgs): fn = filename.replace('{N}', str(i)) utils.save_image(im, os.path.join(args.path, fn)) if "VAE" in args.model:
def xover(v1, v2):
    """Single-point crossover: copy v1, splice in v2's tail at a random cut."""
    v = v1[:]
    # Cut point avoids the first and last positions so both parents contribute.
    n = random.randint(1, len(v2) - 2)
    v[n:] = v2[n:]
    return v


def mutate(v):
    """Mutate one random component of v in place with a standard-normal draw."""
    n = random.randint(0, len(v) - 1)
    v[n] = random.normalvariate(0, 1)
    return v


# Interactive evolutionary search over the DCGAN latent space:
# show a batch, let the user pick 3 favourites, breed the next batch.
m = core.create_model("DCGAN_1")
vec = np.random.normal(0, 1, (13, m.latent_dim))
while True:
    g = m.generate_images(vec)
    show_images(g)
    # Expect exactly three image indices from stdin; anything else exits.
    n = [int(x) for x in input().split()]
    if len(n) != 3:
        break
    # n = [x for x in range(10) if x not in n1]
    # Keep the three selected latent vectors as the new parents (rows 0-2).
    v = [vec[i, :] for i in n]
    for i in range(3):
        vec[i] = v[i]
    j = 3
    # Fill subsequent rows with crossovers of each parent pair.
    # NOTE(review): j is never advanced in the visible code, so every pair
    # overwrites rows 3 and 4 — presumably a `j += 2` follows in truncated
    # text below; confirm against the full file.
    for i, k in [(0, 1), (1, 2), (0, 2)]:
        vec[j] = xover(v[i], v[k])
        vec[j + 1] = xover(v[i], v[k])
import cv2
import os
# Import the other required packages from this project
from core import create_model
from core.predict import predict
from core import create_dataset

# Load the dataset
(train_x, train_y), (test_x, test_y) = create_dataset()
# Build the model architecture
model = create_model()
# Load the trained weights
model.load_weights('./data/weights.h5')
# Validate the model on the test set
out = model.evaluate(test_x, test_y)
# List the image filenames under data/mnist and keep them in a list
file_list = os.listdir('./data/mnist/')
# Sort the filenames numerically (strip the 4-char extension) for easier
# handling later; optional but convenient
file_list.sort(key=lambda x: int(x[:-4]))
# Print the filename list — useful to check whether files were found
# when the program misbehaves
print('file_list: ', file_list)
# Iterate over the file list, feeding each image to the model for prediction
for file_name in file_list:
    # file_name is only a basename; reading needs the full path, so join it
    # with the directory here
    file_name = os.path.join('./data/mnist/', file_name)
    # Print the file path for cross-checking and debugging; remove if unwanted
    print(file_name)
    # Read the image file
    picture = cv2.imread(file_name)