def get_frame():
    """Handle one captcha request: predict, log the image, and return the answer as JSON."""
    tic = tc()
    print('----------------------------------------------------------------')
    log_time = time.strftime('%Y-%m-%d %H:%M:%S')
    print(log_time)
    # Read the uploaded captcha image from the HTTP request.
    cap_file = request.files['image']
    cap_img = Image.open(cap_file)
    word_ind, objs_ind = model.predict(cap_img)
    # Collect the 1-based positions of the sub-images whose class matches the keyword.
    Result = {'Result': []}
    for i, ind in enumerate(objs_ind):
        if ind == word_ind[0]:
            Result['Result'].append(i + 1)
    # No sub-image matched: fall back to a random guess among positions 1-8.
    if len(Result['Result']) == 0:
        Result['Result'].append(np.random.randint(1, 9))
    print(classes[word_ind[0]], Result['Result'])
    # Archive the captcha, named by timestamp, predicted keyword, and answer positions.
    pic = (log_time.replace(' ', '').replace('-', '').replace(':', '')
           + '_' + classes[word_ind[0]]
           + '_' + '_'.join(str(v) for v in Result['Result']) + '.png')
    print(pic)
    cap_img.save(os.path.join(logdir, pic))
    print('fetch time: {:.5f} s.'.format(tc() - tic))
    print('----------------------------------------------------------------\n')
    return json.dumps(Result)
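# For context, a minimal sketch of how get_frame might be wired into a Flask app.
# The app object, route path, and port below are assumptions, not from the source.
from flask import Flask, request

app = Flask(__name__)
app.add_url_rule('/get_frame', 'get_frame', get_frame, methods=['POST'])

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)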
def crack_captcha(captcha_img):
    tic = tc()
    print("model loaded")
    word_ind, objs_ind = model.predict(captcha_img)
    word = classes[word_ind]
    objs = classes[objs_ind]
    # `unicode()` is Python 2 only; under Python 3 the class names are already str.
    print(word[0], objs[0])
    print('time elapsed: {:.2f}'.format(tc() - tic))
    return word[0]
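# Hedged usage sketch: the image path is an assumption (modeled on the test
# images used further below).
from PIL import Image

keyword = crack_captcha(Image.open('./misc/captcha/e_2.jpg'))
print(keyword)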
def file_merge(src_path, dst_path, batch_size=100, is_training=True):
    """Merge per-frame point-cloud CSVs into raw float32 batch files of `batch_size` frames."""
    intensity_path = os.path.join(src_path, 'intensity')
    pts_path = os.path.join(src_path, 'pts')
    category_path = os.path.join(src_path, 'category')
    fns = [fn for fn in os.listdir(pts_path) if fn[-4:] == '.csv']
    n_files = len(fns)
    n_batches = int(np.ceil(n_files / batch_size))
    flame_index = 0
    tic = tc()
    for batch_i in range(n_batches):
        print('batch: {}/{} {:.2f}s.'.format(batch_i + 1, n_batches, tc() - tic))
        start = batch_i * batch_size
        end = start + batch_size
        index, pts, intensity, category = [], [], [], []
        test_fn_index = []
        for fn in fns[start:end]:
            # Points: one (x, y, z) row per point.
            tmp1 = np.loadtxt(os.path.join(pts_path, fn), dtype=np.float32, delimiter=',')
            pts.append(tmp1)
            # Intensity: one scalar per point, reshaped to a column.
            tmp2 = np.loadtxt(os.path.join(intensity_path, fn), dtype=np.float32).reshape((-1, 1))
            intensity.append(tmp2)
            if is_training:
                category.append(np.loadtxt(os.path.join(category_path, fn),
                                           dtype=np.float32).reshape((-1, 1)))
            # Tag every point with its frame index so frames can be separated later.
            index.append(flame_index * np.ones_like(tmp2, np.float32))
            test_fn_index.append([flame_index, fn])
            flame_index += 1
        index = np.vstack(index)
        pts = np.vstack(pts)
        intensity = np.vstack(intensity)
        if is_training:
            category = np.vstack(category)
            merged_data = np.hstack((index, pts, intensity, category))
            save_path = os.path.join(dst_path, 'train_batch_{}.npy'.format(batch_i))
        else:
            merged_data = np.hstack((index, pts, intensity))
            save_path = os.path.join(dst_path, 'test_batch_{}.npy'.format(batch_i))
            # Record the frame-index -> filename mapping for test batches.
            pd.DataFrame(test_fn_index).to_csv(
                os.path.join(dst_path, 'test_batch_{}.name'.format(batch_i)),
                header=None, index=False, encoding='utf-8')
        merged_data.astype(np.float32).tofile(save_path)
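# Hedged usage sketch: the directory layout is an assumption based on the
# subfolders file_merge reads (pts/, intensity/, category/).
file_merge('../data/training', '../data/train_merged', batch_size=100, is_training=True)
file_merge('../data/testing', '../data/test_merged', batch_size=100, is_training=False)

# tofile() writes raw float32 bytes with no shape header, so a reader must
# reshape by column count: frame index, x, y, z, intensity (+ category when training).
raw = np.fromfile('../data/train_merged/train_batch_0.npy', dtype=np.float32)
data = raw.reshape((-1, 6))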
with tf.Session() as sess:
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=20)
    sess.run(init)
    # TensorBoard visualization
    merged = tf.summary.merge_all()
    summary_writer_train = tf.summary.FileWriter('logs/train', sess.graph)
    summary_writer_test = tf.summary.FileWriter('logs/test')
    for epoch in range(1, total_epochs + 1):
        # Step-decay schedule: divide the learning rate by 10 at the chosen epochs.
        if epoch in reduce_lr_epoch:
            epoch_learning_rate /= 10
        # Train
        tic = tc()
        train_acc, train_loss = train_epoch(sess, epoch_learning_rate)
        train_time = tc() - tic
        # Evaluate
        test_acc, test_loss = eva(sess)
        print('\nepoch: {}, training time: {:.0f} s.'.format(epoch, train_time))
        print('train loss: {:.5f}, accuracy: {:.3f}'.format(train_loss, train_acc))
        print('test loss: {:.5f}, accuracy: {:.3f}'.format(test_loss, test_acc))
        history.append(
            [epoch, train_acc, train_loss, test_acc, test_loss, train_time])
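        # Hedged sketch: the saver created above (max_to_keep=20) is presumably
        # invoked once per epoch; the checkpoint path here is an assumption.
        saver.save(sess, 'logs/model.ckpt', global_step=epoch)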
        # Read the next data chunk and tally chunk types.
        chunk_data, chunk_type_code = png_read_chunk(fp)
        if chunk_type_code not in chunk_type:
            chunk_type[chunk_type_code] = 1
        else:
            chunk_type[chunk_type_code] += 1
    print(image_info)
    print(chunk_type)


if __name__ == "__main__":
    tic = tc()
    block_size = (512, 512)

    # ------------- train data ----------------
    # train image_1.png
    png_file = '../data/jingwei_round1_train_20190619/image_1.png'
    # check_png(png_file)
    png_sub(png_file, '../data/sub/train_1', block_size)
    print('train image_1.png', tc() - tic)

    # train image_2.png
    png_file = '../data/jingwei_round1_train_20190619/image_2.png'
    # check_png(png_file)
    png_sub(png_file, '../data/sub/train_2', block_size)
    print('train image_2.png', tc() - tic)
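# png_read_chunk is not shown in this excerpt. A minimal sketch consistent with
# the PNG chunk layout (4-byte big-endian length, 4-byte ASCII type code, data,
# 4-byte CRC) might look like this; CRC verification is omitted.
import struct

def png_read_chunk(fp):
    length = struct.unpack('>I', fp.read(4))[0]
    chunk_type_code = fp.read(4).decode('ascii')
    chunk_data = fp.read(length)
    fp.read(4)  # skip the CRC
    return chunk_data, chunk_type_code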
from time import time as tc

ocr_model_meta = './model_data/ocr.ckpt-1108000.meta'
ocr_model_para = './model_data/ocr.ckpt-1108000'
cap_model_meta = './model_data/densenet_121.ckpt-145000.meta'
cap_model_para = './model_data/densenet_121.ckpt-145000'
classes_path = './model_data/12306_classes.txt'

classes = read_classes(classes_path)
model = CaptchaModel(ocr_model_meta=ocr_model_meta,
                     ocr_model_para=ocr_model_para,
                     cap_model_meta=cap_model_meta,
                     cap_model_para=cap_model_para)

tic = tc()
print("load model done")
for x in range(2, 19):
    word_ind, objs_ind = model.predict(
        '/Users/dtstack/PycharmProjects/Crack12306WithAI/misc/captcha/e_{}.jpg'.format(x))
    word = classes[word_ind]
    objs = classes[objs_ind]
    # `unicode()` is Python 2 only; under Python 3 the class names are already str.
    print("e_{}.jpg".format(x), word[0], objs[0])
    print('time elapsed: {:.2f}'.format(tc() - tic))
model.close()
print("Is there a prime between 1328 and 1361:", any(is_prime(x) for x in range(1328, 1361))) monday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11] tuesday = [x * 2 - 10 for x in monday] print(monday, tuesday) for item in zip(monday, tuesday): print(item, type(item)) for d1, d2 in zip(monday, tuesday): print(f"Hourly average is {(d1 + d2)/2}°C") wednesday = [x * 2 - 20 for x in tuesday] for temps in zip(monday, tuesday, wednesday): print( f"min={min(temps):4.1f}\t max={max(temps):4.1f}\t avg={sum(temps)/len(temps):4.1f}" ) from itertools import chain temperatures = chain(monday, tuesday, wednesday) print(monday, tuesday, wednesday) # concatenation print(list(temperatures)) # lazy concatenation from md_lucas import lucas from time import perf_counter as tc start = tc() for x in (p for p in lucas() if is_prime(p)): print(x, "time:", tc() - start)
sess.run(init)
# Restore the latest checkpoint if one exists; otherwise start from step 0.
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    print('loading model {}'.format(ckpt.model_checkpoint_path))
    saver.restore(sess, ckpt.model_checkpoint_path)
    # The step counter is encoded in the checkpoint filename suffix.
    global_step = int(ckpt.model_checkpoint_path.split('-')[-1]) + 1
else:
    global_step = 0

if is_training:
    merged = tf.summary.merge_all()
    summary_writer_train = tf.summary.FileWriter(log_dir + '/train', sess.graph)
    summary_writer_valid = tf.summary.FileWriter(log_dir + '/valid')
    tic = tc()
    epoch_learning_rate = init_learning_rate
    for epoch in range(total_epochs):
        local_step = 0
        # Visit the 499 merged training files in a fresh random order each epoch.
        train_idxs = np.random.permutation(499)
        for i in range(499):
            print('flame: {}/{} '.format(i + 1, 499))
            data, _ = npy_item_read(train_path, train_idxs[i])
            flame_idxs = np.unique(data[:, 0])
            np.random.shuffle(flame_idxs)
            for flame in flame_idxs:
                # Process one frame at a time; columns are index, x, y, z, intensity, category.
                lg = data[:, 0] == flame
                pts, intensity, category = data[lg, 1:4], data[lg, 4], data[lg, 5]
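# npy_item_read is not shown in this excerpt. A hedged sketch, assuming it loads
# one raw-float32 batch written by file_merge above and returns (data, filename);
# the filename pattern and return signature are assumptions.
def npy_item_read(path, idx):
    fn = os.path.join(path, 'train_batch_{}.npy'.format(idx))
    # Six columns per point: frame index, x, y, z, intensity, category.
    data = np.fromfile(fn, dtype=np.float32).reshape((-1, 6))
    return data, fn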