def block():
    """Analysis subroutine for <block> (PL/0): declarations, body, p-code frame.

    Emits a leading jmp (patched later to the body start), parses the
    const/var/procedure declaration parts, allocates the activation-record
    size with 'int', parses the body via statement(), and ends with 'opr 0 0'
    (return).  All state lives in the shared globalvar module.
    """
    index = 3  # minimum frame size: SL, DL, RA bookkeeping cells
    globalvar.set_index(3)
    tx0 = len(globalvar.get_table()) - 1  # symbol-table slot of this procedure
    cx0 = generator.gen("jmp", 0, 0)  # remember this p-code instruction's position in the code list
    while globalvar.get_symbol() == token.CONST:  # constant declaration part?
        constant_declare()
    while globalvar.get_symbol() == token.VAR:  # variable declaration part?
        var_declare()
    index = globalvar.get_index()  # remember the frame size to allocate here
    while globalvar.get_symbol() == token.PROCEDURE:  # procedure declaration part?
        proc_declare()
    # should the symbol table be built here??  (original author's open question)
    code = globalvar.get_code()  # a copy of the p-code list
    table = globalvar.get_table()  # a copy of the symbol table
    # patch the leading jmp to land on the first instruction of this body
    # NOTE(review): `code` is documented as a copy but is never written back
    # via globalvar.set_code (unlike while/if statements) — verify whether
    # get_code returns the live list or this patch is silently lost.
    code[cx0].s2 = len(globalvar.get_code())
    if not globalvar.get_level() == 0:  # the outermost program leaves the table unchanged
        table[tx0].adr = len(
            globalvar.get_code())  # set the procedure's adr to its start position in the p-code
        globalvar.set_table(table)
    generator.gen("int", 0, index)  # reserve the activation-record cells
    statement()  # call the <statement> analysis subroutine
    generator.gen("opr", 0, 0)  # return from this block
    print("this is block")
def while_statement():
    """Parse a <while statement> and emit its p-code.

    Layout:  loop_start: <condition>; jpc -> end; <body>; jmp loop_start.
    The jpc target is unknown while parsing, so it is emitted as 0 and
    patched once the body's end is known.  Returns 0 on a syntax error.
    """
    if globalvar.get_symbol() != token.WHILE:
        print("error in while_statement() -->no while")
        return 0
    loop_start = len(globalvar.get_code())  # p-code index of the condition
    print("watch me ,判断条件在" + str(loop_start))
    token.getsym()
    condition()
    jpc_pos = len(globalvar.get_code())  # index the conditional exit jump will occupy
    generator.gen("jpc", 0, 0)  # exit target unknown yet; patched below
    if globalvar.get_symbol() != token.DO:
        error_handling.error(18)  # expected 'do' -> error 18
        return 0
    token.getsym()
    statement()
    generator.gen("jmp", 0, loop_start)  # jump back to re-test the condition
    code = globalvar.get_code()  # working copy of the p-code list
    if code[jpc_pos].operator == 'jpc':
        code[jpc_pos].s2 = len(globalvar.get_code())  # patch the pending exit target
    else:
        print("error!!!!!!!!"
              "&&&&&&&&&&&&&&&&&&&&&&"
              "$$$$$$$$$$$$$$$$$$$$$4")
    globalvar.set_code(code)
    print("this is while_statement")
def if_statement():
    """Parse <if statement>: 'if' condition 'then' statement ['else' statement].

    Emits:  <condition>; jpc -> else-part (or end); <then>; [jmp -> end; <else>].
    Returns 0 on a syntax error, None on success (matching the sibling
    parse routines).
    """
    if globalvar.get_symbol() == token.IF:
        token.getsym()
        condition()
        if globalvar.get_symbol() == token.THEN:
            token.getsym()
            generator.gen("jpc", 0, 0)  # false-branch target patched below
            cx1 = len(globalvar.get_code()) - 1  # position of the jpc
            statement()  # then-part
            print("this is if_statement")
            if globalvar.get_symbol() == token.ELSE:
                token.getsym()
                # BUG FIX: after the then-part the condition value was already
                # consumed by the jpc at cx1, so the jump over the else-part
                # must be an unconditional jmp (the original emitted a second
                # jpc), and the jpc at cx1 must target the instruction AFTER
                # this jmp (the original patched it to point AT it).
                generator.gen("jmp", 0, 0)
                cx2 = len(globalvar.get_code()) - 1  # position of the jmp
                code = globalvar.get_code()
                code[cx1].s2 = len(globalvar.get_code())  # false -> else-part start
                globalvar.set_code(code)
                statement()  # else-part
                code = globalvar.get_code()
                code[cx2].s2 = len(globalvar.get_code())  # then-part skips past else
                globalvar.set_code(code)
            else:
                code = globalvar.get_code()  # working copy of the p-code list
                if code[cx1].operator == 'jpc':
                    code[cx1].s2 = len(globalvar.get_code())  # false -> after then-part
                else:
                    print("error$$$$$$$$$$$")
                globalvar.set_code(code)
        else:
            error_handling.error(16)  # expected 'then' -> error 16
            return 0
    else:
        print("error in if_statement() -->no if")
        return 0
def term():
    """Parse a <term>: factor { ('*' | '/') factor }, emitting mul/div p-code."""
    factor()  # first factor is mandatory
    while globalvar.get_symbol() in (token.STARSY, token.DIVISY):
        op = multiply_operator()
        factor()
        # opr operands: 4 = multiply, 5 = divide (top two stack cells)
        generator.gen("opr", 0, 4 if op == token.STARSY else 5)
    print("this is term")
def read_statement():
    """Parse <read statement>: 'read' '(' ident {',' ident} ')'.

    Emits one 'red' instruction per identifier.  Returns 0 on most error
    paths; a missing '(' calls exit(), preserving the original behavior.
    The per-identifier handling was duplicated verbatim in the original;
    it is factored into _read_one_identifier below.
    """
    if globalvar.get_symbol() != token.READ:
        print("error in read_statement()--no read")
        return 0
    token.getsym()
    if globalvar.get_symbol() != token.LPARSY:
        error_handling.error(40)  # missing '(' -> error 40
        exit()
    token.getsym()
    if _read_one_identifier() == 0:
        return 0
    while globalvar.get_symbol() == token.COMMASY:
        token.getsym()
        if _read_one_identifier() == 0:
            return 0
    if globalvar.get_symbol() == token.RPARSY:
        token.getsym()
        print("this is read_statement")
    else:
        error_handling.error(22)  # missing ')' -> error 22
        print(globalvar.get_symbol())
        return 0


def _read_one_identifier():
    """Parse one identifier of a read list and emit its 'red' instruction.

    Returns 0 only when the target exists but is not a variable (error 12,
    which aborts the read statement); returns 1 otherwise — an undeclared
    identifier reports error 11 but lets parsing continue, exactly as the
    original duplicated code did.
    """
    name = identifier()
    i = tab.position(name)
    table = globalvar.get_table()
    if i == -1:  # not found in the symbol table
        error_handling.error(11)  # undeclared identifier in read -> error 11
    else:
        if not table[i].typ == "variable":
            error_handling.error(12)  # cannot read into a constant/procedure -> error 12
            return 0
        generator.gen("red", globalvar.get_level() - table[i].lev,
                      table[i].adr)
    return 1
def repeat_statement():
    """Parse <repeat statement>: 'repeat' statement {';' statement} 'until' condition.

    Emits a 'jpc' back to the body start so execution repeats while the
    terminating condition is false.
    """
    cx1 = len(globalvar.get_code())  # p-code index of the loop body start
    token.getsym()
    statement()
    while globalvar.get_symbol() == token.SEMISY:
        token.getsym()
        statement()
    if globalvar.get_symbol() == token.UNTIL:
        token.getsym()
        # BUG FIX: the terminating condition was never parsed, so the jpc
        # below had no condition value on the stack; parse it first.
        condition()
        generator.gen("jpc", 0, cx1)  # condition false -> repeat from cx1
    else:
        error_handling.error(19)  # expected 'until' -> error 19
def new_board(self):
    """Generate a fresh puzzle board and rebuild the cube grid from it."""
    fresh = gen()
    self.board = fresh
    self.board_reset = self.board  # NOTE: same object, not a copy — TODO confirm intended
    self.cubes = [
        [Cube(fresh[r][c], r, c, self.width, self.height) for c in range(self.cols)]
        for r in range(self.rows)
    ]
    self.draw()
def evaluate(model, batch_num=20):
    """Estimate whole-sequence accuracy over `batch_num` generated batches.

    A prediction counts as correct only when the entire argmax-decoded
    label sequence matches the ground truth.  Returns the mean per-batch
    accuracy as a float.
    """
    batch_acc = 0
    # renamed from `generator` to avoid shadowing a likely module name
    data = gen(width=width, height=height)
    for _ in tqdm(range(batch_num)):
        X, y = next(data)  # next() works for both py2 generators and py3 iterators
        y_pred = model.predict(X)
        # BUG FIX: on Python 3 `map` is lazy, so np.mean(map(...)) fails;
        # materialize the per-sample match flags first.
        matches = list(map(np.array_equal,
                           np.argmax(y, axis=2).T,
                           np.argmax(y_pred, axis=2).T))
        batch_acc += np.mean(matches)
    return batch_acc / batch_num
def assign_statement():
    """Parse <assign statement>: ident ':=' expression, emitting a 'sto'."""
    name = identifier()
    pos = tab.position(name)
    table = globalvar.get_table()
    if pos == -1:
        error_handling.error(11)  # undeclared identifier -> error 11
        return 0
    if not table[pos].typ == "variable":
        error_handling.error(12)  # cannot assign to a constant/procedure -> error 12
        return 0
    if globalvar.get_symbol() == token.ASSIGNSY:
        token.getsym()
        expression()
        # store stack top into the variable: (level difference, offset)
        generator.gen("sto", globalvar.get_level() - table[pos].lev, table[pos].adr)
    else:
        error_handling.error(13)  # expected assignment operator -> error 13
    print("this is assign_statement")
def load(self):
    """Return one (input_images, true_map) training batch in NCHW layout.

    Advances the epoch cursor and reshuffles the index once every image
    has been consumed.
    """
    # NOTE(review): the original computed a slice of self.index here
    # (`load_index`) that was never used — the generator draws its own
    # data — so it was removed as dead code.
    self.idx += self.batch_size
    if self.idx >= self.nb_img:
        np.random.shuffle(self.index)
        self.idx = 0
    # TODO
    input_image, batch_data = gen(batch_size=self.batch_size, is_val=False)
    # HWC -> CHW for every image in the batch
    input_images = np.array(input_image).transpose(0, 3, 1, 2)
    true_map = batch_data.transpose(0, 3, 1, 2)  # fixed 'ture_map' typo (local only)
    return (input_images, true_map)
def proc_call_statement():
    """Parse <procedure call>: 'call' ident, emitting a 'cal' instruction."""
    if globalvar.get_symbol() != token.CALL:
        print("error in proc_call_statement()-->no call")
        return
    token.getsym()
    name = identifier()  # name of the current identifier
    if name == 0:
        error_handling.error(14)  # 'call' must be followed by an identifier -> error 14
        return 0
    pos = tab.position(name)
    table = globalvar.get_table()
    if pos == -1:
        error_handling.error(11)  # undeclared identifier -> error 11
        return 0
    if not table[pos].typ == 'procedure':
        error_handling.error(15)  # target is not a procedure -> error 15
        return 0
    # call instruction: (level difference, entry address)
    generator.gen("cal", globalvar.get_level() - table[pos].lev, table[pos].adr)
    print("this is proc_call_statement")
def main():
    # CLI entry point: parse arguments, build the rwightman TF model, and
    # print the RMSE of its steering predictions over a ROS bag.
    # NOTE: Python 2 code (bare `print` statement on the last line).
    parser = argparse.ArgumentParser(
        description='Model Runner for team rwightman')
    parser.add_argument('bagfile', type=str, help='Path to ROS bag')
    # NOTE(review): this help text looks copy-pasted from --metagraph_path
    parser.add_argument('--alpha', type=float, default=0.1,
                        help='Path to the metagraph path')
    parser.add_argument('--graph_path', type=str,
                        help='Path to the self contained graph def')
    parser.add_argument('--metagraph_path', type=str,
                        help='Path to the metagraph path')
    parser.add_argument('--checkpoint_path', type=str,
                        help='Path to the checkpoint path')
    parser.add_argument('--debug_print', dest='debug_print',
                        action='store_true',
                        help='Debug print of predicted steering commands')
    args = parser.parse_args()

    def get_model():
        # Build the model from whichever of graph/metagraph/checkpoint was given.
        model = RwightmanModel(alpha=args.alpha,
                               graph_path=args.graph_path,
                               metagraph_path=args.metagraph_path,
                               checkpoint_path=args.checkpoint_path)
        # Push one empty image through to ensure Tensorflow is ready.
        # There is typically a large wait on the first frame through.
        model.predict(np.zeros(shape=[480, 640, 3]))
        return model

    def process(model, img):
        # Predict one steering angle, optionally echoing it for debugging.
        steering_angle = model.predict(img)
        if args.debug_print:
            print(steering_angle)
        return steering_angle

    model = get_model()
    print calc_rmse(lambda image_pred: model.predict(image_pred),
                    gen(args.bagfile))
def expression():
    """Parse <expression>: ['+'|'-'] term { ('+'|'-') term }.

    Emits p-code for the optional unary sign and for each addition or
    subtraction (operating on the top two stack cells).
    """
    negate = False
    if globalvar.get_symbol() == token.PLUSSY:  # optional leading '+': no code needed
        token.getsym()
    elif globalvar.get_symbol() == token.MINUSSY:  # optional leading '-'
        negate = True
        token.getsym()
    term()  # the first term is mandatory
    if negate:
        # BUG FIX: the negate instruction must be emitted AFTER the term's
        # value is on the stack; the original emitted "opr 0 1" before
        # parsing the term, negating whatever was previously on top.
        generator.gen("opr", 0, 1)
    while globalvar.get_symbol() == token.MINUSSY or globalvar.get_symbol(
    ) == token.PLUSSY:
        addop = addtion_operator()  # remember the operator seen here
        term()
        if addop == token.PLUSSY:
            generator.gen("opr", 0, 2)  # emit add instruction
        else:
            generator.gen("opr", 0, 3)  # emit subtract instruction
    print("this is expression")
def generate() -> Any:
    """Flask endpoint: run the generator model on an uploaded image.

    Reads raw bytes from the request body, decodes them as a grayscale
    image, normalises to [-1, 1], feeds a 3-channel copy through `gen`,
    and streams the result back as a JPEG.
    """
    try:
        raw = request.data
    except Exception as err:
        return f"problem with image file: {err}", 400

    buffer = BytesIO()
    # decode the upload as a single-channel image and tensorise it
    grayscale = Image.open(BytesIO(raw)).convert("L")
    tensor = to_tensor(grayscale)
    tensor = torch.cat((tensor, tensor, tensor))  # replicate channel to 3
    tensor = (tensor - 0.5) / 0.5  # scale to [-1, 1]

    result = gen(tensor)
    result = torch.squeeze(result * 0.5 + 0.5)  # back to [0, 1]

    # convert to an image and write it into the response buffer
    to_img(result).save(buffer, "JPEG", quality=100)
    buffer.seek(0)
    return send_file(buffer, mimetype="image/jpeg")
def factor():
    """Parse a <factor>: identifier | unsigned integer | '(' expression ')'."""
    sym = globalvar.get_symbol()
    if sym == token.IDSY:
        name = identifier()
        pos = tab.position(name)
        table = globalvar.get_table()
        if pos == -1:  # not present in the symbol table
            error_handling.error(11)  # undeclared identifier -> error 11
        elif table[pos].typ == "constant":
            generator.gen("lit", 0, table[pos].num)  # push the literal value
        elif table[pos].typ == "variable":
            # push the variable's value: (level difference, offset)
            generator.gen("lod", globalvar.get_level() - table[pos].lev,
                          table[pos].adr)
        else:  # a procedure identifier cannot appear in an expression
            error_handling.error(21)
    elif sym == token.INTSY:
        unsigned_int()
        if globalvar.get_num() > token.MAX_NUM:
            error_handling.error(30)  # number exceeds the allowed maximum
            return 0
        generator.gen("lit", 0, globalvar.get_num())
    elif sym == token.LPARSY:  # parenthesised sub-expression
        token.getsym()
        expression()
        if globalvar.get_symbol() == token.RPARSY:
            token.getsym()
        else:
            error_handling.error(22)  # missing ')' -> error 22
    else:
        print("error in factor()")
        print(globalvar.get_pointer())
        return 0
    print("this is factor")  # reached on every non-returning parse path
# Script entry: drive the front end, then invoke the generator.
import generator as g
import start as s

s.start()  # run the start module's driver
g.gen()    # run generator.gen — presumably emits the final output; verify its role
# coding: utf8 import network import generator model = network.create_model() model.fit_generator(generator.gen(), samples_per_epoch=51200, nb_epoch=2, nb_worker=32, pickle_safe=True, validation_data=generator.gen(), nb_val_samples=1280) model.save_weights('my_model_weights.h5')
return self.mean_angle[0] else: img = img1 - self.img0 img = rescale_intensity(img, in_range=(-255, 255), out_range=(0, 255)) img = np.array(img, dtype=np.uint8) # to replicate initial model self.state.append(img) self.img0 = img1 X = np.concatenate(self.state, axis=-1) X = X[:, :, ::-1] X = np.expand_dims(X, axis=0) X = X.astype('float32') X -= self.X_mean X /= 255.0 return self.model.predict(X)[0][0] if __name__ == "__main__": parser = argparse.ArgumentParser(description='Model Runner for team rambo') parser.add_argument('bagfile', type=str, help='Path to ROS bag') args = parser.parse_args() model = Model("tanel/final_model.hdf5", "tanel/X_train_mean.npy") print calc_rmse(lambda image_pred: model.predict(image_pred), gen(args.bagfile))
print("light Green at t = " + str(env.now)) yield env.timeout(30) print("light Yellow at t = " + str(env.now)) yield env.timeout(5) print("light Red at t = " + str(env.now)) yield env.timeout(20) # return the UpperBound def getCeil(n): return np.ceil(n) # return the FloorBound def getFloor(n): return np.floor(n) def binomial(n, p, k=None): return np.random.binomial(n, p, k) if __name__ == '__main__': main() print(getCeil(3.3), getFloor(3.3)) for j in range(9): print generator.gen(j).next()
x = Conv2D(32*2**i, (3, 3), activation='relu')(x) x = MaxPooling2D((2, 2))(x) x = Flatten()(x) x = Dense(512, activation='relu')(x) x = Dropout(0.25)(x) x = [Dense(n_class, activation='softmax', name='c%d'%(i+1))(x) for i in range(6)] model = Model(input=input_tensor, outputs=x) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) model.fit_generator(gen(width=width, height=height), steps_per_epoch=2000, epochs=30, validation_data=gen(width=width, height=height), validation_steps=500) model.save('mycnn_v20170426_adadelta.h5') print 'saved mycnn_v20170426_adadelta.h5' from tqdm import tqdm def evaluate(model, batch_num=20): batch_acc = 0 generator = gen(width=width, height=height) for i in tqdm(range(batch_num)): X, y = generator.next() y_pred = model.predict(X) batch_acc += np.mean(map(np.array_equal, np.argmax(y, axis=2).T, np.argmax(y_pred, axis=2).T)) return batch_acc / batch_num
# Hyper-parameters for the style-transfer training run.
decay = 5e-5
style_loss_weight = 10
mode = 'simple'
in_memory = False
val_batch_size = 100
epochs = 160
batch_size = 8
steps_per_epoch = 1000

if __name__ == '__main__':
    # Build the loss model and compile it with Adam.
    loss_model = create_model(input_shape, N, coef=style_loss_weight)
    opt = Adam(learning_rate=lr, decay=decay)
    loss_model.compile(optimizer=opt)
    # Training and validation generators (validation always uses 'simple' mode).
    train_gen = gen(train_dir_name, input_shape[:2], batch_size, mode,
                    in_memory)
    val_gen = gen(style_dir_name, input_shape[:2], val_batch_size, 'simple',
                  in_memory)
    val_data = next(val_gen)
    # Checkpointing: resume from the latest checkpoint when one exists.
    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt,
                               net=loss_model)
    manager = tf.train.CheckpointManager(ckpt, '/tf_ckpts', max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")
'--cnn-meta', action='store', dest='cnn_graph', default='autumn/autumn-cnn-model-tf.meta') parser.add_argument('--lstm-json', '--lstm-meta', action='store', dest='lstm_json', default='autumn/autumn-lstm-model-keras.json') parser.add_argument('--cnn-weights', action='store', dest='cnn_weights', default='autumn/autumn-cnn-weights.ckpt') parser.add_argument('--lstm-weights', action='store', dest='lstm_weights', default='autumn/autumn-lstm-weights.hdf5') args = parser.parse_args() def make_predictor(): model = AutumnModel(args.cnn_graph, args.lstm_json, args.cnn_weights, args.lstm_weights) return lambda img: model.predict(img) def process(predictor, img): return predictor(img) model = make_predictor() print calc_rmse(lambda image_pred: model(image_pred), gen(args.bagfile))
def write_statement():
    """Parse <write statement>: 'write' '(' expression {',' expression} ')'.

    Emits a 'wrt' instruction per expression.  Returns 0 on any error path.
    """
    if globalvar.get_symbol() != token.WRITE:
        print("error in write_statement()--no write")
        return 0
    token.getsym()
    if globalvar.get_symbol() != token.LPARSY:
        error_handling.error(40)  # missing '(' -> error 40
        return 0
    token.getsym()
    expression()
    generator.gen("wrt", 0, 0)  # emit output instruction
    while globalvar.get_symbol() == token.COMMASY:
        token.getsym()
        expression()
        generator.gen("wrt", 0, 0)  # emit output instruction
    if globalvar.get_symbol() != token.RPARSY:
        error_handling.error(22)  # missing ')' -> error 22
        return 0
    token.getsym()
    print("this is write_statement")


# def alpha():  # analysis subroutine for <letter>
#     print("this is alpha")
#
#
# def digit():  # analysis subroutine for <digit>
#     print("this is digit")

# main program scaffolding below (kept from the original, disabled)
# token.getsym()
# program()
# tab.show()
# generator.show()
# tab.show()
# print(globalvar.get_pointer())
# stri="if"
# token.getsym()
# print(stri)
# print(symbol)
# statement()
# globalvar.show()
# globalvar.set_stri("if if#####")
# print("***************")
# globalvar.show()
# token.getsym()
# print("***************")
# statement()
# globalvar.show()
# token.getsym()
# statement()
# token.getsym()
# statement()
# print(globalvar.get_symbol())
# token.getsym()
# print(globalvar.get_symbol())
class Grid:
    """Sudoku board: owns the 9x9 cube grid, selection state, and drawing."""

    # Initial board — generated once at class-definition time, so it is
    # shared by every Grid instance.  NOTE(review): confirm the sharing
    # is intended rather than per-instance generation.
    board = gen.gen()

    def __init__(self, rows, cols, width, height):
        self.rows = rows
        self.cols = cols
        self.cubes = [[
            Cube(self.board[i][j], i, j, width, height) for j in range(cols)
        ] for i in range(rows)]
        self.width = width
        self.height = height
        self.model = None       # plain-int matrix mirror of the cube values
        self.selected = None    # (row, col) of the currently selected cube

    def update_model(self):
        # Rebuild the plain-int matrix used by the solver from cube values.
        self.model = [[self.cubes[i][j].value for j in range(self.cols)]
                      for i in range(self.rows)]

    # finally, place the probable number to the board
    def place(self, val):
        row, col = self.selected
        if self.cubes[row][col].value == 0:
            self.cubes[row][col].set(val)
            self.update_model()
            # keep the value only if it is valid AND the board stays solvable
            if valid(self.model, val, (row, col)) and solve(self.model):
                return True
            else:
                self.cubes[row][col].set(0)
                self.cubes[row][col].set_temp(0)
                self.update_model()
                return False

    # sketch the probable number to the board (pencil-mark only)
    def sketch(self, val):
        row, col = self.selected
        self.cubes[row][col].set_temp(val)

    # Draw the board
    def draw(self, win):
        # Draw Grid Lines — every third line is thicker (3x3 box borders)
        gap = self.width / 9
        for i in range(self.rows + 1):
            if i % 3 == 0 and i != 0:
                thick = 4
            else:
                thick = 1
            pygame.draw.line(win, (27, 73, 179), (0, i * gap),
                             (self.width, i * gap), thick)
            pygame.draw.line(win, (27, 73, 179), (i * gap, 0),
                             (i * gap, self.height), thick)
        # Draw Cubes
        for i in range(self.rows):
            for j in range(self.cols):
                self.cubes[i][j].draw(win)

    # Select the cube
    def select(self, row, col):
        # Reset all other cubes' selection flags first
        for i in range(self.rows):
            for j in range(self.cols):
                self.cubes[i][j].selected = False
        self.cubes[row][col].selected = True
        self.selected = (row, col)

    # clear the sketched number from the selected (still-empty) cube
    def clear(self):
        row, col = self.selected
        if self.cubes[row][col].value == 0:
            self.cubes[row][col].set_temp(0)

    # return the cubes position after mouse click
    def click(self, pos):
        """
        :param: pos
        :return: (row, col)
        """
        if pos[0] < self.width and pos[1] < self.height:
            gap = self.width / 9
            x = pos[0] // gap
            y = pos[1] // gap
            return (int(y), int(x))
        else:
            return None

    # return True if the board is finished
    def is_finished(self):
        for i in range(self.rows):
            for j in range(self.cols):
                if self.cubes[i][j].value == 0:
                    return False
        # Board complete: play the end-of-game screens, then quit pygame.
        # NOTE(review): win, solved, thanks, code_loop, the_as8_org, poster
        # and screen_timer appear to be module-level globals — confirm.
        clock = pygame.time.Clock()
        time.sleep(1)
        screen_timer(win, 100, solved, clock)
        screen_timer(win, 100, thanks, clock)
        screen_timer(win, 150, code_loop, clock)
        screen_timer(win, 200, the_as8_org, clock)
        screen_timer(win, 150, poster, clock)
        pygame.quit()
        return True
def main():
    """Train a small classifier head on top of a frozen VAE encoder.

    Parses CLI arguments, builds train/test generators over the image and
    annotation directories, trains the Module, and freezes the resulting
    graph to a .pb file.
    """
    import argparse as argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-txt', type=str, required=True)
    parser.add_argument('--test-txt', type=str, required=True)
    parser.add_argument('--save-dir', type=str, required=True)
    parser.add_argument('--lr', type=float, required=False, default=0.001)
    parser.add_argument('--batch-size', type=int, required=False, default=50)
    parser.add_argument('--epochs', type=int, required=False, default=10)
    parser.add_argument('--data-dir', type=str, required=True)
    parser.add_argument('--enc-pb', type=str, required=True)
    parser.add_argument('--tensor-json', type=str, required=True)
    parser.add_argument('--message', type=str, required=True)
    args = parser.parse_args()
    data_dir = args.data_dir
    image_dir = os.path.join(data_dir, "images/")
    anno_dir = os.path.join(data_dir, "annotations/")
    train_path = args.train_txt
    test_path = args.test_txt
    froz_enc_pb = args.enc_pb
    tensor_names = json.load(open(args.tensor_json, 'r'))
    # Description of the frozen encoder consumed by Module below.
    encoder = {
        "path": froz_enc_pb,
        "input_tensor_name": tensor_names['inputs']['image_input'],
        "output_tensor_name": tensor_names['outputs']['embedding'],
        "name": "vae"
    }
    # Load list of image names for train and test
    raw_train = load_dataset(train_path)
    raw_test = load_dataset(test_path)
    # Create train and test generators
    batch_size = args.batch_size
    # NOTE(review): batch_size comes from --batch-size but the train
    # generator hard-codes batch_size=50 — confirm which is intended.
    train_gen = gen(batch_size=50,
                    data_set=raw_train,
                    image_dir=image_dir,
                    anno_dir=anno_dir,
                    preprocess_fn=process_fn,
                    prepare_batch_fn=prep_batch)
    test_gen = gen(batch_size=300,
                   data_set=raw_test,
                   image_dir=image_dir,
                   anno_dir=anno_dir,
                   preprocess_fn=process_fn,
                   prepare_batch_fn=prep_batch)
    # Kick-off
    NUM_BINS = 15
    #name = args.name
    save_dir = args.save_dir
    epochs = args.epochs
    in_shape = [120, 160, 3]
    lr = args.lr
    classes = [i for i in range(NUM_BINS)]
    message = args.message
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # Placeholder message overwritten once training finishes successfully.
    best_ckpt = "must have crashed during training :-("
    save_config(save_dir, data_dir, NUM_BINS, lr, batch_size, epochs, in_shape,
                best_ckpt, message)
    # Head architecture: one 30-unit ReLU hidden layer plus a 1-unit logit.
    layer_def = []
    layer_def.append({
        "neurons": 30,
        "activation": tf.nn.relu,
        "name": "mod1",
        "init": xavier(),
        "dropout": 1.
    })
    layer_def.append({
        "neurons": 1,
        "activation": None,
        "name": "logits",
        "init": xavier(),
        "dropout": 1.
    })
    car_brain = Module(encoder, layer_def, classes=classes)
    return_info = car_brain.train(train_gen, test_gen, save_dir, epochs)
    # Freeze the trained graph+checkpoint into a self-contained .pb.
    frozen_meta = freeze_meta(return_info["graph_path"],
                              return_info["ckpt_path"],
                              return_info["out_path"] + "/frozen.pb",
                              return_info["tensor_json"])
# Validation visualisation: load the trained loss model, extract the
# decoder sub-model, run one validation batch through it, and plot
# (content, style, prediction) triples in a grid.
from loss_model import create_model
from generator import gen
import itertools
from utils import plot
from tensorflow.keras.models import Model

if __name__ == "__main__":
    loss_model = create_model()
    loss_model.load_weights('weights/weights.h5')
    # Re-wire the graph so the 'decoded' layer becomes the output.
    model = Model(inputs=loss_model.inputs,
                  outputs=loss_model.get_layer('decoded').output)
    model.load_weights('weights/weights.h5')
    g = gen('images/validation', batch_size=30)
    (C, S), _ = next(g)  # one batch of (content, style) pairs
    pred = model.predict([C, S])
    # interleave content / style / prediction rows in a 6x6 grid
    plot(itertools.chain(*zip(C, S, pred)), 6, 6, (24,24))
Dense(n_class, activation='softmax', name='c%d' % (i + 1))(x) for i in range(n_len) ] model = Model(inputs=input_tensor, outputs=x) model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) return model my_classifier = KerasClassifier(make_model, batch_size=32) print 'create 60000 train samples' print 'start at ', datetime.now() traingen = gen(width, height, batch_size=320) X_train, y_train = traingen.next() print 'create 60000 train samples' print 'end at ', datetime.now() print 'create 10000 test samples' print 'start at ', datetime.now() testgen = gen(width, height, batch_size=64) X_test, y_test = testgen.next() print 'create 10000 test samples' print 'end at ', datetime.now() validator = GridSearchCV( my_classifier, param_grid={ # nb_epoch is avail for tuning even when not # an argument to model building function 'nb_epoch': [3, 6],
x = Conv2D(32 * 2**i, (3, 3), activation='relu')(x) x = MaxPooling2D((2, 2))(x) x = Flatten()(x) x = Dropout(0.25)(x) x = [ Dense(n_class, activation='softmax', name='c%d' % (i + 1))(x) for i in range(6) ] model = Model(input=input_tensor, outputs=x) model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) model.fit_generator(gen(width=width, height=height), steps_per_epoch=2000, epochs=20, validation_data=gen(width=width, height=height), validation_steps=500) model.save('mycnn_v20170424_adagrad.h5') print 'saved mycnn_v20170424_adagrad.h5' from tqdm import tqdm def evaluate(model, batch_num=20): batch_acc = 0 generator = gen(width=width, height=height) for i in tqdm(range(batch_num)):
def condition():
    """Parse a <condition>: 'odd' expression | expression relop expression."""
    if globalvar.get_symbol() == token.ODD:
        token.getsym()
        expression()
        generator.gen("opr", 0, 6)  # odd test on the stack top
    else:
        expression()
        relop = relation_operator()  # remember the relational operator
        expression()
        # opr codes comparing the two topmost stack cells:
        # = : 8, # : 9, < : 10, >= : 11, > : 12, <= : 13
        opcodes = {
            token.EQUSY: 8,
            token.NOEQUSY: 9,
            token.LESSTHANSY: 10,
            token.NOLESSTHANSY: 11,
            token.MORETHANSY: 12,
            token.NOMORETHANSY: 13,
        }
        if relop in opcodes:
            generator.gen("opr", 0, opcodes[relop])
    print("this is condition")
def feed(x, target, training=False, batch_size=64, steps=1000):
    """Thin wrapper around gen().

    The previously hard-coded positional constants (64, 1000) are now
    overridable keyword parameters with the same defaults, so every
    existing call site behaves identically.
    NOTE(review): the parameter names batch_size/steps are provisional —
    confirm against gen()'s actual signature.
    """
    return gen(x, target, batch_size, steps, training=training)
# Demo of the manual iterator protocol (Python 2: bare `print` statement
# and iterator .next() method).
import generator

__author__ = 'momo9'

if __name__ == '__main__':
    # Iterate a plain list by hand until StopIteration is raised.
    l = range(10)
    it = l.__iter__()
    while True:
        try:
            print it.next()
        except StopIteration:
            break
    print '=' * 60
    # Same manual protocol over a user-defined generator object.
    g = generator.gen(2)
    it = g.__iter__()
    while True:
        try:
            print it.next()
        except StopIteration:
            break