import numpy as np
import tensorflow as tf

import lenet5_inference
import lenet5_train
import pre


def pic(path):
    with tf.Graph().as_default() as g:
        # Placeholder for a single preprocessed image
        x = tf.placeholder(tf.float32, [
            1, lenet5_inference.IMAGE_SIZE, lenet5_inference.IMAGE_SIZE,
            lenet5_inference.NUM_CHANNELS
        ])
        xs = pre.pre(path)
        reshaped_xs = np.reshape(
            xs, (1, lenet5_inference.IMAGE_SIZE, lenet5_inference.IMAGE_SIZE,
                 lenet5_inference.NUM_CHANNELS))
        pic_feed = {x: reshaped_xs}
        regularizer = tf.contrib.layers.l2_regularizer(
            lenet5_train.REGULARIZATION_RATE)
        y = lenet5_inference.inference(x, False, regularizer)
        # Restore the moving-average shadow variables rather than the raw ones
        variable_averages = tf.train.ExponentialMovingAverage(
            lenet5_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(lenet5_train.MODEL_SAVE_PATH)
            print('loading model...\n\n')
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                yy = sess.run(y, feed_dict=pic_feed)
                res = sess.run(tf.argmax(yy, 1))
                print('Predicted result:', res)
            else:
                print("No checkpoint file found!")
                return
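
# A hypothetical call of pic() above: 'digit.png' is only a placeholder path,
# and the call assumes lenet5_train has already written a checkpoint to
# MODEL_SAVE_PATH and that pre.pre() returns the image pixels as floats.
pic('digit.png')  # prints the arg-max class index for the image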

import pickle

import numpy as np


def dtwt(mat_paths):
    # Build a wavelet prototype and a KPCA projection for each of the 48
    # rotations; pre, wavextract and projecaokpca are project-local helpers.
    prototipo = np.empty([48, 93, 1])
    kpca = np.empty([48], dtype=object)
    for rotation in range(48):
        wav_coefs = np.empty([0, 3685])
        for pose in range(93):
            print('rotation ' + str(rotation) + '/48 pose ' + str(pose) + '/93')
            wav_mean = np.empty([0, 3685])
            for person in range(15):
                img_cropped = pre(mat_paths[pose, person])
                trafo_image = wavextract(img_cropped)
                magnitude = np.abs(trafo_image[rotation])  # use only the magnitude
                wav_vet = np.reshape(magnitude, 3685)  # vectorize the matrix
                wav_mean = np.append(wav_mean, [wav_vet], axis=0)
            wav_mean_vet = np.mean(wav_mean, axis=0)
            wav_coefs = np.append(wav_coefs, [wav_mean_vet], axis=0)
        prototipo[rotation], kpca[rotation] = projecaokpca(wav_coefs)
    with open('treino.pkl', 'wb') as f:  # save the variables prototipo and kpca
        pickle.dump([prototipo, kpca], f)  # to the file treino.pkl
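
# A minimal sketch of reloading what dtwt() writes; it only assumes that
# treino.pkl was produced by the function above.
import pickle

with open('treino.pkl', 'rb') as f:
    prototipo, kpca = pickle.load(f)
print(prototipo.shape)  # (48, 93, 1): one prototype per rotation
print(len(kpca))        # 48 fitted KPCA models, one per rotation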

import tensorflow as tf

import mnist_inference
import mnist_train
import pre


def pic(path):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        pic_feed = {x: pre.pre(path)}  # pre.pre returns the flattened image batch
        y = mnist_inference.inference(x, None)  # no regularizer at test time
        # Restore the moving-average shadow variables
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
            print(ckpt)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                yy = sess.run(y, feed_dict=pic_feed)
                res = sess.run(tf.argmax(yy, 1))
                print(res)
            else:
                print("No checkpoint file found!")
                return
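
# A sketch of the input contract pic() assumes: pre.pre(path) must return a
# float batch shaped (1, INPUT_NODE). This stand-in uses Pillow and assumes
# the usual MNIST geometry (28 * 28 = 784); it is not the project's pre module.
from PIL import Image
import numpy as np

def pre_sketch(path):
    img = Image.open(path).convert('L').resize((28, 28))
    return np.asarray(img, dtype=np.float32).reshape(1, 784) / 255.0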

def run(self, file):
    '''
    The run method. Iterate over every line in the file, lex, parse, and run it.
    '''
    code = pre.pre(file.read())  # Preprocess the input code
    lines = code.split('\n')  # Split the code into lines
    self.lines = lines  # Store the lines so functions can access them
    while '' in lines:
        lines.remove('')  # Remove empty lines
    while self.pos < len(lines):
        line = lines[self.pos]  # Get the current line
        stream = lexer.lex(line)  # Lex the line
        if parser.end(line):
            # The line is a } (used for ending statements), so skip it
            self.pos += 1
            continue
        elif stream:
            func, stream = parser.parse(stream)  # Parse the lexed stream
            # We don't need the tag part of the stream, only the text
            stream = [token[0] for token in stream]
            func = mapping[func](stream)
            func.run(self)  # Run the function we got
        else:
            self.pos += 1
    self.write('ret')  # Return from main, needed to avoid a segmentation fault
    self.indent = 1
    self.write('section .data', False)  # The data section, variables live here
    for var in self.vars:
        # Define a variable of the right type ('db', 'dw', ...)
        var_type = 'd' + self.vars[var][0][0]
        if self.vars[var][1][0] == '"':
            self.write('%s: %s %s' % (var, var_type, self.vars[var][1]))
        else:
            self.write('%s: %s 0' % (var, var_type))
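
# A small illustration of the .data lines the loop above emits. The structure
# of self.vars is assumed to be {name: (type_name, value)}; the entries below
# are made up for the example.
example_vars = {'msg': ('byte', '"hi"'), 'n': ('word', '0')}
for var in example_vars:
    var_type = 'd' + example_vars[var][0][0]  # 'db' for byte, 'dw' for word
    if example_vars[var][1][0] == '"':
        print('%s: %s %s' % (var, var_type, example_vars[var][1]))  # msg: db "hi"
    else:
        print('%s: %s 0' % (var, var_type))                         # n: dw 0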

def __init__(self):
    # para.device_ids = [0, 1, 2]
    self.pool_size = [8, 4, 2, 1]
    # os.environ["CUDA_VISIBLE_DEVICES"] = match.get(self.seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    self.tool = rpn_tool_d()
    self.tool2 = rcn_tool_c()
    # Checkpoint files for the backbone and the RPN
    path_base = os.path.join(os.getcwd(), 'base_a1_max.p')
    path_RPN = os.path.join(os.getcwd(), 'rpn_a1_max.p')
    print(path_base)
    print(path_RPN)
    from tool.batch.roi_layers import ROIPool
    # One ROI pooling layer per pyramid level: output sizes [8, 4, 2, 1]
    # paired with feature-map strides [8, 16, 32, 64]
    self.pool4 = ROIPool(self.pool_size[3], 1 / 64)
    self.pool3 = ROIPool(self.pool_size[2], 1 / 32)
    self.pool2 = ROIPool(self.pool_size[1], 1 / 16)
    self.pool1 = ROIPool(self.pool_size[0], 1 / 8)
    self.pre = pre().cuda()
    self.ROI = roi().cuda()
    self.RPN = RPN().cuda().eval()
    self.features = mb().cuda().eval()
    # Load the pretrained weights, then wrap every module in DataParallel
    tmp = load(path_RPN)
    self.RPN.load_state_dict(tmp)
    tmp = load(path_base)
    self.features.load_state_dict(tmp)
    self.features = DataParallel(self.features, device_ids=[0])
    self.RPN = DataParallel(self.RPN, device_ids=[0])
    self.ROI = DataParallel(self.ROI, device_ids=[0])
    self.pre = DataParallel(self.pre, device_ids=[0])
    get_paprams(self.features)
    get_paprams(self.RPN)
    get_paprams(self.ROI)
    self.ROI.apply(weights_init)  # the ROI head starts from fresh weights
    self.batch = True
    self.flag = 3
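
# The ROIPool layers above pair output sizes with feature-map strides so that
# spatial_scale is the reciprocal of the stride. A quick check of that pairing
# (pool1..pool4 correspond to strides 8, 16, 32, 64):
for size, stride in zip([8, 4, 2, 1], [8, 16, 32, 64]):
    print('pooled output %dx%d at spatial scale 1/%d' % (size, size, stride))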