def setUp(self):
    super(AxiomsTest, self).setUp()

    # Make a linear model for testing.
    graph_lin = Graph()
    with graph_lin.as_default():
        x_lin = placeholder('float32', (None, self.input_size))
        y_lin = x_lin @ self.model_lin_weights + self.model_lin_bias

        self.model_lin = ModelWrapper(graph_lin, x_lin, y_lin)

    # Make a deeper model for testing.
    graph_deep = Graph()
    with graph_deep.as_default():
        x_deep = placeholder('float32', (None, self.input_size))
        z1_deep = (x_deep @ self.model_deep_weights_1 + self.model_deep_bias_1)
        z2_deep = relu(z1_deep)
        z3_deep = (z2_deep @ self.model_deep_weights_2 + self.model_deep_bias_2)
        z4_deep = relu(z3_deep)
        y_deep = (z4_deep @ self.model_deep_weights_3 + self.model_deep_bias_3)

        self.model_deep = ModelWrapper(
            graph_deep, x_deep, y_deep,
            dict(layer2=z2_deep, layer3=z3_deep))

    self.layer2 = 'layer2'
    self.layer3 = 'layer3'

class DetectFace:
    def __init__(self):
        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session(graph=self.graph)
            with self.session.as_default():
                self.detector = MTCNN()

    def processe(self, args):
        faces = []
        image = args
        k.set_session(self.session)
        with self.graph.as_default():
            results = self.detector.detect_faces(image)
            for res in results:
                x1, y1, width, height = res['box']
                x1, y1 = abs(x1), abs(y1)
                x2, y2 = x1 + width, y1 + height
                face = image[y1:y2, x1:x2]
                faces.append(face)
        return faces

class LightClassifier():
    def __init__(self):
        # Set TF configuration
        config = tf.ConfigProto(gpu_options=tf.GPUOptions(
            per_process_gpu_memory_fraction=0.2))
        config.gpu_options.allow_growth = True

        self.graph = Graph()
        with self.graph.as_default():
            self.session = tf.Session(config=config)
            with self.session.as_default():
                # Load model
                K.set_learning_phase(0)
                #with open("./data/model.json", 'r') as json_file:
                #    loaded_model_json = json_file.read()
                #model = model_from_json(loaded_model_json)
                model = load_model("./data/model1.h5")
                K.set_learning_phase(0)
                # compile is required for inference...
                model.compile(optimizer='SGD',
                              loss='binary_crossentropy',
                              metrics=['acc'])
                K.set_learning_phase(0)
                self.model = model

    def classify(self, input):
        with self.graph.as_default():
            with self.session.as_default():
                result = self.model.predict(input)[0]
        return result

def _dojob(ready, e, queue):
    prctl.set_name('AI detector - do job')
    global session1, session2, ip_model, mac_model

    ip_graph = Graph()
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    with ip_graph.as_default():
        session1 = Session(config=config)
        with session1.as_default():
            ip_model = K.models.load_model(
                'gru_ip_4tuple.hdf5', custom_objects={'attention': attention})
            ip_model._make_predict_function()

    mac_graph = Graph()
    with mac_graph.as_default():
        session2 = Session(config=config)
        with session2.as_default():
            mac_model = K.models.load_model(
                'gru_mac_4tuple.hdf5', custom_objects={'attention': attention})
            mac_model._make_predict_function()

    ready.set()
    print('set ready')

    last = time.time()
    global ignore_packet
    while not e.is_set():
        if not queue.empty():
            obj = queue.get()
            if (obj[0], obj[1]) in ignore_packet:
                if obj[3] <= ignore_packet[(obj[0], obj[1])]:
                    continue
            feature_extract((obj[2], obj[3]))
        if time.time() - last >= polling_interval:
            print(queue.qsize())
            global flow_statics, src_addr_list, memory_data
            # calculate features in last 5 seconds
            result = calculate_feature(flow_statics)
            memory_data.pop(0)
            memory_data.append(result)
            t_run_exp = threading.Thread(
                target=_run_exp,
                args=(result, src_addr_list, memory_data, ))
            t_run_exp.start()
            t_run_exp.join()
            flow_statics = {}
            src_addr_list = {}
            last = time.time()

    K.backend.clear_session()
    del ip_model
    del mac_model

def _dojob(e, queue):
    prctl.set_name('AI detector - do job')
    global session1, session2, ip_model, mac_model

    ip_graph = Graph()
    with ip_graph.as_default():
        session1 = Session()
        with session1.as_default():
            ip_model = K.models.load_model(ip_model_path)
            ip_model._make_predict_function()

    mac_graph = Graph()
    with mac_graph.as_default():
        session2 = Session()
        with session2.as_default():
            mac_model = K.models.load_model(mac_model_path)
            mac_model._make_predict_function()

    #pcap_file = open('test.pcap', 'wb')
    #writer = dpkt.pcap.Writer(pcap_file)

    global total_tp, total_tn, total_fp, total_fn
    total_tp = 0
    total_tn = 0
    total_fp = 0
    total_fn = 0

    last = time.time()
    #count_lock = threading.Lock()
    while not e.is_set():
        if not queue.empty():
            obj = queue.get()
            feature_extract(obj)
            current = obj[1]
        else:
            current = time.time()
        if current - last >= polling_interval:
            global flow_statics, src_addr_list, attacker
            # calculate features in last 5 seconds
            result = calculate_feature(flow_statics)
            memory_data.pop(0)
            memory_data.append(result)
            #t_run_exp = threading.Thread(target=_run_exp, args=(flow_statics, src_addr_list, attacker, memory_data, count_lock, ))
            t_run_exp = threading.Thread(
                target=_run_exp,
                args=(flow_statics, src_addr_list, attacker, memory_data, ))
            t_run_exp.start()
            t_run_exp.join()
            flow_statics = {}
            src_addr_list = []
            attacker = []
            last = current

    K.backend.clear_session()
    del ip_model
    del mac_model

class MyPredictBrain():
    def __init__(self):
        self.basepath = os.path.dirname(__file__)
        self.Imgs_Test = np.ndarray((1, 512, 512, 1), dtype=np.float32)
        self.Predict = np.ndarray((512, 512), dtype=np.float32)
        keras.backend.clear_session()  # so the model can be reused across calls
        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session()
            with self.session.as_default():
                unet = myUnet3()
                self.model = unet.Model
                self.model.load_weights('Unet_Brain.hdf5')

    def LoadPic(self):
        self.img = cv2.imread(
            self.basepath + "/static/auto_photos/auto_picture.jpg",
            cv2.IMREAD_GRAYSCALE)
        self.img = cv2.resize(self.img, (512, 512),
                              interpolation=cv2.INTER_NEAREST)
        pix = np.float32(self.img)
        max = np.max(pix)
        pix = np.divide(pix, max)
        pix = img_to_array(pix)
        for i in range(512):
            for j in range(512):
                self.Imgs_Test[0][i][j] = pix[i][j]

    def PredictPic(self):
        K.set_session(self.session)
        with self.graph.as_default():
            self.Result = self.model.predict(self.Imgs_Test, verbose=1)

    def SavePic(self):
        for m in range(512):
            for n in range(512):
                if ((self.Result[0][m][n][0] >= self.Result[0][m][n][1])
                        and (self.Result[0][m][n][0] >= self.Result[0][m][n][2])):
                    self.Predict[m][n] = 0
                elif (self.Result[0][m][n][1] >= self.Result[0][m][n][2]):
                    self.Predict[m][n] = 127.5
                else:
                    self.Predict[m][n] = 255
        # scipy.misc.imsave(self.basepath + '\\1.jpg', self.Predict)
        path = self.basepath + "/static/after_auto_brain_photo/"
        cv2.imwrite(os.path.join(path, 'after_auto_brain_temp.jpg'), self.Predict)

def ai_predict(imgPath):
    print("PATH:", imgPath)

    # preprocessing frame to predict its label
    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    # config.log_device_placement = True  # to log device placement (on which device the operation ran)
    # sess = tf.Session(config=config)

    ### FIRST approach ####
    # load previously trained model
    #K.clear_session()
    #global model
    #model = load_model(os.path.join(SAVE_DIR, CRAPPY_MODEL))
    #model._make_predict_function()
    #global graph
    #model = applications.VGG16(include_top=False, weights='imagenet', input_shape=(IMAGE_SIZE,IMAGE_SIZE,3))
    #graph = tf.get_default_graph()

    ### SECOND approach ####
    graph1 = Graph()
    with graph1.as_default():
        session1 = Session(graph=graph1)
        with session1.as_default():
            model_1 = load_model(os.path.join(SAVE_DIR, CRAPPY_MODEL))

    frame2 = cv2.imread(imgPath)
    frame2 = cv2.resize(frame2, (IMAGE_SIZE, IMAGE_SIZE))
    frame2 = img_to_array(frame2)
    frame2 = np.array(frame2, dtype="float32") / 255.0
    Image.fromarray((frame2[-1] * 255).round().astype(np.uint8))
    frame2 = np.expand_dims(frame2, axis=0)

    # generating a prediction for the frame
    ## FIRST approach
    #with graph.as_default():
    #    y_pred = model.predict_classes(frame2)

    ## SECOND approach
    K.set_session(session1)
    with graph1.as_default():
        try:
            y_pred = model_1.predict_classes(frame2)
        except:
            print()
    #y_pred = top_model.predict_classes(model.predict(frame2[None,:,:,:]))

    print("y_pred:", int(y_pred))
    return int(y_pred[0])

class TensoflowFaceDector(object):
    def __init__(self, PATH_TO_CKPT):
        """Tensorflow detector"""
        self.detection_graph = Graph()
        with self.detection_graph.as_default():
            od_graph_def = compat.v1.GraphDef()
            with io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                import_graph_def(od_graph_def, name='')

        with self.detection_graph.as_default():
            config = compat.v1.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = compat.v1.Session(graph=self.detection_graph, config=config)
            self.windowNotSet = True

    def run(self, image):
        """image: bgr image
        return (boxes, scores, classes, num_detections)
        """
        image_np = cvtColor(image, COLOR_BGR2RGB)

        # The array based representation of the image will be used later in order to
        # prepare the result image with boxes and labels on it.
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = expand_dims(image_np, axis=0)
        image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

        # Actual detection.
        start_time = time()
        (boxes, scores, classes, num_detections) = self.sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
        elapsed_time = time() - start_time
        print('inference time cost: {}'.format(elapsed_time))

        return (boxes, scores, classes, num_detections)

def test_mesh_evaluate_models(
    graph: tf.Graph,
    session: tf.Session,
    space: gym.Space,
    n_models: int,
    n_mesh: int = 64,
):
    """Checks `canonical_sample.mesh_evaluate_models` agrees with `mesh_evaluate_models_slow`."""
    with datasets.sample_dist_from_space(space) as dist:
        obs = dist(n_mesh)
        actions = dist(n_mesh)
        next_obs = dist(n_mesh)

    with graph.as_default():
        models = {}
        for i in range(n_models):
            with tf.variable_scope(str(i)):
                models[i] = rewards.MLPRewardModel(space, space)
        session.run(tf.global_variables_initializer())

    with session.as_default():
        expected = mesh_evaluate_models_slow(models, obs, actions, next_obs)
        actual = epic_sample.mesh_evaluate_models(models, obs, actions, next_obs)

    assert expected.keys() == actual.keys()
    for k in expected:
        assert np.allclose(expected[k], actual[k]), f"difference in model {k}"

def loadKerasModel(self, filePath):
    global PMMLMODELSTORAGE
    fO = pathlib.Path(filePath)
    keyToModel = fO.name.replace(fO.suffix, '')
    # print (PMMLMODELSTORAGE)
    try:
        model_graph = Graph()
        with model_graph.as_default():
            tf_session = Session()
            with tf_session.as_default():
                seqModel = load_model(filePath)
        tempDictModel = {
            'modelObj': seqModel,
            'model_graph': model_graph,
            'modelGeneratedFrom': 'Keras',
            'tf_session': tf_session,
            'inputShape': seqModel.input_shape,
        }
        PMMLMODELSTORAGE[keyToModel] = tempDictModel
        messageToWorld = "Model Loaded Successfully"
        reStat = 200
    except:
        messageToWorld = "Model load failed, please connect with admin"
        keyToModel = None
        reStat = 500
    resultResp = {'message': messageToWorld, 'keytoModel': keyToModel}
    return JsonResponse(resultResp, status=reStat)

def load_KerasGraph(path):
    print("> ====== loading Keras model for classification")
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            input_shape = (28, 28, 1)
            num_classes = 6

            model = Sequential()
            model.add(Conv2D(32, kernel_size=(3, 3), input_shape=input_shape))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.25))
            model.add(Flatten())
            model.add(Dense(128))
            model.add(ReLU())
            model.add(Dropout(0.5))
            model.add(Dense(num_classes))
            model.add(Softmax())

            model.load_weights(path)
            graph = tf.get_default_graph()

    print("> ====== Keras model loaded")
    return model, graph, thread_session

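# Editor's sketch (not from the original sources): one plausible way the
# (model, graph, session) triple returned by load_KerasGraph above is consumed
# from a worker thread. The `frame` argument, the 28x28 grayscale reshape, and
# the `from keras import backend as K` alias are illustrative assumptions; the
# set_session / graph.as_default() pattern itself mirrors the other snippets
# in this collection.
def classify_crop(frame, model, graph, session):
    import numpy as np
    from keras import backend as K

    # Match the (1, 28, 28, 1) input shape the model above was built with.
    batch = np.asarray(frame, dtype='float32').reshape(1, 28, 28, 1) / 255.0

    # Bind the dedicated session/graph so predict() is safe off the loading thread.
    K.set_session(session)
    with graph.as_default():
        probs = model.predict(batch)
    return int(np.argmax(probs[0]))
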
def test_internal_slice_multiple_layers(self):
    graph = Graph()
    with graph.as_default():
        x1 = tf.placeholder('float32', (None, 5))
        z1 = x1 @ tf.random.normal((5, 6))
        x2 = tf.placeholder('float32', (None, 1))
        z2 = x2 @ tf.random.normal((1, 2))
        z3 = z2 @ tf.random.normal((2, 4))
        z4 = tf.concat([z1, z3], axis=1)
        z5 = z4 @ tf.random.normal((10, 7))
        y = z5 @ tf.random.normal((7, 3))

    model = ModelWrapper(
        graph, [x1, x2], y, dict(cut_layer1=z1, cut_layer2=z2))

    infl = InternalInfluence(
        model, Cut(['cut_layer1', 'cut_layer2']), ClassQoI(1), PointDoi())

    res = infl.attributions(
        [np.array([[1., 2., 3., 4., 5.]]), np.array([[1.]])])

    self.assertEqual(len(res), 2)
    self.assertEqual(res[0].shape, (1, 6))
    self.assertEqual(res[1].shape, (1, 2))

def create_dataset(self, graph: tf.Graph) -> tf.data.Dataset:
    with graph.as_default():
        dataset = self._create_dataset()
        dataset = dataset.batch(self._batch_size)
        dataset = dataset.prefetch(self._prefetch_size)
        return dataset

def load():
    global session
    global graph
    global model
    global data_result

    data_result = DataResult(None, None)
    with open(script_dir + '/../temp/processed_data.json', 'r') as output:
        json_data = json.load(output)
        data_result.loadJSON(json_data)

    graph = Graph()
    with graph.as_default():
        session = Session(graph=graph)
        with session.as_default():
            temp_encoder = Encoder(data_result.input_data)
            temp_decoder = Decoder(data_result.output_data, temp_encoder)
            temp_model = Model([temp_encoder.inputs, temp_decoder.inputs],
                               temp_decoder.outputs)
            temp_model.compile(optimizer='rmsprop',
                               loss='categorical_crossentropy')
            temp_model.load_weights(
                os.path.dirname(__file__) + '/../model_weights.h5')
            model = temp_model

def __init__(self, config: RnnNetConfig, graph: tf.Graph):
    self.config = config
    self.graph = graph

    print('Creating model averaging network input...')
    with graph.as_default():
        with tf.variable_scope('input') as scope:
            self.input = tf.placeholder(
                tf.float32, [None, None, self.config.FEATURES_DIM], name='input')
            self.seq_len = tf.placeholder(tf.int32, [None], name='seq_len')
            self.labels = tf.placeholder(tf.float32, [None, None], name='labels')
            self.mask = tf.placeholder(tf.float32, [None, None], name='mask')
            self.keep_prob = tf.placeholder(tf.float32)

    self.state = {}
    self.new_state = {}
    self.rnn_cell = {}
    self.returns = {}
    self.sse = {}
    self.cost = {}
    self.optimizer = {}
    self.vars = {}
    self.grads_and_vars = {}
    self.saver = {}
    self.train = {}

def buildTripletPairs(datasets, filename):
    embed_graph = Graph()
    triplet_paths_array = []
    with embed_graph.as_default():
        X_input = Input((height, width, 3))
        X = InceptionResnetV2(X_input)
        model = Model(inputs=X_input, outputs=X)
        with tf.Session(graph=embed_graph) as sess:
            sess.run(tf.global_variables_initializer())
            for file_inc in range(max_nrof_epochs):
                image_paths, num_per_class = sample_people(
                    datasets, people_per_batch, images_per_person)
                nrof_examples = people_per_batch * images_per_person
                emb_array = np.zeros((nrof_examples, embedding_size))
                embeds = model.predict(np.stack(getImages(image_paths)))
                for loc in range(nrof_examples):
                    emb_array[loc, :] = embeds[loc]
                triplets, nrof_random_negs, nrof_triplets = select_triplets(
                    emb_array, num_per_class, image_paths, people_per_batch, 0.2)
                triplet_paths = list(itertools.chain(*triplets))
                triplet_paths_array.extend(
                    np.reshape(np.expand_dims(np.array(triplet_paths), 1), (-1, 3)))
            np.savetxt(filename, triplet_paths_array,
                       fmt=("%s", "%s", "%s"), delimiter=",")

def _parse_graph_topologic_order(graph_def, output_nodes=None):
    # https://en.wikipedia.org/wiki/Topological_sorting
    if output_nodes is None:
        output_nodes = _parse_graph_layers(graph_def)[-1]
    graph = Graph()
    with graph.as_default():
        import_graph_def(graph_def, name='')

    queue = deepcopy(output_nodes)
    visited = set()     # temporary mark
    perm_visit = set()  # permanent mark
    ops_torder = []     # L

    def visit(node_name):
        if node_name in perm_visit:
            return
        if node_name in visited:
            raise ValueError("Input graph is not a DAG")

        visited.add(node_name)
        op = graph.get_operation_by_name(node_name)
        for tensor in op.inputs:
            visit(tensor.op.name)

        perm_visit.add(node_name)
        ops_torder.insert(0, node_name)

    while queue:
        node_name = queue.pop(0)
        visit(node_name)

    # ops_bfs.reverse()
    return ops_torder, output_nodes

def pipeline(self, filename_queue, graph: tf.Graph):
    with graph.as_default(), tf.name_scope("Pipeline"):
        filename_queue = tf.train.string_input_producer(
            filename_queue, num_epochs=num_epochs)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        batch_serialized_examples = tf.train.shuffle_batch(
            [serialized_example],
            batch_size=self.batch_size,
            num_threads=self.num_threads,
            capacity=LibsvmHelper.MIN_AFTER_DEQUEUE
            + (self.num_threads + 1) * self.batch_size,
            # Ensures a minimum amount of shuffling of examples.
            min_after_dequeue=LibsvmHelper.MIN_AFTER_DEQUEUE)
        features = tf.parse_example(
            batch_serialized_examples,
            features={
                'label': tf.FixedLenFeature([], tf.int64),
                'num_features': tf.FixedLenFeature([], tf.int64),
                'index': tf.VarLenFeature(tf.int64),
                'value': tf.VarLenFeature(tf.float32),
            })
        return features['label'], features['index'], features['value']

def build_session(graph: tf.Graph):
    print('Building Session...')
    session = tf.Session(graph=graph)
    with graph.as_default():
        session.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=3)
    return session, saver

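# Editor's sketch (hypothetical, not from the original sources): a minimal use
# of build_session above. The variable name and checkpoint path are purely
# illustrative; only standard TF1 APIs already used by build_session are assumed.
def example_build_and_checkpoint():
    graph = tf.Graph()
    with graph.as_default():
        tf.get_variable('weights', shape=[10, 3])  # something to initialize and save
    session, saver = build_session(graph)          # runs the initializer, keeps 3 checkpoints
    saver.save(session, './model_ckpt')            # write a checkpoint to the working directory
    return session, saver
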
def rsna_evaluate(model_path, backbone, anchor_boxes, score_threshold,
                  nms_threshold, rsna_path, rsna_test_json, anchor_scales):
    """ Evaluate a json using a retinanet model, print mAP based on GT and
    generate a kaggle submission file.
    """
    save_path = None
    from tensorflow import Graph, Session
    graph1 = Graph()
    with graph1.as_default():
        session1 = Session()
        with session1.as_default():
            model2 = models.load_model(model_path,
                                       backbone_name=backbone,
                                       convert=True,
                                       nms_threshold=nms_threshold,
                                       anchors_ratios=anchor_boxes,
                                       anchors_scales=anchor_scales)

            # create the generator
            generator = create_generator(rsna_test_json, rsna_path)

            map = evaluate(generator,
                           model2,
                           iou_threshold=0.5,
                           score_threshold=score_threshold,
                           max_detections=100,
                           generate_kaggle_output='teste.csv')
            del model2
            import gc
            gc.collect()

    with open('output_map.txt', 'a') as output_map:
        output_map.write('{} : {} \n'.format(model_path, map))

def pretrainSingleClass(self, modelname, dataset, class_name, batch_size, epochs, lr):
    #K.clear_session()
    graph2 = Graph()
    with graph2.as_default():
        session2 = Session()
        with session2.as_default():
            reader = LFWReader(dir_images=dataset, class_name=class_name)
            gen_train = TripletGeneratorSingleID(reader)
            gen_test = TripletGeneratorSingleID(reader)
            embedding_model, triplet_model = GetModel()
            for layer in embedding_model.layers[-3:]:
                layer.trainable = True
            for layer in embedding_model.layers[:-3]:
                layer.trainable = False
            triplet_model.compile(loss=None, optimizer=Adam(lr))

            history = triplet_model.fit_generator(gen_train,
                                                  validation_data=gen_test,
                                                  epochs=epochs,
                                                  verbose=1,
                                                  steps_per_epoch=50,
                                                  validation_steps=5)
            embedding_model.save_weights('./trained-models/weights/' +
                                         modelname + '.h5')
            self.embeddingmodel(modelname, dataset)
            K.get_session()

def loadModels(self, path):
    """Load models from the json file given."""
    logger.info('Loading models from %s', path)
    self.data_location = None
    self.models = []

    if path == self.data_location:
        return

    if not os.path.exists(path):
        logger.error('Could not find file: %s', path)

    self.data_location = path
    with open(path, 'r') as f:
        data = json.load(f)

    if 'models' not in data:
        logger.error('No models defined in data')
        return

    models = data['models']
    for i, model in enumerate(models):
        if not model:
            self.models.append(None)
            continue

        vertices = data['joint_map'][i]
        graph = Graph()
        with graph.as_default():
            session = Session()
            with session.as_default():
                meta = model.get('meta')
                root = model.get('root')
                saver = tf.train.import_meta_graph(meta)
                saver.restore(session, tf.train.latest_checkpoint(root))
                in_tensor = session.graph.get_tensor_by_name(model['input'])
                out_tensor = session.graph.get_tensor_by_name(model['output'])
                normalized = model['normalized']

                verts_max, verts_min, trans_max, trans_min = None, None, None, None
                if normalized:
                    trans_max = np.array(model['trans_max'])
                    trans_min = np.array(model['trans_min'])
                    verts_max = np.array(model['verts_max'])
                    verts_min = np.array(model['verts_min'])

                tfmodel = TFModel(graph=session.graph,
                                  session=session,
                                  input_tensor=in_tensor,
                                  output_tensor=out_tensor,
                                  vertices=vertices,
                                  normalized=normalized,
                                  trans_max=trans_max,
                                  trans_min=trans_min,
                                  verts_max=verts_max,
                                  verts_min=verts_min)
                self.models.append(tfmodel)

def make_mlp(input_placeholder, hidden_layer_sizes: list, activation: str,
             graph: tf.Graph, name: str = None):
    """
    :param input_placeholder: NOT a tf.placeholder or tf.Tensor! monkey patched versions of
        those with attribute 'shape'
    :param hidden_layer_sizes:
    :param activation:
    :param graph:
    :param name:
    :return:
    """
    graph = graph or tf.get_default_graph()
    assert isinstance(graph, tf.Graph)
    with graph.as_default():
        with tf.name_scope(name, 'make_mlp', [input_placeholder]):
            # Just masking it with the name output for convenience in the for loop
            output = input_placeholder
            for output_dim in hidden_layer_sizes:
                output = keras_dense(output_dim, glorot_uniform,
                                     graph=graph, activation=activation)(output)
    return output

def __init__(self, net, graph: tf.Graph, mixfrac=1.0, maxiter=25):
    EzPickle.__init__(self, net, mixfrac, maxiter)
    self.net = net
    self.mixfrac = mixfrac

    self.x_nx = net.input
    ypred_ny = net

    with graph.as_default():
        ytarg_ny = tf.placeholder(dtype=ypred_ny.dtype, shape=(None, None))  # T.matrix("ytarg")
        var_list = net.trainable_weights
        l2 = 1e-3 * tf.add_n([tf.reduce_sum(tf.square(v)) for v in var_list])
        N = tf.cast(tf.shape(self.x_nx)[0], dtype=tf.float32)
        mse = tf.reduce_sum(tf.square(ytarg_ny - ypred_ny)) / N
        symb_args = [self.x_nx, ytarg_ny]

        loss = mse + l2
        self.opt = optimizers.LbfgsOptimizer(loss, var_list, symb_args,
                                             maxiter=maxiter,
                                             extra_losses={"mse": mse, "l2": l2})

class FaceID:
    def __init__(self):
        self.to_identify_path = os.path.join(os.path.abspath(os.sep),
                                             'ALFI_Data', 'To_Identify', 'Raw')
        self.to_identify_processed_path = os.path.join(
            os.path.abspath(os.sep), 'ALFI_Data', 'To_Identify', 'Processed')
        self.to_process_path = os.path.join(os.path.abspath(os.sep),
                                            'ALFI_Data', 'To_Process')
        self.dataset_path = os.path.join(os.path.abspath(os.sep),
                                         'ALFI_Data', 'Dataset', 'DC')
        self.dataset_ds_path = os.path.join(os.path.abspath(os.sep),
                                            'ALFI_Data', 'Dataset', 'DS')
        self.weigths_path = os.path.join(os.path.abspath(os.sep),
                                         'ALFI_Data', 'Weigths')

        self.current_sbj = 0
        while os.path.exists(os.path.join(self.dataset_path,
                                          'sbj-' + str(self.current_sbj))):
            self.current_sbj = self.current_sbj + 1

        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session()
            with self.session.as_default():
                self.model = faceIDNet()

    def train(self, epochs, save_name, load=False):
        if load:
            self.load(save_name)
        K.set_session(self.session)
        with self.graph.as_default():
            gen = generator(24, self.dataset_ds_path)
            save_folder = os.path.join(self.weigths_path, save_name)
            if not os.path.exists(save_folder):
                os.makedirs(save_folder)
            cp_callback = ModelCheckpoint(os.path.join(save_folder, 'faceID_weights'),
                                          save_weights_only=True)
            self.model.fit_generator(gen,
                                     steps_per_epoch=30,
                                     epochs=epochs,
                                     validation_steps=20,
                                     callbacks=[cp_callback])
            lossTrain = self.model.evaluate_generator(gen, steps=30)
            print('* - Loss: ' + str(lossTrain))

    def predict(self, inputs, threshold=0.2):
        K.set_session(self.session)
        with self.graph.as_default():
            inputs = [inputs[0, :].reshape((1, 100, 100, 4)),
                      inputs[1, :].reshape((1, 100, 100, 4))]
            out = self.model.predict(inputs)
        return (out <= threshold)

    def load(self, save_name):
        K.set_session(self.session)
        with self.graph.as_default():
            self.model.load_weights(os.path.join(self.weigths_path, save_name,
                                                 'faceID_weights'))
            self.model._make_predict_function()
        print('--- Weights loaded ---')

def _stamp_value_into_graph(value: Any, type_signature: computation_types.Type,
                            graph: tf.Graph) -> Any:
    """Stamps `value` in `graph` as an object of type `type_signature`.

    Args:
      value: A value to stamp.
      type_signature: The type of the value to stamp.
      graph: The graph to stamp in.

    Returns:
      A Python object made of tensors stamped into `graph`, `tf.data.Dataset`s,
      or `structure.Struct`s that structurally corresponds to the value passed
      at input.
    """
    if value is None:
        return None
    if type_signature.is_tensor():
        if isinstance(value, np.ndarray) or tf.is_tensor(value):
            value_type = computation_types.TensorType(
                tf.dtypes.as_dtype(value.dtype), tf.TensorShape(value.shape))
            type_signature.check_assignable_from(value_type)
            with graph.as_default():
                return tf.constant(value)
        else:
            with graph.as_default():
                return tf.constant(value,
                                   dtype=type_signature.dtype,
                                   shape=type_signature.shape)
    elif type_signature.is_struct():
        if isinstance(value, (list, dict)):
            value = structure.from_container(value)
        stamped_elements = []
        named_type_signatures = structure.to_elements(type_signature)
        for (name, type_signature), element in zip(named_type_signatures, value):
            stamped_element = _stamp_value_into_graph(element, type_signature, graph)
            stamped_elements.append((name, stamped_element))
        return structure.Struct(stamped_elements)
    elif type_signature.is_sequence():
        return tensorflow_utils.make_data_set_from_elements(
            graph, value, type_signature.element)
    else:
        raise NotImplementedError(
            'Unable to stamp a value of type {} in graph.'.format(type_signature))

def load_KerasGraph(path):
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            model = keras.models.load_model(path)
            # model._make_predict_function()
            graph = tf.get_default_graph()
    return model, graph, thread_session

def _session(detection_graph: tf.Graph) -> tf.compat.v1.Session:
    with detection_graph.as_default():
        od_graph_def = tf.compat.v1.GraphDef()
        with tf.io.gfile.GFile(PATH_TO_GRAPH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        return tf.compat.v1.Session(graph=detection_graph,
                                    config=tf.compat.v1.ConfigProto())

class EmotionsDetection:
    def __init__(self):
        self.emotion_model_path = 'emotion_model/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        self.emotion_labels = get_labels('fer2013')
        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session(graph=self.graph)
            with self.session.as_default():
                self.emotion_classifier = load_model(self.emotion_model_path,
                                                     compile=False)

        # starting lists for calculating modes
        self.emotion_target_size = self.emotion_classifier.input_shape[1:3]
        self.emotion_window = []
        self.frame_window = 10
        self.emotion_offsets = (20, 40)

    ##################################
    def processe(self, args):
        res = []
        faces = args
        for face in faces:
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            # try:
            face = cv2.resize(face, self.emotion_target_size)
            # except:
            #     continue
            face = preprocess_input(face, True)
            face = np.expand_dims(face, 0)
            face = np.expand_dims(face, -1)

            t1 = time.time()
            k.set_session(self.session)
            with self.graph.as_default():
                emotion_prediction = self.emotion_classifier.predict(face)
            print('time is :', time.time() - t1)

            emotion_probability = np.max(emotion_prediction)
            all_emotions = emotion_prediction
            emotion_label_arg = np.argmax(all_emotions)
            if round(all_emotions[0][6] - all_emotions[0][4], 2) >= 0.45:
                emotion_label_arg = 4
            elif round(all_emotions[0][0], 2) >= 0.2:
                emotion_label_arg = 0
            emotion_text = self.emotion_labels[emotion_label_arg]
            res.append([emotion_text, emotion_probability])
        return res

def load_from_file(filename):
    graph = Graph()
    with graph.as_default():
        sess = utils.get_nn_config()
        with sess.as_default():
            model = load_model(filename, compile=False)
            compile_model(model)
            return model, graph, sess
    return None

def create_detector(self, verbose, mtcnn_kwargs):
    """ Create the mtcnn detector """
    self.verbose = verbose

    if self.verbose:
        print("Adding MTCNN detector")

    self.kwargs = mtcnn_kwargs

    mtcnn_graph = Graph()
    with mtcnn_graph.as_default():
        mtcnn_session = Session()
        with mtcnn_session.as_default():
            pnet, rnet, onet = create_mtcnn(mtcnn_session, self.data_path)
    mtcnn_graph.finalize()

    self.kwargs["pnet"] = pnet
    self.kwargs["rnet"] = rnet
    self.kwargs["onet"] = onet

    self.initialized = True

def load_model(self, verbose, dummy, ratio):
    """ Load the Keras Model """
    self.verbose = verbose
    if self.verbose:
        print("Initializing keras model...")

    keras_graph = Graph()
    with keras_graph.as_default():
        config = ConfigProto()
        if ratio:
            config.gpu_options.per_process_gpu_memory_fraction = ratio
        self.session = Session(config=config)
        with self.session.as_default():
            self.model = keras.models.load_model(
                self.model_path,
                custom_objects={'TorchBatchNorm2D': TorchBatchNorm2D})
            self.model.predict(dummy)
    keras_graph.finalize()

    self.initialized = True