Example #1
def _dojob(ready, e, queue):
    prctl.set_name('AI detector - do job')
    global session1, session2, ip_model, mac_model
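    # each model gets its own Graph/Session pair so the two Keras models
    # can be used from this worker thread without interfering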
    ip_graph = Graph()
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    with ip_graph.as_default():
        session1 = Session(config=config)
        with session1.as_default():
            ip_model = K.models.load_model(
                'gru_ip_4tuple.hdf5', custom_objects={'attention': attention})
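            # build the predict function ahead of time so it is safe to
            # call from a thread other than the one that loaded the model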
            ip_model._make_predict_function()

    mac_graph = Graph()
    with mac_graph.as_default():
        session2 = Session(config=config)
        with session2.as_default():
            mac_model = K.models.load_model(
                'gru_mac_4tuple.hdf5', custom_objects={'attention': attention})
            mac_model._make_predict_function()
    ready.set()
    print('set ready')
    last = time.time()
    global ignore_packet
    while not e.is_set():
        if not queue.empty():
            obj = queue.get()
            if (obj[0], obj[1]) in ignore_packet:
                if obj[3] <= ignore_packet[(obj[0], obj[1])]:
                    continue
            feature_extract((obj[2], obj[3]))
        if time.time() - last >= polling_interval:
            print(queue.qsize())
            global flow_statics, src_addr_list, memory_data

            # calculate features in last 5 seconds
            result = calculate_feature(flow_statics)
            memory_data.pop(0)
            memory_data.append(result)
            t_run_exp = threading.Thread(target=_run_exp,
                                         args=(
                                             result,
                                             src_addr_list,
                                             memory_data,
                                         ))
            t_run_exp.start()
            t_run_exp.join()
            flow_statics = {}
            src_addr_list = {}
            last = time.time()
    K.backend.clear_session()
    del ip_model
    del mac_model
Example #2
def facial_Emotion(filepath):

    imgName = filepath
    img = Image.open(imgName).convert('L')

    pixels = list(img.getdata())

    # reshape the pixel values

    reshapeImgPixels = np.zeros((1, 48 * 48))

    for i in range(48 * 48):
        reshapeImgPixels[0, i] = int(pixels[i])

    img = reshapeImgPixels
    img = img / 255
    img = img.reshape((img.shape[0], 1, 48, 48))

    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            model = load_model('./testModel/detectEmotionModel_epoch10.h5')
            global graph
            graph = tf.get_default_graph()

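    # inference must run under the same graph and session the model was loaded in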
    with graph.as_default():
        with thread_session.as_default():
            global result
            result = model.predict(img)

    max_prob = 0
    maxIdx = 0
    for i in range(4):
        if result[0][i] > max_prob:
            max_prob = result[0][i]
            maxIdx = i

    if maxIdx == 0:
        emotion = 'Angry'
    elif maxIdx == 1:
        emotion = 'Happy'
    elif maxIdx == 2:
        emotion = 'Sad'
    elif maxIdx == 3:
        emotion = 'Neutral'

    return emotion
Example #3
def _dojob(e, queue):
    prctl.set_name('AI detector - do job')
    global session1, session2, ip_model, mac_model
    ip_graph = Graph()
    with ip_graph.as_default():
        session1 = Session()
        with session1.as_default():
            ip_model = K.models.load_model(ip_model_path)
            ip_model._make_predict_function()

    mac_graph = Graph()
    with mac_graph.as_default():
        session2 = Session()
        with session2.as_default():
            mac_model = K.models.load_model(mac_model_path)
            mac_model._make_predict_function()
    #pcap_file = open('test.pcap', 'wb')
    #writer = dpkt.pcap.Writer(pcap_file)
    global total_tp, total_tn, total_fp, total_fn 
    total_tp = 0
    total_tn = 0
    total_fp = 0
    total_fn = 0
    last = time.time()
    #count_lock = threading.Lock()
    while not e.is_set():
        if not queue.empty():
            obj = queue.get()
            feature_extract(obj)
            current = obj[1]
        else:
            current = time.time()
        if current - last >= polling_interval:
            global flow_statics, src_addr_list, attacker
            #calculate features in last 5 seconds
            result = calculate_feature(flow_statics)
            memory_data.pop(0)
            memory_data.append(result)
            #t_run_exp = threading.Thread(target=_run_exp, args=(flow_statics, src_addr_list, attacker, memory_data, count_lock, ))
            t_run_exp = threading.Thread(target=_run_exp, args=(flow_statics, src_addr_list, attacker, memory_data, ))
            t_run_exp.start()
            t_run_exp.join()
            flow_statics = {}
            src_addr_list = []
            attacker = []
            last = current
            
    K.backend.clear_session()
    del ip_model
    del mac_model
Example #4
class DetectFace:
    def __init__(self):
        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session(graph=self.graph)
            with self.session.as_default():
                self.detector = MTCNN()

    def processe(self, args):
        faces = []
        image = args
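        # make this detector's session current before running inference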
        k.set_session(self.session)
        with self.graph.as_default():
            results = self.detector.detect_faces(image)

        for res in results:
            x1, y1, width, height = res['box']

            x1, y1 = abs(x1), abs(y1)
            x2, y2 = x1 + width, y1 + height

            face = image[y1:y2, x1:x2]
            faces.append(face)

        return faces
Example #5
    def loadKerasModel(self, filePath):
        global PMMLMODELSTORAGE
        fO = pathlib.Path(filePath)
        keyToModel = fO.name.replace(fO.suffix, '')
        # print (PMMLMODELSTORAGE)
        try:
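            # keep the graph and session alongside the model so later
            # prediction calls can restore the same context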
            model_graph = Graph()
            with model_graph.as_default():
                tf_session = Session()
                with tf_session.as_default():
                    seqModel = load_model(filePath)

            tempDictModel = {
                'modelObj': seqModel,
                'model_graph': model_graph,
                'modelGeneratedFrom': 'Keras',
                'tf_session': tf_session,
                'inputShape': seqModel.input_shape,
            }
            PMMLMODELSTORAGE[keyToModel] = tempDictModel
            messageToWorld = "Model Loaded Successfully"
            reStat = 200
        except Exception:
            messageToWorld = "Model load failed, please connect with admin"
            keyToModel = None
            reStat = 500
        resultResp = {'message': messageToWorld, 'keytoModel': keyToModel}
        return JsonResponse(resultResp, status=reStat)
def load_KerasGraph(path):
    print("> ====== loading Keras model for classification")
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            input_shape = (28, 28, 1)
            num_classes = 6

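            # rebuild the training-time architecture, then load the trained weights into it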
            model = Sequential()
            model.add(Conv2D(32, kernel_size=(3, 3), input_shape=input_shape))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.25))
            model.add(Flatten())
            model.add(Dense(128))
            model.add(ReLU())
            model.add(Dropout(0.5))
            model.add(Dense(num_classes))
            model.add(Softmax())
            model.load_weights(path)

            graph = tf.get_default_graph()
    print(">  ====== Keras model loaded")
    return model, graph, thread_session
    def save(self, sess: tf.Session, folder: str, step: int):
        p = os.path.join(folder, f"full-model-{step:05}.save")
        self.forward_model.save(p, overwrite=True, include_optimizer=False)

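        # reload the saved Keras model in a fresh graph and export it as a
        # TensorFlow SavedModel with a prediction signature for serving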
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Graph().as_default(), tf.Session(
                config=config) as sess, sess.as_default():
            model = models.load_model(p,
                                      custom_objects=custom_layers,
                                      compile=False)
            with tf.name_scope("export"):
                x = tf.placeholder(tf.float32, shape=(None, None, 1))
                y = model(x)
            model.load_weights(p)

            model_input = tf.saved_model.utils.build_tensor_info(x)
            model_output = tf.saved_model.utils.build_tensor_info(y)

            signature_definition = tf.saved_model.signature_def_utils.build_signature_def(
                inputs={"x": model_input},
                outputs={"y": model_output},
                method_name=tf.saved_model.signature_constants.
                PREDICT_METHOD_NAME)

            export_path = os.path.join(folder, "saver", "1")
            builder = tf.saved_model.builder.SavedModelBuilder(export_path)
            builder.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    "mincall": signature_definition,
                },
            )
            builder.save()
def rsna_evaluate(model_path, backbone, anchor_boxes, score_threshold,
                  nms_threshold, rsna_path, rsna_test_json, anchor_scales):
    """ Evaluate an json using retinanet model, print mAP based on GT and generate kaggle submission file.
    """

    save_path = None
    from tensorflow import Graph, Session
    graph1 = Graph()
    with graph1.as_default():
        session1 = Session()
        with session1.as_default():
            model2 = models.load_model(model_path,
                                       backbone_name=backbone,
                                       convert=True,
                                       nms_threshold=nms_threshold,
                                       anchors_ratios=anchor_boxes,
                                       anchors_scales=anchor_scales)
            # create the generator
            generator = create_generator(rsna_test_json, rsna_path)

            mean_ap = evaluate(generator,
                               model2,
                               iou_threshold=0.5,
                               score_threshold=score_threshold,
                               max_detections=100,
                               generate_kaggle_output='teste.csv')
            del model2
            import gc
            gc.collect()
        with open('output_map.txt', 'a') as output_map:
            output_map.write('{} : {}\n'.format(model_path, mean_ap))
Example #9
    def _predict(cls, user_ids: List, item_ids: List, with_user_embedding,
                 graph: GraphConvolutionalMatrixCompletionGraph,
                 dataset: GcmcGraphDataset, session: tf.Session) -> np.ndarray:
        if graph is None:
            raise RuntimeError('Please call fit first.')

        rating_adjacency_matrix = dataset.train_rating_adjacency_matrix()
        user_indices, item_indices = dataset.to_indices(user_ids, item_ids)
        if not with_user_embedding:
            user_indices = np.array(
                [0] * len(user_indices))  # TODO use default user index.

        user_feature_indices, item_feature_indices = dataset.to_feature_indices(
            user_ids, item_ids)
        input_data = dict(user=user_indices,
                          item=item_indices,
                          user_feature_indices=user_feature_indices,
                          item_feature_indices=item_feature_indices)
        feed_dict = cls._feed_dict(input_data,
                                   graph,
                                   dataset,
                                   rating_adjacency_matrix,
                                   is_train=False)
        with session.as_default():
            predictions = session.run(graph.expectation, feed_dict=feed_dict)
        predictions = predictions.flatten()
        predictions = np.clip(predictions,
                              dataset.rating()[0],
                              dataset.rating()[-1])
        return predictions
Example #10
def load():
    global session
    global graph
    global model
    global data_result

    data_result = DataResult(None, None)

    with open(script_dir + '/../temp/processed_data.json', 'r') as output:
        json_data = json.load(output)
        data_result.loadJSON(json_data)

    graph = Graph()
    with graph.as_default():
        session = Session(graph=graph)
        with session.as_default():
            temp_encoder = Encoder(data_result.input_data)
            temp_decoder = Decoder(data_result.output_data, temp_encoder)
            temp_model = Model([temp_encoder.inputs, temp_decoder.inputs],
                               temp_decoder.outputs)
            temp_model.compile(optimizer='rmsprop',
                               loss='categorical_crossentropy')
            temp_model.load_weights(
                os.path.dirname(__file__) + '/../model_weights.h5')

            model = temp_model
Example #11
    def loadModels(self, path):
        """Load models from the json file given."""
        logger.info('Loading models from %s', path)
        # bail out early if these models were already loaded from this path
        if path == self.data_location:
            return

        self.data_location = None
        self.models = []

        if not os.path.exists(path):
            logger.error('Could not find file: %s', path)
            return

        self.data_location = path
        with open(path, 'r') as f:
            data = json.load(f)

        if 'models' not in data:
            logger.error('No models defined in data')
            return

        models = data['models']
        for i, model in enumerate(models):
            if not model:
                self.models.append(None)
                continue
            vertices = data['joint_map'][i]
            graph = Graph()
            with graph.as_default():
                session = Session()
                with session.as_default():
                    meta = model.get('meta')
                    root = model.get('root')
                    saver = tf.train.import_meta_graph(meta)
                    saver.restore(session, tf.train.latest_checkpoint(root))

                    in_tensor = session.graph.get_tensor_by_name(
                        model['input'])
                    out_tensor = session.graph.get_tensor_by_name(
                        model['output'])

                    normalized = model['normalized']
                    verts_max, verts_min, trans_max, trans_min = None, None, None, None
                    if normalized:
                        trans_max = np.array(model['trans_max'])
                        trans_min = np.array(model['trans_min'])
                        verts_max = np.array(model['verts_max'])
                        verts_min = np.array(model['verts_min'])

                    tfmodel = TFModel(graph=session.graph,
                                      session=session,
                                      input_tensor=in_tensor,
                                      output_tensor=out_tensor,
                                      vertices=vertices,
                                      normalized=normalized,
                                      trans_max=trans_max,
                                      trans_min=trans_min,
                                      verts_max=verts_max,
                                      verts_min=verts_min)

                    self.models.append(tfmodel)
Example #12
def test_mesh_evaluate_models(
    graph: tf.Graph,
    session: tf.Session,
    space: gym.Space,
    n_models: int,
    n_mesh: int = 64,
):
    """Checks `canonical_sample.mesh_evaluate_models` agrees with `mesh_evaluate_models_slow`."""
    with datasets.sample_dist_from_space(space) as dist:
        obs = dist(n_mesh)
        actions = dist(n_mesh)
        next_obs = dist(n_mesh)

    with graph.as_default():
        models = {}
        for i in range(n_models):
            with tf.variable_scope(str(i)):
                models[i] = rewards.MLPRewardModel(space, space)

        session.run(tf.global_variables_initializer())
        with session.as_default():
            expected = mesh_evaluate_models_slow(models, obs, actions,
                                                 next_obs)
            actual = epic_sample.mesh_evaluate_models(models, obs, actions,
                                                      next_obs)

    assert expected.keys() == actual.keys()
    for k in expected:
        assert np.allclose(expected[k], actual[k]), f"difference in model {k}"
    def pretrainSingleClass(self, modelname, dataset, class_name, batch_size,
                            epochs, lr):
        #K.clear_session()
        graph2 = Graph()
        with graph2.as_default():
            session2 = Session()
            with session2.as_default():

                reader = LFWReader(dir_images=dataset, class_name=class_name)
                gen_train = TripletGeneratorSingleID(reader)
                gen_test = TripletGeneratorSingleID(reader)
                embedding_model, triplet_model = GetModel()
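                # fine-tune only the last three layers; keep the rest of the
                # embedding network frozen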
                for layer in embedding_model.layers[-3:]:
                    layer.trainable = True

                for layer in embedding_model.layers[:-3]:
                    layer.trainable = False
                triplet_model.compile(loss=None, optimizer=Adam(lr))

                history = triplet_model.fit_generator(gen_train,
                                                      validation_data=gen_test,
                                                      epochs=epochs,
                                                      verbose=1,
                                                      steps_per_epoch=50,
                                                      validation_steps=5)

                embedding_model.save_weights('./trained-models/weights/' +
                                             modelname + '.h5')
                self.embeddingmodel(modelname, dataset)
                K.get_session()
Example #14
    def _get_feature(cls, user_ids: List, item_ids: List, with_user_embedding,
                     graph: GraphConvolutionalMatrixCompletionGraph,
                     dataset: GcmcGraphDataset, session: tf.Session,
                     feature: str) -> np.ndarray:
        if graph is None:
            raise RuntimeError('Please call fit first.')

        rating_adjacency_matrix = dataset.train_rating_adjacency_matrix()
        user_indices, item_indices = dataset.to_indices(user_ids, item_ids)
        if not with_user_embedding:
            user_indices = np.array(
                [0] * len(user_indices))  # TODO use default user index.

        user_feature_indices, item_feature_indices = dataset.to_feature_indices(
            user_ids, item_ids)
        input_data = dict(user=user_indices,
                          item=item_indices,
                          user_feature_indices=user_feature_indices,
                          item_feature_indices=item_feature_indices)
        feed_dict = cls._feed_dict(input_data,
                                   graph,
                                   dataset,
                                   rating_adjacency_matrix,
                                   is_train=False)
        encoder_map = dict(user=graph.user_encoder, item=graph.item_encoder)
        with session.as_default():
            feature = session.run(encoder_map[feature], feed_dict=feed_dict)
        return feature
def load_KerasGraph(path): 
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            model = keras.models.load_model(path)
            graph = tf.get_default_graph()
    return model, graph, thread_session
def graphandsession():

    model_graph = Graph()
    with model_graph.as_default():
        tf_session = Session()
        with tf_session.as_default():
            mdl = load_model("C:/Users/Ritik/Desktop/Machine Learning Practice/Bank Loan Classification/DjangoAPI/MyAPI/models/model.h5")

    return (model_graph, tf_session, mdl)
Example #17
def load_KerasGraph(path):
    print("> ====== loading Keras model for classification")
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            model = keras.models.load_model(path)
            graph = tf.get_default_graph()
    print(">  ====== Keras model loaded")
    return model, graph, thread_session
def load_single_model(path):
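    # register the model together with its graph and session so all three
    # can be re-activated when the model is used for prediction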
    graph = Graph()
    with graph.as_default():
        session = Session()
        with session.as_default():
            model = load_model(path)
            model._make_predict_function()

            MODELS.append(model)
            GRAPHS.append(graph)
            SESSIONS.append(session)
Example #19
    def test_compare_synthetic(
        self,
        graph: tf.Graph,
        session: tf.Session,
        rel_upperbound: float,
        fudge_factor: float,
        rescale: bool,
        env_kwargs,
        kwargs,
    ):
        """Try comparing randomly generated reward models, same scale."""
        with graph.as_default():
            with session.as_default():
                noise = np.array([0.0, 0.5, 1.0])  # coarse-grained for speed
                df, _ = synthetic.compare_synthetic(
                    reward_noise=noise,
                    potential_noise=noise,
                    model_affine=rescale,
                    **env_kwargs,
                    **kwargs,
                )
                with pd.option_context("display.max_rows", None, "display.max_columns", None):
                    logging.info("Results: %s", df)

                for k in ["Intrinsic", "Shaping", "Extrinsic"]:
                    assert (df[k] >= 0).all(axis=None), f"distances {k} should not be negative"

                # No reward noise, but potential noise
                no_rew_noise = df.loc[(0.0, slice(0.1, None)), :]
                rel = no_rew_noise["Intrinsic"] / no_rew_noise["Extrinsic"]
                assert rel.max(axis=None) < (rel_upperbound * fudge_factor)

                if not rescale:
                    # When ground truth and noised reward are on the same scale,
                    # shaping distance should increase proportionally with potential
                    # magnitude. When reward-noise is non-zero there's a confounder as
                    # the shaping noise we add can *cancel* with shaping in the reward
                    # noise. So just consider zero reward noise.
                    deltas = no_rew_noise["Shaping"].diff().dropna()  # first row is N/A
                    assert deltas.min(axis=None) > 0.0
                    mean_delta = deltas.mean()
                    # Increment should be similar: allow it to vary by 2x up & down
                    assert (deltas < mean_delta * 2).all(axis=None)
                    assert (deltas > mean_delta * 0.5).all(axis=None)

                # We're no more than 10% of intrinsic upper bound at any point.
                # The upper bound is based on the magnitude of the reward noise
                # we added. It's an upper bound since it may include some potential
                # shaping, so we actually could find a shorter intrinsic distance.
                # Add 10% margin of error since we don't expect perfect optimization.
                some_noise = df.loc[df.index.get_level_values("Reward Noise") > 0.0]
                rel = some_noise["Intrinsic"] / some_noise["Intrinsic Upper Bound"]
                assert rel.max(axis=None) < (1 + 0.1 * fudge_factor)
def test_regress(
    graph: tf.Graph,
    session: tf.Session,
    target: str,
    loss_ub: float,
    rel_loss_lb: float,
    discount: float = 0.99,
):
    """Test regression onto target.

    Args:
        target: The target reward model type. Must be a hardcoded reward:
            we always load with a path "dummy".
        loss_ub: The maximum loss of the model at the end of training.
        rel_loss_lb: The minimum relative improvement to the initial loss.
    """
    env_name = "evaluating_rewards/PointMassLine-v0"
    venv = vec_env.DummyVecEnv([lambda: gym.make(env_name)])

    with datasets.transitions_factory_from_random_model(
            env_name) as dataset_generator:
        with graph.as_default():
            with session.as_default():
                with tf.variable_scope("source") as source_scope:
                    source = base.MLPRewardModel(venv.observation_space,
                                                 venv.action_space)

                with tf.variable_scope("target"):
                    target_model = serialize.load_reward(
                        target, "dummy", venv, discount)

                with tf.variable_scope("match") as match_scope:
                    match = comparisons.RegressModel(source, target_model)

                init_vars = (source_scope.global_variables() +
                             match_scope.global_variables())
                session.run(tf.initializers.variables(init_vars))

                stats = match.fit(dataset_generator,
                                  total_timesteps=1e5,
                                  batch_size=512)

        loss = pd.DataFrame(stats["loss"])["singleton"]
        logging.info(f"Loss: {loss.iloc[::10]}")
        initial_loss = loss.iloc[0]
        logging.info(f"Initial loss: {initial_loss}")
        final_loss = loss.iloc[-10:].mean()
        logging.info(f"Final loss: {final_loss}")

        assert initial_loss / final_loss > rel_loss_lb
        assert final_loss < loss_ub
Example #21
def get_eval_embeddings(eval_datasets):
    outer_graph = Graph()
    with outer_graph.as_default():
        outer_session = Session()
        with outer_session.as_default():
            with open(
                    parent_directory + model_directory +
                    model_architecture_as_json, "r") as json_file:
                model_json = json_file.read()
            network = model_from_json(model_json)
            network.load_weights(parent_directory + model_directory +
                                 weights_of_model)
            embeds = network.predict(np.stack(getImages(eval_datasets)))
    return embeds
class MyPredictBrain():
    def __init__(self):
        self.basepath = os.path.dirname(__file__)
        self.Imgs_Test = np.ndarray((1, 512, 512, 1), dtype=np.float32)
        self.Predict = np.ndarray((512, 512), dtype=np.float32)

        keras.backend.clear_session()  # clear any previous state so the model can be reused
        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session()
            with self.session.as_default():
                unet = myUnet3()
                self.model = unet.Model
                self.model.load_weights('Unet_Brain.hdf5')

    def LoadPic(self):
        self.img = cv2.imread(
            self.basepath + "/static/auto_photos/auto_picture.jpg",
            cv2.IMREAD_GRAYSCALE)
        self.img = cv2.resize(self.img, (512, 512),
                              interpolation=cv2.INTER_NEAREST)
        pix = np.float32(self.img)
        max_val = np.max(pix)
        pix = np.divide(pix, max_val)
        pix = img_to_array(pix)
        for i in range(512):
            for j in range(512):
                self.Imgs_Test[0][i][j] = pix[i][j]

    def PredictPic(self):
        K.set_session(self.session)
        with self.graph.as_default():
            self.Result = self.model.predict(self.Imgs_Test, verbose=1)

    def SavePic(self):
        for m in range(512):
            for n in range(512):
                if ((self.Result[0][m][n][0] >= self.Result[0][m][n][1]) and
                    (self.Result[0][m][n][0] >= self.Result[0][m][n][2])):
                    self.Predict[m][n] = 0
                elif (self.Result[0][m][n][1] >= self.Result[0][m][n][2]):
                    self.Predict[m][n] = 127.5
                else:
                    self.Predict[m][n] = 255
        # scipy.misc.imsave(self.basepath + '\\1.jpg', self.Predict)

        path = self.basepath + "/static/after_auto_brain_photo/"
        cv2.imwrite(os.path.join(path, 'after_auto_brain_temp.jpg'),
                    self.Predict)
def ai_predict(imgPath):
    print("PATH:", imgPath)
    # preprocessing the frame to predict its label
    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    # config.log_device_placement = True  # to log device placement (on which device the operation ran)
    # sess = tf.Session(config=config)

    ### FIRST ####
    # #load previously trained model
    #K.clear_session()
    #global model
    #model = load_model(os.path.join(SAVE_DIR, CRAPPY_MODEL))
    #model._make_predict_function()
    #global graph
    #model = applications.VGG16(include_top=False, weights='imagenet', input_shape=(IMAGE_SIZE,IMAGE_SIZE,3))
    #graph = tf.get_default_graph()

    ### SECOND ####
    graph1 = Graph()
    with graph1.as_default():
        session1 = Session(graph=graph1)
        with session1.as_default():
            model_1 = load_model(os.path.join(SAVE_DIR, CRAPPY_MODEL))

    frame2 = cv2.imread(imgPath)
    frame2 = cv2.resize(frame2, (IMAGE_SIZE, IMAGE_SIZE))
    frame2 = img_to_array(frame2)
    frame2 = np.array(frame2, dtype="float32") / 255.0

    Image.fromarray((frame2[-1] * 255).round().astype(np.uint8))
    frame2 = np.expand_dims(frame2, axis=0)
    # generating a prediction of the frame

    ## FIRST
    #with graph.as_default():
    #y_pred = model.predict_classes(frame2)

    ## SECOND
    K.set_session(session1)
    with graph1.as_default():
        try:
            y_pred = model_1.predict_classes(frame2)
        except Exception as err:
            print('prediction failed:', err)
            raise
    #y_pred = top_model.predict_classes(model.predict(frame2[None,:,:,:]))
    print("y_pred:", int(y_pred))

    return int(y_pred[0])
Example #24
class FaceID:

    def __init__(self):
        self.to_identify_path = os.path.join(os.path.abspath(os.sep), 'ALFI_Data', 'To_Identify', 'Raw')
        self.to_identify_processed_path = os.path.join(os.path.abspath(os.sep), 'ALFI_Data', 'To_Identify', 'Processed')
        self.to_process_path = os.path.join(os.path.abspath(os.sep), 'ALFI_Data', 'To_Process')
        self.dataset_path = os.path.join(os.path.abspath(os.sep), 'ALFI_Data', 'Dataset', 'DC')
        self.dataset_ds_path = os.path.join(os.path.abspath(os.sep), 'ALFI_Data', 'Dataset', 'DS')
        self.weigths_path = os.path.join(os.path.abspath(os.sep), 'ALFI_Data', 'Weigths')

        self.current_sbj = 0
        while os.path.exists(os.path.join(self.dataset_path, 'sbj-' + str(self.current_sbj))):
            self.current_sbj += 1

        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session()
            with self.session.as_default():
                self.model = faceIDNet()

    def train(self, epochs, save_name, load=False):
        if load:
            self.load(save_name)
        K.set_session(self.session)
        with self.graph.as_default():
            gen = generator(24, self.dataset_ds_path)
            save_folder = os.path.join(self.weigths_path, save_name)
            if not os.path.exists(save_folder):
                os.makedirs(save_folder)
            cp_callback = ModelCheckpoint(os.path.join(save_folder, 'faceID_weights'), save_weights_only=True)
            self.model.fit_generator(gen, steps_per_epoch=30, epochs=epochs, validation_steps=20, callbacks=[cp_callback])
            lossTrain = self.model.evaluate_generator(gen, steps=30)
            print('* - Loss: '+str(lossTrain))

    def predict(self, inputs, threshold=0.2):
        K.set_session(self.session)
        with self.graph.as_default():
            inputs = [inputs[0,:].reshape((1,100,100,4)), inputs[1,:].reshape((1,100,100,4))]
            out = self.model.predict(inputs)
            return (out <= threshold)

    def load(self, save_name):
        K.set_session(self.session)
        with self.graph.as_default():
            self.model.load_weights(os.path.join(self.weigths_path, save_name, 'faceID_weights'))
            self.model._make_predict_function()
            print('--- Weights loaded ---')
Example #25
class KerasClassifier(ClassifierABC):
    def __init__(self, model: Model, trainer: KerasTrainer):
        self.model = model
        self.trainer: Optional[KerasTrainer] = trainer

    def fit(self, images: List[Image], labels: List[int], validation_size: int) -> "KerasClassifier":
        assert self.trainable, "You can't train an un-pickled classifier"
        images = asarray(images)
        labels = to_categorical(asarray(labels), self.n_classes)
        train_images, train_labels = images[:-validation_size], labels[:-validation_size]
        val_images, val_labels = images[-validation_size:], labels[-validation_size:]

        self.trainer.train(self.model, train_images, train_labels, val_images, val_labels)

        return self

    def predict_proba(self, examples: List[Image]) -> Sequence[float]:
        try:  # FIXME
            with self.graph.as_default(), self.session.as_default():
                return self.model.predict(asarray(examples))
        except AttributeError:
            return self.model.predict(asarray(examples))

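    # pickling support: the Keras model itself is not picklable, so it is
    # round-tripped through HDF5 bytes in __getstate__/__setstate__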
    def __getstate__(self) -> Dict:
        with NamedTemporaryFile(suffix=".hdf5", delete=True) as fd:
            self.model.save(fd.name, overwrite=True, include_optimizer=False)
            model_str = fd.read()
        state = copy(self.__dict__)
        state.pop("model")
        state.pop("trainer")
        return {**state, "model_str": model_str}

    def __setstate__(self, state: Dict):
        self.__dict__.update(state)
        self.graph = Graph()
        with NamedTemporaryFile(suffix=".hdf5", delete=True) as fd:
            fd.write(state.pop("model_str"))
            fd.flush()
            with self.graph.as_default():
                self.session = Session(graph=self.graph)
                with self.session.as_default():
                    self.model = load_model(fd.name, compile=False)
        self.trainer = None

    @property
    def trainable(self) -> bool:
        return self.trainer is not None
Example #26
def get_eval_embeddings(eval_datasets):
    outer_graph = Graph()
    with outer_graph.as_default():
        outer_session = Session()
        with outer_session.as_default():
            #network = load_model(parent_directory + model_directory + complete_model, custom_objects={'triplet_loss':train.triplet_loss} )

            with open(
                    parent_directory + model_directory +
                    model_architecture_as_json, "r") as json_file:
                model_json = json_file.read()
            network = model_from_json(model_json)
            network.summary()
            network.load_weights(parent_directory + model_directory +
                                 weights_of_model)
            embeds = network.predict(np.stack(getImages(eval_datasets)))
    return embeds
Example #27
class EmotionsDetection:
    def __init__(self):
        self.emotion_model_path = 'emotion_model/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
        self.emotion_labels = get_labels('fer2013')
        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session(graph=self.graph)
            with self.session.as_default():
                self.emotion_classifier = load_model(self.emotion_model_path,
                                                     compile=False)

        # starting lists for calculating modes
        self.emotion_target_size = self.emotion_classifier.input_shape[1:3]
        self.emotion_window = []
        self.frame_window = 10
        self.emotion_offsets = (20, 40)

    ##################################
    def processe(self, args):
        res = []
        faces = args
        for face in faces:
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            # try:
            face = cv2.resize(face, self.emotion_target_size)
            # except :
            #     continue
            face = preprocess_input(face, True)
            face = np.expand_dims(face, 0)
            face = np.expand_dims(face, -1)
            t1 = time.time()
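            # switch to the classifier's own session before predicting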
            k.set_session(self.session)
            with self.graph.as_default():
                emotion_prediction = self.emotion_classifier.predict(face)
            print('time is :', time.time() - t1)
            emotion_probability = np.max(emotion_prediction)
            all_emotions = emotion_prediction
            emotion_label_arg = np.argmax(all_emotions)
            if round(all_emotions[0][6] - all_emotions[0][4], 2) >= 0.45:
                emotion_label_arg = 4
            elif round(all_emotions[0][0], 2) >= 0.2:
                emotion_label_arg = 0
            emotion_text = self.emotion_labels[emotion_label_arg]
            res.append([emotion_text, emotion_probability])
        return res
Example #28
def train(masterLabels, filename):
    outer_graph = Graph()
    with outer_graph.as_default():
        outer_session = Session()
        with outer_session.as_default():
            network_input = Input((height, width, 3))
            network_output = InceptionResnetV2(network_input)
            network = Model(inputs=network_input, outputs=network_output)
            network.summary()
            opt = Adam(lr=0.01, beta_1=0.99, beta_2=0.999, epsilon=0.1)
            network.compile(loss=triplet_loss,
                            optimizer=opt,
                            metrics=['accuracy'])
            saved_triplet_paths = np.loadtxt(filename,
                                             delimiter=",",
                                             dtype=str)
            steps_for_each_epoch = len(saved_triplet_paths) // (
                max_nrof_epochs * training_images_per_step)
            print(' steps per each epoch is %d ' % (steps_for_each_epoch))
            checkpoint = ModelCheckpoint(parent_directory + model_directory +
                                         interim_best_weights_of_model,
                                         monitor='loss',
                                         verbose=1,
                                         save_best_only=True,
                                         mode='min')  # lower loss is better
            history = network.fit_generator(
                generator=getTriplets(saved_triplet_paths, masterLabels),
                epochs=max_nrof_epochs,
                steps_per_epoch=steps_for_each_epoch,
                verbose=2,
                max_queue_size=1,
                callbacks=[checkpoint, debug])
            history_dict = history.history
            for key, value in history_dict.items():
                print('Key is %s and value is %s' % (key, value))
            print(history_dict.keys())
            model_json = network.to_json()
            with open(
                    parent_directory + model_directory +
                    model_architecture_as_json, "w") as json_file:
                json_file.write(model_json)
            network.save(parent_directory + model_directory + complete_model)
            network.save_weights(parent_directory + model_directory +
                                 weights_of_model)
Example #29
class KerasModel(object):
    "Load the Keras Model"
    def __init__(self):
        self.initialized = False
        self.verbose = False
        self.model_path = self.set_model_path()
        self.model = None
        self.session = None

    @staticmethod
    def set_model_path():
        """ Set the path to the Face Alignment Network Model """
        model_path = os.path.join(os.path.dirname(__file__),
                                  ".cache", "2DFAN-4.h5")
        if not os.path.exists(model_path):
            raise Exception("Error: Unable to find {}, "
                            "reinstall the lib!".format(model_path))
        return model_path

    def load_model(self, verbose, dummy, ratio):
        """ Load the Keras Model """
        if self.initialized:
            return

        self.verbose = verbose
        if self.verbose:
            print("Initializing keras model...")

        keras_graph = Graph()
        with keras_graph.as_default():
            config = ConfigProto()
            if ratio:
                config.gpu_options.per_process_gpu_memory_fraction = ratio
            self.session = Session(config=config)
            with self.session.as_default():
                self.model = keras.models.load_model(
                    self.model_path,
                    custom_objects={'TorchBatchNorm2D':
                                    TorchBatchNorm2D})
                self.model.predict(dummy)
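        # finalize() locks the graph so no new ops can be added afterwards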
        keras_graph.finalize()

        self.initialized = True
def getOutput(hours, past, model_type):
    from keras.models import load_model
    from tensorflow import Graph, Session
    # import tensorflow as tf
    import numpy as np
    testData = np.array(past)
    testData = testData.reshape(1, len(past), 1)
    graph = Graph()
    with graph.as_default():
        session = Session()
        with session.as_default():
            # load model
            if(type == "load"):
                model = load_model("./model/model" + str(hours) + ".h5")
            elif(type == "generation"):
                model = load_model("./model/solar_model" + str(hours) + ".h5")
            model.summary()
            output = model.predict(testData)
            return output
class KerasModel(object):
    "Load the Keras Model"

    def __init__(self):
        self.initialized = False
        self.verbose = False
        self.model_path = self.set_model_path()
        self.model = None
        self.session = None

    @staticmethod
    def set_model_path():
        """ Set the path to the Face Alignment Network Model """
        model_path = os.path.join(os.path.dirname(__file__), ".cache",
                                  "2DFAN-4.h5")
        if not os.path.exists(model_path):
            raise Exception("Error: Unable to find {}, "
                            "reinstall the lib!".format(model_path))
        return model_path

    def load_model(self, verbose, dummy, ratio):
        """ Load the Keras Model """
        if self.initialized:
            return

        self.verbose = verbose
        if self.verbose:
            print("Initializing keras model...")

        keras_graph = Graph()
        with keras_graph.as_default():
            config = ConfigProto()
            if ratio:
                config.gpu_options.per_process_gpu_memory_fraction = ratio
            self.session = Session(config=config)
            with self.session.as_default():
                self.model = keras.models.load_model(
                    self.model_path,
                    custom_objects={'TorchBatchNorm2D': TorchBatchNorm2D})
                self.model.predict(dummy)
        keras_graph.finalize()

        self.initialized = True
Example #32
    def create_detector(self, verbose, mtcnn_kwargs):
        """ Create the mtcnn detector """
        self.verbose = verbose

        if self.verbose:
            print("Adding MTCNN detector")

        self.kwargs = mtcnn_kwargs

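        # the three MTCNN networks are created inside a dedicated graph/session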
        mtcnn_graph = Graph()
        with mtcnn_graph.as_default():
            mtcnn_session = Session()
            with mtcnn_session.as_default():
                pnet, rnet, onet = create_mtcnn(mtcnn_session, self.data_path)
        mtcnn_graph.finalize()

        self.kwargs["pnet"] = pnet
        self.kwargs["rnet"] = rnet
        self.kwargs["onet"] = onet
        self.initialized = True