Example #1
    def setUp(self):
        super(AxiomsTest, self).setUp()

        # Make a linear model for testing.
        graph_lin = Graph()

        with graph_lin.as_default():
            x_lin = placeholder('float32', (None, self.input_size))
            y_lin = x_lin @ self.model_lin_weights + self.model_lin_bias

        self.model_lin = ModelWrapper(graph_lin, x_lin, y_lin)

        # Make a deeper model for testing.
        graph_deep = Graph()

        with graph_deep.as_default():
            x_deep = placeholder('float32', (None, self.input_size))
            z1_deep = (x_deep @ self.model_deep_weights_1 +
                       self.model_deep_bias_1)
            z2_deep = relu(z1_deep)
            z3_deep = (z2_deep @ self.model_deep_weights_2 +
                       self.model_deep_bias_2)
            z4_deep = relu(z3_deep)
            y_deep = (z4_deep @ self.model_deep_weights_3 +
                      self.model_deep_bias_3)

        self.model_deep = ModelWrapper(graph_deep, x_deep, y_deep,
                                       dict(layer2=z2_deep, layer3=z3_deep))

        self.layer2 = 'layer2'
        self.layer3 = 'layer3'
Example #2
def _dojob(ready, e, queue):
    prctl.set_name('AI detector - do job')
    global session1, session2, ip_model, mac_model
    ip_graph = Graph()
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    with ip_graph.as_default():
        session1 = Session(config=config)
        with session1.as_default():
            ip_model = K.models.load_model(
                'gru_ip_4tuple.hdf5', custom_objects={'attention': attention})
            ip_model._make_predict_function()

    mac_graph = Graph()
    with mac_graph.as_default():
        session2 = Session(config=config)
        with session2.as_default():
            mac_model = K.models.load_model(
                'gru_mac_4tuple.hdf5', custom_objects={'attention': attention})
            mac_model._make_predict_function()
    ready.set()
    print('set ready')
    last = time.time()
    global ignore_packet
    while not e.is_set():
        if not queue.empty():
            obj = queue.get()
            if (obj[0], obj[1]) in ignore_packet:
                if obj[3] <= ignore_packet[(obj[0], obj[1])]:
                    continue
            feature_extract((obj[2], obj[3]))
        if time.time() - last >= polling_interval:
            print(queue.qsize())
            global flow_statics, src_addr_list, memory_data

            # calculate features in last 5 seconds
            result = calculate_feature(flow_statics)
            memory_data.pop(0)
            memory_data.append(result)
            t_run_exp = threading.Thread(target=_run_exp,
                                         args=(
                                             result,
                                             src_addr_list,
                                             memory_data,
                                         ))
            t_run_exp.start()
            t_run_exp.join()
            flow_statics = {}
            src_addr_list = {}
            last = time.time()
    K.backend.clear_session()
    del ip_model
    del mac_model
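In the project this snippet comes from, `_run_exp` is defined elsewhere; the reason `session1`, `session2`, `ip_model`, and `mac_model` are globals is that predictions must run under the graph and session each model was loaded into. A hypothetical sketch of that pattern (the argument names mirror the `threading.Thread` call above; the input handling is a placeholder):

def _run_exp(result, src_addr_list, memory_data):
    global session1, session2, ip_model, mac_model
    # TF1/Keras raises "Tensor is not an element of this graph" unless the
    # loading graph and session are re-entered before predict().
    with session1.graph.as_default():
        with session1.as_default():
            ip_scores = ip_model.predict(result)  # placeholder input
    with session2.graph.as_default():
        with session2.as_default():
            mac_scores = mac_model.predict(result)  # placeholder input
    return ip_scores, mac_scores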
Example #3
def _dojob(e, queue):
    prctl.set_name('AI detector - do job')
    global session1, session2, ip_model, mac_model
    ip_graph = Graph()
    with ip_graph.as_default():
        session1 = Session()
        with session1.as_default():
            ip_model = K.models.load_model(ip_model_path)
            ip_model._make_predict_function()

    mac_graph = Graph()
    with mac_graph.as_default():
        session2 = Session()
        with session2.as_default():
            mac_model = K.models.load_model(mac_model_path)
            mac_model._make_predict_function()
    #pcap_file = open('test.pcap', 'wb')
    #writer = dpkt.pcap.Writer(pcap_file)
    global total_tp, total_tn, total_fp, total_fn 
    total_tp = 0
    total_tn = 0
    total_fp = 0
    total_fn = 0
    last = time.time()
    #count_lock = threading.Lock()
    while not e.is_set():
        if not queue.empty():
            obj = queue.get()
            feature_extract(obj)
            current = obj[1]
        else:
            current = time.time()
        if current - last >= polling_interval:
            global flow_statics, src_addr_list, attacker
            #calculate features in last 5 seconds
            result = calculate_feature(flow_statics)
            memory_data.pop(0)
            memory_data.append(result)
            #t_run_exp = threading.Thread(target=_run_exp, args=(flow_statics, src_addr_list, attacker, memory_data, count_lock, ))
            t_run_exp = threading.Thread(target=_run_exp, args=(flow_statics, src_addr_list, attacker, memory_data, ))
            t_run_exp.start()
            t_run_exp.join()
            flow_statics = {}
            src_addr_list = []
            attacker = []
            last = current
            
    K.backend.clear_session()
    del ip_model
    del mac_model
Example #4
def load():
    global session
    global graph
    global model
    global data_result

    data_result = DataResult(None, None)

    with open(script_dir + '/../temp/processed_data.json', 'r') as output:
        json_data = json.load(output)
        data_result.loadJSON(json_data)

    graph = Graph()
    with graph.as_default():
        session = Session(graph=graph)
        with session.as_default():
            temp_encoder = Encoder(data_result.input_data)
            temp_decoder = Decoder(data_result.output_data, temp_encoder)
            temp_model = Model([temp_encoder.inputs, temp_decoder.inputs],
                               temp_decoder.outputs)
            temp_model.compile(optimizer='rmsprop',
                               loss='categorical_crossentropy')
            temp_model.load_weights(
                os.path.dirname(__file__) + '/../model_weights.h5')

            model = temp_model
Example #5
    def test_internal_slice_multiple_layers(self):
        graph = Graph()

        with graph.as_default():
            x1 = tf.placeholder('float32', (None, 5))
            z1 = x1 @ tf.random.normal((5, 6))
            x2 = tf.placeholder('float32', (None, 1))
            z2 = x2 @ tf.random.normal((1, 2))
            z3 = z2 @ tf.random.normal((2, 4))
            z4 = tf.concat([z1, z3], axis=1)
            z5 = z4 @ tf.random.normal((10, 7))
            y = z5 @ tf.random.normal((7, 3))

        model = ModelWrapper(
            graph, [x1, x2], y, dict(cut_layer1=z1, cut_layer2=z2))

        infl = InternalInfluence(
            model, Cut(['cut_layer1', 'cut_layer2']), ClassQoI(1), PointDoi())

        res = infl.attributions(
            [np.array([[1., 2., 3., 4., 5.]]),
             np.array([[1.]])])

        self.assertEqual(len(res), 2)
        self.assertEqual(res[0].shape, (1, 6))
        self.assertEqual(res[1].shape, (1, 2))
Example #6
def _parse_graph_topologic_order(graph_def, output_nodes=None):
    # https://en.wikipedia.org/wiki/Topological_sorting
    if output_nodes is None:
        output_nodes = _parse_graph_layers(graph_def)[-1]
    graph = Graph()
    with graph.as_default():
        import_graph_def(graph_def, name='')

    queue = deepcopy(output_nodes)
    visited = set()  # temporary mark
    perm_visit = set()  # Permanent mark
    ops_torder = []  # "L" in the Wikipedia pseudocode: the sorted node names

    def visit(node_name):
        if node_name in perm_visit:
            return
        if node_name in visited:
            raise ValueError("Input graph is not a DAG")

        visited.add(node_name)
        op = graph.get_operation_by_name(node_name)

        for tensor in op.inputs:
            visit(tensor.op.name)

        perm_visit.add(node_name)
        ops_torder.insert(0, node_name)

    while queue:
        node_name = queue.pop(0)
        visit(node_name)

    # ops_bfs.reverse()
    return ops_torder, output_nodes
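`_parse_graph_topologic_order` expects a `GraphDef` protobuf; a small usage sketch, assuming a frozen graph at the hypothetical path `frozen_model.pb`:

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('frozen_model.pb', 'rb') as f:  # hypothetical path
    graph_def.ParseFromString(f.read())

# Returns node names in reverse-topological order (each op appears before
# the ops it depends on), plus the output node names that were used.
ops_torder, output_nodes = _parse_graph_topologic_order(graph_def)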
Example #7
def buildTripletPairs(datasets, filename):
    embed_graph = Graph()
    triplet_paths_array = []
    with embed_graph.as_default():
        X_input = Input((height, width, 3))
        X = InceptionResnetV2(X_input)
        model = Model(inputs=X_input, outputs=X)
        with tf.Session(graph=embed_graph) as sess:
            sess.run(tf.global_variables_initializer())
            for file_inc in range(max_nrof_epochs):
                image_paths, num_per_class = sample_people(
                    datasets, people_per_batch, images_per_person)
                nrof_examples = people_per_batch * images_per_person
                emb_array = np.zeros((nrof_examples, embedding_size))
                embeds = model.predict(np.stack(getImages(image_paths)))
                for loc in range(nrof_examples):
                    emb_array[loc, :] = embeds[loc]
                triplets, nrof_random_negs, nrof_triplets = select_triplets(
                    emb_array, num_per_class, image_paths, people_per_batch,
                    0.2)
                triplet_paths = list(itertools.chain(*triplets))
                triplet_paths_array.extend(
                    np.reshape(np.expand_dims(np.array(triplet_paths), 1),
                               (-1, 3)))

    np.savetxt(filename,
               triplet_paths_array,
               fmt=("%s", "%s", "%s"),
               delimiter=",")
Example #8
def load_KerasGraph(path):
    print("> ====== loading Keras model for classification")
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            input_shape = (28, 28, 1)
            num_classes = 6

            model = Sequential()
            model.add(Conv2D(32, kernel_size=(3, 3), input_shape=input_shape))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(Conv2D(32, kernel_size=(3, 3)))
            model.add(ReLU())
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Dropout(0.25))
            model.add(Flatten())
            model.add(Dense(128))
            model.add(ReLU())
            model.add(Dropout(0.5))
            model.add(Dense(num_classes))
            model.add(Softmax())
            model.load_weights(path)

            graph = tf.get_default_graph()
    print(">  ====== Keras model loaded")
    return model, graph, thread_session
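A matching predict helper has to re-enter the returned graph and session on the worker thread; a minimal sketch, assuming the (28, 28, 1) input shape used above (`predict_KerasGraph` is a hypothetical name):

import numpy as np

def predict_KerasGraph(model, graph, thread_session, image):
    # Re-enter the graph/session the model was loaded under; otherwise
    # TF1/Keras fails with "Tensor is not an element of this graph".
    with graph.as_default():
        with thread_session.as_default():
            batch = np.reshape(image, (1, 28, 28, 1)).astype('float32')
            return model.predict(batch)[0]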
Example #9
    def loadModels(self, path):
        """Load models from the json file given."""
        logger.info('Loading models from %s', path)

        if path == self.data_location:
            # Already loaded from this path; nothing to do.
            return

        self.models = []

        if not os.path.exists(path):
            logger.error('Could not find file: %s', path)
            return

        self.data_location = path
        with open(path, 'r') as f:
            data = json.load(f)

        if 'models' not in data:
            logger.error('No models defined in data')
            return

        models = data['models']
        for i, model in enumerate(models):
            if not model:
                self.models.append(None)
                continue
            vertices = data['joint_map'][i]
            graph = Graph()
            with graph.as_default():
                session = Session()
                with session.as_default():
                    meta = model.get('meta')
                    root = model.get('root')
                    saver = tf.train.import_meta_graph(meta)
                    saver.restore(session, tf.train.latest_checkpoint(root))

                    in_tensor = session.graph.get_tensor_by_name(
                        model['input'])
                    out_tensor = session.graph.get_tensor_by_name(
                        model['output'])

                    normalized = model['normalized']
                    verts_max, verts_min, trans_max, trans_min = None, None, None, None
                    if normalized:
                        trans_max = np.array(model['trans_max'])
                        trans_min = np.array(model['trans_min'])
                        verts_max = np.array(model['verts_max'])
                        verts_min = np.array(model['verts_min'])

                    tfmodel = TFModel(graph=session.graph,
                                      session=session,
                                      input_tensor=in_tensor,
                                      output_tensor=out_tensor,
                                      vertices=vertices,
                                      normalized=normalized,
                                      trans_max=trans_max,
                                      trans_min=trans_min,
                                      verts_max=verts_max,
                                      verts_min=verts_min)

                    self.models.append(tfmodel)
Example #10
    def __init__(self):
        # Set TF configuration
        config = tf.ConfigProto(gpu_options=tf.GPUOptions(
            per_process_gpu_memory_fraction=0.2))
        config.gpu_options.allow_growth = True

        self.graph = Graph()
        with self.graph.as_default():
            self.session = tf.Session(config=config)
            with self.session.as_default():

                # Load model
                K.set_learning_phase(0)
                #with open("./data/model.json", 'r') as json_file:
                #    loaded_model_json = json_file.read()
                #model = model_from_json(loaded_model_json)

                model = load_model("./data/model1.h5")
                K.set_learning_phase(0)

                # compiling the model is required before inference
                model.compile(optimizer='SGD',
                              loss='binary_crossentropy',
                              metrics=['acc'])
                K.set_learning_phase(0)

                self.model = model
Example #11
    def __init__(self):
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        model_dir_path = os.path.join(os.path.dirname(__file__), "../models")
        default_model_file_path = os.path.join(model_dir_path,
                                               "EAST_IC15+13_model.h5")
        json_file = open(os.path.join(model_dir_path, 'model.json'), 'r')
        # try:
        #     os.makedirs(FLAGS.output_dir)
        # except OSError as e:
        #     if e.errno != 17:
        #         raise

        # load trained model
        loaded_model_json = json_file.read()
        json_file.close()
        self.graph1 = Graph()
        self.tf_session = None
        with self.graph1.as_default():
            self.tf_session = Session()
            with self.tf_session.as_default():
                self.model = model_from_json(loaded_model_json,
                                             custom_objects={
                                                 'tf': tf,
                                                 'RESIZE_FACTOR': RESIZE_FACTOR
                                             })
                self.model.load_weights(default_model_file_path)
                print("**** loading " + default_model_file_path +
                      "......successful *******")

        # call super
        super(Phase1_0ImageLineContourExtractorHandler, self).__init__()
Example #12
    def __init__(self, n_users, steps):
        global topics, producers

        self.n_users = n_users
        self.steps = steps
        self.graph = Graph()

        with self.graph.as_default():
            self.sess = Session()

        K.set_session(self.sess)

        with self.graph.as_default():
            self.model = keras.Sequential()

        self.users = []
        futures = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            for i in range(n_users):
                futures.append(
                    executor.submit(User, topics[i], producers[i],
                                    self.n_users, self.steps))
        for i in futures:
            self.users.append(i.result())
        for i in range(self.n_users):
            producers[i].flush()
Example #13
    def __init__(self, n_users, num_steps):

        self.n_users = n_users

        self.steps = num_steps

        self.graph = Graph()

        with self.graph.as_default():

            self.sess = Session()

        K.set_session(self.sess)

        with self.graph.as_default():

            self.model = keras.Sequential()

        #self.users = [User(iplist[_],_) for _ in range(n_users)]
        self.users = []
        futures = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            for v in range(n_users):
                futures.append(executor.submit(User, iplist[v], v))
        for i in futures:
            self.users.append(i.result())
Example #14
File: views.py Project: vadgama/MLW
    def loadKerasModel(self, filePath):
        global PMMLMODELSTORAGE
        fO = pathlib.Path(filePath)
        keyToModel = fO.name.replace(fO.suffix, '')
        # print (PMMLMODELSTORAGE)
        try:
            model_graph = Graph()
            with model_graph.as_default():
                tf_session = Session()
                with tf_session.as_default():
                    seqModel = load_model(filePath)

            tempDictModel = {
                'modelObj': seqModel,
                'model_graph': model_graph,
                'modelGeneratedFrom': 'Keras',
                'tf_session': tf_session,
                'inputShape': seqModel.input_shape,
            }
            PMMLMODELSTORAGE[keyToModel] = tempDictModel
            messageToWorld = "Model Loaded Successfully"
            reStat = 200
        except Exception:
            messageToWorld = "Model load failed, please connect with admin"
            keyToModel = None
            reStat = 500
        resultResp = {'message': messageToWorld, 'keytoModel': keyToModel}
        return JsonResponse(resultResp, status=reStat)
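The graph and session are stored alongside the model precisely so a later request can predict with it; a hypothetical companion method (not part of the original view) that reads `PMMLMODELSTORAGE` back:

    def predictWithKerasModel(self, keyToModel, inputData):
        storedModel = PMMLMODELSTORAGE[keyToModel]
        # Re-enter the graph/session captured at load time.
        with storedModel['model_graph'].as_default():
            with storedModel['tf_session'].as_default():
                prediction = storedModel['modelObj'].predict(inputData)
        return JsonResponse({'prediction': prediction.tolist()}, status=200)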
Example #15
def rsna_evaluate(model_path, backbone, anchor_boxes, score_threshold,
                  nms_threshold, rsna_path, rsna_test_json, anchor_scales):
    """ Evaluate an json using retinanet model, print mAP based on GT and generate kaggle submission file.
    """

    save_path = None
    from tensorflow import Graph, Session
    graph1 = Graph()
    with graph1.as_default():
        session1 = Session()
        with session1.as_default():
            model2 = models.load_model(model_path,
                                       backbone_name=backbone,
                                       convert=True,
                                       nms_threshold=nms_threshold,
                                       anchors_ratios=anchor_boxes,
                                       anchors_scales=anchor_scales)
            # create the generator
            generator = create_generator(rsna_test_json, rsna_path)

            mean_ap = evaluate(generator,
                               model2,
                               iou_threshold=0.5,
                               score_threshold=score_threshold,
                               max_detections=100,
                               generate_kaggle_output='teste.csv')
            del model2
            import gc
            gc.collect()
        with open('output_map.txt', 'a') as output_map:
            output_map.write('{} : {} \n'.format(model_path, mean_ap))
Example #16
    def pretrainSingleClass(self, modelname, dataset, class_name, batch_size,
                            epochs, lr):
        #K.clear_session()
        graph2 = Graph()
        with graph2.as_default():
            session2 = Session()
            with session2.as_default():

                reader = LFWReader(dir_images=dataset, class_name=class_name)
                gen_train = TripletGeneratorSingleID(reader)
                gen_test = TripletGeneratorSingleID(reader)
                embedding_model, triplet_model = GetModel()
                for layer in embedding_model.layers[-3:]:
                    layer.trainable = True

                for layer in embedding_model.layers[:-3]:
                    layer.trainable = False
                triplet_model.compile(loss=None, optimizer=Adam(lr))

                history = triplet_model.fit_generator(gen_train,
                                                      validation_data=gen_test,
                                                      epochs=epochs,
                                                      verbose=1,
                                                      steps_per_epoch=50,
                                                      validation_steps=5)

                embedding_model.save_weights('./trained-models/weights/' +
                                             modelname + '.h5')
                self.embeddingmodel(modelname, dataset)
                K.get_session()
Example #17
 def __init__(self):
     """
     Class Initialization.
     Performs initialization of all the models, using different Tensorflow sessions so one can use multiple models at
     the same time
     """
     self.hog_svm = HogSVM()
     self.cnn_graph = Graph()
     with self.cnn_graph.as_default():
         self.cnn_session = Session()
         with self.cnn_session.as_default():
             self.cnn_4layer = Cnn4Layer()
     self.vgg19_graph = Graph()
     with self.vgg19_graph.as_default():
         self.vgg_session = Session()
         with self.vgg_session.as_default():
             self.vgg19 = VGG_19()
Example #18
 def __init__(self):
     self.graph = Graph()
     with self.graph.as_default():
         self.session = Session(graph=self.graph)
         with self.session.as_default():
             self.model = insightface.model_zoo.get_model(
                 'retinaface_r50_v1')
             self.model.prepare(ctx_id=-1, nms=0.4)
Example #19
def load_KerasGraph(path):
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            model = keras.models.load_model(path)
            # model._make_predict_function()
            graph = tf.get_default_graph()
    return model, graph, thread_session
Example #20
    def __init__(self, n_users, steps):
        imports()
        preprocess(n_users, steps)

        self.graph = Graph()

        with self.graph.as_default():
            self.sess = Session()
Example #21
    def __init__(self, ip, user_id=None):

        self.user_id = user_id

        self.graph = Graph()

        with self.graph.as_default():

            self.sess = Session()
Example #22
def graphandsession():

    model_graph = Graph()
    with model_graph.as_default():
        tf_session = Session()
        with tf_session.as_default():
            mdl = load_model("C:/Users/Ritik/Desktop/Machine Learning Practice/Bank Loan Classification/DjangoAPI/MyAPI/models/model.h5")

    return (model_graph, tf_session, mdl)
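Callers then have to predict under the same graph and session the model was loaded into; a minimal usage sketch (the input shape is a placeholder):

import numpy as np

model_graph, tf_session, mdl = graphandsession()

features = np.zeros((1, 11), dtype='float32')  # placeholder input shape
with model_graph.as_default():
    with tf_session.as_default():
        prediction = mdl.predict(features)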
Example #23
def load_from_file(filename):
    graph = Graph()
    with graph.as_default():
        sess = utils.get_nn_config()
        with sess.as_default():
            model = load_model(filename, compile=False)
            compile_model(model)
            return model, graph, sess
Example #24
    def __init__(self):

        self.graph = Graph()

        with self.graph.as_default():

            self.sess = Session()

        K.set_session(self.sess)
Example #25
def load_KerasGraph(path):
    print("> ====== loading Keras model for classification")
    thread_graph = Graph()
    with thread_graph.as_default():
        thread_session = Session()
        with thread_session.as_default():
            model = keras.models.load_model(path)
            graph = tf.get_default_graph()
    print(">  ====== Keras model loaded")
    return model, graph, thread_session
Example #26
    def __init__(self, class_number: int) -> None:
        self.classes = [f"class_{i}" for i in range(class_number)]
        self.is_binary = class_number == 2

        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session()
            with self.session.as_default():
                model = Sequential()

                # Basic LeNet/AlexNet convolution layers

                model.add(
                    Conv2D(16,
                           kernel_size=(11, 11),
                           strides=(4, 4),
                           input_shape=(self.input_size[0], self.input_size[1],
                                        1),
                           kernel_initializer='he_uniform',
                           bias_initializer=Constant(0.1)))
                model.add(Activation('relu'))
                # consider replacing pooling layers with batch normalization
                model.add(MaxPooling2D(pool_size=(3, 3)))

                model.add(
                    Conv2D(16,
                           kernel_size=(5, 5),
                           strides=(1, 1),
                           kernel_initializer='he_uniform',
                           bias_initializer=Constant(0.1)))
                model.add(Activation('relu'))
                model.add(MaxPooling2D(pool_size=(3, 3)))

                # Flatten to 1D Feature Set
                model.add(Flatten())

                # Basic 2 layer classification with high dropout
                model.add(
                    Dense(16,
                          kernel_initializer='random_uniform',
                          bias_initializer=Constant(0.1)))
                model.add(Activation('relu'))
                # High dropout to curb overfitting
                model.add(Dropout(0.5))
                model.add(Dense(1 if self.is_binary else class_number))
                model.add(
                    Activation('sigmoid' if self.is_binary else 'softmax'))

                self.model = model
                # compile model
                self.model.compile(loss='binary_crossentropy' if self.is_binary
                                   else 'categorical_crossentropy',
                                   optimizer=SGD(lr=0.01, decay=1e-6),
                                   metrics=['accuracy'])
Example #27
def load_single_model(path):
    graph = Graph()
    with graph.as_default():
        session = Session()
        with session.as_default():
            model = load_model(path)
            model._make_predict_function()

            MODELS.append(model)
            GRAPHS.append(graph)
            SESSIONS.append(session)
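Since the loader appends each model, graph, and session at the same index, a prediction helper can pair them back up; a minimal sketch (`predict_with_model` is a hypothetical name):

def predict_with_model(index, batch):
    # Every model must be used under the graph/session it was loaded with.
    with GRAPHS[index].as_default():
        with SESSIONS[index].as_default():
            return MODELS[index].predict(batch)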
Example #28
 def __setstate__(self, state: Dict):
     # Pop the serialized model bytes first, so the raw HDF5 blob is not
     # copied onto the instance by the __dict__ update below.
     model_str = state.pop("model_str")
     self.__dict__.update(state)
     self.graph = Graph()
     with NamedTemporaryFile(suffix=".hdf5", delete=True) as fd:
         fd.write(model_str)
         fd.flush()
         with self.graph.as_default():
             self.session = Session(graph=self.graph)
             with self.session.as_default():
                 self.model = load_model(fd.name, compile=False)
     self.trainer = None
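A plausible `__getstate__` counterpart (not shown in the original source) would serialize the model to a temporary HDF5 file and drop the unpicklable TF objects, assuming the instance holds `model`, `graph`, `session`, and `trainer` as above:

 def __getstate__(self) -> Dict:
     state = self.__dict__.copy()
     with NamedTemporaryFile(suffix=".hdf5", delete=True) as fd:
         with self.graph.as_default():
             with self.session.as_default():
                 self.model.save(fd.name)
         fd.seek(0)
         state["model_str"] = fd.read()
     # Live TF objects cannot be pickled; carry only the raw model bytes.
     for key in ("model", "graph", "session", "trainer"):
         state.pop(key, None)
     return state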
Example #29
 def load(self):
     print('loading....')
     self.__thread_graph = Graph()
     with self.__thread_graph.as_default():
         self.__thread_session = Session()
         with self.__thread_session.as_default():
             self.__model = keras.models.load_model('weights-28-0.77.h5', compile=False)
             self.__graph = tf.compat.v1.get_default_graph()
             self.__predictLegal = True
             print('loading done.')
     return self.__model, self.__graph, self.__thread_session
Example #30
    def __init__(self):
        self.basepath = os.path.dirname(__file__)
        self.Imgs_Test = np.ndarray((1, 512, 512, 1), dtype=np.float32)
        self.Predict = np.ndarray((512, 512), dtype=np.float32)

        keras.backend.clear_session()  # reset state so the model can be reloaded repeatedly
        self.graph = Graph()
        with self.graph.as_default():
            self.session = Session()
            with self.session.as_default():
                unet = myUnet3()
                self.model = unet.Model
                self.model.load_weights('Unet_Brain.hdf5')